From 098337d07e5cf008714396cb38a30a1218fbe1e0 Mon Sep 17 00:00:00 2001 From: yivantsov-atlassian <52448429+bianchi2@users.noreply.github.com> Date: Thu, 7 Dec 2023 11:02:52 +1100 Subject: [PATCH] Make test pod metadata and spec configurable (#727) * Make test pod metadata and spec configurable * Add docs --------- Co-authored-by: Yevhen Ivantsov --- src/main/charts/bamboo-agent/README.md | 89 ++-- src/main/charts/bamboo/README.md | 322 +++++++------- .../tests/test-application-status.yaml | 29 +- .../tests/test-database-connectivity.yaml | 29 +- .../tests/test-shared-home-permissions.yaml | 29 +- src/main/charts/bamboo/values.yaml | 15 +- src/main/charts/bitbucket/README.md | 414 +++++++++--------- .../tests/test-application-status.yaml | 29 +- .../tests/test-database-connectivity.yaml | 29 +- .../tests/test-shared-home-permissions.yaml | 29 +- src/main/charts/bitbucket/values.yaml | 13 + src/main/charts/confluence/README.md | 1 + .../tests/test-application-status.yaml | 29 +- .../tests/test-database-connectivity.yaml | 27 ++ .../tests/test-shared-home-permissions.yaml | 29 +- src/main/charts/confluence/values.yaml | 13 + src/main/charts/crowd/README.md | 268 ++++++------ .../tests/test-application-status.yaml | 27 ++ .../tests/test-shared-home-permissions.yaml | 27 ++ src/main/charts/crowd/values.yaml | 13 + src/main/charts/jira/README.md | 296 ++++++------- .../tests/test-application-status.yaml | 31 +- .../tests/test-database-connectivity.yaml | 29 +- .../tests/test-shared-home-permissions.yaml | 29 +- src/main/charts/jira/values.yaml | 13 + src/test/java/test/TestPodsTest.java | 189 ++++++++ src/test/java/test/helm/Helm.java | 35 ++ src/test/java/test/model/KubeResource.java | 2 + src/test/java/test/model/Pod.java | 16 + src/test/java/test/model/Product.java | 20 +- .../bamboo-agent/output.yaml | 1 + .../expected_helm_output/bamboo/output.yaml | 11 + .../bitbucket/output.yaml | 12 + .../confluence/output.yaml | 12 + .../expected_helm_output/crowd/output.yaml | 11 + .../expected_helm_output/jira/output.yaml | 13 +- 36 files changed, 1472 insertions(+), 709 deletions(-) create mode 100644 src/test/java/test/TestPodsTest.java create mode 100644 src/test/java/test/model/Pod.java diff --git a/src/main/charts/bamboo-agent/README.md b/src/main/charts/bamboo-agent/README.md index 6813f2665..0cb9a4945 100644 --- a/src/main/charts/bamboo-agent/README.md +++ b/src/main/charts/bamboo-agent/README.md @@ -4,8 +4,6 @@ A chart for installing Bamboo Data Center remote agents on Kubernetes -For installation please follow [the documentation](https://atlassian.github.io/data-center-helm-charts/). - **Homepage:** ## Source Code @@ -25,48 +23,51 @@ Kubernetes: `>=1.21.x-0` | Key | Type | Default | Description | |-----|------|---------|-------------| -| additionalContainers | list | `[]` | Additional container definitions that will be added to all Bamboo agent pods | -| additionalFiles | list | `[]` | Additional existing ConfigMaps and Secrets not managed by Helm that should be mounted into service container. Configuration details below (camelCase is important!): 'name' - References existing ConfigMap or Secret name. 'type' - 'configMap' or 'secret' 'key' - The file name. 'mountPath' - The destination directory in a container. 
VolumeMount and Volumes are added with this name and index position, for example; custom-config-0, keystore-2 | +| additionalContainers | list | `[]` | Additional container definitions that will be added to all Bamboo agent pods | +| additionalFiles | list | `[]` | Additional existing ConfigMaps and Secrets not managed by Helm that should be mounted into service container. Configuration details below (camelCase is important!): 'name' - References existing ConfigMap or Secret name. 'type' - 'configMap' or 'secret' 'key' - The file name. 'mountPath' - The destination directory in a container. VolumeMount and Volumes are added with this name and index position, for example; custom-config-0, keystore-2 | | additionalHosts | list | `[]` | Additional host aliases for each pod, equivalent to adding them to the /etc/hosts file. https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ | -| additionalInitContainers | list | `[]` | Additional initContainer definitions that will be added to all Bamboo agent pods | -| additionalLabels | object | `{}` | Additional labels that should be applied to all resources | -| affinity | object | `{}` | Standard K8s affinities that will be applied to all Bamboo agent pods | -| agent.additionalEnvironmentVariables | list | `[]` | Defines any additional environment variables to be passed to the Bamboo agent container. See https://bitbucket.org/atlassian-docker/docker-bamboo-agent-base for supported variables. | -| agent.additionalPorts | list | `[]` | Defines any additional ports for the Bamboo agent container. | -| agent.additionalVolumeMounts | object | `{}` | Defines any additional volume mounts for the Bamboo agent container. These can refer to existing volumes, or new volumes can be defined via 'volumes.additional'. | -| agent.containerSecurityContext | object | `{}` | Standard K8s field that holds security configurations that will be applied to a container. https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | -| agent.readinessProbe.command | string | `"/probe-readiness.sh"` | Command to use to check the readiness status. This is provided by the agent image. | -| agent.readinessProbe.failureThreshold | int | `30` | The number of consecutive failures of the Bamboo agent container readiness probe before the pod fails readiness checks. | -| agent.readinessProbe.initialDelaySeconds | int | `1` | The initial delay (in seconds) for the Bamboo agent container readiness probe, after which the probe will start running. When used in conjunction with a startupProbe this can be short. | -| agent.readinessProbe.periodSeconds | int | `5` | How often (in seconds) the Bamboo agent container readiness probe will run | -| agent.resources.container.requests.cpu | string | `"1"` | Initial CPU request by Bamboo agent pod | -| agent.resources.container.requests.memory | string | `"2G"` | Initial Memory request by Bamboo agent pod | -| agent.resources.jvm.maxHeap | string | `"512m"` | The maximum amount of heap memory that will be used by the Bamboo agent JVM | -| agent.resources.jvm.minHeap | string | `"256m"` | The minimum amount of heap memory that will be used by the Bamboo agent JVM | -| agent.securityContext.fsGroup | int | `2005` | The GID used by the Bamboo docker image GID will default to 2005 if not supplied and securityContextEnabled is set to true. This is intended to ensure that the shared-home volume is group-writeable by the GID used by the Bamboo container. 
However, this doesn't appear to work for NFS volumes due to a K8s bug: https://github.com/kubernetes/examples/issues/260 | -| agent.securityContextEnabled | bool | `true` | | +| additionalInitContainers | list | `[]` | Additional initContainer definitions that will be added to all Bamboo agent pods | +| additionalLabels | object | `{}` | Additional labels that should be applied to all resources | +| affinity | object | `{}` | Standard K8s affinities that will be applied to all Bamboo agent pods | +| agent.additionalEnvironmentVariables | list | `[]` | Defines any additional environment variables to be passed to the Bamboo agent container. See https://bitbucket.org/atlassian-docker/docker-bamboo-agent-base for supported variables. | +| agent.additionalPorts | list | `[]` | Defines any additional ports for the Bamboo agent container. | +| agent.additionalVolumeMounts | object | `{}` | Defines any additional volume mounts for the Bamboo agent container. These can refer to existing volumes, or new volumes can be defined via 'volumes.additional'. | +| agent.containerSecurityContext | object | `{}` | Standard K8s field that holds security configurations that will be applied to a container. https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | +| agent.readinessProbe.command | string | `"/probe-readiness.sh"` | Command to use to check the readiness status. This is provided by the agent image. | +| agent.readinessProbe.failureThreshold | int | `30` | The number of consecutive failures of the Bamboo agent container readiness probe before the pod fails readiness checks. | +| agent.readinessProbe.initialDelaySeconds | int | `1` | The initial delay (in seconds) for the Bamboo agent container readiness probe, after which the probe will start running. When used in conjunction with a startupProbe this can be short. | +| agent.readinessProbe.periodSeconds | int | `5` | How often (in seconds) the Bamboo agent container readiness probe will run | +| agent.resources.container.requests.cpu | string | `"1"` | Initial CPU request by Bamboo agent pod | +| agent.resources.container.requests.memory | string | `"2G"` | Initial Memory request by Bamboo agent pod | +| agent.resources.jvm.maxHeap | string | `"512m"` | The maximum amount of heap memory that will be used by the Bamboo agent JVM | +| agent.resources.jvm.minHeap | string | `"256m"` | The minimum amount of heap memory that will be used by the Bamboo agent JVM | +| agent.securityContext.fsGroup | int | `2005` | The GID used by the Bamboo docker image GID will default to 2005 if not supplied and securityContextEnabled is set to true. This is intended to ensure that the shared-home volume is group-writeable by the GID used by the Bamboo container. However, this doesn't appear to work for NFS volumes due to a K8s bug: https://github.com/kubernetes/examples/issues/260 | +| agent.securityContextEnabled | bool | `true` | Whether to apply security context to pod. | | agent.securityToken.secretKey | string | `"security-token"` | | -| agent.securityToken.secretName | string | `nil` | The name of the K8s Secret that contains the security token. When specified the token will be automatically utilised on agent boot. An Example of creating a K8s secret for the secret below: 'kubectl create secret generic --from-literal=security-token=' https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets | +| agent.securityToken.secretName | string | `nil` | The name of the K8s Secret that contains the security token. 
When specified the token will be automatically utilised on agent boot. An Example of creating a K8s secret for the secret below: 'kubectl create secret generic --from-literal=security-token=' https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets | | agent.server | string | `nil` | | -| agent.shutdown.command | string | `nil` | Custom command for a [preStop hook](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/). Undefined by default which means no pre-stop hook is being executed when an agent container needs to be stopped and deleted | -| agent.shutdown.terminationGracePeriodSeconds | int | `30` | The termination grace period for pods during shutdown. This should be set to the internal grace period, plus a small buffer to allow the JVM to fully terminate. | -| agent.startupProbe.command | string | `"/probe-startup.sh"` | Command to use to check the startup status. This is provided by the agent image. | -| agent.startupProbe.failureThreshold | int | `120` | The number of consecutive failures of the Bamboo agent container startup probe before the pod fails readiness checks. | -| agent.startupProbe.initialDelaySeconds | int | `1` | The initial delay (in seconds) for the Bamboo agent container startup probe, after which the probe will start running. | -| agent.startupProbe.periodSeconds | int | `1` | How often (in seconds) the Bamboo agent container startup probe will run | -| agent.topologySpreadConstraints | list | `[]` | Defines topology spread constraints for Bamboo agent pods. See details: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ | -| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | -| image.repository | string | `"atlassian/bamboo-agent-base"` | The Bamboo agent Docker image to use https://hub.docker.com/r/atlassian/bamboo-agent-base | -| image.tag | string | `""` | The docker image tag to be used - defaults to the Chart appVersion | -| nodeSelector | object | `{}` | Standard K8s node-selectors that will be applied to all Bamboo agent pods | -| podAnnotations | object | `{}` | Custom annotations that will be applied to all Bamboo agent pods | -| podLabels | object | `{}` | Custom labels that will be applied to all Bamboo agent pods | -| priorityClassName | string | `nil` | Priority class for the application pods. The PriorityClass with this name needs to be available in the cluster. For details see https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass | -| replicaCount | int | `1` | The initial number of Bamboo agent pods that should be started at deployment time. | -| schedulerName | string | `nil` | Standard K8s schedulerName that will be applied to all Bamboo agent pods. Check Kubernetes documentation on how to configure multiple schedulers: https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/#specify-schedulers-for-pods | -| serviceAccount.annotations | object | `{}` | Annotations to add to the ServiceAccount (if created) | -| serviceAccount.create | bool | `true` | Set to 'true' if a ServiceAccount should be created, or 'false' if it already exists. 
| -| serviceAccount.imagePullSecrets | list | `[]` | For Docker images hosted in private registries, define the list of image pull secrets that should be utilized by the created ServiceAccount https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | -| serviceAccount.name | string | `nil` | The name of the ServiceAccount to be used by the pods. If not specified, but the "serviceAccount.create" flag is set to 'true', then the ServiceAccount name will be auto-generated, otherwise the 'default' ServiceAccount will be used. https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server | -| tolerations | list | `[]` | Standard K8s tolerations that will be applied to all Bamboo agent pods | -| volumes | object | `{"additional":null}` | Defines additional volumes that should be applied to all Bamboo agent pods. Note that this will not create any corresponding volume mounts which need to be defined in bamboo.additionalVolumeMounts | \ No newline at end of file +| agent.shutdown.command | string | `nil` | Custom command for a [preStop hook](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/). Undefined by default which means no pre-stop hook is being executed when an agent container needs to be stopped and deleted | +| agent.shutdown.terminationGracePeriodSeconds | int | `30` | The termination grace period for pods during shutdown. This should be set to the internal grace period, plus a small buffer to allow the JVM to fully terminate. | +| agent.startupProbe.command | string | `"/probe-startup.sh"` | Command to use to check the startup status. This is provided by the agent image. | +| agent.startupProbe.failureThreshold | int | `120` | The number of consecutive failures of the Bamboo agent container startup probe before the pod fails readiness checks. | +| agent.startupProbe.initialDelaySeconds | int | `1` | The initial delay (in seconds) for the Bamboo agent container startup probe, after which the probe will start running. | +| agent.startupProbe.periodSeconds | int | `1` | How often (in seconds) the Bamboo agent container startup probe will run | +| agent.topologySpreadConstraints | list | `[]` | Defines topology spread constraints for Bamboo agent pods. See details: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ | +| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | +| image.repository | string | `"atlassian/bamboo-agent-base"` | The Bamboo agent Docker image to use https://hub.docker.com/r/atlassian/bamboo-agent-base | +| image.tag | string | `""` | The docker image tag to be used - defaults to the Chart appVersion | +| nodeSelector | object | `{}` | Standard K8s node-selectors that will be applied to all Bamboo agent pods | +| podAnnotations | object | `{}` | Custom annotations that will be applied to all Bamboo agent pods | +| podLabels | object | `{}` | Custom labels that will be applied to all Bamboo agent pods | +| priorityClassName | string | `nil` | Priority class for the application pods. The PriorityClass with this name needs to be available in the cluster. For details see https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass | +| replicaCount | int | `1` | The initial number of Bamboo agent pods that should be started at deployment time. | +| schedulerName | string | `nil` | Standard K8s schedulerName that will be applied to all Bamboo agent pods. 
Check Kubernetes documentation on how to configure multiple schedulers: https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/#specify-schedulers-for-pods | +| serviceAccount.annotations | object | `{}` | Annotations to add to the ServiceAccount (if created) | +| serviceAccount.create | bool | `true` | Set to 'true' if a ServiceAccount should be created, or 'false' if it already exists. | +| serviceAccount.imagePullSecrets | list | `[]` | For Docker images hosted in private registries, define the list of image pull secrets that should be utilized by the created ServiceAccount https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | +| serviceAccount.name | string | `nil` | The name of the ServiceAccount to be used by the pods. If not specified, but the "serviceAccount.create" flag is set to 'true', then the ServiceAccount name will be auto-generated, otherwise the 'default' ServiceAccount will be used. https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server | +| tolerations | list | `[]` | Standard K8s tolerations that will be applied to all Bamboo agent pods | +| volumes | object | `{"additional":null}` | Defines additional volumes that should be applied to all Bamboo agent pods. Note that this will not create any corresponding volume mounts which need to be defined in bamboo.additionalVolumeMounts | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/src/main/charts/bamboo/README.md b/src/main/charts/bamboo/README.md index 8a2bbd56f..7b6bf5928 100644 --- a/src/main/charts/bamboo/README.md +++ b/src/main/charts/bamboo/README.md @@ -4,8 +4,6 @@ A chart for installing Bamboo Data Center on Kubernetes -For installation please follow [the documentation](https://atlassian.github.io/data-center-helm-charts/). - **Homepage:** ## Source Code @@ -25,160 +23,166 @@ Kubernetes: `>=1.21.x-0` | Key | Type | Default | Description | |-----|------|---------|-------------| -| additionalConfigMaps | list | `[]` | Create additional ConfigMaps with given names, keys and content. Ther Helm release name will be used as a prefix for a ConfigMap name, fileName is used as subPath | -| additionalContainers | list | `[]` | Additional container definitions that will be added to all Bamboo pods | -| additionalFiles | list | `[]` | Additional existing ConfigMaps and Secrets not managed by Helm that should be mounted into service container. Configuration details below (camelCase is important!): 'name' - References existing ConfigMap or secret name. 'type' - 'configMap' or 'secret' 'key' - The file name. 'mountPath' - The destination directory in a container. 
VolumeMount and Volumes are added with this name and index position, for example; custom-config-0, keystore-2 | -| additionalInitContainers | list | `[]` | Additional initContainer definitions that will be added to all Bamboo pods | -| additionalLabels | object | `{}` | Additional labels that should be applied to all resources | -| affinity | object | `{}` | Standard K8s affinities that will be applied to all Bamboo pods | -| atlassianAnalyticsAndSupport.analytics.enabled | bool | `true` | Mount ConfigMap with selected Helm chart values as a JSON which DC products will read and send analytics events to Atlassian data pipelines | -| atlassianAnalyticsAndSupport.helmValues.enabled | bool | `true` | Mount ConfigMap with selected Helm chart values as a YAML file which can be optionally including to support.zip | -| bamboo.accessLog.localHomeSubPath | string | `"log"` | The subdirectory within the local-home volume where access logs should be stored. | -| bamboo.accessLog.mountPath | string | `"/opt/atlassian/bamboo/logs"` | The path within the Bamboo container where the local-home volume should be mounted in order to capture access logs. | -| bamboo.additionalBundledPlugins | list | `[]` | Specifies a list of additional Bamboo plugins that should be added to the Bamboo container. Note plugins installed via this method will appear as bundled plugins rather than user plugins. These should be specified in the same manner as the 'additionalLibraries' property. Additional details: https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ NOTE: only .jar files can be loaded using this approach. OBR's can be extracted (unzipped) to access the associated .jar An alternative to this method is to install the plugins via "Manage Apps" in the product system administration UI. | -| bamboo.additionalCertificates | object | `{"customCmd":null,"secretName":null}` | Certificates to be added to Java truststore. Provide reference to a secret that contains the certificates | -| bamboo.additionalEnvironmentVariables | list | `[]` | Defines any additional environment variables to be passed to the Bamboo container. See https://hub.docker.com/r/atlassian/bamboo-server for supported variables. | -| bamboo.additionalJvmArgs | list | `[]` | Specifies a list of additional arguments that can be passed to the Bamboo JVM, e.g. system properties. | -| bamboo.additionalLibraries | list | `[]` | Specifies a list of additional Java libraries that should be added to the Bamboo container. Each item in the list should specify the name of the volume that contains the library, as well as the name of the library file within that volume's root directory. Optionally, a subDirectory field can be included to specify which directory in the volume contains the library file. Additional details: https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ | -| bamboo.additionalPorts | list | `[]` | Defines any additional ports for the Bamboo container. | -| bamboo.additionalVolumeClaimTemplates | list | `[]` | Defines additional volumeClaimTemplates that should be applied to the Bamboo pod. Note that this will not create any corresponding volume mounts; those needs to be defined in bamboo.additionalVolumeMounts | -| bamboo.additionalVolumeMounts | list | `[]` | Defines any additional volumes mounts for the Bamboo container. These can refer to existing volumes, or new volumes can be defined via 'volumes.additional'. 
| -| bamboo.brokerUrl | string | `nil` | Override the server/agent broker URL; this is optional. | -| bamboo.containerSecurityContext | object | `{}` | Standard K8s field that holds security configurations that will be applied to a container. https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | -| bamboo.disableAgentAuth | bool | `false` | Whether to disable agent authentication. Setting this to true skips the agent approval step in the UI. For more information see: https://confluence.atlassian.com/bamboo/agent-authentication-289277196.html The default is false. | -| bamboo.forceConfigUpdate | bool | `false` | The Docker entrypoint.py generates application configuration on first start; not all of these files are regenerated on subsequent starts. By default, bamboo.cfg.xml is generated only once. Set `forceConfigUpdate` to true to change this behavior. | -| bamboo.import | object | `{"path":null,"type":"clean"}` | Bamboo can optionally import an existing exported dataset on first-run. These optional values can configure the import file or skip this stage entirely. For more details on importing and exporting see the documentation: https://confluence.atlassian.com/bamboo/exporting-data-for-backup-289277255.html https://confluence.atlassian.com/bamboo/importing-data-from-backup-289277260.html | -| bamboo.import.path | string | `nil` | Path to the existing export to import to the new installation. This should be accessible by the cluster node; e.g. via the shared-home or `additionalVolumeMounts` below. | -| bamboo.import.type | string | `"clean"` | Import type. Valid values are `clean` (for a new install) or `import`, in which case you should provide the file path. The default is `clean`. | -| bamboo.jmsService.annotations | object | `{}` | Additional annotations to apply to the JMS Service | -| bamboo.jmsService.enabled | bool | `false` | Whether to create a separate Service for JMS Agent traffic | -| bamboo.jmsService.loadBalancerIP | string | `nil` | Use specific loadBalancerIP. Only applies to service type LoadBalancer. | -| bamboo.jmsService.port | int | `54663` | The port on which the Bamboo K8s Service will listen for Agent traffic | -| bamboo.jmsService.type | string | `"ClusterIP"` | The type of K8s service to use for JMS | -| bamboo.license.secretKey | string | `"license"` | The key (default 'licenseKey') in the Secret used to store the license information | -| bamboo.license.secretName | string | `nil` | The secret that contains the license information | -| bamboo.livenessProbe.enabled | bool | `false` | Whether to apply the livenessProbe check to pod. | -| bamboo.livenessProbe.failureThreshold | int | `12` | The number of consecutive failures of the Bamboo container liveness probe before the pod fails liveness checks. | -| bamboo.livenessProbe.initialDelaySeconds | int | `60` | Time to wait before starting the first probe | -| bamboo.livenessProbe.periodSeconds | int | `5` | How often (in seconds) the Bamboo container liveness probe will run | -| bamboo.livenessProbe.timeoutSeconds | int | `1` | Number of seconds after which the probe times out | -| bamboo.ports.http | int | `8085` | The port on which the Bamboo container listens for HTTP traffic | -| bamboo.ports.jms | int | `54663` | JMS port | -| bamboo.readinessProbe.customProbe | object | `{}` | Custom ReadinessProbe to override the default /status httpGet | -| bamboo.readinessProbe.enabled | bool | `true` | Whether to apply the readinessProbe check to pod. 
| -| bamboo.readinessProbe.failureThreshold | int | `30` | The number of consecutive failures of the Bamboo container readiness probe before the pod fails readiness checks. | -| bamboo.readinessProbe.initialDelaySeconds | int | `30` | The initial delay (in seconds) for the Bamboo container readiness probe, after which the probe will start running. | -| bamboo.readinessProbe.periodSeconds | int | `10` | How often (in seconds) the Bamboo container readiness probe will run | -| bamboo.readinessProbe.timeoutSeconds | int | `1` | Number of seconds after which the probe times out | -| bamboo.resources.container.requests.cpu | string | `"2"` | Initial CPU request by Bamboo pod | -| bamboo.resources.container.requests.memory | string | `"2G"` | Initial Memory request by Bamboo pod | -| bamboo.resources.jvm.maxHeap | string | `"1024m"` | The maximum amount of heap memory that will be used by the Bamboo JVM | -| bamboo.resources.jvm.minHeap | string | `"512m"` | The minimum amount of heap memory that will be used by the Bamboo JVM | -| bamboo.securityContext.fsGroup | int | `2005` | The GID used by the Bamboo docker image GID will default to 2005 if not supplied and securityContextEnabled is set to true. This is intended to ensure that the shared-home volume is group-writeable by the GID used by the Bamboo container. However, this doesn't appear to work for NFS volumes due to a K8s bug: https://github.com/kubernetes/examples/issues/260 | -| bamboo.securityContextEnabled | bool | `true` | | -| bamboo.securityToken.secretKey | string | `"security-token"` | The key (default `secretKey`) in the Secret used to store the Bamboo shared key. | -| bamboo.securityToken.secretName | string | `nil` | The name of the K8s Secret that contains the security token. When specified the token will overrided the generated one. This secret should also be shared with the agent deployment. An Example of creating a K8s secret for the secret below: 'kubectl create secret generic --from-literal=security-token=' https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets | -| bamboo.service.annotations | object | `{}` | Additional annotations to apply to the Service | -| bamboo.service.contextPath | string | `nil` | The Tomcat context path that Bamboo will use. The ATL_TOMCAT_CONTEXTPATH will be set automatically. | -| bamboo.service.loadBalancerIP | string | `nil` | Use specific loadBalancerIP. Only applies to service type LoadBalancer. | -| bamboo.service.port | int | `80` | The port on which the Bamboo K8s Service will listen for http traffic | -| bamboo.service.sessionAffinity | string | `"None"` | Session affinity type. If you want to make sure that connections from a particular client are passed to the same pod each time, set sessionAffinity to ClientIP. See: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity | -| bamboo.service.sessionAffinityConfig | object | `{"clientIP":{"timeoutSeconds":null}}` | Session affinity configuration | -| bamboo.service.sessionAffinityConfig.clientIP.timeoutSeconds | string | `nil` | Specifies the seconds of ClientIP type session sticky time. The value must be > 0 && <= 86400 (for 1 day) if ServiceAffinity == "ClientIP". Default value is 10800 (for 3 hours). | -| bamboo.service.type | string | `"ClusterIP"` | The type of K8s service to use for Bamboo | -| bamboo.setPermissions | bool | `true` | Boolean to define whether to set local home directory permissions on startup of Bamboo container. Set to 'false' to disable this behaviour. 
| -| bamboo.shutdown.command | string | `"/shutdown-wait.sh"` | By default pods will be stopped via a [preStop hook](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/), using a script supplied by the Docker image. If any other shutdown behaviour is needed it can be achieved by overriding this value. Note that the shutdown command needs to wait for the application shutdown completely before exiting; see [the default command](https://bitbucket.org/atlassian-docker/docker-bamboo-server/src/master/shutdown-wait.sh) for details. | -| bamboo.shutdown.terminationGracePeriodSeconds | int | `30` | The termination grace period for pods during shutdown. This should be set to the internal grace period, plus a small buffer to allow the JVM to fully terminate. | -| bamboo.startupProbe.enabled | bool | `false` | Whether to apply the startupProbe check to pod. | -| bamboo.startupProbe.failureThreshold | int | `120` | The number of consecutive failures of the Bamboo container startup probe before the pod fails startup checks. | -| bamboo.startupProbe.initialDelaySeconds | int | `60` | Time to wait before starting the first probe | -| bamboo.startupProbe.periodSeconds | int | `5` | How often (in seconds) the Bamboo container startup probe will run | -| bamboo.sysadminCredentials.displayNameSecretKey | string | `"displayName"` | The key in the Kubernetes Secret that contains the sysadmin display name | -| bamboo.sysadminCredentials.emailAddressSecretKey | string | `"emailAddress"` | The key in the Kubernetes Secret that contains the sysadmin email address | -| bamboo.sysadminCredentials.passwordSecretKey | string | `"password"` | The key in the Kubernetes Secret that contains the sysadmin password | -| bamboo.sysadminCredentials.secretName | string | `nil` | The secret that contains the admin user information | -| bamboo.sysadminCredentials.usernameSecretKey | string | `"username"` | The key in the Kubernetes Secret that contains the sysadmin username | -| bamboo.topologySpreadConstraints | list | `[]` | Defines topology spread constraints for Bamboo pods. See details: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ | -| bamboo.unattendedSetup | bool | `true` | To skip the setup wizard post deployment set this property to 'true' and ensure values for all 'REQUIRED' and 'UNATTENDED-SETUP' stanzas (see banner of this file) have been supplied. For release 1.0.0 this value is by default set to 'true' and should not be changed. | -| bamboo.useHelmReleaseNameAsContainerName | bool | `false` | Whether the main container should acquire helm release name. By default the container name is `bamboo` which corresponds to the name of the Helm Chart. | -| database.credentials.passwordSecretKey | string | `"password"` | The key ('password') in the Secret used to store the database login password | -| database.credentials.secretName | string | `nil` | The name of the K8s Secret that contains the database login credentials. If the secret is specified, then the credentials will be automatically utilised on Bamboo startup. If the secret is not provided, then the credentials will need to be provided via the browser during manual configuration post deployment. 
Example of creating a database credentials K8s secret below: 'kubectl create secret generic --from-literal=username= \ --from-literal=password=' https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets | -| database.credentials.usernameSecretKey | string | `"username"` | The key ('username') in the Secret used to store the database login username | -| database.type | string | `nil` | The database type that should be used. If not specified, then it will need to be provided via the browser during manual configuration post deployment. Valid values include: - 'postgresql' - 'mysql' - 'oracle12c' - 'mssql' https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#databasetype | -| database.url | string | `nil` | The jdbc URL of the database. If not specified, then it will need to be provided via the browser during manual configuration post deployment. Example URLs include: - 'jdbc:postgresql://:5432/' - 'jdbc:mysql:///' - 'jdbc:sqlserver://:1433;databaseName=' - 'jdbc:oracle:thin:@:1521:' https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#databaseurl | -| fluentd.command | string | `nil` | The command used to start Fluentd. If not supplied the default command will be used: "fluentd -c /fluentd/etc/fluent.conf -v" Note: The custom command can be free-form, however pay particular attention to the process that should ultimately be left running in the container. This process should be invoked with 'exec' so that signals are appropriately propagated to it, for instance SIGTERM. An example of how such a command may look is: " && && exec " | -| fluentd.customConfigFile | bool | `false` | Set to 'true' if a custom config (see 'configmap-fluentd.yaml' for default) should be used for Fluentd. If enabled this config must supplied via the 'fluentdCustomConfig' property below. | -| fluentd.elasticsearch.enabled | bool | `true` | Set to 'true' if Fluentd should send all log events to an Elasticsearch service. | -| fluentd.elasticsearch.hostname | string | `"elasticsearch"` | The hostname of the Elasticsearch service that Fluentd should send logs to. | -| fluentd.elasticsearch.indexNamePrefix | string | `"bamboo"` | The prefix of the Elasticsearch index name that will be used | -| fluentd.enabled | bool | `false` | Set to 'true' if the Fluentd sidecar (DaemonSet) should be added to each pod | -| fluentd.extraVolumes | list | `[]` | Specify custom volumes to be added to Fluentd container (e.g. more log sources) | -| fluentd.fluentdCustomConfig | object | `{}` | Custom fluent.conf file | -| fluentd.httpPort | int | `9880` | The port on which the Fluentd sidecar will listen | -| fluentd.imageRepo | string | `"fluent/fluentd-kubernetes-daemonset"` | The Fluentd sidecar image repository | -| fluentd.imageTag | string | `"v1.11.5-debian-elasticsearch7-1.2"` | The Fluentd sidecar image tag | -| fluentd.resources | object | `{}` | Resources requests and limits for fluentd sidecar container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | -| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | -| image.repository | string | `"atlassian/bamboo"` | The Bamboo Docker image to use https://hub.docker.com/r/atlassian/bamboo-server | -| image.tag | string | `""` | The docker image tag to be used - defaults to the Chart appVersion | -| ingress.annotations | object | `{}` | The custom annotations that should be applied to the Ingress Resource. 
If using an ingress-nginx controller be sure that the annotations you add here are compatible with those already defined in the 'ingess.yaml' template | -| ingress.className | string | `"nginx"` | The class name used by the ingress controller if it's being used. Please follow documenation of your ingress controller. If the cluster contains multiple ingress controllers, this setting allows you to control which of them is used for Atlassian application traffic. | -| ingress.create | bool | `false` | Set to 'true' if an Ingress Resource should be created. This depends on a pre-provisioned Ingress Controller being available. | -| ingress.host | string | `nil` | The fully-qualified hostname (FQDN) of the Ingress Resource. Traffic coming in on this hostname will be routed by the Ingress Resource to the appropriate backend Service. | -| ingress.https | bool | `true` | Set to 'true' if browser communication with the application should be TLS (HTTPS) enforced. If not using an ingress and you want to reach the service on localhost using port-forwarding then this value should be set to 'false' | -| ingress.maxBodySize | string | `"250m"` | The max body size to allow. Requests exceeding this size will result in an HTTP 413 error being returned to the client. | -| ingress.nginx | bool | `true` | Set to 'true' if the Ingress Resource is to use the K8s 'ingress-nginx' controller. https://kubernetes.github.io/ingress-nginx/ This will populate the Ingress Resource with annotations that are specific to the K8s ingress-nginx controller. Set to 'false' if a different controller is to be used, in which case the appropriate annotations for that controller must be specified below under 'ingress.annotations'. | -| ingress.path | string | `nil` | The base path for the Ingress Resource. For example '/bamboo'. Based on a 'ingress.host' value of 'company.k8s.com' this would result in a URL of 'company.k8s.com/bamboo'. Default value is 'bamboo.service.contextPath' | -| ingress.proxyConnectTimeout | int | `60` | Defines a timeout for establishing a connection with a proxied server. It should be noted that this timeout cannot usually exceed 75 seconds. | -| ingress.proxyReadTimeout | int | `60` | Defines a timeout for reading a response from the proxied server. The timeout is set only between two successive read operations, not for the transmission of the whole response. If the proxied server does not transmit anything within this time, the connection is closed. | -| ingress.proxySendTimeout | int | `60` | Sets a timeout for transmitting a request to the proxied server. The timeout is set only between two successive write operations, not for the transmission of the whole request. If the proxied server does not receive anything within this time, the connection is closed. | -| ingress.tlsSecretName | string | `nil` | The name of the K8s Secret that contains the TLS private key and corresponding certificate. When utilised, TLS termination occurs at the ingress point where traffic to the Service and it's Pods is in plaintext. Usage is optional and depends on your use case. The Ingress Controller itself can also be configured with a TLS secret for all Ingress Resources. 
https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets https://kubernetes.io/docs/concepts/services-networking/ingress/#tls | -| monitoring.exposeJmxMetrics | bool | `false` | Expose JMX metrics with jmx_exporter https://github.com/prometheus/jmx_exporter | -| monitoring.fetchJmxExporterJar | bool | `true` | Fetch jmx_exporter jar from the image. If set to false make sure to manually copy the jar to shared home and provide an absolute path in jmxExporterCustomJarLocation | -| monitoring.grafana.createDashboards | bool | `false` | Create ConfigMaps with Grafana dashboards | -| monitoring.grafana.dashboardAnnotations | object | `{}` | Annotations added to Grafana dashboards ConfigMaps. See: https://github.com/kiwigrid/k8s-sidecar#usage | -| monitoring.grafana.dashboardLabels | object | `{}` | Label selector for Grafana dashboard importer sidecar | -| monitoring.jmxExporterCustomConfig | object | `{}` | Custom JMX config with the rules | -| monitoring.jmxExporterCustomJarLocation | string | `nil` | Location of jmx_exporter jar file if mounted from a secret or manually copied to shared home | -| monitoring.jmxExporterImageRepo | string | `"bitnami/jmx-exporter"` | Image repository with jmx_exporter jar | -| monitoring.jmxExporterImageTag | string | `"0.18.0"` | Image tag to be used to pull jmxExporterImageRepo | -| monitoring.jmxExporterInitContainer | object | `{"customSecurityContext":{},"resources":{},"runAsRoot":true}` | JMX exporter init container configuration | -| monitoring.jmxExporterInitContainer.customSecurityContext | object | `{}` | Custom SecurityContext for the jmx exporter init container | -| monitoring.jmxExporterInitContainer.resources | object | `{}` | Resources requests and limits for the JMX exporter init container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | -| monitoring.jmxExporterInitContainer.runAsRoot | bool | `true` | Whether to run JMX exporter init container as root to copy JMX exporter binary to shared home volume. Set to false if running containers as root is not allowed in the cluster. | -| monitoring.jmxExporterPort | int | `9999` | Port number on which metrics will be available | -| monitoring.jmxExporterPortType | string | `"ClusterIP"` | JMX exporter port type | -| monitoring.jmxServiceAnnotations | object | `{}` | Annotations added to the jmx service | -| monitoring.serviceMonitor.create | bool | `false` | Create ServiceMonitor to start scraping metrics. ServiceMonitor CRD needs to be created in advance. | -| monitoring.serviceMonitor.prometheusLabelSelector | object | `{}` | ServiceMonitorSelector of the prometheus instance. | -| monitoring.serviceMonitor.scrapeIntervalSeconds | int | `30` | Scrape interval for the JMX service. | -| nodeSelector | object | `{}` | Standard K8s node-selectors that will be applied to all Bamboo pods | -| podAnnotations | object | `{}` | Custom annotations that will be applied to all Bamboo pods | -| podDisruptionBudget | object | `{"annotations":{},"enabled":false,"labels":{},"maxUnavailable":null,"minAvailable":null}` | PodDisruptionBudget: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ You can specify only one of maxUnavailable and minAvailable in a single PodDisruptionBudget. When both minAvailable and maxUnavailable are set, maxUnavailable takes precedence. | -| podLabels | object | `{}` | Custom labels that will be applied to all Bamboo pods | -| priorityClassName | string | `nil` | Priority class for the application pods. 
The PriorityClass with this name needs to be available in the cluster. For details see https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass | -| replicaCount | int | `1` | The initial number of Bamboo pods that should be started at deployment time. Note that Bamboo requires manual configuration via the browser post deployment after the first pod is deployed. At present Bamboo Data Center utilizes an `active-passive` clustering model. This architecture is not ideal where K8s deployments are concerned. As such a Bamboo server cluster comprising only `1` pod is the recommended topology for now. For more detail see: https://atlassian.github.io/data-center-helm-charts/troubleshooting/LIMITATIONS#cluster-size | -| schedulerName | string | `nil` | Standard K8s schedulerName that will be applied to all Bamboo pods. Check Kubernetes documentation on how to configure multiple schedulers: https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/#specify-schedulers-for-pods | -| serviceAccount.annotations | object | `{}` | Annotations to add to the ServiceAccount (if created) | -| serviceAccount.create | bool | `true` | Set to 'true' if a ServiceAccount should be created, or 'false' if it already exists. | -| serviceAccount.imagePullSecrets | list | `[]` | For Docker images hosted in private registries, define the list of image pull secrets that should be utilized by the created ServiceAccount https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | -| serviceAccount.name | string | `nil` | The name of the ServiceAccount to be used by the pods. If not specified, but the "serviceAccount.create" flag is set to 'true', then the ServiceAccount name will be auto-generated, otherwise the 'default' ServiceAccount will be used. https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server | -| tolerations | list | `[]` | Standard K8s tolerations that will be applied to all Bamboo pods | -| volumes.additional | list | `[]` | Defines additional volumes that should be applied to all Bamboo pods. Note that this will not create any corresponding volume mounts; those needs to be defined in bamboo.additionalVolumeMounts | -| volumes.localHome.customVolume | object | `{}` | Static provisioning of local-home using K8s PVs and PVCs NOTE: Due to the ephemeral nature of pods this approach to provisioning volumes for pods is not recommended. Dynamic provisioning described above is the prescribed approach. When 'persistentVolumeClaim.create' is 'false', then this value can be used to define a standard K8s volume that will be used for the local-home volume(s). If not defined, then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object. https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static | -| volumes.localHome.mountPath | string | `"/var/atlassian/application-data/bamboo"` | Specifies the path in the Bamboo container to which the local-home volume will be mounted. | -| volumes.localHome.persistentVolumeClaim.create | bool | `false` | If 'true', then a 'PersistentVolume' and 'PersistentVolumeClaim' will be dynamically created for each pod based on the 'StorageClassName' supplied below. 
| -| volumes.localHome.persistentVolumeClaim.resources | object | `{"requests":{"storage":"1Gi"}}` | Specifies the standard K8s resource requests and/or limits for the local-home volume claims. | -| volumes.localHome.persistentVolumeClaim.storageClassName | string | `nil` | Specify the name of the 'StorageClass' that should be used for the local-home volume claim. | -| volumes.sharedHome.customVolume | object | `{}` | Static provisioning of shared-home using K8s PVs and PVCs When 'persistentVolumeClaim.create' is 'false', then this value can be used to define a standard K8s volume that will be used for the shared-home volume. If not defined, then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object. https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static https://atlassian.github.io/data-center-helm-charts/examples/storage/aws/SHARED_STORAGE/ | -| volumes.sharedHome.mountPath | string | `"/var/atlassian/application-data/shared-home"` | Specifies the path in the Bamboo container to which the shared-home volume will be mounted. | -| volumes.sharedHome.nfsPermissionFixer.command | string | `nil` | By default, the fixer will change the group ownership of the volume's root directory to match the Bamboo container's GID (2001), and then ensures the directory is group-writeable. If this is not the desired behaviour, command used can be specified here. | -| volumes.sharedHome.nfsPermissionFixer.enabled | bool | `true` | If 'true', this will alter the shared-home volume's root directory so that Bamboo can write to it. This is a workaround for a K8s bug affecting NFS volumes: https://github.com/kubernetes/examples/issues/260 | -| volumes.sharedHome.nfsPermissionFixer.imageRepo | string | `"alpine"` | Image repository for the permission fixer init container. Defaults to alpine | -| volumes.sharedHome.nfsPermissionFixer.imageTag | string | `"latest"` | Image tag for the permission fixer init container. Defaults to latest | -| volumes.sharedHome.nfsPermissionFixer.mountPath | string | `"/shared-home"` | The path in the K8s initContainer where the shared-home volume will be mounted | -| volumes.sharedHome.nfsPermissionFixer.resources | object | `{}` | Resources requests and limits for nfsPermissionFixer init container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | -| volumes.sharedHome.persistentVolumeClaim.create | bool | `false` | If 'true', then a 'PersistentVolumeClaim' and 'PersistentVolume' will be dynamically created for shared-home based on the 'StorageClassName' supplied below. | -| volumes.sharedHome.persistentVolumeClaim.resources | object | `{"requests":{"storage":"1Gi"}}` | Specifies the standard K8s resource requests and/or limits for the shared-home volume claims. | -| volumes.sharedHome.persistentVolumeClaim.storageClassName | string | `nil` | Specify the name of the 'StorageClass' that should be used for the 'shared-home' | -| volumes.sharedHome.subPath | string | `nil` | Specifies the sub-directory of the shared-home volume that will be mounted in to the Bamboo container. | \ No newline at end of file +| additionalConfigMaps | list | `[]` | Create additional ConfigMaps with given names, keys and content. 
The Helm release name will be used as a prefix for a ConfigMap name, fileName is used as subPath |
+| additionalContainers | list | `[]` | Additional container definitions that will be added to all Bamboo pods |
+| additionalFiles | list | `[]` | Additional existing ConfigMaps and Secrets not managed by Helm that should be mounted into the service container. Configuration details below (camelCase is important!): 'name' - References existing ConfigMap or secret name. 'type' - 'configMap' or 'secret' 'key' - The file name. 'mountPath' - The destination directory in a container. VolumeMount and Volumes are added with this name and index position, for example; custom-config-0, keystore-2 |
+| additionalInitContainers | list | `[]` | Additional initContainer definitions that will be added to all Bamboo pods |
+| additionalLabels | object | `{}` | Additional labels that should be applied to all resources |
+| affinity | object | `{}` | Standard K8s affinities that will be applied to all Bamboo pods |
+| atlassianAnalyticsAndSupport.analytics.enabled | bool | `true` | Mount ConfigMap with selected Helm chart values as a JSON which DC products will read and send analytics events to Atlassian data pipelines |
+| atlassianAnalyticsAndSupport.helmValues.enabled | bool | `true` | Mount ConfigMap with selected Helm chart values as a YAML file which can be optionally included in support.zip |
+| bamboo.accessLog.localHomeSubPath | string | `"log"` | The subdirectory within the local-home volume where access logs should be stored. |
+| bamboo.accessLog.mountPath | string | `"/opt/atlassian/bamboo/logs"` | The path within the Bamboo container where the local-home volume should be mounted in order to capture access logs. |
+| bamboo.additionalBundledPlugins | list | `[]` | Specifies a list of additional Bamboo plugins that should be added to the Bamboo container. Note plugins installed via this method will appear as bundled plugins rather than user plugins. These should be specified in the same manner as the 'additionalLibraries' property. Additional details: https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ NOTE: only .jar files can be loaded using this approach. OBR's can be extracted (unzipped) to access the associated .jar. An alternative to this method is to install the plugins via "Manage Apps" in the product system administration UI. |
+| bamboo.additionalCertificates | object | `{"customCmd":null,"secretName":null}` | Certificates to be added to Java truststore. Provide reference to a secret that contains the certificates |
+| bamboo.additionalEnvironmentVariables | list | `[]` | Defines any additional environment variables to be passed to the Bamboo container. See https://hub.docker.com/r/atlassian/bamboo-server for supported variables. |
+| bamboo.additionalJvmArgs | list | `[]` | Specifies a list of additional arguments that can be passed to the Bamboo JVM, e.g. system properties. |
+| bamboo.additionalLibraries | list | `[]` | Specifies a list of additional Java libraries that should be added to the Bamboo container. Each item in the list should specify the name of the volume that contains the library, as well as the name of the library file within that volume's root directory. Optionally, a subDirectory field can be included to specify which directory in the volume contains the library file. Additional details: https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ |
+| bamboo.additionalPorts | list | `[]` | Defines any additional ports for the Bamboo container. |
+| bamboo.additionalVolumeClaimTemplates | list | `[]` | Defines additional volumeClaimTemplates that should be applied to the Bamboo pod. Note that this will not create any corresponding volume mounts; those need to be defined in bamboo.additionalVolumeMounts |
+| bamboo.additionalVolumeMounts | list | `[]` | Defines any additional volume mounts for the Bamboo container. These can refer to existing volumes, or new volumes can be defined via 'volumes.additional'. |
+| bamboo.brokerUrl | string | `nil` | Override the server/agent broker URL; this is optional. |
+| bamboo.containerSecurityContext | object | `{}` | Standard K8s field that holds security configurations that will be applied to a container. https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ |
+| bamboo.disableAgentAuth | bool | `false` | Whether to disable agent authentication. Setting this to true skips the agent approval step in the UI. For more information see: https://confluence.atlassian.com/bamboo/agent-authentication-289277196.html The default is false. |
+| bamboo.forceConfigUpdate | bool | `false` | The Docker entrypoint.py generates application configuration on first start; not all of these files are regenerated on subsequent starts. By default, bamboo.cfg.xml is generated only once. Set `forceConfigUpdate` to true to change this behavior. |
+| bamboo.import | object | `{"path":null,"type":"clean"}` | Bamboo can optionally import an existing exported dataset on first-run. These optional values can configure the import file or skip this stage entirely. For more details on importing and exporting see the documentation: https://confluence.atlassian.com/bamboo/exporting-data-for-backup-289277255.html https://confluence.atlassian.com/bamboo/importing-data-from-backup-289277260.html |
+| bamboo.import.path | string | `nil` | Path to the existing export to import to the new installation. This should be accessible by the cluster node; e.g. via the shared-home or `additionalVolumeMounts` below. |
+| bamboo.import.type | string | `"clean"` | Import type. Valid values are `clean` (for a new install) or `import`, in which case you should provide the file path. The default is `clean`. |
+| bamboo.jmsService.annotations | object | `{}` | Additional annotations to apply to the JMS Service |
+| bamboo.jmsService.enabled | bool | `false` | Whether to create a separate Service for JMS Agent traffic |
+| bamboo.jmsService.loadBalancerIP | string | `nil` | Use specific loadBalancerIP. Only applies to service type LoadBalancer. |
+| bamboo.jmsService.port | int | `54663` | The port on which the Bamboo K8s Service will listen for Agent traffic |
+| bamboo.jmsService.type | string | `"ClusterIP"` | The type of K8s service to use for JMS |
+| bamboo.license | object | `{"secretKey":"license","secretName":null}` | The Bamboo DC license that should be used. If supplied here the license configuration will be skipped in the setup wizard. |
+| bamboo.license.secretKey | string | `"license"` | The key (default 'licenseKey') in the Secret used to store the license information |
+| bamboo.license.secretName | string | `nil` | The secret that contains the license information |
+| bamboo.livenessProbe.enabled | bool | `false` | Whether to apply the livenessProbe check to the pod. |
| +| bamboo.livenessProbe.failureThreshold | int | `12` | The number of consecutive failures of the Bamboo container liveness probe before the pod fails liveness checks. | +| bamboo.livenessProbe.initialDelaySeconds | int | `60` | Time to wait before starting the first probe | +| bamboo.livenessProbe.periodSeconds | int | `5` | How often (in seconds) the Bamboo container liveness probe will run | +| bamboo.livenessProbe.timeoutSeconds | int | `1` | Number of seconds after which the probe times out | +| bamboo.ports.http | int | `8085` | The port on which the Bamboo container listens for HTTP traffic | +| bamboo.ports.jms | int | `54663` | JMS port | +| bamboo.readinessProbe.customProbe | object | `{}` | Custom ReadinessProbe to override the default /status httpGet | +| bamboo.readinessProbe.enabled | bool | `true` | Whether to apply the readinessProbe check to pod. | +| bamboo.readinessProbe.failureThreshold | int | `30` | The number of consecutive failures of the Bamboo container readiness probe before the pod fails readiness checks. | +| bamboo.readinessProbe.initialDelaySeconds | int | `30` | The initial delay (in seconds) for the Bamboo container readiness probe, after which the probe will start running. | +| bamboo.readinessProbe.periodSeconds | int | `10` | How often (in seconds) the Bamboo container readiness probe will run | +| bamboo.readinessProbe.timeoutSeconds | int | `1` | Number of seconds after which the probe times out | +| bamboo.resources.container.requests.cpu | string | `"2"` | Initial CPU request by Bamboo pod | +| bamboo.resources.container.requests.memory | string | `"2G"` | Initial Memory request by Bamboo pod | +| bamboo.resources.jvm.maxHeap | string | `"1024m"` | The maximum amount of heap memory that will be used by the Bamboo JVM | +| bamboo.resources.jvm.minHeap | string | `"512m"` | The minimum amount of heap memory that will be used by the Bamboo JVM | +| bamboo.securityContext.fsGroup | int | `2005` | The GID used by the Bamboo docker image GID will default to 2005 if not supplied and securityContextEnabled is set to true. This is intended to ensure that the shared-home volume is group-writeable by the GID used by the Bamboo container. However, this doesn't appear to work for NFS volumes due to a K8s bug: https://github.com/kubernetes/examples/issues/260 | +| bamboo.securityContextEnabled | bool | `true` | Whether to apply security context to pod. | +| bamboo.securityToken.secretKey | string | `"security-token"` | The key (default `secretKey`) in the Secret used to store the Bamboo shared key. | +| bamboo.securityToken.secretName | string | `nil` | The name of the K8s Secret that contains the security token. When specified, the token will override the generated one. This secret should also be shared with the agent deployment. An example of creating a K8s secret for the secret below: 'kubectl create secret generic --from-literal=security-token=' https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets | +| bamboo.service.annotations | object | `{}` | Additional annotations to apply to the Service | +| bamboo.service.contextPath | string | `nil` | The Tomcat context path that Bamboo will use. The ATL_TOMCAT_CONTEXTPATH will be set automatically. | +| bamboo.service.loadBalancerIP | string | `nil` | Use specific loadBalancerIP. Only applies to service type LoadBalancer.
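A minimal sketch of pre-creating the security token Secret referenced by `bamboo.securityToken.*`; the Secret name and token value below are placeholders, and the data key must match `bamboo.securityToken.secretKey`:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: bamboo-security-token          # placeholder name
type: Opaque
stringData:
  security-token: replace-with-shared-token   # the same token is shared with the agent deployment
```

The Secret is then referenced from the chart by setting `bamboo.securityToken.secretName: bamboo-security-token` (and, if a different key is used, `bamboo.securityToken.secretKey`).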
| +| bamboo.service.port | int | `80` | The port on which the Bamboo K8s Service will listen for http traffic | +| bamboo.service.sessionAffinity | string | `"None"` | Session affinity type. If you want to make sure that connections from a particular client are passed to the same pod each time, set sessionAffinity to ClientIP. See: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity | +| bamboo.service.sessionAffinityConfig | object | `{"clientIP":{"timeoutSeconds":null}}` | Session affinity configuration | +| bamboo.service.sessionAffinityConfig.clientIP.timeoutSeconds | string | `nil` | Specifies the seconds of ClientIP type session sticky time. The value must be > 0 && <= 86400 (for 1 day) if ServiceAffinity == "ClientIP". Default value is 10800 (for 3 hours). | +| bamboo.service.type | string | `"ClusterIP"` | The type of K8s service to use for Bamboo | +| bamboo.setPermissions | bool | `true` | Boolean to define whether to set local home directory permissions on startup of Bamboo container. Set to 'false' to disable this behaviour. | +| bamboo.shutdown.command | string | `"/shutdown-wait.sh"` | By default pods will be stopped via a [preStop hook](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/), using a script supplied by the Docker image. If any other shutdown behaviour is needed it can be achieved by overriding this value. Note that the shutdown command needs to wait for the application shutdown completely before exiting; see [the default command](https://bitbucket.org/atlassian-docker/docker-bamboo-server/src/master/shutdown-wait.sh) for details. | +| bamboo.shutdown.terminationGracePeriodSeconds | int | `30` | The termination grace period for pods during shutdown. This should be set to the internal grace period, plus a small buffer to allow the JVM to fully terminate. | +| bamboo.startupProbe.enabled | bool | `false` | Whether to apply the startupProbe check to pod. | +| bamboo.startupProbe.failureThreshold | int | `120` | The number of consecutive failures of the Bamboo container startup probe before the pod fails startup checks. | +| bamboo.startupProbe.initialDelaySeconds | int | `60` | Time to wait before starting the first probe | +| bamboo.startupProbe.periodSeconds | int | `5` | How often (in seconds) the Bamboo container startup probe will run | +| bamboo.sysadminCredentials | object | `{"displayNameSecretKey":"displayName","emailAddressSecretKey":"emailAddress","passwordSecretKey":"password","secretName":null,"usernameSecretKey":"username"}` | The admin user configuration, and credentials that Bamboo should use. If supplied here the admin configuration will be skipped in the setup wizard. | +| bamboo.sysadminCredentials.displayNameSecretKey | string | `"displayName"` | The key in the Kubernetes Secret that contains the sysadmin display name | +| bamboo.sysadminCredentials.emailAddressSecretKey | string | `"emailAddress"` | The key in the Kubernetes Secret that contains the sysadmin email address | +| bamboo.sysadminCredentials.passwordSecretKey | string | `"password"` | The key in the Kubernetes Secret that contains the sysadmin password | +| bamboo.sysadminCredentials.secretName | string | `nil` | The secret that contains the admin user information | +| bamboo.sysadminCredentials.usernameSecretKey | string | `"username"` | The key in the Kubernetes Secret that contains the sysadmin username | +| bamboo.topologySpreadConstraints | list | `[]` | Defines topology spread constraints for Bamboo pods. 
See details: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ | +| bamboo.unattendedSetup | bool | `true` | To skip the setup wizard post deployment set this property to 'true' and ensure values for all 'REQUIRED' and 'UNATTENDED-SETUP' stanzas (see banner of this file) have been supplied. For release 1.0.0 this value is by default set to 'true' and should not be changed. | +| bamboo.useHelmReleaseNameAsContainerName | bool | `false` | Whether the main container should acquire helm release name. By default the container name is `bamboo` which corresponds to the name of the Helm Chart. | +| database.credentials.passwordSecretKey | string | `"password"` | The key ('password') in the Secret used to store the database login password | +| database.credentials.secretName | string | `nil` | from-literal=password=' https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets | +| database.credentials.usernameSecretKey | string | `"username"` | The key ('username') in the Secret used to store the database login username | +| database.type | string | `nil` | The database type that should be used. If not specified, then it will need to be provided via the browser during manual configuration post deployment. Valid values include: - 'postgresql' - 'mysql' - 'oracle12c' - 'mssql' https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#databasetype | +| database.url | string | `nil` | The jdbc URL of the database. If not specified, then it will need to be provided via the browser during manual configuration post deployment. Example URLs include: - 'jdbc:postgresql://:5432/' - 'jdbc:mysql:///' - 'jdbc:sqlserver://:1433;databaseName=' - 'jdbc:oracle:thin:@:1521:' https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#databaseurl | +| fluentd.command | string | `nil` | The command used to start Fluentd. If not supplied the default command will be used: "fluentd -c /fluentd/etc/fluent.conf -v" Note: The custom command can be free-form, however pay particular attention to the process that should ultimately be left running in the container. This process should be invoked with 'exec' so that signals are appropriately propagated to it, for instance SIGTERM. An example of how such a command may look is: " && && exec " | +| fluentd.customConfigFile | bool | `false` | Set to 'true' if a custom config (see 'configmap-fluentd.yaml' for default) should be used for Fluentd. If enabled this config must supplied via the 'fluentdCustomConfig' property below. | +| fluentd.elasticsearch.enabled | bool | `true` | Set to 'true' if Fluentd should send all log events to an Elasticsearch service. | +| fluentd.elasticsearch.hostname | string | `"elasticsearch"` | The hostname of the Elasticsearch service that Fluentd should send logs to. | +| fluentd.elasticsearch.indexNamePrefix | string | `"bamboo"` | The prefix of the Elasticsearch index name that will be used | +| fluentd.enabled | bool | `false` | Set to 'true' if the Fluentd sidecar (DaemonSet) should be added to each pod | +| fluentd.extraVolumes | list | `[]` | Specify custom volumes to be added to Fluentd container (e.g. 
more log sources) | +| fluentd.fluentdCustomConfig | object | `{}` | Custom fluent.conf file | +| fluentd.httpPort | int | `9880` | The port on which the Fluentd sidecar will listen | +| fluentd.imageRepo | string | `"fluent/fluentd-kubernetes-daemonset"` | The Fluentd sidecar image repository | +| fluentd.imageTag | string | `"v1.11.5-debian-elasticsearch7-1.2"` | The Fluentd sidecar image tag | +| fluentd.resources | object | `{}` | Resources requests and limits for fluentd sidecar container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | +| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | +| image.repository | string | `"atlassian/bamboo"` | The Bamboo Docker image to use https://hub.docker.com/r/atlassian/bamboo-server | +| image.tag | string | `""` | The docker image tag to be used - defaults to the Chart appVersion | +| ingress.annotations | object | `{}` | The custom annotations that should be applied to the Ingress Resource. If using an ingress-nginx controller be sure that the annotations you add here are compatible with those already defined in the 'ingress.yaml' template | +| ingress.className | string | `"nginx"` | The class name used by the ingress controller if it's being used. Please follow the documentation of your ingress controller. If the cluster contains multiple ingress controllers, this setting allows you to control which of them is used for Atlassian application traffic. | +| ingress.create | bool | `false` | Set to 'true' if an Ingress Resource should be created. This depends on a pre-provisioned Ingress Controller being available. | +| ingress.host | string | `nil` | The fully-qualified hostname (FQDN) of the Ingress Resource. Traffic coming in on this hostname will be routed by the Ingress Resource to the appropriate backend Service. | +| ingress.https | bool | `true` | Set to 'true' if browser communication with the application should be TLS (HTTPS) enforced. If not using an ingress and you want to reach the service on localhost using port-forwarding then this value should be set to 'false' | +| ingress.maxBodySize | string | `"250m"` | The max body size to allow. Requests exceeding this size will result in an HTTP 413 error being returned to the client. | +| ingress.nginx | bool | `true` | Set to 'true' if the Ingress Resource is to use the K8s 'ingress-nginx' controller. https://kubernetes.github.io/ingress-nginx/ This will populate the Ingress Resource with annotations that are specific to the K8s ingress-nginx controller. Set to 'false' if a different controller is to be used, in which case the appropriate annotations for that controller must be specified below under 'ingress.annotations'. | +| ingress.path | string | `nil` | The base path for the Ingress Resource. For example '/bamboo'. Based on a 'ingress.host' value of 'company.k8s.com' this would result in a URL of 'company.k8s.com/bamboo'. Default value is 'bamboo.service.contextPath' | +| ingress.proxyConnectTimeout | int | `60` | Defines a timeout for establishing a connection with a proxied server. It should be noted that this timeout cannot usually exceed 75 seconds. | +| ingress.proxyReadTimeout | int | `60` | Defines a timeout for reading a response from the proxied server. The timeout is set only between two successive read operations, not for the transmission of the whole response. If the proxied server does not transmit anything within this time, the connection is closed.
| +| ingress.proxySendTimeout | int | `60` | Sets a timeout for transmitting a request to the proxied server. The timeout is set only between two successive write operations, not for the transmission of the whole request. If the proxied server does not receive anything within this time, the connection is closed. | +| ingress.tlsSecretName | string | `nil` | The name of the K8s Secret that contains the TLS private key and corresponding certificate. When utilised, TLS termination occurs at the ingress point where traffic to the Service and it's Pods is in plaintext. Usage is optional and depends on your use case. The Ingress Controller itself can also be configured with a TLS secret for all Ingress Resources. https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets https://kubernetes.io/docs/concepts/services-networking/ingress/#tls | +| monitoring.exposeJmxMetrics | bool | `false` | Expose JMX metrics with jmx_exporter https://github.com/prometheus/jmx_exporter | +| monitoring.fetchJmxExporterJar | bool | `true` | Fetch jmx_exporter jar from the image. If set to false make sure to manually copy the jar to shared home and provide an absolute path in jmxExporterCustomJarLocation | +| monitoring.grafana.createDashboards | bool | `false` | Create ConfigMaps with Grafana dashboards | +| monitoring.grafana.dashboardAnnotations | object | `{}` | Annotations added to Grafana dashboards ConfigMaps. See: https://github.com/kiwigrid/k8s-sidecar#usage | +| monitoring.grafana.dashboardLabels | object | `{}` | Label selector for Grafana dashboard importer sidecar | +| monitoring.jmxExporterCustomConfig | object | `{}` | Custom JMX config with the rules | +| monitoring.jmxExporterCustomJarLocation | string | `nil` | Location of jmx_exporter jar file if mounted from a secret or manually copied to shared home | +| monitoring.jmxExporterImageRepo | string | `"bitnami/jmx-exporter"` | Image repository with jmx_exporter jar | +| monitoring.jmxExporterImageTag | string | `"0.18.0"` | Image tag to be used to pull jmxExporterImageRepo | +| monitoring.jmxExporterInitContainer | object | `{"customSecurityContext":{},"resources":{},"runAsRoot":true}` | JMX exporter init container configuration | +| monitoring.jmxExporterInitContainer.customSecurityContext | object | `{}` | Custom SecurityContext for the jmx exporter init container | +| monitoring.jmxExporterInitContainer.resources | object | `{}` | Resources requests and limits for the JMX exporter init container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | +| monitoring.jmxExporterInitContainer.runAsRoot | bool | `true` | Whether to run JMX exporter init container as root to copy JMX exporter binary to shared home volume. Set to false if running containers as root is not allowed in the cluster. | +| monitoring.jmxExporterPort | int | `9999` | Port number on which metrics will be available | +| monitoring.jmxExporterPortType | string | `"ClusterIP"` | JMX exporter port type | +| monitoring.jmxServiceAnnotations | object | `{}` | Annotations added to the jmx service | +| monitoring.serviceMonitor.create | bool | `false` | Create ServiceMonitor to start scraping metrics. ServiceMonitor CRD needs to be created in advance. | +| monitoring.serviceMonitor.prometheusLabelSelector | object | `{}` | ServiceMonitorSelector of the prometheus instance. | +| monitoring.serviceMonitor.scrapeIntervalSeconds | int | `30` | Scrape interval for the JMX service. 
| +| nodeSelector | object | `{}` | Standard K8s node-selectors that will be applied to all Bamboo pods | +| podAnnotations | object | `{}` | Custom annotations that will be applied to all Bamboo pods | +| podDisruptionBudget | object | `{"annotations":{},"enabled":false,"labels":{},"maxUnavailable":null,"minAvailable":null}` | PodDisruptionBudget: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ You can specify only one of maxUnavailable and minAvailable in a single PodDisruptionBudget. When both minAvailable and maxUnavailable are set, maxUnavailable takes precedence. | +| podLabels | object | `{}` | Custom labels that will be applied to all Bamboo pods | +| priorityClassName | string | `nil` | Priority class for the application pods. The PriorityClass with this name needs to be available in the cluster. For details see https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass | +| replicaCount | int | `1` | The initial number of Bamboo pods that should be started at deployment time. Note that Bamboo requires manual configuration via the browser post deployment after the first pod is deployed. At present Bamboo Data Center utilizes an `active-passive` clustering model. This architecture is not ideal where K8s deployments are concerned. As such a Bamboo server cluster comprising only `1` pod is the recommended topology for now. For more detail see: https://atlassian.github.io/data-center-helm-charts/troubleshooting/LIMITATIONS#cluster-size | +| schedulerName | string | `nil` | Standard K8s schedulerName that will be applied to all Bamboo pods. Check Kubernetes documentation on how to configure multiple schedulers: https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/#specify-schedulers-for-pods | +| serviceAccount.annotations | object | `{}` | Annotations to add to the ServiceAccount (if created) | +| serviceAccount.create | bool | `true` | Set to 'true' if a ServiceAccount should be created, or 'false' if it already exists. | +| serviceAccount.imagePullSecrets | list | `[]` | For Docker images hosted in private registries, define the list of image pull secrets that should be utilized by the created ServiceAccount https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | +| serviceAccount.name | string | `nil` | The name of the ServiceAccount to be used by the pods. If not specified, but the "serviceAccount.create" flag is set to 'true', then the ServiceAccount name will be auto-generated, otherwise the 'default' ServiceAccount will be used. https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server | +| testPods | object | `{"affinity":{},"annotations":{},"image":{"permissionsTestContainer":"debian:stable-slim","statusTestContainer":"alpine:latest"},"labels":{},"nodeSelector":{},"schedulerName":null,"tolerations":[]}` | Metadata and pod spec for pods started in Helm tests | +| tolerations | list | `[]` | Standard K8s tolerations that will be applied to all Bamboo pods | +| volumes.additional | list | `[]` | Defines additional volumes that should be applied to all Bamboo pods. 
Note that this will not create any corresponding volume mounts; those needs to be defined in bamboo.additionalVolumeMounts | +| volumes.localHome.customVolume | object | `{}` | Static provisioning of local-home using K8s PVs and PVCs NOTE: Due to the ephemeral nature of pods this approach to provisioning volumes for pods is not recommended. Dynamic provisioning described above is the prescribed approach. When 'persistentVolumeClaim.create' is 'false', then this value can be used to define a standard K8s volume that will be used for the local-home volume(s). If not defined, then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object. https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static | +| volumes.localHome.mountPath | string | `"/var/atlassian/application-data/bamboo"` | Specifies the path in the Bamboo container to which the local-home volume will be mounted. | +| volumes.localHome.persistentVolumeClaim.create | bool | `false` | If 'true', then a 'PersistentVolume' and 'PersistentVolumeClaim' will be dynamically created for each pod based on the 'StorageClassName' supplied below. | +| volumes.localHome.persistentVolumeClaim.resources | object | `{"requests":{"storage":"1Gi"}}` | Specifies the standard K8s resource requests and/or limits for the local-home volume claims. | +| volumes.localHome.persistentVolumeClaim.storageClassName | string | `nil` | Specify the name of the 'StorageClass' that should be used for the local-home volume claim. | +| volumes.sharedHome.customVolume | object | `{}` | Static provisioning of shared-home using K8s PVs and PVCs When 'persistentVolumeClaim.create' is 'false', then this value can be used to define a standard K8s volume that will be used for the shared-home volume. If not defined, then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object. https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static https://atlassian.github.io/data-center-helm-charts/examples/storage/aws/SHARED_STORAGE/ | +| volumes.sharedHome.mountPath | string | `"/var/atlassian/application-data/shared-home"` | Specifies the path in the Bamboo container to which the shared-home volume will be mounted. | +| volumes.sharedHome.nfsPermissionFixer.command | string | `nil` | By default, the fixer will change the group ownership of the volume's root directory to match the Bamboo container's GID (2001), and then ensures the directory is group-writeable. If this is not the desired behaviour, command used can be specified here. | +| volumes.sharedHome.nfsPermissionFixer.enabled | bool | `true` | If 'true', this will alter the shared-home volume's root directory so that Bamboo can write to it. This is a workaround for a K8s bug affecting NFS volumes: https://github.com/kubernetes/examples/issues/260 | +| volumes.sharedHome.nfsPermissionFixer.imageRepo | string | `"alpine"` | Image repository for the permission fixer init container. Defaults to alpine | +| volumes.sharedHome.nfsPermissionFixer.imageTag | string | `"latest"` | Image tag for the permission fixer init container. 
Defaults to latest | +| volumes.sharedHome.nfsPermissionFixer.mountPath | string | `"/shared-home"` | The path in the K8s initContainer where the shared-home volume will be mounted | +| volumes.sharedHome.nfsPermissionFixer.resources | object | `{}` | Resources requests and limits for nfsPermissionFixer init container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | +| volumes.sharedHome.persistentVolumeClaim.create | bool | `false` | If 'true', then a 'PersistentVolumeClaim' and 'PersistentVolume' will be dynamically created for shared-home based on the 'StorageClassName' supplied below. | +| volumes.sharedHome.persistentVolumeClaim.resources | object | `{"requests":{"storage":"1Gi"}}` | Specifies the standard K8s resource requests and/or limits for the shared-home volume claims. | +| volumes.sharedHome.persistentVolumeClaim.storageClassName | string | `nil` | Specify the name of the 'StorageClass' that should be used for the 'shared-home' volume claim. | +| volumes.sharedHome.subPath | string | `nil` | Specifies the sub-directory of the shared-home volume that will be mounted in to the Bamboo container. | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/src/main/charts/bamboo/templates/tests/test-application-status.yaml b/src/main/charts/bamboo/templates/tests/test-application-status.yaml index 95a5cd982..cc46cda6f 100644 --- a/src/main/charts/bamboo/templates/tests/test-application-status.yaml +++ b/src/main/charts/bamboo/templates/tests/test-application-status.yaml @@ -5,9 +5,21 @@ metadata: annotations: "helm.sh/hook": test "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + {{- if not .Values.testPods.annotations }} {{- include "bamboo.podAnnotations" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.annotations }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} labels: + {{- if not .Values.testPods.labels }} {{- include "common.labels.commonLabels" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.labels }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} spec: containers: - name: test @@ -36,4 +48,19 @@ spec: sleep 10 count=$(( $count + 1 )) done - restartPolicy: Never \ No newline at end of file + restartPolicy: Never + {{- with .Values.testPods.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.testPods.schedulerName }} + schedulerName: {{ .Values.testPods.schedulerName | quote }} + {{- end }} diff --git a/src/main/charts/bamboo/templates/tests/test-database-connectivity.yaml b/src/main/charts/bamboo/templates/tests/test-database-connectivity.yaml index 6c7fcdcef..dbefd5213 100644 --- a/src/main/charts/bamboo/templates/tests/test-database-connectivity.yaml +++ b/src/main/charts/bamboo/templates/tests/test-database-connectivity.yaml @@ -6,9 +6,21 @@ metadata: annotations: "helm.sh/hook": test "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + {{- if not .Values.testPods.annotations }} {{- include "bamboo.podAnnotations" . 
| nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.annotations }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} labels: + {{- if not .Values.testPods.labels }} {{- include "common.labels.commonLabels" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.labels }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} spec: serviceAccountName: {{ include "bamboo.serviceAccountName" . }} containers: @@ -49,4 +61,19 @@ spec: cat output.txt grep -q "Connection established OK" output.txt restartPolicy: Never -{{ end }} \ No newline at end of file + {{- with .Values.testPods.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.testPods.schedulerName }} + schedulerName: {{ .Values.testPods.schedulerName | quote }} + {{- end }} +{{ end }} diff --git a/src/main/charts/bamboo/templates/tests/test-shared-home-permissions.yaml b/src/main/charts/bamboo/templates/tests/test-shared-home-permissions.yaml index 11d373a27..d97e59285 100644 --- a/src/main/charts/bamboo/templates/tests/test-shared-home-permissions.yaml +++ b/src/main/charts/bamboo/templates/tests/test-shared-home-permissions.yaml @@ -5,9 +5,21 @@ metadata: annotations: "helm.sh/hook": test "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + {{- if not .Values.testPods.annotations }} {{- include "bamboo.podAnnotations" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.annotations }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} labels: + {{- if not .Values.testPods.labels }} {{- include "common.labels.commonLabels" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.labels }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} spec: containers: - name: test @@ -41,4 +53,19 @@ spec: rm /shared-home/permissions-test volumes: {{ include "bamboo.volumes.sharedHome" . | nindent 4 }} - restartPolicy: Never \ No newline at end of file + restartPolicy: Never + {{- with .Values.testPods.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.testPods.schedulerName }} + schedulerName: {{ .Values.testPods.schedulerName | quote }} + {{- end }} diff --git a/src/main/charts/bamboo/values.yaml b/src/main/charts/bamboo/values.yaml index 0be2d5734..2c31d48de 100644 --- a/src/main/charts/bamboo/values.yaml +++ b/src/main/charts/bamboo/values.yaml @@ -1146,7 +1146,7 @@ additionalConfigMaps: [] atlassianAnalyticsAndSupport: analytics: - + # -- Mount ConfigMap with selected Helm chart values as a JSON # which DC products will read and send analytics events to Atlassian data pipelines # @@ -1158,3 +1158,16 @@ atlassianAnalyticsAndSupport: # which can be optionally including to support.zip # enabled: true + +# -- Metadata and pod spec for pods started in Helm tests +# +testPods: + labels: {} + annotations: {} + nodeSelector: {} + tolerations: [] + affinity: {} + schedulerName: + image: + permissionsTestContainer: debian:stable-slim + statusTestContainer: alpine:latest diff --git a/src/main/charts/bitbucket/README.md b/src/main/charts/bitbucket/README.md index 9265de209..dfbde6af1 100644 --- a/src/main/charts/bitbucket/README.md +++ b/src/main/charts/bitbucket/README.md @@ -4,8 +4,6 @@ A chart for installing Bitbucket Data Center on Kubernetes -For installation please follow [the documentation](https://atlassian.github.io/data-center-helm-charts/). - **Homepage:** ## Source Code @@ -25,212 +23,216 @@ Kubernetes: `>=1.21.x-0` | Key | Type | Default | Description | |-----|------|---------|-------------| -| additionalConfigMaps | list | `[]` | Create additional ConfigMaps with given names, keys and content. Ther Helm release name will be used as a prefix for a ConfigMap name, fileName is used as subPath | -| additionalContainers | list | `[]` | Additional container definitions that will be added to all Bitbucket pods | -| additionalFiles | list | `[]` | Additional existing ConfigMaps and Secrets not managed by Helm that should be mounted into service container. Configuration details below (camelCase is important!): 'name' - References existing ConfigMap or secret name. 'type' - 'configMap' or 'secret' 'key' - The file name. 'mountPath' - The destination directory in a container. VolumeMount and Volumes are added with this name and index position, for example; custom-config-0, keystore-2 | +| additionalConfigMaps | list | `[]` | Create additional ConfigMaps with given names, keys and content. Ther Helm release name will be used as a prefix for a ConfigMap name, fileName is used as subPath | +| additionalContainers | list | `[]` | Additional container definitions that will be added to all Bitbucket pods | +| additionalFiles | list | `[]` | Additional existing ConfigMaps and Secrets not managed by Helm that should be mounted into service container. Configuration details below (camelCase is important!): 'name' - References existing ConfigMap or secret name. 'type' - 'configMap' or 'secret' 'key' - The file name. 'mountPath' - The destination directory in a container. VolumeMount and Volumes are added with this name and index position, for example; custom-config-0, keystore-2 | | additionalHosts | list | `[]` | Additional host aliases for each pod, equivalent to adding them to the /etc/hosts file. 
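The `testPods` block added to values.yaml above ships with empty defaults; the sketch below shows a hypothetical user override (the node label, toleration key and scheduler name are placeholders, not values defined by the charts). Both labels and annotations are rendered through `tpl` by the test templates, so they may reference other chart values or release metadata:

```yaml
testPods:
  labels:
    app.kubernetes.io/component: helm-test
  annotations:
    release: "{{ .Release.Name }}"   # rendered via tpl in the test templates
  nodeSelector:
    pool: test-workloads             # placeholder node label
  tolerations:
    - key: dedicated                 # placeholder taint key
      operator: Equal
      value: test
      effect: NoSchedule
  schedulerName: custom-scheduler    # placeholder scheduler name
  image:
    permissionsTestContainer: debian:stable-slim
    statusTestContainer: alpine:latest
```

With such an override in place, the pods created by `helm test <release>` should pick up the custom metadata and scheduling constraints instead of the chart-wide pod settings.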
https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ | -| additionalInitContainers | list | `[]` | Additional initContainer definitions that will be added to all Bitbucket pods | -| additionalLabels | object | `{}` | Additional labels that should be applied to all resources | -| affinity | object | `{}` | Standard Kubernetes affinities that will be applied to all Bitbucket pods Due to the performance requirements it is highly recommended running all Bitbucket pods in the same availability zone as your dedicated NFS server. To achieve this, you can define `affinity` and `podAffinity` rules that will place all pods into the same zone, and therefore minimise the real distance between the application pods and the shared storage. More specific documentation can be found in the official Affinity and Anti-affinity documentation: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity This is an example on how to ensure the pods are in the same zone as NFS server that is labeled with `role=nfs-server`: podAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: matchExpressions: - key: role operator: In values: - nfs-server # needs to be the same value as NFS server deployment topologyKey: topology.kubernetes.io/zone | -| atlassianAnalyticsAndSupport.analytics.enabled | bool | `true` | Mount ConfigMap with selected Helm chart values as a JSON which DC products will read and send analytics events to Atlassian data pipelines | -| atlassianAnalyticsAndSupport.helmValues.enabled | bool | `true` | Mount ConfigMap with selected Helm chart values as a YAML file which can be optionally including to support.zip | -| bitbucket.additionalBundledPlugins | list | `[]` | Specifies a list of additional Bitbucket plugins that should be added to the Bitbucket container. Note plugins installed via this method will appear as bundled plugins rather than user plugins. These should be specified in the same manner as the 'additionalLibraries' property. Additional details: https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ NOTE: only .jar files can be loaded using this approach. OBR's can be extracted (unzipped) to access the associated .jar An alternative to this method is to install the plugins via "Manage Apps" in the product system administration UI. | -| bitbucket.additionalCertificates | object | `{"customCmd":null,"secretName":null}` | Certificates to be added to Java truststore. Provide reference to a secret that contains the certificates | -| bitbucket.additionalEnvironmentVariables | list | `[]` | Defines any additional environment variables to be passed to the Bitbucket container. See https://hub.docker.com/r/atlassian/bitbucket-server for supported variables. | -| bitbucket.additionalJvmArgs | list | `[]` | Specifies a list of additional arguments that can be passed to the Bitbucket JVM, e.g. system properties. | -| bitbucket.additionalLibraries | list | `[]` | Specifies a list of additional Java libraries that should be added to the Bitbucket container. Each item in the list should specify the name of the volume that contains the library, as well as the name of the library file within that volume's root directory. Optionally, a subDirectory field can be included to specify which directory in the volume contains the library file. 
Additional details: https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ | -| bitbucket.additionalPorts | list | `[]` | Defines any additional ports for the Bitbucket container. | -| bitbucket.additionalVolumeClaimTemplates | list | `[]` | Defines additional volumeClaimTemplates that should be applied to the Bitbucket pod. Note that this will not create any corresponding volume mounts; those needs to be defined in bitbucket.additionalVolumeMounts | -| bitbucket.additionalVolumeMounts | list | `[]` | Defines any additional volumes mounts for the Bitbucket container. These can refer to existing volumes, or new volumes can be defined via 'volumes.additional'. | -| bitbucket.applicationMode | string | `"default"` | Application Mode This can be either 'default' or 'mirror' | -| bitbucket.clustering.enabled | bool | `false` | Set to 'true' if Data Center clustering should be enabled This will automatically configure cluster peer discovery between cluster nodes. | -| bitbucket.clustering.group.nameSecretKey | string | `"name"` | The key in the Kubernetes Secret that contains the Hazelcast group name. | -| bitbucket.clustering.group.passwordSecretKey | string | `"password"` | The key in the Kubernetes Secret that contains the Hazelcast group password. | -| bitbucket.clustering.group.secretName | string | `nil` | The name of the Kubernetes Secret that contains the Hazelcast group credentials. Example of creating a credentials K8s secret below: 'kubectl create secret generic --from-literal=name= \ --from-literal=password=' https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets If no secret is specified, a default group name will be used and a random password will be generated during installation. | -| bitbucket.containerSecurityContext | object | `{}` | Standard K8s field that holds security configurations that will be applied to a container. https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | -| bitbucket.displayName | string | `nil` | Set the display name of the Bitbucket instance. Note that this value is only used during installation and changing the value during an upgrade has no effect. | -| bitbucket.elasticSearch.baseUrl | string | `nil` | The base URL of the external Elasticsearch instance to be used, for example: http://elasticsearch-master..svc.cluster.local:9200 If this is defined, then Bitbucket will disable its internal Elasticsearch instance. | -| bitbucket.elasticSearch.credentials.passwordSecretKey | string | `"password"` | The key in the Kubernetes Secret that contains the Elasticsearch password. | -| bitbucket.elasticSearch.credentials.secretName | string | `nil` | The name of the Kubernetes Secret that contains the Elasticsearch credentials. Example of creating a credentials K8s secret below: 'kubectl create secret generic --from-literal=username= \ --from-literal=password=' https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets | -| bitbucket.elasticSearch.credentials.usernameSecretKey | string | `"username"` | The key in the Kubernetes Secret that contains the Elasticsearch username. 
| -| bitbucket.hazelcastService.annotations | object | `{}` | Additional annotations to apply to the Hazelcast Service | +| additionalInitContainers | list | `[]` | Additional initContainer definitions that will be added to all Bitbucket pods | +| additionalLabels | object | `{}` | Additional labels that should be applied to all resources | +| affinity | object | `{}` | Standard Kubernetes affinities that will be applied to all Bitbucket pods Due to the performance requirements it is highly recommended running all Bitbucket pods in the same availability zone as your dedicated NFS server. To achieve this, you can define `affinity` and `podAffinity` rules that will place all pods into the same zone, and therefore minimise the real distance between the application pods and the shared storage. More specific documentation can be found in the official Affinity and Anti-affinity documentation: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity This is an example on how to ensure the pods are in the same zone as NFS server that is labeled with `role=nfs-server`: podAffinity: requiredDuringSchedulingIgnoredDuringExecution: - labelSelector: matchExpressions: - key: role operator: In values: - nfs-server # needs to be the same value as NFS server deployment topologyKey: topology.kubernetes.io/zone | +| atlassianAnalyticsAndSupport.analytics.enabled | bool | `true` | Mount ConfigMap with selected Helm chart values as a JSON which DC products will read and send analytics events to Atlassian data pipelines | +| atlassianAnalyticsAndSupport.helmValues.enabled | bool | `true` | Mount ConfigMap with selected Helm chart values as a YAML file which can be optionally including to support.zip | +| bitbucket.additionalBundledPlugins | list | `[]` | Specifies a list of additional Bitbucket plugins that should be added to the Bitbucket container. Note plugins installed via this method will appear as bundled plugins rather than user plugins. These should be specified in the same manner as the 'additionalLibraries' property. Additional details: https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ NOTE: only .jar files can be loaded using this approach. OBR's can be extracted (unzipped) to access the associated .jar An alternative to this method is to install the plugins via "Manage Apps" in the product system administration UI. | +| bitbucket.additionalCertificates | object | `{"customCmd":null,"secretName":null}` | Certificates to be added to Java truststore. Provide reference to a secret that contains the certificates | +| bitbucket.additionalEnvironmentVariables | list | `[]` | Defines any additional environment variables to be passed to the Bitbucket container. See https://hub.docker.com/r/atlassian/bitbucket-server for supported variables. | +| bitbucket.additionalJvmArgs | list | `[]` | Specifies a list of additional arguments that can be passed to the Bitbucket JVM, e.g. system properties. | +| bitbucket.additionalLibraries | list | `[]` | Specifies a list of additional Java libraries that should be added to the Bitbucket container. Each item in the list should specify the name of the volume that contains the library, as well as the name of the library file within that volume's root directory. Optionally, a subDirectory field can be included to specify which directory in the volume contains the library file. 
Additional details: https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ | +| bitbucket.additionalPorts | list | `[]` | Defines any additional ports for the Bitbucket container. | +| bitbucket.additionalVolumeClaimTemplates | list | `[]` | Defines additional volumeClaimTemplates that should be applied to the Bitbucket pod. Note that this will not create any corresponding volume mounts; those needs to be defined in bitbucket.additionalVolumeMounts | +| bitbucket.additionalVolumeMounts | list | `[]` | Defines any additional volumes mounts for the Bitbucket container. These can refer to existing volumes, or new volumes can be defined via 'volumes.additional'. | +| bitbucket.applicationMode | string | `"default"` | Application Mode This can be either 'default' or 'mirror' | +| bitbucket.clustering.enabled | bool | `false` | Set to 'true' if Data Center clustering should be enabled This will automatically configure cluster peer discovery between cluster nodes. | +| bitbucket.clustering.group.nameSecretKey | string | `"name"` | The key in the Kubernetes Secret that contains the Hazelcast group name. | +| bitbucket.clustering.group.passwordSecretKey | string | `"password"` | The key in the Kubernetes Secret that contains the Hazelcast group password. | +| bitbucket.clustering.group.secretName | string | `nil` | from-literal=password=' https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets If no secret is specified, a default group name will be used and a random password will be generated during installation. | +| bitbucket.containerSecurityContext | object | `{}` | Standard K8s field that holds security configurations that will be applied to a container. https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | +| bitbucket.displayName | string | `nil` | Set the display name of the Bitbucket instance. Note that this value is only used during installation and changing the value during an upgrade has no effect. | +| bitbucket.elasticSearch.baseUrl | string | `nil` | The base URL of the external Elasticsearch instance to be used, for example: http://elasticsearch-master..svc.cluster.local:9200 If this is defined, then Bitbucket will disable its internal Elasticsearch instance. | +| bitbucket.elasticSearch.credentials.passwordSecretKey | string | `"password"` | The key in the Kubernetes Secret that contains the Elasticsearch password. | +| bitbucket.elasticSearch.credentials.secretName | string | `nil` | from-literal=password=' https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets | +| bitbucket.elasticSearch.credentials.usernameSecretKey | string | `"username"` | The key in the Kubernetes Secret that contains the Elasticsearch username. | +| bitbucket.hazelcastService.annotations | object | `{}` | Additional annotations to apply to the Hazelcast Service | | bitbucket.hazelcastService.enabled | bool | `false` | Enable or disable an additional Hazelcast service that Bitbucket nodes can use to join a cluster. It is recommended to create a separate Hazelcast service if the Bitbucket service uses a LoadBalancer type (e.g., NLB), ensuring that the Hazelcast port is not exposed at all. 
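For the scenario described above, where the main Bitbucket Service is externally exposed but Hazelcast traffic should stay cluster-internal, a hedged values sketch might look like this (the choice of LoadBalancer for the main Service is an assumption and depends on the target environment):

```yaml
bitbucket:
  clustering:
    enabled: true
  service:
    type: LoadBalancer        # externally reachable HTTP service
  hazelcastService:
    enabled: true             # dedicated Service for cluster peer traffic
    type: ClusterIP           # keeps the Hazelcast port internal
    port: 5701
```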
| -| bitbucket.hazelcastService.port | int | `5701` | The port on which the Bitbucket K8s Hazelcast Service will listen | -| bitbucket.hazelcastService.type | string | `"ClusterIP"` | The type of the Hazelcast K8s service to use for Bitbucket | -| bitbucket.license.secretKey | string | `"license-key"` | The key in the K8s Secret that contains the Bitbucket license key | -| bitbucket.license.secretName | string | `nil` | The name of the K8s Secret that contains the Bitbucket license key. If specified, then the license will be automatically populated during Bitbucket setup. Otherwise, it will need to be provided via the browser after initial startup. An Example of creating a K8s secret for the license below: 'kubectl create secret generic --from-literal=license-key= https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets | -| bitbucket.livenessProbe.enabled | bool | `false` | Whether to apply the livenessProbe check to pod. | -| bitbucket.livenessProbe.failureThreshold | int | `12` | The number of consecutive failures of the Bitbucket container liveness probe before the pod fails liveness checks. | -| bitbucket.livenessProbe.initialDelaySeconds | int | `60` | Time to wait before starting the first probe | -| bitbucket.livenessProbe.periodSeconds | int | `5` | How often (in seconds) the Bitbucket container liveness probe will run | -| bitbucket.livenessProbe.timeoutSeconds | int | `1` | Number of seconds after which the probe times out | -| bitbucket.mesh.additionalCertificates | object | `{"customCmd":null,"secretName":null}` | Certificates to be added to Java truststore. Provide reference to a secret that contains the certificates | -| bitbucket.mesh.additionalEnvironmentVariables | object | `{}` | Defines any additional environment variables to be passed to the Bitbucket mesh containers. | -| bitbucket.mesh.additionalFiles | string | `nil` | Additional existing ConfigMaps and Secrets not managed by Helm that should be mounted into service container | -| bitbucket.mesh.additionalInitContainers | object | `{}` | Additional initContainer definitions that will be added to all Bitbucket pods | -| bitbucket.mesh.additionalJvmArgs | list | `[]` | Specifies a list of additional arguments that can be passed to the Bitbucket Mesh JVM, e.g. system properties. | -| bitbucket.mesh.affinity | object | `{}` | Standard Kubernetes affinities that will be applied to all Bitbucket mesh pods | -| bitbucket.mesh.enabled | bool | `false` | Enable Bitbucket Mesh. See: https://confluence.atlassian.com/bitbucketserver/bitbucket-mesh-1128304351.html | -| bitbucket.mesh.image | object | `{"pullPolicy":"IfNotPresent","repository":"atlassian/bitbucket-mesh","tag":"2.0.1"}` | The Bitbucket Mesh image to use https://hub.docker.com/r/atlassian/bitbucket-mesh | -| bitbucket.mesh.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | -| bitbucket.mesh.image.repository | string | `"atlassian/bitbucket-mesh"` | The Bitbucket Mesh image repository https://hub.docker.com/r/atlassian/bitbucket-mesh | -| bitbucket.mesh.image.tag | string | `"2.0.1"` | The docker image tag to be used | -| bitbucket.mesh.nodeAutoRegistration | bool | `false` | Experimental! Automatically register Bitbucket mesh nodes with the Bitbucket server. `bitbucket.sysadminCredentials.secretName` needs to be defined to provide credentials to post-install node registration jobs that are created only for new Helm chart installations. It is recommended to manually register Mesh nodes in Butbucket UI. 
| -| bitbucket.mesh.nodeSelector | object | `{}` | Standard K8s node-selectors that will be applied to all Bitbucket Mesh pods | -| bitbucket.mesh.podAnnotations | object | `{}` | Custom annotations that will be applied to all Bitbucket Mesh pods | -| bitbucket.mesh.podLabels | object | `{}` | Custom labels that will be applied to all Bitbucket Mesh pods | +| bitbucket.hazelcastService.port | int | `5701` | The port on which the Bitbucket K8s Hazelcast Service will listen | +| bitbucket.hazelcastService.type | string | `"ClusterIP"` | The type of the Hazelcast K8s service to use for Bitbucket | +| bitbucket.license.secretKey | string | `"license-key"` | The key in the K8s Secret that contains the Bitbucket license key | +| bitbucket.license.secretName | string | `nil` | The name of the K8s Secret that contains the Bitbucket license key. If specified, then the license will be automatically populated during Bitbucket setup. Otherwise, it will need to be provided via the browser after initial startup. An Example of creating a K8s secret for the license below: 'kubectl create secret generic --from-literal=license-key= https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets | +| bitbucket.livenessProbe.enabled | bool | `false` | Whether to apply the livenessProbe check to pod. | +| bitbucket.livenessProbe.failureThreshold | int | `12` | The number of consecutive failures of the Bitbucket container liveness probe before the pod fails liveness checks. | +| bitbucket.livenessProbe.initialDelaySeconds | int | `60` | Time to wait before starting the first probe | +| bitbucket.livenessProbe.periodSeconds | int | `5` | How often (in seconds) the Bitbucket container liveness probe will run | +| bitbucket.livenessProbe.timeoutSeconds | int | `1` | Number of seconds after which the probe times out | +| bitbucket.mesh.additionalCertificates | object | `{"customCmd":null,"secretName":null}` | Certificates to be added to Java truststore. Provide reference to a secret that contains the certificates | +| bitbucket.mesh.additionalEnvironmentVariables | object | `{}` | Defines any additional environment variables to be passed to the Bitbucket mesh containers. | +| bitbucket.mesh.additionalFiles | string | `nil` | Additional existing ConfigMaps and Secrets not managed by Helm that should be mounted into service container | +| bitbucket.mesh.additionalInitContainers | object | `{}` | Additional initContainer definitions that will be added to all Bitbucket pods | +| bitbucket.mesh.additionalJvmArgs | list | `[]` | Specifies a list of additional arguments that can be passed to the Bitbucket Mesh JVM, e.g. system properties. | +| bitbucket.mesh.affinity | object | `{}` | Standard Kubernetes affinities that will be applied to all Bitbucket mesh pods | +| bitbucket.mesh.enabled | bool | `false` | Enable Bitbucket Mesh. 
See: https://confluence.atlassian.com/bitbucketserver/bitbucket-mesh-1128304351.html | +| bitbucket.mesh.image | object | `{"pullPolicy":"IfNotPresent","repository":"atlassian/bitbucket-mesh","tag":"2.0.1"}` | The Bitbucket Mesh image to use https://hub.docker.com/r/atlassian/bitbucket-mesh | +| bitbucket.mesh.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | +| bitbucket.mesh.image.repository | string | `"atlassian/bitbucket-mesh"` | The Bitbucket Mesh image repository https://hub.docker.com/r/atlassian/bitbucket-mesh | +| bitbucket.mesh.image.tag | string | `"2.0.1"` | The docker image tag to be used | +| bitbucket.mesh.nodeAutoRegistration | bool | `false` | Experimental! Automatically register Bitbucket mesh nodes with the Bitbucket server. `bitbucket.sysadminCredentials.secretName` needs to be defined to provide credentials to post-install node registration jobs that are created only for new Helm chart installations. It is recommended to manually register Mesh nodes in Butbucket UI. | +| bitbucket.mesh.nodeSelector | object | `{}` | Standard K8s node-selectors that will be applied to all Bitbucket Mesh pods | +| bitbucket.mesh.podAnnotations | object | `{}` | Custom annotations that will be applied to all Bitbucket Mesh pods | +| bitbucket.mesh.podLabels | object | `{}` | Custom labels that will be applied to all Bitbucket Mesh pods | | bitbucket.mesh.podManagementPolicy | string | `"OrderedReady"` | | -| bitbucket.mesh.priorityClassName | string | `nil` | Pod PriorityClassName https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass | -| bitbucket.mesh.replicaCount | int | `3` | Number of Bitbucket Mesh nodes. Do not change it. Currently, only the quorum of 3 mesh nodes is supported. Reducing the number of replicas will result in mesh degradation while increasing the number of Mesh nodes will result in new nodes being unused by the Bitbucket server. | -| bitbucket.mesh.resources | object | `{"container":{"limits":{"cpu":"2","memory":"2G"},"requests":{"cpu":"1","memory":"2G"}},"jvm":{"maxHeap":"1g","minHeap":"512m"}}` | Bitbucket Mesh resources requests and limits | -| bitbucket.mesh.resources.container | object | `{"limits":{"cpu":"2","memory":"2G"},"requests":{"cpu":"1","memory":"2G"}}` | Bitbucket Mesh container cpu/mem requests and limits | -| bitbucket.mesh.resources.jvm | object | `{"maxHeap":"1g","minHeap":"512m"}` | Bitbucket Mesh JVM heap settings | -| bitbucket.mesh.schedulerName | string | `nil` | Standard K8s schedulerName that will be applied to all Bitbucket pods. Check Kubernetes documentation on how to configure multiple schedulers: https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/#specify-schedulers-for-pods | -| bitbucket.mesh.service.annotations | object | `{}` | Bitbucket mesh service annotations | -| bitbucket.mesh.service.loadBalancerIP | string | `nil` | Use specific loadBalancerIP. Only applies to service type LoadBalancer. | -| bitbucket.mesh.service.port | int | `7777` | Bitbucket Mesh port | -| bitbucket.mesh.service.type | string | `"ClusterIP"` | The type of K8s service to use for Bitbucket mesh service | -| bitbucket.mesh.setByDefault | bool | `false` | Experimental! Automatically create all new repositories on Bitbucket mesh nodes. `bitbucket.sysadminCredentials.secretName` needs to be defined to provide credentials to node post-install job. It is recommended to manually configure it in Bitbucket UI. 
| -| bitbucket.mesh.shutdown.terminationGracePeriodSeconds | int | `35` | The termination grace period for pods during shutdown. This should be set to the Bitbucket internal grace period (default 30 seconds), plus a small buffer to allow the JVM to fully terminate. | -| bitbucket.mesh.tolerations | object | `{}` | Standard K8s tolerations that will be applied to all Bitbucket Mesh pods | -| bitbucket.mesh.topologySpreadConstraints | object | `{}` | Defines topology spread constraints for Bitbucket Mesh pods. See details: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ | -| bitbucket.mesh.volume | object | `{"create":true,"mountPath":"/var/atlassian/application-data/mesh","resources":{"requests":{"storage":"1Gi"}},"storageClass":null}` | Mesh home volume settings. Disabling persistence results in data loss! | -| bitbucket.mirror.upstreamUrl | string | `nil` | Specifies the URL of the upstream Bitbucket server for this mirror. | -| bitbucket.podManagementStrategy | string | `"OrderedReady"` | Pod management strategy. Bitbucket Data Center requires the "OrderedReady" value but for Bitbucket Mirrors you can use the "Parallel" option. To learn more, visit https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies | -| bitbucket.ports.hazelcast | int | `5701` | The port on which the Hazelcast listens for client traffic | -| bitbucket.ports.http | int | `7990` | The port on which the Bitbucket container listens for HTTP traffic | -| bitbucket.ports.ssh | int | `7999` | The port on which the Bitbucket SSH service will listen on. Must be within 1024-65535 range | -| bitbucket.readinessProbe.customProbe | object | `{}` | Custom readinessProbe to override the default /status httpGet | -| bitbucket.readinessProbe.enabled | bool | `true` | Whether to apply the readinessProbe check to pod. | -| bitbucket.readinessProbe.failureThreshold | int | `60` | The number of consecutive failures of the Bitbucket container readiness probe before the pod fails readiness checks. | -| bitbucket.readinessProbe.initialDelaySeconds | int | `10` | The initial delay (in seconds) for the Bitbucket container readiness probe, after which the probe will start running. | -| bitbucket.readinessProbe.periodSeconds | int | `5` | How often (in seconds) the Bitbucket container readiness probe will run | -| bitbucket.readinessProbe.timeoutSeconds | int | `1` | Number of seconds after which the probe times out | -| bitbucket.resources.container.requests.cpu | string | `"2"` | Initial CPU request by Bitbucket pod | -| bitbucket.resources.container.requests.memory | string | `"2G"` | Initial Memory request by Bitbucket pod | +| bitbucket.mesh.priorityClassName | string | `nil` | Pod PriorityClassName https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass | +| bitbucket.mesh.replicaCount | int | `3` | Number of Bitbucket Mesh nodes. Do not change it. Currently, only the quorum of 3 mesh nodes is supported. Reducing the number of replicas will result in mesh degradation while increasing the number of Mesh nodes will result in new nodes being unused by the Bitbucket server. 
| +| bitbucket.mesh.resources | object | `{"container":{"limits":{"cpu":"2","memory":"2G"},"requests":{"cpu":"1","memory":"2G"}},"jvm":{"maxHeap":"1g","minHeap":"512m"}}` | Bitbucket Mesh resources requests and limits | +| bitbucket.mesh.resources.container | object | `{"limits":{"cpu":"2","memory":"2G"},"requests":{"cpu":"1","memory":"2G"}}` | Bitbucket Mesh container cpu/mem requests and limits | +| bitbucket.mesh.resources.jvm | object | `{"maxHeap":"1g","minHeap":"512m"}` | Bitbucket Mesh JVM heap settings | +| bitbucket.mesh.schedulerName | string | `nil` | Standard K8s schedulerName that will be applied to all Bitbucket pods. Check Kubernetes documentation on how to configure multiple schedulers: https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/#specify-schedulers-for-pods | +| bitbucket.mesh.service.annotations | object | `{}` | Bitbucket mesh service annotations | +| bitbucket.mesh.service.loadBalancerIP | string | `nil` | Use specific loadBalancerIP. Only applies to service type LoadBalancer. | +| bitbucket.mesh.service.port | int | `7777` | Bitbucket Mesh port | +| bitbucket.mesh.service.type | string | `"ClusterIP"` | The type of K8s service to use for Bitbucket mesh service | +| bitbucket.mesh.setByDefault | bool | `false` | Experimental! Automatically create all new repositories on Bitbucket mesh nodes. `bitbucket.sysadminCredentials.secretName` needs to be defined to provide credentials to node post-install job. It is recommended to manually configure it in Bitbucket UI. | +| bitbucket.mesh.shutdown.terminationGracePeriodSeconds | int | `35` | The termination grace period for pods during shutdown. This should be set to the Bitbucket internal grace period (default 30 seconds), plus a small buffer to allow the JVM to fully terminate. | +| bitbucket.mesh.tolerations | object | `{}` | Standard K8s tolerations that will be applied to all Bitbucket Mesh pods | +| bitbucket.mesh.topologySpreadConstraints | object | `{}` | Defines topology spread constraints for Bitbucket Mesh pods. See details: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ | +| bitbucket.mesh.volume | object | `{"create":true,"mountPath":"/var/atlassian/application-data/mesh","resources":{"requests":{"storage":"1Gi"}},"storageClass":null}` | Mesh home volume settings. Disabling persistence results in data loss! | +| bitbucket.mirror.upstreamUrl | string | `nil` | Specifies the URL of the upstream Bitbucket server for this mirror. | +| bitbucket.podManagementStrategy | string | `"OrderedReady"` | Pod management strategy. Bitbucket Data Center requires the "OrderedReady" value but for Bitbucket Mirrors you can use the "Parallel" option. To learn more, visit https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies | +| bitbucket.ports.hazelcast | int | `5701` | The port on which the Hazelcast listens for client traffic | +| bitbucket.ports.http | int | `7990` | The port on which the Bitbucket container listens for HTTP traffic | +| bitbucket.ports.ssh | int | `7999` | The port on which the Bitbucket SSH service will listen on. Must be within 1024-65535 range | +| bitbucket.readinessProbe.customProbe | object | `{}` | Custom readinessProbe to override the default /status httpGet | +| bitbucket.readinessProbe.enabled | bool | `true` | Whether to apply the readinessProbe check to pod. 
| +| bitbucket.readinessProbe.failureThreshold | int | `60` | The number of consecutive failures of the Bitbucket container readiness probe before the pod fails readiness checks. | +| bitbucket.readinessProbe.initialDelaySeconds | int | `10` | The initial delay (in seconds) for the Bitbucket container readiness probe, after which the probe will start running. | +| bitbucket.readinessProbe.periodSeconds | int | `5` | How often (in seconds) the Bitbucket container readiness probe will run | +| bitbucket.readinessProbe.timeoutSeconds | int | `1` | Number of seconds after which the probe times out | +| bitbucket.resources.container.requests.cpu | string | `"2"` | Initial CPU request by Bitbucket pod | +| bitbucket.resources.container.requests.memory | string | `"2G"` | Initial Memory request by Bitbucket pod | | bitbucket.resources.jvm.maxHeap | string | `"1g"` | The maximum amount of heap memory that will be used by the Bitbucket JVM The same value will be used by the Elasticsearch JVM. | | bitbucket.resources.jvm.minHeap | string | `"512m"` | The minimum amount of heap memory that will be used by the Bitbucket JVM The same value will be used by the Elasticsearch JVM. | -| bitbucket.securityContext.fsGroup | int | `2003` | The GID used by the Bitbucket docker image GID will default to 2003 if not supplied and securityContextEnabled is set to true. This is intended to ensure that the shared-home volume is group-writeable by the GID used by the Bitbucket container. However, this doesn't appear to work for NFS volumes due to a K8s bug: https://github.com/kubernetes/examples/issues/260 | -| bitbucket.securityContextEnabled | bool | `true` | | -| bitbucket.service.annotations | object | `{}` | Additional annotations to apply to the Service | -| bitbucket.service.contextPath | string | `nil` | The context path that Bitbucket will use. | -| bitbucket.service.loadBalancerIP | string | `nil` | Use specific loadBalancerIP. Only applies to service type LoadBalancer. | -| bitbucket.service.port | int | `80` | The port on which the Bitbucket K8s HTTP Service will listen | -| bitbucket.service.sessionAffinity | string | `"None"` | Session affinity type. If you want to make sure that connections from a particular client are passed to the same pod each time, set sessionAffinity to ClientIP. See: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity | -| bitbucket.service.sessionAffinityConfig | object | `{"clientIP":{"timeoutSeconds":null}}` | Session affinity configuration | -| bitbucket.service.sessionAffinityConfig.clientIP.timeoutSeconds | string | `nil` | Specifies the seconds of ClientIP type session sticky time. The value must be > 0 && <= 86400 (for 1 day) if ServiceAffinity == "ClientIP". Default value is 10800 (for 3 hours). | -| bitbucket.service.sshPort | int | `7999` | The port on which the Bitbucket K8s SSH Service will listen | -| bitbucket.service.type | string | `"ClusterIP"` | The type of K8s service to use for Bitbucket | -| bitbucket.setPermissions | bool | `true` | Boolean to define whether to set local home directory permissions on startup of Bitbucket container. Set to 'false' to disable this behaviour. | -| bitbucket.shutdown.command | string | `"/shutdown-wait.sh"` | By default pods will be stopped via a [preStop hook](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/), using a script supplied by the Docker image. If any other shutdown behaviour is needed it can be achieved by overriding this value. 
Note that the shutdown command needs to wait for the application shutdown completely before exiting; see [the default command](https://bitbucket.org/atlassian-docker/docker-atlassian-bitbucket-server/src/master/shutdown-wait.sh) for details. | -| bitbucket.shutdown.terminationGracePeriodSeconds | int | `35` | The termination grace period for pods during shutdown. This should be set to the Bitbucket internal grace period (default 30 seconds), plus a small buffer to allow the JVM to fully terminate. | -| bitbucket.sshService | object | `{"annotations":{},"enabled":false,"host":null,"loadBalancerIP":null,"port":22,"type":"LoadBalancer"}` | Enable or disable an additional service for exposing SSH for external access. Disable when the SSH service is exposed through the ingress controller, or enable if the ingress controller does not support TCP. | -| bitbucket.sshService.annotations | object | `{}` | Annotations for the SSH service. Useful if a load balancer controller needs extra annotations. | -| bitbucket.sshService.enabled | bool | `false` | Set to 'true' if an additional SSH Service should be created | -| bitbucket.sshService.host | string | `nil` | The hostname of the SSH service. If set, it'll be used to configure the SSH base URL for the application. | -| bitbucket.sshService.loadBalancerIP | string | `nil` | Use specific loadBalancerIP. Only applies to service type LoadBalancer. | -| bitbucket.sshService.port | int | `22` | Port to expose the SSH service on. | -| bitbucket.sshService.type | string | `"LoadBalancer"` | SSH Service type | -| bitbucket.startupProbe.enabled | bool | `false` | Whether to apply the startupProbe check to pod. | -| bitbucket.startupProbe.failureThreshold | int | `120` | The number of consecutive failures of the Bitbucket container startup probe before the pod fails startup checks. | -| bitbucket.startupProbe.initialDelaySeconds | int | `60` | Time to wait before starting the first probe | -| bitbucket.startupProbe.periodSeconds | int | `5` | How often (in seconds) the Bitbucket container startup probe will run | -| bitbucket.sysadminCredentials.displayNameSecretKey | string | `"displayName"` | The key in the Kubernetes Secret that contains the sysadmin display name | -| bitbucket.sysadminCredentials.emailAddressSecretKey | string | `"emailAddress"` | The key in the Kubernetes Secret that contains the sysadmin email address | -| bitbucket.sysadminCredentials.passwordSecretKey | string | `"password"` | The key in the Kubernetes Secret that contains the sysadmin password | -| bitbucket.sysadminCredentials.secretName | string | `nil` | The name of the Kubernetes Secret that contains the Bitbucket sysadmin credentials If specified, then these will be automatically populated during Bitbucket setup. Otherwise, they will need to be provided via the browser after initial startup. | -| bitbucket.sysadminCredentials.usernameSecretKey | string | `"username"` | The key in the Kubernetes Secret that contains the sysadmin username | -| bitbucket.topologySpreadConstraints | list | `[]` | Defines topology spread constraints for Bitbucket pods. See details: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ | -| bitbucket.useHelmReleaseNameAsContainerName | bool | `false` | Whether the main container should acquire helm release name. By default the container name is `bitbucket` which corresponds to the name of the Helm Chart. 
| -| database.credentials.passwordSecretKey | string | `"password"` | The key ('password') in the Secret used to store the database login password | -| database.credentials.secretName | string | `nil` | The name of the K8s Secret that contains the database login credentials. If the secret is specified, then the credentials will be automatically utilised on Bitbucket startup. If the secret is not provided, then the credentials will need to be provided via the browser during manual configuration post deployment. Example of creating a database credentials K8s secret below: 'kubectl create secret generic --from-literal=username= \ --from-literal=password=' https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets | -| database.credentials.usernameSecretKey | string | `"username"` | The key ('username') in the Secret used to store the database login username | -| database.driver | string | `nil` | The Java class name of the JDBC driver to be used. If not specified, then it will need to be provided via the browser during manual configuration post deployment. Valid drivers are: - 'org.postgresql.Driver' - 'com.mysql.jdbc.Driver' - 'oracle.jdbc.OracleDriver' - 'com.microsoft.sqlserver.jdbc.SQLServerDriver' https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#databasedriver: | -| database.url | string | `nil` | The jdbc URL of the database. If not specified, then it will need to be provided via the browser during manual configuration post deployment. Example URLs include: - 'jdbc:postgresql://:5432/' - 'jdbc:mysql:///' - 'jdbc:sqlserver://:1433;databaseName=' - 'jdbc:oracle:thin:@:1521:' https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#databaseurl | -| fluentd.command | string | `nil` | The command used to start Fluentd. If not supplied the default command will be used: "fluentd -c /fluentd/etc/fluent.conf -v" Note: The custom command can be free-form, however pay particular attention to the process that should ultimately be left running in the container. This process should be invoked with 'exec' so that signals are appropriately propagated to it, for instance SIGTERM. An example of how such a command may look is: " && && exec " | -| fluentd.customConfigFile | bool | `false` | Set to 'true' if a custom config (see 'configmap-fluentd.yaml' for default) should be used for Fluentd. If enabled this config must be supplied via the 'fluentdCustomConfig' property below. | -| fluentd.elasticsearch.enabled | bool | `true` | Set to 'true' if Fluentd should send all log events to an Elasticsearch service. | -| fluentd.elasticsearch.hostname | string | `"elasticsearch"` | The hostname of the Elasticsearch service that Fluentd should send logs to. | -| fluentd.enabled | bool | `false` | Set to 'true' if the Fluentd sidecar (DaemonSet) should be added to each pod | -| fluentd.extraVolumes | list | `[]` | Specify custom volumes to be added to Fluentd container (e.g. 
more log sources) | -| fluentd.fluentdCustomConfig | object | `{}` | Custom fluent.conf file | -| fluentd.imageRepo | string | `"fluent/fluentd-kubernetes-daemonset"` | The Fluentd sidecar image repository | -| fluentd.imageTag | string | `"v1.11.5-debian-elasticsearch7-1.2"` | The Fluentd sidecar image tag | -| fluentd.resources | object | `{}` | Resources requests and limits for fluentd sidecar container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | -| image | object | `{"pullPolicy":"IfNotPresent","repository":"atlassian/bitbucket","tag":""}` | Image configuration | -| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | -| image.repository | string | `"atlassian/bitbucket"` | The Bitbucket Docker image to use https://hub.docker.com/r/atlassian/bitbucket-server | -| image.tag | string | `""` | The docker image tag to be used - defaults to the Chart appVersion | -| ingress.annotations | object | `{}` | The custom annotations that should be applied to the Ingress Resource. If using an ingress-nginx controller be sure that the annotations you add here are compatible with those already defined in the 'ingess.yaml' template | -| ingress.className | string | `"nginx"` | The class name used by the ingress controller if it's being used. Please follow documentation of your ingress controller. If the cluster contains multiple ingress controllers, this setting allows you to control which of them is used for Atlassian application traffic. | -| ingress.create | bool | `false` | Set to 'true' if an Ingress Resource should be created. This depends on a pre-provisioned Ingress Controller being available. | -| ingress.host | string | `nil` | The fully-qualified hostname (FQDN) of the Ingress Resource. Traffic coming in on this hostname will be routed by the Ingress Resource to the appropriate backend Service. | -| ingress.https | bool | `true` | Set to 'true' if browser communication with the application should be TLS (HTTPS) enforced. | -| ingress.maxBodySize | string | `"250m"` | The max body size to allow. Requests exceeding this size will result in an HTTP 413 error being returned to the client. | -| ingress.nginx | bool | `true` | Set to 'true' if the Ingress Resource is to use the K8s 'ingress-nginx' controller. https://kubernetes.github.io/ingress-nginx/ This will populate the Ingress Resource with annotations that are specific to the K8s ingress-nginx controller. Set to 'false' if a different controller is to be used, in which case the appropriate annotations for that controller must be specified below under 'ingress.annotations'. | -| ingress.path | string | `nil` | The base path for the Ingress Resource. For example '/bitbucket'. Based on a 'ingress.host' value of 'company.k8s.com' this would result in a URL of 'company.k8s.com/bitbucket'. Default value is 'bitbucket.service.contextPath'. | -| ingress.proxyConnectTimeout | int | `60` | Defines a timeout for establishing a connection with a proxied server. It should be noted that this timeout cannot usually exceed 75 seconds. | -| ingress.proxyReadTimeout | int | `60` | Defines a timeout for reading a response from the proxied server. The timeout is set only between two successive read operations, not for the transmission of the whole response. If the proxied server does not transmit anything within this time, the connection is closed. | -| ingress.proxySendTimeout | int | `60` | Sets a timeout for transmitting a request to the proxied server. 
The timeout is set only between two successive write operations, not for the transmission of the whole request. If the proxied server does not receive anything within this time, the connection is closed. | -| ingress.tlsSecretName | string | `nil` | The name of the K8s Secret that contains the TLS private key and corresponding certificate. When utilised, TLS termination occurs at the ingress point where traffic to the Service, and it's Pods is in plaintext. Usage is optional and depends on your use case. The Ingress Controller itself can also be configured with a TLS secret for all Ingress Resources. https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets https://kubernetes.io/docs/concepts/services-networking/ingress/#tls | -| monitoring.exposeJmxMetrics | bool | `false` | Expose JMX metrics with jmx_exporter https://github.com/prometheus/jmx_exporter | -| monitoring.fetchJmxExporterJar | bool | `true` | Fetch jmx_exporter jar from the image. If set to false make sure to manually copy the jar to shared home and provide an absolute path in jmxExporterCustomJarLocation | -| monitoring.grafana.createDashboards | bool | `false` | Create ConfigMaps with Grafana dashboards | -| monitoring.grafana.dashboardAnnotations | object | `{}` | Annotations added to Grafana dashboards ConfigMaps. See: https://github.com/kiwigrid/k8s-sidecar#usage | -| monitoring.grafana.dashboardLabels | object | `{}` | Label selector for Grafana dashboard importer sidecar | -| monitoring.jmxExporterCustomConfig | object | `{}` | Custom JMX config with the rules | -| monitoring.jmxExporterCustomJarLocation | string | `nil` | Location of jmx_exporter jar file if mounted from a secret or manually copied to shared home | -| monitoring.jmxExporterImageRepo | string | `"bitnami/jmx-exporter"` | Image repository with jmx_exporter jar | -| monitoring.jmxExporterImageTag | string | `"0.18.0"` | Image tag to be used to pull jmxExporterImageRepo | -| monitoring.jmxExporterInitContainer | object | `{"customSecurityContext":{},"resources":{},"runAsRoot":true}` | JMX exporter init container configuration | -| monitoring.jmxExporterInitContainer.customSecurityContext | object | `{}` | Custom SecurityContext for the jmx exporter init container | -| monitoring.jmxExporterInitContainer.resources | object | `{}` | Resources requests and limits for the JMX exporter init container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | -| monitoring.jmxExporterInitContainer.runAsRoot | bool | `true` | Whether to run JMX exporter init container as root to copy JMX exporter binary to shared home volume. Set to false if running containers as root is not allowed in the cluster. | -| monitoring.jmxExporterPort | int | `9999` | Port number on which metrics will be available | -| monitoring.jmxExporterPortType | string | `"ClusterIP"` | JMX exporter port type | -| monitoring.jmxServiceAnnotations | object | `{}` | Annotations added to the jmx service | -| monitoring.serviceMonitor.create | bool | `false` | Create ServiceMonitor to start scraping metrics. ServiceMonitor CRD needs to be created in advance. | -| monitoring.serviceMonitor.prometheusLabelSelector | object | `{}` | ServiceMonitorSelector of the prometheus instance. | -| monitoring.serviceMonitor.scrapeIntervalSeconds | int | `30` | Scrape interval for the JMX service. 
| -| nodeSelector | object | `{}` | Standard K8s node-selectors that will be applied to all Bitbucket pods | -| podAnnotations | object | `{}` | Custom annotations that will be applied to all Bitbucket pods | -| podDisruptionBudget | object | `{"annotations":{},"enabled":false,"labels":{},"maxUnavailable":null,"minAvailable":null}` | PodDisruptionBudget: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ You can specify only one of maxUnavailable and minAvailable in a single PodDisruptionBudget. When both minAvailable and maxUnavailable are set, maxUnavailable takes precedence. | -| podLabels | object | `{}` | Custom labels that will be applied to all Bitbucket pods | -| priorityClassName | string | `nil` | Priority class for the application pods. The PriorityClass with this name needs to be available in the cluster. For details see https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass | -| replicaCount | int | `1` | The initial number of Bitbucket pods that should be started at deployment time. Note that if Bitbucket is fully configured (see above) during initial deployment a 'replicaCount' greater than 1 can be supplied. | -| schedulerName | string | `nil` | Standard K8s schedulerName that will be applied to all Bitbucket pods. Check Kubernetes documentation on how to configure multiple schedulers: https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/#specify-schedulers-for-pods | -| serviceAccount.annotations | object | `{}` | Annotations to add to the ServiceAccount (if created) | -| serviceAccount.clusterRole.create | bool | `false` | Set to 'true' if a ClusterRole should be created, or 'false' if it already exists. | -| serviceAccount.clusterRole.name | string | `nil` | The name of the ClusterRole to be used. If not specified, but the "serviceAccount.clusterRole.create" flag is set to 'true', then the ClusterRole name will be auto-generated. | -| serviceAccount.clusterRoleBinding.create | bool | `false` | Set to 'true' if a ClusterRoleBinding should be created, or 'false' if it already exists. | -| serviceAccount.clusterRoleBinding.name | string | `nil` | The name of the ClusterRoleBinding to be created. If not specified, but the "serviceAccount.clusterRoleBinding.create" flag is set to 'true', then the ClusterRoleBinding name will be auto-generated. | -| serviceAccount.create | bool | `true` | Set to 'true' if a ServiceAccount should be created, or 'false' if it already exists. | -| serviceAccount.imagePullSecrets | list | `[]` | For Docker images hosted in private registries, define the list of image pull secrets that should be utilized by the created ServiceAccount https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | -| serviceAccount.name | string | `nil` | The name of the ServiceAccount to be used by the pods. If not specified, but the "serviceAccount.create" flag is set to 'true', then the ServiceAccount name will be auto-generated, otherwise the 'default' ServiceAccount will be used. https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server | -| serviceAccount.role.create | bool | `true` | Create a role for Hazelcast client with privileges to get and list pods and endpoints in the namespace. 
Set to false if you need to create a Role and RoleBinding manually | -| serviceAccount.roleBinding | object | `{"create":true}` | Grant permissions defined in Role (list and get pods and endpoints) to a service account. | -| tolerations | list | `[]` | Standard K8s tolerations that will be applied to all Bitbucket pods | -| volumes.additional | list | `[]` | Defines additional volumes that should be applied to all Bitbucket pods. Note that this will not create any corresponding volume mounts; those need to be defined in bitbucket.additionalVolumeMounts | -| volumes.localHome.customVolume | object | `{}` | Static provisioning of local-home using K8s PVs and PVCs NOTE: Due to the ephemeral nature of pods this approach to provisioning volumes for pods is not recommended. Dynamic provisioning described above is the prescribed approach. When 'persistentVolumeClaim.create' is 'false', then this value can be used to define a standard K8s volume that will be used for the local-home volume(s). If not defined, then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object. https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static | -| volumes.localHome.mountPath | string | `"/var/atlassian/application-data/bitbucket"` | Specifies the path in the Bitbucket container to which the local-home volume will be mounted. | -| volumes.localHome.persistentVolumeClaim.create | bool | `false` | If 'true', then a 'PersistentVolume' and 'PersistentVolumeClaim' will be dynamically created for each pod based on the 'StorageClassName' supplied below. | -| volumes.localHome.persistentVolumeClaim.resources | object | `{"requests":{"storage":"1Gi"}}` | Specifies the standard K8s resource requests and/or limits for the local-home volume claims. | -| volumes.localHome.persistentVolumeClaim.storageClassName | string | `nil` | Specify the name of the 'StorageClass' that should be used for the local-home volume claim. | -| volumes.sharedHome.customVolume | object | `{}` | Static provisioning of shared-home using K8s PVs and PVCs When 'persistentVolume.create' and 'persistentVolumeClaim.create' are 'false', then this property can be used to define a custom volume that will be used for shared-home If not defined, then an 'emptyDir' volume is utilised. Having manually provisioned a 'PersistentVolume' with corresponding 'PersistentVolumeClaim' specify the bound claim name below https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static https://atlassian.github.io/data-center-helm-charts/examples/storage/aws/SHARED_STORAGE/ | -| volumes.sharedHome.mountPath | string | `"/var/atlassian/application-data/shared-home"` | Specifies the path in the Bitbucket container to which the shared-home volume will be mounted. | -| volumes.sharedHome.nfsPermissionFixer.command | string | `nil` | By default, the fixer will change the group ownership of the volume's root directory to match the Bitbucket container's GID (2003), and then ensures the directory is group-writeable. If this is not the desired behaviour, command used can be specified here. | -| volumes.sharedHome.nfsPermissionFixer.enabled | bool | `true` | If 'true', this will alter the shared-home volume's root directory so that Bitbucket can write to it. 
This is a workaround for a K8s bug affecting NFS volumes: https://github.com/kubernetes/examples/issues/260 | -| volumes.sharedHome.nfsPermissionFixer.imageRepo | string | `"alpine"` | Image repository for the permission fixer init container. Defaults to alpine | -| volumes.sharedHome.nfsPermissionFixer.imageTag | string | `"latest"` | Image tag for the permission fixer init container. Defaults to latest | -| volumes.sharedHome.nfsPermissionFixer.mountPath | string | `"/shared-home"` | The path in the K8s initContainer where the shared-home volume will be mounted | -| volumes.sharedHome.nfsPermissionFixer.resources | object | `{}` | Resources requests and limits for nfsPermissionFixer init container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | -| volumes.sharedHome.persistentVolume.create | bool | `false` | If 'true' then a 'PersistentVolume' will be created for the NFS server | -| volumes.sharedHome.persistentVolume.mountOptions | list | `[]` | Additional options to be used when mounting the NFS volume | -| volumes.sharedHome.persistentVolume.nfs.path | string | `""` | Specifies NFS directory share. This will be mounted into the Pod(s) using the 'volumes.sharedHome.mountPath' | -| volumes.sharedHome.persistentVolume.nfs.server | string | `""` | The address of the NFS server. It needs to be resolvable by the kubelet, so consider using an IP address. | -| volumes.sharedHome.persistentVolumeClaim.accessMode | string | `"ReadWriteMany"` | Specifies the access mode of the volume to claim | -| volumes.sharedHome.persistentVolumeClaim.create | bool | `false` | If 'true', then a 'PersistentVolumeClaim' will be created for the 'PersistentVolume' | -| volumes.sharedHome.persistentVolumeClaim.resources | object | `{"requests":{"storage":"1Gi"}}` | Specifies the standard K8s resource requests and/or limits for the shared-home | -| volumes.sharedHome.persistentVolumeClaim.storageClassName | string | `nil` | Specify the name of the 'StorageClass' that should be used If set to non-empty string value, this will specify the storage class to be used. If left without value, the default Storage Class will be utilised. Alternatively, can be set to the empty string "", to indicate that no Storage Class should be used here. | -| volumes.sharedHome.persistentVolumeClaim.volumeName | string | `nil` | If persistentVolume.create and persistentVolumeClaim.create are both true then any value supplied here is ignored and the default used. A custom value here is useful when bringing your own 'PersistentVolume' i.e. 'persistentVolume.create' is false. | -| volumes.sharedHome.subPath | string | `nil` | Specifies the sub-directory of the shared-home volume that will be mounted in to the Bitbucket container. | \ No newline at end of file +| bitbucket.securityContext.fsGroup | int | `2003` | The GID used by the Bitbucket docker image GID will default to 2003 if not supplied and securityContextEnabled is set to true. This is intended to ensure that the shared-home volume is group-writeable by the GID used by the Bitbucket container. However, this doesn't appear to work for NFS volumes due to a K8s bug: https://github.com/kubernetes/examples/issues/260 | +| bitbucket.securityContextEnabled | bool | `true` | Whether to apply security context to pod. | +| bitbucket.service.annotations | object | `{}` | Additional annotations to apply to the Service | +| bitbucket.service.contextPath | string | `nil` | The context path that Bitbucket will use. 
| +| bitbucket.service.loadBalancerIP | string | `nil` | Use specific loadBalancerIP. Only applies to service type LoadBalancer. | +| bitbucket.service.port | int | `80` | The port on which the Bitbucket K8s HTTP Service will listen | +| bitbucket.service.sessionAffinity | string | `"None"` | Session affinity type. If you want to make sure that connections from a particular client are passed to the same pod each time, set sessionAffinity to ClientIP. See: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity | +| bitbucket.service.sessionAffinityConfig | object | `{"clientIP":{"timeoutSeconds":null}}` | Session affinity configuration | +| bitbucket.service.sessionAffinityConfig.clientIP.timeoutSeconds | string | `nil` | Specifies the seconds of ClientIP type session sticky time. The value must be > 0 && <= 86400 (for 1 day) if ServiceAffinity == "ClientIP". Default value is 10800 (for 3 hours). | +| bitbucket.service.sshPort | int | `7999` | The port on which the Bitbucket K8s SSH Service will listen | +| bitbucket.service.type | string | `"ClusterIP"` | The type of K8s service to use for Bitbucket | +| bitbucket.setPermissions | bool | `true` | Boolean to define whether to set local home directory permissions on startup of Bitbucket container. Set to 'false' to disable this behaviour. | +| bitbucket.shutdown.command | string | `"/shutdown-wait.sh"` | By default pods will be stopped via a [preStop hook](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/), using a script supplied by the Docker image. If any other shutdown behaviour is needed it can be achieved by overriding this value. Note that the shutdown command needs to wait for the application shutdown completely before exiting; see [the default command](https://bitbucket.org/atlassian-docker/docker-atlassian-bitbucket-server/src/master/shutdown-wait.sh) for details. | +| bitbucket.shutdown.terminationGracePeriodSeconds | int | `35` | The termination grace period for pods during shutdown. This should be set to the Bitbucket internal grace period (default 30 seconds), plus a small buffer to allow the JVM to fully terminate. | +| bitbucket.sshService | object | `{"annotations":{},"enabled":false,"host":null,"loadBalancerIP":null,"port":22,"type":"LoadBalancer"}` | Enable or disable an additional service for exposing SSH for external access. Disable when the SSH service is exposed through the ingress controller, or enable if the ingress controller does not support TCP. | +| bitbucket.sshService.annotations | object | `{}` | Annotations for the SSH service. Useful if a load balancer controller needs extra annotations. | +| bitbucket.sshService.enabled | bool | `false` | Set to 'true' if an additional SSH Service should be created | +| bitbucket.sshService.host | string | `nil` | The hostname of the SSH service. If set, it'll be used to configure the SSH base URL for the application. | +| bitbucket.sshService.loadBalancerIP | string | `nil` | Use specific loadBalancerIP. Only applies to service type LoadBalancer. | +| bitbucket.sshService.port | int | `22` | Port to expose the SSH service on. | +| bitbucket.sshService.type | string | `"LoadBalancer"` | SSH Service type | +| bitbucket.startupProbe.enabled | bool | `false` | Whether to apply the startupProbe check to pod. | +| bitbucket.startupProbe.failureThreshold | int | `120` | The number of consecutive failures of the Bitbucket container startup probe before the pod fails startup checks. 
| +| bitbucket.startupProbe.initialDelaySeconds | int | `60` | Time to wait before starting the first probe | +| bitbucket.startupProbe.periodSeconds | int | `5` | How often (in seconds) the Bitbucket container startup probe will run | +| bitbucket.sysadminCredentials.displayNameSecretKey | string | `"displayName"` | The key in the Kubernetes Secret that contains the sysadmin display name | +| bitbucket.sysadminCredentials.emailAddressSecretKey | string | `"emailAddress"` | The key in the Kubernetes Secret that contains the sysadmin email address | +| bitbucket.sysadminCredentials.passwordSecretKey | string | `"password"` | The key in the Kubernetes Secret that contains the sysadmin password | +| bitbucket.sysadminCredentials.secretName | string | `nil` | The name of the Kubernetes Secret that contains the Bitbucket sysadmin credentials If specified, then these will be automatically populated during Bitbucket setup. Otherwise, they will need to be provided via the browser after initial startup. | +| bitbucket.sysadminCredentials.usernameSecretKey | string | `"username"` | The key in the Kubernetes Secret that contains the sysadmin username | +| bitbucket.topologySpreadConstraints | list | `[]` | Defines topology spread constraints for Bitbucket pods. See details: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ | +| bitbucket.useHelmReleaseNameAsContainerName | bool | `false` | Whether the main container should acquire helm release name. By default the container name is `bitbucket` which corresponds to the name of the Helm Chart. | +| database.credentials.passwordSecretKey | string | `"password"` | The key ('password') in the Secret used to store the database login password | +| database.credentials.secretName | string | `nil` | from-literal=password=' https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets | +| database.credentials.usernameSecretKey | string | `"username"` | The key ('username') in the Secret used to store the database login username | +| database.driver | string | `nil` | The Java class name of the JDBC driver to be used. If not specified, then it will need to be provided via the browser during manual configuration post deployment. Valid drivers are: - 'org.postgresql.Driver' - 'com.mysql.jdbc.Driver' - 'oracle.jdbc.OracleDriver' - 'com.microsoft.sqlserver.jdbc.SQLServerDriver' https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#databasedriver: | +| database.url | string | `nil` | The jdbc URL of the database. If not specified, then it will need to be provided via the browser during manual configuration post deployment. Example URLs include: - 'jdbc:postgresql://:5432/' - 'jdbc:mysql:///' - 'jdbc:sqlserver://:1433;databaseName=' - 'jdbc:oracle:thin:@:1521:' https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#databaseurl | +| fluentd.command | string | `nil` | The command used to start Fluentd. If not supplied the default command will be used: "fluentd -c /fluentd/etc/fluent.conf -v" Note: The custom command can be free-form, however pay particular attention to the process that should ultimately be left running in the container. This process should be invoked with 'exec' so that signals are appropriately propagated to it, for instance SIGTERM. An example of how such a command may look is: " && && exec " | +| fluentd.customConfigFile | bool | `false` | Set to 'true' if a custom config (see 'configmap-fluentd.yaml' for default) should be used for Fluentd. 
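A note on the `database.credentials.*` values listed earlier in this table: they expect a Secret that already exists in the namespace. A minimal sketch of such a Secret and the matching values override (the Secret name and the literal credentials are illustrative placeholders only):

apiVersion: v1
kind: Secret
metadata:
  name: bitbucket-db-credentials   # illustrative name
type: Opaque
stringData:
  username: <db-user>              # placeholder
  password: <db-password>          # placeholder

# values.yaml override referencing the Secret above
database:
  credentials:
    secretName: bitbucket-db-credentials
    usernameSecretKey: username
    passwordSecretKey: password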
If enabled this config must be supplied via the 'fluentdCustomConfig' property below. | +| fluentd.elasticsearch.enabled | bool | `true` | Set to 'true' if Fluentd should send all log events to an Elasticsearch service. | +| fluentd.elasticsearch.hostname | string | `"elasticsearch"` | The hostname of the Elasticsearch service that Fluentd should send logs to. | +| fluentd.enabled | bool | `false` | Set to 'true' if the Fluentd sidecar (DaemonSet) should be added to each pod | +| fluentd.extraVolumes | list | `[]` | Specify custom volumes to be added to Fluentd container (e.g. more log sources) | +| fluentd.fluentdCustomConfig | object | `{}` | Custom fluent.conf file | +| fluentd.imageRepo | string | `"fluent/fluentd-kubernetes-daemonset"` | The Fluentd sidecar image repository | +| fluentd.imageTag | string | `"v1.11.5-debian-elasticsearch7-1.2"` | The Fluentd sidecar image tag | +| fluentd.resources | object | `{}` | Resources requests and limits for fluentd sidecar container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | +| image | object | `{"pullPolicy":"IfNotPresent","repository":"atlassian/bitbucket","tag":""}` | Image configuration | +| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | +| image.repository | string | `"atlassian/bitbucket"` | The Bitbucket Docker image to use https://hub.docker.com/r/atlassian/bitbucket-server | +| image.tag | string | `""` | The docker image tag to be used - defaults to the Chart appVersion | +| ingress.annotations | object | `{}` | The custom annotations that should be applied to the Ingress Resource. If using an ingress-nginx controller be sure that the annotations you add here are compatible with those already defined in the 'ingess.yaml' template | +| ingress.className | string | `"nginx"` | The class name used by the ingress controller if it's being used. Please follow documentation of your ingress controller. If the cluster contains multiple ingress controllers, this setting allows you to control which of them is used for Atlassian application traffic. | +| ingress.create | bool | `false` | Set to 'true' if an Ingress Resource should be created. This depends on a pre-provisioned Ingress Controller being available. | +| ingress.host | string | `nil` | The fully-qualified hostname (FQDN) of the Ingress Resource. Traffic coming in on this hostname will be routed by the Ingress Resource to the appropriate backend Service. | +| ingress.https | bool | `true` | Set to 'true' if browser communication with the application should be TLS (HTTPS) enforced. | +| ingress.maxBodySize | string | `"250m"` | The max body size to allow. Requests exceeding this size will result in an HTTP 413 error being returned to the client. | +| ingress.nginx | bool | `true` | Set to 'true' if the Ingress Resource is to use the K8s 'ingress-nginx' controller. https://kubernetes.github.io/ingress-nginx/ This will populate the Ingress Resource with annotations that are specific to the K8s ingress-nginx controller. Set to 'false' if a different controller is to be used, in which case the appropriate annotations for that controller must be specified below under 'ingress.annotations'. | +| ingress.path | string | `nil` | The base path for the Ingress Resource. For example '/bitbucket'. Based on a 'ingress.host' value of 'company.k8s.com' this would result in a URL of 'company.k8s.com/bitbucket'. Default value is 'bitbucket.service.contextPath'. 
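Taken together, the `ingress.*` values above translate into an override along these lines; this is a minimal sketch in which the hostname and TLS Secret name are illustrative and an ingress-nginx controller is assumed to be installed:

ingress:
  create: true
  className: nginx
  nginx: true
  host: bitbucket.example.com      # illustrative FQDN
  path: /bitbucket
  https: true
  tlsSecretName: bitbucket-tls     # illustrative Secret containing tls.crt/tls.key
  maxBodySize: 250m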
| +| ingress.proxyConnectTimeout | int | `60` | Defines a timeout for establishing a connection with a proxied server. It should be noted that this timeout cannot usually exceed 75 seconds. | +| ingress.proxyReadTimeout | int | `60` | Defines a timeout for reading a response from the proxied server. The timeout is set only between two successive read operations, not for the transmission of the whole response. If the proxied server does not transmit anything within this time, the connection is closed. | +| ingress.proxySendTimeout | int | `60` | Sets a timeout for transmitting a request to the proxied server. The timeout is set only between two successive write operations, not for the transmission of the whole request. If the proxied server does not receive anything within this time, the connection is closed. | +| ingress.tlsSecretName | string | `nil` | The name of the K8s Secret that contains the TLS private key and corresponding certificate. When utilised, TLS termination occurs at the ingress point where traffic to the Service, and it's Pods is in plaintext. Usage is optional and depends on your use case. The Ingress Controller itself can also be configured with a TLS secret for all Ingress Resources. https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets https://kubernetes.io/docs/concepts/services-networking/ingress/#tls | +| monitoring.exposeJmxMetrics | bool | `false` | Expose JMX metrics with jmx_exporter https://github.com/prometheus/jmx_exporter | +| monitoring.fetchJmxExporterJar | bool | `true` | Fetch jmx_exporter jar from the image. If set to false make sure to manually copy the jar to shared home and provide an absolute path in jmxExporterCustomJarLocation | +| monitoring.grafana.createDashboards | bool | `false` | Create ConfigMaps with Grafana dashboards | +| monitoring.grafana.dashboardAnnotations | object | `{}` | Annotations added to Grafana dashboards ConfigMaps. See: https://github.com/kiwigrid/k8s-sidecar#usage | +| monitoring.grafana.dashboardLabels | object | `{}` | Label selector for Grafana dashboard importer sidecar | +| monitoring.jmxExporterCustomConfig | object | `{}` | Custom JMX config with the rules | +| monitoring.jmxExporterCustomJarLocation | string | `nil` | Location of jmx_exporter jar file if mounted from a secret or manually copied to shared home | +| monitoring.jmxExporterImageRepo | string | `"bitnami/jmx-exporter"` | Image repository with jmx_exporter jar | +| monitoring.jmxExporterImageTag | string | `"0.18.0"` | Image tag to be used to pull jmxExporterImageRepo | +| monitoring.jmxExporterInitContainer | object | `{"customSecurityContext":{},"resources":{},"runAsRoot":true}` | JMX exporter init container configuration | +| monitoring.jmxExporterInitContainer.customSecurityContext | object | `{}` | Custom SecurityContext for the jmx exporter init container | +| monitoring.jmxExporterInitContainer.resources | object | `{}` | Resources requests and limits for the JMX exporter init container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | +| monitoring.jmxExporterInitContainer.runAsRoot | bool | `true` | Whether to run JMX exporter init container as root to copy JMX exporter binary to shared home volume. Set to false if running containers as root is not allowed in the cluster. 
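If the JMX metrics exposed via the `monitoring.*` values above are to be scraped by a Prometheus operator, a minimal override could look like the sketch below; the `release: prometheus` selector is an assumption about the local Prometheus installation and must match its serviceMonitorSelector:

monitoring:
  exposeJmxMetrics: true
  jmxExporterPort: 9999
  serviceMonitor:
    create: true
    scrapeIntervalSeconds: 30
    prometheusLabelSelector:
      release: prometheus          # assumption: matches the Prometheus serviceMonitorSelector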
| +| monitoring.jmxExporterPort | int | `9999` | Port number on which metrics will be available | +| monitoring.jmxExporterPortType | string | `"ClusterIP"` | JMX exporter port type | +| monitoring.jmxServiceAnnotations | object | `{}` | Annotations added to the jmx service | +| monitoring.serviceMonitor.create | bool | `false` | Create ServiceMonitor to start scraping metrics. ServiceMonitor CRD needs to be created in advance. | +| monitoring.serviceMonitor.prometheusLabelSelector | object | `{}` | ServiceMonitorSelector of the prometheus instance. | +| monitoring.serviceMonitor.scrapeIntervalSeconds | int | `30` | Scrape interval for the JMX service. | +| nodeSelector | object | `{}` | Standard K8s node-selectors that will be applied to all Bitbucket pods | +| podAnnotations | object | `{}` | Custom annotations that will be applied to all Bitbucket pods | +| podDisruptionBudget | object | `{"annotations":{},"enabled":false,"labels":{},"maxUnavailable":null,"minAvailable":null}` | PodDisruptionBudget: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ You can specify only one of maxUnavailable and minAvailable in a single PodDisruptionBudget. When both minAvailable and maxUnavailable are set, maxUnavailable takes precedence. | +| podLabels | object | `{}` | Custom labels that will be applied to all Bitbucket pods | +| priorityClassName | string | `nil` | Priority class for the application pods. The PriorityClass with this name needs to be available in the cluster. For details see https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass | +| replicaCount | int | `1` | The initial number of Bitbucket pods that should be started at deployment time. Note that if Bitbucket is fully configured (see above) during initial deployment a 'replicaCount' greater than 1 can be supplied. | +| schedulerName | string | `nil` | Standard K8s schedulerName that will be applied to all Bitbucket pods. Check Kubernetes documentation on how to configure multiple schedulers: https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/#specify-schedulers-for-pods | +| serviceAccount.annotations | object | `{}` | Annotations to add to the ServiceAccount (if created) | +| serviceAccount.clusterRole.create | bool | `false` | Set to 'true' if a ClusterRole should be created, or 'false' if it already exists. | +| serviceAccount.clusterRole.name | string | `nil` | The name of the ClusterRole to be used. If not specified, but the "serviceAccount.clusterRole.create" flag is set to 'true', then the ClusterRole name will be auto-generated. | +| serviceAccount.clusterRoleBinding.create | bool | `false` | Set to 'true' if a ClusterRoleBinding should be created, or 'false' if it already exists. | +| serviceAccount.clusterRoleBinding.name | string | `nil` | The name of the ClusterRoleBinding to be created. If not specified, but the "serviceAccount.clusterRoleBinding.create" flag is set to 'true', then the ClusterRoleBinding name will be auto-generated. | +| serviceAccount.create | bool | `true` | Set to 'true' if a ServiceAccount should be created, or 'false' if it already exists. | +| serviceAccount.imagePullSecrets | list | `[]` | For Docker images hosted in private registries, define the list of image pull secrets that should be utilized by the created ServiceAccount https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | +| serviceAccount.name | string | `nil` | The name of the ServiceAccount to be used by the pods. 
If not specified, but the "serviceAccount.create" flag is set to 'true', then the ServiceAccount name will be auto-generated, otherwise the 'default' ServiceAccount will be used. https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server | +| serviceAccount.role.create | bool | `true` | Create a role for Hazelcast client with privileges to get and list pods and endpoints in the namespace. Set to false if you need to create a Role and RoleBinding manually | +| serviceAccount.roleBinding | object | `{"create":true}` | Grant permissions defined in Role (list and get pods and endpoints) to a service account. | +| testPods | object | `{"affinity":{},"annotations":{},"image":{"permissionsTestContainer":"debian:stable-slim","statusTestContainer":"alpine:latest"},"labels":{},"nodeSelector":{},"schedulerName":null,"tolerations":[]}` | Metadata and pod spec for pods started in Helm tests | +| tolerations | list | `[]` | Standard K8s tolerations that will be applied to all Bitbucket pods | +| volumes.additional | list | `[]` | Defines additional volumes that should be applied to all Bitbucket pods. Note that this will not create any corresponding volume mounts; those need to be defined in bitbucket.additionalVolumeMounts | +| volumes.localHome.customVolume | object | `{}` | Static provisioning of local-home using K8s PVs and PVCs NOTE: Due to the ephemeral nature of pods this approach to provisioning volumes for pods is not recommended. Dynamic provisioning described above is the prescribed approach. When 'persistentVolumeClaim.create' is 'false', then this value can be used to define a standard K8s volume that will be used for the local-home volume(s). If not defined, then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object. https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static | +| volumes.localHome.mountPath | string | `"/var/atlassian/application-data/bitbucket"` | Specifies the path in the Bitbucket container to which the local-home volume will be mounted. | +| volumes.localHome.persistentVolumeClaim.create | bool | `false` | If 'true', then a 'PersistentVolume' and 'PersistentVolumeClaim' will be dynamically created for each pod based on the 'StorageClassName' supplied below. | +| volumes.localHome.persistentVolumeClaim.resources | object | `{"requests":{"storage":"1Gi"}}` | Specifies the standard K8s resource requests and/or limits for the local-home volume claims. | +| volumes.localHome.persistentVolumeClaim.storageClassName | string | `nil` | Specify the name of the 'StorageClass' that should be used for the local-home volume claim. | +| volumes.sharedHome.customVolume | object | `{}` | Static provisioning of shared-home using K8s PVs and PVCs When 'persistentVolume.create' and 'persistentVolumeClaim.create' are 'false', then this property can be used to define a custom volume that will be used for shared-home If not defined, then an 'emptyDir' volume is utilised. 
Having manually provisioned a 'PersistentVolume' with corresponding 'PersistentVolumeClaim' specify the bound claim name below https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static https://atlassian.github.io/data-center-helm-charts/examples/storage/aws/SHARED_STORAGE/ | +| volumes.sharedHome.mountPath | string | `"/var/atlassian/application-data/shared-home"` | Specifies the path in the Bitbucket container to which the shared-home volume will be mounted. | +| volumes.sharedHome.nfsPermissionFixer.command | string | `nil` | By default, the fixer will change the group ownership of the volume's root directory to match the Bitbucket container's GID (2003), and then ensures the directory is group-writeable. If this is not the desired behaviour, command used can be specified here. | +| volumes.sharedHome.nfsPermissionFixer.enabled | bool | `true` | If 'true', this will alter the shared-home volume's root directory so that Bitbucket can write to it. This is a workaround for a K8s bug affecting NFS volumes: https://github.com/kubernetes/examples/issues/260 | +| volumes.sharedHome.nfsPermissionFixer.imageRepo | string | `"alpine"` | Image repository for the permission fixer init container. Defaults to alpine | +| volumes.sharedHome.nfsPermissionFixer.imageTag | string | `"latest"` | Image tag for the permission fixer init container. Defaults to latest | +| volumes.sharedHome.nfsPermissionFixer.mountPath | string | `"/shared-home"` | The path in the K8s initContainer where the shared-home volume will be mounted | +| volumes.sharedHome.nfsPermissionFixer.resources | object | `{}` | Resources requests and limits for nfsPermissionFixer init container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | +| volumes.sharedHome.persistentVolume.create | bool | `false` | If 'true' then a 'PersistentVolume' will be created for the NFS server | +| volumes.sharedHome.persistentVolume.mountOptions | list | `[]` | Additional options to be used when mounting the NFS volume | +| volumes.sharedHome.persistentVolume.nfs.path | string | `""` | Specifies NFS directory share. This will be mounted into the Pod(s) using the 'volumes.sharedHome.mountPath' | +| volumes.sharedHome.persistentVolume.nfs.server | string | `""` | The address of the NFS server. It needs to be resolvable by the kubelet, so consider using an IP address. | +| volumes.sharedHome.persistentVolumeClaim.accessMode | string | `"ReadWriteMany"` | Specifies the access mode of the volume to claim | +| volumes.sharedHome.persistentVolumeClaim.create | bool | `false` | If 'true', then a 'PersistentVolumeClaim' will be created for the 'PersistentVolume' | +| volumes.sharedHome.persistentVolumeClaim.resources | object | `{"requests":{"storage":"1Gi"}}` | Specifies the standard K8s resource requests and/or limits for the shared-home volume claims. | +| volumes.sharedHome.persistentVolumeClaim.storageClassName | string | `nil` | Specify the name of the 'StorageClass' that should be used If set to non-empty string value, this will specify the storage class to be used. If left without value, the default Storage Class will be utilised. Alternatively, can be set to the empty string "", to indicate that no Storage Class should be used here. | +| volumes.sharedHome.persistentVolumeClaim.volumeName | string | `nil` | If persistentVolume.create and persistentVolumeClaim.create are both true then any value supplied here is ignored and the default used. 
A custom value here is useful when bringing your own 'PersistentVolume' i.e. 'persistentVolume.create' is false. | +| volumes.sharedHome.subPath | string | `nil` | Specifies the sub-directory of the shared-home volume that will be mounted in to the Bitbucket container. | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/src/main/charts/bitbucket/templates/tests/test-application-status.yaml b/src/main/charts/bitbucket/templates/tests/test-application-status.yaml index c6993416d..719d44606 100644 --- a/src/main/charts/bitbucket/templates/tests/test-application-status.yaml +++ b/src/main/charts/bitbucket/templates/tests/test-application-status.yaml @@ -5,9 +5,21 @@ metadata: annotations: "helm.sh/hook": test "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + {{- if not .Values.testPods.annotations }} {{- include "bitbucket.podAnnotations" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.annotations }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} labels: + {{- if not .Values.testPods.labels }} {{- include "common.labels.commonLabels" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.labels }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} spec: containers: - name: test @@ -24,4 +36,19 @@ spec: STATUS=$(curl -s "$STATUS_URL") echo "Verifying application state is RUNNING or FIRST_RUN: $STATUS" echo $STATUS | jq -e '.state|test("RUNNING|FIRST_RUN")' - restartPolicy: Never \ No newline at end of file + restartPolicy: Never + {{- with .Values.testPods.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.testPods.schedulerName }} + schedulerName: {{ .Values.testPods.schedulerName | quote }} + {{- end }} diff --git a/src/main/charts/bitbucket/templates/tests/test-database-connectivity.yaml b/src/main/charts/bitbucket/templates/tests/test-database-connectivity.yaml index 783915662..6161deac5 100644 --- a/src/main/charts/bitbucket/templates/tests/test-database-connectivity.yaml +++ b/src/main/charts/bitbucket/templates/tests/test-database-connectivity.yaml @@ -6,9 +6,21 @@ metadata: annotations: "helm.sh/hook": test "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + {{- if not .Values.testPods.annotations }} {{- include "bitbucket.podAnnotations" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.annotations }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} labels: + {{- if not .Values.testPods.labels }} {{- include "common.labels.commonLabels" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.labels }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} spec: serviceAccountName: {{ include "bitbucket.serviceAccountName" . }} containers: @@ -53,4 +65,19 @@ spec: cat output.txt grep -q "Connection established OK" output.txt restartPolicy: Never -{{ end }} \ No newline at end of file + {{- with .Values.testPods.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.affinity }} + affinity: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.testPods.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.testPods.schedulerName }} + schedulerName: {{ .Values.testPods.schedulerName | quote }} + {{- end }} +{{ end }} diff --git a/src/main/charts/bitbucket/templates/tests/test-shared-home-permissions.yaml b/src/main/charts/bitbucket/templates/tests/test-shared-home-permissions.yaml index a39a9aac7..f6dc10f94 100644 --- a/src/main/charts/bitbucket/templates/tests/test-shared-home-permissions.yaml +++ b/src/main/charts/bitbucket/templates/tests/test-shared-home-permissions.yaml @@ -5,9 +5,21 @@ metadata: annotations: "helm.sh/hook": test "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + {{- if not .Values.testPods.annotations }} {{- include "bitbucket.podAnnotations" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.annotations }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} labels: + {{- if not .Values.testPods.labels }} {{- include "common.labels.commonLabels" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.labels }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} spec: containers: - name: test @@ -54,4 +66,19 @@ spec: {{- else }} {{ include "bitbucket.volumes.localHome" . | nindent 4 }} {{- end }} - restartPolicy: Never \ No newline at end of file + restartPolicy: Never + {{- with .Values.testPods.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.testPods.schedulerName }} + schedulerName: {{ .Values.testPods.schedulerName | quote }} + {{- end }} diff --git a/src/main/charts/bitbucket/values.yaml b/src/main/charts/bitbucket/values.yaml index 3b49c1c50..4f25f6cf0 100644 --- a/src/main/charts/bitbucket/values.yaml +++ b/src/main/charts/bitbucket/values.yaml @@ -1476,3 +1476,16 @@ atlassianAnalyticsAndSupport: # which can be optionally including to support.zip # enabled: true + +# -- Metadata and pod spec for pods started in Helm tests +# +testPods: + labels: {} + annotations: {} + nodeSelector: {} + tolerations: [] + affinity: {} + schedulerName: + image: + permissionsTestContainer: debian:stable-slim + statusTestContainer: alpine:latest diff --git a/src/main/charts/confluence/README.md b/src/main/charts/confluence/README.md index 42438415c..f2dd733aa 100644 --- a/src/main/charts/confluence/README.md +++ b/src/main/charts/confluence/README.md @@ -198,6 +198,7 @@ Kubernetes: `>=1.21.x-0` | synchrony.setPermissions | bool | `true` | Boolean to define whether to set synchrony home directory permissions on startup of Synchrony container. Set to 'false' to disable this behaviour. | | synchrony.shutdown.terminationGracePeriodSeconds | int | `25` | The termination grace period for pods during shutdown. This should be set to the Synchrony internal grace period (default 20 seconds), plus a small buffer to allow the JVM to fully terminate. | | synchrony.topologySpreadConstraints | list | `[]` | Defines topology spread constraints for Synchrony pods. 
See details: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ | +| testPods | object | `{"affinity":{},"annotations":{},"image":{"permissionsTestContainer":"debian:stable-slim","statusTestContainer":"alpine:latest"},"labels":{},"nodeSelector":{},"schedulerName":null,"tolerations":[]}` | Metadata and pod spec for pods started in Helm tests | | tolerations | list | `[]` | Standard K8s tolerations that will be applied to all Confluence pods | | volumes.additional | list | `[]` | Defines additional volumes that should be applied to all Confluence pods. Note that this will not create any corresponding volume mounts; those needs to be defined in confluence.additionalVolumeMounts | | volumes.additionalSynchrony | list | `[]` | Defines additional volumes that should be applied to all Synchrony pods. Note that this will not create any corresponding volume mounts; those needs to be defined in synchrony.additionalVolumeMounts | diff --git a/src/main/charts/confluence/templates/tests/test-application-status.yaml b/src/main/charts/confluence/templates/tests/test-application-status.yaml index d91fd3bb2..10ff2eafb 100644 --- a/src/main/charts/confluence/templates/tests/test-application-status.yaml +++ b/src/main/charts/confluence/templates/tests/test-application-status.yaml @@ -5,9 +5,21 @@ metadata: annotations: "helm.sh/hook": test "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + {{- if not .Values.testPods.annotations }} {{- include "confluence.podAnnotations" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.annotations }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} labels: + {{- if not .Values.testPods.labels }} {{- include "common.labels.commonLabels" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.labels }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} spec: containers: - name: test @@ -23,4 +35,19 @@ spec: STATUS=$(curl -s "$STATUS_URL") echo "Verifying application state is RUNNING or FIRST_RUN: $STATUS" echo $STATUS | jq -e '.state|test("RUNNING|FIRST_RUN")' - restartPolicy: Never \ No newline at end of file + restartPolicy: Never + {{- with .Values.testPods.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.testPods.schedulerName }} + schedulerName: {{ .Values.testPods.schedulerName | quote }} + {{- end }} diff --git a/src/main/charts/confluence/templates/tests/test-database-connectivity.yaml b/src/main/charts/confluence/templates/tests/test-database-connectivity.yaml index ef4089b9f..b05617d7b 100644 --- a/src/main/charts/confluence/templates/tests/test-database-connectivity.yaml +++ b/src/main/charts/confluence/templates/tests/test-database-connectivity.yaml @@ -6,9 +6,21 @@ metadata: annotations: "helm.sh/hook": test "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + {{- if not .Values.testPods.annotations }} {{- include "confluence.podAnnotations" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.annotations }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} labels: + {{- if not .Values.testPods.labels }} {{- include "common.labels.commonLabels" . 
| nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.labels }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} spec: serviceAccountName: {{ include "confluence.serviceAccountName" . }} volumes: @@ -53,4 +65,19 @@ spec: cat output.txt grep -q "Connection established OK" output.txt restartPolicy: Never + {{- with .Values.testPods.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.testPods.schedulerName }} + schedulerName: {{ .Values.testPods.schedulerName | quote }} + {{- end }} {{ end }} diff --git a/src/main/charts/confluence/templates/tests/test-shared-home-permissions.yaml b/src/main/charts/confluence/templates/tests/test-shared-home-permissions.yaml index 2bf493a9e..fa1f55007 100644 --- a/src/main/charts/confluence/templates/tests/test-shared-home-permissions.yaml +++ b/src/main/charts/confluence/templates/tests/test-shared-home-permissions.yaml @@ -5,9 +5,21 @@ metadata: annotations: "helm.sh/hook": test "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + {{- if not .Values.testPods.annotations }} {{- include "confluence.podAnnotations" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.annotations }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} labels: + {{- if not .Values.testPods.labels }} {{- include "common.labels.commonLabels" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.labels }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} spec: containers: - name: test @@ -44,4 +56,19 @@ spec: rm /shared-home/permissions-test volumes: {{ include "confluence.volumes.sharedHome" . | nindent 4 }} - restartPolicy: Never \ No newline at end of file + restartPolicy: Never + {{- with .Values.testPods.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.testPods.schedulerName }} + schedulerName: {{ .Values.testPods.schedulerName | quote }} + {{- end }} diff --git a/src/main/charts/confluence/values.yaml b/src/main/charts/confluence/values.yaml index d75fc8076..23ab8e7a9 100644 --- a/src/main/charts/confluence/values.yaml +++ b/src/main/charts/confluence/values.yaml @@ -1486,3 +1486,16 @@ atlassianAnalyticsAndSupport: # which can be optionally including to support.zip # enabled: true + +# -- Metadata and pod spec for pods started in Helm tests +# +testPods: + labels: {} + annotations: {} + nodeSelector: {} + tolerations: [] + affinity: {} + schedulerName: + image: + permissionsTestContainer: debian:stable-slim + statusTestContainer: alpine:latest diff --git a/src/main/charts/crowd/README.md b/src/main/charts/crowd/README.md index 604ff8373..fee425807 100644 --- a/src/main/charts/crowd/README.md +++ b/src/main/charts/crowd/README.md @@ -4,8 +4,6 @@ A chart for installing Crowd Data Center on Kubernetes -For installation please follow [the documentation](https://atlassian.github.io/data-center-helm-charts/). 
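
The `testPods` block added to the Bitbucket and Confluence values.yaml files above is what the patched test templates read. As a minimal sketch of how it could be used (the label, annotation, toleration and scheduler values below are invented for illustration and are not chart defaults), a values override passed via `helm install -f` or `helm upgrade -f` might look like:

```yaml
# Hypothetical override file, e.g. test-pods-values.yaml.
# Only the keys introduced by the new testPods block are meaningful;
# the concrete values here are illustrative examples, not defaults.
testPods:
  labels:
    purpose: helm-test                  # rendered instead of the chart's common labels
  annotations:
    sidecar.istio.io/inject: "false"    # rendered instead of the chart's podAnnotations
  nodeSelector:
    kubernetes.io/os: linux
  tolerations:
    - key: dedicated
      operator: Equal
      value: ci
      effect: NoSchedule
  affinity: {}
  schedulerName: default-scheduler
  image:
    permissionsTestContainer: debian:stable-slim
    statusTestContainer: alpine:latest
```

When `testPods.labels` or `testPods.annotations` are non-empty, the templates above render those maps (each value passed through `tpl`) instead of including `podAnnotations`/`commonLabels`; `nodeSelector`, `affinity`, `tolerations` and `schedulerName` are copied into the test pod spec only when set, so pods created by `helm test <release>` are scheduled accordingly.
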
- **Homepage:** ## Source Code @@ -25,137 +23,141 @@ Kubernetes: `>=1.21.x-0` | Key | Type | Default | Description | |-----|------|---------|-------------| -| additionalConfigMaps | list | `[]` | Create additional ConfigMaps with given names, keys and content. Ther Helm release name will be used as a prefix for a ConfigMap name, fileName is used as subPath | -| additionalContainers | list | `[]` | Additional container definitions that will be added to all Crowd pods | -| additionalFiles | list | `[]` | Additional existing ConfigMaps and Secrets not managed by Helm that should be mounted into service container. Configuration details below (camelCase is important!): 'name' - References existing ConfigMap or secret name. 'type' - 'configMap' or 'secret' 'key' - The file name. 'mountPath' - The destination directory in a container. VolumeMount and Volumes are added with this name and index position, for example; custom-config-0, keystore-2 | +| additionalConfigMaps | list | `[]` | Create additional ConfigMaps with given names, keys and content. Ther Helm release name will be used as a prefix for a ConfigMap name, fileName is used as subPath | +| additionalContainers | list | `[]` | Additional container definitions that will be added to all Crowd pods | +| additionalFiles | list | `[]` | Additional existing ConfigMaps and Secrets not managed by Helm that should be mounted into service container. Configuration details below (camelCase is important!): 'name' - References existing ConfigMap or secret name. 'type' - 'configMap' or 'secret' 'key' - The file name. 'mountPath' - The destination directory in a container. VolumeMount and Volumes are added with this name and index position, for example; custom-config-0, keystore-2 | | additionalHosts | list | `[]` | Additional host aliases for each pod, equivalent to adding them to the /etc/hosts file. https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ | -| additionalInitContainers | list | `[]` | Additional initContainer definitions that will be added to all Crowd pods | -| additionalLabels | object | `{}` | Additional labels that should be applied to all resources | -| affinity | object | `{}` | Standard K8s affinities that will be applied to all Crowd pods | -| atlassianAnalyticsAndSupport.analytics.enabled | bool | `true` | Mount ConfigMap with selected Helm chart values as a JSON which DC products will read and send analytics events to Atlassian data pipelines | -| atlassianAnalyticsAndSupport.helmValues.enabled | bool | `true` | Mount ConfigMap with selected Helm chart values as a YAML file which can be optionally including to support.zip | -| crowd.accessLog.enabled | bool | `true` | Set to 'true' if access logging should be enabled. | -| crowd.accessLog.localHomeSubPath | string | `"logs"` | The subdirectory within the local-home volume where access logs should be stored. | -| crowd.accessLog.mountPath | string | `"/opt/atlassian/crowd/apache-tomcat/logs"` | The path within the Crowd container where the local-home volume should be mounted in order to capture access logs. | -| crowd.additionalBundledPlugins | list | `[]` | Specifies a list of additional Crowd plugins that should be added to the Crowd container. Note plugins installed via this method will appear as bundled plugins rather than user plugins. These should be specified in the same manner as the 'additionalLibraries' property. 
Additional details: https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ NOTE: only .jar files can be loaded using this approach. OBR's can be extracted (unzipped) to access the associated .jar An alternative to this method is to install the plugins via "Manage Apps" in the product system administration UI. | -| crowd.additionalCertificates | object | `{"customCmd":null,"secretName":null}` | Certificates to be added to Java truststore. Provide reference to a secret that contains the certificates | +| additionalInitContainers | list | `[]` | Additional initContainer definitions that will be added to all Crowd pods | +| additionalLabels | object | `{}` | Additional labels that should be applied to all resources | +| affinity | object | `{}` | Standard K8s affinities that will be applied to all Crowd pods | +| atlassianAnalyticsAndSupport.analytics.enabled | bool | `true` | Mount ConfigMap with selected Helm chart values as a JSON which DC products will read and send analytics events to Atlassian data pipelines | +| atlassianAnalyticsAndSupport.helmValues.enabled | bool | `true` | Mount ConfigMap with selected Helm chart values as a YAML file which can be optionally including to support.zip | +| crowd.accessLog.enabled | bool | `true` | Set to 'true' if access logging should be enabled. | +| crowd.accessLog.localHomeSubPath | string | `"logs"` | The subdirectory within the local-home volume where access logs should be stored. | +| crowd.accessLog.mountPath | string | `"/opt/atlassian/crowd/apache-tomcat/logs"` | The path within the Crowd container where the local-home volume should be mounted in order to capture access logs. | +| crowd.additionalBundledPlugins | list | `[]` | Specifies a list of additional Crowd plugins that should be added to the Crowd container. Note plugins installed via this method will appear as bundled plugins rather than user plugins. These should be specified in the same manner as the 'additionalLibraries' property. Additional details: https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ NOTE: only .jar files can be loaded using this approach. OBR's can be extracted (unzipped) to access the associated .jar An alternative to this method is to install the plugins via "Manage Apps" in the product system administration UI. | +| crowd.additionalCertificates | object | `{"customCmd":null,"secretName":null}` | Certificates to be added to Java truststore. Provide reference to a secret that contains the certificates | | crowd.additionalEnvironmentVariables | list | `[]` | Defines any additional environment variables to be passed to the Crowd container. See https://hub.docker.com/r/atlassian/crowd for supported variables. | -| crowd.additionalJvmArgs | list | `[]` | Specifies a list of additional arguments that can be passed to the Crowd JVM, e.g. system properties. | -| crowd.additionalLibraries | list | `[]` | Specifies a list of additional Java libraries that should be added to the Crowd container. Each item in the list should specify the name of the volume that contains the library, as well as the name of the library file within that volume's root directory. Optionally, a subDirectory field can be included to specify which directory in the volume contains the library file. Additional details: https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ | -| crowd.additionalPorts | list | `[]` | Defines any additional ports for the Crowd container. 
| -| crowd.additionalVolumeClaimTemplates | list | `[]` | Defines additional volumeClaimTemplates that should be applied to the Crowd pod. Note that this will not create any corresponding volume mounts; those needs to be defined in crowd.additionalVolumeMounts | +| crowd.additionalJvmArgs | list | `[]` | Specifies a list of additional arguments that can be passed to the Crowd JVM, e.g. system properties. | +| crowd.additionalLibraries | list | `[]` | Specifies a list of additional Java libraries that should be added to the Crowd container. Each item in the list should specify the name of the volume that contains the library, as well as the name of the library file within that volume's root directory. Optionally, a subDirectory field can be included to specify which directory in the volume contains the library file. Additional details: https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ | +| crowd.additionalPorts | list | `[]` | Defines any additional ports for the Crowd container. | +| crowd.additionalVolumeClaimTemplates | list | `[]` | Defines additional volumeClaimTemplates that should be applied to the Crowd pod. Note that this will not create any corresponding volume mounts; those needs to be defined in crowd.additionalVolumeMounts | | crowd.additionalVolumeMounts | list | `[]` | Defines any additional volumes mounts for the Crowd container. These can refer to existing volumes, or new volumes can be defined in volumes.additional. | -| crowd.containerSecurityContext | object | `{}` | Standard K8s field that holds security configurations that will be applied to a container. https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | -| crowd.livenessProbe.enabled | bool | `false` | Whether to apply the livenessProbe check to pod. | -| crowd.livenessProbe.failureThreshold | int | `12` | The number of consecutive failures of the Crowd container liveness probe before the pod fails liveness checks. | -| crowd.livenessProbe.initialDelaySeconds | int | `60` | Time to wait before starting the first probe | -| crowd.livenessProbe.periodSeconds | int | `5` | How often (in seconds) the Crowd container liveness probe will run | -| crowd.livenessProbe.timeoutSeconds | int | `1` | Number of seconds after which the probe times out | -| crowd.ports.http | int | `8095` | The port on which the Crowd container listens for HTTP traffic | -| crowd.readinessProbe.customProbe | object | `{}` | Custom readinessProbe to override the default /status httpGet | -| crowd.readinessProbe.enabled | bool | `true` | Whether to apply the readinessProbe check to pod. | -| crowd.readinessProbe.failureThreshold | int | `10` | The number of consecutive failures of the Crowd container readiness probe before the pod fails readiness checks. | -| crowd.readinessProbe.initialDelaySeconds | int | `10` | The initial delay (in seconds) for the Crowd container readiness probe, after which the probe will start running. 
| -| crowd.readinessProbe.periodSeconds | int | `5` | How often (in seconds) the Crowd container readiness probe will run | -| crowd.readinessProbe.timeoutSeconds | int | `1` | Number of seconds after which the probe times out | -| crowd.resources.container.requests.cpu | string | `"2"` | Initial CPU request by Crowd pod | -| crowd.resources.container.requests.memory | string | `"1G"` | Initial Memory request by Crowd pod | -| crowd.resources.jvm.maxHeap | string | `"768m"` | The maximum amount of heap memory that will be used by the Crowd JVM | -| crowd.resources.jvm.minHeap | string | `"384m"` | The minimum amount of heap memory that will be used by the Crowd JVM | -| crowd.securityContext.fsGroup | int | `2004` | The GID used by the Crowd docker image GID will default to 2004 if not supplied and securityContextEnabled is set to true. This is intended to ensure that the shared-home volume is group-writeable by the GID used by the Crowd container. However, this doesn't appear to work for NFS volumes due to a K8s bug: https://github.com/kubernetes/examples/issues/260 | -| crowd.securityContextEnabled | bool | `true` | | -| crowd.service.annotations | object | `{}` | Additional annotations to apply to the Service | -| crowd.service.loadBalancerIP | string | `nil` | Use specific loadBalancerIP. Only applies to service type LoadBalancer. | -| crowd.service.port | int | `80` | The port on which the Crowd K8s Service will listen | -| crowd.service.sessionAffinity | string | `"None"` | Session affinity type. If you want to make sure that connections from a particular client are passed to the same pod each time, set sessionAffinity to ClientIP. See: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity | -| crowd.service.sessionAffinityConfig | object | `{"clientIP":{"timeoutSeconds":null}}` | Session affinity configuration | -| crowd.service.sessionAffinityConfig.clientIP.timeoutSeconds | string | `nil` | Specifies the seconds of ClientIP type session sticky time. The value must be > 0 && <= 86400(for 1 day) if ServiceAffinity == "ClientIP". Default value is 10800 (for 3 hours). | -| crowd.service.type | string | `"ClusterIP"` | The type of K8s service to use for Crowd. For loadBalancer type, deselect the consistent client IP address in Crowd Session configuration. Read more: https://atlassian.github.io/data-center-helm-charts/troubleshooting/LIMITATIONS/#loadbalancer-service-type | -| crowd.setPermissions | bool | `true` | Boolean to define whether to set local home directory permissions on startup of Crowd container. Set to 'false' to disable this behaviour. | -| crowd.shutdown.command | string | `"/shutdown-wait.sh"` | By default pods will be stopped via a [preStop hook](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/), using a script supplied by the Docker image. If any other shutdown behaviour is needed it can be achieved by overriding this value. Note that the shutdown command needs to wait for the application shutdown completely before exiting; see [the default command](https://bitbucket.org/atlassian-docker/docker-atlassian-crowd/src/master/shutdown-wait.sh) for details. | -| crowd.shutdown.terminationGracePeriodSeconds | int | `30` | The termination grace period for pods during shutdown. This should be set to the internal grace period, plus a small buffer to allow the JVM to fully terminate. | -| crowd.startupProbe.enabled | bool | `false` | Whether to apply the startupProbe check to pod. 
| -| crowd.startupProbe.failureThreshold | int | `120` | The number of consecutive failures of the Crowd container startup probe before the pod fails startup checks. | -| crowd.startupProbe.initialDelaySeconds | int | `60` | Time to wait before starting the first probe | -| crowd.startupProbe.periodSeconds | int | `5` | How often (in seconds) the Crowd container startup probe will run | -| crowd.topologySpreadConstraints | list | `[]` | Defines topology spread constraints for Crowd pods. See details: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ | -| crowd.umask | string | `"0022"` | The umask used by the Crowd process when it creates new files. The default is 0022. This gives the new files: - read/write permissions for the Crowd user - read permissions for everyone else. | -| crowd.useHelmReleaseNameAsContainerName | bool | `false` | Whether the main container should acquire helm release name. By default the container name is `crowd` which corresponds to the name of the Helm Chart. | -| fluentd.command | string | `nil` | The command used to start Fluentd. If not supplied the default command will be used: "fluentd -c /fluentd/etc/fluent.conf -v" Note: The custom command can be free-form, however pay particular attention to the process that should ultimately be left running in the container. This process should be invoked with 'exec' so that signals are appropriately propagated to it, for instance SIGTERM. An example of how such a command may look is: " && && exec " | -| fluentd.customConfigFile | bool | `false` | Set to 'true' if a custom config (see 'configmap-fluentd.yaml' for default) should be used for Fluentd. If enabled this config must be supplied via the 'fluentdCustomConfig' property below. | -| fluentd.elasticsearch.enabled | bool | `true` | Set to 'true' if Fluentd should send all log events to an Elasticsearch service. | -| fluentd.elasticsearch.extraVolumes | list | `[]` | Specify custom volumes to be added to Fluentd container (e.g. more log sources) | -| fluentd.elasticsearch.hostname | string | `"elasticsearch"` | The hostname of the Elasticsearch service that Fluentd should send logs to. | -| fluentd.elasticsearch.indexNamePrefix | string | `"crowd"` | The prefix of the Elasticsearch index name that will be used | -| fluentd.enabled | bool | `false` | Set to 'true' if the Fluentd sidecar (DaemonSet) should be added to each pod | -| fluentd.fluentdCustomConfig | object | `{}` | Custom fluent.conf file | -| fluentd.httpPort | int | `9880` | The port on which the Fluentd sidecar will listen | -| fluentd.imageRepo | string | `"fluent/fluentd-kubernetes-daemonset"` | The Fluentd sidecar image repository | -| fluentd.imageTag | string | `"v1.11.5-debian-elasticsearch7-1.2"` | The Fluentd sidecar image tag | -| fluentd.resources | object | `{}` | Resources requests and limits for fluentd sidecar container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | -| image | object | `{"pullPolicy":"IfNotPresent","repository":"atlassian/crowd","tag":""}` | Image configuration | -| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | -| image.repository | string | `"atlassian/crowd"` | The Docker Crowd Docker image to use https://hub.docker.com/r/atlassian/crowd | -| image.tag | string | `""` | The docker image tag to be used. Defaults to appVersion in Chart.yaml | -| ingress.annotations | object | `{}` | The custom annotations that should be applied to the Ingress Resource. 
If using an ingress-nginx controller be sure that the annotations you add here are compatible with those already defined in the 'ingess.yaml' template | -| ingress.className | string | `"nginx"` | The class name used by the ingress controller if it's being used. Please follow documentation of your ingress controller. If the cluster contains multiple ingress controllers, this setting allows you to control which of them is used for Atlassian application traffic. | -| ingress.create | bool | `false` | Set to 'true' if an Ingress Resource should be created. This depends on a pre-provisioned Ingress Controller being available. | -| ingress.host | string | `nil` | The fully-qualified hostname (FQDN) of the Ingress Resource. Traffic coming in on this hostname will be routed by the Ingress Resource to the appropriate backend Service. | -| ingress.https | bool | `true` | Set to 'true' if browser communication with the application should be TLS (HTTPS) enforced. | -| ingress.maxBodySize | string | `"250m"` | The max body size to allow. Requests exceeding this size will result in an HTTP 413 error being returned to the client. | -| ingress.nginx | bool | `true` | Set to 'true' if the Ingress Resource is to use the K8s 'ingress-nginx' controller. https://kubernetes.github.io/ingress-nginx/ This will populate the Ingress Resource with annotations that are specific to the K8s ingress-nginx controller. Set to 'false' if a different controller is to be used, in which case the appropriate annotations for that controller must be specified below under 'ingress.annotations'. | -| ingress.path | string | `"/"` | The base path for the Ingress Resource. For example '/crowd'. Based on a 'ingress.host' value of 'company.k8s.com' this would result in a URL of 'company.k8s.com/crowd'. Due to temporary limitations with changing Crowd context on the application level, only "/" and "/crowd" paths are supported. | -| ingress.proxyConnectTimeout | int | `60` | Defines a timeout for establishing a connection with a proxied server. It should be noted that this timeout cannot usually exceed 75 seconds. | -| ingress.proxyReadTimeout | int | `60` | Defines a timeout for reading a response from the proxied server. The timeout is set only between two successive read operations, not for the transmission of the whole response. If the proxied server does not transmit anything within this time, the connection is closed. | -| ingress.proxySendTimeout | int | `60` | Sets a timeout for transmitting a request to the proxied server. The timeout is set only between two successive write operations, not for the transmission of the whole request. If the proxied server does not receive anything within this time, the connection is closed. | -| ingress.tlsSecretName | string | `nil` | The name of the K8s Secret that contains the TLS private key and corresponding certificate. When utilised, TLS termination occurs at the ingress point where traffic to the Service, and it's Pods is in plaintext. Usage is optional and depends on your use case. The Ingress Controller itself can also be configured with a TLS secret for all Ingress Resources. https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets https://kubernetes.io/docs/concepts/services-networking/ingress/#tls | -| monitoring.exposeJmxMetrics | bool | `false` | Expose JMX metrics with jmx_exporter https://github.com/prometheus/jmx_exporter | -| monitoring.fetchJmxExporterJar | bool | `true` | Fetch jmx_exporter jar from the image. 
If set to false make sure to manually copy the jar to shared home and provide an absolute path in jmxExporterCustomJarLocation | -| monitoring.grafana.createDashboards | bool | `false` | Create ConfigMaps with Grafana dashboards | -| monitoring.grafana.dashboardAnnotations | object | `{}` | Annotations added to Grafana dashboards ConfigMaps. See: https://github.com/kiwigrid/k8s-sidecar#usage | -| monitoring.grafana.dashboardLabels | object | `{}` | Label selector for Grafana dashboard importer sidecar | -| monitoring.jmxExporterCustomConfig | object | `{}` | Custom JMX config with the rules | -| monitoring.jmxExporterCustomJarLocation | string | `nil` | Location of jmx_exporter jar file if mounted from a secret or manually copied to shared home | -| monitoring.jmxExporterImageRepo | string | `"bitnami/jmx-exporter"` | Image repository with jmx_exporter jar | -| monitoring.jmxExporterImageTag | string | `"0.18.0"` | Image tag to be used to pull jmxExporterImageRepo | -| monitoring.jmxExporterInitContainer | object | `{"customSecurityContext":{},"resources":{},"runAsRoot":true}` | JMX exporter init container configuration | -| monitoring.jmxExporterInitContainer.customSecurityContext | object | `{}` | Custom SecurityContext for the jmx exporter init container | -| monitoring.jmxExporterInitContainer.resources | object | `{}` | Resources requests and limits for the JMX exporter init container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | -| monitoring.jmxExporterInitContainer.runAsRoot | bool | `true` | Whether to run JMX exporter init container as root to copy JMX exporter binary to shared home volume. Set to false if running containers as root is not allowed in the cluster. | -| monitoring.jmxExporterPort | int | `9999` | Port number on which metrics will be available | -| monitoring.jmxExporterPortType | string | `"ClusterIP"` | JMX exporter port type | -| monitoring.jmxServiceAnnotations | object | `{}` | Annotations added to the jmx service | -| monitoring.serviceMonitor.create | bool | `false` | Create ServiceMonitor to start scraping metrics. ServiceMonitor CRD needs to be created in advance. | -| monitoring.serviceMonitor.prometheusLabelSelector | object | `{}` | ServiceMonitorSelector of the prometheus instance. | -| monitoring.serviceMonitor.scrapeIntervalSeconds | int | `30` | Scrape interval for the JMX service. | -| nodeSelector | object | `{}` | Standard K8s node-selectors that will be applied to all Crowd pods | -| podAnnotations | object | `{}` | Custom annotations that will be applied to all Crowd pods | -| podDisruptionBudget | object | `{"annotations":{},"enabled":false,"labels":{},"maxUnavailable":null,"minAvailable":null}` | PodDisruptionBudget: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ You can specify only one of maxUnavailable and minAvailable in a single PodDisruptionBudget. When both minAvailable and maxUnavailable are set, maxUnavailable takes precedence. | -| podLabels | object | `{}` | Custom labels that will be applied to all Crowd pods | -| priorityClassName | string | `nil` | Priority class for the application pods. The PriorityClass with this name needs to be available in the cluster. For details see https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass | -| replicaCount | int | `1` | The initial number of Crowd pods that should be started at deployment time. 
Note that Crowd requires manual configuration via the browser post deployment after the first pod is deployed. This configuration must be completed before scaling up additional pods. As such this value should always be kept as 1, but can be altered once manual configuration is complete. | -| schedulerName | string | `nil` | Standard K8s schedulerName that will be applied to all Crowd pods. Check Kubernetes documentation on how to configure multiple schedulers: https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/#specify-schedulers-for-pods | -| serviceAccount.annotations | object | `{}` | Annotations to add to the ServiceAccount (if created) | -| serviceAccount.create | bool | `true` | Set to 'true' if a ServiceAccount should be created, or 'false' if it already exists. | -| serviceAccount.imagePullSecrets | list | `[]` | For Docker images hosted in private registries, define the list of image pull secrets that should be utilized by the created ServiceAccount https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | -| serviceAccount.name | string | `nil` | The name of the ServiceAccount to be used by the pods. If not specified, but the "serviceAccount.create" flag is set to 'true', then the ServiceAccount name will be auto-generated, otherwise the 'default' ServiceAccount will be used. https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server | -| terminationGracePeriodSeconds | int | `30` | The termination grace period for pods during shutdown. 30s is the -- Kubernetes default, but can be overridden here. | -| tolerations | list | `[]` | Standard K8s tolerations that will be applied to all Crowd pods | -| volumes.additional | list | `[]` | Defines additional volumes that should be applied to all Crowd pods. Note that this will not create any corresponding volume mounts; those needs to be defined in crowd.additionalVolumeMounts | -| volumes.localHome.customVolume | object | `{}` | Static provisioning of local-home using K8s PVs and PVCs NOTE: Due to the ephemeral nature of pods this approach to provisioning volumes for pods is not recommended. Dynamic provisioning described above is the prescribed approach. When 'persistentVolumeClaim.create' is 'false', then this value can be used to define a standard K8s volume that will be used for the local-home volume(s). If not defined, then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object. https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static | -| volumes.localHome.mountPath | string | `"/var/atlassian/application-data/crowd"` | Specifies the path in the Crowd container to which the local-home volume will be mounted. | -| volumes.localHome.persistentVolumeClaim.create | bool | `false` | If 'true', then a 'PersistentVolume' and 'PersistentVolumeClaim' will be dynamically created for each pod based on the 'StorageClassName' supplied below. | -| volumes.localHome.persistentVolumeClaim.resources | object | `{"requests":{"storage":"1Gi"}}` | Specifies the standard K8s resource requests and/or limits for the local-home volume claims. | -| volumes.localHome.persistentVolumeClaim.storageClassName | string | `nil` | Specify the name of the 'StorageClass' that should be used for the local-home volume claim. 
| -| volumes.sharedHome.customVolume | object | `{}` | Static provisioning of shared-home using K8s PVs and PVCs When 'persistentVolumeClaim.create' is 'false', then this value can be used to define a standard K8s volume that will be used for the shared-home volume. If not defined, then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object. https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static https://atlassian.github.io/data-center-helm-charts/examples/storage/aws/SHARED_STORAGE/ | -| volumes.sharedHome.mountPath | string | `"/var/atlassian/application-data/crowd/shared"` | Specifies the path in the Crowd container to which the shared-home volume will be mounted. | -| volumes.sharedHome.nfsPermissionFixer.command | string | `nil` | By default, the fixer will change the group ownership of the volume's root directory to match the Crowd container's GID (2002), and then ensures the directory is group-writeable. If this is not the desired behaviour, command used can be specified here. | -| volumes.sharedHome.nfsPermissionFixer.enabled | bool | `true` | If 'true', this will alter the shared-home volume's root directory so that Crowd can write to it. This is a workaround for a K8s bug affecting NFS volumes: https://github.com/kubernetes/examples/issues/260 | -| volumes.sharedHome.nfsPermissionFixer.imageRepo | string | `"alpine"` | Image repository for the permission fixer init container. Defaults to alpine | -| volumes.sharedHome.nfsPermissionFixer.imageTag | string | `"latest"` | Image tag for the permission fixer init container. Defaults to latest | -| volumes.sharedHome.nfsPermissionFixer.mountPath | string | `"/shared-home"` | The path in the K8s initContainer where the shared-home volume will be mounted | -| volumes.sharedHome.nfsPermissionFixer.resources | object | `{}` | Resources requests and limits for nfsPermissionFixer init container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | -| volumes.sharedHome.persistentVolumeClaim.create | bool | `false` | If 'true', then a 'PersistentVolumeClaim' and 'PersistentVolume' will be dynamically created for shared-home based on the 'StorageClassName' supplied below. | -| volumes.sharedHome.persistentVolumeClaim.resources | object | `{"requests":{"storage":"1Gi"}}` | Specifies the standard K8s resource requests and/or limits for the shared-home volume claims. | -| volumes.sharedHome.persistentVolumeClaim.storageClassName | string | `nil` | Specify the name of the 'StorageClass' that should be used for the 'shared-home' volume claim. | -| volumes.sharedHome.subPath | string | `nil` | Specifies the sub-directory of the shared-home volume that will be mounted in to the Crowd container. | \ No newline at end of file +| crowd.containerSecurityContext | object | `{}` | Standard K8s field that holds security configurations that will be applied to a container. https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | +| crowd.livenessProbe.enabled | bool | `false` | Whether to apply the livenessProbe check to pod. | +| crowd.livenessProbe.failureThreshold | int | `12` | The number of consecutive failures of the Crowd container liveness probe before the pod fails liveness checks. 
| +| crowd.livenessProbe.initialDelaySeconds | int | `60` | Time to wait before starting the first probe | +| crowd.livenessProbe.periodSeconds | int | `5` | How often (in seconds) the Crowd container liveness probe will run | +| crowd.livenessProbe.timeoutSeconds | int | `1` | Number of seconds after which the probe times out | +| crowd.ports.http | int | `8095` | The port on which the Crowd container listens for HTTP traffic | +| crowd.readinessProbe.customProbe | object | `{}` | Custom readinessProbe to override the default /status httpGet | +| crowd.readinessProbe.enabled | bool | `true` | Whether to apply the readinessProbe check to pod. | +| crowd.readinessProbe.failureThreshold | int | `10` | The number of consecutive failures of the Crowd container readiness probe before the pod fails readiness checks. | +| crowd.readinessProbe.initialDelaySeconds | int | `10` | The initial delay (in seconds) for the Crowd container readiness probe, after which the probe will start running. | +| crowd.readinessProbe.periodSeconds | int | `5` | How often (in seconds) the Crowd container readiness probe will run | +| crowd.readinessProbe.timeoutSeconds | int | `1` | Number of seconds after which the probe times out | +| crowd.resources.container.requests.cpu | string | `"2"` | Initial CPU request by Crowd pod | +| crowd.resources.container.requests.memory | string | `"1G"` | Initial Memory request by Crowd pod | +| crowd.resources.jvm.maxHeap | string | `"768m"` | The maximum amount of heap memory that will be used by the Crowd JVM | +| crowd.resources.jvm.minHeap | string | `"384m"` | The minimum amount of heap memory that will be used by the Crowd JVM | +| crowd.securityContext.fsGroup | int | `2004` | The GID used by the Crowd docker image GID will default to 2004 if not supplied and securityContextEnabled is set to true. This is intended to ensure that the shared-home volume is group-writeable by the GID used by the Crowd container. However, this doesn't appear to work for NFS volumes due to a K8s bug: https://github.com/kubernetes/examples/issues/260 | +| crowd.securityContextEnabled | bool | `true` | Whether to apply security context to pod. | +| crowd.service.annotations | object | `{}` | Additional annotations to apply to the Service | +| crowd.service.loadBalancerIP | string | `nil` | Use specific loadBalancerIP. Only applies to service type LoadBalancer. | +| crowd.service.port | int | `80` | The port on which the Crowd K8s Service will listen | +| crowd.service.sessionAffinity | string | `"None"` | Session affinity type. If you want to make sure that connections from a particular client are passed to the same pod each time, set sessionAffinity to ClientIP. See: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity | +| crowd.service.sessionAffinityConfig | object | `{"clientIP":{"timeoutSeconds":null}}` | Session affinity configuration | +| crowd.service.sessionAffinityConfig.clientIP.timeoutSeconds | string | `nil` | Specifies the seconds of ClientIP type session sticky time. The value must be > 0 && <= 86400(for 1 day) if ServiceAffinity == "ClientIP". Default value is 10800 (for 3 hours). | +| crowd.service.type | string | `"ClusterIP"` | The type of K8s service to use for Crowd. For loadBalancer type, deselect the consistent client IP address in Crowd Session configuration. 
Read more: https://atlassian.github.io/data-center-helm-charts/troubleshooting/LIMITATIONS/#loadbalancer-service-type | +| crowd.setPermissions | bool | `true` | Boolean to define whether to set local home directory permissions on startup of Crowd container. Set to 'false' to disable this behaviour. | +| crowd.shutdown.command | string | `"/shutdown-wait.sh"` | By default pods will be stopped via a [preStop hook](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/), using a script supplied by the Docker image. If any other shutdown behaviour is needed it can be achieved by overriding this value. Note that the shutdown command needs to wait for the application shutdown completely before exiting; see [the default command](https://bitbucket.org/atlassian-docker/docker-atlassian-crowd/src/master/shutdown-wait.sh) for details. | +| crowd.shutdown.terminationGracePeriodSeconds | int | `30` | The termination grace period for pods during shutdown. This should be set to the internal grace period, plus a small buffer to allow the JVM to fully terminate. | +| crowd.startupProbe.enabled | bool | `false` | Whether to apply the startupProbe check to pod. | +| crowd.startupProbe.failureThreshold | int | `120` | The number of consecutive failures of the Crowd container startup probe before the pod fails startup checks. | +| crowd.startupProbe.initialDelaySeconds | int | `60` | Time to wait before starting the first probe | +| crowd.startupProbe.periodSeconds | int | `5` | How often (in seconds) the Crowd container startup probe will run | +| crowd.topologySpreadConstraints | list | `[]` | Defines topology spread constraints for Crowd pods. See details: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ | +| crowd.umask | string | `"0022"` | The umask used by the Crowd process when it creates new files. The default is 0022. This gives the new files: - read/write permissions for the Crowd user - read permissions for everyone else. | +| crowd.useHelmReleaseNameAsContainerName | bool | `false` | Whether the main container should acquire helm release name. By default the container name is `crowd` which corresponds to the name of the Helm Chart. | +| fluentd.command | string | `nil` | The command used to start Fluentd. If not supplied the default command will be used: "fluentd -c /fluentd/etc/fluent.conf -v" Note: The custom command can be free-form, however pay particular attention to the process that should ultimately be left running in the container. This process should be invoked with 'exec' so that signals are appropriately propagated to it, for instance SIGTERM. An example of how such a command may look is: " && && exec " | +| fluentd.customConfigFile | bool | `false` | Set to 'true' if a custom config (see 'configmap-fluentd.yaml' for default) should be used for Fluentd. If enabled this config must be supplied via the 'fluentdCustomConfig' property below. | +| fluentd.elasticsearch.enabled | bool | `true` | Set to 'true' if Fluentd should send all log events to an Elasticsearch service. | +| fluentd.elasticsearch.extraVolumes | list | `[]` | Specify custom volumes to be added to Fluentd container (e.g. more log sources) | +| fluentd.elasticsearch.hostname | string | `"elasticsearch"` | The hostname of the Elasticsearch service that Fluentd should send logs to. 
| +| fluentd.elasticsearch.indexNamePrefix | string | `"crowd"` | The prefix of the Elasticsearch index name that will be used | +| fluentd.enabled | bool | `false` | Set to 'true' if the Fluentd sidecar (DaemonSet) should be added to each pod | +| fluentd.fluentdCustomConfig | object | `{}` | Custom fluent.conf file | +| fluentd.httpPort | int | `9880` | The port on which the Fluentd sidecar will listen | +| fluentd.imageRepo | string | `"fluent/fluentd-kubernetes-daemonset"` | The Fluentd sidecar image repository | +| fluentd.imageTag | string | `"v1.11.5-debian-elasticsearch7-1.2"` | The Fluentd sidecar image tag | +| fluentd.resources | object | `{}` | Resources requests and limits for fluentd sidecar container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | +| image | object | `{"pullPolicy":"IfNotPresent","repository":"atlassian/crowd","tag":""}` | Image configuration | +| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | +| image.repository | string | `"atlassian/crowd"` | The Docker Crowd Docker image to use https://hub.docker.com/r/atlassian/crowd | +| image.tag | string | `""` | The docker image tag to be used. Defaults to appVersion in Chart.yaml | +| ingress.annotations | object | `{}` | The custom annotations that should be applied to the Ingress Resource. If using an ingress-nginx controller be sure that the annotations you add here are compatible with those already defined in the 'ingess.yaml' template | +| ingress.className | string | `"nginx"` | The class name used by the ingress controller if it's being used. Please follow documentation of your ingress controller. If the cluster contains multiple ingress controllers, this setting allows you to control which of them is used for Atlassian application traffic. | +| ingress.create | bool | `false` | Set to 'true' if an Ingress Resource should be created. This depends on a pre-provisioned Ingress Controller being available. | +| ingress.host | string | `nil` | The fully-qualified hostname (FQDN) of the Ingress Resource. Traffic coming in on this hostname will be routed by the Ingress Resource to the appropriate backend Service. | +| ingress.https | bool | `true` | Set to 'true' if browser communication with the application should be TLS (HTTPS) enforced. | +| ingress.maxBodySize | string | `"250m"` | The max body size to allow. Requests exceeding this size will result in an HTTP 413 error being returned to the client. | +| ingress.nginx | bool | `true` | Set to 'true' if the Ingress Resource is to use the K8s 'ingress-nginx' controller. https://kubernetes.github.io/ingress-nginx/ This will populate the Ingress Resource with annotations that are specific to the K8s ingress-nginx controller. Set to 'false' if a different controller is to be used, in which case the appropriate annotations for that controller must be specified below under 'ingress.annotations'. | +| ingress.path | string | `"/"` | The base path for the Ingress Resource. For example '/crowd'. Based on a 'ingress.host' value of 'company.k8s.com' this would result in a URL of 'company.k8s.com/crowd'. Due to temporary limitations with changing Crowd context on the application level, only "/" and "/crowd" paths are supported. | +| ingress.proxyConnectTimeout | int | `60` | Defines a timeout for establishing a connection with a proxied server. It should be noted that this timeout cannot usually exceed 75 seconds. 
| +| ingress.proxyReadTimeout | int | `60` | Defines a timeout for reading a response from the proxied server. The timeout is set only between two successive read operations, not for the transmission of the whole response. If the proxied server does not transmit anything within this time, the connection is closed. | +| ingress.proxySendTimeout | int | `60` | Sets a timeout for transmitting a request to the proxied server. The timeout is set only between two successive write operations, not for the transmission of the whole request. If the proxied server does not receive anything within this time, the connection is closed. | +| ingress.tlsSecretName | string | `nil` | The name of the K8s Secret that contains the TLS private key and corresponding certificate. When utilised, TLS termination occurs at the ingress point where traffic to the Service, and it's Pods is in plaintext. Usage is optional and depends on your use case. The Ingress Controller itself can also be configured with a TLS secret for all Ingress Resources. https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets https://kubernetes.io/docs/concepts/services-networking/ingress/#tls | +| monitoring.exposeJmxMetrics | bool | `false` | Expose JMX metrics with jmx_exporter https://github.com/prometheus/jmx_exporter | +| monitoring.fetchJmxExporterJar | bool | `true` | Fetch jmx_exporter jar from the image. If set to false make sure to manually copy the jar to shared home and provide an absolute path in jmxExporterCustomJarLocation | +| monitoring.grafana.createDashboards | bool | `false` | Create ConfigMaps with Grafana dashboards | +| monitoring.grafana.dashboardAnnotations | object | `{}` | Annotations added to Grafana dashboards ConfigMaps. See: https://github.com/kiwigrid/k8s-sidecar#usage | +| monitoring.grafana.dashboardLabels | object | `{}` | Label selector for Grafana dashboard importer sidecar | +| monitoring.jmxExporterCustomConfig | object | `{}` | Custom JMX config with the rules | +| monitoring.jmxExporterCustomJarLocation | string | `nil` | Location of jmx_exporter jar file if mounted from a secret or manually copied to shared home | +| monitoring.jmxExporterImageRepo | string | `"bitnami/jmx-exporter"` | Image repository with jmx_exporter jar | +| monitoring.jmxExporterImageTag | string | `"0.18.0"` | Image tag to be used to pull jmxExporterImageRepo | +| monitoring.jmxExporterInitContainer | object | `{"customSecurityContext":{},"resources":{},"runAsRoot":true}` | JMX exporter init container configuration | +| monitoring.jmxExporterInitContainer.customSecurityContext | object | `{}` | Custom SecurityContext for the jmx exporter init container | +| monitoring.jmxExporterInitContainer.resources | object | `{}` | Resources requests and limits for the JMX exporter init container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | +| monitoring.jmxExporterInitContainer.runAsRoot | bool | `true` | Whether to run JMX exporter init container as root to copy JMX exporter binary to shared home volume. Set to false if running containers as root is not allowed in the cluster. | +| monitoring.jmxExporterPort | int | `9999` | Port number on which metrics will be available | +| monitoring.jmxExporterPortType | string | `"ClusterIP"` | JMX exporter port type | +| monitoring.jmxServiceAnnotations | object | `{}` | Annotations added to the jmx service | +| monitoring.serviceMonitor.create | bool | `false` | Create ServiceMonitor to start scraping metrics. 
ServiceMonitor CRD needs to be created in advance. | +| monitoring.serviceMonitor.prometheusLabelSelector | object | `{}` | ServiceMonitorSelector of the prometheus instance. | +| monitoring.serviceMonitor.scrapeIntervalSeconds | int | `30` | Scrape interval for the JMX service. | +| nodeSelector | object | `{}` | Standard K8s node-selectors that will be applied to all Crowd pods | +| podAnnotations | object | `{}` | Custom annotations that will be applied to all Crowd pods | +| podDisruptionBudget | object | `{"annotations":{},"enabled":false,"labels":{},"maxUnavailable":null,"minAvailable":null}` | PodDisruptionBudget: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ You can specify only one of maxUnavailable and minAvailable in a single PodDisruptionBudget. When both minAvailable and maxUnavailable are set, maxUnavailable takes precedence. | +| podLabels | object | `{}` | Custom labels that will be applied to all Crowd pods | +| priorityClassName | string | `nil` | Priority class for the application pods. The PriorityClass with this name needs to be available in the cluster. For details see https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass | +| replicaCount | int | `1` | The initial number of Crowd pods that should be started at deployment time. Note that Crowd requires manual configuration via the browser post deployment after the first pod is deployed. This configuration must be completed before scaling up additional pods. As such this value should always be kept as 1, but can be altered once manual configuration is complete. | +| schedulerName | string | `nil` | Standard K8s schedulerName that will be applied to all Crowd pods. Check Kubernetes documentation on how to configure multiple schedulers: https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/#specify-schedulers-for-pods | +| serviceAccount.annotations | object | `{}` | Annotations to add to the ServiceAccount (if created) | +| serviceAccount.create | bool | `true` | Set to 'true' if a ServiceAccount should be created, or 'false' if it already exists. | +| serviceAccount.imagePullSecrets | list | `[]` | For Docker images hosted in private registries, define the list of image pull secrets that should be utilized by the created ServiceAccount https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | +| serviceAccount.name | string | `nil` | The name of the ServiceAccount to be used by the pods. If not specified, but the "serviceAccount.create" flag is set to 'true', then the ServiceAccount name will be auto-generated, otherwise the 'default' ServiceAccount will be used. https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server | +| terminationGracePeriodSeconds | int | `30` | Kubernetes default, but can be overridden here. | +| testPods | object | `{"affinity":{},"annotations":{},"image":{"permissionsTestContainer":"debian:stable-slim","statusTestContainer":"alpine:latest"},"labels":{},"nodeSelector":{},"schedulerName":null,"tolerations":[]}` | Metadata and pod spec for pods started in Helm tests | +| tolerations | list | `[]` | Standard K8s tolerations that will be applied to all Crowd pods | +| volumes.additional | list | `[]` | Defines additional volumes that should be applied to all Crowd pods. 
Note that this will not create any corresponding volume mounts; those needs to be defined in crowd.additionalVolumeMounts | +| volumes.localHome.customVolume | object | `{}` | Static provisioning of local-home using K8s PVs and PVCs NOTE: Due to the ephemeral nature of pods this approach to provisioning volumes for pods is not recommended. Dynamic provisioning described above is the prescribed approach. When 'persistentVolumeClaim.create' is 'false', then this value can be used to define a standard K8s volume that will be used for the local-home volume(s). If not defined, then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object. https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static | +| volumes.localHome.mountPath | string | `"/var/atlassian/application-data/crowd"` | Specifies the path in the Crowd container to which the local-home volume will be mounted. | +| volumes.localHome.persistentVolumeClaim.create | bool | `false` | If 'true', then a 'PersistentVolume' and 'PersistentVolumeClaim' will be dynamically created for each pod based on the 'StorageClassName' supplied below. | +| volumes.localHome.persistentVolumeClaim.resources | object | `{"requests":{"storage":"1Gi"}}` | Specifies the standard K8s resource requests and/or limits for the local-home volume claims. | +| volumes.localHome.persistentVolumeClaim.storageClassName | string | `nil` | Specify the name of the 'StorageClass' that should be used for the local-home volume claim. | +| volumes.sharedHome.customVolume | object | `{}` | Static provisioning of shared-home using K8s PVs and PVCs When 'persistentVolumeClaim.create' is 'false', then this value can be used to define a standard K8s volume that will be used for the shared-home volume. If not defined, then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object. https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static https://atlassian.github.io/data-center-helm-charts/examples/storage/aws/SHARED_STORAGE/ | +| volumes.sharedHome.mountPath | string | `"/var/atlassian/application-data/crowd/shared"` | Specifies the path in the Crowd container to which the shared-home volume will be mounted. | +| volumes.sharedHome.nfsPermissionFixer.command | string | `nil` | By default, the fixer will change the group ownership of the volume's root directory to match the Crowd container's GID (2002), and then ensures the directory is group-writeable. If this is not the desired behaviour, command used can be specified here. | +| volumes.sharedHome.nfsPermissionFixer.enabled | bool | `true` | If 'true', this will alter the shared-home volume's root directory so that Crowd can write to it. This is a workaround for a K8s bug affecting NFS volumes: https://github.com/kubernetes/examples/issues/260 | +| volumes.sharedHome.nfsPermissionFixer.imageRepo | string | `"alpine"` | Image repository for the permission fixer init container. Defaults to alpine | +| volumes.sharedHome.nfsPermissionFixer.imageTag | string | `"latest"` | Image tag for the permission fixer init container. 
Defaults to latest | +| volumes.sharedHome.nfsPermissionFixer.mountPath | string | `"/shared-home"` | The path in the K8s initContainer where the shared-home volume will be mounted | +| volumes.sharedHome.nfsPermissionFixer.resources | object | `{}` | Resources requests and limits for nfsPermissionFixer init container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | +| volumes.sharedHome.persistentVolumeClaim.create | bool | `false` | If 'true', then a 'PersistentVolumeClaim' and 'PersistentVolume' will be dynamically created for shared-home based on the 'StorageClassName' supplied below. | +| volumes.sharedHome.persistentVolumeClaim.resources | object | `{"requests":{"storage":"1Gi"}}` | Specifies the standard K8s resource requests and/or limits for the shared-home volume claims. | +| volumes.sharedHome.persistentVolumeClaim.storageClassName | string | `nil` | Specify the name of the 'StorageClass' that should be used for the 'shared-home' volume claim. | +| volumes.sharedHome.subPath | string | `nil` | Specifies the sub-directory of the shared-home volume that will be mounted in to the Crowd container. | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/src/main/charts/crowd/templates/tests/test-application-status.yaml b/src/main/charts/crowd/templates/tests/test-application-status.yaml index d4a2e2559..db39300cd 100644 --- a/src/main/charts/crowd/templates/tests/test-application-status.yaml +++ b/src/main/charts/crowd/templates/tests/test-application-status.yaml @@ -5,9 +5,21 @@ metadata: annotations: "helm.sh/hook": test "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + {{- if not .Values.testPods.annotations }} {{- include "crowd.podAnnotations" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.annotations }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} labels: + {{- if not .Values.testPods.labels }} {{- include "common.labels.commonLabels" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.labels }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} spec: containers: - name: test @@ -24,3 +36,18 @@ spec: echo "Verifying application state is RUNNING or FIRST_RUN: $STATUS" echo $STATUS | jq -e '.state|test("RUNNING|FIRST_RUN")' restartPolicy: Never + {{- with .Values.testPods.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.testPods.schedulerName }} + schedulerName: {{ .Values.testPods.schedulerName | quote }} + {{- end }} diff --git a/src/main/charts/crowd/templates/tests/test-shared-home-permissions.yaml b/src/main/charts/crowd/templates/tests/test-shared-home-permissions.yaml index b349198bb..93b62bc64 100644 --- a/src/main/charts/crowd/templates/tests/test-shared-home-permissions.yaml +++ b/src/main/charts/crowd/templates/tests/test-shared-home-permissions.yaml @@ -5,9 +5,21 @@ metadata: annotations: "helm.sh/hook": test "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + {{- if not .Values.testPods.annotations }} {{- include "crowd.podAnnotations" . 
| nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.annotations }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} labels: + {{- if not .Values.testPods.labels }} {{- include "common.labels.commonLabels" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.labels }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} spec: containers: - name: test @@ -45,3 +57,18 @@ spec: volumes: {{ include "crowd.volumes.sharedHome" . | nindent 4 }} restartPolicy: Never + {{- with .Values.testPods.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.testPods.schedulerName }} + schedulerName: {{ .Values.testPods.schedulerName | quote }} + {{- end }} diff --git a/src/main/charts/crowd/values.yaml b/src/main/charts/crowd/values.yaml index 9f53d7ae5..50ef44f36 100644 --- a/src/main/charts/crowd/values.yaml +++ b/src/main/charts/crowd/values.yaml @@ -958,3 +958,16 @@ atlassianAnalyticsAndSupport: # which can be optionally including to support.zip # enabled: true + +# -- Metadata and pod spec for pods started in Helm tests +# +testPods: + labels: {} + annotations: {} + nodeSelector: {} + tolerations: [] + affinity: {} + schedulerName: + image: + permissionsTestContainer: debian:stable-slim + statusTestContainer: alpine:latest diff --git a/src/main/charts/jira/README.md b/src/main/charts/jira/README.md index 9ca9d05aa..651fd6a3b 100644 --- a/src/main/charts/jira/README.md +++ b/src/main/charts/jira/README.md @@ -4,8 +4,6 @@ A chart for installing Jira Data Center on Kubernetes -For installation please follow [the documentation](https://atlassian.github.io/data-center-helm-charts/). - **Homepage:** ## Source Code @@ -25,154 +23,158 @@ Kubernetes: `>=1.21.x-0` | Key | Type | Default | Description | |-----|------|---------|-------------| -| additionalConfigMaps | list | `[]` | Create additional ConfigMaps with given names, keys and content. Ther Helm release name will be used as a prefix for a ConfigMap name, fileName is used as subPath | -| additionalContainers | list | `[]` | Additional container definitions that will be added to all Jira pods | -| additionalFiles | list | `[]` | Additional existing ConfigMaps and Secrets not managed by Helm that should be mounted into service container. Configuration details below (camelCase is important!): 'name' - References existing ConfigMap or secret name. 'type' - 'configMap' or 'secret' 'key' - The file name. 'mountPath' - The destination directory in a container. VolumeMount and Volumes are added with this name and index position, for example; custom-config-0, keystore-2 | +| additionalConfigMaps | list | `[]` | Create additional ConfigMaps with given names, keys and content. Ther Helm release name will be used as a prefix for a ConfigMap name, fileName is used as subPath | +| additionalContainers | list | `[]` | Additional container definitions that will be added to all Jira pods | +| additionalFiles | list | `[]` | Additional existing ConfigMaps and Secrets not managed by Helm that should be mounted into service container. Configuration details below (camelCase is important!): 'name' - References existing ConfigMap or secret name. 'type' - 'configMap' or 'secret' 'key' - The file name. 'mountPath' - The destination directory in a container. 
VolumeMount and Volumes are added with this name and index position, for example; custom-config-0, keystore-2 | | additionalHosts | list | `[]` | Additional host aliases for each pod, equivalent to adding them to the /etc/hosts file. https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ | -| additionalInitContainers | list | `[]` | Additional initContainer definitions that will be added to all Jira pods | -| additionalLabels | object | `{}` | Additional labels that should be applied to all resources | -| affinity | object | `{}` | Standard K8s affinities that will be applied to all Jira pods | -| atlassianAnalyticsAndSupport.analytics.enabled | bool | `true` | Mount ConfigMap with selected Helm chart values as a JSON which DC products will read and send analytics events to Atlassian data pipelines | -| atlassianAnalyticsAndSupport.helmValues.enabled | bool | `true` | Mount ConfigMap with selected Helm chart values as a YAML file which can be optionally including to support.zip | -| database.credentials.passwordSecretKey | string | `"password"` | The key ('password') in the Secret used to store the database login password | -| database.credentials.secretName | string | `nil` | The name of the K8s Secret that contains the database login credentials. If the secret is specified, then the credentials will be automatically utilised on Jira startup. If the secret is not provided, then the credentials will need to be provided via the browser during manual configuration post deployment. Example of creating a database credentials K8s secret below: 'kubectl create secret generic --from-literal=username= \ --from-literal=password=' https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets | -| database.credentials.usernameSecretKey | string | `"username"` | The key ('username') in the Secret used to store the database login username | -| database.driver | string | `nil` | The Java class name of the JDBC driver to be used. If not specified, then it will need to be provided via the browser during manual configuration post deployment. Valid drivers are: - 'org.postgresql.Driver' - 'com.mysql.jdbc.Driver' - 'oracle.jdbc.OracleDriver' - 'com.microsoft.sqlserver.jdbc.SQLServerDriver' https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#databasedriver: | -| database.type | string | `nil` | The database type that should be used. If not specified, then it will need to be provided via the browser during manual configuration post deployment. Valid values include: - 'postgres72' - 'mysql57' - 'mysql8' - 'oracle10g' - 'mssql' - 'postgresaurora96' https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#databasetype | -| database.url | string | `nil` | The jdbc URL of the database. If not specified, then it will need to be provided via the browser during manual configuration post deployment. Example URLs include: - 'jdbc:postgresql://:5432/' - 'jdbc:mysql:///' - 'jdbc:sqlserver://:1433;databaseName=' - 'jdbc:oracle:thin:@:1521:' https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#databaseurl | -| fluentd.command | string | `nil` | The command used to start Fluentd. If not supplied the default command will be used: "fluentd -c /fluentd/etc/fluent.conf -v" Note: The custom command can be free-form, however pay particular attention to the process that should ultimately be left running in the container. 
This process should be invoked with 'exec' so that signals are appropriately propagated to it, for instance SIGTERM. An example of how such a command may look is: " && && exec " | -| fluentd.customConfigFile | bool | `false` | Set to 'true' if a custom config (see 'configmap-fluentd.yaml' for default) should be used for Fluentd. If enabled this config must be supplied via the 'fluentdCustomConfig' property below. | -| fluentd.elasticsearch.enabled | bool | `true` | Set to 'true' if Fluentd should send all log events to an Elasticsearch service. | -| fluentd.elasticsearch.hostname | string | `"elasticsearch"` | The hostname of the Elasticsearch service that Fluentd should send logs to. | -| fluentd.elasticsearch.indexNamePrefix | string | `"jira"` | The prefix of the Elasticsearch index name that will be used | -| fluentd.enabled | bool | `false` | Set to 'true' if the Fluentd sidecar (DaemonSet) should be added to each pod | -| fluentd.extraVolumes | list | `[]` | Specify custom volumes to be added to Fluentd container (e.g. more log sources) | -| fluentd.fluentdCustomConfig | object | `{}` | Custom fluent.conf file | -| fluentd.httpPort | int | `9880` | The port on which the Fluentd sidecar will listen | -| fluentd.imageRepo | string | `"fluent/fluentd-kubernetes-daemonset"` | The Fluentd sidecar image repository | -| fluentd.imageTag | string | `"v1.11.5-debian-elasticsearch7-1.2"` | The Fluentd sidecar image tag | -| fluentd.resources | object | `{}` | Resources requests and limits for fluentd sidecar container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | -| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | -| image.repository | string | `"atlassian/jira-software"` | The Jira Docker image to use https://hub.docker.com/r/atlassian/jira-software | -| image.tag | string | `""` | The docker image tag to be used - defaults to the Chart appVersion | -| ingress.annotations | object | `{}` | The custom annotations that should be applied to the Ingress Resource. If using an ingress-nginx controller be sure that the annotations you add here are compatible with those already defined in the 'ingess.yaml' template | -| ingress.className | string | `"nginx"` | The class name used by the ingress controller if it's being used. Please follow documentation of your ingress controller. If the cluster contains multiple ingress controllers, this setting allows you to control which of them is used for Atlassian application traffic. | -| ingress.create | bool | `false` | Set to 'true' if an Ingress Resource should be created. This depends on a pre-provisioned Ingress Controller being available. | -| ingress.host | string | `nil` | The fully-qualified hostname (FQDN) of the Ingress Resource. Traffic coming in on this hostname will be routed by the Ingress Resource to the appropriate backend Service. | -| ingress.https | bool | `true` | Set to 'true' if browser communication with the application should be TLS (HTTPS) enforced. | -| ingress.maxBodySize | string | `"250m"` | The max body size to allow. Requests exceeding this size will result in an HTTP 413 error being returned to the client. | -| ingress.nginx | bool | `true` | Set to 'true' if the Ingress Resource is to use the K8s 'ingress-nginx' controller. https://kubernetes.github.io/ingress-nginx/ This will populate the Ingress Resource with annotations that are specific to the K8s ingress-nginx controller. 
Set to 'false' if a different controller is to be used, in which case the appropriate annotations for that controller must be specified below under 'ingress.annotations'. | -| ingress.path | string | `nil` | The base path for the Ingress Resource. For example '/jira'. Based on a 'ingress.host' value of 'company.k8s.com' this would result in a URL of 'company.k8s.com/jira'. Default value is 'jira.service.contextPath' | -| ingress.proxyConnectTimeout | int | `60` | Defines a timeout for establishing a connection with a proxied server. It should be noted that this timeout cannot usually exceed 75 seconds. | -| ingress.proxyReadTimeout | int | `60` | Defines a timeout for reading a response from the proxied server. The timeout is set only between two successive read operations, not for the transmission of the whole response. If the proxied server does not transmit anything within this time, the connection is closed. | -| ingress.proxySendTimeout | int | `60` | Sets a timeout for transmitting a request to the proxied server. The timeout is set only between two successive write operations, not for the transmission of the whole request. If the proxied server does not receive anything within this time, the connection is closed. | -| ingress.tlsSecretName | string | `nil` | The name of the K8s Secret that contains the TLS private key and corresponding certificate. When utilised, TLS termination occurs at the ingress point where traffic to the Service, and it's Pods is in plaintext. Usage is optional and depends on your use case. The Ingress Controller itself can also be configured with a TLS secret for all Ingress Resources. https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets https://kubernetes.io/docs/concepts/services-networking/ingress/#tls | -| jira.accessLog.localHomeSubPath | string | `"log"` | The subdirectory within the local-home volume where access logs should be stored. | -| jira.accessLog.mountPath | string | `"/opt/atlassian/jira/logs"` | The path within the Jira container where the local-home volume should be mounted in order to capture access logs. | -| jira.additionalBundledPlugins | list | `[]` | Specifies a list of additional Jira plugins that should be added to the Jira container. Note plugins installed via this method will appear as bundled plugins rather than user plugins. These should be specified in the same manner as the 'additionalLibraries' property. Additional details: https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ NOTE: only .jar files can be loaded using this approach. OBR's can be extracted (unzipped) to access the associated .jar An alternative to this method is to install the plugins via "Manage Apps" in the product system administration UI. | -| jira.additionalCertificates | object | `{"customCmd":null,"secretName":null}` | Certificates to be added to Java truststore. Provide reference to a secret that contains the certificates | -| jira.additionalEnvironmentVariables | list | `[]` | Defines any additional environment variables to be passed to the Jira container. See https://hub.docker.com/r/atlassian/jira-software for supported variables. 
| +| additionalInitContainers | list | `[]` | Additional initContainer definitions that will be added to all Jira pods | +| additionalLabels | object | `{}` | Additional labels that should be applied to all resources | +| affinity | object | `{}` | Standard K8s affinities that will be applied to all Jira pods | +| atlassianAnalyticsAndSupport.analytics.enabled | bool | `true` | Mount ConfigMap with selected Helm chart values as a JSON which DC products will read and send analytics events to Atlassian data pipelines | +| atlassianAnalyticsAndSupport.helmValues.enabled | bool | `true` | Mount ConfigMap with selected Helm chart values as a YAML file which can be optionally included in support.zip | +| database.credentials.passwordSecretKey | string | `"password"` | The key ('password') in the Secret used to store the database login password | +| database.credentials.secretName | string | `nil` | The name of the K8s Secret that contains the database login credentials. If the secret is specified, then the credentials will be automatically utilised on Jira startup. If the secret is not provided, then the credentials will need to be provided via the browser during manual configuration post deployment. Example of creating a database credentials K8s secret below: 'kubectl create secret generic --from-literal=username= \ --from-literal=password=' https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets | +| database.credentials.usernameSecretKey | string | `"username"` | The key ('username') in the Secret used to store the database login username | +| database.driver | string | `nil` | The Java class name of the JDBC driver to be used. If not specified, then it will need to be provided via the browser during manual configuration post deployment. Valid drivers are: - 'org.postgresql.Driver' - 'com.mysql.jdbc.Driver' - 'oracle.jdbc.OracleDriver' - 'com.microsoft.sqlserver.jdbc.SQLServerDriver' https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#databasedriver: | +| database.type | string | `nil` | The database type that should be used. If not specified, then it will need to be provided via the browser during manual configuration post deployment. Valid values include: - 'postgres72' - 'mysql57' - 'mysql8' - 'oracle10g' - 'mssql' - 'postgresaurora96' https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#databasetype | +| database.url | string | `nil` | The jdbc URL of the database. If not specified, then it will need to be provided via the browser during manual configuration post deployment. Example URLs include: - 'jdbc:postgresql://:5432/' - 'jdbc:mysql:///' - 'jdbc:sqlserver://:1433;databaseName=' - 'jdbc:oracle:thin:@:1521:' https://atlassian.github.io/data-center-helm-charts/userguide/CONFIGURATION/#databaseurl | +| fluentd.command | string | `nil` | The command used to start Fluentd. If not supplied the default command will be used: "fluentd -c /fluentd/etc/fluent.conf -v" Note: The custom command can be free-form, however pay particular attention to the process that should ultimately be left running in the container. This process should be invoked with 'exec' so that signals are appropriately propagated to it, for instance SIGTERM. An example of how such a command may look is: " && && exec " | +| fluentd.customConfigFile | bool | `false` | Set to 'true' if a custom config (see 'configmap-fluentd.yaml' for default) should be used for Fluentd. If enabled this config must be supplied via the 'fluentdCustomConfig' property below. | +| fluentd.elasticsearch.enabled | bool | `true` | Set to 'true' if Fluentd should send all log events to an Elasticsearch service. | +| fluentd.elasticsearch.hostname | string | `"elasticsearch"` | The hostname of the Elasticsearch service that Fluentd should send logs to.
| +| fluentd.elasticsearch.indexNamePrefix | string | `"jira"` | The prefix of the Elasticsearch index name that will be used | +| fluentd.enabled | bool | `false` | Set to 'true' if the Fluentd sidecar (DaemonSet) should be added to each pod | +| fluentd.extraVolumes | list | `[]` | Specify custom volumes to be added to Fluentd container (e.g. more log sources) | +| fluentd.fluentdCustomConfig | object | `{}` | Custom fluent.conf file | +| fluentd.httpPort | int | `9880` | The port on which the Fluentd sidecar will listen | +| fluentd.imageRepo | string | `"fluent/fluentd-kubernetes-daemonset"` | The Fluentd sidecar image repository | +| fluentd.imageTag | string | `"v1.11.5-debian-elasticsearch7-1.2"` | The Fluentd sidecar image tag | +| fluentd.resources | object | `{}` | Resources requests and limits for fluentd sidecar container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | +| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | +| image.repository | string | `"atlassian/jira-software"` | The Jira Docker image to use https://hub.docker.com/r/atlassian/jira-software | +| image.tag | string | `""` | The docker image tag to be used - defaults to the Chart appVersion | +| ingress.annotations | object | `{}` | The custom annotations that should be applied to the Ingress Resource. If using an ingress-nginx controller be sure that the annotations you add here are compatible with those already defined in the 'ingess.yaml' template | +| ingress.className | string | `"nginx"` | The class name used by the ingress controller if it's being used. Please follow documentation of your ingress controller. If the cluster contains multiple ingress controllers, this setting allows you to control which of them is used for Atlassian application traffic. | +| ingress.create | bool | `false` | Set to 'true' if an Ingress Resource should be created. This depends on a pre-provisioned Ingress Controller being available. | +| ingress.host | string | `nil` | The fully-qualified hostname (FQDN) of the Ingress Resource. Traffic coming in on this hostname will be routed by the Ingress Resource to the appropriate backend Service. | +| ingress.https | bool | `true` | Set to 'true' if browser communication with the application should be TLS (HTTPS) enforced. | +| ingress.maxBodySize | string | `"250m"` | The max body size to allow. Requests exceeding this size will result in an HTTP 413 error being returned to the client. | +| ingress.nginx | bool | `true` | Set to 'true' if the Ingress Resource is to use the K8s 'ingress-nginx' controller. https://kubernetes.github.io/ingress-nginx/ This will populate the Ingress Resource with annotations that are specific to the K8s ingress-nginx controller. Set to 'false' if a different controller is to be used, in which case the appropriate annotations for that controller must be specified below under 'ingress.annotations'. | +| ingress.path | string | `nil` | The base path for the Ingress Resource. For example '/jira'. Based on a 'ingress.host' value of 'company.k8s.com' this would result in a URL of 'company.k8s.com/jira'. Default value is 'jira.service.contextPath' | +| ingress.proxyConnectTimeout | int | `60` | Defines a timeout for establishing a connection with a proxied server. It should be noted that this timeout cannot usually exceed 75 seconds. | +| ingress.proxyReadTimeout | int | `60` | Defines a timeout for reading a response from the proxied server. 
The timeout is set only between two successive read operations, not for the transmission of the whole response. If the proxied server does not transmit anything within this time, the connection is closed. | +| ingress.proxySendTimeout | int | `60` | Sets a timeout for transmitting a request to the proxied server. The timeout is set only between two successive write operations, not for the transmission of the whole request. If the proxied server does not receive anything within this time, the connection is closed. | +| ingress.tlsSecretName | string | `nil` | The name of the K8s Secret that contains the TLS private key and corresponding certificate. When utilised, TLS termination occurs at the ingress point where traffic to the Service, and it's Pods is in plaintext. Usage is optional and depends on your use case. The Ingress Controller itself can also be configured with a TLS secret for all Ingress Resources. https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets https://kubernetes.io/docs/concepts/services-networking/ingress/#tls | +| jira.accessLog.localHomeSubPath | string | `"log"` | The subdirectory within the local-home volume where access logs should be stored. | +| jira.accessLog.mountPath | string | `"/opt/atlassian/jira/logs"` | The path within the Jira container where the local-home volume should be mounted in order to capture access logs. | +| jira.additionalBundledPlugins | list | `[]` | Specifies a list of additional Jira plugins that should be added to the Jira container. Note plugins installed via this method will appear as bundled plugins rather than user plugins. These should be specified in the same manner as the 'additionalLibraries' property. Additional details: https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ NOTE: only .jar files can be loaded using this approach. OBR's can be extracted (unzipped) to access the associated .jar An alternative to this method is to install the plugins via "Manage Apps" in the product system administration UI. | +| jira.additionalCertificates | object | `{"customCmd":null,"secretName":null}` | Certificates to be added to Java truststore. Provide reference to a secret that contains the certificates | +| jira.additionalEnvironmentVariables | list | `[]` | Defines any additional environment variables to be passed to the Jira container. See https://hub.docker.com/r/atlassian/jira-software for supported variables. | | jira.additionalJvmArgs | list | `[]` | | -| jira.additionalLibraries | list | `[]` | Specifies a list of additional Java libraries that should be added to the Jira container. Each item in the list should specify the name of the volume that contains the library, as well as the name of the library file within that volume's root directory. Optionally, a subDirectory field can be included to specify which directory in the volume contains the library file. Additional details: https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ | -| jira.additionalPorts | list | `[]` | Defines any additional ports for the Jira container. | -| jira.additionalVolumeClaimTemplates | list | `[]` | Defines additional volumeClaimTemplates that should be applied to the Jira pod. Note that this will not create any corresponding volume mounts; those needs to be defined in jira.additionalVolumeMounts | -| jira.additionalVolumeMounts | list | `[]` | Defines any additional volumes mounts for the Jira container. 
These can refer to existing volumes, or new volumes can be defined via 'volumes.additional'. | -| jira.clustering.enabled | bool | `false` | Set to 'true' if Data Center clustering should be enabled This will automatically configure cluster peer discovery between cluster nodes. | -| jira.containerSecurityContext | object | `{}` | Standard K8s field that holds security configurations that will be applied to a container. https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | -| jira.forceConfigUpdate | bool | `false` | The Docker entrypoint.py generates application configuration on first start; not all of these files are regenerated on subsequent starts. By default, dbconfig.xml is generated only once. Set `forceConfigUpdate` to true to change this behavior. | -| jira.livenessProbe.enabled | bool | `false` | Whether to apply the livenessProbe check to pod. | -| jira.livenessProbe.failureThreshold | int | `12` | The number of consecutive failures of the Jira container liveness probe before the pod fails liveness checks. | -| jira.livenessProbe.initialDelaySeconds | int | `60` | Time to wait before starting the first probe | -| jira.livenessProbe.periodSeconds | int | `5` | How often (in seconds) the Jira container liveness probe will run | -| jira.livenessProbe.timeoutSeconds | int | `1` | Number of seconds after which the probe times out | -| jira.ports.ehcache | int | `40001` | Ehcache port | -| jira.ports.ehcacheobject | int | `40011` | Ehcache object port | -| jira.ports.http | int | `8080` | The port on which the Jira container listens for HTTP traffic | -| jira.readinessProbe.customProbe | object | `{}` | Custom readinessProbe to override the default /status httpGet | -| jira.readinessProbe.enabled | bool | `true` | Whether to apply the readinessProbe check to pod. | -| jira.readinessProbe.failureThreshold | int | `10` | The number of consecutive failures of the Jira container readiness probe before the pod fails readiness checks. | -| jira.readinessProbe.initialDelaySeconds | int | `10` | The initial delay (in seconds) for the Jira container readiness probe, after which the probe will start running. | -| jira.readinessProbe.periodSeconds | int | `5` | How often (in seconds) the Jira container readiness probe will run | -| jira.readinessProbe.timeoutSeconds | int | `1` | Number of seconds after which the probe times out | -| jira.resources.container.requests.cpu | string | `"2"` | Initial CPU request by Jira pod | -| jira.resources.container.requests.memory | string | `"2G"` | Initial Memory request by Jira pod | -| jira.resources.jvm.maxHeap | string | `"768m"` | The maximum amount of heap memory that will be used by the Jira JVM | -| jira.resources.jvm.minHeap | string | `"384m"` | The minimum amount of heap memory that will be used by the Jira JVM | -| jira.resources.jvm.reservedCodeCache | string | `"512m"` | The memory reserved for the Jira JVM code cache | +| jira.additionalLibraries | list | `[]` | Specifies a list of additional Java libraries that should be added to the Jira container. Each item in the list should specify the name of the volume that contains the library, as well as the name of the library file within that volume's root directory. Optionally, a subDirectory field can be included to specify which directory in the volume contains the library file. 
Additional details: https://atlassian.github.io/data-center-helm-charts/examples/external_libraries/EXTERNAL_LIBS/ | +| jira.additionalPorts | list | `[]` | Defines any additional ports for the Jira container. | +| jira.additionalVolumeClaimTemplates | list | `[]` | Defines additional volumeClaimTemplates that should be applied to the Jira pod. Note that this will not create any corresponding volume mounts; those needs to be defined in jira.additionalVolumeMounts | +| jira.additionalVolumeMounts | list | `[]` | Defines any additional volumes mounts for the Jira container. These can refer to existing volumes, or new volumes can be defined via 'volumes.additional'. | +| jira.clustering.enabled | bool | `false` | Set to 'true' if Data Center clustering should be enabled This will automatically configure cluster peer discovery between cluster nodes. | +| jira.containerSecurityContext | object | `{}` | Standard K8s field that holds security configurations that will be applied to a container. https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | +| jira.forceConfigUpdate | bool | `false` | The Docker entrypoint.py generates application configuration on first start; not all of these files are regenerated on subsequent starts. By default, dbconfig.xml is generated only once. Set `forceConfigUpdate` to true to change this behavior. | +| jira.livenessProbe.enabled | bool | `false` | Whether to apply the livenessProbe check to pod. | +| jira.livenessProbe.failureThreshold | int | `12` | The number of consecutive failures of the Jira container liveness probe before the pod fails liveness checks. | +| jira.livenessProbe.initialDelaySeconds | int | `60` | Time to wait before starting the first probe | +| jira.livenessProbe.periodSeconds | int | `5` | How often (in seconds) the Jira container liveness probe will run | +| jira.livenessProbe.timeoutSeconds | int | `1` | Number of seconds after which the probe times out | +| jira.ports.ehcache | int | `40001` | Ehcache port | +| jira.ports.ehcacheobject | int | `40011` | Ehcache object port | +| jira.ports.http | int | `8080` | The port on which the Jira container listens for HTTP traffic | +| jira.readinessProbe.customProbe | object | `{}` | Custom readinessProbe to override the default /status httpGet | +| jira.readinessProbe.enabled | bool | `true` | Whether to apply the readinessProbe check to pod. | +| jira.readinessProbe.failureThreshold | int | `10` | The number of consecutive failures of the Jira container readiness probe before the pod fails readiness checks. | +| jira.readinessProbe.initialDelaySeconds | int | `10` | The initial delay (in seconds) for the Jira container readiness probe, after which the probe will start running. 
| +| jira.readinessProbe.periodSeconds | int | `5` | How often (in seconds) the Jira container readiness probe will run | +| jira.readinessProbe.timeoutSeconds | int | `1` | Number of seconds after which the probe times out | +| jira.resources.container.requests.cpu | string | `"2"` | Initial CPU request by Jira pod | +| jira.resources.container.requests.memory | string | `"2G"` | Initial Memory request by Jira pod | +| jira.resources.jvm.maxHeap | string | `"768m"` | The maximum amount of heap memory that will be used by the Jira JVM | +| jira.resources.jvm.minHeap | string | `"384m"` | The minimum amount of heap memory that will be used by the Jira JVM | +| jira.resources.jvm.reservedCodeCache | string | `"512m"` | The memory reserved for the Jira JVM code cache | | jira.s3Storage.avatars.bucketName | string | `nil` | | | jira.s3Storage.avatars.bucketRegion | string | `nil` | | | jira.s3Storage.avatars.endpointOverride | string | `nil` | | -| jira.securityContext.fsGroup | int | `2001` | The GID used by the Jira docker image GID will default to 2001 if not supplied and securityContextEnabled is set to true. This is intended to ensure that the shared-home volume is group-writeable by the GID used by the Jira container. However, this doesn't appear to work for NFS volumes due to a K8s bug: https://github.com/kubernetes/examples/issues/260 | -| jira.securityContextEnabled | bool | `true` | | -| jira.seraphConfig | object | `{"autoLoginCookieAge":"1209600","generateByHelm":false}` | By default seraph-config.xml is generated in the container entrypoint from a template shipped with an official Jira image. However, seraph-config.xml generation may fail if container is not run as root, which is a common case if Jira is deployed to OpenShift. | -| jira.seraphConfig.generateByHelm | bool | `false` | Mount seraph-config.xml as a ConfigMap. Override configuration elements if necessary | -| jira.service.annotations | object | `{}` | Additional annotations to apply to the Service | -| jira.service.contextPath | string | `nil` | The Tomcat context path that Jira will use. The ATL_TOMCAT_CONTEXTPATH will be set automatically. | -| jira.service.loadBalancerIP | string | `nil` | Use specific loadBalancerIP. Only applies to service type LoadBalancer. | -| jira.service.port | int | `80` | The port on which the Jira K8s Service will listen | -| jira.service.sessionAffinity | string | `"None"` | Session affinity type. If you want to make sure that connections from a particular client are passed to the same pod each time, set sessionAffinity to ClientIP. See: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity | -| jira.service.sessionAffinityConfig | object | `{"clientIP":{"timeoutSeconds":null}}` | Session affinity configuration | -| jira.service.sessionAffinityConfig.clientIP.timeoutSeconds | string | `nil` | Specifies the seconds of ClientIP type session sticky time. The value must be > 0 && <= 86400(for 1 day) if ServiceAffinity == "ClientIP". Default value is 10800 (for 3 hours). | -| jira.service.type | string | `"ClusterIP"` | The type of K8s service to use for Jira | -| jira.setPermissions | bool | `true` | Boolean to define whether to set local home directory permissions on startup of Jira container. Set to 'false' to disable this behaviour. | -| jira.shutdown.command | string | `"/shutdown-wait.sh"` | By default pods will be stopped via a [preStop hook](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/), using a script supplied by the Docker image. 
If any other shutdown behaviour is needed it can be achieved by overriding this value. Note that the shutdown command needs to wait for the application shutdown completely before exiting; see [the default command](https://bitbucket.org/atlassian-docker/docker-atlassian-jira/src/master/shutdown-wait.sh) for details. | -| jira.shutdown.terminationGracePeriodSeconds | int | `30` | The termination grace period for pods during shutdown. This should be set to the internal grace period, plus a small buffer to allow the JVM to fully terminate. | -| jira.startupProbe.enabled | bool | `false` | Whether to apply the startupProbe check to pod. | -| jira.startupProbe.failureThreshold | int | `120` | The number of consecutive failures of the Jira container startup probe before the pod fails startup checks. | -| jira.startupProbe.initialDelaySeconds | int | `60` | Time to wait before starting the first probe | -| jira.startupProbe.periodSeconds | int | `5` | How often (in seconds) the Jira container startup probe will run | -| jira.tomcatConfig | object | `{"acceptCount":"10","connectionTimeout":"20000","customServerXml":"","enableLookups":"false","generateByHelm":false,"maxHttpHeaderSize":"8192","maxThreads":"100","mgmtPort":"8005","minSpareThreads":"10","port":"8080","protocol":"HTTP/1.1","proxyName":null,"proxyPort":null,"redirectPort":"8443","scheme":null,"secure":null}` | By default Tomcat's server.xml is generated in the container entrypoint from a template shipped with an official Jira image. However, server.xml generation may fail if container is not run as root, which is a common case if Jira is deployed to OpenShift. | -| jira.tomcatConfig.customServerXml | string | `""` | Custom server.xml to be mounted into /opt/atlassian/jira/conf | -| jira.tomcatConfig.generateByHelm | bool | `false` | Mount server.xml as a ConfigMap. Override configuration elements if necessary | -| jira.topologySpreadConstraints | list | `[]` | Defines topology spread constraints for Jira pods. See details: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ | -| jira.useHelmReleaseNameAsContainerName | bool | `false` | Whether the main container should acquire helm release name. The default, the container name is jira (Helm chart name) | -| monitoring.exposeJmxMetrics | bool | `false` | Expose JMX metrics with jmx_exporter https://github.com/prometheus/jmx_exporter | -| monitoring.fetchJmxExporterJar | bool | `true` | Fetch jmx_exporter jar from the image. If set to false make sure to manually copy the jar to shared home and provide an absolute path in jmxExporterCustomJarLocation | -| monitoring.grafana.createDashboards | bool | `false` | Create ConfigMaps with Grafana dashboards | -| monitoring.grafana.dashboardAnnotations | object | `{}` | Annotations added to Grafana dashboards ConfigMaps. 
See: https://github.com/kiwigrid/k8s-sidecar#usage | -| monitoring.grafana.dashboardLabels | object | `{}` | Label selector for Grafana dashboard importer sidecar | -| monitoring.jmxExporterCustomConfig | object | `{}` | Custom JMX config with the rules | -| monitoring.jmxExporterCustomJarLocation | string | `nil` | Location of jmx_exporter jar file if mounted from a secret or manually copied to shared home | -| monitoring.jmxExporterImageRepo | string | `"bitnami/jmx-exporter"` | Image repository with jmx_exporter jar | -| monitoring.jmxExporterImageTag | string | `"0.18.0"` | Image tag to be used to pull jmxExporterImageRepo | -| monitoring.jmxExporterInitContainer | object | `{"customSecurityContext":{},"resources":{},"runAsRoot":true}` | JMX exporter init container configuration | -| monitoring.jmxExporterInitContainer.customSecurityContext | object | `{}` | Custom SecurityContext for the jmx exporter init container | -| monitoring.jmxExporterInitContainer.resources | object | `{}` | Resources requests and limits for the JMX exporter init container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | -| monitoring.jmxExporterInitContainer.runAsRoot | bool | `true` | Whether to run JMX exporter init container as root to copy JMX exporter binary to shared home volume. Set to false if running containers as root is not allowed in the cluster. | -| monitoring.jmxExporterPort | int | `9999` | Port number on which metrics will be available | -| monitoring.jmxExporterPortType | string | `"ClusterIP"` | JMX exporter port type | -| monitoring.jmxServiceAnnotations | object | `{}` | Annotations added to the jmx service | -| monitoring.serviceMonitor.create | bool | `false` | Create ServiceMonitor to start scraping metrics. ServiceMonitor CRD needs to be created in advance. | -| monitoring.serviceMonitor.prometheusLabelSelector | object | `{}` | ServiceMonitorSelector of the prometheus instance. | -| monitoring.serviceMonitor.scrapeIntervalSeconds | int | `30` | Scrape interval for the JMX service. | -| nodeSelector | object | `{}` | Standard K8s node-selectors that will be applied to all Jira pods | -| podAnnotations | object | `{}` | Custom annotations that will be applied to all Jira pods | -| podDisruptionBudget | object | `{"annotations":{},"enabled":false,"labels":{},"maxUnavailable":null,"minAvailable":null}` | PodDisruptionBudget: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ You can specify only one of maxUnavailable and minAvailable in a single PodDisruptionBudget. When both minAvailable and maxUnavailable are set, maxUnavailable takes precedence. | -| podLabels | object | `{}` | Custom labels that will be applied to all Jira pods | -| priorityClassName | string | `nil` | Priority class for the application pods. The PriorityClass with this name needs to be available in the cluster. For details see https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass | -| replicaCount | int | `1` | The initial number of Jira pods that should be started at deployment time. Note that Jira requires manual configuration via the browser post deployment after the first pod is deployed. This configuration must be completed before scaling up additional pods. As such this value should always be kept as 1, but can be altered once manual configuration is complete. | -| schedulerName | string | `nil` | Standard K8s schedulerName that will be applied to all Jira pods. 
Check Kubernetes documentation on how to configure multiple schedulers: https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/#specify-schedulers-for-pods | -| serviceAccount.annotations | object | `{}` | Annotations to add to the ServiceAccount (if created) | -| serviceAccount.create | bool | `true` | Set to 'true' if a ServiceAccount should be created, or 'false' if it already exists. | +| jira.securityContext.fsGroup | int | `2001` | The GID used by the Jira docker image GID will default to 2001 if not supplied and securityContextEnabled is set to true. This is intended to ensure that the shared-home volume is group-writeable by the GID used by the Jira container. However, this doesn't appear to work for NFS volumes due to a K8s bug: https://github.com/kubernetes/examples/issues/260 | +| jira.securityContextEnabled | bool | `true` | Whether to apply security context to pod. | +| jira.seraphConfig | object | `{"autoLoginCookieAge":"1209600","generateByHelm":false}` | By default seraph-config.xml is generated in the container entrypoint from a template shipped with an official Jira image. However, seraph-config.xml generation may fail if container is not run as root, which is a common case if Jira is deployed to OpenShift. | +| jira.seraphConfig.generateByHelm | bool | `false` | Mount seraph-config.xml as a ConfigMap. Override configuration elements if necessary | +| jira.service.annotations | object | `{}` | Additional annotations to apply to the Service | +| jira.service.contextPath | string | `nil` | The Tomcat context path that Jira will use. The ATL_TOMCAT_CONTEXTPATH will be set automatically. | +| jira.service.loadBalancerIP | string | `nil` | Use specific loadBalancerIP. Only applies to service type LoadBalancer. | +| jira.service.port | int | `80` | The port on which the Jira K8s Service will listen | +| jira.service.sessionAffinity | string | `"None"` | Session affinity type. If you want to make sure that connections from a particular client are passed to the same pod each time, set sessionAffinity to ClientIP. See: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity | +| jira.service.sessionAffinityConfig | object | `{"clientIP":{"timeoutSeconds":null}}` | Session affinity configuration | +| jira.service.sessionAffinityConfig.clientIP.timeoutSeconds | string | `nil` | Specifies the seconds of ClientIP type session sticky time. The value must be > 0 && <= 86400(for 1 day) if ServiceAffinity == "ClientIP". Default value is 10800 (for 3 hours). | +| jira.service.type | string | `"ClusterIP"` | The type of K8s service to use for Jira | +| jira.setPermissions | bool | `true` | Boolean to define whether to set local home directory permissions on startup of Jira container. Set to 'false' to disable this behaviour. | +| jira.shutdown.command | string | `"/shutdown-wait.sh"` | By default pods will be stopped via a [preStop hook](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/), using a script supplied by the Docker image. If any other shutdown behaviour is needed it can be achieved by overriding this value. Note that the shutdown command needs to wait for the application shutdown completely before exiting; see [the default command](https://bitbucket.org/atlassian-docker/docker-atlassian-jira/src/master/shutdown-wait.sh) for details. | +| jira.shutdown.terminationGracePeriodSeconds | int | `30` | The termination grace period for pods during shutdown. 
This should be set to the internal grace period, plus a small buffer to allow the JVM to fully terminate. | +| jira.startupProbe.enabled | bool | `false` | Whether to apply the startupProbe check to pod. | +| jira.startupProbe.failureThreshold | int | `120` | The number of consecutive failures of the Jira container startup probe before the pod fails startup checks. | +| jira.startupProbe.initialDelaySeconds | int | `60` | Time to wait before starting the first probe | +| jira.startupProbe.periodSeconds | int | `5` | How often (in seconds) the Jira container startup probe will run | +| jira.tomcatConfig | object | `{"acceptCount":"10","connectionTimeout":"20000","customServerXml":"","enableLookups":"false","generateByHelm":false,"maxHttpHeaderSize":"8192","maxThreads":"100","mgmtPort":"8005","minSpareThreads":"10","port":"8080","protocol":"HTTP/1.1","proxyName":null,"proxyPort":null,"redirectPort":"8443","scheme":null,"secure":null}` | By default Tomcat's server.xml is generated in the container entrypoint from a template shipped with an official Jira image. However, server.xml generation may fail if container is not run as root, which is a common case if Jira is deployed to OpenShift. | +| jira.tomcatConfig.customServerXml | string | `""` | Custom server.xml to be mounted into /opt/atlassian/jira/conf | +| jira.tomcatConfig.generateByHelm | bool | `false` | Mount server.xml as a ConfigMap. Override configuration elements if necessary | +| jira.topologySpreadConstraints | list | `[]` | Defines topology spread constraints for Jira pods. See details: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ | +| jira.useHelmReleaseNameAsContainerName | bool | `false` | Whether the main container should acquire helm release name. The default, the container name is jira (Helm chart name) | +| monitoring.exposeJmxMetrics | bool | `false` | Expose JMX metrics with jmx_exporter https://github.com/prometheus/jmx_exporter | +| monitoring.fetchJmxExporterJar | bool | `true` | Fetch jmx_exporter jar from the image. If set to false make sure to manually copy the jar to shared home and provide an absolute path in jmxExporterCustomJarLocation | +| monitoring.grafana.createDashboards | bool | `false` | Create ConfigMaps with Grafana dashboards | +| monitoring.grafana.dashboardAnnotations | object | `{}` | Annotations added to Grafana dashboards ConfigMaps. 
See: https://github.com/kiwigrid/k8s-sidecar#usage | +| monitoring.grafana.dashboardLabels | object | `{}` | Label selector for Grafana dashboard importer sidecar | +| monitoring.jmxExporterCustomConfig | object | `{}` | Custom JMX config with the rules | +| monitoring.jmxExporterCustomJarLocation | string | `nil` | Location of jmx_exporter jar file if mounted from a secret or manually copied to shared home | +| monitoring.jmxExporterImageRepo | string | `"bitnami/jmx-exporter"` | Image repository with jmx_exporter jar | +| monitoring.jmxExporterImageTag | string | `"0.18.0"` | Image tag to be used to pull jmxExporterImageRepo | +| monitoring.jmxExporterInitContainer | object | `{"customSecurityContext":{},"resources":{},"runAsRoot":true}` | JMX exporter init container configuration | +| monitoring.jmxExporterInitContainer.customSecurityContext | object | `{}` | Custom SecurityContext for the jmx exporter init container | +| monitoring.jmxExporterInitContainer.resources | object | `{}` | Resources requests and limits for the JMX exporter init container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | +| monitoring.jmxExporterInitContainer.runAsRoot | bool | `true` | Whether to run JMX exporter init container as root to copy JMX exporter binary to shared home volume. Set to false if running containers as root is not allowed in the cluster. | +| monitoring.jmxExporterPort | int | `9999` | Port number on which metrics will be available | +| monitoring.jmxExporterPortType | string | `"ClusterIP"` | JMX exporter port type | +| monitoring.jmxServiceAnnotations | object | `{}` | Annotations added to the jmx service | +| monitoring.serviceMonitor.create | bool | `false` | Create ServiceMonitor to start scraping metrics. ServiceMonitor CRD needs to be created in advance. | +| monitoring.serviceMonitor.prometheusLabelSelector | object | `{}` | ServiceMonitorSelector of the prometheus instance. | +| monitoring.serviceMonitor.scrapeIntervalSeconds | int | `30` | Scrape interval for the JMX service. | +| nodeSelector | object | `{}` | Standard K8s node-selectors that will be applied to all Jira pods | +| podAnnotations | object | `{}` | Custom annotations that will be applied to all Jira pods | +| podDisruptionBudget | object | `{"annotations":{},"enabled":false,"labels":{},"maxUnavailable":null,"minAvailable":null}` | PodDisruptionBudget: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ You can specify only one of maxUnavailable and minAvailable in a single PodDisruptionBudget. When both minAvailable and maxUnavailable are set, maxUnavailable takes precedence. | +| podLabels | object | `{}` | Custom labels that will be applied to all Jira pods | +| priorityClassName | string | `nil` | Priority class for the application pods. The PriorityClass with this name needs to be available in the cluster. For details see https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass | +| replicaCount | int | `1` | The initial number of Jira pods that should be started at deployment time. Note that Jira requires manual configuration via the browser post deployment after the first pod is deployed. This configuration must be completed before scaling up additional pods. As such this value should always be kept as 1, but can be altered once manual configuration is complete. | +| schedulerName | string | `nil` | Standard K8s schedulerName that will be applied to all Jira pods. 
Check Kubernetes documentation on how to configure multiple schedulers: https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/#specify-schedulers-for-pods | +| serviceAccount.annotations | object | `{}` | Annotations to add to the ServiceAccount (if created) | +| serviceAccount.create | bool | `true` | Set to 'true' if a ServiceAccount should be created, or 'false' if it already exists. | | serviceAccount.eksIrsa.roleArn | string | `nil` | | -| serviceAccount.imagePullSecrets | list | `[]` | For Docker images hosted in private registries, define the list of image pull secrets that should be utilized by the created ServiceAccount https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | -| serviceAccount.name | string | `nil` | The name of the ServiceAccount to be used by the pods. If not specified, but the "serviceAccount.create" flag is set to 'true', then the ServiceAccount name will be auto-generated, otherwise the 'default' ServiceAccount will be used. https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server | -| tolerations | list | `[]` | Standard K8s tolerations that will be applied to all Jira pods | -| volumes.additional | list | `[]` | Defines additional volumes that should be applied to all Jira pods. Note that this will not create any corresponding volume mounts; those needs to be defined in jira.additionalVolumeMounts | -| volumes.localHome.customVolume | object | `{}` | Static provisioning of local-home using K8s PVs and PVCs NOTE: Due to the ephemeral nature of pods this approach to provisioning volumes for pods is not recommended. Dynamic provisioning described above is the prescribed approach. When 'persistentVolumeClaim.create' is 'false', then this value can be used to define a standard K8s volume that will be used for the local-home volume(s). If not defined, then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object. https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static | -| volumes.localHome.mountPath | string | `"/var/atlassian/application-data/jira"` | Specifies the path in the Jira container to which the local-home volume will be mounted. | -| volumes.localHome.persistentVolumeClaim.create | bool | `false` | If 'true', then a 'PersistentVolume' and 'PersistentVolumeClaim' will be dynamically created for each pod based on the 'StorageClassName' supplied below. | -| volumes.localHome.persistentVolumeClaim.resources | object | `{"requests":{"storage":"1Gi"}}` | Specifies the standard K8s resource requests and/or limits for the local-home volume claims. | -| volumes.localHome.persistentVolumeClaim.storageClassName | string | `nil` | Specify the name of the 'StorageClass' that should be used for the local-home volume claim. | -| volumes.sharedHome.customVolume | object | `{}` | Static provisioning of shared-home using K8s PVs and PVCs When 'persistentVolumeClaim.create' is 'false', then this value can be used to define a standard K8s volume that will be used for the shared-home volume. If not defined, then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object. 
https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static https://atlassian.github.io/data-center-helm-charts/examples/storage/aws/SHARED_STORAGE/ | -| volumes.sharedHome.mountPath | string | `"/var/atlassian/application-data/shared-home"` | Specifies the path in the Jira container to which the shared-home volume will be mounted. | -| volumes.sharedHome.nfsPermissionFixer.command | string | `nil` | By default, the fixer will change the group ownership of the volume's root directory to match the Jira container's GID (2001), and then ensures the directory is group-writeable. If this is not the desired behaviour, command used can be specified here. | -| volumes.sharedHome.nfsPermissionFixer.enabled | bool | `true` | If 'true', this will alter the shared-home volume's root directory so that Jira can write to it. This is a workaround for a K8s bug affecting NFS volumes: https://github.com/kubernetes/examples/issues/260 | -| volumes.sharedHome.nfsPermissionFixer.imageRepo | string | `"alpine"` | Image repository for the permission fixer init container. Defaults to alpine | -| volumes.sharedHome.nfsPermissionFixer.imageTag | string | `"latest"` | Image tag for the permission fixer init container. Defaults to latest | -| volumes.sharedHome.nfsPermissionFixer.mountPath | string | `"/shared-home"` | The path in the K8s initContainer where the shared-home volume will be mounted | -| volumes.sharedHome.nfsPermissionFixer.resources | object | `{}` | Resources requests and limits for nfsPermissionFixer init container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | -| volumes.sharedHome.persistentVolumeClaim.create | bool | `false` | If 'true', then a 'PersistentVolumeClaim' and 'PersistentVolume' will be dynamically created for shared-home based on the 'StorageClassName' supplied below. | -| volumes.sharedHome.persistentVolumeClaim.resources | object | `{"requests":{"storage":"1Gi"}}` | Specifies the standard K8s resource requests and/or limits for the shared-home volume claims. | -| volumes.sharedHome.persistentVolumeClaim.storageClassName | string | `nil` | Specify the name of the 'StorageClass' that should be used for the 'shared-home' | -| volumes.sharedHome.subPath | string | `nil` | Specifies the sub-directory of the shared-home volume that will be mounted in to the Jira container. | \ No newline at end of file +| serviceAccount.imagePullSecrets | list | `[]` | For Docker images hosted in private registries, define the list of image pull secrets that should be utilized by the created ServiceAccount https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod | +| serviceAccount.name | string | `nil` | The name of the ServiceAccount to be used by the pods. If not specified, but the "serviceAccount.create" flag is set to 'true', then the ServiceAccount name will be auto-generated, otherwise the 'default' ServiceAccount will be used. 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server |
+| testPods | object | `{"affinity":{},"annotations":{},"image":{"permissionsTestContainer":"debian:stable-slim","statusTestContainer":"alpine:latest"},"labels":{},"nodeSelector":{},"schedulerName":null,"tolerations":[]}` | Metadata and pod spec for pods started in Helm tests |
+| tolerations | list | `[]` | Standard K8s tolerations that will be applied to all Jira pods |
+| volumes.additional | list | `[]` | Defines additional volumes that should be applied to all Jira pods. Note that this will not create any corresponding volume mounts; those need to be defined in jira.additionalVolumeMounts |
+| volumes.localHome.customVolume | object | `{}` | Static provisioning of local-home using K8s PVs and PVCs NOTE: Due to the ephemeral nature of pods this approach to provisioning volumes for pods is not recommended. Dynamic provisioning described above is the prescribed approach. When 'persistentVolumeClaim.create' is 'false', then this value can be used to define a standard K8s volume that will be used for the local-home volume(s). If not defined, then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object. https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static |
+| volumes.localHome.mountPath | string | `"/var/atlassian/application-data/jira"` | Specifies the path in the Jira container to which the local-home volume will be mounted. |
+| volumes.localHome.persistentVolumeClaim.create | bool | `false` | If 'true', then a 'PersistentVolume' and 'PersistentVolumeClaim' will be dynamically created for each pod based on the 'StorageClassName' supplied below. |
+| volumes.localHome.persistentVolumeClaim.resources | object | `{"requests":{"storage":"1Gi"}}` | Specifies the standard K8s resource requests and/or limits for the local-home volume claims. |
+| volumes.localHome.persistentVolumeClaim.storageClassName | string | `nil` | Specify the name of the 'StorageClass' that should be used for the local-home volume claim. |
+| volumes.sharedHome.customVolume | object | `{}` | Static provisioning of shared-home using K8s PVs and PVCs When 'persistentVolumeClaim.create' is 'false', then this value can be used to define a standard K8s volume that will be used for the shared-home volume. If not defined, then an 'emptyDir' volume is utilised. Having provisioned a 'PersistentVolume', specify the bound 'persistentVolumeClaim.claimName' for the 'customVolume' object. https://kubernetes.io/docs/concepts/storage/persistent-volumes/#static https://atlassian.github.io/data-center-helm-charts/examples/storage/aws/SHARED_STORAGE/ |
+| volumes.sharedHome.mountPath | string | `"/var/atlassian/application-data/shared-home"` | Specifies the path in the Jira container to which the shared-home volume will be mounted. |
+| volumes.sharedHome.nfsPermissionFixer.command | string | `nil` | By default, the fixer will change the group ownership of the volume's root directory to match the Jira container's GID (2001), and then ensure the directory is group-writeable. If this is not the desired behaviour, the command to be used can be specified here. |
+| volumes.sharedHome.nfsPermissionFixer.enabled | bool | `true` | If 'true', this will alter the shared-home volume's root directory so that Jira can write to it.
This is a workaround for a K8s bug affecting NFS volumes: https://github.com/kubernetes/examples/issues/260 | +| volumes.sharedHome.nfsPermissionFixer.imageRepo | string | `"alpine"` | Image repository for the permission fixer init container. Defaults to alpine | +| volumes.sharedHome.nfsPermissionFixer.imageTag | string | `"latest"` | Image tag for the permission fixer init container. Defaults to latest | +| volumes.sharedHome.nfsPermissionFixer.mountPath | string | `"/shared-home"` | The path in the K8s initContainer where the shared-home volume will be mounted | +| volumes.sharedHome.nfsPermissionFixer.resources | object | `{}` | Resources requests and limits for nfsPermissionFixer init container See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | +| volumes.sharedHome.persistentVolumeClaim.create | bool | `false` | If 'true', then a 'PersistentVolumeClaim' and 'PersistentVolume' will be dynamically created for shared-home based on the 'StorageClassName' supplied below. | +| volumes.sharedHome.persistentVolumeClaim.resources | object | `{"requests":{"storage":"1Gi"}}` | Specifies the standard K8s resource requests and/or limits for the shared-home volume claims. | +| volumes.sharedHome.persistentVolumeClaim.storageClassName | string | `nil` | Specify the name of the 'StorageClass' that should be used for the 'shared-home' volume claim. | +| volumes.sharedHome.subPath | string | `nil` | Specifies the sub-directory of the shared-home volume that will be mounted in to the Jira container. | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/src/main/charts/jira/templates/tests/test-application-status.yaml b/src/main/charts/jira/templates/tests/test-application-status.yaml index a7dbf0d57..e15de8fdb 100644 --- a/src/main/charts/jira/templates/tests/test-application-status.yaml +++ b/src/main/charts/jira/templates/tests/test-application-status.yaml @@ -5,13 +5,25 @@ metadata: annotations: "helm.sh/hook": test "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + {{- if not .Values.testPods.annotations }} {{- include "jira.podAnnotations" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.annotations }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} labels: + {{- if not .Values.testPods.labels }} {{- include "common.labels.commonLabels" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.labels }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} spec: containers: - name: test - image: alpine + image: {{ .Values.testPods.image.statusTestContainer }} imagePullPolicy: IfNotPresent env: - name: STATUS_URL @@ -24,4 +36,19 @@ spec: STATUS=$(curl -s "$STATUS_URL") echo "Verifying application state is RUNNING or FIRST_RUN: $STATUS" echo $STATUS | jq -e '.state|test("RUNNING|FIRST_RUN")' - restartPolicy: Never \ No newline at end of file + restartPolicy: Never + {{- with .Values.testPods.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.testPods.schedulerName }} + schedulerName: {{ .Values.testPods.schedulerName | quote }} + {{- end }} diff --git a/src/main/charts/jira/templates/tests/test-database-connectivity.yaml b/src/main/charts/jira/templates/tests/test-database-connectivity.yaml index 7de742f2e..ce406c748 100644 --- a/src/main/charts/jira/templates/tests/test-database-connectivity.yaml +++ b/src/main/charts/jira/templates/tests/test-database-connectivity.yaml @@ -6,9 +6,21 @@ metadata: annotations: "helm.sh/hook": test "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + {{- if not .Values.testPods.annotations }} {{- include "jira.podAnnotations" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.annotations }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} labels: + {{- if not .Values.testPods.labels }} {{- include "common.labels.commonLabels" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.labels }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} spec: serviceAccountName: {{ include "jira.serviceAccountName" . }} containers: @@ -53,4 +65,19 @@ spec: cat output.txt grep -q "Connection established OK" output.txt restartPolicy: Never -{{ end }} \ No newline at end of file + {{- with .Values.testPods.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.testPods.schedulerName }} + schedulerName: {{ .Values.testPods.schedulerName | quote }} + {{- end }} +{{ end }} diff --git a/src/main/charts/jira/templates/tests/test-shared-home-permissions.yaml b/src/main/charts/jira/templates/tests/test-shared-home-permissions.yaml index 1aa549cc6..8be5c96e6 100644 --- a/src/main/charts/jira/templates/tests/test-shared-home-permissions.yaml +++ b/src/main/charts/jira/templates/tests/test-shared-home-permissions.yaml @@ -5,9 +5,21 @@ metadata: annotations: "helm.sh/hook": test "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + {{- if not .Values.testPods.annotations }} {{- include "jira.podAnnotations" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.annotations }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} labels: + {{- if not .Values.testPods.labels }} {{- include "common.labels.commonLabels" . | nindent 4 }} + {{- else }} + {{- range $key, $value := .Values.testPods.labels }} + {{ $key | quote }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} spec: containers: - name: test @@ -44,4 +56,19 @@ spec: rm /shared-home/permissions-test volumes: {{ include "jira.volumes.sharedHome" . | nindent 4 }} - restartPolicy: Never \ No newline at end of file + restartPolicy: Never + {{- with .Values.testPods.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.testPods.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.testPods.schedulerName }} + schedulerName: {{ .Values.testPods.schedulerName | quote }} + {{- end }} diff --git a/src/main/charts/jira/values.yaml b/src/main/charts/jira/values.yaml index af8745a82..2bb238f1b 100644 --- a/src/main/charts/jira/values.yaml +++ b/src/main/charts/jira/values.yaml @@ -1133,3 +1133,16 @@ atlassianAnalyticsAndSupport: # which can be optionally including to support.zip # enabled: true + +# -- Metadata and pod spec for pods started in Helm tests +# +testPods: + labels: {} + annotations: {} + nodeSelector: {} + tolerations: [] + affinity: {} + schedulerName: + image: + permissionsTestContainer: debian:stable-slim + statusTestContainer: alpine:latest diff --git a/src/test/java/test/TestPodsTest.java b/src/test/java/test/TestPodsTest.java new file mode 100644 index 000000000..4bbf24282 --- /dev/null +++ b/src/test/java/test/TestPodsTest.java @@ -0,0 +1,189 @@ +package test; + +import com.fasterxml.jackson.databind.JsonNode; +import org.assertj.vavr.api.VavrAssertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestInfo; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; +import test.helm.Helm; +import test.model.*; + +import java.util.List; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static test.jackson.JsonNodeAssert.assertThat; + +class TestPodsTest { + private Helm helm; + + @BeforeEach + void initHelm(TestInfo testInfo) { + helm = new Helm(testInfo); + } + + @ParameterizedTest + @EnumSource(value = Product.class, names = {"bamboo_agent"}, mode = EnumSource.Mode.EXCLUDE) + void test_pods_default_annotations(Product product) throws Exception { + List testPods = List.of("application-status-test", "shared-home-permissions-test", "db-connectivity-test"); + if (product.name().equals("crowd")) { + testPods = List.of("application-status-test", "shared-home-permissions-test"); + } + for (String testPod : testPods) { + final var resources = helm.captureKubeResourcesFromHelmChart(product, Map.of( + "database.credentials.secretName", "db-secret" + )); + final var pod = resources.get(Kind.Pod, Pod.class, product.getHelmReleaseName() + "-" + testPod); + assertThat(pod.getAnnotations()).isObject(Map.of( + "helm.sh/hook", "test", + "helm.sh/hook-delete-policy", "before-hook-creation,hook-succeeded" + )); + } + } + + @ParameterizedTest + @EnumSource(value = Product.class, names = {"bamboo_agent"}, mode = EnumSource.Mode.EXCLUDE) + void test_pods_custom_annotations(Product product) throws Exception { + + List testPods = List.of("application-status-test", "shared-home-permissions-test", "db-connectivity-test"); + if (product.name().equals("crowd")) { + testPods = List.of("application-status-test", "shared-home-permissions-test"); + } + for (String testPod : testPods) { + final var resources = helm.captureKubeResourcesFromHelmChart(product, Map.of( + "database.credentials.secretName", "db-secret", + "testPods.annotations.podAnnotation1", "podOfHumpbacks", + "testPods.annotations.podAnnotation2", "podOfOrcas" + )); + final var pod = resources.get(Kind.Pod, Pod.class, product.getHelmReleaseName() + "-" + testPod); + assertThat(pod.getAnnotations()).isObject(Map.of( + "podAnnotation1", "podOfHumpbacks", + "podAnnotation2", "podOfOrcas" + )); + } + } + + @ParameterizedTest + @EnumSource(value = Product.class, names = {"bamboo_agent"}, mode = EnumSource.Mode.EXCLUDE) + void test_pods_default_labels(Product 
product) throws Exception { + List testPods = List.of("application-status-test", "shared-home-permissions-test", "db-connectivity-test"); + if (product.name().equals("crowd")) { + testPods = List.of("application-status-test", "shared-home-permissions-test"); + } + String helmChartVersion = product.getHelmChartVersion(); + String appVersion = product.getAppVersion(); + for (String testPod : testPods) { + final var resources = helm.captureKubeResourcesFromHelmChart(product, Map.of( + "database.credentials.secretName", "db-secret" + )); + final var pod = resources.get(Kind.Pod, Pod.class, product.getHelmReleaseName() + "-" + testPod); + assertThat(pod.getMetadata().path("labels")).isObject(Map.of( + "helm.sh/chart", product.name() + "-" + helmChartVersion, + "app.kubernetes.io/name",product.name(), + "app.kubernetes.io/instance", product.getHelmReleaseName(), + "app.kubernetes.io/version", appVersion, + "app.kubernetes.io/managed-by", "Helm" + )); + } + } + + @ParameterizedTest + @EnumSource(value = Product.class, names = {"bamboo_agent"}, mode = EnumSource.Mode.EXCLUDE) + void test_pods_custom_labels(Product product) throws Exception { + List testPods = List.of("application-status-test", "shared-home-permissions-test", "db-connectivity-test"); + if (product.name().equals("crowd")) { + testPods = List.of("application-status-test", "shared-home-permissions-test"); + } + for (String testPod : testPods) { + final var resources = helm.captureKubeResourcesFromHelmChart(product, Map.of( + "database.credentials.secretName", "db-secret", + "testPods.labels.label1", "value1", + "testPods.labels.label2", "value2" + )); + final var pod = resources.get(Kind.Pod, Pod.class, product.getHelmReleaseName() + "-" + testPod); + assertThat(pod.getMetadata().path("labels")).isObject(Map.of( + "label1", "value1", + "label2", "value2" + )); + } + } + + @ParameterizedTest + @EnumSource(value = Product.class, names = {"bamboo_agent"}, mode = EnumSource.Mode.EXCLUDE) + void test_pods_custom_node_selector(Product product) throws Exception { + final var resources = helm.captureKubeResourcesFromHelmChart(product, Map.of( + "database.credentials.secretName", "db-secret", + "testPods.nodeSelector.nodename", "special-node" + )); + List testPods = List.of("application-status-test", "shared-home-permissions-test", "db-connectivity-test"); + if (product.name().equals("crowd")) { + testPods = List.of("application-status-test", "shared-home-permissions-test"); + } + for (String testPod : testPods) { + + final var pod = resources.get(Kind.Pod, Pod.class, product.getHelmReleaseName() + "-" + testPod); + + assertThat(pod.getSpec().path("nodeSelector").path("nodename")).hasTextEqualTo("special-node"); + } + } + + @ParameterizedTest + @EnumSource(value = Product.class, names = {"bamboo_agent"}, mode = EnumSource.Mode.EXCLUDE) + void test_pods_custom_scheduler_name(Product product) throws Exception { + final var resources = helm.captureKubeResourcesFromHelmChart(product, Map.of( + "database.credentials.secretName", "db-secret", + "testPods.schedulerName", "my-scheduler")); + List testPods = List.of("application-status-test", "shared-home-permissions-test", "db-connectivity-test"); + if (product.name().equals("crowd")) { + testPods = List.of("application-status-test", "shared-home-permissions-test"); + } + for (String testPod : testPods) { + final var pod = resources.get(Kind.Pod, Pod.class, product.getHelmReleaseName() + "-" + testPod); + assertEquals("my-scheduler", pod.getSpec().path("schedulerName").asText()); + + } + + } + + 
@ParameterizedTest + @EnumSource(value = Product.class, names = {"bamboo_agent"}, mode = EnumSource.Mode.EXCLUDE) + void test_pods_custom_tolerations(Product product) throws Exception { + final var resources = helm.captureKubeResourcesFromHelmChart(product, Map.of( + "database.credentials.secretName", "db-secret", + "testPods.tolerations[0].key", "other-pod", + "testPods.tolerations[0].operator", "Exists", + "testPods.tolerations[0].effect", "NoSchedule")); + List testPods = List.of("application-status-test", "shared-home-permissions-test", "db-connectivity-test"); + if (product.name().equals("crowd")) { + testPods = List.of("application-status-test", "shared-home-permissions-test"); + } + for (String testPod : testPods) { + final var pod = resources.get(Kind.Pod, Pod.class, product.getHelmReleaseName() + "-" + testPod); + JsonNode tolerations = pod.getSpec().get("tolerations"); + assertThat(tolerations).isArrayWithNumberOfChildren(1); + assertThat(tolerations.get(0).get("key")).hasTextEqualTo("other-pod"); + assertThat(tolerations.get(0).get("operator")).hasTextEqualTo("Exists"); + assertThat(tolerations.get(0).get("effect")).hasTextContaining("NoSchedule"); + } + } + + @ParameterizedTest + @EnumSource(value = Product.class, names = {"bamboo_agent"}, mode = EnumSource.Mode.EXCLUDE) + void test_pods_custom_affinity(Product product) throws Exception { + final var resources = helm.captureKubeResourcesFromHelmChart(product, Map.of( + "database.credentials.secretName", "db-secret", + "testPods.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key", "kubernetes.io/os", + "testPods.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator", "in", + "testPods.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0]", "centos")); + List testPods = List.of("application-status-test", "shared-home-permissions-test", "db-connectivity-test"); + if (product.name().equals("crowd")) { + testPods = List.of("application-status-test", "shared-home-permissions-test"); + } + for (String testPod : testPods) { + final var pod = resources.get(Kind.Pod, Pod.class, product.getHelmReleaseName() + "-" + testPod); + JsonNode affinity = pod.getSpec().get("affinity"); + assertThat(affinity.path("nodeAffinity").path("requiredDuringSchedulingIgnoredDuringExecution").path("nodeSelectorTerms").get(0).path("matchExpressions").get(0).path("values").get(0)).hasTextEqualTo("centos"); + } + } +} diff --git a/src/test/java/test/helm/Helm.java b/src/test/java/test/helm/Helm.java index 76b98ec12..76e282c67 100644 --- a/src/test/java/test/helm/Helm.java +++ b/src/test/java/test/helm/Helm.java @@ -1,11 +1,16 @@ package test.helm; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; import io.vavr.collection.HashMap; import org.junit.jupiter.api.TestInfo; import test.model.KubeResources; import test.model.Product; +import java.io.FileInputStream; import java.io.IOException; +import java.io.InputStream; import java.lang.reflect.Method; import java.nio.file.Files; import java.nio.file.Path; @@ -88,6 +93,36 @@ private static void captureHelmTemplateOutput(Product product, Path outputFile, }).isEqualTo(0); } + public static String getHelmChartVersion(Product product) { + + Path chartPath = getHelmChartPath(product); + String chartYamlPath = 
chartPath.resolve("Chart.yaml").toString(); + + try { + ObjectMapper objectMapper = new ObjectMapper(new YAMLFactory()); + JsonNode yamlData = objectMapper.readTree(Paths.get(chartYamlPath).toFile()); + return yamlData.get("version").asText(); + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } + + public static String getAppVersion(Product product) { + + Path chartPath = getHelmChartPath(product); + String chartYamlPath = chartPath.resolve("Chart.yaml").toString(); + + try { + ObjectMapper objectMapper = new ObjectMapper(new YAMLFactory()); + JsonNode yamlData = objectMapper.readTree(Paths.get(chartYamlPath).toFile()); + return yamlData.get("appVersion").asText(); + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } + public static String getHelmReleaseName(Product product) { return String.format("unittest-%s", product); } diff --git a/src/test/java/test/model/KubeResource.java b/src/test/java/test/model/KubeResource.java index eb2afc9be..e64ccbd11 100644 --- a/src/test/java/test/model/KubeResource.java +++ b/src/test/java/test/model/KubeResource.java @@ -61,6 +61,8 @@ static KubeResource wrap(JsonNode node) { return new Deployment(node); case ConfigMap: return new ConfigMap(node); + case Pod: + return new Pod(node); default: return new KubeResource(kind, node); } diff --git a/src/test/java/test/model/Pod.java b/src/test/java/test/model/Pod.java new file mode 100644 index 000000000..6eab4cd85 --- /dev/null +++ b/src/test/java/test/model/Pod.java @@ -0,0 +1,16 @@ +package test.model; + +import com.fasterxml.jackson.databind.JsonNode; +import io.vavr.collection.Array; +import io.vavr.collection.Traversable; + +public class Pod extends KubeResource { + Pod(JsonNode node) { + super(Kind.Pod, node); + } + + public JsonNode getPodMetadata() { + return getNode("metadata"); + } + +} diff --git a/src/test/java/test/model/Product.java b/src/test/java/test/model/Product.java index 9a569c02d..923a9c7a4 100644 --- a/src/test/java/test/model/Product.java +++ b/src/test/java/test/model/Product.java @@ -3,7 +3,7 @@ import test.helm.Helm; /* - * When adding additional products charts, products names comprising more than + * When adding additional products charts, products names comprising more than * one word should be separated by an underscore(s) "_". Hyphens "-" are not valid * when declaring enum names. See "bamboo_agent" below as an example of this */ @@ -76,15 +76,23 @@ public String getContainerGid() { public String getHelmReleaseName() { return Helm.getHelmReleaseName(this); } - + + public String getHelmChartVersion() { + return Helm.getHelmChartVersion(this); + } + + public String getAppVersion() { + return Helm.getAppVersion(this); + } + /* - * So that we can create Chart directories of the form: - * + * So that we can create Chart directories of the form: + * * "src/main/charts/bamboo-agent" - * + * * but also ensure our tests still work, we override this method * to replace underscores with hyphens. 
- * + * */ @Override public String toString() { diff --git a/src/test/resources/expected_helm_output/bamboo-agent/output.yaml b/src/test/resources/expected_helm_output/bamboo-agent/output.yaml index 501338172..b16f9fafd 100644 --- a/src/test/resources/expected_helm_output/bamboo-agent/output.yaml +++ b/src/test/resources/expected_helm_output/bamboo-agent/output.yaml @@ -46,6 +46,7 @@ spec: template: metadata: annotations: + checksum/config-jvm: 71e1317f2015f6192f7a43c6e3c21c93c72739255fc31462661c64aab894e286 labels: app.kubernetes.io/name: bamboo-agent app.kubernetes.io/instance: unittest-bamboo-agent diff --git a/src/test/resources/expected_helm_output/bamboo/output.yaml b/src/test/resources/expected_helm_output/bamboo/output.yaml index e37aef845..01c3cfae8 100644 --- a/src/test/resources/expected_helm_output/bamboo/output.yaml +++ b/src/test/resources/expected_helm_output/bamboo/output.yaml @@ -253,6 +253,16 @@ data: create: true imagePullSecrets: [] name: null + testPods: + affinity: {} + annotations: {} + image: + permissionsTestContainer: debian:stable-slim + statusTestContainer: alpine:latest + labels: {} + nodeSelector: {} + schedulerName: null + tolerations: [] tolerations: [] volumes: additional: [] @@ -386,6 +396,7 @@ spec: template: metadata: annotations: + checksum/config-jvm: 87b697c0ecf75445766b13f8c1aedeefbb0f656c5365a43e418d166380e67b0e labels: app.kubernetes.io/name: bamboo app.kubernetes.io/instance: unittest-bamboo diff --git a/src/test/resources/expected_helm_output/bitbucket/output.yaml b/src/test/resources/expected_helm_output/bitbucket/output.yaml index 503260e18..32fff83f6 100644 --- a/src/test/resources/expected_helm_output/bitbucket/output.yaml +++ b/src/test/resources/expected_helm_output/bitbucket/output.yaml @@ -334,6 +334,16 @@ data: create: true roleBinding: create: true + testPods: + affinity: {} + annotations: {} + image: + permissionsTestContainer: debian:stable-slim + statusTestContainer: alpine:latest + labels: {} + nodeSelector: {} + schedulerName: null + tolerations: [] tolerations: [] volumes: additional: [] @@ -545,6 +555,7 @@ spec: template: metadata: annotations: + checksum/config-jvm: aed68fcf426d2cc41b7fc491665468da978bced1d29c96c2ee6c2e6dc9064250 labels: app.kubernetes.io/name: bitbucket-mesh app.kubernetes.io/instance: unittest-bitbucket @@ -667,6 +678,7 @@ spec: template: metadata: annotations: + checksum/config-jvm: c81ba94e8e89fca1eb988f8bc863b561ad914819034a569bcb1c72e1ff9ba001 labels: app.kubernetes.io/name: bitbucket app.kubernetes.io/instance: unittest-bitbucket diff --git a/src/test/resources/expected_helm_output/confluence/output.yaml b/src/test/resources/expected_helm_output/confluence/output.yaml index 20c760d07..5e7a41587 100644 --- a/src/test/resources/expected_helm_output/confluence/output.yaml +++ b/src/test/resources/expected_helm_output/confluence/output.yaml @@ -323,6 +323,16 @@ data: shutdown: terminationGracePeriodSeconds: 25 topologySpreadConstraints: [] + testPods: + affinity: {} + annotations: {} + image: + permissionsTestContainer: debian:stable-slim + statusTestContainer: alpine:latest + labels: {} + nodeSelector: {} + schedulerName: null + tolerations: [] tolerations: [] volumes: additional: [] @@ -499,6 +509,7 @@ spec: template: metadata: annotations: + checksum/config-jvm: 5c7e4f3183d49bd4e8c82a29b06246e551e4120042495652f1f9b27a0599a882 labels: app.kubernetes.io/name: confluence-synchrony app.kubernetes.io/instance: unittest-confluence @@ -580,6 +591,7 @@ spec: template: metadata: annotations: + checksum/config-jvm: 
bb3be4203b052588513ff3a759fe5eabb404e391407d5c7c19473423c38aec89 labels: app.kubernetes.io/name: confluence app.kubernetes.io/instance: unittest-confluence diff --git a/src/test/resources/expected_helm_output/crowd/output.yaml b/src/test/resources/expected_helm_output/crowd/output.yaml index f8d95f2ea..2775a5cba 100644 --- a/src/test/resources/expected_helm_output/crowd/output.yaml +++ b/src/test/resources/expected_helm_output/crowd/output.yaml @@ -223,6 +223,16 @@ data: imagePullSecrets: [] name: null terminationGracePeriodSeconds: 30 + testPods: + affinity: {} + annotations: {} + image: + permissionsTestContainer: debian:stable-slim + statusTestContainer: alpine:latest + labels: {} + nodeSelector: {} + schedulerName: null + tolerations: [] tolerations: [] volumes: additional: [] @@ -333,6 +343,7 @@ spec: template: metadata: annotations: + checksum/config-jvm: e1870daf2ca6d4446b45f03537917a2a11c8af2143d335b94971600655474ee3 labels: app.kubernetes.io/name: crowd app.kubernetes.io/instance: unittest-crowd diff --git a/src/test/resources/expected_helm_output/jira/output.yaml b/src/test/resources/expected_helm_output/jira/output.yaml index 5d2e30fa8..6ba3de803 100644 --- a/src/test/resources/expected_helm_output/jira/output.yaml +++ b/src/test/resources/expected_helm_output/jira/output.yaml @@ -260,6 +260,16 @@ data: roleArn: null imagePullSecrets: [] name: null + testPods: + affinity: {} + annotations: {} + image: + permissionsTestContainer: debian:stable-slim + statusTestContainer: alpine:latest + labels: {} + nodeSelector: {} + schedulerName: null + tolerations: [] tolerations: [] volumes: additional: [] @@ -372,6 +382,7 @@ spec: template: metadata: annotations: + checksum/config-jvm: f057f4d464bde2e94ec75c95fb61f1d9d2febf7f3551ca83a69591ade0aaba63 labels: app.kubernetes.io/name: jira app.kubernetes.io/instance: unittest-jira @@ -510,7 +521,7 @@ metadata: spec: containers: - name: test - image: alpine + image: alpine:latest imagePullPolicy: IfNotPresent env: - name: STATUS_URL
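
A note on the new `testPods` block (shown above for the jira chart and added to the other product charts by this patch): the test-pod templates always render the `helm.sh/hook` annotations, but fall back to the chart-wide `podAnnotations` and generated common labels only when `testPods.annotations` / `testPods.labels` are left empty; annotation and label values are passed through `tpl`, so they may contain template expressions. The sketch below is an illustrative values override, not a set of chart defaults: the keys all exist in the new `testPods` section, while the file name and the concrete selector, toleration and scheduler values are placeholders.

```yaml
# test-pods-values.yaml: illustrative override for the Helm-test pods
testPods:
  labels:
    pod-type: helm-test            # used instead of the generated common labels
  annotations:
    owning-team: platform          # used instead of the chart-wide podAnnotations
  nodeSelector:
    kubernetes.io/os: linux        # schedule test pods onto Linux nodes only
  tolerations:
    - key: dedicated               # placeholder taint key
      operator: Equal
      value: test
      effect: NoSchedule
  affinity: {}                     # any standard K8s affinity stanza is accepted
  schedulerName: my-scheduler      # placeholder; the named scheduler must exist in the cluster
  image:
    permissionsTestContainer: debian:stable-slim   # chart default
    statusTestContainer: alpine:latest             # chart default
```

With such an override supplied at install or upgrade time, `helm test <release-name>` runs the same application-status, shared-home-permissions and (where applicable) database-connectivity pods, now carrying the metadata and scheduling constraints above.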