From 96201959f395059342b1b587fcd17a67981f2bfa Mon Sep 17 00:00:00 2001 From: Deniz Erdogan <91744937+deer-wmde@users.noreply.github.com> Date: Thu, 8 Jun 2023 17:14:40 +0200 Subject: [PATCH] Fork ElasticSearch 6.8 chart (#117) * add elasticsearch chart dir from https://github.com/elastic/helm-charts/releases/tag/v6.8.22 * move PodDisruptionBudget API version to policy/v1 * Bump chart version to 6.8.22-wmde1 * Add CHANGELOG * formatting * Add LICENSE from upstream --------- Co-authored-by: Thomas Arrow --- charts/elasticsearch/.helmignore | 2 + charts/elasticsearch/CHANGELOG.md | 7 + charts/elasticsearch/Chart.yaml | 12 + charts/elasticsearch/LICENSE.md | 201 +++ charts/elasticsearch/Makefile | 1 + charts/elasticsearch/README.md | 472 ++++++ charts/elasticsearch/examples/config/Makefile | 21 + .../elasticsearch/examples/config/README.md | 27 + .../examples/config/test/goss.yaml | 29 + .../elasticsearch/examples/config/values.yaml | 28 + .../examples/config/watcher_encryption_key | 1 + .../elasticsearch/examples/default/Makefile | 14 + .../elasticsearch/examples/default/README.md | 25 + .../examples/default/rolling_upgrade.sh | 19 + .../examples/default/test/goss.yaml | 38 + .../examples/docker-for-mac/Makefile | 13 + .../examples/docker-for-mac/README.md | 23 + .../examples/docker-for-mac/values.yaml | 23 + .../examples/kubernetes-kind/Makefile | 17 + .../examples/kubernetes-kind/README.md | 36 + .../kubernetes-kind/values-local-path.yaml | 23 + .../examples/kubernetes-kind/values.yaml | 23 + .../elasticsearch/examples/microk8s/Makefile | 13 + .../elasticsearch/examples/microk8s/README.md | 32 + .../examples/microk8s/values.yaml | 32 + .../elasticsearch/examples/migration/Makefile | 10 + .../examples/migration/README.md | 167 ++ .../examples/migration/client.yaml | 23 + .../examples/migration/data.yaml | 17 + .../examples/migration/master.yaml | 27 + .../elasticsearch/examples/minikube/Makefile | 13 + .../elasticsearch/examples/minikube/README.md | 38 + .../examples/minikube/values.yaml | 23 + charts/elasticsearch/examples/multi/Makefile | 19 + charts/elasticsearch/examples/multi/README.md | 29 + .../elasticsearch/examples/multi/client.yaml | 12 + charts/elasticsearch/examples/multi/data.yaml | 9 + .../elasticsearch/examples/multi/master.yaml | 9 + .../examples/multi/test/goss.yaml | 9 + .../examples/networkpolicy/Makefile | 14 + .../examples/networkpolicy/values.yaml | 37 + .../elasticsearch/examples/openshift/Makefile | 13 + .../examples/openshift/README.md | 24 + .../examples/openshift/test/goss.yaml | 16 + .../examples/openshift/values.yaml | 11 + charts/elasticsearch/examples/oss/Makefile | 14 + charts/elasticsearch/examples/oss/README.md | 23 + .../elasticsearch/examples/oss/test/goss.yaml | 16 + charts/elasticsearch/examples/oss/values.yaml | 4 + .../elasticsearch/examples/security/Makefile | 38 + .../elasticsearch/examples/security/README.md | 29 + .../examples/security/test/goss.yaml | 44 + .../examples/security/values.yaml | 33 + .../elasticsearch/examples/upgrade/Makefile | 16 + .../elasticsearch/examples/upgrade/README.md | 17 + .../examples/upgrade/test/goss.yaml | 16 + .../examples/upgrade/values.yaml | 2 + charts/elasticsearch/templates/NOTES.txt | 6 + charts/elasticsearch/templates/_helpers.tpl | 57 + charts/elasticsearch/templates/configmap.yaml | 16 + charts/elasticsearch/templates/ingress.yaml | 54 + .../templates/networkpolicy.yaml | 61 + .../templates/poddisruptionbudget.yaml | 12 + .../templates/podsecuritypolicy.yaml | 14 + charts/elasticsearch/templates/role.yaml | 
25 + .../elasticsearch/templates/rolebinding.yaml | 24 + charts/elasticsearch/templates/service.yaml | 77 + .../templates/serviceaccount.yaml | 20 + .../elasticsearch/templates/statefulset.yaml | 432 +++++ .../test/test-elasticsearch-health.yaml | 36 + .../elasticsearch/tests/elasticsearch_test.py | 1485 +++++++++++++++++ charts/elasticsearch/values.yaml | 347 ++++ 72 files changed, 4570 insertions(+) create mode 100644 charts/elasticsearch/.helmignore create mode 100644 charts/elasticsearch/CHANGELOG.md create mode 100755 charts/elasticsearch/Chart.yaml create mode 100644 charts/elasticsearch/LICENSE.md create mode 100644 charts/elasticsearch/Makefile create mode 100644 charts/elasticsearch/README.md create mode 100644 charts/elasticsearch/examples/config/Makefile create mode 100644 charts/elasticsearch/examples/config/README.md create mode 100644 charts/elasticsearch/examples/config/test/goss.yaml create mode 100644 charts/elasticsearch/examples/config/values.yaml create mode 100644 charts/elasticsearch/examples/config/watcher_encryption_key create mode 100644 charts/elasticsearch/examples/default/Makefile create mode 100644 charts/elasticsearch/examples/default/README.md create mode 100755 charts/elasticsearch/examples/default/rolling_upgrade.sh create mode 100644 charts/elasticsearch/examples/default/test/goss.yaml create mode 100644 charts/elasticsearch/examples/docker-for-mac/Makefile create mode 100644 charts/elasticsearch/examples/docker-for-mac/README.md create mode 100644 charts/elasticsearch/examples/docker-for-mac/values.yaml create mode 100644 charts/elasticsearch/examples/kubernetes-kind/Makefile create mode 100644 charts/elasticsearch/examples/kubernetes-kind/README.md create mode 100644 charts/elasticsearch/examples/kubernetes-kind/values-local-path.yaml create mode 100644 charts/elasticsearch/examples/kubernetes-kind/values.yaml create mode 100644 charts/elasticsearch/examples/microk8s/Makefile create mode 100644 charts/elasticsearch/examples/microk8s/README.md create mode 100644 charts/elasticsearch/examples/microk8s/values.yaml create mode 100644 charts/elasticsearch/examples/migration/Makefile create mode 100644 charts/elasticsearch/examples/migration/README.md create mode 100644 charts/elasticsearch/examples/migration/client.yaml create mode 100644 charts/elasticsearch/examples/migration/data.yaml create mode 100644 charts/elasticsearch/examples/migration/master.yaml create mode 100644 charts/elasticsearch/examples/minikube/Makefile create mode 100644 charts/elasticsearch/examples/minikube/README.md create mode 100644 charts/elasticsearch/examples/minikube/values.yaml create mode 100644 charts/elasticsearch/examples/multi/Makefile create mode 100644 charts/elasticsearch/examples/multi/README.md create mode 100644 charts/elasticsearch/examples/multi/client.yaml create mode 100644 charts/elasticsearch/examples/multi/data.yaml create mode 100644 charts/elasticsearch/examples/multi/master.yaml create mode 100644 charts/elasticsearch/examples/multi/test/goss.yaml create mode 100644 charts/elasticsearch/examples/networkpolicy/Makefile create mode 100644 charts/elasticsearch/examples/networkpolicy/values.yaml create mode 100644 charts/elasticsearch/examples/openshift/Makefile create mode 100644 charts/elasticsearch/examples/openshift/README.md create mode 100644 charts/elasticsearch/examples/openshift/test/goss.yaml create mode 100644 charts/elasticsearch/examples/openshift/values.yaml create mode 100644 charts/elasticsearch/examples/oss/Makefile create mode 100644 
charts/elasticsearch/examples/oss/README.md create mode 100644 charts/elasticsearch/examples/oss/test/goss.yaml create mode 100644 charts/elasticsearch/examples/oss/values.yaml create mode 100644 charts/elasticsearch/examples/security/Makefile create mode 100644 charts/elasticsearch/examples/security/README.md create mode 100644 charts/elasticsearch/examples/security/test/goss.yaml create mode 100644 charts/elasticsearch/examples/security/values.yaml create mode 100644 charts/elasticsearch/examples/upgrade/Makefile create mode 100644 charts/elasticsearch/examples/upgrade/README.md create mode 100644 charts/elasticsearch/examples/upgrade/test/goss.yaml create mode 100644 charts/elasticsearch/examples/upgrade/values.yaml create mode 100755 charts/elasticsearch/templates/NOTES.txt create mode 100755 charts/elasticsearch/templates/_helpers.tpl create mode 100644 charts/elasticsearch/templates/configmap.yaml create mode 100644 charts/elasticsearch/templates/ingress.yaml create mode 100644 charts/elasticsearch/templates/networkpolicy.yaml create mode 100644 charts/elasticsearch/templates/poddisruptionbudget.yaml create mode 100644 charts/elasticsearch/templates/podsecuritypolicy.yaml create mode 100644 charts/elasticsearch/templates/role.yaml create mode 100644 charts/elasticsearch/templates/rolebinding.yaml create mode 100644 charts/elasticsearch/templates/service.yaml create mode 100644 charts/elasticsearch/templates/serviceaccount.yaml create mode 100644 charts/elasticsearch/templates/statefulset.yaml create mode 100644 charts/elasticsearch/templates/test/test-elasticsearch-health.yaml create mode 100755 charts/elasticsearch/tests/elasticsearch_test.py create mode 100755 charts/elasticsearch/values.yaml diff --git a/charts/elasticsearch/.helmignore b/charts/elasticsearch/.helmignore new file mode 100644 index 0000000..e12c0b4 --- /dev/null +++ b/charts/elasticsearch/.helmignore @@ -0,0 +1,2 @@ +tests/ +.pytest_cache/ diff --git a/charts/elasticsearch/CHANGELOG.md b/charts/elasticsearch/CHANGELOG.md new file mode 100644 index 0000000..66fbe7d --- /dev/null +++ b/charts/elasticsearch/CHANGELOG.md @@ -0,0 +1,7 @@ +# CHANGELOG + + + +## 6.8.22-wmde1 +* Move PodDisruptionBudget API version to policy/v1 (from policy/v1beta1) + * note: PodSecurityPolicy is deprecated too, but we don't use it currently diff --git a/charts/elasticsearch/Chart.yaml b/charts/elasticsearch/Chart.yaml new file mode 100755 index 0000000..57fbafd --- /dev/null +++ b/charts/elasticsearch/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +description: Official Elastic helm chart for Elasticsearch +home: https://github.com/elastic/helm-charts +maintainers: +- email: helm-charts@elastic.co + name: Elastic +name: elasticsearch +version: 6.8.22-wmde1 +appVersion: 6.8.22-wmde1 +sources: + - https://github.com/elastic/elasticsearch +icon: https://helm.elastic.co/icons/elasticsearch.png diff --git a/charts/elasticsearch/LICENSE.md b/charts/elasticsearch/LICENSE.md new file mode 100644 index 0000000..3439728 --- /dev/null +++ b/charts/elasticsearch/LICENSE.md @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] Authors of elastic/helm-charts with minor alterations by Wikibase Cloud team + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/charts/elasticsearch/Makefile b/charts/elasticsearch/Makefile new file mode 100644 index 0000000..22218a1 --- /dev/null +++ b/charts/elasticsearch/Makefile @@ -0,0 +1 @@ +include ../helpers/common.mk diff --git a/charts/elasticsearch/README.md b/charts/elasticsearch/README.md new file mode 100644 index 0000000..f7df6f7 --- /dev/null +++ b/charts/elasticsearch/README.md @@ -0,0 +1,472 @@ +# Elasticsearch Helm Chart + +[![Build Status](https://img.shields.io/jenkins/s/https/devops-ci.elastic.co/job/elastic+helm-charts+master.svg)](https://devops-ci.elastic.co/job/elastic+helm-charts+master/) [![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/elastic)](https://artifacthub.io/packages/search?repo=elastic) + +This Helm chart is a lightweight way to configure and run our official +[Elasticsearch Docker image][]. + + + + + + +- [Requirements](#requirements) +- [Installing](#installing) + - [Install released version using Helm repository](#install-released-version-using-helm-repository) + - [Install development version using 6.8 branch and 6.8.22 versions](#install-development-version-using-68-branch-and-6822-versions) +- [Upgrading](#upgrading) +- [Usage notes](#usage-notes) +- [Configuration](#configuration) + - [Deprecated](#deprecated) +- [FAQ](#faq) + - [How to deploy this chart on a specific K8S distribution?](#how-to-deploy-this-chart-on-a-specific-k8s-distribution) + - [How to deploy dedicated nodes types?](#how-to-deploy-dedicated-nodes-types) + - [Clustering and Node Discovery](#clustering-and-node-discovery) + - [How to deploy clusters with security (authentication and TLS) enabled?](#how-to-deploy-clusters-with-security-authentication-and-tls-enabled) + - [How to migrate from helm/charts stable chart?](#how-to-migrate-from-helmcharts-stable-chart) + - [How to install OSS version of Elasticsearch?](#how-to-install-oss-version-of-elasticsearch) + - [How to install plugins?](#how-to-install-plugins) + - [How to use the keystore?](#how-to-use-the-keystore) + - [Basic example](#basic-example) + - [Multiple keys](#multiple-keys) + - [Custom paths and keys](#custom-paths-and-keys) + - [How to enable snapshotting?](#how-to-enable-snapshotting) + - [How to configure templates post-deployment?](#how-to-configure-templates-post-deployment) +- [Contributing](#contributing) + + + + + + +## Requirements + +* Kubernetes >= 1.14 +* [Helm][] >= 2.17.0 +* Minimum cluster requirements include the following to run this chart with +default settings. All of these settings are configurable. + * Three Kubernetes nodes to respect the default "hard" affinity settings + * 1GB of RAM for the JVM heap + +See [supported configurations][] for more details. + + +## Installing + +This chart is tested with the latest 6.8.22 version.
+ +### Install released version using Helm repository + +* Add the Elastic Helm charts repo: +`helm repo add elastic https://helm.elastic.co` + +* Install it: + - with Helm 3: `helm install elasticsearch --version <version> elastic/elasticsearch` + - with Helm 2 (deprecated): `helm install --name elasticsearch --version <version> elastic/elasticsearch` + + +### Install development version using 6.8 branch and 6.8.22 versions + +* Clone the git repo: `git clone git@github.com:elastic/helm-charts.git` + +* Checkout the branch: `git checkout 6.8` + +* Install it: + - with Helm 3: `helm install elasticsearch ./helm-charts/elasticsearch --set imageTag=6.8.22` + - with Helm 2 (deprecated): `helm install --name elasticsearch ./helm-charts/elasticsearch --set imageTag=6.8.22` + + +## Upgrading + +Please always check [CHANGELOG.md][] and [BREAKING_CHANGES.md][] before +upgrading to a new chart version. + + +## Usage notes + +* This repo includes a number of [examples][] configurations which can be used +as a reference. They are also used in the automated testing of this chart. +* Automated testing of this chart is currently only run against GKE (Google +Kubernetes Engine). +* The chart deploys a StatefulSet and by default will do an automated rolling +update of your cluster. It does this by waiting for the cluster health to become +green after each instance is updated. If you prefer to update manually you can +set the `OnDelete` [updateStrategy][]. +* It is important to set the JVM heap size in `esJavaOpts` and the CPU/Memory +`resources` to something suitable for your cluster. +* To simplify chart maintenance, each set of node groups is deployed as a +separate Helm release. Take a look at the [multi][] example to get an idea for +how this works. Without doing this it isn't possible to resize persistent +volumes in a StatefulSet. Setting it up this way makes it possible to add +more nodes with a new storage size and then drain the old ones. It also solves the +problem of allowing the user to determine which node groups to update first when +doing upgrades or changes. +* We have designed this chart to be very un-opinionated about how to configure +Elasticsearch. It exposes ways to set environment variables and mount secrets +inside of the container. Doing this makes it much easier for this chart to +support multiple versions with minimal changes. + + +## Configuration + +| Parameter | Description | Default | +|------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------| +| `antiAffinityTopologyKey` | The [anti-affinity][] topology key. By default this will prevent multiple Elasticsearch nodes from running on the same Kubernetes node | `kubernetes.io/hostname` | +| `antiAffinity` | Setting this to hard enforces the [anti-affinity][] rules. If it is set to soft it will be done "best effort".
Other values will be ignored | `hard` | +| `clusterHealthCheckParams` | The [Elasticsearch cluster health status params][] that will be used by the readiness [probe][] command | `wait_for_status=green&timeout=1s` | +| `clusterName` | This will be used as the Elasticsearch [cluster.name][] and should be unique per cluster in the namespace | `elasticsearch` | +| `enableServiceLinks` | Set to false to disable service links, which can cause slow pod startup times when there are many services in the current namespace. | `true` | +| `envFrom` | Templatable string to be passed to the [environment from variables][] which will be appended to the `envFrom:` definition for the container | `[]` | +| `esConfig` | Allows you to add any config files in `/usr/share/elasticsearch/config/` such as `elasticsearch.yml` and `log4j2.properties`. See [values.yaml][] for an example of the formatting | `{}` | +| `esJavaOpts` | [Java options][] for Elasticsearch. This is where you could configure the [jvm heap size][] | `""` | +| `esMajorVersion` | Deprecated. Instead, use the version of the chart corresponding to your ES minor version. Used to set major version specific configuration. If you are using a custom image and not running the default Elasticsearch version you will need to set this to the version you are running (e.g. `esMajorVersion: 6`) | `""` | +| `extraContainers` | Templatable string of additional `containers` to be passed to the `tpl` function | `""` | +| `extraEnvs` | Extra [environment variables][] which will be appended to the `env:` definition for the container | `[]` | +| `extraInitContainers` | Templatable string of additional `initContainers` to be passed to the `tpl` function | `""` | +| `extraVolumeMounts` | Templatable string of additional `volumeMounts` to be passed to the `tpl` function | `""` | +| `extraVolumes` | Templatable string of additional `volumes` to be passed to the `tpl` function | `""` | +| `fullnameOverride` | Overrides the `clusterName` and `nodeGroup` when used in the naming of resources. This should only be used when using a single `nodeGroup`, otherwise you will have name conflicts | `""` | +| `healthNameOverride` | Overrides `test-elasticsearch-health` pod name | `""` | +| `hostAliases` | Configurable [hostAliases][] | `[]` | +| `httpPort` | The http port that Kubernetes will use for the healthchecks and the service. If you change this you will also need to set [http.port][] in `extraEnvs` | `9200` | +| `imagePullPolicy` | The Kubernetes [imagePullPolicy][] value | `IfNotPresent` | +| `imagePullSecrets` | Configuration for [imagePullSecrets][] so that you can use a private registry for your image | `[]` | +| `imageTag` | The Elasticsearch Docker image tag | `6.8.22` | +| `image` | The Elasticsearch Docker image | `docker.elastic.co/elasticsearch/elasticsearch` | +| `ingress` | Configurable [ingress][] to expose the Elasticsearch service. See [values.yaml][] for an example | see [values.yaml][] | +| `initResources` | Allows you to set the [resources][] for the `initContainer` in the StatefulSet | `{}` | +| `keystore` | Allows you to map Kubernetes secrets into the keystore. See the [config example][] and [how to use the keystore][] | `[]` | +| `labels` | Configurable [labels][] applied to all Elasticsearch pods | `{}` | +| `lifecycle` | Allows you to add [lifecycle hooks][]. See [values.yaml][] for an example of the formatting | `{}` | +| `masterService` | The service name used to connect to the masters.
You only need to set this if your master `nodeGroup` is set to something other than `master`. See [Clustering and Node Discovery][] for more information | `""` | +| `masterTerminationFix` | A workaround needed for Elasticsearch < 7.2 to prevent master status being lost during restarts [#63][] | `false` | +| `maxUnavailable` | The [maxUnavailable][] value for the pod disruption budget. By default this will prevent Kubernetes from having more than 1 unhealthy pod in the node group | `1` | +| `minimumMasterNodes` | The value for [discovery.zen.minimum_master_nodes][]. Should be set to `(master_eligible_nodes / 2) + 1`. Ignored in Elasticsearch versions >= 7 | `2` | +| `nameOverride` | Overrides the `clusterName` when used in the naming of resources | `""` | +| `networkHost` | Value for the [network.host Elasticsearch setting][] | `0.0.0.0` | +| `networkPolicy` | The [NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) to set. See [`values.yaml`](./values.yaml) for an example | `{http.enabled: false,transport.enabled: false}` | +| `nodeAffinity` | Value for the [node affinity settings][] | `{}` | +| `nodeGroup` | This is the name that will be used for each group of nodes in the cluster. The name will be `clusterName-nodeGroup-X`, `nameOverride-nodeGroup-X` if a `nameOverride` is specified, and `fullnameOverride-X` if a `fullnameOverride` is specified | `master` | +| `nodeSelector` | Configurable [nodeSelector][] so that you can target specific nodes for your Elasticsearch cluster | `{}` | +| `persistence` | Enables a persistent volume for Elasticsearch data. Can be disabled for nodes that only have [roles][] which don't require persistent data | see [values.yaml][] | +| `podAnnotations` | Configurable [annotations][] applied to all Elasticsearch pods | `{}` | +| `podManagementPolicy` | By default Kubernetes [deploys StatefulSets serially][]. This deploys them in parallel so that they can discover each other | `Parallel` | +| `podSecurityContext` | Allows you to set the [securityContext][] for the pod | see [values.yaml][] | +| `podSecurityPolicy` | Configuration for creating a pod security policy with minimal permissions to run this Helm chart with `create: true`. Can also be used to reference an external pod security policy with `name: "externalPodSecurityPolicy"` | see [values.yaml][] | +| `priorityClassName` | The name of the [PriorityClass][]. No default is supplied as the PriorityClass must be created first | `""` | +| `protocol` | The protocol that will be used for the readiness [probe][]. Change this to `https` if you have `xpack.security.http.ssl.enabled` set | `http` | +| `rbac` | Configuration for creating a role, role binding and ServiceAccount as part of this Helm chart with `create: true`. Can also be used to reference an external ServiceAccount with `serviceAccountName: "externalServiceAccountName"`, or automount the service account token | see [values.yaml][] | +| `readinessProbe` | Configuration fields for the readiness [probe][] | see [values.yaml][] | +| `replicas` | Kubernetes replica count for the StatefulSet (i.e. how many pods) | `3` | +| `resources` | Allows you to set the [resources][] for the StatefulSet | see [values.yaml][] | +| `roles` | A hash map with the specific [roles][] for the `nodeGroup` | see [values.yaml][] | +| `schedulerName` | Name of the [alternate scheduler][] | `""` | +| `secretMounts` | Allows you to easily mount a secret as a file inside the StatefulSet. Useful for mounting certificates and other secrets.
See [values.yaml][] for an example | `[]` | +| `securityContext` | Allows you to set the [securityContext][] for the container | see [values.yaml][] | +| `service.annotations` | [LoadBalancer annotations][] that Kubernetes will use for the service. This will configure load balancer if `service.type` is `LoadBalancer` | `{}` | +| `service.enabled` | Enable non-headless service | `true` | +| `service.externalTrafficPolicy` | Some cloud providers allow you to specify the [LoadBalancer externalTrafficPolicy][]. Kubernetes will use this to preserve the client source IP. This will configure load balancer if `service.type` is `LoadBalancer` | `""` | +| `service.httpPortName` | The name of the http port within the service | `http` | +| `service.labelsHeadless` | Labels to be added to headless service | `{}` | +| `service.labels` | Labels to be added to non-headless service | `{}` | +| `service.loadBalancerIP` | Some cloud providers allow you to specify the [loadBalancer][] IP. If the `loadBalancerIP` field is not specified, the IP is dynamically assigned. If you specify a `loadBalancerIP` but your cloud provider does not support the feature, it is ignored. | `""` | +| `service.loadBalancerSourceRanges` | The IP ranges that are allowed to access the service | `[]` | +| `service.nodePort` | Custom [nodePort][] port that can be set if you are using `service.type: nodePort` | `""` | +| `service.transportPortName` | The name of the transport port within the service | `transport` | +| `service.type` | Elasticsearch [Service Types][] | `ClusterIP` | +| `sidecarResources` | Allows you to set the [resources][] for the sidecar containers in the StatefulSet | `{}` | +| `sysctlInitContainer` | Allows you to disable the `sysctlInitContainer` if you are setting [sysctl vm.max_map_count][] with another method | `enabled: true` | +| `sysctlVmMaxMapCount` | Sets the [sysctl vm.max_map_count][] needed for Elasticsearch | `262144` | +| `terminationGracePeriod` | The [terminationGracePeriod][] in seconds used when trying to stop the pod | `120` | +| `tests.enabled` | Enable creating test related resources when running `helm template` or `helm test` | `true` | +| `tolerations` | Configurable [tolerations][] | `[]` | +| `transportPort` | The transport port that Kubernetes will use for the service. If you change this you will also need to set [transport port configuration][] in `extraEnvs` | `9300` | +| `updateStrategy` | The [updateStrategy][] for the StatefulSet. By default Kubernetes will wait for the cluster to be green after upgrading each pod. Setting this to `OnDelete` will allow you to manually delete each pod during upgrades | `RollingUpdate` | +| `volumeClaimTemplate` | Configuration for the [volumeClaimTemplate for StatefulSets][]. You will want to adjust the storage (default `30Gi`) and the `storageClassName` if you are using a different storage class | see [values.yaml][] | + +### Deprecated + +| Parameter | Description | Default | +|-----------|---------------------------------------------------------------------------------------------------------------|---------| +| `fsGroup` | The Group ID (GID) for [securityContext][] so that the Elasticsearch user can read from the persistent volume | `""` | + + +## FAQ + +### How to deploy this chart on a specific K8S distribution? + +This chart is designed to run on production scale Kubernetes clusters with +multiple nodes, lots of memory and persistent storage. For that reason it can be +a bit tricky to run it against local Kubernetes environments such as +[Minikube][].
+ +This chart is highly tested with [GKE][], but some K8S distributions also +require specific configurations. + +We provide examples of configuration for the following K8S providers: + +- [Docker for Mac][] +- [KIND][] +- [Minikube][] +- [MicroK8S][] +- [OpenShift][] + +### How to deploy dedicated nodes types? + +All the Elasticsearch pods deployed share the same configuration. If you need to +deploy dedicated [nodes types][] (for example dedicated master and data nodes), +you can deploy multiple releases of this chart with different configurations +while they share the same `clusterName` value. + +For each Helm release, the node types can then be defined using the `roles` value. + +An example of an Elasticsearch cluster using two different Helm releases for master +and data nodes can be found in [examples/multi][]. + +#### Clustering and Node Discovery + +This chart facilitates Elasticsearch node discovery and services by creating two +`Service` definitions in Kubernetes, one with the name `$clusterName-$nodeGroup` +and another named `$clusterName-$nodeGroup-headless`. +Only `Ready` pods are a part of the `$clusterName-$nodeGroup` service, while all +pods (`Ready` or not) are a part of `$clusterName-$nodeGroup-headless`. + +If your group of master nodes has the default `nodeGroup: master` then you can +just add new groups of nodes with a different `nodeGroup` and they will +automatically discover the correct master. If your master nodes have a different +`nodeGroup` name then you will need to set `masterService` to +`$clusterName-$masterNodeGroup`. + +The chart value for `masterService` is used to populate +`discovery.zen.ping.unicast.hosts`, which Elasticsearch nodes will use to +contact master nodes and form a cluster. +Therefore, to add a group of nodes to an existing cluster, setting +`masterService` to the desired `Service` name of the related cluster is +sufficient. + +### How to deploy clusters with security (authentication and TLS) enabled? + +This Helm chart can use existing [Kubernetes secrets][] to set up +credentials or certificates, for example. These secrets should be created +outside of this chart and accessed using [environment variables][] and volumes. + +An example of an Elasticsearch cluster using security can be found in +[examples/security][]. + +### How to migrate from helm/charts stable chart? + +If you currently have a cluster deployed with the [helm/charts stable][] chart +you can follow the [migration guide][]. + +### How to install OSS version of Elasticsearch? + +Deploying the OSS version of Elasticsearch can be done by setting the `image` value +to the [Elasticsearch OSS Docker image][]. + +An example of an Elasticsearch cluster using the OSS version can be found in +[examples/oss][]. + +### How to install plugins? + +The recommended way to install plugins into our Docker images is to create a +[custom Docker image][]. + +The Dockerfile would look something like: + +``` +ARG elasticsearch_version +FROM docker.elastic.co/elasticsearch/elasticsearch:${elasticsearch_version} + +RUN bin/elasticsearch-plugin install --batch repository-gcs +``` + +Then update the `image` in your values to point to your custom image. + +There are a couple of reasons we recommend this. + +1. Tying the availability of Elasticsearch to the download service in order to +install plugins is not a great idea, especially in +Kubernetes, where it is normal and expected for a container to be moved to +another host at random times. +2.
Mutating the state of a running Docker image (by installing plugins) goes +against best practices of containers and immutable infrastructure. + +### How to use the keystore? + +#### Basic example + +Create the secret; the key name needs to be the keystore key path. In this +example we will create a secret from a file and from a literal string. + +``` +kubectl create secret generic encryption-key --from-file=xpack.watcher.encryption_key=./watcher_encryption_key +kubectl create secret generic slack-hook --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' +``` + +To add these secrets to the keystore: + +``` +keystore: + - secretName: encryption-key + - secretName: slack-hook +``` + +#### Multiple keys + +All keys in the secret will be added to the keystore. To create the previous +example in one secret you could also do: + +``` +kubectl create secret generic keystore-secrets --from-file=xpack.watcher.encryption_key=./watcher_encryption_key --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' +``` + +``` +keystore: + - secretName: keystore-secrets +``` + +#### Custom paths and keys + +If you are using these secrets for other applications (besides the Elasticsearch +keystore) then it is also possible to specify the keystore path and which keys +you want to add. Everything specified under each `keystore` item will be passed +through to the `volumeMounts` section for mounting the [secret][]. In this +example we will only add the `slack_hook` key from a secret that also has other +keys. Our secret looks like this: + +``` +kubectl create secret generic slack-secrets --from-literal=slack_channel='#general' --from-literal=slack_hook='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' +``` + +We only want to add the `slack_hook` key to the keystore at path +`xpack.notification.slack.account.monitoring.secure_url`: + +``` +keystore: + - secretName: slack-secrets + items: + - key: slack_hook + path: xpack.notification.slack.account.monitoring.secure_url +``` + +You can also take a look at the [config example][] which is used as part of the +automated testing pipeline. + +### How to enable snapshotting? + +1. Install your [snapshot plugin][] into a custom Docker image following the +[how to install plugins guide][]. +2. Add any required secrets or credentials into an Elasticsearch keystore +following the [how to use the keystore][] guide. +3. Configure the [snapshot repository][] as you normally would. +4. To automate snapshots you can use a tool like [curator][]. In the future +there are plans to have Elasticsearch manage automated snapshots with +[Snapshot Lifecycle Management][]. + +### How to configure templates post-deployment? + +You can use `postStart` [lifecycle hooks][] to run code triggered after a +container is created.
+ +Here is an example of a `postStart` hook to configure templates: + +```yaml +lifecycle: + postStart: + exec: + command: + - bash + - -c + - | + #!/bin/bash + # Add a template to adjust number of shards/replicas + TEMPLATE_NAME=my_template + INDEX_PATTERN="logstash-*" + SHARD_COUNT=8 + REPLICA_COUNT=1 + ES_URL=http://localhost:9200 + while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done + curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}' +``` + + +## Contributing + +Please check [CONTRIBUTING.md][] before any contribution or for any questions +about our development and testing process. + +[6.x]: https://github.com/elastic/helm-charts/releases +[#63]: https://github.com/elastic/helm-charts/issues/63 +[6.8.22]: https://github.com/elastic/helm-charts/blob/6.8.22/elasticsearch/README.md +[BREAKING_CHANGES.md]: https://github.com/elastic/helm-charts/blob/master/BREAKING_CHANGES.md +[CHANGELOG.md]: https://github.com/elastic/helm-charts/blob/master/CHANGELOG.md +[CONTRIBUTING.md]: https://github.com/elastic/helm-charts/blob/master/CONTRIBUTING.md +[alternate scheduler]: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/#specify-schedulers-for-pods +[annotations]: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +[anti-affinity]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +[cluster.name]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/cluster.name.html +[clustering and node discovery]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/README.md#clustering-and-node-discovery +[config example]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/config/values.yaml +[curator]: https://www.elastic.co/guide/en/elasticsearch/client/curator/6.8/snapshot.html +[custom docker image]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/docker.html#_c_customized_image +[deploys statefulsets serially]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies +[discovery.zen.minimum_master_nodes]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/discovery-settings.html#minimum_master_nodes +[docker for mac]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/docker-for-mac +[elasticsearch cluster health status params]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/cluster-health.html#request-params +[elasticsearch docker image]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/docker.html +[elasticsearch oss docker image]: https://www.docker.elastic.co/r/elasticsearch/elasticsearch-oss +[environment variables]: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#using-environment-variables-inside-of-your-config +[environment from variables]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables +[examples]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/ +[examples/multi]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/multi +[examples/oss]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/oss +[examples/security]:
https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/security +[gke]: https://cloud.google.com/kubernetes-engine +[helm]: https://helm.sh +[helm/charts stable]: https://github.com/helm/charts/tree/master/stable/elasticsearch/ +[how to install plugins guide]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/README.md#how-to-install-plugins +[how to use the keystore]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/README.md#how-to-use-the-keystore +[http.port]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/modules-http.html#_settings +[imagePullPolicy]: https://kubernetes.io/docs/concepts/containers/images/#updating-images +[imagePullSecrets]: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret +[ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/ +[java options]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/jvm-options.html +[jvm heap size]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/heap-size.html +[hostAliases]: https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +[kind]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/kubernetes-kind +[kubernetes secrets]: https://kubernetes.io/docs/concepts/configuration/secret/ +[labels]: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +[lifecycle hooks]: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/ +[loadBalancer annotations]: https://kubernetes.io/docs/concepts/services-networking/service/#ssl-support-on-aws +[loadBalancer externalTrafficPolicy]: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip +[loadBalancer]: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer +[maxUnavailable]: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget +[migration guide]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/migration/README.md +[minikube]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/minikube +[microk8s]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/microk8s +[multi]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/multi/ +[network.host elasticsearch setting]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/network.host.html +[node affinity settings]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature +[node-certificates]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/configuring-tls.html#node-certificates +[nodePort]: https://kubernetes.io/docs/concepts/services-networking/service/#nodeport +[nodes types]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/modules-node.html +[nodeSelector]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +[openshift]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/openshift +[priorityClass]: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass +[probe]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ +[resources]: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ +[roles]: 
https://www.elastic.co/guide/en/elasticsearch/reference/6.8/modules-node.html +[secret]: https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets +[securityContext]: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +[service types]: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types +[snapshot lifecycle management]: https://github.com/elastic/elasticsearch/issues/38461 +[snapshot plugin]: https://www.elastic.co/guide/en/elasticsearch/plugins/6.8/repository.html +[snapshot repository]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/modules-snapshots.html +[supported configurations]: https://github.com/elastic/helm-charts/tree/6.8/README.md#supported-configurations +[sysctl vm.max_map_count]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/vm-max-map-count.html#vm-max-map-count +[terminationGracePeriod]: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +[tolerations]: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +[transport port configuration]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/modules-transport.html#_transport_settings +[updateStrategy]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ +[values.yaml]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/values.yaml +[volumeClaimTemplate for statefulsets]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-storage diff --git a/charts/elasticsearch/examples/config/Makefile b/charts/elasticsearch/examples/config/Makefile new file mode 100644 index 0000000..9ae9c37 --- /dev/null +++ b/charts/elasticsearch/examples/config/Makefile @@ -0,0 +1,21 @@ +default: test + +include ../../../helpers/examples.mk + +RELEASE := helm-es-config +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ + +secrets: + kubectl delete secret elastic-config-credentials elastic-config-secret elastic-config-slack elastic-config-custom-path || true + kubectl create secret generic elastic-config-credentials --from-literal=password=changeme --from-literal=username=elastic + kubectl create secret generic elastic-config-slack --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' + kubectl create secret generic elastic-config-secret --from-file=xpack.watcher.encryption_key=./watcher_encryption_key + kubectl create secret generic elastic-config-custom-path --from-literal=slack_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' --from-literal=thing_i_don_tcare_about=test + +test: secrets install goss + +purge: + helm del $(RELEASE) diff --git a/charts/elasticsearch/examples/config/README.md b/charts/elasticsearch/examples/config/README.md new file mode 100644 index 0000000..5994e26 --- /dev/null +++ b/charts/elasticsearch/examples/config/README.md @@ -0,0 +1,27 @@ +# Config + +This example deploys a single-node Elasticsearch 6.8.22 cluster with authentication and +custom [values][].
+ + +## Usage + +* Create the required secrets: `make secrets` + +* Deploy Elasticsearch chart with the default values: `make install` + +* You can now set up a port forward to query the Elasticsearch API: + + ``` + kubectl port-forward svc/config-master 9200 + curl -u elastic:changeme http://localhost:9200/_cat/indices + ``` + + +## Testing + +You can also run [goss integration tests][] using `make test`. + + +[goss integration tests]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/config/test/goss.yaml +[values]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/config/values.yaml diff --git a/charts/elasticsearch/examples/config/test/goss.yaml b/charts/elasticsearch/examples/config/test/goss.yaml new file mode 100644 index 0000000..752db8d --- /dev/null +++ b/charts/elasticsearch/examples/config/test/goss.yaml @@ -0,0 +1,29 @@ +http: + http://localhost:9200/_cluster/health: + status: 200 + timeout: 2000 + username: elastic + password: "{{ .Env.ELASTIC_PASSWORD }}" + body: + - "green" + - '"number_of_nodes":1' + - '"number_of_data_nodes":1' + + http://localhost:9200: + status: 200 + timeout: 2000 + username: elastic + password: "{{ .Env.ELASTIC_PASSWORD }}" + body: + - '"cluster_name" : "config"' + - "You Know, for Search" + +command: + "elasticsearch-keystore list": + exit-status: 0 + stdout: + - keystore.seed + - bootstrap.password + - xpack.notification.slack.account.monitoring.secure_url + - xpack.notification.slack.account.otheraccount.secure_url + - xpack.watcher.encryption_key diff --git a/charts/elasticsearch/examples/config/values.yaml b/charts/elasticsearch/examples/config/values.yaml new file mode 100644 index 0000000..b8b980b --- /dev/null +++ b/charts/elasticsearch/examples/config/values.yaml @@ -0,0 +1,28 @@ +--- + +clusterName: "config" +replicas: 1 +minimumMasterNodes: 1 + +extraEnvs: + - name: ELASTIC_PASSWORD + valueFrom: + secretKeyRef: + name: elastic-config-credentials + key: password + +# This is just a dummy file to make sure that +# the keystore can be mounted at the same time +# as a custom elasticsearch.yml +esConfig: + elasticsearch.yml: | + xpack.security.enabled: true + path.data: /usr/share/elasticsearch/data + +keystore: + - secretName: elastic-config-secret + - secretName: elastic-config-slack + - secretName: elastic-config-custom-path + items: + - key: slack_url + path: xpack.notification.slack.account.otheraccount.secure_url diff --git a/charts/elasticsearch/examples/config/watcher_encryption_key b/charts/elasticsearch/examples/config/watcher_encryption_key new file mode 100644 index 0000000..b5f9078 --- /dev/null +++ b/charts/elasticsearch/examples/config/watcher_encryption_key @@ -0,0 +1 @@ +supersecret diff --git a/charts/elasticsearch/examples/default/Makefile b/charts/elasticsearch/examples/default/Makefile new file mode 100644 index 0000000..389bf99 --- /dev/null +++ b/charts/elasticsearch/examples/default/Makefile @@ -0,0 +1,14 @@ +default: test + +include ../../../helpers/examples.mk + +RELEASE := helm-es-default +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install $(RELEASE) ../../ + +test: install goss + +purge: + helm del $(RELEASE) diff --git a/charts/elasticsearch/examples/default/README.md b/charts/elasticsearch/examples/default/README.md new file mode 100644 index 0000000..5de84d8 --- /dev/null +++ b/charts/elasticsearch/examples/default/README.md @@ -0,0 +1,25 @@ +# Default + +This example deploys a three-node Elasticsearch 6.8.22 cluster using +[default values][].
+ + +## Usage + +* Deploy Elasticsearch chart with the default values: `make install` + +* You can now set up a port forward to query the Elasticsearch API: + + ``` + kubectl port-forward svc/elasticsearch-master 9200 + curl localhost:9200/_cat/indices + ``` + + +## Testing + +You can also run [goss integration tests][] using `make test`. + + +[goss integration tests]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/default/test/goss.yaml +[default values]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/values.yaml diff --git a/charts/elasticsearch/examples/default/rolling_upgrade.sh b/charts/elasticsearch/examples/default/rolling_upgrade.sh new file mode 100755 index 0000000..c5a2a88 --- /dev/null +++ b/charts/elasticsearch/examples/default/rolling_upgrade.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +# Shebang arguments like `bash -x` are not portable, so enable tracing here. +set -x + +kubectl proxy || true & + +make & +PROC_ID=$! + +while kill -0 "$PROC_ID" >/dev/null 2>&1; do + echo "PROCESS IS RUNNING" + if curl --fail 'http://localhost:8001/api/v1/proxy/namespaces/default/services/elasticsearch-master:9200/_search' ; then + echo "cluster is healthy" + else + echo "cluster not healthy!" + exit 1 + fi + sleep 1 +done +echo "PROCESS TERMINATED" +exit 0 diff --git a/charts/elasticsearch/examples/default/test/goss.yaml b/charts/elasticsearch/examples/default/test/goss.yaml new file mode 100644 index 0000000..5f968b6 --- /dev/null +++ b/charts/elasticsearch/examples/default/test/goss.yaml @@ -0,0 +1,38 @@ +kernel-param: + vm.max_map_count: + value: "262144" + +http: + http://elasticsearch-master:9200/_cluster/health: + status: 200 + timeout: 2000 + body: + - "green" + - '"number_of_nodes":3' + - '"number_of_data_nodes":3' + + http://localhost:9200: + status: 200 + timeout: 2000 + body: + - '"number" : "6.8.22"' + - '"cluster_name" : "elasticsearch"' + - "You Know, for Search" + +file: + /usr/share/elasticsearch/data: + exists: true + mode: "2775" + owner: root + group: elasticsearch + filetype: directory + +mount: + /usr/share/elasticsearch/data: + exists: true + +user: + elasticsearch: + exists: true + uid: 1000 + gid: 1000 diff --git a/charts/elasticsearch/examples/docker-for-mac/Makefile b/charts/elasticsearch/examples/docker-for-mac/Makefile new file mode 100644 index 0000000..18fd053 --- /dev/null +++ b/charts/elasticsearch/examples/docker-for-mac/Makefile @@ -0,0 +1,13 @@ +default: test + +RELEASE := helm-es-docker-for-mac +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ + +test: install + helm test $(RELEASE) + +purge: + helm del $(RELEASE) diff --git a/charts/elasticsearch/examples/docker-for-mac/README.md b/charts/elasticsearch/examples/docker-for-mac/README.md new file mode 100644 index 0000000..fa550bc --- /dev/null +++ b/charts/elasticsearch/examples/docker-for-mac/README.md @@ -0,0 +1,23 @@ +# Docker for Mac + +This example deploys a 3-node Elasticsearch 6.8.22 cluster on [Docker for Mac][] +using [custom values][]. + +Note that this configuration should be used for testing only and isn't +recommended for production.
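+ +The [custom values][] request the `hostpath` storage class, which Docker for +Mac's bundled Kubernetes provides out of the box; a quick pre-flight check +before installing: + + ``` + kubectl get storageclass hostpath + ```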
+ + +## Usage + +* Deploy the Elasticsearch chart with the example values: `make install` + +* You can now set up a port forward to query the Elasticsearch API: + + ``` + kubectl port-forward svc/elasticsearch-master 9200 + curl localhost:9200/_cat/indices + ``` + + +[custom values]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/docker-for-mac/values.yaml +[docker for mac]: https://docs.docker.com/docker-for-mac/kubernetes/ diff --git a/charts/elasticsearch/examples/docker-for-mac/values.yaml b/charts/elasticsearch/examples/docker-for-mac/values.yaml new file mode 100644 index 0000000..f7deba6 --- /dev/null +++ b/charts/elasticsearch/examples/docker-for-mac/values.yaml @@ -0,0 +1,23 @@ +--- +# Permit co-located instances for solitary virtual machines. +antiAffinity: "soft" + +# Shrink default JVM heap. +esJavaOpts: "-Xmx128m -Xms128m" + +# Allocate smaller chunks of memory per pod. +resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "512M" + +# Request smaller persistent volumes. +volumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "hostpath" + resources: + requests: + storage: 100M diff --git a/charts/elasticsearch/examples/kubernetes-kind/Makefile b/charts/elasticsearch/examples/kubernetes-kind/Makefile new file mode 100644 index 0000000..9e5602d --- /dev/null +++ b/charts/elasticsearch/examples/kubernetes-kind/Makefile @@ -0,0 +1,17 @@ +default: test + +RELEASE := helm-es-kind +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ + +install-local-path: + kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml + helm upgrade --wait --timeout=$(TIMEOUT) --install --values values-local-path.yaml $(RELEASE) ../../ + +test: install + helm test $(RELEASE) + +purge: + helm del $(RELEASE) diff --git a/charts/elasticsearch/examples/kubernetes-kind/README.md b/charts/elasticsearch/examples/kubernetes-kind/README.md new file mode 100644 index 0000000..c0a90f7 --- /dev/null +++ b/charts/elasticsearch/examples/kubernetes-kind/README.md @@ -0,0 +1,36 @@ +# KIND + +This example deploys a 3-node Elasticsearch 6.8.22 cluster on [Kind][] +using [custom values][]. + +Note that this configuration should be used for testing only and isn't +recommended for production. + +Note that Kind versions before 0.7.0 are affected by a [kind issue][] where +mount points created from PVCs are not writable by non-root users. +[kubernetes-sigs/kind#1157][] fixed this in Kind 0.7.0. + +The workaround for Kind < 0.7.0 is to manually install the +[Rancher Local Path Provisioner][] and use the `local-path` storage class for +Elasticsearch volumes (see the [Makefile][] instructions).
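+ +For reference, the `install-local-path` target in the [Makefile][] boils down +to the following two commands (sketch; the release name and timeout mirror the +Makefile variables): + + ``` + kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml + helm upgrade --wait --timeout=1200s --install --values values-local-path.yaml helm-es-kind ../../ + ```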
+ + +## Usage + +* For Kind >= 0.7.0: Deploy the Elasticsearch chart with the example values: `make install` +* For Kind < 0.7.0: Deploy the Elasticsearch chart with the `local-path` storage class: `make install-local-path` + +* You can now set up a port forward to query the Elasticsearch API: + + ``` + kubectl port-forward svc/elasticsearch-master 9200 + curl localhost:9200/_cat/indices + ``` + + +[custom values]: https://github.com/elastic/helm-charts/blob/6.8/elasticsearch/examples/kubernetes-kind/values.yaml +[kind]: https://kind.sigs.k8s.io/ +[kind issue]: https://github.com/kubernetes-sigs/kind/issues/830 +[kubernetes-sigs/kind#1157]: https://github.com/kubernetes-sigs/kind/pull/1157 +[rancher local path provisioner]: https://github.com/rancher/local-path-provisioner +[Makefile]: https://github.com/elastic/helm-charts/blob/6.8/elasticsearch/examples/kubernetes-kind/Makefile#L5 diff --git a/charts/elasticsearch/examples/kubernetes-kind/values-local-path.yaml b/charts/elasticsearch/examples/kubernetes-kind/values-local-path.yaml new file mode 100644 index 0000000..500ad4b --- /dev/null +++ b/charts/elasticsearch/examples/kubernetes-kind/values-local-path.yaml @@ -0,0 +1,23 @@ +--- +# Permit co-located instances for solitary virtual machines. +antiAffinity: "soft" + +# Shrink default JVM heap. +esJavaOpts: "-Xmx128m -Xms128m" + +# Allocate smaller chunks of memory per pod. +resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "512M" + +# Request smaller persistent volumes. +volumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "local-path" + resources: + requests: + storage: 100M diff --git a/charts/elasticsearch/examples/kubernetes-kind/values.yaml b/charts/elasticsearch/examples/kubernetes-kind/values.yaml new file mode 100644 index 0000000..500ad4b --- /dev/null +++ b/charts/elasticsearch/examples/kubernetes-kind/values.yaml @@ -0,0 +1,23 @@ +--- +# Permit co-located instances for solitary virtual machines. +antiAffinity: "soft" + +# Shrink default JVM heap. +esJavaOpts: "-Xmx128m -Xms128m" + +# Allocate smaller chunks of memory per pod. +resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "512M" + +# Request smaller persistent volumes. +volumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "local-path" + resources: + requests: + storage: 100M diff --git a/charts/elasticsearch/examples/microk8s/Makefile b/charts/elasticsearch/examples/microk8s/Makefile new file mode 100644 index 0000000..2d0012d --- /dev/null +++ b/charts/elasticsearch/examples/microk8s/Makefile @@ -0,0 +1,13 @@ +default: test + +RELEASE := helm-es-microk8s +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ + +test: install + helm test $(RELEASE) + +purge: + helm del $(RELEASE) diff --git a/charts/elasticsearch/examples/microk8s/README.md b/charts/elasticsearch/examples/microk8s/README.md new file mode 100644 index 0000000..4700054 --- /dev/null +++ b/charts/elasticsearch/examples/microk8s/README.md @@ -0,0 +1,32 @@ +# MicroK8S + +This example deploys a 3-node Elasticsearch 6.8.22 cluster on [MicroK8S][] +using [custom values][]. + +Note that this configuration should be used for testing only and isn't +recommended for production.
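+ +The [addons][] listed under Requirements below can be enabled in one go +(assuming a stock MicroK8S installation): + + ``` + microk8s enable dns helm storage + ```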
+ + +## Requirements + +The following MicroK8S [addons][] need to be enabled: +- `dns` +- `helm` +- `storage` + + +## Usage + +* Deploy the Elasticsearch chart with the example values: `make install` + +* You can now set up a port forward to query the Elasticsearch API: + + ``` + kubectl port-forward svc/elasticsearch-master 9200 + curl localhost:9200/_cat/indices + ``` + + +[addons]: https://microk8s.io/docs/addons +[custom values]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/microk8s/values.yaml +[MicroK8S]: https://microk8s.io diff --git a/charts/elasticsearch/examples/microk8s/values.yaml b/charts/elasticsearch/examples/microk8s/values.yaml new file mode 100644 index 0000000..2627ecb --- /dev/null +++ b/charts/elasticsearch/examples/microk8s/values.yaml @@ -0,0 +1,32 @@ +--- +# Disable privileged init Container creation. +sysctlInitContainer: + enabled: false + +# Restrict the use of memory mapping when sysctlInitContainer is disabled. +esConfig: + elasticsearch.yml: | + node.store.allow_mmap: false + +# Permit co-located instances for solitary virtual machines. +antiAffinity: "soft" + +# Shrink default JVM heap. +esJavaOpts: "-Xmx128m -Xms128m" + +# Allocate smaller chunks of memory per pod. +resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "512M" + +# Request smaller persistent volumes. +volumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "microk8s-hostpath" + resources: + requests: + storage: 100M diff --git a/charts/elasticsearch/examples/migration/Makefile b/charts/elasticsearch/examples/migration/Makefile new file mode 100644 index 0000000..020906f --- /dev/null +++ b/charts/elasticsearch/examples/migration/Makefile @@ -0,0 +1,10 @@ +PREFIX := helm-es-migration +TIMEOUT := 1200s + +data: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values data.yaml $(PREFIX)-data ../../ + +master: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values master.yaml $(PREFIX)-master ../../ + +client: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values client.yaml $(PREFIX)-client ../../ diff --git a/charts/elasticsearch/examples/migration/README.md b/charts/elasticsearch/examples/migration/README.md new file mode 100644 index 0000000..7bde14d --- /dev/null +++ b/charts/elasticsearch/examples/migration/README.md @@ -0,0 +1,167 @@ +# Migration Guide from helm/charts + +There are two viable options for migrating from the community Elasticsearch Helm +chart in the [helm/charts][] repo: + +1. Restoring from Snapshot to a fresh cluster +2. Live migration by joining a new cluster to the existing cluster. + +## Restoring from Snapshot + +This is the recommended option. The downside is that it will +involve a period of write downtime during the migration. If you have a way to +temporarily stop writes to your cluster then this is the way to go. This is also +a lot simpler as it just involves launching a fresh cluster and restoring a +snapshot following the [restoring to a different cluster guide][]. + +## Live migration + +If restoring from a snapshot is not possible due to the write downtime then a +live migration is also possible. It is very important to first test this in a +testing environment to make sure you are comfortable with the process and fully +understand what is happening. + +This process will involve joining a new set of master, data and client nodes to +an existing cluster that has been deployed using the [helm/charts][] community +chart.
Nodes will then be replaced one by one in a controlled fashion to +decommission the old cluster. + +This example will be using the default values for the existing helm/charts +release and for the Elastic helm-charts release. If you have changed any of the +default values then you will need to first make sure that your values are +configured in a compatible way before starting the migration. + +The process will involve a re-sync and a rolling restart of all of your data +nodes. Therefore it is important to disable shard allocation and perform a synced +flush like you normally would during any other rolling upgrade. See the +[rolling upgrades guide][] for more information. + +* The default image for this chart is +`docker.elastic.co/elasticsearch/elasticsearch` which contains the default +distribution of Elasticsearch with a [basic license][]. Make sure to update the +`image` and `imageTag` values to the correct Docker image and Elasticsearch +version that you currently have deployed. + +* Convert your current helm/charts configuration into something that is +compatible with this chart. + +* Take a fresh snapshot of your cluster. If something goes wrong you want to be +able to restore your data no matter what. + +* Check that your cluster's health is green. If it is not, abort and make sure +your cluster is healthy before continuing: + + ``` + curl localhost:9200/_cluster/health + ``` + +* Deploy new data nodes which will join the existing cluster. Take a look at the +configuration in [data.yaml][]: + + ``` + make data + ``` + +* Check that the new nodes have joined the cluster (run this and any other curl +commands from within one of your pods): + + ``` + curl localhost:9200/_cat/nodes + ``` + +* Check that your cluster is still green. If so, we can now start to scale down +the existing data nodes. Assuming you have the default number of data nodes (2), +we now want to scale them down to 1: + + ``` + kubectl scale statefulsets my-release-elasticsearch-data --replicas=1 + ``` + +* Wait for your cluster to become green again: + + ``` + watch 'curl -s localhost:9200/_cluster/health' + ``` + +* Once the cluster is green we can scale down again: + + ``` + kubectl scale statefulsets my-release-elasticsearch-data --replicas=0 + ``` + +* Wait for the cluster to be green again. +* We now have all data nodes running in the new cluster. Time to replace the +masters, starting by scaling the old masters down from 3 to 2. Between each step +make sure to wait for the cluster to become green again, and check with +`curl localhost:9200/_cat/nodes` that you see the correct number of master +nodes. During this process we will always keep at least 2 master nodes running +so as not to lose quorum: + + ``` + kubectl scale statefulsets my-release-elasticsearch-master --replicas=2 + ``` + +* Now deploy a single new master so that we have 3 masters again. See +[master.yaml][] for the configuration: + + ``` + make master + ``` + +* Scale down the old masters to 1: + + ``` + kubectl scale statefulsets my-release-elasticsearch-master --replicas=1 + ``` + +* Edit the master replicas in [master.yaml][] to 2 and redeploy: + + ``` + make master + ``` + +* Scale down the old masters to 0: + + ``` + kubectl scale statefulsets my-release-elasticsearch-master --replicas=0 + ``` + +* Edit [master.yaml][] to have 3 replicas and remove the +`discovery.zen.ping.unicast.hosts` entry from `extraEnvs`, then redeploy the +masters.
This will make sure all 3 masters are running in the new cluster and +are pointing at each other for discovery: + + ``` + make master + ``` + +* Remove the `discovery.zen.ping.unicast.hosts` entry from `extraEnvs` then +redeploy the data nodes to make sure they are pointing at the new masters: + + ``` + make data + ``` + +* Deploy the client nodes: + + ``` + make client + ``` + +* Update any processes that are talking to the existing client nodes and point +them to the new client nodes. Once this is done you can scale down the old +client nodes: + + ``` + kubectl scale deployment my-release-elasticsearch-client --replicas=0 + ``` + +* The migration should now be complete. After verifying that everything is +working correctly you can clean up leftover resources from your old cluster. + +[basic license]: https://www.elastic.co/subscriptions +[data.yaml]: https://github.com/elastic/helm-charts/blob/6.8/elasticsearch/examples/migration/data.yaml +[helm/charts]: https://github.com/helm/charts/tree/master/stable/elasticsearch +[master.yaml]: https://github.com/elastic/helm-charts/blob/6.8/elasticsearch/examples/migration/master.yaml +[restoring to a different cluster guide]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/modules-snapshots.html#_restoring_to_a_different_cluster +[rolling upgrades guide]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/rolling-upgrades.html diff --git a/charts/elasticsearch/examples/migration/client.yaml b/charts/elasticsearch/examples/migration/client.yaml new file mode 100644 index 0000000..30ee700 --- /dev/null +++ b/charts/elasticsearch/examples/migration/client.yaml @@ -0,0 +1,23 @@ +--- + +replicas: 2 + +clusterName: "elasticsearch" +nodeGroup: "client" + +esMajorVersion: 6 + +roles: + master: "false" + ingest: "false" + data: "false" + +volumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "standard" + resources: + requests: + storage: 1Gi # Currently needed until PVCs are made optional + +persistence: + enabled: false diff --git a/charts/elasticsearch/examples/migration/data.yaml b/charts/elasticsearch/examples/migration/data.yaml new file mode 100644 index 0000000..eedcbb0 --- /dev/null +++ b/charts/elasticsearch/examples/migration/data.yaml @@ -0,0 +1,17 @@ +--- + +replicas: 2 + +esMajorVersion: 6 + +extraEnvs: + - name: discovery.zen.ping.unicast.hosts + value: "my-release-elasticsearch-discovery" + +clusterName: "elasticsearch" +nodeGroup: "data" + +roles: + master: "false" + ingest: "false" + data: "true" diff --git a/charts/elasticsearch/examples/migration/master.yaml b/charts/elasticsearch/examples/migration/master.yaml new file mode 100644 index 0000000..6e40a4d --- /dev/null +++ b/charts/elasticsearch/examples/migration/master.yaml @@ -0,0 +1,27 @@ +--- + +# The replica count is edited from 1 up to 3 during the migration so we can +# scale the old and new cluster up/down one at a time whilst always keeping +# 3 masters running +replicas: 1 +minimumMasterNodes: 1 + +esMajorVersion: 6 + +extraEnvs: + - name: discovery.zen.ping.unicast.hosts + value: "my-release-elasticsearch-discovery" + +clusterName: "elasticsearch" +nodeGroup: "master" + +roles: + master: "true" + ingest: "false" + data: "false" + +volumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "standard" + resources: + requests: + storage: 4Gi diff --git a/charts/elasticsearch/examples/minikube/Makefile b/charts/elasticsearch/examples/minikube/Makefile new file mode 100644 index 0000000..1021d98 --- /dev/null +++ b/charts/elasticsearch/examples/minikube/Makefile @@ -0,0
+1,13 @@ +default: test + +RELEASE := helm-es-minikube +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ + +test: install + helm test $(RELEASE) + +purge: + helm del $(RELEASE) diff --git a/charts/elasticsearch/examples/minikube/README.md b/charts/elasticsearch/examples/minikube/README.md new file mode 100644 index 0000000..48eb228 --- /dev/null +++ b/charts/elasticsearch/examples/minikube/README.md @@ -0,0 +1,38 @@ +# Minikube + +This example deploys a 3-node Elasticsearch 6.8.22 cluster on [Minikube][] +using [custom values][]. + +If helm or kubectl timeouts occur, consider creating a minikube VM with more +CPU cores or memory allocated. + +Note that this configuration should be used for testing only and isn't +recommended for production. + + +## Requirements + +In order to properly support the required persistent volume claims for the +Elasticsearch StatefulSet, the `default-storageclass` and `storage-provisioner` +minikube addons must be enabled. + +``` +minikube addons enable default-storageclass +minikube addons enable storage-provisioner +``` + + +## Usage + +* Deploy the Elasticsearch chart with the example values: `make install` + +* You can now set up a port forward to query the Elasticsearch API: + + ``` + kubectl port-forward svc/elasticsearch-master 9200 + curl localhost:9200/_cat/indices + ``` + + +[custom values]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/minikube/values.yaml +[minikube]: https://minikube.sigs.k8s.io/docs/ diff --git a/charts/elasticsearch/examples/minikube/values.yaml b/charts/elasticsearch/examples/minikube/values.yaml new file mode 100644 index 0000000..ccceb3a --- /dev/null +++ b/charts/elasticsearch/examples/minikube/values.yaml @@ -0,0 +1,23 @@ +--- +# Permit co-located instances for solitary minikube virtual machines. +antiAffinity: "soft" + +# Shrink default JVM heap. +esJavaOpts: "-Xmx128m -Xms128m" + +# Allocate smaller chunks of memory per pod. +resources: + requests: + cpu: "100m" + memory: "512M" + limits: + cpu: "1000m" + memory: "512M" + +# Request smaller persistent volumes.
+volumeClaimTemplate: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "standard" + resources: + requests: + storage: 100M diff --git a/charts/elasticsearch/examples/multi/Makefile b/charts/elasticsearch/examples/multi/Makefile new file mode 100644 index 0000000..243e504 --- /dev/null +++ b/charts/elasticsearch/examples/multi/Makefile @@ -0,0 +1,19 @@ +default: test + +include ../../../helpers/examples.mk + +PREFIX := helm-es-multi +RELEASE := helm-es-multi-master +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values master.yaml $(PREFIX)-master ../../ + helm upgrade --wait --timeout=$(TIMEOUT) --install --values data.yaml $(PREFIX)-data ../../ + helm upgrade --wait --timeout=$(TIMEOUT) --install --values client.yaml $(PREFIX)-client ../../ + +test: install goss + +purge: + helm del $(PREFIX)-master + helm del $(PREFIX)-data + helm del $(PREFIX)-client diff --git a/charts/elasticsearch/examples/multi/README.md b/charts/elasticsearch/examples/multi/README.md new file mode 100644 index 0000000..43aa72c --- /dev/null +++ b/charts/elasticsearch/examples/multi/README.md @@ -0,0 +1,29 @@ +# Multi + +This example deploys an Elasticsearch 6.8.22 cluster composed of 3 different Helm +releases: + +- `helm-es-multi-master` for the 3 master nodes using [master values][] +- `helm-es-multi-data` for the 3 data nodes using [data values][] +- `helm-es-multi-client` for the 3 client nodes using [client values][] + +## Usage + +* Deploy the 3 Elasticsearch releases: `make install` + +* You can now set up a port forward to query the Elasticsearch API: + + ``` + kubectl port-forward svc/multi-master 9200 + curl -u elastic:changeme http://localhost:9200/_cat/indices + ``` + +## Testing + +You can also run [goss integration tests][] using `make test`. + + +[client values]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/multi/client.yaml +[data values]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/multi/data.yaml +[goss integration tests]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/multi/test/goss.yaml +[master values]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/multi/master.yaml diff --git a/charts/elasticsearch/examples/multi/client.yaml b/charts/elasticsearch/examples/multi/client.yaml new file mode 100644 index 0000000..e5d9f0e --- /dev/null +++ b/charts/elasticsearch/examples/multi/client.yaml @@ -0,0 +1,12 @@ +--- + +clusterName: "multi" +nodeGroup: "client" + +roles: + master: "false" + ingest: "false" + data: "false" + +persistence: + enabled: false diff --git a/charts/elasticsearch/examples/multi/data.yaml b/charts/elasticsearch/examples/multi/data.yaml new file mode 100644 index 0000000..ecc6893 --- /dev/null +++ b/charts/elasticsearch/examples/multi/data.yaml @@ -0,0 +1,9 @@ +--- + +clusterName: "multi" +nodeGroup: "data" + +roles: + master: "false" + ingest: "true" + data: "true" diff --git a/charts/elasticsearch/examples/multi/master.yaml b/charts/elasticsearch/examples/multi/master.yaml new file mode 100644 index 0000000..2ca4cca --- /dev/null +++ b/charts/elasticsearch/examples/multi/master.yaml @@ -0,0 +1,9 @@ +--- + +clusterName: "multi" +nodeGroup: "master" + +roles: + master: "true" + ingest: "false" + data: "false" diff --git a/charts/elasticsearch/examples/multi/test/goss.yaml b/charts/elasticsearch/examples/multi/test/goss.yaml new file mode 100644 index 0000000..794416b --- /dev/null +++ b/charts/elasticsearch/examples/multi/test/goss.yaml @@ -0,0
+1,9 @@ +http: + http://localhost:9200/_cluster/health: + status: 200 + timeout: 2000 + body: + - 'green' + - '"cluster_name":"multi"' + - '"number_of_nodes":9' + - '"number_of_data_nodes":3' diff --git a/charts/elasticsearch/examples/networkpolicy/Makefile b/charts/elasticsearch/examples/networkpolicy/Makefile new file mode 100644 index 0000000..e7b20c5 --- /dev/null +++ b/charts/elasticsearch/examples/networkpolicy/Makefile @@ -0,0 +1,14 @@ +default: test + +include ../../../helpers/examples.mk + +RELEASE := helm-es-networkpolicy +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ + +test: install goss + +purge: + helm del $(RELEASE) diff --git a/charts/elasticsearch/examples/networkpolicy/values.yaml b/charts/elasticsearch/examples/networkpolicy/values.yaml new file mode 100644 index 0000000..1963d20 --- /dev/null +++ b/charts/elasticsearch/examples/networkpolicy/values.yaml @@ -0,0 +1,37 @@ +networkPolicy: + http: + enabled: true + explicitNamespacesSelector: + # Accept connections from namespaces matching all these rules (from whitelisted Pods) + matchLabels: + role: frontend-http + matchExpressions: + - {key: role, operator: In, values: [frontend-http]} + additionalRules: + - podSelector: + matchLabels: + role: frontend-http + - podSelector: + matchExpressions: + - key: role + operator: In + values: + - frontend-http + transport: + enabled: true + allowExternal: true + explicitNamespacesSelector: + matchLabels: + role: frontend-transport + matchExpressions: + - {key: role, operator: In, values: [frontend-transport]} + additionalRules: + - podSelector: + matchLabels: + role: frontend-transport + - podSelector: + matchExpressions: + - key: role + operator: In + values: + - frontend-transport diff --git a/charts/elasticsearch/examples/openshift/Makefile b/charts/elasticsearch/examples/openshift/Makefile new file mode 100644 index 0000000..078c33c --- /dev/null +++ b/charts/elasticsearch/examples/openshift/Makefile @@ -0,0 +1,13 @@ +default: test + +include ../../../helpers/examples.mk + +RELEASE := elasticsearch +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ + +test: install goss + +purge: + helm del $(RELEASE) diff --git a/charts/elasticsearch/examples/openshift/README.md b/charts/elasticsearch/examples/openshift/README.md new file mode 100644 index 0000000..d8ec343 --- /dev/null +++ b/charts/elasticsearch/examples/openshift/README.md @@ -0,0 +1,24 @@ +# OpenShift + +This example deploys a 3-node Elasticsearch 6.8.22 cluster on [OpenShift][] +using [custom values][].
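+ +OpenShift's restricted SCC assigns container UIDs at admission time, which is +why the [custom values][] null out `runAsUser` and `fsGroup` instead of +hard-coding UID 1000; after install you can inspect what was actually assigned +(the pod name assumes a default `elasticsearch-master` deployment): + + ``` + kubectl get pod elasticsearch-master-0 -o jsonpath='{.spec.securityContext}' + ```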
+ +## Usage + +* Deploy the Elasticsearch chart with the example values: `make install` + +* You can now set up a port forward to query the Elasticsearch API: + + ``` + kubectl port-forward svc/elasticsearch-master 9200 + curl localhost:9200/_cat/indices + ``` + +## Testing + +You can also run [goss integration tests][] using `make test`. + + +[custom values]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/openshift/values.yaml +[goss integration tests]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/openshift/test/goss.yaml +[openshift]: https://www.openshift.com/ diff --git a/charts/elasticsearch/examples/openshift/test/goss.yaml b/charts/elasticsearch/examples/openshift/test/goss.yaml new file mode 100644 index 0000000..5c69b82 --- /dev/null +++ b/charts/elasticsearch/examples/openshift/test/goss.yaml @@ -0,0 +1,16 @@ +http: + http://localhost:9200/_cluster/health: + status: 200 + timeout: 2000 + body: + - "green" + - '"number_of_nodes":3' + - '"number_of_data_nodes":3' + + http://localhost:9200: + status: 200 + timeout: 2000 + body: + - '"number" : "6.8.22"' + - '"cluster_name" : "elasticsearch"' + - "You Know, for Search" diff --git a/charts/elasticsearch/examples/openshift/values.yaml b/charts/elasticsearch/examples/openshift/values.yaml new file mode 100644 index 0000000..8a21126 --- /dev/null +++ b/charts/elasticsearch/examples/openshift/values.yaml @@ -0,0 +1,11 @@ +--- + +securityContext: + runAsUser: null + +podSecurityContext: + fsGroup: null + runAsUser: null + +sysctlInitContainer: + enabled: false diff --git a/charts/elasticsearch/examples/oss/Makefile b/charts/elasticsearch/examples/oss/Makefile new file mode 100644 index 0000000..e7425e7 --- /dev/null +++ b/charts/elasticsearch/examples/oss/Makefile @@ -0,0 +1,14 @@ +default: test + +include ../../../helpers/examples.mk + +RELEASE := helm-es-oss +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ + +test: install goss + +purge: + helm del $(RELEASE) diff --git a/charts/elasticsearch/examples/oss/README.md b/charts/elasticsearch/examples/oss/README.md new file mode 100644 index 0000000..1fd4222 --- /dev/null +++ b/charts/elasticsearch/examples/oss/README.md @@ -0,0 +1,23 @@ +# OSS + +This example deploys a 3-node Elasticsearch 6.8.22 cluster using the +[Elasticsearch OSS][] distribution.
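+ +Switching to OSS is just an image override; the equivalent of `make install` +with inline flags would be something like the following (a sketch mirroring the +example's `values.yaml` and the Makefile's release name): + + ``` + helm upgrade --wait --install --set clusterName=oss --set image=docker.elastic.co/elasticsearch/elasticsearch-oss helm-es-oss ../../ + ```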
+ +## Usage + +* Deploy the Elasticsearch chart with the example values: `make install` + +* You can now set up a port forward to query the Elasticsearch API: + + ``` + kubectl port-forward svc/oss-master 9200 + curl localhost:9200/_cat/indices + ``` + +## Testing + +You can also run [goss integration tests][] using `make test`. + + +[elasticsearch oss]: https://www.elastic.co/downloads/elasticsearch-oss +[goss integration tests]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/oss/test/goss.yaml diff --git a/charts/elasticsearch/examples/oss/test/goss.yaml b/charts/elasticsearch/examples/oss/test/goss.yaml new file mode 100644 index 0000000..021696f --- /dev/null +++ b/charts/elasticsearch/examples/oss/test/goss.yaml @@ -0,0 +1,16 @@ +http: + http://localhost:9200/_cluster/health: + status: 200 + timeout: 2000 + body: + - 'green' + - '"number_of_nodes":3' + - '"number_of_data_nodes":3' + + http://localhost:9200: + status: 200 + timeout: 2000 + body: + - '"number" : "6.8.22"' + - '"cluster_name" : "oss"' + - 'You Know, for Search' diff --git a/charts/elasticsearch/examples/oss/values.yaml b/charts/elasticsearch/examples/oss/values.yaml new file mode 100644 index 0000000..adcb7df --- /dev/null +++ b/charts/elasticsearch/examples/oss/values.yaml @@ -0,0 +1,4 @@ +--- + +clusterName: "oss" +image: "docker.elastic.co/elasticsearch/elasticsearch-oss" diff --git a/charts/elasticsearch/examples/security/Makefile b/charts/elasticsearch/examples/security/Makefile new file mode 100644 index 0000000..beddbef --- /dev/null +++ b/charts/elasticsearch/examples/security/Makefile @@ -0,0 +1,38 @@ +default: test + +include ../../../helpers/examples.mk + +RELEASE := helm-es-security +ELASTICSEARCH_IMAGE := docker.elastic.co/elasticsearch/elasticsearch:$(STACK_VERSION) +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ + +test: secrets install goss + +purge: + kubectl delete secrets elastic-credentials elastic-certificates elastic-certificate-pem elastic-certificate-crt || true + helm del $(RELEASE) + +pull-elasticsearch-image: + docker pull $(ELASTICSEARCH_IMAGE) + +secrets: + docker rm -f elastic-helm-charts-certs || true + rm -f elastic-certificates.p12 elastic-certificate.pem elastic-certificate.crt elastic-stack-ca.p12 || true + password=$$([ !
-z "$$ELASTIC_PASSWORD" ] && echo $$ELASTIC_PASSWORD || echo $$(docker run --rm busybox:1.31.1 /bin/sh -c "< /dev/urandom tr -cd '[:alnum:]' | head -c20")) && \ + docker run --name elastic-helm-charts-certs -i -w /app \ + $(ELASTICSEARCH_IMAGE) \ + /bin/sh -c " \ + elasticsearch-certutil ca --out /app/elastic-stack-ca.p12 --pass '' && \ + elasticsearch-certutil cert --name security-master --dns security-master --ca /app/elastic-stack-ca.p12 --pass '' --ca-pass '' --out /app/elastic-certificates.p12" && \ + docker cp elastic-helm-charts-certs:/app/elastic-certificates.p12 ./ && \ + docker rm -f elastic-helm-charts-certs && \ + openssl pkcs12 -nodes -passin pass:'' -in elastic-certificates.p12 -out elastic-certificate.pem && \ + openssl x509 -outform der -in elastic-certificate.pem -out elastic-certificate.crt && \ + kubectl create secret generic elastic-certificates --from-file=elastic-certificates.p12 && \ + kubectl create secret generic elastic-certificate-pem --from-file=elastic-certificate.pem && \ + kubectl create secret generic elastic-certificate-crt --from-file=elastic-certificate.crt && \ + kubectl create secret generic elastic-credentials --from-literal=password=$$password --from-literal=username=elastic && \ + rm -f elastic-certificates.p12 elastic-certificate.pem elastic-certificate.crt elastic-stack-ca.p12 diff --git a/charts/elasticsearch/examples/security/README.md b/charts/elasticsearch/examples/security/README.md new file mode 100644 index 0000000..d353f41 --- /dev/null +++ b/charts/elasticsearch/examples/security/README.md @@ -0,0 +1,29 @@ +# Security + +This example deploy a 3 nodes Elasticsearch 6.8.22 with authentication and +autogenerated certificates for TLS (see [values][]). + +Note that this configuration should be used for test only. For a production +deployment you should generate SSL certificates following the [official docs][]. 
+ +## Usage + +* Create the required secrets: `make secrets` + +* Deploy the Elasticsearch chart with the example values: `make install` + +* You can now set up a port forward to query the Elasticsearch API, using the +password stored in the `elastic-credentials` secret (`-k` skips verification of +the autogenerated certificate): + + ``` + kubectl port-forward svc/security-master 9200 + curl -k -u elastic:<password> https://localhost:9200/_cat/indices + ``` + +## Testing + +You can also run [goss integration tests][] using `make test`. + + +[goss integration tests]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/security/test/goss.yaml +[official docs]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/configuring-tls.html#node-certificates +[values]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/security/values.yaml diff --git a/charts/elasticsearch/examples/security/test/goss.yaml b/charts/elasticsearch/examples/security/test/goss.yaml new file mode 100644 index 0000000..c52e05f --- /dev/null +++ b/charts/elasticsearch/examples/security/test/goss.yaml @@ -0,0 +1,44 @@ +http: + https://security-master:9200/_cluster/health: + status: 200 + timeout: 2000 + allow-insecure: true + username: elastic + password: "{{ .Env.ELASTIC_PASSWORD }}" + body: + - "green" + - '"number_of_nodes":3' + - '"number_of_data_nodes":3' + + https://localhost:9200/: + status: 200 + timeout: 2000 + allow-insecure: true + username: elastic + password: "{{ .Env.ELASTIC_PASSWORD }}" + body: + - '"cluster_name" : "security"' + - "You Know, for Search" + + https://localhost:9200/_xpack/license: + status: 200 + timeout: 2000 + allow-insecure: true + username: elastic + password: "{{ .Env.ELASTIC_PASSWORD }}" + body: + - "active" + - "basic" + +file: + /usr/share/elasticsearch/config/elasticsearch.yml: + exists: true + contains: + - "xpack.security.enabled: true" + - "xpack.security.transport.ssl.enabled: true" + - "xpack.security.transport.ssl.verification_mode: certificate" + - "xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12" + - "xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12" + - "xpack.security.http.ssl.enabled: true" + - "xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12" + - "xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12" diff --git a/charts/elasticsearch/examples/security/values.yaml b/charts/elasticsearch/examples/security/values.yaml new file mode 100644 index 0000000..ac26231 --- /dev/null +++ b/charts/elasticsearch/examples/security/values.yaml @@ -0,0 +1,33 @@ +--- +clusterName: "security" +nodeGroup: "master" + +roles: + master: "true" + ingest: "true" + data: "true" + +protocol: https + +esConfig: + elasticsearch.yml: | + xpack.security.enabled: true + xpack.security.transport.ssl.enabled: true + xpack.security.transport.ssl.verification_mode: certificate + xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 + xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 + xpack.security.http.ssl.enabled: true + xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 + xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12 + +extraEnvs: + - name: ELASTIC_PASSWORD + valueFrom: + secretKeyRef: + name: elastic-credentials + key: password + +secretMounts: + - name:
elastic-certificates + secretName: elastic-certificates + path: /usr/share/elasticsearch/config/certs diff --git a/charts/elasticsearch/examples/upgrade/Makefile b/charts/elasticsearch/examples/upgrade/Makefile new file mode 100644 index 0000000..aa26d23 --- /dev/null +++ b/charts/elasticsearch/examples/upgrade/Makefile @@ -0,0 +1,16 @@ +default: test + +include ../../../helpers/examples.mk + +CHART := elasticsearch +RELEASE := helm-es-upgrade +FROM := 6.8.9 # this is the first 6.x release + +install: + ../../../helpers/upgrade.sh --chart $(CHART) --release $(RELEASE) --from $(FROM) + kubectl rollout status statefulset upgrade-master + +test: install goss + +purge: + helm del $(RELEASE) diff --git a/charts/elasticsearch/examples/upgrade/README.md b/charts/elasticsearch/examples/upgrade/README.md new file mode 100644 index 0000000..85977f5 --- /dev/null +++ b/charts/elasticsearch/examples/upgrade/README.md @@ -0,0 +1,17 @@ +# Upgrade + +This example deploys a 3-node Elasticsearch cluster using an old chart version, +then upgrades it. + + +## Usage + +* Deploy and upgrade the Elasticsearch chart with the default values: `make install` + + +## Testing + +You can also run [goss integration tests][] using `make test`. + + +[goss integration tests]: https://github.com/elastic/helm-charts/tree/6.8/elasticsearch/examples/upgrade/test/goss.yaml diff --git a/charts/elasticsearch/examples/upgrade/test/goss.yaml b/charts/elasticsearch/examples/upgrade/test/goss.yaml new file mode 100644 index 0000000..3f7164c --- /dev/null +++ b/charts/elasticsearch/examples/upgrade/test/goss.yaml @@ -0,0 +1,16 @@ +http: + http://localhost:9200/_cluster/health: + status: 200 + timeout: 2000 + body: + - "green" + - '"number_of_nodes":3' + - '"number_of_data_nodes":3' + + http://localhost:9200: + status: 200 + timeout: 2000 + body: + - '"number" : "6.8.22"' + - '"cluster_name" : "upgrade"' + - "You Know, for Search" diff --git a/charts/elasticsearch/examples/upgrade/values.yaml b/charts/elasticsearch/examples/upgrade/values.yaml new file mode 100644 index 0000000..de0283a --- /dev/null +++ b/charts/elasticsearch/examples/upgrade/values.yaml @@ -0,0 +1,2 @@ +--- +clusterName: upgrade diff --git a/charts/elasticsearch/templates/NOTES.txt b/charts/elasticsearch/templates/NOTES.txt new file mode 100755 index 0000000..88b5dd5 --- /dev/null +++ b/charts/elasticsearch/templates/NOTES.txt @@ -0,0 +1,6 @@ +1. Watch all cluster members come up. + $ kubectl get pods --namespace={{ .Release.Namespace }} -l app={{ template "elasticsearch.uname" . }} -w +{{- if .Values.tests.enabled -}} +2. Test cluster health using Helm test. + $ helm --namespace={{ .Release.Namespace }} test {{ .Release.Name }} +{{- end -}} diff --git a/charts/elasticsearch/templates/_helpers.tpl b/charts/elasticsearch/templates/_helpers.tpl new file mode 100755 index 0000000..78fe567 --- /dev/null +++ b/charts/elasticsearch/templates/_helpers.tpl @@ -0,0 +1,57 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "elasticsearch.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}} +{{- define "elasticsearch.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "elasticsearch.uname" -}} +{{- if empty .Values.fullnameOverride -}} +{{- if empty .Values.nameOverride -}} +{{ .Values.clusterName }}-{{ .Values.nodeGroup }} +{{- else -}} +{{ .Values.nameOverride }}-{{ .Values.nodeGroup }} +{{- end -}} +{{- else -}} +{{ .Values.fullnameOverride }} +{{- end -}} +{{- end -}} + +{{- define "elasticsearch.masterService" -}} +{{- if empty .Values.masterService -}} +{{- if empty .Values.fullnameOverride -}} +{{- if empty .Values.nameOverride -}} +{{ .Values.clusterName }}-master +{{- else -}} +{{ .Values.nameOverride }}-master +{{- end -}} +{{- else -}} +{{ .Values.fullnameOverride }} +{{- end -}} +{{- else -}} +{{ .Values.masterService }} +{{- end -}} +{{- end -}} + +{{- define "elasticsearch.esMajorVersion" -}} +{{- if .Values.esMajorVersion -}} +{{ .Values.esMajorVersion }} +{{- else -}} +{{- $version := int (index (.Values.imageTag | splitList ".") 0) -}} + {{- if and (contains "docker.elastic.co/elasticsearch/elasticsearch" .Values.image) (not (eq $version 0)) -}} +{{ $version }} + {{- else -}} +6 + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/elasticsearch/templates/configmap.yaml b/charts/elasticsearch/templates/configmap.yaml new file mode 100644 index 0000000..4274d8b --- /dev/null +++ b/charts/elasticsearch/templates/configmap.yaml @@ -0,0 +1,16 @@ +{{- if .Values.esConfig }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "elasticsearch.uname" . }}-config + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" +data: +{{- range $path, $config := .Values.esConfig }} + {{ $path }}: | +{{ $config | indent 4 -}} +{{- end -}} +{{- end -}} diff --git a/charts/elasticsearch/templates/ingress.yaml b/charts/elasticsearch/templates/ingress.yaml new file mode 100644 index 0000000..bcb2bef --- /dev/null +++ b/charts/elasticsearch/templates/ingress.yaml @@ -0,0 +1,54 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "elasticsearch.uname" . -}} +{{- $httpPort := .Values.httpPort -}} +{{- $ingressPath := .Values.ingress.path -}} +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app: {{ .Chart.Name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- if .ingressPath }} + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- else }} +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + {{- if $ingressPath }} + - host: {{ . 
}} + http: + paths: + - path: {{ $ingressPath }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $httpPort }} + {{- else }} + - host: {{ .host }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ .servicePort | default $httpPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/elasticsearch/templates/networkpolicy.yaml b/charts/elasticsearch/templates/networkpolicy.yaml new file mode 100644 index 0000000..62bb1bd --- /dev/null +++ b/charts/elasticsearch/templates/networkpolicy.yaml @@ -0,0 +1,61 @@ +{{- if (or .Values.networkPolicy.http.enabled .Values.networkPolicy.transport.enabled) }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ template "elasticsearch.uname" . }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" +spec: + podSelector: + matchLabels: + app: "{{ template "elasticsearch.uname" . }}" + ingress: # Allow inbound connections + +{{- if .Values.networkPolicy.http.enabled }} + # For HTTP access + - ports: + - port: {{ .Values.httpPort }} + from: + # From authorized Pods (having the correct label) + - podSelector: + matchLabels: + {{ template "elasticsearch.uname" . }}-http-client: "true" +{{- with .Values.networkPolicy.http.explicitNamespacesSelector }} + # From authorized namespaces + namespaceSelector: +{{ toYaml . | indent 12 }} +{{- end }} +{{- with .Values.networkPolicy.http.additionalRules }} + # Or from custom additional rules +{{ toYaml . | indent 8 }} +{{- end }} +{{- end }} + +{{- if .Values.networkPolicy.transport.enabled }} + # For transport access + - ports: + - port: {{ .Values.transportPort }} + from: + # From authorized Pods (having the correct label) + - podSelector: + matchLabels: + {{ template "elasticsearch.uname" . }}-transport-client: "true" +{{- with .Values.networkPolicy.transport.explicitNamespacesSelector }} + # From authorized namespaces + namespaceSelector: +{{ toYaml . | indent 12 }} +{{- end }} +{{- with .Values.networkPolicy.transport.additionalRules }} + # Or from custom additional rules +{{ toYaml . | indent 8 }} +{{- end }} + # Or from other ElasticSearch Pods + - podSelector: + matchLabels: + app: "{{ template "elasticsearch.uname" . }}" +{{- end }} + +{{- end }} diff --git a/charts/elasticsearch/templates/poddisruptionbudget.yaml b/charts/elasticsearch/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..7f887da --- /dev/null +++ b/charts/elasticsearch/templates/poddisruptionbudget.yaml @@ -0,0 +1,12 @@ +--- +{{- if .Values.maxUnavailable }} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: "{{ template "elasticsearch.uname" . }}-pdb" +spec: + maxUnavailable: {{ .Values.maxUnavailable }} + selector: + matchLabels: + app: "{{ template "elasticsearch.uname" . }}" +{{- end }} diff --git a/charts/elasticsearch/templates/podsecuritypolicy.yaml b/charts/elasticsearch/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..d8b3545 --- /dev/null +++ b/charts/elasticsearch/templates/podsecuritypolicy.yaml @@ -0,0 +1,14 @@ +{{- if .Values.podSecurityPolicy.create -}} +{{- $fullName := include "elasticsearch.uname" . 
-}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ default $fullName .Values.podSecurityPolicy.name | quote }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +spec: +{{ toYaml .Values.podSecurityPolicy.spec | indent 2 }} +{{- end -}} diff --git a/charts/elasticsearch/templates/role.yaml b/charts/elasticsearch/templates/role.yaml new file mode 100644 index 0000000..d3a7ee3 --- /dev/null +++ b/charts/elasticsearch/templates/role.yaml @@ -0,0 +1,25 @@ +{{- if .Values.rbac.create -}} +{{- $fullName := include "elasticsearch.uname" . -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ $fullName | quote }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +rules: + - apiGroups: + - extensions + resources: + - podsecuritypolicies + resourceNames: + {{- if eq .Values.podSecurityPolicy.name "" }} + - {{ $fullName | quote }} + {{- else }} + - {{ .Values.podSecurityPolicy.name | quote }} + {{- end }} + verbs: + - use +{{- end -}} diff --git a/charts/elasticsearch/templates/rolebinding.yaml b/charts/elasticsearch/templates/rolebinding.yaml new file mode 100644 index 0000000..7a529d9 --- /dev/null +++ b/charts/elasticsearch/templates/rolebinding.yaml @@ -0,0 +1,24 @@ +{{- if .Values.rbac.create -}} +{{- $fullName := include "elasticsearch.uname" . -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ $fullName | quote }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +subjects: + - kind: ServiceAccount + {{- if eq .Values.rbac.serviceAccountName "" }} + name: {{ $fullName | quote }} + {{- else }} + name: {{ .Values.rbac.serviceAccountName | quote }} + {{- end }} + namespace: {{ .Release.Namespace | quote }} +roleRef: + kind: Role + name: {{ $fullName | quote }} + apiGroup: rbac.authorization.k8s.io +{{- end -}} diff --git a/charts/elasticsearch/templates/service.yaml b/charts/elasticsearch/templates/service.yaml new file mode 100644 index 0000000..1da6951 --- /dev/null +++ b/charts/elasticsearch/templates/service.yaml @@ -0,0 +1,77 @@ +--- +{{- if .Values.service.enabled -}} +kind: Service +apiVersion: v1 +metadata: +{{- if eq .Values.nodeGroup "master" }} + name: {{ template "elasticsearch.masterService" . }} +{{- else }} + name: {{ template "elasticsearch.uname" . }} +{{- end }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" +{{- if .Values.service.labels }} +{{ toYaml .Values.service.labels | indent 4}} +{{- end }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +spec: + type: {{ .Values.service.type }} + selector: + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . 
}}" + ports: + - name: {{ .Values.service.httpPortName | default "http" }} + protocol: TCP + port: {{ .Values.httpPort }} +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + - name: {{ .Values.service.transportPortName | default "transport" }} + protocol: TCP + port: {{ .Values.transportPort }} +{{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} +{{- end }} +{{- with .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml . | indent 4 }} +{{- end }} +{{- if .Values.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} +{{- end }} +{{- end }} +--- +kind: Service +apiVersion: v1 +metadata: +{{- if eq .Values.nodeGroup "master" }} + name: {{ template "elasticsearch.masterService" . }}-headless +{{- else }} + name: {{ template "elasticsearch.uname" . }}-headless +{{- end }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" +{{- if .Values.service.labelsHeadless }} +{{ toYaml .Values.service.labelsHeadless | indent 4 }} +{{- end }} + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + clusterIP: None # This is needed for statefulset hostnames like elasticsearch-0 to resolve + # Create endpoints also if the related pod isn't ready + publishNotReadyAddresses: true + selector: + app: "{{ template "elasticsearch.uname" . }}" + ports: + - name: {{ .Values.service.httpPortName | default "http" }} + port: {{ .Values.httpPort }} + - name: {{ .Values.service.transportPortName | default "transport" }} + port: {{ .Values.transportPort }} diff --git a/charts/elasticsearch/templates/serviceaccount.yaml b/charts/elasticsearch/templates/serviceaccount.yaml new file mode 100644 index 0000000..801d1cf --- /dev/null +++ b/charts/elasticsearch/templates/serviceaccount.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create -}} +{{- $fullName := include "elasticsearch.uname" . -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + {{- if eq .Values.rbac.serviceAccountName "" }} + name: {{ $fullName | quote }} + {{- else }} + name: {{ .Values.rbac.serviceAccountName | quote }} + {{- end }} + annotations: + {{- with .Values.rbac.serviceAccountAnnotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +{{- end -}} diff --git a/charts/elasticsearch/templates/statefulset.yaml b/charts/elasticsearch/templates/statefulset.yaml new file mode 100644 index 0000000..5d58c39 --- /dev/null +++ b/charts/elasticsearch/templates/statefulset.yaml @@ -0,0 +1,432 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "elasticsearch.uname" . }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" + {{- range $key, $value := .Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + annotations: + esMajorVersion: "{{ include "elasticsearch.esMajorVersion" . }}" +spec: + serviceName: {{ template "elasticsearch.uname" . }}-headless + selector: + matchLabels: + app: "{{ template "elasticsearch.uname" . 
}}" + replicas: {{ .Values.replicas }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + type: {{ .Values.updateStrategy }} + {{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: {{ template "elasticsearch.uname" . }} + {{- if .Values.persistence.labels.enabled }} + labels: + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" + {{- range $key, $value := .Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- with .Values.persistence.annotations }} + annotations: +{{ toYaml . | indent 8 }} + {{- end }} + spec: +{{ toYaml .Values.volumeClaimTemplate | indent 6 }} + {{- end }} + template: + metadata: + name: "{{ template "elasticsearch.uname" . }}" + labels: + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" + {{- range $key, $value := .Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + annotations: + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{/* This forces a restart if the configmap has changed */}} + {{- if .Values.esConfig }} + configchecksum: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum | trunc 63 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + securityContext: +{{ toYaml .Values.podSecurityContext | indent 8 }} + {{- if .Values.fsGroup }} + fsGroup: {{ .Values.fsGroup }} # Deprecated value, please use .Values.podSecurityContext.fsGroup + {{- end }} + {{- if .Values.rbac.create }} + serviceAccountName: "{{ template "elasticsearch.uname" . }}" + {{- else if not (eq .Values.rbac.serviceAccountName "") }} + serviceAccountName: {{ .Values.rbac.serviceAccountName | quote }} + {{- end }} + automountServiceAccountToken: {{ .Values.rbac.automountToken }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if or (eq .Values.antiAffinity "hard") (eq .Values.antiAffinity "soft") .Values.nodeAffinity }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + affinity: + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - "{{ template "elasticsearch.uname" .}}" + topologyKey: {{ .Values.antiAffinityTopologyKey }} + {{- else if eq .Values.antiAffinity "soft" }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: {{ .Values.antiAffinityTopologyKey }} + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - "{{ template "elasticsearch.uname" . }}" + {{- end }} + {{- with .Values.nodeAffinity }} + nodeAffinity: +{{ toYaml . | indent 10 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + volumes: + {{- range .Values.secretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- if .defaultMode }} + defaultMode: {{ .defaultMode }} + {{- end }} + {{- end }} + {{- if .Values.esConfig }} + - name: esconfig + configMap: + name: {{ template "elasticsearch.uname" . 
}}-config + {{- end }} +{{- if .Values.keystore }} + - name: keystore + emptyDir: {} + {{- range .Values.keystore }} + - name: keystore-{{ .secretName }} + secret: {{ toYaml . | nindent 12 }} + {{- end }} +{{ end }} + {{- if .Values.extraVolumes }} + # Currently some extra blocks accept strings + # to continue with backwards compatibility this is being kept + # whilst also allowing for yaml to be specified too. + {{- if eq "string" (printf "%T" .Values.extraVolumes) }} +{{ tpl .Values.extraVolumes . | indent 8 }} + {{- else }} +{{ toYaml .Values.extraVolumes | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + enableServiceLinks: {{ .Values.enableServiceLinks }} + {{- if .Values.hostAliases }} + hostAliases: {{ toYaml .Values.hostAliases | nindent 8 }} + {{- end }} + {{- if or (.Values.extraInitContainers) (.Values.sysctlInitContainer.enabled) (.Values.keystore) }} + initContainers: + {{- if .Values.sysctlInitContainer.enabled }} + - name: configure-sysctl + securityContext: + runAsUser: 0 + privileged: true + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + command: ["sysctl", "-w", "vm.max_map_count={{ .Values.sysctlVmMaxMapCount}}"] + resources: +{{ toYaml .Values.initResources | indent 10 }} + {{- end }} +{{ if .Values.keystore }} + - name: keystore + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + command: + - sh + - -c + - | + #!/usr/bin/env bash + set -euo pipefail + + elasticsearch-keystore create + + for i in /tmp/keystoreSecrets/*/*; do + key=$(basename $i) + echo "Adding file $i to keystore key $key" + elasticsearch-keystore add-file "$key" "$i" + done + + # Add the bootstrap password since otherwise the Elasticsearch entrypoint tries to do this on startup + if [ ! -z ${ELASTIC_PASSWORD+x} ]; then + echo 'Adding env $ELASTIC_PASSWORD to keystore as key bootstrap.password' + echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x bootstrap.password + fi + + cp -a /usr/share/elasticsearch/config/elasticsearch.keystore /tmp/keystore/ + env: {{ toYaml .Values.extraEnvs | nindent 10 }} + envFrom: {{ toYaml .Values.envFrom | nindent 10 }} + resources: {{ toYaml .Values.initResources | nindent 10 }} + volumeMounts: + - name: keystore + mountPath: /tmp/keystore + {{- range .Values.keystore }} + - name: keystore-{{ .secretName }} + mountPath: /tmp/keystoreSecrets/{{ .secretName }} + {{- end }} +{{ end }} + {{- if .Values.extraInitContainers }} + # Currently some extra blocks accept strings + # to continue with backwards compatibility this is being kept + # whilst also allowing for yaml to be specified too. + {{- if eq "string" (printf "%T" .Values.extraInitContainers) }} +{{ tpl .Values.extraInitContainers . | indent 6 }} + {{- else }} +{{ toYaml .Values.extraInitContainers | indent 6 }} + {{- end }} + {{- end }} + {{- end }} + containers: + - name: "{{ template "elasticsearch.name" . 
}}" + securityContext: +{{ toYaml .Values.securityContext | indent 10 }} + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + readinessProbe: + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + # If the node is starting up wait for the cluster to be ready (request params: "{{ .Values.clusterHealthCheckParams }}" ) + # Once it has started only check that the node itself is responding + START_FILE=/tmp/.es_start_file + + # Disable nss cache to avoid filling dentry cache when calling curl + # This is required with Elasticsearch Docker using nss < 3.52 + export NSS_SDB_USE_CACHE=no + + http () { + local path="${1}" + local args="${2}" + set -- -XGET -s + + if [ "$args" != "" ]; then + set -- "$@" $args + fi + + if [ -n "${ELASTIC_PASSWORD}" ]; then + set -- "$@" -u "elastic:${ELASTIC_PASSWORD}" + fi + + curl --output /dev/null -k "$@" "{{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}${path}" + } + + if [ -f "${START_FILE}" ]; then + echo 'Elasticsearch is already running, lets check the node is healthy' + HTTP_CODE=$(http "/" "-w %{http_code}") + RC=$? + if [[ ${RC} -ne 0 ]]; then + echo "curl --output /dev/null -k -XGET -s -w '%{http_code}' \${BASIC_AUTH} {{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}/ failed with RC ${RC}" + exit ${RC} + fi + # ready if HTTP code 200, 503 is tolerable if ES version is 6.x + if [[ ${HTTP_CODE} == "200" ]]; then + exit 0 + elif [[ ${HTTP_CODE} == "503" && "{{ include "elasticsearch.esMajorVersion" . }}" == "6" ]]; then + exit 0 + else + echo "curl --output /dev/null -k -XGET -s -w '%{http_code}' \${BASIC_AUTH} {{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}/ failed with HTTP code ${HTTP_CODE}" + exit 1 + fi + + else + echo 'Waiting for elasticsearch cluster to become ready (request params: "{{ .Values.clusterHealthCheckParams }}" )' + if http "/_cluster/health?{{ .Values.clusterHealthCheckParams }}" "--fail" ; then + touch ${START_FILE} + exit 0 + else + echo 'Cluster is not yet ready (request params: "{{ .Values.clusterHealthCheckParams }}" )' + exit 1 + fi + fi +{{ toYaml .Values.readinessProbe | indent 10 }} + ports: + - name: http + containerPort: {{ .Values.httpPort }} + - name: transport + containerPort: {{ .Values.transportPort }} + resources: +{{ toYaml .Values.resources | indent 10 }} + env: + - name: node.name + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if eq .Values.roles.master "true" }} + - name: discovery.zen.minimum_master_nodes + value: "{{ .Values.minimumMasterNodes }}" + {{- end }} + {{- if lt (int (include "elasticsearch.esMajorVersion" .)) 7 }} + - name: discovery.zen.ping.unicast.hosts + value: "{{ template "elasticsearch.masterService" . }}-headless" + {{- else }} + - name: discovery.seed_hosts + value: "{{ template "elasticsearch.masterService" . }}-headless" + {{- end }} + - name: cluster.name + value: "{{ .Values.clusterName }}" + - name: network.host + value: "{{ .Values.networkHost }}" + {{- if .Values.esJavaOpts }} + - name: ES_JAVA_OPTS + value: "{{ .Values.esJavaOpts }}" + {{- end }} + {{- range $role, $enabled := .Values.roles }} + - name: node.{{ $role }} + value: "{{ $enabled }}" + {{- end }} +{{- if .Values.extraEnvs }} +{{ toYaml .Values.extraEnvs | indent 10 }} +{{- end }} +{{- if .Values.envFrom }} + envFrom: +{{ toYaml .Values.envFrom | indent 10 }} +{{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: "{{ template "elasticsearch.uname" . 
}}" + mountPath: /usr/share/elasticsearch/data + {{- end }} +{{ if .Values.keystore }} + - name: keystore + mountPath: /usr/share/elasticsearch/config/elasticsearch.keystore + subPath: elasticsearch.keystore +{{ end }} + {{- range .Values.secretMounts }} + - name: {{ .name }} + mountPath: {{ .path }} + {{- if .subPath }} + subPath: {{ .subPath }} + {{- end }} + {{- end }} + {{- range $path, $config := .Values.esConfig }} + - name: esconfig + mountPath: /usr/share/elasticsearch/config/{{ $path }} + subPath: {{ $path }} + {{- end -}} + {{- if .Values.extraVolumeMounts }} + # Currently some extra blocks accept strings + # to continue with backwards compatibility this is being kept + # whilst also allowing for yaml to be specified too. + {{- if eq "string" (printf "%T" .Values.extraVolumeMounts) }} +{{ tpl .Values.extraVolumeMounts . | indent 10 }} + {{- else }} +{{ toYaml .Values.extraVolumeMounts | indent 10 }} + {{- end }} + {{- end }} + {{- if .Values.masterTerminationFix }} + {{- if eq .Values.roles.master "true" }} + # This sidecar will prevent slow master re-election + # https://github.com/elastic/helm-charts/issues/63 + - name: elasticsearch-master-graceful-termination-handler + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + command: + - "sh" + - -c + - | + #!/usr/bin/env bash + set -eo pipefail + + http () { + local path="${1}" + if [ -n "${ELASTIC_USERNAME}" ] && [ -n "${ELASTIC_PASSWORD}" ]; then + BASIC_AUTH="-u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}" + else + BASIC_AUTH='' + fi + curl -XGET -s -k --fail ${BASIC_AUTH} {{ .Values.protocol }}://{{ template "elasticsearch.masterService" . }}:{{ .Values.httpPort }}${path} + } + + cleanup () { + while true ; do + local master="$(http "/_cat/master?h=node" || echo "")" + if [[ $master == "{{ template "elasticsearch.masterService" . }}"* && $master != "${NODE_NAME}" ]]; then + echo "This node is not master." + break + fi + echo "This node is still master, waiting gracefully for it to step down" + sleep 1 + done + + exit 0 + } + + trap cleanup SIGTERM + + sleep infinity & + wait $! + resources: +{{ toYaml .Values.sidecarResources | indent 10 }} + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if .Values.extraEnvs }} +{{ toYaml .Values.extraEnvs | indent 10 }} + {{- end }} + {{- if .Values.envFrom }} + envFrom: +{{ toYaml .Values.envFrom | indent 10 }} + {{- end }} + {{- end }} + {{- end }} +{{- if .Values.lifecycle }} + lifecycle: +{{ toYaml .Values.lifecycle | indent 10 }} +{{- end }} + {{- if .Values.extraContainers }} + # Currently some extra blocks accept strings + # to continue with backwards compatibility this is being kept + # whilst also allowing for yaml to be specified too. + {{- if eq "string" (printf "%T" .Values.extraContainers) }} +{{ tpl .Values.extraContainers . 
| indent 6 }} + {{- else }} +{{ toYaml .Values.extraContainers | indent 6 }} + {{- end }} + {{- end }} diff --git a/charts/elasticsearch/templates/test/test-elasticsearch-health.yaml b/charts/elasticsearch/templates/test/test-elasticsearch-health.yaml new file mode 100644 index 0000000..704cd3d --- /dev/null +++ b/charts/elasticsearch/templates/test/test-elasticsearch-health.yaml @@ -0,0 +1,36 @@ +--- +{{- if .Values.tests.enabled -}} +apiVersion: v1 +kind: Pod +metadata: +{{- if .Values.healthNameOverride }} + name: {{ .Values.healthNameOverride | quote }} +{{- else }} + name: "{{ .Release.Name }}-{{ randAlpha 5 | lower }}-test" +{{- end }} + annotations: + "helm.sh/hook": test + "helm.sh/hook-delete-policy": hook-succeeded +spec: + securityContext: +{{ toYaml .Values.podSecurityContext | indent 4 }} + containers: +{{- if .Values.healthNameOverride }} + - name: {{ .Values.healthNameOverride | quote }} +{{- else }} + - name: "{{ .Release.Name }}-{{ randAlpha 5 | lower }}-test" +{{- end }} + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + command: + - "sh" + - "-c" + - | + #!/usr/bin/env bash -e + curl -XGET --fail '{{ template "elasticsearch.uname" . }}:{{ .Values.httpPort }}/_cluster/health?{{ .Values.clusterHealthCheckParams }}' + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 4 }} + {{- end }} + restartPolicy: Never +{{- end -}} diff --git a/charts/elasticsearch/tests/elasticsearch_test.py b/charts/elasticsearch/tests/elasticsearch_test.py new file mode 100755 index 0000000..a29edef --- /dev/null +++ b/charts/elasticsearch/tests/elasticsearch_test.py @@ -0,0 +1,1485 @@ +import os +import sys + +sys.path.insert(1, os.path.join(sys.path[0], "../../helpers")) +from helpers import helm_template +import yaml + +clusterName = "elasticsearch" +nodeGroup = "master" +uname = clusterName + "-" + nodeGroup + + +def test_defaults(): + config = """ + """ + + r = helm_template(config) + + # Statefulset + assert r["statefulset"][uname]["spec"]["replicas"] == 3 + assert r["statefulset"][uname]["spec"]["updateStrategy"] == { + "type": "RollingUpdate" + } + assert r["statefulset"][uname]["spec"]["podManagementPolicy"] == "Parallel" + assert r["statefulset"][uname]["spec"]["serviceName"] == uname + "-headless" + assert r["statefulset"][uname]["spec"]["template"]["spec"]["affinity"][ + "podAntiAffinity" + ]["requiredDuringSchedulingIgnoredDuringExecution"][0] == { + "labelSelector": { + "matchExpressions": [{"key": "app", "operator": "In", "values": [uname]}] + }, + "topologyKey": "kubernetes.io/hostname", + } + + # Default environment variables + env_vars = [ + { + "name": "node.name", + "valueFrom": {"fieldRef": {"fieldPath": "metadata.name"}}, + }, + {"name": "discovery.zen.ping.unicast.hosts", "value": uname + "-headless"}, + {"name": "network.host", "value": "0.0.0.0"}, + {"name": "cluster.name", "value": clusterName}, + {"name": "node.master", "value": "true"}, + {"name": "node.data", "value": "true"}, + {"name": "node.ingest", "value": "true"}, + ] + + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + for env in env_vars: + assert env in c["env"] + + # Image + assert c["image"].startswith("docker.elastic.co/elasticsearch/elasticsearch:") + assert c["imagePullPolicy"] == "IfNotPresent" + assert c["name"] == "elasticsearch" + + # Ports + assert c["ports"][0] == {"name": "http", "containerPort": 9200} + assert c["ports"][1] == {"name": "transport", "containerPort": 
9300} + + # Health checks + assert c["readinessProbe"]["failureThreshold"] == 3 + assert c["readinessProbe"]["initialDelaySeconds"] == 10 + assert c["readinessProbe"]["periodSeconds"] == 10 + assert c["readinessProbe"]["successThreshold"] == 3 + assert c["readinessProbe"]["timeoutSeconds"] == 5 + + assert "curl" in c["readinessProbe"]["exec"]["command"][-1] + assert "http://127.0.0.1:9200" in c["readinessProbe"]["exec"]["command"][-1] + + # Resources + assert c["resources"] == { + "requests": {"cpu": "1000m", "memory": "2Gi"}, + "limits": {"cpu": "1000m", "memory": "2Gi"}, + } + + # Mounts + assert c["volumeMounts"][0]["mountPath"] == "/usr/share/elasticsearch/data" + assert c["volumeMounts"][0]["name"] == uname + + # volumeClaimTemplates + v = r["statefulset"][uname]["spec"]["volumeClaimTemplates"][0] + assert v["metadata"]["name"] == uname + assert "labels" not in v["metadata"] + assert v["spec"]["accessModes"] == ["ReadWriteOnce"] + assert v["spec"]["resources"]["requests"]["storage"] == "30Gi" + + # Init container + i = r["statefulset"][uname]["spec"]["template"]["spec"]["initContainers"][0] + assert i["name"] == "configure-sysctl" + assert i["command"] == ["sysctl", "-w", "vm.max_map_count=262144"] + assert i["image"].startswith("docker.elastic.co/elasticsearch/elasticsearch:") + assert i["securityContext"] == {"privileged": True, "runAsUser": 0} + + # Other + assert r["statefulset"][uname]["spec"]["template"]["spec"]["securityContext"] == { + "fsGroup": 1000, + "runAsUser": 1000, + } + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"][ + "terminationGracePeriodSeconds" + ] + == 120 + ) + + # Pod disruption budget + assert r["poddisruptionbudget"][uname + "-pdb"]["spec"]["maxUnavailable"] == 1 + + # Service + s = r["service"][uname] + assert s["metadata"]["name"] == uname + assert s["metadata"]["annotations"] == {} + assert s["spec"]["type"] == "ClusterIP" + assert len(s["spec"]["ports"]) == 2 + assert s["spec"]["ports"][0] == {"name": "http", "port": 9200, "protocol": "TCP"} + assert s["spec"]["ports"][1] == { + "name": "transport", + "port": 9300, + "protocol": "TCP", + } + assert "loadBalancerSourceRanges" not in s["spec"] + + # Headless Service + h = r["service"][uname + "-headless"] + assert h["spec"]["clusterIP"] == "None" + assert h["spec"]["publishNotReadyAddresses"] == True + assert h["spec"]["ports"][0]["name"] == "http" + assert h["spec"]["ports"][0]["port"] == 9200 + assert h["spec"]["ports"][1]["name"] == "transport" + assert h["spec"]["ports"][1]["port"] == 9300 + + # Empty customizable defaults + assert "imagePullSecrets" not in r["statefulset"][uname]["spec"]["template"]["spec"] + assert "tolerations" not in r["statefulset"][uname]["spec"]["template"]["spec"] + assert "nodeSelector" not in r["statefulset"][uname]["spec"]["template"]["spec"] + assert "ingress" not in r + assert "hostAliases" not in r["statefulset"][uname]["spec"]["template"]["spec"] + + +def test_increasing_the_replicas(): + config = """ +replicas: 5 +""" + r = helm_template(config) + assert r["statefulset"][uname]["spec"]["replicas"] == 5 + + +def test_disabling_pod_disruption_budget(): + config = """ +maxUnavailable: false +""" + r = helm_template(config) + assert "poddisruptionbudget" not in r + + +def test_overriding_the_image_and_tag(): + config = """ +image: customImage +imageTag: 6.2.4 +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0]["image"] + == "customImage:6.2.4" + ) + + +def 
test_set_discovery_hosts_to_custom_master_service(): + config = """ +esMajorVersion: 6 +masterService: "elasticsearch-custommaster" +""" + r = helm_template(config) + env = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0]["env"] + assert { + "name": "discovery.zen.ping.unicast.hosts", + "value": "elasticsearch-custommaster-headless", + } in env + + +def test_set_master_service_to_default_nodegroup_name_if_not_set(): + config = """ +esMajorVersion: 6 +nodeGroup: "data" +""" + r = helm_template(config) + env = r["statefulset"]["elasticsearch-data"]["spec"]["template"]["spec"][ + "containers" + ][0]["env"] + assert { + "name": "discovery.zen.ping.unicast.hosts", + "value": "elasticsearch-master-headless", + } in env + + +def test_set_master_service_to_default_nodegroup_name_with_custom_cluster_name(): + config = """ +esMajorVersion: 6 +clusterName: "custom" +nodeGroup: "data" +""" + r = helm_template(config) + env = r["statefulset"]["custom-data"]["spec"]["template"]["spec"]["containers"][0][ + "env" + ] + assert { + "name": "discovery.zen.ping.unicast.hosts", + "value": "custom-master-headless", + } in env + + +def test_enabling_machine_learning_role(): + config = """ +roles: + ml: "true" +""" + r = helm_template(config) + env = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0]["env"] + + assert {"name": "node.ml", "value": "true"} in env + + +def test_adding_extra_env_vars(): + config = """ +extraEnvs: + - name: hello + value: world +""" + r = helm_template(config) + env = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0]["env"] + assert {"name": "hello", "value": "world"} in env + + +def test_adding_env_from(): + config = """ +envFrom: +- secretRef: + name: secret-name +""" + r = helm_template(config) + secretRef = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0][ + "envFrom" + ][0]["secretRef"] + assert secretRef == {"name": "secret-name"} + + +def test_adding_a_extra_volume_with_volume_mount(): + config = """ +extraVolumes: | + - name: extras + emptyDir: {} +extraVolumeMounts: | + - name: extras + mountPath: /usr/share/extras + readOnly: true +""" + r = helm_template(config) + extraVolume = r["statefulset"][uname]["spec"]["template"]["spec"]["volumes"] + assert {"name": "extras", "emptyDir": {}} in extraVolume + extraVolumeMounts = r["statefulset"][uname]["spec"]["template"]["spec"][ + "containers" + ][0]["volumeMounts"] + assert { + "name": "extras", + "mountPath": "/usr/share/extras", + "readOnly": True, + } in extraVolumeMounts + + +def test_adding_a_extra_volume_with_volume_mount_as_yaml(): + config = """ +extraVolumes: + - name: extras + emptyDir: {} +extraVolumeMounts: + - name: extras + mountPath: /usr/share/extras + readOnly: true +""" + r = helm_template(config) + extraVolume = r["statefulset"][uname]["spec"]["template"]["spec"]["volumes"] + assert {"name": "extras", "emptyDir": {}} in extraVolume + extraVolumeMounts = r["statefulset"][uname]["spec"]["template"]["spec"][ + "containers" + ][0]["volumeMounts"] + assert { + "name": "extras", + "mountPath": "/usr/share/extras", + "readOnly": True, + } in extraVolumeMounts + + +def test_adding_a_extra_container(): + config = """ +extraContainers: | + - name: do-something + image: busybox + command: ['do', 'something'] +""" + r = helm_template(config) + extraContainer = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"] + assert { + "name": "do-something", + "image": "busybox", + "command": ["do", "something"], + } in extraContainer + + +def 
test_adding_a_extra_container_as_yaml(): + config = """ +extraContainers: + - name: do-something + image: busybox + command: ['do', 'something'] +""" + r = helm_template(config) + extraContainer = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"] + assert { + "name": "do-something", + "image": "busybox", + "command": ["do", "something"], + } in extraContainer + + +def test_adding_a_extra_init_container(): + config = """ +extraInitContainers: | + - name: do-something + image: busybox + command: ['do', 'something'] +""" + r = helm_template(config) + extraInitContainer = r["statefulset"][uname]["spec"]["template"]["spec"][ + "initContainers" + ] + assert { + "name": "do-something", + "image": "busybox", + "command": ["do", "something"], + } in extraInitContainer + + +def test_adding_a_extra_init_container_as_yaml(): + config = """ +extraInitContainers: + - name: do-something + image: busybox + command: ['do', 'something'] +""" + r = helm_template(config) + extraInitContainer = r["statefulset"][uname]["spec"]["template"]["spec"][ + "initContainers" + ] + assert { + "name": "do-something", + "image": "busybox", + "command": ["do", "something"], + } in extraInitContainer + + +def test_sysctl_init_container_disabled(): + config = """ +sysctlInitContainer: + enabled: false +""" + r = helm_template(config) + assert "initContainers" not in r["statefulset"][uname]["spec"]["template"]["spec"] + + +def test_sysctl_init_container_enabled(): + config = """ +sysctlInitContainer: + enabled: true +""" + r = helm_template(config) + initContainers = r["statefulset"][uname]["spec"]["template"]["spec"][ + "initContainers" + ] + assert initContainers[0]["name"] == "configure-sysctl" + + +def test_sysctl_init_container_image(): + config = """ +image: customImage +imageTag: 6.2.4 +imagePullPolicy: Never +sysctlInitContainer: + enabled: true +""" + r = helm_template(config) + initContainers = r["statefulset"][uname]["spec"]["template"]["spec"][ + "initContainers" + ] + assert initContainers[0]["image"] == "customImage:6.2.4" + assert initContainers[0]["imagePullPolicy"] == "Never" + + +def test_adding_storageclass_annotation_to_volumeclaimtemplate(): + config = """ +persistence: + annotations: + volume.beta.kubernetes.io/storage-class: id +""" + r = helm_template(config) + annotations = r["statefulset"][uname]["spec"]["volumeClaimTemplates"][0][ + "metadata" + ]["annotations"] + assert annotations["volume.beta.kubernetes.io/storage-class"] == "id" + + +def test_adding_multiple_persistence_annotations(): + config = """ + persistence: + annotations: + hello: world + world: hello + """ + r = helm_template(config) + annotations = r["statefulset"][uname]["spec"]["volumeClaimTemplates"][0][ + "metadata" + ]["annotations"] + + assert annotations["hello"] == "world" + assert annotations["world"] == "hello" + + +def test_enabling_persistence_label_in_volumeclaimtemplate(): + config = """ +persistence: + labels: + enabled: true +""" + r = helm_template(config) + volume_claim_template_labels = r["statefulset"][uname]["spec"][ + "volumeClaimTemplates" + ][0]["metadata"]["labels"] + statefulset_labels = r["statefulset"][uname]["metadata"]["labels"] + expected_labels = statefulset_labels + # heritage label shouldn't be present in volumeClaimTemplates labels + expected_labels.pop("heritage") + assert volume_claim_template_labels == expected_labels + + +def test_adding_a_secret_mount(): + config = """ +secretMounts: + - name: elastic-certificates + secretName: elastic-certs + path: 
/usr/share/elasticsearch/config/certs +""" + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + assert s["containers"][0]["volumeMounts"][-1] == { + "mountPath": "/usr/share/elasticsearch/config/certs", + "name": "elastic-certificates", + } + assert s["volumes"] == [ + {"name": "elastic-certificates", "secret": {"secretName": "elastic-certs"}} + ] + + +def test_adding_a_secret_mount_with_subpath(): + config = """ +secretMounts: + - name: elastic-certificates + secretName: elastic-certs + path: /usr/share/elasticsearch/config/certs + subPath: cert.crt +""" + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + assert s["containers"][0]["volumeMounts"][-1] == { + "mountPath": "/usr/share/elasticsearch/config/certs", + "subPath": "cert.crt", + "name": "elastic-certificates", + } + + +def test_adding_a_secret_mount_with_default_mode(): + config = """ +secretMounts: + - name: elastic-certificates + secretName: elastic-certs + path: /usr/share/elasticsearch/config/certs + subPath: cert.crt + defaultMode: 0755 +""" + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + assert s["containers"][0]["volumeMounts"][-1] == { + "mountPath": "/usr/share/elasticsearch/config/certs", + "subPath": "cert.crt", + "name": "elastic-certificates", + } + + +def test_adding_image_pull_secrets(): + config = """ +imagePullSecrets: + - name: test-registry +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["imagePullSecrets"][0][ + "name" + ] + == "test-registry" + ) + + +def test_adding_tolerations(): + config = """ +tolerations: +- key: "key1" + operator: "Equal" + value: "value1" + effect: "NoExecute" + tolerationSeconds: 3600 +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["tolerations"][0]["key"] + == "key1" + ) + + +def test_adding_pod_annotations(): + config = """ +podAnnotations: + iam.amazonaws.com/role: es-role +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["metadata"]["annotations"][ + "iam.amazonaws.com/role" + ] + == "es-role" + ) + + +def test_adding_serviceaccount_annotations(): + config = """ +rbac: + create: true + serviceAccountAnnotations: + eks.amazonaws.com/role-arn: arn:aws:iam::111111111111:role/k8s.clustername.namespace.serviceaccount +""" + r = helm_template(config) + assert ( + r["serviceaccount"][uname]["metadata"]["annotations"][ + "eks.amazonaws.com/role-arn" + ] + == "arn:aws:iam::111111111111:role/k8s.clustername.namespace.serviceaccount" + ) + + +def test_adding_a_node_selector(): + config = """ +nodeSelector: + disktype: ssd +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["nodeSelector"]["disktype"] + == "ssd" + ) + + +def test_adding_resources_to_initcontainer(): + config = """ +initResources: + limits: + cpu: "25m" + memory: "128Mi" + requests: + cpu: "25m" + memory: "128Mi" +""" + r = helm_template(config) + i = r["statefulset"][uname]["spec"]["template"]["spec"]["initContainers"][0] + + assert i["resources"] == { + "requests": {"cpu": "25m", "memory": "128Mi"}, + "limits": {"cpu": "25m", "memory": "128Mi"}, + } + + +def test_adding_resources_to_sidecar_container(): + config = """ +masterTerminationFix: true +sidecarResources: + limits: + cpu: "100m" + memory: "128Mi" + requests: + cpu: "100m" + memory: "128Mi" +""" + r = helm_template(config) + i = 
r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][1] + + assert i["resources"] == { + "requests": {"cpu": "100m", "memory": "128Mi"}, + "limits": {"cpu": "100m", "memory": "128Mi"}, + } + + +def test_adding_a_node_affinity(): + config = """ +nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: mylabel + operator: In + values: + - myvalue +""" + r = helm_template(config) + assert r["statefulset"][uname]["spec"]["template"]["spec"]["affinity"][ + "nodeAffinity" + ] == { + "preferredDuringSchedulingIgnoredDuringExecution": [ + { + "weight": 100, + "preference": { + "matchExpressions": [ + {"key": "mylabel", "operator": "In", "values": ["myvalue"]} + ] + }, + } + ] + } + + +def test_adding_an_ingress_rule(): + config = """ +ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: nginx + hosts: + - host: elasticsearch.elastic.co + paths: + - path: / + - host: '' + paths: + - path: / + - path: /mypath + servicePort: 8888 + - host: elasticsearch.hello.there + paths: + - path: / + servicePort: 9999 + tls: + - secretName: elastic-co-wildcard + hosts: + - elasticsearch.elastic.co +""" + + r = helm_template(config) + assert uname in r["ingress"] + i = r["ingress"][uname]["spec"] + assert i["tls"][0]["hosts"][0] == "elasticsearch.elastic.co" + assert i["tls"][0]["secretName"] == "elastic-co-wildcard" + + assert i["rules"][0]["host"] == "elasticsearch.elastic.co" + assert i["rules"][0]["http"]["paths"][0]["path"] == "/" + assert i["rules"][0]["http"]["paths"][0]["backend"]["serviceName"] == uname + assert i["rules"][0]["http"]["paths"][0]["backend"]["servicePort"] == 9200 + assert i["rules"][1]["host"] == None + assert i["rules"][1]["http"]["paths"][0]["path"] == "/" + assert i["rules"][1]["http"]["paths"][0]["backend"]["serviceName"] == uname + assert i["rules"][1]["http"]["paths"][0]["backend"]["servicePort"] == 9200 + assert i["rules"][1]["http"]["paths"][1]["path"] == "/mypath" + assert i["rules"][1]["http"]["paths"][1]["backend"]["serviceName"] == uname + assert i["rules"][1]["http"]["paths"][1]["backend"]["servicePort"] == 8888 + assert i["rules"][2]["host"] == "elasticsearch.hello.there" + assert i["rules"][2]["http"]["paths"][0]["path"] == "/" + assert i["rules"][2]["http"]["paths"][0]["backend"]["serviceName"] == uname + assert i["rules"][2]["http"]["paths"][0]["backend"]["servicePort"] == 9999 + + +def test_adding_a_deprecated_ingress_rule(): + config = """ +ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: nginx + path: / + hosts: + - elasticsearch.elastic.co + tls: + - secretName: elastic-co-wildcard + hosts: + - elasticsearch.elastic.co +""" + + r = helm_template(config) + assert uname in r["ingress"] + i = r["ingress"][uname]["spec"] + assert i["tls"][0]["hosts"][0] == "elasticsearch.elastic.co" + assert i["tls"][0]["secretName"] == "elastic-co-wildcard" + + assert i["rules"][0]["host"] == "elasticsearch.elastic.co" + assert i["rules"][0]["http"]["paths"][0]["path"] == "/" + assert i["rules"][0]["http"]["paths"][0]["backend"]["serviceName"] == uname + assert i["rules"][0]["http"]["paths"][0]["backend"]["servicePort"] == 9200 + + +def test_changing_the_protocol(): + config = """ +protocol: https +""" + r = helm_template(config) + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + assert "https://127.0.0.1:9200" in c["readinessProbe"]["exec"]["command"][-1] + + +def test_changing_the_cluster_health_status(): + config = """ 
+clusterHealthCheckParams: 'wait_for_no_initializing_shards=true&timeout=60s' +""" + r = helm_template(config) + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + assert ( + "/_cluster/health?wait_for_no_initializing_shards=true&timeout=60s" + in c["readinessProbe"]["exec"]["command"][-1] + ) + + +def test_adding_in_es_config(): + config = """ +esConfig: + elasticsearch.yml: | + key: + nestedkey: value + dot.notation: test + + log4j2.properties: | + appender.rolling.name = rolling +""" + r = helm_template(config) + c = r["configmap"][uname + "-config"]["data"] + + assert "elasticsearch.yml" in c + assert "log4j2.properties" in c + + assert "nestedkey: value" in c["elasticsearch.yml"] + assert "dot.notation: test" in c["elasticsearch.yml"] + + assert "appender.rolling.name = rolling" in c["log4j2.properties"] + + s = r["statefulset"][uname]["spec"]["template"]["spec"] + + assert { + "configMap": {"name": "elasticsearch-master-config"}, + "name": "esconfig", + } in s["volumes"] + assert { + "mountPath": "/usr/share/elasticsearch/config/elasticsearch.yml", + "name": "esconfig", + "subPath": "elasticsearch.yml", + } in s["containers"][0]["volumeMounts"] + assert { + "mountPath": "/usr/share/elasticsearch/config/log4j2.properties", + "name": "esconfig", + "subPath": "log4j2.properties", + } in s["containers"][0]["volumeMounts"] + + assert ( + "configchecksum" + in r["statefulset"][uname]["spec"]["template"]["metadata"]["annotations"] + ) + + +def test_dont_add_data_volume_when_persistance_is_disabled(): + config = """ +persistence: + enabled: false +""" + r = helm_template(config) + assert "volumeClaimTemplates" not in r["statefulset"][uname]["spec"] + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0][ + "volumeMounts" + ] + == None + ) + + +def test_priority_class_name(): + config = """ +priorityClassName: "" +""" + r = helm_template(config) + spec = r["statefulset"][uname]["spec"]["template"]["spec"] + assert "priorityClassName" not in spec + + config = """ +priorityClassName: "highest" +""" + r = helm_template(config) + priority_class_name = r["statefulset"][uname]["spec"]["template"]["spec"][ + "priorityClassName" + ] + assert priority_class_name == "highest" + + +def test_scheduler_name(): + r = helm_template("") + spec = r["statefulset"][uname]["spec"]["template"]["spec"] + assert "schedulerName" not in spec + + config = """ +schedulerName: "stork" +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["schedulerName"] == "stork" + ) + + +def test_disabling_non_headless_service(): + config = "" + + r = helm_template(config) + + assert uname in r["service"] + + config = """ +service: + enabled: false +""" + + r = helm_template(config) + + assert uname not in r["service"] + + +def test_adding_a_nodePort(): + config = "" + + r = helm_template(config) + + assert "nodePort" not in r["service"][uname]["spec"]["ports"][0] + + config = """ + service: + nodePort: 30001 + """ + + r = helm_template(config) + + assert r["service"][uname]["spec"]["ports"][0]["nodePort"] == 30001 + + +def test_adding_a_loadBalancerIP(): + config = "" + + r = helm_template(config) + + assert "loadBalancerIP" not in r["service"][uname]["spec"] + + config = """ + service: + loadBalancerIP: 12.4.19.81 + """ + + r = helm_template(config) + + assert r["service"][uname]["spec"]["loadBalancerIP"] == "12.4.19.81" + + +def test_adding_an_externalTrafficPolicy(): + config = "" + + r = helm_template(config) + + assert 
"externalTrafficPolicy" not in r["service"][uname]["spec"] + + config = """ + service: + externalTrafficPolicy: Local + """ + + r = helm_template(config) + + assert r["service"][uname]["spec"]["externalTrafficPolicy"] == "Local" + + +def test_adding_a_label_on_non_headless_service(): + config = "" + + r = helm_template(config) + + assert "label1" not in r["service"][uname]["metadata"]["labels"] + + config = """ + service: + labels: + label1: value1 + """ + + r = helm_template(config) + + assert r["service"][uname]["metadata"]["labels"]["label1"] == "value1" + + +def test_adding_a_label_on_headless_service(): + config = "" + + r = helm_template(config) + + assert "label1" not in r["service"][uname + "-headless"]["metadata"]["labels"] + + config = """ + service: + labelsHeadless: + label1: value1 + """ + + r = helm_template(config) + + assert r["service"][uname + "-headless"]["metadata"]["labels"]["label1"] == "value1" + + +def test_adding_load_balancer_source_ranges(): + config = """ +service: + loadBalancerSourceRanges: + - 0.0.0.0/0 + """ + r = helm_template(config) + assert r["service"][uname]["spec"]["loadBalancerSourceRanges"][0] == "0.0.0.0/0" + + config = """ +service: + loadBalancerSourceRanges: + - 192.168.0.0/24 + - 192.168.1.0/24 + """ + r = helm_template(config) + ranges = r["service"][uname]["spec"]["loadBalancerSourceRanges"] + assert ranges[0] == "192.168.0.0/24" + assert ranges[1] == "192.168.1.0/24" + + +def test_master_termination_fixed_enabled(): + config = "" + + r = helm_template(config) + + assert len(r["statefulset"][uname]["spec"]["template"]["spec"]["containers"]) == 1 + + config = """ + masterTerminationFix: true + """ + + r = helm_template(config) + + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][1] + assert c["name"] == "elasticsearch-master-graceful-termination-handler" + + +def test_lifecycle_hooks(): + config = "" + r = helm_template(config) + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + assert "lifecycle" not in c + + config = """ + lifecycle: + preStop: + exec: + command: ["/bin/bash","/preStop"] + """ + r = helm_template(config) + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + + assert c["lifecycle"]["preStop"]["exec"]["command"] == ["/bin/bash", "/preStop"] + + +def test_esMajorVersion_detect_default_version(): + config = "" + + r = helm_template(config) + assert r["statefulset"][uname]["metadata"]["annotations"]["esMajorVersion"] == "6" + + +def test_esMajorVersion_default_to_6_if_not_elastic_image(): + config = """ + image: notElastic + imageTag: 1.0.0 + """ + + r = helm_template(config) + assert r["statefulset"][uname]["metadata"]["annotations"]["esMajorVersion"] == "6" + + +def test_esMajorVersion_default_to_6_if_no_version_is_found(): + config = """ + imageTag: not_a_number + """ + + r = helm_template(config) + assert r["statefulset"][uname]["metadata"]["annotations"]["esMajorVersion"] == "6" + + +def test_esMajorVersion_set_to_6_based_on_image_tag(): + config = """ + imageTag: 6.8.1 + """ + + r = helm_template(config) + assert r["statefulset"][uname]["metadata"]["annotations"]["esMajorVersion"] == "6" + + +def test_esMajorVersion_parse_image_tag_for_oss_image(): + config = """ + image: docker.elastic.co/elasticsearch/elasticsearch-oss + imageTag: 6.3.2 + """ + + r = helm_template(config) + assert r["statefulset"][uname]["metadata"]["annotations"]["esMajorVersion"] == "6" + + +def test_set_pod_security_context(): + config = "" + r = helm_template(config) + assert ( + 
r["statefulset"][uname]["spec"]["template"]["spec"]["securityContext"][ + "fsGroup" + ] + == 1000 + ) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["securityContext"][ + "runAsUser" + ] + == 1000 + ) + + config = """ + podSecurityContext: + fsGroup: 1001 + other: test + """ + + r = helm_template(config) + + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["securityContext"][ + "fsGroup" + ] + == 1001 + ) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["securityContext"]["other"] + == "test" + ) + + +def test_fsGroup_backwards_compatability(): + config = """ + fsGroup: 1001 + """ + + r = helm_template(config) + + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["securityContext"][ + "fsGroup" + ] + == 1001 + ) + + +def test_set_container_security_context(): + config = "" + + r = helm_template(config) + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + assert c["securityContext"]["capabilities"]["drop"] == ["ALL"] + assert c["securityContext"]["runAsNonRoot"] == True + assert c["securityContext"]["runAsUser"] == 1000 + + config = """ + securityContext: + runAsUser: 1001 + other: test + """ + + r = helm_template(config) + c = r["statefulset"][uname]["spec"]["template"]["spec"]["containers"][0] + assert c["securityContext"]["capabilities"]["drop"] == ["ALL"] + assert c["securityContext"]["runAsNonRoot"] == True + assert c["securityContext"]["runAsUser"] == 1001 + assert c["securityContext"]["other"] == "test" + + +def test_adding_pod_labels(): + config = """ +labels: + app.kubernetes.io/name: elasticsearch +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["metadata"]["labels"]["app.kubernetes.io/name"] + == "elasticsearch" + ) + assert ( + r["statefulset"][uname]["spec"]["template"]["metadata"]["labels"][ + "app.kubernetes.io/name" + ] + == "elasticsearch" + ) + + +def test_keystore_enable(): + config = "" + + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + + assert s["volumes"] == None + + config = """ +keystore: + - secretName: test + """ + + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + + assert {"name": "keystore", "emptyDir": {}} in s["volumes"] + + +def test_keystore_init_container(): + config = "" + + r = helm_template(config) + i = r["statefulset"][uname]["spec"]["template"]["spec"]["initContainers"][-1] + + assert i["name"] != "keystore" + + config = """ +keystore: + - secretName: test + """ + + r = helm_template(config) + i = r["statefulset"][uname]["spec"]["template"]["spec"]["initContainers"][-1] + + assert i["name"] == "keystore" + + +def test_keystore_init_container_image(): + config = """ +image: customImage +imageTag: 6.2.4 +imagePullPolicy: Never +keystore: + - secretName: test +""" + r = helm_template(config) + i = r["statefulset"][uname]["spec"]["template"]["spec"]["initContainers"][-1] + assert i["image"] == "customImage:6.2.4" + assert i["imagePullPolicy"] == "Never" + + +def test_keystore_mount(): + config = """ +keystore: + - secretName: test +""" + + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + assert s["containers"][0]["volumeMounts"][-1] == { + "mountPath": "/usr/share/elasticsearch/config/elasticsearch.keystore", + "subPath": "elasticsearch.keystore", + "name": "keystore", + } + + +def test_keystore_init_volume_mounts(): + config = """ +keystore: + - secretName: test + - secretName: test-with-custom-path + items: + - key: slack_url + path: 
xpack.notification.slack.account.otheraccount.secure_url +""" + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + assert s["initContainers"][-1]["volumeMounts"] == [ + {"mountPath": "/tmp/keystore", "name": "keystore"}, + {"mountPath": "/tmp/keystoreSecrets/test", "name": "keystore-test"}, + { + "mountPath": "/tmp/keystoreSecrets/test-with-custom-path", + "name": "keystore-test-with-custom-path", + }, + ] + + +def test_keystore_volumes(): + config = """ +keystore: + - secretName: test + - secretName: test-with-custom-path + items: + - key: slack_url + path: xpack.notification.slack.account.otheraccount.secure_url +""" + r = helm_template(config) + s = r["statefulset"][uname]["spec"]["template"]["spec"] + + assert {"name": "keystore-test", "secret": {"secretName": "test"}} in s["volumes"] + + assert { + "name": "keystore-test-with-custom-path", + "secret": { + "secretName": "test-with-custom-path", + "items": [ + { + "key": "slack_url", + "path": "xpack.notification.slack.account.otheraccount.secure_url", + } + ], + }, + } in s["volumes"] + + +def test_pod_security_policy(): + ## Make sure the default config is not creating any resources + config = "" + resources = ("role", "rolebinding", "serviceaccount", "podsecuritypolicy") + r = helm_template(config) + for resource in resources: + assert resource not in r + assert ( + "serviceAccountName" not in r["statefulset"][uname]["spec"]["template"]["spec"] + ) + + ## Make sure all the resources are created with default values + config = """ +rbac: + create: true + serviceAccountName: "" + +podSecurityPolicy: + create: true + name: "" +""" + r = helm_template(config) + for resource in resources: + assert resource in r + assert r["role"][uname]["rules"][0] == { + "apiGroups": ["extensions"], + "verbs": ["use"], + "resources": ["podsecuritypolicies"], + "resourceNames": [uname], + } + assert r["rolebinding"][uname]["subjects"] == [ + {"kind": "ServiceAccount", "namespace": "default", "name": uname} + ] + assert r["rolebinding"][uname]["roleRef"] == { + "apiGroup": "rbac.authorization.k8s.io", + "kind": "Role", + "name": uname, + } + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["serviceAccountName"] + == uname + ) + psp_spec = r["podsecuritypolicy"][uname]["spec"] + assert psp_spec["privileged"] is True + + +def test_external_pod_security_policy(): + ## Make sure we can use an externally defined pod security policy + config = """ +rbac: + create: true + serviceAccountName: "" + +podSecurityPolicy: + create: false + name: "customPodSecurityPolicy" +""" + resources = ("role", "rolebinding") + r = helm_template(config) + for resource in resources: + assert resource in r + + assert r["role"][uname]["rules"][0] == { + "apiGroups": ["extensions"], + "verbs": ["use"], + "resources": ["podsecuritypolicies"], + "resourceNames": ["customPodSecurityPolicy"], + } + + +def test_external_service_account(): + ## Make sure we can use an externally defined service account + config = """ +rbac: + create: false + serviceAccountName: "customServiceAccountName" + +podSecurityPolicy: + create: false + name: "" +""" + resources = ("role", "rolebinding", "serviceaccount") + r = helm_template(config) + + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"]["serviceAccountName"] + == "customServiceAccountName" + ) + # When referencing an external service account we do not want any resources to be created. 
+ for resource in resources: + assert resource not in r + + +def test_name_override(): + ## Make sure we can use a name override + config = """ +nameOverride: "customName" +""" + r = helm_template(config) + + assert "customName-master" in r["statefulset"] + assert "customName-master" in r["service"] + + +def test_full_name_override(): + ## Make sure we can use a full name override + config = """ +fullnameOverride: "customfullName" +""" + r = helm_template(config) + + assert "customfullName" in r["statefulset"] + assert "customfullName" in r["service"] + + +def test_hostaliases(): + config = """ +hostAliases: +- ip: "127.0.0.1" + hostnames: + - "foo.local" + - "bar.local" +""" + r = helm_template(config) + hostAliases = r["statefulset"][uname]["spec"]["template"]["spec"]["hostAliases"] + assert {"ip": "127.0.0.1", "hostnames": ["foo.local", "bar.local"]} in hostAliases + + +def test_network_policy(): + config = """ +networkPolicy: + http: + enabled: true + explicitNamespacesSelector: + # Accept from namespaces with all those different rules (from whitelisted Pods) + matchLabels: + role: frontend-http + matchExpressions: + - {key: role, operator: In, values: [frontend-http]} + additionalRules: + - podSelector: + matchLabels: + role: frontend-http + - podSelector: + matchExpressions: + - key: role + operator: In + values: + - frontend-http + transport: + enabled: true + allowExternal: true + explicitNamespacesSelector: + matchLabels: + role: frontend-transport + matchExpressions: + - {key: role, operator: In, values: [frontend-transport]} + additionalRules: + - podSelector: + matchLabels: + role: frontend-transport + - podSelector: + matchExpressions: + - key: role + operator: In + values: + - frontend-transport + +""" + r = helm_template(config) + ingress = r["networkpolicy"][uname]["spec"]["ingress"] + pod_selector = r["networkpolicy"][uname]["spec"]["podSelector"] + http = ingress[0] + transport = ingress[1] + assert http["from"] == [ + { + "podSelector": { + "matchLabels": {"elasticsearch-master-http-client": "true"} + }, + "namespaceSelector": { + "matchExpressions": [ + {"key": "role", "operator": "In", "values": ["frontend-http"]} + ], + "matchLabels": {"role": "frontend-http"}, + }, + }, + {"podSelector": {"matchLabels": {"role": "frontend-http"}}}, + { + "podSelector": { + "matchExpressions": [ + {"key": "role", "operator": "In", "values": ["frontend-http"]} + ] + } + }, + ] + assert http["ports"][0]["port"] == 9200 + assert transport["from"] == [ + { + "podSelector": { + "matchLabels": {"elasticsearch-master-transport-client": "true"} + }, + "namespaceSelector": { + "matchExpressions": [ + {"key": "role", "operator": "In", "values": ["frontend-transport"]} + ], + "matchLabels": {"role": "frontend-transport"}, + }, + }, + {"podSelector": {"matchLabels": {"role": "frontend-transport"}}}, + { + "podSelector": { + "matchExpressions": [ + {"key": "role", "operator": "In", "values": ["frontend-transport"]} + ] + } + }, + {"podSelector": {"matchLabels": {"app": "elasticsearch-master"}}}, + ] + assert transport["ports"][0]["port"] == 9300 + assert pod_selector == {"matchLabels": {"app": "elasticsearch-master",}} + + +def test_default_automount_sa_token(): + config = """ +""" + r = helm_template(config) + assert ( + r["statefulset"][uname]["spec"]["template"]["spec"][ + "automountServiceAccountToken" + ] + == True + ) + + +def test_disable_automount_sa_token(): + config = """ +rbac: + automountToken: false +""" + r = helm_template(config) + assert ( + 
r["statefulset"][uname]["spec"]["template"]["spec"][ + "automountServiceAccountToken" + ] + == False + ) diff --git a/charts/elasticsearch/values.yaml b/charts/elasticsearch/values.yaml new file mode 100755 index 0000000..e1d314b --- /dev/null +++ b/charts/elasticsearch/values.yaml @@ -0,0 +1,347 @@ +--- +clusterName: "elasticsearch" +nodeGroup: "master" + +# The service that non master groups will try to connect to when joining the cluster +# This should be set to clusterName + "-" + nodeGroup for your master group +masterService: "" + +# Elasticsearch roles that will be applied to this nodeGroup +# These will be set as environment variables. E.g. node.master=true +roles: + master: "true" + ingest: "true" + data: "true" + +replicas: 3 +minimumMasterNodes: 2 + +esMajorVersion: "" + +# Allows you to add any config files in /usr/share/elasticsearch/config/ +# such as elasticsearch.yml and log4j2.properties +esConfig: {} +# elasticsearch.yml: | +# key: +# nestedkey: value +# log4j2.properties: | +# key = value + +# Extra environment variables to append to this nodeGroup +# This will be appended to the current 'env:' key. You can use any of the kubernetes env +# syntax here +extraEnvs: [] +# - name: MY_ENVIRONMENT_VAR +# value: the_value_goes_here + +# Allows you to load environment variables from kubernetes secret or config map +envFrom: [] +# - secretRef: +# name: env-secret +# - configMapRef: +# name: config-map + +# A list of secrets and their paths to mount inside the pod +# This is useful for mounting certificates for security and for mounting +# the X-Pack license +secretMounts: [] +# - name: elastic-certificates +# secretName: elastic-certificates +# path: /usr/share/elasticsearch/config/certs +# defaultMode: 0755 + +hostAliases: [] +# - ip: "127.0.0.1" +# hostnames: +# - "foo.local" +# - "bar.local" + +image: "docker.elastic.co/elasticsearch/elasticsearch" +imageTag: "6.8.22" +imagePullPolicy: "IfNotPresent" + +podAnnotations: {} + # iam.amazonaws.com/role: es-cluster + +# additionals labels +labels: {} + +esJavaOpts: "" # example: "-Xmx1g -Xms1g" + +resources: + requests: + cpu: "1000m" + memory: "2Gi" + limits: + cpu: "1000m" + memory: "2Gi" + +initResources: {} + # limits: + # cpu: "25m" + # # memory: "128Mi" + # requests: + # cpu: "25m" + # memory: "128Mi" + +sidecarResources: {} + # limits: + # cpu: "25m" + # # memory: "128Mi" + # requests: + # cpu: "25m" + # memory: "128Mi" + +networkHost: "0.0.0.0" + +volumeClaimTemplate: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 30Gi + +rbac: + create: false + serviceAccountAnnotations: {} + serviceAccountName: "" + automountToken: true + +podSecurityPolicy: + create: false + name: "" + spec: + privileged: true + fsGroup: + rule: RunAsAny + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - secret + - configMap + - persistentVolumeClaim + - emptyDir + +persistence: + enabled: true + labels: + # Add default labels for the volumeClaimTemplate of the StatefulSet + enabled: false + annotations: {} + +extraVolumes: [] + # - name: extras + # emptyDir: {} + +extraVolumeMounts: [] + # - name: extras + # mountPath: /usr/share/extras + # readOnly: true + +extraContainers: [] + # - name: do-something + # image: busybox + # command: ['do', 'something'] + +extraInitContainers: [] + # - name: do-something + # image: busybox + # command: ['do', 'something'] + +# This is the PriorityClass settings as defined in +# 
https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass +priorityClassName: "" + +# By default this will make sure two pods don't end up on the same node +# Changing this to a region would allow you to spread pods across regions +antiAffinityTopologyKey: "kubernetes.io/hostname" + +# Hard means that by default pods will only be scheduled if there are enough nodes for them +# and that they will never end up on the same node. Setting this to soft will do this "best effort" +antiAffinity: "hard" + +# This is the node affinity settings as defined in +# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature +nodeAffinity: {} + +# The default is to deploy all pods serially. By setting this to parallel all pods are started at +# the same time when bootstrapping the cluster +podManagementPolicy: "Parallel" + +# The environment variables injected by service links are not used, but can lead to slow Elasticsearch boot times when +# there are many services in the current namespace. +# If you experience slow pod startups you probably want to set this to `false`. +enableServiceLinks: true + +protocol: http +httpPort: 9200 +transportPort: 9300 + +service: + enabled: true + labels: {} + labelsHeadless: {} + type: ClusterIP + nodePort: "" + annotations: {} + httpPortName: http + transportPortName: transport + loadBalancerIP: "" + loadBalancerSourceRanges: [] + externalTrafficPolicy: "" + +updateStrategy: RollingUpdate + +# This is the max unavailable setting for the pod disruption budget +# The default value of 1 will make sure that kubernetes won't allow more than 1 +# of your pods to be unavailable during maintenance +maxUnavailable: 1 + +podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + +securityContext: + capabilities: + drop: + - ALL + # readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + +# How long to wait for elasticsearch to stop gracefully +terminationGracePeriod: 120 + +sysctlVmMaxMapCount: 262144 + +readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 3 + timeoutSeconds: 5 + +# https://www.elastic.co/guide/en/elasticsearch/reference/6.8/cluster-health.html#request-params wait_for_status +clusterHealthCheckParams: "wait_for_status=green&timeout=1s" + +## Use an alternate scheduler. +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" + +imagePullSecrets: [] +nodeSelector: {} +tolerations: [] + +# Enabling this will publicly expose your Elasticsearch instance. 
+# Only enable this if you have security enabled on your cluster +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +nameOverride: "" +fullnameOverride: "" +healthNameOverride: "" + +# https://github.com/elastic/helm-charts/issues/63 +masterTerminationFix: false + +lifecycle: {} + # preStop: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] + # postStart: + # exec: + # command: + # - bash + # - -c + # - | + # #!/bin/bash + # # Add a template to adjust number of shards/replicas + # TEMPLATE_NAME=my_template + # INDEX_PATTERN="logstash-*" + # SHARD_COUNT=8 + # REPLICA_COUNT=1 + # ES_URL=http://localhost:9200 + # while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done + # curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}' + +sysctlInitContainer: + enabled: true + +keystore: [] + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## In order for a Pod to access Elasticsearch, it needs to have the following label: + ## {{ template "uname" . }}-client: "true" + ## Example for default configuration to access HTTP port: + ## elasticsearch-master-http-client: "true" + ## Example for default configuration to access transport port: + ## elasticsearch-master-transport-client: "true" + + http: + enabled: false + ## if explicitNamespacesSelector is not set or set to {}, only client Pods being in the networkPolicy's namespace + ## and matching all criteria can reach the DB. + ## But sometimes, we want the Pods to be accessible to clients from other namespaces, in this case, we can use this + ## parameter to select these namespaces + ## + # explicitNamespacesSelector: + # # Accept from namespaces with all those different rules (only from whitelisted Pods) + # matchLabels: + # role: frontend + # matchExpressions: + # - {key: role, operator: In, values: [frontend]} + + ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. + ## + # additionalRules: + # - podSelector: + # matchLabels: + # role: frontend + # - podSelector: + # matchExpressions: + # - key: role + # operator: In + # values: + # - frontend + + transport: + ## Note that all Elasticsearch Pods can talk to themselves using transport port even if enabled. + enabled: false + # explicitNamespacesSelector: + # matchLabels: + # role: frontend + # matchExpressions: + # - {key: role, operator: In, values: [frontend]} + # additionalRules: + # - podSelector: + # matchLabels: + # role: frontend + # - podSelector: + # matchExpressions: + # - key: role + # operator: In + # values: + # - frontend + +tests: + enabled: true + +# Deprecated +# please use the above podSecurityContext.fsGroup instead +fsGroup: ""
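A few values-only sketches follow for the configuration paths this chart exposes; every concrete address, CIDR, label, and secret name in them is a placeholder rather than part of the chart. First, the service template renders each `service.*` field directly (while the headless service always keeps `clusterIP: None` so StatefulSet pod DNS keeps resolving), so publishing the HTTP port externally needs no template changes. A sketch, assuming the default `type: ClusterIP` is switched to a load balancer:

```yaml
service:
  type: LoadBalancer                 # chart default is ClusterIP
  loadBalancerIP: 12.4.19.81         # placeholder address
  loadBalancerSourceRanges:
    - 192.168.0.0/24                 # placeholder CIDR
  externalTrafficPolicy: Local
```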
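The keystore init container in the statefulset template creates a fresh `elasticsearch.keystore`, mounts every listed secret under `/tmp/keystoreSecrets/<secretName>`, and runs `elasticsearch-keystore add-file` with each file name as the key; an `items` list can remap a secret key to an explicit keystore entry, as the keystore tests exercise. A minimal sketch with hypothetical secret names:

```yaml
keystore:
  # Every key in this secret becomes a keystore entry of the same name.
  - secretName: es-secure-settings
  # Remap one secret key to a specific keystore entry.
  - secretName: es-slack-hook
    items:
      - key: slack_url
        path: xpack.notification.slack.account.monitoring.secure_url
```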
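The readiness probe script polls `/_cluster/health?{{ .Values.clusterHealthCheckParams }}` until the node first reports ready, and from then on only checks that the local node answers (tolerating HTTP 503 on 6.x). Both the query string and the probe timings are plain values; a sketch that swaps the default `wait_for_status=green&timeout=1s` for the check used in the tests above:

```yaml
clusterHealthCheckParams: "wait_for_no_initializing_shards=true&timeout=60s"
readinessProbe:            # chart defaults, shown for reference
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 3
  timeoutSeconds: 5
```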
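Setting `masterTerminationFix: true` adds the graceful-termination sidecar from the statefulset template to master-eligible pods; on SIGTERM it keeps the pod alive until `/_cat/master` reports that another node holds the master role, avoiding the slow re-election described in elastic/helm-charts#63. `sidecarResources` bounds the extra container, matching the sidecar resources test above:

```yaml
masterTerminationFix: true
sidecarResources:
  limits:
    cpu: "100m"
    memory: "128Mi"
  requests:
    cpu: "100m"
    memory: "128Mi"
```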
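Because discovery always targets `{{ template "elasticsearch.masterService" . }}-headless`, which defaults to `clusterName` plus `-master`, an additional node group normally needs only `nodeGroup` and `roles`; `masterService` matters only when the master group is named unconventionally. A sketch for a data-only group joining the default cluster (role values follow the chart's string convention):

```yaml
clusterName: "elasticsearch"
nodeGroup: "data"
roles:
  master: "false"
  ingest: "true"
  data: "true"
# Only needed when the master group does not follow clusterName + "-master":
# masterService: "elasticsearch-custommaster"
```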
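`extraVolumes`, `extraVolumeMounts`, `extraContainers`, and `extraInitContainers` each accept either a YAML list or, for backwards compatibility, a templated string passed through `tpl`; the paired tests above confirm both forms render identically. The list form is the simpler one:

```yaml
extraVolumes:
  - name: extras
    emptyDir: {}
extraVolumeMounts:
  - name: extras
    mountPath: /usr/share/extras
    readOnly: true
# Equivalent legacy string form:
# extraVolumes: |
#   - name: extras
#     emptyDir: {}
```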
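Finally, enabling `networkPolicy.http` restricts ingress on port 9200 to pods carrying the generated client label (`elasticsearch-master-http-client: "true"` under default naming), optionally widened by `explicitNamespacesSelector` and `additionalRules`. A sketch:

```yaml
networkPolicy:
  http:
    enabled: true
    explicitNamespacesSelector:
      matchLabels:
        role: frontend-http          # placeholder namespace label
```

A client pod in a selected namespace still needs the `elasticsearch-master-http-client: "true"` label to reach the HTTP port.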