From cf89b43c822d3124dce7904610b1cbe5f882ca71 Mon Sep 17 00:00:00 2001
From: "kavi_elrey@1993" <25226238+kavi-egov@users.noreply.github.com>
Date: Sun, 6 Jul 2025 19:39:15 +0530
Subject: [PATCH 01/10] Delete
 config-as-code/helm/charts/backbone-services/elasticsearch-data directory

---
 .../elasticsearch-data/Chart.yaml                  |   5 -
 .../elasticsearch-data/templates/_helpers.tpl      | 101 ----
 .../templates/headless-service.yaml                |  27 --
 .../templates/poddisruptionbudget.yaml             |  12 -
 .../templates/secret-cert.yaml                     |  15 -
 .../elasticsearch-data/templates/secret.yaml       |  21 -
 .../elasticsearch-data/templates/service.yaml      |  30 --
 .../templates/statefulset.yaml                     | 455 ------------------
 .../elasticsearch-data/values.yaml                 | 315 ------------
 9 files changed, 981 deletions(-)
 delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch-data/Chart.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/_helpers.tpl
 delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/headless-service.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/poddisruptionbudget.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/secret-cert.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/secret.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/service.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/statefulset.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch-data/values.yaml

diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch-data/Chart.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch-data/Chart.yaml
deleted file mode 100644
index 2637562ec..000000000
--- a/config-as-code/helm/charts/backbone-services/elasticsearch-data/Chart.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-apiVersion: v1
-appVersion: "1.0"
-description: A Helm chart for Elasticsearch on Kubernetes
-name: elasticsearch-data
-version: 0.1.0
\ No newline at end of file
diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/_helpers.tpl b/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/_helpers.tpl
deleted file mode 100644
index a107c5399..000000000
--- a/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/_helpers.tpl
+++ /dev/null
@@ -1,101 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{- define "name" -}}
-{{- $envOverrides := index .Values (tpl (default .Chart.Name .Values.name) .) -}}
-{{- $baseValues := .Values | deepCopy -}}
-{{- $values := dict "Values" (mustMergeOverwrite $baseValues $envOverrides) -}}
-{{- with mustMergeOverwrite . $values -}}
-{{- default .Chart.Name .Values.name -}}
-{{- end }}
-{{- end }}
-
-{{- define "elasticsearch.roles" -}}
-{{- range $.Values.roles -}}
-{{ . }},
-{{- end -}}
-{{- end -}}
-
-{{/*
-Generate certificates when the secret doesn't exist
-*/}}
-{{- define "elasticsearch.gen-certs" -}}
-{{- $certs := lookup "v1" "Secret" "es-cluster-v8" ( printf "%s-certs" (include "name" . ) ) -}}
-{{- if $certs -}}
-tls.crt: {{ index $certs.data "tls.crt" }}
-tls.key: {{ index $certs.data "tls.key" }}
-ca.crt: {{ index $certs.data "ca.crt" }}
-{{- else -}}
-{{- $altNames := list ( include "elasticsearch.masterService" . ) ( printf "%s.es-cluster-v8" (include "elasticsearch.masterService" .) ) ( printf "%s.es-cluster-v8.svc" (include "elasticsearch.masterService" .) ) -}}
-{{- $ca := genCA "elasticsearch-ca" 365 -}}
-{{- $cert := genSignedCert ( include "elasticsearch.masterService" . ) nil $altNames 365 $ca -}}
-tls.crt: {{ $cert.Cert | toString | b64enc }}
-tls.key: {{ $cert.Key | toString | b64enc }}
-ca.crt: {{ $ca.Cert | toString | b64enc }}
-{{- end -}}
-{{- end -}}
-
-{{- define "elasticsearch.masterService" -}}
-{{- if empty .Values.masterService -}}
-{{- if empty .Values.fullnameOverride -}}
-{{- if empty .Values.nameOverride -}}
-{{ .Values.clusterName }}-master
-{{- else -}}
-{{ .Values.nameOverride }}-master
-{{- end -}}
-{{- else -}}
-{{ .Values.fullnameOverride }}
-{{- end -}}
-{{- else -}}
-{{ .Values.masterService }}
-{{- end -}}
-{{- end -}}
-
-{{- define "elasticsearch.endpoints" -}}
-{{- $replicas := int (toString (.Values.replicas)) }}
-{{- $uname := printf "%s-%s" .Values.clusterName .Values.nodeGroup }}
-  {{- range $i, $e := untilStep 0 $replicas 1 -}}
-{{ $uname }}-{{ $i }},
-  {{- end -}}
-{{- end -}}
-
-{{- define "elasticsearch.esMajorVersion" -}}
-{{- if .Values.esMajorVersion -}}
-{{ .Values.esMajorVersion }}
-{{- else -}}
-{{- $version := int (index (.Values.image.tag | splitList ".") 0) -}}
-  {{- if and (contains "docker.elastic.co/elasticsearch/elasticsearch" .Values.image.repository) (not (eq $version 0)) -}}
-{{ $version }}
-  {{- else -}}
-8
-  {{- end -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Return the appropriate apiVersion for statefulset.
-*/}}
-{{- define "elasticsearch.statefulset.apiVersion" -}}
-{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.GitVersion -}}
-{{- print "apps/v1beta2" -}}
-{{- else -}}
-{{- print "apps/v1" -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Return the appropriate apiVersion for ingress.
-*/}}
-{{- define "elasticsearch.ingress.apiVersion" -}}
-{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
-{{- print "extensions/v1beta1" -}}
-{{- else -}}
-{{- print "networking.k8s.io/v1beta1" -}}
-{{- end -}}
-{{- end -}}
-
-{{- define "common.image" -}}
-{{- if contains "/" .repository -}}
-{{- printf "%s:%s" .repository ( required "Tag is mandatory" .tag ) -}}
-{{- else -}}
-{{- printf "%s/%s:%s" $.Values.global.containerRegistry .repository ( required "Tag is mandatory" .tag ) -}}
-{{- end -}}
-{{- end -}}
\ No newline at end of file
diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/headless-service.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/headless-service.yaml
deleted file mode 100644
index 857fe8794..000000000
--- a/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/headless-service.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-kind: Service
-apiVersion: v1
-metadata:
-{{- if eq .Values.nodeGroup "master" }}
-  name: {{ template "name" . }}-headless
-{{- else }}
-  name: {{ template "name" . }}-headless
-{{- end }}
-  namespace: {{ .Values.namespace }}
-  labels:
-    app: "{{ template "name" . }}"
-{{- if .Values.service.labelsHeadless }}
-{{ toYaml .Values.service.labelsHeadless | indent 4 }}
-{{- end }}
-  annotations:
-    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
-spec:
-  clusterIP: None # This is needed for statefulset hostnames like elasticsearch-0 to resolve
-  # Create endpoints also if the related pod isn't ready
-  publishNotReadyAddresses: true
-  selector:
-    app: "{{ template "name" . }}"
-  ports:
-  - name: {{ .Values.service.httpPortName | default "http" }}
-    port: {{ .Values.httpPort }}
-  - name: {{ .Values.service.transportPortName | default "transport" }}
-    port: {{ .Values.transportPort }}
\ No newline at end of file
diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/poddisruptionbudget.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/poddisruptionbudget.yaml
deleted file mode 100644
index 6582bcfce..000000000
--- a/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/poddisruptionbudget.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{{- if .Values.maxUnavailable }}
-apiVersion: policy/v1
-kind: PodDisruptionBudget
-metadata:
-  name: "{{ template "name" . }}-pdb"
-  namespace: {{ .Values.namespace }}
-spec:
-  maxUnavailable: {{ .Values.maxUnavailable }}
-  selector:
-    matchLabels:
-      app: "{{ template "name" . }}"
-{{- end }}
\ No newline at end of file
diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/secret-cert.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/secret-cert.yaml
deleted file mode 100644
index 9109a71ec..000000000
--- a/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/secret-cert.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-{{- if .Values.createCert }}
-apiVersion: v1
-kind: Secret
-type: kubernetes.io/tls
-metadata:
-  name: {{ template "name" . }}-certs
-  namespace: {{ .Values.namespace }}
-  labels:
-    app: "{{ template "name" . }}"
-    {{- range $key, $value := .Values.labels }}
-    {{ $key }}: {{ $value | quote }}
-    {{- end }}
-data:
-{{ ( include "elasticsearch.gen-certs" . ) | indent 2 }}
-{{- end }}
diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/secret.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/secret.yaml
deleted file mode 100644
index 9285ef654..000000000
--- a/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/secret.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-{{- if .Values.secret.enabled -}}
-{{- $passwordValue := (randAlphaNum 24) | b64enc | quote }}
-apiVersion: v1
-kind: Secret
-metadata:
-  name: {{ template "name" . }}-credentials
-  namespace: {{ .Values.namespace }}
-  labels:
-    app: "{{ template "name" . }}"
-    {{- range $key, $value := .Values.labels }}
-    {{ $key }}: {{ $value | quote }}
-    {{- end }}
-type: Opaque
-data:
-  username: {{ "elastic" | b64enc }}
-  {{- if .Values.secret.password }}
-  password: {{ .Values.secret.password | b64enc }}
-  {{- else }}
-  password: {{ $passwordValue }}
-  {{- end }}
-{{- end }}
diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/service.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/service.yaml
deleted file mode 100644
index 3a2c57c75..000000000
--- a/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/service.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-kind: Service
-apiVersion: v1
-metadata:
-{{- if eq .Values.nodeGroup "master" }}
-  name: {{ template "name" . }}
-{{- else }}
-  name: {{ template "name" . }}
-{{- end }}
-  namespace: {{ .Values.namespace }}
-  labels:
-    app: "{{ template "name" . }}"
-{{- if .Values.service.labels }}
-{{ toYaml .Values.service.labels | indent 4}}
-{{- end }}
-  annotations:
-{{ toYaml .Values.service.annotations | indent 4 }}
-spec:
-  type: {{ .Values.service.type }}
-  selector:
-    app: "{{ template "name" . }}"
-  ports:
-  - name: {{ .Values.service.httpPortName | default "http" }}
-    protocol: TCP
-    port: {{ .Values.httpPort }}
-{{- if .Values.service.nodePort }}
-    nodePort: {{ .Values.service.nodePort }}
-{{- end }}
-  - name: {{ .Values.service.transportPortName | default "transport" }}
-    protocol: TCP
-    port: {{ .Values.transportPort }}
\ No newline at end of file
diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/statefulset.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/statefulset.yaml
deleted file mode 100644
index 013d25943..000000000
--- a/config-as-code/helm/charts/backbone-services/elasticsearch-data/templates/statefulset.yaml
+++ /dev/null
@@ -1,455 +0,0 @@
----
-apiVersion: {{ template "elasticsearch.statefulset.apiVersion" . }}
-kind: StatefulSet
-metadata:
-  name: {{ template "name" . }}
-  namespace: {{ .Values.namespace }}
-  labels:
-    app: "{{ template "name" . }}"
-    {{- range $key, $value := .Values.labels }}
-    {{ $key }}: {{ $value | quote }}
-    {{- end }}
-  annotations:
-    esMajorVersion: "{{ include "elasticsearch.esMajorVersion" . }}"
-spec:
-  serviceName: {{ template "name" . }}-headless
-  selector:
-    matchLabels:
-      app: "{{ template "name" . }}"
-  replicas: {{ .Values.replicas }}
-  podManagementPolicy: {{ .Values.podManagementPolicy }}
-  updateStrategy:
-    type: {{ .Values.updateStrategy }}
-  {{- if .Values.persistence.enabled }}
-  volumeClaimTemplates:
-  - metadata:
-      name: es-storage
-      {{- with .Values.persistence.annotations }}
-      annotations:
-{{ toYaml . | indent 8 }}
-      {{- end }}
-    spec:
-{{ toYaml .Values.volumeClaimTemplate | indent 6 }}
-  {{- end }}
-  template:
-    metadata:
-      name: "{{ template "name" . }}"
-      labels:
-        app: "{{ template "name" . }}"
-        {{- range $key, $value := .Values.labels }}
-        {{ $key }}: {{ $value | quote }}
-        {{- end }}
-      annotations:
-        {{- range $key, $value := .Values.podAnnotations }}
-        {{ $key }}: {{ $value | quote }}
-        {{- end }}
-        {{/* This forces a restart if the configmap has changed */}}
-        {{- if .Values.esConfig }}
-        configchecksum: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum | trunc 63 }}
-        {{- end }}
-    spec:
-      {{- if .Values.schedulerName }}
-      schedulerName: "{{ .Values.schedulerName }}"
-      {{- end }}
-      securityContext:
-{{ toYaml .Values.podSecurityContext | indent 8 }}
-        {{- if .Values.fsGroup }}
-        fsGroup: {{ .Values.fsGroup }} # Deprecated value, please use .Values.podSecurityContext.fsGroup
-        {{- end }}
-      {{- if .Values.rbac.create }}
-      serviceAccountName: "{{ template "name" . }}"
-      {{- else if not (eq .Values.rbac.serviceAccountName "") }}
-      serviceAccountName: {{ .Values.rbac.serviceAccountName | quote }}
-      {{- end }}
-      {{- with .Values.tolerations }}
-      tolerations:
-{{ toYaml . | indent 6 }}
-      {{- end }}
-      {{- with .Values.nodeSelector }}
-      nodeSelector:
-{{ toYaml . | indent 8 }}
-      {{- end }}
-      {{- if or (eq .Values.antiAffinity "hard") (eq .Values.antiAffinity "soft") .Values.nodeAffinity }}
-      {{- if .Values.priorityClassName }}
-      priorityClassName: {{ .Values.priorityClassName }}
-      {{- end }}
-      affinity:
-      {{- end }}
-      {{- if eq .Values.antiAffinity "hard" }}
-        podAntiAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-          - labelSelector:
-              matchExpressions:
-              - key: app
-                operator: In
-                values:
-                - "{{ template "name" .}}"
-            topologyKey: {{ .Values.antiAffinityTopologyKey }}
-      {{- else if eq .Values.antiAffinity "soft" }}
-        podAntiAffinity:
-          preferredDuringSchedulingIgnoredDuringExecution:
-          - weight: 1
-            podAffinityTerm:
-              topologyKey: {{ .Values.antiAffinityTopologyKey }}
-              labelSelector:
-                matchExpressions:
-                - key: app
-                  operator: In
-                  values:
-                  - "{{ template "name" . }}"
-      {{- end }}
-      {{- with .Values.nodeAffinity }}
-        nodeAffinity:
-{{ toYaml . | indent 10 }}
-      {{- end }}
-      terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }}
-      volumes:
-      {{- range .Values.secretMounts }}
-      - name: {{ .name }}
-        secret:
-          secretName: {{ .secretName }}
-      {{- end }}
-      {{- if .Values.esConfig }}
-      - name: esconfig
-        configMap:
-          name: {{ template "name" . }}-config
-      {{- end }}
-      {{- if .Values.createCert }}
-      - name: elasticsearch-certs
-        secret:
-          secretName: {{ template "name" . }}-certs
-      {{- end }}
-{{- if .Values.keystore }}
-      - name: keystore
-        emptyDir: {}
-      {{- range .Values.keystore }}
-      - name: keystore-{{ .secretName }}
-        secret: {{ toYaml . | nindent 12 }}
-      {{- end }}
-{{ end }}
-      {{- if .Values.extraVolumes }}
-      # Currently some extra blocks accept strings
-      # to continue with backwards compatibility this is being kept
-      # whilst also allowing for yaml to be specified too.
-      {{- if eq "string" (printf "%T" .Values.extraVolumes) }}
-{{ tpl .Values.extraVolumes . | indent 8 }}
-      {{- else }}
-{{ toYaml .Values.extraVolumes | indent 8 }}
-      {{- end }}
-      {{- end }}
-      {{- if .Values.imagePullSecrets }}
-      imagePullSecrets:
-{{ toYaml .Values.imagePullSecrets | indent 8 }}
-      {{- end }}
-      initContainers:
-      {{- if .Values.sysctlInitContainer.enabled }}
-      - name: configure-sysctl
-        securityContext:
-          runAsUser: 0
-          privileged: true
-        image: {{ template "common.image" (dict "Values" .Values "repository" .Values.image.repository "tag" .Values.image.tag) }}
-        imagePullPolicy: "{{ .Values.image.pullPolicy }}"
-        command: ["sysctl", "-w", "vm.max_map_count={{ .Values.sysctlVmMaxMapCount}}"]
-        resources:
-{{ toYaml .Values.initResources | indent 10 }}
-      {{- end }}
-{{ if .Values.keystore }}
-      - name: keystore
-        securityContext:
-{{ toYaml .Values.securityContext | indent 10 }}
-        image: {{ template "common.image" (dict "Values" .Values "repository" .Values.image.repository "tag" .Values.image.tag) }}
-        imagePullPolicy: "{{ .Values.image.pullPolicy }}"
-        command:
-        - sh
-        - -c
-        - |
-          #!/usr/bin/env bash
-          set -euo pipefail
-
-          elasticsearch-keystore create
-
-          for i in /tmp/keystoreSecrets/*/*; do
-            key=$(basename $i)
-            echo "Adding file $i to keystore key $key"
-            elasticsearch-keystore add-file "$key" "$i"
-          done
-
-          # Add the bootstrap password since otherwise the Elasticsearch entrypoint tries to do this on startup
-          if [ ! -z ${ELASTIC_PASSWORD+x} ]; then
-            echo 'Adding env $ELASTIC_PASSWORD to keystore as key bootstrap.password'
-            echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x bootstrap.password
-          fi
-
-          cp -a /usr/share/elasticsearch/config/elasticsearch.keystore /tmp/keystore/
-        env: {{ toYaml .Values.extraEnvs | nindent 10 }}
-        envFrom: {{ toYaml .Values.envFrom | nindent 10 }}
-        resources: {{ toYaml .Values.initResources | nindent 10 }}
-        volumeMounts:
-        - name: keystore
-          mountPath: /tmp/keystore
-        {{- range .Values.keystore }}
-        - name: keystore-{{ .secretName }}
-          mountPath: /tmp/keystoreSecrets/{{ .secretName }}
-        {{- end }}
-{{ end }}
-      {{- if .Values.extraInitContainers }}
-      # Currently some extra blocks accept strings
-      # to continue with backwards compatibility this is being kept
-      # whilst also allowing for yaml to be specified too.
-      {{- if eq "string" (printf "%T" .Values.extraInitContainers) }}
-{{ tpl .Values.extraInitContainers . | indent 6 }}
-      {{- else }}
-{{ toYaml .Values.extraInitContainers | indent 6 }}
-      {{- end }}
-      {{- end }}
-      containers:
-      - name: "elasticsearch"
-        securityContext:
-{{ toYaml .Values.securityContext | indent 10 }}
-        image: {{ template "common.image" (dict "Values" .Values "repository" .Values.image.repository "tag" .Values.image.tag) }}
-        imagePullPolicy: "{{ .Values.image.pullPolicy }}"
-        readinessProbe:
-          exec:
-            command:
-            - bash
-            - -c
-            - |
-              set -e
-
-              # Exit if ELASTIC_PASSWORD is unset
-              if [ -z "${ELASTIC_PASSWORD}" ]; then
-                echo "ELASTIC_PASSWORD variable is missing, exiting"
-                exit 1
-              fi
-
-              # If the node is starting up wait for the cluster to be ready (request params: "{{ .Values.clusterHealthCheckParams }}" )
-              # Once it has started only check that the node itself is responding
-              START_FILE=/tmp/.es_start_file
-
-              # Disable nss cache to avoid filling dentry cache when calling curl
-              # This is required with Elasticsearch Docker using nss < 3.52
-              export NSS_SDB_USE_CACHE=no
-
-              http () {
-                local path="${1}"
-                local args="${2}"
-                set -- -XGET -s
-
-                if [ "$args" != "" ]; then
-                  set -- "$@" $args
-                fi
-
-                set -- "$@" -u "elastic:${ELASTIC_PASSWORD}"
-
-                curl --output /dev/null -k "$@" "{{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}${path}"
-              }
-
-              if [ -f "${START_FILE}" ]; then
-                echo 'Elasticsearch is already running, lets check the node is healthy'
-                HTTP_CODE=$(http "/" "-w %{http_code}")
-                RC=$?
-                if [[ ${RC} -ne 0 ]]; then
-                  echo "curl --output /dev/null -k -XGET -s -w '%{http_code}' \${BASIC_AUTH} {{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}/ failed with RC ${RC}"
-                  exit ${RC}
-                fi
-                # ready if HTTP code 200, 503 is tolerable if ES version is 6.x
-                if [[ ${HTTP_CODE} == "200" ]]; then
-                  exit 0
-                elif [[ ${HTTP_CODE} == "503" && "{{ include "elasticsearch.esMajorVersion" . }}" == "6" ]]; then
-                  exit 0
-                else
-                  echo "curl --output /dev/null -k -XGET -s -w '%{http_code}' \${BASIC_AUTH} {{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}/ failed with HTTP code ${HTTP_CODE}"
-                  exit 1
-                fi
-
-              else
-                echo 'Waiting for elasticsearch cluster to become ready (request params: "{{ .Values.clusterHealthCheckParams }}" )'
-                if http "/_cluster/health?{{ .Values.clusterHealthCheckParams }}" "--fail" ; then
-                  touch ${START_FILE}
-                  exit 0
-                else
-                  echo 'Cluster is not yet ready (request params: "{{ .Values.clusterHealthCheckParams }}" )'
-                  exit 1
-                fi
-              fi
-{{ toYaml .Values.readinessProbe | indent 10 }}
-        ports:
-        - name: http
-          containerPort: {{ .Values.httpPort }}
-        - name: transport
-          containerPort: {{ .Values.transportPort }}
-        resources:
-{{ toYaml .Values.resources | indent 10 }}
-        env:
-          - name: node.name
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-          {{- if has "master" .Values.roles }}
-          - name: cluster.initial_master_nodes
-            value: "{{ template "elasticsearch.endpoints" . }}"
-          {{- end }}
-          {{- if gt (len (include "elasticsearch.roles" .)) 0 }}
-          - name: node.roles
-            value: "{{ template "elasticsearch.roles" . }}"
-          {{- end }}
-          {{- if lt (int (include "elasticsearch.esMajorVersion" .)) 7 }}
-          - name: discovery.zen.ping.unicast.hosts
-            value: "{{ template "elasticsearch.masterService" . }}-headless"
-          {{- else }}
-          - name: discovery.seed_hosts
-            value: "{{ template "elasticsearch.masterService" . }}-headless"
-          {{- end }}
-          - name: cluster.name
-            value: {{ .Values.clusterName | quote }}
-          - name: network.host
-            value: {{ .Values.networkHost | quote }}
-          {{- if .Values.secret.enabled }}
-          - name: ELASTIC_PASSWORD
-            valueFrom:
-              secretKeyRef:
-                name: {{ template "name" . }}-credentials
-                key: password
-          {{- end }}
-          - name: ES_JAVA_OPTS
-            value: {{ .Values.esJavaOpts | quote }}
-          {{- if .Values.createCert }}
-          - name: xpack.security.enabled
-            value: "true"
-          - name: xpack.security.transport.ssl.enabled
-            value: "true"
-          - name: xpack.security.enrollment.enabled
-            value: "true"
-          - name: xpack.security.http.ssl.enabled
-            value: "true"
-          - name: xpack.security.transport.ssl.verification_mode
-            value: "certificate"
-          - name: xpack.security.transport.ssl.key
-            value: "/usr/share/elasticsearch/config/certs/tls.key"
-          - name: xpack.security.transport.ssl.certificate
-            value: "/usr/share/elasticsearch/config/certs/tls.crt"
-          - name: xpack.security.transport.ssl.certificate_authorities
-            value: "/usr/share/elasticsearch/config/certs/ca.crt"
-          - name: xpack.security.http.ssl.key
-            value: "/usr/share/elasticsearch/config/certs/tls.key"
-          - name: xpack.security.http.ssl.certificate
-            value: "/usr/share/elasticsearch/config/certs/tls.crt"
-          - name: xpack.security.http.ssl.certificate_authorities
-            value: "/usr/share/elasticsearch/config/certs/ca.crt"
-          {{- end }}
-{{- if .Values.extraEnvs }}
-{{ toYaml .Values.extraEnvs | indent 10 }}
-{{- end }}
-{{- if .Values.envFrom }}
-        envFrom:
-{{ toYaml .Values.envFrom | indent 10 }}
-{{- end }}
-        volumeMounts:
-          {{- if .Values.persistence.enabled }}
-          - name: "es-storage"
-            mountPath: /usr/share/elasticsearch/data
-          {{- end }}
-          {{- if .Values.createCert }}
-          - name: elasticsearch-certs
-            mountPath: /usr/share/elasticsearch/config/certs
-            readOnly: true
-          {{- end }}
-{{ if .Values.keystore }}
-          - name: keystore
-            mountPath: /usr/share/elasticsearch/config/elasticsearch.keystore
-            subPath: elasticsearch.keystore
-{{ end }}
-          {{- range .Values.secretMounts }}
-          - name: {{ .name }}
-            mountPath: {{ .path }}
-            {{- if .subPath }}
-            subPath: {{ .subPath }}
-            {{- end }}
-          {{- end }}
-          {{- range $path, $config := .Values.esConfig }}
-          - name: esconfig
-            mountPath: /usr/share/elasticsearch/config/{{ $path }}
-            subPath: {{ $path }}
-          {{- end -}}
-        {{- if .Values.extraVolumeMounts }}
-        # Currently some extra blocks accept strings
-        # to continue with backwards compatibility this is being kept
-        # whilst also allowing for yaml to be specified too.
-        {{- if eq "string" (printf "%T" .Values.extraVolumeMounts) }}
-{{ tpl .Values.extraVolumeMounts . | indent 10 }}
-        {{- else }}
-{{ toYaml .Values.extraVolumeMounts | indent 10 }}
-        {{- end }}
-        {{- end }}
-      {{- if .Values.masterTerminationFix }}
-      {{- if has "master" .Values.roles }}
-      # This sidecar will prevent slow master re-election
-      # https://github.com/elastic/helm-charts/issues/63
-      - name: elasticsearch-master-graceful-termination-handler
-        image: {{ template "common.image" (dict "Values" .Values "repository" .Values.image.repository "tag" .Values.image.tag) }}
-        imagePullPolicy: "{{ .Values.image.pullPolicy }}"
-        command:
-        - "sh"
-        - -c
-        - |
-          #!/usr/bin/env bash
-          set -eo pipefail
-
-          http () {
-            local path="${1}"
-            if [ -n "${ELASTIC_USERNAME}" ] && [ -n "${ELASTIC_PASSWORD}" ]; then
-              BASIC_AUTH="-u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}"
-            else
-              BASIC_AUTH=''
-            fi
-            curl -XGET -s -k --fail ${BASIC_AUTH} {{ .Values.protocol }}://{{ .Values.masterService }}:{{ .Values.httpPort }}${path}
-          }
-
-          cleanup () {
-            while true ; do
-              local master="$(http "/_cat/master?h=node" || echo "")"
-              if [[ $master == "{{ .Values.masterService }}"* && $master != "${NODE_NAME}" ]]; then
-                echo "This node is not master."
-                break
-              fi
-              echo "This node is still master, waiting gracefully for it to step down"
-              sleep 1
-            done
-
-            exit 0
-          }
-
-          trap cleanup SIGTERM
-
-          sleep infinity &
-          wait $!
-        resources:
-{{ toYaml .Values.sidecarResources | indent 10 }}
-        env:
-        - name: NODE_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        {{- if .Values.extraEnvs }}
-{{ toYaml .Values.extraEnvs | indent 10 }}
-        {{- end }}
-        {{- if .Values.envFrom }}
-        envFrom:
-{{ toYaml .Values.envFrom | indent 10 }}
-        {{- end }}
-      {{- end }}
-      {{- end }}
-{{- if .Values.lifecycle }}
-        lifecycle:
-{{ toYaml .Values.lifecycle | indent 10 }}
-{{- end }}
-      {{- if .Values.extraContainers }}
-      # Currently some extra blocks accept strings
-      # to continue with backwards compatibility this is being kept
-      # whilst also allowing for yaml to be specified too.
-      {{- if eq "string" (printf "%T" .Values.extraContainers) }}
-{{ tpl .Values.extraContainers . | indent 6 }}
-      {{- else }}
-{{ toYaml .Values.extraContainers | indent 6 }}
-      {{- end }}
-      {{- end }}
\ No newline at end of file
diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch-data/values.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch-data/values.yaml
deleted file mode 100644
index 421d95ee4..000000000
--- a/config-as-code/helm/charts/backbone-services/elasticsearch-data/values.yaml
+++ /dev/null
@@ -1,315 +0,0 @@
-
-name: elasticsearch-data
-replicas: 3
-
-image:
-  pullPolicy: IfNotPresent
-  repository: docker.elastic.co/elasticsearch/elasticsearch
-  tag: 8.11.3
-
-clusterName: "elasticsearch"
-nodeGroup: "data"
-namespace: es-cluster
-
-# The service that non master groups will try to connect to when joining the cluster
-# This should be set to clusterName + "-" + nodeGroup for your master group
-masterService: "elasticsearch-master"
-
-# Elasticsearch roles that will be applied to this nodeGroup
-# These will be set as environment variables. E.g. node.master=true
-roles:
-  - ingest
-  - data
-
-minimumMasterNodes: 2
-
-esMajorVersion: ""
-
-# Allows you to add any config files in /usr/share/elasticsearch/config/
-# such as elasticsearch.yml and log4j2.properties
-esConfig: {}
-#  elasticsearch.yml: |
-#    key:
-#      nestedkey: value
-#  log4j2.properties: |
-#    key = value
-
-# Extra environment variables to append to this nodeGroup
-# This will be appended to the current 'env:' key. You can use any of the kubernetes env
-# syntax here
-extraEnvs:
-#  - name: MY_ENVIRONMENT_VAR
-#    value: the_value_goes_here
-  - name: path.data
-    value: "/usr/share/elasticsearch/data"
-  - name: path.logs
-    value: "/usr/share/elasticsearch/logs"
-#  - name: gateway.expected_master_nodes
-#    value: "2"
-  - name: gateway.expected_data_nodes
-    value: "1"
-  - name: gateway.recover_after_time
-    value: "5m"
-#  - name: gateway.recover_after_master_nodes
-#    value: "2"
-  - name: gateway.recover_after_data_nodes
-    value: "1"
-  - name: ELASTIC_PASSWORD
-    valueFrom:
-      secretKeyRef:
-        name: elasticsearch-master-credentials
-        key: password
-  - name: xpack.security.enabled
-    value: "true"
-  - name: xpack.security.transport.ssl.enabled
-    value: "true"
-  - name: xpack.security.http.ssl.enabled
-    value: "true"
-  - name: xpack.security.transport.ssl.verification_mode
-    value: "certificate"
-  - name: xpack.security.transport.ssl.key
-    value: "/usr/share/elasticsearch/config/certs/tls.key"
-  - name: xpack.security.transport.ssl.certificate
-    value: "/usr/share/elasticsearch/config/certs/tls.crt"
-  - name: xpack.security.transport.ssl.certificate_authorities
-    value: "/usr/share/elasticsearch/config/certs/ca.crt"
-  - name: xpack.security.http.ssl.key
-    value: "/usr/share/elasticsearch/config/certs/tls.key"
-  - name: xpack.security.http.ssl.certificate
-    value: "/usr/share/elasticsearch/config/certs/tls.crt"
-  - name: xpack.security.http.ssl.certificate_authorities
-    value: "/usr/share/elasticsearch/config/certs/ca.crt"
-
-createCert: false
-
-# Allows you to load environment variables from kubernetes secret or config map
-envFrom: []
-# - secretRef:
-#     name: env-secret
-# - configMapRef:
-#     name: config-map
-
-# A list of secrets and their paths to mount inside the pod
-# This is useful for mounting certificates for security and for mounting
-# the X-Pack license
-secretMounts:
-  - name: elastic-certificates
-    secretName: elasticsearch-master-certs
-    path: /usr/share/elasticsearch/config/certs
-
-podAnnotations: {}
-  # iam.amazonaws.com/role: es-cluster
-
-# additional labels
-labels: {}
-
-esJavaOpts: "-Xmx1g -Xms1g"
-
-resources:
-  requests:
-    # cpu: "1000m"
-    memory: "2Gi"
-  limits:
-    # cpu: "1000m"
-    memory: "2Gi"
-
-initResources: {}
-  # limits:
-  #   cpu: "25m"
-  #   # memory: "128Mi"
-  # requests:
-  #   cpu: "25m"
-  #   memory: "128Mi"
-
-sidecarResources: {}
-  # limits:
-  #   cpu: "25m"
-  #   # memory: "128Mi"
-  # requests:
-  #   cpu: "25m"
-  #   memory: "128Mi"
-
-networkHost: "0.0.0.0"
-
-volumeClaimTemplate:
-  accessModes: [ "ReadWriteOnce" ]
-  resources:
-    requests:
-      storage: 25Gi
-
-rbac:
-  create: false
-  serviceAccountName: ""
-
-podSecurityPolicy:
-  create: false
-  name: ""
-  spec:
-    privileged: true
-    fsGroup:
-      rule: RunAsAny
-    runAsUser:
-      rule: RunAsAny
-    seLinux:
-      rule: RunAsAny
-    supplementalGroups:
-      rule: RunAsAny
-    volumes:
-      - secret
-      - configMap
-      - persistentVolumeClaim
-
-persistence:
-  enabled: true
-  dataDirSize: "25Gi"
-  annotations: {}
-
-extraVolumes: []
-  # - name: extras
-  #   emptyDir: {}
-
-extraVolumeMounts: []
-  # - name: extras
-  #   mountPath: /usr/share/extras
-  #   readOnly: true
-
-extraContainers: []
-  # - name: do-something
-  #   image: busybox
-  #   command: ['do', 'something']
-
-extraInitContainers: []
-  # - name: do-something
-  #   image: busybox
-  #   command: ['do', 'something']
-
-# This is the PriorityClass settings as defined in
-# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
-priorityClassName: ""
-
-# By default this will make sure two pods don't end up on the same node
-# Changing this to a region would allow you to spread pods across regions
-antiAffinityTopologyKey: "kubernetes.io/hostname"
-
-# Hard means that by default pods will only be scheduled if there are enough nodes for them
-# and that they will never end up on the same node. Setting this to soft will do this "best effort"
-antiAffinity: "hard"
-
-# This is the node affinity settings as defined in
-# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
-nodeAffinity: {}
-
-# The default is to deploy all pods serially. By setting this to parallel all pods are started at
-# the same time when bootstrapping the cluster
-podManagementPolicy: "Parallel"
-
-protocol: https
-httpPort: 9200
-transportPort: 9300
-
-secret:
-  enabled: false
-
-service:
-  labels: {}
-  labelsHeadless: {}
-  type: ClusterIP
-  nodePort: ""
-  annotations: {}
-  httpPortName: http
-  transportPortName: transport
-  loadBalancerIP: ""
-  loadBalancerSourceRanges: []
-
-updateStrategy: OnDelete
-
-# This is the max unavailable setting for the pod disruption budget
-# The default value of 1 will make sure that kubernetes won't allow more than 1
-# of your pods to be unavailable during maintenance
-maxUnavailable: 1
-
-podSecurityContext:
-  fsGroup: 1000
-  runAsUser: 1000
-
-securityContext:
-  capabilities:
-    drop:
-    - ALL
-  # readOnlyRootFilesystem: true
-  runAsNonRoot: true
-  runAsUser: 1000
-
-# How long to wait for elasticsearch to stop gracefully
-terminationGracePeriod: 120
-
-sysctlVmMaxMapCount: 262144
-
-readinessProbe:
-  failureThreshold: 3
-  initialDelaySeconds: 10
-  periodSeconds: 10
-  successThreshold: 3
-  timeoutSeconds: 5
-
-# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status
-clusterHealthCheckParams: "wait_for_status=green&timeout=1s"
-
-## Use an alternate scheduler.
-## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-##
-schedulerName: ""
-
-imagePullSecrets: []
-nodeSelector: {}
-tolerations: []
-
-# Enabling this will publicly expose your Elasticsearch instance.
-# Only enable this if you have security enabled on your cluster
-ingress:
-  enabled: false
-  annotations: {}
-    # kubernetes.io/ingress.class: nginx
-    # kubernetes.io/tls-acme: "true"
-  path: /
-  hosts:
-    - chart-example.local
-  tls: []
-  #  - secretName: chart-example-tls
-  #    hosts:
-  #      - chart-example.local
-
-nameOverride: ""
-fullnameOverride: ""
-
-# https://github.com/elastic/helm-charts/issues/63
-masterTerminationFix: false
-
-lifecycle: {}
-  # preStop:
-  #   exec:
-  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
-  # postStart:
-  #   exec:
-  #     command:
-  #       - bash
-  #       - -c
-  #       - |
-  #         #!/bin/bash
-  #         # Add a template to adjust number of shards/replicas
-  #         TEMPLATE_NAME=my_template
-  #         INDEX_PATTERN="logstash-*"
-  #         SHARD_COUNT=8
-  #         REPLICA_COUNT=1
-  #         ES_URL=http://localhost:9200
-  #         while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done
-  #         curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}'
-
-sysctlInitContainer:
-  enabled: true
-
-keystore: []
-
-# Deprecated
-# please use the above podSecurityContext.fsGroup instead
-fsGroup: ""
\ No newline at end of file

From c3f0ef50efb822dba9ad6e9273e647c2018c4d69 Mon Sep 17 00:00:00 2001
From: "kavi_elrey@1993" <25226238+kavi-egov@users.noreply.github.com>
Date: Sun, 6 Jul 2025 19:39:45 +0530
Subject: [PATCH 02/10] Delete
 config-as-code/helm/charts/backbone-services/elasticsearch-master directory

---
 .../elasticsearch-master/Chart.yaml                |   5 -
 .../templates/_helpers.tpl                         | 102 ----
 .../templates/headless-service.yaml                |  27 --
 .../templates/poddisruptionbudget.yaml             |  12 -
 .../templates/secret-cert.yaml                     |  15 -
 .../templates/service.yaml                         |  30 --
 .../templates/statefulset.yaml                     | 455 ------------------
 .../elasticsearch-master/values.yaml               | 291 -----------
 8 files changed, 937 deletions(-)
 delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch-master/Chart.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/_helpers.tpl
 delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/headless-service.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/poddisruptionbudget.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/secret-cert.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/service.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/statefulset.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch-master/values.yaml

diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch-master/Chart.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch-master/Chart.yaml
deleted file mode 100644
index 213170f04..000000000
--- a/config-as-code/helm/charts/backbone-services/elasticsearch-master/Chart.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-apiVersion: v1
-appVersion: "1.0"
-description: A Helm chart for Elasticsearch on Kubernetes
-name: elasticsearch
-version: 0.1.0
\ No newline at end of file
diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/_helpers.tpl b/config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/_helpers.tpl
deleted file mode 100644
index 7c9588984..000000000
--- a/config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/_helpers.tpl
+++ /dev/null
@@ -1,102 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{- define "name" -}}
-{{- $envOverrides := index .Values (tpl (default .Chart.Name .Values.name) .) -}}
-{{- $baseValues := .Values | deepCopy -}}
-{{- $values := dict "Values" (mustMergeOverwrite $baseValues $envOverrides) -}}
-{{- with mustMergeOverwrite . $values -}}
-{{- default .Chart.Name .Values.name -}}
-{{- end }}
-{{- end }}
-
-{{- define "elasticsearch.roles" -}}
-{{- range $.Values.roles -}}
-{{ . }},
-{{- end -}}
-{{- end -}}
-
-{{/*
-Generate certificates when the secret doesn't exist
-*/}}
-{{- define "elasticsearch.gen-certs" -}}
-{{- $certs := lookup "v1" "Secret" .Values.namespace ( printf "%s-certs" (include "name" . ) ) -}}
-{{- if $certs -}}
-tls.crt: {{ index $certs.data "tls.crt" }}
-tls.key: {{ index $certs.data "tls.key" }}
-ca.crt: {{ index $certs.data "ca.crt" }}
-{{- else -}}
-{{- $altNames := list ( include "elasticsearch.masterService" . ) ( printf "%s.%s" (include "elasticsearch.masterService" .) .Values.namespace ) ( printf "%s.%s.svc" (include "elasticsearch.masterService" .) .Values.namespace ) -}}
-{{- $ca := genCA "elasticsearch-ca" 365 -}}
-{{- $cert := genSignedCert ( include "elasticsearch.masterService" . ) nil $altNames 365 $ca -}}
-tls.crt: {{ $cert.Cert | toString | b64enc }}
-tls.key: {{ $cert.Key | toString | b64enc }}
-ca.crt: {{ $ca.Cert | toString | b64enc }}
-{{- end -}}
-{{- end -}}
-
-
-{{- define "elasticsearch.masterService" -}}
-{{- if empty .Values.masterService -}}
-{{- if empty .Values.fullnameOverride -}}
-{{- if empty .Values.nameOverride -}}
-{{ .Values.clusterName }}-master
-{{- else -}}
-{{ .Values.nameOverride }}-master
-{{- end -}}
-{{- else -}}
-{{ .Values.fullnameOverride }}
-{{- end -}}
-{{- else -}}
-{{ .Values.masterService }}
-{{- end -}}
-{{- end -}}
-
-{{- define "elasticsearch.endpoints" -}}
-{{- $replicas := int (toString (.Values.replicas)) }}
-{{- $uname := printf "%s-%s" .Values.clusterName .Values.nodeGroup }}
-  {{- range $i, $e := untilStep 0 $replicas 1 -}}
-{{ $uname }}-{{ $i }},
-  {{- end -}}
-{{- end -}}
-
-{{- define "elasticsearch.esMajorVersion" -}}
-{{- if .Values.esMajorVersion -}}
-{{ .Values.esMajorVersion }}
-{{- else -}}
-{{- $version := int (index (.Values.image.tag | splitList ".") 0) -}}
-  {{- if and (contains "docker.elastic.co/elasticsearch/elasticsearch" .Values.image.repository) (not (eq $version 0)) -}}
-{{ $version }}
-  {{- else -}}
-8
-  {{- end -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Return the appropriate apiVersion for statefulset.
-*/}}
-{{- define "elasticsearch.statefulset.apiVersion" -}}
-{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.GitVersion -}}
-{{- print "apps/v1beta2" -}}
-{{- else -}}
-{{- print "apps/v1" -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Return the appropriate apiVersion for ingress.
-*/}}
-{{- define "elasticsearch.ingress.apiVersion" -}}
-{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
-{{- print "extensions/v1beta1" -}}
-{{- else -}}
-{{- print "networking.k8s.io/v1beta1" -}}
-{{- end -}}
-{{- end -}}
-
-{{- define "common.image" -}}
-{{- if contains "/" .repository -}}
-{{- printf "%s:%s" .repository ( required "Tag is mandatory" .tag ) -}}
-{{- else -}}
-{{- printf "%s/%s:%s" $.Values.global.containerRegistry .repository ( required "Tag is mandatory" .tag ) -}}
-{{- end -}}
-{{- end -}}
\ No newline at end of file
diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/headless-service.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/headless-service.yaml
deleted file mode 100644
index 857fe8794..000000000
--- a/config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/headless-service.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-kind: Service
-apiVersion: v1
-metadata:
-{{- if eq .Values.nodeGroup "master" }}
-  name: {{ template "name" . }}-headless
-{{- else }}
-  name: {{ template "name" . }}-headless
-{{- end }}
-  namespace: {{ .Values.namespace }}
-  labels:
-    app: "{{ template "name" . }}"
-{{- if .Values.service.labelsHeadless }}
-{{ toYaml .Values.service.labelsHeadless | indent 4 }}
-{{- end }}
-  annotations:
-    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
-spec:
-  clusterIP: None # This is needed for statefulset hostnames like elasticsearch-0 to resolve
-  # Create endpoints also if the related pod isn't ready
-  publishNotReadyAddresses: true
-  selector:
-    app: "{{ template "name" . }}"
-  ports:
-  - name: {{ .Values.service.httpPortName | default "http" }}
-    port: {{ .Values.httpPort }}
-  - name: {{ .Values.service.transportPortName | default "transport" }}
-    port: {{ .Values.transportPort }}
\ No newline at end of file
diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/poddisruptionbudget.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/poddisruptionbudget.yaml
deleted file mode 100644
index 6582bcfce..000000000
--- a/config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/poddisruptionbudget.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-{{- if .Values.maxUnavailable }}
-apiVersion: policy/v1
-kind: PodDisruptionBudget
-metadata:
-  name: "{{ template "name" . }}-pdb"
-  namespace: {{ .Values.namespace }}
-spec:
-  maxUnavailable: {{ .Values.maxUnavailable }}
-  selector:
-    matchLabels:
-      app: "{{ template "name" . }}"
-{{- end }}
\ No newline at end of file
diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/secret-cert.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/secret-cert.yaml
deleted file mode 100644
index 9109a71ec..000000000
--- a/config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/secret-cert.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-{{- if .Values.createCert }}
-apiVersion: v1
-kind: Secret
-type: kubernetes.io/tls
-metadata:
-  name: {{ template "name" . }}-certs
-  namespace: {{ .Values.namespace }}
-  labels:
-    app: "{{ template "name" . }}"
-    {{- range $key, $value := .Values.labels }}
-    {{ $key }}: {{ $value | quote }}
-    {{- end }}
-data:
-{{ ( include "elasticsearch.gen-certs" . ) | indent 2 }}
-{{- end }}
diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/service.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/service.yaml
deleted file mode 100644
index 3a2c57c75..000000000
--- a/config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/service.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-kind: Service
-apiVersion: v1
-metadata:
-{{- if eq .Values.nodeGroup "master" }}
-  name: {{ template "name" . }}
-{{- else }}
-  name: {{ template "name" . }}
-{{- end }}
-  namespace: {{ .Values.namespace }}
-  labels:
-    app: "{{ template "name" . }}"
-{{- if .Values.service.labels }}
-{{ toYaml .Values.service.labels | indent 4}}
-{{- end }}
-  annotations:
-{{ toYaml .Values.service.annotations | indent 4 }}
-spec:
-  type: {{ .Values.service.type }}
-  selector:
-    app: "{{ template "name" . }}"
-  ports:
-  - name: {{ .Values.service.httpPortName | default "http" }}
-    protocol: TCP
-    port: {{ .Values.httpPort }}
-{{- if .Values.service.nodePort }}
-    nodePort: {{ .Values.service.nodePort }}
-{{- end }}
-  - name: {{ .Values.service.transportPortName | default "transport" }}
-    protocol: TCP
-    port: {{ .Values.transportPort }}
\ No newline at end of file
diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/statefulset.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/statefulset.yaml
deleted file mode 100644
index 013d25943..000000000
--- a/config-as-code/helm/charts/backbone-services/elasticsearch-master/templates/statefulset.yaml
+++ /dev/null
@@ -1,455 +0,0 @@
----
-apiVersion: {{ template "elasticsearch.statefulset.apiVersion" . }}
-kind: StatefulSet
-metadata:
-  name: {{ template "name" . }}
-  namespace: {{ .Values.namespace }}
-  labels:
-    app: "{{ template "name" . }}"
-    {{- range $key, $value := .Values.labels }}
-    {{ $key }}: {{ $value | quote }}
-    {{- end }}
-  annotations:
-    esMajorVersion: "{{ include "elasticsearch.esMajorVersion" . }}"
-spec:
-  serviceName: {{ template "name" . }}-headless
-  selector:
-    matchLabels:
-      app: "{{ template "name" . }}"
-  replicas: {{ .Values.replicas }}
-  podManagementPolicy: {{ .Values.podManagementPolicy }}
-  updateStrategy:
-    type: {{ .Values.updateStrategy }}
-  {{- if .Values.persistence.enabled }}
-  volumeClaimTemplates:
-  - metadata:
-      name: es-storage
-      {{- with .Values.persistence.annotations }}
-      annotations:
-{{ toYaml . | indent 8 }}
-      {{- end }}
-    spec:
-{{ toYaml .Values.volumeClaimTemplate | indent 6 }}
-  {{- end }}
-  template:
-    metadata:
-      name: "{{ template "name" . }}"
-      labels:
-        app: "{{ template "name" . }}"
-        {{- range $key, $value := .Values.labels }}
-        {{ $key }}: {{ $value | quote }}
-        {{- end }}
-      annotations:
-        {{- range $key, $value := .Values.podAnnotations }}
-        {{ $key }}: {{ $value | quote }}
-        {{- end }}
-        {{/* This forces a restart if the configmap has changed */}}
-        {{- if .Values.esConfig }}
-        configchecksum: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum | trunc 63 }}
-        {{- end }}
-    spec:
-      {{- if .Values.schedulerName }}
-      schedulerName: "{{ .Values.schedulerName }}"
-      {{- end }}
-      securityContext:
-{{ toYaml .Values.podSecurityContext | indent 8 }}
-        {{- if .Values.fsGroup }}
-        fsGroup: {{ .Values.fsGroup }} # Deprecated value, please use .Values.podSecurityContext.fsGroup
-        {{- end }}
-      {{- if .Values.rbac.create }}
-      serviceAccountName: "{{ template "name" . }}"
-      {{- else if not (eq .Values.rbac.serviceAccountName "") }}
-      serviceAccountName: {{ .Values.rbac.serviceAccountName | quote }}
-      {{- end }}
-      {{- with .Values.tolerations }}
-      tolerations:
-{{ toYaml . | indent 6 }}
-      {{- end }}
-      {{- with .Values.nodeSelector }}
-      nodeSelector:
-{{ toYaml . | indent 8 }}
-      {{- end }}
-      {{- if or (eq .Values.antiAffinity "hard") (eq .Values.antiAffinity "soft") .Values.nodeAffinity }}
-      {{- if .Values.priorityClassName }}
-      priorityClassName: {{ .Values.priorityClassName }}
-      {{- end }}
-      affinity:
-      {{- end }}
-      {{- if eq .Values.antiAffinity "hard" }}
-        podAntiAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-          - labelSelector:
-              matchExpressions:
-              - key: app
-                operator: In
-                values:
-                - "{{ template "name" .}}"
-            topologyKey: {{ .Values.antiAffinityTopologyKey }}
-      {{- else if eq .Values.antiAffinity "soft" }}
-        podAntiAffinity:
-          preferredDuringSchedulingIgnoredDuringExecution:
-          - weight: 1
-            podAffinityTerm:
-              topologyKey: {{ .Values.antiAffinityTopologyKey }}
-              labelSelector:
-                matchExpressions:
-                - key: app
-                  operator: In
-                  values:
-                  - "{{ template "name" . }}"
-      {{- end }}
-      {{- with .Values.nodeAffinity }}
-        nodeAffinity:
-{{ toYaml . | indent 10 }}
-      {{- end }}
-      terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }}
-      volumes:
-      {{- range .Values.secretMounts }}
-      - name: {{ .name }}
-        secret:
-          secretName: {{ .secretName }}
-      {{- end }}
-      {{- if .Values.esConfig }}
-      - name: esconfig
-        configMap:
-          name: {{ template "name" . }}-config
-      {{- end }}
-      {{- if .Values.createCert }}
-      - name: elasticsearch-certs
-        secret:
-          secretName: {{ template "name" . }}-certs
-      {{- end }}
-{{- if .Values.keystore }}
-      - name: keystore
-        emptyDir: {}
-      {{- range .Values.keystore }}
-      - name: keystore-{{ .secretName }}
-        secret: {{ toYaml . | nindent 12 }}
-      {{- end }}
-{{ end }}
-      {{- if .Values.extraVolumes }}
-      # Currently some extra blocks accept strings
-      # to continue with backwards compatibility this is being kept
-      # whilst also allowing for yaml to be specified too.
-      {{- if eq "string" (printf "%T" .Values.extraVolumes) }}
-{{ tpl .Values.extraVolumes . | indent 8 }}
-      {{- else }}
-{{ toYaml .Values.extraVolumes | indent 8 }}
-      {{- end }}
-      {{- end }}
-      {{- if .Values.imagePullSecrets }}
-      imagePullSecrets:
-{{ toYaml .Values.imagePullSecrets | indent 8 }}
-      {{- end }}
-      initContainers:
-      {{- if .Values.sysctlInitContainer.enabled }}
-      - name: configure-sysctl
-        securityContext:
-          runAsUser: 0
-          privileged: true
-        image: {{ template "common.image" (dict "Values" .Values "repository" .Values.image.repository "tag" .Values.image.tag) }}
-        imagePullPolicy: "{{ .Values.image.pullPolicy }}"
-        command: ["sysctl", "-w", "vm.max_map_count={{ .Values.sysctlVmMaxMapCount}}"]
-        resources:
-{{ toYaml .Values.initResources | indent 10 }}
-      {{- end }}
-{{ if .Values.keystore }}
-      - name: keystore
-        securityContext:
-{{ toYaml .Values.securityContext | indent 10 }}
-        image: {{ template "common.image" (dict "Values" .Values "repository" .Values.image.repository "tag" .Values.image.tag) }}
-        imagePullPolicy: "{{ .Values.image.pullPolicy }}"
-        command:
-        - sh
-        - -c
-        - |
-          #!/usr/bin/env bash
-          set -euo pipefail
-
-          elasticsearch-keystore create
-
-          for i in /tmp/keystoreSecrets/*/*; do
-            key=$(basename $i)
-            echo "Adding file $i to keystore key $key"
-            elasticsearch-keystore add-file "$key" "$i"
-          done
-
-          # Add the bootstrap password since otherwise the Elasticsearch entrypoint tries to do this on startup
-          if [ ! -z ${ELASTIC_PASSWORD+x} ]; then
-            echo 'Adding env $ELASTIC_PASSWORD to keystore as key bootstrap.password'
-            echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x bootstrap.password
-          fi
-
-          cp -a /usr/share/elasticsearch/config/elasticsearch.keystore /tmp/keystore/
-        env: {{ toYaml .Values.extraEnvs | nindent 10 }}
-        envFrom: {{ toYaml .Values.envFrom | nindent 10 }}
-        resources: {{ toYaml .Values.initResources | nindent 10 }}
-        volumeMounts:
-        - name: keystore
-          mountPath: /tmp/keystore
-        {{- range .Values.keystore }}
-        - name: keystore-{{ .secretName }}
-          mountPath: /tmp/keystoreSecrets/{{ .secretName }}
-        {{- end }}
-{{ end }}
-      {{- if .Values.extraInitContainers }}
-      # Currently some extra blocks accept strings
-      # to continue with backwards compatibility this is being kept
-      # whilst also allowing for yaml to be specified too.
-      {{- if eq "string" (printf "%T" .Values.extraInitContainers) }}
-{{ tpl .Values.extraInitContainers . | indent 6 }}
-      {{- else }}
-{{ toYaml .Values.extraInitContainers | indent 6 }}
-      {{- end }}
-      {{- end }}
-      containers:
-      - name: "elasticsearch"
-        securityContext:
-{{ toYaml .Values.securityContext | indent 10 }}
-        image: {{ template "common.image" (dict "Values" .Values "repository" .Values.image.repository "tag" .Values.image.tag) }}
-        imagePullPolicy: "{{ .Values.image.pullPolicy }}"
-        readinessProbe:
-          exec:
-            command:
-            - bash
-            - -c
-            - |
-              set -e
-
-              # Exit if ELASTIC_PASSWORD is unset
-              if [ -z "${ELASTIC_PASSWORD}" ]; then
-                echo "ELASTIC_PASSWORD variable is missing, exiting"
-                exit 1
-              fi
-
-              # If the node is starting up wait for the cluster to be ready (request params: "{{ .Values.clusterHealthCheckParams }}" )
-              # Once it has started only check that the node itself is responding
-              START_FILE=/tmp/.es_start_file
-
-              # Disable nss cache to avoid filling dentry cache when calling curl
-              # This is required with Elasticsearch Docker using nss < 3.52
-              export NSS_SDB_USE_CACHE=no
-
-              http () {
-                local path="${1}"
-                local args="${2}"
-                set -- -XGET -s
-
-                if [ "$args" != "" ]; then
-                  set -- "$@" $args
-                fi
-
-                set -- "$@" -u "elastic:${ELASTIC_PASSWORD}"
-
-                curl --output /dev/null -k "$@" "{{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}${path}"
-              }
-
-              if [ -f "${START_FILE}" ]; then
-                echo 'Elasticsearch is already running, lets check the node is healthy'
-                HTTP_CODE=$(http "/" "-w %{http_code}")
-                RC=$?
-                if [[ ${RC} -ne 0 ]]; then
-                  echo "curl --output /dev/null -k -XGET -s -w '%{http_code}' \${BASIC_AUTH} {{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}/ failed with RC ${RC}"
-                  exit ${RC}
-                fi
-                # ready if HTTP code 200, 503 is tolerable if ES version is 6.x
-                if [[ ${HTTP_CODE} == "200" ]]; then
-                  exit 0
-                elif [[ ${HTTP_CODE} == "503" && "{{ include "elasticsearch.esMajorVersion" . }}" == "6" ]]; then
-                  exit 0
-                else
-                  echo "curl --output /dev/null -k -XGET -s -w '%{http_code}' \${BASIC_AUTH} {{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}/ failed with HTTP code ${HTTP_CODE}"
-                  exit 1
-                fi
-
-              else
-                echo 'Waiting for elasticsearch cluster to become ready (request params: "{{ .Values.clusterHealthCheckParams }}" )'
-                if http "/_cluster/health?{{ .Values.clusterHealthCheckParams }}" "--fail" ; then
-                  touch ${START_FILE}
-                  exit 0
-                else
-                  echo 'Cluster is not yet ready (request params: "{{ .Values.clusterHealthCheckParams }}" )'
-                  exit 1
-                fi
-              fi
-{{ toYaml .Values.readinessProbe | indent 10 }}
-        ports:
-        - name: http
-          containerPort: {{ .Values.httpPort }}
-        - name: transport
-          containerPort: {{ .Values.transportPort }}
-        resources:
-{{ toYaml .Values.resources | indent 10 }}
-        env:
-          - name: node.name
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-          {{- if has "master" .Values.roles }}
-          - name: cluster.initial_master_nodes
-            value: "{{ template "elasticsearch.endpoints" . }}"
-          {{- end }}
-          {{- if gt (len (include "elasticsearch.roles" .)) 0 }}
-          - name: node.roles
-            value: "{{ template "elasticsearch.roles" . }}"
-          {{- end }}
-          {{- if lt (int (include "elasticsearch.esMajorVersion" .)) 7 }}
-          - name: discovery.zen.ping.unicast.hosts
-            value: "{{ template "elasticsearch.masterService" . }}-headless"
-          {{- else }}
-          - name: discovery.seed_hosts
-            value: "{{ template "elasticsearch.masterService" . }}-headless"
-          {{- end }}
-          - name: cluster.name
-            value: {{ .Values.clusterName | quote }}
-          - name: network.host
-            value: {{ .Values.networkHost | quote }}
-          {{- if .Values.secret.enabled }}
-          - name: ELASTIC_PASSWORD
-            valueFrom:
-              secretKeyRef:
-                name: {{ template "name" . }}-credentials
-                key: password
-          {{- end }}
-          - name: ES_JAVA_OPTS
-            value: {{ .Values.esJavaOpts | quote }}
-          {{- if .Values.createCert }}
-          - name: xpack.security.enabled
-            value: "true"
-          - name: xpack.security.transport.ssl.enabled
-            value: "true"
-          - name: xpack.security.enrollment.enabled
-            value: "true"
-          - name: xpack.security.http.ssl.enabled
-            value: "true"
-          - name: xpack.security.transport.ssl.verification_mode
-            value: "certificate"
-          - name: xpack.security.transport.ssl.key
-            value: "/usr/share/elasticsearch/config/certs/tls.key"
-          - name: xpack.security.transport.ssl.certificate
-            value: "/usr/share/elasticsearch/config/certs/tls.crt"
-          - name: xpack.security.transport.ssl.certificate_authorities
-            value: "/usr/share/elasticsearch/config/certs/ca.crt"
-          - name: xpack.security.http.ssl.key
-            value: "/usr/share/elasticsearch/config/certs/tls.key"
-          - name: xpack.security.http.ssl.certificate
-            value: "/usr/share/elasticsearch/config/certs/tls.crt"
-          - name: xpack.security.http.ssl.certificate_authorities
-            value: "/usr/share/elasticsearch/config/certs/ca.crt"
-          {{- end }}
-{{- if .Values.extraEnvs }}
-{{ toYaml .Values.extraEnvs | indent 10 }}
-{{- end }}
-{{- if .Values.envFrom }}
-        envFrom:
-{{ toYaml .Values.envFrom | indent 10 }}
-{{- end }}
-        volumeMounts:
-          {{- if .Values.persistence.enabled }}
-          - name: "es-storage"
-            mountPath: /usr/share/elasticsearch/data
-          {{- end }}
-          {{- if .Values.createCert }}
-          - name: elasticsearch-certs
-            mountPath: /usr/share/elasticsearch/config/certs
-            readOnly: true
-          {{- end }}
-{{ if .Values.keystore }}
-          - name: keystore
-            mountPath: /usr/share/elasticsearch/config/elasticsearch.keystore
-            subPath: elasticsearch.keystore
-{{ end }}
-          {{- range .Values.secretMounts }}
-          - name: {{ .name }}
-            mountPath: {{ .path }}
-            {{- if .subPath }}
-            subPath: {{ .subPath }}
-            {{- end }}
-          {{- end }}
-          {{- range $path, $config := .Values.esConfig }}
-          - name: esconfig
-            mountPath: /usr/share/elasticsearch/config/{{ $path }}
-            subPath: {{ $path }}
-          {{- end -}}
-        {{- if .Values.extraVolumeMounts }}
-        # Currently some extra blocks accept strings
-        # to continue with backwards compatibility this is being kept
-        # whilst also allowing for yaml to be specified too.
-        {{- if eq "string" (printf "%T" .Values.extraVolumeMounts) }}
-{{ tpl .Values.extraVolumeMounts . | indent 10 }}
-        {{- else }}
-{{ toYaml .Values.extraVolumeMounts | indent 10 }}
-        {{- end }}
-        {{- end }}
-      {{- if .Values.masterTerminationFix }}
-      {{- if has "master" .Values.roles }}
-      # This sidecar will prevent slow master re-election
-      # https://github.com/elastic/helm-charts/issues/63
-      - name: elasticsearch-master-graceful-termination-handler
-        image: {{ template "common.image" (dict "Values" .Values "repository" .Values.image.repository "tag" .Values.image.tag) }}
-        imagePullPolicy: "{{ .Values.image.pullPolicy }}"
-        command:
-        - "sh"
-        - -c
-        - |
-          #!/usr/bin/env bash
-          set -eo pipefail
-
-          http () {
-            local path="${1}"
-            if [ -n "${ELASTIC_USERNAME}" ] && [ -n "${ELASTIC_PASSWORD}" ]; then
-              BASIC_AUTH="-u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}"
-            else
-              BASIC_AUTH=''
-            fi
-            curl -XGET -s -k --fail ${BASIC_AUTH} {{ .Values.protocol }}://{{ .Values.masterService }}:{{ .Values.httpPort }}${path}
-          }
-
-          cleanup () {
-            while true ; do
-              local master="$(http "/_cat/master?h=node" || echo "")"
-              if [[ $master == "{{ .Values.masterService }}"* && $master != "${NODE_NAME}" ]]; then
-                echo "This node is not master."
-                break
-              fi
-              echo "This node is still master, waiting gracefully for it to step down"
-              sleep 1
-            done
-
-            exit 0
-          }
-
-          trap cleanup SIGTERM
-
-          sleep infinity &
-          wait $!
-        resources:
-{{ toYaml .Values.sidecarResources | indent 10 }}
-        env:
-        - name: NODE_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        {{- if .Values.extraEnvs }}
-{{ toYaml .Values.extraEnvs | indent 10 }}
-        {{- end }}
-        {{- if .Values.envFrom }}
-        envFrom:
-{{ toYaml .Values.envFrom | indent 10 }}
-        {{- end }}
-      {{- end }}
-      {{- end }}
-{{- if .Values.lifecycle }}
-        lifecycle:
-{{ toYaml .Values.lifecycle | indent 10 }}
-{{- end }}
-      {{- if .Values.extraContainers }}
-      # Currently some extra blocks accept strings
-      # to continue with backwards compatibility this is being kept
-      # whilst also allowing for yaml to be specified too.
-      {{- if eq "string" (printf "%T" .Values.extraContainers) }}
-{{ tpl .Values.extraContainers . | indent 6 }}
-      {{- else }}
-{{ toYaml .Values.extraContainers | indent 6 }}
-      {{- end }}
-      {{- end }}
\ No newline at end of file
diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch-master/values.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch-master/values.yaml
deleted file mode 100644
index d6eeccc39..000000000
--- a/config-as-code/helm/charts/backbone-services/elasticsearch-master/values.yaml
+++ /dev/null
@@ -1,291 +0,0 @@
-
-name: elasticsearch-master
-replicas: 3
-
-image:
-  pullPolicy: IfNotPresent
-  repository: docker.elastic.co/elasticsearch/elasticsearch
-  tag: 8.11.3
-
-clusterName: "elasticsearch"
-nodeGroup: "master"
-namespace: es-cluster
-
-# The service that non master groups will try to connect to when joining the cluster
-# This should be set to clusterName + "-" + nodeGroup for your master group
-masterService: "elasticsearch-master"
-
-# Elasticsearch roles that will be applied to this nodeGroup
-# These will be set as environment variables. E.g. node.master=true
-roles:
-  - master
-minimumMasterNodes: 2
-
-esMajorVersion: ""
-
-# Allows you to add any config files in /usr/share/elasticsearch/config/
-# such as elasticsearch.yml and log4j2.properties
-esConfig: {}
-#  elasticsearch.yml: |
-#    key:
-#      nestedkey: value
-#  log4j2.properties: |
-#    key = value
-
-# Extra environment variables to append to this nodeGroup
-# This will be appended to the current 'env:' key. You can use any of the kubernetes env
-# syntax here
-extraEnvs:
-#  - name: MY_ENVIRONMENT_VAR
-#    value: the_value_goes_here
-  - name: path.data
-    value: "/usr/share/elasticsearch/data"
-  - name: path.logs
-    value: "/usr/share/elasticsearch/logs"
-#  - name: gateway.expected_master_nodes
-#    value: "2"
-  - name: gateway.expected_data_nodes
-    value: "1"
-  - name: gateway.recover_after_time
-    value: "5m"
-#  - name: gateway.recover_after_master_nodes
-#    value: "2"
-  - name: gateway.recover_after_data_nodes
-    value: "1"
-#  - name: xpack.security.enabled
-#    value: "false"
-#  - name: xpack.security.audit.enabled
-#    value: "false"
-
-createCert: true
-
-# Allows you to load environment variables from kubernetes secret or config map
-envFrom: []
-# - secretRef:
-#     name: env-secret
-# - configMapRef:
-#     name: config-map
-
-# A list of secrets and their paths to mount inside the pod
-# This is useful for mounting certificates for security and for mounting
-# the X-Pack license
-secretMounts: []
-#  - name: elastic-certificates
-#    secretName: elastic-certificates
-#    path: /usr/share/elasticsearch/config/certs
-
-podAnnotations: {}
-  # iam.amazonaws.com/role: es-cluster
-
-# additional labels
-labels: {}
-
-esJavaOpts: "-Xmx448m -Xms448m"
-
-resources:
-  requests:
-    memory: "896Mi"
-  limits:
-    memory: "1500Mi"
-
-initResources: {}
-  # limits:
-  #   cpu: "25m"
-  #   # memory: "128Mi"
-  # requests:
-  #   cpu: "25m"
-  #   memory: "128Mi"
-
-sidecarResources: {}
-  # limits:
-  #   cpu: "25m"
-  #   # memory: "128Mi"
-  # requests:
-  #   cpu: "25m"
-  #   memory: "128Mi"
-
-networkHost: "0.0.0.0"
-
-volumeClaimTemplate:
-  accessModes: [ "ReadWriteOnce" ]
-  resources:
-    requests:
-      storage: 2Gi
-
-rbac:
-  create: false
-  serviceAccountName: ""
-
-podSecurityPolicy:
-  create: false
-  name: ""
-  spec:
-    privileged: true
-    fsGroup:
-      rule: RunAsAny
-    runAsUser:
-      rule: RunAsAny
-    seLinux:
-      rule: RunAsAny
-    supplementalGroups:
-      rule: RunAsAny
-    volumes:
-      - secret
-      - configMap
-      - persistentVolumeClaim
-
-persistence:
-  enabled: true
-  dataDirSize: "2Gi"
-  annotations: {}
-
-extraVolumes: []
-  # - name: extras
-
# emptyDir: {} - -extraVolumeMounts: [] - # - name: extras - # mountPath: /usr/share/extras - # readOnly: true - -extraContainers: [] - # - name: do-something - # image: busybox - # command: ['do', 'something'] - -extraInitContainers: [] - # - name: do-something - # image: busybox - # command: ['do', 'something'] - -# This is the PriorityClass settings as defined in -# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass -priorityClassName: "" - -# By default this will make sure two pods don't end up on the same node -# Changing this to a region would allow you to spread pods across regions -antiAffinityTopologyKey: "kubernetes.io/hostname" - -# Hard means that by default pods will only be scheduled if there are enough nodes for them -# and that they will never end up on the same node. Setting this to soft will do this "best effort" -antiAffinity: "hard" - -# This is the node affinity settings as defined in -# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature -nodeAffinity: {} - -# The default is to deploy all pods serially. By setting this to parallel all pods are started at -# the same time when bootstrapping the cluster -podManagementPolicy: "Parallel" - -protocol: https -httpPort: 9200 -transportPort: 9300 - -secret: - enabled: true - password: 8fwbD6HbJh6HU0oddsHm8TEI - -service: - labels: {} - labelsHeadless: {} - type: ClusterIP - nodePort: "" - annotations: {} - httpPortName: http - transportPortName: transport - loadBalancerIP: "" - loadBalancerSourceRanges: [] - -updateStrategy: OnDelete - -# This is the max unavailable setting for the pod disruption budget -# The default value of 1 will make sure that kubernetes won't allow more than 1 -# of your pods to be unavailable during maintenance -maxUnavailable: 1 - -podSecurityContext: - fsGroup: 1000 - runAsUser: 1000 - -securityContext: - capabilities: - drop: - - ALL - # readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 1000 - -# How long to wait for elasticsearch to stop gracefully -terminationGracePeriod: 120 - -sysctlVmMaxMapCount: 262144 - -readinessProbe: - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 3 - timeoutSeconds: 5 - -# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status -clusterHealthCheckParams: "wait_for_status=green&timeout=1s" - -## Use an alternate scheduler. -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -schedulerName: "" - -imagePullSecrets: [] -nodeSelector: {} -tolerations: [] - -# Enabling this will publically expose your Elasticsearch instance. 
-# Only enable this if you have security enabled on your cluster -ingress: - enabled: false - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - path: / - hosts: - - chart-example.local - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -nameOverride: "" -fullnameOverride: "" - -# https://github.com/elastic/helm-charts/issues/63 -masterTerminationFix: true - -lifecycle: {} - # preStop: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] - # postStart: - # exec: - # command: - # - bash - # - -c - # - | - # #!/bin/bash - # # Add a template to adjust number of shards/replicas - # TEMPLATE_NAME=my_template - # INDEX_PATTERN="logstash-*" - # SHARD_COUNT=8 - # REPLICA_COUNT=1 - # ES_URL=http://localhost:9200 - # while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done - # curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}' - -sysctlInitContainer: - enabled: true - -keystore: [] - -# Deprecated -# please use the above podSecurityContext.fsGroup instead -fsGroup: "" From c94ca0b802ea54bc0bf0de834ef2507a8a794332 Mon Sep 17 00:00:00 2001 From: "kavi_elrey@1993" <25226238+kavi-egov@users.noreply.github.com> Date: Sun, 6 Jul 2025 19:40:00 +0530 Subject: [PATCH 03/10] Delete config-as-code/helm/charts/backbone-services/elasticsearch directory --- .../elasticsearch/Chart.yaml | 5 - .../elasticsearch-data-infra-v1-values.yaml | 301 ------------- .../elasticsearch-data-v1-values.yaml | 286 ------------- .../elasticsearch-master-infra-v1-values.yaml | 284 ------------- .../elasticsearch-master-v1-values.yaml | 284 ------------- .../elasticsearch/templates/_helpers.tpl | 61 --- .../templates/headless-service.yaml | 27 -- .../templates/poddisruptionbudget.yaml | 12 - .../elasticsearch/templates/service.yaml | 30 -- .../elasticsearch/templates/statefulset.yaml | 400 ------------------ 10 files changed, 1690 deletions(-) delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch/Chart.yaml delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch/elasticsearch-data-infra-v1-values.yaml delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch/elasticsearch-data-v1-values.yaml delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch/elasticsearch-master-infra-v1-values.yaml delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch/elasticsearch-master-v1-values.yaml delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch/templates/_helpers.tpl delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch/templates/headless-service.yaml delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch/templates/poddisruptionbudget.yaml delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch/templates/service.yaml delete mode 100644 config-as-code/helm/charts/backbone-services/elasticsearch/templates/statefulset.yaml diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch/Chart.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch/Chart.yaml deleted file mode 100644 index 213170f04..000000000 --- a/config-as-code/helm/charts/backbone-services/elasticsearch/Chart.yaml +++ /dev/null @@ -1,5 +0,0 @@ 
-apiVersion: v1 -appVersion: "1.0" -description: A Helm chart for Confluent Kafka on Kubernetes -name: elasticsearch -version: 0.1.0 \ No newline at end of file diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch/elasticsearch-data-infra-v1-values.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch/elasticsearch-data-infra-v1-values.yaml deleted file mode 100644 index 9975f3f42..000000000 --- a/config-as-code/helm/charts/backbone-services/elasticsearch/elasticsearch-data-infra-v1-values.yaml +++ /dev/null @@ -1,301 +0,0 @@ - -name: elasticsearch-data-infra-v1 -namespace: es-cluster-infra -replicas: 3 - -image: - pullPolicy: IfNotPresent - repository: docker.elastic.co/elasticsearch/elasticsearch - tag: 6.4.2 - -clusterName: "elasticsearch-infra-v1" -nodeGroup: "data" - -# The service that non master groups will try to connect to when joining the cluster -# This should be set to clusterName + "-" + nodeGroup for your master group -masterService: "elasticsearch-master-infra-v1" - -# Elasticsearch roles that will be applied to this nodeGroup -# These will be set as environment variables. E.g. node.master=true -roles: - master: "false" - ingest: "true" - data: "true" - -minimumMasterNodes: 2 - -esMajorVersion: "" - -# Allows you to add any config files in /usr/share/elasticsearch/config/ -# such as elasticsearch.yml and log4j2.properties -esConfig: {} -# elasticsearch.yml: | -# key: -# nestedkey: value -# log4j2.properties: | -# key = value - -# Extra environment variables to append to this nodeGroup -# This will be appended to the current 'env:' key. You can use any of the kubernetes env -# syntax here -extraEnvs: -# - name: MY_ENVIRONMENT_VAR -# value: the_value_goes_here - - name: path.data - value: "/usr/share/elasticsearch/data" - - name: path.logs - value: "/usr/share/elasticsearch/logs" - - name: gateway.expected_master_nodes - value: "2" - - name: gateway.expected_data_nodes - value: "1" - - name: gateway.recover_after_time - value: "5m" - - name: gateway.recover_after_master_nodes - value: "2" - - name: gateway.recover_after_data_nodes - value: "1" - -# Allows you to load environment variables from kubernetes secret or config map -envFrom: [] -# - secretRef: -# name: env-secret -# - configMapRef: -# name: config-map - -# A list of secrets and their paths to mount inside the pod -# This is useful for mounting certificates for security and for mounting -# the X-Pack license -secretMounts: [] -# - name: elastic-certificates -# secretName: elastic-certificates -# path: /usr/share/elasticsearch/config/certs - -podAnnotations: {} - # iam.amazonaws.com/role: es-cluster - -# additionals labels -labels: {} - -esJavaOpts: "-Xmx1g -Xms1g" - -resources: - requests: - # cpu: "1000m" - memory: "2Gi" - limits: - # cpu: "1000m" - memory: "2Gi" - -initResources: {} - # limits: - # cpu: "25m" - # # memory: "128Mi" - # requests: - # cpu: "25m" - # memory: "128Mi" - -sidecarResources: {} - # limits: - # cpu: "25m" - # # memory: "128Mi" - # requests: - # cpu: "25m" - # memory: "128Mi" - -networkHost: "0.0.0.0" - -volumeClaimTemplate: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 25Gi - -rbac: - create: false - serviceAccountName: "" - -podSecurityPolicy: - create: false - name: "" - spec: - privileged: true - fsGroup: - rule: RunAsAny - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - secret - - configMap - - persistentVolumeClaim - -persistence: - enabled: false - # aws: - # - volumeId: 
value - # zone: ap-south-1a - # - volumeId: value - # zone: ap-south-1b - # - volumeId: value - # zone: ap-south-1c - - # azure: - # - diskName: zookeeper-0 - # diskUri: value - # - diskName: zookeeper-1 - # diskUri: value - # - diskName: zookeeper-2 - # diskUri: value - dataDirSize: "25Gi" - annotations: {} - -extraVolumes: [] - # - name: extras - # emptyDir: {} - -extraVolumeMounts: [] - # - name: extras - # mountPath: /usr/share/extras - # readOnly: true - -extraContainers: [] - # - name: do-something - # image: busybox - # command: ['do', 'something'] - -extraInitContainers: [] - # - name: do-something - # image: busybox - # command: ['do', 'something'] - -# This is the PriorityClass settings as defined in -# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass -priorityClassName: "" - -# By default this will make sure two pods don't end up on the same node -# Changing this to a region would allow you to spread pods across regions -antiAffinityTopologyKey: "kubernetes.io/hostname" - -# Hard means that by default pods will only be scheduled if there are enough nodes for them -# and that they will never end up on the same node. Setting this to soft will do this "best effort" -antiAffinity: "hard" - -# This is the node affinity settings as defined in -# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature -nodeAffinity: {} - -# The default is to deploy all pods serially. By setting this to parallel all pods are started at -# the same time when bootstrapping the cluster -podManagementPolicy: "Parallel" - -protocol: http -httpPort: 9200 -transportPort: 9300 - -service: - labels: {} - labelsHeadless: {} - type: ClusterIP - nodePort: "" - annotations: {} - httpPortName: http - transportPortName: transport - loadBalancerIP: "" - loadBalancerSourceRanges: [] - -updateStrategy: OnDelete - -# This is the max unavailable setting for the pod disruption budget -# The default value of 1 will make sure that kubernetes won't allow more than 1 -# of your pods to be unavailable during maintenance -maxUnavailable: 1 - -podSecurityContext: - fsGroup: 1000 - runAsUser: 1000 - -securityContext: - capabilities: - drop: - - ALL - # readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 1000 - -# How long to wait for elasticsearch to stop gracefully -terminationGracePeriod: 120 - -sysctlVmMaxMapCount: 262144 - -readinessProbe: - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 3 - timeoutSeconds: 5 - -# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status -clusterHealthCheckParams: "wait_for_status=green&timeout=1s" - -## Use an alternate scheduler. -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -schedulerName: "" - -imagePullSecrets: [] -nodeSelector: {} -tolerations: [] - -# Enabling this will publically expose your Elasticsearch instance. 
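-# As an added illustration (not part of the original chart): if the ingress
-# block below were enabled, a TLS-terminated configuration might look like
-# the following; the hostname and secret name are hypothetical placeholders.
-# ingress:
-#   enabled: true
-#   annotations:
-#     kubernetes.io/ingress.class: nginx
-#     kubernetes.io/tls-acme: "true"
-#   path: /
-#   hosts:
-#     - es-infra.example.org
-#   tls:
-#     - secretName: es-infra-example-tls
-#       hosts:
-#         - es-infra.example.org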
-# Only enable this if you have security enabled on your cluster -ingress: - enabled: false - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - path: / - hosts: - - chart-example.local - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -nameOverride: "" -fullnameOverride: "" - -# https://github.com/elastic/helm-charts/issues/63 -masterTerminationFix: false - -lifecycle: {} - # preStop: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] - # postStart: - # exec: - # command: - # - bash - # - -c - # - | - # #!/bin/bash - # # Add a template to adjust number of shards/replicas - # TEMPLATE_NAME=my_template - # INDEX_PATTERN="logstash-*" - # SHARD_COUNT=8 - # REPLICA_COUNT=1 - # ES_URL=http://localhost:9200 - # while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done - # curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}' - -sysctlInitContainer: - enabled: true - -keystore: [] - -# Deprecated -# please use the above podSecurityContext.fsGroup instead -fsGroup: "" \ No newline at end of file diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch/elasticsearch-data-v1-values.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch/elasticsearch-data-v1-values.yaml deleted file mode 100644 index ddddf146a..000000000 --- a/config-as-code/helm/charts/backbone-services/elasticsearch/elasticsearch-data-v1-values.yaml +++ /dev/null @@ -1,286 +0,0 @@ - -name: elasticsearch-data-v1 -namespace: es-cluster-6 -replicas: 3 - -image: - pullPolicy: IfNotPresent - repository: docker.elastic.co/elasticsearch/elasticsearch - tag: 6.6.2 - -clusterName: "elasticsearch-v1" -nodeGroup: "data" - -# The service that non master groups will try to connect to when joining the cluster -# This should be set to clusterName + "-" + nodeGroup for your master group -masterService: "elasticsearch-master-v1" - -# Elasticsearch roles that will be applied to this nodeGroup -# These will be set as environment variables. E.g. node.master=true -roles: - master: "false" - ingest: "true" - data: "true" - -minimumMasterNodes: 2 - -esMajorVersion: "" - -# Allows you to add any config files in /usr/share/elasticsearch/config/ -# such as elasticsearch.yml and log4j2.properties -esConfig: {} -# elasticsearch.yml: | -# key: -# nestedkey: value -# log4j2.properties: | -# key = value - -# Extra environment variables to append to this nodeGroup -# This will be appended to the current 'env:' key. 
You can use any of the kubernetes env -# syntax here -extraEnvs: -# - name: MY_ENVIRONMENT_VAR -# value: the_value_goes_here - - name: path.data - value: "/usr/share/elasticsearch/data" - - name: path.logs - value: "/usr/share/elasticsearch/logs" - - name: gateway.expected_master_nodes - value: "2" - - name: gateway.expected_data_nodes - value: "1" - - name: gateway.recover_after_time - value: "5m" - - name: gateway.recover_after_master_nodes - value: "2" - - name: gateway.recover_after_data_nodes - value: "1" - -# Allows you to load environment variables from kubernetes secret or config map -envFrom: [] -# - secretRef: -# name: env-secret -# - configMapRef: -# name: config-map - -# A list of secrets and their paths to mount inside the pod -# This is useful for mounting certificates for security and for mounting -# the X-Pack license -secretMounts: [] -# - name: elastic-certificates -# secretName: elastic-certificates -# path: /usr/share/elasticsearch/config/certs - -podAnnotations: {} - # iam.amazonaws.com/role: es-cluster - -# additionals labels -labels: {} - -esJavaOpts: "-Xmx1g -Xms1g" - -resources: - requests: - # cpu: "1000m" - memory: "2Gi" - limits: - # cpu: "1000m" - memory: "2Gi" - -initResources: {} - # limits: - # cpu: "25m" - # # memory: "128Mi" - # requests: - # cpu: "25m" - # memory: "128Mi" - -sidecarResources: {} - # limits: - # cpu: "25m" - # # memory: "128Mi" - # requests: - # cpu: "25m" - # memory: "128Mi" - -networkHost: "0.0.0.0" - -volumeClaimTemplate: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 25Gi - -rbac: - create: false - serviceAccountName: "" - -podSecurityPolicy: - create: false - name: "" - spec: - privileged: true - fsGroup: - rule: RunAsAny - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - secret - - configMap - - persistentVolumeClaim - -persistence: - enabled: true - dataDirSize: "25Gi" - annotations: {} - -extraVolumes: [] - # - name: extras - # emptyDir: {} - -extraVolumeMounts: [] - # - name: extras - # mountPath: /usr/share/extras - # readOnly: true - -extraContainers: [] - # - name: do-something - # image: busybox - # command: ['do', 'something'] - -extraInitContainers: [] - # - name: do-something - # image: busybox - # command: ['do', 'something'] - -# This is the PriorityClass settings as defined in -# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass -priorityClassName: "" - -# By default this will make sure two pods don't end up on the same node -# Changing this to a region would allow you to spread pods across regions -antiAffinityTopologyKey: "kubernetes.io/hostname" - -# Hard means that by default pods will only be scheduled if there are enough nodes for them -# and that they will never end up on the same node. Setting this to soft will do this "best effort" -antiAffinity: "hard" - -# This is the node affinity settings as defined in -# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature -nodeAffinity: {} - -# The default is to deploy all pods serially. 
By setting this to parallel all pods are started at -# the same time when bootstrapping the cluster -podManagementPolicy: "Parallel" - -protocol: http -httpPort: 9200 -transportPort: 9300 - -service: - labels: {} - labelsHeadless: {} - type: ClusterIP - nodePort: "" - annotations: {} - httpPortName: http - transportPortName: transport - loadBalancerIP: "" - loadBalancerSourceRanges: [] - -updateStrategy: OnDelete - -# This is the max unavailable setting for the pod disruption budget -# The default value of 1 will make sure that kubernetes won't allow more than 1 -# of your pods to be unavailable during maintenance -maxUnavailable: 1 - -podSecurityContext: - fsGroup: 1000 - runAsUser: 1000 - -securityContext: - capabilities: - drop: - - ALL - # readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 1000 - -# How long to wait for elasticsearch to stop gracefully -terminationGracePeriod: 120 - -sysctlVmMaxMapCount: 262144 - -readinessProbe: - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 3 - timeoutSeconds: 5 - -# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status -clusterHealthCheckParams: "wait_for_status=green&timeout=1s" - -## Use an alternate scheduler. -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -schedulerName: "" - -imagePullSecrets: [] -nodeSelector: {} -tolerations: [] - -# Enabling this will publically expose your Elasticsearch instance. -# Only enable this if you have security enabled on your cluster -ingress: - enabled: false - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - path: / - hosts: - - chart-example.local - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -nameOverride: "" -fullnameOverride: "" - -# https://github.com/elastic/helm-charts/issues/63 -masterTerminationFix: false - -lifecycle: {} - # preStop: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] - # postStart: - # exec: - # command: - # - bash - # - -c - # - | - # #!/bin/bash - # # Add a template to adjust number of shards/replicas - # TEMPLATE_NAME=my_template - # INDEX_PATTERN="logstash-*" - # SHARD_COUNT=8 - # REPLICA_COUNT=1 - # ES_URL=http://localhost:9200 - # while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done - # curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}' - -sysctlInitContainer: - enabled: true - -keystore: [] - -# Deprecated -# please use the above podSecurityContext.fsGroup instead -fsGroup: "" \ No newline at end of file diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch/elasticsearch-master-infra-v1-values.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch/elasticsearch-master-infra-v1-values.yaml deleted file mode 100644 index abbbe5a8f..000000000 --- a/config-as-code/helm/charts/backbone-services/elasticsearch/elasticsearch-master-infra-v1-values.yaml +++ /dev/null @@ -1,284 +0,0 @@ - -name: elasticsearch-master-infra-v1 -namespace: es-cluster-infra -replicas: 3 - -image: - pullPolicy: IfNotPresent - repository: docker.elastic.co/elasticsearch/elasticsearch - tag: 6.4.2 - -clusterName: "elasticsearch-infra-v1" -nodeGroup: "master" - -# The service that non master 
groups will try to connect to when joining the cluster -# This should be set to clusterName + "-" + nodeGroup for your master group -masterService: "elasticsearch-master-infra-v1" - -# Elasticsearch roles that will be applied to this nodeGroup -# These will be set as environment variables. E.g. node.master=true -roles: - master: "true" - ingest: "false" - data: "false" - -minimumMasterNodes: 2 - -esMajorVersion: "" - -# Allows you to add any config files in /usr/share/elasticsearch/config/ -# such as elasticsearch.yml and log4j2.properties -esConfig: {} -# elasticsearch.yml: | -# key: -# nestedkey: value -# log4j2.properties: | -# key = value - -# Extra environment variables to append to this nodeGroup -# This will be appended to the current 'env:' key. You can use any of the kubernetes env -# syntax here -extraEnvs: -# - name: MY_ENVIRONMENT_VAR -# value: the_value_goes_here - - name: path.data - value: "/usr/share/elasticsearch/data" - - name: path.logs - value: "/usr/share/elasticsearch/logs" - - name: gateway.expected_master_nodes - value: "2" - - name: gateway.expected_data_nodes - value: "1" - - name: gateway.recover_after_time - value: "5m" - - name: gateway.recover_after_master_nodes - value: "2" - - name: gateway.recover_after_data_nodes - value: "1" - -# Allows you to load environment variables from kubernetes secret or config map -envFrom: [] -# - secretRef: -# name: env-secret -# - configMapRef: -# name: config-map - -# A list of secrets and their paths to mount inside the pod -# This is useful for mounting certificates for security and for mounting -# the X-Pack license -secretMounts: [] -# - name: elastic-certificates -# secretName: elastic-certificates -# path: /usr/share/elasticsearch/config/certs - -podAnnotations: {} - # iam.amazonaws.com/role: es-cluster - -# additionals labels -labels: {} - -esJavaOpts: "-Xmx448m -Xms448m" - -resources: - requests: - memory: "896Mi" - limits: - memory: "896Mi" - -initResources: {} - # limits: - # cpu: "25m" - # # memory: "128Mi" - # requests: - # cpu: "25m" - # memory: "128Mi" - -sidecarResources: {} - # limits: - # cpu: "25m" - # # memory: "128Mi" - # requests: - # cpu: "25m" - # memory: "128Mi" - -networkHost: "0.0.0.0" - -volumeClaimTemplate: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 2Gi - -rbac: - create: false - serviceAccountName: "" - -podSecurityPolicy: - create: false - name: "" - spec: - privileged: true - fsGroup: - rule: RunAsAny - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - secret - - configMap - - persistentVolumeClaim - -persistence: - enabled: true - dataDirSize: "2Gi" - annotations: {} - -extraVolumes: [] - # - name: extras - # emptyDir: {} - -extraVolumeMounts: [] - # - name: extras - # mountPath: /usr/share/extras - # readOnly: true - -extraContainers: [] - # - name: do-something - # image: busybox - # command: ['do', 'something'] - -extraInitContainers: [] - # - name: do-something - # image: busybox - # command: ['do', 'something'] - -# This is the PriorityClass settings as defined in -# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass -priorityClassName: "" - -# By default this will make sure two pods don't end up on the same node -# Changing this to a region would allow you to spread pods across regions -antiAffinityTopologyKey: "kubernetes.io/hostname" - -# Hard means that by default pods will only be scheduled if there are enough nodes for them -# and that they will never end up 
on the same node. Setting this to soft will do this "best effort" -antiAffinity: "hard" - -# This is the node affinity settings as defined in -# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature -nodeAffinity: {} - -# The default is to deploy all pods serially. By setting this to parallel all pods are started at -# the same time when bootstrapping the cluster -podManagementPolicy: "Parallel" - -protocol: http -httpPort: 9200 -transportPort: 9300 - -service: - labels: {} - labelsHeadless: {} - type: ClusterIP - nodePort: "" - annotations: {} - httpPortName: http - transportPortName: transport - loadBalancerIP: "" - loadBalancerSourceRanges: [] - -updateStrategy: OnDelete - -# This is the max unavailable setting for the pod disruption budget -# The default value of 1 will make sure that kubernetes won't allow more than 1 -# of your pods to be unavailable during maintenance -maxUnavailable: 1 - -podSecurityContext: - fsGroup: 1000 - runAsUser: 1000 - -securityContext: - capabilities: - drop: - - ALL - # readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 1000 - -# How long to wait for elasticsearch to stop gracefully -terminationGracePeriod: 120 - -sysctlVmMaxMapCount: 262144 - -readinessProbe: - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 3 - timeoutSeconds: 5 - -# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status -clusterHealthCheckParams: "wait_for_status=green&timeout=1s" - -## Use an alternate scheduler. -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -schedulerName: "" - -imagePullSecrets: [] -nodeSelector: {} -tolerations: [] - -# Enabling this will publically expose your Elasticsearch instance. 
-# Only enable this if you have security enabled on your cluster -ingress: - enabled: false - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - path: / - hosts: - - chart-example.local - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -nameOverride: "" -fullnameOverride: "" - -# https://github.com/elastic/helm-charts/issues/63 -masterTerminationFix: true - -lifecycle: {} - # preStop: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] - # postStart: - # exec: - # command: - # - bash - # - -c - # - | - # #!/bin/bash - # # Add a template to adjust number of shards/replicas - # TEMPLATE_NAME=my_template - # INDEX_PATTERN="logstash-*" - # SHARD_COUNT=8 - # REPLICA_COUNT=1 - # ES_URL=http://localhost:9200 - # while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done - # curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}' - -sysctlInitContainer: - enabled: true - -keystore: [] - -# Deprecated -# please use the above podSecurityContext.fsGroup instead -fsGroup: "" \ No newline at end of file diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch/elasticsearch-master-v1-values.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch/elasticsearch-master-v1-values.yaml deleted file mode 100644 index 25fcab388..000000000 --- a/config-as-code/helm/charts/backbone-services/elasticsearch/elasticsearch-master-v1-values.yaml +++ /dev/null @@ -1,284 +0,0 @@ - -name: elasticsearch-master-v1 -namespace: es-cluster-6 -replicas: 3 - -image: - pullPolicy: IfNotPresent - repository: docker.elastic.co/elasticsearch/elasticsearch - tag: 6.6.2 - -clusterName: "elasticsearch-v1" -nodeGroup: "master" - -# The service that non master groups will try to connect to when joining the cluster -# This should be set to clusterName + "-" + nodeGroup for your master group -masterService: "elasticsearch-master-v1" - -# Elasticsearch roles that will be applied to this nodeGroup -# These will be set as environment variables. E.g. node.master=true -roles: - master: "true" - ingest: "false" - data: "false" - -minimumMasterNodes: 2 - -esMajorVersion: "" - -# Allows you to add any config files in /usr/share/elasticsearch/config/ -# such as elasticsearch.yml and log4j2.properties -esConfig: {} -# elasticsearch.yml: | -# key: -# nestedkey: value -# log4j2.properties: | -# key = value - -# Extra environment variables to append to this nodeGroup -# This will be appended to the current 'env:' key. 
You can use any of the kubernetes env -# syntax here -extraEnvs: -# - name: MY_ENVIRONMENT_VAR -# value: the_value_goes_here - - name: path.data - value: "/usr/share/elasticsearch/data" - - name: path.logs - value: "/usr/share/elasticsearch/logs" - - name: gateway.expected_master_nodes - value: "2" - - name: gateway.expected_data_nodes - value: "1" - - name: gateway.recover_after_time - value: "5m" - - name: gateway.recover_after_master_nodes - value: "2" - - name: gateway.recover_after_data_nodes - value: "1" - -# Allows you to load environment variables from kubernetes secret or config map -envFrom: [] -# - secretRef: -# name: env-secret -# - configMapRef: -# name: config-map - -# A list of secrets and their paths to mount inside the pod -# This is useful for mounting certificates for security and for mounting -# the X-Pack license -secretMounts: [] -# - name: elastic-certificates -# secretName: elastic-certificates -# path: /usr/share/elasticsearch/config/certs - -podAnnotations: {} - # iam.amazonaws.com/role: es-cluster - -# additionals labels -labels: {} - -esJavaOpts: "-Xmx448m -Xms448m" - -resources: - requests: - memory: "896Mi" - limits: - memory: "896Mi" - -initResources: {} - # limits: - # cpu: "25m" - # # memory: "128Mi" - # requests: - # cpu: "25m" - # memory: "128Mi" - -sidecarResources: {} - # limits: - # cpu: "25m" - # # memory: "128Mi" - # requests: - # cpu: "25m" - # memory: "128Mi" - -networkHost: "0.0.0.0" - -volumeClaimTemplate: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 2Gi - -rbac: - create: false - serviceAccountName: "" - -podSecurityPolicy: - create: false - name: "" - spec: - privileged: true - fsGroup: - rule: RunAsAny - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - secret - - configMap - - persistentVolumeClaim - -persistence: - enabled: true - dataDirSize: "2Gi" - annotations: {} - -extraVolumes: [] - # - name: extras - # emptyDir: {} - -extraVolumeMounts: [] - # - name: extras - # mountPath: /usr/share/extras - # readOnly: true - -extraContainers: [] - # - name: do-something - # image: busybox - # command: ['do', 'something'] - -extraInitContainers: [] - # - name: do-something - # image: busybox - # command: ['do', 'something'] - -# This is the PriorityClass settings as defined in -# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass -priorityClassName: "" - -# By default this will make sure two pods don't end up on the same node -# Changing this to a region would allow you to spread pods across regions -antiAffinityTopologyKey: "kubernetes.io/hostname" - -# Hard means that by default pods will only be scheduled if there are enough nodes for them -# and that they will never end up on the same node. Setting this to soft will do this "best effort" -antiAffinity: "hard" - -# This is the node affinity settings as defined in -# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature -nodeAffinity: {} - -# The default is to deploy all pods serially. 
By setting this to parallel all pods are started at -# the same time when bootstrapping the cluster -podManagementPolicy: "Parallel" - -protocol: http -httpPort: 9200 -transportPort: 9300 - -service: - labels: {} - labelsHeadless: {} - type: ClusterIP - nodePort: "" - annotations: {} - httpPortName: http - transportPortName: transport - loadBalancerIP: "" - loadBalancerSourceRanges: [] - -updateStrategy: OnDelete - -# This is the max unavailable setting for the pod disruption budget -# The default value of 1 will make sure that kubernetes won't allow more than 1 -# of your pods to be unavailable during maintenance -maxUnavailable: 1 - -podSecurityContext: - fsGroup: 1000 - runAsUser: 1000 - -securityContext: - capabilities: - drop: - - ALL - # readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 1000 - -# How long to wait for elasticsearch to stop gracefully -terminationGracePeriod: 120 - -sysctlVmMaxMapCount: 262144 - -readinessProbe: - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 3 - timeoutSeconds: 5 - -# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status -clusterHealthCheckParams: "wait_for_status=green&timeout=1s" - -## Use an alternate scheduler. -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -schedulerName: "" - -imagePullSecrets: [] -nodeSelector: {} -tolerations: [] - -# Enabling this will publically expose your Elasticsearch instance. -# Only enable this if you have security enabled on your cluster -ingress: - enabled: false - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - path: / - hosts: - - chart-example.local - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -nameOverride: "" -fullnameOverride: "" - -# https://github.com/elastic/helm-charts/issues/63 -masterTerminationFix: true - -lifecycle: {} - # preStop: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] - # postStart: - # exec: - # command: - # - bash - # - -c - # - | - # #!/bin/bash - # # Add a template to adjust number of shards/replicas - # TEMPLATE_NAME=my_template - # INDEX_PATTERN="logstash-*" - # SHARD_COUNT=8 - # REPLICA_COUNT=1 - # ES_URL=http://localhost:9200 - # while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done - # curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}' - -sysctlInitContainer: - enabled: true - -keystore: [] - -# Deprecated -# please use the above podSecurityContext.fsGroup instead -fsGroup: "" \ No newline at end of file diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch/templates/_helpers.tpl b/config-as-code/helm/charts/backbone-services/elasticsearch/templates/_helpers.tpl deleted file mode 100644 index 3e7ac57fb..000000000 --- a/config-as-code/helm/charts/backbone-services/elasticsearch/templates/_helpers.tpl +++ /dev/null @@ -1,61 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{- define "name" -}} -{{- $envOverrides := index .Values (tpl (default .Chart.Name .Values.name) .) -}} -{{- $baseValues := .Values | deepCopy -}} -{{- $values := dict "Values" (mustMergeOverwrite $baseValues $envOverrides) -}} -{{- with mustMergeOverwrite . 
$values -}} -{{- default .Chart.Name .Values.name -}} -{{- end }} -{{- end }} - - -{{- define "elasticsearch.endpoints" -}} -{{- $replicas := int (toString (.Values.replicas)) }} -{{- $uname := printf "%s-%s" .Values.clusterName .Values.nodeGroup }} - {{- range $i, $e := untilStep 0 $replicas 1 -}} -{{ $uname }}-{{ $i }}, - {{- end -}} -{{- end -}} - -{{- define "elasticsearch.esMajorVersion" -}} -{{- if .Values.esMajorVersion -}} -{{ .Values.esMajorVersion }} -{{- else -}} -{{- $version := int (index (.Values.image.tag | splitList ".") 0) -}} - {{- if and (contains "docker.elastic.co/elasticsearch/elasticsearch" .Values.image.repository) (not (eq $version 0)) -}} -{{ $version }} - {{- else -}} -7 - {{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for statefulset. -*/}} -{{- define "elasticsearch.statefulset.apiVersion" -}} -{{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.GitVersion -}} -{{- print "apps/v1beta2" -}} -{{- else -}} -{{- print "apps/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for ingress. -*/}} -{{- define "elasticsearch.ingress.apiVersion" -}} -{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} -{{- print "extensions/v1beta1" -}} -{{- else -}} -{{- print "networking.k8s.io/v1beta1" -}} -{{- end -}} -{{- end -}} - -{{- define "common.image" -}} -{{- if contains "/" .repository -}} -{{- printf "%s:%s" .repository ( required "Tag is mandatory" .tag ) -}} -{{- else -}} -{{- printf "%s/%s:%s" $.Values.global.containerRegistry .repository ( required "Tag is mandatory" .tag ) -}} -{{- end -}} -{{- end -}} \ No newline at end of file diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch/templates/headless-service.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch/templates/headless-service.yaml deleted file mode 100644 index 857fe8794..000000000 --- a/config-as-code/helm/charts/backbone-services/elasticsearch/templates/headless-service.yaml +++ /dev/null @@ -1,27 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: -{{- if eq .Values.nodeGroup "master" }} - name: {{ template "name" . }}-headless -{{- else }} - name: {{ template "name" . }}-headless -{{- end }} - namespace: {{ .Values.namespace }} - labels: - app: "{{ template "name" . }}" -{{- if .Values.service.labelsHeadless }} -{{ toYaml .Values.service.labelsHeadless | indent 4 }} -{{- end }} - annotations: - service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" -spec: - clusterIP: None # This is needed for statefulset hostnames like elasticsearch-0 to resolve - # Create endpoints also if the related pod isn't ready - publishNotReadyAddresses: true - selector: - app: "{{ template "name" . }}" - ports: - - name: {{ .Values.service.httpPortName | default "http" }} - port: {{ .Values.httpPort }} - - name: {{ .Values.service.transportPortName | default "transport" }} - port: {{ .Values.transportPort }} \ No newline at end of file diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch/templates/poddisruptionbudget.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch/templates/poddisruptionbudget.yaml deleted file mode 100644 index 6582bcfce..000000000 --- a/config-as-code/helm/charts/backbone-services/elasticsearch/templates/poddisruptionbudget.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.maxUnavailable }} -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: "{{ template "name" . 
}}-pdb" - namespace: {{ .Values.namespace }} -spec: - maxUnavailable: {{ .Values.maxUnavailable }} - selector: - matchLabels: - app: "{{ template "name" . }}" -{{- end }} \ No newline at end of file diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch/templates/service.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch/templates/service.yaml deleted file mode 100644 index 3a2c57c75..000000000 --- a/config-as-code/helm/charts/backbone-services/elasticsearch/templates/service.yaml +++ /dev/null @@ -1,30 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: -{{- if eq .Values.nodeGroup "master" }} - name: {{ template "name" . }} -{{- else }} - name: {{ template "name" . }} -{{- end }} - namespace: {{ .Values.namespace }} - labels: - app: "{{ template "name" . }}" -{{- if .Values.service.labels }} -{{ toYaml .Values.service.labels | indent 4}} -{{- end }} - annotations: -{{ toYaml .Values.service.annotations | indent 4 }} -spec: - type: {{ .Values.service.type }} - selector: - app: "{{ template "name" . }}" - ports: - - name: {{ .Values.service.httpPortName | default "http" }} - protocol: TCP - port: {{ .Values.httpPort }} -{{- if .Values.service.nodePort }} - nodePort: {{ .Values.service.nodePort }} -{{- end }} - - name: {{ .Values.service.transportPortName | default "transport" }} - protocol: TCP - port: {{ .Values.transportPort }} \ No newline at end of file diff --git a/config-as-code/helm/charts/backbone-services/elasticsearch/templates/statefulset.yaml b/config-as-code/helm/charts/backbone-services/elasticsearch/templates/statefulset.yaml deleted file mode 100644 index 4a8dc64fb..000000000 --- a/config-as-code/helm/charts/backbone-services/elasticsearch/templates/statefulset.yaml +++ /dev/null @@ -1,400 +0,0 @@ ---- -apiVersion: {{ template "elasticsearch.statefulset.apiVersion" . }} -kind: StatefulSet -metadata: - name: {{ template "name" . }} - namespace: {{ .Values.namespace }} - labels: - app: "{{ template "name" . }}" - {{- range $key, $value := .Values.labels }} - {{ $key }}: {{ $value | quote }} - {{- end }} - annotations: - esMajorVersion: "{{ include "elasticsearch.esMajorVersion" . }}" -spec: - serviceName: {{ template "name" . }}-headless - selector: - matchLabels: - app: "{{ template "name" . }}" - replicas: {{ .Values.replicas }} - podManagementPolicy: {{ .Values.podManagementPolicy }} - updateStrategy: - type: {{ .Values.updateStrategy }} - {{- if .Values.persistence.enabled }} - volumeClaimTemplates: - - metadata: - name: es-storage - {{- with .Values.persistence.annotations }} - annotations: -{{ toYaml . | indent 8 }} - {{- end }} - spec: -{{ toYaml .Values.volumeClaimTemplate | indent 6 }} - {{- end }} - template: - metadata: - name: "{{ template "name" . }}" - labels: - app: "{{ template "name" . }}" - {{- range $key, $value := .Values.labels }} - {{ $key }}: {{ $value | quote }} - {{- end }} - annotations: - {{- range $key, $value := .Values.podAnnotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{/* This forces a restart if the configmap has changed */}} - {{- if .Values.esConfig }} - configchecksum: {{ include (print .Template.BasePath "/configmap.yaml") . 
| sha256sum | trunc 63 }} - {{- end }} - spec: - {{- if .Values.schedulerName }} - schedulerName: "{{ .Values.schedulerName }}" - {{- end }} - securityContext: -{{ toYaml .Values.podSecurityContext | indent 8 }} - {{- if .Values.fsGroup }} - fsGroup: {{ .Values.fsGroup }} # Deprecated value, please use .Values.podSecurityContext.fsGroup - {{- end }} - {{- if .Values.rbac.create }} - serviceAccountName: "{{ template "name" . }}" - {{- else if not (eq .Values.rbac.serviceAccountName "") }} - serviceAccountName: {{ .Values.rbac.serviceAccountName | quote }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: -{{ toYaml . | indent 6 }} - {{- end }} - {{- with .Values.nodeSelector }} - nodeSelector: -{{ toYaml . | indent 8 }} - {{- end }} - {{- if or (eq .Values.antiAffinity "hard") (eq .Values.antiAffinity "soft") .Values.nodeAffinity }} - {{- if .Values.priorityClassName }} - priorityClassName: {{ .Values.priorityClassName }} - {{- end }} - affinity: - {{- end }} - {{- if eq .Values.antiAffinity "hard" }} - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - "{{ template "name" .}}" - topologyKey: {{ .Values.antiAffinityTopologyKey }} - {{- else if eq .Values.antiAffinity "soft" }} - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - podAffinityTerm: - topologyKey: {{ .Values.antiAffinityTopologyKey }} - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - "{{ template "name" . }}" - {{- end }} - {{- with .Values.nodeAffinity }} - nodeAffinity: -{{ toYaml . | indent 10 }} - {{- end }} - terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} - volumes: - {{- range .Values.secretMounts }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - {{- end }} - {{- if .Values.esConfig }} - - name: esconfig - configMap: - name: {{ template "name" . }}-config - {{- end }} -{{- if .Values.keystore }} - - name: keystore - emptyDir: {} - {{- range .Values.keystore }} - - name: keystore-{{ .secretName }} - secret: {{ toYaml . | nindent 12 }} - {{- end }} -{{ end }} - {{- if .Values.extraVolumes }} - # Currently some extra blocks accept strings - # to continue with backwards compatibility this is being kept - # whilst also allowing for yaml to be specified too. - {{- if eq "string" (printf "%T" .Values.extraVolumes) }} -{{ tpl .Values.extraVolumes . 
| indent 8 }} - {{- else }} -{{ toYaml .Values.extraVolumes | indent 8 }} - {{- end }} - {{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - initContainers: - {{- if .Values.sysctlInitContainer.enabled }} - - name: configure-sysctl - securityContext: - runAsUser: 0 - privileged: true - image: {{ template "common.image" (dict "Values" .Values "repository" .Values.image.repository "tag" .Values.image.tag) }} - imagePullPolicy: "{{ .Values.image.pullPolicy }}" - command: ["sysctl", "-w", "vm.max_map_count={{ .Values.sysctlVmMaxMapCount}}"] - resources: -{{ toYaml .Values.initResources | indent 10 }} - {{- end }} -{{ if .Values.keystore }} - - name: keystore - image: {{ template "common.image" (dict "Values" .Values "repository" .Values.image.repository "tag" .Values.image.tag) }} - imagePullPolicy: "{{ .Values.image.pullPolicy }}" - command: - - sh - - -c - - | - #!/usr/bin/env bash - set -euo pipefail - - elasticsearch-keystore create - - for i in /tmp/keystoreSecrets/*/*; do - key=$(basename $i) - echo "Adding file $i to keystore key $key" - elasticsearch-keystore add-file "$key" "$i" - done - - # Add the bootstrap password since otherwise the Elasticsearch entrypoint tries to do this on startup - if [ ! -z ${ELASTIC_PASSWORD+x} ]; then - echo 'Adding env $ELASTIC_PASSWORD to keystore as key bootstrap.password' - echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x bootstrap.password - fi - - cp -a /usr/share/elasticsearch/config/elasticsearch.keystore /tmp/keystore/ - env: {{ toYaml .Values.extraEnvs | nindent 10 }} - envFrom: {{ toYaml .Values.envFrom | nindent 10 }} - resources: {{ toYaml .Values.initResources | nindent 10 }} - volumeMounts: - - name: keystore - mountPath: /tmp/keystore - {{- range .Values.keystore }} - - name: keystore-{{ .secretName }} - mountPath: /tmp/keystoreSecrets/{{ .secretName }} - {{- end }} -{{ end }} - {{- if .Values.extraInitContainers }} - # Currently some extra blocks accept strings - # to continue with backwards compatibility this is being kept - # whilst also allowing for yaml to be specified too. - {{- if eq "string" (printf "%T" .Values.extraInitContainers) }} -{{ tpl .Values.extraInitContainers . | indent 6 }} - {{- else }} -{{ toYaml .Values.extraInitContainers | indent 6 }} - {{- end }} - {{- end }} - imagePullSecrets: - - name: docker-registry-secret - containers: - - name: "elasticsearch" - securityContext: -{{ toYaml .Values.securityContext | indent 10 }} - image: {{ template "common.image" (dict "Values" .Values "repository" .Values.image.repository "tag" .Values.image.tag) }} - imagePullPolicy: "{{ .Values.image.pullPolicy }}" - readinessProbe: - exec: - command: - - sh - - -c - - | - #!/usr/bin/env bash -e - # If the node is starting up wait for the cluster to be ready (request params: '{{ .Values.clusterHealthCheckParams }}' ) - # Once it has started only check that the node itself is responding - START_FILE=/tmp/.es_start_file - - if [ -n "${ELASTIC_USERNAME}" ] && [ -n "${ELASTIC_PASSWORD}" ]; then - BASIC_AUTH="-u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}" - else - BASIC_AUTH='' - fi - - if [ -f "${START_FILE}" ]; then - echo 'Elasticsearch is already running, lets check the node is healthy' - HTTP_CODE=$(curl -XGET -s -k ${BASIC_AUTH} -o /dev/null -w '%{http_code}' {{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}/) - RC=$? 
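-            # Added note: with VAR=$(cmd), bash propagates cmd's exit status
-            # to $?, so RC above holds curl's own exit code. A non-zero RC
-            # means the request itself failed; otherwise HTTP_CODE decides
-            # readiness below (200 is ready; 503 is tolerated on 6.x while
-            # the cluster is still forming).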
- if [[ ${RC} -ne 0 ]]; then - echo "curl -XGET -s -k \${BASIC_AUTH} -o /dev/null -w '%{http_code}' {{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}/ failed with RC ${RC}" - exit ${RC} - fi - # ready if HTTP code 200, 503 is tolerable if ES version is 6.x - if [[ ${HTTP_CODE} == "200" ]]; then - exit 0 - elif [[ ${HTTP_CODE} == "503" && "{{ include "elasticsearch.esMajorVersion" . }}" == "6" ]]; then - exit 0 - else - echo "curl -XGET -s -k \${BASIC_AUTH} -o /dev/null -w '%{http_code}' {{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}/ failed with HTTP code ${HTTP_CODE}" - exit 1 - fi - - else - echo 'Waiting for elasticsearch cluster to become ready (request params: "{{ .Values.clusterHealthCheckParams }}" )' - if curl -XGET -s -k --fail ${BASIC_AUTH} {{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}/_cluster/health?{{ .Values.clusterHealthCheckParams }} ; then - touch ${START_FILE} - exit 0 - else - echo 'Cluster is not yet ready (request params: "{{ .Values.clusterHealthCheckParams }}" )' - exit 1 - fi - fi -{{ toYaml .Values.readinessProbe | indent 10 }} - ports: - - name: http - containerPort: {{ .Values.httpPort }} - - name: transport - containerPort: {{ .Values.transportPort }} - resources: -{{ toYaml .Values.resources | indent 10 }} - env: - - name: node.name - valueFrom: - fieldRef: - fieldPath: metadata.name - {{- if eq .Values.roles.master "true" }} - {{- if ge (int (include "elasticsearch.esMajorVersion" .)) 7 }} - - name: cluster.initial_master_nodes - value: "{{ template "elasticsearch.endpoints" . }}" - {{- else }} - - name: discovery.zen.minimum_master_nodes - value: "{{ .Values.minimumMasterNodes }}" - {{- end }} - {{- end }} - {{- if lt (int (include "elasticsearch.esMajorVersion" .)) 7 }} - - name: discovery.zen.ping.unicast.hosts - value: {{ .Values.masterService | quote }} - {{- else }} - - name: discovery.seed_hosts - value: {{ .Values.masterService | quote }} - {{- end }} - - name: cluster.name - value: {{ .Values.clusterName | quote }} - - name: network.host - value: {{ .Values.networkHost | quote }} - - name: ES_JAVA_OPTS - value: {{ .Values.esJavaOpts | quote }} - {{- range $role, $enabled := .Values.roles }} - - name: node.{{ $role }} - value: {{ $enabled | quote }} - {{- end }} -{{- if .Values.extraEnvs }} -{{ toYaml .Values.extraEnvs | indent 10 }} -{{- end }} -{{- if .Values.envFrom }} - envFrom: -{{ toYaml .Values.envFrom | indent 10 }} -{{- end }} - volumeMounts: - {{- if .Values.persistence.enabled }} - - name: "es-storage" - mountPath: /usr/share/elasticsearch/data - {{- end }} -{{ if .Values.keystore }} - - name: keystore - mountPath: /usr/share/elasticsearch/config/elasticsearch.keystore - subPath: elasticsearch.keystore -{{ end }} - {{- range .Values.secretMounts }} - - name: {{ .name }} - mountPath: {{ .path }} - {{- if .subPath }} - subPath: {{ .subPath }} - {{- end }} - {{- end }} - {{- range $path, $config := .Values.esConfig }} - - name: esconfig - mountPath: /usr/share/elasticsearch/config/{{ $path }} - subPath: {{ $path }} - {{- end -}} - {{- if .Values.extraVolumeMounts }} - # Currently some extra blocks accept strings - # to continue with backwards compatibility this is being kept - # whilst also allowing for yaml to be specified too. - {{- if eq "string" (printf "%T" .Values.extraVolumeMounts) }} -{{ tpl .Values.extraVolumeMounts . 
| indent 10 }} - {{- else }} -{{ toYaml .Values.extraVolumeMounts | indent 10 }} - {{- end }} - {{- end }} - {{- if .Values.masterTerminationFix }} - {{- if eq .Values.roles.master "true" }} - # This sidecar will prevent slow master re-election - # https://github.com/elastic/helm-charts/issues/63 - - name: elasticsearch-master-graceful-termination-handler - image: {{ template "common.image" (dict "Values" .Values "repository" .Values.image.repository "tag" .Values.image.tag) }} - imagePullPolicy: "{{ .Values.image.pullPolicy }}" - command: - - "sh" - - -c - - | - #!/usr/bin/env bash - set -eo pipefail - - http () { - local path="${1}" - if [ -n "${ELASTIC_USERNAME}" ] && [ -n "${ELASTIC_PASSWORD}" ]; then - BASIC_AUTH="-u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}" - else - BASIC_AUTH='' - fi - curl -XGET -s -k --fail ${BASIC_AUTH} {{ .Values.protocol }}://{{ .Values.masterService }}:{{ .Values.httpPort }}${path} - } - - cleanup () { - while true ; do - local master="$(http "/_cat/master?h=node" || echo "")" - if [[ $master == "{{ .Values.masterService }}"* && $master != "${NODE_NAME}" ]]; then - echo "This node is not master." - break - fi - echo "This node is still master, waiting gracefully for it to step down" - sleep 1 - done - - exit 0 - } - - trap cleanup SIGTERM - - sleep infinity & - wait $! - resources: -{{ toYaml .Values.sidecarResources | indent 10 }} - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - {{- if .Values.extraEnvs }} -{{ toYaml .Values.extraEnvs | indent 10 }} - {{- end }} - {{- if .Values.envFrom }} - envFrom: -{{ toYaml .Values.envFrom | indent 10 }} - {{- end }} - {{- end }} - {{- end }} -{{- if .Values.lifecycle }} - lifecycle: -{{ toYaml .Values.lifecycle | indent 10 }} -{{- end }} - {{- if .Values.extraContainers }} - # Currently some extra blocks accept strings - # to continue with backwards compatibility this is being kept - # whilst also allowing for yaml to be specified too. - {{- if eq "string" (printf "%T" .Values.extraContainers) }} -{{ tpl .Values.extraContainers . 
From b6a473daee12531d11628a48857417a869590f21 Mon Sep 17 00:00:00 2001
From: "kavi_elrey@1993" <25226238+kavi-egov@users.noreply.github.com>
Date: Sun, 6 Jul 2025 19:40:12 +0530
Subject: [PATCH 04/10] Delete config-as-code/helm/charts/backbone-services/es-curator directory

---
 .../backbone-services/es-curator/Chart.yaml   | 26 ------
 .../es-curator/es-curator-infra-values.yaml   | 59 --------------
 .../es-curator/templates/_helpers.tpl         |  8 --
 .../es-curator/templates/configmap.yaml       | 81 -------------------
 .../es-curator/templates/cronjob.yaml         |  2 -
 5 files changed, 176 deletions(-)
 delete mode 100644 config-as-code/helm/charts/backbone-services/es-curator/Chart.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/es-curator/es-curator-infra-values.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/es-curator/templates/_helpers.tpl
 delete mode 100644 config-as-code/helm/charts/backbone-services/es-curator/templates/configmap.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/es-curator/templates/cronjob.yaml

diff --git a/config-as-code/helm/charts/backbone-services/es-curator/Chart.yaml b/config-as-code/helm/charts/backbone-services/es-curator/Chart.yaml
deleted file mode 100644
index 6c6bc0926..000000000
--- a/config-as-code/helm/charts/backbone-services/es-curator/Chart.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-apiVersion: v2
-name: es-curator
-description: A Helm chart for Kubernetes
-
-# A chart can be either an 'application' or a 'library' chart.
-#
-# Application charts are a collection of templates that can be packaged into versioned archives
-# to be deployed.
-#
-# Library charts provide useful utilities or functions for the chart developer. They're included as
-# a dependency of application charts to inject those utilities and functions into the rendering
-# pipeline. Library charts do not define any templates and therefore cannot be deployed.
-type: application
-
-# This is the chart version. This version number should be incremented each time you make changes
-# to the chart and its templates, including the app version.
-version: 0.1.0
-
-# This is the version number of the application being deployed. This version number should be
-# incremented each time you make changes to the application.
-appVersion: 1.16.0
-
-dependencies:
-- name: common
-  version: 0.0.5
-  repository: file://../../common
\ No newline at end of file
diff --git a/config-as-code/helm/charts/backbone-services/es-curator/es-curator-infra-values.yaml b/config-as-code/helm/charts/backbone-services/es-curator/es-curator-infra-values.yaml
deleted file mode 100644
index f1c8f218b..000000000
--- a/config-as-code/helm/charts/backbone-services/es-curator/es-curator-infra-values.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-# Common Labels
-labels:
-  group: "es-curator-infra"
-
-cron:
-  schedule: "45 18 * * *"
-# Container Configs
-namespace: es-cluster-infra
-image:
-  repository: "bobrik/curator"
-  tag: 5.6.0
-logs-cleanup-enabled: true
-jaeger-cleanup-enabled: true
-logs-to-retain-in-days: 7
-memory_limits: 512Mi
-args:
-- --config
-- /etc/config/config.yml
-- /etc/config/action_file.yml
-
-# Additional Container Envs
-env: |
-  - name: SERVER_PORT
-    value: "8080"
-  - name: JAVA_OPTS
-    value: {{ index .Values "heap" | quote }}
-  - name: ES_CLIENT_HOST
-    valueFrom:
-      configMapKeyRef:
-        name: egov-config
-        key: es-infra-host
-  - name: ES_CLIENT_PORT
-    value: "9200"
-  - name: LOG_LEVEL
-    value: "DEBUG"
-  {{- if index .Values "logs-cleanup-enabled" }}
-  - name: LOGS_CLEANUP_DISABLED
-    value: "False"
-  - name: RETAIN_LOGS_IN_DAYS
-    value: {{ index .Values "logs-to-retain-in-days" | quote }}
-  {{- end }}
-  {{- if index .Values "jaeger-cleanup-enabled" }}
-  - name: JAEGER_CLEANUP_DISABLED
-    value: "False"
-  - name: RETAIN_JAEGER_DATA_IN_DAYS
-    value: "14"
-  {{- end }}
-extraVolumes: |
-  - name: config-volume
-    configMap:
-      name: {{ template "name" . }}-config
-extraVolumeMounts: |
-  - mountPath: /etc/config
-    name: config-volume
-resources: |
-  requests:
-    memory: {{ .Values.memory_limits | quote }}
-  limits:
-    memory: {{ .Values.memory_limits | quote }}
diff --git a/config-as-code/helm/charts/backbone-services/es-curator/templates/_helpers.tpl b/config-as-code/helm/charts/backbone-services/es-curator/templates/_helpers.tpl
deleted file mode 100644
index c3b5dfbae..000000000
--- a/config-as-code/helm/charts/backbone-services/es-curator/templates/_helpers.tpl
+++ /dev/null
@@ -1,8 +0,0 @@
-{{- define "name" -}}
-{{- $envOverrides := index .Values (tpl (default .Chart.Name .Values.name) .) -}}
-{{- $baseValues := .Values | deepCopy -}}
-{{- $values := dict "Values" (mustMergeOverwrite $baseValues $envOverrides) -}}
-{{- with mustMergeOverwrite . $values -}}
-{{- default .Chart.Name .Values.name -}}
-{{- end }}
-{{- end }}
\ No newline at end of file
diff --git a/config-as-code/helm/charts/backbone-services/es-curator/templates/configmap.yaml b/config-as-code/helm/charts/backbone-services/es-curator/templates/configmap.yaml
deleted file mode 100644
index 57f099a0f..000000000
--- a/config-as-code/helm/charts/backbone-services/es-curator/templates/configmap.yaml
+++ /dev/null
@@ -1,81 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ template "name" . }}-config
-  namespace: {{ .Values.namespace }}
-data:
-  action_file.yml: |-
-    ---
-    # Remember, leave a key empty if there is no value. None will be a string,
-    # not a Python "NoneType"
-    #
-    # Also remember that all examples have 'disable_action' set to True. If you
-    # want to use this action as a template, be sure to set this to False after
-    # copying it.
-    actions:
-      1:
-        action: delete_indices
-        description: "Clean up ES by deleting old logs"
-        options:
-          timeout_override: 300
-          continue_if_exception: False
-          disable_action: ${LOGS_CLEANUP_DISABLED:True}
-          ignore_empty_list: True
-        filters:
-        - filtertype: pattern
-          kind: regex
-          value: '^(egov-services-logs-|egov-infra-logs-|pbprod-logstash-|pbuat-logstash-).*$'
-        - filtertype: age
-          source: creation_date
-          direction: older
-          unit: days
-          unit_count: ${RETAIN_LOGS_IN_DAYS}
-        # - filtertype: age
-        #   source: name
-        #   direction: older
-        #   timestring: '%Y.%m.%d'
-        #   unit: days
-        #   unit_count: 3
-        #   field:
-        #   stats_result:
-        #   epoch:
-        #   exclude: False
-      2:
-        action: delete_indices
-        description: "Clean up ES by deleting old jaeger data"
-        options:
-          timeout_override: 300
-          continue_if_exception: False
-          disable_action: ${JAEGER_CLEANUP_DISABLED:True}
-          ignore_empty_list: True
-        filters:
-        - filtertype: pattern
-          kind: regex
-          value: '^(jaeger-service-|jaeger-span-|jaeger-dependencies-).*$'
-        - filtertype: age
-          source: creation_date
-          direction: older
-          unit: days
-          unit_count: ${RETAIN_JAEGER_DATA_IN_DAYS}
-  config.yml: |-
-    ---
-    # Remember, leave a key empty if there is no value. None will be a string,
-    # not a Python "NoneType"
-    client:
-      hosts:
-      - ${ES_CLIENT_HOST}
-      port: ${ES_CLIENT_PORT}
-      url_prefix:
-      use_ssl: False
-      certificate:
-      client_cert:
-      client_key:
-      ssl_no_validate: False
-      http_auth:
-      timeout: 30
-      master_only: False
-    logging:
-      loglevel: ${LOG_LEVEL:INFO}
-      logfile:
-      logformat: default
-      blacklist: ['elasticsearch', 'urllib3']
\ No newline at end of file
diff --git a/config-as-code/helm/charts/backbone-services/es-curator/templates/cronjob.yaml b/config-as-code/helm/charts/backbone-services/es-curator/templates/cronjob.yaml
deleted file mode 100644
index 74d9de7ea..000000000
--- a/config-as-code/helm/charts/backbone-services/es-curator/templates/cronjob.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-# cronjob.yaml
-{{- template "common.cronjob" . -}}
\ No newline at end of file
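Curator expands the ${VAR:default} placeholders in both YAML files from the container environment at load time, which is how the env block in es-curator-infra-values.yaml toggles the two delete_indices actions. A hypothetical manual run of the same cleanup would look roughly like this (the exported names mirror the chart above; the host value is illustrative only, since the real one comes from the egov-config ConfigMap):

    export ES_CLIENT_HOST=elasticsearch.example.local   # assumption: real value comes from egov-config
    export ES_CLIENT_PORT=9200
    export LOGS_CLEANUP_DISABLED=False RETAIN_LOGS_IN_DAYS=7            # enables action 1
    export JAEGER_CLEANUP_DISABLED=False RETAIN_JAEGER_DATA_IN_DAYS=14  # enables action 2
    curator --config /etc/config/config.yml /etc/config/action_file.yml
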
From 8203a5e00ca0a654725311f2db8e5e16f602f75c Mon Sep 17 00:00:00 2001
From: "kavi_elrey@1993" <25226238+kavi-egov@users.noreply.github.com>
Date: Sun, 6 Jul 2025 19:40:25 +0530
Subject: [PATCH 05/10] Delete config-as-code/helm/charts/backbone-services/fluent-bit directory

---
 .../backbone-services/fluent-bit/Chart.yaml        |  21 ---
 .../fluent-bit/templates/_helpers.tpl              |   8 --
 .../fluent-bit/templates/clusterrole.yaml          |  10 --
 .../fluent-bit/templates/clusterrolebinding.yaml   |  12 --
 .../fluent-bit/templates/configmap.yaml            | 151 ---------------------
 .../fluent-bit/templates/daemonset.yaml            |  51 -------
 .../fluent-bit/templates/serviceaccount.yaml       |   5 -
 .../backbone-services/fluent-bit/values.yaml       |   8 -
 8 files changed, 266 deletions(-)
 delete mode 100644 config-as-code/helm/charts/backbone-services/fluent-bit/Chart.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/fluent-bit/templates/_helpers.tpl
 delete mode 100644 config-as-code/helm/charts/backbone-services/fluent-bit/templates/clusterrole.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/fluent-bit/templates/clusterrolebinding.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/fluent-bit/templates/configmap.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/fluent-bit/templates/daemonset.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/fluent-bit/templates/serviceaccount.yaml
 delete mode 100644 config-as-code/helm/charts/backbone-services/fluent-bit/values.yaml

diff --git a/config-as-code/helm/charts/backbone-services/fluent-bit/Chart.yaml b/config-as-code/helm/charts/backbone-services/fluent-bit/Chart.yaml
deleted file mode 100644
index 5ba5a2462..000000000
--- a/config-as-code/helm/charts/backbone-services/fluent-bit/Chart.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-apiVersion: v2
-name: fluent-bit
-description: A Helm chart for Kubernetes
-
-# A chart can be either an 'application' or a 'library' chart.
-#
-# Application charts are a collection of templates that can be packaged into versioned archives
-# to be deployed.
-#
-# Library charts provide useful utilities or functions for the chart developer. They're included as
-# a dependency of application charts to inject those utilities and functions into the rendering
-# pipeline. Library charts do not define any templates and therefore cannot be deployed.
-type: application
-
-# This is the chart version. This version number should be incremented each time you make changes
-# to the chart and its templates, including the app version.
-version: 0.1.0
-
-# This is the version number of the application being deployed. This version number should be
-# incremented each time you make changes to the application.
-appVersion: 1.16.0
diff --git a/config-as-code/helm/charts/backbone-services/fluent-bit/templates/_helpers.tpl b/config-as-code/helm/charts/backbone-services/fluent-bit/templates/_helpers.tpl
deleted file mode 100644
index c3b5dfbae..000000000
--- a/config-as-code/helm/charts/backbone-services/fluent-bit/templates/_helpers.tpl
+++ /dev/null
@@ -1,8 +0,0 @@
-{{- define "name" -}}
-{{- $envOverrides := index .Values (tpl (default .Chart.Name .Values.name) .) -}}
-{{- $baseValues := .Values | deepCopy -}}
-{{- $values := dict "Values" (mustMergeOverwrite $baseValues $envOverrides) -}}
-{{- with mustMergeOverwrite . $values -}}
-{{- default .Chart.Name .Values.name -}}
-{{- end }}
-{{- end }}
\ No newline at end of file
diff --git a/config-as-code/helm/charts/backbone-services/fluent-bit/templates/clusterrole.yaml b/config-as-code/helm/charts/backbone-services/fluent-bit/templates/clusterrole.yaml
deleted file mode 100644
index beeb0daca..000000000
--- a/config-as-code/helm/charts/backbone-services/fluent-bit/templates/clusterrole.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: {{ template "name" . }}-read
-rules:
-- apiGroups: [""]
-  resources:
-    - namespaces
-    - pods
-  verbs: ["get", "list", "watch"]
diff --git a/config-as-code/helm/charts/backbone-services/fluent-bit/templates/clusterrolebinding.yaml b/config-as-code/helm/charts/backbone-services/fluent-bit/templates/clusterrolebinding.yaml
deleted file mode 100644
index 515c03238..000000000
--- a/config-as-code/helm/charts/backbone-services/fluent-bit/templates/clusterrolebinding.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: {{ template "name" . }}-read
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: {{ template "name" . }}-read
-subjects:
-- kind: ServiceAccount
-  name: {{ template "name" . }}
-  namespace: {{ .Values.namespace }}
diff --git a/config-as-code/helm/charts/backbone-services/fluent-bit/templates/configmap.yaml b/config-as-code/helm/charts/backbone-services/fluent-bit/templates/configmap.yaml
deleted file mode 100644
index 38e71c249..000000000
--- a/config-as-code/helm/charts/backbone-services/fluent-bit/templates/configmap.yaml
+++ /dev/null
@@ -1,151 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ template "name" . }}-config
-  namespace: {{ .Values.namespace }}
-  labels:
-    app: {{ template "name" . }}
-data:
-  # Configuration files: server, input, filters and output
-  # ======================================================
-  fluent-bit.conf: |
-    [SERVICE]
-        Flush           1
-        Log_Level       info
-        Daemon          off
-        Parsers_File    parsers.conf
-        HTTP_Server     On
-        HTTP_Listen     0.0.0.0
-        HTTP_Port       2020
-    @INCLUDE input-egov-services.conf
-    @INCLUDE input-egov-infra.conf
-    @INCLUDE filter-kubernetes.conf
-    @INCLUDE output-kafka-egov-services.conf
-    @INCLUDE output-kafka-infra.conf
-    [INPUT]
-        Name            tail
-        Tag             kube.*
-        Path            /var/log/containers/*.log
-        Parser          docker
-        DB              /var/log/flb_kube.db
-        Mem_Buf_Limit   5MB
-        Skip_Long_Lines On
-    [OUTPUT]
-        Name            es
-        Match           *
-        Host            elasticsearch-data-v1.es-cluster
-        Port            9200
-        Index           fluentbit-%Y.%m.%d
-        Type            flb
-        Logstash_Format On
-        Logstash_Prefix fluentbit
-
-  input-egov-services.conf: |
-    [INPUT]
-        Name             tail
-        Tag              kube_egov_services.*
-        Path             /var/log/containers/*_egov_*.log
-        DB               /var/log/flb_egov_services_log_offsets.db
-        Buffer_Max_Size  10MB
-        Mem_Buf_Limit    30MB
-        Refresh_Interval 60
-  input-egov-infra.conf: |
-    [INPUT]
-        Name             tail
-        Tag              kube_egov_infra.*
-        Path             /var/log/containers/*.log
-        Exclude_Path     *.gz,*.1,/var/log/containers/*_egov_*.log,/var/log/containers/*_ispirit_*.log,/var/log/containers/*_kube-system_*.log
-        DB               /var/log/flb_egov_infra_log_offsets.db
-        Mem_Buf_Limit    3MB
-        Skip_Long_Lines  On
-        Refresh_Interval 60
-  filter-kubernetes.conf: |
-    [FILTER]
-        Name         parser
-        Match        kube_egov_services.*
-        Key_Name     log
-        Parser       json
-        Reserve_Data True
-    [FILTER]
-        Name         kubernetes
-        Match        *
-        Kube_URL     https://kubernetes.default.svc.cluster.local:443
-        Merge_Log    On
-        Annotations  Off
-  output-kafka-egov-services.conf: |
-    [OUTPUT]
-        Name          kafka
-        Match         kube_egov_services.*
-        Brokers       ${KAFKA_BROKERS}
-        Topics        ${KAFKA_EGOV_SERVICES_LOGS_TOPIC}
-        Timestamp_Key @ts
-        # hides errors "Receive failed: Disconnected" when kafka kills idle connections
-        rdkafka.log.connection.close false
-        # producer buffer is not included in http://fluentbit.io/documentation/0.12/configuration/memory_usage.html#estimating
-        rdkafka.queue.buffering.max.kbytes 10240
-        # for logs you'll probably want this to be 0 or 1, not more
-        rdkafka.request.required.acks 1
-        rdkafka.max.in.flight.requests.per.connection 5
-        rdkafka.retry.backoff.ms 500
-        rdkafka.linger.ms 500
-  output-kafka-infra.conf: |
-    [OUTPUT]
-        Name          kafka
-        Match         kube_egov_infra.*
-        Brokers       ${KAFKA_BROKERS}
-        Topics        ${KAFKA_EGOV_INFRA_LOGS_TOPIC}
-        Timestamp_Key @ts
-        # hides errors "Receive failed: Disconnected" when kafka kills idle connections
-        rdkafka.log.connection.close false
-        # producer buffer is not included in http://fluentbit.io/documentation/0.12/configuration/memory_usage.html#estimating
-        rdkafka.queue.buffering.max.kbytes 10240
-        # for logs you'll probably want this to be 0 or 1, not more
-        rdkafka.request.required.acks 1
-        rdkafka.max.in.flight.requests.per.connection 5
-        rdkafka.retry.backoff.ms 500
-        rdkafka.linger.ms 500
-  parsers.conf: |
-    [PARSER]
-        Name   apache
-        Format regex
-        Regex  ^(?<host>[^ ]*) [^ ]* (?<user>[^ ]*) \[(?