I'm trying to enable X-Pack security so that both internal (transport) and external (HTTP) communication happens over HTTPS instead of plain HTTP. Please guide me on how to do that, and on how to resolve the error below.
Error:
{"type": "deprecation.elasticsearch", "timestamp": "2024-01-12T12:17:09,313Z", "level": "CRITICAL", "component": "o.e.d.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "legacy role settings [node.data, node.remote_cluster_client, node.ingest, node.master, node.ml] are deprecated, use [node.roles=[transform, data_frozen, master, remote_cluster_client, data, ml, data_content, data_hot, data_warm, data_cold, ingest]]", "key": "legacy role settings", "category": "settings" }
{"type": "server", "timestamp": "2024-01-12T12:17:21,986Z", "level": "ERROR", "component": "o.e.b.ElasticsearchUncaughtExceptionHandler", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "uncaught exception in thread [main]",
"stacktrace": ["org.elasticsearch.bootstrap.StartupException: java.lang.IllegalArgumentException: you cannot specify a keystore and key file",
"at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:170) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.bootstrap.Elasticsearch.execute(Elasticsearch.java:157) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.cli.EnvironmentAwareCommand.execute(EnvironmentAwareCommand.java:77) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:112) ~[elasticsearch-cli-7.17.1.jar:7.17.1]",
"at org.elasticsearch.cli.Command.main(Command.java:77) ~[elasticsearch-cli-7.17.1.jar:7.17.1]",
"at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:122) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:80) ~[elasticsearch-7.17.1.jar:7.17.1]",
"Caused by: java.lang.IllegalArgumentException: you cannot specify a keystore and key file",
"at org.elasticsearch.xpack.core.ssl.CertParsingUtils.createKeyConfig(CertParsingUtils.java:232) ~[?:?]",
"at org.elasticsearch.xpack.core.ssl.SSLConfiguration.createKeyConfig(SSLConfiguration.java:164) ~[?:?]",
uncaught exception in thread [main]
$ kubectl get secret |grep elastic
elastic-certificates Opaque 1 74m
elastic-secret Opaque 2 55m
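For reference, this is roughly how I created the two secrets (reconstructed from memory, so treat the exact certutil flags and the password placeholder as approximations):
$ bin/elasticsearch-certutil ca --out elastic-stack-ca.p12 --pass ""
$ bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12 --out elastic-certificates.p12 --pass ""
$ kubectl create secret generic elastic-certificates --from-file=elastic-certificates.p12
$ kubectl create secret generic elastic-secret --from-literal=ES_USERNAME=elastic --from-literal=ES_PASSWORD=<password>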
I also pieced together a mixed solution from the blogs mentioned above.
- Q1. Which of those approaches is better for configuring this on Kubernetes (k8s) with Helm?
- Q2. How do I enable HTTPS security in Elasticsearch with Kubernetes + Helm?
- Q3. What changes do I need to make here?
Here is the values.yaml that I'm using:
---
clusterName: "elasticsearch"
nodeGroup: "master"
# The service that non master groups will try to connect to when joining the cluster
# This should be set to clusterName + "-" + nodeGroup for your master group
masterService: ""
# Elasticsearch roles that will be applied to this nodeGroup
# These will be set as environment variables. E.g. node.master=true
roles:
  master: "true"
  ingest: "true"
  data: "true"
  remote_cluster_client: "true"
  ml: "true"
replicas: 3
minimumMasterNodes: 2
esMajorVersion: ""
clusterDeprecationIndexing: "false"
# Allows you to add any config files in /usr/share/elasticsearch/config/
# such as elasticsearch.yml and log4j2.properties
#esConfig: {}
#  elasticsearch.yml: |
#    key:
#      nestedkey: value
#  log4j2.properties: |
#    key = value
## shiva added
esConfig:
  elasticsearch.yml: |
    cluster.name: "docker-cluster"
    network.host: 0.0.0.0
    xpack.security.enabled: "true"
    xpack.security.transport.ssl.enabled: "true"
    xpack.security.transport.ssl.supported_protocols: "TLSv1.2"
    #xpack.security.transport.ssl.client_authentication: "none"
    xpack.security.transport.ssl.client_authentication: "required"
    xpack.security.transport.ssl.keystore.path: "/usr/share/elasticsearch/config/elastic-certificates.p12"
    xpack.security.transport.ssl.truststore.path: "/usr/share/elasticsearch/config/elastic-certificates.p12"
    xpack.security.transport.ssl.key: "/usr/share/elasticsearch/config/elastic-certificates.p12"
    xpack.security.transport.ssl.certificate: "/usr/share/elasticsearch/config/certs/tls.crt"
    #xpack.security.transport.ssl.certificate_authorities: "/usr/share/elasticsearch/config/certs/homelab-ca.crt"
    xpack.security.transport.ssl.certificate_authorities: "/usr/share/elasticsearch/config/elastic-certificates.p12"
    xpack.security.transport.ssl.verification_mode: "certificate"
    xpack.security.http.ssl.enabled: "true"
    xpack.security.http.ssl.supported_protocols: "TLSv1.2"
    #xpack.security.http.ssl.client_authentication: "none"
    xpack.security.http.ssl.client_authentication: "required"
    xpack.security.http.ssl.key: "/usr/share/elasticsearch/config/certs/tls.key"
    xpack.security.http.ssl.certificate: "/usr/share/elasticsearch/config/certs/tls.crt"
    #xpack.security.http.ssl.certificate_authorities: "/usr/share/elasticsearch/config/certs/homelab-ca.crt"
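## About the fatal "you cannot specify a keystore and key file" error: as far as
## I can tell, each SSL context must use EITHER the PKCS#12 keystore/truststore
## settings OR the PEM key/certificate/certificate_authorities settings, never
## both -- and the transport block above mixes them. A minimal keystore-only
## sketch of what I believe Elasticsearch expects (my assumption, commented out):
# esConfig:
#   elasticsearch.yml: |
#     xpack.security.enabled: "true"
#     xpack.security.transport.ssl.enabled: "true"
#     xpack.security.transport.ssl.verification_mode: "certificate"
#     xpack.security.transport.ssl.keystore.path: "/usr/share/elasticsearch/config/elastic-certificates.p12"
#     xpack.security.transport.ssl.truststore.path: "/usr/share/elasticsearch/config/elastic-certificates.p12"
#     xpack.security.http.ssl.enabled: "true"
#     xpack.security.http.ssl.keystore.path: "/usr/share/elasticsearch/config/elastic-certificates.p12"
## I also wonder whether http.ssl.client_authentication: "required" will break
## the chart's readiness probe, since every REST client (curl included) would
## then need a client certificate.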
esJvmOptions: {}
# processors.options: |
# -XX:ActiveProcessorCount=3
# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
#extraEnvs: []
# - name: MY_ENVIRONMENT_VAR
#   value: the_value_goes_here
## shiva added
extraEnvs:
  - name: "ELASTIC_PASSWORD"
    valueFrom:
      secretKeyRef:
        name: "elastic-secret"
        key: "ES_PASSWORD"
  - name: "ELASTIC_USERNAME"
    valueFrom:
      secretKeyRef:
        name: "elastic-secret"
        key: "ES_USERNAME"
# Allows you to load environment variables from kubernetes secret or config map
envFrom: []
# - secretRef:
#     name: env-secret
# - configMapRef:
#     name: config-map
# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
#secretMounts: []
#  - name: elastic-certificates
#    secretName: elastic-certificates
#    path: /usr/share/elasticsearch/config/certs
#    defaultMode: 0755
## shiva added
secretMounts:
  - name: "elastic-certificates"
    secretName: "elastic-certificates"
    path: "/usr/share/elasticsearch/config/certs"
    defaultMode: "0755"
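## Side note: the chart's commented example above uses an unquoted octal
## (defaultMode: 0755); I'm not sure the quoted "0755" string survives the
## chart's template as a valid mode. This mount at .../config/certs is what the
## http.ssl PEM paths in esConfig point at; the transport keystore path points
## one directory up, which is what the postStart copy further down is for.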
hostAliases: []
#- ip: "127.0.0.1"
#  hostnames:
#  - "foo.local"
#  - "bar.local"
image: "docker.elastic.co/elasticsearch/elasticsearch"
imageTag: "7.17.1"
imagePullPolicy: "IfNotPresent"
podAnnotations:
  {}
# iam.amazonaws.com/role: es-cluster
# additionals labels
labels: {}
esJavaOpts: "" # example: "-Xmx1g -Xms1g"
resources:
  requests:
    cpu: "1000m"
    memory: "2Gi"
  limits:
    cpu: "1000m"
    memory: "2Gi"
initResources:
  {}
  # limits:
  #   cpu: "25m"
  #   # memory: "128Mi"
  # requests:
  #   cpu: "25m"
  #   memory: "128Mi"
networkHost: "0.0.0.0"
volumeClaimTemplate:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 30Gi
rbac:
  create: false
  serviceAccountAnnotations: {}
  serviceAccountName: ""
  automountToken: true
podSecurityPolicy:
  create: false
  name: ""
  spec:
    privileged: true
    fsGroup:
      rule: RunAsAny
    runAsUser:
      rule: RunAsAny
    seLinux:
      rule: RunAsAny
    supplementalGroups:
      rule: RunAsAny
    volumes:
      - secret
      - configMap
      - persistentVolumeClaim
      - emptyDir
## Shiva added changes
lifecycle:
  postStart:
    exec:
      command: ["/bin/sh", "-c", "cp /usr/share/elasticsearch/config/certs/elastic-certificates.p12 /usr/share/elasticsearch/config/"]
persistence:
  enabled: true
  labels:
    # Add default labels for the volumeClaimTemplate of the StatefulSet
    enabled: false
  annotations: {}
extraVolumes:
  []
  # - name: extras
  #   emptyDir: {}
extraVolumeMounts:
  []
  # - name: extras
  #   mountPath: /usr/share/extras
  #   readOnly: true
extraContainers:
  []
  # - name: do-something
  #   image: busybox
  #   command: ['do', 'something']
extraInitContainers:
  []
  # - name: do-something
  #   image: busybox
  #   command: ['do', 'something']
# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""
# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
antiAffinityTopologyKey: "kubernetes.io/hostname"
# and that they will never end up on the same node. Setting this to soft will do this "best effort"
antiAffinity: "hard"
# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
nodeAffinity: {}
# The default is to deploy all pods serially. By setting this to parallel all pods are started at
# the same time when bootstrapping the cluster
podManagementPolicy: "Parallel"
# The environment variables injected by service links are not used, but can lead to slow Elasticsearch boot times when
# there are many services in the current namespace.
# If you experience slow pod startups you probably want to set this to `false`.
enableServiceLinks: true
protocol: https
httpPort: 9200
transportPort: 9300
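## Once the pods come up, I plan to sanity-check HTTPS roughly like this
## (a sketch; the service name follows clusterName-nodeGroup and the password
## placeholder is mine):
#   kubectl port-forward svc/elasticsearch-master 9200:9200
#   curl -k -u elastic:<password> "https://localhost:9200/_cluster/health?pretty"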
service:
  enabled: true
  labels: {}
  labelsHeadless: {}
  type: ClusterIP
  # Consider that all endpoints are considered "ready" even if the Pods themselves are not
  # https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec
  publishNotReadyAddresses: false
  nodePort: ""
  annotations: {}
  httpPortName: https
  transportPortName: transport
  loadBalancerIP: ""
  loadBalancerSourceRanges: []
  externalTrafficPolicy: ""
updateStrategy: RollingUpdate
# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1
podSecurityContext:
  fsGroup: 1000
  runAsUser: 1000
securityContext:
  capabilities:
    drop:
      - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000
# How long to wait for elasticsearch to stop gracefully
terminationGracePeriod: 120
sysctlVmMaxMapCount: 262144
readinessProbe:
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 3
  timeoutSeconds: 5
# https://www.elastic.co/guide/en/elasticsearch/reference/7.17/cluster-health.html#request-params wait_for_status
clusterHealthCheckParams: "wait_for_status=green&timeout=1s"
## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
imagePullSecrets: []
nodeSelector: {}
tolerations: []
# Enabling this will publicly expose your Elasticsearch instance.
# Only enable this if you have security enabled on your cluster
ingress:
  enabled: false
  annotations: {}
  # kubernetes.io/ingress.class: nginx
  # kubernetes.io/tls-acme: "true"
  className: "nginx"
  pathtype: ImplementationSpecific
  hosts:
    - host: chart-example.local
      paths:
        - path: /
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local
nameOverride: ""
fullnameOverride: ""
healthNameOverride: ""
lifecycle:
  {}
  # preStop:
  #   exec:
  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
  # postStart:
  #   exec:
  #     command:
  #       - bash
  #       - -c
  #       - |
sysctlInitContainer:
  enabled: true
keystore: []
networkPolicy:
  ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
  ## In order for a Pod to access Elasticsearch, it needs to have the following label:
  ## {{ template "uname" . }}-client: "true"
  ## Example for default configuration to access HTTP port:
  ## elasticsearch-master-http-client: "true"
  ## Example for default configuration to access transport port:
  ## elasticsearch-master-transport-client: "true"
  http:
    enabled: false
    ## if explicitNamespacesSelector is not set or set to {}, only client Pods being in the networkPolicy's namespace
    ## and matching all criteria can reach the DB.
    ## But sometimes, we want the Pods to be accessible to clients from other namespaces, in this case, we can use this
    ## parameter to select these namespaces
    ##
    # explicitNamespacesSelector:
    #   # Accept from namespaces with all those different rules (only from whitelisted Pods)
    #   matchLabels:
    #     role: frontend
    #   matchExpressions:
    #     - {key: role, operator: In, values: [frontend]}
    ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed.
    ##
    # additionalRules:
    #   - podSelector:
    #       matchLabels:
    #         role: frontend
    #   - podSelector:
    #       matchExpressions:
    #         - key: role
    #           operator: In
    #           values:
    #             - frontend
  transport:
    ## Note that all Elasticsearch Pods can talk to themselves using transport port even if enabled.
    enabled: false
    # explicitNamespacesSelector:
    #   matchLabels:
    #     role: frontend
    #   matchExpressions:
    #     - {key: role, operator: In, values: [frontend]}
    # additionalRules:
    #   - podSelector:
    #       matchLabels:
    #         role: frontend
    #   - podSelector:
    #       matchExpressions:
    #         - key: role
    #           operator: In
    #           values:
    #             - frontend
tests:
  enabled: true
# Deprecated
# please use the above podSecurityContext.fsGroup instead
fsGroup: ""