I'm facing org.elasticsearch.bootstrap.StartupException: java.lang.IllegalArgumentException: you cannot specify a keystore and key file

I'm trying to enable X-Pack security so that internal and external communication happens over HTTPS instead of HTTP. Please guide me on how to do that, and on how to resolve the error below.
Error:

{"type": "deprecation.elasticsearch", "timestamp": "2024-01-12T12:17:09,313Z", "level": "CRITICAL", "component": "o.e.d.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "legacy role settings [node.data, node.remote_cluster_client, node.ingest, node.master, node.ml] are deprecated, use [node.roles=[transform, data_frozen, master, remote_cluster_client, data, ml, data_content, data_hot, data_warm, data_cold, ingest]]", "key": "legacy role settings", "category": "settings" }
{"type": "server", "timestamp": "2024-01-12T12:17:21,986Z", "level": "ERROR", "component": "o.e.b.ElasticsearchUncaughtExceptionHandler", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-0", "message": "uncaught exception in thread [main]",
"stacktrace": ["org.elasticsearch.bootstrap.StartupException: java.lang.IllegalArgumentException: you cannot specify a keystore and key file",
"at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:170) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.bootstrap.Elasticsearch.execute(Elasticsearch.java:157) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.cli.EnvironmentAwareCommand.execute(EnvironmentAwareCommand.java:77) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:112) ~[elasticsearch-cli-7.17.1.jar:7.17.1]",
"at org.elasticsearch.cli.Command.main(Command.java:77) ~[elasticsearch-cli-7.17.1.jar:7.17.1]",
"at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:122) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:80) ~[elasticsearch-7.17.1.jar:7.17.1]",
"Caused by: java.lang.IllegalArgumentException: you cannot specify a keystore and key file",
"at org.elasticsearch.xpack.core.ssl.CertParsingUtils.createKeyConfig(CertParsingUtils.java:232) ~[?:?]",
"at org.elasticsearch.xpack.core.ssl.SSLConfiguration.createKeyConfig(SSLConfiguration.java:164) ~[?:?]",

$ kubectl get secret |grep elastic
elastic-certificates Opaque 1 74m
elastic-secret Opaque 2 55m

Also, I used a mix of the solutions from the blogs above.

  • Q1. Which of these approaches is better for configuring Elasticsearch on Kubernetes with Helm?
  • Q2. How do I enable HTTPS security in Elasticsearch on Kubernetes with Helm?
  • Q3. What changes do I need to make here?

Here is the values.yaml that I'm using:

---
clusterName: "elasticsearch"
nodeGroup: "master"

# The service that non master groups will try to connect to when joining the cluster
# This should be set to clusterName + "-" + nodeGroup for your master group
masterService: ""

# Elasticsearch roles that will be applied to this nodeGroup
# These will be set as environment variables. E.g. node.master=true
roles:
  master: "true"
  ingest: "true"
  data: "true"
  remote_cluster_client: "true"
  ml: "true"

replicas: 3
minimumMasterNodes: 2

esMajorVersion: ""

clusterDeprecationIndexing: "false"

# Allows you to add any config files in /usr/share/elasticsearch/config/
# such as elasticsearch.yml and log4j2.properties
#esConfig: {}
#  elasticsearch.yml: |
#    key:
#      nestedkey: value
#  log4j2.properties: |
#    key = value

## shiva added
esConfig:
  elasticsearch.yml: |
    cluster.name: "docker-cluster"
    network.host: 0.0.0.0
    xpack.security.enabled: "true"
    xpack.security.transport.ssl.enabled: "true"
    xpack.security.transport.ssl.supported_protocols: "TLSv1.2"
    #xpack.security.transport.ssl.client_authentication: "none"
    xpack.security.transport.ssl.client_authentication: "required"
    xpack.security.transport.ssl.keystore.path: "/usr/share/elasticsearch/config/elastic-certificates.p12"
    xpack.security.transport.ssl.truststore.path: "/usr/share/elasticsearch/config/elastic-certificates.p12"
    xpack.security.transport.ssl.key: "/usr/share/elasticsearch/config/elastic-certificates.p12"
    xpack.security.transport.ssl.certificate: "/usr/share/elasticsearch/config/certs/tls.crt"
    #xpack.security.transport.ssl.certificate_authorities: "/usr/share/elasticsearch/config/certs/homelab-ca.crt"
    xpack.security.transport.ssl.certificate_authorities: "/usr/share/elasticsearch/config/elastic-certificates.p12"
    xpack.security.transport.ssl.verification_mode: "certificate"
    xpack.security.http.ssl.enabled: "true"
    xpack.security.http.ssl.supported_protocols: "TLSv1.2"
    #xpack.security.http.ssl.client_authentication: "none"
    xpack.security.http.ssl.client_authentication: "required"
    xpack.security.http.ssl.key: "/usr/share/elasticsearch/config/certs/tls.key"
    xpack.security.http.ssl.certificate: "/usr/share/elasticsearch/config/certs/tls.crt"
    #xpack.security.http.ssl.certificate_authorities: "/usr/share/elasticsearch/config/certs/homelab-ca.crt"
 
esJvmOptions: {}
#  processors.options: |
#    -XX:ActiveProcessorCount=3

# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
#extraEnvs: []
#  - name: MY_ENVIRONMENT_VAR
#    value: the_value_goes_here

## shiva added
extraEnvs:
  - name: "ELASTIC_PASSWORD"
    valueFrom:
      secretKeyRef:
        name: "elastic-secret"
        key: "ES_PASSWORD"
  - name: "ELASTIC_USERNAME"
    valueFrom:
      secretKeyRef:
        name: "elastic-secret"
        key: "ES_USERNAME"


# Allows you to load environment variables from kubernetes secret or config map
envFrom: []
# - secretRef:
#     name: env-secret
# - configMapRef:
#     name: config-map

# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
#secretMounts: []
#  - name: elastic-certificates
#    secretName: elastic-certificates
#    path: /usr/share/elasticsearch/config/certs
#    defaultMode: 0755

## shiva added
secretMounts:
  - name: "elastic-certificates"
    secretName: "elastic-certificates"
    path: "/usr/share/elasticsearch/config/certs"
    defaultMode: "0755"

hostAliases: []
#- ip: "127.0.0.1"
#  hostnames:
#  - "foo.local"
#  - "bar.local"

image: "docker.elastic.co/elasticsearch/elasticsearch"
imageTag: "7.17.1"
imagePullPolicy: "IfNotPresent"

podAnnotations:
  {}
  # iam.amazonaws.com/role: es-cluster

# additionals labels
labels: {}

esJavaOpts: "" # example: "-Xmx1g -Xms1g"

resources:
  requests:
    cpu: "1000m"
    memory: "2Gi"
  limits:
    cpu: "1000m"
    memory: "2Gi"

initResources:
  {}
  # limits:
  #   cpu: "25m"
  #   # memory: "128Mi"
  # requests:
  #   cpu: "25m"
  #   memory: "128Mi"

networkHost: "0.0.0.0"

volumeClaimTemplate:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 30Gi

rbac:
  create: false
  serviceAccountAnnotations: {}
  serviceAccountName: ""
  automountToken: true

podSecurityPolicy:
  create: false
  name: ""
  spec:
    privileged: true
    fsGroup:
      rule: RunAsAny
    runAsUser:
      rule: RunAsAny
    seLinux:
      rule: RunAsAny
    supplementalGroups:
      rule: RunAsAny
    volumes:
      - secret
      - configMap
      - persistentVolumeClaim
      - emptyDir

## Shiva added changes
lifecycle:
  postStart:
    exec:
            command: ["/bin/sh", "-c", "cp /usr/share/elasticsearch/config/certs/elastic-certificates.p12 /usr/share/elasticsearch/config/" ]


persistence:
  enabled: true
  labels:
    # Add default labels for the volumeClaimTemplate of the StatefulSet
    enabled: false
  annotations: {}

extraVolumes:
  []
  # - name: extras
  #   emptyDir: {}

extraVolumeMounts:
  []
  # - name: extras
  #   mountPath: /usr/share/extras
  #   readOnly: true

extraContainers:
  []
  # - name: do-something
  #   image: busybox
  #   command: ['do', 'something']

extraInitContainers:
  []
  # - name: do-something
  #   image: busybox
  #   command: ['do', 'something']

# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""

# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
antiAffinityTopologyKey: "kubernetes.io/hostname"

# and that they will never end up on the same node. Setting this to soft will do this "best effort"
antiAffinity: "hard"

# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
nodeAffinity: {}

# The default is to deploy all pods serially. By setting this to parallel all pods are started at
# the same time when bootstrapping the cluster
podManagementPolicy: "Parallel"

# The environment variables injected by service links are not used, but can lead to slow Elasticsearch boot times when
# there are many services in the current namespace.
# If you experience slow pod startups you probably want to set this to `false`.
enableServiceLinks: true

protocol: https
httpPort: 9200
transportPort: 9300

service:
  enabled: true
  labels: {}
  labelsHeadless: {}
  type: ClusterIP
  # Consider that all endpoints are considered "ready" even if the Pods themselves are not
  # https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec
  publishNotReadyAddresses: false
  nodePort: ""
  annotations: {}
  httpPortName: https
  transportPortName: transport
  loadBalancerIP: ""
  loadBalancerSourceRanges: []
  externalTrafficPolicy: ""

updateStrategy: RollingUpdate

# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1

podSecurityContext:
  fsGroup: 1000
  runAsUser: 1000

securityContext:
  capabilities:
    drop:
      - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000

# How long to wait for elasticsearch to stop gracefully
terminationGracePeriod: 120

sysctlVmMaxMapCount: 262144

readinessProbe:
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 3
  timeoutSeconds: 5

# https://www.elastic.co/guide/en/elasticsearch/reference/7.17/cluster-health.html#request-params wait_for_status
clusterHealthCheckParams: "wait_for_status=green&timeout=1s"

## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""

imagePullSecrets: []
nodeSelector: {}
tolerations: []

# Enabling this will publicly expose your Elasticsearch instance.
# Only enable this if you have security enabled on your cluster
ingress:
  enabled: false
  annotations: {}
  # kubernetes.io/ingress.class: nginx
  # kubernetes.io/tls-acme: "true"
  className: "nginx"
  pathtype: ImplementationSpecific
  hosts:
    - host: chart-example.local
      paths:
        - path: /
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

nameOverride: ""
fullnameOverride: ""
healthNameOverride: ""

lifecycle:
  {}
  # preStop:
  #   exec:
  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
  # postStart:
  #   exec:
  #     command:
  #       - bash
  #       - -c
  #       - |

sysctlInitContainer:
  enabled: true

keystore: []

networkPolicy:
  ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
  ## In order for a Pod to access Elasticsearch, it needs to have the following label:
  ## {{ template "uname" . }}-client: "true"
  ## Example for default configuration to access HTTP port:
  ## elasticsearch-master-http-client: "true"
  ## Example for default configuration to access transport port:
  ## elasticsearch-master-transport-client: "true"

  http:
    enabled: false
    ## if explicitNamespacesSelector is not set or set to {}, only client Pods being in the networkPolicy's namespace
    ## and matching all criteria can reach the DB.
    ## But sometimes, we want the Pods to be accessible to clients from other namespaces, in this case, we can use this
    ## parameter to select these namespaces
    ##
    # explicitNamespacesSelector:
    #   # Accept from namespaces with all those different rules (only from whitelisted Pods)
    #   matchLabels:
    #     role: frontend
    #   matchExpressions:
    #     - {key: role, operator: In, values: [frontend]}
    ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed.
    ##
    # additionalRules:
    #   - podSelector:
    #       matchLabels:
    #         role: frontend
    #   - podSelector:
    #       matchExpressions:
    #         - key: role
    #           operator: In
    #           values:
    #             - frontend

  transport:
    ## Note that all Elasticsearch Pods can talk to themselves using transport port even if enabled.
    enabled: false
    # explicitNamespacesSelector:
    #   matchLabels:
    #     role: frontend
    #   matchExpressions:
    #     - {key: role, operator: In, values: [frontend]}
    # additionalRules:
    #   - podSelector:
    #       matchLabels:
    #         role: frontend
    #   - podSelector:
    #       matchExpressions:
    #         - key: role
    #           operator: In
    #           values:
    #             - frontend

tests:
  enabled: true

# Deprecated
# please use the above podSecurityContext.fsGroup instead
fsGroup: ""                          

Hi @bshiwanand,

Looking at your config and this similar issue, I think the main problem is that you have both the key file and the keystore configuration set; you should choose one or the other.
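
For reference, it can also help to confirm what is actually reaching the pods: kubectl describe secret shows which keys each secret contains, and (if a pod stays up long enough to exec into) an ls shows what lands at the paths your elasticsearch.yml references. The pod name below is taken from your logs, so adjust it to your release:

$ kubectl describe secret elastic-certificates
$ kubectl exec elasticsearch-master-0 -- ls -l /usr/share/elasticsearch/config/certs
$ kubectl exec elasticsearch-master-0 -- ls -l /usr/share/elasticsearch/config/elastic-certificates.p12

The certs directory is where your secretMounts entry mounts the secret, and the last path is where your postStart hook copies the PKCS#12 file (and where your keystore.path setting expects it).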

Hope that helps!

Hi,

In your Elasticsearch configuration, you should either use a keystore or specify the key and certificate files, but not both.

In your values.yaml file, you have specified both xpack.security.transport.ssl.keystore.path and xpack.security.transport.ssl.key, which is causing the error. You should remove one of them.

If you want to use the keystore, your configuration should look like this:

xpack.security.transport.ssl.enabled: "true"
xpack.security.transport.ssl.keystore.path: "/usr/share/elasticsearch/config/elastic-certificates.p12"
xpack.security.transport.ssl.truststore.path: "/usr/share/elasticsearch/config/elastic-certificates.p12"

If you want to use the key and certificate files, your configuration should look like this:

xpack.security.transport.ssl.enabled: "true"
xpack.security.transport.ssl.key: "/usr/share/elasticsearch/config/certs/tls.key"
xpack.security.transport.ssl.certificate: "/usr/share/elasticsearch/config/certs/tls.crt"
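
One more thing: if your elastic-certificates.p12 file is protected by a password, Elasticsearch also needs that password as a secure setting, otherwise startup will fail while loading the SSL configuration. With this chart, one way to provide it is through the keystore value (a sketch, assuming the chart's keystore feature works as described in its README; the secret name elastic-certificates-password is just an example that you would create yourself):

$ kubectl create secret generic elastic-certificates-password \
    --from-literal=xpack.security.transport.ssl.keystore.secure_password=<your-p12-password> \
    --from-literal=xpack.security.transport.ssl.truststore.secure_password=<your-p12-password>

keystore:
  - secretName: elastic-certificates-password

Alternatively, generate the PKCS#12 file without a password (for example, press Enter at the password prompts of elasticsearch-certutil cert), in which case no secure_password entries are needed.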

Regards

Hi @yago82

Thanks for your reply. I have updated the configuration to use the keystore approach.
Now it looks like this:

## shiva added
esConfig:
  elasticsearch.yml: |
    cluster.name: "docker-cluster"
    network.host: 0.0.0.0
    xpack.security.enabled: "true"
    xpack.security.transport.ssl.enabled: "true"
    xpack.security.transport.ssl.keystore.path: elastic-certificates.p12
    xpack.security.transport.ssl.truststore.path: elastic-certificates.p12

I then ran helm upgrade, and now I'm seeing the issues below:

  • Elasticsearch pods are not coming up
  • Containers are in a failed state
  • The pods fail with the error below:
{"type": "deprecation.elasticsearch", "timestamp": "2024-01-15T08:02:43,323Z", "level": "CRITICAL", "component": "o.e.d.c.s.Settings", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-2", "message": "[node.data] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version.", "key": "node.data", "category": "settings" }
{"type": "server", "timestamp": "2024-01-15T08:02:43,339Z", "level": "INFO", "component": "o.e.e.NodeEnvironment", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-2", "message": "using [1] data paths, mounts [[/usr/share/elasticsearch/data (/dev/sdf)]], net usable_space [29.3gb], net total_space [29.3gb], types [ext4]" }
{"type": "server", "timestamp": "2024-01-15T08:02:43,339Z", "level": "INFO", "component": "o.e.e.NodeEnvironment", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-2", "message": "heap size [1gb], compressed ordinary object pointers [true]" }
{"type": "deprecation.elasticsearch", "timestamp": "2024-01-15T08:02:43,343Z", "level": "CRITICAL", "component": "o.e.d.c.s.Settings", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-2", "message": "[node.master] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version.", "key": "node.master", "category": "settings" }
{"type": "deprecation.elasticsearch", "timestamp": "2024-01-15T08:02:43,347Z", "level": "CRITICAL", "component": "o.e.d.c.s.Settings", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-2", "message": "[node.remote_cluster_client] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version.", "key": "node.remote_cluster_client", "category": "settings" }
{"type": "deprecation.elasticsearch", "timestamp": "2024-01-15T08:02:43,349Z", "level": "CRITICAL", "component": "o.e.d.c.s.Settings", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-2", "message": "[node.ingest] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version.", "key": "node.ingest", "category": "settings" }
{"type": "server", "timestamp": "2024-01-15T08:02:43,527Z", "level": "INFO", "component": "o.e.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-2", "message": "node name [elasticsearch-master-2], node ID [pswl4B9VSwC77ItVDyF0SQ], cluster name [elasticsearch], roles [transform, data_frozen, master, remote_cluster_client, data, ml, data_content, data_hot, data_warm, data_cold, ingest]" }
{"type": "deprecation.elasticsearch", "timestamp": "2024-01-15T08:02:43,529Z", "level": "CRITICAL", "component": "o.e.d.n.Node", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-2", "message": "legacy role settings [node.data, node.remote_cluster_client, node.ingest, node.master, node.ml] are deprecated, use [node.roles=[transform, data_frozen, master, remote_cluster_client, data, ml, data_content, data_hot, data_warm, data_cold, ingest]]", "key": "legacy role settings", "category": "settings" }
{"type": "server", "timestamp": "2024-01-15T08:02:53,626Z", "level": "ERROR", "component": "o.e.b.ElasticsearchUncaughtExceptionHandler", "cluster.name": "elasticsearch", "node.name": "elasticsearch-master-2", "message": "uncaught exception in thread [main]",
"stacktrace": ["org.elasticsearch.bootstrap.StartupException: ElasticsearchSecurityException[failed to load SSL configuration [xpack.security.transport.ssl]]; nested: ElasticsearchException[failed to initialize SSL TrustManager]; nested: IOException[keystore password was incorrect]; nested: UnrecoverableKeyException[failed to decrypt safe contents entry: javax.crypto.BadPaddingException: Given final block not properly padded. Such issues can arise if a bad key is used during decryption.];",
"at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:170) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.bootstrap.Elasticsearch.execute(Elasticsearch.java:157) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.cli.EnvironmentAwareCommand.execute(EnvironmentAwareCommand.java:77) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:112) ~[elasticsearch-cli-7.17.1.jar:7.17.1]",
"at org.elasticsearch.cli.Command.main(Command.java:77) ~[elasticsearch-cli-7.17.1.jar:7.17.1]",
"at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:122) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:80) ~[elasticsearch-7.17.1.jar:7.17.1]",
"Caused by: org.elasticsearch.ElasticsearchSecurityException: failed to load SSL configuration [xpack.security.transport.ssl]",
"at org.elasticsearch.xpack.core.ssl.SSLService.lambda$loadSSLConfigurations$5(SSLService.java:548) ~[?:?]",
"at java.util.HashMap.forEach(HashMap.java:1421) ~[?:?]",
"at java.util.Collections$UnmodifiableMap.forEach(Collections.java:1553) ~[?:?]",
"at org.elasticsearch.xpack.core.ssl.SSLService.loadSSLConfigurations(SSLService.java:544) ~[?:?]",
"at org.elasticsearch.xpack.core.ssl.SSLService.<init>(SSLService.java:145) ~[?:?]",
"at org.elasticsearch.xpack.core.XPackPlugin.createSSLService(XPackPlugin.java:525) ~[?:?]",
"at org.elasticsearch.xpack.core.XPackPlugin.createComponents(XPackPlugin.java:338) ~[?:?]",
"at org.elasticsearch.node.Node.lambda$new$18(Node.java:736) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at java.util.stream.ReferencePipeline$7$1.accept(ReferencePipeline.java:273) ~[?:?]",
"at java.util.ArrayList$ArrayListSpliterator.forEachRemaining(ArrayList.java:1625) ~[?:?]",
"at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:509) ~[?:?]",
"at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:499) ~[?:?]",
"at java.util.stream.ReduceOps$ReduceOp.evaluateSequential(ReduceOps.java:921) ~[?:?]",
"at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234) ~[?:?]",
"at java.util.stream.ReferencePipeline.collect(ReferencePipeline.java:682) ~[?:?]",
"at org.elasticsearch.node.Node.<init>(Node.java:750) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.node.Node.<init>(Node.java:309) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.bootstrap.Bootstrap$5.<init>(Bootstrap.java:234) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.bootstrap.Bootstrap.setup(Bootstrap.java:234) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:434) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:166) ~[elasticsearch-7.17.1.jar:7.17.1]",
"... 6 more",
"Caused by: org.elasticsearch.ElasticsearchException: failed to initialize SSL TrustManager",
"at org.elasticsearch.xpack.core.ssl.StoreTrustConfig.createTrustManager(StoreTrustConfig.java:75) ~[?:?]",
"at org.elasticsearch.xpack.core.ssl.SSLService.createSslContext(SSLService.java:453) ~[?:?]",
"at java.util.HashMap.computeIfAbsent(HashMap.java:1220) ~[?:?]",
"at org.elasticsearch.xpack.core.ssl.SSLService.lambda$loadSSLConfigurations$5(SSLService.java:546) ~[?:?]",
"at java.util.HashMap.forEach(HashMap.java:1421) ~[?:?]",
"at java.util.Collections$UnmodifiableMap.forEach(Collections.java:1553) ~[?:?]",
"at org.elasticsearch.xpack.core.ssl.SSLService.loadSSLConfigurations(SSLService.java:544) ~[?:?]",
"at org.elasticsearch.xpack.core.ssl.SSLService.<init>(SSLService.java:145) ~[?:?]",
"at org.elasticsearch.xpack.core.XPackPlugin.createSSLService(XPackPlugin.java:525) ~[?:?]",
"at org.elasticsearch.xpack.core.XPackPlugin.createComponents(XPackPlugin.java:338) ~[?:?]",
"at org.elasticsearch.node.Node.lambda$new$18(Node.java:736) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at java.util.stream.ReferencePipeline$7$1.accept(ReferencePipeline.java:273) ~[?:?]",
"at java.util.ArrayList$ArrayListSpliterator.forEachRemaining(ArrayList.java:1625) ~[?:?]",
"at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:509) ~[?:?]",
"at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:499) ~[?:?]",
"at java.util.stream.ReduceOps$ReduceOp.evaluateSequential(ReduceOps.java:921) ~[?:?]",
"at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234) ~[?:?]",
"at java.util.stream.ReferencePipeline.collect(ReferencePipeline.java:682) ~[?:?]",
"at org.elasticsearch.node.Node.<init>(Node.java:750) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.node.Node.<init>(Node.java:309) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.bootstrap.Bootstrap$5.<init>(Bootstrap.java:234) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.bootstrap.Bootstrap.setup(Bootstrap.java:234) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:434) ~[elasticsearch-7.17.1.jar:7.17.1]",
"at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:166) ~[elasticsearch-7.17.1.jar:7.17.1]",
"... 6 more",

Hi @carly.richmond, thanks for your time and your reply.
I'm working on it and removing the unnecessary settings from the configuration.
