We set up Elasticsearch using the eck-stack GitHub repo and let Elasticsearch create its own CA transport certificate. After some time, we see the cluster moving from green to yellow.
On analysis, we observed a bad_certificate error on one of the nodes.
org.elasticsearch.http.AbstractHttpServerTransport","elasticsearch.cluster.uuid"
:"JtDDnWhOSV-hhcmt5dVvZg","elasticsearch.node.id":"FMDZTDRjQRaa6LJ_dThtjA","elasticsearch.node.name":"elasticsearch-es-masters-0","elasticsearch.cluster.name":"docker-cluster",
"error.type":"io.netty.handler.codec.DecoderException","error.message":"javax.net.ssl.SSLHandshakeException: (bad_certificate) Received fatal alert: bad_certificate","error.sta
ck_trace":"io.netty.handler.codec.DecoderException: javax.net.ssl.SSLHandshakeException: (bad_certificate) Received fatal alert: bad_certificate\\n\\tat io.netty.codec@4.1.118.Fi
nal/io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:500)\\n\\tat io.netty.codec@4.1.118.Final/io.netty.handler.codec.ByteToMessageDecoder.channel
Read(ByteToMessageDecoder.java:290)\\n\\tat io.netty.transport@4.1.118.Final/io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:4
44)\\n\\tat io.netty.transport@4.1.118.Final/io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420)\\n\\tat io.netty.transport@4.1
.118.Final/io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412)\\n\\tat io.netty.transport@4.1.118.Final/io.netty.channel.Defaul
tChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357)\\n\\tat io.netty.transport@4.1.118.Final/io.netty.channel.AbstractChannelHandlerContext.invokeChannelRe
ad(AbstractChannelHandlerContext.java:440)\\
GET _cat/nodes?v&h=id,ip,name,nodeRole
{
"statusCode": 502,
"error": "Bad Gateway",
"message": "self-signed certificate in certificate chain"
}
Elasticsearch : 9.1.0
Kubernetes : 1.32.9
Elastic configuration
elasticsearch-es-masters-0
elasticsearch-es-masters-1
elasticsearch-es-data-nodes-2
elasticsearch-es-data-nodes-0
elasticsearch-es-data-nodes-1
elasticsearch-es-masters-2
ElasticStack.yaml
eck-elasticsearch:
  version: 9.1.0
  nodeSets:
    # Dedicated master nodes.
    - name: masters
      count: 3
      config:
        node.roles: ["master"]
        # mmap disabled: avoids the vm.max_map_count bootstrap check entirely.
        node.store.allow_mmap: false
      podTemplate:
        spec:
          initContainers:
            # NOTE(review): with node.store.allow_mmap: false this sysctl is
            # not required by Elasticsearch; kept to preserve existing behavior.
            - name: sysctl
              command:
                - sh
                - "-c"
                - sysctl -w vm.max_map_count=262144
              securityContext:
                privileged: true
                runAsUser: 0
          containers:
            # FIX: the container MUST be named "elasticsearch" for ECK to merge
            # these overrides into the main Elasticsearch container. The previous
            # name ("elasticsearch-master") made ECK treat this entry as an
            # additional sidecar, so the env/resources/securityContext settings
            # were never applied to Elasticsearch itself.
            - name: elasticsearch
              env:
                - name: ES_JAVA_OPTS
                  value: "-Xmx3g -Xms3g"
              resources:
                limits:
                  memory: 10Gi
                  cpu: 4
                requests:
                  memory: 6Gi
                  cpu: 2
              securityContext:
                capabilities:
                  add:
                    - IPC_LOCK
                    - SYS_RESOURCE
      volumeClaimTemplates:
        - metadata:
            name: elasticsearch-data
          spec:
            accessModes: ["ReadWriteOnce"]
            storageClassName: default
            resources:
              requests:
                storage: 20Gi
    # Dedicated data nodes.
    - name: data-nodes
      count: 3
      config:
        node.roles: ["data"]
        node.store.allow_mmap: false
      podTemplate:
        spec:
          initContainers:
            # NOTE(review): redundant while node.store.allow_mmap is false;
            # kept to preserve existing behavior.
            - name: sysctl
              command:
                - sh
                - "-c"
                - sysctl -w vm.max_map_count=262144
              securityContext:
                privileged: true
                runAsUser: 0
          containers:
            # FIX: renamed from "elasticsearch-data" to "elasticsearch" so ECK
            # applies these overrides to the main container (see masters nodeSet).
            - name: elasticsearch
              env:
                - name: ES_JAVA_OPTS
                  value: "-Xmx16g -Xms16g"
              resources:
                limits:
                  memory: 64Gi
                  cpu: 14
                requests:
                  memory: 32Gi
                  cpu: 10
              securityContext:
                capabilities:
                  add:
                    - IPC_LOCK
                    - SYS_RESOURCE
      volumeClaimTemplates:
        - metadata:
            name: elasticsearch-data
          spec:
            accessModes: ["ReadWriteOnce"]
            storageClassName: default
            resources:
              requests:
                storage: 800Gi
eck-kibana:
version: 9.1.0
count: 1
enabled: true
elasticsearchRef:
name: elasticsearch
ingress:
enabled: true
className: nginx
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
nginx.ingress.kubernetes.io/proxy-ssl-verify: "false"
hosts:
- host: kibana.xxxx.com
tls:
enabled: true
secretName: kibana-cert-secret
podTemplate:
spec:
containers:
- name: kibana