Hi, I previously had 3 elasticsearch nodes consisting of node 1, node 2 and node 3, but my node 2 encountered some OS related issues so it had to be formatted.
So I reinstalled and reconfigured node 2 the same way it was configured before it was formatted, but somehow node 2 is not joining the existing cluster and has formed a new Elasticsearch cluster instead. All my Elasticsearch nodes are using the same version, which is 7.12.0.
When I print out the list of nodes from the primary/master node using curl, it's only showing node 1 and node 3:
[root@node-1 ~]# curl -k --user elastic -X GET "https://node-1:9200/_cat/nodes?pretty"
Enter host password for user 'elastic':
192.168.23.90 44 99 4 0.00 0.03 0.02 cdfhilmrstw * node-1
192.168.23.92 52 98 1 0.00 0.03 0.05 cdfhilmrstw - node-3
[root@node-1 ~]#
I am able to view cluster status for node 1:
[root@node-1 ~]# curl -k --user elastic -H 'Content-Type: application/json' -XGET https://node-1:9200/_cluster/health?pretty
Enter host password for user 'elastic':
{
"cluster_name" : "umtcentral-log",
"status" : "green",
"timed_out" : false,
"number_of_nodes" : 2,
"number_of_data_nodes" : 2,
"active_primary_shards" : 914,
"active_shards" : 1820,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 0,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 100.0
}
and also node 3:
[root@node-1 ~]# curl -k --user elastic -H 'Content-Type: application/json' -XGET https://node-3:9200/_cluster/health?pretty
Enter host password for user 'elastic':
{
"cluster_name" : "umtcentral-log",
"status" : "green",
"timed_out" : false,
"number_of_nodes" : 2,
"number_of_data_nodes" : 2,
"active_primary_shards" : 914,
"active_shards" : 1820,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 0,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 100.0
}
[root@node-1 ~]#
When I check the cluster status for node 2, it says "no route to host" instead:
[root@node-1 ~]# curl -k --user elastic -H 'Content-Type: application/json' -XGET https://node-2:9200/_cluster/health?pretty
Enter host password for user 'elastic':
curl: (7) Failed connect to node-2:9200; No route to host
But what confuses me is that when I check the cluster status directly from node 2, rather than from the existing primary node, I get a reply instead of a "no route to host" error:
[root@node-2 ~]# curl -k --user elastic -H 'Content-Type: application/json' -XGET https://node-2:9200/_cluster/health?pretty
Enter host password for user 'elastic':
{
"cluster_name" : "umtcentral-log",
"status" : "green",
"timed_out" : false,
"number_of_nodes" : 1,
"number_of_data_nodes" : 1,
"active_primary_shards" : 5,
"active_shards" : 5,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 0,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 100.0
}
[root@node-2 ~]#
Below are snippets of my elasticsearch.yml file for node 1:
# ---------------------------------- Cluster -----------------------------------
# Use a descriptive name for your cluster:
#
#cluster.name: my-application
cluster.name: umtcentral-log
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#
#node.name: node-1
node.name: node-1
node.master: true
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
path.data: /var/lib/elasticsearch
#
# Path to log files:
#
path.logs: /var/log/elasticsearch
#
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
#bootstrap.memory_lock: true
bootstrap.memory_lock: true
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
#network.host: 192.168.0.1
network.host: "0.0.0.0"
network.bind_host: "0.0.0.0"
network.publish_host: "0.0.0.0"
#
# Set a custom port for HTTP:
#
#http.port: 9200
http.host: "0.0.0.0"
# For more information, consult the network module documentation.
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when this node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
#discovery.seed_hosts: ["host1", "host2"]
discovery.seed_hosts: ["node-1", "node-2", "node-3"]
discovery.zen.minimum_master_nodes: 2
#
# Bootstrap the cluster using an initial set of master-eligible nodes:
#
#cluster.initial_master_nodes: ["node-1", "node-2"]
cluster.initial_master_nodes: ["node-1", "node-2", "node-3"]
elasticsearch.yml for node 2:
# ---------------------------------- Cluster -----------------------------------
#
# Use a descriptive name for your cluster:
#
#cluster.name: my-application
cluster.name: umtcentral-log
node.data: true
#
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#
node.name: node-2
#
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
path.data: /var/lib/elasticsearch
#
# Path to log files:
#
path.logs: /var/log/elasticsearch
#
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
#bootstrap.memory_lock: true
bootstrap.memory_lock: true
#
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
#network.host: 192.168.0.1
network.host: "0.0.0.0"
network.bind_host: "0.0.0.0"
network.publish_host: "0.0.0.0"
#
# Set a custom port for HTTP:
#
#http.port: 9200
http.host: "0.0.0.0"
# For more information, consult the network module documentation.
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when this node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
#discovery.seed_hosts: ["host1", "host2"]
discovery.seed_hosts: ["node-1", "node-2", "node-3"]
discovery.zen.minimum_master_nodes: 2
#
# Bootstrap the cluster using an initial set of master-eligible nodes:
#
#cluster.initial_master_nodes: ["node-1", "node-2"]
cluster.initial_master_nodes: ["node-1", "node-2", "node-3"]
lastly elasticsearch.yml for node 3:
# ---------------------------------- Cluster -----------------------------------
#
# Use a descriptive name for your cluster:
#
#cluster.name: my-application
cluster.name: umtcentral-log
#
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#
#node.name: node-3
node.name: node-3
node.data: true
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
path.data: /var/lib/elasticsearch
#
# Path to log files:
#
path.logs: /var/log/elasticsearch
#
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
#bootstrap.memory_lock: true
bootstrap.memory_lock: true
# Elasticsearch performs poorly when the system is swapping the memory.
#
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
#network.host: 192.168.0.1
network.host: "0.0.0.0"
network.bind_host: "0.0.0.0"
network.publish_host: "0.0.0.0"
#
# Set a custom port for HTTP:
#
#http.port: 9200
http.host: "0.0.0.0"
# For more information, consult the network module documentation.
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when this node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
#discovery.seed_hosts: ["host1", "host2"]
discovery.seed_hosts: ["node-1", "node-2", "node-3"]
discovery.zen.minimum_master_nodes: 2
#
# Bootstrap the cluster using an initial set of master-eligible nodes:
#
#cluster.initial_master_nodes: ["node-1", "node-2"]
cluster.initial_master_nodes: ["node-1", "node-2", "node-3"]
I have had these issues for days and have tried to troubleshoot by referring to the official documentation, forums, and other resources, but it's still a dead end for me.
What steps did I miss to make sure all my Elasticsearch nodes are in sync and in the same cluster? I could really use some help here, please. Thanks in advance.