I am using the file-based discovery plugin (discovery-file) to discover and set up my Elasticsearch cluster.
It works nicely at first: when unicast_hosts.txt is updated with the node addresses, the nodes discover each other and the cluster forms.
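For reference, the discovery setup on each node looks roughly like the sketch below (a minimal outline; the actual elasticsearch.yml contains more settings, and network.host of course differs per node):

# elasticsearch.yml on node1, with the plugin installed via: bin/elasticsearch-plugin install discovery-file
cluster.name: elasticcluster
network.host: 192.168.30.11
# use the file-based hosts provider, which reads config/discovery-file/unicast_hosts.txt
discovery.zen.hosts_provider: file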
[root@node1 elasticsearch]# pwd
/usr/share/elasticsearch
[root@node1 elasticsearch]# cat config/discovery-file/unicast_hosts.txt | grep 1
192.168.30.13:9300
192.168.30.12:9300
192.168.30.11:9300
[root@node1 elasticsearch]# curl 192.168.30.11:9200
{
"name" : "8QGUY9q",
"cluster_name" : "elasticcluster",
"cluster_uuid" : "7PMVJfFOTiqSKSjaJG6Yhg",
"version" : {
"number" : "6.2.2",
"build_hash" : "10b1edd",
"build_date" : "2018-02-16T19:01:30.685723Z",
"build_snapshot" : false,
"lucene_version" : "7.2.1",
"minimum_wire_compatibility_version" : "5.6.0",
"minimum_index_compatibility_version" : "5.0.0"
},
"tagline" : "You Know, for Search"
}
[root@node1 elasticsearch]# curl 192.168.30.11:9200/_cat/nodes?v
ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
192.168.30.12 52 93 25 3.29 2.91 3.82 mdi - ttFgsgv
192.168.30.13 51 93 25 3.29 2.91 3.82 mdi * CpfMf5q
192.168.30.11 60 93 25 3.29 2.91 3.82 mdi - 8QGUY9q
Then I removed 192.168.30.12 from unicast_hosts.txt on all Elasticsearch nodes to simulate a network partition (the node itself is still up and reachable). After removing node .12, the file looks like this:
[root@node1 elasticsearch]# cat config/discovery-file/unicast_hosts.txt | grep 1
192.168.30.13:9300
192.168.30.11:9300
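(For completeness: I edited the files by hand, but removing the entry on each node amounts to a one-liner like this.)
[root@node1 elasticsearch]# sed -i '/192.168.30.12:9300/d' config/discovery-file/unicast_hosts.txt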
I waited several minutes, but node .12 is still part of the cluster from node1's perspective:
[root@node1 elasticsearch]# curl 192.168.30.11:9200/_cat/nodes?v
ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
192.168.30.12 49 92 40 2.33 2.53 3.32 mdi - ttFgsgv
192.168.30.13 71 92 40 2.33 2.53 3.32 mdi * CpfMf5q
192.168.30.11 66 92 40 2.33 2.53 3.32 mdi - 8QGUY9q
May I know why node .12 is not removed from the node list? I was expecting the file provider to react to the change in unicast_hosts.txt and update the cluster's node list.
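For what it's worth, the discovery settings on a node can be checked via the nodes info API like this (filter_path only trims the response down to the discovery settings):
[root@node1 elasticsearch]# curl '192.168.30.11:9200/_nodes/_local/settings?pretty&filter_path=nodes.*.settings.discovery'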
I appreciate your help on this issue.