Hi
I am using ES 6.4. I have 3 machines, and each machine runs one ES master node and one ES data node as separate processes, so 6 nodes in total: 3 master nodes and 3 data nodes. The replication factor is set to 1 (one replica per shard).
When we restart all nodes together, the cluster goes into a red state. We have set both the minimum number of master nodes and the minimum number of data nodes required to 2. Is there anything else that could lead to this issue?
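(For context, the topology and the restart/recovery settings can be double-checked with something like the commands below. This is only a sketch: it assumes the default HTTP port 9200 on localhost, and that "min master / min data nodes" correspond to discovery.zen.minimum_master_nodes and the gateway.recover_after_* settings.)

# List every node with its role and whether it is the elected master
curl -s 'http://localhost:9200/_cat/nodes?v&h=name,node.role,master'

# Show the effective minimum-master and gateway recovery settings
curl -s 'http://localhost:9200/_cluster/settings?include_defaults=true&flat_settings=true&pretty' | grep -E 'minimum_master_nodes|gateway\.'

The unassigned shards after the restart look like this (from _cat/shards, showing only the affected index):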
metrics-2019.08.01 2 p UNASSIGNED CLUSTER_RECOVERED
metrics-2019.08.01 2 r UNASSIGNED CLUSTER_RECOVERED
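The allocation explanation below for the unassigned primary comes from the cluster allocation explain API, i.e. roughly this request (host and exact body are assumptions):

# Ask why the primary of shard 2 of metrics-2019.08.01 is unassigned
curl -s -H 'Content-Type: application/json' \
  'http://localhost:9200/_cluster/allocation/explain?pretty' \
  -d '{ "index": "metrics-2019.08.01", "shard": 2, "primary": true }'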
{
  "index": "metrics-2019.08.01",
  "shard": 2,
  "primary": true,
  "current_state": "unassigned",
  "unassigned_info": {
    "reason": "CLUSTER_RECOVERED",
    "at": "2019-08-01T19:22:50.394Z",
    "last_allocation_status": "no_valid_shard_copy"
  },
  "can_allocate": "no_valid_shard_copy",
  "allocate_explanation": "cannot allocate because a previous copy of the primary shard existed but can no longer be found on the nodes in the cluster",
  "node_allocation_decisions": [
    {
      "node_id": "I8hrSGVsQcO0c7DQTdmdgA",
      "node_name": "metrics-datastore-1",
      "transport_address": "192.168.25.79:9300",
      "node_attributes": {
        "ml.machine_memory": "33566429184",
        "ml.max_open_jobs": "20",
        "xpack.installed": "true",
        "ml.enabled": "true"
      },
      "node_decision": "no",
      "store": {
        "found": false
      }
    },
    {
      "node_id": "L-TlEqTJRjuQKJBMFsnSgw",
      "node_name": "metrics-datastore-0",
      "transport_address": "192.168.25.18:9300",
      "node_attributes": {
        "ml.machine_memory": "33566429184",
        "ml.max_open_jobs": "20",
        "xpack.installed": "true",
        "ml.enabled": "true"
      },
      "node_decision": "no",
      "store": {
        "found": false
      }
    },
    {
      "node_id": "zTKAccDPSZezu7iyYbVVww",
      "node_name": "metrics-datastore-2",
      "transport_address": "192.168.25.53:9300",
      "node_attributes": {
        "ml.machine_memory": "33566429184",
        "ml.max_open_jobs": "20",
        "xpack.installed": "true",
        "ml.enabled": "true"
      },
      "node_decision": "no",
      "store": {
        "found": false
      }
    }
  ]
}
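(If it helps with the diagnosis: since the explain output reports "found": false for the shard store on all three data nodes, the on-disk copies can also be cross-checked with the shard stores API. A minimal example, again assuming localhost:9200:)

# List which nodes report an on-disk copy of each shard of the index
curl -s 'http://localhost:9200/metrics-2019.08.01/_shard_stores?status=all&pretty'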