How to recover my Elasticsearch cluster status to `green`

My Elasticsearch cluster has 3 nodes, and every node is both a data node and a master-eligible node. Some shards are UNASSIGNED. How can I resolve this problem?
Elasticsearch version: 2.2.0

root@elasticsearch-logging-v1-6rldl:/elasticsearch/config# curl localhost:9200/_cluster/health?pretty
{
"cluster_name" : "kubernetes-logging",
"status" : "red",
"timed_out" : false,
"number_of_nodes" : 3,
"number_of_data_nodes" : 3,
"active_primary_shards" : 13,
"active_shards" : 25,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 5,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 83.33333333333334
}
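
To see exactly which shards are unassigned and why, one option (assuming the `unassigned.reason` column of the `_cat/shards` API, which should be available in 2.x) is:

curl 'localhost:9200/_cat/shards?h=index,shard,prirep,state,unassigned.reason'

A reason such as NODE_LEFT or ALLOCATION_FAILED would narrow down what to fix next.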
root@elasticsearch-logging-v1-6rldl:/elasticsearch/config# curl localhost:9200/_cat/nodes
172.17.28.12 172.17.28.12 34 99 0.20 d m elasticsearch-logging-v1-4ln4d
172.17.28.13 172.17.28.13 29 99 0.20 d m elasticsearch-logging-v1-6rldl
172.17.18.16 172.17.18.16 25 99 1.13 d * elasticsearch-logging-v1-hqvfn
root@elasticsearch-logging-v1-6rldl:/elasticsearch/config# curl localhost:9200/_cat/indices
red open logstash-2018.03.26 5 1 4345964 0 1.6gb 826.1mb
yellow open logstash-2018.03.27 5 1 56808073 0 25.1gb 13.9gb
red open logstash-2018.03.28 5 1 23086497 0 11.6gb 5.7gb
close logstash-2018.03.20
close logstash-2018.03.22
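
(Here red means at least one primary shard of the index is unassigned, while yellow means all primaries are active but some replicas are missing; the two closed indices report no health at all.)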
root@elasticsearch-logging-v1-6rldl:/elasticsearch/config# curl localhost:9200/_cat/allocation
11 16.3gb 33.9gb 5.3gb 39.2gb 86 172.17.28.12 172.17.28.12 elasticsearch-logging-v1-4ln4d
2 2.9gb 33.9gb 5.3gb 39.2gb 86 172.17.28.13 172.17.28.13 elasticsearch-logging-v1-6rldl
12 19gb 30.6gb 8.5gb 39.2gb 78 172.17.18.16 172.17.18.16 elasticsearch-logging-v1-hqvfn
5 UNASSIGNED
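
Note that two of the three nodes report 86 in the disk.percent column, which is above the default low disk watermark of 85% (`cluster.routing.allocation.disk.watermark.low`); if that default is in effect, Elasticsearch will not allocate new shards to those two nodes. Freeing disk space (for example by deleting old logstash-* indices) is the safe fix; as a temporary workaround, the watermark can be raised dynamically, e.g.:

curl -XPUT localhost:9200/_cluster/settings -d '{
  "transient" : { "cluster.routing.allocation.disk.watermark.low" : "90%" }
}'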

root@elasticsearch-logging-v1-6rldl:/elasticsearch/config# curl localhost:9200/_cluster/health?level=indices
{"cluster_name":"kubernetes-logging","status":"red","timed_out":false,"number_of_nodes":3,"number_of_data_nodes":3,"active_primary_shards":13,"active_shards":25,"relocating_shards":0,"initializing_shards":0,"unassigned_shards":5,"delayed_unassigned_shards":0,"number_of_pending_tasks":0,"number_of_in_flight_fetch":0,"task_max_waiting_in_queue_millis":0,"active_shards_percent_as_number":83.33333333333334,
"indices":{"logstash-2018.03.26":{"status":"red","number_of_shards":5,"number_of_replicas":1,"active_primary_shards":4,"active_shards":8,"relocating_shards":0,"initializing_shards":0,"unassigned_shards":2},
"logstash-2018.03.27":{"status":"yellow","number_of_shards":5,"number_of_replicas":1,"active_primary_shards":5,"active_shards":9,"relocating_shards":0,"initializing_shards":0,"unassigned_shards":1},
"logstash-2018.03.28":{"status":"red","number_of_shards":5,"number_of_replicas":1,"active_primary_shards":4,"active_shards":8,"relocating_shards":0,"initializing_shards":0,"unassigned_shards":2}}}
