How to merge shards in elasticsearch

My cluster status returns as follows,

{
"_nodes": {
"total": 1,
"successful": 1,
"failed": 0
},
"cluster_name": "elasticsearch",
"timestamp": 1555593693195,
"status": "yellow",
"indices": {
"count": 15,
"shards": {
"total": 23,
"primaries": 23,
"replication": 0,
"index": {
"shards": {
"min": 1,
"max": 5,
"avg": 1.5333333333333334
},
"primaries": {
"min": 1,
"max": 5,
"avg": 1.5333333333333334
},
"replication": {
"min": 0,
"max": 0,
"avg": 0
}
}
},
"docs": {
"count": 2120836,
"deleted": 194362
},
"store": {
"size": "945.3mb",
"size_in_bytes": 991320586
},
"fielddata": {
"memory_size": "928b",
"memory_size_in_bytes": 928,
"evictions": 0
},
"query_cache": {
"memory_size": "0b",
"memory_size_in_bytes": 0,
"total_count": 0,
"hit_count": 0,
"miss_count": 0,
"cache_size": 0,
"cache_count": 0,
"evictions": 0
},
"completion": {
"size": "0b",
"size_in_bytes": 0
},
"segments": {
"count": 155,
"memory": "4.1mb",
"memory_in_bytes": 4326515,
"terms_memory": "1.9mb",
"terms_memory_in_bytes": 2046106,
"stored_fields_memory": "360.5kb",
"stored_fields_memory_in_bytes": 369152,
"term_vectors_memory": "0b",
"term_vectors_memory_in_bytes": 0,
"norms_memory": "150kb",
"norms_memory_in_bytes": 153664,
"points_memory": "325kb",
"points_memory_in_bytes": 332901,
"doc_values_memory": "1.3mb",
"doc_values_memory_in_bytes": 1424692,
"index_writer_memory": "0b",
"index_writer_memory_in_bytes": 0,
"version_map_memory": "0b",
"version_map_memory_in_bytes": 0,
"fixed_bit_set": "0b",
"fixed_bit_set_memory_in_bytes": 0,
"max_unsafe_auto_id_timestamp": 1555593379495,
"file_sizes": {}
}
},
"nodes": {
"count": {
"total": 1,
"data": 1,
"coordinating_only": 0,
"master": 1,
"ingest": 1
},
"versions": [
"6.4.0"
],
"os": {
"available_processors": 8,
"allocated_processors": 8,
"names": [
{
"name": "Windows 10",
"count": 1
}
],
"mem": {
"total": "7.8gb",
"total_in_bytes": 8470802432,
"free": "5.6gb",
"free_in_bytes": 6118526976,
"used": "2.1gb",
"used_in_bytes": 2352275456,
"free_percent": 72,
"used_percent": 28
}
},
"process": {
"cpu": {
"percent": 0
},
"open_file_descriptors": {
"min": -1,
"max": -1,
"avg": 0
}
},
"jvm": {
"max_uptime": "5.4m",
"max_uptime_in_millis": 326286,
"versions": [
{
"version": "9",
"vm_name": "Java HotSpot(TM) 64-Bit Server VM",
"vm_version": "9+181",
"vm_vendor": "Oracle Corporation",
"count": 1
}
],
"mem": {
"heap_used": "393.9mb",
"heap_used_in_bytes": 413092408,
"heap_max": "989.8mb",
"heap_max_in_bytes": 1037959168
},
"threads": 100
},
"fs": {
"total": "115.9gb",
"total_in_bytes": 124510007296,
"free": "50.4gb",
"free_in_bytes": 54195118080,
"available": "50.4gb",
"available_in_bytes": 54195118080
},
"plugins": ,
"network_types": {
"transport_types": {
"security4": 1
},
"http_types": {
"security4": 1
}
}
}
}

From this, it is evident that there are 23 shards for approximately 1GB of data. My Kibana sometimes becomes unresponsive. Are 23 shards fine for 1GB of data? Should I merge shards for better performance? How do I merge them?

It shouldn't be a big problem, but it is a little excessive for that amount of data on that size heap.

Have a look at the _shrink API :slight_smile:

Thanks @warkolm. Do you mean I should shrink the shards? Or is shrinking not preferred for this case?

This topic was automatically closed 28 days after the last reply. New replies are no longer allowed.