Hi all, expert help needed.
I have a single-node cluster with a dedicated 7.4 TB hard drive. The server has 32 GB of RAM. Elasticsearch was configured automatically, and here are some of its health/stats:
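For reference, these are the outputs of the standard health and node-stats endpoints:

curl -s 'http://localhost:9200/_cluster/health?pretty'
curl -s 'http://localhost:9200/_nodes/stats?pretty'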
health:
{
  "cluster_name" : "elasticsearch",
  "status" : "yellow",
  "timed_out" : false,
  "number_of_nodes" : 1,
  "number_of_data_nodes" : 1,
  "active_primary_shards" : 20,
  "active_shards" : 20,
  "relocating_shards" : 0,
  "initializing_shards" : 0,
  "unassigned_shards" : 1,
  "delayed_unassigned_shards" : 0,
  "number_of_pending_tasks" : 0,
  "number_of_in_flight_fetch" : 0,
  "task_max_waiting_in_queue_millis" : 0,
  "active_shards_percent_as_number" : 95.23809523809523
}
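I assume the single unassigned shard is a replica that has nowhere to go on a one-node cluster, but I haven't dug into it yet. This is the check I was planning to run (a minimal sketch; with an empty body it explains the first unassigned shard it finds):

curl -s 'http://localhost:9200/_cluster/allocation/explain?pretty'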
stats:
{
  "_nodes" : {
    "total" : 1,
    "successful" : 1,
    "failed" : 0
  },
  "cluster_name" : "elasticsearch",
  "nodes" : {
    "e8wO2rMwQ2W00nYlOwkTVA" : {
      "timestamp" : 1690874548557,
      "name" : "node-name",
      "transport_address" : "127.0.0.1:9300",
      "host" : "127.0.0.1",
      "ip" : "127.0.0.1:9300",
      "roles" : [
        "data",
        "data_cold",
        "data_content",
        "data_frozen",
        "data_hot",
        "data_warm",
        "ingest",
        "master",
        "ml",
        "remote_cluster_client",
        "transform"
      ],
      "attributes" : {
        "ml.machine_memory" : "34358714368",
        "xpack.installed" : "true",
        "ml.max_jvm_size" : "17179869184"
      },
      "indices" : {
        "docs" : {
          "count" : 12415,
          "deleted" : 884709
        },
        "shard_stats" : {
          "total_count" : 20
        },
        ...
      "jvm" : {
        "timestamp" : 1690874548572,
        "uptime_in_millis" : 944135426,
        "mem" : {
          "heap_used_in_bytes" : 8950064208,
          "heap_used_percent" : 52,
          "heap_committed_in_bytes" : 17179869184,
          "heap_max_in_bytes" : 17179869184,
          "non_heap_used_in_bytes" : 240232544,
          "non_heap_committed_in_bytes" : 249692160,
          "pools" : {
            "young" : {
              "used_in_bytes" : 8732540928,
              "max_in_bytes" : 0,
              "peak_used_in_bytes" : 10292822016,
              "peak_max_in_bytes" : 0
            },
            "old" : {
              "used_in_bytes" : 210986496,
              "max_in_bytes" : 17179869184,
              "peak_used_in_bytes" : 210986496,
              "peak_max_in_bytes" : 17179869184
            },
            "survivor" : {
              "used_in_bytes" : 6536784,
              "max_in_bytes" : 0,
              "peak_used_in_bytes" : 78485600,
              "peak_max_in_bytes" : 0
            }
          }
        },
        ...
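One thing that stands out to me in the stats is the ratio of live to deleted docs (12,415 vs. 884,709). If a per-index breakdown helps, I can pull it like this (the columns are just the ones that seemed relevant):

curl -s 'http://localhost:9200/_cat/indices?v&h=index,health,pri,rep,docs.count,docs.deleted,store.size'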
Right now there are very few documents, but in the near future there will be several million.
The automatic configuration gave me a 16 GB heap and 20 shards. Is that sufficient, or should I increase either of them? I ask because even now some search queries fail with [search_phase_execution_exception] all shards failed.
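Since replicas can never be assigned on a single node, I was also wondering whether I should simply drop them to clear the yellow status. I assume it would look something like this (a sketch that applies to all existing indices; please correct me if this is the wrong direction):

curl -s -X PUT 'http://localhost:9200/_all/_settings' \
  -H 'Content-Type: application/json' \
  -d '{ "index" : { "number_of_replicas" : 0 } }'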
If you have suggestions for tuning, or links to good articles, I would be very grateful.