I set up an Elasticsearch server, but today it stopped, and the log reported the following error:
[ERROR][o.e.b.ElasticsearchUncaughtExceptionHandler] [main-node] fatal error in thread [elasticsearch[main-node][search][T#7]], exiting
java.lang.OutOfMemoryError: Java heap space
My server has 8 GB of RAM and 160 GB of disk space, and it is dedicated exclusively to Elasticsearch.
Unfortunately, my knowledge of Elasticsearch and Java is very limited, so I don't know what may be wrong with my server settings or what I can do to improve them.
The results of the cluster health and cluster stats queries are:
Cluster Health:
{
"status" : "yellow",
"timed_out" : false,
"number_of_nodes" : 1,
"number_of_data_nodes" : 1,
"active_primary_shards" : 6,
"active_shards" : 6,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 6,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue" : "0s",
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent" : "50.0%",
"active_shards_percent_as_number" : 50.0
}
Cluster Stats:
{
"_nodes" : {
"total" : 1,
"successful" : 1,
"failed" : 0
},
"timestamp" : 1619816028246,
"status" : "yellow",
"indices" : {
"count" : 6,
"shards" : {
"total" : 6,
"primaries" : 6,
"replication" : 0.0,
"index" : {
"shards" : {
"min" : 1,
"max" : 1,
"avg" : 1.0
},
"primaries" : {
"min" : 1,
"max" : 1,
"avg" : 1.0
},
"replication" : {
"min" : 0.0,
"max" : 0.0,
"avg" : 0.0
}
}
},
"store" : {
"size" : "51.3gb",
"size_in_bytes" : 55094870917
},
"fielddata" : {
"memory_size" : "2.7kb",
"memory_size_in_bytes" : 2840,
"evictions" : 0
},
"query_cache" : {
"memory_size" : "12.8mb",
"memory_size_in_bytes" : 13425080,
"total_count" : 18312,
"hit_count" : 2289,
"miss_count" : 16023,
"cache_size" : 506,
"cache_count" : 506,
"evictions" : 0
},
"completion" : {
"size" : "0b",
"size_in_bytes" : 0
},
"segments" : {
"count" : 89,
"memory" : "59.8mb",
"memory_in_bytes" : 62711188,
"terms_memory" : "30.8mb",
"terms_memory_in_bytes" : 32377094,
"stored_fields_memory" : "21.7mb",
"stored_fields_memory_in_bytes" : 22810752,
"term_vectors_memory" : "0b",
"term_vectors_memory_in_bytes" : 0,
"norms_memory" : "78.8kb",
"norms_memory_in_bytes" : 80704,
"points_memory" : "6.5mb",
"points_memory_in_bytes" : 6895826,
"doc_values_memory" : "533.9kb",
"doc_values_memory_in_bytes" : 546812,
"index_writer_memory" : "0b",
"index_writer_memory_in_bytes" : 0,
"version_map_memory" : "138b",
"version_map_memory_in_bytes" : 138,
"fixed_bit_set" : "24.2mb",
"fixed_bit_set_memory_in_bytes" : 25450456,
"max_unsafe_auto_id_timestamp" : -1,
"file_sizes" : { }
}
},
"nodes" : {
"count" : {
"total" : 1,
"data" : 1,
"coordinating_only" : 0,
"master" : 1,
"ingest" : 1
},
"versions" : [
"6.8.15"
],
"os" : {
"available_processors" : 4,
"allocated_processors" : 4,
"names" : [
{
"name" : "Linux",
"count" : 1
}
],
"pretty_names" : [
{
"pretty_name" : "UbuntuLTS",
"count" : 1
}
],
"mem" : {
"total" : "7.7gb",
"total_in_bytes" : 8363642880,
"free" : "1.5gb",
"free_in_bytes" : 1707053056,
"used" : "6.1gb",
"used_in_bytes" : 6656589824,
"free_percent" : 20,
"used_percent" : 80
}
},
"process" : {
"cpu" : {
"percent" : 25
},
"open_file_descriptors" : {
"min" : 339,
"max" : 339,
"avg" : 339
}
},
"jvm" : {
"max_uptime" : "7m",
"max_uptime_in_millis" : 421125,
"versions" : [
{
"version" : "1.8.0_201",
"vm_name" : "Java HotSpot(TM) 64-Bit Server VM",
"vm_version" : "25.201-b09",
"vm_vendor" : "Oracle Corporation",
"count" : 1
}
],
"mem" : {
"heap_used" : "710.6mb",
"heap_used_in_bytes" : 745131592,
"heap_max" : "990.7mb",
"heap_max_in_bytes" : 1038876672
},
"threads" : 62
},
"fs" : {
"total" : "154.8gb",
"total_in_bytes" : 166318571520,
"free" : "88.1gb",
"free_in_bytes" : 94688387072,
"available" : "88.1gb",
"available_in_bytes" : 94671609856
},
"plugins" : [ ],
"network_types" : {
"transport_types" : {
"security4" : 1
},
"http_types" : {
"security4" : 1
}
}
}
What can I do to prevent this from happening again?