ES cluster is down: indexing is not supported on a read-only engine

Hi all,
we have had a very severe issue for the past 24 hours: our ES cluster is down and no data is getting from Logstash into ES.
The Logstash logs show unsupported_operation_exception: indexing is not supported on a read-only engine.
There is no disk space issue and the indices are open (manual indexing works).

Any help will be much appreciated.

Thanks,
Moshe.

What is the output of the cluster health and cluster stats APIs?
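
In case it saves anyone a lookup, both come from the standard cluster APIs (?pretty just formats the JSON for readability):

GET /_cluster/health?pretty
GET /_cluster/stats?pretty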

Hi,
Cluster Health:
{
  "cluster_name" : "credorax-elk-prod",
  "status" : "green",
  "timed_out" : false,
  "number_of_nodes" : 6,
  "number_of_data_nodes" : 6,
  "active_primary_shards" : 1222,
  "active_shards" : 2448,
  "relocating_shards" : 2,
  "initializing_shards" : 0,
  "unassigned_shards" : 0,
  "delayed_unassigned_shards" : 0,
  "number_of_pending_tasks" : 0,
  "number_of_in_flight_fetch" : 0,
  "task_max_waiting_in_queue_millis" : 0,
  "active_shards_percent_as_number" : 100.0
}

Stats:
{
  "_nodes" : {
    "total" : 6,
    "successful" : 6,
    "failed" : 0
  },
  "cluster_name" : "credorax-elk-prod",
  "cluster_uuid" : "IPy2nbl6RZWNev8WfZH_sg",
  "timestamp" : 1572682271315,
  "status" : "green",
  "indices" : {
    "count" : 550,
    "shards" : {
      "total" : 2448,
      "primaries" : 1222,
      "replication" : 1.0032733224222585,
      "index" : {
        "shards" : {
          "min" : 2,
          "max" : 12,
          "avg" : 4.450909090909091
        },
        "primaries" : {
          "min" : 1,
          "max" : 6,
          "avg" : 2.2218181818181817
        },
        "replication" : {
          "min" : 1.0,
          "max" : 5.0,
          "avg" : 1.0072727272727273
        }
      }
    },
    "docs" : {
      "count" : 32319209453,
      "deleted" : 2982642
    },
    "store" : {
      "size_in_bytes" : 28455475315490
    },
    "fielddata" : {
      "memory_size_in_bytes" : 278884020,
      "evictions" : 0
    },
    "query_cache" : {
      "memory_size_in_bytes" : 362807680,
      "total_count" : 3333650,
      "hit_count" : 385333,
      "miss_count" : 2948317,
      "cache_size" : 4313,
      "cache_count" : 4313,
      "evictions" : 0
    },
    "completion" : {
      "size_in_bytes" : 0
    },
    "segments" : {
      "count" : 17285,
      "memory_in_bytes" : 44191871950,
      "terms_memory_in_bytes" : 39207382835,
      "stored_fields_memory_in_bytes" : 3437104248,
      "term_vectors_memory_in_bytes" : 0,
      "norms_memory_in_bytes" : 1224256,
      "points_memory_in_bytes" : 1487544831,
      "doc_values_memory_in_bytes" : 58615780,
      "index_writer_memory_in_bytes" : 7270701,
      "version_map_memory_in_bytes" : 178843,
      "fixed_bit_set_memory_in_bytes" : 112992,
      "max_unsafe_auto_id_timestamp" : 1572645593793,
      "file_sizes" : { }
    }
  },
  "nodes" : {
    "count" : {
      "total" : 6,
      "data" : 6,
      "coordinating_only" : 0,
      "master" : 6,
      "ingest" : 6
    },
    "versions" : [
      "6.7.2"
    ],
    "os" : {
      "available_processors" : 96,
      "allocated_processors" : 96,
      "names" : [
        {
          "name" : "Linux",
          "count" : 6
        }
      ],
      "pretty_names" : [
        {
          "pretty_name" : "CentOS Linux 7 (Core)",
          "count" : 6
        }
      ],
      "mem" : {
        "total_in_bytes" : 398111711232,
        "free_in_bytes" : 27745533952,
        "used_in_bytes" : 370366177280,
        "free_percent" : 7,
        "used_percent" : 93
      }
    },
    "process" : {
      "cpu" : {
        "percent" : 21
      },
      "open_file_descriptors" : {
        "min" : 2539,
        "max" : 3492,
        "avg" : 2850
      }
    },
    "jvm" : {
      "max_uptime_in_millis" : 2364849,
      "versions" : [
        {
          "version" : "12.0.1",
          "vm_name" : "OpenJDK 64-Bit Server VM",
          "vm_version" : "12.0.1+12",
          "vm_vendor" : "Oracle Corporation",
          "count" : 6
        }
      ],
      "mem" : {
        "heap_used_in_bytes" : 73909170144,
        "heap_max_in_bytes" : 194740748288
      },
      "threads" : 1267
    },
    "fs" : {
      "total_in_bytes" : 75769646678016,
      "free_in_bytes" : 47260913737728,
      "available_in_bytes" : 47260913737728
    },
    "plugins" : [
      {
        "name" : "repository-s3",
        "version" : "6.7.2",
        "elasticsearch_version" : "6.7.2",
        "java_version" : "1.8",
        "description" : "The S3 repository plugin adds S3 repositories",
        "classname" : "org.elasticsearch.repositories.s3.S3RepositoryPlugin",
        "extended_plugins" : [ ],
        "has_native_controller" : false
      }
    ],
    "network_types" : {
      "transport_types" : {
        "security4" : 6
      },
      "http_types" : {
        "security4" : 6
      }
    }
  }
}

Have you checked the status/settings of all the indices Logstash could be writing to? Is there anything in the Elasticsearch logs?
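
A quick way to check the first part is the _cat indices API; the status column shows whether each index is open or closed:

GET /_cat/indices?v&h=index,status,health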

I suspect that at some point you ran out of disk space, even though there is disk space now. When disk usage on a node crosses the flood-stage watermark (95% by default), Elasticsearch puts every index with a shard on that node into read-only mode by setting index.blocks.read_only_allow_delete, and on this version the block is not removed automatically once space is freed. You will need to reset that read-only block yourself.

https://www.elastic.co/guide/en/elasticsearch/reference/7.4/disk-allocator.html

https://www.elastic.co/guide/en/elasticsearch/reference/7.4/index-modules.html
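
To find which indices picked up a block, you can filter the get settings API by setting name (wildcards are supported in the name filter):

GET /_all/_settings/index.blocks.*

Any index that reports "index.blocks.read_only_allow_delete" : "true" needs the block cleared: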

PUT /your_index/_settings
{
  "index.blocks.read_only_allow_delete": null,
  "index.blocks.read_only": false
}

Clarification: false explicitly sets the setting to false, while null removes the setting from the index's dynamic settings altogether, cleaning up unnecessary metadata. They have the same effect on the index, but null leaves less clutter behind.
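
You can verify the difference by reading the settings back with the same name filter as above:

GET /your_index/_settings/index.blocks.*

After false the response still lists "index.blocks.read_only" : "false"; after null the response is empty, because the setting is gone and the index falls back to the default.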

You may need to run that on multiple indices.
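
If many indices are affected, you can clear the block on all of them in one call by targeting _all (using null for both settings, per the clarification above):

PUT /_all/_settings
{
  "index.blocks.read_only_allow_delete": null,
  "index.blocks.read_only": null
}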

Also, do you have any frozen indices that you are attempting to index into?
Indexing into a frozen index normally fails with blocked by: [FORBIDDEN/8/index write (api)], because frozen indices have index.blocks.write: true by default; if that block has been removed, you may get the exception you are seeing instead.
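
If you want to rule that out, the index.frozen setting and the _unfreeze API (both available since 6.6) will confirm and revert it; your_index is the same placeholder as above:

GET /your_index/_settings/index.frozen

POST /your_index/_unfreeze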
