Elasticsearch cannot handle more indexes, so it goes into read-only mode

Hi Team,

I have configured Kibana and Elasticsearch on our server. When Elasticsearch generates a lot of log data, or I do not delete the last 10 days of data, or Kibana's memory fills up, or Elasticsearch cannot handle the data, then Elasticsearch automatically goes into read-only mode and stops accepting new data.
What configuration should I set so that Elasticsearch does not go into read-only mode?

Elasticsearch goes into readonly mode when it is running out of disk space. You can do some tuning to reduce disk usage which will help up to a point. Apart from that, you can either ask Elasticsearch to store less data, or else give it more space to use.

Hi David,

Elasticsearch goes into read-only mode before the disk is full, and every 7 to 10 days Elasticsearch suddenly goes down.

Can you please provide the full output of the cluster stats API? What kind of hardware is this cluster deployed on?

Hi Christian,

We are using a single node and have not set up a cluster.

Can you still provide the output?

Hi Christian,

As you requested, I ran the command; the output is below:

{
"_nodes": {
"total": 1,
"successful": 1,
"failed": 0
},
"cluster_name": "elasticsearch",
"timestamp": 1563441575072,
"status": "yellow",
"indices": {
"count": 280,
"shards": {
  "total": 430,
  "primaries": 430,
  "replication": 0,
  "index": {
    "shards": {
      "min": 1,
      "max": 3,
      "avg": 1.5357142857142858
    },
    "primaries": {
      "min": 1,
      "max": 3,
      "avg": 1.5357142857142858
    },
    "replication": {
      "min": 0,
      "max": 0,
      "avg": 0
    }
  }
},
"docs": {
  "count": 244580918,
  "deleted": 338249
},
"store": {
  "size": "59.4gb",
  "size_in_bytes": 63796464361
},
"fielddata": {
  "memory_size": "462.4kb",
  "memory_size_in_bytes": 473560,
  "evictions": 0
},
"query_cache": {
  "memory_size": "907kb",
  "memory_size_in_bytes": 928832,
  "total_count": 6828349,
  "hit_count": 670,
  "miss_count": 6827679,
  "cache_size": 431,
  "cache_count": 1005,
  "evictions": 574
},
"completion": {
  "size": "0b",
  "size_in_bytes": 0
},
"segments": {
  "count": 3906,
  "memory": "148.1mb",
  "memory_in_bytes": 155369476,
  "terms_memory": "92.1mb",
  "terms_memory_in_bytes": 96629902,
  "stored_fields_memory": "22.3mb",
  "stored_fields_memory_in_bytes": 23405096,
  "term_vectors_memory": "0b",
  "term_vectors_memory_in_bytes": 0,
  "norms_memory": "28.4kb",
  "norms_memory_in_bytes": 29120,
  "points_memory": "11.8mb",
  "points_memory_in_bytes": 12390422,
  "doc_values_memory": "21.8mb",
  "doc_values_memory_in_bytes": 22914936,
  "index_writer_memory": "14.3mb",
  "index_writer_memory_in_bytes": 15026513,
  "version_map_memory": "1mb",
  "version_map_memory_in_bytes": 1109334,
  "fixed_bit_set": "5.6kb",
  "fixed_bit_set_memory_in_bytes": 5776,
  "max_unsafe_auto_id_timestamp": 1563388210638,
  "file_sizes": {}
}
},
"nodes": {
"count": {
  "total": 1,
  "data": 1,
  "coordinating_only": 0,
  "master": 1,
  "ingest": 1
},
"versions": [
  "6.4.2"
],
"os": {
  "available_processors": 8,
  "allocated_processors": 8,
  "names": [
    {
      "name": "Linux",
      "count": 1
    }
  ],
  "mem": {
    "total": "31.3gb",
    "total_in_bytes": 33616257024,
    "free": "14.4gb",
    "free_in_bytes": 15519318016,
    "used": "16.8gb",
    "used_in_bytes": 18096939008,
    "free_percent": 46,
    "used_percent": 54
  }
},
"process": {
  "cpu": {
    "percent": 14
  },
  "open_file_descriptors": {
    "min": 3907,
    "max": 3907,
    "avg": 3907
  }
},
"jvm": {
  "max_uptime": "1.1d",
  "max_uptime_in_millis": 101842364,
  "versions": [
    {
      "version": "1.8.0_181",
      "vm_name": "Java HotSpot(TM) 64-Bit Server VM",
      "vm_version": "25.181-b13",
      "vm_vendor": "Oracle Corporation",
      "count": 1
    }
  ],
  "mem": {
    "heap_used": "983.2mb",
    "heap_used_in_bytes": 1030965984,
    "heap_max": "989.8mb",
    "heap_max_in_bytes": 1037959168
  },
  "threads": 275
},
"fs": {
  "total": "885.3gb",
  "total_in_bytes": 950674096128,
  "free": "752gb",
  "free_in_bytes": 807454461952,
  "available": "707gb",
  "available_in_bytes": 759139438592
},
"plugins": [
  {
    "name": "ingest-user-agent",
    "version": "6.4.2",
    "elasticsearch_version": "6.4.2",
    "java_version": "1.8",
    "description": "Ingest processor that extracts information from a user agent",
    "classname": "org.elasticsearch.ingest.useragent.IngestUserAgentPlugin",
    "extended_plugins": [],
    "has_native_controller": false
  },
  {
    "name": "ingest-geoip",
    "version": "6.4.2",
    "elasticsearch_version": "6.4.2",
    "java_version": "1.8",
    "description": "Ingest processor that uses looksup geo data based on ip adresses using the Maxmind geo database",
    "classname": "org.elasticsearch.ingest.geoip.IngestGeoIpPlugin",
    "extended_plugins": [],
    "has_native_controller": false
  }
],
"network_types": {
  "transport_types": {
    "security4": 1
  },
  "http_types": {
    "security4": 1
  }
}
}
}

430 shards for 59.4gb of data, where probably 2 shards would be enough, is clearly a waste of resources.
And all that with only 1gb of heap allocated to the system is probably the root cause of any issue.

1 Like

Hi Dadoonet,

If I use 2 shards, how can I increase the heap memory from the command line?

Have a look at https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html

This topic was automatically closed 28 days after the last reply. New replies are no longer allowed.