Does closing an index release RAM and inodes?

Hi Team,

I have around 150 indices open, and my server has around 64 GB of RAM and an 8-core CPU. I believe my system will soon start complaining about RAM and inodes, so I wanted to confirm: if I close an index, will that release the RAM as well as the inodes?
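For reference, the operation I mean is the close index API; a minimal sketch (the index name is just an example):

POST /my-index-000001/_close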

How many shards is that?

It's a single standalone server, not a cluster.

A single node cluster is still a cluster.

However, the answer to this depends on how many shards you have.
Ideally you would provide us with the output from the _cluster/stats?pretty&human API.
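For example, assuming Elasticsearch is reachable on the default localhost:9200, you can fetch it with:

curl -s "http://localhost:9200/_cluster/stats?pretty&human"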

{
  "_nodes" : {
    "total" : 1,
    "successful" : 1,
    "failed" : 0
  },
  "cluster_name" : "xxxxxxxxxxxxx",
  "cluster_uuid" : "RSH2mraXQMCEl3MtzCvzag",
  "timestamp" : 1637641610370,
  "status" : "yellow",
  "indices" : {
    "count" : 281,
    "shards" : {
      "total" : 281,
      "primaries" : 281,
      "replication" : 0.0,
      "index" : {
        "shards" : {
          "min" : 1,
          "max" : 1,
          "avg" : 1.0
        },
        "primaries" : {
          "min" : 1,
          "max" : 1,
          "avg" : 1.0
        },
        "replication" : {
          "min" : 0.0,
          "max" : 0.0,
          "avg" : 0.0
        }
      }
    },
    "docs" : {
      "count" : 725697,
      "deleted" : 32
    },
    "store" : {
      "size" : "283.4mb",
      "size_in_bytes" : 297207264
    },
    "fielddata" : {
      "memory_size" : "675.6kb",
      "memory_size_in_bytes" : 691860,
      "evictions" : 0
    },
    "query_cache" : {
      "memory_size" : "63.6kb",
      "memory_size_in_bytes" : 65173,
      "total_count" : 1398,
      "hit_count" : 783,
      "miss_count" : 615,
      "cache_size" : 54,
      "cache_count" : 54,
      "evictions" : 0
    },
    "completion" : {
      "size" : "0b",
      "size_in_bytes" : 0
    },
    "segments" : {
      "count" : 869,
      "memory" : "14.1mb",
      "memory_in_bytes" : 14828944,
      "terms_memory" : "12.5mb",
      "terms_memory_in_bytes" : 13166241,
      "stored_fields_memory" : "377.6kb",
      "stored_fields_memory_in_bytes" : 386728,
      "term_vectors_memory" : "0b",
      "term_vectors_memory_in_bytes" : 0,
      "norms_memory" : "197.4kb",
      "norms_memory_in_bytes" : 202176,
      "points_memory" : "54.7kb",
      "points_memory_in_bytes" : 56107,
      "doc_values_memory" : "993.8kb",
      "doc_values_memory_in_bytes" : 1017692,
      "index_writer_memory" : "0b",
      "index_writer_memory_in_bytes" : 0,
      "version_map_memory" : "0b",
      "version_map_memory_in_bytes" : 0,
      "fixed_bit_set" : "1.3kb",
      "fixed_bit_set_memory_in_bytes" : 1400,
      "max_unsafe_auto_id_timestamp" : 1637021728239,
      "file_sizes" : { }
    }
  },
  "nodes" : {
    "count" : {
      "total" : 1,
      "data" : 1,
      "coordinating_only" : 0,
      "master" : 1,
      "ingest" : 1
    },
    "versions" : [
      "7.2.1"

Thanks!

You're definitely oversharded. 281 shards for 283 MB works out to about 1 MB per shard, which is a massive waste of heap and CPU. You will get much better performance by reducing that shard count rather than by closing indices. See the sketch below for one way to do that.
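One rough way to reduce the shard count is to reindex several small single-shard indices into one larger index with the _reindex API; a minimal sketch (the index names are only placeholders):

POST _reindex
{
  "source" : { "index" : "logs-2021.11.23" },
  "dest" : { "index" : "logs-2021.11" }
}

You would repeat this for each small index and delete the originals once you have verified the document counts in the combined index.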

Also, 7.2 is EOL and unsupported; 7.15 is the latest release. Please upgrade as a matter of urgency.

Since this is a single device/server, will I be able to increase the shards? Or is it always recommended to form a two-server cluster?

You can, yes.
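As an example, on a single node you control the shard count at index creation time; a minimal sketch (the index name and values are just examples, and replicas are set to 0 because there is no second node to hold them):

PUT /my-new-index
{
  "settings" : {
    "index.number_of_shards" : 3,
    "index.number_of_replicas" : 0
  }
}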
