Issue with index rollover via ILM

Hi Elastic support team,

I am trying to implement index rollover via ILM. However, my test index is not rolling over at the expected size.

Note: the index lifecycle poll interval is unchanged, so it is still at the default of 10m.
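For reference, I believe the effective value can be confirmed against the cluster defaults with something like the following (assuming the setting name is indices.lifecycle.poll_interval; I have not set it anywhere, so I expect it to appear only under the defaults section as 10m):

GET _cluster/settings?include_defaults=true&filter_path=*.indices.lifecycle.poll_interval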

Policy generic-rollover-policy-test:

{
    "policy": {
        "phases": {
            "hot": {
                "min_age": "0ms",
                "actions": {
                    "rollover": {
                        "max_size": "30mb"
                    },
                    "set_priority": {
                        "priority": 100
                    }
                }
            },
            "warm": {
                "min_age": "0ms",
                "actions": {
                    "allocate": {
                        "include": {},
                        "exclude": {},
                        "require": {
                            "box_type": "warm"
                        }
                    },
                    "forcemerge": {
                        "max_num_segments": 1
                    },
                    "set_priority": {
                        "priority": 50
                    }
                }
            },
            "cold": {
                "min_age": "30d",
                "actions": {
                    "allocate": {
                        "include": {},
                        "exclude": {},
                        "require": {
                            "box_type": "warm"
                        }
                    },
                    "freeze": {},
                    "set_priority": {
                        "priority": 0
                    }
                }
            },
            "delete": {
                "min_age": "90d",
                "actions": {
                    "delete": {}
                }
            }
        }
    }
}
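To rule out a stale copy on my side, I can re-fetch the stored version of the policy with:

GET _ilm/policy/generic-rollover-policy-test

The JSON above is what I expect that to return.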

According to the above policy, the index is supposed to roll over at 30mb (the primary shard size, as per the docs).

Here are the index settings, which show that the policy is applied to the index.

{
  "usprod-kafkaconnect-000001" : {
    "settings" : {
      "index" : {
        "lifecycle" : {
          "name" : "generic-rollover-policy-test",
          "rollover_alias" : "usprod-kafkaconnect"
        },
        "routing" : {
          "allocation" : {
            "require" : {
              "box_type" : "hot"
            },
            "total_shards_per_node" : "2"
          }
        },
        "refresh_interval" : "5s",
        "number_of_shards" : "1",
        "provided_name" : "<usprod-kafkaconnect-000001>",
        "creation_date" : "1579208744548",
        "analysis" : {
          "normalizer" : {
            "default" : {
              "filter" : [
                "lowercase"
              ],
              "type" : "custom",
              "char_filter" : [ ]
            }
          }
        },
        "priority" : "100",
        "number_of_replicas" : "1",
        "uuid" : "GVBGz_LNQYSL2zBtGY-EMA",
        "version" : {
          "created" : "7030199"
        }
      }
    }
  }
}
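The settings reference the rollover_alias usprod-kafkaconnect, so as an extra sanity check the alias itself can be inspected with:

GET _alias/usprod-kafkaconnect

I would expect it to show usprod-kafkaconnect-000001 with "is_write_index" : true, which Logstash should have set up when it bootstrapped the alias.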

Here is the relevant piece of the Logstash config that writes to this index.

if [type] == "usprod-kafkaconnect" {
      elasticsearch {
        id => "usprod-kafkaconnect-output"
        hosts => <%= to_json($es_nodes) %>
        ilm_enabled => true
        ilm_rollover_alias => "usprod-kafkaconnect"
        ilm_pattern => "000001"
        ilm_policy => "generic-rollover-policy-test"
        document_id => "%{[@metadata][fingerprint]}"
      }
  }
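To see what has actually been created under this pattern (names as above), the indices can be listed with, for example:

GET _cat/indices/usprod-kafkaconnect-*?v&h=index,pri,rep,docs.count,pri.store.size&s=index

So far only usprod-kafkaconnect-000001 shows up there.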

Finally, below is the reported size of the index obtained by running GET <index_name>/_stats
(Note: output is partially truncated)

{
"_shards": {
    "total": 2,
    "successful": 2,
    "failed": 0
},
"_all": {
    "primaries": {
        "docs": {
            "count": 198306,
            "deleted": 588
        },
        "store": {
            "size_in_bytes": 42929726
        },
        "indexing": {
            "index_total": 200104,
            "index_time_in_millis": 46128,
            "index_current": 0,
            "index_failed": 0,
            "delete_total": 0,
            "delete_time_in_millis": 0,
            "delete_current": 0,
            "noop_update_total": 0,
            "is_throttled": false,
            "throttle_time_in_millis": 0
        },
        "get": {
            "total": 0,
            "time_in_millis": 0,
            "exists_total": 0,
            "exists_time_in_millis": 0,
            "missing_total": 0,
            "missing_time_in_millis": 0,
            "current": 0
        },
        "search": {
            "open_contexts": 0,
            "query_total": 154,
            "query_time_in_millis": 1663,
            "query_current": 0,
            "fetch_total": 99,
            "fetch_time_in_millis": 12529,
            "fetch_current": 0,
            "scroll_total": 0,
            "scroll_time_in_millis": 0,
            "scroll_current": 0,
            "suggest_total": 0,
            "suggest_time_in_millis": 0,
            "suggest_current": 0
        },
        "merges": {
            "current": 0,
            "current_docs": 0,
            "current_size_in_bytes": 0,
            "total": 371,
            "total_time_in_millis": 48810,
            "total_docs": 2868780,
            "total_size_in_bytes": 624105587,
            "total_stopped_time_in_millis": 0,
            "total_throttled_time_in_millis": 0,
            "total_auto_throttle_in_bytes": 20971520
        },
        "refresh": {
            "total": 1511,
            "total_time_in_millis": 33764,
            "external_total": 1510,
            "external_total_time_in_millis": 35103,
            "listeners": 0
        },
        "flush": {
            "total": 1,
            "periodic": 0,
            "total_time_in_millis": 21
        },
        "warmer": {
            "current": 0,
            "total": 1509,
            "total_time_in_millis": 27
        },
        "query_cache": {
            "memory_size_in_bytes": 0,
            "total_count": 495,
            "hit_count": 101,
            "miss_count": 394,
            "cache_size": 0,
            "cache_count": 7,
            "evictions": 7
        },
        "fielddata": {
            "memory_size_in_bytes": 0,
            "evictions": 0
        },
        "completion": {
            "size_in_bytes": 0
        },
        "segments": {
            "count": 9,
            "memory_in_bytes": 179662,
            "terms_memory_in_bytes": 132233,
            "stored_fields_memory_in_bytes": 35168,
            "term_vectors_memory_in_bytes": 0,
            "norms_memory_in_bytes": 0,
            "points_memory_in_bytes": 3321,
            "doc_values_memory_in_bytes": 8940,
            "index_writer_memory_in_bytes": 7910300,
            "version_map_memory_in_bytes": 55096,
            "fixed_bit_set_memory_in_bytes": 0,
            "max_unsafe_auto_id_timestamp": -1,
            "file_sizes": {}
        },
        "translog": {
            "operations": 200104,
            "size_in_bytes": 178010189,
            "uncommitted_operations": 167970,
            "uncommitted_size_in_bytes": 150887305,
            "earliest_last_modified_age": 0
        },
        "request_cache": {
            "memory_size_in_bytes": 0,
            "evictions": 0,
            "hit_count": 0,
            "miss_count": 0
        },
        "recovery": {
            "current_as_source": 0,
            "current_as_target": 0,
            "throttle_time_in_millis": 0
        }
    }
    }
}

Unless I am looking at the wrong field, 42929726 is the size of the primary shard in bytes, which converts to roughly 40.9mb:

"store" : {
        "size_in_bytes" : 42929726
 }

It is well past the index lifecycle poll interval, and I do not understand why the index is not rolling over to usprod-kafkaconnect-000002.
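For what it is worth, the same store size can also be cross-checked per shard (primaries vs. replicas) with, for example:

GET _cat/shards/usprod-kafkaconnect-000001?v&h=index,shard,prirep,store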

FYI, when I look at the ILM section in the UI for the index usprod-kafkaconnect-000001, I see that the current action is rollover and the current phase is hot.
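I assume the UI is reading this from the ILM explain API, which should also show the exact step ILM is currently waiting on:

GET usprod-kafkaconnect-000001/_ilm/explain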

Any help would be appreciated.
