Metricbeat index lifecycle management (ILM) policy not rolling over

ECE 2.3
ES and Kibana v7.3.0

I'm having trouble getting an index lifecycle policy to roll over. I currently have an index called metricbeat-7.0.0-2019.10.21-000001 with an alias of metricbeat-7.0.0. The index has three primary shards and one replica. I attached an index lifecycle policy called metricbeat-7.0.0 to the metricbeat-7.0.0-2019.10.21-000001 index, with a maximum index size of 150GB, a maximum of 2,000,000,000 docs, and a maximum age of 30 days. If I understand the relationship between shard size and max index size correctly, you multiply your target shard size by the number of primary shards; in this example that is 3 × 50GB = 150GB. As of right now the index is way past 150GB and still has not rolled over. Details below.
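
For anyone following along, the ILM explain API is probably the quickest way to see what ILM thinks this index is doing (the index name below is the one from my setup; the endpoint itself is standard in 7.x):

# Show the current phase/action/step for the managed index
GET metricbeat-7.0.0-2019.10.21-000001/_ilm/explain

If a step has failed, this should report "step" : "ERROR" along with a step_info message describing why.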

GET _cat/aliases/metricbeat*

metricbeat-7.0.0 metricbeat-7.0.0-2019.10.21-000001 - - -
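
One thing the _cat/aliases output doesn't include on 7.3 is whether the alias is flagged as the write index, which, as I understand it, matters for rollover. Querying the alias on the index directly should show it:

# Check for "is_write_index" : true on the metricbeat-7.0.0 alias
GET metricbeat-7.0.0-2019.10.21-000001/_alias

If is_write_index isn't true there, that seems worth fixing before anything else.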
GET /_template/metricbeat-7.0.0
{
  "metricbeat-7.0.0" : {
    "order" : 1,
    "index_patterns" : [
      "metricbeat-7.0.0-*"
    ],
    "settings" : {
      "index" : {
        "lifecycle" : {
          "name" : "metricbeat-7.0.0",
          "rollover_alias" : "metricbeat-7.0.0"
        },
        "codec" : "best_compression",
        "mapping" : {
          "total_fields" : {
            "limit" : "10000"
          }
        },
        "refresh_interval" : "5s",
        "number_of_shards" : "3",
        "query" : {
          ..... *DELETED THIS INFO
		  },
    "aliases" : { }
  }
}
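
Since a template only applies when an index is created, I think it's also worth confirming the live index actually carries the lifecycle settings (filter_path here just trims the response and is a standard query parameter):

# Verify index.lifecycle.name and index.lifecycle.rollover_alias on the live index
GET metricbeat-7.0.0-2019.10.21-000001/_settings?filter_path=*.settings.index.lifecycle

If those values don't match the template, ILM would be managing the index against the wrong policy or alias.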
GET _ilm/policy
{
  "metricbeat-7.0.0" : {
    "version" : 5,
    "modified_date" : "2019-10-29T14:02:00.819Z",
    "policy" : {
      "phases" : {
        "hot" : {
          "min_age" : "0ms",
          "actions" : {
            "rollover" : {
              "max_size" : "150gb",
              "max_age" : "30d",
              "max_docs" : 2000000000
            },
            "set_priority" : {
              "priority" : 100
            }
          }
        },
        "delete" : {
          "min_age" : "93d",
          "actions" : {
            "delete" : { }
          }
        }
      }
    }
  },
  "watch-history-ilm-policy" : {
    "version" : 1,
    "modified_date" : "2019-09-11T19:40:10.125Z",
    "policy" : {
      "phases" : {
        "delete" : {
          "min_age" : "7d",
          "actions" : {
            "delete" : { }
          }
        }
      }
    }
  }
}
GET _ilm/status
{
  "operation_mode" : "RUNNING"
}
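
Worth noting: ILM only evaluates rollover conditions periodically, controlled by indices.lifecycle.poll_interval, which defaults to 10 minutes. The effective value can be checked like this:

# Show the poll interval, including the default if it was never overridden
GET _cluster/settings?include_defaults=true&filter_path=*.indices.lifecycle.poll_interval

A 10-minute interval means rollover never fires exactly at the threshold, though on its own it shouldn't let an index run far past 150GB.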
GET _cluster/settings
{
  "persistent" : {
    "action" : {
      "auto_create_index" : "true"
    },
    "cluster" : {
      "routing" : {
        "allocation" : {
          "disk" : {
            "threshold_enabled" : "true"
          }
        }
      },
      "indices" : {
        "close" : {
          "enable" : "false"
        }
      }
    },
    "xpack" : {
      "monitoring" : {
        "collection" : {
          "enabled" : "true"
        }
      }
    }
  },
  "transient" : {
    "action" : {
      "auto_create_index" : "true"
    },
    "cluster" : {
      "routing" : {
        "allocation" : {
          "disk" : {
            "threshold_enabled" : "true"
          },
          "exclude" : {
            "_name" : "no_instances_excluded"
          },
          "awareness" : {
            "attributes" : "region,availability_zone,logical_availability_zone"
          },
          "enable" : "all"
        }
      },
      "indices" : {
        "close" : {
          "enable" : "false"
        }
      }
    }
  }
}
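
To rule out a misconfigured alias, a dry-run rollover against the alias should be harmless; it evaluates the conditions without actually rolling anything over (the conditions below are copied from my policy):

# Dry run: reports per-condition true/false without creating a new index
POST metricbeat-7.0.0/_rollover?dry_run
{
  "conditions": {
    "max_size": "150gb",
    "max_age": "30d",
    "max_docs": 2000000000
  }
}

If the alias setup were broken (for example, not resolving to a single write index), I'd expect this call to error out rather than return the condition results.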

Bump :slight_smile:
So this did eventually roll over, but at 300GB. I'm not exactly sure why that is, so any insight would be appreciated.
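
One theory: max_size is evaluated against the primary shards only, while the size usually reported for an index includes replicas. With one replica, 150GB of primaries shows up as roughly 300GB of total store, which lines up with when this rolled over. Comparing the two columns should confirm or rule that out:

# pri.store.size counts primaries only; store.size includes the replica copies
GET _cat/indices/metricbeat-*?v&h=index,pri,rep,docs.count,pri.store.size,store.size

If pri.store.size sat around 150GB at rollover while store.size read about 300GB, the policy was doing exactly what it was configured to do.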