ILM waiting for shards to be allocated

Hi,

I have a cluster on Elastic Cloud using the Hot-Warm Architecture template (v7.5.1). I've set up an ILM policy that moves indices to the warm nodes after 1 hour (for testing purposes).

Here is the policy, as defined in Kibana:

{
  "jsi": {
    "version": 4,
    "modified_date": "2020-02-03T15:44:46.701Z",
    "policy": {
      "phases": {
        "warm": {
          "min_age": "1h",
          "actions": {
            "allocate": {
              "include": {},
              "exclude": {},
              "require": {
                "data": "warm"
              }
            }
          }
        },
        "delete": {
          "min_age": "3h",
          "actions": {
            "delete": {}
          }
        }
      }
    }
  }
}
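(For reference, I think the same policy corresponds to an ILM API request along these lines; the policy name jsi is the one shown in the JSON above:)

PUT _ilm/policy/jsi
{
  "policy": {
    "phases": {
      "warm": {
        "min_age": "1h",
        "actions": {
          "allocate": {
            "require": { "data": "warm" }
          }
        }
      },
      "delete": {
        "min_age": "3h",
        "actions": {
          "delete": {}
        }
      }
    }
  }
}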

After 1 hour, I get this error in Kibana:

[screenshot of the error shown in Kibana]

Here is the allocation explain output for the index.
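(For context, this is the response from the cluster allocation explain API; a request along these lines, with the shard parameters matching the output below, should reproduce it:)

GET _cluster/allocation/explain
{
  "index": "jsi-0000001",
  "shard": 0,
  "primary": true
}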

{
  "index": "jsi-0000001",
  "shard": 0,
  "primary": true,
  "current_state": "started",
  "current_node": {
    "id": "aAX5r3cPT1Ksi9iBRRNvPA",
    "name": "instance-0000000012",
    "transport_address": "10.43.1.90:19701",
    "attributes": {
      "logical_availability_zone": "zone-0",
      "server_name": "instance-0000000012.050d06d5e3ac4a6e81fbb9cd7bd969b9",
      "availability_zone": "europe-west1-d",
      "xpack.installed": "true",
      "data": "hot",
      "instance_configuration": "gcp.data.highio.1",
      "region": "unknown-region"
    }
  },
  "can_remain_on_current_node": "no",
  "can_remain_decisions": [
    {
      "decider": "filter",
      "decision": "NO",
      "explanation": "node does not match index setting [index.routing.allocation.require] filters [data:\"warm\"]"
    }
  ],
  "can_move_to_other_node": "no",
  "move_explanation": "cannot move shard to another node, even though it is not allowed to remain on its current node",
  "node_allocation_decisions": [
    {
      "node_id": "473jHBkFRIiHkPGUXyV0Bg",
      "node_name": "instance-0000000013",
      "transport_address": "10.43.1.58:19088",
      "node_attributes": {
        "logical_availability_zone": "zone-1",
        "server_name": "instance-0000000013.050d06d5e3ac4a6e81fbb9cd7bd969b9",
        "availability_zone": "europe-west1-c",
        "xpack.installed": "true",
        "data": "hot",
        "instance_configuration": "gcp.data.highio.1",
        "region": "unknown-region"
      },
      "node_decision": "no",
      "weight_ranking": 1,
      "deciders": [
        {
          "decider": "filter",
          "decision": "NO",
          "explanation": "node does not match index setting [index.routing.allocation.require] filters [data:\"warm\"]"
        },
        {
          "decider": "same_shard",
          "decision": "NO",
          "explanation": "the shard cannot be allocated to the same node on which a copy of the shard already exists [[jsi-0000001][0], node[473jHBkFRIiHkPGUXyV0Bg], [R], s[STARTED], a[id=7J43qRclTVS3kwaMRGMdBg]]"
        },
        {
          "decider": "awareness",
          "decision": "NO",
          "explanation": "there are too many copies of the shard allocated to nodes with attribute [availability_zone], there are [2] total configured shard copies for this shard id and [2] total attribute values, expected the allocated shard count per attribute [2] to be less than or equal to the upper bound of the required number of shards per attribute [1]"
        }
      ]
    },
    {
      "node_id": "napclrkuS26H_H5o3oyfJg",
      "node_name": "instance-0000000010",
      "transport_address": "10.43.1.123:19458",
      "node_attributes": {
        "logical_availability_zone": "zone-1",
        "server_name": "instance-0000000010.050d06d5e3ac4a6e81fbb9cd7bd969b9",
        "availability_zone": "europe-west1-d",
        "xpack.installed": "true",
        "data": "warm",
        "instance_configuration": "gcp.data.highstorage.1",
        "region": "unknown-region"
      },
      "node_decision": "no",
      "weight_ranking": 2,
      "deciders": [
        {
          "decider": "awareness",
          "decision": "NO",
          "explanation": "there are too many copies of the shard allocated to nodes with attribute [logical_availability_zone], there are [2] total configured shard copies for this shard id and [2] total attribute values, expected the allocated shard count per attribute [2] to be less than or equal to the upper bound of the required number of shards per attribute [1]"
        }
      ]
    },
    {
      "node_id": "OWYUqht8QMWVJLH6YAdvrw",
      "node_name": "instance-0000000009",
      "transport_address": "10.43.0.168:19268",
      "node_attributes": {
        "logical_availability_zone": "zone-0",
        "server_name": "instance-0000000009.050d06d5e3ac4a6e81fbb9cd7bd969b9",
        "availability_zone": "europe-west1-c",
        "xpack.installed": "true",
        "data": "warm",
        "instance_configuration": "gcp.data.highstorage.1",
        "region": "unknown-region"
      },
      "node_decision": "no",
      "weight_ranking": 3,
      "deciders": [
        {
          "decider": "awareness",
          "decision": "NO",
          "explanation": "there are too many copies of the shard allocated to nodes with attribute [availability_zone], there are [2] total configured shard copies for this shard id and [2] total attribute values, expected the allocated shard count per attribute [2] to be less than or equal to the upper bound of the required number of shards per attribute [1]"
        }
      ]
    }
  ]
}
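(If it helps, the awareness decider above suggests shard allocation awareness is enforced on the zone attributes; the configured attributes should appear under cluster.routing.allocation.awareness.attributes when checking the cluster settings, e.g.:)

GET _cluster/settings?include_defaults=true&flat_settings=true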

It seems that Elasticsearch doesn't know about the warm nodes :thinking: And if I close the index and then reopen it, the shards start moving to the warm nodes!
There is also enough free space on each node.
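(In case it matters, the close/reopen workaround is roughly this, jsi-0000001 being the index stuck in the warm phase:)

POST jsi-0000001/_close
POST jsi-0000001/_open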

Any ideas?

Thank you for your help
