Hi, I am trying to reindex many indices into one ILM-based alias/index.
However, I get the error below. The dataset is smallish at 150 GB, but it is spread across many indices — about 3,000 — hence the restructuring.
First it complained about too many open scroll contexts, so I increased the scroll context limit... now I get the error below instead.
This is running on a c6gd.2xlarge AWS EC2 instance. With nvme drive to speed it up.
The Elasticsearch version is 8.18.
My request is as follows:
POST _reindex?wait_for_completion=false&slices=auto
{
  "source": {
    "index": "dataset-*"
  },
  "dest": {
    "index": "measurement-data",
    "op_type": "create"
  },
  "script": {
    "inline": "ctx._source.dataseriesId = ctx._source.remove(\"id\"); ctx._source.datasetId = ctx._index.substring(8);",
    "lang": "painless"
  }
}
Task status (with the errors at the bottom):
{
  "completed": true,
  "task": {
    "node": "91hv2jovSRqTYMy6Z2HmLw",
    "id": 32814079,
    "type": "transport",
    "action": "indices:data/write/reindex",
    "status": {
      "total": 2611898649,
      "updated": 0,
      "created": 3000,
      "deleted": 0,
      "batches": 3,
      "version_conflicts": 0,
      "noops": 0,
      "retries": {
        "bulk": 0,
        "search": 0
      },
      "throttled_millis": 0,
      "requests_per_second": -1,
      "throttled_until_millis": 0,
      "slices": [
        {
          "slice_id": 0,
          "total": 1188375375,
          "updated": 0,
          "created": 2000,
          "deleted": 0,
          "batches": 2,
          "version_conflicts": 0,
          "noops": 0,
          "retries": {
            "bulk": 0,
            "search": 0
          },
          "throttled_millis": 0,
          "requests_per_second": -1,
          "throttled_until_millis": 0
        },
        {
          "slice_id": 1,
          "total": 1423523274,
          "updated": 0,
          "created": 1000,
          "deleted": 0,
          "batches": 1,
          "version_conflicts": 0,
          "noops": 0,
          "retries": {
            "bulk": 0,
            "search": 0
          },
          "throttled_millis": 0,
          "requests_per_second": -1,
          "throttled_until_millis": 0
        }
      ]
    },
    "description": "reindex from [dataset-*] updated with Script{type=inline, lang='painless', idOrCode='ctx._source.dataseriesId = ctx._source.remove(\"id\"); ctx._source.datasetId = ctx._index.substring(8);', options={}, params={}} to [measurement-data]",
    "start_time_in_millis": 1748518640325,
    "running_time_in_nanos": 979225413,
    "cancellable": true,
    "cancelled": false,
    "headers": {}
  },
  "response": {
    "took": 957,
    "timed_out": false,
    "total": 2611898649,
    "updated": 0,
    "created": 3000,
    "deleted": 0,
    "batches": 3,
    "version_conflicts": 0,
    "noops": 0,
    "retries": {
      "bulk": 0,
      "search": 0
    },
    "throttled": "0s",
    "throttled_millis": 0,
    "requests_per_second": -1,
    "throttled_until": "0s",
    "throttled_until_millis": 0,
    "slices": [
      {
        "slice_id": 0,
        "total": 1188375375,
        "updated": 0,
        "created": 2000,
        "deleted": 0,
        "batches": 2,
        "version_conflicts": 0,
        "noops": 0,
        "retries": {
          "bulk": 0,
          "search": 0
        },
        "throttled": "0s",
        "throttled_millis": 0,
        "requests_per_second": -1,
        "throttled_until": "0s",
        "throttled_until_millis": 0
      },
      {
        "slice_id": 1,
        "total": 1423523274,
        "updated": 0,
        "created": 1000,
        "deleted": 0,
        "batches": 1,
        "version_conflicts": 0,
        "noops": 0,
        "retries": {
          "bulk": 0,
          "search": 0
        },
        "throttled": "0s",
        "throttled_millis": 0,
        "requests_per_second": -1,
        "throttled_until": "0s",
        "throttled_until_millis": 0
      }
    ],
    "failures": [
      {
        "shard": -1,
        "status": 429,
        "reason": {
          "type": "es_rejected_execution_exception",
          "reason": "rejected execution of TimedRunnable{original=ActionRunnable#wrap[org.elasticsearch.search.SearchService$$Lambda/0x00000000744f2c48@6698fec7], creationTimeNanos=167646586603173, startTimeNanos=0, finishTimeNanos=-1, failedOrRejected=false} on TaskExecutionTimeTrackingEsThreadPoolExecutor[name =[NAME]/search, queue capacity = 1000, task execution EWMA = 1ms, total task execution time = 9.2m, org.elasticsearch.common.util.concurrent.TaskExecutionTimeTrackingEsThreadPoolExecutor@4b158e16[Running, pool size = 30, active threads = 30, queued tasks = 992, completed tasks = 1697334]]"
        }
      },
      {
        "shard": -1,
        "status": 429,
        "reason": {
          "type": "es_rejected_execution_exception",
          "reason": "rejected execution of TimedRunnable{original=ActionRunnable#wrap[org.elasticsearch.search.SearchService$$Lambda/0x00000000744f2c48@3d680ccf], creationTimeNanos=167646587724683, startTimeNanos=0, finishTimeNanos=-1, failedOrRejected=false} on TaskExecutionTimeTrackingEsThreadPoolExecutor[name = [NAME]/search, queue capacity = 1000, task execution EWMA = 1.4ms, total task execution time = 9.2m, org.elasticsearch.common.util.concurrent.TaskExecutionTimeTrackingEsThreadPoolExecutor@4b158e16[Running, pool size = 30, active threads = 29, queued tasks = 999, completed tasks = 1697355]]"
        }
      },
     **... (with many more removed)**
    ]
  }
}
Any help/suggestions would be appreciated to get around this issue. Also, anything that could optimise this reindex process would help — I think it would currently take a good few hours to complete, and ideally I'd be able to speed it up.