Since upgrading my ELK stack from 7.17 to 8.6, our alert rules of the types "Uptime monitor status", "Log threshold", and "Metric threshold" fail with the following message:
ResponseError: search_phase_execution_exception: [illegal_argument_exception] Reason: collapse is not supported for the field [kibana.alert.uuid] of the type [text]
at KibanaTransport.request (/usr/share/kibana/node_modules/@elastic/transport/lib/Transport.js:476:27)
at runMicrotasks (<anonymous>)
at processTicksAndRejections (node:internal/process/task_queues:96:5)
at KibanaTransport.request (/usr/share/kibana/node_modules/@kbn/core-elasticsearch-client-server-internal/target_node/src/create_transport.js:51:16)
at ClientTraced.SearchApi [as search] (/usr/share/kibana/node_modules/@elastic/elasticsearch/lib/api/api/search.js:66:12)
at Object.search (/usr/share/kibana/x-pack/plugins/rule_registry/server/rule_data_client/rule_data_client.js:81:18)
at fetchAlertsForStates (/usr/share/kibana/x-pack/plugins/rule_registry/server/utils/fetch_existing_alerts.js:45:7)
at async Promise.all (index 0)
at fetchExistingAlerts (/usr/share/kibana/x-pack/plugins/rule_registry/server/utils/fetch_existing_alerts.js:49:19)
at Object.executor (/usr/share/kibana/x-pack/plugins/rule_registry/server/utils/create_lifecycle_executor.js:102:20)
at /usr/share/kibana/x-pack/plugins/alerting/server/task_runner/task_runner.js:226:24
at TaskRunnerTimer.runWithTimer (/usr/share/kibana/x-pack/plugins/alerting/server/task_runner/task_runner_timer.js:49:20)
at TaskRunner.runRule (/usr/share/kibana/x-pack/plugins/alerting/server/task_runner/task_runner.js:193:9)
at TaskRunner.run (/usr/share/kibana/x-pack/plugins/alerting/server/task_runner/task_runner.js:517:49)
at TaskManagerRunner.run (/usr/share/kibana/x-pack/plugins/task_manager/server/task_running/task_runner.js:266:22)
The corresponding Kibana log entry (@timestamp 2023-02-10T19:44:41.289+00:00) identifies the failing rule:
Executing Rule default:xpack.uptime.alerts.monitorStatus:5187ec40-a97a-11ed-a3f1-b19cb54a7af3 has resulted in Error: search_phase_execution_exception: [illegal_argument_exception] Reason: collapse is not supported for the field [kibana.alert.uuid] of the type [text]
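For reference, this is how I checked what the field is actually mapped as in the alert indices. The index pattern `.alerts-*` is an assumption based on the default alerts index naming; adjust it if your installation uses different names:

```
# Show the mapping of the field that the failing query collapses on.
# If this reports "type": "text" instead of "keyword", the alert index
# kept an old/incorrect mapping across the 7.17 -> 8.6 upgrade.
GET .alerts-*/_mapping/field/kibana.alert.uuid
```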
There is only one node in the cluster.
Can someone explain what is wrong here?
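For what it's worth, the collapse limitation itself is easy to reproduce outside of Kibana: field collapsing only works on keyword or numeric fields, so any field mapped as text fails with the same error. A minimal sketch in Dev Tools (the index name collapse-test is just a throwaway example):

```
# Hypothetical throwaway index with the field mapped as text
PUT collapse-test
{
  "mappings": {
    "properties": {
      "uuid": { "type": "text" }
    }
  }
}

POST collapse-test/_doc?refresh
{ "uuid": "5187ec40-a97a-11ed-a3f1-b19cb54a7af3" }

# Fails with: collapse is not supported for the field [uuid] of the type [text]
GET collapse-test/_search
{
  "collapse": { "field": "uuid" }
}
```

So the question is really why kibana.alert.uuid ended up mapped as text in my alert indices after the upgrade, and how to fix it.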