Possible snapshot bug v7.8.0

Hi, I tried to create a snapshot of five specific indices via the Kibana Dev Tools console, as shown below

PUT _snapshot/shrink-cori-syslog/shrink-cori-syslog_2020-04-24_2020-04-29
{
"indices": "shrink-cori-syslog-000101,shrink-cori-syslog-000102,shrink-cori-syslog-000103,shrink-cori-syslog-000104,shrink-cori-syslog-000105",
"metadata": {
"taken_by": "Siqi",
"description": "test"
}
}

The request returned acknowledged: true.
But when I then checked the snapshot status with

GET _snapshot/shrink-cori-syslog/shrink-cori-syslog_2020-04-24_2020-04-29

It ended up creating a snapshot of all my indices — a 2 TB+ snapshot.

{
"snapshots" : [
{
"snapshot" : "shrink-cori-syslog_2020-04-24_2020-04-29",
"uuid" : "OUqZXEgtSNSPNp2vN99bIA",
"version_id" : 7080099,
"version" : "7.8.0",
"indices" : [
"readonlyrest_audit-2020-03-25",
"cori-syslog-000150",
".monitoring-es-7-2020.07.23",
"readonlyrest_audit-2020-07-09",
"cori-syslog-000154",
"cori-syslog-000153",
"readonlyrest_audit-2020-04-11",
"cori-syslog-000146",
"crt-syslog-named-000038",
"crt-syslog-000165",
"nast-syslog-000019",
"cori-syslog-000158",
"readonlyrest_audit-2020-06-06",
"readonlyrest_audit-2020-03-08",
"shrink-cori-syslog-000105",
# # # # # #
#
# I skipped a few hundred index names here
#
# # # # # #
"shrink-cori-syslog-000113",
"cori-syslog-000149",
"shrink-cori-syslog-000096",
"readonlyrest_audit-2020-03-23",
"gerty-syslog-000031",
"crt-syslog-es6-000069",
"readonlyrest_audit-2020-06-09",
"shrink-cori-syslog-000124",
"readonlyrest_audit-2020-07-02",
"crt-syslog-000186",
"cori-syslog-000142",
"crt-syslog-es6-000080",
"crt-syslog-es6-000071",
"crt-syslog-000182",
"crt-syslog-000191",
"cori-syslog-000179",
"crt-syslog-000173",
"cori-syslog-000152",
"env-syslog-000083",
"crt-syslog-es6-000084",
"readonlyrest_audit-2020-04-16",
".kibana_dunford",
"readonlyrest_audit-2020-06-15",
"readonlyrest_audit-2020-06-12",
"shrink-cori-syslog-000090",
"readonlyrest_audit-2020-07-08",
"crt-syslog-es6-000081",
"readonlyrest_audit-2020-03-26",
"readonlyrest_audit-2020-03-24",
"shrink-cori-syslog-000127",
"readonlyrest_audit-2020-02-25",
".monitoring-kibana-7-2020.07.24",
"readonlyrest_audit-2020-03-30",
"shrink-cori-syslog-000099",
"readonlyrest_audit-2020-04-25",
"crt-syslog-es6-000078",
"cori-syslog-000151",
".kibana_siqideng",
"readonlyrest_audit-2020-06-24",
"shrink-gerty-syslog-000016"
],
"include_global_state" : true,
"metadata" : {
"taken_by" : "Siqi",
"description" : "test"
},
"state" : "IN_PROGRESS",
"start_time" : "2020-07-30T18:52:42.109Z",
"start_time_in_millis" : 1596135162109,
"end_time" : "1970-01-01T00:00:00.000Z",
"end_time_in_millis" : 0,
"duration_in_millis" : 0,
"failures" : [ ],
"shards" : {
"total" : 0,
"failed" : 0,
"successful" : 0
}
}
]
}

Here is my Elasticsearch version

{
"name" : "es-syslog-client-1",
"cluster_name" : "es-syslog",
"cluster_uuid" : "cfi_0dsbQ5Gb1ok47A7GwA",
"version" : {
"number" : "7.8.0",
"build_flavor" : "default",
"build_type" : "rpm",
"build_hash" : "757314695644ea9a1dc2fecd26d1a43856725e65",
"build_date" : "2020-06-14T19:35:50.234439Z",
"build_snapshot" : false,
"lucene_version" : "8.5.1",
"minimum_wire_compatibility_version" : "6.8.0",
"minimum_index_compatibility_version" : "6.0.0-beta1"
},
"tagline" : "You Know, for Search"
}

I don't think it behaved like this in earlier versions. Please take a look. Thank you.