SLM policy for Elasticsearch snapshots every 30 minutes is failing

I set up Elasticsearch snapshots to S3 and created an SLM policy to take a snapshot every 30 minutes, but it keeps throwing an error. I'm sharing the SLM policy details below; can anyone help me out?
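
For context, I registered the S3 repository and created the policy roughly along these lines (the bucket name below is only a placeholder, not my real bucket):

# "my-snapshot-bucket" is a placeholder bucket name
curl -X PUT "10.0.0.170:9200/_snapshot/my_s3_repository?pretty" -H 'Content-Type: application/json' -d'
{
  "type": "s3",
  "settings": {
    "bucket": "my-snapshot-bucket"
  }
}'

curl -X PUT "10.0.0.170:9200/_slm/policy/hourly-snapshots?pretty" -H 'Content-Type: application/json' -d'
{
  "name": "<hourly-snap-{now/d}>",
  "schedule": "0 0,30 * * * ?",
  "repository": "my_s3_repository",
  "config": {
    "indices": ["*"]
  },
  "retention": {
    "expire_after": "15d",
    "min_count": 5,
    "max_count": 700
  }
}'

Here is the current state of the policy: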

curl -X GET "10.0.0.170:9200/_slm/policy/hourly-snapshots?human&pretty"
{
  "hourly-snapshots" : {
    "version" : 5,
    "modified_date" : "2020-06-10T20:20:35.058Z",
    "modified_date_millis" : 1591820435058,
    "policy" : {
      "name" : "<hourly-snap-{now/d}>",
      "schedule" : "0 0,30 * * * ?",
      "repository" : "my_s3_repository",
      "config" : {
        "indices" : [
          "*"
        ]
      },
      "retention" : {
        "expire_after" : "15d",
        "min_count" : 5,
        "max_count" : 700
      }
    },
    "last_success" : {
      "snapshot_name" : "hourly-snap-2020.06.16-yqyteoqsr8cqjzcgwv6boq",
      "time_string" : "2020-06-16T05:00:12.703Z",
      "time" : 1592283612703
    },
    "last_failure" : {
      "snapshot_name" : "hourly-snap-2020.06.16-ozk-g5o6tzkyol-qkrjlnq",
      "time_string" : "2020-06-16T07:00:00.142Z",
      "time" : 1592290800142,
      "details" : "{\"type\":\"concurrent_snapshot_execution_exception\",\"reason\":\"[my_s3_repository:hourly-snap-2020.06.16-ozk-g5o6tzkyol-qkrjlnq] a snapshot is already running\",\"stack_trace\":\"ConcurrentSnapshotExecutionException[[my_s3_repository:hourly-snap-2020.06.16-ozk-g5o6tzkyol-qkrjlnq] a snapshot is already running]\n\tat org.elasticsearch.snapshots.SnapshotsService$1.execute(SnapshotsService.java:319)\n\tat org.elasticsearch.cluster.ClusterStateUpdateTask.execute(ClusterStateUpdateTask.java:47)\n\tat org.elasticsearch.cluster.service.MasterService.executeTasks(MasterService.java:702)\n\tat org.elasticsearch.cluster.service.MasterService.calculateTaskOutputs(MasterService.java:324)\n\tat org.elasticsearch.cluster.service.MasterService.runTasks(MasterService.java:219)\n\tat org.elasticsearch.cluster.service.MasterService.access$000(MasterService.java:73)\n\tat org.elasticsearch.cluster.service.MasterService$Batcher.run(MasterService.java:151)\n\tat org.elasticsearch.cluster.service.TaskBatcher.runIfNotProcessed(TaskBatcher.java:150)\n\tat org.elasticsearch.cluster.service.TaskBatcher$BatchedTask.run(TaskBatcher.java:188)\n\tat org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:633)\n\tat org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:252)\n\tat org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:215)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)\n\tat java.base/java.lang.Thread.run(Thread.java:830)\n\"}"
    },
    "next_execution" : "2020-06-16T07:30:00.000Z",
    "next_execution_millis" : 1592292600000,
    "stats" : {
      "policy" : "hourly-snapshots",
      "snapshots_taken" : 127,
      "snapshots_failed" : 174,
      "snapshots_deleted" : 0,
      "snapshot_deletion_failures" : 0
    }
  }
}
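
The failure details say a snapshot is already running when the next run kicks off, so when this happens I check whether the previous snapshot is still in progress (a sketch, using the repository name from above):

curl -X GET "10.0.0.170:9200/_snapshot/my_s3_repository/_current?pretty"
curl -X GET "10.0.0.170:9200/_snapshot/_status?pretty"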
