While the anomaly detection job I created was running, its memory status suddenly changed to soft_limit. Can you explain what causes this and how to remedy it? My Elasticsearch cluster runs on an EC2 instance (could that be a contributing factor?).
Below is the job description:
{
  "count" : 1,
  "jobs" : [
    {
      "job_id" : "my_job_low_sum",
      "job_type" : "anomaly_detector",
      "job_version" : "7.0.1",
      "create_time" : 1561121146128,
      "analysis_config" : {
        "bucket_span" : "15m",
        "detectors" : [
          {
            "detector_description" : "sum per method_status",
            "function" : "low_sum",
            "field_name" : "EVENTHOUR",
            "partition_field_name" : "SHIPPERID_CARRIERID",
            "detector_index" : 0
          }
        ],
        "influencers" : [
          "SHIPPERID",
          "CARRIERID"
        ]
      },
      "analysis_limits" : {
        "model_memory_limit" : "1024mb",
        "categorization_examples_limit" : 4
      },
      "data_description" : {
        "time_field" : "EVENTTIME",
        "time_format" : "epoch_ms"
      },
      "model_snapshot_retention_days" : 10,
      "model_snapshot_id" : "1561390307",
      "results_index_name" : "custom-low_sum_results"
    }
  ]
}
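In case it helps, this is how I'm reading the memory status, via the get job stats API (the memory_status and model_bytes fields under model_size_stats):

GET _ml/anomaly_detectors/my_job_low_sum/_stats

If raising the limit is the usual remedy, I assume I would use the update job API, something like the sketch below. The 2048mb value is just a placeholder, and my understanding is that the job has to be closed before analysis_limits can be changed:

# close the job, raise the memory limit, then reopen
POST _ml/anomaly_detectors/my_job_low_sum/_close

POST _ml/anomaly_detectors/my_job_low_sum/_update
{
  "analysis_limits" : {
    "model_memory_limit" : "2048mb"
  }
}

POST _ml/anomaly_detectors/my_job_low_sum/_open

Is that the right approach, or is there something EC2-specific I should check first?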