Dear All,
I'm facing an issue with the Logstash 5.6.6 Netflow module where the index stops being created after a few days, and the only way to resolve it is to stop Logstash and start it again.
Stopping Logstash takes some time, and the following is displayed in the log file:
[2018-03-20T15:52:27,264][WARN ][logstash.shutdownwatcher ] {"inflight_count"=>0, "stalling_thread_info"=>{["LogStash::Filters::DNS", {"reverse"=>["host"], "action"=>"replace", "id"=>"c8e0438f52fad050177fa0732bdbf7b64cf960e6-3"}]=>[{"thread_id"=>31, "name"=>"[main]>worker0", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>32, "name"=>"[main]>worker1", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>33, "name"=>"[main]>worker2", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>34, "name"=>"[main]>worker3", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}]}}
[2018-03-20T15:52:27,265][ERROR][logstash.shutdownwatcher ] The shutdown process appears to be stalled due to busy or blocked plugins. Check the logs for more information.
[2018-03-20T15:52:31,489][WARN ][logstash.shutdownwatcher ] {"inflight_count"=>0, "stalling_thread_info"=>{["LogStash::Filters::DNS", {"reverse"=>["host"], "action"=>"replace", "id"=>"c8e0438f52fad050177fa0732bdbf7b64cf960e6-3"}]=>[{"thread_id"=>31, "name"=>"[main]>worker0", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>32, "name"=>"[main]>worker1", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>33, "name"=>"[main]>worker2", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>34, "name"=>"[main]>worker3", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}]}}
[2018-03-20T15:52:38,730][WARN ][logstash.shutdownwatcher ] {"inflight_count"=>0, "stalling_thread_info"=>{["LogStash::Filters::DNS", {"reverse"=>["host"], "action"=>"replace", "id"=>"c8e0438f52fad050177fa0732bdbf7b64cf960e6-3"}]=>[{"thread_id"=>31, "name"=>"[main]>worker0", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>32, "name"=>"[main]>worker1", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>33, "name"=>"[main]>worker2", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>34, "name"=>"[main]>worker3", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}]}}
[2018-03-20T15:52:47,691][WARN ][logstash.shutdownwatcher ] {"inflight_count"=>0, "stalling_thread_info"=>{["LogStash::Filters::DNS", {"reverse"=>["host"], "action"=>"replace", "id"=>"c8e0438f52fad050177fa0732bdbf7b64cf960e6-3"}]=>[{"thread_id"=>31, "name"=>"[main]>worker0", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>32, "name"=>"[main]>worker1", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>33, "name"=>"[main]>worker2", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>34, "name"=>"[main]>worker3", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}]}}
[2018-03-20T15:52:56,449][WARN ][logstash.shutdownwatcher ] {"inflight_count"=>0, "stalling_thread_info"=>{["LogStash::Filters::DNS", {"reverse"=>["host"], "action"=>"replace", "id"=>"c8e0438f52fad050177fa0732bdbf7b64cf960e6-3"}]=>[{"thread_id"=>31, "name"=>"[main]>worker0", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>32, "name"=>"[main]>worker1", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>33, "name"=>"[main]>worker2", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:170:in `lock'"}, {"thread_id"=>34, "name"=>"[main]>worker3", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:170:in `lock'"}]}}
[2018-03-20T15:53:01,045][WARN ][logstash.shutdownwatcher ] {"inflight_count"=>0, "stalling_thread_info"=>{["LogStash::Filters::DNS", {"reverse"=>["host"], "action"=>"replace", "id"=>"c8e0438f52fad050177fa0732bdbf7b64cf960e6-3"}]=>[{"thread_id"=>31, "name"=>"[main]>worker0", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:170:in `lock'"}, {"thread_id"=>32, "name"=>"[main]>worker1", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>33, "name"=>"[main]>worker2", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>34, "name"=>"[main]>worker3", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}]}}
[2018-03-20T15:53:07,198][WARN ][logstash.shutdownwatcher ] {"inflight_count"=>0, "stalling_thread_info"=>{["LogStash::Filters::DNS", {"reverse"=>["host"], "action"=>"replace", "id"=>"c8e0438f52fad050177fa0732bdbf7b64cf960e6-3"}]=>[{"thread_id"=>31, "name"=>"[main]>worker0", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>32, "name"=>"[main]>worker1", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>33, "name"=>"[main]>worker2", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}, {"thread_id"=>34, "name"=>"[main]>worker3", "current_call"=>"[...]/logstash-core/lib/logstash/util/wrapped_acked_queue.rb:196:in `lock'"}]}}
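The plugin named in the stalling_thread_info above is the dns filter. For reference, reconstructed from the settings shown in the log entry (the "id" value is auto-generated by Logstash, not something I set), my filter block is roughly:

```
filter {
  dns {
    # Settings taken from the stalling_thread_info entry above
    reverse => [ "host" ]
    action  => "replace"

    # Documented logstash-filter-dns options I have NOT tried yet;
    # shown here only as possible ways to reduce blocking on slow lookups:
    # timeout           => 0.5
    # hit_cache_size    => 4096
    # hit_cache_ttl     => 60
    # failed_cache_size => 512
    # failed_cache_ttl  => 5
  }
}
```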
After stopping and starting Logstash again, the issue is resolved, but after a few more days the Netflow module stops working again.
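In case it helps with diagnosis, the next time the module stops I plan to capture the pipeline state before restarting, using the Logstash 5.x monitoring API (default port 9600 in my setup):

```
# Per-plugin event counts, to see where events stop flowing:
curl -s 'http://localhost:9600/_node/stats/pipeline?pretty'
# Hot threads, to see what the worker threads are blocked on:
curl -s 'http://localhost:9600/_node/hot_threads?human=true&threads=8'
```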