Syslogs from fortigate

Hello,
I just configured fortigate to send all the syslogs to logstash
This is my logstash config file:

filter {
   udp {
     port => 514
   {
  if [type] == "SYSLOG" {
    grok {
     patterns_dir => ["/etc/logstash/patterns/"]
      match => ["message" , "%{FORTIGATE_52BASE} %{FORTIGATE_52IPS}"]
      add_tag => ["fortigate"]
    }
    grok {
       patterns_dir => ["/etc/logstash/patterns/"]
       match => ["message" , "%{FORTIGATE_52BASEV2} %{FORTIGATE_52DOS}"]
    }
  }
output {
  elasticsearch_http {
    host => "10.130.233.242:9200"
  }
}

For some reason, this isn't working.
I can't telnet to the fortigate ip/port, I can't see the port on netstat, but if I do tcpdump port 514 I receive this:

Edit#1:

Got this error log on logstash:

[2018-07-24T16:39:42,408][ERROR][logstash.agent ] Failed to execute action {:action=>LogStash::PipelineAction::Create/pipeline_id:mypipeline_1, :exception=>"LogStash::ConfigurationError", :message=>"Expected one of #, => at line 120, column 22 (byte 2373) after filter {\n if [type] == "syslog" {\n grok {\n patterns_dir => ["/etc/logstash/patterns/"]\n match => ["message" , "%{FORTIGATE_52BASE} %{FORTIGATE_52IPS}"]\n add_tag => ["fortigate"] \n }\n grok {\n patterns_dir => ["/etc/logstash/patterns/"]\n match => ["message" , "%{FORTIGATE_52BASEV2} %{FORTIGATE_52DOS}"]\n }\n } \n\noutput {\n elasticsearch_http ", :backtrace=>["/usr/share/logstash/logstash-core/lib/logstash/compiler.rb:42:in compile_imperative'", "/usr/share/logstash/logstash-core/lib/logstash/compiler.rb:50:incompile_graph'", "/usr/share/logstash/logstash-core/lib/logstash/compiler.rb:12:in block in compile_sources'", "org/jruby/RubyArray.java:2486:inmap'", "/usr/share/logstash/logstash-core/lib/logstash/compiler.rb:11:in compile_sources'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:49:ininitialize'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:167:in initialize'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline_action/create.rb:40:inexecute'", "/usr/share/logstash/logstash-core/lib/logstash/agent.rb:305:in `block in converge_state'"]}

Hello @Marcos_Felix,

Can you just try this configuration file:

input {
udp {
port => 7000 #whatever port you have define change this
type => "forti_log"
}
}

filter {
if [type] == "forti_log" {

grok {
		match => ["message", "%{SYSLOG5424PRI:syslog_index}%{GREEDYDATA:message}"]
		overwrite => [ "message" ]
		tag_on_failure => [ "forti_grok_failure" ]
	}


    kv {
source => "message"
value_split => "="
field_split => ","

}

mutate {
add_field => { "temp_time" => "%{date} %{time}" }
rename => { "type" => "ftg_type" }
rename => { "subtype" => "ftg_subtype" }
add_field => { "type" => "forti_log" }
convert => { "rcvdbyte" => "integer" }
convert => { "sentbyte" => "integer" }

}

date {
match => [ "temp_time", "yyyy-MM-dd HH:mm:ss" ]
timezone => "UTC" #change with your timestamp
target => "@timestamp"
}

mutate {
remove_field => ["syslog_index","syslog5424_pri","path","temp_time","service","date","time","sentpkt","rcvdpkt","log_id","message","poluuid"]

}
}
}

output {
stdout { codec => rubydebug }
if [type] == "forti_log" {
elasticsearch {
hosts => "10.10.10.10:9200" #change with your elastic ip
http_compression => "true"
index => "forti-%{+YYYY.MM.dd}"
user => "elastic"
password => "elastic"
template => "/usr/share/logstash/bin/forti.json"
template_name => "forti-*"
}
}
}

This is the template save it with forti.json

{
"template" : "forti-",
"version" : 50001,
"settings" : {
"index.refresh_interval" : "5s"
},
"mappings" : {
"default" : {
"_all" : {"enabled" : true, "omit_norms" : false},
"dynamic_templates" : [ {
"message_field" : {
"path_match" : "message",
"match_mapping_type" : "string",
"mapping" : {
"type" : "text",
"omit_norms" : false
}
}
}, {
"string_fields" : {
"match" : "
",
"match_mapping_type" : "string",
"mapping" : {
"type" : "text", "omit_norms" : false,
"fields" : {
"keyword" : { "type": "keyword", "ignore_above": 256 }
}
}
}
} ],
"properties" : {
"@timestamp": { "type": "date", "include_in_all": false },
"@version": { "type": "keyword", "include_in_all": false },
"geoip" : {
"dynamic": true,
"properties" : {
"ip": { "type": "ip" },
"location" : { "type" : "geo_point" },
"latitude" : { "type" : "half_float" },
"longitude" : { "type" : "half_float" }
}
},
"location": { "type": "geo_point" }
}
}
}
}

I hope this may be worked for you and its sufficient.

Thanks & Regards,
Krunal.

Thank you very much for your help,
I have given that a go and it returns this on the logs:

[2018-07-25T09:33:41,247][WARN ][logstash.inputs.udp ] UDP listener died {:exception=>#<Errno::EACCES: Permission denied - bind(2) for "0.0.0.0" port 514>, :backtrace=>["org/jruby/ext/socket/RubyUDPSocket.java:197:in bind'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-input-udp-3.3.3/lib/logstash/inputs/udp.rb:102:inudp_listener'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-input-udp-3.3.3/lib/logstash/inputs/udp.rb:58:in run'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:512:ininputworker'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:505:in `block in start_input'"]}

Now, it is my understanding that ports below 1024 are classified as privileged ports, if I change it to port 5000, would that work?

edit#1: I rerouted the port from 514 to 5000 using iptables and this is the output on the log file:

[2018-07-25T11:09:49,598][ERROR][logstash.shutdownwatcher ] The shutdown process appears to be stalled due to busy or blocked plugins. Check the logs for more information.

[2018-07-25T11:11:09,524][WARN ][logstash.shutdownwatcher ] {"inflight_count"=>0, "stalling_thread_info"=>{"other"=>[{"thread_id"=>41, "name"=>"[mypipeline_1]<beats", "current_call"=>"[...]/vendor/bundle/jruby/2.3.0/gems/logstash-input-beats-5.0.16-java/lib/logstash/inputs/beats.rb:198:in run'"}], ["LogStash::Filters::Grok", {"match"=>{"message"=>"%{COMBINEDAPACHELOG}"}, "id"=>"8df83643126484154472f728e7f4953099536aad4ab1debfc967ff3b0ed7bffa"}]=>[{"thread_id"=>36, "name"=>nil, "current_call"=>"[...]/logstash-core/lib/logstash/pipeline.rb:418:inread_batch'"}, {"thread_id"=>37, "name"=>nil, "current_call"=>"[...]/logstash-core/lib/logstash/pipeline.rb:418:in read_batch'"}, {"thread_id"=>38, "name"=>nil, "current_call"=>"[...]/logstash-core/lib/logstash/pipeline.rb:418:inread_batch'"}, {"thread_id"=>39, "name"=>nil, "current_call"=>"[...]/logstash-core/lib/logstash/pipeline.rb:418:in `read_batch'"}]}}

and this:

[2018-07-25T11:09:43,884][WARN ][logstash.outputs.elasticsearch] Could not index event to Elasticsearch. {:status=>400, :action=>["index", {:_id=>nil, :_index=>"logstash-2018.07.25", :_type=>"doc", :_routing=>nil}, #LogStash::Event:0x715e8d40], :response=>{"index"=>{"_index"=>"logstash-2018.07.25", "_type"=>"doc", "_id"=>"VITq0GQBxLj9BdGHddnJ", "status"=>400, "error"=>{"type"=>"mapper_parsing_exception", "reason"=>"object mapping for [host] tried to parse field [host] as object, but found a concrete value"}}}}


I then stopped metricbeat service and restart logstash, this is my logs now:
https://pastebin.com/Rxv8s8Tp

made a few changes to the config files and now I get this:

[2018-07-25T11:45:16,958][INFO ][org.logstash.beats.Server] Starting server on port: 5044
[2018-07-25T11:45:22,211][WARN ][logstash.runner ] SIGTERM received. Shutting down.
[2018-07-25T11:45:27,566][WARN ][logstash.shutdownwatcher ] {"inflight_count"=>0, "stalling_thread_info"=>{"other"=>[{"thread_id"=>43, "name"=>"[mypipeline_1]<beats", "current_call"=>"[...]/vendor/bundle/jruby/2.3.0/gems/logstash-input-beats-5.0.16-java/lib/logstash/inputs/beats.rb:198:in run'"}, {"thread_id"=>45, "name"=>"[mypipeline_1]<udp", "current_call"=>"[...]/vendor/bundle/jruby/2.3.0/gems/logstash-input-udp-3.3.3/lib/logstash/inputs/udp.rb:115:inselect'"}], ["LogStash::Filters::Grok", {"match"=>{"message"=>"%{COMBINEDAPACHELOG}"}, "id"=>"b1f79616121e232cdde9d1d269e42bf12030dea59870f3fbe31643cf88f78544"}]=>[{"thread_id"=>36, "name"=>nil, "current_call"=>"[...]/logstash-core/lib/logstash/pipeline.rb:418:in read_batch'"}, {"thread_id"=>37, "name"=>nil, "current_call"=>"[...]/logstash-core/lib/logstash/pipeline.rb:418:inread_batch'"}, {"thread_id"=>38, "name"=>nil, "current_call"=>"[...]/logstash-core/lib/logstash/pipeline.rb:418:in read_batch'"}, {"thread_id"=>39, "name"=>nil, "current_call"=>"[...]/logstash-core/lib/logstash/pipeline.rb:418:inread_batch'"}]}}
[2018-07-25T11:45:27,585][ERROR][logstash.shutdownwatcher ] The shutdown process appears to be stalled due to busy or blocked plugins. Check the logs for more information.
[2018-07-25T11:45:32,526][WARN ][logstash.shutdownwatcher ] {"inflight_count"=>0, "stalling_thread_info"=>{}}
[2018-07-25T11:45:33,168][INFO ][logstash.pipeline ] Pipeline has terminated {:pipeline_id=>"mypipeline_1", :thread=>"#<Thread:0x2c2493bc run>"}
[2018-07-25T11:46:06,661][INFO ][logstash.runner ] Starting Logstash {"logstash.version"=>"6.3.1"}

my pipeline config looks like this:

- pipeline.id: mypipeline_1
  path.config: "/etc/logstash/conf.d/*.conf"

Ok, noticing I had the host error, I added this to my config file:

mutate {
      remove_field => [ "host" ]
    }

Now the output on the log file is this:

[2018-07-25T11:55:58,310][INFO ][logstash.inputs.beats ] Beats inputs: Starting input listener {:address=>"0.0.0.0:5044"}
[2018-07-25T11:55:58,414][INFO ][logstash.pipeline ] Pipeline started successfully {:pipeline_id=>"mypipeline_1", :thread=>"#<Thread:0x4d7084f0 run>"}
[2018-07-25T11:55:58,464][INFO ][logstash.inputs.udp ] Starting UDP listener {:address=>"0.0.0.0:5000"}
[2018-07-25T11:55:58,577][INFO ][org.logstash.beats.Server] Starting server on port: 5044
[2018-07-25T11:55:58,632][INFO ][logstash.inputs.udp ] UDP listener started {:address=>"0.0.0.0:5000", :receive_buffer_bytes=>"106496", :queue_size=>"2000"}
[2018-07-25T11:55:58,701][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:mypipeline_1], :non_running_pipelines=>}
[2018-07-25T11:55:59,156][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
[2018-07-25T11:56:52,941][WARN ][logstash.runner ] SIGTERM received. Shutting down.
[2018-07-25T11:56:58,190][WARN ][logstash.shutdownwatcher ] {"inflight_count"=>0, "stalling_thread_info"=>{"other"=>[{"thread_id"=>36, "name"=>"[mypipeline_1]<beats", "curre nt_call"=>"[...]/vendor/bundle/jruby/2.3.0/gems/logstash-input-beats-5.0.16-java/lib/logstash/inputs/beats.rb:198:in run'"}, {"thread_id"=>38, "name"=>"[mypipeline_1]<udp", "current_call"=>"[...]/vendor/bundle/jruby/2.3.0/gems/logstash-input-udp-3.3.3/lib/logstash/inputs/udp.rb:115:inselect'"}], ["LogStash::Filters::Grok", {"match"=>{"messag e"=>"%{COMBINEDAPACHELOG}"}, "id"=>"3518e40d4f70ed431698e90ae320d64fe782a95977b1b17704d0510d2f12a871"}]=>[{"thread_id"=>31, "name"=>nil, "current_call"=>"[...]/logstash-core /lib/logstash/pipeline.rb:418:in read_batch'"}, {"thread_id"=>32, "name"=>nil, "current_call"=>"[...]/logstash-core/lib/logstash/pipeline.rb:418:inread_batch'"}, {"thread _id"=>33, "name"=>nil, "current_call"=>"[...]/logstash-core/lib/logstash/pipeline.rb:418:in read_batch'"}, {"thread_id"=>34, "name"=>nil, "current_call"=>"[...]/logstash-co re/lib/logstash/pipeline.rb:418:inread_batch'"}]}}
[2018-07-25T11:56:58,203][ERROR][logstash.shutdownwatcher ] The shutdown process appears to be stalled due to busy or blocked plugins. Check the logs for more information.
[2018-07-25T11:57:02,251][INFO ][logstash.pipeline ] Pipeline has terminated {:pipeline_id=>"mypipeline_1", :thread=>"#<Thread:0x4d7084f0 run>"}
[2018-07-25T11:57:35,644][INFO ][logstash.runner ] Starting Logstash {"logstash.version"=>"6.3.1"}
[2018-07-25T11:57:37,276][ERROR][logstash.agent ] Failed to execute action {:action=>LogStash::PipelineAction::Create/pipeline_id:mypipeline_1, :exception=>"LogSta sh::ConfigurationError", :message=>"Expected one of #, input, filter, output at line 84, column 1 (byte 1437) after ", :backtrace=>["/usr/share/logstash/logstash-core/lib/lo gstash/compiler.rb:42:in compile_imperative'", "/usr/share/logstash/logstash-core/lib/logstash/compiler.rb:50:incompile_graph'", "/usr/share/logstash/logstash-core/lib/lo gstash/compiler.rb:12:in block in compile_sources'", "org/jruby/RubyArray.java:2486:inmap'", "/usr/share/logstash/logstash-core/lib/logstash/compiler.rb:11:in compile_so urces'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:49:ininitialize'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:167:in initialize'", " /usr/share/logstash/logstash-core/lib/logstash/pipeline_action/create.rb:40:inexecute'", "/usr/share/logstash/logstash-core/lib/logstash/agent.rb:305:in `block in converge _state'"]}
[2018-07-25T11:57:38,065][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}

As far as I can tell it worked, but I dont know what is wrong with those ERROR messages, is it my pipeline.yml file?

just checked my logs, for forti_log as I stated in my config file and this popped up:
4 results:

my question here is: This is a configuration issue, does identation matter in a config file? if so then that's it because I copied and pasted the config given to me by Krunal

update: well now no logs are getting to Kibana, there are plenty of disk space. Already restarted the services but no luck

yes why not ! definitely its working do it.

Do you know perhaps why there isn't any logs being displayed on Kibana?
This is what my logstash logs are showing:

[2018-07-25T14:01:42,381][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
[2018-07-25T14:02:18,128][INFO ][logstash.runner ] Starting Logstash {"logstash.version"=>"6.3.1"}
[2018-07-25T14:02:19,526][ERROR][logstash.agent ] Failed to execute action {:action=>LogStash::PipelineAction::Create/pipeline_id:mypipeline_1, :exception=>"LogStash::ConfigurationError", :message=>"Expected one of #, input, filter, output at line 78, column 1 (byte 1386) after ", :backtrace=>["/usr/share/logstash/logstash-core/lib/logstash/compiler.rb:42:in compile_imperative'", "/usr/share/logstash/logstash-core/lib/logstash/compiler.rb:50:incompile_graph'", "/usr/share/logstash/logstash-core/lib/logstash/compiler.rb:12:in block in compile_sources'", "org/jruby/RubyArray.java:2486:inmap'", "/usr/share/logstash/logstash-core/lib/logstash/compiler.rb:11:in compile_sources'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:49:ininitialize'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:167:in initialize'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline_action/create.rb:40:inexecute'", "/usr/share/logstash/logstash-core/lib/logstash/agent.rb:305:in `block in converge_state'"]}
[2018-07-25T14:02:20,106][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}

Also, the same message is being displayed multiple times

you have installed x-pack on this ?

no I have not, I commented out the username/password from the config file.

It's ok can you just sent me the config file what you have changed because error is showing like [ERROR][logstash.agent ] Failed to execute action {:action=>LogStash::PipelineAction::Create/pipeline_id:mypipeline_1, :exception=>"LogStash::ConfigurationError", :message=>"Expected one of #, input, filter, output at line 78, column 1 (byte 1386) after ", :backtrace=>["/usr/share/logstash/logstash-

means line number 78 has some issue so i want to check that in that line what is the issue ?

or you just mark that line and send me ill check it again.

Hey Krunal, thanks for all the attention - I really appreciate it.
Regarding the topic, do you mean line 78 of what exactly? the syslog configuration or? In any case I will post my config files for logstash here:

Pipeline:

- pipeline.id: mypipeline_1
  path.config: "/etc/logstash/conf.d/*.conf"

Apache logstash:

input {
  file {
    path => "/var/log/logstash/*_log"
  }
}

filter {
  if [path] =~ "access" {
    mutate { replace => { type => "apache_access" } }
    grok {
      match => { "message" => "%{COMBINEDAPACHELOG}" }
    }
    date {
      match => [ "timestamp" , "dd/MMM/yyyy:HH:mm:ss Z" ]
    }
  } else if [path] =~ "error" {
    mutate { replace => { type => "apache_error" } }
  } else {
    mutate { replace => { type => "random_logs" } }
  }
}

output {
  elasticsearch { hosts => ["localhost:9200"] }
  stdout { codec => rubydebug }
}

Logstash Beats:

input {
  beats {
    port => 5044
  }
}

output {
  elasticsearch {
    hosts => "localhost:9200"
    manage_template => false
    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
  }
}

Logstash-Simple (from the official documentation)

input { stdin { } }

filter {
  grok {
    match => { "message" => "%{COMBINEDAPACHELOG}" }
  }
  date {
    match => [ "timestamp" , "dd/MMM/yyyy:HH:mm:ss Z" ]
  }
}

output {
  elasticsearch { hosts => ["localhost:9200"] }
  stdout { codec => rubydebug }
}

Logstash Syslog:

port => 5000
        type => "forti_log"
        }
}
mutate {
      remove_field => [ "host" ]
}
filter {
        if [type] == "forti_log" {
                grok {
                                        match => ["message", "%{SYSLOG5424PRI:syslog_index}%{GREEDYDATA:message}"]
                                        overwrite => [ "message" ]
                                        tag_on_failure => [ "forti_grok_failure" ]
                }
                kv {
                        source => "message"
                        value_split => "="
                        field_split => ","
                }
                mutate {
                        add_field => { "temp_time" => "%{date} %{time}" }
                        rename => { "type" => "ftg_type" }
                        rename => { "subtype" => "ftg_subtype" }
                        add_field => { "type" => "forti_log" }
                        convert => { "rcvdbyte" => "integer" }
                        convert => { "sentbyte" => "integer" }
                }
                date {
                        match => [ "temp_time", "yyyy-MM-dd HH:mm:ss" ]
                        timezone => "UTC" #change with your timestamp
                        target => "@timestamp"
                }
                mutate {
                remove_field => ["syslog_index","syslog5424_pri","path","temp_time","service","date","time","sentpkt","rcvdpkt","log_id","message","poluuid"]
                }
        }
}
output {
        stdout { codec => rubydebug }
        if [type] == "forti_log" {
                elasticsearch {
                hosts => "localhost:9200"
                http_compression => "true"
                index => "forti-%{+YYYY.MM.dd}"
                #user => "elastic"
                #password => "elastic"
                template => "/usr/share/logstash/bin/forti.json"
                template_name => "forti-*"
                }
        }
}

The only goal here is to gather the syslogs from fortigate and event viewer logs from windows. I don't need anything from the linux box, or anything else.
Thanks for helping me, Krunal.

Still having this issue.
Getting issue in this line:

[2018-07-26T09:55:21,957][ERROR][logstash.agent ] Failed to execute action {:action=>LogStash::PipelineAction::Create/pipeline_id:mypipeline_1, :exception=>"LogStash::ConfigurationError", :message=>"Expected one of #, input, filter, output at line 86, column 1 (byte 1403) after ", :backtrace=>["/usr/share/logstash/logstash-core/lib/logstash/compiler.rb:42:in compile_imperative'", "/usr/share/logstash/logstash-core/lib/logstash/compiler.rb:50:incompile_graph'", "/usr/share/logstash/logstash-core/lib/logstash/compiler.rb:12:in block in compile_sources'", "org/jruby/RubyArray.java:2486:inmap'", "/usr/share/logstash/logstash-core/lib/logstash/compiler.rb:11:in compile_sources'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:49:ininitialize'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:167:in initialize'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline_action/create.rb:40:inexecute'", "/usr/share/logstash/logstash-core/lib/logstash/agent.rb:305:in `block in converge_state'"]}
[2018-07-26T09:55:22,398][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}

ok So I fixed the issue with no logs coming in, I had to fix the config file on syslog and do indentation. now I am getting this:

[2018-07-26T10:02:56,620][INFO ][org.logstash.beats.Server] Starting server on port: 5044
[2018-07-26T10:02:58,909][WARN ][logstash.runner ] SIGTERM received. Shutting down.
[2018-07-26T10:03:00,109][WARN ][logstash.outputs.elasticsearch] Could not index event to Elasticsearch. {:status=>400, :action=>["index", {:_id=>nil, :_index=>"logstash-2018.07.26", :_type=>"doc", :_routing=>nil}, #LogStash::Event:0x37523f08], :response=>{"index"=>{"_index"=>"logstash-2018.07.26", "_type"=>"doc", "_id"=>"xzPT1WQBrYy0Cu31ueP-", "status"=>400, "error"=>{"type"=>"mapper_parsing_exception", "reason"=>"object mapping for [host] tried to parse field [host] as object, but found a concrete value"}}}}
[2018-07-26T10:03:00,238][WARN ][logstash.outputs.elasticsearch] Could not index event to Elasticsearch. {:status=>400, :action=>["index", {:_id=>nil, :_index=>"logstash-2018.07.26", :_type=>"doc", :_routing=>nil}, #LogStash::Event:0x37523f08], :response=>{"index"=>{"_index"=>"logstash-2018.07.26", "_type"=>"doc", "_id"=>"_jPT1WQBrYy0Cu31uuNm", "status"=>400, "error"=>{"type"=>"mapper_parsing_exception", "reason"=>"object mapping for [host] tried to parse field [host] as object, but found a concrete value"}}}}
[2018-07-26T10:03:04,251][WARN ][logstash.shutdownwatcher ] {"inflight_count"=>0, "stalling_thread_info"=>{"other"=>[{"thread_id"=>43, "name"=>"[mypipeline_1]<beats", "current_call"=>"[...]/vendor/bundle/jruby/2.3.0/gems/logstash-input-beats-5.0.16-java/lib/logstash/inputs/beats.rb:198:in run'"}, {"thread_id"=>45, "name"=>"[mypipeline_1]<udp", "current_call"=>"[...]/vendor/bundle/jruby/2.3.0/gems/logstash-input-udp-3.3.3/lib/logstash/inputs/udp.rb:115:inselect'"}], ["LogStash::Filters::Grok", {"match"=>{"message"=>"%{COMBINEDAPACHELOG}"}, "id"=>"3dd8d363b43bfe16e533fed30b8f1b24a1fba341b07ad09b14191c12cc5f0ba7"}]=>[{"thread_id"=>36, "name"=>nil, "current_call"=>"[...]/logstash-core/lib/logstash/pipeline.rb:418:in read_batch'"}, {"thread_id"=>37, "name"=>nil, "current_call"=>"[...]/logstash-core/lib/logstash/pipeline.rb:418:inread_batch'"}, {"thread_id"=>38, "name"=>nil, "current_call"=>"[...]/logstash-core/lib/logstash/pipeline.rb:418:in read_batch'"}, {"thread_id"=>39, "name"=>nil, "current_call"=>"[...]/logstash-core/lib/logstash/pipeline.rb:418:inread_batch'"}]}}
[2018-07-26T10:03:04,268][ERROR][logstash.shutdownwatcher ] The shutdown process appears to be stalled due to busy or blocked plugins. Check the logs for more information.
[2018-07-26T10:03:09,224][WARN ][logstash.shutdownwatcher ] {"inflight_count"=>0, "stalling_thread_info"=>{"other"=>[{"thread_id"=>43, "name"=>"[mypipeline_1]<beats", "current_call"=>"[...]/vendor/bundle/jruby/2.3.0/gems/logstash-input-beats-5.0.16-java/lib/logstash/inputs/beats.rb:198:in run'"}, {"thread_id"=>45, "name"=>"[mypipeline_1]<udp", "current_call"=>"[...]/vendor/bundle/jruby/2.3.0/gems/logstash-input-udp-3.3.3/lib/logstash/inputs/udp.rb:115:inselect'"}], ["LogStash::Filters::Grok", {"match"=>{"message"=>"%{COMBINEDAPACHELOG}"}, "id"=>"3dd8d363b43bfe16e533fed30b8f1b24a1fba341b07ad09b14191c12cc5f0ba7"}]=>[{"thread_id"=>36, "name"=>nil, "current_call"=>"[...]/logstash-core/lib/logstash/pipeline.rb:418:in read_batch'"}, {"thread_id"=>37, "name"=>nil, "current_call"=>"[...]/logstash-core/lib/logstash/pipeline.rb:418:inread_batch'"}, {"thread_id"=>38, "name"=>nil, "current_call"=>"[...]/logstash-core/lib/logstash/pipeline.rb:418:in read_batch'"}, {"thread_id"=>39, "name"=>nil, "current_call"=>"[...]/logstash-core/lib/logstash/pipeline.rb:418:inread_batch'"}]}}
[2018-07-26T10:03:11,720][INFO ][logstash.pipeline ] Pipeline has terminated {:pipeline_id=>"mypipeline_1", :thread=>"#<Thread:0x6c6dbebc run>"}

I get this error when I put this line of code:

mutate {
remove_field => [ "host" ]
}

if I remove that field I get:

[2018-07-26T10:58:18,282][WARN ][logstash.outputs.elasticsearch] Could not index event to Elasticsearch. {:status=>400, :action=>["index", {:_id=>nil, :_index=>"logstash-2018.07.26", :_type=>"doc", :_routing=>nil}, #LogStash::Event:0x7eb25fb5], :response=>{"index"=>{"_index"=>"logstash-2018.07.26", "_type"=>"doc", "_id"=>"KDQG1mQBrYy0Cu31W06n", "status"=>400, "error"=>{"type"=>"mapper_parsing_exception", "reason"=>"object mapping for [host] tried to parse field [host] as object, but found a concrete value"}}}}
[2018-07-26T10:58:18,337][WARN ][logstash.outputs.elasticsearch] Could not index event to Elasticsearch. {:status=>400, :action=>["index", {:_id=>nil, :_index=>"logstash-2018.07.26", :_type=>"doc", :_routing=>nil}, #LogStash::Event:0x7eb25fb5], :response=>{"index"=>{"_index"=>"logstash-2018.07.26", "_type"=>"doc", "_id"=>"KjQG1mQBrYy0Cu31W07f", "status"=>400, "error"=>{"type"=>"mapper_parsing_exception", "reason"=>"object mapping for [host] tried to parse field [host] as object, but found a concrete value"}}}}

all the other logs are working fine (winlogbeat, logstash) its just fortigate logs not showing

so I took that config file and changed it to instead of asking for forti_gate it will ask for SYSLOG. This is the output on the log file now:

[2018-07-26T11:42:50,764][INFO ][logstash.inputs.beats ] Beats inputs: Starting input listener {:address=>"0.0.0.0:5044"}
[2018-07-26T11:42:51,281][INFO ][logstash.inputs.beats ] Beats inputs: Starting input listener {:address=>"0.0.0.0:5044"}
[2018-07-26T11:42:51,369][INFO ][logstash.pipeline ] Pipeline started successfully {:pipeline_id=>"mypipeline_1", :thread=>"#<Thread:0x771e32d4 run>"}
[2018-07-26T11:42:51,453][INFO ][logstash.inputs.udp ] Starting UDP listener {:address=>"0.0.0.0:5000"}
[2018-07-26T11:42:51,514][INFO ][org.logstash.beats.Server] Starting server on port: 5044
[2018-07-26T11:42:51,525][INFO ][org.logstash.beats.Server] Starting server on port: 5044
[2018-07-26T11:42:51,707][INFO ][logstash.inputs.udp ] UDP listener started {:address=>"0.0.0.0:5000", :receive_buffer_bytes=>"106496", :queue_size=>"2000"}
[2018-07-26T11:42:51,819][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:mypipeline_1], :non_running_pipelines=>[]}
[2018-07-26T11:42:52,285][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
[2018-07-26T11:42:57,891][ERROR][logstash.pipeline ] A plugin had an unrecoverable error. Will restart this plugin.
Pipeline_id:mypipeline_1
Plugin: <LogStash::Inputs::Beats port=>5044, id=>"44f06ca174b66bdb5151e826e76a4669260e0e995067f407287498a446dc866e", enable_metric=>true, codec=><LogStash::Codecs::Plain id=>"plain_cbbb44c2-6855-41a1-8030-977d2e86747f", enable_metric=>true, charset=>"UTF-8">, host=>"0.0.0.0", ssl=>false, ssl_verify_mode=>"none", include_codec_tag=>true, ssl_handshake_timeout=>10000, tls_min_version=>1, tls_max_version=>1.2, cipher_suites=>["TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"], client_inactivity_timeout=>60, executor_threads=>4>
Error: Address already in use
Exception: Java::JavaNet::BindException
Stack: sun.nio.ch.Net.listen(Native Method)
sun.nio.ch.ServerSocketChannelImpl.bind(sun/nio/ch/ServerSocketChannelImpl.java:224)
io.netty.channel.socket.nio.NioServerSocketChannel.doBind(io/netty/channel/socket/nio/NioServerSocketChannel.java:128)
io.netty.channel.AbstractChannel$AbstractUnsafe.bind(io/netty/channel/AbstractChannel.java:558)
io.netty.channel.DefaultChannelPipeline$HeadContext.bind(io/netty/channel/DefaultChannelPipeline.java:1283)
io.netty.channel.AbstractChannelHandlerContext.invokeBind(io/netty/channel/AbstractChannelHandlerContext.java:501)
io.netty.channel.AbstractChannelHandlerContext.bind(io/netty/channel/AbstractChannelHandlerContext.java:486)
io.netty.channel.DefaultChannelPipeline.bind(io/netty/channel/DefaultChannelPipeline.java:989)
io.netty.channel.AbstractChannel.bind(io/netty/channel/AbstractChannel.java:254)
io.netty.bootstrap.AbstractBootstrap$2.run(io/netty/bootstrap/AbstractBootstrap.java:364)
io.netty.util.concurrent.AbstractEventExecutor.safeExecute(io/netty/util/concurrent/AbstractEventExecutor.java:163)
io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(io/netty/util/concurrent/SingleThreadEventExecutor.java:403)
io.netty.channel.nio.NioEventLoop.run(io/netty/channel/nio/NioEventLoop.java:463)
io.netty.util.concurrent.SingleThreadEventExecutor$5.run(io/netty/util/concurrent/SingleThreadEventExecutor.java:858)
io.netty.util.concurrent.FastThreadLocalRunnable.run(io/netty/util/concurrent/FastThreadLocalRunnable.java:30)
java.lang.Thread.run(java/lang/Thread.java:748)
[2018-07-26T11:42:58,895][INFO ][org.logstash.beats.Server] Starting server on port: 5044

I still can't see the logs on kibana.

Well I switched to "syslog" in lower case and now there are no logs being displayed in kibana. This is the error log:

[2018-07-26T11:53:16,509][ERROR][logstash.outputs.elasticsearch] Failed to install template. {:message=>"Got response code '400' contacting Elasticsearch at URL 'http://localhost:9200/_template/forti-'", :class=>"LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError", :backtrace=>["/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/http_client/manticore_adapter.rb:80:in perform_request'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/http_client/pool.rb:291:inperform_request_to_url'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/http_client/pool.rb:278:in block in perform_request'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/http_client/pool.rb:373:inwith_connection'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/http_client/pool.rb:277:in perform_request'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/http_client/pool.rb:285:inblock in Pool'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/http_client.rb:348:in template_put'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/http_client.rb:86:intemplate_install'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/template_manager.rb:21:in install'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/template_manager.rb:9:ininstall_template'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/common.rb:118:in install_template'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/common.rb:49:inblock in install_template_after_successful_connection'"]}
[2018-07-26T11:53:16,882][ERROR][logstash.pipeline ] Error registering plugin {:pipeline_id=>"mypipeline_1", :plugin=>"#<LogStash::FilterDelegator:0x5eaa8acd @metric_events_out=org.jruby.proxy.org.logstash.instrument.metrics.counter.LongCounter$Proxy2 - name: out value:0, @metric_events_in=org.jruby.proxy.org.logstash.instrument.metrics.counter.LongCounter$Proxy2 - name: in value:0, @metric_events_time=org.jruby.proxy.org.logstash.instrument.metrics.counter.LongCounter$Proxy2 - name: duration_in_millis value:0, @id="81faf2474e19610debbc305e1d2339f26704d907f2725b0657f636af1a22628a", @klass=LogStash::Filters::Grok, @metric_events=#<LogStash::Instrument::NamespacedMetric:0x1a605abf @metric=#<LogStash::Instrument::Metric:0x6d18c494 @collector=#<LogStash::Instrument::Collector:0x788c641e @agent=nil, @metric_store=#<LogStash::Instrument::MetricStore:0x727a7520 @store=#<Concurrent::map:0x00000000000fcc entries=2 default_proc=nil>, @structured_lookup_mutex=#Mutex:0x5a77096a, @fast_lookup=#<Concurrent::map:0x00000000000fd0 entries=141 default_proc=nil>>>>, @namespace_name=[:stats, :pipelines, :mypipeline_1, :plugins, :filters, :"81faf2474e19610debbc305e1d2339f26704d907f2725b0657f636af1a22628a", :events]>, @filter=<LogStash::Filters::Grok match=>{"message"=>"%{syslog5424PRI:syslog_index}%{GREEDYDATA:message}"}, overwrite=>["message"], tag_on_failure=>["forti_grok_failure"], id=>"81faf2474e19610debbc305e1d2339f26704d907f2725b0657f636af1a22628a", enable_metric=>true, periodic_flush=>false, patterns_files_glob=>"
", break_on_match=>true, named_captures_only=>true, keep_empty_captures=>false, timeout_millis=>30000, tag_on_timeout=>"_groktimeout">>", :error=>"pattern %{syslog5424PRI:syslog_index} not defined", :thread=>"#<Thread:0x2a60f39f run>"}
[2018-07-26T11:53:16,964][ERROR][logstash.pipeline ] Pipeline aborted due to error {:pipeline_id=>"mypipeline_1", :exception=>#<Grok::PatternError: pattern %{syslog5424PRI:syslog_index} not defined>, :backtrace=>["/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/jls-grok-0.11.5/lib/grok-pure.rb:123:in block in compile'", "org/jruby/RubyKernel.java:1292:inloop'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/jls-grok-0.11.5/lib/grok-pure.rb:93:in compile'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-filter-grok-4.0.3/lib/logstash/filters/grok.rb:281:inblock in register'", "org/jruby/RubyArray.java:1734:in each'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-filter-grok-4.0.3/lib/logstash/filters/grok.rb:275:inblock in register'", "org/jruby/RubyHash.java:1343:in each'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-filter-grok-4.0.3/lib/logstash/filters/grok.rb:270:inregister'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:340:in register_plugin'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:351:inblock in register_plugins'", "org/jruby/RubyArray.java:1734:in each'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:351:inregister_plugins'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:729:in maybe_setup_out_plugins'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:361:instart_workers'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:288:in run'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:248:inblock in start'"], :thread=>"#<Thread:0x2a60f39f run>"}
[2018-07-26T11:53:17,011][ERROR][logstash.agent ] Failed to execute action {:id=>:mypipeline_1, :action_type=>LogStash::ConvergeResult::FailedAction, :message=>"Could not execute action: PipelineAction::Create<mypipeline_1>, action_result: false", :backtrace=>nil}

change IP on my config files from localhost:9200 to IP:9200. This is the log output:

[2018-07-26T12:07:26,063][INFO ][logstash.outputs.elasticsearch] Attempting to install template {:manage_template=>{"template"=>"forti-", "version"=>50001, "settings"=>{"index.refresh_interval"=>"5s"}, "mappings"=>{"default"=>{"_all"=>{"enabled"=>true, "omit_norms"=>false}, "dynamic_templates"=>[{"message_field"=>{"path_match"=>"message", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "omit_norms"=>false}}}, {"string_fields"=>{"match"=>"", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "omit_norms"=>false, "fields"=>{"keyword"=>{"type"=>"keyword", "ignore_above"=>256}}}}}], "properties"=>{"@timestamp"=>{"type"=>"date", "include_in_all"=>false}, "@version"=>{"type"=>"keyword", "include_in_all"=>false}, "geoip"=>{"dynamic"=>true, "properties"=>{"ip"=>{"type"=>"ip"}, "location"=>{"type"=>"geo_point"}, "latitude"=>{"type"=>"half_float"}, "longitude"=>{"type"=>"half_float"}}}, "location"=>{"type"=>"geo_point"}}}}}}
[2018-07-26T12:07:26,081][INFO ][logstash.outputs.elasticsearch] Installing elasticsearch template to _template/forti-*
[2018-07-26T12:07:26,200][ERROR][logstash.outputs.elasticsearch] Failed to install template. {:message=>"Got response code '400' contacting Elasticsearch at URL 'http://IP:9200/_template/forti-*'", :class=>"LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError", :backtrace=>["/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/http_client/manticore_adapter.rb:80:in perform_request'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/http_client/pool.rb:291:inperform_request_to_url'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/http_client/pool.rb:278:in block in perform_request'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/http_client/pool.rb:373:inwith_connection'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/http_client/pool.rb:277:in perform_request'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/http_client/pool.rb:285:inblock in Pool'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/http_client.rb:348:in template_put'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/http_client.rb:86:intemplate_install'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/template_manager.rb:21:in install'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/template_manager.rb:9:ininstall_template'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/common.rb:118:in install_template'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.2.0-java/lib/logstash/outputs/elasticsearch/common.rb:49:inblock in install_template_after_successful_connection'"]}
[2018-07-26T12:07:27,170][INFO ][logstash.inputs.beats ] Beats inputs: Starting input listener {:address=>"0.0.0.0:5044"}
[2018-07-26T12:07:27,645][INFO ][logstash.inputs.beats ] Beats inputs: Starting input listener {:address=>"0.0.0.0:5044"}
[2018-07-26T12:07:27,736][INFO ][logstash.pipeline ] Pipeline started successfully {:pipeline_id=>"mypipeline_1", :thread=>"#<Thread:0x34d90fe5 run>"}
[2018-07-26T12:07:27,809][INFO ][logstash.inputs.udp ] Starting UDP listener {:address=>"0.0.0.0:5000"}
[2018-07-26T12:07:27,874][INFO ][org.logstash.beats.Server] Starting server on port: 5044
[2018-07-26T12:07:27,875][INFO ][org.logstash.beats.Server] Starting server on port: 5044
[2018-07-26T12:07:27,979][INFO ][logstash.inputs.udp ] UDP listener started {:address=>"0.0.0.0:5000", :receive_buffer_bytes=>"106496", :queue_size=>"2000"}
[2018-07-26T12:07:28,120][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:mypipeline_1], :non_running_pipelines=>[]}
[2018-07-26T12:07:28,619][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
[2018-07-26T12:07:34,257][ERROR][logstash.pipeline ] A plugin had an unrecoverable error. Will restart this plugin.
Pipeline_id:mypipeline_1
Plugin: <LogStash::Inputs::Beats port=>5044,
[2018-07-26T12:07:35,263][INFO ][org.logstash.beats.Server] Starting server on port: 5044

The problem now is: Can't get other logs like event viewer to display on kibana
and it seems to be a template issue.

This topic was automatically closed 28 days after the last reply. New replies are no longer allowed.