Hi all.
I have an ELK container and I'm sending logs to it with Filebeat.
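For completeness, the Filebeat side looks roughly like this (a sketch; the log path and host are placeholders, not my real values — SSL is off to match the ssl => false in the beats input below):

filebeat.prospectors:
- input_type: log
  paths:
    - /var/log/myapp/*.log   # placeholder path

output.logstash:
  # placeholder host; the port matches the beats input in 02-beats-input.conf
  hosts: ["elk-host:5044"]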
root@20e13712ecb2:/# cat /etc/logstash/conf.d/02-beats-input.conf
input {
  beats {
    port => 5044
    ssl => false
    ssl_certificate => "/etc/pki/tls/certs/logstash-beats.crt"
    ssl_key => "/etc/pki/tls/private/logstash-beats.key"
  }
}
filter {
  grok {
    match => { 'message' => '(.*)%{TIMESTAMP_ISO8601:ttime} %{TZ} [%{DATA:module}] %{WORD:function} -> %{WORD:loglevel}(.*?) %{GREEDYDATA:message}' }
  }
  date {
    "match" => [
      "ttime",
      "yyyy-MM-dd HH:mm:ss.SSS",
      "ISO8601"
    ]
  }
}
output {
  elasticsearch {
    hosts => [ "localhost:9200" ]
  }
  if "_grokparsefailure" in [tags] {
    file { path => "/var/log/logstash/grokparsefailure.log" }
  }
}
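For reference, a log line looks more or less like this (reconstructed from the grok pattern above; the module, function, and message text are made-up placeholders):

17-03-30 17:35:21.319 UTC [my_module] my_function -> INFO something happened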
With the beats input config above, I get
beats_input_codec_plain_applied, _grokparsefailure
in the tags field on every log entry (all the logs sent to the ELK container share the same format).
What I want, of course, is for the timestamp inside each log entry to become the @timestamp shown in Kibana.
When I delete the date {} block, the grok succeeds: ttime is extracted correctly (values like '17-03-30 17:35:21.319') and the other fields are captured too, except that the message field, for some reason, still contains the entire log entry.
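In case it helps, this is the filter variant I'm planning to try next. It's just a guess on my part (escaping the square brackets, the two-digit-year pattern, and the overwrite option are all assumptions I haven't verified):

filter {
  grok {
    # [ and ] are regex metacharacters, so escape the literal brackets around the module name
    match => { "message" => "(.*)%{TIMESTAMP_ISO8601:ttime} %{TZ} \[%{DATA:module}\] %{WORD:function} -> %{WORD:loglevel}(.*?) %{GREEDYDATA:message}" }
    # replace the original message with the captured remainder instead of keeping both
    overwrite => [ "message" ]
  }
  date {
    # ttime has a two-digit year ('17-03-30 ...'), so try yy-MM-dd rather than yyyy-MM-dd
    match => [ "ttime", "yy-MM-dd HH:mm:ss.SSS", "ISO8601" ]
  }
}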
Any advice would be appreciated.
Thanks and regards.