Thanks for helping me.
I started the Logstash container as follows:
sudo docker run --rm -it --network mynetwork --name logstash -p 5000:5000 -v ~/pipeline/:/usr/share/logstash/pipeline/ docker.elastic.co/logstash/logstash:7.7.0
The Logstash configuration is as follows:
# Listen for syslog messages (TCP and UDP) on port 5000; this is the port the
# Docker syslog log-driver of the other containers points at.
input {
syslog {
port => 5000
#type => "docker"
}
}
filter {
# Re-parse the raw line into RFC5424 fields (version, timestamp, host,
# container name, proc-id, msgid, structured data, free-text message).
# NOTE: the pattern must be one single line inside the quotes — the original
# config had a literal line break inside the string, which is invalid and is
# one likely cause of the _grokparsefailure tag.
# NOTE(review): this pattern assumes RFC5424 framing, but Docker's syslog
# log-driver emits RFC3164 by default; start the containers with
# --log-opt syslog-format=rfc5424micro (or rfc5424) so the format matches — TODO confirm.
grok {
match => { "message" => "%{SYSLOG5424PRI}%{NONNEGINT:ver} +(?:%{TIMESTAMP_ISO8601:ts}|-) +(?:%{HOSTNAME:service}|-) +(?:%{NOTSPACE:containerName}|-) +(?:%{NOTSPACE:proc}|-) +(?:%{WORD:msgid}|-) +(?:%{SYSLOG5424SD:sd}|-|) +%{GREEDYDATA:msg}" }
}
# Decode the PRI value into facility/severity labels.
syslog_pri { }
# Use the syslog timestamp (set by the syslog input) as the event time.
date {
match => [ "syslog_timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
}
# Drop intermediate/duplicate fields once parsing is done.
# NOTE: this list must not be wrapped mid-word — the original config split
# "syslog_facility" across two lines, corrupting the field name.
mutate {
remove_field => [ "message", "priority", "ts", "severity", "facility", "facility_label", "severity_label", "syslog5424_pri", "proc", "syslog_severity_code", "syslog_facility_code", "syslog_facility", "syslog_severity", "syslog_hostname", "syslog_message", "syslog_timestamp", "ver" ]
}
# The syslog input tags lines its own RFC3164 grok could not parse; we parse
# them ourselves above, so clear that tag.
mutate {
remove_tag => [ "_grokparsefailure_sysloginput" ]
}
# Strip digits and dashes from the service name (e.g. scaled-service suffixes).
mutate {
gsub => [
"service", "[0123456789-]", ""
]
}
# If the message body looks like JSON, expand it into top-level fields and
# drop events whose JSON cannot be parsed.
if [msg] =~ "^ *{" {
json {
source => "msg"
}
if "_jsonparsefailure" in [tags] {
drop {}
}
mutate {
remove_field => [ "msg" ]
}
}
}
# Ship parsed events to Elasticsearch and also print each event to the
# container's stdout (rubydebug) for debugging.
output {
elasticsearch {
hosts => ["elasticsearch:9200"]
}
stdout { codec => rubydebug }
}
The containers whose logs I want to collect were started as follows:
sudo docker run -d --name piet --log-driver=syslog --log-opt syslog-address=tcp://:5000 --log-opt syslog-facility=daemon -p 8080:80 httpd
sudo docker run --name telegraf -d --log-driver=syslog --log-opt syslog-address=tcp://:5000 --log-opt syslog-facility=local5 -p 8092:8092/udp telegraf
The output that I see on Logstash is as follows:
{
"host" => "172.18.0.1",
"tags" => [
[0] "_grokparsefailure"
],
"@timestamp" => 2020-05-26T16:04:20.002Z,
"@version" => "1"
}
{
"host" => "172.18.0.1",
"tags" => [
[0] "_grokparsefailure"
],
"@timestamp" => 2020-05-26T16:04:20.003Z,
"@version" => "1"
}
Note: this is also what I see in Kibana.