Hi all,
I have a Logstash setup that processes some logs (text files) and then sends the output to Elasticsearch, S3 and a file. The problem is that the output is not consistent, e.g. the columns are not always in the same order as in the input. Is there a way to force the output data to always be in the same order?
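For example (the values below are just placeholders, not real data), one line in the output file might come out as

{"privateip":"192.168.0.10","publicip":"203.0.113.5","portrange":"1024~1055","nat444":"NAT444"}

and the next one as

{"publicip":"203.0.113.6","portrange":"1056~1087","privateip":"192.168.0.11","nat444":"NAT444"}

i.e. the same fields, just in a different order (@timestamp and type left out here for brevity).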
Here is the logstash.conf file:
input {
  file {
    path => "/input.log.*"
    start_position => "beginning"
    sincedb_path => "/dev/null"
    codec => plain
    type => "cgn"
  }
}

filter {
  grok {
    patterns_dir => ["/etc/logstash/conf.d/patterns"]
    match => { "message" => "%{DATE:date} %{IP:ip} %{SECBIND:secbind} %{DUMP1:dump1} %{NAT444:nat444} %{DUMP2:dump2} %{PRIVATEIP:privateip} %{SRCVRFID:srcvrfid} %{PUBLICIP:publicip} %{PORTRANGE:portrange} %{TIME:time}" }
  }
  mutate {
    remove_field => [ "date", "ip", "srcvrfid", "path", "secbind", "dump1", "dump2", "@version", "host", "message" ]
    gsub => [
      "publicip", "[publicip\=\']", "",
      "privateip", "[privateip\=\']", "",
      "portrange", "[publicportrange\=\']", "",
      "time", "[time\=\']", "",
      "nat444", "[\,]", ""
    ]
  }
  date {
    match => [ "time", "yyyy-MM-dd HH:mm:ss" ]
    target => "@timestamp"
  }
  mutate {
    remove_field => [ "time" ]
  }
}
output {
  file {
    path => "/var/log/outputfile.log"
  }
  elasticsearch {
    hosts => ["elk.hyperoptic.com:9200"]
    index => "cgn"
  }
  s3 {
    access_key_id => "AccessKeyID"
    secret_access_key => "SecretAccessKey"
    region => "Region-1"
    bucket => "bucket-name"
    time_file => 1440
    size_file => 1073741824
    codec => "json"
    canned_acl => "private"
  }
}
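For the file output, one idea I had (untested, just a sketch using the fields that are left after my filter) is to replace the default json_lines codec with the line codec and an explicit format string, so the column order is fixed:

file {
  path => "/var/log/outputfile.log"
  # explicit sprintf format, so the fields always come out in this order
  codec => line {
    format => "%{@timestamp} %{privateip} %{publicip} %{portrange} %{nat444}"
  }
}

Would something like that work, and is there a similar way to fix the field order for the s3 and elasticsearch outputs?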
Here is the pattern file:
DATE (\w{3}\s*\d{1,2}\s*\d{1,2}\:\d{1,2}\:\d{1,2})
IP (\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b)
SECBIND (\-\s*\%\%.{1,20}\:)
DUMP1 (.{23})
NAT444 (.{5,9})
DUMP2 (.{16})
PRIVATEIP (\w{9}\=\'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\')
PORTRANGE (\w{15}\=\'\d{1,5}\~\d{1,5}\')
SRCVRFID (\w{8}\=\'\d\')
PUBLICIP (\w{8}\=\'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\')
TIME (\w{4}\=\'\d{4}\-\d{1,2}\-\d{1,2}\s*\d{1,2}\:\d{1,2}\:\d{1,2}\')