I have a stream: Filebeat -> Logstash -> (S3, Kafka).
Sometimes it does not guarantee ordering of events.
What should I check to solve this problem?
# logstash conf
# Logstash pipeline: receive events from Filebeat over the Beats protocol,
# split each message into topic_id|message_id|body, then fan out to S3 and Kafka.
#
# NOTE(review): on the ordering question — Logstash does not preserve event
# order when pipeline workers > 1 (the default). Run with `-w 1`
# (pipeline.workers: 1) to keep filter/output order — confirm against the
# installed Logstash version. Kafka additionally only guarantees ordering
# within a single partition (see the message_key comment below).
input {
beats {
port => 5100
type => "rhyme"
# NOTE(review): congestion_threshold was deprecated and later removed from
# the beats input (Logstash >= 5.x) — confirm it is still accepted by the
# installed version.
congestion_threshold => 60
}
}
filter {
if [type] == "rhyme" {
grok {
# Parse "topic_id|message_id|body". DATA is non-greedy, so the first two
# fields stop at the first "|"; GREEDYDATA captures the remainder.
match => ["message", "%{DATA:topic_id}\|%{DATA:message_id}\|%{GREEDYDATA:body}"]
}
}
}
output {
if [type] == "rhyme" {
s3 {
# Placeholder credentials/bucket — must be filled in before deployment.
access_key_id => "???"
secret_access_key => "???"
region => "???"
bucket => "???"
# Rotate the uploaded object every 60 time units.
# NOTE(review): the unit of time_file changed across s3-output plugin
# versions (minutes in older releases) — confirm for the installed one.
time_file => 60
prefix => "rhyme-"
}
kafka {
# Topic and partition key come from the grok-extracted fields. Kafka
# preserves order only within one partition, so events that share a
# message_key (here: message_id) land on the same partition in order.
topic_id => "%{topic_id}"
message_key => "%{message_id}"
# Emit only the body field as the record value (drops the Logstash
# event envelope).
codec => plain {
format => "%{body}"
}
bootstrap_servers => "internal.kafka:6667"
}
}
}
# filebeat conf
# Filebeat configuration: ship D:\*.log to Logstash on internal.logstash:5100.
# NOTE(review): indentation was flattened in the original paste — nesting below
# is reconstructed to the standard Filebeat 1.x layout; confirm against the
# deployed file.
filebeat:
  # List of prospectors to fetch data.
  prospectors:
    - paths:
        # Single quotes keep the backslash and the "*" glob literal.
        - 'D:\*.log'
      input_type: log
      # Becomes the "type" field, matched as [type] == "rhyme" in Logstash.
      document_type: rhyme
      # Skip files not modified within the last 2 hours.
      ignore_older: 2h

output:
  logstash:
    # The Logstash hosts (must match the beats input port).
    hosts: ["internal.logstash:5100"]

logging:
  level: debug
  # enable file rotation with default configuration
  to_files: true
  # do not log to syslog
  to_syslog: false
  files:
    # Quoted: path contains spaces and backslashes.
    path: 'C:\Program Files\Filebeat\log'
    rotateeverybytes: 10485760  # = 10MB
    name: beat.log
    keepfiles: 7