I have Logstash working properly, shipping ELB logs from S3 into a single Elasticsearch index. The relevant config is below, and it works great. Now I'd like to route some of the ELB logs to specific indexes. Say the "message" string contains the phrase "company-A": I want those events to go to an index called "elb-company-a-%{+YYYY.MM.dd}", while everything else keeps going to "elb-%{+YYYY.MM.dd}".
Do I need to do this in the filter, or in the output, or both?
IN
input {
  s3 {
    type => "elb"
    bucket => "MY-elb-logs"
    prefix => "MY-S3-BUCKET"
    region => "us-east-1"
    use_ssl => "false"
    delete => "false"
    interval => "120"
    temporary_directory => "/storage/elk-storage/logstash/"
    sincedb_path => "/var/log/logstash/my_last_elb_s3_file"
    codec => plain { charset => "US-ASCII" }
  }
}
FILTER
filter {
  if [type] == "elb" {
    grok {
      match => [ 'message', '%{TIMESTAMP_ISO8601:timestamp} %{NOTSPACE:elb_name} %{IP:client_ip}:%{NUMBER:client_port} %{IP:backend_ip}:%{NUMBER:backend_port} %{NUMBER:request_processing_time} %{NUMBER:backend_processing_time} %{NUMBER:response_processing_time} (?:%{NUMBER:elb_status_code}|-) (?:%{NUMBER:backend_status_code}|-) %{NUMBER:elb_received_bytes} %{NUMBER:elb_sent_bytes} (?:%{QS:elb_request}|-) (?:%{QS:userAgent}|-) (?:%{NOTSPACE:elb_sslcipher}|-) (?:%{NOTSPACE:elb_sslprotocol}|-)' ]
    }
    date {
      match => [ "timestamp", "ISO8601" ]
    }
    # Add geolocalization attributes based on ip.
    geoip {
      source => "client_ip"
      target => "geoip"
      database => "/etc/logstash/GeoLiteCity.dat"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
    }
    mutate {
      convert => [ "[geoip][coordinates]", "float" ]
    }
  }
}
OUT
output {
  if [type] == "elb" {
    elasticsearch {
      hosts => ["<% @elk_nodes.each do |elk_node| -%><%= elk_node['ipaddress'] -%><% end -%>:9200"]
      sniffing => true
      manage_template => false
      index => "elb-%{+YYYY.MM.dd}"
      document_type => "%{[@metadata][type]}"
    }
  }
}
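
The closest I've come to an answer is putting a conditional directly in the output, roughly like the sketch below. The hosts are trimmed to a placeholder here, and "company-A" stands in for the real string I'd match on. I'm not sure whether a substring test on [message] in the output is the right way to do this:

output {
  if [type] == "elb" {
    # Route company-A traffic to its own index; everything else keeps the old index name.
    if "company-A" in [message] {
      elasticsearch {
        hosts => ["localhost:9200"]   # placeholder for my real host list
        sniffing => true
        manage_template => false
        index => "elb-company-a-%{+YYYY.MM.dd}"
        document_type => "%{[@metadata][type]}"
      }
    } else {
      elasticsearch {
        hosts => ["localhost:9200"]   # placeholder for my real host list
        sniffing => true
        manage_template => false
        index => "elb-%{+YYYY.MM.dd}"
        document_type => "%{[@metadata][type]}"
      }
    }
  }
}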
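
The other idea I had was to make the decision in the filter instead: set a [@metadata] field with mutate and interpolate it into the index name, so the output only needs one elasticsearch block. Again just a sketch, and the field name index_prefix is something I made up:

filter {
  if [type] == "elb" {
    if "company-A" in [message] {
      mutate { add_field => { "[@metadata][index_prefix]" => "elb-company-a" } }
    } else {
      mutate { add_field => { "[@metadata][index_prefix]" => "elb" } }
    }
  }
}
output {
  if [type] == "elb" {
    elasticsearch {
      hosts => ["localhost:9200"]   # placeholder for my real host list
      sniffing => true
      manage_template => false
      index => "%{[@metadata][index_prefix]}-%{+YYYY.MM.dd}"
      document_type => "%{[@metadata][type]}"
    }
  }
}

(I used [@metadata] so the prefix field doesn't get indexed along with the event.) Is one of these the right approach, or is there a cleaner way?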