How to create multiple indices in Logstash

Hi!
I am handling pfSense data with Logstash but having problems with indexing. I have 2 pfSense firewalls. This is my configuration.
01-inputpf1.conf

input {
  tcp {
    type => "syslog1"
    port => 5140
  }
}
input {
  udp {
    type => "syslog1"
    port => 5140
  }
}

01-inputpf2.conf

input {
  tcp {
    type => "syslog2"
    port => 1234
  }
}
input {
  udp {
    type => "syslog2"
    port => 1234
  }
}

10-filtersyslog1.conf

filter {  
  if [type] == "syslog1" {
    #change to pfSense ip address
    if [host] =~ /10\.10\.1\.11/ {
      mutate {
        add_tag => ["PFSense", "Ready"]
      }
    }
    if "Ready" not in [tags] {
      mutate {
        add_tag => [ "syslog1" ]
      }
    }
  }
}
filter {  
  if [type] == "syslog1" {
    mutate {
      remove_tag => "Ready"
    }
  }
}

filter {  
  if "syslog1" in [tags] {
    grok {
      match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" }
      add_field => [ "received_at", "%{@timestamp}" ]
      add_field => [ "received_from", "%{host}" ]
    }
    syslog_pri { }
    date {
      match => [ "syslog_timestamp", "MMM  d HH:mm:ss", "MMM  dd HH:mm:ss" ]
      locale => "en"
    }
    if !("_grokparsefailure" in [tags]) {
      mutate {
        replace => [ "@source_host", "%{syslog_hostname}" ]
        replace => [ "@message", "%{syslog_message}" ]
      }
    }
    mutate {
      remove_field => [ "syslog_hostname", "syslog_message", "syslog_timestamp" ]
    }
  }
}

11-filtersyslog2.conf

filter {  
  if [type] == "syslog2" {
    #change to pfSense ip address
    if [host] =~ /10\.10\.2\.2/ {
      mutate {
        add_tag => ["PFSense", "Ready"]
      }
    }
    if "Ready" not in [tags] {
      mutate {
        add_tag => [ "syslog2" ]
      }
    }
  }
}
filter {  
  if [type] == "syslog2" {
    mutate {
      remove_tag => "Ready"
    }
  }
}

filter {  
  if "syslog2" in [tags] {
    grok {
      match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" }
      add_field => [ "received_at", "%{@timestamp}" ]
      add_field => [ "received_from", "%{host}" ]
    }
    syslog_pri { }
    date {
      match => [ "syslog_timestamp", "MMM  d HH:mm:ss", "MMM  dd HH:mm:ss" ]
      locale => "en"
    }
    if !("_grokparsefailure" in [tags]) {
      mutate {
        replace => [ "@source_host", "%{syslog_hostname}" ]
        replace => [ "@message", "%{syslog_message}" ]
      }
    }
    mutate {
      remove_field => [ "syslog_hostname", "syslog_message", "syslog_timestamp" ]
    }
  }
}

20-filterpfsense.conf

filter {  
  if "PFSense1" in [tags] {
    grok {
      add_tag => [ "firewall" ]
      match => [ "message", "<(?<evtid>.*)>(?<datetime>(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\s+(?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9]) (?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:[0-5][0-9])) (?<prog>.*?): (?<msg>.*)" ]
    }
    mutate {
      gsub => ["datetime","  "," "]
    }
    date {
      match => [ "datetime", "MMM dd HH:mm:ss" ]
      timezone => "Asia/Ho_Chi_Minh"
    }
    mutate {
      replace => [ "message", "%{msg}" ]
    }
    mutate {
      remove_field => [ "msg", "datetime" ]
    }
  }
  if [prog] =~ /^filterlog$/ {
    mutate {
      remove_field => [ "msg", "datetime" ]
    }
    grok {
      patterns_dir => "/etc/logstash/conf.d/patterns"
      match => [ "message", "%{PFSENSE_LOG_DATA}%{PFSENSE_IP_SPECIFIC_DATA}%{PFSENSE_IP_DATA}%{PFSENSE_PROTOCOL_DATA}",
                 "message", "%{PFSENSE_LOG_DATA}%{PFSENSE_IPv4_SPECIFIC_DATA_ECN}%{PFSENSE_IP_DATA}%{PFSENSE_PROTOCOL_DATA}",
                 "message", "%{PFSENSE_LOG_DATA}%{PFSENSE_IPv6_SPECIFIC_DATA}"]
    }
    mutate {
      lowercase => [ 'proto' ]
    }
    geoip {
      add_tag => [ "GeoIP" ]
      source => "src_ip"
      database => "/etc/logstash/GeoLite2-City.mmdb"
    }
  }
}

50-output.conf

output {
  stdout { codec => rubydebug }
  if [type] == "syslog1" {
    elasticsearch {
      hosts => ["http://10.10.1.162:9200"]
      index => "pfsense1-%{+YYYY.MM.dd}"
    }
    elasticsearch {
      hosts => ["http://10.10.1.162:9200"]
      index => "logstash-%{+YYYY.MM.dd}"
    }
  }
  else if [type] == "syslog2" {
    elasticsearch {
      hosts => ["http://10.10.1.162:9200"]
      index => "pfsense2-%{+YYYY.MM.dd}"
    }
  }
  else if [type] != "syslog2" {
    elasticsearch {
      hosts => ["http://10.10.1.162:9200"]
      index => "logstash-%{+YYYY.MM.dd}"
    }
  }
}

When I use the above configuration, Logstash creates 3 indices (pfsense1, pfsense2 and logstash), but I cannot use the data to create visualizations.
But when I use the following output instead, it works and I can create visualizations. How can I split the data from the 2 pfSense firewalls into 2 different indices (a sketch of a per-firewall output follows the working config below)? Please help me, thank you.

output {
  stdout { codec => rubydebug }
  elasticsearch {
    hosts => ["http://10.10.1.162:9200"]
    index => "logstash-%{+YYYY.MM.dd}"
  }
}
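For reference, here is a minimal sketch of an output that keeps one elasticsearch block per firewall. It assumes the type field set by the tcp/udp inputs (syslog1 / syslog2) is still present when the event reaches the output stage; the host and index names are copied from the configuration above, and this is only a sketch, not a confirmed fix for the visualization problem.

output {
  stdout { codec => rubydebug }
  # Route each firewall's events to its own daily index based on the
  # type field assigned by the tcp/udp inputs.
  if [type] == "syslog1" {
    elasticsearch {
      hosts => ["http://10.10.1.162:9200"]
      index => "pfsense1-%{+YYYY.MM.dd}"
    }
  } else if [type] == "syslog2" {
    elasticsearch {
      hosts => ["http://10.10.1.162:9200"]
      index => "pfsense2-%{+YYYY.MM.dd}"
    }
  } else {
    # Fallback for events without a recognised type.
    elasticsearch {
      hosts => ["http://10.10.1.162:9200"]
      index => "logstash-%{+YYYY.MM.dd}"
    }
  }
}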

When you say that you can't use the data to create visualizations, do you mean that the data isn't inside of those indices? Or something else?

With the first example, if you do GET _cat/indices what do you get?

Oh, I tried again and tried to create a visualization with the logstash index, and it worked. But the 2 pfsense indices still cannot be used to create visualizations. Please help me.

root@srv-test-ids:/etc/logstash/conf.d# curl '10.10.1.162:9200/_cat/indices?v'
health status index                           uuid                   pri rep docs.count docs.deleted store.size pri.store.size
green  open   .kibana_task_manager            s1R2_DFtQIaHDlq0RZJ4og   1   0          2            0     31.1kb         31.1kb
yellow open   metricbeat-7.3.1                lnmnZ56VQ-Gsrr5WbppPRw   1   1     747080            0        1gb            1gb
green  open   .monitoring-es-7-2019.10.12     bWP2UcPPRx6Y5Mithy3FuA   1   0     182195       156164     94.9mb         94.9mb
green  open   .monitoring-es-7-2019.10.13     dVY9G5TdSki8cKetmEZZpQ   1   0     182214       156182     96.4mb         96.4mb
yellow open   pfsense1-2019.10.16             zWtVFMXKTZm33RuvrlLKcw   1   1       4720            0      3.2mb          3.2mb
green  open   .monitoring-es-7-2019.10.10     p7IbL5XMSp65-uUKtxn7ZQ   1   0     190860       173456    103.7mb        103.7mb
green  open   .monitoring-es-7-2019.10.11     bZFo1EVzSg2R4qHT3UBWbA   1   0     183245       136278     95.1mb         95.1mb
yellow open   pfsense2-2019.10.16             CU48Th1nRbWCdquK1kEJwA   1   1       6279            0      4.3mb          4.3mb
green  open   .monitoring-es-7-2019.10.16     E1vn0MWVRC2N_tgNb-4jeA   1   0      18262        17058     12.2mb         12.2mb
yellow open   logstash-2019.10.16             wHLRRXYUS7e0Qkdfy3YeBw   1   1       8754            0      3.9mb          3.9mb
green  open   .monitoring-es-7-2019.10.14     TVxW1thVTfGBRpgrL2Lelg   1   0     205097       201662    119.5mb        119.5mb
green  open   .monitoring-es-7-2019.10.15     b5dO4Qb_Qk6TxKqV50no2Q   1   0     196534       182896    120.1mb        120.1mb
green  open   .kibana_1                       _144DGkVQxGYFsfh8rJUwg   1   0        720           19    411.7kb        411.7kb
green  open   .monitoring-kibana-7-2019.10.15 g2XzupnoS4W_IMHGUsI4tg   1   0       8639            0      2.5mb          2.5mb
green  open   .monitoring-kibana-7-2019.10.14 e186OLPMRm2emVCVW24tRA   1   0       8639            0      2.5mb          2.5mb
green  open   .monitoring-kibana-7-2019.10.13 fX4Jhh46R5OmYa8pN8wdMw   1   0       8639            0      2.4mb          2.4mb
green  open   .monitoring-kibana-7-2019.10.12 qXv_oqxwSgOFbXBsSmZrlg   1   0       8639            0      2.4mb          2.4mb
green  open   .monitoring-kibana-7-2019.10.11 FK63LxD0RXqo0ts157olww   1   0       8627            0      2.4mb          2.4mb
green  open   .monitoring-kibana-7-2019.10.10 eGryc6cPTEaRcir6ygsZfg   1   0       8639            0      2.5mb          2.5mb
green  open   .monitoring-kibana-7-2019.10.16 TAyYswybRIO3pkS8aONTYA   1   0        753            0    278.9kb        278.9kb

When I modified the output, everything was fine, but I still cannot create a visualization from my logstash2-* index. Do you have any suggestions?
50-output.conf

output {
  stdout { codec => rubydebug }
  if [type] == "syslog1" {
    elasticsearch {
      hosts => ["http://10.10.1.162:9200"]
      index => "pfsense1-%{+YYYY.MM.dd}"
    }
    elasticsearch {
      hosts => ["http://10.10.1.162:9200"]
      index => "logstash-%{+YYYY.MM.dd}"
    }
  }
  else if [type] == "syslog2" {
    elasticsearch {
      hosts => ["http://10.10.1.162:9200"]
      index => "pfsense2-%{+YYYY.MM.dd}"
    }
  }
  else if [type] != "syslog2" {
    elasticsearch {
      hosts => ["http://10.10.1.162:9200"]
      index => "logstash2-%{+YYYY.MM.dd}"
    }
  }
}

This is the error I get when creating a visualization. The logstash-* index still works fine.

Sorry, this is the configuration I have edited:
50-output.conf

output {
  stdout { codec => rubydebug }
  if [type] == "syslog1" {
    elasticsearch {
      hosts => ["http://10.10.1.162:9200"]
      index => "pfsense1-%{+YYYY.MM.dd}"
    }
    elasticsearch {
      hosts => ["http://10.10.1.162:9200"]
      index => "logstash-%{+YYYY.MM.dd}"
    }
  }
  else if [type] == "syslog2" {
    elasticsearch {
      hosts => ["http://10.10.1.162:9200"]
      index => "pfsense2-%{+YYYY.MM.dd}"
    }
    elasticsearch {
      hosts => ["http://10.10.1.162:9200"]
      index => "logstash2-%{+YYYY.MM.dd}"
    }
  }
}

Maybe you need to create an index pattern for the other indices?
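For example, in Kibana 7.x an index pattern can be created under Management → Index Patterns → Create index pattern (e.g. pfsense1-* and pfsense2-*, with @timestamp as the time field), or through the saved objects API. A rough sketch, assuming Kibana runs on the same host as in the thread and listens on the default port 5601:

curl -X POST 'http://10.10.1.162:5601/api/saved_objects/index-pattern' \
  -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
  -d '{"attributes": {"title": "pfsense1-*", "timeFieldName": "@timestamp"}}'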

Or maybe the mapping isn't correct? For example, in order to do aggregations you need to have a keyword mapping type. Or if you want a geohash, the field also needs a certain type (geo_point).

It's hard to say what is going wrong. If you have data in the index, you should be able to visualize it.
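One way to narrow it down is to look at the mapping and at a sample document with the standard Elasticsearch APIs. A sketch against the host from the thread (the field names are only examples and depend on what your grok patterns produce):

# Show how the fields in the pfsense1 indices are mapped.
# Aggregatable strings should be keyword (or text with a .keyword sub-field),
# and a geoip location must be mapped as geo_point for map visualizations.
curl 'http://10.10.1.162:9200/pfsense1-*/_mapping?pretty'

# Fetch one document to see which fields were actually parsed.
curl 'http://10.10.1.162:9200/pfsense1-*/_search?size=1&pretty'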

This topic was automatically closed 28 days after the last reply. New replies are no longer allowed.