1 pipeline not working

Hi Team,

ELK version:

I have set up one pipeline in Logstash. In the input I am using 4 file inputs for ingesting different files, and in the output I am pointing the same 4 to Elasticsearch. In Kibana I get 4 different indices, but surprisingly the 4th index also contains the data from the 1st, 2nd, and 3rd file inputs. When I run manually using bin/logstash -f <pipeline.conf> I get the desired result (the 1st input goes to the 1st index, the 2nd input's data goes to the 2nd index, and so on), but with systemctl start logstash I do not get the expected result.

pipeline.conf

file name: /etc/logstash/pipelines.yml 
# This file is where you define your pipelines. You can define multiple.
# For more information on multiple pipelines, see the documentation:
#   https://www.elastic.co/guide/en/logstash/current/multiple-pipelines.html

- pipeline.id: main
  path.config: "/etc/logstash/conf.d/*.conf"


Not working as expected

file name: haproxy.conf

input {
  file {
    path => "/APIGW_APP_LOG/3scale_OCP_haproxy-64/haproxy.log"
    start_position => "beginning"
    sincedb_path => "/dev/null"
  }
}

filter {
  grok {
    match => {
      "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{HOSTNAME:host} haproxy\[%{NUMBER:haproxy_pid}\]: %{NOTSPACE:haproxy_hostname} %{IP:client_ip}:%{NUMBER:client_port} \[%{DATA:resptimestamp}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name} %{NUMBER:time_Tq}/%{NUMBER:time_Tw}/%{NUMBER:time_Tc}/%{NUMBER:time_Tr}/%{NUMBER:time_Tt} %{NUMBER:http_status} %{NUMBER:bytes_read} - (?:-|%{NUMBER:captured_request_cookie}) - ---- %{NUMBER:actconn}/%{NUMBER:feconn}/%{NUMBER:beconn}/%{NUMBER:srvconn}/%{NUMBER:retries} %{NUMBER:queue_time}/%{NUMBER:queue_length} \{(?:%{DATA:collration_id})?\} \"%{WORD:method} %{DATA:request} HTTP/%{NUMBER:http_version}\""
    }
  }
}

output {
  elasticsearch {
    hosts => ["http://localhost:9200"]
    user => "elastic"
    password => "redhat"
    action => "index"
    index => "esbhaproxy-64-logs-%{+YYYY.MM.dd}"
  }

  stdout { codec => rubydebug }
}


=============

Working as expected

file name: logstash.conf

input {
  file {
    path => "/APIGW_APP_LOG/3scale_OCP_haproxy-66/haproxy.log"
    start_position => "beginning"
    sincedb_path => "/dev/null"
  }


  file {
    path => "/APIGW_APP_LOG/3scale_OCP_haproxy/haproxy.log"
    start_position => "beginning"
    sincedb_path => "/dev/null"
  }

  file {
    path => "/APIGW_APP_LOG/3scale_OCP_haproxy-bll/haproxy.log"
    start_position => "beginning"
    sincedb_path => "/dev/null"
  }

}

filter {
  # BLL logs
  if [message] =~ "haproxy0.apigw.finopaymentbank.in" {
    mutate {
      add_tag => ["bll_haproxy_log"]
    }

    grok {
      match => {
        "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{IP:haproxy_host} haproxy\[%{NUMBER:pid}\]: %{HOSTNAME:haproxy_hostname} %{IP:client_ip}:%{NUMBER:client_port} \[%{DATA:resptimestamp}\ %{NOTSPACE:frontend} %{NOTSPACE:backend}/%{NOTSPACE:server} %{NUMBER:Tq}/%{NUMBER:Tw}/%{NUMBER:Tc}/%{NUMBER:Tr}/%{NUMBER:Tt} %{NUMBER:status} %{NUMBER:bytes} - %{NUMBER:backend_port} - %{NOTSPACE:termination_state} %{NUMBER:actconn}/%{NUMBER:feconn}/%{NUMBER:beconn}/%{NUMBER:srv_conn}/%{NUMBER:retries} %{NUMBER:srv_queue}/%{NUMBER:backend_queue} \{%{DATA:service_name}\} \"%{WORD:method} %{URIPATHPARAM:request} HTTP/%{NUMBER:http_version}\""
      }
    }
  }

  # OCP logs
  if [message] =~ "worker" {
    mutate {
      add_tag => ["ocp_haproxy_log"]
    }

    grok {
      match => {
        "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{HOSTNAME:hostname} haproxy\[%{NUMBER:pid}\]: %{IP:client_ip}:%{NUMBER:client_port} \[%{DATA:resptimestamp}\] %{NOTSPACE:frontend} %{NOTSPACE:backend}/%{NOTSPACE:server} %{NUMBER:Tq:int}/%{NUMBER:Tw:int}/%{NUMBER:Tc:int}/%{NUMBER:Tr:int}/%{NUMBER:Tt:int} %{NUMBER:status:int} %{NUMBER:bytes:int} - - %{NOTSPACE:termination_state} %{NUMBER:actconn:int}/%{NUMBER:feconn:int}/%{NUMBER:beconn:int}/%{NUMBER:srv_conn:int}/%{NUMBER:retries:int} %{NUMBER:srv_queue:int}/%{NUMBER:backend_queue:int} \"%{WORD:method} %{URIPATHPARAM:request} HTTP/%{NUMBER:http_version}\""
      }
    }
  }

  # SRVPRDESBHA logs
  if [message] =~ "srvprdesbha" {
    grok {
      match => {
        "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{HOSTNAME:host} haproxy\[%{NUMBER:haproxy_pid}\]: %{NOTSPACE:haproxy_hostname} %{IP:client_ip}:%{NUMBER:client_port} \[%{DATA:resptimestamp}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name} %{NUMBER:time_Tq}/%{NUMBER:time_Tw}/%{NUMBER:time_Tc}/%{NUMBER:time_Tr}/%{NUMBER:time_Tt} %{NUMBER:http_status} %{NUMBER:bytes_read} - (?:-|%{NUMBER:captured_request_cookie}) - ---- %{NUMBER:actconn}/%{NUMBER:feconn}/%{NUMBER:beconn}/%{NUMBER:srvconn}/%{NUMBER:retries} %{NUMBER:queue_time}/%{NUMBER:queue_length} \{(?:%{DATA:collration_id})?\} \"%{WORD:method} %{DATA:request} HTTP/%{NUMBER:http_version}\""
      }
      add_tag => ["srvprdesbha"]
    }
  }
}

output {
  if "ocp_haproxy_log" in [tags] {
    elasticsearch {
      hosts => ["http://localhost:9200"]
      user => "elastic"
      password => "redhat"
      action => "index"
      index => "haproxy-ocp-logs-%{+YYYY.MM.dd}"
    }
  }

  if "bll_haproxy_log" in [tags] {
    elasticsearch {
      hosts => ["http://localhost:9200"]
      user => "elastic"
      password => "redhat"
      action => "index"
      index => "haproxy-bll-logs-%{+YYYY.MM.dd}"
    }
  }


  if "srvprdesbha" in [tags] {
   elasticsearch {
     hosts => ["http://localhost:9200"]
     user => "elastic"
     password => "redhat"
     action => "create"
     index => "srvprdesbha-66-logs-%{+YYYY.MM.dd}"
   }
 }
}



logstash will combine all of those files into a single configuration. It will read events from all of the inputs and (unless there are conditionals preventing it) send the events to all of the outputs.

Since there is no conditional around the elasticsearch output that writes to "esbhaproxy-64-logs-%{+YYYY.MM.dd}", all the events will be written there.
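
If you keep everything in one pipeline, a tag set at the file input plus a conditional around the output avoids that. A minimal sketch, assuming an arbitrary tag name "esb64" (not something from your config):

input {
  file {
    path => "/APIGW_APP_LOG/3scale_OCP_haproxy-64/haproxy.log"
    start_position => "beginning"
    sincedb_path => "/dev/null"
    tags => ["esb64"]               # mark events from this file so they can be routed later
  }
}

output {
  if "esb64" in [tags] {            # only events read from the esb-64 log reach this output
    elasticsearch {
      hosts => ["http://localhost:9200"]
      user => "elastic"
      password => "redhat"
      index => "esbhaproxy-64-logs-%{+YYYY.MM.dd}"
    }
  }
}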

Similarly, the grok filter from haproxy.conf will process every event and add a _grokparsefailure tag to most of them.
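
Wrapping that grok in the same conditional keeps it from running (and failing) on events from the other files. Again a sketch reusing the hypothetical "esb64" tag; the pattern itself is the one already in haproxy.conf:

filter {
  if "esb64" in [tags] {                  # only parse events that came from the esb-64 log
    grok {
      match => { "message" => "..." }     # existing esbhaproxy-64 pattern, unchanged
    }
  }
}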

If you want to run the two configurations separately then configure two pipelines using pipelines.yml.
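
For example, a minimal pipelines.yml along these lines (the pipeline ids and the assignment of one .conf file per pipeline are assumptions; adjust the paths to your layout):

# /etc/logstash/pipelines.yml
- pipeline.id: esb64
  path.config: "/etc/logstash/conf.d/haproxy.conf"

- pipeline.id: haproxy_multi
  path.config: "/etc/logstash/conf.d/logstash.conf"

Each pipeline then has its own inputs, filters, and outputs, so events from one configuration can no longer reach the other configuration's elasticsearch output.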