Metrics filter output to Elasticsearch

Hi,

This seems like it should be simple. I wish I could say it was, but I cannot get metrics to show up in Kibana.

filter {
  metrics {
    meter => [ "events" ]
    add_tag => "metric"
  }
}
output {
  elasticsearch {
    cluster => "logstash"
  }
}
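
For reference, here's a stripped-down pipeline along the lines of what I'd expect to produce metric documents. The generator input and the conditional around the output are just my guess at how to isolate the metric events; the elasticsearch output mirrors the one above.

input {
  generator {
    lines => [ "test event" ]
    count => 0
  }
}
filter {
  metrics {
    meter => [ "events" ]
    add_tag => "metric"
  }
}
output {
  # only ship the periodic metric events, not the generator noise
  if "metric" in [tags] {
    elasticsearch {
      cluster => "logstash"
    }
  }
}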

I figure I'm probably missing something here, but I don't know what. I've reviewed the documentation, but it only mentions stdout or graphite outputs.
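
(From memory, the stdout example in the docs is something along these lines; I'm not sure of the exact meter field name, which seems to vary by Logstash version.)

output {
  if "metric" in [tags] {
    stdout {
      # prints the one-minute rate of the "events" meter;
      # the field reference ([events][rate_1m] vs. events.rate_1m) depends on the version
      codec => line { format => "rate: %{[events][rate_1m]}" }
    }
  }
}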

Does anyone know what I'm doing wrong?

Thanks!

Can you see any events with the metric tag in the Discover section of Kibana?

Can you provide your full Logstash config?
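
If nothing tagged "metric" shows up in Discover, it may also be worth temporarily adding a debug output so you can tell whether Logstash is emitting the metric events at all, separately from whether Elasticsearch is indexing them. Something like this alongside your existing output:

output {
  # temporary: dump metric events to the console for debugging
  if "metric" in [tags] {
    stdout { codec => rubydebug }
  }
}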

I cannot see any metric tags at all.

Here's the entire config.

input {
  redis {
    host => '<%= @redis_ip %>'
    port => 6900
    data_type => 'list'
    tags => "<%= @hostname %>"
    key => 'logstash:redis'
    password => "<%= @redis_pass %>"
  }
}
filter {
  dns {
    reverse => ["host"]
    action => ["replace"]
    add_tag => ["dns"]
  }
  metrics {
    meter => [ "events" ]
    add_tag => "metric"
  }
  grok {
    match => [ "message", "%{GREEDYDATA}java\.lang\.OutOfMemoryError%{GREEDYDATA}" ]
    add_tag => [ "outofmem" ]
  }
  if [type] == "auth" {
    grok {
      match => [ "message", "%{SYSLOGTIMESTAMP:logdate} %{HOST:name} %{DATA:program}\: %{GREEDYDATA:message}" ]
      overwrite => [ "message" ]
    }
    date {
      match => [ "logdate", "MMM dd HH:mm:ss" ]
      target => "@timestamp"
      timezone => "America/Los_Angeles"
    }
  }
  if [type] == "syslog" {
    grok {
      patterns_dir => "/opt/logstash/patterns/extra"
      break_on_match => false
      match => [ "message", "%{SYSLOGTIMESTAMP:logdate} %{DATA:hostname} %{DATA:program}\[?%{NUMBER:pid}?\]?\: %{GREEDYDATA:message}" ]
      overwrite => [ "message" ]
    }
    date {
      match => [ "logdate", "MMM dd HH:mm:ss" ]
      target => "@timestamp"
      timezone => "America/Chicago"
    }
  }
  if [type] == "monit" {
    grok {
      break_on_match => false
      match => [ "message", "\[%{TZ} %{SYSLOGTIMESTAMP:logdate}\]\s%{LOGLEVEL:severity_label}\s*\:\s%{GREEDYDATA:message}" ]
      overwrite => [ "message" ]
    }
    date {
      match => [ "logdate", "MMM dd HH:mm:ss" ]
      target => "@timestamp"
      timezone => "America/Los_Angeles"
    }
  }
  if [type] == "storm" {
    grok {
      break_on_match => false
      match => [ "message", "(?m)%{TIMESTAMP_ISO8601:logdate} %{PROG:verb} \[%{LOGLEVEL:severity_label}\] %{GREEDYDATA:parsed_message}" ]
    }
    grok {
      match => [ "message", "(?m)Emitting\:\s%{DATA:connection_status}?\-connections%{GREEDYDATA}" ]
      tag_on_failure => []
    }
    mutate {
      replace => [ "message", "%{parsed_message}" ]
      remove_field => [ "parsed_message" ]
    }
    date {
      match => [ "logdate", "YYYY-MM-dd HH:mm:ss" ]
      target => "@timestamp"
      timezone => "America/Los_Angeles"
    }
  }
  if [type] == "postgresql" {
    grok {
      break_on_match => false
      match => [ "message", "%{TIMESTAMP_ISO8601:logdate} %{TZ:tz} %{WORD:method}\: %{GREEDYDATA:message}" ]
      overwrite => [ "message" ]
    }
    date {
      match => [ "logdate", "YYYY-MM-dd HH:mm:ss" ]
      target => "@timestamp"
      timezone => "America/Los_Angeles"
    }
  }
  if [type] == "zookeeper" {
    multiline {
      pattern => "^%{TIMESTAMP_ISO8601} "
      negate => true
      what => previous
      stream_identity => "%{host}.%{file}.%{path}"
    }
    grok {
      patterns_dir => "/opt/logstash/patterns/extra"
      break_on_match => false
      match => [ "message", "(?m)%{TIMESTAMP_ISO8601:logdate} \[myid:%{POSINT}?\] \- %{LOGLEVEL:severity_label}\s? \[%{DATA:verb}\] \- %{GREEDYDATA:message}" ]
      overwrite => [ "message" ]
    }
    date {
      match => [ "logdate", "YYYY-MM-dd HH:mm:ss,SSS" ]
      target => "@timestamp"
      timezone => "America/Los_Angeles"
    }
  }
  if [type] == "supervisor" {
    grok {
      patterns_dir => "/opt/logstash/patterns/extra"
      break_on_match => false
      match => [ "message", "%{TIMESTAMP_ISO8601:logdate}?\s?%{LOGLEVEL:severity_label}?\s?%{GREEDYDATA:message}" ]
      overwrite => [ "message" ]
    }
    date {
      match => [ "logdate", "YYYY-MM-dd HH:mm:ss,SSS" ]
      target => "@timestamp"
      timezone => "America/Los_Angeles"
    }
  }
  if [type] == "nginx" {
    grok {
      break_on_match => false
      match => [ "message", "%{IPORHOST:http_host} (%{IPORHOST:clientip}|\-)?\s?\- \- \[%{HTTPDATE:logdate}\] \"(%{WORD:method} %{URIPATH:path}%{DATA:rawdata}|-)?\" %{NUMBER:response} (?:%{NUMBER:bytes}|-) \"(%{URI:referrer}|\-)?\"\;? \"(%{DATA:agent}|-)?\"" ]
    }
    date {
      match => [ "logdate", "dd/MMM/YYYY:HH:mm:ss Z" ]
      target => "@timestamp"
      timezone => "America/Los_Angeles"
    }
  }
  if [type] == "uwsgi" {
    grok {
      break_on_match => false
      match => [ "message", "\[%{DATA:data}\] %{IP:source} \(\) \{%{DATA:metrics}\} \[%{DAY} %{DATA:logdate}\] %{WORD:method} %{GREEDYDATA:details}" ]
    }
    mutate {
      replace => [ "message", "%{method} %{details}" ]
      remove_field => [ "details" ]
    }
  }
  if [type] == "elasticsearch" {
    multiline {
      pattern => "^\[%{TIMESTAMP_ISO8601}\]\[%{LOGLEVEL}\]\[%{DATA}\] "
      negate => true
      what => previous
      stream_identity => "%{host}.%{path}"
    }
    grok {
      break_on_match => false
      match => [ "message", "(?m)\[%{TIMESTAMP_ISO8601:logdate}\]\[%{LOGLEVEL:severity_label}\]\[%{DATA:verb}\] \[%{HOST:name}\] %{GREEDYDATA:message}" ]
      overwrite => [ "message" ]
    }
    date {
      match => [ "logdate", "YYYY-MM-dd HH:mm:ss,SSS" ]
      target => "@timestamp"
      timezone => "America/Los_Angeles"
    }
  }
  if [type] == "nginx-access" {
    grok {
      match => [ "message", "%{IP:srv_ip} %{IP:src_ip} %{GREEDYDATA:message}" ]
      add_tag => [ "grokd" ]
    }
    geoip {
      source => "src_ip"
      target => "geoip"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
      add_tag => "worldly"
    }
  }
}
output {
  # stdout { }
  elasticsearch {
    cluster => "logstash"
  }
}

No longer an issue.

I was able to get metrics logging as expected after I upgraded an instance to 1.5.0. So glad this is taken care of.

Thanks again for the help.