Creating data to visualize in Kibana with http_poller and filtering in Logstash

I am pulling metrics with http_poller, and it's got all sorts of fun data. For the sake of ease, I tried to copy a field out of an array and put it in its own field. I want to graph it based on timestamps, but Elasticsearch is creating it as text and not as a numeric value.

The mutate filter I used to pull it out of the array:

 mutate { copy => { "[volumes][0][avg-latency]" => "[xio-ds1-latency]" } }

But when I check the ES index, the mapping is showing up as:

         "xio-ds1-latency": {
      "type": "text",
      "fields": {
        "keyword": {
          "type": "keyword",
          "ignore_above": 256

Example of a value:
@timestamp = February 16th 2019, 15:38:00.495
xio-ds1-latency = 431

How do I make it numeric before it hits ES? END GAME is: I want to build a bar graph showing, every minute, the latency value from this field. The idea is that I can show lag based on the time of the polling.

If I understand it all, I can't add it to the Y-axis field in Kibana because it's not numeric.

I hope that makes sense.

Thanks - Anthony

Is it numeric in Logstash? If you use a stdout { codec => rubydebug } output, do you get

  "xio-ds1-latency" => 431

or

  "xio-ds1-latency" => "431"

Anyway, the way to make sure is to use a mapping template. Questions about the details of how to use a template would be better asked in the Elasticsearch forum.
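
For example, a minimal template sketch (assuming an index named norcalx1 and an Elasticsearch 6.x-style mapping with a doc document type; the details vary by version, which is why the ES forum is the right place for specifics) might look like:

  PUT _template/norcalx1
  {
    "index_patterns": ["norcalx1*"],
    "mappings": {
      "doc": {
        "properties": {
          "xio-ds1-latency": { "type": "integer" }
        }
      }
    }
  }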

So I see it showing "xio-ds1-latency" => "552"

OK, it is a string. Try

mutate { convert => { "xio-ds1-latency" => "integer" } }

If you are using a daily index, then once you make that change it will start working when the index rolls over. For the current index the field is already locked in as a string, but in a new index it should appear as numeric.
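
For reference, a daily index is one whose name includes the date, so a fresh index (with a fresh mapping) is created each day. A sketch of what that looks like in the elasticsearch output (norcalx1 here is a hypothetical index name):

  elasticsearch {
    hosts => ["http://10.237.33.168:9200"]
    index => "norcalx1-%{+YYYY.MM.dd}"
  }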

So this is what I have so far, but it doesn't seem to be going to integer.

But I have been deleting the index in Kibana before I run it, and in Kibana I deleted the index under Elasticsearch. What is the best way to wipe out the index? I am just testing...
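
One way to wipe a test index (a sketch, assuming the norcalx1 index name and host from the pipeline below) is the delete index API:

  curl -X DELETE "http://10.237.33.168:9200/norcalx1"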

Here is my pipeline so far...

input {
  http_poller {
    urls => {
      norcalxio => {
        # Supports all options supported by ruby's Manticore HTTP client
        method => get
        user => "xxxxx"
        password => "xxxxx"
        url => "https://10.237.33.100/api/json/v3/types/volumes?full=1"
        headers => {
          Accept => "application/json"
        }
      }
    }
    request_timeout => 60
    # Supports "cron", "every", "at" and "in" schedules by rufus scheduler
    schedule => { cron => "* * * * * UTC" }
    codec => "json"
    # A hash of request metadata info (timing, response headers, etc.) will be sent here
    metadata_target => "http_poller_metadata"
    cacert => "/etc/logstash/norcalx1.cer"
  }
}

filter {
  json {
    source => "message"
  }

  mutate {
    remove_field => [
      "[volumes][0][lun-mapping-list]", "[volumes][1][lun-mapping-list]",
      "[volumes][2][lun-mapping-list]", "[volumes][3][lun-mapping-list]",
      "[volumes][4][lun-mapping-list]", "[volumes][5][lun-mapping-list]",
      "[volumes][6][lun-mapping-list]", "[volumes][7][lun-mapping-list]",
      "[volumes][0][xms-id]", "[volumes][1][xms-id]",
      "[volumes][2][xms-id]", "[volumes][3][xms-id]",
      "[volumes][4][xms-id]", "[volumes][5][xms-id]",
      "[volumes][6][xms-id]", "[volumes][7][xms-id]",
      "[volumes][0][sys-id]", "[volumes][1][sys-id]",
      "[volumes][2][sys-id]", "[volumes][3][sys-id]",
      "[volumes][4][sys-id]", "[volumes][5][sys-id]",
      "[volumes][6][sys-id]", "[volumes][7][sys-id]",
      "[volumes][0][vol-id]", "[volumes][1][vol-id]",
      "[volumes][2][vol-id]", "[volumes][3][vol-id]",
      "[volumes][4][vol-id]", "[volumes][5][vol-id]",
      "[volumes][6][vol-id]", "[volumes][7][vol-id]",
      "[volumes][0][snapgrp-id]", "[volumes][1][snapgrp-id]",
      "[volumes][2][snapgrp-id]", "[volumes][3][snapgrp-id]",
      "[volumes][4][snapgrp-id]", "[volumes][5][snapgrp-id]",
      "[volumes][6][snapgrp-id]", "[volumes][7][snapgrp-id]"
    ]
    copy => { "[volumes][0][avg-latency]" => "xio-ds1-latency" }
  }

  mutate {
    convert => { "xio-dsl-latency" => "integer" }
  }
}

output {
  stdout {
    codec => rubydebug
  }
  elasticsearch {
    hosts => ["http://10.237.33.168:9200"]
    action => "index"
    index => "norcalx1"
  }
}

Thanks!

You copy it to xio-ds1-latency (DS1), but you convert xio-dsl-latency (DSL).
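
In other words, the convert needs to reference the same field name as the copy target:

  mutate { convert => { "xio-ds1-latency" => "integer" } }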

Sweet Christmas... if you're in the SF Bay Area, I owe you a beer...
