Date_histogram facet float possible overflow

Hi all,

I am using the ELK stack to visualise our monitoring data. Yesterday I came
across a weird problem: the Elasticsearch date_histogram facet returned
floating-point results that look like an overflow ("min" :
4.604480259023595E18).
Our dataflow is : collectd (cpu/memory) -> sends it to riemann -> logstash
-> elasticsearch

At first the values were correct; after a few days the values became huge
(see the attached snapshot of the Kibana graph).

filtered query + Result:

query:
curl -XGET 'http://localhost:9200/logstash-2014.08.24/_search?pretty' -d '{
"query": {
"filtered": {
"query": {
"bool": {
"should": [
{
"query_string": {
"query":
"subservice.raw:\"processes-cpu_percent/gauge-collectd\" AND
(plugin_instance:\"cpu_percent\")"
}
}
]
}
},
"filter": {
"bool": {
"must": [
{
"range": {
"@timestamp": {
"from": 1408884312966,
"to": 1408884612966
}
}
},
{
"range": {
"@timestamp": {
"from": 1408884311948,
"to": 1408884327941
}
}
},
{
"fquery": {
"query": {
"query_string": {
"query":
"subservice:("processes-cpu_percent/gauge-collectd")"
}
},
"_cache": false
}
}
]
}
}
}
},
"size": 500,
"sort": [
{
"metric": {
"order": "desc",
"ignore_unmapped": false
}
},
{
"@timestamp": {
"order": "desc",
"ignore_unmapped": false
}
}
]
}'

result:
{
"took" : 47,
"timed_out" : false,
"_shards" : {
"total" : 5,
"successful" : 5,
"failed" : 0
},
"hits" : {
"total" : 2,
"max_score" : null,
"hits" : [ {
"_index" : "logstash-2014.08.24",
"_type" : "gauge",
"_id" : "SlzG8bGJQziU0LMoN7nrbQ",
"_score" : null,
"_source":{"host":"host1","service":
"instance-2014-08-24T1106/processes-cpu_percent/gauge-collectd","state":null
,"description":null,"metric":0.7,"tags":["collectd"],"time":
"2014-08-24T12:45:25.000Z","ttl":20.0,"type":"gauge","source":"host1",
"ds_type":"gauge","plugin_instance":"cpu_percent","ds_name":"value",
"type_instance":"collectd","plugin":"processes","ds_index":"0","@version":
"1","@timestamp":"2014-08-24T12:45:15.079Z"},
"sort" : [ 4604480259023595110, 1408884325088 ]

}, {

  "_index" : "logstash-2014.08.24",
  "_type" : "gauge",
  "_id" : "8hxToMjpQ5WQIw15DQqIGA",
  "_score" : null,
  "_source":{"host":"host1","service":

"instance-2014-08-24T1106/processes-cpu_percent/gauge-collectd","state":null
,"description":null,"metric":0.5,"tags":["collectd"],"time":
"2014-08-24T12:45:15.000Z","ttl":20.0,"type":"gauge","source":"host1",
"ds_type":"gauge","plugin_instance":"cpu_percent","ds_name":"value",
"type_instance":"collectd","plugin":"processes","ds_index":"0","@version":
"1","@timestamp":"2014-08-24T12:45:15.079Z"},
"sort" : [ 4602678819172646912, 1408884315079 ]
} ]
}
}

date_histogram facet + result:

query:
curl -XGET 'http://localhost:9200/logstash-2014.08.24/_search?pretty' -d '{
"facets": {
"0": {
"date_histogram": {
"key_field": "@timestamp",
"value_field": "metric",
"interval": "1s"
},
"global": true,
"facet_filter": {
"fquery": {
"query": {
"filtered": {
"query": {
"query_string": {
"query":
"subservice.raw:\"processes-cpu_percent/gauge-collectd\" AND
(plugin_instance:cpu_percent) AND *"
}
},
"filter": {
"bool": {
"must": [
{
"range": {
"@timestamp": {
"from": 1408884199622,
"to": 1408884499623
}
}
},
{
"range": {
"@timestamp": {
"from": 1408884311948,
"to": 1408884327941
}
}
},
{
"fquery": {
"query": {
"query_string": {
"query":
"subservice:("processes-cpu_percent/gauge-collectd")"
}
},
"_cache": true
}
}
]
}
}
}
}
}
}
}
},
"size": 0
}'

result:
{
"took" : 24,
"timed_out" : false,
"_shards" : {
"total" : 5,
"successful" : 5,
"failed" : 0
},
"hits" : {
"total" : 1197141,
"max_score" : 0.0,
"hits" : [ ]
},
"facets" : {
"0" : {
"_type" : "date_histogram",
"entries" : [ {
"time" : 1408884315000,
"count" : 1,
"min" : 4.6026788191726469E18,
"max" : 4.6026788191726469E18,
"total" : 4.6026788191726469E18,
"total_count" : 1,
"mean" : 4.6026788191726469E18
}, {
"time" : 1408884325000,
"count" : 1,
"min" : 4.604480259023595E18,
"max" : 4.604480259023595E18,
"total" : 4.604480259023595E18,
"total_count" : 1,
"mean" : 4.604480259023595E18
} ]
}
}
}

Regards,
Moshe

--
You received this message because you are subscribed to the Google Groups "elasticsearch" group.
To unsubscribe from this group and stop receiving emails from it, send an email to elasticsearch+unsubscribe@googlegroups.com.
To view this discussion on the web visit https://groups.google.com/d/msgid/elasticsearch/44f54a8a-d3c8-4ea1-8050-71c5d029c409%40googlegroups.com.
For more options, visit https://groups.google.com/d/optout.