Kibana is not able to fetch data from kibana collector

The error "Kibana is not able to fetch data from kibana collector" shows up whenever I start Kibana through cmd.

The error in cmd is shown in the attached screenshot.

Can you elaborate more on what you are trying to achieve? What version are you using, on which operating system, and what steps did you take? Screenshots would help too.

Thanks
Rashmi

Hi, sorry for the late reply
I am running Kibana on a VM and trying to collect data from different machines.
My Elasticsearch and Kibana version is 6.4.2, running on Microsoft Windows Server 2012 R2 Standard. The setup is: I run Winlogbeat and Metricbeat on other machines, their data is sent to Elasticsearch on the VM, and Kibana on the same VM reads from that Elasticsearch.
For the last 5 days I have been getting this error, along with "request timeout after 3000 ms".

Can you please provide the full output from the cluster stats API?
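
If it helps, here is a minimal Python sketch for fetching it (this is just one way to call GET /_cluster/stats; it assumes Elasticsearch is reachable at http://localhost:9200 without authentication, so adjust the host and add credentials if security is enabled):

import requests

# GET /_cluster/stats; human=true adds readable sizes like "39gb" to the output.
resp = requests.get(
    "http://localhost:9200/_cluster/stats",
    params={"human": "true"},
    timeout=10,
)
resp.raise_for_status()
print(resp.text)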

The output I got from the cluster stats API is:
{
  "_nodes": {
    "total": 1,
    "successful": 1,
    "failed": 0
  },
  "cluster_name": "elasticsearch",
  "timestamp": 1542190232485,
  "status": "yellow",
  "indices": {
    "count": 223,
    "shards": {
      "total": 747,
      "primaries": 747,
      "replication": 0,
      "index": {
        "shards": {
          "min": 1,
          "max": 5,
          "avg": 3.3497757847533634
        },
        "primaries": {
          "min": 1,
          "max": 5,
          "avg": 3.3497757847533634
        },
        "replication": {
          "min": 0,
          "max": 0,
          "avg": 0
        }
      }
    },
    "docs": {
      "count": 82631803,
      "deleted": 107329
    },
    "store": {
      "size": "39gb",
      "size_in_bytes": 41895795374
    },
    "fielddata": {
      "memory_size": "2.1kb",
      "memory_size_in_bytes": 2168,
      "evictions": 0
    },
    "query_cache": {
      "memory_size": "0b",
      "memory_size_in_bytes": 0,
      "total_count": 0,
      "hit_count": 0,
      "miss_count": 0,
      "cache_size": 0,
      "cache_count": 0,
      "evictions": 0
    },
    "completion": {
      "size": "0b",
      "size_in_bytes": 0
    },
    "segments": {
      "count": 4886,
      "memory": "142mb",
      "memory_in_bytes": 148944722,
      "terms_memory": "110.5mb",
      "terms_memory_in_bytes": 115903827,
      "stored_fields_memory": "23.5mb",
      "stored_fields_memory_in_bytes": 24697264,
      "term_vectors_memory": "0b",
      "term_vectors_memory_in_bytes": 0,
      "norms_memory": "3.1kb",
      "norms_memory_in_bytes": 3200,
      "points_memory": "2.3mb",
      "points_memory_in_bytes": 2499175,
      "doc_values_memory": "5.5mb",
      "doc_values_memory_in_bytes": 5841256,
      "index_writer_memory": "11.5mb",
      "index_writer_memory_in_bytes": 12158292,
      "version_map_memory": "0b",
      "version_map_memory_in_bytes": 0,
      "fixed_bit_set": "0b",
      "fixed_bit_set_memory_in_bytes": 0,
      "max_unsafe_auto_id_timestamp": 1542189906526,
      "file_sizes": {}
    }
  },
  "nodes": {
    "count": {
      "total": 1,
      "data": 1,
      "coordinating_only": 0,
      "master": 1,
      "ingest": 1
    },
    "versions": [
      "6.4.2"
    ],
    "os": {
      "available_processors": 4,
      "allocated_processors": 4,
      "names": [
        {
          "name": "Windows Server 2012 R2",
          "count": 1
        }
      ],
      "mem": {
        "total": "10.4gb",
        "total_in_bytes": 11227680768,
        "free": "1.4gb",
        "free_in_bytes": 1604513792,
        "used": "8.9gb",
        "used_in_bytes": 9623166976,
        "free_percent": 14,
        "used_percent": 86
      }
    },
    "process": {
      "cpu": {
        "percent": 6
      },
      "open_file_descriptors": {
        "min": -1,
        "max": -1,
        "avg": 0
      }
    },
    "jvm": {
      "max_uptime": "6.3m",
      "max_uptime_in_millis": 381600,
      "versions": [
        {
          "version": "1.8.0_101",
          "vm_name": "Java HotSpot(TM) 64-Bit Server VM",
          "vm_version": "25.101-b13",
          "vm_vendor": "Oracle Corporation",
          "count": 1
        }
      ],
      "mem": {
        "heap_used": "1.1gb",
        "heap_used_in_bytes": 1190462064,
        "heap_max": "1.9gb",
        "heap_max_in_bytes": 2112618496
      },
      "threads": 65
    },
    "fs": {
      "total": "119.6gb",
      "total_in_bytes": 128479916032,
      "free": "62gb",
      "free_in_bytes": 66627973120,
      "available": "62gb",
      "available_in_bytes": 66627973120
    },
"plugins": ,
"network_types": {
"transport_types": {
"security4": 1
},
"http_types": {
"security4": 1
}
}
}
}

That is an awful lot of shards for only 39GB of data and 2GB of heap: 747 primary shards across 223 indices works out to roughly 50MB per shard on average, and a common rule of thumb is to stay below 20 shards per GB of heap, which would be around 40 shards here rather than 747. I would recommend reading this blog post about shards and sharding, and then reducing the count substantially.
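
As a rough sketch of what that cleanup could look like, you could audit per-index shard counts with the cat indices API and shrink over-sharded indices with the shrink API. The index name winlogbeat-2018.11.01 and node name node-1 below are placeholders, the endpoint is assumed to be localhost:9200, and shrink requires the source index to be write-blocked with all its shards on one node first (already true on a single-node cluster like this one):

import requests

ES = "http://localhost:9200"

# List indices with their primary shard counts, largest first.
indices = requests.get(
    f"{ES}/_cat/indices",
    params={"format": "json", "h": "index,pri,store.size"},
).json()
for idx in sorted(indices, key=lambda i: int(i["pri"]), reverse=True):
    print(idx["index"], idx["pri"], idx["store.size"])

# Prepare an index for shrinking: block writes and pin its shards to one node.
requests.put(f"{ES}/winlogbeat-2018.11.01/_settings", json={
    "index.blocks.write": True,
    "index.routing.allocation.require._name": "node-1",  # placeholder node name
})

# Shrink down to a single primary shard (no replicas on a single-node cluster).
requests.post(f"{ES}/winlogbeat-2018.11.01/_shrink/winlogbeat-2018.11.01-shrunk", json={
    "settings": {
        "index.number_of_shards": 1,
        "index.number_of_replicas": 0,
    }
})

Going forward, lowering index.number_of_shards in the Beats index templates (for example via setup.template.settings in each Beat's configuration) stops new daily indices from being created with the 6.x default of 5 primary shards.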
