Request timeout on longer durations in the Kibana Discover app

Hi,

If I select a time range greater than 7 days (for example, 15 days), I receive a request timeout in Kibana.

I have already increased the elasticsearch.requestTimeout value to more than 10x its default value:

elasticsearch.requestTimeout: 330000

Should I continue to increase this value? Are there any adverse effects of doing this?
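For reference, this lives in kibana.yml and the value is in milliseconds; a minimal sketch of the relevant line (Kibana's default is 30000, i.e. 30 seconds):

# kibana.yml (excerpt): request timeout towards Elasticsearch, in milliseconds (default 30000)
elasticsearch.requestTimeout: 330000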

Here is the output of the cluster stats API:

{
  "_nodes" : {
    "total" : 16,
    "successful" : 16,
    "failed" : 0
  },
  "cluster_name" : "elk",
  "cluster_uuid" : "pvOFcPGKRzeSz9Lnw1VYQw",
  "timestamp" : 1548649969710,
  "status" : "green",
  "indices" : {
    "count" : 88,
    "shards" : {
      "total" : 565,
      "primaries" : 276,
      "replication" : 1.0471014492753623,
      "index" : {
        "shards" : {
          "min" : 2,
          "max" : 15,
          "avg" : 6.420454545454546
        },
        "primaries" : {
          "min" : 1,
          "max" : 5,
          "avg" : 3.1363636363636362
        },
        "replication" : {
          "min" : 1.0,
          "max" : 14.0,
          "avg" : 1.1477272727272727
        }
      }
    },
    "docs" : {
      "count" : 6679589700,
      "deleted" : 852219
    },
    "store" : {
      "size" : "7.5tb",
      "size_in_bytes" : 8326673310906
    },
    "fielddata" : {
      "memory_size" : "4.2gb",
      "memory_size_in_bytes" : 4594862792,
      "evictions" : 0
    },
    "query_cache" : {
      "memory_size" : "7.5gb",
      "memory_size_in_bytes" : 8118757506,
      "total_count" : 110862683,
      "hit_count" : 61847274,
      "miss_count" : 49015409,
      "cache_size" : 109852,
      "cache_count" : 190555,
      "evictions" : 80703
    },
    "completion" : {
      "size" : "0b",
      "size_in_bytes" : 0
    },
    "segments" : {
      "count" : 10962,
      "memory" : "14.7gb",
      "memory_in_bytes" : 15876423338,
      "terms_memory" : "13.1gb",
      "terms_memory_in_bytes" : 14154681909,
      "stored_fields_memory" : "1gb",
      "stored_fields_memory_in_bytes" : 1173691128,
      "term_vectors_memory" : "0b",
      "term_vectors_memory_in_bytes" : 0,
      "norms_memory" : "22.3mb",
      "norms_memory_in_bytes" : 23393792,
      "points_memory" : "474.1mb",
      "points_memory_in_bytes" : 497180085,
      "doc_values_memory" : "26.2mb",
      "doc_values_memory_in_bytes" : 27476424,
      "index_writer_memory" : "10.7mb",
      "index_writer_memory_in_bytes" : 11320382,
      "version_map_memory" : "116.6kb",
      "version_map_memory_in_bytes" : 119500,
      "fixed_bit_set" : "10.2mb",
      "fixed_bit_set_memory_in_bytes" : 10741440,
      "max_unsafe_auto_id_timestamp" : 1548633607681,
      "file_sizes" : { }
    }
  },
  "nodes" : {
    "count" : {
      "total" : 16,
      "data" : 15,
      "coordinating_only" : 1,
      "master" : 15,
      "ingest" : 15
    },
    "versions" : [
      "6.5.4"
    ],
    "os" : {
      "available_processors" : 124,
      "allocated_processors" : 124,
      "names" : [
        {
          "name" : "Linux",
          "count" : 16
        }
      ],
      "mem" : {
        "total" : "487gb",
        "total_in_bytes" : 522986086400,
        "free" : "10.9gb",
        "free_in_bytes" : 11798892544,
        "used" : "476gb",
        "used_in_bytes" : 511187193856,
        "free_percent" : 2,
        "used_percent" : 98
      }
    },
    "process" : {
      "cpu" : {
        "percent" : 24
      },
      "open_file_descriptors" : {
        "min" : 710,
        "max" : 1232,
        "avg" : 1124
      }
    },
    "jvm" : {
      "max_uptime" : "24.8d",
      "max_uptime_in_millis" : 2149189516,
      "versions" : [
        {
          "version" : "1.8.0_191",
          "vm_name" : "OpenJDK 64-Bit Server VM",
          "vm_version" : "25.191-b12",
          "vm_vendor" : "Oracle Corporation",
          "count" : 16
        }
      ],
      "mem" : {
        "heap_used" : "109.2gb",
        "heap_used_in_bytes" : 117258404416,
        "heap_max" : "246.9gb",
        "heap_max_in_bytes" : 265207152640
      },
      "threads" : 1716
    },
    "fs" : {
      "total" : "14.1tb",
      "total_in_bytes" : 15546706960384,
      "free" : "6.2tb",
      "free_in_bytes" : 6867217747968,
      "available" : "5.5tb",
      "available_in_bytes" : 6076681416704
    },
    "plugins" : [
      {
        "name" : "search-guard-6",
        "version" : "6.5.4-24.0",
        "elasticsearch_version" : "6.5.4",
        "java_version" : "1.8",
        "description" : "Provide access control related features for Elasticsearch 6",
        "classname" : "com.floragunn.searchguard.SearchGuardPlugin",
        "extended_plugins" : [ ],
        "has_native_controller" : false
      },
      {
        "name" : "ingest-geoip",
        "version" : "6.5.4",
        "elasticsearch_version" : "6.5.4",
        "java_version" : "1.8",
        "description" : "Ingest processor that uses looksup geo data based on ip adresses using the Maxmind geo database",
        "classname" : "org.elasticsearch.ingest.geoip.IngestGeoIpPlugin",
        "extended_plugins" : [ ],
        "has_native_controller" : false
      }
    ],
    "network_types" : {
      "transport_types" : {
        "com.floragunn.searchguard.ssl.http.netty.SearchGuardSSLNettyTransport" : 16
      },
      "http_types" : {
        "com.floragunn.searchguard.http.SearchGuardHttpServerTransport" : 16
      }
    }
  }
}
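(For anyone reproducing this: the stats above come from the cluster stats API and can be fetched from Kibana Dev Tools or curl, for example as below; host, port, and credentials are placeholders.)

GET _cluster/stats?human&pretty

curl -sk -u <user>:<password> 'https://localhost:9200/_cluster/stats?human&pretty'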

The Discover request throws a timeout error while reading the matching documents, even without any search query or filter applied.
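For context, what Discover sends for such a range is roughly a search with a date-range filter like the one below. This is only a sketch: it assumes an index pattern of logstash-* and a time field named @timestamp, and the real request also carries Discover's configured sort and sample size.

GET logstash-*/_search
{
  "size": 500,
  "sort": [ { "@timestamp": { "order": "desc" } } ],
  "query": {
    "bool": {
      "filter": [
        { "range": { "@timestamp": { "gte": "now-15d", "lte": "now" } } }
      ]
    }
  }
}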

How should I proceed?

Hello @NerdSec,
This can also be caused by the heap size; I faced the same issue because of heap size, so please check that.
A second possible reason is heavy use of scripted fields.
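To check, per-node heap usage can be seen with something like the cat nodes API (a sketch, run from Kibana Dev Tools):

GET _cat/nodes?v&h=name,heap.percent,heap.current,heap.max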

Regards
Shrikant

Hi Shrikant,

I believe there is enough heap. Is there a way I can measure whether it is sufficient?

"mem" : {
        "heap_used" : "109.2gb",
        "heap_used_in_bytes" : 117258404416,
        "heap_max" : "246.9gb",
        "heap_max_in_bytes" : 265207152640
      },

Also, there are no scripted fields present.

@NerdSec,
I was going through this; hope it helps you out.

Any other thoughts on this? It would really be a big help, as we planned to use this stack to query longer-duration data sets, but it does not currently seem to work for that requirement.

This topic was automatically closed 28 days after the last reply. New replies are no longer allowed.