How to resolve `rejected execution of org.elasticsearch.common.util.concurrent.TimedRunnable`

Last night our ES production cluster got bogged down for the first time. The only recent change (a few weeks ago) was enabling future (async) mode for all search requests. I'm really new to ES and not sure how to go about fixing this. My first thought is that maybe we need more shards, or maybe fewer?

Any help would be greatly appreciated.
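
Below are our cluster stats for reference, pulled with something like the following call (localhost:9200 is just a placeholder for our endpoint):

# human-readable cluster-wide stats (nodes, shards, heap, thread counts, etc.)
curl -s 'http://localhost:9200/_cluster/stats?human&pretty'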

{
  "_nodes" : {
    "total" : 2,
    "successful" : 2,
    "failed" : 0
  },
  "timestamp" : 1568393471474,
  "status" : "green",
  "indices" : {
    "count" : 2,
    "shards" : {
      "total" : 4,
      "primaries" : 2,
      "replication" : 1.0,
      "index" : {
        "shards" : {
          "min" : 2,
          "max" : 2,
          "avg" : 2.0
        },
        "primaries" : {
          "min" : 1,
          "max" : 1,
          "avg" : 1.0
        },
        "replication" : {
          "min" : 1.0,
          "max" : 1.0,
          "avg" : 1.0
        }
      }
    },
    "docs" : {
      "count" : 1276350,
      "deleted" : 331573
    },
    "store" : {
      "size" : "23.5gb",
      "size_in_bytes" : 25253154700
    },
    "fielddata" : {
      "memory_size" : "0b",
      "memory_size_in_bytes" : 0,
      "evictions" : 0
    },
    "query_cache" : {
      "memory_size" : "238mb",
      "memory_size_in_bytes" : 249645656,
      "total_count" : 1516983968,
      "hit_count" : 584409067,
      "miss_count" : 932574901,
      "cache_size" : 79912,
      "cache_count" : 429094,
      "evictions" : 349182
    },
    "completion" : {
      "size" : "0b",
      "size_in_bytes" : 0
    },
    "segments" : {
      "count" : 56,
      "memory" : "27.7mb",
      "memory_in_bytes" : 29106703,
      "terms_memory" : "23.8mb",
      "terms_memory_in_bytes" : 24959669,
      "stored_fields_memory" : "3.4mb",
      "stored_fields_memory_in_bytes" : 3598200,
      "term_vectors_memory" : "0b",
      "term_vectors_memory_in_bytes" : 0,
      "norms_memory" : "145.1kb",
      "norms_memory_in_bytes" : 148608,
      "points_memory" : "190.2kb",
      "points_memory_in_bytes" : 194770,
      "doc_values_memory" : "200.6kb",
      "doc_values_memory_in_bytes" : 205456,
      "index_writer_memory" : "0b",
      "index_writer_memory_in_bytes" : 0,
      "version_map_memory" : "266b",
      "version_map_memory_in_bytes" : 266,
      "fixed_bit_set" : "0b",
      "fixed_bit_set_memory_in_bytes" : 0,
      "max_unsafe_auto_id_timestamp" : -1,
      "file_sizes" : { }
    }
  },
  "nodes" : {
    "count" : {
      "total" : 2,
      "data" : 2,
      "coordinating_only" : 0,
      "master" : 2,
      "ingest" : 2
    },
    "versions" : [ "6.7.0" ],
    "os" : {
      "available_processors" : 8,
      "allocated_processors" : 8,
      "names" : [ {
        "count" : 2
      } ],
      "pretty_names" : [ {
        "pretty_name" : "Amazon Linux AMI 2018.03",
        "count" : 2
      } ],
      "mem" : {
        "total" : "31.3gb",
        "total_in_bytes" : 33646977024,
        "free" : "1.3gb",
        "free_in_bytes" : 1482551296,
        "used" : "29.9gb",
        "used_in_bytes" : 32164425728,
        "free_percent" : 4,
        "used_percent" : 96
      }
    },
    "process" : {
      "cpu" : {
        "percent" : 14
      },
      "open_file_descriptors" : {
        "min" : 1202,
        "max" : 1234,
        "avg" : 1218
      }
    },
    "jvm" : {
      "max_uptime" : "64.4d",
      "max_uptime_in_millis" : 5571353354,
      "mem" : {
        "heap_used" : "6.1gb",
        "heap_used_in_bytes" : 6606791360,
        "heap_max" : "15.9gb",
        "heap_max_in_bytes" : 17110138880
      },
      "threads" : 265
    },
    "fs" : {
      "total" : "391.7gb",
      "total_in_bytes" : 420608950272,
      "free" : "368gb",
      "free_in_bytes" : 395199074304,
      "available" : "348gb",
      "available_in_bytes" : 373690683392
    },
    "network_types" : {
      "transport_types" : {
        "netty4" : 2
      },
      "http_types" : {
        "filter-jetty" : 2
      }
    }
  }
}

Hi @nathanburgess,

Seeing the actual exception message, including the full stack trace, would be helpful here. Could you share it?

It sounds like you may have overloaded the search thread pool by submitting too many async search requests; the stack trace and/or error message will likely confirm that.
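
In the meantime, a quick way to confirm whether the search pool is the one rejecting work is to look at its rejected counter on each node. A minimal sketch, with localhost:9200 standing in for one of your nodes:

# per-node view of the search thread pool: active threads, queued tasks, rejections
curl -s 'http://localhost:9200/_cat/thread_pool/search?v&h=node_name,name,active,queue,rejected,completed'

# the same counters in JSON form, across all thread pools
curl -s 'http://localhost:9200/_nodes/stats/thread_pool?pretty'

A rejected count that keeps climbing on the search pool would line up with the TimedRunnable rejection you saw, and would point more toward throttling the number of in-flight async searches (or adding capacity) than toward changing the shard count.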
