Did I hit the upper limit of the /_bulk upload API?

1. I did not change the default refresh interval (a quick way to verify this is sketched right after this list).
2. Actually, those 25 nodes of the ES 5.0 cluster were moved over from the previous ES 2.1 cluster, which originally had 50 nodes. And yes, same hardware.
3. There is no async commit on the 2.1 cluster, I am sure.
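
Regarding point 1: the settings API shows any per-index override, and if refresh_interval does not appear in the output, the index is on the 1s default. A minimal check, with myindex as a placeholder:

    GET myindex/_settings?pretty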
Here is part of the GET _nodes/stats/jvm,thread_pool?pretty results:

"nodes": {
      "uNsUVjXoToCwNl2apc4Z_g": {
         "timestamp": 1480509175720,
         "name": "xxx",
         "transport_address": "10.107.29.40:9300",
         "host": "10.107.29.40",
         "ip": [
            "10.107.29.40:9300",
            "NONE"
         ],
         "jvm": {
            "timestamp": 1480509175720,
            "uptime_in_millis": 20074880605,
            "mem": {
               "heap_used_in_bytes": 21931278896,
               "heap_used_percent": 63,
               "heap_committed_in_bytes": 34272509952,
               "heap_max_in_bytes": 34272509952,
               "non_heap_used_in_bytes": 157184464,
               "non_heap_committed_in_bytes": 160215040,
               "pools": {
                  "young": {
                     "used_in_bytes": 601474072,
                     "max_in_bytes": 697958400,
                     "peak_used_in_bytes": 697958400,
                     "peak_max_in_bytes": 697958400
                  },
                  "survivor": {
                     "used_in_bytes": 67158032,
                     "max_in_bytes": 87228416,
                     "peak_used_in_bytes": 87228416,
                     "peak_max_in_bytes": 87228416
                  },
                  "old": {
                     "used_in_bytes": 21262646792,
                     "max_in_bytes": 33487323136,
                     "peak_used_in_bytes": 31837840704,
                     "peak_max_in_bytes": 33487323136
                  }
               }
            },
            "threads": {
               "count": 170,
               "peak_count": 9210
            },
            "gc": {
               "collectors": {
                  "young": {
                     "collection_count": 6363729,
                     "collection_time_in_millis": 290189972
                  },
                  "old": {
                     "collection_count": 644,
                     "collection_time_in_millis": 898140
                  }
               }
            },
            "buffer_pools": {
               "direct": {
                  "count": 23447,
                  "used_in_bytes": 422858394,
                  "total_capacity_in_bytes": 422858394
               },
               "mapped": {
                  "count": 5107,
                  "used_in_bytes": 465583433300,
                  "total_capacity_in_bytes": 465583433300
               }
            }
         },
         "thread_pool": {
            "bulk": {
               "threads": 12,
               "queue": 0,
               "active": 0,
               "rejected": 16878,
               "largest": 12,
               "completed": 2262487473
            },
            "fetch_shard_started": {
               "threads": 1,
               "queue": 0,
               "active": 0,
               "rejected": 0,
               "largest": 23,
               "completed": 58252
            },
            "fetch_shard_store": {
               "threads": 1,
               "queue": 0,
               "active": 0,
               "rejected": 0,
               "largest": 24,
               "completed": 154982
            },
            "flush": {
               "threads": 4,
               "queue": 0,
               "active": 0,
               "rejected": 0,
               "largest": 5,
               "completed": 908140
            },
            "force_merge": {
               "threads": 0,
               "queue": 0,
               "active": 0,
               "rejected": 0,
               "largest": 0,
               "completed": 0
            },
            "generic": {
               "threads": 1,
               "queue": 0,
               "active": 0,
               "rejected": 0,
               "largest": 9045,
               "completed": 27608243
            },
            "get": {
               "threads": 12,
               "queue": 0,
               "active": 0,
               "rejected": 0,
               "largest": 12,
               "completed": 1793
            },
            "index": {
               "threads": 12,
               "queue": 0,
               "active": 0,
               "rejected": 0,
               "largest": 12,
               "completed": 1290
            },
            "listener": {
               "threads": 6,
               "queue": 0,
               "active": 0,
               "rejected": 0,
               "largest": 6,
               "completed": 50669
            },
            "management": {
               "threads": 5,
               "queue": 0,
               "active": 1,
               "rejected": 0,
               "largest": 5,
               "completed": 10879423
            },
            "percolate": {
               "threads": 0,
               "queue": 0,
               "active": 0,
               "rejected": 0,
               "largest": 0,
               "completed": 0
            },
            "refresh": {
               "threads": 6,
               "queue": 0,
               "active": 1,
               "rejected": 0,
               "largest": 6,
               "completed": 97083385
            },
            "search": {
               "threads": 19,
               "queue": 0,
               "active": 0,
               "rejected": 1121380,
               "largest": 30,
               "completed": 9218999
            },
            "snapshot": {
               "threads": 0,
               "queue": 0,
               "active": 0,
               "rejected": 0,
               "largest": 0,
               "completed": 0
            },
            "suggest": {
               "threads": 0,
               "queue": 0,
               "active": 0,
               "rejected": 0,
               "largest": 0,
               "completed": 0
            },
            "warmer": {
               "threads": 5,
               "queue": 0,
               "active": 0,
               "rejected": 0,
               "largest": 5,
               "completed": 113856768
            }
         }
      }
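
Note the bulk rejections in the stats above (rejected: 16878 on this node, plus over a million search rejections). Bulk rejections happen when all bulk threads are busy and the bulk queue is full, which is exactly what hitting the bulk limit looks like. A compact way to watch this per node on 5.x is the cat API (a sketch; available header names can differ slightly between versions):

    GET _cat/thread_pool/bulk?v&h=node_name,active,queue,rejected,completed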

So now you have a cluster that is half the size of the one you are comparing it to? How are your shards distributed? Are they all on different nodes? Can you tell?
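
For example, the cat APIs would show the distribution at a glance (your_index is just a placeholder):

    GET _cat/shards/your_index?v
    GET _cat/allocation?v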

Actually, both clusters have 25 nodes now, as 25 nodes have been removed from the 2.1 ES cluster, so it's not half the size... it's the same size.
Strangely, according to Kibana, some of the shards of one index are on the same node. Oh, how can that be! The index with the most primary shards has 10 of them, which is fewer than the number of nodes.

After checking the 2.1 cluster, I found that some shards of one index are on the same node there too.
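
For what it's worth, if the goal is to force the shards of one index onto different nodes, the per-index total_shards_per_node setting can do that. A sketch, with myindex as a placeholder (note that setting it to 1 can leave shards unassigned if there are too few nodes):

    PUT myindex/_settings
    {
      "index.routing.allocation.total_shards_per_node": 1
    }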

Here is the 2.1 cluster:

And here is the 5.0 cluster:

Finally, it ran OOM again.

Sorry, I don't know what you mean. Can you provide some allocation output? You keep saying it works on 2.1, but I can't really tell what the difference is. Are you hammering that cluster with the same queue size and everything?
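
For reference, the queue size being asked about is the bulk thread pool queue from the stats above. It is configured in elasticsearch.yml, and the setting name changed between these two versions (50 is the default in both, shown here only as an example):

    # 2.x elasticsearch.yml
    threadpool.bulk.queue_size: 50

    # 5.x elasticsearch.yml
    thread_pool.bulk.queue_size: 50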

Yes, because I use the same templates (the string fields are not_analyzed in 2.1 and keyword in 5.0) and the same config params.
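
For concreteness, this is the mapping change referred to in the parentheses above (my_field is a placeholder; keyword is the 5.x replacement for a not_analyzed string):

2.1 template:

    "my_field": {
      "type": "string",
      "index": "not_analyzed"
    }

5.0 template:

    "my_field": {
      "type": "keyword"
    }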
