::: {127.0.0.1}{Bg4oFeaxxxxxxV3cGY4sboQ}{ceDy3FuxRxxxxxUWNhA}{127.0.0.x}{127.0.0.x:9300}{storetype=ssd}
Hot threads at 2019-05-24T07:17:19.700Z, interval=500ms, busiestThreads=3, ignoreIdleThreads=true:
101.1% (505.4ms out of 500ms) cpu usage by thread 'elasticsearch[xxxxxxxxx][bulk][T#27]'
10/10 snapshots sharing following 35 elements
java.lang.ThreadLocal$ThreadLocalMap.expungeStaleEntry(ThreadLocal.java:617)
java.lang.ThreadLocal$ThreadLocalMap.replaceStaleEntry(ThreadLocal.java:575)
java.lang.ThreadLocal$ThreadLocalMap.set(ThreadLocal.java:476)
java.lang.ThreadLocal$ThreadLocalMap.access$100(ThreadLocal.java:298)
java.lang.ThreadLocal.setInitialValue(ThreadLocal.java:184)
java.lang.ThreadLocal.get(ThreadLocal.java:170)
org.apache.lucene.util.CloseableThreadLocal.get(CloseableThreadLocal.java:78)
org.elasticsearch.common.lucene.uid.VersionsResolver.getLookupState(VersionsResolver.java:72)
org.elasticsearch.common.lucene.uid.VersionsResolver.loadDocIdAndVersion(VersionsResolver.java:120)
org.elasticsearch.common.lucene.uid.VersionsResolver.loadVersion(VersionsResolver.java:137)
org.elasticsearch.index.engine.InternalEngine.loadCurrentVersionFromIndex(InternalEngine.java:1377)
org.elasticsearch.index.engine.InternalEngine.resolveDocVersion(InternalEngine.java:393)
org.elasticsearch.index.engine.InternalEngine.compareOpToLuceneDocBasedOnVersions(InternalEngine.java:408)
org.elasticsearch.index.engine.InternalEngine.planIndexingAsNonPrimary(InternalEngine.java:545)
org.elasticsearch.index.engine.InternalEngine.index(InternalEngine.java:496)
org.elasticsearch.index.shard.IndexShard.index(IndexShard.java:557)
org.elasticsearch.index.shard.IndexShard.index(IndexShard.java:546)
org.elasticsearch.action.bulk.TransportShardBulkAction.executeIndexRequestOnReplica(TransportShardBulkAction.java:449)
org.elasticsearch.action.bulk.TransportShardBulkAction.shardOperationOnReplica(TransportShardBulkAction.java:383)
org.elasticsearch.action.bulk.TransportShardBulkAction.shardOperationOnReplica(TransportShardBulkAction.java:69)
org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncReplicaAction.onResponse(TransportReplicationAction.java:522)
org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncReplicaAction.onResponse(TransportReplicationAction.java:491)
org.elasticsearch.index.shard.IndexShardOperationsLock.acquire(IndexShardOperationsLock.java:151)
org.elasticsearch.index.shard.IndexShard.acquireReplicaOperationLock(IndexShard.java:1675)
org.elasticsearch.action.support.replication.TransportReplicationAction$AsyncReplicaAction.doRun(TransportReplicationAction.java:594)
org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
org.elasticsearch.action.support.replication.TransportReplicationAction$ReplicaOperationTransportHandler.messageReceived(TransportReplicationAction.java:475)
org.elasticsearch.action.support.replication.TransportReplicationAction$ReplicaOperationTransportHandler.messageReceived(TransportReplicationAction.java:464)
org.elasticsearch.transport.RequestHandlerRegistry.processMessageReceived(RequestHandlerRegistry.java:69)
org.elasticsearch.transport.TcpTransport$RequestHandler.doRun(TcpTransport.java:1556)
org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:675)
org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
java.lang.Thread.run(Thread.java:748)
101.1% (505.4ms out of 500ms) cpu usage by thread 'elasticsearch[xxxxx.xxxx.xxxx][bulk][T#29]'
10/10 snapshots sharing following 41 elements
java.lang.ThreadLocal$ThreadLocalMap.expungeStaleEntry(ThreadLocal.java:617)
java.lang.ThreadLocal$ThreadLocalMap.getEntryAfterMiss(ThreadLocal.java:440)
java.lang.ThreadLocal$ThreadLocalMap.getEntry(ThreadLocal.java:419)
java.lang.ThreadLocal$ThreadLocalMap.access$000(ThreadLocal.java:298)
java.lang.ThreadLocal.get(ThreadLocal.java:163)
org.apache.lucene.util.CloseableThreadLocal.get(CloseableThreadLocal.java:78)
org.elasticsearch.common.lucene.uid.VersionsResolver.getLookupState(VersionsResolver.java:72)
org.elasticsearch.common.lucene.uid.VersionsResolver.loadDocIdAndVersion(VersionsResolver.java:120)
org.elasticsearch.common.lucene.uid.VersionsResolver.loadVersion(VersionsResolver.java:137)
org.elasticsearch.index.engine.InternalEngine.loadCurrentVersionFromIndex(InternalEngine.java:1377)
org.elasticsearch.index.engine.InternalEngine.resolveDocVersion(InternalEngine.java:393)
org.elasticsearch.index.engine.InternalEngine.planIndexingAsPrimary(InternalEngine.java:570)
org.elasticsearch.index.engine.InternalEngine.index(InternalEngine.java:493)
org.elasticsearch.index.shard.IndexShard.index(IndexShard.java:557)
org.elasticsearch.index.shard.IndexShard.index(IndexShard.java:546)
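Output like the above comes from the nodes hot threads API. A minimal sketch of the call that would produce it, assuming an unauthenticated node reachable on localhost:9200 and using the Python requests library:

    # Minimal sketch: fetch a hot-threads report matching the parameters shown above
    # (busiestThreads=3, interval=500ms, ignoreIdleThreads=true).
    # The localhost:9200 endpoint is an assumption.
    import requests

    resp = requests.get(
        "http://localhost:9200/_nodes/hot_threads",
        params={
            "threads": 3,
            "interval": "500ms",
            "ignore_idle_threads": "true",
        },
    )
    print(resp.text)  # plain-text report, one section per node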
++++++++++++++++++++++++++++++++++++++
I see that the bulk rejected count on one node is always greater than 2706146.
Thanks
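A per-node rejection counter like that can be read from the thread pool stats. A minimal sketch, assuming the same unauthenticated localhost:9200 endpoint (the bulk pool is named "bulk" in this 5.x series):

    # Minimal sketch: active, queued and rejected counts for the bulk thread pool on each node.
    import requests

    resp = requests.get(
        "http://localhost:9200/_cat/thread_pool/bulk",
        params={"v": "true", "h": "node_name,name,active,queue,rejected"},
    )
    print(resp.text)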
It looks like the nodes are very busy processing bulk requests. How many indices/shards do you have in the cluster? How many of these are you actively indexing into? Are these evenly distributed across the nodes in the cluster? What is your use-case?
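The index and shard distribution questions above can be answered quickly with the _cat APIs. A minimal sketch, again assuming a node on localhost:9200:

    # Minimal sketch: shard count and disk usage per node, then one line per index.
    import requests

    base = "http://localhost:9200"  # assumed endpoint

    # _cat/allocation: one line per node with its shard count and disk usage,
    # useful for spotting uneven shard distribution.
    print(requests.get(base + "/_cat/allocation", params={"v": "true"}).text)

    # _cat/indices: health, primary/replica counts, doc count and store size per index.
    print(requests.get(base + "/_cat/indices", params={"v": "true"}).text)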
I would also recommend you look at the following blog posts:
A few days ago I reindexed from one cluster to another with the _reindex API,
but the requests got rejected. I have another, similar cluster running Elasticsearch 5.1.2, and that cluster does not run into these rejections as frequently.
All 248 of my indices receive indexing, but around 100 of them are larger.
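If the _reindex run is what fills the bulk queues, the API can be throttled with requests_per_second. A minimal sketch with hypothetical index names (source_index, dest_index) and an arbitrary throttle value; a cross-cluster copy would additionally need a remote block in source and the source host whitelisted via reindex.remote.whitelist on the destination cluster:

    # Minimal sketch: throttled reindex submitted as a background task.
    # Index names and the 500 docs/sec throttle are placeholders.
    import requests

    body = {
        "source": {"index": "source_index"},
        "dest": {"index": "dest_index"},
    }
    resp = requests.post(
        "http://localhost:9200/_reindex",
        params={"requests_per_second": 500, "wait_for_completion": "false"},
        json=body,
    )
    print(resp.json())  # contains a task id when wait_for_completion=false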