[2018-11-20T11:25:56,873][WARN ][o.e.x.m.e.l.LocalExporter] unexpected error while indexing monitoring document
org.elasticsearch.xpack.monitoring.exporter.ExportException: UnavailableShardsException[[.monitoring-es-6-2018.11.20][0] primary shard is not active Timeout: [1m], request: [BulkShardRequest [[.monitoring-es-6-2018.11.20][0]] containing [5532] requests]]
at org.elasticsearch.xpack.monitoring.exporter.local.LocalBulk.lambda$throwExportException$2(LocalBulk.java:128) ~[?:?]
at java.util.stream.ReferencePipeline$3$1.accept(ReferencePipeline.java:193) ~[?:1.8.0_162]
at java.util.stream.ReferencePipeline$2$1.accept(ReferencePipeline.java:175) ~[?:1.8.0_162]
at java.util.Spliterators$ArraySpliterator.forEachRemaining(Spliterators.java:948) ~[?:1.8.0_162]
at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:481) ~[?:1.8.0_162]
at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:471) ~[?:1.8.0_162]
at java.util.stream.ForEachOps$ForEachOp.evaluateSequential(ForEachOps.java:151) ~[?:1.8.0_162]
at java.util.stream.ForEachOps$ForEachOp$OfRef.evaluateSequential(ForEachOps.java:174) ~[?:1.8.0_162]
at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234) ~[?:1.8.0_162]
at java.util.stream.ReferencePipeline.forEach(ReferencePipeline.java:418) ~[?:1.8.0_162]
at org.elasticsearch.xpack.monitoring.exporter.local.LocalBulk.throwExportException(LocalBulk.java:129) ~[?:?]
at org.elasticsearch.xpack.monitoring.exporter.local.LocalBulk.lambda$doFlush$0(LocalBulk.java:111) ~[?:?]
at org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:60) ~[elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.action.support.ContextPreservingActionListener.onResponse(ContextPreservingActionListener.java:43) ~[elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.action.support.TransportAction$1.onResponse(TransportAction.java:85) ~[elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.action.support.TransportAction$1.onResponse(TransportAction.java:81) ~[elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.action.support.ContextPreservingActionListener.onResponse(ContextPreservingActionListener.java:43) ~[elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.action.bulk.TransportBulkAction$BulkRequestModifier.lambda$wrapActionListenerIfNeeded$0(TransportBulkAction.java:570) ~[elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:60) [elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.action.bulk.TransportBulkAction$BulkOperation$1.finishHim(TransportBulkAction.java:379) [elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.action.bulk.TransportBulkAction$BulkOperation$1.onFailure(TransportBulkAction.java:374) [elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.action.support.TransportAction$1.onFailure(TransportAction.java:91) [elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.action.support.ContextPreservingActionListener.onFailure(ContextPreservingActionListener.java:50) [elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase.finishAsFailed(TransportReplicationAction.java:897) [elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase.retry(TransportReplicationAction.java:869) [elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase.retryBecauseUnavailable(TransportReplicationAction.java:928) [elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase.retryIfUnavailable(TransportReplicationAction.java:774) [elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase.doRun(TransportReplicationAction.java:727) [elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:37) [elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.action.support.replication.TransportReplicationAction$ReroutePhase$2.onTimeout(TransportReplicationAction.java:888) [elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.cluster.ClusterStateObserver$ContextPreservingListener.onTimeout(ClusterStateObserver.java:317) [elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.cluster.ClusterStateObserver$ObserverClusterStateListener.onTimeout(ClusterStateObserver.java:244) [elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.cluster.service.ClusterApplierService$NotifyTimeout.run(ClusterApplierService.java:576) [elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:626) [elasticsearch-6.3.2.jar:6.3.2]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) [?:1.8.0_162]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) [?:1.8.0_162]
at java.lang.Thread.run(Thread.java:748) [?:1.8.0_162]
Caused by: org.elasticsearch.action.UnavailableShardsException: [.monitoring-es-6-2018.11.20][0] primary shard is not active Timeout: [1m], request: [BulkShardRequest [[.monitoring-es-6-2018.11.20][0]] containing [5532] requests]
Are you asking for help here or just posting a contextless log entry?
I do want some help, but I posted it before it was complete.
Here is my question:
My ELK cluster has run into a problem:
_cluster/health?pretty=true
{
"cluster_name" : "hexin-ELK",
"status" : "red",
"timed_out" : false,
"number_of_nodes" : 1,
"number_of_data_nodes" : 1,
"active_primary_shards" : 2696,
"active_shards" : 2696,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 2738,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 49.613544350386455
}
You have way too many shards for your node. Can you delete any indices?
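For reference, something like this should show how many shards you have and which indices they belong to (assuming the same endpoint and credentials you use elsewhere in this thread):

curl -s --user elastic:hexinpass 'http://10.0.255.15:9200/_cat/shards' | wc -l
curl --user elastic:hexinpass 'http://10.0.255.15:9200/_cat/indices?v'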
Yes, I deleted 2000+ indices yesterday, using a file (named 2) that holds the list of index names:
cat 2 | while read line; do curl --user elastic:hexinpass -XDELETE http://10.0.255.15:9200/$line; done &
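To check how many are left, a quick count like this should do (_cat/indices prints one line per index):

curl -s --user elastic:hexinpass 'http://10.0.255.15:9200/_cat/indices' | wc -l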
You still have too many then; that's causing a heap of problems for you.
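If it stays red after the deletes, the allocation explain API should tell you why one of the remaining unassigned shards can't be assigned; with no request body it picks an unassigned shard for you (same endpoint and credentials as above):

curl --user elastic:hexinpass -X GET 'http://10.0.255.15:9200/_cluster/allocation/explain?pretty'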
[root@hexinpass-log tmp]# curl --user elastic:hexinpass -X GET 'http://10.0.255.15:9200/_cluster/health?pretty=true'
{
"cluster_name" : "hexin-ELK",
"status" : "red",
"timed_out" : false,
"number_of_nodes" : 1,
"number_of_data_nodes" : 1,
"active_primary_shards" : 11,
"active_shards" : 11,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 4,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 73.33333333333333
}
I have deleted all indices, but the problem is still there...
Caused by: org.elasticsearch.action.UnavailableShardsException: [.monitoring-kibana-6-2018.11.20][0] primary shard is not active Timeout: [1m], request: [BulkShardRequest [[.monitoring-kibana-6-2018.11.20][0]] containing [index {[.monitoring-kibana-6-2018.11.20][doc][R9rLL2cBAMAmvv-Q6Uu3], source[{"cluster_uuid":"14ZwzZY-THCMgwlQwE8oPA","timestamp":"2018-11-20T06:25:52.051Z","interval_ms":10000,"type":"kibana_stats","source_node":{"uuid":"ga2U78v8T1GgVvpQ5avV9A","host":"10.0.255.15","transport_address":"10.0.255.15:9300","ip":"10.0.255.15","name":"node-1","timestamp":"2018-11-20T06:25:52.051Z"},"kibana_stats":{"concurrent_connections":4,"os":{"load":{"1m":0.00341796875,"5m":0.0302734375,"15m":0.07568359375},"memory":{"total_in_bytes":33516027904,"free_in_bytes":12923293696,"used_in_bytes":20592734208},"uptime_in_millis":85978000},"process":{"event_loop_delay":0.7524039894342422,"memory":{"heap":{"total_in_bytes":133857280,"used_in_bytes":117613072,"size_limit":1501560832},"resident_set_size_in_bytes":198152192},"uptime_in_millis":1432559},"requests":{"disconnects":0,"total":0,"status_codes":{}},"response_times":{"average":0,"max":0},"timestamp":"2018-11-20T06:25:48.831Z","kibana":{"uuid":"efaf3ff1-342e-4461-8a34-2e5b2c75eeee","name":"hexinpass-log","index":".kibana","host":"hexinpass-log","transport_address":"10.0.255.15:5601","version":"6.3.2","snapshot":false,"status":"green"},"usage":{"index":".kibana","dashboard":{"total":2},"visualization":{"total":10},"search":{"total":0},"index_pattern":{"total":16},"graph_workspace":{"total":0},"timelion_sheet":{"total":0},"xpack":{"reporting":{"available":true,"enabled":true,"browser_type":"phantom","_all":6,"csv":{"available":true,"total":3},"printable_pdf":{"available":true,"total":3,"app":{"visualization":3,"dashboard":0},"layout":{"print":0,"preserve_layout":3}},"status":{"completed":4,"failed":2},"lastDay":{"_all":0,"csv":{"available":true,"total":0},"printable_pdf":{"available":true,"total":0,"app":{"visualization":0,"dashboard":0},"layout":{"print":0,"preserve_layout":0}},"status":{}},"last7Days":{"_all":0,"csv":{"available":true,"total":0},"printable_pdf":{"available":true,"total":0,"app":{"visualization":0,"dashboard":0},"layout":{"print":0,"preserve_layout":0}},"status":{}}}}}}}]}]]
... 12 more
[2018-11-20T14:26:52,141][ERROR][o.e.x.w.e.ExecutionService] [node-1] failed to update watch record [14ZwzZY-THCMgwlQwE8oPA_logstash_version_mismatch_a7e52f3e-fc03-40bb-8542-383bed747e31-2018-11-20T06:26:22.054Z]
org.elasticsearch.ElasticsearchTimeoutException: java.util.concurrent.TimeoutException: Timeout waiting for task.
at org.elasticsearch.common.util.concurrent.FutureUtils.get(FutureUtils.java:72) ~[elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.action.support.AdapterActionFuture.actionGet(AdapterActionFuture.java:54) ~[elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.xpack.watcher.history.HistoryStore.put(HistoryStore.java:85) ~[x-pack-watcher-6.3.2.jar:6.3.2]
at org.elasticsearch.xpack.watcher.execution.ExecutionService.execute(ExecutionService.java:325) ~[x-pack-watcher-6.3.2.jar:6.3.2]
at org.elasticsearch.xpack.watcher.execution.ExecutionService.lambda$executeAsync$6(ExecutionService.java:409) ~[x-pack-watcher-6.3.2.jar:6.3.2]
at org.elasticsearch.xpack.watcher.execution.ExecutionService$WatchExecutionTask.run(ExecutionService.java:563) [x-pack-watcher-6.3.2.jar:6.3.2]
at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run(ThreadContext.java:626) [elasticsearch-6.3.2.jar:6.3.2]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) [?:1.8.0_162]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) [?:1.8.0_162]
at java.lang.Thread.run(Thread.java:748) [?:1.8.0_162]
Caused by: java.util.concurrent.TimeoutException: Timeout waiting for task.
at org.elasticsearch.common.util.concurrent.BaseFuture$Sync.get(BaseFuture.java:235) ~[elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.common.util.concurrent.BaseFuture.get(BaseFuture.java:69) ~[elasticsearch-6.3.2.jar:6.3.2]
at org.elasticsearch.common.util.concurrent.FutureUtils.get(FutureUtils.java:70) ~[elasticsearch-6.3.2.jar:6.3.2]
... 9 more
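Those last failures are all against the daily .monitoring-* indices, so as a sketch of a next step (same endpoint and credentials as above): list only the red indices, and if they are just .monitoring-* ones, deleting them should be safe, since the exporter creates a fresh dated index on the next write. The wildcard delete assumes your cluster still allows wildcard index deletion (the 6.x default):

curl --user elastic:hexinpass 'http://10.0.255.15:9200/_cat/indices?v&health=red'
curl --user elastic:hexinpass -X DELETE 'http://10.0.255.15:9200/.monitoring-*'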