LOG:
[2015-11-12 21:28:00,463][DEBUG][action.search.type ] [crawler_service_001] [carrier-2015.11.12][3], node[Moxif4lVSmGvGC4pAXgC3Q], [P], s[STARTED]: Failed to execute [org.elasticsearch.action.search.SearchRequest@353c87bf] lastShard [true]
org.elasticsearch.common.util.concurrent.EsRejectedExecutionException: rejected execution (queue capacity 1000) on org.elasticsearch.search.action.SearchServiceTransportAction$23@5607fe71
at org.elasticsearch.common.util.concurrent.EsAbortPolicy.rejectedExecution(EsAbortPolicy.java:62)
at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:823)
at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1369)
at org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor.execute(EsThreadPoolExecutor.java:79)
at org.elasticsearch.search.action.SearchServiceTransportAction.execute(SearchServiceTransportAction.java:551)
at org.elasticsearch.search.action.SearchServiceTransportAction.sendExecuteQuery(SearchServiceTransportAction.java:228)
at org.elasticsearch.action.search.type.TransportSearchQueryThenFetchAction$AsyncAction.sendExecuteFirstPhase(TransportSearchQueryThenFetchAction.java:83)
at org.elasticsearch.action.search.type.TransportSearchTypeAction$BaseAsyncAction.performFirstPhase(TransportSearchTypeAction.java:175)
at org.elasticsearch.action.search.type.TransportSearchTypeAction$BaseAsyncAction.start(TransportSearchTypeAction.java:157)
at org.elasticsearch.action.search.type.TransportSearchQueryThenFetchAction.doExecute(TransportSearchQueryThenFetchAction.java:62)
at org.elasticsearch.action.search.type.TransportSearchQueryThenFetchAction.doExecute(TransportSearchQueryThenFetchAction.java:52)
at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:75)
at org.elasticsearch.action.search.TransportSearchAction.doExecute(TransportSearchAction.java:100)
at org.elasticsearch.action.search.TransportSearchAction.doExecute(TransportSearchAction.java:43)
at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:75)
at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:55)
at org.elasticsearch.client.node.NodeClient.execute(NodeClient.java:90)
at org.elasticsearch.client.support.AbstractClient.search(AbstractClient.java:333)
at org.elasticsearch.watcher.support.init.proxy.ClientProxy.search(ClientProxy.java:120)
at org.elasticsearch.watcher.input.search.ExecutableSearchInput.doExecute(ExecutableSearchInput.java:81)
at org.elasticsearch.watcher.input.search.ExecutableSearchInput.execute(ExecutableSearchInput.java:68)
at org.elasticsearch.watcher.input.search.ExecutableSearchInput.execute(ExecutableSearchInput.java:46)
at org.elasticsearch.watcher.execution.ExecutionService.executeInner(ExecutionService.java:347)
at org.elasticsearch.watcher.execution.ExecutionService.execute(ExecutionService.java:271)
at org.elasticsearch.watcher.execution.ExecutionService$WatchExecutionTask.run(ExecutionService.java:417)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
I use Logstash to reindex an older index into another cluster. From that log, it seems the queue is full of reindex requests and the node can't handle normal requests. Does this mean I will suffer some data loss?
PS: below is my reindex configuration — is there any possible improvement to it?
input {
elasticsearch {
host => "x.x.x.x"
index => "carrier-2015.11.11"
size => 500
scroll => "5m"
docinfo => true
}
}
output {
elasticsearch {
host => "x.x.x.y"
index => "%{[@metadata][_index]}"
document_type => "%{[@metadata][_type]}"
document_id => "%{[@metadata][_id]}"
cluster => "elasticsearch_dc_001"
codec => "json"
protocol => "http"
template => "/usr/local/logstash-1.4.2/conf/carrier_template.json"
template_name => "carrier_template"
manage_template => true
template_overwrite => true
}
}