Hi,
I have one Elasticsearch server with a single node and six indices. I am indexing documents into one of the indices, named A. Some of the documents are ingested successfully and I can see them, but after a couple of hours indexing stops. I checked the Elasticsearch service and it is still running, then I checked the Elasticsearch logs and found the following error:
failed to run scheduled task [org.elasticsearch.indices.IndexingMemoryController$ShardsIndicesStatusChecker@56e6384c] on thread pool [same]
org.apache.lucene.store.AlreadyClosedException: this IndexWriter is closed
at org.apache.lucene.index.IndexWriter.ensureOpen(IndexWriter.java:877) ~[lucene-core-8.11.1.jar:8.11.1 0b002b11819df70783e83ef36b42ed1223c14b50 - janhoy - 2021-12-14 13:46:43]
at org.apache.lucene.index.IndexWriter.ensureOpen(IndexWriter.java:891) ~[lucene-core-8.11.1.jar:8.11.1 0b002b11819df70783e83ef36b42ed1223c14b50 - janhoy - 2021-12-14 13:46:43]
at org.apache.lucene.index.IndexWriter.getFlushingBytes(IndexWriter.java:781) ~[lucene-core-8.11.1.jar:8.11.1 0b002b11819df70783e83ef36b42ed1223c14b50 - janhoy - 2021-12-14 13:46:43]
at org.elasticsearch.index.engine.InternalEngine.getWritingBytes(InternalEngine.java:649) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.index.shard.IndexShard.getWritingBytes(IndexShard.java:1296) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.indices.IndexingMemoryController.getShardWritingBytes(IndexingMemoryController.java:184) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.indices.IndexingMemoryController$ShardsIndicesStatusChecker.runUnlocked(IndexingMemoryController.java:312) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.indices.IndexingMemoryController$ShardsIndicesStatusChecker.run(IndexingMemoryController.java:292) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.threadpool.Scheduler$ReschedulingRunnable.doRun(Scheduler.java:214) [elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:777) [elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:26) [elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.threadpool.ThreadPool$1.run(ThreadPool.java:444) [elasticsearch-7.17.9.jar:7.17.9]
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515) [?:?]
at java.util.concurrent.FutureTask.run(FutureTask.java:264) [?:?]
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304) [?:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) [?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) [?:?]
at java.lang.Thread.run(Thread.java:830) [?:?]
Caused by: java.lang.ArrayIndexOutOfBoundsException: Index -65536 out of bounds for length 66192
at org.apache.lucene.index.TermsHashPerField.writeByte(TermsHashPerField.java:207) ~[lucene-core-8.11.1.jar:8.11.1 0b002b11819df70783e83ef36b42ed1223c14b50 - janhoy - 2021-12-14 13:46:43]
at org.apache.lucene.index.TermsHashPerField.writeVInt(TermsHashPerField.java:230) ~[lucene-core-8.11.1.jar:8.11.1 0b002b11819df70783e83ef36b42ed1223c14b50 - janhoy - 2021-12-14 13:46:43]
at org.apache.lucene.index.FreqProxTermsWriterPerField.writeProx(FreqProxTermsWriterPerField.java:75) ~[lucene-core-8.11.1.jar:8.11.1 0b002b11819df70783e83ef36b42ed1223c14b50 - janhoy - 2021-12-14 13:46:43]
at org.apache.lucene.index.FreqProxTermsWriterPerField.newTerm(FreqProxTermsWriterPerField.java:116) ~[lucene-core-8.11.1.jar:8.11.1 0b002b11819df70783e83ef36b42ed1223c14b50 - janhoy - 2021-12-14 13:46:43]
at org.apache.lucene.index.TermsHashPerField.initStreamSlices(TermsHashPerField.java:165) ~[lucene-core-8.11.1.jar:8.11.1 0b002b11819df70783e83ef36b42ed1223c14b50 - janhoy - 2021-12-14 13:46:43]
at org.apache.lucene.index.TermsHashPerField.add(TermsHashPerField.java:186) ~[lucene-core-8.11.1.jar:8.11.1 0b002b11819df70783e83ef36b42ed1223c14b50 - janhoy - 2021-12-14 13:46:43]
at org.apache.lucene.index.DefaultIndexingChain$PerField.invert(DefaultIndexingChain.java:974) ~[lucene-core-8.11.1.jar:8.11.1 0b002b11819df70783e83ef36b42ed1223c14b50 - janhoy - 2021-12-14 13:46:43]
at org.apache.lucene.index.DefaultIndexingChain.processField(DefaultIndexingChain.java:527) ~[lucene-core-8.11.1.jar:8.11.1 0b002b11819df70783e83ef36b42ed1223c14b50 - janhoy - 2021-12-14 13:46:43]
at org.apache.lucene.index.DefaultIndexingChain.processDocument(DefaultIndexingChain.java:491) ~[lucene-core-8.11.1.jar:8.11.1 0b002b11819df70783e83ef36b42ed1223c14b50 - janhoy - 2021-12-14 13:46:43]
at org.apache.lucene.index.DocumentsWriterPerThread.updateDocuments(DocumentsWriterPerThread.java:208) ~[lucene-core-8.11.1.jar:8.11.1 0b002b11819df70783e83ef36b42ed1223c14b50 - janhoy - 2021-12-14 13:46:43]
at org.apache.lucene.index.DocumentsWriter.updateDocuments(DocumentsWriter.java:415) ~[lucene-core-8.11.1.jar:8.11.1 0b002b11819df70783e83ef36b42ed1223c14b50 - janhoy - 2021-12-14 13:46:43]
at org.apache.lucene.index.IndexWriter.updateDocuments(IndexWriter.java:1471) ~[lucene-core-8.11.1.jar:8.11.1 0b002b11819df70783e83ef36b42ed1223c14b50 - janhoy - 2021-12-14 13:46:43]
at org.apache.lucene.index.IndexWriter.addDocuments(IndexWriter.java:1444) ~[lucene-core-8.11.1.jar:8.11.1 0b002b11819df70783e83ef36b42ed1223c14b50 - janhoy - 2021-12-14 13:46:43]
at org.elasticsearch.index.engine.InternalEngine.addDocs(InternalEngine.java:1310) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.index.engine.InternalEngine.indexIntoLucene(InternalEngine.java:1248) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.index.engine.InternalEngine.index(InternalEngine.java:1051) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.index.shard.IndexShard.index(IndexShard.java:1066) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.index.shard.IndexShard.applyIndexOperation(IndexShard.java:998) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.index.shard.IndexShard.applyIndexOperationOnPrimary(IndexShard.java:900) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.action.bulk.TransportShardBulkAction.executeBulkItemRequest(TransportShardBulkAction.java:320) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.action.bulk.TransportShardBulkAction$2.doRun(TransportShardBulkAction.java:181) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:26) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.action.bulk.TransportShardBulkAction.performOnPrimary(TransportShardBulkAction.java:245) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.action.bulk.TransportShardBulkAction.dispatchedShardOperationOnPrimary(TransportShardBulkAction.java:134) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.action.bulk.TransportShardBulkAction.dispatchedShardOperationOnPrimary(TransportShardBulkAction.java:74) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.action.support.replication.TransportWriteAction$1.doRun(TransportWriteAction.java:196) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun(ThreadContext.java:777) ~[elasticsearch-7.17.9.jar:7.17.9]
at org.elasticsearch.common.util.concurrent.AbstractRunnable.run(AbstractRunnable.java:26) ~[elasticsearch-7.17.9.jar:7.17.9]
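For reference, the documents go into the index through bulk requests (that is what the TransportShardBulkAction frames in the trace show). A stripped-down example of the kind of request I send, with placeholder field names and my local node address, looks roughly like this:

curl -X POST "localhost:9200/mails/_bulk" -H 'Content-Type: application/json' -d'
{ "index" : {} }
{ "subject" : "example subject", "body" : "example mail body" }
'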
I then started searching for this problem and found that the elasticsearch-shard tool can be used to recover corrupted shards. I went through the documentation and ran the following command, which solved the issue:
elasticsearch-shard remove-corrupted-data --index mails --shard-id 0 --truncate-clean-translog -v
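After that the shard comes back and indexing works again for a while. To confirm the shard is allocated again I check it with the standard cat shards API (localhost:9200 is just my local node):

curl -X GET "localhost:9200/_cat/shards/mails?v"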
But after a few hours the same issue occurs again. I end up running this command 4-5 times a day, which is very time consuming. I want to understand the root cause of this issue so I can fix it permanently. Please help me find the cause.
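If it helps with the diagnosis, I can also share the cluster health and node stats from the time the problem happens, for example:

curl -X GET "localhost:9200/_cluster/health?pretty"
curl -X GET "localhost:9200/_cat/nodes?v&h=name,heap.percent,ram.percent,cpu"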