Receiving the following error after upgrading to Spark 3.0.0, with any version of the elasticsearch-hadoop jar
<dependency>
<groupId>org.elasticsearch</groupId>
<artifactId>elasticsearch-hadoop</artifactId>
<version>${elastic.version}</version>
<scope>compile</scope>
</dependency>
Tried elastic.version with 7.1.1, 7.4.0, 7.5.0, and 7.10.0 — the same error occurs with each.
2020-11-20 14:54:25 INFO DAGScheduler:57 - ResultStage 89 (runJob at EsSpark.scala:108) failed in 0.102 s due to Job aborted due to stage failure: Task 1 in stage 89.0 failed 4 times, most recent failure: Lost task 1.3 in stage 89.0 (TID 517, 10.4.6.202, executor 2): java.lang.NoClassDefFoundError: Could not initialize class org.elasticsearch.spark.serialization.ScalaMetadataExtractor$
at org.elasticsearch.spark.serialization.ScalaMetadataExtractor.getValue(ScalaMetadataExtractor.scala:32)
at org.elasticsearch.hadoop.serialization.bulk.PerEntityPoolingMetadataExtractor.get(PerEntityPoolingMetadataExtractor.java:97)
at org.elasticsearch.hadoop.serialization.bulk.AbstractBulkFactory.getMetadataExtractorOrFallback(AbstractBulkFactory.java:493)
at org.elasticsearch.hadoop.serialization.bulk.AbstractBulkFactory.writeObjectHeader(AbstractBulkFactory.java:404)
at org.elasticsearch.hadoop.serialization.bulk.AbstractBulkFactory$DynamicHeaderRef.getDynamicContent(AbstractBulkFactory.java:177)
at org.elasticsearch.hadoop.serialization.bulk.TemplatedBulk.writeTemplate(TemplatedBulk.java:84)
at org.elasticsearch.hadoop.serialization.bulk.TemplatedBulk.write(TemplatedBulk.java:56)
at org.elasticsearch.hadoop.serialization.bulk.BulkEntryWriter.writeBulkEntry(BulkEntryWriter.java:68)
at org.elasticsearch.hadoop.rest.RestRepository.writeToIndex(RestRepository.java:170)
at org.elasticsearch.spark.rdd.EsRDDWriter.write(EsRDDWriter.scala:74)
at org.elasticsearch.spark.rdd.EsSpark$$anonfun$doSaveToEs$1.apply(EsSpark.scala:108)
at org.elasticsearch.spark.rdd.EsSpark$$anonfun$doSaveToEs$1.apply(EsSpark.scala:108)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.run(Task.scala:127)
at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)