Data Loss in elasticsearch in high load

Why not simply GET service_transaction/_count? Since all documents have a different ID, that should do the work, shouldn't it?

Not exactly.

If you see my code.

The topmost for loop generates the transaction ids.

For each of these transaction ids I will have, say, 5 documents [inner for loop]. This count varies — sometimes 5, sometimes 500. For this simple example I used static values, but my JMX program works like that.

so the unique transaction ids generated are: 10,000
but the documents inserted are: 50,000

I need to know, via a GET query, how many unique transaction ids actually made it into the index.

It would be easier to read it if you formatted your code properly :slight_smile: You can use the "preformatted text" function of the editor.

Sorry !! Will try here.

import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.apache.http.HttpHost;

import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

/**
 * Load generator: indexes 10,000 unique transaction ids with 5 documents each
 * (50,000 documents total) into {@code service_transaction} via bulk requests,
 * then logs the expected unique-id count for comparison against the cluster.
 *
 * @author Param
 */
public class SampleLoadProgram {

    private static final Logger LOGGER = Logger.getLogger("SampleLoadProgram");

    // Comma-separated list of "host:port:scheme" triples.
    private static final String ELASTIC_HOST = "127.0.0.1:9200:http";

    // Flush the pending bulk request once it holds this many index actions.
    // Keeping batches bounded avoids one enormous request that the cluster
    // may partially reject under load.
    private static final int BULK_BATCH_SIZE = 1000;

    public static void main(String[] a) throws IOException {
        Set<String> instanceid = new HashSet<>();

        // Preparing the HttpHost array from the configured triples.
        String[] hostArr = ELASTIC_HOST.split(",");
        HttpHost[] hosts = new HttpHost[hostArr.length];
        int index = 0;
        for (String hostString : hostArr) {
            String[] hostSArr = hostString.split("\\:");
            int port = Integer.parseInt(hostSArr[1]);
            hosts[index++] = new HttpHost(hostSArr[0], port, hostSArr[2]);
        }

        // try-with-resources guarantees the client is closed even if a bulk
        // call throws (the original leaked the client on any exception).
        try (RestHighLevelClient client = new RestHighLevelClient(RestClient.builder(hosts))) {
            BulkRequest requestBulk = new BulkRequest();

            // change max length to increase load
            for (int i = 0; i < 10000; i++) {
                String iidstr = UUID.randomUUID().toString();
                instanceid.add(iidstr); // Maintain unique instance id
                for (int j = 0; j < 5; j++) {
                    Map<String, Object> data = new HashMap<>();
                    data.put("trans_id", iidstr);
                    data.put("j_id", UUID.randomUUID().toString());
                    requestBulk.add(
                            new IndexRequest("service_transaction", "service_transaction_type")
                                    .source(data));
                }

                // BUG FIX: the original condition (i % 10000 == 0) fires only at
                // i == 0 in a 10,000-iteration loop, so almost all 50,000 docs
                // piled into one giant final bulk request. Flush by the actual
                // number of pending actions instead.
                if (requestBulk.numberOfActions() >= BULK_BATCH_SIZE) {
                    flushBulk(client, requestBulk);
                    requestBulk = new BulkRequest();
                }
            }

            // Push whatever remains in the final partial batch.
            flushBulk(client, requestBulk);
        }

        LOGGER.log(Level.INFO, "Total expected unique instance count : {0}", instanceid.size());
    }

    /**
     * Executes the bulk request and surfaces per-item failures.
     *
     * BUG FIX: the original ignored the {@link BulkResponse} entirely, so items
     * rejected by the cluster under high load (e.g. es_rejected_execution) were
     * silently dropped — the likely cause of the observed "data loss". Logging
     * hasFailures() makes the loss visible; a production client should also
     * retry the rejected items.
     *
     * @param client      open high-level REST client
     * @param requestBulk batch to send; a no-op when it holds zero actions
     * @throws IOException if the bulk call itself fails
     */
    private static void flushBulk(RestHighLevelClient client, BulkRequest requestBulk)
            throws IOException {
        if (requestBulk.numberOfActions() == 0) {
            return; // nothing pending — avoids an empty bulk request
        }
        BulkResponse response = client.bulk(requestBulk);
        if (response.hasFailures()) {
            LOGGER.log(Level.WARNING, "Bulk indexing reported failures: {0}",
                    response.buildFailureMessage());
        }
    }
}

This topic was automatically closed 28 days after the last reply. New replies are no longer allowed.