Elasticsearch indexing issue

Hi,

My application reads data from an SQS queue and writes the data to both
Elasticsearch (ES) and MySQL. We currently write to both as a backup. This is
done with 2 consumers, each with 5 workers; each worker writes to both MySQL
and ES. The app is written in Java and supports inserting, deleting and
updating documents (one by one, not in bulk). ES inserts and updates are both
done with an upsert: on update, the document is updated if it exists or
inserted if it doesn't; on insert, the document is inserted, or nothing is
done if an update has already created it.
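For illustration, the "insert, or do nothing if the update got there first"
behaviour can be expressed with opType CREATE on the 1.x Java client. This is
only a sketch (reusing the indexListener/tableListener/suId/map variables from
the upsert code below), not necessarily our exact code:

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.index.engine.DocumentAlreadyExistsException;

// CREATE fails instead of overwriting when the id already exists,
// e.g. when an earlier upsert already created the document.
try {
    client.prepareIndex(indexListener, tableListener, suId)
            .setSource(map)
            .setOpType(IndexRequest.OpType.CREATE)
            .execute().actionGet();
} catch (DocumentAlreadyExistsException e) {
    // the update arrived first and created the document: nothing to do
}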
ES is installed on a different server (Ubuntu, 8 GB RAM, 4 CPUs) than MySQL.
ES configuration:

/etc/elasticsearch/elasticsearch.yml
bootstrap.mlockall: true
ES_MIN_MEM: 4g
ES_MAX_MEM: 4g
http.max_initial_line_length: 48k

/etc/default/elasticsearch
ES_HEAP_SIZE=4g
MAX_LOCKED_MEMORY=unlimited
When I run in the console:

  • ulimit returns unlimited
  • ulimit -n returns 1024
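Side note: the node stats below report max_file_descriptors 65535 while my
shell says 1024, presumably because the init script raises the limit for the
ES process itself. A quick sketch to check what a JVM actually gets in a given
environment (this assumes an OpenJDK/Oracle JVM, since it uses
com.sun.management):

import java.lang.management.ManagementFactory;
import com.sun.management.UnixOperatingSystemMXBean;

public class FdLimit {
    public static void main(String[] args) {
        // Reports the descriptor limits of this JVM process; run it from the
        // same environment/init script as ES to see what the node really gets.
        UnixOperatingSystemMXBean os = (UnixOperatingSystemMXBean)
                ManagementFactory.getOperatingSystemMXBean();
        System.out.println("max fds  = " + os.getMaxFileDescriptorCount());
        System.out.println("open fds = " + os.getOpenFileDescriptorCount());
    }
}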

The code for the upsert:

client.prepareUpdate(indexListener, tableListener, suId)
        .setScript("ctx._source." + columnName + "=\"" + columnValue + "\"")
        .setConsistencyLevel(WriteConsistencyLevel.DEFAULT)
        .setRetryOnConflict(10)
        .setUpsert(map)
        .execute().actionGet();
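I'm aware that concatenating columnValue into the script compiles a new script
for every distinct value and breaks if the value contains a quote; a
parameterized variant would look roughly like this (a sketch, assuming the
1.x client's addScriptParam):

// Same upsert, but with the value passed as a script parameter.
// The script text is now constant per column, so it can be cached,
// and no quoting/escaping of columnValue is needed.
client.prepareUpdate(indexListener, tableListener, suId)
        .setScript("ctx._source." + columnName + "=v")
        .addScriptParam("v", columnValue)
        .setConsistencyLevel(WriteConsistencyLevel.DEFAULT)
        .setRetryOnConflict(10)
        .setUpsert(map)
        .execute().actionGet();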

Node configuration:

localhost:9200/_nodes?pretty

{
  "cluster_name" : "test",
  "nodes" : {
    "Aa-bbb" : {
      "settings" : {
        "node" : {
          "name" : "testing"
        },
        "bootstrap" : {
          "mlockall" : "true"
        },
        "http" : {
          "max_initial_line_length" : "48k"
        },
        "ES_MIN_MEM" : "4g",
        "name" : "testing",
        "pidfile" : "/var/run/elasticsearch.pid",
        "path" : {
          "data" : "/var/lib/elasticsearch",
          "work" : "/tmp/elasticsearch",
          "home" : "/usr/share/elasticsearch",
          "conf" : "/etc/elasticsearch",
          "logs" : "/var/log/elasticsearch"
        },
        "cluster" : {
          "name" : "test"
        },
        "ES_MAX_MEM" : "4g",
        "config" : "/etc/elasticsearch/elasticsearch.yml"
      },
      "os" : {
        "refresh_interval" : 1000,
        "available_processors" : 4,
        "cpu" : {
          "vendor" : "Intel",
          "model" : "Xeon",
          "mhz" : 2800,
          "total_cores" : 4,
          "total_sockets" : 4,
          "cores_per_socket" : 32,
          "cache_size_in_bytes" : 25600
        },
        "mem" : {
          "total_in_bytes" : 7812386816
        },
        "swap" : {
          "total_in_bytes" : 0
        }
      },
      "process" : {
        "refresh_interval" : 1000,
        "id" : 1111,
        "max_file_descriptors" : 65535,
        "mlockall" : true
      },
      "jvm" : {
        "pid" : 1111,
        "version" : "1.7.0_55",
        "vm_name" : "OpenJDK 64-Bit Server VM",
        "vm_version" : "vers",
        "vm_vendor" : "Oracle Corporation",
        "start_time" : 1402687303832,
        "mem" : {
          "heap_init_in_bytes" : 4294967296,
          "heap_max_in_bytes" : 4260102144,
          "non_heap_init_in_bytes" : 24313856,
          "non_heap_max_in_bytes" : 224395264,
          "direct_max_in_bytes" : 4260102144
        },
        "gc_collectors" : [ "ParNew", "ConcurrentMarkSweep" ],
        "memory_pools" : [ "Code Cache", "Par Eden Space", "Par Survivor Space", "CMS Old Gen", "CMS Perm Gen" ]
      },
      "thread_pool" : {
        "generic" : {
          "type" : "cached",
          "keep_alive" : "30s"
        },
        "index" : {
          "type" : "fixed",
          "min" : 4,
          "max" : 4,
          "queue_size" : "200"
        },
        "get" : {
          "type" : "fixed",
          "min" : 4,
          "max" : 4,
          "queue_size" : "1k"
        },
        "snapshot" : {
          "type" : "scaling",
          "min" : 1,
          "max" : 2,
          "keep_alive" : "5m"
        },
        "merge" : {
          "type" : "scaling",
          "min" : 1,
          "max" : 2,
          "keep_alive" : "5m"
        },
        "suggest" : {
          "type" : "fixed",
          "min" : 4,
          "max" : 4,
          "queue_size" : "1k"
        },
        "bulk" : {
          "type" : "fixed",
          "min" : 4,
          "max" : 4,
          "queue_size" : "50"
        },
        "optimize" : {
          "type" : "fixed",
          "min" : 1,
          "max" : 1
        },
        "warmer" : {
          "type" : "scaling",
          "min" : 1,
          "max" : 2,
          "keep_alive" : "5m"
        },
        "flush" : {
          "type" : "scaling",
          "min" : 1,
          "max" : 2,
          "keep_alive" : "5m"
        },
        "search" : {
          "type" : "fixed",
          "min" : 12,
          "max" : 12,
          "queue_size" : "1k"
        },
        "percolate" : {
          "type" : "fixed",
          "min" : 4,
          "max" : 4,
          "queue_size" : "1k"
        },
        "management" : {
          "type" : "scaling",
          "min" : 1,
          "max" : 5,
          "keep_alive" : "5m"
        },
        "refresh" : {
          "type" : "scaling",
          "min" : 1,
          "max" : 2,
          "keep_alive" : "5m"
        }
      },
      "network" : {
        "refresh_interval" : 5000
      }
    }
  }
}

The problem is that a lot of messages pile up unconsumed in SQS (~20,000). If
I stop ES (so the workers write only to MySQL), the messages are consumed
without falling behind. Is there any ES configuration that I've missed?
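If it helps the diagnosis, this is roughly how the thread pool rejection
counters could be read from the same Java client, to see whether the index
pool (fixed, size 4, queue 200 above) is pushing back on the single-document
writes. A minimal sketch, assuming the 1.x API; the same numbers are available
via localhost:9200/_nodes/stats?pretty:

import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.threadpool.ThreadPoolStats;

// A growing "rejected" count on the "index" pool would mean ES is
// refusing writes faster than the workers can retry them.
NodesStatsResponse stats = client.admin().cluster()
        .prepareNodesStats()
        .clear()
        .setThreadPool(true)
        .execute().actionGet();
for (NodeStats node : stats.getNodes()) {
    for (ThreadPoolStats.Stats pool : node.getThreadPool()) {
        System.out.println(pool.getName() + " rejected=" + pool.getRejected());
    }
}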
