PowerEdge R420
Physical Memory: 24GB
2 X Intel(R) Xeon(R) CPU E5-2420 0 @ 1.90GHz (Total 24 cores with
Multi-threading)
Centos _6.4_x86_64
Logstash error:
Exception in thread "LogStash::Runner" org.jruby.exceptions.RaiseException:
(TimeoutError) watchdog timeout
seems to be caused by:
ElasticSearch Error:
[2013-08-29 00:04:07,236][WARN
][netty.channel.socket.nio.AbstractNioSelector] Unexpected exception in the
selector loop.
java.lang.OutOfMemoryError: Java heap space
at
org.elasticsearch.common.netty.buffer.HeapChannelBuffer.(HeapChannelBuffer.java:42)
at
org.elasticsearch.common.netty.buffer.BigEndianHeapChannelBuffer.(BigEndianHeapChannelBuffer.java:34)
at
org.elasticsearch.common.netty.buffer.ChannelBuffers.buffer(ChannelBuffers.java:134)
at
org.elasticsearch.common.netty.buffer.HeapChannelBufferFactory.getBuffer(HeapChannelBufferFactory.java:68)
at
org.elasticsearch.common.netty.buffer.AbstractChannelBufferFactory.getBuffer(AbstractChannelBufferFactory.java:48)
at
org.elasticsearch.common.netty.channel.socket.nio.NioWorker.read(NioWorker.java:80)
at
org.elasticsearch.common.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:107)
at
org.elasticsearch.common.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:312)
at
org.elasticsearch.common.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:88)
at
org.elasticsearch.common.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178)
at
org.elasticsearch.common.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)
at
org.elasticsearch.common.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)
at
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:724)
[2013-08-29 00:08:33,743][WARN ][index.engine.robin ]
[elastic-master] [application-2013.08.29][0] failed engine
java.lang.OutOfMemoryError: Java heap space
[2013-08-29 00:08:33,743][WARN ][transport.netty ]
[elastic-master] exception caught on transport layer [[id: 0x7e825467,
/192.168.x.x:33673 => /192.168.x.x:9300]], closing connection
Logstash runs with:
-Xmx8192M -Xms2048M -XX:+UseConcMarkSweepGC
ElasticSearch runs with:
-Xms4g -Xmx8g -Xss512k -Djava.awt.headless=true -XX:+UseParNewGC
-XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75
-XX:+UseCMSInitiatingOccupancyOnly -XX:+UseCondCardMark
-XX:+HeapDumpOnOutOfMemoryError -Delasticsearch
-Des.path.home=/opt/elasticsearch-0.20.6 -cp
:/opt/elasticsearch-0.20.6/lib/elasticsearch-0.20.6.jar:/opt/elasticsearch-0.20.6/lib/:/opt/elasticsearch-0.20.6/lib/sigar/
org.elasticsearch.bootstrap.ElasticSearch
Logstash config:
input {
gelf {
type => application
port => 12201
}
gelf {
type => access_log
port => 12202
format => 'plain'
}
gelf {
type => trackinstall
port => 12203
}
}
filter {
grok {
type => "access_log"
patterns_dir => "./patterns"
pattern => "%{IP:client} \- \- \"%{WORD:method}
%{URIPATH:uri_path}%{URIPARAM:params} %{DATA:protocol}" %{NUMBER:code}
%{NUMBER:bytes}"
pattern => "%{IP:client} - - "%{WORD:method}
%{URIPATH:uri_path}%{URIPARAM:params} %{DATA:protocol}" %{NUMBER:code} -"
pattern => "%{IP:client} - - "%{WORD:method} %{URIPATH:uri_path}
%{DATA:protocol}" %{NUMBER:code} %{NUMBER:bytes}"
}
grok {
type => "trackinstall"
patterns_dir => "./patterns"
pattern => "%{IP:client} \- \- \"%{WORD:method}
%{URIPATH:uri_path}%{URIPARAM:params} %{DATA:protocol}" %{NUMBER:code}
%{NUMBER:bytes}"
pattern => "%{IP:client} - - "%{WORD:method}
%{URIPATH:uri_path}%{URIPARAM:params} %{DATA:protocol}" %{NUMBER:code} -"
pattern => "%{IP:client} - - "%{WORD:method} %{URIPATH:uri_path}
%{DATA:protocol}" %{NUMBER:code} %{NUMBER:bytes}"
}
kv {
type => "access_log"
fields => ["params"]
field_split=> "&? "
}
kv {
type => "trackinstall"
fields => ["params"]
field_split=> "&? "
}
urldecode {
type => "access_log"
all_fields => true
}
urldecode {
type => "trackinstall"
all_fields => true
}
}
output {
elasticsearch {
host => "192.168.x.x"
bind_host => "192.168.x.x"
cluster => "startapp"
node_name => "logstash_node_ls1"
max_inflight_requests => 250000
type => "application"
index => "application-%{+YYYY.MM.dd}"
}
elasticsearch {
host => "192.168.x.x"
bind_host => "192.168.x.x"
cluster => "startapp"
node_name => "logstash_node_ls1"
max_inflight_requests => 250000
type => "access_log"
index => "access-%{+YYYY.MM.dd}"
}
elasticsearch {
host => "192.168.x.x"
bind_host => "192.168.x.x"
cluster => "startapp"
node_name => "logstash_node_ls1"
max_inflight_requests => 250000
type => "trackinstall"
index => "trackinstall-%{+YYYY.MM.dd}"
}
}
ElasticSearch config:
*
*
cluster.name: somename
node.name: "elastic-master"
index.number_of_shards: 1
index.number_of_replicas: 0
bootstrap.mlockall: true
network.host: 192.168.x.x
gateway.expected_nodes: 1
indices.memory.index_buffer_size: 25%
index.refresh_interval: 30
index.translog.flush_threshold_ops: 5000
index.store.compress.stored: true
threadpool.search.type: fixed
threadpool.search.size: 20
threadpool.search.queue_size: 100
threadpool.index.type: fixed
threadpool.index.size: 60
indices.store.throttle.type: none
indices.store.throttle.max_bytes_per_sec: 5m
index.cache.field.max_size: 5000
index.cache.field.expire: 10m
--
You received this message because you are subscribed to the Google Groups "elasticsearch" group.
To unsubscribe from this group and stop receiving emails from it, send an email to elasticsearch+unsubscribe@googlegroups.com.
For more options, visit https://groups.google.com/groups/opt_out.