I am trying to isolate my pipelines using the output isolator pattern. One pipeline has both the rsyslog server and Elasticsearch 2 configured as outputs, and another pipeline has only Elasticsearch 1. When my rsyslog server is down and unavailable, I see the following:
[WARN ] 2022-12-11 17:34:39.509 [[es-host3]>worker0] syslog - syslog tcp output exception: closing, reconnecting and resending event {:host=>"rsyslog-service", :port=>514, :exception=>#<Errno::ECONNREFUSED: Connection refused - connect(2) for "rsyslog-service" port 514>, :backtrace=>["org/jruby/ext/socket/RubyTCPSocket.java:144:in `initialize'", "org/jruby/RubyIO.java:876:in `new'", "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-output-syslog-3.0.5/lib/logstash/outputs/syslog.rb:209:in `connect'", "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-output-syslog-3.0.5/lib/logstash/outputs/syslog.rb:177:in `publish'", "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-codec-plain-3.0.6/lib/logstash/codecs/plain.rb:40:in `encode'", "/usr/share/logstash/logstash-core/lib/logstash/codecs/delegator.rb:48:in `block in encode'", "org/logstash/instrument/metrics/AbstractSimpleMetricExt.java:65:in `time'", "org/logstash/instrument/metrics/AbstractNamespacedMetricExt.java:64:in `time'", "/usr/share/logstash/logstash-core/lib/logstash/codecs/delegator.rb:47:in `encode'", "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-output-syslog-3.0.5/lib/logstash/outputs/syslog.rb:147:in `receive'", "/usr/share/logstash/logstash-core/lib/logstash/outputs/base.rb:105:in `block in multi_receive'", "org/jruby/RubyArray.java:1809:in `each'", "/usr/share/logstash/logstash-core/lib/logstash/outputs/base.rb:105:in `multi_receive'", "org/logstash/config/ir/compiler/OutputStrategyExt.java:138:in `multi_receive'", "org/logstash/config/ir/compiler/AbstractOutputDelegatorExt.java:121:in `multi_receive'", "/usr/share/logstash/logstash-core/lib/logstash/java_pipeline.rb:279:in `block in start_workers'"], :event=>#<LogStash::Event:0x3307f4fe>}
Here is how I have implemented the output isolator pattern:
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-configmap
  namespace: udp-test-poc
data:
  logstash.yml: |
    http.host: "0.0.0.0"
    http.port: 9600
  pipelines.yml: |
    - pipeline.id: intake
      queue.type: persisted
      queue.max_bytes: 1024mb
      path.config: "/usr/share/logstash/config/logstash.conf"
    - pipeline.id: es-host1
      queue.type: persisted
      queue.max_bytes: 1024mb
      path.config: "/usr/share/logstash/config/es1.conf"
    - pipeline.id: es-host2
      queue.type: persisted
      queue.max_bytes: 1024mb
      path.config: "/usr/share/logstash/config/es2.conf"
    - pipeline.id: es-host3
      queue.type: persisted
      queue.max_bytes: 1024mb
      path.config: "/usr/share/logstash/config/es3.conf"
  logstash.conf: |
    input {
      beats {
        port => 5044
      }
    }
    filter {
    }
    output {
      pipeline {
        send_to => ["es-host1"]
      }
      pipeline {
        send_to => ["es-host2"]
      }
    }
  es1.conf: |
    input {
      pipeline {
        address => "es-host1"
      }
    }
    output {
      elasticsearch {
        ilm_enabled => false
        hosts => ["eric-data-search-engine:9200"]
        user => 'logstash'
        password => '${LOGSTASH_PW}'
        index => "logstash-beta-%{+YYYY.MM.dd}"
      }
    }
  es2.conf: |
    input {
      pipeline {
        address => "es-host2"
      }
    }
    output {
      pipeline {
        send_to => ["es-host3"]
      }
    }
  es3.conf: |
    input {
      pipeline {
        address => "es-host3"
      }
    }
    output {
      elasticsearch {
        ilm_enabled => false
        hosts => ["ext-se:9200"]
        user => 'logstash'
        password => '${LOGSTASH_PW}'
        index => "logstash-beta-%{+YYYY.MM.dd}"
      }
      syslog {
        host => "rsyslog-service"
        protocol => "tcp"
        port => 514
      }
    }
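For reference, my understanding of the output isolator pattern from the Logstash pipeline-to-pipeline documentation is that each final output sits in its own downstream pipeline, fed directly from the upstream pipeline, so backpressure from one output cannot block the other. A rough sketch (the pipeline addresses and hosts here are placeholders, not my actual config):

# upstream pipeline: fan out to one virtual address per output
output {
  pipeline { send_to => ["es-only"] }
  pipeline { send_to => ["syslog-only"] }
}

# downstream pipeline holding only the elasticsearch output
input  { pipeline { address => "es-only" } }
output { elasticsearch { hosts => ["..."] } }

# downstream pipeline holding only the syslog output
input  { pipeline { address => "syslog-only" } }
output { syslog { host => "..." protocol => "tcp" port => 514 } }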
Am I missing anything? Moreover, I don't see that my queues are full, yet the pipelines are still blocked. Below are the queue details while my Elasticsearch output is blocked.
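These figures are the per-pipeline "queue" sections from the Logstash node stats API (the HTTP API on port 9600 configured in logstash.yml above), e.g. from inside the Logstash pod:

curl -s 'http://localhost:9600/_node/stats/pipelines?pretty'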
es-host2
-------------------------------------------------
Initial - "queue": {
"type": "persisted",
"events_count": 0,
"queue_size_in_bytes": 15843912,
"max_queue_size_in_bytes": 1073741824
}
Blocked - "queue": {
"type": "persisted",
"events_count": 0,
"queue_size_in_bytes": 52550657,
"max_queue_size_in_bytes": 1073741824
},
-------------------------------------------------
intake
-------------------------------------------------
Initial - "queue": {
"type": "persisted",
"events_count": 0,
"queue_size_in_bytes": 4350597,
"max_queue_size_in_bytes": 1073741824
}
Blocked - "queue": {
"type": "persisted",
"events_count": 0,
"queue_size_in_bytes": 45269409,
"max_queue_size_in_bytes": 1073741824
},
-------------------------------------------------
es-host3
-------------------------------------------------
Initial - "queue": {
"type": "persisted",
"events_count": 219580,
"queue_size_in_bytes": 754021751,
"max_queue_size_in_bytes": 1073741824
},
Blocked - "queue": {
"type": "persisted",
"events_count": 121156,
"queue_size_in_bytes": 455198908,
"max_queue_size_in_bytes": 1073741824
},
-------------------------------------------------
es-host1
-------------------------------------------------
Initial - "queue": {
"type": "persisted",
"events_count": 74390,
"queue_size_in_bytes": 284270470,
"max_queue_size_in_bytes": 1073741824
}
Blocked - "queue": {
"type": "persisted",
"events_count": 93025,
"queue_size_in_bytes": 320982236,
"max_queue_size_in_bytes": 1073741824
}
-------------------------------------------------
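To put numbers on "not full": even the largest queue, es-host3 in the blocked state, holds 455198908 bytes of the 1073741824-byte (1 GiB) maximum, i.e. roughly 42%; es-host1 is around 30%, and intake and es-host2 are under 5%.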
[INFO ] 2022-12-11 19:00:21.650 [[es-host1]>worker1] elasticsearch - retrying failed action with response code: 429 ({"type"=>"cluster_block_exception", "reason"=>"index [logstash-beta-2022.12.11] blocked by: [TOO_MANY_REQUESTS/12/disk usage exceeded flood-stage watermark, index has read-only-allow-delete block];"})
[INFO ] 2022-12-11 19:00:21.650 [[es-host1]>worker1] elasticsearch - retrying failed action with response code: 429 ({"type"=>"cluster_block_exception", "reason"=>"index [logstash-beta-2022.12.11] blocked by: [TOO_MANY_REQUESTS/12/disk usage exceeded flood-stage watermark, index has read-only-allow-delete block];"})
[INFO ] 2022-12-11 19:00:21.650 [[es-host1]>worker1] elasticsearch - Retrying individual bulk actions that failed or were rejected by the previous bulk request. {:count=>125}
[WARN ] 2022-12-11 19:00:23.285 [[es-host3]>worker0] syslog - syslog tcp output exception: closing, reconnecting and resending event {:host=>"rsyslog-service", :port=>514, :exception=>#<Errno::ECONNREFUSED: Connection refused - connect(2) for "rsyslog-service" port 514>, :backtrace=>["org/jruby/ext/socket/RubyTCPSocket.java:144:in `initialize'", "org/jruby/RubyIO.java:876:in `new'", "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-output-syslog-3.0.5/lib/logstash/outputs/syslog.rb:209:in `connect'", "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-output-syslog-3.0.5/lib/logstash/outputs/syslog.rb:177:in `publish'", "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-codec-plain-3.0.6/lib/logstash/codecs/plain.rb:40:in `encode'", "/usr/share/logstash/logstash-core/lib/logstash/codecs/delegator.rb:48:in `block in encode'", "org/logstash/instrument/metrics/AbstractSimpleMetricExt.java:65:in `time'", "org/logstash/instrument/metrics/AbstractNamespacedMetricExt.java:64:in `time'", "/usr/share/logstash/logstash-core/lib/logstash/codecs/delegator.rb:47:in `encode'", "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-output-syslog-3.0.5/lib/logstash/outputs/syslog.rb:147:in `receive'", "/usr/share/logstash/logstash-core/lib/logstash/outputs/base.rb:105:in `block in multi_receive'", "org/jruby/RubyArray.java:1809:in `each'", "/usr/share/logstash/logstash-core/lib/logstash/outputs/base.rb:105:in `multi_receive'", "org/logstash/config/ir/compiler/OutputStrategyExt.java:138:in `multi_receive'", "org/logstash/config/ir/compiler/AbstractOutputDelegatorExt.java:121:in `multi_receive'", "/usr/share/logstash/logstash-core/lib/logstash/java_pipeline.rb:279:in `block in start_workers'"], :event=>#<LogStash::Event:0x5d6cd2ed>}
[WARN ] 2022-12-11 19:00:25.301 [[es-host3]>worker0] syslog - syslog tcp output exception: closing, reconnecting and resending event {:host=>"rsyslog-service", :port=>514, :exception=>#<Errno::ECONNREFUSED: Connection refused - connect(2) for "rsyslog-service" port 514>, :backtrace=>["org/jruby/ext/socket/RubyTCPSocket.java:144:in `initialize'", "org/jruby/RubyIO.java:876:in `new'", "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-output-syslog-3.0.5/lib/logstash/outputs/syslog.rb:209:in `connect'", "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-output-syslog-3.0.5/lib/logstash/outputs/syslog.rb:177:in `publish'", "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-codec-plain-3.0.6/lib/logstash/codecs/plain.rb:40:in `encode'", "/usr/share/logstash/logstash-core/lib/logstash/codecs/delegator.rb:48:in `block in encode'", "org/logstash/instrument/metrics/AbstractSimpleMetricExt.java:65:in `time'", "org/logstash/instrument/metrics/AbstractNamespacedMetricExt.java:64:in `time'", "/usr/share/logstash/logstash-core/lib/logstash/codecs/delegator.rb:47:in `encode'", "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-output-syslog-3.0.5/lib/logstash/outputs/syslog.rb:147:in `receive'", "/usr/share/logstash/logstash-core/lib/logstash/outputs/base.rb:105:in `block in multi_receive'", "org/jruby/RubyArray.java:1809:in `each'", "/usr/share/logstash/logstash-core/lib/logstash/outputs/base.rb:105:in `multi_receive'", "org/logstash/config/ir/compiler/OutputStrategyExt.java:138:in `multi_receive'", "org/logstash/config/ir/compiler/AbstractOutputDelegatorExt.java:121:in `multi_receive'", "/usr/share/logstash/logstash-core/lib/logstash/java_pipeline.rb:279:in `block in start_workers'"], :event=>#<LogStash::Event:0x5d6cd2ed>}
Waiting for suggestions.