Hello,
I've had limited success running multiple (4 concurrent) SQL queries against an Oracle database and pushing their results across to my Elasticsearch server.
The queries run once a minute; the first one or two complete and index their data into Elasticsearch, but the remaining queries do not, and Logstash reports the following error:
13:47:00.588 [[main]-pipeline-manager] INFO logstash.pipeline - Pipeline main started
13:47:00.706 [Api Webserver] INFO logstash.agent - Successfully started Logstash API endpoint {:port=>9600}
13:48:01.590 [Ruby-0-Thread-28: /opt/logstash-5.5.0/vendor/bundle/jruby/1.9/gems/rufus-scheduler-3.0.9/lib/rufus/scheduler/jobs.rb:283] INFO logstash.inputs.jdbc - (0.743000s) SELECT kv.hora_inicio as snap_B_time, kv.hora_fin as snap_E_time ,kv.inicio as start_sn_id,kv.fin as end_start_id, (s2.value - s1.value)/(kv.intervalo*1000000) as ASL from
(select iv.start_snap_time as hora_inicio,iv.end_snap_time as hora_fin,iv.start_snap_id as inicio, iv.end_snap_id as fin, extract(hour from (iv.end_snap_time - iv.start_snap_time))*3600 + extract(minute from (iv.end_snap_time - iv.start_snap_time))*60 + extract(second from
(iv.end_snap_time - iv.start_snap_time))as intervalo
from
(SELECT lag(dbid) over (order by dbid, snap_id) AS start_dbid,
dbid AS end_dbid,lag(snap_id) over (order by dbid, snap_id) AS start_snap_id, snap_id AS end_snap_id,
lag(end_interval_time) over (order by dbid, snap_id)
AS start_snap_time, end_interval_time AS end_snap_time,
lag(startup_time) over (order by dbid, snap_id)
AS start_startup_time, startup_time AS end_startup_time
FROM sys.wrm$_snapshot) iv
WHERE iv.start_snap_id IS NOT NULL
AND iv.start_dbid=iv.end_dbid
AND iv.start_startup_time=iv.end_startup_time) kv, sys.wrh$_sys_time_model s1, sys.wrh$_sys_time_model s2, v$sysstat n
where
kv.inicio = s1.snap_id and
kv.fin = s2.snap_id and
n.name = 'DB time' and
s1.stat_id = s2.stat_id and
s2.stat_id= n.stat_id and
kv.hora_inicio > trunc(sysdate - 1)
order by kv.hora_inicio
13:48:01.690 [Ruby-0-Thread-25: /opt/logstash-5.5.0/vendor/bundle/jruby/1.9/gems/rufus-scheduler-3.0.9/lib/rufus/scheduler/jobs.rb:283] INFO logstash.inputs.jdbc - (0.842000s) select enq_time, q_name from WLSTEST_SOAINFRA.EDN_EVENT_QUEUE_TABLE
13:48:01.759 [Ruby-0-Thread-26: /opt/logstash-5.5.0/vendor/bundle/jruby/1.9/gems/rufus-scheduler-3.0.9/lib/rufus/scheduler/jobs.rb:283] INFO logstash.inputs.jdbc - (0.979000s) select 'AVL' Source_Loc,trunc(record_date, 'HH24') Rec_Date, source_flag, count(*) from ods_avl
where record_date > trunc(sysdate) group by trunc(record_date, 'HH24'), source_flag
union
select 'Unit' Source_Loc,trunc(record_date, 'HH24') Rec_Date, source_flag, count(*) from ods_unit_status
where record_date > trunc(sysdate) group by trunc(record_date, 'HH24'), source_flag
union
select 'Event' Source_Loc,trunc(record_date, 'HH24') Rec_Date, source_flag, count(*) from ods_event
where record_date > trunc(sysdate) group by trunc(record_date, 'HH24'), source_flag
order by 2 desc
13:48:02.078 [Ruby-0-Thread-27: /opt/logstash-5.5.0/vendor/bundle/jruby/1.9/gems/rufus-scheduler-3.0.9/lib/rufus/scheduler/jobs.rb:283] INFO logstash.inputs.jdbc - (1.232000s) select enq_time, q_name, user_Data from wlstest_soainfra.edn_oaoo_delivery_table
13:48:02.758 [[main]>worker0] ERROR logstash.outputs.elasticsearch - An unknown error occurred sending a bulk request to Elasticsearch. We will retry indefinitely {:error_message=>"Direct self-reference leading to cycle (through reference chain: oracle.sql.STRUCT[\"descriptor\"]->oracle.sql.StructDescriptor[\"pickler\"]->oracle.jdbc.oracore.OracleTypeADT[\"connection\"]->oracle.jdbc.driver.T4CConnection[\"wrapper\"])", :error_class=>"LogStash::Json::GeneratorError", :backtrace=>["/opt/logstash-5.5.0/logstash-core/lib/logstash/json.rb:52:in `jruby_dump'", "/opt/logstash-5.5.0/vendor/bundle/jruby/1.9/gems/logstash-output-elasticsearch-7.3.6-java/lib/logstash/outputs/elasticsearch/http_client.rb:116:in `bulk'", "org/jruby/RubyArray.java:2414:in `map'", "/opt/logstash-5.5.0/vendor/bundle/jruby/1.9/gems/logstash-output-elasticsearch-7.3.6-java/lib/logstash/outputs/elasticsearch/http_client.rb:115:in `bulk'", "org/jruby/RubyArray.java:1613:in `each'", "/opt/logstash-5.5.0/vendor/bundle/jruby/1.9/gems/logstash-output-elasticsearch-7.3.6-java/lib/logstash/outputs/elasticsearch/http_client.rb:114:in `bulk'", "/opt/logstash-5.5.0/vendor/bundle/jruby/1.9/gems/logstash-output-elasticsearch-7.3.6-java/lib/logstash/outputs/elasticsearch/common.rb:225:in `safe_bulk'", "/opt/logstash-5.5.0/vendor/bundle/jruby/1.9/gems/logstash-output-elasticsearch-7.3.6-java/lib/logstash/outputs/elasticsearch/common.rb:123:in `submit'", "/opt/logstash-5.5.0/vendor/bundle/jruby/1.9/gems/logstash-output-elasticsearch-7.3.6-java/lib/logstash/outputs/elasticsearch/common.rb:91:in `retrying_submit'", "/opt/logstash-5.5.0/vendor/bundle/jruby/1.9/gems/logstash-output-elasticsearch-7.3.6-java/lib/logstash/outputs/elasticsearch/common.rb:42:in `multi_receive'", "/opt/logstash-5.5.0/logstash-core/lib/logstash/output_delegator_strategies/shared.rb:13:in `multi_receive'", "/opt/logstash-5.5.0/logstash-core/lib/logstash/output_delegator.rb:47:in `multi_receive'", "/opt/logstash-5.5.0/logstash-core/lib/logstash/pipeline.rb:420:in `output_batch'", "org/jruby/RubyHash.java:1342:in `each'", "/opt/logstash-5.5.0/logstash-core/lib/logstash/pipeline.rb:419:in `output_batch'", "/opt/logstash-5.5.0/logstash-core/lib/logstash/pipeline.rb:365:in `worker_loop'", "/opt/logstash-5.5.0/logstash-core/lib/logstash/pipeline.rb:330:in `start_workers'"]}
To assist with troubleshooting, I've replied to this post with my Logstash configuration.
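In the meantime, here is a minimal sketch of how the pipeline is laid out. The driver path, connection string, credentials, type tag, and index pattern below are placeholders for illustration, not my real values:

```
input {
  # One jdbc block per query; all four share the same one-minute schedule.
  jdbc {
    jdbc_driver_library => "/opt/ojdbc7.jar"                         # placeholder path
    jdbc_driver_class => "Java::oracle.jdbc.driver.OracleDriver"
    jdbc_connection_string => "jdbc:oracle:thin:@//dbhost:1521/ORCL" # placeholder
    jdbc_user => "monitor"                                           # placeholder
    jdbc_password => "secret"                                        # placeholder
    schedule => "* * * * *"                                          # once a minute
    statement => "select enq_time, q_name, user_Data from wlstest_soainfra.edn_oaoo_delivery_table"
    type => "edn_oaoo"                                               # placeholder tag
  }
  # ...three more jdbc blocks, one for each of the other queries...
}
output {
  elasticsearch {
    hosts => ["localhost:9200"]                # placeholder
    index => "oracle-%{type}-%{+YYYY.MM.dd}"   # placeholder index pattern
  }
}
```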
At first I assumed it was getting caught in the rufus-scheduler, but looking at the backtrace again, it is a LogStash::Json::GeneratorError raised while the elasticsearch output serializes the bulk request: the JSON generator cannot handle an oracle.sql.STRUCT, which suggests the user_Data column returned by the last query is an Oracle object type. I'm open to any suggestions for resolving this.
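If the STRUCT is indeed the culprit, one workaround I'm considering is to stop selecting the raw object column and instead extract a scalar value in SQL, along these lines. This is only a rough sketch: the text_vc attribute assumes the payload is an AQ$_JMS_TEXT_MESSAGE, which may not match my schema:

```sql
-- Return only scalar columns so the JDBC driver never hands Logstash
-- an oracle.sql.STRUCT that the JSON serializer can't handle.
-- t.user_data.text_vc assumes the queue payload is an AQ$_JMS_TEXT_MESSAGE;
-- adjust the attribute (or drop the column entirely) if the type differs.
select t.enq_time,
       t.q_name,
       t.user_data.text_vc as user_data_text
from   wlstest_soainfra.edn_oaoo_delivery_table t
```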
Thanks in advance.