Logstash pipeline and agent errors in Docker logs

I'm running Logstash 6.5.4 in a Docker container with the following conf file:

input {
  redis {
    host => "${REDIS_0_HOST}"
    port => "${REDIS_0_PORT}"
    data_type => "list"
    key => "logstash"
  }
}
input {
  redis {
    host => "${REDIS_1_HOST}"
    port => "${REDIS_1_PORT}"
    data_type => "list"
    key => "logstash"
  }
}

filter {

  # if we successfully parsed a message from the raw log, dive deeper into it and assign more fields
  if [message] {
  
    # catch gelatin lib output emitted on container startup and drop it
    if "20500017" in [message] { drop { } }
    if "2050001c" in [message] { drop { } }
 
    # remove trailing whitespace from message field
    mutate {
      strip => ["message"]
    } 
  
    # handle "message repeated X times" wrapper messages
    grok {
      match => ["message", "message repeated %{NUMBER:repeat_count} times: \[ %{GREEDYDATA:message}\]"]
      overwrite => [ "message" ]
      tag_on_failure => [ ]
    }
    
    # handle message fields that already have structured json content
    if [program] == "austin-perf" { 
      json {
        source => "message"
        remove_field => ["message"]
      }
    } else { 
      grok {
        break_on_match => true
        patterns_dir => ["/usr/share/logstash/config/patterns"]
        match => [ 
          "message", "%{OBLOG_REVIVE_DATE}",
          "message", "%{OBLOG_REVIVE}",
          "message", "%{OBLOG_DATE}",
          "message", "%{OBLOG}",
          "message", "%{WORD}, \[%{TIMESTAMP_ISO8601} #%{NUMBER}\]  ?%{WORD:level} -- : %{GREEDYDATA:kvpairs}", # ruby app logs
          "message", "%{USERNAME:level}: ?%{PATH:file} %{NUMBER:line_num} %{GREEDYDATA:kvpairs}",
          "message", "%{USERNAME:level}: ?%{GREEDYDATA:kvpairs}",
          "message", "%{URIPATH:file}:%{POSINT:line_num}" #ruby app exceptions
        ]
      }
      
      if "\." not in [kvpairs] {
        kv {
          source => "kvpairs"
          include_keys => [
            "pulse_git_events",
            "pulse_trending_count",
            "pulse_news_count",
            "kafka_records",
            "repeat_count",
            "used_memory",
            "new_kafka_articles",
            "wcs_training_time",
            "rokerbot_event",
            "health_check",
            "rokerbot_bot_utterance",
            "rokerbot_user_utterance",
            "Date_Conn_Time",
            "Date_Query_Time",
            "Date_Parse_Time",
            "News_Conn_Time",
            "News_Query_Time",
            "NEWS_FAIL_TIME",
            "writing_image",
            "timed_app",
            "ran_for",
            "app_name",
            "klocker_app_name",
            "memory_used",
            "cpu_usage",
            "rss_mem",
            "vms_mem",
            "shared_mem",
            "uss_mem",
            "pss_mem",
            "text_mem",
            "data_mem",
            "total_gpu_mem",
            "used_gpu_mem",
            "free_gpu_mem"
          ] 
        }
      }

      prune {
        blacklist_names => ["%{URI}"]
      }
    }
    
    if [file] and [line_num] { 
      mutate {
        add_field => {
          "test_unique" => "%{file}:%{line_num}"
        }
      }
    }
  }

  mutate {
    convert => {
      "pulse_git_events" => "integer"
      "pulse_trending_count" => "integer"
      "pulse_news_count" => "integer"
      "kafka_records" => "integer"
      "repeat_count" => "integer"
      "used_memory" => "integer"
      "new_kafka_articles" => "integer"
      "wcs_training_time" => "integer"
      "ran_for" => "integer"
      "Date_Conn_Time" => "integer"
      "Date_Query_Time" => "integer"
      "Date_Parse_Time" => "integer"
      "News_Conn_Time" => "integer"
      "News_Query_Time" => "integer"
      "NEWS_FAIL_TIME" => "integer"
      "memory_used" => "integer"
      "cpu_usage" => "double"
      "rss_mem" => "integer"
      "vms_mem" => "integer"
      "shared_mem" => "integer"
      "uss_mem" => "integer"
      "pss_mem" => "integer"
      "text_mem" => "integer"
      "data_mem" => "integer"
      "total_gpu_mem" => "integer"
      "used_gpu_mem" => "integer"
      "free_gpu_mem" => "integer"
    }

    lowercase => ["level"]
    remove_field => [ "timestamp", "kvpairs", "type", "_type" ]

    add_field => {
      "time" => "%{+HHmmssZ}"
      "weekday" => "%{+EEE}"
    }
  }
}

output {
  elasticsearch {
    hosts => ["${ES_DATA_0}","${ES_DATA_1}"]
    index => "logstash-%{+YYYY.MM.dd}"
  }
}

And the errors in the log file (split into a second post due to the character limit):

[2019-02-15T20:40:16,170][ERROR][logstash.pipeline        ] Error registering plugin {:pipeline_id=>"main", :plugin=>"#<LogStash::FilterDelegator:0x2167b8b9 @metric_events_out=org.jruby.proxy.org.logstash.instrument.metrics.counter.LongCounter$Proxy2 -  name: out value:0, @metric_events_in=org.jruby.proxy.org.logstash.instrument.metrics.counter.LongCounter$Proxy2 -  name: in value:0, @metric_events_time=org.jruby.proxy.org.logstash.instrument.metrics.counter.LongCounter$Proxy2 -  name: duration_in_millis value:0, @id=\"aea4aafb026ad131d9c4ff839dd2cc3e93b33c057aa29e817d5a758c8a489fe6\", @klass=LogStash::Filters::Mutate, @metric_events=#<LogStash::Instrument::NamespacedMetric:0x27034ac7>, @filter=<LogStash::Filters::Mutate add_field=>{\"time\"=>\"%{+HHmmssZ}\", \"weekday\"=>\"%{+EEE}\"}, convert=>{\"pulse_git_events\"=>\"integer\", \"pulse_trending_count\"=>\"integer\", \"pulse_news_count\"=>\"integer\", \"kafka_records\"=>\"integer\", \"repeat_count\"=>\"integer\", \"used_memory\"=>\"integer\", \"new_kafka_articles\"=>\"integer\", \"wcs_training_time\"=>\"integer\", \"ran_for\"=>\"integer\", \"Date_Conn_Time\"=>\"integer\", \"Date_Query_Time\"=>\"integer\", \"Date_Parse_Time\"=>\"integer\", \"News_Conn_Time\"=>\"integer\", \"News_Query_Time\"=>\"integer\", \"NEWS_FAIL_TIME\"=>\"integer\", \"memory_used\"=>\"integer\", \"cpu_usage\"=>\"double\", \"rss_mem\"=>\"integer\", \"vms_mem\"=>\"integer\", \"shared_mem\"=>\"integer\", \"uss_mem\"=>\"integer\", \"pss_mem\"=>\"integer\", \"text_mem\"=>\"integer\", \"data_mem\"=>\"integer\", \"total_gpu_mem\"=>\"integer\", \"used_gpu_mem\"=>\"integer\", \"free_gpu_mem\"=>\"integer\"}, id=>\"aea4aafb026ad131d9c4ff839dd2cc3e93b33c057aa29e817d5a758c8a489fe6\", lowercase=>[\"level\"], remove_field=>[\"timestamp\", \"kvpairs\", \"type\", \"_type\"], enable_metric=>true, periodic_flush=>false>>", :error=>"translation missing: en.logstash.agent.configuration.invalid_plugin_register", :thread=>"#<Thread:0x47fabf98 run>"}
[2019-02-15T20:40:16,478][ERROR][logstash.pipeline        ] Pipeline aborted due to error {:pipeline_id=>"main", :exception=>#<LogStash::ConfigurationError: translation missing: en.logstash.agent.configuration.invalid_plugin_register>, :backtrace=>["/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-filter-mutate-3.3.4/lib/logstash/filters/mutate.rb:219:in `block in register'", "org/jruby/RubyHash.java:1343:in `each'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-filter-mutate-3.3.4/lib/logstash/filters/mutate.rb:217:in `register'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:242:in `register_plugin'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:253:in `block in register_plugins'", "org/jruby/RubyArray.java:1734:in `each'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:253:in `register_plugins'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:595:in `maybe_setup_out_plugins'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:263:in `start_workers'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:200:in `run'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:160:in `block in start'"], :thread=>"#<Thread:0x47fabf98 run>"}
[2019-02-15T20:40:16,498][ERROR][logstash.agent           ] Failed to execute action {:id=>:main, :action_type=>LogStash::ConvergeResult::FailedAction, :message=>"Could not execute action: PipelineAction::Create<main>, action_result: false", :backtrace=>nil}
[2019-02-15T20:40:16,803][INFO ][logstash.agent           ] Successfully started Logstash API endpoint {:port=>9600}
[2019-02-15T20:40:22,003][ERROR][logstash.agent           ] Failed to execute action {:action=>LogStash::PipelineAction::Stop/pipeline_id:main, :exception=>"NoMethodError", :message=>"undefined method `call' for nil:NilClass", :backtrace=>["/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-input-redis-3.4.0/lib/logstash/inputs/redis.rb:110:in `stop'", "/usr/share/logstash/logstash-core/lib/logstash/inputs/base.rb:87:in `do_stop'", "org/jruby/RubyArray.java:1734:in `each'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:464:in `stop_inputs'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:447:in `shutdown'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline_action/stop.rb:14:in `block in execute'", "/usr/share/logstash/logstash-core/lib/logstash/pipeline_action/stop.rb:13:in `execute'", "/usr/share/logstash/logstash-core/lib/logstash/agent.rb:317:in `block in converge_state'"]}

double isn't a supported conversion type for the mutate filter's convert option, which is why the plugin fails to register. Does float work for you?
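
For reference, a minimal sketch of the fix (assuming cpu_usage just needs to hold a floating-point value):

filter {
  mutate {
    convert => {
      # "double" is not a valid target type for mutate's convert;
      # "float" is the supported equivalent, and the invalid type is
      # what triggers the invalid_plugin_register error at startup
      "cpu_usage" => "float"
    }
  }
}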

Oh wait, this is the same guy who had the float index errors. I tried changing that float value to double, thought it had fixed things, and then found these new errors, but it seems the initial error from the other topic is still persisting.
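
Side note: you can often catch this kind of startup failure before deploying by running Logstash's config check against the same image. Something like this should work (the host path is illustrative; adjust it to wherever your conf file lives):

docker run --rm \
  -v /path/to/pipeline:/usr/share/logstash/pipeline \
  docker.elastic.co/logstash/logstash:6.5.4 \
  logstash --config.test_and_exit -f /usr/share/logstash/pipeline/logstash.conf

--config.test_and_exit validates the configuration and exits instead of starting the pipeline. It reliably flags syntax problems, though some register-time checks inside individual plugins may still only surface when the pipeline actually starts.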
