How to troubleshoot Logstash crashing

Hi,

I have two nodes that have the same Logstash configuration; one of them keeps failing with this error:

TypeError: can't convert nil into String

Is there a way for me to understand which message it failed on, and on which line in my configuration?

My conf file is:

input {
  file {
    path => "/opt/Fabrix.TV/logs/manager.log"
    type => manager_vspp_log
    codec => multiline {
      pattern => "^%{DATESTAMP:log_timestamp} "
      negate => true
      what => previous
    }
  }
  file {
    path => "/opt/Fabrix.TV/logs/streamer.log"
    type => streamer_vspp_log
    codec => multiline {
      pattern => "^%{DATESTAMP:log_timestamp} "
      negate => true
      what => previous
    }
  }
}

filter {
  environment {
    add_field => { "first_syspu" => "${FIRST_SYSPU}" }
    add_field => { "drop_events" => "${DROP_LOGLEVELS}" }
  }
  fingerprint {
    source => ["message"]
    target => "fingerprint"
    key => "78787878"
    method => "SHA1"
    concatenate_sources => true
  }
  date {
    match => [ "log_timestamp", "MM/dd/YY HH:mm:ss.SSS" ]
  }
  if [type] =~ "vspp_log" {
    mutate { add_field => { "event_type" => "log" } }
    if [path] =~ "manager.log" {
      grok {
        match => { "message" => "%{DATESTAMP:log_timestamp}%{SPACE}%{WORD:loglevel}%{SPACE}(?<component_name>\w*[\n| ]\w*)%{SPACE}%{WORD:message_type}%{SPACE}%{INT:message_code}%{SPACE}(?<session_id>(@\w{16})?)%{GREEDYDATA:data}" }
      }
      mutate { update => { "component_name" => "vspp_manager" } }
    } else {
      grok {
        match => { "message" => "%{DATESTAMP:log_timestamp}%{SPACE}%{WORD:loglevel}%{SPACE}(?<component_name>\w)%{IP:clientip}%{SPACE}%{WORD:message_type}%{SPACE}%{INT:message_code}%{SPACE}(?<session_id>(@\w{16})?)%{GREEDYDATA:data}" }
      }
      if [type] == "streamer" {
        mutate { update => { "component_name" => "vspp_streamer" } }
      }
    }
    mutate {
      convert => { "has_session_id" => "boolean" }
    }
    if [session_id] =~ "@" {
      mutate { add_field => { "has_session_id" => true } }
    } else {
      mutate { add_field => { "has_session_id" => false } }
    }
    translate {
      field => "message_code"
      destination => "translated_message_code"
      dictionary_path => "/etc/logstash/utils/logstash_translate.yml"
    }
    mutate {
      convert => { "message_code" => "integer" }
    }
    if [message_code] in [17019, 17023] {
      # Tear Down Message Codes.
      mutate { update => { "loglevel" => "W" } }
    }
    geoip {
      source => "clientip"
    }
    # Setting all events to be dropped unless set otherwise.
    mutate { add_field => { "drop" => false } }
    ## Adding simple message regexes
    if [message_code] == 0 {
      if [data] =~ /ABR LEVEL %{INT:layer} is out of sync/ {
        mutate { add_field => { "translated_message_code" => "ABR level is out of sync" } }
      }
      if [data] =~ /Failure in closing file/ {
        mutate { add_field => { "translated_message_code" => "Failure in closing file" } }
      }
      if [data] =~ /ntp not synchronized or failed getting synchronization state due to/ {
        mutate { add_field => { "translated_message_code" => "ntp not synchronized" } }
      }
      if [data] =~ /FX_Packets_List_Reader::get_nibble: empty/ {
        mutate { add_field => { "translated_message_code" => "FX_Packets_List_Reader::get_nibble: empty" } }
      }
      if [data] =~ /FX_HTTP_Adaptive_Streaming_Base_Service::reply_err_ 404/ {
        mutate { add_field => { "translated_message_code" => "FX_HTTP_Adaptive_Streaming_Base_Service::reply_err_ 404" } }
      }
    }
    if [loglevel] in [drop_events] {
      if [message_code] not in [ 17302, 17303, 17031, 17018, 17023 ] {
        # Drop events if the log level should be dropped and the message_code isn't in the list above.
        drop {}
      }
    }
  }
}
output {
  elasticsearch {
    hosts => [<ELASTIC_HOSTS_MACRO>]
    document_id => "%{fingerprint}"
  }
}

Are there any more interesting log messages surrounding the one you quoted? What if you crank up the log level with --debug? There should be a stack trace somewhere.
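
For example, something along these lines (the install and config paths here are just placeholders for wherever Logstash and your pipeline config live on those nodes, and the exact flag depends on your Logstash version):

# Logstash 2.x
/opt/logstash/bin/logstash -f /etc/logstash/conf.d/ --debug

# Logstash 5.x and later
/opt/logstash/bin/logstash -f /etc/logstash/conf.d/ --log.level debug

You could also temporarily add a stdout output next to the elasticsearch one, so the last events printed to the console before the crash point you at the message that triggers it. A minimal sketch of the idea, reusing your existing output block:

output {
  # Print every event in full; the offending message should be visible right before the crash.
  stdout { codec => rubydebug }
  elasticsearch {
    hosts => [<ELASTIC_HOSTS_MACRO>]
    document_id => "%{fingerprint}"
  }
}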