Windows logstash problem

Hi,

I'm running version 2.4.1 on a Windows 2012 server. I have run the ELK stack successfully on Linux, but to get it into production I have to migrate to Windows. I thought I could just move everything to the Wintel platform with no problem, but now I get this when I start Logstash.

I start with:

logstash.bat -f c:\blabala\bla.conf

UDP listener died {:exception=>#<SocketError: bind: name or service not known>, :backtrace=>["org/jruby/ext/socket/RubyUDPSocket.java:160:in `bind'", "C:/Elk/logstash-2.4.1/logstash-2.4.1/vendor/bundle/jruby/1.9/gems/logstash-input-udp-2.0.5/lib/logstash/inputs/udp.rb:67:in `udp_listener'", "C:/Elk/logstash-2.4.1/logstash-2.4.1/vendor/bundle/jruby/1.9/gems/logstash-input-udp-2.0.5/lib/logstash/inputs/udp.rb:50:in `run'", "C:/Elk/logstash-2.4.1/logstash-2.4.1/vendor/bundle/jruby/1.9/gems/logstash-core-2.4.1-java/lib/logstash/pipeline.rb:342:in `inputworker'", "C:/Elk/logstash-2.4.1/logstash-2.4.1/vendor/bundle/jruby/1.9/gems/logstash-core-2.4.1-java/lib/logstash/pipeline.rb:336:in `start_input'"], :level=>:warn}
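
One thing I still need to rule out, assuming the UDP socket itself is what fails, is whether some other process on the server (a local syslog agent, for example) is already listening on UDP 514:

netstat -an -p udp | findstr ":514"

If the port turns out to be free, explicitly setting host => "0.0.0.0" on the udp input would be the next thing I try, since the SocketError reads like the listen address could not be resolved rather than a permissions problem.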

Logstash conf file:

input {
  stdin {}

  # firewall logs
  udp {
    type => "cisco-asa"
    port => 514
  }
}
#########################
filter {
  if [type] == "cisco-asa" {
    # Split the syslog part and Cisco tag out of the message
    grok {
      match => ["message", "%{CISCO_TAGGED_SYSLOG} %{GREEDYDATA:cisco_message}"]
    }
    # Parse the syslog severity and facility
    syslog_pri { }
    # Parse the date from the "timestamp" field into the "@timestamp" field
    date {
      match => ["timestamp",
        "MMM dd HH:mm:ss",
        "MMM  d HH:mm:ss",
        "MMM dd yyyy HH:mm:ss",
        "MMM  d yyyy HH:mm:ss"
      ]
      timezone => "Europe/Paris"
    }
    # Clean up redundant fields if parsing was successful
    if "_grokparsefailure" not in [tags] {
      mutate {
        rename => ["cisco_message", "message"]
        remove_field => ["timestamp"]
      }
    }
    # Extract fields from each of the detailed message types
    # The patterns below have shipped with Logstash since 1.2.0
    grok {
      match => [
        "message", "%{CISCOFW106001}",
        "message", "%{CISCOFW106006_106007_106010}",
        "message", "%{CISCOFW106014}",
        "message", "%{CISCOFW106015}",
        "message", "%{CISCOFW106021}",
        "message", "%{CISCOFW106023}",
        "message", "%{CISCOFW106100}",
        "message", "%{CISCOFW110002}",
        "message", "%{CISCOFW302010}",
        "message", "%{CISCOFW302013_302014_302015_302016}",
        "message", "%{CISCOFW302020_302021}",
        "message", "%{CISCOFW305011}",
        "message", "%{CISCOFW313001_313004_313008}",
        "message", "%{CISCOFW313005}",
        "message", "%{CISCOFW402117}",
        "message", "%{CISCOFW402119}",
        "message", "%{CISCOFW419001}",
        "message", "%{CISCOFW419002}",
        "message", "%{CISCOFW500004}",
        "message", "%{CISCOFW602303_602304}",
        "message", "%{CISCOFW710001_710002_710003_710005_710006}",
        "message", "%{CISCOFW713172}",
        "message", "%{CISCOFW733100}"
      ]
    }
  }
}
output {
  stdout {
    codec => rubydebug
  }
  elasticsearch {
    hosts => ["localhost:9200"]
  }
}

A test run of the conf file says OK:

C:\Elk\logstash-2.4.1\logstash-2.4.1\bin>logstash.bat -f C:\Elk\logstash\config\logstash_win5.conf -t
JAVA_OPTS was set to [-Xms1g -Xmx2g]. Logstash will trust these options, and not set any defaults that it might usually set
Picked up _JAVA_OPTIONS: -Xms1024m -Xmx2048m
Configuration OK

What to do?

Please format your log snippet as preformatted text so we can see what comes after UDP listener died {:exception=>#.

{:exception=>#<SocketError: bind: name or service not known>, :backtrace=>["org/jruby/ext/socket/RubyUDPSocket.java:160:in `bind'", "C:/Elk/logstash-2.4.1/logstash-2.4.1/vendor/bundle/jruby/1.9/gems/logstash-input-udp-2.0.5/lib/logstash/inputs/udp.rb:67:in `udp_listener'", "C:/Elk/logstash-2.4.1/logstash-2.4.1/vendor/bundle/jruby/1.9/gems/logstash-input-udp-2.0.5/lib/logstash/inputs/udp.rb:50:in `run'", "C:/Elk/logstash-2.4.1/logstash-2.4.1/vendor/bundle/jruby/1.9/gems/logstash-core-2.4.1-java/lib/logstash/pipeline.rb:342:in `inputworker'", "C:/Elk/logstash-2.4.1/logstash-2.4.1/vendor/bundle/jruby/1.9/gems/logstash-core-2.4.1-java/lib/logstash/pipeline.rb:336:in `start_input'"], :level=>:warn}

I've solved it, somehow. I changed to Logstash 5.2.2.

I removed the stdin {} from the input section.

I removed the codec => rubydebug from the output section.

Below is the working config for Cisco ASA firewalls and Logstash:


input {
  # firewall logs
  udp {
    port => 514
    type => "cisco-asa"
  }
}
#########################
filter {
  if [type] == "cisco-asa" {
    # Split the syslog part and Cisco tag out of the message
    grok {
      match => ["message", "%{CISCO_TAGGED_SYSLOG} %{GREEDYDATA:cisco_message}"]
    }
    # Parse the syslog severity and facility
    syslog_pri { }
    # Parse the date from the "timestamp" field into the "@timestamp" field
    date {
      match => ["timestamp",
        "MMM dd HH:mm:ss",
        "MMM  d HH:mm:ss",
        "MMM dd yyyy HH:mm:ss",
        "MMM  d yyyy HH:mm:ss"
      ]
      timezone => "Europe/Paris"
    }
    # Clean up redundant fields if parsing was successful
    if "_grokparsefailure" not in [tags] {
      mutate {
        rename => ["cisco_message", "message"]
        remove_field => ["timestamp"]
      }
    }
    # Extract fields from each of the detailed message types
    # The patterns below have shipped with Logstash since 1.2.0
    grok {
      match => [
        "message", "%{CISCOFW106001}",
        "message", "%{CISCOFW106006_106007_106010}",
        "message", "%{CISCOFW106014}",
        "message", "%{CISCOFW106015}",
        "message", "%{CISCOFW106021}",
        "message", "%{CISCOFW106023}",
        "message", "%{CISCOFW106100}",
        "message", "%{CISCOFW110002}",
        "message", "%{CISCOFW302010}",
        "message", "%{CISCOFW302013_302014_302015_302016}",
        "message", "%{CISCOFW302020_302021}",
        "message", "%{CISCOFW305011}",
        "message", "%{CISCOFW313001_313004_313008}",
        "message", "%{CISCOFW313005}",
        "message", "%{CISCOFW402117}",
        "message", "%{CISCOFW402119}",
        "message", "%{CISCOFW419001}",
        "message", "%{CISCOFW419002}",
        "message", "%{CISCOFW500004}",
        "message", "%{CISCOFW602303_602304}",
        "message", "%{CISCOFW710001_710002_710003_710005_710006}",
        "message", "%{CISCOFW713172}",
        "message", "%{CISCOFW733100}"
      ]
    }
  }
}
output {
  elasticsearch {
    hosts => ["localhost:9200"]
  }
}
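
For reference, this is roughly how I validate and start it on Windows now (the 5.2.2 paths below are just an example of my layout; adjust them to yours):

cd C:\Elk\logstash-5.2.2\bin
logstash.bat -f C:\Elk\logstash\config\logstash_win5.conf --config.test_and_exit
logstash.bat -f C:\Elk\logstash\config\logstash_win5.conf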

I created a Git repo for this.
