I am using TCP port 8514 to collect syslogs from our Cisco switches. In most cases it works, but I found that for one of my devices it does not; I cannot see any information in my logs at all. My configuration is as below:
input {
  tcp {
    port => "8514"
    type => "syslog-cisco"
  }
  udp {
    port => "514"
    type => "syslog-cisco"
  }
}
filter {
  if [type] == "syslog-cisco" {
    # The switches send the same message to every syslog server for redundancy. This lets us store each
    # message in Elasticsearch only once: we generate a hash of the message and use it as the document_id,
    # so a duplicate copy of the same message overwrites the existing document instead of creating a new one.
    fingerprint {
      source => [ "message" ]
      method => "SHA1"
      key => "0123"
      concatenate_sources => true
      target => "[@metadata][fingerprint]"
    }
    grok {
      # There are a couple of custom patterns associated with this filter.
      patterns_dir => [ "/opt/logstash/patterns" ]
      match => [
        "message", "<%{NUMBER:message_type_id}>%{NUMBER:internal_id}:%{SPACE}%{SYSLOGHOST:origin_hostname}:%{SPACE}%{CISCOTIMESTAMPTZ:cisco_timestamp}: \%%{WORD:facility}-%{INT:severity}-%{WORD:mnemonic}: %{GREEDYDATA:msg}",
        "message", "<%{NUMBER:message_type_id}>%{NUMBER:internal_id}:%{SPACE}%{SYSLOGHOST:origin_hostname}:%{SPACE}%{CISCOTIMESTAMP:cisco_timestamp}.*: \%%{WORD:facility}-%{INT:severity}-%{WORD:mnemonic}: %{GREEDYDATA:msg}",
        "message", "<%{NUMBER:message_type_id}>%{NUMBER:internal_id}:%{SPACE}%{SYSLOGHOST:origin_hostname}:%{SPACE}.*%{CISCOTIMESTAMP:cisco_timestamp}.*: \%%{WORD:facility}-%{INT:severity}-%{WORD:mnemonic}: %{GREEDYDATA:msg}",
        "message", "<%{NUMBER:message_type_id}>%{NUMBER:internal_id}:%{SPACE}%{SYSLOGHOST:origin_hostname}:.*\]:%{SPACE}%{CISCOTIMESTAMPTZ:cisco_timestamp}.*\%%{WORD:facility}-%{INT:severity}-%{WORD:mnemonic}: %{GREEDYDATA:msg}",
        "message", "<%{NUMBER:message_type_id}>%{NUMBER:internal_id}:%{SPACE}%{SYSLOGHOST:origin_hostname}:.*\]: %{NUMBER:sequencenumber}:%{SPACE}%{CISCOTIMESTAMPTZ:cisco_timestamp}.*\%%{WORD:facility}-%{INT:severity}-%{WORD:mnemonic}: %{GREEDYDATA:msg}",
        "message", "%{CISCOTIMESTAMPTZ:cisco_timestamp}%{SPACE}%{SYSLOGHOST:origin_hostname}%{SPACE}\%%{WORD:facility}-%{INT:severity}-%{WORD:mnemonic}: %{GREEDYDATA:msg}"
      ]
      add_tag => [ "cisco" ]
      remove_field => [ "cisco_timestamp" ]
      remove_field => [ "message_type_id" ]
      remove_field => [ "internal_id" ]
      remove_field => [ "sequencenumber" ]
    }
  }
}

output {
  # The message was parsed correctly and should be sent to Elasticsearch.
  if "cisco" in [tags] {
    elasticsearch {
      hosts => [ "ph71v-esn01.ae007.com:9200", "ph71v-esn02.ae007.com:9200" ]
      index => "network-log-%{+YYYY.MM.dd}"
      document_type => "Network Device"
      document_id => "%{[@metadata][fingerprint]}"
    }
  }
  # Something went wrong with the grok parsing; don't discard the messages, though.
  else if "_grokparsefailure" in [tags] {
    file {
      path => "/var/log/logstash/fail-%{type}-%{+YYYY.MM.dd}.log"
    }
  }
  else {
    file {
      path => "/var/log/logstash/unknown_msg.log"
    }
  }
}
In my previous experience, if anything goes wrong I can find the relevant messages in either "unknown_msg.log" or "fail-%{type}-%{+YYYY.MM.dd}.log", but this time I cannot find anything in them either.
I've tried using tcpdump to capture the packets, and the syslog messages were indeed sent to Logstash:
09:42:50.989089 IP 10.60.185.9.51437 > ph71v-ls01.8514: Flags [.], seq 2161533703:2161533807, ack 140636346, win 4128, length 104
09:42:50.989110 IP ph71v-ls01.8514 > 10.60.185.9.51437: Flags [.], ack 104, win 64320, length 0
09:42:53.698110 IP 10.60.185.9.51437 > ph71v-ls01.8514: Flags [.], seq 104:228, ack 1, win 4128, length 124
09:42:53.698138 IP ph71v-ls01.8514 > 10.60.185.9.51437: Flags [.], ack 228, win 64320, length 0
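For reference, this is the kind of capture command I ran on the Logstash host (the interface name here is just an example, adjust it for the actual host):

# listen for TCP traffic to the Logstash input port; replace eth0 with the real interface
tcpdump -nn -i eth0 tcp port 8514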
How should I troubleshoot this?