Extract SNMP table rows from the SNMP input plugin with the kv or split filter

Hello everyone,

The SNMP input plugin doesn't currently include an option to split an SNMP table into separate events.

Indexed documents look like this:

{
  "_index": "XXXX-2020.03.17",
  "_type": "doc",
  "_id": "Puby6XABeaR9KJCdhk0N",
  "_version": 1,
  "_score": null,
  "_source": {
    "type": "snmp",
    "host": "XXXXXXXXXX",
    "XXXX-alarms": [
      {
        "1": 1,
        "2": "1.3.6.1.4.1.8708.2.30.2.3.1.1.2.1.76",
        "3": "1.3.6.1.4.1.8708.2.30.2.3.1.1.2.1.76",
        "4": "XXXXXXXXXX",
        "5": 0,
        "6": 0,
        "7": 0,
        "8": 0,
        "9": "L2 service operation status is down",
        "10": 5,
        "11": "XXXXXXX2b:00:00",
        "12": "07:e4:01:1fXXXXXXXXXX00:00",
        "13": 1,
        "14": "XXXXXXXXXXX",
        "15": "0.0.0.0",
        "index": "1"
      },
      {
        "1": 2,
        "2": "1.3.6.1.4.1.8708.2.5.2.5.1.0",
        "3": "1.3.6.1.4.1.8708.2.5.2.5.7.0",
        "4": "upload",
        "5": 0,
        "6": 0,
        "7": 1,
        "8": 4,
        "9": "Upload failed",
        "10": 4,
        "11": "XXXXXXXXXX00",
        "12": "07:e4:01:XXXXXXXXXX00",
        "13": 2,
        "14": "XXXXXXXXXX",
        "15": "XXXXXXXXXX",
        "index": "2"
      },
      {
        "1": 3,
        "2": "0.0",
        "3": "1.3.6.1.4.1.8708.2.1.2.7.8.0",
        "4": "XXX",
        "5": 0,
        "6": 0,
        "7": 0,
        "8": 0,
        "9": "The FTP access to get PM from the node failed",
        "10": 4,
        "11": "0XXXXXXXXXX00:00",
        "12": "07:eXXXXXXXX:00:00",
        "13": 3,
        "14": "XXXXXXXXX",
        "15": "XXXXXX",
        "index": "3"
      }
    ],
    "message": "infinera alarms list",
    "@version": "1",
    "@timestamp": "2020-03-17T19:22:30.169Z"
  },
  "fields": {
    "@timestamp": [
      "2020-03-17T19:22:30.169Z"
    ]
  },
  "sort": [
    1584472950169
  ]
}

With a Logstash config similar to this:

input {
    snmp {
        mib_paths => ["/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-input-snmp-1.2.1/lib/mibs/infinera/"]
        hosts => [{host => "udp:XXXXXXXX/161" community => "XXXXXXX" retries => 2 timeout => 30}]
        # ignore the first 13 elements of each resolved OID when building field names
        oid_root_skip => 13
        tables => [
            {
                name => "alarms"
                columns => [
                    ".1.3.6.1.4.1.8708.2.1.2.2.1.1.1",
                    ".1.3.6.1.4.1.8708.2.1.2.2.1.1.2",
                    ".1.3.6.1.4.1.8708.2.1.2.2.1.1.3",
                    ".1.3.6.1.4.1.8708.2.1.2.2.1.1.4",
                    ".1.3.6.1.4.1.8708.2.1.2.2.1.1.5",
                    ".1.3.6.1.4.1.8708.2.1.2.2.1.1.6",
                    ".1.3.6.1.4.1.8708.2.1.2.2.1.1.7",
                    ".1.3.6.1.4.1.8708.2.1.2.2.1.1.8",
                    ".1.3.6.1.4.1.8708.2.1.2.2.1.1.9",
                    ".1.3.6.1.4.1.8708.2.1.2.2.1.1.10",
                    ".1.3.6.1.4.1.8708.2.1.2.2.1.1.11",
                    ".1.3.6.1.4.1.8708.2.1.2.2.1.1.12",
                    ".1.3.6.1.4.1.8708.2.1.2.2.1.1.13",
                    ".1.3.6.1.4.1.8708.2.1.2.2.1.1.14",
                    ".1.3.6.1.4.1.8708.2.1.2.2.1.1.15"
                ]
            }
        ]
        type => "snmp"
        interval => 30
    }
}

output {
    elasticsearch {
        hosts => ["http://es-cluster-0:9200","http://es-cluster-1:9200","http://es-cluster-2:9200","http://es-cluster-3:9200"]
        index => "xxxxxx-%{+YYYY.MM.dd}"
        codec => "json"
    }
}

Does anyone have an idea of how to extract each table row into its own document with the kv filter or the split filter?

Thanks for your help. :slight_smile:

Hi @piellick,

I've included below the "business end" of the (long) pipeline that I use for collecting stats from Cisco Nexus switches and mapping them into something close to ECS naming. The end goal was to make the switches available under the InfraUI. I collect other tables too, like CPU and memory, but have shortened it here for the character limit.

I use the clone filter to make a new document for each table I'm polling, and then the split filter to turn each row of a table into its own document.
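
Boiled down to your single "alarms" table, the pattern looks roughly like this (an untested sketch; I'm assuming the table lands in the event as a field named "alarms", and with only one table you could arguably skip the clone and just split that field directly):

filter {
    clone {
        clones => ["alarms"]    # one clone per table you poll; clone sets [type] to the clone name
        add_tag => [ "clone" ]  # the tag is added to the clones only, not the original
    }
    if "clone" in [tags] and [type] == "alarms" {
        split {
            field => "alarms"   # one event per table row
        }
    } else {
        drop { }                # discard the original, un-split event
    }
}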

Hope the below helps; hit me up if you need more help with it.

Dan.

input {
    snmp {
        id => "snmp_cisco_nexus_input"
        mib_paths => ["/etc/logstash/mibs/"]
        hosts => [
            {host => "udp:<removed>/161" version => "3"}
        ]
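        # scalar gets: CISCO-IMAGE-MIB image strings plus the standard MIB-2 system group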
        get => [
            "1.3.6.1.4.1.9.9.25.1.1.1.2.5",
            "1.3.6.1.4.1.9.9.25.1.1.1.2.2",
            "1.3.6.1.2.1.1.1.0",
            "1.3.6.1.2.1.1.2.0",
            "1.3.6.1.2.1.1.3.0",
            "1.3.6.1.2.1.1.4.0",
            "1.3.6.1.2.1.1.5.0",
            "1.3.6.1.2.1.1.6.0"
        ]
        tables => [
            {
               name => "ifTable"
               columns => [
                   "1.3.6.1.2.1.2.2.1.2",
                   "1.3.6.1.2.1.2.2.1.3",
                   "1.3.6.1.2.1.2.2.1.4",
                   "1.3.6.1.2.1.2.2.1.5",
                   "1.3.6.1.2.1.2.2.1.6",
                   "1.3.6.1.2.1.2.2.1.7",
                   "1.3.6.1.2.1.2.2.1.8",
                   "1.3.6.1.2.1.2.2.1.9",
                   "1.3.6.1.2.1.2.2.1.10",
                   "1.3.6.1.2.1.2.2.1.11",
                   "1.3.6.1.2.1.2.2.1.13",
                   "1.3.6.1.2.1.2.2.1.14",
                   "1.3.6.1.2.1.2.2.1.15",
                   "1.3.6.1.2.1.2.2.1.16",
                   "1.3.6.1.2.1.2.2.1.17",
                   "1.3.6.1.2.1.2.2.1.18",
                   "1.3.6.1.2.1.31.1.1.1.1",
                   "1.3.6.1.2.1.31.1.1.1.2",
                   "1.3.6.1.2.1.31.1.1.1.3",
                   "1.3.6.1.2.1.31.1.1.1.4",
                   "1.3.6.1.2.1.31.1.1.1.5",
                   "1.3.6.1.2.1.31.1.1.1.6",
                   "1.3.6.1.2.1.31.1.1.1.7",
                   "1.3.6.1.2.1.31.1.1.1.8",
                   "1.3.6.1.2.1.31.1.1.1.9",
                   "1.3.6.1.2.1.31.1.1.1.10",
                   "1.3.6.1.2.1.31.1.1.1.11",
                   "1.3.6.1.2.1.31.1.1.1.12",
                   "1.3.6.1.2.1.31.1.1.1.13",
                   "1.3.6.1.2.1.31.1.1.1.15",
                   "1.3.6.1.2.1.31.1.1.1.16",
                   "1.3.6.1.2.1.31.1.1.1.17",
                   "1.3.6.1.2.1.31.1.1.1.18"
               ]
            },

            {
               name => "entPhysicalTable"
               columns => [
                   "1.3.6.1.2.1.47.1.1.1.1.2",
                   "1.3.6.1.2.1.47.1.1.1.1.3",
                   "1.3.6.1.2.1.47.1.1.1.1.5",
                   "1.3.6.1.2.1.47.1.1.1.1.7",
                   "1.3.6.1.2.1.47.1.1.1.1.8",
                   "1.3.6.1.2.1.47.1.1.1.1.9",
                   "1.3.6.1.2.1.47.1.1.1.1.10",
                   "1.3.6.1.2.1.47.1.1.1.1.11",
                   "1.3.6.1.2.1.47.1.1.1.1.12",
                   "1.3.6.1.2.1.47.1.1.1.1.13",
                   "1.3.6.1.2.1.47.1.1.1.1.14",
                   "1.3.6.1.2.1.99.1.1.1.1",
                   "1.3.6.1.2.1.99.1.1.1.2",
                   "1.3.6.1.2.1.99.1.1.1.3",
                   "1.3.6.1.2.1.99.1.1.1.4",
                   "1.3.6.1.2.1.99.1.1.1.5",
                   "1.3.6.1.2.1.99.1.1.1.6"
               ]
            }
        ]
        interval => 300
        security_name => "<removed>"
        auth_protocol => "<removed>"
        auth_pass => "<removed>"
        priv_protocol => "<removed>"
        priv_pass => "<removed>"
        security_level => "<removed>"
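        # keep only the last two segments of each resolved OID as the field name, e.g. "sysDescr.0"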
        oid_path_length => 2
        add_field => { "vendor" => "cisco"}
    }
}
filter {
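    # map the scalar gets onto Beats-style host and system fields (the [beat][*] fields help the InfraUI pick the host up)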
    mutate {
        add_field => {
            "[beat][name]" => "%{[@metadata][host_address]}"
            "[beat][hostname]" => "%{[@metadata][host_address]}"
            "[host][name]" => "%{[@metadata][host_address]}"
        }
        rename => {
            "sysContact.0" => "[system][contact]"
            "sysDescr.0" => "[system][description]"
            "sysObjectID.0" => "[system][oid]"
            "sysUpTime.sysUpTimeInstance" => "sysuptime"
            "sysName.0" => "[system][name]"
            "sysLocation.0" => "[system][location]"
        }
    }
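    # ciscoImageString values look like "CW_VERSION$<version>$" and "CW_IMAGE$<image>$"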
    grok {
        match => { "ciscoImageString.5" => "CW_VERSION\$%{NOTSPACE:system.os.version}\$" }
    }
    grok {
        match => { "ciscoImageString.2" => "CW_IMAGE\$%{NOTSPACE:system.os.name}\$" }
    }
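    # sysUpTime is in hundredths of a second (TimeTicks); convert to seconds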
    ruby {
        code => "event.set('sysuptime', event.get('sysuptime').to_i / 100 )"
    }
    mutate {
        rename => {
            "sysuptime" => "[system][uptime]"
        }
        remove_field => [ "ciscoImageString.2", "ciscoImageString.5" ]
    }
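    # one clone per table; the clone filter sets [type] to the clone name, and the un-cloned original is dropped below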
    clone {
        clones => ["interface", "entity"]
        add_tag => [ "clone"]
    }
    if "clone" in [tags] {
        if [type] == "interface" {
            mutate {
                remove_field => [ "entPhysicalTable"]
            }
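            # one event per ifTable row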
            split {
                field => "ifTable"
            }
            mutate {
                add_field => {
                    "metricset.module" => "system"
                    "metricset.name" => "network"
                }
                rename => {
                    "[ifTable][ifAdminStatus]"      => "[system][network][status][admin]"
                    "[ifTable][ifOperStatus]"       => "[system][network][status][oper]"
                    "[ifTable][ifIndex]"            => "[system][network][index]"
                    "[ifTable][ifLastChange]"       => "[system][network][lastchange]"
                    "[ifTable][ifDescr]"            => "[system][network][description]"
                    "[ifTable][ifMtu]"              => "[system][network][mtu]"
                    "[ifTable][ifPhysAddress]"      => "[system][network][address][physical]"
                    "[ifTable][ifType]"             => "[system][network][type]"
                    "[ifTable][ifName]"             => "[system][network][name]"
                    "[ifTable][ifAlias]"            => "[system][network][alias]"
                }
            }
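            # prefer the 64-bit HC counters (ifXTable) when present, else fall back to the 32-bit ifTable counters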
            if [ifTable][ifHighSpeed] {
                mutate {
                    rename => {
                        "[ifTable][ifHighSpeed]"        => "[system][network][speed]"
                        "[ifTable][ifInDiscards]"       => "[system][network][in][dropped]"
                        "[ifTable][ifInErrors]"         => "[system][network][in][errors]"
                        "[ifTable][ifHCInMulticastPkts]"     => "[system][network][in][nonunicast]"
                        "[ifTable][ifHCInOctets]"       => "[system][network][in][bytes]"
                        "[ifTable][ifHCInUcastPkts]"    => "[system][network][in][unicast]"
                        "[ifTable][ifInUnknownProtos]"  => "[system][network][in][unknown]"
                        "[ifTable][ifOutDiscards]"      => "[system][network][out][dropped]"
                        "[ifTable][ifOutErrors]"        => "[system][network][out][errors]"
                        "[ifTable][ifHCOutMulticastPkts]"    => "[system][network][out][nonunicast]"
                        "[ifTable][ifHCOutOctets]"      => "[system][network][out][bytes]"
                        "[ifTable][ifHCOutUcastPkts]"   => "[system][network][out][unicast]"
                        "[ifTable][ifOutUnknownProtos]" => "[system][network][out][unknown]"
                    }
                }
            } else {
                mutate {
                    rename => {
                        "[ifTable][ifSpeed]"            => "[system][network][speed]"
                        "[ifTable][ifInDiscards]"       => "[system][network][in][dropped]"
                        "[ifTable][ifInErrors]"         => "[system][network][in][errors]"
                        "[ifTable][ifInNUcastPkts]"     => "[system][network][in][nonunicast]"
                        "[ifTable][ifInOctets]"         => "[system][network][in][bytes]"
                        "[ifTable][ifInUcastPkts]"      => "[system][network][in][unicast]"
                        "[ifTable][ifInUnknownProtos]"  => "[system][network][in][unknown]"
                        "[ifTable][ifOutDiscards]"      => "[system][network][out][dropped]"
                        "[ifTable][ifOutErrors]"        => "[system][network][out][errors]"
                        "[ifTable][ifOutNUcastPkts]"    => "[system][network][out][nonunicast]"
                        "[ifTable][ifOutOctets]"        => "[system][network][out][bytes]"
                        "[ifTable][ifOutUcastPkts]"     => "[system][network][out][unicast]"
                        "[ifTable][ifOutUnknownProtos]" => "[system][network][out][unknown]"
                    }
                }
            }
            mutate {
                remove_field => ["[ifTable]"]
            }
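            # total packets = unicast + non-unicast, per direction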
            ruby {
                code => "
                    event.set('system.network.in.packets', event.get('[system][network][in][unicast]').to_i + event.get('[system][network][in][nonunicast]').to_i )
                    event.set('system.network.out.packets', event.get('[system][network][out][unicast]').to_i + event.get('[system][network][out][nonunicast]').to_i )
                "
            }
        } else if [type] == "entity" {
            mutate {
                remove_field => [ "ifTable"]
                add_field => {
                    "metricset.module" => "system"
                    "metricset.name" => "entity"
                }
            }
            split {
                field => "entPhysicalTable"
            }
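            # keep only chassis (3) and module (9) rows, per the ENTITY-MIB PhysicalClass enum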
            if [entPhysicalTable][entPhysicalClass] in [3,9] {
                mutate {
                    rename => {
                        "[entPhysicalTable][entPhysicalSerialNum]" => "[system][module][serial]"
                        "[entPhysicalTable][entPhysicalModelName]" => "[system][module][model]"
                        "[entPhysicalTable][entPhysicalClass]" => "[system][module][class]"
                        "[entPhysicalTable][entPhysicalDescr]" => "[system][module][description]"
                        "[entPhysicalTable][entPhysicalHardwareRev]" => "[system][module][hardware][version]"
                        "[entPhysicalTable][entPhysicalFirmwareRev]" => "[system][module][firmware][version]"
                        "[entPhysicalTable][entPhysicalSoftwareRev]" => "[system][module][software][version]"
                    }
                }
            } else {
                drop { }
            }
            mutate {
                remove_field => ["[entPhysicalTable]"]
            }
        } else {
            drop { }
        }
    } else {
        drop { }
    }
    mutate {
        remove_tag => [ "clone" ]
        remove_field => [ "type" ]
    }
}