Multiline not working as expected

Hi,

I have an application that logs its activity for each event it processes. For each event the application writes multiple lines, which are simply the steps it takes to do its job. So:
  • I tried to use the multiline filter to bundle all the log lines for each event.

  • I am also trying to extract as much information as possible from these logs. I use grok over the lines to pull out details such as the IP address, VM name, event success status, etc. Multiline is done after grok (the general shape is sketched right after this list; the full config is further down).

  • Everything was working fine, but the issue I see when I view the data in Kibana is that the same lines are added multiple times.
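
Roughly, this is the shape of what I am doing in the filter section (only the relevant part; the full config is further down, and GEN is a custom pattern from my patterns directory):

filter {
    grok {
        patterns_dir => ["/etc/cmdb_datapump/patterns"]
        match => { "message" => "%{GEN}" }
    }
    multiline {
        patterns_dir => ["/etc/cmdb_datapump/patterns"]
        pattern => "%{GEN}"
        what => "previous"
        max_age => 15
    }
}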

Logs:

2019-01-24 05:14:12,943 INFO Recieved messages from Kafka
2019-01-24 05:14:12,981 INFO Offset committed
2019-01-24 05:14:13,152 INFO Support type:
2019-01-24 05:14:13,164 INFO Network type of the project -
2019-01-24 05:14:17,344 INFO Successfully added DNS Entry for IP - Name - in AM
2019-01-24 05:14:17,354 INFO Triggering VIP FIP ESP Handler process with pid
2019-01-24 05:14:17,355 INFO Created pid file
2019-01-24 05:14:17,355 INFO Dispatch Successful: loadbalancer.create.end

But what I see in Kibana:

2019-01-25 09:20:48,280 INFO Recieved messages from Kafka
2019-01-25 09:20:48,318 INFO Offset committed
2019-01-25 09:20:48,453 INFO Support type:
2019-01-25 09:20:48,464 INFO Network type of the project -
2019-01-25 09:20:52,314 INFO Successfully added DNS Entry for IP - Name - in AM
2019-01-25 09:20:52,326 INFO Triggering VIP FIP ESP Handler process with pid 3359
2019-01-25 09:20:52,327 INFO Created pid file
2019-01-25 09:20:52,327 INFO Dispatch Successful: loadbalancer.create.end
2019-01-25 09:20:52,314 INFO Successfully added DNS Entry for IP - Name - in AM
2019-01-25 09:20:52,326 INFO Triggering VIP FIP ESP Handler process with pid
2019-01-25 09:20:52,327 INFO Created pid file
2019-01-25 09:20:52,327 INFO Dispatch Successful: loadbalancer.create.end

As you can see, the same lines are repeated again and again.

Why is this happening, and what can I do to fix it?
Another surprising thing: when I run Logstash manually with logstash -f file.conf, it works, but in Kibana I see the events coming in duplicated as described above.
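
One thing I am wondering about: when Logstash runs as a service it loads everything under its config directory (/etc/logstash/conf.d on a default package install, if I am not mistaken) into a single pipeline, so filters and outputs from any other config file on that box would also see these events. Would guarding each file with a type check, roughly like the sketch below, rule that out as the source of the duplicates? (The path and the idea that a second config file is involved are only guesses on my part.)

output {
    if [type] == "cmdb_datapump" {
        # existing statsd outputs from file.conf go here, unchanged
    }
}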

My Logstash config (file.conf):

input {
    file {
        type => "cmdb_datapump"
        path => "/var/log/datapump/svl_stage_consumer.log"
    }
}

filter {

    mutate {
            add_field => {
                    "am_overlap" => "false"
                    "esp_overlap" => "false"
                    "error" => "false"
                    "warn" => "false"
                    "am_error" => "false"
                    "esp_error" => "false"
                    "kafka_error" => "false"
                    "kafka_error_temp" => ""
                    "os_error" =>""
            }
    }

    grok {
            patterns_dir => ["/etc/cmdb_datapump/patterns"]
            match => {
                    "message" => "%{GEN}"
            }
    }

    multiline {
            patterns_dir => ["/etc/cmdb_datapump/patterns"]
            pattern => "%{GEN}"
            what => "previous"
            max_age => 15
    }



    if "compute.instance.create.end" in [message] {
            mutate {
                    add_field => {
                            "event_type" => "vm_creation"
                    }
            }
    }
    else if "compute.instance.delete.start" in [message] {
            mutate {
                    add_field => {
                            "event_type" => "vm_deletion"
                    }
            }
    }
    else if "loadbalancer.create.end" in [message] {
            mutate {
                    add_field => {
                            "event_type" => "vip_creation"
                    }
            }
    }
    else if "loadbalancer.delete.end" in [message] {
            mutate {
                    add_field => {
                            "event_type" => "vip_deletion"
                    }
            }
    }
    else if "floatingip.create.end" in [message] {
            mutate {
                    add_field => {
                            "event_type" => "fip_creation"
                    }
            }
    }
    else if "floatingip.delete.end" in [message] {
            mutate {
                    add_field => {
                            "event_type" => "fip_deletion"
                    }
            }
    }
    if "OVERLAP AM ERROR" in [message] {
            mutate {
                    update => {
                            "am_overlap" => "true"
                    }
            }
    }
    if "OVERLAP in ESP" in [message] {
            mutate {
                    update => {
                            "esp_overlap" => "true"
                    }
            }
    }
    if "ERROR AM ERROR" in [message] {
            mutate {
                    update  => {
                            "am_error" => "true"
                    }
            }
    }
    if " ERROR " in [message] {
            mutate {
                    update  => {
                            "error" => "true"
                    }
            }
    }
    if "ESP ERROR" in [message] {
            mutate {
                    update  => {
                            "esp_error" => "true"
                    }
            }
    }
    if " WARN" in [message] {
            mutate {
                    update => {
                            "warn" => "true"
                    }
            }
    }
    if [kafka_error_temp]!="" {
            mutate {
                    update => {
                            "kafka_error" => "true"
                    }
            }
    }
    mutate {
            remove_field => ["kafka_error_temp"]
    }
    if [os_error]!=""{
            mutate {
                    update => {
                            "os_error" => "true"
                    }
            }
    }
    else{
            mutate {
                    remove_field => ["os_error"]
            }
    }

}

output {

if [event_type] in ["vm_creation","vm_deletion","fip_creation","fip_deletion","vip_creation","vip_deletion"] {
    if [dispatch_status] == "Successful"{
        statsd{
            host => "host"
            namespace => "cmdb"
            count => {"cmdb.total" => 1}
            count => {"cmdb.success" => 1}
            count => {"%{event_type}.total" => 1}
            count => {"%{event_type}.success" => 1}
        }
    }
    if [dispatch_status] == "failed"{
        statsd{
            host => "host"
            namespace => "cmdb"
            count => {"cmdb.total" => 1}
            count => {"cmdb.fail" => 1}
            count => {"%{event_type}.total" => 1}
            count => {"%{event_type}.fail" => 1}
        }
    }
    if [am_overlap] =="true"{
        statsd{
            host => "host"
            namespace => "cmdb"
            count => {"%{event_type}.am_overlap" => 1}
        }
    }
    if [esp_overlap] =="true"{
        statsd{
            host => "host"
            namespace => "cmdb"
            count => {"%{event_type}.esp_overlap" => 1}
        }
    }
    if [esp_error] =="true"{
        statsd{
            host => "host"
            namespace => "cmdb"
            count => {"%{event_type}.esp_error" => 1}
            count => {"cmdb.error.esp_error" => 1}
        }
    }
    if [am_error] =="true"{
        statsd{
            host => "host"
            namespace => "cmdb"
            count => {"%{event_type}.am_error" => 1}
            count => {"cmdb.error.am_error" => 1}
        }
    }
}
if [os_error]=="true"{
    statsd{
            host => "host"
        namespace => "cmdb"
        count => {"cmdb.error.os_error" => 1}
    }
}
if [kafka_error]=="true"{
    statsd{
        host => "host"
        namespace => "cmdb"
        count => {"cmdb.error.kafka_error" => 1}
    }
}

}
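
Since the multiline filter is deprecated (and, as far as I understand, documented as not working reliably with more than one pipeline worker), I have also been considering doing the joining with the multiline codec on the file input instead, along these lines. The pattern below is only a sketch that anchors on the first line of each event in my sample above (spelled "Recieved", as in the actual logs); I have not settled on it:

input {
    file {
        type => "cmdb_datapump"
        path => "/var/log/datapump/svl_stage_consumer.log"
        codec => multiline {
            # assumption: every event starts with the "Recieved messages from Kafka" line,
            # so anything that is not such a line is appended to the previous event
            pattern => "Recieved messages from Kafka"
            negate => true
            what => "previous"
            auto_flush_interval => 15
        }
    }
}

Would that be the better approach here, or can the multiline filter be made to behave?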

My pattern file:

DATE [0-9]{4}-[0-9]{2}-[0-9]{2}
TIME [0-9]{2}:[0-9]{2}:[0-9]{2},[0-9]+
LOGSTART %{DATE} %{TIME} %{LOGLEVEL}
DIS_MES %{LOGSTART} Dispatch %{WORD:dispatch_status}%{GREEDYDATA}
DNS_ADD_SUCCESS %{LOGSTART} Successfully added DNS Entry for IP - %{IPV4:ipaddress}%{GREEDYDATA}
MON_COLLECTOR_SUCCESS %{LOGSTART} Successfully added monitoring collector %{NOTSPACE:monitoring_collector}%{GREEDYDATA}
UM_MON_SYS_SUCCESS %{LOGSTART} Successfully added UM Primary monitoring system%{GREEDYDATA}
SL_TEMPLATE_SUCCESS %{LOGSTART} Successfully added SL template for the instance%{GREEDYDATA}
MON_CONFIG_SUCCESS %{LOGSTART} Successfully added monitoring configuration for instance %{GREEDYDATA}
ID_FOUND %{LOGSTART} Found%{DATA}sys id%{GREEDYDATA}
ESP_HOST_ADD %{LOGSTART} Added host %{NOTSPACE:vm_name} successfully to ESP%{GREEDYDATA}
SERIAL_NUM_ADD %{LOGSTART} Serial number added successfully%{GREEDYDATA}
ESP_TABLE_INSERT %{LOGSTART} Inserting into table%{GREEDYDATA}
ESP_TABLE_QUERY %{LOGSTART} Querying table%{GREEDYDATA}
ESP_UNIQ_CHECK %{LOGSTART} Checking for uniqueness of entry%{GREEDYDATA}
CONTACT_FAIL %{LOGSTART} Filling contacts for esp failed.
SUPPORT_TYPE_DETAIL %{LOGSTART} Support type: %{NOTSPACE:support_type}
NET_TYPE %{LOGSTART} Network type of the project - %{NOTSPACE:network_type}
KAFKA_OFFSET_COMMIT %{LOGSTART} Offset committed%{GREEDYDATA}
ERROR1 %{LOGSTART} Dispatcher error: %{GREEDYDATA}
EXCEPTION %{LOGSTART} Traceback %{GREEDYDATA}
WITHOUT_TIMESTAMP ^[^0-9]
DNS_OVERLAP %{LOGSTART} OVERLAP AM ERROR%{GREEDYDATA}
AM_ERROR %{LOGSTART} AM ERROR:%{GREEDYDATA}
DNS_DELETE %{LOGSTART} Removed the DNS [Ee]ntry%{GREEDYDATA}
DNS_PRESENCE_WARN %{LOGSTART} DNS Name - %{NOTSPACE} already do not exist
SERVER_PICK_FAIL %{LOGSTART} Server type pick up failed%{GREEDYDATA}
ESP_CONNECT_FAIL %{LOGSTART} ESP ERROR: Failed to connect to ESP.%{GREEDYDATA}
OS_CONNECT_FAIL %{LOGSTART} Failed to connect to Openstack cloud.%{GREEDYDATA}
DNS_DELETE_FAIL %{LOGSTART} DNS Delete for name - %{NOTSPACE} failed%{GREEDYDATA}
SERIAL_NUMBER_CHECK %{LOGSTART} Checking against ESP serial number table for identifying the instance%{GREEDYDATA}
WRONG_VM %{LOGSTART} This is a wrong VM%{GREEDYDATA}
SERIAL_NUMBER_LOOKUP_FAIL %{LOGSTART} ESP ERROR: Failed to retrieve ESP record from serial number table%{GREEDYDATA}
DNS_ESP_HOST %{LOGSTART} No hosts found with that name%{GREEDYDATA}
DNS_ESP_HOST1 %{LOGSTART} Too many hosts with the same name%{GREEDYDATA}
DNS_ESP_HOST2 %{LOGSTART} Host - %{NOTSPACE} already does not exist in ESP
PRIMARY_USE_MISS %{LOGSTART} Primary Use attribute missing for the VM%{GREEDYDATA}
ESP_ADD_FAIL %{LOGSTART} ESP ERROR: Adding host %{NOTSPACE:vm_name} in ESP Failed%{GREEDYDATA}
SERIAL_NUMBER_ADD_FAIL Serial number add failed in ESP%{GREEDYDATA}
MON_CONFIG_ADD_FAIL %{LOGSTART} Failed to add monitoring configuration for the instance%{GREEDYDATA}
MON_CONFIG_ADD_FAIL1 %{LOGSTART} ESP ERROR: Monitoring config failed%{GREEDYDATA}
ESP_DELETE %{LOGSTART} Host - %{NOTSPACE:vm_name} deleted%{GREEDYDATA}
ESP_DELETE_FAIL %{LOGSTART} ESP ERROR: Failed to delete host in ESP%{GREEDYDATA}
MON_COL_SYS_ID_FAIL %{LOGSTART} Failed to get the sys id of the monitoring collector %{NOTSPACE:monitoring_collector}
MON_COL_FAIL %{LOGSTART} Failed to set monitoring collector%{GREEDYDATA}
UM_MON_FAIL %{LOGSTART} Failed to set UM Primary monitoring system%{GREEDYDATA}
SL_TEMPLATE_FAIL %{LOGSTART} Failed to get the science logic template sys id for template%{GREEDYDATA}
MON_CONFIG_FAIL %{LOGSTART} Failed to set SL template for the instance%{GREEDYDATA}
APP_CLUSTER_ID_MISS %{LOGSTART} CLUSTER ID MISSING for project%{GREEDYDATA}
IT_MAP_FAIL %{LOGSTART} IT Mapping failed%{GREEDYDATA}
IT_MAP_FAIL1 %{LOGSTART} ESP ERROR: IT Mapping failed for host%{GREEDYDATA}
DNS_BACKOUT %{LOGSTART} Triggering dns backout for%{GREEDYDATA}
REMOVE_INVALID_MONGO %{LOGSTART} Deleted %{NOTSPACE} invalid entries from mongo%{GREEDYDATA}
REMOVE_INVALID_MONGO_FAIL %{LOGSTART} Error while deleting invalid mongo entries%{GREEDYDATA}
ID_FIND_FAIL %{LOGSTART} Cannot find sys id%{GREEDYDATA}
PROCESS_HANDLER_EXCEED %{LOGSTART} %{NOTSPACE} Process runtime exceeds the threshold%{GREEDYDATA}
VIP_FIP_ESP_MONGO_ADD %{LOGSTART} Added the failed esp dns add message to mongo with doc id%{GREEDYDATA}
VIP_FIP_FILE_REMOVE %{LOGSTART} %{NOTSPACE} pid file removed%{GREEDYDATA}
PROCESS_KILLED %{LOGSTART} %{NUMBER} Process killed%{GREEDYDATA}
VIP_FIP_FILE_REMOVE_FAIL %{LOGSTART} Exception: Removing the pid file%{GREEDYDATA}
USE_MONGO %{LOGSTART} Use mongo%{GREEDYDATA}
VIP_FIP_TRIGGER %{LOGSTART} Triggering VIP FIP ESP Handler process with pid%{GREEDYDATA}
PID_FILE_CREATE %{LOGSTART} Created pid file%{GREEDYDATA}
KAFKA_OFFSET_COMMIT_FAIL %{LOGSTART} Offset commit error:%{GREEDYDATA}
PID_PROCESS_KILL %{LOGSTART} Looking for processes in pid folder and attempting to kill it if it tries to update esp entry for the deleted ip%{GREEDYDATA}

My pattern file continues:

ESP_TABLE_UPDATE %{LOGSTART} Updating table%{GREEDYDATA}
ESP_TABLE_UPDATE1 %{LOGSTART} Deleted [0-9]+ records from table%{GREEDYDATA}
ESP_OVERLAP %{LOGSTART} OVERLAP in ESP for instance%{GREEDYDATA}
EMAIL_SEND %{LOGSTART} Email sent%{GREEDYDATA}
ESP_MISC %{LOGSTART} Saved new token to%{GREEDYDATA}
ESP_MISC1 %{LOGSTART} Updated cache and saved to file%{GREEDYDATA}
ESP_MISC2 %{LOGSTART} Looking up inheritance for known classes%{GREEDYDATA}
VIP_FIP_ESP_ADD %{LOGSTART} Successfully updated dns entry in esp for %{NOTSPACE}
VIP_FIP_ESP_ADD_FAIL %{LOGSTART} Failed to update dns entry for %{DATA}\.%{GREEDYDATA}
VIP_FIP_ESP_FOUND %{LOGSTART} Found dns entry in esp for %{NOTSPACE:vip_fip_dns}%{GREEDYDATA}
EMPTYLINE ^$
KAFKA_HEARTBEAT_FAIL %{LOGSTART} Heartbeat failed%{GREEDYDATA:kafka_error_temp}
KAFKA_HEARTBEAT_FAIL1 %{LOGSTART}%{GREEDYDATA:kafka_error_temp}Heartbeat session expired%{GREEDYDATA:kafka_error_temp}
KAFKA_CONNECTION_FAIL %{LOGSTART} <BrokerConnection%{DATA:kafka_error_temp}close() called on disconnected connection with error: ConnectionError: Unable to connect%{GREEDYDATA:kafka_error_temp}
KAFKA_CONNECTION_FAIL1 %{LOGSTART} Coordinator unknown during heartbeat%{GREEDYDATA:kafka_error_temp}
KAFKA_CONNECTION_FAIL2 %{LOGSTART} Unable to connect to any of the names%{GREEDYDATA:kafka_error_temp}
KAFKA_CONNECTION_FAIL3 %{LOGSTART} Kafka connection error: NoBrokers%{GREEDYDATA:kafka_error_temp}
KAFKA_CONNECTION_FAIL4 %{LOGSTART} DNS lookup failed for%{DATA:kafka_error_temp}, exception was %{DATA:kafka_error_temp} Name or service not known. Is your advertised.%{DATA:kafka_error_temp}correct and resolvable?
KAFKA_CONNECTION_FAIL5 %{LOGSTART} Unable to bootstrap from%{GREEDYDATA:kafka_error_temp}
KAFKA_CONNECTION_FAIL6 %{LOGSTART} Marking the coordinator dead%{GREEDYDATA:kafka_error_temp}
KAFKA_CONNECTION_FAIL7 %{LOGSTART} Node %{DATA:kafka_error_temp} connection failed -- refreshing metadat%{GREEDYDATA:kafka_error_temp}
KAFKA_CONNECTION_FAIL8 %{LOGSTART}%{GREEDYDATA:kafka_error_temp}RequestTimedOutError: Request timed out after%{GREEDYDATA:kafka_error_temp}
KAFKA_CONNECTION_FAIL9 %{LOGSTART}%{GREEDYDATA:kafka_error_temp}timed out after [0-9]* ms. Closing connection%{GREEDYDATA}
KAFKA_CONNECTION_FAIL10 %{LOGSTART} Error sending OffsetCommitRequest%{GREEDYDATA:kafka_error_temp}
KAFKA_CONNECTION_FAIL11 %{LOGSTART} Commit cannot be completed since the group has already%{GREEDYDATA:kafka_error_temp}
OS_ERROR %{LOGSTART} Failed to connect to Openstack %{GREEDYDATA:os_error}
GEN ((%{DNS_ADD_SUCCESS})|(%{MON_COLLECTOR_SUCCESS})|(%{UM_MON_SYS_SUCCESS})|(%{SL_TEMPLATE_SUCCESS})|(%{MON_CONFIG_SUCCESS})|(%{SUPPORT_TYPE_DETAIL})|(%{NET_TYPE})|(%{CONTACT_FAIL})|(%{ESP_UNIQ_CHECK})|(%{ESP_TABLE_QUERY})|(%{ESP_TABLE_INSERT})|(%{ESP_HOST_ADD})|(%{SERIAL_NUM_ADD})|(%{ID_FOUND})|(%{DIS_MES})|(%{KAFKA_OFFSET_COMMIT})|(%{WITHOUT_TIMESTAMP})|(%{EXCEPTION})|(%{ERROR1})|(%{DNS_OVERLAP})|(%{PID_PROCESS_KILL})|(%{KAFKA_OFFSET_COMMIT_FAIL})|(%{PID_FILE_CREATE})|(%{VIP_FIP_TRIGGER})|(%{USE_MONGO})|(%{VIP_FIP_FILE_REMOVE_FAIL})|(%{PROCESS_KILLED})|(%{VIP_FIP_FILE_REMOVE})|(%{VIP_FIP_ESP_MONGO_ADD})|(%{PROCESS_HANDLER_EXCEED})|(%{ID_FIND_FAIL})|(%{REMOVE_INVALID_MONGO_FAIL})|(%{REMOVE_INVALID_MONGO})|(%{DNS_BACKOUT})|(%{IT_MAP_FAIL})|(%{APP_CLUSTER_ID_MISS})|(%{MON_CONFIG_FAIL})|(%{SL_TEMPLATE_FAIL})|(%{UM_MON_FAIL})|(%{MON_COL_FAIL})|(%{MON_COL_SYS_ID_FAIL})|(%{ESP_DELETE_FAIL})|(%{ESP_DELETE})|(%{MON_CONFIG_ADD_FAIL})|(%{SERIAL_NUMBER_ADD_FAIL})|(%{ESP_ADD_FAIL})|(%{PRIMARY_USE_MISS})|(%{DNS_ESP_HOST2})|(%{DNS_ESP_HOST1})|(%{DNS_ESP_HOST})|(%{SERIAL_NUMBER_LOOKUP_FAIL})|(%{WRONG_VM})|(%{SERIAL_NUMBER_CHECK})|(%{DNS_DELETE_FAIL})|(%{OS_CONNECT_FAIL})|(%{ESP_CONNECT_FAIL})|(%{SERVER_PICK_FAIL})|(%{DNS_PRESENCE_WARN})|(%{DNS_DELETE})|(%{AM_ERROR})|(%{ESP_TABLE_UPDATE})|(%{ESP_TABLE_UPDATE1})|(%{ESP_OVERLAP})|(%{MON_CONFIG_ADD_FAIL1})|(%{IT_MAP_FAIL1})|(%{EMAIL_SEND})|(%{ESP_MISC})|(%{ESP_MISC1})|(%{ESP_MISC2})|(%{VIP_FIP_ESP_ADD})|(%{VIP_FIP_ESP_ADD_FAIL})|(%{VIP_FIP_ESP_FOUND})|(%{EMPTYLINE})|(%{KAFKA_HEARTBEAT_FAIL})|(%{KAFKA_HEARTBEAT_FAIL1})|(%{KAFKA_CONNECTION_FAIL})|(%{KAFKA_CONNECTION_FAIL1})|(%{KAFKA_CONNECTION_FAIL2})|(%{KAFKA_CONNECTION_FAIL3})|(%{KAFKA_CONNECTION_FAIL4})|(%{KAFKA_CONNECTION_FAIL5})|(%{KAFKA_CONNECTION_FAIL6})|(%{KAFKA_CONNECTION_FAIL7})|(%{KAFKA_CONNECTION_FAIL8})|(%{KAFKA_CONNECTION_FAIL9})|(%{KAFKA_CONNECTION_FAIL10})|(%{KAFKA_CONNECTION_FAIL11})|(%{OS_ERROR}))
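
For context, this is how I expect a single line to connect the patterns above with the conditionals in my output section. Take the last line of my first sample:

2019-01-24 05:14:17,355 INFO Dispatch Successful: loadbalancer.create.end

GEN matches it via DIS_MES, which captures dispatch_status => "Successful"; the "loadbalancer.create.end" check in the filter adds event_type => "vip_creation"; and together those should increment the cmdb.total, cmdb.success, vip_creation.total and vip_creation.success counters in statsd. At least, that is the intent.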
