Dates dates dates

Thanks in advance, and sorry it's a bit long. Trying not to be frustrated here, but date handling is not behaving as documented.
Below is a fairly complicated conf file that can handle multiple .csv formats.
I have two date strings inside the .csv files; both failed to register as "date" fields and were converted to strings.
I need one of them to be a date.

Either "run_date" formatted as yyyyMMDDHH (example input: 2017100615)
or "datetime" formatted as yyyy-MM-DD HH:mm:ss (example input 2017-10-06 15:31:06)

Below I tried to use the date plugin. When I apply it to the datetime field, the field becomes messed up or picks up another field's value. How is that possible? I don't know.

I tried different targets for the "date" plugin; nothing worked.
I tried putting the date plugin in different locations; no change.
I've already spent hours trying to resolve this, with no luck.

Example call to date plugin:

date {
    target => "datetime"
    match  => [ "datetime", "yyyy-MM-dd HH:mm:ss" ]
}

Here is most of the Logstash conf file:

input {
    file {
        path           => [ "/some_path/*.csv" ]
        start_position => "beginning"
        sincedb_path   => "/dev/null"
    }
}

filter {
    grok {
        patterns_dir => ["patterns"]
        match        => ["path", "%{HOURLYDAILY:timeframe}%{YEAR:year}%{MONTHNUM:month}%{MONTHDAY:day}%{HOUR:hour}_%{VMPM:vm_or_pm}.csv$"]
        add_field    => [ "received_from", "%{host}" ]
        add_tag      => [ "%{timeframe}", "%{vm_or_pm}" ]
    }
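    # HOURLYDAILY and VMPM are custom patterns loaded from patterns_dir (the
    # patterns file is not included here); judging from the names and the
    # conditionals below, definitions along these lines would fit:
    #   HOURLYDAILY (?:hourly|daily)
    #   VMPM (?:VM|PM)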
    if [vm_or_pm] == "PM" {
        csv {
            separator          => " "
            skip_empty_columns => false
            columns => [
                "run_date", "location_continent", "location_state_country", "location_city",
                "datacenter", "cluster", "datetime", "uuid", "host", "primary_key",
                "num_cpus", "num_sockets", "cpu_avg_util", "cpu_max_util", "cpu_avg_ghz", "cpu_max_ghz", "cpu_cap_ghz",
                "memory_avg_util", "memory_max_util", "memory_avg_gb", "memory_max_gb", "memory_cap_gb",
                "memory_baloon_avg_util", "memory_baloon_max_util", "memory_baloon_cap_gb",
                "swapping_avg_util", "swapping_max_util", "swapping_cap_gb",
                "network_avg_util", "network_max_util", "network_cap_gb",
                "io_avg_util", "io_max_util", "io_cap_gb",
                "avg_q1vcpu_util", "max_q1vcpu_util", "avg_q2vcpu_util", "max_q2vcpu_util",
                "avg_q4vcpu_util", "max_q4vcpu_util", "avg_q8vcpu_util", "max_q8vcpu_util",
                "avg_q16vcpu_util", "max_q16vcpu_util"
            ]
        } # end of PM csv clause
    } # end if PM
    else if [vm_or_pm] == "VM" {
        csv {
            #source             => "message"
            separator          => " "
            skip_empty_columns => false
            #quote_char        => """
            columns => [
                "run_date", "location_continent", "location_state_country", "location_city",
                "datacenter", "cluster", "primary_key", "uuid", "vmname", "group", "datetime", "host",
                "num_vcpus", "cpu_avg_util", "cpu_avg_ghz", "cpu_max_util", "cpu_max_ghz", "cpu_cap_ghz",
                "memory_avg_util", "memory_avg_gb", "memory_max_util", "memory_max_gb", "memory_cap_gb",
                "storage_util_avg", "storage_util_max", "storage_cap_gb", "vdrive", "storage_used_gb",
                "iops_avg", "iops_max", "iops_cap",
                "io_thruput_avg", "io_thruput_max", "io_thruput_cap_gb",
                "memory_baloon_avg_util", "memory_baloon_max_util", "memory_baloon_cap_gb",
                "network_avg", "network_max", "network_cap_gb",
                "storage_latency_avg", "storage_latency_max", "storage_latency_cap"
            ]
        } # end of VM csv clause
    } # end if VM
    else {
        csv {
            remove_tag         => [ "%{timeframe}" ]
            separator          => " "
            skip_empty_columns => false
            columns => ["datacenter", "cluster", "vm_uuid", "vmname", "primary_key", "DeployedState",
                        "datetime", "host", "numvcpus", "cpu_cap_ghz", "memory_cap_gb", "storage_cap_gb"]
        } # end of vmremlist/vmaddlist csv clause
    } # end of else (vmremlist/vmaddlist)
    date {
        target => "datetime"
        match  => [ "datetime", "yyyy-MM-dd HH:mm:ss" ]
    }
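    # Note: date's default target is @timestamp; with target set to "datetime",
    # the parsed value replaces the original string in that field.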
    mutate {
        remove_field => ["month", "day", "year", "hour"]
    }
    mutate {
        # mutual PM and VM fields
        convert => ["cpu_avg_ghz", "float"]
        convert => ["cpu_cap_ghz", "float"]
        convert => ["cpu_max_ghz", "float"]
        convert => ["memory_avg_gb", "float"]
        convert => ["memory_avg_util", "float"]
        convert => ["memory_max_util", "float"]
        convert => ["network_cap_gb", "float"]
        convert => ["memory_max_gb", "integer"]
        convert => ["memory_cap_gb", "integer"]
        convert => ["memory_baloon_avg_util", "float"]
        convert => ["memory_baloon_max_util", "float"]
        convert => ["memory_baloon_cap_gb", "float"]

        # addremvm/addrempm fields
        convert => ["numvcpus", "integer"]

        # PM hourly fields
        convert => ["num_cpus", "integer"]
        convert => ["num_sockets", "integer"]
        convert => ["cpu_avg_util", "float"]
        convert => ["cpu_max_util", "float"]
        convert => ["swapping_avg_util", "float"]
        convert => ["swapping_max_util", "float"]
        convert => ["swapping_cap_gb", "float"]
        convert => ["network_avg_util", "float"]
        convert => ["network_max_util", "float"]
        convert => ["io_avg_util", "float"]
        convert => ["io_max_util", "float"]
        convert => ["io_cap_gb", "float"]
        convert => ["avg_q1vcpu_util", "float"]
        convert => ["max_q1vcpu_util", "float"]
        convert => ["avg_q2vcpu_util", "float"]
        convert => ["max_q2vcpu_util", "float"]
        convert => ["avg_q4vcpu_util", "float"]
        convert => ["max_q4vcpu_util", "float"]
        convert => ["avg_q8vcpu_util", "float"]
        convert => ["max_q8vcpu_util", "float"]
        convert => ["avg_q16vcpu_util", "float"]
        convert => ["max_q16vcpu_util", "float"]

        # VM hourly fields
        convert => ["num_vcpus", "integer"]
        convert => ["storage_util_avg", "float"]
        convert => ["storage_util_max", "float"]
        convert => ["storage_cap_gb", "integer"]
        convert => ["storage_used_gb", "float"]
        convert => ["iops_avg", "float"]
        convert => ["iops_max", "float"]
        convert => ["iops_cap", "float"]
        convert => ["io_thruput_avg", "float"]
        convert => ["io_thruput_max", "float"]
        convert => ["io_thruput_cap_gb", "float"]
        convert => ["network_avg", "float"]
        convert => ["network_max", "integer"]
        convert => ["storage_latency_avg", "integer"]
        convert => ["storage_latency_max", "integer"]
        convert => ["storage_latency_cap", "integer"]
    } # end mutate
} # end filter

output {
    elasticsearch {
        hosts         => "localhost"
        index         => "tokiomarine"
        document_type => "%{vm_or_pm}_%{timeframe}_report"
    }
    stdout { }
}

Without seeing the data, it's difficult to answer this.
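
To see what Logstash actually produces for each event, the plain stdout { } output at the end of your conf is more informative with the rubydebug codec:

stdout { codec => rubydebug }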

I would rather use something like

date {
    match        => [ "datetime", "yyyy-MM-dd HH:mm:ss" ]
    target       => "newdatetime"
    remove_field => [ "datetime" ]
}

to convert the content of the field datetime into the field newdatetime; the old field is deleted only after a successful conversion.

This way we can easily check whether the conversion happened (and also by looking at the tags field).
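
If the conversion fails, the date filter adds _dateparsefailure to tags, so failed events can also be routed explicitly; a minimal sketch:

if "_dateparsefailure" in [tags] {
    # parsing failed; the original datetime string is untouched
} else {
    # parsing succeeded; newdatetime holds the parsed timestamp
}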

@dorj1234 Can you please properly format your code as code? Would be much easier to read with proper indentation.

Thank you! That eventually worked after I removed and recreated the index (presumably the old index had already mapped the field with a conflicting type). Something may be buggy there, but at least there is a workaround.

Thanks again for reading and helping.
JD
