In my Logstash config file I have a number of fields that I convert to integers, as shown below:
input {
  redis {
    host => "${REDIS_0_HOST}"
    port => "${REDIS_0_PORT}"
    data_type => "list"
    key => "logstash"
  }
}
input {
  redis {
    host => "${REDIS_1_HOST}"
    port => "${REDIS_1_PORT}"
    data_type => "list"
    key => "logstash"
  }
}
filter {
  # if we were successful parsing a message from the raw log, let's dive deeper into the message and assign more fields
  if [message] {
    # catch gelatin lib output on startup in containers and drop them
    if "20500017" in [message] { drop { } }
    if "2050001c" in [message] { drop { } }
    # remove trailing whitespace from the message field
    mutate {
      strip => ["message"]
    }
    # handle "message repeated X times" messages
    grok {
      match => ["message", "message repeated %{NUMBER:repeat_count} times: \[ %{GREEDYDATA:message}\]"]
      overwrite => [ "message" ]
      tag_on_failure => [ ]
    }
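    # For illustration (a hypothetical sample line, not from the real stream): an input of
    #   message repeated 3 times: [ connection refused ]
    # is rewritten by the grok above to message => "connection refused " with
    # repeat_count => "3" (a string until the convert block below makes it an integer)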
    # handle message fields that already have structured json content
    if [program] == "austin-perf" {
      json {
        source => "message"
        remove_field => ["message"]
      }
    } else {
      grok {
        break_on_match => true
        patterns_dir => ["/usr/share/logstash/config/patterns"]
        match => [
          "message", "%{OBLOG_REVIVE_DATE}",
          "message", "%{OBLOG_REVIVE}",
          "message", "%{OBLOG_DATE}",
          "message", "%{OBLOG}",
          "message", "%{WORD}, \[%{TIMESTAMP_ISO8601} #%{NUMBER}\] ?%{WORD:level} -- : %{GREEDYDATA:kvpairs}", # ruby app logs
          "message", "%{USERNAME:level}: ?%{URIPATH:file} %{NUMBER:line_num} %{GREEDYDATA:kvpairs}",
          "message", "%{USERNAME:level}: ?%{GREEDYDATA:kvpairs}",
          "message", "%{URIPATH:file}:%{POSINT:line_num}" # ruby app exceptions
        ]
      }
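      # Illustration with a made-up ruby-style log line (assuming none of the custom OBLOG
      # patterns matches it first):
      #   W, [2019-09-04T14:23:05 #99] WARN -- : kafka_records=7 rss_mem=1024
      # hits the "ruby app logs" pattern above and captures level => "WARN" and
      # kvpairs => "kafka_records=7 rss_mem=1024"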
if "\." not in [kvpairs] {
kv {
source => "kvpairs"
include_keys => [
"pulse_git_events",
"pulse_trending_count",
"pulse_news_count",
"kafka_records",
"repeat_count",
"used_memory",
"new_kafka_articles",
"wcs_training_time",
"rokerbot_event",
"health_check",
"rokerbot_bot_utterance",
"rokerbot_user_utterance",
"Date_Conn_Time",
"Date_Query_Time",
"Date_Parse_Time",
"News_Conn_Time",
"News_Query_Time",
"NEWS_FAIL_TIME",
"writing_image",
"timed_app",
"ran_for",
"app_name",
"klocker_app_name",
"memory_used",
"cpu_usage",
"rss_mem",
"vms_mem",
"shared_mem",
"uss_mem",
"pss_mem",
"text_mem",
"data_mem",
"total_gpu_mem",
"used_gpu_mem",
"free_gpu_mem"
]
}
}
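      # Continuing the hypothetical sample: kvpairs => "kafka_records=7 rss_mem=1024" contains
      # no "\.", so the guard above passes and the kv filter splits it into the string fields
      # kafka_records => "7" and rss_mem => "1024" (both keys are in include_keys)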
      prune {
        blacklist_names => ["%{URI}"]
      }
    }
    if [file] and [line_num] {
      mutate {
        add_field => {
          "test_unique" => "%{file}:%{line_num}"
        }
      }
    }
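    # Illustrative values: with file => "/app/worker.rb" and line_num => "42", the sprintf
    # references above produce test_unique => "/app/worker.rb:42"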
  }
  mutate {
    convert => {
      "pulse_git_events" => "integer"
      "pulse_trending_count" => "integer"
      "pulse_news_count" => "integer"
      "kafka_records" => "integer"
      "repeat_count" => "integer"
      "used_memory" => "integer"
      "new_kafka_articles" => "integer"
      "wcs_training_time" => "integer"
      "ran_for" => "integer"
      "Date_Conn_Time" => "integer"
      "Date_Query_Time" => "integer"
      "Date_Parse_Time" => "integer"
      "News_Conn_Time" => "integer"
      "News_Query_Time" => "integer"
      "NEWS_FAIL_TIME" => "integer"
      "memory_used" => "integer"
      "cpu_usage" => "float"
      "rss_mem" => "integer"
      "vms_mem" => "integer"
      "shared_mem" => "integer"
      "uss_mem" => "integer"
      "pss_mem" => "integer"
      "text_mem" => "integer"
      "data_mem" => "integer"
      "total_gpu_mem" => "integer"
      "used_gpu_mem" => "integer"
      "free_gpu_mem" => "integer"
    }
    lowercase => [ "level" ]
    remove_field => [ "timestamp", "kvpairs", "type", "_type" ]
    add_field => {
      "time" => "%{+HHmmssZ}"
      "weekday" => "%{+EEE}"
    }
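    # Both add_field values are sprintf date patterns (Joda-style) applied to @timestamp; for
    # example, an event stamped 2019-09-04T14:23:05Z would get time => "142305+0000" and
    # weekday => "Wed" (sample values for illustration)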
  }
}
output {
  elasticsearch {
    hosts => ["${ES_DATA_0}","${ES_DATA_1}"]
    index => "logstash-%{+YYYY.MM.dd}"
    template => "/usr/share/logstash/config/logstash_index.template.json"
    template_name => "logstash-*"
  }
}
and my index template file is as follows:
{
  "template": "logstash-*",
  "order": 1,
  "settings": {
    "number_of_shards": 2,
    "number_of_replicas": 1
  },
  "mappings": {
    "doc": {
      "properties": {
        "time": {
          "type": "date",
          "format": "basic_time_no_millis"
        },
        "before": {
          "type": "date",
          "format": "strict_date_time"
        },
        "after": {
          "type": "date",
          "format": "strict_date_time"
        },
        "logsource": {
          "type": "ip"
        }
      }
    }
  }
}
However, when I run

curl -X GET "${ES_MASTER}:9200/logstash-2019.09.04/_mappings?pretty"

to check the mappings of the index, I can see that neither the conversions in the config file nor the field mappings in the template file have been applied: the fields intended to be integers are mapped as type long, and the date fields are mapped as type text.
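For reference, one can also query the template endpoint directly to see whether the logstash-* template was actually installed and what mappings it carries (a diagnostic sketch, assuming the same ${ES_MASTER} variable as above):

curl -X GET "${ES_MASTER}:9200/_template/logstash-*?pretty"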