Hello,
I hope I did it as you asked:
{
  "filebeat-7.17.5-elasticsearch-server-pipeline" : {
    "description" : "Pipeline for parsing elasticsearch server logs",
    "processors" : [
      {
        "set" : {
          "value" : "{{_ingest.timestamp}}",
          "field" : "event.ingested"
        }
      },
      {
        "rename" : {
          "field" : "@timestamp",
          "target_field" : "event.created"
        }
      },
      {
        "grok" : {
          "field" : "message",
          "patterns" : [
            "^%{CHAR:first_char}"
          ],
          "pattern_definitions" : {
            "CHAR" : "."
          }
        }
      },
      {
        "pipeline" : {
          "name" : "filebeat-7.17.5-elasticsearch-server-pipeline-plaintext",
          "if" : "ctx.first_char != '{'"
        }
      },
      {
        "pipeline" : {
          "if" : "ctx.first_char == '{'",
          "name" : "filebeat-7.17.5-elasticsearch-server-pipeline-json"
        }
      },
      {
        "script" : {
          "source" : "if (ctx.elasticsearch.server.gc != null && ctx.elasticsearch.server.gc.observation_duration != null) {\n if (ctx.elasticsearch.server.gc.observation_duration.unit == params.seconds_unit) {\n ctx.elasticsearch.server.gc.observation_duration.ms = ctx.elasticsearch.server.gc.observation_duration.time * params.ms_in_one_s;\n }\n if (ctx.elasticsearch.server.gc.observation_duration.unit == params.milliseconds_unit) {\n ctx.elasticsearch.server.gc.observation_duration.ms = ctx.elasticsearch.server.gc.observation_duration.time;\n }\n if (ctx.elasticsearch.server.gc.observation_duration.unit == params.minutes_unit) {\n ctx.elasticsearch.server.gc.observation_duration.ms = ctx.elasticsearch.server.gc.observation_duration.time * params.ms_in_one_m;\n }\n} if (ctx.elasticsearch.server.gc != null && ctx.elasticsearch.server.gc.collection_duration != null) {\n if (ctx.elasticsearch.server.gc.collection_duration.unit == params.seconds_unit) {\n ctx.elasticsearch.server.gc.collection_duration.ms = ctx.elasticsearch.server.gc.collection_duration.time * params.ms_in_one_s;\n }\n if (ctx.elasticsearch.server.gc.collection_duration.unit == params.milliseconds_unit) {\n ctx.elasticsearch.server.gc.collection_duration.ms = ctx.elasticsearch.server.gc.collection_duration.time;\n }\n if (ctx.elasticsearch.server.gc.collection_duration.unit == params.minutes_unit) {\n ctx.elasticsearch.server.gc.collection_duration.ms = ctx.elasticsearch.server.gc.collection_duration.time * params.ms_in_one_m;\n }\n}",
          "params" : {
            "minutes_unit" : "m",
            "seconds_unit" : "s",
            "milliseconds_unit" : "ms",
            "ms_in_one_s" : 1000,
            "ms_in_one_m" : 60000
          },
          "lang" : "painless"
        }
      },
      {
        "set" : {
          "field" : "event.kind",
          "value" : "event"
        }
      },
      {
        "set" : {
          "field" : "event.category",
          "value" : "database"
        }
      },
      {
        "script" : {
          "source" : "def errorLevels = ['FATAL', 'ERROR']; if (ctx?.log?.level != null) {\n if (errorLevels.contains(ctx.log.level)) {\n ctx.event.type = 'error';\n } else {\n ctx.event.type = 'info';\n }\n}",
          "lang" : "painless"
        }
      },
      {
        "set" : {
          "ignore_empty_value" : true,
          "field" : "host.name",
          "value" : "{{elasticsearch.node.name}}"
        }
      },
      {
        "set" : {
          "field" : "host.id",
          "value" : "{{elasticsearch.node.id}}",
          "ignore_empty_value" : true
        }
      },
      {
        "remove" : {
          "field" : [
            "elasticsearch.server.gc.collection_duration.time",
            "elasticsearch.server.gc.collection_duration.unit",
            "elasticsearch.server.gc.observation_duration.time",
            "elasticsearch.server.gc.observation_duration.unit"
          ],
          "ignore_missing" : true
        }
      },
      {
        "remove" : {
          "ignore_missing" : true,
          "field" : [
            "elasticsearch.server.timestamp",
            "elasticsearch.server.@timestamp"
          ]
        }
      },
      {
        "remove" : {
          "field" : [
            "first_char"
          ]
        }
      }
    ],
    "on_failure" : [
      {
        "set" : {
          "field" : "error.message",
          "value" : "{{ _ingest.on_failure_message }}"
        }
      }
    ]
  }
}
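In case it helps, this is how I usually sanity-check a pipeline like this before sending real traffic through it - a _simulate call against the stored pipeline. Just a sketch: the sample log line is made up, the port is the one from my ES output below, and it assumes the -plaintext/-json sub-pipelines are already installed (they should be after filebeat setup):
# run one fake server log line through the stored pipeline
curl -X POST "http://my_ip_address:10000/_ingest/pipeline/filebeat-7.17.5-elasticsearch-server-pipeline/_simulate?pretty" \
  -H 'Content-Type: application/json' \
  -d '{
    "docs": [
      {
        "_source": {
          "@timestamp": "2022-07-20T12:00:01.000Z",
          "message": "[2022-07-20T12:00:00,000][INFO ][o.e.n.Node] [node-1] started"
        }
      }
    ]
  }'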
It never happened - I'm sorry for the misunderstanding.
For installation I used the ES 7.x repo and then apt-get - this way it's easier to use Ansible scripts.
This is my ES output (I always test everything with the ES output before I start to use Logstash):
# ---------------------------- Elasticsearch Output ----------------------------
output.elasticsearch:
  # Array of hosts to connect to.
  hosts: ["my_ip_address:10000"]

  # Protocol - either `http` (default) or `https`.
  protocol: "http"

  # Authentication credentials - either API key or username/password.
  #api_key: "id:api_key"
  #username: "elastic"
  #password: "changeme"
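Before switching to Logstash I also check this output from the shell with the standard Filebeat commands (apt install, so the config is picked up from /etc/filebeat/filebeat.yml):
# validate the config file, then try to connect to my_ip_address:10000
filebeat test config
filebeat test output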
Kibana settings:
# =================================== Kibana ===================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:
  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  host: "my_ip_address:8801"

  # Kibana Space ID
  # ID of the Kibana Space into which the dashboards should be loaded. By default,
  # the Default Space will be used.
  #space.id:
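With the Kibana endpoint set, loading the dashboards (and re-pushing the module ingest pipelines, like the one above) is just the standard setup commands - nothing custom here:
# load the dashboards via the Kibana API on my_ip_address:8801
filebeat setup --dashboards
# (re)install the elasticsearch module ingest pipelines into ES
filebeat setup --pipelines --modules elasticsearch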
Logging section:
# ================================== Logging ===================================
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
logging.level: info
# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publisher", "service".
logging.selectors: ["*"]
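When info isn't enough, I don't edit the file; a one-off foreground run with the standard debug flags does the same thing:
# -e logs to stderr instead of the log file, -d enables debug for the given selectors ("*" = all)
filebeat -e -d "publisher,service"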
Chris