Hello Noemi,
Please find all the config files:
1) filebeat.yml
###################### Filebeat Configuration Example #########################
#This file is an example configuration file highlighting only the most common options.
#=========================== Filebeat prospectors =============================
filebeat.prospectors:
#Each - is a prospector. Most options can be set at the prospector level, so
#you can use different prospectors for various configurations.
#Below are the prospector specific configurations.
- type: log
  #Change to true to enable this prospector configuration.
  enabled: true
  #Paths that should be crawled and fetched. Glob based paths.
  paths:
    #- /var/log/syslog
    #- /var/log/auth.log
    #- c:\programdata\elasticsearch\logs\*
    - /var/apps/mobilock-emm-staging/shared/log/staging.log
  exclude_lines: ['^.*nrpe.*$']
  document_type: syslog
#Exclude lines. A list of regular expressions to match. It drops the lines that are
#matching any regular expression from the list.
#exclude_lines: ['^DBG']
#Include lines. A list of regular expressions to match. It exports the lines that are
#matching any regular expression from the list.
#include_lines: ['^ERR', '^WARN']
#Exclude files. A list of regular expressions to match. Filebeat drops the files that
#are matching any regular expression from the list. By default, no files are dropped.
#exclude_files: ['.gz$']
#Optional additional fields. These fields can be freely picked
#to add additional information to the crawled log files for filtering
#fields:
#level: debug
#review: 1
###Multiline options
#Multiline can be used for log messages spanning multiple lines. This is common
#for Java Stack Traces or C-Line Continuation
#The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
#multiline.pattern: ^[
#Defines if the pattern set under pattern should be negated or not. Default is false.
#multiline.negate: false
#Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
#that was (not) matched before or after, or as long as a pattern is not matched based on negate.
#Note: "after" is the equivalent of "previous" and "before" is the equivalent of "next" in Logstash
#multiline.match: after
#multiline.match: after
  multiline.pattern: '[A-Z]{1}, \[[0-9]{4}-[0-9]{2}-[0-9]{2}'
  multiline.negate: true
  multiline.match: after
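  #For reference, this pattern targets Ruby/Rails-style log lines, i.e. a severity
  #letter, a comma, then a bracketed timestamp, as in this made-up sample line:
  #  I, [2018-09-20T10:15:30.123456 #4321]  INFO -- : [req-id] Started GET "/devices"
  #With negate: true and match: after, any line that does NOT start like this
  #(e.g. a stack-trace line) gets appended to the preceding matching line.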
#============================= Filebeat modules ===============================
filebeat.config.modules:
  #Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml
  #Set to true to enable config reloading
  reload.enabled: true
  #Period on which files under path should be checked for changes
  #reload.period: 10s
#==================== Elasticsearch template setting ==========================
setup.template.settings:
  index.number_of_shards: 3
  #index.codec: best_compression
  #_source.enabled: false
#================================ General =====================================
#The name of the shipper that publishes the network data. It can be used to group
#all the transactions sent by a single shipper in the web interface.
#name:
#The tags of the shipper are included in their own field with each
#transaction published.
#tags: ["service-X", "web-tier"]
#Optional fields that you can specify to add additional information to the
#output.
#fields:
#env: staging
#============================== Dashboards =====================================
#These settings control loading the sample dashboards to the Kibana index. Loading
#the dashboards is disabled by default and can be enabled either by setting the
#options here, or by using the `-setup` CLI flag or the `setup` command.
setup.dashboards.enabled: false
#The URL from where to download the dashboards archive. By default this URL
#has a value which is computed based on the Beat name and version. For released
#versions, this URL points to the dashboard archive on the artifacts.elastic.co
#website.
#setup.dashboards.url:
#============================== Kibana =====================================
#Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
#This requires a Kibana endpoint configuration.
setup.kibana:
  host: "xyz:5601"
#----------------------------- Logstash output --------------------------------
output.logstash:
  #The Logstash hosts
  hosts: ["xyz:5044"]
  #Maximum number of events to bulk in a single Logstash request (default is 2048)
  bulk_max_size: 1024
  #Optional SSL. By default is off.
  #List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
  ssl.certificate_authorities: ["/etc/pki/tls/ca.crt"]
  ssl.certificate: "/etc/pki/tls/client.crt"
  ssl.key: "/etc/pki/tls/client.key"
  ssl.key_passphrase: "ky9D=h=w2z2uUCjRqqWF"
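
Before restarting the service, the file and the connection to Logstash can be sanity-checked with Filebeat's built-in test commands (paths assume the standard deb/rpm layout):

root@xyz:~# filebeat test config -c /etc/filebeat/filebeat.yml
root@xyz:~# filebeat test output

Also, rather than keeping ssl.key_passphrase in plain text, the secret can be moved into the Filebeat keystore (filebeat keystore create, then filebeat keystore add SSL_KEY_PASSPHRASE) and referenced as "${SSL_KEY_PASSPHRASE}" in the config.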
2) Logstash input, filter, and output file:
input {
  beats {
    port => 5044
    ssl => true
    ssl_certificate_authorities => ["/etc/pki/tls/ca.crt"]
    ssl_certificate => "/etc/pki/tls/server.crt"
    ssl_key => "/etc/pki/tls/server.key"
    #"peer" verifies the client certificate when one is presented;
    #"force_peer" would also reject clients that do not send one
    ssl_verify_mode => "peer"
    tls_min_version => "1.2"
  }
}
filter {
  grok {
    #Note: the named capture groups below use placeholder names
    #(timestamp, pid, loglevel, request_id, message, ...)
    match => { "message" => [
      "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}",
      "\I,\s\[(?<timestamp>[\d-\w:.]+)\s#(?<pid>\d+)\]\s+(?<loglevel>\w+)\s-+\s:\s\[(?<request_id>[\d\w-]+)\]\s(?<message>[\w\s]+)\s\"(?<path>[\w/.]+)\"\s(?<rest>.*)",
      "\I,\s\[(?<timestamp>[\d-\w:.]+)\s#(?<pid>[\d]+)\]\s\s(?<loglevel>[\w]+)\s--\s:\s\[(?<request_id>[\d-\w]+)\]\s(?:[cC]urrent\s)?[dD]evice[\s:]+(?<device_info>[\w\s:]+)",
      "\I,\s\[(?<timestamp>[\d-\w:.]+)\s#(?<pid>\d+)\]\s+(?<loglevel>\w+)\s-+\s:\s\[(?<request_id>[\d\w-]+)\]\s(?<message>.*)",
      "\w,\s\[(?<timestamp>[\w-:.]+)\s#(?<pid>\d+)\]\s+(?<loglevel>\w+)\s(?<message>.*)"
    ] }
    add_field => [ "received_at", "%{@timestamp}" ]
    add_field => [ "received_from", "%{host}" ]
  }
}
output {
  elasticsearch {
    hosts => ["xyz:9200"]
    sniffing => true
    manage_template => false
    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
    document_type => "%{[@metadata][type]}"
  }
}
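
The pipeline definition can be checked without actually starting Logstash; assuming the file lives under /etc/logstash/conf.d/ (adjust to your layout):

root@xyz:~# /usr/share/logstash/bin/logstash --config.test_and_exit -f /etc/logstash/conf.d/

Individual grok patterns are easiest to iterate on in Kibana's Grok Debugger (under Dev Tools) before changing the filter here.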
3) List of enabled and disabled modules in Filebeat:
root@xyz:~# filebeat modules list
Enabled:
system
Disabled:
apache2
auditd
elasticsearch
icinga
iis
kafka
kibana
logstash
mongodb
mysql
nginx
osquery
postgresql
redis
traefik
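
Modules can be toggled from the filebeat CLI; for example, to enable the nginx module:

root@xyz:~# filebeat modules enable nginx

This renames modules.d/nginx.yml.disabled to modules.d/nginx.yml, and since reload.enabled is true in filebeat.yml above, the change should be picked up without a restart.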
Please find the requested config files above.
Note: I have used "xyz" in place of the real IPs and DNS names for security reasons.