Hi,
Here's my sample config.
#=========================== Filebeat inputs ===============================
filebeat.inputs:
- type: log
#Change to true to enable this input configuration.
  enabled: true
#Paths that should be crawled and fetched. Glob based paths.
  paths:
    - /var/log/*.log
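#More globs can be added as extra list items; the path below is only an
#illustrative placeholder, not part of this setup:
#    - /var/log/myapp/*.log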
#============================== Filebeat modules ==============================
filebeat.config.modules:
#Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml
#Set to true to enable config reloading
  reload.enabled: true
#Period on which files under path should be checked for changes
  reload.period: 10s
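#Modules under modules.d are toggled outside this file; for example, assuming
#a standard package install, the system module can be switched on with:
#  filebeat modules enable system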
#-------------------------------- Kafka Output --------------------------------
output.kafka:
#Boolean flag to enable or disable the output module.
  enabled: true
#The list of Kafka broker addresses from which to fetch the cluster metadata.
#The cluster metadata contain the actual Kafka brokers events are published
#to.
  hosts: ["192.168.1.120:9092"]
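#If the cluster has more than one broker they can all be listed here; the
#second address below is just a placeholder, not a real broker in this setup:
#hosts: ["192.168.1.120:9092", "192.168.1.121:9092"]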
#The Kafka topic used for produced events. The setting can be a format string
#using any event field. To set the topic from document type use %{[type]}.
  topic: TutorialTopic
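#As a sketch of the format-string form, the topic could instead be taken from
#a custom field set on each event (fields.log_topic is assumed to exist):
#topic: '%{[fields.log_topic]}'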
#the Kafka event key setting. Use format string to create a unique event key.
#By default no event key will be generated.
#key: ''
#The Kafka event partitioning strategy. Default hashing strategy is `hash`
#using the `output.kafka.key` setting or randomly distributes events if
#`output.kafka.key` is not configured.
#partition.hash:
# If enabled, events will only be published to partitions with reachable
# leaders. Default is false.
#reachable_only: false
#Configure alternative event field names used to compute the hash value.
#If empty `output.kafka.key` setting will be used.
#Default value is empty list.
#hash: []
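#Purely as an illustration, hashing on an event field instead of the key
#could look like the commented sketch below (the field name is hypothetical):
#partition.hash:
#  hash: ["fields.tenant"]
#  reachable_only: true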
#Authentication details. Password is required if username is set.
  username: 'xxxxx'
  password: 'xxxxxxx'
#SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512.
#Defaults to PLAIN when `username` and `password` are configured.
#sasl.mechanism: ''
#Kafka version Filebeat is assumed to run against. Defaults to "1.0.0".
#version: '1.0.0'
#Configure JSON encoding
#codec.json:
#Pretty-print JSON event
#pretty: false
#Configure escaping HTML symbols in strings.
#escape_html: false
#Metadata update configuration. Metadata contains leader information
#used to decide which broker to use when publishing.
#metadata:
#Max metadata request retry attempts when cluster is in the middle of leader
#election. Defaults to 3 retries.
#retry.max: 3
# Wait time between retries during leader elections. Default is 250ms.
#retry.backoff: 250ms
#Refresh metadata interval. Defaults to every 10 minutes.
#refresh_frequency: 10m
#Strategy for fetching the topics metadata from the broker. Default is false.
#full: false
#The number of concurrent load-balanced Kafka output workers.
#worker: 1
#The number of times to retry publishing an event after a publishing failure.
#After the specified number of retries, events are typically dropped.
#Some Beats, such as Filebeat, ignore the max_retries setting and retry until
#all events are published. Set max_retries to a value less than 0 to retry
#until all events are published. The default is 3.
#max_retries: 3
#The number of seconds to wait before trying to republish to Kafka
#after a network error. After waiting backoff.init seconds, the Beat
#tries to republish. If the attempt fails, the backoff timer is increased
#exponentially up to backoff.max. After a successful publish, the backoff
#timer is reset. The default is 1s.
#backoff.init: 1s
#The maximum number of seconds to wait before attempting to republish to
#Kafka after a network error. The default is 60s.
#backoff.max: 60s
#The maximum number of events to bulk in a single Kafka request. The default
#is 2048.
#bulk_max_size: 2048
#Duration to wait before sending bulk Kafka request. 0 is no delay. The default
#is 0.
#bulk_flush_frequency: 0s
#The number of seconds to wait for responses from the Kafka brokers before
#timing out. The default is 30s.
#timeout: 30s
#The maximum duration a broker will wait for number of required ACKs. The
#default is 10s.
#broker_timeout: 10s
#The number of messages buffered for each Kafka broker. The default is 256.
#channel_buffer_size: 256
#The keep-alive period for an active network connection. If 0s, keep-alives
#are disabled. The default is 0 seconds.
#keep_alive: 0
#Sets the output compression codec. Must be one of none, snappy and gzip. The
#default is gzip.
#compression: gzip
#Set the compression level. Currently only gzip provides a compression level
#between 0 and 9. The default value is chosen by the compression algorithm.
#compression_level: 4
#The maximum permitted size of JSON-encoded messages. Bigger messages will be
#dropped. The default value is 1000000 (bytes). This value should be equal to
#or less than the broker's message.max.bytes.
#max_message_bytes: 1000000
#The ACK reliability level required from broker. 0=no response, 1=wait for
#local commit, -1=wait for all replicas to commit. The default is 1. Note:
#If set to 0, no ACKs are returned by Kafka. Messages might be lost silently
#on error.
#required_acks: 1
#The configurable ClientID used for logging, debugging, and auditing
#purposes. The default is "beats".
#client_id: beats
#Enables Kerberos FAST authentication in the Kafka output. This may
#conflict with certain Active Directory configurations.
#enable_krb5_fast: false
#Use SSL settings for HTTPS.
#ssl.enabled: true
#List of supported/valid TLS versions. By default all TLS versions from 1.1
#up to 1.3 are enabled.
#ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
#List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
#Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
#Client certificate key
#ssl.key: "/etc/pki/client/cert.key"
#Optional passphrase for decrypting the certificate key.
#ssl.key_passphrase: ''
#Configure cipher suites to be used for SSL connections
#ssl.cipher_suites:
#Configure curve types for ECDHE-based cipher suites
#ssl.curve_types:
#Configure what types of renegotiation are supported. Valid options are
#never, once, and freely. Default is never.
#ssl.renegotiation: never
#Configure a pin that can be used to do extra validation of the verified certificate chain,
#this allow you to ensure that a specific certificate is used to validate the chain of trust.
#The pin is a base64 encoded string of the SHA-256 fingerprint.
#ssl.ca_sha256: ""
#Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
#kerberos.enabled: true
#Authentication type to use with Kerberos. Available options: keytab, password.
#kerberos.auth_type: password
#Path to the keytab file. It is used when auth_type is set to keytab.
#kerberos.keytab: /etc/security/keytabs/kafka.keytab
#Path to the Kerberos configuration.
#kerberos.config_path: /etc/krb5.conf
#The service name. Service principal name is constructed from
#service_name/hostname@realm.
#kerberos.service_name: kafka
#Name of the Kerberos user.
#kerberos.username: elastic
#Password of the Kerberos user. It is used when auth_type is set to password.
#kerberos.password: changeme
#Kerberos realm.
#kerberos.realm: ELASTIC
#================================= Processors =================================
processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~
#================================== Logging ===================================
#Sets log level. The default log level is info.
#Available log levels are: error, warning, info, debug
#logging.level: debug
logging.level: error
#At debug level, you can selectively enable logging only for some components.
#To enable all selectors use ["*"]. Examples of other selectors are "beat",
#"publisher", "service".
logging.selectors: ["*"]
#Logging to rotating files. Set logging.to_files to false to disable logging to
#files.
logging.to_files: true
logging.files:
#Configure the path where the logs are written. The default is the logs directory
#under the home path (the binary location).
  path: /var/log/filebeat
#The name of the files where the logs are written to.
  name: filebeat
#Configure log file size limit. If limit is reached, log file will be
#automatically rotated.
  rotateeverybytes: 10485760 # = 10MB
#Number of rotated log files to keep. Oldest files will be deleted first.
  keepfiles: 7
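
To sanity-check the file before restarting the service, filebeat's built-in test subcommand can be used; the paths below assume a package install with the config at /etc/filebeat/filebeat.yml:

filebeat test config -c /etc/filebeat/filebeat.yml
filebeat test output -c /etc/filebeat/filebeat.yml

The second command actually connects to the configured Kafka brokers, so it is a quick way to confirm the host, credentials, and TLS settings are accepted.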