Hi Stephen
filebeat.yml: (comments shortened to avoid max 7000)
###################### Filebeat Configuration Example #########################

# This file is an example configuration highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options; you can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html

# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.

#=========================== Filebeat inputs =============================
filebeat.inputs:

# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input specific configurations.

- type: log

  # Change to true to enable this input configuration.
  #enabled: true

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - /var/log/*.log
    #- c:\programdata\elasticsearch\logs*

  # Exclude lines. A list of regular expressions to match. It drops the lines that are
  # matching any regular expression from the list.
  #exclude_lines: ['^DBG']

  # Include lines. A list of regular expressions to match. It exports the lines that are
  # matching any regular expression from the list.
  #include_lines: ['^ERR', '^WARN']

  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
  # are matching any regular expression from the list. By default, no files are dropped.
  #exclude_files: ['.gz$']

  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering
  #fields:
  #  level: debug
  #  review: 1

  ### Multiline options

  # Multiline can be used for log messages spanning multiple lines. This is common
  # for Java Stack Traces or C-Line Continuation

  # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
  #multiline.pattern: ^\[

  # Defines if the pattern set under pattern should be negated or not. Default is false.
  #multiline.negate: false

  # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern
  # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
  # Note: After is the equivalent to previous and before is the equivalent to next in Logstash
  #multiline.match: after
#============================= Filebeat modules ===============================

filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml

  # Set to true to enable config reloading
  reload.enabled: false

  # Period on which files under path should be checked for changes
  #reload.period: 10s
#==================== Elasticsearch template setting ==========================

setup.template.settings:
  index.number_of_shards: 1
  #index.codec: best_compression
  #_source.enabled: false
#================================ General =====================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:

# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output.
#fields:
#  env: staging

#============================== Dashboards =====================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here or by using the `setup` command.
#setup.dashboards.enabled: true

# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url: http://192.168.1.82:5601
#============================== Kibana =====================================

# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:

  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  host: "192.168.1.82:5601"

  # Kibana Space ID
  # ID of the Kibana Space into which the dashboards should be loaded. By default,
  # the Default Space will be used.
  #space.id:
#============================= Elastic Cloud ==================================

# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/).

# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:

# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
#================================ Outputs =====================================

# Configure what output to use when sending the data collected by the beat.

#-------------------------- Elasticsearch output ------------------------------
output.elasticsearch:
  # Array of hosts to connect to.
  hosts: ["192.168.1.82:9200"]

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"
#----------------------------- Logstash output --------------------------------
#output.logstash:
  # The Logstash hosts
  #hosts: ["localhost:5044"]

  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"
#================================ Processors =====================================

# Configure processors to enhance or manipulate events generated by the beat.

processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
#================================ Logging =====================================

# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: debug

# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
#logging.selectors: ["*"]

#============================== X-Pack Monitoring ===============================
# Filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.

# Set to true to enable the monitoring reporter.
#monitoring.enabled: false

# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:

#================================= Migration ==================================

# This allows to enable 6.7 migration aliases
#migration.6_to_7.enabled: true
#=========== Added from Reference by Herman =========

# If enabled, Filebeat periodically logs its internal metrics that have changed
# in the last period. For each metric that changed, the delta from the value at
# the beginning of the period is logged. Also, the total values for
# all non-zero internal metrics are logged on shutdown. The default is true.
logging.metrics.enabled: true
# The period after which to log the internal metrics. The default is 30s.
logging.metrics.period: 300s