Hello,
we are in the process of migrating from the container input to the filestream input.
Filebeat runs as a DaemonSet on Kubernetes, and each Filebeat pod collects logs from all applications deployed on its node. With the container input, each Filebeat pod consumes ~100 MB of RAM. After switching to filestream, RAM usage grows to about 3 GB per pod. Is this normal behaviour? Here is our config:
filebeat.inputs:
- type: filestream
  id: "filebeat-${NODE_NAME}"
  enabled: true
  prospector.scanner.symlinks: true
  paths:
    - /var/log/containers/*.log
  parsers:
    # CRI log format is set on the container parser
    # (filestream has no top-level format option, unlike the container input)
    - container:
        format: cri
  fields:
    logstashSource: "filebeat-k8s--console-json"
processors:
  - add_kubernetes_metadata:
      host: ${NODE_NAME}
      in_cluster: true
      default_matchers.enabled: false
      matchers:
        - logs_path:
            logs_path: /var/log/containers/
  - drop_event:
      when:
        not:
          equals:
            kubernetes.labels.logging-type: "console-json"
  - decode_json_fields:
      fields: ["message"]
      process_array: true
      max_depth: 1
      target: "logData"
  - rename:
      when:
        not:
          has_fields: ["logData.appName"]
      fields:
        - from: "kubernetes.labels.app"
          to: "logData.appName"
  - rename:
      when:
        not:
          has_fields: ["logData.message"]
      fields:
        - from: "message"
          to: "logData.message"
  - rename:
      fields:
        - from: "kubernetes.node.name"
          to: "logData.node"
  - rename:
      fields:
        - from: "fields.logstashSource"
          to: "logData.logstashSource"
  - drop_fields:
      fields: ["stream", "message", "prospector", "offset", "input", "source", "kubernetes", "fields", "log"]