ECK Filebeat processor add_kubernetes_metadata does not add fields with kube metadata

Kubernetes: 1.24.3
Kibana: 8.10.2
Elasticsearch: 8.10.2
Filebeat: 8.10.2

Fresh install via ECK 2.9.0

The processor "add_kubernetes_metadata" does not add Kubernetes metadata fields to the documents indexed into Elasticsearch, and the Filebeat log does not contain any error messages.

Beat config

apiVersion: beat.k8s.elastic.co/v1beta1
kind: Beat
metadata:
  name: eck-filebeat
  namespace: eck
spec:
  type: filebeat
  version: 8.10.2
  image: elastic/filebeat:8.10.2
  elasticsearchRef:
    name: eck-elastic
  config:
    filebeat.inputs:
    # Ingress-nginx block
    - type: container
      paths:
        - /var/log/containers/*ingress-nginx*.log
      processors:
      - add_kubernetes_metadata:
          host: ${NODE_NAME}
          matchers:
            - logs_path:
                logs_path: "/var/log/containers/"
      - dissect:
          tokenizer: '%{p.remote_addr} - %{p.remote_user} [%{p.date_time}] "%{p.req_method} %{p.req_uri} %{p.http_ver}" %{p.status} %{p.bytes_sent} "%{p.http_referer}" "%{p.http_user_agent}" %{p.request_length} %{p.request_time} %{p.proxy_upstream_name} [%{proxy_alternative_upstream_name}] %{p.upstream_addr} %{p.upstream_response_length} %{p.upstream_response_time} %{p.upstream_status} %{p.req_id} %{p.host_name} %{p.kube_service} %{p.kube_namespace} %{p.upstream_http_location} %{p.scheme}://%{p.full_req_url}'
          field: "message"
          target_prefix: ""
      - convert:
          fields:
            - {from: "p.request_time", type: "float"}
          ignore_missing: true
          fail_on_error: false
    # Java apps block
    - type: container
      paths:
        - /var/log/containers/*pf-dev*.log
        - /var/log/containers/*mp-dev*.log
        - /var/log/containers/*mp-rc*.log
        - /var/log/containers/*pf-rc*.log
      exclude_files: ['/var/log/containers/*postgres-logger-dev*.log']
      multiline:
        pattern: '^[[:space:]]+(at|\.{3})|^Caused by:|^org\.|^com\.'
        negate: false
        match: after
      processors:
      - add_kubernetes_metadata:
          host: ${NODE_NAME}
          matchers:
          - logs_path:
              logs_path: "/var/log/containers/"
      - decode_json_fields:
          fields: ["message"]
          process_array: false
          max_depth: 1
          target: ""
          overwrite_keys: true
          add_error_key: true
  daemonSet:
    podTemplate:
      spec:
        dnsPolicy: ClusterFirstWithHostNet
        hostNetwork: true
        securityContext:
          runAsUser: 0
        containers:
        - name: filebeat
          volumeMounts:
          - name: varlogcontainers
            mountPath: /var/log/containers
          - name: varlogpods
            mountPath: /var/log/pods
          - name: varlibdockercontainers
            mountPath: /var/lib/docker/containers
          env:
          - name: NODE_NAME
            valueFrom:
              fieldRef:
                fieldPath: spec.nodeName
        volumes:
        - name: varlogcontainers
          hostPath:
            path: /var/log/containers
        - name: varlogpods
          hostPath:
            path: /var/log/pods
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
        tolerations:
          - key: "type"
            operator: "Equal"
            value: "monitoring"
            effect: "NoSchedule"
          - key: "node-role.kubernetes.io/master"
            operator: "Exists"
            effect: "NoSchedule"

Expanded document from Elasticsearch (note: no `kubernetes.*` fields are present)

{
  "_index": ".ds-filebeat-8.10.2-2023.10.03-000001",
  "_id": "yCAF9ooBHIX6wP7VM999",
  "_version": 1,
  "_score": 0,
  "_source": {
    "@timestamp": "2023-10-03T14:50:49.188Z",
    "ecs": {
      "version": "8.0.0"
    },
    "host": {
      "name": "dev-app-worker1"
    },
    "message": "HikariPool-1 - Closing connection org.postgresql.jdbc.PgConnection@742206cf: (connection has passed maxLifetime)",
    "log": {
      "offset": 2242232,
      "file": {
        "path": "/var/log/containers/schedule-service-bd4746dc4-wp55z_mp-rc_schedule-service-app-c3d76cc2cd736d8716a88811b7628bd94881edc297e3c19b54ce8bf05f3c8336.log"
      }
    },
    "input": {
      "type": "container"
    },
    "timestamp": "2023-10-03 17:50:49.188",
    "level": "DEBUG",
    "thread": "HikariPool-1 connection closer",
    "agent": {
      "type": "filebeat",
      "version": "8.10.2",
      "ephemeral_id": "bc8388ad-8337-482c-9429-5f2d4894fd26",
      "id": "9199c504-b67a-41a0-8465-180f006ac977",
      "name": "dev-app-worker1"
    },
    "stream": "stdout",
    "logger": "com.zaxxer.hikari.pool.PoolBase",
    "context": "default"
  },
  "fields": {
    "level": [
      "DEBUG"
    ],
    "logger": [
      "com.zaxxer.hikari.pool.PoolBase"
    ],
    "input.type": [
      "container"
    ],
    "log.offset": [
      2242232
    ],
    "thread": [
      "HikariPool-1 connection closer"
    ],
    "agent.hostname": [
      "dev-app-worker1"
    ],
    "message": [
      "HikariPool-1 - Closing connection org.postgresql.jdbc.PgConnection@742206cf: (connection has passed maxLifetime)"
    ],
    "agent.type": [
      "filebeat"
    ],
    "@timestamp": [
      "2023-10-03T14:50:49.188Z"
    ],
    "agent.id": [
      "9199c504-b67a-41a0-8465-180f006ac977"
    ],
    "ecs.version": [
      "8.0.0"
    ],
    "stream": [
      "stdout"
    ],
    "log.file.path": [
      "/var/log/containers/schedule-service-bd4746dc4-wp55z_mp-rc_schedule-service-app-c3d76cc2cd736d8716a88811b7628bd94881edc297e3c19b54ce8bf05f3c8336.log"
    ],
    "context": [
      "default"
    ],
    "agent.ephemeral_id": [
      "bc8388ad-8337-482c-9429-5f2d4894fd26"
    ],
    "agent.name": [
      "dev-app-worker1"
    ],
    "agent.version": [
      "8.10.2"
    ],
    "host.name": [
      "dev-app-worker1"
    ],
    "timestamp": [
      "2023-10-03 17:50:49.188"
    ]
  }
}

Any ideas how to fix this?

Am I really facing such a unique problem? Does anyone have any ideas, please?

The solution has been found! See the working configuration below.

It was necessary to create a service account and configure the correct RBAC permissions for it, so that the `add_kubernetes_metadata` processor can query the Kubernetes API.

The final configuration that works for me

apiVersion: beat.k8s.elastic.co/v1beta1
kind: Beat
metadata:
  name: eck-filebeat
  namespace: eck
spec:
  [...]
  config:
    filebeat.inputs:
    # Ingress-nginx block
    - type: container
      paths:
        - /var/log/containers/*ingress-nginx*.log
      processors:
      - add_kubernetes_metadata:
          host: ${NODE_NAME}
          matchers:
          - logs_path:
              logs_path: "/var/log/containers/"
    # Java apps block
    - type: container
      paths:
        - /var/log/containers/*mp-dev*.log
        - /var/log/containers/*mp-rc*.log
      [...]
      processors:
      - add_kubernetes_metadata:
          host: ${NODE_NAME}
          matchers:
          - logs_path:
              logs_path: "/var/log/containers/"
      [...]
  daemonSet:
    podTemplate:
      spec:
        serviceAccount: elastic-beat-filebeat
        automountServiceAccountToken: true
        dnsPolicy: ClusterFirstWithHostNet
        hostNetwork: false
        securityContext:
          runAsUser: 0
        containers:
        - name: filebeat
          volumeMounts:
          - name: varlogcontainers
            mountPath: /var/log/containers
          - name: varlogpods
            mountPath: /var/log/pods
          - name: varlibdockercontainers
            mountPath: /var/lib/docker/containers
          env:
          - name: NODE_NAME
            valueFrom:
              fieldRef:
                fieldPath: spec.nodeName
        volumes:
        - name: varlogcontainers
          hostPath:
            path: /var/log/containers
        - name: varlogpods
          hostPath:
            path: /var/log/pods
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
        tolerations:
          - key: "type"
            operator: "Equal"
            value: "monitoring"
            effect: "NoSchedule"
          - key: "node-role.kubernetes.io/master"
            operator: "Exists"
            effect: "NoSchedule"
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elastic-beat-filebeat
  namespace: eck
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: elastic-beat-autodiscover-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: elastic-beat-autodiscover
subjects:
- kind: ServiceAccount
  name: elastic-beat-filebeat
  namespace: eck
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: elastic-beat-autodiscover
rules:
- apiGroups:
  - ""
  - apps
  - batch
  resources:
  - nodes
  - namespaces
  - events
  - pods
  - replicasets
  - jobs
  verbs:
  - get
  - list
  - watch
1 Like

This topic was automatically closed 28 days after the last reply. New replies are no longer allowed.