Some kubernetes metadata fields aren't populated in Filebeat 7.9.0

In Filebeat 7.6.2 or higher (I'm using 7.9.0) the field kubernetes.deployment.name isn't populated, even when using add_kubernetes_metadata together with autodiscover.

I'm getting only the higher-level kubernetes.replicaset.name field.

There isn't any error in the logs.

My Config:

# Pull events from a Google Pub/Sub subscription and unwrap the JSON
# payload carried in the "message" field.
filebeat.inputs:

- type: google-pubsub
  project_id: xxxxxx
  topic: xxxxx
  subscription.name: x
  # Do not auto-create the subscription; it must already exist.
  subscription.create: false
  credentials_file: /usr/share/filebeat/pub-sub-key/pkey.json
  processors:
    # Decode the JSON document held in "message" into top-level fields,
    # overwriting any keys that already exist on the event.
    - decode_json_fields:
        process_array: true
        max_depth: 20
        target: ""
        overwrite_keys: true
        fields: ["message"]
    # Drop the raw payload once it has been decoded.
    - drop_fields:
        fields: ["message"]

# Kubernetes autodiscover: launch a container input per pod that carries
# the elastic_logs/json=true label, one template for stdout and one for
# stderr (stderr additionally gets multiline stack-trace joining).
filebeat.autodiscover:
  providers:
    - type: kubernetes
      labels.dedot: true
      annotations.dedot: true
      # include_labels: '*'
      # cleanup_timeout: 0
      scope: node
      templates:
        - condition:
            and:
              - has_fields: ['kubernetes.container.id']
              - equals:
                  kubernetes.labels.elastic_logs/json: "true"
              # - regexp:
              #     kubernetes.container.name: "xxx.*|xxxxx.*"
          config:
            # --- stdout stream ---
            - type: container
              stream: stdout
              paths:
                # - "/var/lib/docker/containers/${data.kubernetes.container.id}/*.log"
                - /var/log/containers/*-${data.kubernetes.container.id}.log
              encoding: utf-8
              symlinks: true
              scan_frequency: 1s
              publisher_pipeline.disable_host: true
              # multiline.pattern: '^[[:space:]]+(\bat\b|\.{3})|^Caused by:'
              # multiline.negate: false
              # multiline.match: after
              processors:
                - decode_json_fields:
                    process_array: true
                    max_depth: 10
                    target: ""
                    overwrite_keys: true
                    fields: ["message"]
                # - add_cloud_metadata:
                # - add_docker_metadata:
                #     labels.dedot: true
                # NOTE(review): with the kubernetes autodiscover provider, events
                # are already enriched with kubernetes.* metadata, so this
                # processor may be redundant — verify before keeping it.
                - add_kubernetes_metadata:
                    default_indexers.enabled: false
                    indexers:
                      # FIX: this block previously used the pod_uid indexer, but
                      # the logs_path matcher on /var/log/containers/ extracts a
                      # *container id* by default, so the indexer key and matcher
                      # key never agreed and no metadata was attached. Use the
                      # container indexer, matching the stderr template below.
                      - container:
                    default_matchers.enabled: false
                    matchers:
                      - logs_path:
                          logs_path: '/var/log/containers/'
                          # resource_type: 'pod'  # only needed with pod-keyed indexers
                #     labels.dedot: true
                #     annotations.dedot: true
            # --- stderr stream: same input plus multiline joining for
            # Java-style stack traces ("at ...", "...", "Caused by:") ---
            - type: container
              stream: stderr
              paths:
                # - "/var/lib/docker/containers/${data.kubernetes.container.id}/*.log"
                - /var/log/containers/*-${data.kubernetes.container.id}.log
              encoding: utf-8
              symlinks: true
              scan_frequency: 1s
              publisher_pipeline.disable_host: true
              multiline.pattern: '^[[:space:]]+(\bat\b|\.{3})|^Caused by:'
              multiline.negate: false
              multiline.match: after
              processors:
                - decode_json_fields:
                    process_array: true
                    max_depth: 10
                    target: ""
                    overwrite_keys: true
                    fields: ["message"]
                # - add_cloud_metadata:
                # - add_docker_metadata:
                #     labels.dedot: true
                - add_kubernetes_metadata:
                    default_indexers.enabled: false
                    indexers:
                      - container:
                    default_matchers.enabled: false
                    matchers:
                      - logs_path:
                          logs_path: '/var/log/containers/'
                          # resource_type: 'pod'
                #     labels.dedot: true
                #     annotations.dedot: true

# logging.level: debug
# logging.selectors: ["kubernetes","autodiscover"]

# FIX: use a real YAML boolean — the quoted "true" parses as a string.
monitoring.enabled: true
monitoring.elasticsearch.username: ${beats-username}
monitoring.elasticsearch.password: ${beats-password}

# Small in-memory queue, flushed aggressively (no flush timeout).
queue:
  mem:
    events: 2000
    flush.min_events: 1000
    flush.timeout: 0s
  # spool:
  #   file:
  #     # path: "${path.data}/spool.dat"
  #     size: 512MiB
  #     # page_size: 16KiB
  #   write:
  #     buffer_size: 45MiB
  #     flush.timeout: 0
  #     # flush.events: 1024
  #   read:
  #     flush.timeout: 1s

setup.dashboards.enabled: false
# Custom index template for the flb-k8s-* indices; existing templates
# are kept (overwrite: false).
setup.template:
  enabled: true
  overwrite: false
  order: 50
  name: flb-k8s
  pattern: "flb-k8s-*"
  settings.index:
    number_of_shards: 2
    number_of_replicas: 1
    number_of_routing_shards: 30
    refresh_interval: "30s"
    translog.durability: "async"
    mapping.ignore_malformed: "true"

# ILM disabled: index naming is handled manually via output.indices below.
setup.ilm:
  enabled: false

# output.console.pretty: true

output.elasticsearch:
  worker: 1
  # FIX: hosts expects a list of URLs; a bare scalar happens to work but
  # the list form is the documented shape.
  hosts: ["http://xxxxxxxxx:80"]
  username: ${filebeat-elastic-username}
  password: ${filebeat-elastic-password}
  bulk_max_size: 1000
  # Level 9 = maximum gzip compression — trades CPU for bandwidth.
  compression_level: 9
  # Route pubsub events and container events to separate indices.
  indices:
    - index: "flb-k8s-pubsub"
      when.contains:
        input.type: "google-pubsub"
    # NOTE(review): Beats format strings are usually written with nested
    # brackets, e.g. %{[kubernetes][namespace]} — confirm the dotted form
    # resolves on your version before relying on it.
    - index: "flb-k8s-%{[kubernetes.namespace]}"
      when.contains:
        input.type: "container"

setup.kibana:
  host: "http://xxxxxxx:80"
  username: ${filebeat_kibana_user}
  password: ${filebeat_kibana_pwd}

So, is there anything else I could do? I've tried without add_kubernetes_metadata processor and I got the same results.

This topic was automatically closed 28 days after the last reply. New replies are no longer allowed.