Filebeat not parsing nginx logs when output is not Elasticsearch

I am using Filebeat on Kubernetes to send logs to Logstash, and Logstash passes the logs on to Elasticsearch.
When Filebeat outputs directly to Elasticsearch, the nginx fields are parsed.
Here is an example document with the Elasticsearch output (Filebeat on Kubernetes -> Elasticsearch):


{
  "_index": "filebeat-6.6.0-2019.02.14",
  "_type": "doc",
  "_id": "gC5y7GgBCdtQ0C9azdQn",
  "_version": 1,
  "_source": {
    "kubernetes": {
      "container": {
        "name": "nginx"
      },
      "node": {
        "name": "ip-xx-x-xxx-xxx.ec2.internal"
      },
      "namespace": "default",
      "labels": {
        "app": "router",
        "tier": "frontend",
        "pod-template-hash": "3502309327"
      }
    },
    "offset": 122474,
    "nginx": {
      "access": {
        "referrer": "https://xxxxxxx?itemIndex=0&itemOpen=false",
        "response_code": "200",
        "remote_ip": "xxx.xxx.xxx.xx",
        "geoip": {
          "continent_name": "Asia",
          "region_iso_code": "IL-TA",
          "city_name": "Tel Aviv",
          "country_iso_code": "IL",
          "region_name": "Tel Aviv",
          "location": {
            "lon": xx.xxxx,
            "lat": xx.xxxx
          }
        },
        "method": "GET",
        "user_name": "-",
        "http_version": "1.1",
        "body_sent": {
          "bytes": "4387"
        },
        "remote_ip_list": [
          "xxx.xxx.xxx.xx"
        ],
        "url": "/assets/0.926.0/64x64.png",
        "user_agent": {
          "patch": "3578",
          "original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
          "major": "71",
          "minor": "0",
          "os": "Mac OS X 10.14.2",
          "os_minor": "14",
          "os_major": "10",
          "name": "Chrome",
          "os_name": "Mac OS X",
          "device": "Other"
        }
      }
    },
    "log": {
      "file": {
        "path": "/var/lib/docker/containers/497c3f493975c3134e03fbf49203381fa3b19e51e6c8be06a4b6bd7cdccb3557/497c3f493975c3134e03fbf49203381fa3b19e51e6c8be06a4b6bd7cdccb3557-json.log"
      }
    },
    "prospector": {
      "type": "docker"
    },
    "read_timestamp": "2019-02-14T14:39:31.066Z",
    "source": "/var/lib/docker/containers/497c3f493975c3134e03fbf49203381fa3b19e51e6c8be06a4b6bd7cdccb3557/497c3f493975c3134e03fbf49203381fa3b19e51e6c8be06a4b6bd7cdccb3557-json.log",
    "fileset": {
      "module": "nginx",
      "name": "access"
    },
    "input": {
      "type": "docker"
    },
    "@timestamp": "2019-02-14T14:39:31.000Z",
    "stream": "stdout",
    "beat": {
      "hostname": "filebeat-kpnrp",
      "name": "filebeat-kpnrp",
      "version": "6.6.0"
    },
    "host": {
      "name": "filebeat-kpnrp"
    },
    "event": {
      "dataset": "nginx.access"
    }
  },
  "fields": {
    "@timestamp": [
      "2019-02-14T14:39:31.000Z"
    ]
  },
  "highlight": {
    "kubernetes.container.name": [
      "@kibana-highlighted-field@nginx@/kibana-highlighted-field@"
    ],
    "event.module": [
      "@kibana-highlighted-field@nginx@/kibana-highlighted-field@"
    ],
    "fileset.module": [
      "@kibana-highlighted-field@nginx@/kibana-highlighted-field@"
    ]
  },
  "sort": [
    1550155171000
  ]
}

When I change the Filebeat output to Logstash (Filebeat on Kubernetes -> Logstash -> Elasticsearch), the nginx.* fields are missing.

I checked, and Logstash receives the messages with the fields unparsed; I can see this in the Logstash logger "logger.logstash.pipeline". So I think the Filebeat nginx module is not doing its parsing when the output is not Elasticsearch.
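If I understand correctly (I have not confirmed this), Filebeat modules do not parse log lines inside Filebeat itself: the nginx fields are extracted by an Elasticsearch ingest pipeline that Filebeat installs and references when its output is Elasticsearch. If that is right, then with the Logstash output the event bypasses the ingest pipeline entirely, which would explain what I am seeing. A possible fix is sketched after my Logstash config below.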

Filebeat config:

apiVersion: v1
data:
  filebeat.yml: |-
    filebeat.modules:
    - module: nginx

    filebeat.config:
      modules:
        path: /usr/share/filebeat/modules.d/*.yml
        # Reload module configs as they change:
        reload.enabled: false

    # To enable hints based autodiscover, remove `filebeat.config.inputs` configuration and uncomment this:
    filebeat.autodiscover:
      providers:
      - type: kubernetes
        hints.enabled: true

    processors:
    - add_cloud_metadata:

    fields:
      kubernetes.cluster: 'xxxxxxx'

    fields_under_root: 'true'

    output.elasticsearch:
      hosts: ['http://xxxxxxx']
      username: xxxxxx
      password: xxxxxx

    # output.logstash:
    #   hosts: ['logs-server:5044']
kind: ConfigMap
metadata:
  labels:
    k8s-app: filebeat
  name: filebeat-config
  namespace: monitoring

DaemonSet YAML:

apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  labels:
    k8s-app: filebeat
  name: filebeat
  namespace: monitoring
spec:
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      containers:
      - args: [
          "-c", "/usr/share/filebeat/filebeat.yml",
          "-e",
        ]
        image: docker.elastic.co/beats/filebeat:6.6.0
        imagePullPolicy: IfNotPresent
        name: filebeat
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        securityContext: {}
        volumeMounts:
        - mountPath: /usr/share/filebeat/filebeat.yml
          name: config
          readOnly: true
          subPath: filebeat.yml
        - mountPath: /usr/share/filebeat/data
          name: data
        - mountPath: /var/lib/docker/containers
          name: varlibdockercontainers
          readOnly: true
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      serviceAccount: filebeat
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      volumes:
      - configMap:
          defaultMode: 384
          name: filebeat-config
        name: config
      - hostPath:
          path: /var/lib/docker/containers
          type: ""
        name: varlibdockercontainers
      - hostPath:
          path: /var/lib/filebeat-data
          type: DirectoryOrCreate
        name: data
  updateStrategy:
    type: OnDelete

Logstash config:

input {
    beats {
        port => 5044
        host => "0.0.0.0"
    }
}
output {
    elasticsearch {
        hosts => ["xxxxxx"]
        user => "xxxxx"
        password => "xxxxx"
        ssl => false
    }
}
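Following the ingest-pipeline theory above, here is a minimal sketch of what I believe the output section of my Logstash config would need. This is an assumption, not a verified fix: it presumes the module pipelines are already loaded into Elasticsearch (for example by pointing Filebeat's output at Elasticsearch once so the nginx module installs them) and that Filebeat 6.5+ records the pipeline name in each event's [@metadata][pipeline] field:

output {
    # Events from Filebeat modules carry the ingest pipeline name in metadata
    if [@metadata][pipeline] {
        elasticsearch {
            hosts => ["xxxxxx"]
            user => "xxxxx"
            password => "xxxxx"
            ssl => false
            # Keep Filebeat's index naming so the module's template and mappings apply
            manage_template => false
            index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
            # Hand the raw event to the module's Elasticsearch ingest pipeline for parsing
            pipeline => "%{[@metadata][pipeline]}"
        }
    } else {
        # Events without a module pipeline are indexed as before
        elasticsearch {
            hosts => ["xxxxxx"]
            user => "xxxxx"
            password => "xxxxx"
            ssl => false
        }
    }
}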
