Issue with JSON logs and field 'log.level'

Hi,

I decided to upgrade our log format to follow the Elastic Common Schema (ECS).

So a sample log line from my application looks like this:

{
  "@timestamp": "2019-06-28T09:40:16.551Z",
  "service": {
    "runtime": {
      "name": "node",
      "version": "11.15.0"
    },
    "language": {
      "name": "javascript"
    }
  },
  "host": {
    "architecture": "x64",
    "name": "example-1234",
    "os": {
      "platform": "linux",
      "version": "4.15.0-52-generic"
    }
  },
  "process": {
    "executable": "/usr/bin/node",
    "args": ["/usr/bin/node", "/app/src/service.js"],
    "pid": 1,
    "title": "node",
    "working_directory": "/app",
    "ppid": 0
  },
  "log": {
    "level": "info"
  },
  "message": "Progress 188993.15625/200020 => 94.48712941205879%"
}

The problem is that when Filebeat handles these lines, it replaces my root log field with its own, and the resulting document in Elasticsearch looks like this:

{
  "_index": "filebeat-7.2.0-2019.06.28-000001",
  "_type": "_doc",
  "_id": "GJ92nWsB4ZW2WJqvBJL0",
  "_version": 1,
  "_score": null,
  "_source": {
    "@timestamp": "2019-06-28T09:40:16.552Z",
    "log": {
      "offset": 1912699,
      "file": {
        "path": "/var/lib/docker/containers/c3379c07d0c76dbd186905bca01402ed6557eddf44f5496ab57bd6cedf503427/c3379c07d0c76dbd186905bca01402ed6557eddf44f5496ab57bd6cedf503427-json.log"
      }
    },
    "process": {
      "working_directory": "/app",
      "ppid": 0,
      "executable": "/usr/bin/node",
      "args": [
        "/usr/bin/node",
        "/app/src/service.js"
      ],
      "pid": 1,
      "title": "node"
    },
    "message": "Progress 188993.15625/200020 => 94.48712941205879%",
    "agent": {
      "hostname": "filebeat-d87tm",
      "id": "e34017b1-2d91-4058-89a5-0431065429e2",
      "version": "7.2.0",
      "type": "filebeat",
      "ephemeral_id": "59460577-d51a-483a-8215-f2e840e64323"
    },
    "host": {
      "name": "worker",
      "os": {
        "platform": "linux",
        "version": "4.15.0-52-generic"
      },
      "architecture": "x64"
    },
    "stream": "stdout",
    "service": {
      "runtime": {
        "name": "node",
        "version": "11.15.0"
      },
      "language": {
        "name": "javascript"
      }
    },
    "input": {
      "type": "docker"
    },
    "kubernetes": {
      "container": {
        "name": "example"
      },
      "namespace": "staging",
      "replicaset": {
        "name": "example-69667bc7c7"
      },
      "labels": {
        "pod-template-hash": "69667bc7c7",
      },
      "pod": {
        "uid": "86bc7220-97fa-11e9-b137-a81e84f1212b",
        "name": "example-69667bc7c7-mmhlc"
      },
      "node": {
        "name": "worker"
      }
    },
    "ecs": {
      "version": "1.0.0"
    },
    "container": {
      "id": "c3379c07d0c76dbd186905bca01402ed6557eddf44f5496ab57bd6cedf503427",
      "labels": {
        "annotation_io_kubernetes_container_hash": "7cbf1a68",
        "annotation_io_kubernetes_container_restartCount": "0",
        "io_kubernetes_sandbox_id": "7ae0779cb8f4f95fa14e34b2d6c96c62bea4799098800ecd23bf8e889468c4e1",
        "com_nvidia_cudnn_version": "7.6.0.64",
        "io_kubernetes_container_logpath": "/var/log/pods/staging_example-69667bc7c7-mmhlc_86bc7220-97fa-11e9-b137-a81e84f1212b/example/0.log",
        "io_kubernetes_pod_uid": "86bc7220-97fa-11e9-b137-a81e84f1212b",
        "annotation_io_kubernetes_container_terminationMessagePath": "/dev/termination-log",
        "annotation_io_kubernetes_container_terminationMessagePolicy": "File",
        "io_kubernetes_container_name": "example",
        "io_kubernetes_pod_name": "example-69667bc7c7-mmhlc",
        "io_kubernetes_pod_namespace": "staging",
        "maintainer": "NVIDIA CORPORATION <cudatools@nvidia.com>",
        "io_kubernetes_docker_type": "container",
        "annotation_io_kubernetes_pod_terminationGracePeriod": "30"
      }
    }
  },
  "fields": {
    "@timestamp": [
      "2019-06-28T09:40:16.552Z"
    ],
    "suricata.eve.timestamp": [
      "2019-06-28T09:40:16.552Z"
    ]
  },
  "highlight": {
    "kubernetes.container.name": [
      "@kibana-highlighted-field@example@/kibana-highlighted-field@"
    ],
    "message": [
      "@kibana-highlighted-field@Progress@/kibana-highlighted-field@ @kibana-highlighted-field@188993.15625@/kibana-highlighted-field@/@kibana-highlighted-field@200020@/kibana-highlighted-field@ => @kibana-highlighted-field@94.48712941205879@/kibana-highlighted-field@%"
    ]
  },
  "sort": [
    1561714816552
  ]
}

I want to keep my log.level field. Any ideas?

It's not a perfect solution, but one option is to set overwrite_keys to true in your decode_json_fields configuration (see the decode_json_fields docs). This will overwrite Filebeat's log annotation with whatever is in your JSON logs. I'm not sure how to preserve both your log field and Filebeat's annotation, but perhaps your logs have enough other metadata that you don't need it.
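
For reference, a minimal sketch of what that could look like in filebeat.yml, assuming your JSON arrives as a string in the message field (the fields list and the empty target are assumptions about your setup, not the one true config):

processors:
  # Parse the JSON string in "message" and merge the decoded keys into
  # the root of the event; overwrite_keys lets the decoded values win
  # over keys Filebeat already set (e.g. its log.offset / log.file.path).
  - decode_json_fields:
      fields: ["message"]
      target: ""
      overwrite_keys: true
      add_error_key: true

Setting target to an empty string is what places the decoded keys at the event root; without it, the decoded object just replaces the message string, and your log.level would end up nested under message instead of competing with Filebeat's own log annotation.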

It's already enabled, and the bug is still there.
