How to read the monitoring log of Filebeat?

I'm working on tuning Filebeat's log processing by changing the output.elasticsearch settings.

There are 7 Elasticsearch targets in the hosts list.

workers: 2
bulk_max_size: 2048
flush_interval: 5
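
Roughly, the relevant part of my filebeat.yml looks like this. It's only a minimal sketch: the hosts are placeholders for the 7 real targets, and the three settings are the ones listed above.

output.elasticsearch:
  # placeholders for the 7 real Elasticsearch targets
  hosts: ["es01:9200", "es02:9200", "es03:9200", "es04:9200", "es05:9200", "es06:9200", "es07:9200"]
  workers: 2
  bulk_max_size: 2048
  flush_interval: 5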

The node generates around 600 log lines per second.

Is there any documentation that explains the monitoring log output? Should we care about metrics other than the output section? Here is one sample monitoring entry from the Filebeat log (pretty-printed):

{
  "monitoring": {
    "metrics": {
      "beat": {
        "cgroup": {
          "cpu": {
            "cfs": {
              "period": {
                "us": 100000
              }
            },
            "id": "user.slice"
          },
          "cpuacct": {
            "id": "user.slice",
            "total": {
              "ns": 663150142856580
            }
          },
          "memory": {
            "id": "user.slice",
            "mem": {
              "limit": {
                "bytes": 9223372036854772000
              },
              "usage": {
                "bytes": 6989246464
              }
            }
          }
        },
        "cpu": {
          "system": {
            "ticks": 370,
            "time": {
              "ms": 375
            }
          },
          "total": {
            "ticks": 3160,
            "time": {
              "ms": 3165
            },
            "value": 3160
          },
          "user": {
            "ticks": 2790,
            "time": {
              "ms": 2790
            }
          }
        },
        "handles": {
          "limit": {
            "hard": 65535,
            "soft": 65535
          },
          "open": 16
        },
        "info": {
          "ephemeral_id": "f7da6692-85a3-42c6-a09b-35883457168b",
          "uptime": {
            "ms": 5964
          }
        },
        "memstats": {
          "gc_next": 59097552,
          "memory_alloc": 36246952,
          "memory_total": 256353272,
          "rss": 75694080
        },
        "runtime": {
          "goroutines": 35
        }
      },
      "filebeat": {
        "events": {
          "active": 8105,
          "added": 8113,
          "done": 8
        },
        "harvester": {
          "closed": 1,
          "open_files": 0,
          "running": 0,
          "started": 1
        }
      },
      "libbeat": {
        "config": {
          "module": {
            "running": 0
          }
        },
        "output": {
          "events": {
            "active": 4096,
            "batches": 5,
            "total": 4096
          },
          "read": {
            "bytes": 17817
          },
          "type": "elasticsearch",
          "write": {
            "bytes": 4054959
          }
        },
        "pipeline": {
          "clients": 0,
          "events": {
            "active": 4116,
            "failed": 1,
            "filtered": 3996,
            "published": 4116,
            "retry": 6010,
            "total": 8113
          }
        }
      },
      "registrar": {
        "states": {
          "current": 7,
          "update": 7
        },
        "writes": {
          "success": 7,
          "total": 7
        }
      },
      "system": {
        "cpu": {
          "cores": 48
        },
        "load": {
          "1": 20.49,
          "5": 21.41,
          "15": 20.44,
          "norm": {
            "1": 0.4269,
            "5": 0.446,
            "15": 0.4258
          }
        }
      }
    }
  }
}
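
For reference, this is roughly how I pull out the counters I actually look at once one of these entries has been parsed. The field names are copied from the JSON above; the monitoring_sample.json filename and the summarize helper are only illustrative.

import json

# Pull out the counters I watch from one parsed monitoring entry.
# Field names are copied from the monitoring JSON; the get() defaults only
# keep a missing branch from raising.
def summarize(entry):
    metrics = entry["monitoring"]["metrics"]
    out = metrics.get("libbeat", {}).get("output", {}).get("events", {})
    pipe = metrics.get("libbeat", {}).get("pipeline", {}).get("events", {})
    fb = metrics.get("filebeat", {}).get("events", {})
    return {
        "output.events.total": out.get("total", 0),
        "output.events.acked": out.get("acked", 0),
        "output.events.active": out.get("active", 0),
        "output.events.batches": out.get("batches", 0),
        "pipeline.events.published": pipe.get("published", 0),
        "pipeline.events.retry": pipe.get("retry", 0),
        "pipeline.events.failed": pipe.get("failed", 0),
        "filebeat.events.added": fb.get("added", 0),
        "filebeat.events.done": fb.get("done", 0),
    }

if __name__ == "__main__":
    # monitoring_sample.json is just the JSON document above saved to a file
    with open("monitoring_sample.json") as f:
        print(json.dumps(summarize(json.load(f)), indent=2))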

The active value of -1140 is kind of confusing.

"output":{"events":{"acked":9332,"active":-1140,"batches":8,"total":8192}

Regards // Hugo

Hi,

Yeah, the negative value does look concerning. For what it's worth, in your snippet active is exactly total - acked (8192 - 9332 = -1140), so more events were acknowledged in that window than were handed to the output during it. You could consider opening a GitHub issue for Beats.

Reported: [libbeat] The output.events.active metric can be negative · Issue #31782 · elastic/beats · GitHub
