Filebeat flags files as inactive after roll

Hello,

We are facing an issue on our production server. We log to a file using Log4Net with a rolling file appender.
Sometimes, when the file rolls, Filebeat flags it as inactive and no more logs are sent to our Logstash instance.
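
The appender itself is a standard Log4Net RollingFileAppender. We have not reproduced our exact appender XML here, so the snippet below is only a rough sketch of its shape; the rolling style, date pattern and layout are assumptions rather than our production values:

<!-- Rough sketch of a date-rolling Log4Net appender; values are illustrative only -->
<appender name="RollingFile" type="log4net.Appender.RollingFileAppender">
  <file value="C:\Gaming1\APR\BettingSlipManagement\Logs\BSM.txt" />
  <appendToFile value="true" />
  <rollingStyle value="Date" />
  <datePattern value="'.'yyyyMMdd" />
  <staticLogFileName value="true" />
  <lockingModel type="log4net.Appender.FileAppender+MinimalLock" />
  <layout type="log4net.Layout.PatternLayout">
    <!-- Produces lines starting with "2019-11-17 23:59:50,003", which is what
         the multiline.pattern in the Filebeat config below matches on -->
    <conversionPattern value="%date [%thread] %-5level %logger - %message%newline" />
  </layout>
</appender>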

Here is the Filebeat configuration:
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - C:\Gaming1\APR\BettingSlipManagement\Logs\BSM.txt
  fields:
    service: bsm
  multiline.pattern: '^\[?\d{2,4}-\d{2}-\d{2,4} \d{2}:\d{2}:\d{2}[,.]\d{3}\]?'
  multiline.negate: true
  multiline.match: after
- type: log
  enabled: true
  paths:
    - C:**\Logs\serv1.txt
  fields:
    service: serv1
  multiline.pattern: '^\[?\d{2,4}-\d{2}-\d{2,4} \d{2}:\d{2}:\d{2}[,.]\d{3}\]?'
  multiline.negate: true
  multiline.match: after
- type: log
  enabled: true
  paths:
    - C:**\Logs\serv2.txt
  fields:
    service: serv2
  multiline.pattern: '^\[?\d{2,4}-\d{2}-\d{2,4} \d{2}:\d{2}:\d{2}[,.]\d{3}\]?'
  multiline.negate: true
  multiline.match: after

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

setup.template.settings:
  index.number_of_shards: 1

fields:
  environment: production
  country: pt
  partner: estoril

output.logstash:
  hosts: [ "***" ]
  ssl.certificate_authorities: [ "***" ]
  ssl.certificate: ***/beat.crt
  ssl.key: ***/beat.key

processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
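
We have not changed any of the harvester close/clean settings, so the inputs run with the defaults. For reference, the options that govern when a harvester gives up on a rolled file would sit on each input roughly as in the sketch below (illustrative values, not something we currently set explicitly):

- type: log
  enabled: true
  paths:
    - C:\Gaming1\APR\BettingSlipManagement\Logs\BSM.txt
  fields:
    service: bsm
  # Close a harvester after the file has produced no new lines for this long
  # (default 5m, which matches the "close_inactive of 5m0s reached" messages below).
  close_inactive: 5m
  # Close the harvester as soon as the appender renames the file during a roll.
  close_renamed: true
  # Drop registry state for files that have been removed from disk.
  clean_removed: true
  # How often the input re-scans the configured paths for new or rolled files.
  scan_frequency: 10s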

Here are the Filebeat logs:
2019-11-17T23:59:50.003Z INFO [monitoring] log/log.go:145 Non-zero metrics in the last 30s {"monitoring": {"metrics": {"beat":{"cpu":{"system":{"ticks":4977796,"time":{"ms":343}},"total":{"ticks":6638202,"time":{"ms":343},"value":6638202},"user":{"ticks":1660406}},"handles":{"open":465},"info":{"ephemeral_id":"1de5841e-625d-4ef3-a266-56f141950438","uptime":{"ms":466710119}},"memstats":{"gc_next":8175824,"memory_alloc":5838712,"memory_total":396617788864,"rss":-888832},"runtime":{"goroutines":46}},"filebeat":{"events":{"active":3,"added":111,"done":108},"harvester":{"open_files":2,"running":2}},"libbeat":{"config":{"module":{"running":0}},"output":{"events":{"acked":108,"batches":18,"total":108},"read":{"bytes":630},"write":{"bytes":25998}},"pipeline":{"clients":3,"events":{"active":4,"published":111,"total":111},"queue":{"acked":108}}},"registrar":{"states":{"current":3,"update":108},"writes":{"success":18,"total":18}}}}}
2019-11-18T00:00:04.958Z INFO log/harvester.go:253 Harvester started for file: C:**\Logs\serv1.txt
2019-11-18T00:00:09.910Z INFO log/harvester.go:253 Harvester started for file: C:**\Logs\serv2.txt
2019-11-18T00:00:19.995Z INFO [monitoring] log/log.go:145 Non-zero metrics in the last 30s {"monitoring": {"metrics": {"beat":{"cpu":{"system":{"ticks":4978250,"time":{"ms":454}},"total":{"ticks":6638656,"time":{"ms":454},"value":6638656},"user":{"ticks":1660406}},"handles":{"open":469},"info":{"ephemeral_id":"1de5841e-625d-4ef3-a266-56f141950438","uptime":{"ms":466740108}},"memstats":{"gc_next":8162800,"memory_alloc":4209992,"memory_total":396642895056,"rss":995328},"runtime":{"goroutines":56}},"filebeat":{"events":{"active":2,"added":104,"done":102},"harvester":{"open_files":4,"running":4,"started":2}},"libbeat":{"config":{"module":{"running":0}},"output":{"events":{"acked":100,"batches":15,"total":100},"read":{"bytes":525},"write":{"bytes":19993}},"pipeline":{"clients":3,"events":{"active":6,"filtered":2,"published":102,"total":104},"queue":{"acked":100}}},"registrar":{"states":{"current":5,"update":102},"writes":{"success":17,"total":17}}}}}
...
2019-11-18T00:04:50.000Z INFO [monitoring] log/log.go:145 Non-zero metrics in the last 30s {"monitoring": {"metrics": {"beat":{"cpu":{"system":{"ticks":4981484,"time":{"ms":266}},"total":{"ticks":6642874,"time":{"ms":406},"value":6642874},"user":{"ticks":1661390,"time":{"ms":140}}},"handles":{"open":469},"info":{"ephemeral_id":"1de5841e-625d-4ef3-a266-56f141950438","uptime":{"ms":467010109}},"memstats":{"gc_next":8141232,"memory_alloc":4103880,"memory_total":396880342912,"rss":647168},"runtime":{"goroutines":56}},"filebeat":{"events":{"added":84,"done":84},"harvester":{"open_files":4,"running":4}},"libbeat":{"config":{"module":{"running":0}},"output":{"events":{"acked":84,"batches":15,"total":84},"read":{"bytes":525},"write":{"bytes":18272}},"pipeline":{"clients":3,"events":{"active":0,"published":84,"total":84},"queue":{"acked":84}}},"registrar":{"states":{"current":5,"update":84},"writes":{"success":15,"total":15}}}}}
2019-11-18T00:05:01.003Z INFO log/harvester.go:278 File is inactive: C:**\Logs\serv1.txt. Closing because close_inactive of 5m0s reached.
2019-11-18T00:05:08.020Z INFO log/harvester.go:278 File is inactive: C:**\Logs\serv2.txt. Closing because close_inactive of 5m0s reached.
2019-11-18T00:05:19.999Z INFO [monitoring] log/log.go:145 Non-zero metrics in the last 30s {"monitoring": {"metrics": {"beat":{"cpu":{"system":{"ticks":4981750,"time":{"ms":266}},"total":{"ticks":6643453,"time":{"ms":579},"value":6643453},"user":{"ticks":1661703,"time":{"ms":313}}},"handles":{"open":467},"info":{"ephemeral_id":"1de5841e-625d-4ef3-a266-56f141950438","uptime":{"ms":467040108}},"memstats":{"gc_next":5209232,"memory_alloc":2696552,"memory_total":396903572888,"rss":131072},"runtime":{"goroutines":46}},"filebeat":{"events":{"active":12,"added":96,"done":84},"harvester":{"closed":2,"open_files":2,"running":2}},"libbeat":{"config":{"module":{"running":0}},"output":{"events":{"acked":80,"batches":14,"total":80},"read":{"bytes":490},"write":{"bytes":15483}},"pipeline":{"clients":3,"events":{"active":12,"filtered":4,"published":92,"total":96},"queue":{"acked":80}}},"registrar":{"states":{"cleanup":2,"current":3,"update":84},"writes":{"success":16,"total":16}}}}}

Could you please share your Log4Net appender configuration?
