Slow Data Collection and Limited Event Count in Filebeat

I am experiencing an issue with Filebeat where the number of events collected at a time is capped at either 50 or 100, and the rate at which data reaches Elasticsearch is noticeably slow.
The problem is intermittent: it does not happen all the time, only occasionally.
Filebeat is configured to collect logs from only one specified file.
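For context, the relevant parts of my filebeat.yml look essentially like this (the path and host below are placeholders, not the real values):

filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /var/log/app/app.log        # placeholder; only this one file is harvested

output.elasticsearch:
  hosts: ["http://localhost:9200"]  # placeholder for the actual Elasticsearch endpoint
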
Below is a periodic monitoring metrics snapshot from the Filebeat log that might be relevant:

{
  "monitoring": {
    "metrics": {
      "beat": {
        "cgroup": {
          "memory": {
            "mem": {
              "usage": {
                "bytes": 155648
              }
            }
          }
        },
        "cpu": {
          "system": {
            "ticks": 2705320,
            "time": {
              "ms": 3
            }
          },
          "total": {
            "ticks": 10235400,
            "time": {
              "ms": 12
            },
            "value": 10235400
          },
          "user": {
            "ticks": 7530080,
            "time": {
              "ms": 9
            }
          }
        },
        "handles": {
          "limit": {
            "hard": 1048576,
            "soft": 1048576
          },
          "open": 11
        },
        "info": {
          "ephemeral_id": "af19442e-a54f-4742-90d8-90d84f35dc00",
          "uptime": {
            "ms": 1535340065
          },
          "version": "7.16.2"
        },
        "memstats": {
          "gc_next": 42721040,
          "memory_alloc": 27386584,
          "memory_total": 1462737301576,
          "rss": 156786688
        },
        "runtime": {
          "goroutines": 46
        }
      },
      "filebeat": {
        "events": {
          "added": 100,
          "done": 100
        },
        "harvester": {
          "open_files": 0,
          "running": 0
        }
      },
      "libbeat": {
        "config": {
          "module": {
            "running": 0
          }
        },
        "output": {
          "events": {
            "acked": 100,
            "active": 350,
            "batches": 2,
            "total": 100
          },
          "read": {
            "bytes": 1734
          },
          "write": {
            "bytes": 127596
          }
        },
        "pipeline": {
          "clients": 1,
          "events": {
            "active": 4117,
            "published": 100,
            "total": 100
          },
          "queue": {
            "acked": 100
          }
        }
      },
      "registrar": {
        "states": {
          "current": 0
        }
      },
      "system": {
        "load": {
          "1": 0.09,
          "15": 0.09,
          "5": 0.1,
          "norm": {
            "1": 0.0056,
            "15": 0.0056,
            "5": 0.0063
          }
        }
      }
    }
  }
}
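
As far as I understand, in 7.x the Elasticsearch output defaults to bulk_max_size: 50 and the internal memory queue defaults to 4096 events, which may be related to the 50/100-event batches I am seeing; the snapshot above also shows libbeat.pipeline.events.active at 4117, right around that default queue size. These are the settings I have been considering adjusting, although I am not certain they are the right knobs (the values shown are my understanding of the defaults, not overrides I have applied):

output.elasticsearch:
  worker: 1               # default; more workers could raise indexing throughput
  bulk_max_size: 50       # default; matches the small batches reported above

queue.mem:
  events: 4096            # default queue size; events.active in the snapshot is near this value
  flush.min_events: 2048  # default
  flush.timeout: 1s       # default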

Any insights or recommendations on configuration changes or diagnostic steps to resolve this would be greatly appreciated.

Thank you in advance for your assistance!