MongoDB Integration generates a huge amount of temp files

Recently something generated 80 GB of temp files in under 15 minutes on my primary MongoDB. I disabled DiskUse, and then I got the following errors:
Plan executor error during find command

  "stats": {
    "works": 88450,
    "restoreState": 88,
    "saveState": 88,
    "advanced": 0,
    "spilledDataStorageSize": 0,
    "nReturned": 0,
    "inputStage": {
      "works": 88450,
      "restoreState": 88,
      "stage": "COLLSCAN",
      "saveState": 88,
      "advanced": 88450,
      "nReturned": 88450,
      "needYield": 0,
      "docsExamined": 88450,
      "needTime": 0,
      "isEOF": 0,
      "direction": "forward"
    },
    "failed": true,
    "usedDisk": false,
    "type": "simple",
    "isEOF": 0,
    "memLimit": 104857600,
    "stage": "SORT",
    "sortPattern": {
      "-$natural": 1
    },
    "needYield": 0,
    "needTime": 88449,
    "totalDataSizeSorted": 0,
    "spills": 0
  },
  "cmd": {
    "filter": {},
    "lsid": {
      "id": {
        "$uuid": "2617b402-0c61-4035-bc39-d616e412fbb3"
      }
    },
    "$readPreference": {
      "mode": "primary"
    },
    "$db": "local",
    "$clusterTime": {
      "clusterTime": {
        "$timestamp": {
          "t": 1704123123,
          "i": 1
        }
      },
      "signature": {
        "keyId": 0,
        "hash": {
          "$binary": {
            "base64": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=",
            "subType": "0"
          }
        }
      }
    },
    "find": "oplog.rs",
    "sort": {
      "-$natural": 1
    }
  },
  "error": {
    "code": 292,
    "codeName": "QueryExceededMemoryLimitNoDiskUseAllowed",
    "errmsg": "Sort exceeded memory limit of 104857600 bytes, but did not opt in to external sorting."
  }
}

As you can see, the integration builds the sort incorrectly — it sends "-$natural": 1 when it should be "$natural": -1.

Can I disable that query until they fix it?

I have found a similar topic with no answer:

Can I file an issue ticket somewhere?

1 Like

If you have a support contract, I would recommend starting there; otherwise, if you're using a Fleet-managed integration, you can file a ticket on GitHub here: Issues · elastic/integrations · GitHub

The issue has been acknowledged and fixed, and it will be available in the next release.