It now takes about 7 minutes just to update a single document.
The setup is a single node with an 8 GB heap, 30 shards, about 6.2 GB of data in total, and 2 million documents. This is far too slow; if anyone can tell me how to fix it, I would appreciate it.
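For context, the operation is just a single-document partial update, along the lines of the sketch below (index name, doc id, and field are placeholders; the path is the 7.x-style _update endpoint, so adjust it for 6.x). Timing it with curl directly helps rule out client-side overhead, and it is worth confirming the client is not sending refresh=true or refresh=wait_for, since that forces a refresh on every update:

curl -s -o /dev/null -w 'update took %{time_total}s\n' \
  -X POST 'http://10.0.17.21:9200/my-index/_update/1' \
  -H 'Content-Type: application/json' \
  -d '{"doc": {"status": "updated"}}'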
iostat
Linux 4.15.0-20-generic (max-master) 12/18/2020 _x86_64_ (32 CPU)
avg-cpu: %user %nice %system %iowait %steal %idle
15.46 0.00 8.95 0.09 0.00 75.51
Device tps kB_read/s kB_wrtn/s kB_read kB_wrtn
sda 222.31 92.21 5448.83 23575235 1393042693
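Note that iostat with no interval argument only reports averages since boot, so the numbers above can mask a short burst of disk activity during the slow update. Per-second extended samples taken while reproducing the problem are more telling (standard iostat flags: extended stats, 1-second interval, 10 reports):

iostat -x 1 10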
GET _nodes/hot_threads
::: {es-node1}{VxAcSdn8QJKRBu705pFVDA}{ozapTgHcRDaIgu9Cb-tB9w}{10.244.0.19}{10.244.0.19:9300}{ml.machine_memory=66933485568, xpack.installed=true, ml.max_open_jobs=20, ml.enabled=true}
Hot threads at 2020-12-18T08:36:29.005Z, interval=500ms, busiestThreads=3, ignoreIdleThreads=true:
100.4% (502ms out of 500ms) cpu usage by thread 'elasticsearch[es-node1][write][T#7]'
2/10 snapshots sharing following 205 elements
...
22.7% (113.6ms out of 500ms) cpu usage by thread 'elasticsearch[es-node1][write][T#4]'
6/10 snapshots sharing following 192 elements
...
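Since one write thread is pegged at 100% CPU, capturing hot threads a few more times while an update is in flight, with a slightly longer sampling window, should show which part of the write path it is spending its time in (interval and threads are standard hot_threads parameters):

curl 'http://10.0.17.21:9200/_nodes/hot_threads?interval=1s&threads=5'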
GET /_nodes/stats/thread_pool?human&pretty
{
"_nodes": {
"total": 1,
"successful": 1,
"failed": 0
},
"cluster_name": "sd",
"nodes": {
"VxAcSdn8QJKRBu705pFVDA": {
"timestamp": 1608280589005,
"name": "es-node1",
"transport_address": "10.244.0.19:9300",
"host": "10.244.0.19",
"ip": "10.244.0.19:9300",
"roles": [
"master",
"data",
"ingest"
],
"attributes": {
"ml.machine_memory": "66933485568",
"xpack.installed": "true",
"ml.max_open_jobs": "20",
"ml.enabled": "true"
},
"thread_pool": {
"analyze": {
"threads": 0,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 0,
"completed": 0
},
"ccr": {
"threads": 0,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 0,
"completed": 0
},
"fetch_shard_started": {
"threads": 1,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 16,
"completed": 167
},
"fetch_shard_store": {
"threads": 0,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 0,
"completed": 0
},
"flush": {
"threads": 1,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 4,
"completed": 368
},
"force_merge": {
"threads": 0,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 0,
"completed": 0
},
"generic": {
"threads": 24,
"queue": 0,
"active": 1,
"rejected": 0,
"largest": 24,
"completed": 10495
},
"get": {
"threads": 8,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 8,
"completed": 389
},
"index": {
"threads": 0,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 0,
"completed": 0
},
"listener": {
"threads": 3,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 3,
"completed": 3
},
"management": {
"threads": 5,
"queue": 0,
"active": 2,
"rejected": 0,
"largest": 5,
"completed": 7194
},
"ml_autodetect": {
"threads": 0,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 0,
"completed": 0
},
"ml_datafeed": {
"threads": 0,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 0,
"completed": 0
},
"ml_utility": {
"threads": 1,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 1,
"completed": 1
},
"refresh": {
"threads": 4,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 4,
"completed": 51313
},
"rollup_indexing": {
"threads": 0,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 0,
"completed": 0
},
"search": {
"threads": 13,
"queue": 0,
"active": 1,
"rejected": 0,
"largest": 13,
"completed": 217319
},
"search_throttled": {
"threads": 0,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 0,
"completed": 0
},
"security-token-key": {
"threads": 0,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 0,
"completed": 0
},
"snapshot": {
"threads": 0,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 0,
"completed": 0
},
"warmer": {
"threads": 4,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 4,
"completed": 399
},
"watcher": {
"threads": 0,
"queue": 0,
"active": 0,
"rejected": 0,
"largest": 0,
"completed": 0
},
"write": {
"threads": 8,
"queue": 0,
"active": 6,
"rejected": 0,
"largest": 8,
"completed": 1401
}
}
}
}
}
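For a compact live view of just the write pool while reproducing the slow update, the _cat API is easier to watch than the full stats dump above:

curl 'http://10.0.17.21:9200/_cat/thread_pool/write?v&h=name,active,queue,rejected,completed'

It may also be worth dumping the settings of the affected index (my-index is a placeholder), since refresh_interval, replica count, and translog durability are the settings that most directly affect write latency on a single node:

curl 'http://10.0.17.21:9200/my-index/_settings?include_defaults=true&pretty'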