I deployed Packetbeat on a MySQL server to capture MySQL traffic and send the events to Kafka,
but I found that the QPS sent to Kafka was much lower than what I see in my monitoring system (I use Percona Monitoring and Management (PMM) to monitor the MySQL database).
After checking the Packetbeat log, I found a large number of "unmatched_responses", so here are my questions:
- What does "unmatched_responses" mean? I did not find an explanation in the documentation; did I miss something?
- As you can see in the monitoring screenshot below, the QPS of the MySQL server is about 20,000, but according to the Packetbeat log it only published approximately 200,000 events to Kafka per 30-second window (the log line below is printed every 30s), which works out to about 6,666 QPS, much lower than what PMM shows. Why is that?
- Even counting the huge number of "unmatched_responses" (approximately 140,000), the sum of "published" + "unmatched_responses" is about 340,000; divided by 30 that is roughly 11,333 QPS, still much lower than what PMM shows. How can I get the same numbers from Packetbeat as from PMM? (I double-check this arithmetic with the sketch after the log below.)
QPS as shown by PMM:

Packetbeat log:
```
2020-03-26T16:50:10.995+0800 INFO [monitoring] log/log.go:145 Non-zero metrics in the last 30s {"monitoring": {"metrics": {"beat":{"cpu":{"system":{"ticks":53030,"time":{"ms":6254}},"total":{"ticks":965690,"time":{"ms":140400},"value":965690},"user":{"ticks":912660,"time":{"ms":134146}}},"handles":{"limit":{"hard":4096,"soft":1024},"open":18},"info":{"ephemeral_id":"f65eecf1-0f47-4452-9e38-2f70d532a1a8","uptime":{"ms":210037}},"memstats":{"gc_next":50600592,"memory_alloc":51149800,"memory_total":230887573952,"rss":24576},"runtime":{"goroutines":62}},"libbeat":{"config":{"module":{"running":0}},"output":{"events":{"acked":200704,"active":256,"batches":785,"total":200960}},"outputs":{"kafka":{"bytes_read":1865808,"bytes_write":38026679}},"pipeline":{"clients":1,"events":{"active":306,"failed":5157,"published":200925,"total":206082},"queue":{"acked":200704}}},"mysql":{"unmatched_requests":540,"unmatched_responses":138995},"system":{"load":{"1":7.49,"15":7.42,"5":7.1,"norm":{"1":0.156,"15":0.1546,"5":0.1479}}},"tcp":{"dropped_because_of_gaps":17}}}}
```
Note the `"mysql":{"unmatched_requests":540,"unmatched_responses":138995}` counters in the middle of the line.
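For reference, this is how I turn each 30s metrics line into per-second rates (a minimal sketch; the log path is an assumption based on my `path.home`, and the field names are taken from the line above):

```python
import json

WINDOW_S = 30  # Packetbeat prints "Non-zero metrics" every 30s by default
LOG_PATH = "/data/app/packetbeat/logs/packetbeat.log"  # assumed location under path.home

with open(LOG_PATH) as f:
    for raw in f:
        if "Non-zero metrics" not in raw:
            continue
        # The structured payload starts at the '{"monitoring"' part of the line.
        m = json.loads(raw[raw.index('{"monitoring"'):])["monitoring"]["metrics"]
        published = m["libbeat"]["pipeline"]["events"]["published"]
        unmatched = m.get("mysql", {}).get("unmatched_responses", 0)
        print(f"published/s={published / WINDOW_S:.0f}  "
              f"unmatched/s={unmatched / WINDOW_S:.0f}  "
              f"combined/s={(published + unmatched) / WINDOW_S:.0f}")
```

For the line above this prints published/s=6698, unmatched/s=4633, combined/s=11331, i.e. the 6,666 and 11,333 figures I quoted from the rounded totals.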
packetbeat.yml (I masked the IP addresses):
```yaml
path.home: /data/app/packetbeat
max_procs: 6

logging:
  level: info
  to_files: true
  files:
    name: packetbeat.log
    keepfiles: 5
    rotateeverybytes: 20971520
    permissions: 0644
    interval: 168h

packetbeat.ignore_outgoing: true
packetbeat.interfaces.device: any
packetbeat.interfaces.type: af_packet
packetbeat.interfaces.buffer_size_mb: 2048

packetbeat.flows:
  enabled: false

processors:
  - add_locale:
      format: offset
  - drop_fields:
      fields: ["host", "ecs", "agent"]

# packetbeat.interfaces.bpf_filter: "port 3306 or port 7001 or port 7002"
packetbeat.protocols:
  # mysql
  - type: mysql
    enabled: true
    ports: [3306, 3307, 3308, 3309, 3310]
    send_request: false
    send_response: false
    max_rows: 100
    max_row_length: 10485760
    processors:
      - include_fields:
          fields: ["name", "tags", "client", "server", "type", "method", "event", "query", "mysql"]
      - add_fields:
          target: ''
          fields:
            cluster_name: ${cluster_name_mysql:mysql}
      - drop_fields:
          fields: ["event.dataset", "event.kind", "event.category"]
  # redis
  - type: redis
    enabled: false
    ports: [6379, 7001, 7002]
    send_request: false
    send_response: false
    queue_max_bytes: 1048576
    queue_max_messages: 20000
    processors:
      - include_fields:
          fields: ["name", "tags", "client", "server", "type", "method", "resource", "event", "redis"]
      - add_fields:
          target: ''
          fields:
            cluster_name: ${cluster_name_redis:redis}
      - drop_fields:
          fields: ["event.dataset", "event.kind", "event.category"]

queue.mem:
  events: 10240
  flush.min_events: 256
  flush.timeout: 1s

# output configuration
output:
  # file
  file:
    enabled: false
    path: "/data/app/packetbeat/data"
    filename: "packetbeat_file.out"
    number_of_file: 5
    rotate_every_kb: 20480
    #codec.json:
    #  pretty: true
    #codec.format:
    #  string: '%{[@timestamp]} %{[message]}'
  # kafka
  kafka:
    enabled: true
    hosts: ["xxx.xxx.xxx.xxx:9092", "xxx.xxx.xxx.xxx:9092", "xxx.xxx.xxx.xxx:9092", "xxx.xxx.xxx.xxx:9092", "xxx.xxx.xxx.xxx:9092"]
    topic: "packetbeat_mysql_01"
    partition.round_robin:
      reachable_only: true
    metadata:
      refresh_frequency: 5m
      full: false
    #codec.json:
    #  pretty: true
    #codec.format:
    #  string: '%{[@timestamp]} %{[message]}'
```