I've been banging my head against this for a couple days. I have a local test environment set up via docker compose with Cassandra and an ELK stack. I've added a monitoring container that has the Cassandra logs mounted to it. Delivery of logs via filebeat -> logstash works fine. For the life of me I can't get metricbeat to deliver metrics.
The docker-compose.yml:
version: "3.7"
services:
# ----------------- CASSANDRA -----------------
cassandra:
build: ./cassandra
image: local/cassandra:latest
hostname: cassandra
networks:
default:
ipv4_address: 10.6.0.11
ports:
# CQL
- 9042:9042
# Jolokia
- 9000:9000
environment:
CASSANDRA_SEEDS: 10.6.0.11
CASSANDRA_USER: cassandra
CASSANDRA_PASSWORD: cassandra
CASSANDRA_PASSWORD_SEEDER: "yes"
restart: unless-stopped
volumes:
- cassandra_logs:/var/log
cassandra-web:
build: ./cassandra-web
image: local/cassandra-web:latest
depends_on:
- cassandra
ports:
- 3000:3000
environment:
CASSANDRA_HOST_IPS: 10.6.0.11
CASSANDRA_PORT: 9042
CASSANDRA_USER: cassandra
CASSANDRA_PASSWORD: cassandra
restart: unless-stopped
cassandra-monitoring-agent:
build: ./monitoring-agent
image: local/monitoring-agent:latest
depends_on:
- cassandra
- logstash
- elasticsearch
- kibana
environment:
LOGSTASH_HOST: logstash
ELASTICSEARCH_HOST: elasticsearch
ELASTICSEARCH_USERNAME: elastic
ELASTICSEARCH_PASSWORD: changeme
KIBANA_USERNAME: elastic
KIBANA_PASSWORD: changeme
KIBANA_HOST: kibana
LOGSTASH_USERNAME: logstash_internal
LOGSTASH_PASSWORD: changeme
CONTAINER_NAME: "cassandra-monitoring-agent"
SERVICE: "cassandra"
ENVIRONMENT: "local"
DBUS_SYSTEM_BUS_ADDRESS: "unix:path=/hostfs/var/run/dbus/system_bus_socket"
system.hostfs: /hostfs
cgroup: host
volumes:
# mounting logs from cassandra instance
- cassandra_logs:/opt/log
# mounting host procs to container for metricbeat
- /proc:/hostfs/proc:ro
- /sys/fs/cgroup:/hostfs/sys/fs/cgroup:ro
- /:/hostfs:ro
- /var/run/dbus/system_bus_socket:/hostfs/var/run/dbus/system_bus_socket:ro
# --------------- ELK ---------------
elasticsearch:
build:
context: elasticsearch/
args:
ELASTIC_VERSION: ${ELASTIC_VERSION}
volumes:
- ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro,Z
- elasticsearch:/usr/share/elasticsearch/data:Z
ports:
- 9200:9200
- 9300:9300
environment:
node.name: elasticsearch
ES_JAVA_OPTS: -Xms512m -Xmx512m
# Bootstrap password.
# Used to initialize the keystore during the initial startup of
# Elasticsearch. Ignored on subsequent runs.
ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
# Use single node discovery in order to disable production mode and avoid bootstrap checks.
# see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
discovery.type: single-node
networks:
- default
restart: unless-stopped
logstash:
build:
context: logstash/
args:
ELASTIC_VERSION: ${ELASTIC_VERSION}
volumes:
- ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z
- ./logstash/pipeline:/usr/share/logstash/pipeline:ro,Z
ports:
- 5044:5044
- 50000:50000/tcp
- 50000:50000/udp
- 9600:9600
environment:
LS_JAVA_OPTS: -Xms256m -Xmx256m
LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
networks:
- default
depends_on:
- elasticsearch
restart: unless-stopped
kibana:
build:
context: kibana/
args:
ELASTIC_VERSION: ${ELASTIC_VERSION}
volumes:
- ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro,Z
ports:
- 5601:5601
environment:
KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
networks:
- default
depends_on:
- elasticsearch
restart: unless-stopped
# The 'setup' service runs a one-off script which initializes users inside
# Elasticsearch — such as 'logstash_internal' and 'kibana_system' — with the
# values of the passwords defined in the '.env' file. It also creates the
# roles required by some of these users.
#
# This task only needs to be performed once, during the *initial* startup of
# the stack. Any subsequent run will reset the passwords of existing users to
# the values defined inside the '.env' file, and the built-in roles to their
# default permissions.
#
# By default, it is excluded from the services started by 'docker compose up'
# due to the non-default profile it belongs to. To run it, either provide the
# '--profile=setup' CLI flag to Compose commands, or "up" the service by name
# such as 'docker compose up setup'.
setup:
profiles:
- setup
build:
context: setup/
args:
ELASTIC_VERSION: ${ELASTIC_VERSION}
init: true
volumes:
- ./setup/entrypoint.sh:/entrypoint.sh:ro,Z
- ./setup/lib.sh:/lib.sh:ro,Z
- ./setup/roles:/roles:ro,Z
environment:
ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
METRICBEAT_INTERNAL_PASSWORD: ${METRICBEAT_INTERNAL_PASSWORD:-}
FILEBEAT_INTERNAL_PASSWORD: ${FILEBEAT_INTERNAL_PASSWORD:-}
HEARTBEAT_INTERNAL_PASSWORD: ${HEARTBEAT_INTERNAL_PASSWORD:-}
MONITORING_INTERNAL_PASSWORD: ${MONITORING_INTERNAL_PASSWORD:-}
BEATS_SYSTEM_PASSWORD: ${BEATS_SYSTEM_PASSWORD:-}
networks:
- default
depends_on:
- elasticsearch
networks:
default:
driver: bridge
ipam:
config:
- subnet: 10.6.0.0/24
volumes:
elasticsearch:
cassandra_logs:
Both filebeat and metricbeat run inside the cassandra-monitoring-agent
container via supervisor:
[supervisord]
logfile=/var/log/supervisord/supervisord.log
# logfile=/dev/stdout
logfile_maxbytes=50MB
logfile_backups=10
loglevel=error
pidfile=/var/run/supervisord.pid
nodaemon=true
minfds=1024
minprocs=200
user=root
childlogdir=/var/log/supervisord/
[program:filebeat]
command=filebeat -c /etc/filebeat/filebeat.yml
autostart=true
autorestart=true
stderr_logfile=/var/log/filebeat/err.log
stdout_logfile=/var/log/filebeat/out.log
logfile_maxbytes=10MB
logfile_backups=10
[program:metricbeat]
# command=metricbeat -c /etc/metricbeat/metricbeat.yml
command=metricbeat -c /etc/metricbeat/metricbeat.yml -e -d "*"
autostart=true
autorestart=true
stderr_logfile=/var/log/metricbeat/err.log
stdout_logfile=/var/log/metricbeat/out.log
logfile_maxbytes=10MB
logfile_backups=10
Filebeat delivers the logs just fine via logstash:
name: ${CONTAINER_NAME:filebeat_collector}
# tags: ["foo", "bar"]
fields:
env: ${ENVIRONMENT:unknown_environment}
service: ${SERVICE:unknown_service}
beat: filebeat
filebeat.inputs:
- type: filestream
enabled: true
paths:
# This should be volume mounted into the instance
- /opt/log/*.log
- /opt/log/cassandra/*.log
tail_files: true
filebeat.config.modules:
path: ${path.config}/modules.d/*.yml
reload.enabled: false
#reload.period: 10s
setup.template.settings:
index.number_of_shards: 1
#index.codec: best_compression
#_source.enabled: false
#setup.dashboards.enabled: false
#setup.dashboards.url:
setup.kibana:
host: "#{KIBANA_HOST}:5601"
space.id:
output.logstash:
hosts: ["${LOGSTASH_HOST}:5044"]
username: "${LOGSTASH_USERNAME}"
password: "${LOGSTASH_PASSWORD}"
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
#ssl.certificate: "/etc/pki/client/cert.pem"
#ssl.key: "/etc/pki/client/cert.key"
processors:
- add_host_metadata:
when.not.contains.tags: forwarded
- add_cloud_metadata: ~
- add_docker_metadata: ~
- add_kubernetes_metadata: ~
logging.level: debug
logging.to_stderr: true
logging.metrics.enabled: true
#logging.selectors: ["*"]
Metricbeat... I can't get to deliver either directly to Elasticsearch or to Logstash with either of the output directives:
name: ${CONTAINER_NAME:metricbeat_collector}
# tags: ["foo", "bar"]
fields:
env: ${ENVIRONMENT:unknown_environment}
service: ${SERVICE:unknown_service}
beat: metricbeat
metricbeat.config.modules:
# Glob pattern for configuration loading
path: ${path.config}/modules.d/*.yml
# Set to true to enable config reloading
reload.enabled: false
# Period on which files under path should be checked for changes
#reload.period: 10s
setup.template.settings:
index.number_of_shards: 1
index.codec: best_compression
#setup.dashboards.enabled: false
#setup.dashboards.url:
setup.kibana:
host: "${KIBANA_HOST}:5601"
username: "${KIBANA_USERNAME}"
password: "${KIBANA_PASSWORD}"
#space.id:
# This doesn't work either...
# output.elasticsearch:
# hosts: ["${ELASTICSEARCH_HOST}:9200"]
# preset: balanced
# #protocol: "https"
# #api_key: "id:api_key"
# username: "${ELASTICSEARCH_USERNAME}"
# password: "${ELASTICSEARCH_PASSWORD}"
output.logstash:
hosts: ["${LOGSTASH_HOST}:5044"]
username: "${LOGSTASH_USERNAME}"
password: "${LOGSTASH_PASSWORD}"
# ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# ssl.certificate: "/etc/pki/client/cert.pem"
# ssl.key: "/etc/pki/client/cert.key"
logging.level: debug
logging.to_stderr: true
logging.metrics.enabled: true
#logging.selectors: ["*"]
http.enabled: true
I can hit metricbeat via the HTTP endpoint.
I can see in the logs metrics are being gathered:
{"log.level":"debug","@timestamp":"2024-10-16T22:25:43.244Z","log.origin":{"function":"github.com/elastic/elastic-agent-system-metrics/metric/system/cgroup/cgv2.(*IOSubsystem).Get","file.name":"cgv2/io.go","file.line":69},"message":"io.pressure does not exist. Skipping.","service.name":"metricbeat","ecs.version":"1.6.0"}
{"log.level":"info","@timestamp":"2024-10-16T22:25:43.245Z","log.logger":"monitoring","log.origin":{"function":"github.com/elastic/beats/v7/libbeat/monitoring/report/log.(*reporter).logSnapshot","file.name":"log/log.go","file.line":187},"message":"Non-zero metrics in the last 30s","service.name":"metricbeat","monitoring":{"metrics":{"beat":{"cgroup":{"memory":{"mem":{"usage":{"bytes":222543872}}}},"cpu":{"system":{"ticks":90},"total":{"ticks":530,"value":530},"user":{"ticks":440}},"handles":{"limit":{"hard":1048576,"soft":1048576},"open":9},"info":{"ephemeral_id":"f22b7578-c60e-45e6-9dad-08fc4ea0582d","uptime":{"ms":1773034},"version":"8.14.2"},"memstats":{"gc_next":15059800,"memory_alloc":7729328,"memory_total":40200256,"rss":82644992},"runtime":{"goroutines":16}},"libbeat":{"config":{"module":{"running":0}},"output":{"events":{"active":0},"write":{"latency":{"histogram":{"count":0,"max":0,"mean":0,"median":0,"min":0,"p75":0,"p95":0,"p99":0,"p999":0,"stddev":0}}}},"pipeline":{"clients":0,"events":{"active":0}}},"system":{"load":{"1":5.78,"15":5.34,"5":5.42,"norm":{"1":0.4817,"15":0.445,"5":0.4517}}}},"ecs.version":"1.6.0"}}
metricbeat test output
seems fine:
{"log.level":"info","@timestamp":"2024-10-17T01:34:25.747Z","log.origin":{"function":"github.com/elastic/beats/v7/libbeat/cmd/instance.(*Beat).configure","file.name":"instance/beat.go","file.line":816},"message":"Home path: [/etc/metricbeat] Config path: [/etc/metricbeat] Data path: [/etc/metricbeat/data] Logs path: [/etc/metricbeat/logs]","service.name":"metricbeat","ecs.version":"1.6.0"}
{"log.level":"debug","@timestamp":"2024-10-17T01:34:25.747Z","log.logger":"beat","log.origin":{"function":"github.com/elastic/beats/v7/libbeat/cmd/instance.(*Beat).loadMeta","file.name":"instance/beat.go","file.line":935},"message":"Beat metadata path: /etc/metricbeat/data/meta.json","service.name":"metricbeat","ecs.version":"1.6.0"}
{"log.level":"info","@timestamp":"2024-10-17T01:34:25.748Z","log.origin":{"function":"github.com/elastic/beats/v7/libbeat/cmd/instance.(*Beat).configure","file.name":"instance/beat.go","file.line":824},"message":"Beat ID: eccabea0-4740-4f8b-9e53-feac068411a9","service.name":"metricbeat","ecs.version":"1.6.0"}
logstash: logstash:5044...
connection...
parse host... OK
dns lookup... OK
addresses: 10.6.0.4
dial up... OK
TLS... WARN secure connection disabled
talk to server... OK
as does test config.
If I run the kibana setup, it creates a metricbeat-*
index in the discovery section.
But... it's always empty. Nothing ever populates.
Help? I have no idea where to go next - not sure if logs are making it to Logstash or Elasticsearch, but nothing interesting I can see in the logs for either, but also nothing in the logs for metricbeat saying anything got rejected or couldn't connect.