Good afternoon,
I am trying to deploy Metricbeat and Kibana to Kubernetes in Google Cloud using Helm. My aim is to collect data to benchmark Elasticsearch while running migrations, to find out what resources Elasticsearch needs in order to take the least time possible. I have successfully created a docker-compose setup to run Metricbeat and Kibana locally, and I have been able to reduce the time to run a migration on 230k nodes to 10 minutes.
However, when deploying Metricbeat to kubernetes using Helm, Metricbeat does not collect data from the 3 nodes of my Elasticsearch cluster.
My .yml file for deploying Metricbeat is the following:
---
daemonset:
  # Annotations to apply to the daemonset
  annotations: {}
  # additionals labels
  labels: {}
  affinity: {}
  # Include the daemonset
  enabled: true
  # Extra environment variables for Metricbeat container.
  envFrom: []
  # - configMapRef:
  #     name: config-secret
  extraVolumes: []
  # - name: extras
  #   emptyDir: {}
  extraVolumeMounts: []
  # - name: extras
  #   mountPath: /usr/share/extras
  #   readOnly: true
  hostAliases: []
  # - ip: "127.0.0.1"
  #   hostnames:
  #     - "foo.local"
  #     - "bar.local"
  hostNetworking: false
  # Allows you to add any config files in /usr/share/metricbeat
  # such as metricbeat.yml for daemonset
  metricbeatConfig:
    metricbeat.yml: |
      metricbeat:
        config:
          modules:
            path: /usr/share/metricbeat/modules.d/*.yml
            reload:
              enabled: false
      output:
        elasticsearch:
          hosts: ["http://elastic-production-monitoring-master:9200"]
      path:
        config: /usr/share/metricbeat
        data: /usr/share/metricbeat/data
        home: /usr/share/metricbeat
        logs: /usr/share/metricbeat/logs
      processors:
        - add_cloud_metadata: null
        - add_docker_metadata: null
      # ========================= Modules configuration ==========================
      metricbeat.modules:
        # ------------------------- Elasticsearch Module ---------------------------
        # NOTE(review): every daemonset pod scrapes this single URL. If the URL is
        # a Kubernetes Service load-balancing across the three Elasticsearch
        # nodes, each scrape hits a random node — which would explain metrics
        # showing up for 1, 2 or 3 nodes inconsistently. Either list all three
        # node endpoints in `hosts`, or move this module into the single-replica
        # deployment config with "scope: cluster" so one pod discovers and polls
        # every node — TODO confirm against the running Service definition.
        - module: elasticsearch
          metricsets:
            - node
            - node_stats
            - index
            - index_recovery
            - index_summary
            - shard
            # - ml_job
          period: 10s
          hosts: ["url-to-my-elasticsearch-cluster-that-wants-to-be-monitored"]
          # username: "elastic"
          # password: "changeme"
          # ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
          # index_recovery.active_only: true
          xpack.enabled: true
          # scope: node
      # ================================ Dashboards ===============================
      # These settings control loading the sample dashboards to the Kibana index.
      # Loading the dashboards is disabled by default and can be enabled either by
      # setting the options here, or by using the `-setup` CLI flag or the
      # `setup` command.
      # NOTE(review): in a daemonset, every pod on every node re-runs dashboard
      # setup at startup; consider loading dashboards once (e.g. a one-shot job)
      # instead of on each pod restart.
      setup.dashboards.enabled: true
      # ================================== Kibana =================================
      # Starting with Beats version 6.0.0, the dashboards are loaded via the
      # Kibana API. This requires a Kibana endpoint configuration.
      setup.kibana:
        # Kibana Host
        # Scheme and port can be left out and will be set to the default
        # (http and 5601). In case you specify an additional path, the scheme is
        # required: http://localhost:5601/path
        # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
        host: "http://kibana-kibana:5601"
  nodeSelector: {}
  # A list of secrets and their paths to mount inside the pod
  # This is useful for mounting certificates for security other sensitive values
  # Various pod security context settings. Bear in mind that many of these have an impact on metricbeat functioning properly.
  # - Filesystem group for the metricbeat user. The official elastic docker images always have an id of 1000.
  # - User that the container will execute as. Typically necessary to run as root (0) in order to properly collect host container logs.
  # - Whether to execute the metricbeat containers as privileged containers. Typically not necessarily unless running within environments such as OpenShift.
  securityContext:
    runAsUser: 0
    privileged: false
  resources:
    requests:
      cpu: "100m"
      memory: "100Mi"
    limits:
      cpu: "1000m"
      memory: "200Mi"
  tolerations: []
deployment:
  # Annotations to apply to the deployment
  annotations: {}
  # additionals labels
  labels: {}
  affinity: {}
  # Include the deployment
  enabled: true
  # Extra environment variables for Metricbeat container.
  envFrom: []
  # - configMapRef:
  #     name: config-secret
  # Allows you to add any config files in /usr/share/metricbeat
  extraVolumes: []
  # - name: extras
  #   emptyDir: {}
  extraVolumeMounts: []
  # - name: extras
  #   mountPath: /usr/share/extras
  #   readOnly: true
  # such as metricbeat.yml for deployment
  hostAliases: []
  # - ip: "127.0.0.1"
  #   hostnames:
  #     - "foo.local"
  #     - "bar.local"
  metricbeatConfig:
    metricbeat.yml: |
      metricbeat:
        config:
          modules:
            path: /usr/share/metricbeat/modules.d/*.yml
            reload:
              enabled: false
      output:
        elasticsearch:
          hosts: ["http://elastic-production-monitoring-master:9200"]
      path:
        config: /usr/share/metricbeat
        data: /usr/share/metricbeat/data
        home: /usr/share/metricbeat
        logs: /usr/share/metricbeat/logs
      processors:
        - add_cloud_metadata: null
        - add_docker_metadata: null
      # ========================= Modules configuration ==========================
      # NOTE(review): the original had a bare "metricbeat.modules:" here, which
      # YAML parses as null — the deployment pod was configured to collect
      # nothing. Declare an explicit (possibly empty) list instead. This
      # single-replica pod is the natural home for a cluster-scoped
      # elasticsearch module if per-node daemonset scraping proves inconsistent.
      metricbeat.modules: []
  nodeSelector: {}
  # A list of secrets and their paths to mount inside the pod
  # This is useful for mounting certificates for security other sensitive values
  securityContext:
    runAsUser: 0
    privileged: false
  resources:
    requests:
      cpu: "100m"
      memory: "100Mi"
    limits:
      cpu: "1000m"
      memory: "200Mi"
  tolerations: []
# Replicas being used for the kube-state-metrics metricbeat deployment
replicas: 1
extraContainers: ""
# - name: dummy-init
#   image: busybox
#   command: ['echo', 'hey']
extraInitContainers: ""
# - name: dummy-init
#   image: busybox
#   command: ['echo', 'hey']
# Root directory where metricbeat will write data to in order to persist registry data across pod restarts (file position and other metadata).
hostPathRoot: /var/lib
image: "docker.elastic.co/beats/metricbeat"
imageTag: "7.17.0"
imagePullPolicy: "IfNotPresent"
imagePullSecrets: []
livenessProbe:
  exec:
    command:
      - sh
      - -c
      - |
        #!/usr/bin/env bash -e
        curl --fail 127.0.0.1:5066
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  timeoutSeconds: 5
readinessProbe:
  exec:
    command:
      - sh
      - -c
      - |
        #!/usr/bin/env bash -e
        metricbeat test output
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  timeoutSeconds: 5
# Whether this chart should self-manage its service account, role, and associated role binding.
managedServiceAccount: true
clusterRoleRules:
  - apiGroups: [""]
    resources:
      - nodes
      - namespaces
      - events
      - pods
      - services
    verbs: ["get", "list", "watch"]
  - apiGroups: ["extensions"]
    resources:
      - replicasets
    verbs: ["get", "list", "watch"]
  - apiGroups: ["apps"]
    resources:
      - statefulsets
      - deployments
      - replicasets
    verbs: ["get", "list", "watch"]
  # NOTE(review): the original listed this nodes/stats rule twice (once in flow
  # style, once in block style); duplicate PolicyRule entries are redundant and
  # the second copy has been removed.
  - apiGroups: [""]
    resources:
      - nodes/stats
    verbs: ["get"]
  - nonResourceURLs:
      - "/metrics"
    verbs:
      - get
podAnnotations: {}
# iam.amazonaws.com/role: es-cluster
# Custom service account override that the pod will use
serviceAccount: ""
# Annotations to add to the ServiceAccount that is created if the serviceAccount value isn't set.
serviceAccountAnnotations: {}
# eks.amazonaws.com/role-arn: arn:aws:iam::111111111111:role/k8s.clustername.namespace.serviceaccount
# How long to wait for metricbeat pods to stop gracefully
terminationGracePeriod: 30
# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""
updateStrategy: RollingUpdate
# Override various naming aspects of this chart
# Only edit these if you know what you're doing
nameOverride: ""
fullnameOverride: ""
kube_state_metrics:
  enabled: true
  # host is used only when kube_state_metrics.enabled: false
  host: ""
# Add sensitive data to k8s secrets
secrets: []
# - name: "env"
#   value:
#     ELASTICSEARCH_PASSWORD: "LS1CRUdJTiBgUFJJVkFURSB"
#     api_key: ui2CsdUadTiBasRJRkl9tvNnw
# - name: "tls"
#   value:
#     ca.crt: |
#       LS0tLS1CRUdJT0K
#       LS0tLS1CRUdJT0K
#       LS0tLS1CRUdJT0K
#       LS0tLS1CRUdJT0K
#     cert.crt: "LS0tLS1CRUdJTiBlRJRklDQVRFLS0tLS0K"
#     cert.key.filepath: "secrets.crt" # The path to file should be relative to the `values.yaml` file.
# DEPRECATED
affinity: {}
envFrom: []
extraEnvs: []
extraVolumes: []
extraVolumeMounts: []
# Allows you to add any config files in /usr/share/metricbeat
# such as metricbeat.yml for both daemonset and deployment
metricbeatConfig: {}
nodeSelector: {}
podSecurityContext: {}
resources: {}
secretMounts: []
tolerations: []
labels: {}
Also, I know Metricbeat enables the system module metrics by default; I want that module to be disabled automatically every time the Metricbeat pods are recreated.
I am new to Kubernetes and Helm, and I have not succeeded in getting metrics from all 3 nodes in my cluster. Sometimes I get metrics from 1, 2 or 3 nodes, but it is not consistent when I redeploy.
Thanks