Hello! The employee who set up our Elastic instance is no longer working here, and he did not document how he set things up or the passwords he created for the user that Metricbeat uses to connect to the Kibana/Elasticsearch server. Because I wasn't able to find the password for the metricbeat user, I decided to reset it. I then followed the docs for setting up Metricbeat on one of the new servers, but whenever I run the ./metricbeat.exe setup -e
command, the last log line gives me the error below. I have redacted hostnames and IP addresses.
Loading dashboards (Kibana must be running and reachable)
{"log.level":"info","@timestamp":"2023-03-17T14:40:16.735-0400","log.logger":"kibana","log.origin":{"file.name":"kibana/client.go","file.line":179},"message":"Kibana url: http://{hostname}:5601","service.name":"metricbeat","ecs.version":"1.6.0"}
{"log.level":"error","@timestamp":"2023-03-17T14:40:17.754-0400","log.origin":{"file.name":"instance/beat.go","file.line":1071},"message":"Exiting: error connecting to Kibana: fail to get the Kibana version: HTTP GET request to http://{hostname}:5601/api/status fails: fail to execute the HTTP GET request: Get \"http://{hostname}:5601/api/status\": dial tcp ***.**.**.***:5601: connectex: No connection could be made because the target machine actively refused it. (status=0). Response: ","service.name":"metricbeat","ecs.version":"1.6.0"}
Exiting: error connecting to Kibana: fail to get the Kibana version: HTTP GET request to http://{hostname}:5601/api/status fails: fail to execute the HTTP GET request: Get "http://{hostname}:5601/api/status": dial tcp ***.**.**.***:5601: connectex: No connection could be made because the target machine actively refused it. (status=0). Response:
I'm getting the same error on the Windows servers that were already set up and working, after updating the password.
I am able to reach Kibana through the browser at its normal URL, but browsing directly to port 5601 just gives an "unable to connect" error.
What I don't understand is that when I run ./metricbeat.exe -e
data does arrive in Kibana — but it stops as soon as the PowerShell window is closed (the -e flag runs Metricbeat in the foreground rather than as a Windows service, so closing the shell kills it).
We have some Linux servers using the same metricbeat user, and updating the password on them worked just fine, but I can't seem to make things work on the Windows servers.
metricbeat.yml on server that is trying to connect to kibana server (self-managed)
metricbeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml
  # Set to true to enable config reloading
  reload.enabled: false
  # Period on which files under path should be checked for changes
  #reload.period: 10s

# ======================= Elasticsearch template setting =======================
setup.template.settings:
  index.number_of_shards: 1
  index.codec: best_compression
  #_source.enabled: false

# ================================== General ===================================
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:
# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]
# Optional fields that you can specify to add additional information to the
# output.
#fields:
#  env: staging

setup.ilm.enabled: true
# FIX: the documented Beats setting is `setup.ilm.rollover_alias`, not
# `setup.ilm.rollover` — the original key is not a recognized option, so the
# alias name was never applied.
setup.ilm.rollover_alias: "metricbeat-8.6.2"
setup.ilm.pattern: "{now/d}-000001"

# =================================== Kibana ===================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:
  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  # NOTE(review): this host must accept connections on port 5601 from THIS
  # machine — see kibana.yml's server.host binding, which is the likely cause
  # of the "connection actively refused" error during `metricbeat setup`.
  host: "{hostname}"
  username: "******"
  password: "********"
  # Kibana Space ID
  # ID of the Kibana Space into which the dashboards should be loaded. By default,
  # the Default Space will be used.
  #space.id:

# ================================== Outputs ===================================
# Configure what output to use when sending the data collected by the beat.
# ---------------------------- Elasticsearch Output ----------------------------
output.elasticsearch:
  # Array of hosts to connect to.
  # NOTE(review): port 9201 differs from Elasticsearch's http.port (9200) —
  # presumably a reverse proxy terminates TLS here; verify against the server.
  hosts: ["hostname:9201"]
  # Protocol - either `http` (default) or `https`.
  protocol: "https"
  # Authentication credentials - either API key or username/password.
  username: "*****"
  password: "*********"
  # NOTE(review): verification_mode "none" skips certificate checks — fine for
  # testing, but consider supplying the CA instead for production.
  ssl.verification_mode: none
  allow_older_versions: true

# ================================= Processors =================================
# Configure processors to enhance or manipulate events generated by the beat.
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~
kibana.yml on elastic/kibana server
server.port: 5601
# FIX: "localhost" binds Kibana to the loopback interface only, so any remote
# host connecting to port 5601 gets "No connection could be made because the
# target machine actively refused it" — exactly the connectex error the
# Windows Metricbeat servers report during `metricbeat setup`. Bind to all
# interfaces (or the server's LAN IP) so remote Beats can reach /api/status.
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://localhost:9200"]
logging:
  appenders:
    default:
      type: file
      # NOTE(review): Kibana 8.x spells this key `fileName` (camelCase);
      # `filename` is the pre-8.x form — confirm against the installed version.
      filename: /var/log/kibana.log
      layout:
        type: pattern
  root:
    appenders: [default]
    level: error
# NOTE(review): `logging.verbose` was removed in Kibana 8.0 — if you upgrade
# to 8.x this line must be deleted or startup will fail on validation.
logging.verbose: false
monitoring.kibana.collection.enabled: false
#xpack.infra.sources.default.logAlias: "filebeat-*"
xpack.infra.sources.default.fields.timestamp: "@timestamp"
xpack.infra.sources.default.fields.message: ['message', '@message']
# setting up authentication
elasticsearch.username: "*****"
xpack.security.encryptionKey: "***********"
xpack.security.session.idleTimeout: "1h"
xpack.security.session.lifespan: "30d"
# The browser-facing URL (behind the HTTPS proxy that makes Kibana reachable
# without specifying port 5601).
server.publicBaseUrl: "https://{hostname}"
xpack.encryptedSavedObjects.encryptionKey: "************"
elasticsearch.yml on elastic/kibana server
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
path.data: /var/lib/elasticsearch
#
# Path to log files:
#
path.logs: /var/log/elasticsearch
#
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
#network.host: ***.***.*.*
# NOTE(review): bound to loopback only — remote clients cannot reach 9200
# directly. Metricbeat's output points at port 9201 over HTTPS, which
# presumably is a reverse proxy forwarding to this local port; confirm the
# proxy exists and is running, otherwise Beats on other hosts cannot ship data.
network.host: 127.0.0.1
#
# Set a custom port for HTTP:
#
http.port: 9200
#
# For more information, consult the network module documentation.
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when new node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
discovery.seed_hosts: ["127.0.0.1", "[::1]"]
#discovery.zen.ping.unicast.hosts: ["127.0.0.1"]
#
# Prevent the "split brain" by configuring the majority of nodes (total number of master-eligible nodes / 2 + 1):
#
#discovery.zen.minimum_master_nodes:
#cluster.initial_master_nodes: ***.**.**.***
#
# For more information, consult the zen discovery module documentation.
#
# ---------------------------------- Gateway -----------------------------------
#
# Block initial recovery after a full cluster restart until N nodes are started:
#
#gateway.recover_after_nodes: 3
#
# For more information, consult the gateway module documentation.
#
# ---------------------------------- Various -----------------------------------
#
# Require explicit names when deleting indices:
#
#action.destructive_requires_name: true
indices.query.bool.max_clause_count: 2000
#node.ingest: true
# resolving errors
# Authentication is enabled, which is why the metricbeat user/password reset
# mattered; discovery.type single-node skips cluster bootstrap checks.
xpack.security.enabled: true
discovery.type: single-node