Hi,
We configured a cluster using the docker swarm based on the following yml file:
version: '3.8'
services:
node-master:
deploy:
replicas: 1
placement:
constraints:
- node.labels.elastic.replica == 0
restart_policy:
condition: on-failure
image: "docker.elastic.co/elasticsearch/elasticsearch:7.17.0"
environment:
- cluster.name=elk
- node.name=node-master
- network.publish_host=_eth1_
- discovery.seed_hosts=node-master,node1,node2
- bootstrap.memory_lock=true
volumes:
- /data/Elastic/ElasticLog:/usr/share/elasticsearch/logs:rw
- /data/Elastic/ElasticData:/usr/share/elasticsearch/data:rw
- /data/Elastic/ElasticConfig:/usr/share/elasticsearch/config:rw
ports:
- 9200:9200
- 9600:9600
networks:
- elk
node1:
deploy:
replicas: 1
placement:
constraints:
- node.labels.elastic.replica == 1
restart_policy:
condition: on-failure
image: "docker.elastic.co/elasticsearch/elasticsearch:7.17.0"
environment:
- cluster.name=elk
- node.name=node1
- network.publish_host=_eth1_
- discovery.seed_hosts=node-master,node1,node2
- bootstrap.memory_lock=true
volumes:
- /data/Elastic/ElasticLog:/usr/share/elasticsearch/logs:rw
- /data/Elastic/ElasticData:/usr/share/elasticsearch/data:rw
- /data/Elastic/ElasticConfig:/usr/share/elasticsearch/config:rw
ports:
- 9201:9200
networks:
- elk
node2:
deploy:
replicas: 1
placement:
constraints:
- node.labels.elastic.replica == 2
restart_policy:
condition: on-failure
image: "docker.elastic.co/elasticsearch/elasticsearch:7.17.0"
environment:
- cluster.name=elk
- node.name=node2
- network.publish_host=_eth1_
- discovery.seed_hosts=node-master,node1,node2
- bootstrap.memory_lock=true
volumes:
- /data/Elastic/ElasticLog:/usr/share/elasticsearch/logs:rw
- /data/Elastic/ElasticData:/usr/share/elasticsearch/data:rw
- /data/Elastic/ElasticConfig:/usr/share/elasticsearch/config:rw
ports:
- 9202:9200
networks:
- elk
kibana:
image: "docker.elastic.co/elasticsearch/elasticsearch:7.17.0"
environment:
ELASTICSEARCH_HOSTS: '["http://node-master:9200","http://node1:9200","http://node2:9200"]'
ports:
- 5601:5601
networks:
- elk
# Create networks manually before run
networks:
elk:
external: true
Everything looks fine, the 4 services(ELK nodes and 1 Kibana) are running, but when I look to the health of the cluster from Kibana
GET /_cluster/health
or also using the command
curl http://xxx:9200/_cluster/health?pretty
it shows just 1 node:
{
"cluster_name" : "elk",
"status" : "green",
"timed_out" : false,
"number_of_nodes" : 1,
"number_of_data_nodes" : 1,
"active_primary_shards" : 3,
"active_shards" : 3,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 0,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 100.0
}
{
"nodes" : {
"U5DpQZEaS4ig3mP8DF1zDg" : {
"name" : "node1"
}
}
}