Can't create a new 3-node cluster using Docker Swarm

I am trying to set up a new 3-node Elasticsearch cluster within a swarm.

Docker version: 20.10.12
Elastic version: 7.16.3

When deploying the stack, we see the cluster-formation warnings below in the docker service logs:

elastic_elasticsearch.0.sbuktacuv6q4@002693.domain.local    | {"type": "server", "timestamp": "2022-04-01T14:24:23,260Z", "level": "WARN", "component": "o.e.c.c.ClusterFormationFailureHelper", "cluster.name": "uk-elastic-cluster", "node.name": "002693.domain.local", "message": "master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and this node must discover master-eligible nodes [002695.domain.local, 002694.domain.local, 002693.domain.local] to bootstrap a cluster: have discovered [{002693.domain.local}{9CO3S8z0Q-KGiioWAr8t6g}{7OQnbXClTEiQk34urOViYQ}{10.0.87.3}{10.0.87.3:9300}{cdfhilmrstw}]; discovery will continue using [10.0.87.2:9300, 10.0.87.4:9300] from hosts providers and [{002693.domain.local}{9CO3S8z0Q-KGiioWAr8t6g}{7OQnbXClTEiQk34urOViYQ}{10.0.87.3}{10.0.87.3:9300}{cdfhilmrstw}] from last-known cluster state; node term 0, last-accepted version 0 in term 0" }

elastic_elasticsearch.0.d9js76r9wsj6@002694.domain.local    | {"type": "server", "timestamp": "2022-04-01T14:24:25,615Z", "level": "WARN", "component": "o.e.c.c.ClusterFormationFailureHelper", "cluster.name": "uk-elastic-cluster", "node.name": "002694.domain.local", "message": "master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and this node must discover master-eligible nodes [002695.domain.local, 002694.domain.local, 002693.domain.local] to bootstrap a cluster: have discovered [{002694.domain.local}{E77e4VEYQxyAY9pmGAMyoQ}{chz4GZVsR6GuC9Qkojgumw}{10.0.87.2}{10.0.87.2:9300}{cdfhilmrstw}]; discovery will continue using [10.0.87.3:9300, 10.0.87.4:9300] from hosts providers and [{002694.domain.local}{E77e4VEYQxyAY9pmGAMyoQ}{chz4GZVsR6GuC9Qkojgumw}{10.0.87.2}{10.0.87.2:9300}{cdfhilmrstw}] from last-known cluster state; node term 0, last-accepted version 0 in term 0" }

elastic_elasticsearch.0.qcu47pu9xuaa@002695.domain.local    | {"type": "server", "timestamp": "2022-04-01T14:24:30,493Z", "level": "WARN", "component": "o.e.c.c.ClusterFormationFailureHelper", "cluster.name": "uk-elastic-cluster", "node.name": "002695.domain.local", "message": "master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and this node must discover master-eligible nodes [002695.domain.local, 002694.domain.local, 002693.domain.local] to bootstrap a cluster: have discovered [{002695.domain.local}{FG-q1XijSIilDOnQMzbGTA}{QxcKMF_HRLKRu60MOlU_7g}{10.0.87.4}{10.0.87.4:9300}{cdfhilmrstw}]; discovery will continue using [10.0.87.3:9300, 10.0.87.2:9300] from hosts providers and [{002695.domain.local}{FG-q1XijSIilDOnQMzbGTA}{QxcKMF_HRLKRu60MOlU_7g}{10.0.87.4}{10.0.87.4:9300}{cdfhilmrstw}] from last-known cluster state; node term 0, last-accepted version 0 in term 0" }
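
For reference, we are tailing these with something along the lines of the following (the service name is taken from the task names above):

docker service logs -f elastic_elasticsearch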

This is the Docker Compose file we are using:

version: "3.7"
services:
  elasticsearch:
    image: elasticsearch:7.16.3
    hostname: "{{.Node.Hostname}}"
    labels:
      - "co.elastic.logs/enabled=true"
      - "co.elastic.logs/module=elasticsearch"
    environment:
      - node.name={{.Node.Hostname}}
      - cluster.name=uk-elastic-cluster
      - "ES_JAVA_OPTS=-Xms16g -Xmx16g"
      - LOG4J_FORMAT_MSG_NO_LOOKUPS=true
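      # discovery.seed_hosts is the swarm service name, resolved via the overlay network's DNS;
      # cluster.initial_master_nodes must match each node's node.name exactly (here the node hostnames)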
      - discovery.seed_hosts=elasticsearch
      - cluster.initial_master_nodes=002693.domain.local,002694.domain.local,002695.domain.local
      - node.ml=true
      - xpack.ml.enabled=true
      - bootstrap.memory_lock=false
    volumes:
      - elasticsearch:/usr/share/elasticsearch/data
      - /etc/localtime:/etc/localtime:ro
      - /opt/shared-mount/config/certs:/usr/share/elasticsearch/config/certs
      - /opt/shared-mount/config/certs/elasticsearch.keystore:/usr/share/elasticsearch/config/elasticsearch.keystore
      - /opt/shared-mount/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /opt/shared-backup:/opt/shared-backup
    networks:
      - elastic
    deploy:
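      # global = one Elasticsearch task per swarm node; dnsrr = no service VIP, so the service
      # name resolves to the individual task IPs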
      mode: global
      endpoint_mode: dnsrr
      resources:
        limits:
          memory: 20G
  kibana:
    image: kibana:7.16.3
    hostname: "{{.Node.Hostname}}"
    labels:
      - "co.elastic.logs/enabled=true"
      - "co.elastic.logs/module=kibana"
    environment:
      - "ELASTICSEARCH_URL=https://elasticsearch:9200"
      - server.port=5601
    networks:
      - elastic
    ports:
      - 443:5601
    volumes:
      - /opt/shared-mount/config/kibana.yml:/usr/share/kibana/config/kibana.yml
      - /etc/localtime:/etc/localtime:ro
      - /opt/shared-mount/config/certs/:/etc/certs
    deploy:
      mode: replicated
      replicas: 1
      update_config:
        failure_action: rollback
        parallelism: 1
        delay: 10s
      restart_policy:
        condition: on-failure
        delay: 10s
        max_attempts: 3

  nginx:
    image: nginx:1.19.2-alpine
    labels:
      - "co.elastic.logs/enabled=true"
      - "co.elastic.logs/module=nginx"
    networks:
      - elastic
    ports:
      - 9200:9200
    volumes:
      - /opt/shared-mount/config/certs/:/etc/certs
      - /etc/localtime:/etc/localtime:ro
    deploy:
      mode: global
    command: |
      /bin/sh -c "echo '
      user nobody nogroup;
      worker_processes auto;
      events {
        worker_connections 1024;
      }
      http {
        client_max_body_size 4g;
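        # 127.0.0.11 is Docker's embedded DNS resolver, used to resolve the elasticsearch service name at runtime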
        resolver 127.0.0.11 ipv6=off;
        server {
          listen 9200 ssl;
          ssl_certificate     /etc/certs/cert.cer;
          ssl_certificate_key /etc/certs/key.key;
          location / {
            proxy_set_header Connection keep-alive;
            set $$url https://elasticsearch:9200;
            proxy_pass $$url;
            proxy_set_header  Host $$http_host;
            proxy_set_header  X-Real-IP $$remote_addr;
            proxy_set_header  X-Forwarded-For $$proxy_add_x_forwarded_for;
            proxy_socket_keepalive on;
          }
        }
      }' | tee /etc/nginx/nginx.conf && nginx -t && nginx -g 'daemon off;'"
volumes:
  elasticsearch:
    driver: local
networks:
  elastic:
    driver: overlay
    attachable: true
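
We deploy it with something like this (stack name "elastic", matching the task names in the logs; the compose filename here is just an assumption):

docker stack deploy -c docker-compose.yml elastic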

Any suggestions on what's going on and how to resolve it? Thanks.
