Dockerised Elasticsearch cluster on different hosts

Hi,
I am trying to build a centralized monitoring and logging system using the ELK stack. I am trying to create a Dockerised Elasticsearch cluster spanning 2 hosts: there are 2 Elasticsearch nodes on host1 and 1 node on host2.
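
To summarise the layout I am aiming for (just a sketch; the node names and port mappings are the ones used in the compose files below):

host1:
  elsatic-node-1: { http: 9200, transport: 9300 }
  elsatic-node-2: { transport: 9301 }      # host port 9301 mapped to container transport port 9300
host2:
  elsatic-node-5: { http: 9200, transport: 9300 }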

I have defined a custom Docker network on both hosts like this:

docker network create --subnet=172.16.0.0/24 monitoring_logging

HOST1
docker-compose.yml:

version: '2'
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:6.6.0
    ports:
      - 9200:9200
      - 9300:9300
    user: root
    volumes:
      - ./elasticsearch-master:/usr/share/elasticsearch/config
    restart: always
    ulimits:
      memlock:
        soft: -1
        hard: -1
    labels:
      container_group: logging
    networks:
      default:
        ipv4_address: 172.16.0.39
    environment:
      - node.name=elsatic-node-1
      - bootstrap.memory_lock=true
      - cluster.name=docker-cluster

  elasticsearch2:
    image: docker.elastic.co/elasticsearch/elasticsearch:6.6.0
    ports:
      - 9301:9300
    user: root
    volumes:
      - ./elasticsearch-master:/usr/share/elasticsearch/config
    restart: always
    ulimits:
      memlock:
        soft: -1
        hard: -1
    labels:
      container_group: logging
    networks:
      default:
        ipv4_address: 172.16.0.40
    environment:
      - node.name=elsatic-node-2
      - bootstrap.memory_lock=true
      - cluster.name=docker-cluster


networks:
  default:
    external:
      name: monitoring_logging

and the elasticsearch.yml file is:

network.host: 0.0.0.0

xpack.security.enabled: true

node.master: true
node.data: true
discovery.zen.ping.unicast.hosts: ["172.16.0.39","172.16.0.40:9300","192.168.23.44:9300"]

HOST2
I have defined a custom Docker network on this host as well, using:

docker network create --subnet=172.16.0.0/24 monitoring_logging

docker-compose.yml:

version: '2'


services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:6.6.0
    ports:
      - 9200:9200
      - 9300:9300
    user: root
    networks:
      default:
        ipv4_address: 172.16.0.42
    volumes:
      - ./elasticsearch-master:/usr/share/elasticsearch/config
    restart: always
    ulimits:
      memlock:
        soft: -1
        hard: -1
    labels:
      container_group: logging
    network_mode: "host"
    environment:
      - node.name=elsatic-node-5
      - bootstrap.memory_lock=true
      - cluster.name=docker-cluster

networks:
  default:
    external:
      name: monitoring_logging

elasticsearch.yml:

xpack.security.enabled: true

node.master: true
node.data: true
discovery.zen.ping.unicast.hosts: ["172.16.0.42:9300","192.168.23.50:9300","192.168.23.50:9301"]

I am getting this error on host2:

[2019-02-20T08:31:31,530][WARN ][o.e.d.z.ZenDiscovery     ] [elsatic-node-1] failed to connect to master [{elsatic-node-3}{RwRDYBB8S2aQIYXR8i8g8Q}{4g8Idg9pQ7Okf3NHQ7OBjQ}{172.16.0.41}{172.16.0.41:9300}{ml.machine_memory=33729269760, ml.max_open_jobs=20, xpack.installed=true, ml.enabled=true}], retrying...
org.elasticsearch.transport.ConnectTransportException: [elsatic-node-3][172.16.0.41:9300] connect_exception
	at org.elasticsearch.transport.TcpTransport$ChannelsConnectedListener.onFailure(TcpTransport.java:1569) ~[elasticsearch-6.6.0.jar:6.6.0]
	at org.elasticsearch.action.ActionListener.lambda$toBiConsumer$2(ActionListener.java:99) ~[elasticsearch-6.6.0.jar:6.6.0]
	at org.elasticsearch.common.concurrent.CompletableContext.lambda$addListener$0(CompletableContext.java:42) ~[elasticsearch-core-6.6.0.jar:6.6.0]
	at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:859) ~[?:?]
	at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:837) ~[?:?]
	at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:506) ~[?:?]
	at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2088) ~[?:?]
	at org.elasticsearch.common.concurrent.CompletableContext.completeExceptionally(CompletableContext.java:57) ~[elasticsearch-core-6.6.0.jar:6.6.0]
	at org.elasticsearch.transport.netty4.Netty4TcpChannel.lambda$new$1(Netty4TcpChannel.java:72) ~[?:?]
	at io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:511) ~[?:?]
	at io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:504) ~[?:?]
	at io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:483) ~[?:?]
	at io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:424) ~[?:?]
	at io.netty.util.concurrent.DefaultPromise.tryFailure(DefaultPromise.java:121) ~[?:?]
	at io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.fulfillConnectPromise(AbstractNioChannel.java:327) ~[?:?]
	at io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:343) ~[?:?]
	at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644) ~[?:?]
	at io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:556) ~[?:?]
	at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:510) ~[?:?]
	at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:470) ~[?:?]
	at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:909) ~[?:?]
	at java.lang.Thread.run(Thread.java:834) [?:?]
Caused by: io.netty.channel.AbstractChannel$AnnotatedNoRouteToHostException: No route to host: 172.16.0.41/172.16.0.41:9300
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:779) ~[?:?]
	at io.netty.channel.socket.nio.NioSocketChannel.doFinishConnect(NioSocketChannel.java:327) ~[?:?]
	at io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:340) ~[?:?]
	... 6 more
Caused by: java.net.NoRouteToHostException: No route to host
	at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:779) ~[?:?]
	at io.netty.channel.socket.nio.NioSocketChannel.doFinishConnect(NioSocketChannel.java:327) ~[?:?]
	at io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:340) ~[?:?]
	... 6 more

My Kibana dashboard shows 2 nodes instead of 3.

What am I doing wrong here?

I think that the problem is that the node at 192.168.23.50:9301 is running internally on transport port 9300 and is "announcing" to the other nodes that it can be reached at 192.168.23.50:9300, which is incorrect.

I think that for this node you need to set transport.publish_port: 9301.
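
For example, something like this for the second node on host1 (just a sketch of the idea, keeping your existing 9301:9300 mapping; I haven't tested it):

  elasticsearch2:
    ports:
      - 9301:9300                      # host port 9301 -> container transport port 9300
    environment:
      - node.name=elsatic-node-2
      - cluster.name=docker-cluster
      - transport.publish_port=9301    # advertise the host-mapped port to the other nodes

The same setting could go in that node's elasticsearch.yml instead, but since both containers on host1 mount the same config directory, the environment variable is the easier place for a per-node value.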

Sorry, I forgot to mention the host IPs.
Host1-> 192.168.23.50
Host2-> 192.168.23.44

If you look at the host1 docker-compose file, I have mapped port 9300 of the Docker container to port 9301 of the host, since Elasticsearch is running inside a Docker container. The port has been mapped correctly in the docker-compose file, so adding transport.publish_port: 9301 didn't solve the issue.

Can you share your docker-compose.yml file for the 192.168.23.50 host? I mean the final version with the modification I suggested.

version: '2'
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:6.6.0
    ports:
      - 9200:9200
      - 9300:9300
    user: root
    volumes:
      - ./elasticsearch-master:/usr/share/elasticsearch/config
    restart: always
    ulimits:
      memlock:
        soft: -1
        hard: -1
    labels:
      container_group: logging
    networks:
      default:
        ipv4_address: 172.16.0.39
    environment:
      - node.name=elsatic-node-1
      - bootstrap.memory_lock=true
      - cluster.name=docker-cluster

  elasticsearch2:
    image: docker.elastic.co/elasticsearch/elasticsearch:6.6.0
    ports:
      - 9301:9301
    user: root
    volumes:
      - ./elasticsearch-master:/usr/share/elasticsearch/config
    restart: always
    ulimits:
      memlock:
        soft: -1
        hard: -1
    labels:
      container_group: logging
    networks:
      default:
        ipv4_address: 172.16.0.40
    environment:
      - node.name=elsatic-node-2
      - bootstrap.memory_lock=true
      - cluster.name=docker-cluster
      - transport.publish_port=9301


networks:
  default:
    external:
      name: monitoring_logging

I tried multiple things but did not succeed. I'm definitely not a Docker expert, sorry.

That said, I think this is incorrect:

  elasticsearch2:
    image: docker.elastic.co/elasticsearch/elasticsearch:6.6.0
    ports:
      - 9301:9301

It should be:

  elasticsearch2:
    image: docker.elastic.co/elasticsearch/elasticsearch:6.6.0
    ports:
      - 9301:9300

I'm not sure if you would need to split the docker-compose file into 2 parts: one for the 9300 instance and one for the 9301 instance.

Then use the external IP address in discovery.zen.ping.unicast.hosts and transport.publish_host, something like the sketch below.
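
Untested, and the IPs are the ones you listed above, but roughly this for the second node on 192.168.23.50:

  elasticsearch2:
    ports:
      - 9301:9300                                # host port 9301 -> container transport port 9300
    environment:
      - node.name=elsatic-node-2
      - cluster.name=docker-cluster
      - transport.publish_host=192.168.23.50     # advertise the external host IP, not the Docker bridge IP
      - transport.publish_port=9301              # advertise the host-mapped port

And in elasticsearch.yml, point discovery at the externally reachable addresses:

discovery.zen.ping.unicast.hosts: ["192.168.23.50:9300","192.168.23.50:9301","192.168.23.44:9300"]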

Not sure if this would work.

I tried this as well, but still getting the same result.
