Docker Compose error - cannot connect Logstash to Elasticsearch

I am trying to create an Elasticsearch cluster with Logstash and Kibana using Docker Compose. I have been trying for a long time but I keep getting an error. My log lines are:


kib01       | {"type":"log","@timestamp":"2021-09-21T06:55:23Z","tags":["warning","plugins","reporting","config"],"pid":7,"message":"Chromium sandbox provides an additional layer of protection, but is not supported for Linux Centos 7.8.2003 OS. Automatically setting 'xpack.reporting.capture.browser.chromium.disableSandbox: true'."}
kib01       | {"type":"log","@timestamp":"2021-09-21T06:55:23Z","tags":["error","elasticsearch","data"],"pid":7,"message":"Request error, retrying\nGET http://es02:9200/_nodes?filter_path=nodes.*.version%2Cnodes.*.http.publish_address%2Cnodes.*.ip => connect ECONNREFUSED 172.21.0.2:9200"}
kib01       | {"type":"log","@timestamp":"2021-09-21T06:55:23Z","tags":["error","elasticsearch","monitoring"],"pid":7,"message":"Request error, retrying\nGET http://es02:9200/_xpack => connect ECONNREFUSED 172.21.0.2:9200"}
kib01       | {"type":"log","@timestamp":"2021-09-21T06:55:23Z","tags":["error","elasticsearch","monitoring"],"pid":7,"message":"Request error, retrying\nGET http://es01:9200/_xpack => connect ECONNREFUSED 172.21.0.6:9200"}
kib01       | {"type":"log","@timestamp":"2021-09-21T06:55:23Z","tags":["error","elasticsearch","data"],"pid":7,"message":"Request error, retrying\nGET http://es01:9200/_nodes?filter_path=nodes.*.version%2Cnodes.*.http.publish_address%2Cnodes.*.ip => connect ECONNREFUSED 172.21.0.6:9200"}
kib01       | {"type":"log","@timestamp":"2021-09-21T06:55:23Z","tags":["error","elasticsearch","monitoring"],"pid":7,"message":"Request error, retrying\nGET http://es03:9200/_xpack => connect ECONNREFUSED 172.21.0.3:9200"}
kib01       | {"type":"log","@timestamp":"2021-09-21T06:55:23Z","tags":["error","elasticsearch","data"],"pid":7,"message":"Request error, retrying\nGET http://es03:9200/_nodes?filter_path=nodes.*.version%2Cnodes.*.http.publish_address%2Cnodes.*.ip => connect ECONNREFUSED 172.21.0.3:9200"}

kib01       | {"type":"log","@timestamp":"2021-09-21T06:55:23Z","tags":["warning","plugins","licensing"],"pid":7,"message":"License information could not be obtained from Elasticsearch due to Error: No Living connections error"}
kib01       | {"type":"log","@timestamp":"2021-09-21T06:55:23Z","tags":["warning","plugins","monitoring","monitoring"],"pid":7,"message":"X-Pack Monitoring Cluster Alerts will not be available: No Living connections"}
kib01       | {"type":"log","@timestamp":"2021-09-21T06:55:24Z","tags":["info","savedobjects-service"],"pid":7,"message":"Waiting until all Elasticsearch nodes are compatible with Kibana before starting saved objects migrations..."}
kib01       | {"type":"log","@timestamp":"2021-09-21T06:55:24Z","tags":["error","savedobjects-service"],"pid":7,"message":"Unable to retrieve version information from Elasticsearch nodes."}
kib01       | {"type":"log","@timestamp":"2021-09-21T06:55:24Z","tags":["warning","elasticsearch","monitoring"],"pid":7,"message":"Unable to revive connection: http://es03:9200/"}
kib01       | {"type":"log","@timestamp":"2021-09-21T06:55:24Z","tags":["warning","elasticsearch","monitoring"],"pid":7,"message":"No living connections"}
kib01       | {"type":"log","@timestamp":"2021-09-21T06:55:24Z","tags":["warning","plugins","licensing"],"pid":7,"message":"License information could not be obtained from Elasticsearch due to Error: No Living connections error"}
es03        | {"type": "server", "timestamp": "2021-09-21T06:55:24,471Z", "level": "INFO", "component": "o.e.t.NettyAllocator", "cluster.name": "es-docker-cluster", "node.name": "es03", "message": "creating NettyAllocator with the following configs: [name=unpooled, factors={es.unsafe.use_unpooled_allocator=false, g1gc_enabled=true, g1gc_region_size=1mb, heap_size=512mb}]" }
es03        | {"type": "server", "timestamp": "2021-09-21T06:55:24,674Z", "level": "INFO", "component": "o.e.d.DiscoveryModule", "cluster.name": "es-docker-cluster", "node.name": "es03", "message": "using discovery type [zen] and seed hosts providers [settings]" }
es01        | {"type": "server", "timestamp": "2021-09-21T06:55:24,947Z", "level": "INFO", "component": "o.e.t.NettyAllocator", "cluster.name": "es-docker-cluster", "node.name": "es01", "message": "creating NettyAllocator with the following configs: [name=unpooled, factors={es.unsafe.use_unpooled_allocator=false, g1gc_enabled=true, g1gc_region_size=1mb, heap_size=512mb}]" }
es01        | {"type": "server", "timestamp": "2021-09-21T06:55:25,088Z", "level": "INFO", "component": "o.e.d.DiscoveryModule", "cluster.name": "es-docker-cluster", "node.name": "es01", "message": "using discovery type [zen] and seed hosts providers [settings]" }
es02        | {"type": "server", "timestamp": "2021-09-21T06:55:25,120Z", "level": "INFO", "component": "o.e.t.NettyAllocator", "cluster.name": "es-docker-cluster", "node.name": "es02", "message": "creating NettyAllocator with the following configs: [name=unpooled, factors={es.unsafe.use_unpooled_allocator=false, g1gc_enabled=true, g1gc_region_size=1mb, heap_size=512mb}]" }
es02        | {"type": "server", "timestamp": "2021-09-21T06:55:25,250Z", "level": "INFO", "component": "o.e.d.DiscoveryModule", "cluster.name": "es-docker-cluster", "node.name": "es02", "message": "using discovery type [zen] and seed hosts providers [settings]" }
kib01       | {"type":"log","@timestamp":"2021-09-21T06:55:25Z","tags":["warning","elasticsearch","data"],"pid":7,"message":"Unable to revive connection: http://es02:9200/"}

My docker-compose.yml file is:

version: '2.2'
services:
  es01:
    image: elasticsearch:7.9.2
    container_name: es01
    environment:
      - node.name=es01
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es02,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data01:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
    networks:
      - elastic

  es02:
    image: elasticsearch:7.9.2
    container_name: es02
    environment:
      - node.name=es02
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es01,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data02:/usr/share/elasticsearch/data
    networks:
      - elastic

  es03:
    image: elasticsearch:7.9.2
    container_name: es03
    environment:
      - node.name=es03
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es01,es02
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data03:/usr/share/elasticsearch/data
    networks:
      - elastic

  kib01:
    image: kibana:7.9.2
    container_name: kib01
    ports:
      - 5601:5601
    environment:
      ELASTICSEARCH_URL: http://es01:9200
      ELASTICSEARCH_HOSTS: '["http://es01:9200","http://es02:9200","http://es03:9200"]'
    networks:
      - elastic

  logstash:
    image: logstash:7.9.2
    container_name: logstash
    ports:
      - 5000:5000
    environment:
      ELASTICSEARCH_URL: http://es01:9200
      ELASTICSEARCH_HOSTS: '["http://es01:9200","http://es02:9200","http://es03:9200"]'
    volumes:
      - type: bind
        source: /home/cloudera/logstash-pipeline
        target: /usr/share/logstash/pipeline
        read_only: true
    networks:
      - elastic


volumes:
  data01:
    driver: local
  data02:
    driver: local
  data03:
    driver: local

networks:
  elastic:
    driver: bridge

A few general things:

  1. 7.9 is quite old; 7.14 is the latest release and you should really be using that.
  2. 512 MB of heap isn't much for Elasticsearch; try it with 1 GB per node (see the sketch below).
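
For point 2, the change would look something like this in each es0x service (the 7.14.2 tag and the 1 GB heap are just example values, pick whatever current tag and size suit you):

  es01:
    image: elasticsearch:7.14.2
    container_name: es01
    environment:
      - node.name=es01
      - cluster.name=es-docker-cluster
      - "ES_JAVA_OPTS=-Xms1g -Xmx1g"   # 1 GB heap instead of 512 MB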

Can you try this with just a single Elasticsearch instance in your compose file?
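
Something like this minimal sketch (not a drop-in replacement for your file, just the shape; discovery.type=single-node skips cluster formation so the node comes up on its own):

version: '2.2'
services:
  es01:
    image: elasticsearch:7.9.2
    container_name: es01
    environment:
      - node.name=es01
      - cluster.name=es-docker-cluster
      - discovery.type=single-node      # single node, no cluster bootstrapping
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    ports:
      - 9200:9200
    networks:
      - elastic

  kib01:
    image: kibana:7.9.2
    container_name: kib01
    ports:
      - 5601:5601
    environment:
      ELASTICSEARCH_HOSTS: '["http://es01:9200"]'
    networks:
      - elastic

networks:
  elastic:
    driver: bridge

If Kibana connects cleanly to that single node, you can add es02, es03 and Logstash back one at a time and see where it starts failing.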


Thank you, I will try it and let you know.


Hey, I checked as you instructed but I am getting the same error. Is my yml file okay? I appreciate your help.
