Error with Elasticsearch after prod config

Hello,

I'm having some problems with my Docker cluster. Today I decided to change the configuration of the Elasticsearch services, and now when I try to deploy the stack it gives the error below.

elastic_elasticsearch01.1.rhik0bu8ex0f@docker-manager    | ERROR: [1] bootstrap checks failed
elastic_elasticsearch01.1.rhik0bu8ex0f@docker-manager    | [1]: memory locking requested for elasticsearch process but memory is not locked
elastic_elasticsearch01.1.rhik0bu8ex0f@docker-manager    | ERROR: Elasticsearch did not exit normally - check the logs at /usr/share/elasticsearch/logs/elastic-docker-cluster.log

This is my compose file:

version: '3.7'

services:

  elasticsearch01:
     image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
     environment:
       - node.name=elasticsearch01
       - cluster.name=elastic-docker-cluster
       - discovery.seed_hosts=elasticsearch02
       - cluster.initial_master_nodes=elasticsearch01,elasticsearch02
       - bootstrap.memory_lock=true
       - ELASTIC_PASSWORD=changeme
       - "ES_JAVA_OPTS=-Xms2g -Xmx2g"
     configs:
       -  source: elastic_config.v1
          target: /usr/share/elasticsearch/config/elasticsearch.yml
     ports:
       - "9200:9200"
       - "9300:9300"
     networks:
       - elastic_elknet

  elasticsearch02:
     image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
     environment:
       - node.name=elasticsearch02
       - cluster.name=elastic-docker-cluster
       - discovery.seed_hosts=elasticsearch01
       - cluster.initial_master_nodes=elasticsearch01,elasticsearch02
       - bootstrap.memory_lock=true
       - ELASTIC_PASSWORD=changeme
       - "ES_JAVA_OPTS= -Xms2g -Xmx2g"
     configs:
       -  source: elastic_config.v1
          target: /usr/share/elasticsearch/config/elasticsearch.yml
     networks:
       - elastic_elknet


  kibana:
    image: docker.elastic.co/kibana/kibana:7.6.2
    ports:
      - "5601:5601"
    configs:
      - source: kibana_config.v1
        target: /usr/share/kibana/config/kibana.yml
    networks:
      - elastic_elknet

  logstash:
    image: docker.elastic.co/logstash/logstash:7.6.2
    ports:
      - "5000:5000"
      - "9600:9600"
      - "9555:9555"
    configs:
      - source: logstash_config.v2
        target: /usr/share/logstash/config/logstash.yml
      - source: logstash_pipeline.v1
        target: /usr/share/logstash/pipeline/logstash.conf
    environment:
      LS_JAVA_OPTS: -Xmx256m -Xms256m
    networks:
      - elastic_elknet


  heartbeat:
    image: docker.elastic.co/beats/heartbeat:7.6.2
    configs:
      - source: heartbeat_config.v8
        target: /usr/share/heartbeat/heartbeat.yml
      - source: monitors_config.v8
        target: /usr/share/heartbeat/monitors.d/monitor.icmp.yml
    environment:
      - output.elasticsearch.hosts=["elasticsearch:9200"]
    networks:
      - elastic_elknet
    deploy:
     replicas: 2
     update_config:
       parallelism: 1
       delay: 10s
       order: stop-first
       failure_action: rollback

  filebeat:
    image: docker.elastic.co/beats/filebeat:7.6.2
    user: root
    ports:
      - "9002:9002"
    configs:
      - source: filebeat_conf.v3
        target: /usr/share/filebeat/filebeat.yml
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - /var/lib/docker/containers/:/var/lib/docker/containers/:ro
    command: ["--strict.perms=false"]
    environment:
      - ELASTICSEARCH_USERNAME=elastic
      - ELASTICSEARCH_PASSWORD=changeme
    networks:
      - elastic_elknet

  metricbeat:
    image: docker.elastic.co/beats/metricbeat:7.6.2
    user: root
    configs:
      - source: metricbeat_config
        target: /usr/share/metricbeat/metricbeat.yml
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - /sys/fs/cgroup:/hostfs/sys/fs/cgroup:ro
      - /proc:/hostfs/proc:ro
      - /:/hostfs:ro
    networks:
      - elastic_elknet

  apm:
    image: docker.elastic.co/apm/apm-server:7.6.2
    user: root
    configs:
      - source: apm_config.v1
        target: /usr/share/apm-server/apm-server.yml
    command: ["--strict.perms=false"]
    networks:
      - elastic_elknet

configs:
   elastic_config.v1:
     file: ./elasticsearch/config/elasticsearch.yml
   logstash_config.v2:
     file: ./logstash/config/logstash.yml
   logstash_pipeline.v1:
     file: ./logstash/pipeline/logstash.conf
   kibana_config.v1:
     file: ./kibana/config/kibana.yml
   heartbeat_config.v8:
     file: ./heartbeat/config/heartbeat.yml
   filebeat_conf.v3:
     file: ./filebeat/config/filebeat.yml
   metricbeat_config:
     file: ./metricbeat/config/metricbeat.yml
   monitors_config.v8:
     file: ./heartbeat/monitors.d/monitor.icmp.yml
   apm_config.v1:
     file: ./apm/config/apm.yml


networks:
  elastic_elknet:
      external: true

Hello @Luis_Pereira1

Elasticsearch requested memory locking (bootstrap.memory_lock=true) but probably didn't manage to do it.

There are two possible reasons it couldn't lock the memory:

  • you do not have enough RAM on the machine to lock 2 GB of heap for each node
  • the process does not have the rights to lock that much memory

If you open the logs as suggested by the error message, you'll probably find:

 Unable to lock JVM Memory: error=12, reason=Cannot allocate memory
 This can result in part of the JVM being swapped out.
 Increase RLIMIT_MEMLOCK, soft limit: 65536, hard limit: 65536
 These can be adjusted by modifying /etc/security/limits.conf, for example:
        # allow user 'elasticsearch' mlockall
        elasticsearch soft memlock unlimited
        elasticsearch hard memlock unlimited

On Docker it is even easier. Just add the following lines to your configuration (for each Elasticsearch service):

  elasticsearch01:
     image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
...
     ulimits:
       memlock:
         soft: -1
         hard: -1

This is also detailed in our documentation page.

In general, it is even better if you can disable swap entirely or set vm.swappiness to 1 (as described here), instead of using bootstrap.memory_lock=true.
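
On the Docker hosts themselves (not inside the containers), that could look like the sketch below; the sysctl.d file name is just an example:

  # apply immediately
  sudo sysctl -w vm.swappiness=1
  # persist the setting across reboots (file name is illustrative)
  echo 'vm.swappiness=1' | sudo tee /etc/sysctl.d/99-elasticsearch.conf
  # or, alternatively, disable swap entirely
  sudo swapoff -a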

Hi Luca,

When I try to run the stack, it says the option below is deprecated.

I removed the bootstrap.memory_lock=true option, but now it gives me this error:

node.name": "elasticsearch2", "message": "master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster, and this node must discover master-eligible nodes [elasticsearch1, elasticsearch2] to bootstrap a cluster: have discovered [{elasticsearch2}{DmvY5NxNSK-9Lz4_tlFUdA}{CnB4qkAgSoGjlFRaonRZww}{10.0.0.9}{10.0.0.9:9300}{dilm}{ml.machine_memory=8366452736, xpack.installed=true, ml.max_open_jobs=20}]; discovery will continue using [10.0.0.5:9300] from hosts providers and [{elasticsearch2}{DmvY5NxNSK-9Lz4_tlFUdA}{CnB4qkAgSoGjlFRaonRZww}{10.0.0.9}{10.0.0.9:9300}{dilm}{ml.machine_memory=8366452736, xpack.installed=true, ml.max_open_jobs=20}] from last-known cluster state; node term 0, last-accepted version 0 in term 0" }

I'm using another host as a worker in swarm mode; I don't know if it has something to do with that.

ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
cl8hexf88909sy9t253z2wl46 * docker-manager Ready Active Leader 18.09.9
lg1el3f7xyng2v01xt788whvq photon-machine Ready Active 18.09.9

Kibana logs:

"warning","elasticsearch","admin"],"pid":6,"message":"No living connections"

The state of the services:

elastic_elasticsearch1 replicated 1/1
elastic_elasticsearch2 replicated 1/1
elastic_filebeat replicated 1/1
elastic_heartbeat replicated 2/2
elastic_kibana replicated 1/1
elastic_logstash replicated 1/1
elastic_metricbeat replicated 1/1
elastic_apm replicated 1/1

One more thing: I also changed the swappiness to 1 as suggested.

You're right:

In Docker versions prior to 18.09, containerd was managed by the Docker engine daemon. In Docker Engine 18.09, containerd is managed by systemd. Since containerd is managed by systemd, any custom configuration to the docker.service systemd configuration which changes mount settings (for example, MountFlags=slave) breaks interactions between the Docker Engine daemon and containerd, and you will not be able to start containers.

If you've configured swappiness to 1, it is fine then.


Your elasticsearch02 service is not exposing ports 9200 and 9300.

There are more details in the documentation.
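
For example, it could look like the sketch below. Note that in swarm mode two services cannot publish the same port on the ingress network, so the host ports 9201 and 9301 here are arbitrary examples:

  elasticsearch02:
     image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
...
     ports:
       - "9201:9200"
       - "9301:9300"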


I need to see the Kibana configuration file to tell why it cannot connect. In any case, until the cluster is formed, it will trigger this error.
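
For reference, a minimal kibana.yml for this stack usually looks something like the sketch below; the host name and credentials are assumptions based on your compose file, so adjust them to your setup:

  server.name: kibana
  server.host: "0.0.0.0"
  elasticsearch.hosts: [ "http://elasticsearch01:9200" ]
  elasticsearch.username: elastic
  elasticsearch.password: changeme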

Yes, this error is because the Elasticsearch nodes can't join the cluster.

My compose file looks like this now:

version: '3.7'
services:

  es01:
     image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
     environment:
       - node.name=es01
       - cluster.name=elastic-cluster
       - discovery.seed_hosts=es02
       - cluster.initial_master_nodes=es01,es02  
       - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
     configs:
       -  source: elastic_config.v1
          target: /usr/share/elasticsearch/config/elasticsearch.yml
     ports:
       - "9200:9200"
     networks:
       - elastic_elknet
 
  es02:
     image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
     environment:
       - node.name=es02
       - cluster.name=elastic-cluster
       - discovery.seed_hosts=es01
       - cluster.initial_master_nodes=es01,es02
       - "ES_JAVA_OPTS= -Xms512m -Xmx512m"
     configs:
       -  source: elastic_config.v1
          target: /usr/share/elasticsearch/config/elasticsearch.yml
     networks:
       - elastic_elknet
    

  kibana:
    image: docker.elastic.co/kibana/kibana:7.6.2
    ports:
      - "5601:5601"
    configs:
      - source: kibana_config.v1
        target: /usr/share/kibana/config/kibana.yml
    networks:
      - elastic_elknet

  logstash:
    image: docker.elastic.co/logstash/logstash:7.6.2
    ports:
      - "5000:5000"
      - "9600:9600"
      - "9555:9555"
    configs:
      - source: logstash_config.v2
        target: /usr/share/logstash/config/logstash.yml
      - source: logstash_pipeline.v1
        target: /usr/share/logstash/pipeline/logstash.conf
    environment:
      LS_JAVA_OPTS: -Xmx256m -Xms256m
    networks:
      - elastic_elknet
    

  heartbeat:
    image: docker.elastic.co/beats/heartbeat:7.6.2
    configs:
      - source: heartbeat_config.v8
        target: /usr/share/heartbeat/heartbeat.yml
      - source: monitors_config.v8
        target: /usr/share/heartbeat/monitors.d/monitor.icmp.yml
    environment:
      - output.elasticsearch.hosts=["elasticsearch:9200"]
    networks:
      - elastic_elknet
    deploy:
     replicas: 2
     update_config:
       parallelism: 1
       delay: 10s
       order: stop-first
       failure_action: rollback

  filebeat:
    image: docker.elastic.co/beats/filebeat:7.6.2
    user: root
    ports:
      - "9002:9002"
    configs:
      - source: filebeat_conf.v3
        target: /usr/share/filebeat/filebeat.yml
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - /var/lib/docker/containers/:/var/lib/docker/containers/:ro
    command: ["--strict.perms=false"]
    environment:
      - ELASTICSEARCH_USERNAME=elastic
      - ELASTICSEARCH_PASSWORD=changeme
    networks:
      - elastic_elknet

  metricbeat:
    image: docker.elastic.co/beats/metricbeat:7.6.2 
    user: root
    configs:
      - source: metricbeat_config
        target: /usr/share/metricbeat/metricbeat.yml
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - /sys/fs/cgroup:/hostfs/sys/fs/cgroup:ro
      - /proc:/hostfs/proc:ro
      - /:/hostfs:ro
    networks:
      - elastic_elknet

  apm:
    image: docker.elastic.co/apm/apm-server:7.6.2
    user: root
    configs: 
      - source: apm_config.v1
        target: /usr/share/apm-server/apm-server.yml
    command: ["--strict.perms=false"]
    networks:
      - elastic_elknet

configs:
   elastic_config.v1:
     file: ./elasticsearch/config/elasticsearch.yml
   logstash_config.v2:
     file: ./logstash/config/logstash.yml
   logstash_pipeline.v1:
     file: ./logstash/pipeline/logstash.conf
   kibana_config.v1:
     file: ./kibana/config/kibana.yml
   heartbeat_config.v8:
     file: ./heartbeat/config/heartbeat.yml
   filebeat_conf.v3:
     file: ./filebeat/config/filebeat.yml
   metricbeat_config:
     file: ./metricbeat/config/metricbeat.yml
   monitors_config.v8:
     file: ./heartbeat/monitors.d/monitor.icmp.yml
   apm_config.v1:
     file: ./apm/config/apm.yml


networks:
  elastic_elknet:
      external: true

About the ports being exposed on es01 and not on es02: that's because it's not possible for both services to publish the same port.

In the example from the documentation, the ports are also exposed only on the first service:

version: '2.2'
services:
  es01:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
    container_name: es01
    environment:
      - node.name=es01
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es02,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data01:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
    networks:
      - elastic
  es02:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
    container_name: es02
    environment:
      - node.name=es02
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es01,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data02:/usr/share/elasticsearch/data
    networks:
      - elastic

Please refer to the docker compose file at this documentation page.

Use it as the building block of your system.
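
In particular, note that the documentation example persists the Elasticsearch data in named volumes, which your current stack file does not do yet. A sketch of what that would add (following the volume names from the example above):

  es01:
     image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
...
     volumes:
       - data01:/usr/share/elasticsearch/data

volumes:
  data01:
  data02: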

This topic was automatically closed 28 days after the last reply. New replies are no longer allowed.