Elasticsearch container is not being created using Docker Compose

Hi, I am trying to run the ELK stack with Filebeat on a VM installed on a Windows machine. When I run docker-compose up, the filebeat and kibana containers are created, but the elasticsearch container is not. I'm not sure what the reason is; please see the docker-compose.yml file below (the diagnostic commands I'm running are after it).

version: '2'
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch-oss:6.3.0
    container_name: elasticsearch
    hostname: elasticsearch
    environment:
      # - http.host=0.0.0.0
      # - transport.host=0.0.0.0
      - bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xmx1024m -Xmx1024m"
    volumes:
      - esdata:/usr/share/elasticsearch/data
      - ./elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
    ports:
      - 9200:9200
      - 9300:9300
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    mem_limit: 6g

    # deploy:
    #   resources:
    #     limits:
    #       cpus: '0.50'
    #       memory: 4g
    #     reservations:
    #       cpus: '0.25'
    #       memory: 2g
    # healthcheck:
    #   test: [ "CMD-SHELL", "curl --silent --fail localhost:9200/_cluster/health || exit 1" ]
    #   interval: 30s
    #   timeout: 30s
    #   retries: 5
    # cap_add:
    #   - IPC_LOCK
    # healthcheck:
    #   test: ["CMD", "curl", "-s", "-f", "-u", "elastic:${ES_PASSWORD}", "http://localhost:9200/_cat/health"]

  kibana:
    image: docker.elastic.co/kibana/kibana-oss:6.3.0
    container_name: kibana
    environment:
      - SERVER_HOST=0.0.0.0
    volumes:
      - ./kibana.yml:/usr/share/kibana/config/kibana.yml
    ports:
      - 5601:5601
    links:
      - elasticsearch
    depends_on:
      - elasticsearch # { condition: service_healthy }
    mem_limit: 512m

    # environment:
    # healthcheck:
    #   test: [ "CMD", "curl", "-s", "-f", "http://localhost:5601/" ]
    #   interval: 30s
    #   timeout: 30s
    #   retries: 5

  filebeat:
    container_name: filebeat
    hostname: filebeat
    user: root
    image: docker.elastic.co/beats/filebeat-oss:6.3.0
    command: filebeat -e -strict.perms=false
    links:
      - elasticsearch
      - kibana
      # - logstash
    depends_on:
      - elasticsearch # { condition: service_healthy }
      - kibana # { condition: service_healthy }
      # - logstash
    mem_limit: 512m
    volumes:
      - ./fbdata:/usr/share/filebeat/data/
      - ./fblogs:/usr/share/filebeat/logs/
      - ./filebeat.yml:/usr/share/filebeat/filebeat.yml
      - ../logfiles/analiz/prod/stdios/:/var/log/prod/stdios
      # - ../logfiles/analiz/test/stdios/:/var/log/test/stdios
      # - ../logfiles/trans/prod/mdblogs/:/var/log/prod/mdblogs
      # - ../logfiles/tracked/prod/logger-service/:/var/log/prod/logger-service  # only prod

      # these may be prod, test, uat, etc.
      # - ../logfiles/tracked/prod/sitevisits-cloud/:/var/log/prod/sitevisits-cloud
      # - ../logfiles/tracked/prod/ctms-connector/:/var/log/prod/ctms_connector
      # - ../logfiles/tracked/prod/sso-service/:/var/log/prod/sso-service
      # - ../logfiles/tracked/prod/svr-cloud/:/var/log/prod/svr-cloud
      # - ../logfiles/tracked/prod/ctms-connector-svr/:/var/log/prod/ctms_connector-svr
      # - ../logfiles/tracked/prod/sitevisits-app-mobile/:/var/log/prod/sitevisits-app-mobile
      # - ../logfiles/tracked/prod/svr-app-mobile/:/var/log/prod/svr-app-mobile

      # Named volume fbdata persists the registry file between restarts, to avoid duplicate ingestion
      # - fbdata:/usr/share/filebeat/data/
    # command: filebeat -e -E output.elasticsearch.username=elastic -E output.elasticsearch.password=${ES_PASSWORD} -strict.perms=false
    # restart: on-failure

  logstash:
    image: docker.elastic.co/logstash/logstash-oss:6.3.0
    container_name: logstash
    hostname: logstash
    volumes:
      - ./pipeline/:/usr/share/logstash/pipeline/:ro
      - ./patterns-custom.yml:/usr/share/logstash/config/patterns/patterns-custom.yml
      # - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
    ports:
      - 5400:5400
    links:
      - elasticsearch
    depends_on:
      - elasticsearch
    # command: logstash -f /usr/share/logstash/config/logstash.conf

  headPlugin:
    image: mobz/elasticsearch-head:5
    container_name: head_540
    ports:
      - 9100:9100

volumes:
  esdata:
    driver: local
  fbdata:
    driver: local
  fblogs:
    driver: local
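In case it helps, these are the commands I am using to see what actually happens to the elasticsearch container after docker-compose up (service and container names as defined in the file above):

docker-compose ps                             # lists all project containers, including exited ones
docker-compose logs elasticsearch             # startup output; bootstrap check failures show up here
docker ps -a --filter "name=elasticsearch"    # confirms whether the container was created at all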

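One thing I am unsure about: Elasticsearch 6.x runs bootstrap checks on startup, and the Docker docs say the host needs vm.max_map_count of at least 262144, otherwise the container exits right after it is created. Could that be the cause here? On the Linux VM (not on the Windows side) I would check and set it like this:

sysctl vm.max_map_count                   # current value on the Docker host
sudo sysctl -w vm.max_map_count=262144    # minimum required by Elasticsearch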