I have a Docker Swarm cluster with 3 nodes. I set up Elasticsearch, Kibana, and Filebeat as Docker Swarm services. All three services are running without errors. Filebeat creates a data stream, but no logs ever arrive in it. Filebeat is deployed with "mode: global".
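For reference, I confirmed the tasks are running with the usual Swarm commands (the service names below assume the stack was deployed under the name "elastic"; substitute your own stack name):

# list services and confirm all replicas/global tasks are up
docker service ls
# inspect the filebeat tasks scheduled on each node
docker service ps elastic_filebeat01 --no-trunc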
docker-compose.yaml
version: "3.8"

networks:
  default:
    name: elastic
    external: false
  external:
    external: true

volumes:
  filebeatdata01:
    driver: local

services:
  es01:
    image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
      resources:
        limits:
          cpus: "1"
          memory: 6000M
      placement:
        constraints:
          - node.labels.monitoring01 == true
    ports:
      - target: 9200
        published: 9200
        protocol: TCP
        mode: host
    labels:
      co.elastic.logs/module: elasticsearch
    networks:
      - external
    volumes:
      - /home/ubuntu/elasticsearchnew/esdata01:/usr/share/elasticsearch/data
    environment:
      - node.name=es01
      - cluster.name=${CLUSTER_NAME}
      - discovery.type=single-node
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
      - bootstrap.memory_lock=true
      - xpack.security.enabled=true
      - xpack.security.http.ssl.enabled=false
      - xpack.security.transport.ssl.enabled=false
      - xpack.license.self_generated.type=${LICENSE}
      - "ES_JAVA_OPTS=-Xms1024m -Xmx1024m"
    ulimits:
      memlock:
        soft: -1
        hard: -1

  kibana:
    depends_on:
      - es01
    image: docker.elastic.co/kibana/kibana:${STACK_VERSION}
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: any
      resources:
        limits:
          cpus: "1"
          memory: 1000M
      placement:
        constraints:
          - node.labels.monitoring01 == true
    ports:
      - target: 5601
        published: 5601
        protocol: TCP
        mode: host
    networks:
      - external
    labels:
      co.elastic.logs/module: kibana
    volumes:
      - /home/ubuntu/elasticsearchnew/kibanadata:/usr/share/kibana/data
    environment:
      - SERVERNAME=kibana
      - ELASTICSEARCH_HOSTS=http://es01:9200
      - ELASTICSEARCH_USERNAME=kibana_system
      - ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
      - XPACK_SECURITY_ENCRYPTIONKEY=${ENCRYPTION_KEY}
      - XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=${ENCRYPTION_KEY}
      - XPACK_REPORTING_ENCRYPTIONKEY=${ENCRYPTION_KEY}

  filebeat01:
    depends_on:
      - es01
    image: docker.elastic.co/beats/filebeat:${STACK_VERSION}
    command: ["setup"]
    deploy:
      mode: global
      resources:
        limits:
          cpus: "0.1"
          memory: 128M
    networks:
      - external
    user: root
    volumes:
      - filebeatdata01:/usr/share/filebeat/data
      - /home/ubuntu/elasticsearchnew/filebeat_ingest_data:/usr/share/filebeat/ingest_data/
      - /home/ubuntu/elasticsearchnew/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      - ELASTIC_USER=elastic
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
      - output.elasticsearch.hosts=es01:9200
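The stack is deployed in the usual way; a minimal sketch, assuming the stack name "elastic" (any name works, it only prefixes the service names):

docker stack deploy -c docker-compose.yaml elastic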
filebeat.yml
filebeat.config:
  modules:
    path: ${path.config}/modules.d/*.yml
    reload.enabled: false

filebeat.autodiscover:
  providers:
    - type: docker
      hints.enabled: true

processors:
  - add_cloud_metadata: ~

output.elasticsearch:
  hosts: '${ELASTICSEARCH_HOSTS:es01:9200}'
  username: '${ELASTICSEARCH_USERNAME:xxxxxxxx}'
  password: '${ELASTICSEARCH_PASSWORD:xxxxxxxxxxx}'

setup.kibana:
  host: '${KIBANA_HOSTS:kibana:5601}'
  username: '${ELASTIC_USERNAME:xxxxxxx}'
  password: '${ELASTIC_PASSWORD:xxxxxxxxxxxxxxxxxx}'
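While debugging, Filebeat's built-in self-tests can at least confirm that this config parses and that the Elasticsearch output is reachable. A sketch, run inside one of the filebeat01 tasks (container names differ per node):

# verify the mounted config is valid
filebeat test config -c /usr/share/filebeat/filebeat.yml
# verify connectivity and authentication against the configured output
filebeat test output -c /usr/share/filebeat/filebeat.yml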
Checking shard allocation for the Filebeat data stream:

curl -X GET "localhost:9200/_cat/shards?v=true&h=index,shard,prirep,state,node,unassigned.reason&s=state&pretty" -u elastic:jokPxRRJFIAjx1UYWNu0 | grep .ds-filebeat-8.14.3-2024
.ds-filebeat-8.14.3-2024.07.30-000001 0 r UNASSIGNED INDEX_CREATED
.ds-filebeat-8.14.3-2024.07.30-000001 0 p STARTED es01
One of the shards above is not allocated because a shard with the same name is already allocated.
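For the exact reason, the allocation explain API can be asked about the unassigned replica; a sketch using the same credentials and the index name from the output above:

curl -X GET "localhost:9200/_cluster/allocation/explain?pretty" \
  -u elastic:jokPxRRJFIAjx1UYWNu0 \
  -H 'Content-Type: application/json' \
  -d '{"index": ".ds-filebeat-8.14.3-2024.07.30-000001", "shard": 0, "primary": false}'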
Any idea how to solve this?