Hi,
I am getting the error below after enabling the firewall on the Logstash server.
2020-03-11T15:46:14,189][WARN ][logstash.outputs.elasticsearch] Attempted to
resurrect connection to dead ES instance, but got an error.
{:url=>"http://elasticsearch-supervision.swarm-14.recette:9200/",
:error_type=>LogStash::
Outputs::ElasticSearch::HttpClient::Pool::HostUnreachableError , :error=>"
Elasticsearch Unreachable
Hi @Youssef_SBAI .
welcome
I think your Elasticsearch machine is not currently running — please check whether it is running or not.
Another thing: check your Elasticsearch machine's host/IP.
Thanks
HadoopHelp
I'm running in Docker; my docker-compose file is:
# Docker Compose (Swarm) stack for an ELK + Grafana monitoring setup.
# NOTE(review): image versions are mixed — Elasticsearch 6.8.3, Kibana/Logstash
# 6.6.2, Filebeat 7.5.0. Elastic recommends keeping the whole stack on the same
# minor version; confirm this mismatch is intentional.
services:
  elasticsearch:
    image: registry.gitlab.hello.fr/devops/applications/docker.elastic.co/elasticsearch:6.8.3
    networks:
      - default
      - swarm-system_traefik
    volumes:
      - /usr/share/zoneinfo/Europe/Paris:/etc/localtime
      - /data/volumes/monitoring/elasticsearch/data:/usr/share/elasticsearch/data
      - /data/volumes/monitoring/elasticsearch/logs:/usr/share/elasticsearch/logs
    environment:
      # Environment values are passed as strings — quote anything YAML could
      # re-type (bare numbers, booleans, dotted addresses).
      http.host: "0.0.0.0"
      xpack.security.enabled: "false"
      discovery.zen.minimum_master_nodes: "1"
      node.master: "true"
      node.data: "true"
      node.ingest: "true"
      ES_JAVA_OPTS: "-Xms2048m -Xmx2048m"
      MACIF_DOCKER_NODE_ID: "{{.Node.ID}}"
      MACIF_DOCKER_NODE_NAME: "{{.Node.Hostname}}"
      MACIF_DOCKER_SERVICE_ID: "{{.Service.ID}}"
      MACIF_DOCKER_SERVICE_NAME: "{{.Service.Name}}"
      MACIF_DOCKER_SERVICE_LABELS: "{{.Service.Labels}}"
      MACIF_DOCKER_TASK_ID: "{{.Task.ID}}"
      MACIF_DOCKER_TASK_NAME: "{{.Task.Name}}"
      MACIF_DOCKER_TASK_SLOT: "{{.Task.Slot}}"
      MACIF_CLUSTER: "${MACIF_CLUSTER}"
      MACIF_STACK: "${MACIF_STACK}"
      MACIF_TYPE: "${MACIF_TYPE}"
      MACIF_APP: "${MACIF_APPLICATION}"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9200/_cat/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 3m
    deploy:
      labels:
        - "traefik.docker.network=swarm-system_traefik"
        - "traefik.port=9200"
        - "traefik.frontend.rule=Host:elasticsearch-supervision.${DOCKER_COMPOSE_DOMAIN}"
        #- "traefik.frontend.auth.basic=${elastic.auth.user}:${elastic.auth.hashpassword}"

  kibana:
    # NOTE(review): this image comes from registry.gitlab.macif.fr while the
    # other services use registry.gitlab.hello.fr — confirm which registry is
    # correct.
    image: registry.gitlab.macif.fr/devops/applications/docker.elastic.co/kibana:6.6.2
    volumes:
      - /etc/localtime:/etc/localtime:ro
      #- /data/volumes/monitoring/kibana/plugins:/usr/share/kibana/plugins
    environment:
      TZ: Europe/Paris
      ELASTICSEARCH_URL: http://elasticsearch:9200
    networks:
      - default
      - swarm-system_traefik
    deploy:
      labels:
        - "traefik.port=5601"
        - "traefik.frontend.rule=Host:kibana-supervision.${DOCKER_COMPOSE_DOMAIN}"
        - "traefik.docker.network=swarm-system_traefik"
        #- "traefik.frontend.redirect.entryPoint=https"
      resources:
        limits:
          memory: 2G
        reservations:
          memory: 2G

  logstash:
    image: registry.gitlab.hello.fr/devops/applications/docker.elastic.co/logstash:6.6.2
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /data/volumes/monitoring/logstash/pipeline:/usr/share/logstash/pipeline
    environment:
      TZ: Europe/Paris
    networks:
      - default
      - swarm-system_traefik
    ports:
      # Always quote port mappings — unquoted HOST:CONTAINER pairs can be
      # parsed as base-60 integers by YAML 1.1 parsers.
      - "5044:5044"
    deploy:
      replicas: 2
      resources:
        limits:
          memory: 4G
        reservations:
          memory: 4G

  grafana:
    image: grafana/grafana
    ports:
      - "3000:3000"
    volumes:
      # Fixed typo: host path was "garfana" — if data already exists under the
      # misspelled directory, rename it on the host before redeploying.
      - /data/volumes/monitoring/grafana/data:/var/lib/grafana
    environment:
      - GF_SERVER_ROOT_URL=http://grafana-supervision.${DOCKER_COMPOSE_DOMAIN}:3000/
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=admin
      #- GF_SMTP_ENABLED=true
      #- GF_SMTP_HOST=smtp.gmail.com:587
      #- GF_SMTP_USER=grafana@example.com
      #- GF_SMTP_FROM_ADDRESS=grafana@example.com
      #- GF_SMTP_PASSWORD=******
      - GF_USERS_ALLOW_SIGN_UP=false
      - GF_ALERTING_ERROR_OR_TIMEOUT=keep_state
    networks:
      - default
      - swarm-system_traefik
    deploy:
      labels:
        - "traefik.port=3000"
        - "traefik.frontend.rule=Host:grafana-supervision.${DOCKER_COMPOSE_DOMAIN}"
        - "traefik.docker.network=swarm-system_traefik"
      # NOTE(review): the original had `restart: always`, which Swarm ignores
      # when deploying with `docker stack deploy`; use deploy.restart_policy
      # instead.
      restart_policy:
        condition: any

  filebeat:
    image: registry.gitlab.hello.fr/devops/applications/docker.elastic.co/filebeat:7.5.0
    volumes:
      - /etc/localtime:/etc/localtime:ro
      # NOTE(review): the official filebeat image reads its config from
      # /usr/share/filebeat — confirm /etc/filebeat is where this custom image
      # expects it.
      - /data/volumes/monitoring/filebeat:/etc/filebeat
    environment:
      TZ: Europe/Paris
    networks:
      - default
      - swarm-system_traefik
    deploy:
      replicas: 2
      resources:
        limits:
          memory: 512M
        reservations:
          memory: 512M

networks:
  default:
  swarm-system_traefik:
    external: true
and my Logstash configuration is:
# Logstash pipeline: receive Beats events, parse a semicolon-separated CSV
# line into named fields, normalize types/dates, and index into Elasticsearch.
input {
  beats {
    port => "5044"
  }
}

filter {
  csv {
    separator => ";"
    # Fixed: the last column was named "sous_application^M" — a literal
    # carriage return (from a CRLF-encoded config file) had been embedded in
    # the column name, so the field was never addressable as
    # "sous_application".
    columns => ["chaine", "job", "date_plan", "statut", "date_debut", "date_fin", "serveur",
                "numero_passage", "application", "sous_application"]
  }

  # Input files may use Windows (CRLF) line endings; strip a trailing \r from
  # the last column's value so it doesn't pollute searches/aggregations.
  mutate {
    gsub => ["sous_application", "\r$", ""]
  }

  mutate {
    convert => { "numero_passage" => "integer" }
  }

  # Event timestamp: date_plan drives @timestamp (the date filter's default
  # target).
  date {
    match => [ "date_plan" , "YYYY-MM-dd" ]
    timezone => "Europe/Paris"
  }

  # Fixed: the original had three date filters all writing @timestamp (the
  # default target), so each overwrote the previous and only date_fin
  # survived. The start/end dates now parse into their own fields.
  date {
    match => [ "date_debut" , "YYYY-MM-dd HH:mm:ss" ]
    timezone => "Europe/Paris"
    target => "date_debut"
  }
  date {
    match => [ "date_fin" , "YYYY-MM-dd HH:mm:ss" ]
    timezone => "Europe/Paris"
    target => "date_fin"
  }
}

output {
  elasticsearch {
    hosts => ["elasticsearch:9200"]
    index => "poca"
  }
  stdout {}
}
My machine is currently running.
dadoonet
(David Pilato)
March 12, 2020, 9:00am
6
Please format your code, logs or configuration files using </>
icon as explained in this guide and not the citation button. It will make your post more readable.
Or use markdown style like:
```
CODE
```
This is the icon to use if you are not using markdown format:
There's a live preview panel for exactly this reason.
Lots of people read these forums, and many of them will simply skip over a post that is difficult to read, because it's just too large an investment of their time to try and follow a wall of badly formatted text.
If your goal is to get an answer to your questions, it's in your interest to make it as easy to read and understand as possible.
Please update your post.
Hi @Youssef_SBAI .
I think you are passing a host name like http://elasticsearch-supervision.swarm-14.recette:9200/,
but the configuration file mentions something different, as below:
output { elasticsearch { hosts => ["elasticsearch:9200"] index => "poca" }
Please check — I think something is wrong with the Elasticsearch address.
Thanks
HadoopHelp
system
(system)
Closed
April 9, 2020, 2:07pm
8
This topic was automatically closed 28 days after the last reply. New replies are no longer allowed.