I am trying to run ELK in three separate Docker containers, but every time I get errors saying that Logstash is not able to reach Elasticsearch. The configuration is very simple; this is my logstash.conf file:
input {
  file {
    path => "/opt/elk/logstash/sample.log"   # sample log file on the local machine
    type => "apache-access"
    start_position => "beginning"
  }
}

filter {
  if [type] == "apache-access" {
    grok {
      match => { "message" => "%{COMBINEDAPACHELOG}" }
    }
  }
}

output {
  elasticsearch {
    hosts => ["elasticsearch:9200"]
  }
}
And this is my docker-compose.yml file:
version: '2'

services:
  kibana:
    image: docker.elastic.co/kibana/kibana:5.3.0
    ports:
      - "5601:5601"
    networks:
      - docker_elk

  logstash:
    image: docker.elastic.co/logstash/logstash:5.3.0
    ports:
      - "5044:5044"
    volumes:
      - /opt/elk/logstash/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    networks:
      - docker_elk

  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:5.3.0
    cap_add:
      - IPC_LOCK
    ports:
      - "9200:9200"
    networks:
      - docker_elk

networks:
  docker_elk:
    driver: bridge
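One thing I was unsure about is the startup order of the containers. A variant I have been considering (purely a guess on my part; the snippet below is just my logstash service with a depends_on entry added, and I have not confirmed that it changes anything) looks like this:

  logstash:
    image: docker.elastic.co/logstash/logstash:5.3.0
    # start this container only after the elasticsearch container has been started;
    # depends_on does NOT wait for Elasticsearch to actually accept connections
    depends_on:
      - elasticsearch
    ports:
      - "5044:5044"
    volumes:
      - /opt/elk/logstash/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    networks:
      - docker_elk

As far as I understand, depends_on in compose file format 2 only controls the order in which the containers are started and does not wait for Elasticsearch to finish booting, so I am not sure it is relevant to the errors below.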
Everything looks pretty clear and correct to me, but Logstash is still not able to connect to Elasticsearch and shows the following logs:
[2017-04-12T02:48:31,436][INFO ][logstash.agent ] No persistent UUID file found. Generating new UUID {:uuid=>"9a6c69b2-4d0a-457b-b0cc-f0da3ab25f01", :path=>"/usr/share/logstash/data/uuid"}
[2017-04-12T02:49:04,923][INFO ][logstash.outputs.elasticsearch] Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://logstash_system:xxxxxx@elasticsearch:9200/_xpack/monitoring/?system_id=logstash&system_api_version=2&interval=1s]}}
[2017-04-12T02:49:04,933][INFO ][logstash.outputs.elasticsearch] Running health check to see if an Elasticsearch connection is working {:healthcheck_url=>http://logstash_system:xxxxxx@elasticsearch:9200/, :path=>"/"}
[2017-04-12T02:49:06,775][WARN ][logstash.outputs.elasticsearch] Attempted to resurrect connection to dead ES instance, but got an error. {:url=>#<URI::HTTP:0x46abdaa0 URL:http://logstash_system:xxxxxx@elasticsearch:9200/_xpack/monitoring/?system_id=logstash&system_api_version=2&interval=1s>, :error_type=>LogStash::Outputs::ElasticSearch::HttpClient::Pool::HostUnreachableError, :error=>"Elasticsearch Unreachable: [http://logstash_system:xxxxxx@elasticsearch:9200/][Manticore::SocketException] Connection refused (Connection refused)"}
[2017-04-12T02:49:06,781][INFO ][logstash.outputs.elasticsearch] New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>[#<URI::HTTP:0x3fe7f61c URL:http://elasticsearch:9200>]}
[2017-04-12T02:49:06,789][INFO ][logstash.pipeline ] Starting pipeline {"id"=>".monitoring-logstash", "pipeline.workers"=>1, "pipeline.batch.size"=>2, "pipeline.batch.delay"=>5, "pipeline.max_inflight"=>2}
[2017-04-12T02:49:06,809][INFO ][logstash.pipeline ] Pipeline .monitoring-logstash started
[2017-04-12T02:49:06,888][INFO ][logstash.outputs.elasticsearch] Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://elasticsearch:9200/]}}
[2017-04-12T02:49:06,893][INFO ][logstash.outputs.elasticsearch] Running health check to see if an Elasticsearch connection is working {:healthcheck_url=>http://elasticsearch:9200/, :path=>"/"}
[2017-04-12T02:49:06,912][WARN ][logstash.outputs.elasticsearch] Attempted to resurrect connection to dead ES instance, but got an error. {:url=>#<URI::HTTP:0x2a8adde URL:http://elasticsearch:9200/>, :error_type=>LogStash::Outputs::ElasticSearch::HttpClient::Pool::HostUnreachableError, :error=>"Elasticsearch Unreachable: [http://elasticsearch:9200/][Manticore::SocketException] Connection refused (Connection refused)"}
[2017-04-12T02:49:06,938][INFO ][logstash.outputs.elasticsearch] Using mapping template from {:path=>nil}
[2017-04-12T02:49:06,955][WARN ][logstash.outputs.elasticsearch] Marking url as dead. Last error: [LogStash::Outputs::ElasticSearch::HttpClient::Pool::HostUnreachableError] Elasticsearch Unreachable: [http://elasticsearch:9200/][Manticore::SocketException] Connection refused (Connection refused) {:url=>http://elasticsearch:9200/, :error_message=>"Elasticsearch Unreachable: [http://elasticsearch:9200/][Manticore::SocketException] Connection refused (Connection refused)", :error_class=>"LogStash::Outputs::ElasticSearch::HttpClient::Pool::HostUnreachableError"}
[2017-04-12T02:49:06,959][ERROR][logstash.outputs.elasticsearch] Failed to install template. {:message=>"Elasticsearch Unreachable: [http://elasticsearch:9200/][Manticore::SocketException] Connection refused (Connection refused)", :class=>"LogStash::Outputs::ElasticSearch::HttpClient::Pool::HostUnreachableError", :backtrace=>["/usr/share/logstash/vendor/bundle/jruby/1.9/gems/logstash-output-elas
[2017-04-12T03:02:04,611][WARN ][logstash.outputs.elasticsearch] Attempted to resurrect connection to dead ES instance, but got an error. {:url=>#<URI::HTTP:0x78b0071e URL:http://logstash_system:xxxxxx@elasticsearch:9200/_xpack/monitoring/?system_id=logstash&system_api_version=2&interval=1s>, :error_type=>LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError, :error=>"Got response code '401' contacting Elasticsearch at URL 'http://elasticsearch:9200/'"}
[2017-04-12T03:02:09,613][INFO ][logstash.outputs.elasticsearch] Running health check to see if an Elasticsearch connection is working {:healthcheck_url=>http://logstash_system:xxxxxx@elasticsearch:9200/, :path=>"/"}
[2017-04-12T03:02:10,087][WARN ][logstash.outputs.elasticsearch] Restored connection to ES instance {:url=>#<URI::HTTP:0x5bce79ab URL:http://logstash_system:xxxxxx@elasticsearch:9200/>}
[2017-04-12T03:02:13,454][ERROR][logstash.inputs.metrics ] Failed to create monitoring event {:message=>"For path: events", :error=>"LogStash::Instrument::MetricStore::MetricNotFound"}
[2017-04-12T03:02:23,458][ERROR][logstash.inputs.metrics ] Failed to create monitoring event {:message=>"For path: events", :error=>"LogStash::Instrument::MetricStore::MetricNotFound"}
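I also noticed that the monitoring-related lines refer to a logstash_system user that I never configured anywhere myself, so I assume it comes from the logstash.yml baked into the image (I only mount logstash.conf, not logstash.yml). For reference, the X-Pack monitoring settings I believe are involved look roughly like this (setting names are from the 5.x documentation; the values are only my guess at the image defaults, not copied from my container):

# logstash.yml (guessed defaults for the 5.3.0 image)
http.host: "0.0.0.0"
# X-Pack monitoring ships Logstash metrics to Elasticsearch as the logstash_system user
xpack.monitoring.elasticsearch.url: http://elasticsearch:9200
xpack.monitoring.elasticsearch.username: logstash_system
xpack.monitoring.elasticsearch.password: changeme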
Could anybody please explain what is wrong with my configuration and why Logstash is not able to reach Elasticsearch, even though I can see both Logstash and Elasticsearch on the Kibana dashboard? I can also attach the Elasticsearch logs if needed.
Thanks!