Marking url as dead. Last error: [LogStash::Outputs::ElasticSearch::HttpClient::Pool::HostUnreachableError] Elasticsearch Unreachable

It's active, but: logstash.service: main process exited, code=exited, status=143/n/a

Did you resolve the issue with the plugin you mentioned earlier? Does Logstash start when you run it directly rather than as a service?
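For reference, exit status 143 just means the process received SIGTERM (128 + 15), so the systemd status alone does not tell us why Logstash is failing. Running it in the foreground usually surfaces the real startup error. A minimal sketch, assuming the default package layout under /usr/share/logstash and settings in /etc/logstash (the config filename is a placeholder; adjust paths to your install):

# run Logstash in the foreground so startup errors print to the console
sudo -u logstash /usr/share/logstash/bin/logstash \
  --path.settings /etc/logstash \
  -f /etc/logstash/conf.d/your-pipeline.conf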

Yes, I tried installing the amazon_es plugin, but it failed to install.

Then that is probably what is causing the problems.

Yeah, that is why I am uninstalling Logstash and reinstalling it.

How did you install the plugin? Did you look at the documentation about how to use it? As far as I know it is not a drop-in replacement for the elasticsearch plugin, and is configured differently.
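For comparison, installation and configuration would look roughly like the sketch below. The endpoint, credentials, and index name are placeholders, and the exact option names can vary by plugin version, so check the plugin's README rather than treating this as a drop-in config:

# install the community amazon_es output plugin
/usr/share/logstash/bin/logstash-plugin install logstash-output-amazon_es

# example output block (placeholder endpoint, region, credentials, index)
output {
  amazon_es {
    hosts  => ["search-example-domain.us-west-2.es.amazonaws.com"]
    region => "us-west-2"
    aws_access_key_id     => "ACCESS_KEY"
    aws_secret_access_key => "SECRET_KEY"
    index  => "example-index-%{+YYYY.MM.dd}"
  }
}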

Yes, I referred to that same documentation.

What does your config look like? Does it match the example in the documentation?

Sorry for the late reply. This is my conf file. I am not using the elasticsearch output plugin now, yet I am still getting the same error.

input {
  udp {
    port => 5978
  }
}

filter {
  grok {
    # literal square brackets must be escaped in grok patterns
    match => { "message" => '(?:%{SYSLOGTIMESTAMP:timestamp}|%{TIMESTAMP_ISO8601:timestamp8601}) %{WORD:appName}/%{WORD:containerId}\[%{INT:randomId:int}\]: %{GREEDYDATA:logMessage}' }
  }
}

output {
  file {
    path => "/var/log/logstash/application/docker-%{appName}-%{containerId}.log"
  }

  file {
    path => "/var/log/logstash/services/docker-%{appName}.log"
  }
}

What is the exact error you are getting with this config?

I fixed it; it was crashing because of the instance. But the problem is still there with the elasticsearch output plugin. I am using two Logstash servers, and the elasticsearch output plugin is working fine on the other Logstash server. This is the error I am getting:

[2019-01-04T10:03:16,040][ERROR][logstash.outputs.elasticsearch] Attempted to send a bulk request to elasticsearch, but no there are no living connections in the connection pool. Perhaps Elasticsearch is unreachable or down? {:error_message=>"No Available connections", :class=>"LogStash::Outputs::ElasticSearch::HttpClient::Pool::NoConnectionAvailableError", :will_retry_in_seconds=>4}

This is the conf file of the Logstash server which is working fine:

input {
  s3 {
    bucket => "docsapp-alb-accesslogs"
    prefix => "askadoc"
    region => "us-west-2"
    type   => "elblogs"
    codec  => "plain"
    delete => true
    secret_access_key => ""
    access_key_id => ""
  }
}

filter {
  grok {
    match => { "message" => '%{TIMESTAMP_ISO8601:timestamp} %{NOTSPACE:elb_name} %{IP:elb_client_ip}:%{INT:elb_client_port:int} (?:%{IP:elb_backend_ip}:%{NUMBER:elb_backend_port:int}|-) %{NUMBER:request_processing_time:float} %{NUMBER:backend_processing_time:float} %{NUMBER:response_processing_time:float} (?:%{INT:elb_status_code:int}|-) (?:%{INT:backend_status_code:int}|-) %{INT:elb_received_bytes:int} %{INT:elb_sent_bytes:int} "(?:%{GREEDYDATA:elb_request}|-)" "(?:%{GREEDYDATA:userAgent}|-)" %{NOTSPACE:elb_sslcipher} %{NOTSPACE:elb_sslprotocol}' }
  }

  mutate {
    convert => {
      "elb_status_code"     => "integer"
      "backend_status_code" => "integer"
    }
  }
}

output {
  file {
    path => "/var/log/logstash/askadoc-accesslog-%{+YYYY.MM.dd}.log"
  }

  elasticsearch {
    hosts => "http://search-askadocbacked-logs-6d**********.us-west-2.es.amazonaws.com:80"
    index => "askadoc-accesslog-%{+YYYY.MM.dd}"
  }
}

What is different between the config that is working and the one that is not?

Only the bucket and the AWS ES endpoint URL are different.

Can you curl both ES endpoints successfully from the machine where Logstash is running?
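Make sure to test the exact scheme and port your elasticsearch output is configured with (your working config uses http on port 80), not just the HTTPS default. For example, with a hypothetical hostname:

# test the same URL and port that Logstash uses, with verbose output
curl -v "http://search-example-domain.us-west-2.es.amazonaws.com:80"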

Yes, both are working:

[root@ip-172-31-40-86 conf.d]# curl https://search-askadocbacked-logs-.us-west-2.es.amazonaws.com
{
  "name" : "vu0erEX",
  "cluster_name" : "547208114500:askadocbacked-logs",
  "cluster_uuid" : "FhXphpj7ROm5US4E3WEiIA",
  "version" : {
    "number" : "6.3.1",
    "build_flavor" : "oss",
    "build_type" : "zip",
    "build_hash" : "eb782d0",
    "build_date" : "2018-09-11T14:05:25.216906Z",
    "build_snapshot" : false,
    "lucene_version" : "7.3.1",
    "minimum_wire_compatibility_version" : "5.6.0",
    "minimum_index_compatibility_version" : "5.0.0"
  },
  "tagline" : "You Know, for Search"
}
[root@ip-172-31-40-86 conf.d]# curl https://search-mb-production-app-.us-west-2.es.amazonaws.com
{
  "name" : "9LExCRY",
  "cluster_name" : "547208114500:mb-production-app",
  "cluster_uuid" : "DXz6TOQqSVed2ygweUu2EA",
  "version" : {
    "number" : "6.3.1",
    "build_flavor" : "oss",
    "build_type" : "zip",
    "build_hash" : "eb782d0",
    "build_date" : "2018-09-11T14:05:25.216906Z",
    "build_snapshot" : false,
    "lucene_version" : "7.3.1",
    "minimum_wire_compatibility_version" : "5.6.0",
    "minimum_index_compatibility_version" : "5.0.0"
  },
  "tagline" : "You Know, for Search"
}
How do you know the issue is with the output and not with the input?
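One quick way to isolate it would be to temporarily replace the elasticsearch output with stdout. If events print to the console, the s3 input and the filters are working, and the problem is confined to the output. A minimal sketch:

output {
  # temporary debugging output; prints each event to the console
  stdout { codec => rubydebug }
}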

Those clusters appear to be open to the internet, which means anyone could gain access to them and read/add/alter/delete data. I would recommend that you secure them immediately.

Relying on the URL being "secret" is not protecting you against port scans.
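If those domains really are open, one option is to lock the domain's resource-based access policy down to known source IPs. A minimal sketch using the AWS CLI; the domain name, account ID, and CIDR range below are all placeholders:

# restrict the domain to a known address range (all values are placeholders)
aws es update-elasticsearch-domain-config \
  --domain-name example-domain \
  --access-policies '{
    "Version": "2012-10-17",
    "Statement": [{
      "Effect": "Allow",
      "Principal": {"AWS": "*"},
      "Action": "es:*",
      "Resource": "arn:aws:es:us-west-2:123456789012:domain/example-domain/*",
      "Condition": {"IpAddress": {"aws:SourceIp": ["203.0.113.0/24"]}}
    }]
  }'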

If the issue were with the input, it would not be able to register the bucket at all; but it is showing the error as ERROR[logstash.outputs.elasticsearch].
