Elasticsearch not getting data from logstash

07:37:08.569 [[main]>worker8] ERROR logstash.outputs.elasticsearch - Attempted to send a bulk request to elasticsearch' but Elasticsearch appears to be unreachable or down! {:error_message=>"Elasticsearch Unreachable: [http://xxxxx:9200/][Manticore::ClientProtocolException] xxxxx:9200 failed to respond", :class=>"LogStash::Outputs::ElasticSearch::HttpClient::Pool::HostUnreachableError", :will_retry_in_seconds=>2}
07:37:10.570 [[main]>worker8] WARN logstash.outputs.elasticsearch - UNEXPECTED POOL ERROR {:e=>#<LogStash::Outputs::ElasticSearch::HttpClient::Pool::NoConnectionAvailableError: No Available connections>}
07:37:10.570 [[main]>worker8] ERROR logstash.outputs.elasticsearch - Attempted to send a bulk request to elasticsearch, but no there are no living connections in the connection pool. Perhaps Elasticsearch is unreachable or down? {:error_message=>"No Available connections", :class=>"LogStash::Outputs::ElasticSearch::HttpClient::Pool::NoConnectionAvailableError", :will_retry_in_seconds=>4}
07:37:11.656 [Ruby-0-Thread-15: /usr/share/logstash/vendor/bundle/jruby/1.9/gems/logstash-output-elasticsearch-6.2.6-java/lib/logstash/outputs/elasticsearch/http_client/pool.rb:222] INFO logstash.outputs.elasticsearch - Running health check to see if an Elasticsearch connection is working {:healthcheck_url=>http://xxxxx:9200/, :path=>"http://xxxxxxx:9200"}
07:37:11.738 [Ruby-0-Thread-15: /usr/share/logstash/vendor/bundle/jruby/1.9/gems/logstash-output-elasticsearch-6.2.6-java/lib/logstash/outputs/elasticsearch/http_client/pool.rb:222] WARN logstash.outputs.elasticsearch - Restored connection to ES instance {:url=>#<URI::HTTP:0x12b1c33 URL:http://xxxxxx:9200/>}
08:27:08.568 [[main]>worker1] WARN logstash.outputs.elasticsearch - Marking url as dead. Last error: [LogStash::Outputs::ElasticSearch::HttpClient::Pool::HostUnreachableError] Elasticsearch Unreachable: [http://xxxxxx:9200/][Manticore::ClientProtocolException] xxxxxx:9200 failed to respond {:url=>http://xxxxxx:9200/, :error_message=>"Elasticsearch Unreachable: [http://xxxxxx:9200/][Manticore::ClientProtocolException] xxxxxx:9200 failed to respond", :error_class=>"LogStash::Outputs::ElasticSearch::HttpClient::Pool::HostUnreachableError"}

getting this exception and it is been occurring since last two days on logstash(connect with elastic cloud) .How can i resolve this

Should you not be using port 9243 with Elastic Cloud instead of 9200? What does your config look like?

Config of which? logstash or elaticsearch

I am looking for the Logstash config, specifically the Elasticsearch output plugin.

if [isMock] == "true" {
elasticsearch {
action => "index"
index => "mock-%{logType}-%{index_day}"
hosts => "xxxx:9200"
user => "user"
password => "pass"
healthcheck_path => "http://xxxx:9200"
}
}
else {
elasticsearch {
action => "index"
index => "%{logType}-%{index_day}"
hosts => "xxxx:9200"
user => "user"
password => "pass"
healthcheck_path => "http://xxxx:9200"
}
}

input {
stdin {
add_field => {
"logType" => "metric1"
}
}
file {
path => "/data02/logs/samza/samza-container-*.log"
add_field => {
"logType" => "samza_logs"
}
}

kafka {
   id => "metric2"
   bootstrap_servers => "kafka1:9092,kafka2:9093,kafka3:9094"
   topics => ["metric2"]
   add_field => {
       "logType" => "metric2"
    }
}

kafka {
   id => "metric3"
   bootstrap_servers => "kafka1:9092,kafka2:9093,kafka3:9094"
   topics => ["metric3"]
   add_field => {
       "logType" => "metric3"
    }
}

kafka {
   id => "metric4"
   bootstrap_servers => "kafka1:9092,kafka2:9093,kafka3:9094"
   topics => ["metric4"]
   add_field => {
       "logType" => "metric4"
    }
}

}
filter {

if([logType]=="samza_logs") {

    #convert all logstash default field to app field with double underscore from current context

     mutate {
       gsub => [
        "message" , "@timestamp", "timestamp",
        "message" , "_id", "__id",
        "message" , "_index", "__index",
        "message" , "_type", "__type",
        "message" , "_score", "__score"
       ]
    }

    grok { match => { "message" => "%{DATESTAMP:messageTime} \[%{WORD:severity}\] %{DATA:class} \| %{GREEDYDATA:request}"} }   

    if [request] =~ "^\{.*\}[\s\S]*$" { #check whether request is json or not
        json{
            source => "request"
            remove_field => "request"
        } 

        if [mdc] { # check mdc is present in json if then flatting it
          if [mdc][jobName] { mutate { add_field => { "[jobName]" => "%{[mdc][jobName]}" } } }
          if [mdc][jobId] { mutate { add_field => { "[jobId]" => "%{[mdc][jobId]}" } } }
          if [mdc][containerName] { mutate { add_field => { "[containerName]" => "%{[mdc][containerName]}" } } }
          mutate { remove_field => "[mdc]"}
        }
    } 
    if "_jsonparsefailure" in [tags] or "_grokparsefailure" in [tags] { mutate { remove_field => ["request" ,"tags"]}  }
    else { mutate { remove_field => ["message"]} }

    if [LogTime] { ruby { code => "event.set('LogTime', DateTime.parse(event.get('LogTime')).strftime('%Y-%m-%dT%H:%M:%S.%L%z'))" }  }
    else { ruby { code => "event.set('LogTime', Time.now.strftime('%Y-%m-%dT%H:%M:%S.%L%z'))" }}

    ruby {
        code => "event.set('index_day', Time.now.strftime('%Y-%m-%d'))"
    }
} 

else {
    json {
      source => "message"
      remove_field => "message"
    }

    if [RequestTS] { ruby { code => "event.set('index_day', Date.parse(event.get('RequestTS')).strftime('%Y-%m-%d'))" }}
    else { ruby { code => "event.set('index_day', Time.now.strftime('%Y-%m-%d'))" }}
}

}
output {

 if [isMock] == "true" {
    elasticsearch {
    action => "index"
    index => "mock-%{logType}-%{index_day}"
    hosts => "xxxx:9200"
    user => "user"
    password =>  "pass"
    healthcheck_path => "http://xxxx:9200"
  }
}
else {
  elasticsearch {
    action => "index"
    index => "%{logType}-%{index_day}"
    hosts => "xxxx:9200"
    user => "user"
    password =>  "pass"
     healthcheck_path => "http://xxxx:9200"
  }
}

}

whole configuration

If you are connecting to Elastic Cloud you should use port 9243.

No it was working previously since last 13 hr it's throwing exception so i think port is not a problem

Port 9200 is not working on ANY of my Elastic Cloud clusters, so I would recommend changing that to rule it out.

This topic was automatically closed 28 days after the last reply. New replies are no longer allowed.