Logstash is failing

/usr/share/logstash/logstash-core/lib/logstash/pipeline.rb:385] elasticsearch - retrying failed action with response code: 403 ({"type"=>"cluster_block_exception", "reason"=>"blocked by: [FORBIDDEN/12/index read-only / allow delete (api)];"})

I am getting the above error. I have around 10,000 log files spread across multiple indices every five minutes, and the file sizes vary from 15 KB to 20 MB.

When I run each index by itself there is no issue, but there is an issue when I run them all together.

I have a single node with 64 GB RAM and a 2 TB hard disk to handle it.

Below is the logstash.conf.
Pasting logstash.conf in a separate post.

Logstash is crashing with the tmoes index and works fine without it.

I have around 130 CSV files, each around 20 MB in size, every five minutes.

Any suggestions would be of great help.

input {
file {
path =>"/home/shared/msdp/LogStashOutputFormatted/SES_VG1/.csv"
start_position => "beginning"
sincedb_path => "/dev/null"
type => "sesvg1"
}
file {
path =>"/home/shared/msdp/LogStashOutputFormatted/SES_VG1_Disk/
.csv"
start_position => "beginning"
sincedb_path => "/dev/null"
type => "sesvg1disk"
}
file {
path =>"/home/shared/msdp/LogStashOutputFormatted/SES_VG2/.csv"
start_position => "beginning"
sincedb_path => "/dev/null"
type => "sesvg2"
}
file {
path =>"/home/shared/msdp/LogStashOutputFormatted/SPP_VG1/
.csv"
start_position => "beginning"
sincedb_path => "/dev/null"
type => "sppvg1"
}
file {
path =>"/home/shared/msdp/LogStashOutputFormatted/SPP_VG1_Disk/.csv"
start_position => "beginning"
sincedb_path => "/dev/null"
type => "sppvg1disk"
}
file {
path =>"/home/shared/msdp/LogStashOutputFormatted/SPP_VG2/
.csv"
start_position => "beginning"
sincedb_path => "/dev/null"
type => "sppvg2"
}
}

filter {
# (additional file inputs and the first filter conditional are missing from the original paste)
else if [type] == "sems" {
csv {
separator => ","
columns => ["LogType","RootLogId","SubLogId","transactionID","Instance","Operation","Status","User","Hostname","Protocol","Target","startTime","ExecuteTime","responseCode"]
}
dissect {
mapping => {
"ExecuteTime" => "%{day} %{hour}:%{minute}:%{second}.%{millisecond}"
}
}
if [hour] and [minute] and [second] and [millisecond] {
ruby {
code => "event.set( 'timeInSeconds', event.get('hour').to_i3600 + event.get('minute').to_i60 +
event.get('second').to_i + event.get('millisecond').to_f/1000)"
}
}
}
else if [type] == "semn" {
csv {
separator => ","
columns => ["LogType","RootLogId","SubLogId","transactionID","Instance","Operation","Status","User","Hostname","Protocol","Target","startTime","ExecuteTime","responseCode"]
}
dissect {
mapping => {
"ExecuteTime" => "%{day1} %{hour1}:%{minute1}:%{second1}.%{millisecond1}"
}
}
if [hour1] and [minute1] and [second1] and [millisecond1] {
ruby {
code => "event.set( 'timeInSeconds1', event.get('hour1').to_i3600 + event.get('minute1').to_i60 +
event.get('second1').to_i + event.get('millisecond1').to_f/1000)"
}
}
}
else if [type] == "tmoes" {
csv {
separator => ","
columns => ["timeStamp","nodeIP","transactionId","APIName","APICall","startTime","endTime","transTime","responseMsg","responseCode","errMsg1","errMsg2","imsi"]
}
dissect {
mapping => {
"path" => "/%{folder1}/%{folder2}/%{folder3}/%{folder4}/%{folder5}/%{TmoesNodename}.%{LogName}"
}
}

grok { match => [ "TmoesNodename", "^(?...)" ] }
}
else {
csv {
separator => ","
columns => ["site","nodeName","APIName","Status","Share","Total"]
}
}
mutate {
convert => { "Filesystem" => "string" }
convert => { "site" => "string" }
convert => { "Size" => "string" }
convert => { "Used" => "string" }
convert => { "Avail" => "string" }
convert => { "Utilization" => "integer" }
convert => { "MountedOn" => "string" }
convert => { "nodeName" => "string" }
convert => { "APIName" => "string" }
convert => { "Status" => "string" }
convert => { "Value" => "string" }
convert => { "Share" => "float" }
convert => { "Total" => "integer" }
convert => { "timeStamp" => "string" }
convert => { "nodeIP" => "string" }
convert => { "transactionID" => "string" }
convert => { "APICall" => "string" }
convert => { "startTime" => "string" }
convert => { "endTime" => "string" }
convert => { "transTime" => "string" }
convert => { "responseMsg" => "string" }
convert => { "responseCode" => "string" }
convert => { "errMsg1" => "string" }
convert => { "errMsg2" => "string" }
convert => { "imsi" => "string" }
convert => { "message1" => "string" }
convert => { "msisdn" => "string" }
convert => { "message2" => "string" }
convert => { "transid" => "string" }
convert => { "customer_id" => "string" }
convert => { "event" => "string" }
convert => { "service" => "string" }
convert => { "message3" => "string" }
convert => { "streetaddress1" => "string" }
convert => { "streetaddress2" => "string" }
convert => { "city" => "string" }
convert => { "state" => "string" }
convert => { "zipcode" => "string" }
convert => { "latitude" => "string" }
convert => { "longitude" => "string" }
convert => { "status1" => "string" }
convert => { "latencyTime" => "string" }
convert => { "transid1" => "string" }
convert => { "event1" => "string" }
convert => { "imsi1" => "string" }
convert => { "msisdn1" => "string" }
convert => { "LogType" => "string" }
convert => { "RootLogId" => "string" }
convert => { "SubLogId" => "string" }
convert => { "Instance" => "string" }
convert => { "Operation" => "string" }
convert => { "User" => "string" }
convert => { "Hostname" => "string" }
convert => { "Protocol" => "string" }
convert => { "Target" => "string" }
convert => { "ExecuteTime" => "string" }
convert => { "Date" => "string" }
convert => { "TransactionType" => "string" }
convert => { "Address" => "string" }
convert => { "EndPointName" => "string" }
convert => { "FailureStatusCode" => "string" }
convert => { "TransactionDuration" => "string" }
convert => { "x-device-auth-mode" => "string" }
convert => { "x-user-auth-mode" => "string" }
convert => { "x-device-type" => "string" }
convert => { "x-nsds-version" => "string" }
convert => { "conditiontype" => "string" }
convert => { "reason" => "string" }
}
}

output {
if [type] == "sesvg1" {
elasticsearch {
action => "index"
hosts => "http://localhost:9200"
index => "sesvg1"
}
}
else if [type] == "sesvg1disk" {
elasticsearch {
action => "index"
hosts => "http://localhost:9200"
index => "sesvg1disk"
}
}
else if [type] == "sppvg1" {
elasticsearch {
action => "index"
hosts => "http://localhost:9200"
index => "sppvg1"
}
}
else if [type] == "sppvg1disk" {
elasticsearch {
action => "index"
hosts => "http://localhost:9200"
index => "sppvg1disk"
}
}
else if [type] == "sppvg2" {
elasticsearch {
action => "index"
hosts => "http://localhost:9200"
index => "sppvg2"
}
}

else if [type] == "sems" {
elasticsearch {
action => "index"
hosts => "http://localhost:9200"
index => "sems"
}
}
else if [type] == "semn"{
elasticsearch {
action => "index"
hosts => "http://localhost:9200"
index => "semn"
}
}
else if [type] == "semdiskstats" {
elasticsearch {
action => "index"
hosts => "http://localhost:9200"
index => "semdiskstats"
}
}
else if [type] == "tmoes" {
elasticsearch {
action => "index"
hosts => "http://localhost:9200"
index => "tmoes"
}
}
else {
elasticsearch {
action => "index"
hosts => "http://localhost:9200"
index => "sesvg2"
}
}
}

Sounds like you are running out of disk space. Are you above the thresholds?

No, Badger.

The thresholds are fine and there are no issues there. It's just that I don't have a cluster.

The Elasticsearch logs should clue you in on the reason for the index being read-only.
