All nodes except Master show as “Offline”

Hi All,
I am having an issue where the other two data nodes of my Elasticsearch cluster are not shown in Kibana, although they are up and running in the cluster. Please let me know how I can resolve this. I am using ELK stack 7.4.2.

Hi @Osama_Tariq,

Let's take a look at your monitoring data and see if we can figure this out.

Can you run the following two queries and return the results for both? Run them against the monitoring cluster (which is where the monitoring data lives).

POST .monitoring-es-*/_search
{
  "size": 1,
  "sort": [
    {
      "timestamp": {
        "order": "desc"
      }
    }
  ],
  "query": {
    "term": {
      "type": {
        "value": "cluster_stats"
      }
    }
  },
  "collapse": {
    "field": "cluster_uuid"
  }
}

POST .monitoring-es-*/_search
{
  "size": 1,
  "sort": [
    {
      "timestamp": {
        "order": "desc"
      }
    }
  ],
  "query": {
    "term": {
      "type": {
        "value": "node_stats"
      }
    }
  },
  "collapse": {
    "field": "node_stats.node_id"
  }
}

1st output: it's too long, so I'm unable to post the complete response here.

2nd POST Output:

{
  "took" : 552,
  "timed_out" : false,
  "_shards" : {
    "total" : 1,
    "successful" : 1,
    "skipped" : 0,
    "failed" : 0
  },
  "hits" : {
    "total" : {
      "value" : 7930,
      "relation" : "eq"
    },
    "max_score" : null,
    "hits" : [
      {
        "_index" : ".monitoring-es-7-2019.12.31",
        "_type" : "_doc",
        "_id" : "SnR_XW8BKNWC4djNGlRg",
        "_score" : null,
        "_source" : {
          "cluster_uuid" : "CMlK79u5T4eeaLIyR-jB5g",
          "timestamp" : "2019-12-31T19:46:48.491Z",
          "interval_ms" : 10000,
          "type" : "node_stats",
          "source_node" : {
            "uuid" : "RfJNHOfLRKuOSxViu-wbbA",
            "host" : "172.16.20.29",
            "transport_address" : "172.16.20.29:9300",
            "ip" : "172.16.20.29",
            "name" : "nagios",
            "timestamp" : "2019-12-31T19:46:48.483Z"
          },
          "node_stats" : {
            "node_id" : "RfJNHOfLRKuOSxViu-wbbA",
            "node_master" : true,
            "mlockall" : false,
            "indices" : {
              "docs" : {
                "count" : 0
              },
              "store" : {
                "size_in_bytes" : 0
              },
              "indexing" : {
                "index_total" : 0,
                "index_time_in_millis" : 0,
                "throttle_time_in_millis" : 0
              },
              "search" : {
                "query_total" : 0,
                "query_time_in_millis" : 0
              },
              "query_cache" : {
                "memory_size_in_bytes" : 0,
                "hit_count" : 0,
                "miss_count" : 0,
                "evictions" : 0
              },
              "fielddata" : {
                "memory_size_in_bytes" : 0,
                "evictions" : 0
              },
              "segments" : {
                "count" : 0,
                "memory_in_bytes" : 0,
                "terms_memory_in_bytes" : 0,
                "stored_fields_memory_in_bytes" : 0,
                "term_vectors_memory_in_bytes" : 0,
                "norms_memory_in_bytes" : 0,
                "points_memory_in_bytes" : 0,
                "doc_values_memory_in_bytes" : 0,
                "index_writer_memory_in_bytes" : 0,
                "version_map_memory_in_bytes" : 0,
                "fixed_bit_set_memory_in_bytes" : 0
              },
              "request_cache" : {
                "memory_size_in_bytes" : 0,
                "evictions" : 0,
                "hit_count" : 0,
                "miss_count" : 0
              }
            },
            "os" : {
              "cpu" : {
                "load_average" : {
                  "1m" : 0.0,
                  "5m" : 0.01,
                  "15m" : 0.05
                }
              },
              "cgroup" : {
                "cpuacct" : {
                  "control_group" : "/",
                  "usage_nanos" : 2812286796062
                },
                "cpu" : {
                  "control_group" : "/",
                  "cfs_period_micros" : 100000,
                  "cfs_quota_micros" : -1,
                  "stat" : {
                    "number_of_elapsed_periods" : 0,
                    "number_of_times_throttled" : 0,
                    "time_throttled_nanos" : 0
                  }
                },
                "memory" : {
                  "control_group" : "/",
                  "limit_in_bytes" : "9223372036854771712",
                  "usage_in_bytes" : "3082403840"
                }
              }
            },
            "process" : {
              "open_file_descriptors" : 276,
              "max_file_descriptors" : 65535,
              "cpu" : {
                "percent" : 0
              }
            },
            "jvm" : {
              "mem" : {
                "heap_used_in_bytes" : 205971792,
                "heap_used_percent" : 9,
                "heap_max_in_bytes" : 2130051072
              },
              "gc" : {
                "collectors" : {
                  "young" : {
                    "collection_count" : 304,
                    "collection_time_in_millis" : 3505
                  },
                  "old" : {
                    "collection_count" : 3,
                    "collection_time_in_millis" : 133
                  }
                }
              }
            },
            "thread_pool" : {
              "generic" : {
                "threads" : 5,
                "queue" : 0,
                "rejected" : 0
              },
              "get" : {
                "threads" : 0,
                "queue" : 0,
                "rejected" : 0
              },
              "management" : {
                "threads" : 3,
                "queue" : 0,
                "rejected" : 0
              },
              "search" : {
                "threads" : 4,
                "queue" : 0,
                "rejected" : 0
              },
              "watcher" : {
                "threads" : 0,
                "queue" : 0,
                "rejected" : 0
              },
              "write" : {
                "threads" : 2,
                "queue" : 0,
                "rejected" : 0
              }
            },
            "fs" : {
              "total" : {
                "total_in_bytes" : 14921236480,
                "free_in_bytes" : 11190472704,
                "available_in_bytes" : 11190472704
              },
              "io_stats" : {
                "total" : {
                  "operations" : 46810,
                  "read_operations" : 27515,
                  "write_operations" : 19295,
                  "read_kilobytes" : 803540,
                  "write_kilobytes" : 1508866
                }
              }
            }
          }
        },
        "fields" : {
          "node_stats.node_id" : [
            "RfJNHOfLRKuOSxViu-wbbA"
          ]
        },
        "sort" : [
          1577821608491
        ]
      }
    ]
  }
}

@chrisronline Please let me know if you need further details to identify the issue.

For the first query, can you paste the entire response in a gist?

Sure. Please see the following link.

@chrisronline Did you get the output file?

Thanks.

It looks like the other two nodes are not reporting node_stats at all. Let's see if they are reporting any monitoring data.

Try running:

POST .monitoring-es-*/_search
{
  "size": 1,
  "sort": [
    {
      "timestamp": {
        "order": "desc"
      }
    }
  ],
  "query": {
    "term": {
      "type": {
        "value": "shards"
      }
    }
  },
  "collapse": {
    "field": "shard.node"
  }
}
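
Optionally, an aggregation-based variant (just a sketch; it reuses the node_stats.node_id field from the earlier collapse query) lists every node that has shipped node_stats documents in one compact response, which makes the "which nodes are reporting" check easier to read than the single-hit collapsed results:

POST .monitoring-es-*/_search
{
  "size": 0,
  "query": {
    "term": {
      "type": {
        "value": "node_stats"
      }
    }
  },
  "aggs": {
    "reporting_nodes": {
      "terms": {
        "field": "node_stats.node_id",
        "size": 10
      }
    }
  }
}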

Check if the only node you see is also the only one showing up in the UI.

If so, we need to figure out why those nodes aren't reporting.

A couple of things that will help figure that out:

  1. GET _cluster/settings for the cluster
  2. The elasticsearch.yml for each node (or see the settings query sketched just below this list)
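
If it's easier than pulling the files by hand, a request along these lines (just a sketch; the filter_path is an assumption about where the relevant settings sit in the node info response) should show any node-level xpack.monitoring settings directly from the API:

GET _nodes/settings?filter_path=nodes.*.name,nodes.*.settings.xpack.monitoring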

It shows the other data node, not the one shown in Kibana. Only the master node appears in Kibana.

{
  "took" : 7,
  "timed_out" : false,
  "_shards" : {
    "total" : 1,
    "successful" : 1,
    "skipped" : 0,
    "failed" : 0
  },
  "hits" : {
    "total" : {
      "value" : 1375,
      "relation" : "eq"
    },
    "max_score" : null,
    "hits" : [
      {
        "_index" : ".monitoring-es-7-2019.12.31",
        "_type" : "_doc",
        "_id" : "sWsxYLNqQiu7CJAvOC47SA:R22234sFQH2M4r24MwDShw:.kibana_1:0:r",
        "_score" : null,
        "_source" : {
          "cluster_uuid" : "CMlK79u5T4eeaLIyR-jB5g",
          "timestamp" : "2019-12-31T20:32:18.624Z",
          "interval_ms" : 10000,
          "type" : "shards",
          "source_node" : {
            "uuid" : "R22234sFQH2M4r24MwDShw",
            "host" : "172.16.20.5",
            "transport_address" : "172.16.20.5:9300",
            "ip" : "172.16.20.5",
            "name" : "Test-Usama",
            "timestamp" : "2019-12-31T20:32:18.566Z"
          },
          "state_uuid" : "sWsxYLNqQiu7CJAvOC47SA",
          "shard" : {
            "state" : "STARTED",
            "primary" : false,
            "node" : "R22234sFQH2M4r24MwDShw",
            "relocating_node" : null,
            "shard" : 0,
            "index" : ".kibana_1"
          }
        },
        "fields" : {
          "shard.node" : [
            "R22234sFQH2M4r24MwDShw"
          ]
        },
        "sort" : [
          1577824338624
        ]
      }
    ]
  }
}

GET _cluster/settings

{
  "persistent" : {
    "xpack" : {
      "monitoring" : {
        "collection" : {
          "enabled" : "true"
        }
      }
    }
  },
  "transient" : { }
}

Hmm. I'm not seeing anything obvious here.

What about the elasticsearch.yml for the data nodes? And just to double-check, I'm assuming you are not seeing any errors in the data nodes' server logs?
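
As a quick sanity check (a generic suggestion, not something specific to your setup), _cat/nodes will confirm that the cluster itself sees all three nodes and their roles, which would tell us whether this is purely a monitoring/reporting problem rather than a cluster-membership one:

GET _cat/nodes?v&h=name,ip,node.role,master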

I'm getting the same issue. If possible, can you tell me what the solution might be, or how to figure this out?

This topic was automatically closed 28 days after the last reply. New replies are no longer allowed.