Kibana: 5 of 10 shards failed

Hi,
I made some changes to my Kibana index patterns: e.g. I had 2 monthly index patterns, "myindex-2019-04" and "myindex-2019-05". I deleted them and created a new index pattern that includes both: "myindex-*".
After that, I changed all my visualizations to point to the "myindex-*" index pattern id instead of "myindex-2019-05" or "myindex-2019-04" (I did this through the Management -> Saved Objects panel).

After that, I noticed that when I try to open some visualizations, I get "5 of 10 shards failed" errors without any stack trace. This did not happen for all visualizations.

Here is result of my Cluster Stats:

{
  "_nodes": {
    "total": 15,
    "successful": 15,
    "failed": 0
  },
  "cluster_name": "563885087084:vsemetricssushiprod",
  "timestamp": 1557846138332,
  "status": "green",
  "indices": {
    "count": 5,
    "shards": {
      "total": 42,
      "primaries": 21,
      "replication": 1,
      "index": {
        "shards": {
          "min": 2,
          "max": 10,
          "avg": 8.4
        },
        "primaries": {
          "min": 1,
          "max": 5,
          "avg": 4.2
        },
        "replication": {
          "min": 1,
          "max": 1,
          "avg": 1
        }
      }
    },
    "docs": {
      "count": 7464927981,
      "deleted": 7
    },
    "store": {
      "size": "8.3tb",
      "size_in_bytes": 9135031497919
    },
    "fielddata": {
      "memory_size": "57.5gb",
      "memory_size_in_bytes": 61769174656,
      "evictions": 0
    },
    "query_cache": {
      "memory_size": "1.1gb",
      "memory_size_in_bytes": 1264182064,
      "total_count": 535101,
      "hit_count": 46532,
      "miss_count": 488569,
      "cache_size": 6581,
      "cache_count": 10211,
      "evictions": 3630
    },
    "completion": {
      "size": "0b",
      "size_in_bytes": 0
    },
    "segments": {
      "count": 2380,
      "memory": "19.4gb",
      "memory_in_bytes": 20857350220,
      "terms_memory": "16.9gb",
      "terms_memory_in_bytes": 18227430454,
      "stored_fields_memory": "2.3gb",
      "stored_fields_memory_in_bytes": 2477163768,
      "term_vectors_memory": "0b",
      "term_vectors_memory_in_bytes": 0,
      "norms_memory": "3.6mb",
      "norms_memory_in_bytes": 3857920,
      "points_memory": "141.8mb",
      "points_memory_in_bytes": 148714118,
      "doc_values_memory": "179.6kb",
      "doc_values_memory_in_bytes": 183960,
      "index_writer_memory": "0b",
      "index_writer_memory_in_bytes": 0,
      "version_map_memory": "0b",
      "version_map_memory_in_bytes": 0,
      "fixed_bit_set": "0b",
      "fixed_bit_set_memory_in_bytes": 0,
      "max_unsafe_auto_id_timestamp": -1,
      "file_sizes": {}
    }
  },
  "nodes": {
    "count": {
      "total": 15,
      "data": 12,
      "coordinating_only": 0,
      "master": 3,
      "ingest": 12
    },
    "versions": [
      "6.4.2"
    ],
    "os": {
      "available_processors": 102,
      "allocated_processors": 102,
      "names": [
        {
          "count": 15
        }
      ],
      "mem": {
        "total": "400.3gb",
        "total_in_bytes": 429922283520,
        "free": "10gb",
        "free_in_bytes": 10803552256,
        "used": "390.3gb",
        "used_in_bytes": 419118731264,
        "free_percent": 3,
        "used_percent": 97
      }
    },
    "process": {
      "cpu": {
        "percent": 103
      },
      "open_file_descriptors": {
        "min": 976,
        "max": 1672,
        "avg": 1240
      }
    },
    "jvm": {
      "max_uptime": "36d",
      "max_uptime_in_millis": 3113697500,
      "mem": {
        "heap_used": "110.9gb",
        "heap_used_in_bytes": 119181292112,
        "heap_max": "206.1gb",
        "heap_max_in_bytes": 221375496192
      },
      "threads": 2520
    },
    "fs": {
      "total": "17.3tb",
      "total_in_bytes": 19033313402880,
      "free": "8.7tb",
      "free_in_bytes": 9622869487616,
      "available": "7.8tb",
      "available_in_bytes": 8656250187776
    },
    "network_types": {
      "transport_types": {
        "netty4": 15
      },
      "http_types": {
        "filter-jetty": 15
      }
    }
  }
}

GET _cluster/health :

{
  "cluster_name": "563885087084:vsemetricssushiprod",
  "status": "green",
  "timed_out": false,
  "number_of_nodes": 15,
  "number_of_data_nodes": 12,
  "active_primary_shards": 21,
  "active_shards": 42,
  "relocating_shards": 0,
  "initializing_shards": 0,
  "unassigned_shards": 0,
  "delayed_unassigned_shards": 0,
  "number_of_pending_tasks": 0,
  "number_of_in_flight_fetch": 0,
  "task_max_waiting_in_queue_millis": 0,
  "active_shards_percent_as_number": 100
}

GET _cat/indices:

health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
green open myindex-2019-04 my-id 5 1 4606992997 0 5tb 2.5tb
green open .kibana my-id 1 1 28 7 491.1kb 245.5kb
green open myindex my-id 5 1 0 0 2.5kb 1.2kb
green open myindex-2019-05 my-id 5 1 2871736396 0 3.2tb 1.6tb

Ditto — exact same error here, from the exact same kind of change.
The index "index-job" was already in there;
I created "index-job-2019" and then created the index pattern "index-job*",

and now I get the same "N of n shards failed" error.

I will keep an eye on this thread for a solution as well.

That index pattern is covering indices which are missing shards (data). What is the result of _cluster/health and _cat/indices?

Thanks for replying! I updated my post with the cluster health and _cat/indices output.

With the updated _cat/indices response, I don't see any indices which would match your index pattern. Is that expected?

Sorry — I have updated the index name in my post so it matches the index pattern.

Everything looks OK regarding the cluster state. Is this still a problem for you? Is it possible that some nodes were down when you were receiving that error previously?

This topic was automatically closed 28 days after the last reply. New replies are no longer allowed.