Restrict users with a search filter

Hi,

Can anyone please assist me with this: I need to prevent users from running huge queries, or at least require them to add a filter when they search in Kibana Discover.

The reason behind this is that users query huge amounts of data (for example, the last 30 days without providing any filter in the search query), which causes our cluster to crash.

Kindly let me know if this can work, or suggest any other alternative.

Cheers

Shappy

If using the Discover app without a filter against 30 days of data can cause the cluster to crash, even though Discover by default returns only 500 documents, I would suggest your cluster might be overloaded and need more heap or less data/shards per node.

What is the full output of the cluster stats API? Which version are you on?
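
For reference, the output below can be fetched with the cluster stats API, e.g.:

# Fetch cluster-wide statistics; adjust host and port for your cluster
curl -s "localhost:9200/_cluster/stats?pretty"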

Hi Christian,

here is the output

{
  "_nodes" : {
    "total" : 3,
    "successful" : 3,
    "failed" : 0
  },
  "cluster_name" : "abcdefghijklmnop",
  "cluster_uuid" : "xxxxxx-xxxxxxxxx-xxxxxxxxxx",
  "timestamp" : 1572407585476,
  "status" : "green",
  "indices" : {
    "count" : 140,
    "shards" : {
      "total" : 485,
      "primaries" : 271,
      "replication" : 0.7896678966789668,
      "index" : {
        "shards" : {
          "min" : 1,
          "max" : 5,
          "avg" : 3.4642857142857144
        },
        "primaries" : {
          "min" : 1,
          "max" : 5,
          "avg" : 1.9357142857142857
        },
        "replication" : {
          "min" : 0.0,
          "max" : 1.0,
          "avg" : 0.8928571428571429
        }
      }
    },
    "docs" : {
      "count" : 648905413,
      "deleted" : 16237427
    },
    "store" : {
      "size_in_bytes" : 238025811722
    },
    "fielddata" : {
      "memory_size_in_bytes" : 6472,
      "evictions" : 0
    },
    "query_cache" : {
      "memory_size_in_bytes" : 788566347,
      "total_count" : 48481301,
      "hit_count" : 12587141,
      "miss_count" : 35894160,
      "cache_size" : 12859,
      "cache_count" : 194916,
      "evictions" : 182057
    },
    "completion" : {
      "size_in_bytes" : 0
    },
    "segments" : {
      "count" : 5883,
      "memory_in_bytes" : 683794717,
      "terms_memory_in_bytes" : 438652238,
      "stored_fields_memory_in_bytes" : 193350592,
      "term_vectors_memory_in_bytes" : 0,
      "norms_memory_in_bytes" : 17025344,
      "points_memory_in_bytes" : 22842385,
      "doc_values_memory_in_bytes" : 11924158,
      "index_writer_memory_in_bytes" : 85502072,
      "version_map_memory_in_bytes" : 26523109,
      "fixed_bit_set_memory_in_bytes" : 1424832,
      "max_unsafe_auto_id_timestamp" : 1572399066225,
      "file_sizes" : { }
    }
  },
  "nodes" : {
    "count" : {
      "total" : 3,
      "coordinating_only" : 0,
      "data" : 2,
      "ingest" : 2,
      "master" : 3,
      "voting_only" : 1
    },
    "versions" : [
      "7.3.1"
    ],
    "os" : {
      "available_processors" : 5,
      "allocated_processors" : 8,
      "names" : [
        {
          "name" : "Linux",
          "count" : 3
        }
      ],
      "pretty_names" : [
        {
          "pretty_name" : "CentOS Linux 7 (Core)",
          "count" : 3
        }
      ],
      "mem" : {
        "total_in_bytes" : 547729580032,
        "free_in_bytes" : 12033003520,
        "used_in_bytes" : 535696576512,
        "free_percent" : 2,
        "used_percent" : 98
      }
    },
    "process" : {
      "cpu" : {
        "percent" : 2
      },
      "open_file_descriptors" : {
        "min" : 278,
        "max" : 3182,
        "avg" : 2189
      }
    },
    "jvm" : {
      "max_uptime_in_millis" : 1132870121,
      "versions" : [
        {
          "version" : "11.0.4",
          "vm_name" : "OpenJDK 64-Bit Server VM",
          "vm_version" : "11.0.4+11-LTS",
          "vm_vendor" : "Oracle Corporation",
          "bundled_jdk" : true,
          "using_bundled_jdk" : false,
          "count" : 3
        }
      ],
      "mem" : {
        "heap_used_in_bytes" : 7102047840,
        "heap_max_in_bytes" : 16680222720
      },
      "threads" : 252
    },
    "fs" : {
      "total_in_bytes" : 1035087118336,
      "free_in_bytes" : 791000879104,
      "available_in_bytes" : 791000879104
    },
    "plugins" : [
      {
        "name" : "repository-s3",
        "version" : "7.3.1",
        "elasticsearch_version" : "7.3.1",
        "java_version" : "1.8",
        "description" : "The S3 repository plugin adds S3 repositories",
        "classname" : "org.elasticsearch.repositories.s3.S3RepositoryPlugin",
        "extended_plugins" : [ ],
        "has_native_controller" : false
      }
    ],
    "network_types" : {
      "transport_types" : {
        "security4" : 3
      },
      "http_types" : {
        "security4" : 3
      }
    },
    "discovery_types" : {
      "zen" : 3
    },
    "packaging_types" : [
      {
        "flavor" : "default",
        "type" : "tar",
        "count" : 3
      }
    ]
  }
}

Those stats don't look that bad at all, but you do seem to have more small shards than recommended. Although they have not yet reached alarming levels, you may want to look at reducing this, e.g. by switching to weekly indices instead of daily ones.
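
If you are on a license that includes ILM (the basic license does in 7.x), one hypothetical way to move to weekly indices is a rollover policy with a seven-day maximum age; the policy name here is a placeholder:

# Hypothetical ILM policy that rolls over to a fresh index every 7 days,
# turning daily indices into weekly ones and cutting the shard count
curl -X PUT "localhost:9200/_ilm/policy/weekly-logs" \
  -H 'Content-Type: application/json' -d'
{
  "policy": {
    "phases": {
      "hot": {
        "actions": {
          "rollover": { "max_age": "7d" }
        }
      }
    }
  }
}'

For this to take effect, the index template also needs index.lifecycle.name and index.lifecycle.rollover_alias set, and writes must go through the rollover alias.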

If you have monitoring enabled, it would be interesting to see a graph of heap usage over time, as the crashes could be driven by requests and queries.
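
Even without the monitoring UI, the cat nodes API gives a point-in-time view of heap pressure, and polling it over time approximates that picture:

# Per-node heap usage; heap.percent that stays high (say above ~75%)
# between garbage collections suggests the nodes need more heap
curl -s "localhost:9200/_cat/nodes?v&h=name,heap.percent,heap.max,ram.percent"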

Hi Christian,

Thanks for the reply. So that means there is no option to restrict users from searching large amounts of data? As you mentioned earlier, Discover returns 500 docs by default; could that default be reduced, say by 50%, and would that help somehow?

Cheers
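
For reference, the Discover row count mentioned above is the discover:sampleSize advanced setting, which defaults to 500 and can be changed under Kibana's Management > Advanced Settings. Note that it only limits how many documents are returned to the browser, not how much data Elasticsearch scans to find them, so lowering it is unlikely to prevent crashes on its own. As a sketch, the same change via the settings endpoint that the Advanced Settings UI itself uses (an internal endpoint, not a stable public API):

# Halve Discover's sample size from the default 500 to 250
curl -X POST "localhost:5601/api/kibana/settings" \
  -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
  -d '{"changes": {"discover:sampleSize": 250}}'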

I am not aware of any way to restrict it in a good way. Can you share the logs from around the time you see a crash?
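
One partial mitigation, though it is a cap rather than a true restriction: the dynamic search.default_search_timeout cluster setting cancels searches that run too long. The two-minute value below is only an illustrative guess, and the timeout is best-effort rather than a hard guarantee:

# Cancel any search running longer than 2 minutes (illustrative value).
# This does not force users to add filters, but it bounds how long a
# single unfiltered query can occupy the cluster. Dynamic; no restart.
curl -X PUT "localhost:9200/_cluster/settings" \
  -H 'Content-Type: application/json' -d'
{
  "transient": {
    "search.default_search_timeout": "120s"
  }
}'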

This topic was automatically closed 28 days after the last reply. New replies are no longer allowed.