Multiple nodes docker-compose

Hi,

I currently manage a docker ELK stack with 3 nodes (1 master/no-data, 2 master/data) on the same machine. If I add 2 more data node containers to this (still on the same machine) will I experience a significant performance increase while querying? I recently increased the heap memories on all nodes but didn't see much of an increase. Will 5 virtual nodes give me a significant performance increase, or do I just need bare metal from now on?

Thanks.

It's unlikely, because all you are doing is splitting the available resources, not creating new ones.
If you are struggling with performance, adding more hardware would make more sense. Otherwise, detailing what problems you are seeing might allow us to give some advice.

Thanks for your reply Mark.

The main problem I face is slow queries and request timeouts from Kibana. The machine that the stack is running on is quite good, with 200gb ram and a 20 core/40 thread cpu. There are currently 3 nodes with 16gb of heap and 2 logstash containers with 4gb of heap on them. The first solution I came up with is adding index creation filters to logstash.conf to make some weekly and monthly indices to reduce shard count (solution not yet live). When the log entries are at peak, the heap used is around 65% on each node and a total of 60gb of ram used on the machine. So I have lots of system resources to spare.
Note: The stack processes around 90 different indices with a total of roughly 150gb entries each day on daily rollovers.

What is the output from the _cluster/stats API?

{
  "_nodes" : {
    "total" : 3,
    "successful" : 3,
    "failed" : 0
  },
  "cluster_name" : "odfe-cluster",
  "cluster_uuid" : "wdpguiEBTmOvxuAHfYG3xw",
  "timestamp" : 1609827372154,
  "status" : "green",
  "indices" : {
    "count" : 2681,
    "shards" : {
      "total" : 5362,
      "primaries" : 2681,
      "replication" : 1.0,
      "index" : {
        "shards" : {
          "min" : 2,
          "max" : 2,
          "avg" : 2.0
        },
        "primaries" : {
          "min" : 1,
          "max" : 1,
          "avg" : 1.0
        },
        "replication" : {
          "min" : 1.0,
          "max" : 1.0,
          "avg" : 1.0
        }
      }
    },
    "docs" : {
      "count" : 6709191827,
      "deleted" : 1868228
    },
    "store" : {
      "size_in_bytes" : 2582897033865
    },
    "fielddata" : {
      "memory_size_in_bytes" : 92334244,
      "evictions" : 0
    },
    "query_cache" : {
      "memory_size_in_bytes" : 52661248,
      "total_count" : 2663463,
      "hit_count" : 605733,
      "miss_count" : 2057730,
      "cache_size" : 5128,
      "cache_count" : 28211,
      "evictions" : 23083
    },
    "completion" : {
      "size_in_bytes" : 0
    },
    "segments" : {
      "count" : 21281,
      "memory_in_bytes" : 181930130,
      "terms_memory_in_bytes" : 129273584,
      "stored_fields_memory_in_bytes" : 24360552,
      "term_vectors_memory_in_bytes" : 0,
      "norms_memory_in_bytes" : 16200192,
      "points_memory_in_bytes" : 0,
      "doc_values_memory_in_bytes" : 12095802,
      "index_writer_memory_in_bytes" : 3124614472,
      "version_map_memory_in_bytes" : 3284,
      "fixed_bit_set_memory_in_bytes" : 10936,
      "max_unsafe_auto_id_timestamp" : 1609826828923,
      "file_sizes" : { }
    },
    "mappings" : {
      "field_types" : [
        {
          "name" : "boolean",
          "count" : 2238,
          "index_count" : 1557
        },
        {
          "name" : "date",
          "count" : 2989,
          "index_count" : 2678
        },
        {
          "name" : "double",
          "count" : 8,
          "index_count" : 2
        },
        {
          "name" : "integer",
          "count" : 784,
          "index_count" : 68
        },
        {
          "name" : "keyword",
          "count" : 29992,
          "index_count" : 2681
        },
        {
          "name" : "long",
          "count" : 8058,
          "index_count" : 1919
        },
        {
          "name" : "nested",
          "count" : 70,
          "index_count" : 68
        },
        {
          "name" : "object",
          "count" : 4193,
          "index_count" : 1563
        },
        {
          "name" : "text",
          "count" : 30083,
          "index_count" : 2679
        }
      ]
    },
    "analysis" : {
      "char_filter_types" : [ ],
      "tokenizer_types" : [ ],
      "filter_types" : [ ],
      "analyzer_types" : [ ],
      "built_in_char_filters" : [ ],
      "built_in_tokenizers" : [ ],
      "built_in_filters" : [ ],
      "built_in_analyzers" : [ ]
    }
  },
  "nodes" : {
    "count" : {
      "total" : 3,
      "coordinating_only" : 0,
      "data" : 2,
      "ingest" : 3,
      "master" : 3,
      "remote_cluster_client" : 3
    },
    "versions" : [
      "7.7.0"
    ],
    "os" : {
      "available_processors" : 3,
      "allocated_processors" : 3,
      "names" : [
        {
          "name" : "Linux",
          "count" : 3
        }
      ],
      "pretty_names" : [
        {
          "pretty_name" : "CentOS Linux 7 (Core)",
          "count" : 3
        }
      ],
      "mem" : {
        "total_in_bytes" : 607702573056,
        "free_in_bytes" : 388459368448,
        "used_in_bytes" : 219243204608,
        "free_percent" : 64,
        "used_percent" : 36
      }
    },
    "process" : {
      "cpu" : {
        "percent" : 7
      },
      "open_file_descriptors" : {
        "min" : 347,
        "max" : 8405,
        "avg" : 5713
      }
    },
    "jvm" : {
      "max_uptime_in_millis" : 558458780,
      "versions" : [
        {
          "version" : "12.0.2",
          "vm_name" : "OpenJDK 64-Bit Server VM",
          "vm_version" : "12.0.2+10",
          "vm_vendor" : "Oracle Corporation",
          "bundled_jdk" : true,
          "using_bundled_jdk" : false,
          "count" : 3
        }
      ],
      "mem" : {
        "heap_used_in_bytes" : 35984636376,
        "heap_max_in_bytes" : 51513458688
      },
      "threads" : 386
    },
    "fs" : {
      "total_in_bytes" : 71984744497152,
      "free_in_bytes" : 53350310051840,
      "available_in_bytes" : 53350310051840
    },
    "plugins" : [
      {
        "name" : "opendistro_alerting",
        "version" : "1.8.0.0",
        "elasticsearch_version" : "7.7.0",
        "java_version" : "1.8",
        "description" : "Amazon OpenDistro alerting plugin",
        "classname" : "com.amazon.opendistroforelasticsearch.alerting.AlertingPlugin",
        "extended_plugins" : [
          "lang-painless"
        ],
        "has_native_controller" : false
      },
      {
        "name" : "opendistro_performance_analyzer",
        "version" : "1.8.0.0",
        "elasticsearch_version" : "7.7.0",
        "java_version" : "1.8",
        "description" : "Performance Analyzer Plugin",
        "classname" : "com.amazon.opendistro.elasticsearch.performanceanalyzer.PerformanceAnalyzerPlugin",
        "extended_plugins" : [ ],
        "has_native_controller" : false
      },
      {
        "name" : "opendistro-knn",
        "version" : "1.8.0.0",
        "elasticsearch_version" : "7.7.0",
        "java_version" : "1.8",
        "description" : "Open Distro for Elasticsearch KNN",
        "classname" : "com.amazon.opendistroforelasticsearch.knn.plugin.KNNPlugin",
        "extended_plugins" : [ ],
        "has_native_controller" : false
      },
      {
        "name" : "opendistro_security",
        "version" : "1.8.0.0",
        "elasticsearch_version" : "7.7.0",
        "java_version" : "1.8",
        "description" : "Provide access control related features for Elasticsearch 7",
        "classname" : "com.amazon.opendistroforelasticsearch.security.OpenDistroSecurityPlugin",
        "extended_plugins" : [ ],
        "has_native_controller" : false
      },
      {
        "name" : "opendistro-job-scheduler",
        "version" : "1.8.0.0",
        "elasticsearch_version" : "7.7.0",
        "java_version" : "1.8",
        "description" : "Open Distro for Elasticsearch job schduler plugin",
        "classname" : "com.amazon.opendistroforelasticsearch.jobscheduler.JobSchedulerPlugin",
        "extended_plugins" : [ ],
        "has_native_controller" : false
      },
      {
        "name" : "opendistro_sql",
        "version" : "1.8.0.0",
        "elasticsearch_version" : "7.7.0",
        "java_version" : "1.8",
        "description" : "Open Distro for Elasticsearch SQL",
        "classname" : "com.amazon.opendistroforelasticsearch.sql.plugin.SqlPlug",
        "extended_plugins" : [ ],
        "has_native_controller" : false
      },
      {
        "name" : "opendistro-anomaly-detection",
        "version" : "1.8.0.0",
        "elasticsearch_version" : "7.7.0",
        "java_version" : "1.8",
        "description" : "Amazon opendistro elasticsearch anomaly detector plugin",
        "classname" : "com.amazon.opendistroforelasticsearch.ad.AnomalyDetectorPlugin",
        "extended_plugins" : [
          "lang-painless",
          "opendistro-job-scheduler"
        ],
        "has_native_controller" : false
      },
      {
        "name" : "opendistro_index_management",
        "version" : "1.8.0.0",
        "elasticsearch_version" : "7.7.0",
        "java_version" : "1.8",
        "description" : "Open Distro Index State Management Plugin",
        "classname" : "com.amazon.opendistroforelasticsearch.indexstatemanagement.IndexStateManagementPlugin",
        "extended_plugins" : [
          "opendistro-job-scheduler"
        ],
        "has_native_controller" : false
      }
    ],
    "network_types" : {
      "transport_types" : {
        "com.amazon.opendistroforelasticsearch.security.ssl.http.netty.OpenDistroSecuritySSLNettyTransport" : 3
      },
      "http_types" : {
        "com.amazon.opendistroforelasticsearch.security.http.OpenDistroSecurityHttpServerTransport" : 3
      }
    },
    "discovery_types" : {
      "zen" : 3
    },
    "packaging_types" : [
      {
        "flavor" : "oss",
        "type" : "tar",
        "count" : 3
      }
    ],
    "ingest" : {
      "number_of_pipelines" : 0,
      "processor_stats" : { }
    }
  }
}

You've got way too many shards for that data size, I would start by shrinking things down and then implementing your new sharding structure ASAP, or use ILM.

The users want to keep their data for some required period of time. It might be a month, 45 days, a year etc. I keep the last 14 days of indexes in open state and close the rest. I'm using ILM only for deletion. When the new structure comes live it will most likely take 1-2 months before things settle, I suppose.

You won't be able to use ILM as you're using open distro. So you will need to figure something else out there.

I would otherwise suggest you look at the inbuilt Monitoring in Kibana, but you're using Open Distro and it doesn't have it. Without that functionality there's probably not a tonne we can help with, unfortunately. You might need to ask on their forums.

Yes ILM isn't available in Open Distro, but it has ISM which roughly does the same job without the comfort of interface management.

I use elasticHQ for monitoring. Maybe that might be useful?