Load Balancing ECE Deployments

I'm looking for example configurations for load balancing ECE deployments and was hoping others have something they'd be able to share.

The ECE documentation appears to just leave the load balancing recommendation as "user supplied", "tcp streams", and a list of ports. Fine for the basics, but I'd like to get an idea of what kinds of details people have found necessary in their deployments.

Also, is anyone load balancing the Cloud UI/admin services (ports 12300, 12343, 12400, 12443) in addition to the Elasticsearch/Kibana clusters (ports 9200, 9243, 9300, 9343)?

I'm currently looking at HAProxy (I've ruled out Nginx) to do the job.

Below is a generalized HAProxy configuration I am currently testing. It is working for my purposes at the moment, but I am still interested in what others are doing in their deployments.

# Generalized HAProxy for Basic ECE Deployment
#
#  - Tested with HAProxy 1.5.18 on CentOS 7 with ECE 1.0.2
#
# https://www.elastic.co/guide/en/cloud-enterprise/current/ece-planning.html#ece-load-balancers
# https://www.elastic.co/guide/en/cloud-enterprise/current/ece-prereqs.html#ece-prereqs-networking
# 
# 192.168.1.10    haproxy-host
# 192.168.1.11    allocator01 with proxy and coordinator roles
# 192.168.1.12    allocator02 with proxy and coordinator roles


global
  group  haproxy
  maxconn  100000
  user  haproxy

defaults
  log  global
  maxconn  10000
  stats  enable
  # HAProxy warns at startup if these timeouts are unset; the values are examples
  timeout connect  5s
  timeout client  60s
  timeout server  60s

listen ece_coordinator_support_http_12300
  bind 192.168.1.10:12300
  mode tcp
  balance source
  option tcplog
  server allocator01_12300 192.168.1.11:12300 check
  server allocator02_12300 192.168.1.12:12300 check

listen ece_coordinator_support_https_12343
  bind 192.168.1.10:12343
  mode tcp
  balance source
  option tcplog
  server allocator01_12343 192.168.1.11:12343 check
  server allocator02_12343 192.168.1.12:12343 check

listen ece_coordinator_uiapi_http_12400
  bind 192.168.1.10:12400
  mode tcp
  balance source
  option tcplog
  server allocator01_12400 192.168.1.11:12400 check
  server allocator02_12400 192.168.1.12:12400 check

listen ece_coordinator_uiapi_https_12443
  bind 192.168.1.10:12443
  mode tcp
  balance source
  option tcplog
  server allocator01_12443 192.168.1.11:12443 check
  server allocator02_12443 192.168.1.12:12443 check

listen ece_proxy_elastickibana_http_9200
  bind 192.168.1.10:9200
  mode tcp
  balance source
  option tcplog
  server allocator01_9200 192.168.1.11:9200 check
  server allocator02_9200 192.168.1.12:9200 check

listen ece_proxy_elastickibana_https_9243
  bind 192.168.1.10:9243
  mode tcp
  balance source
  option tcplog
  server allocator01_9243 192.168.1.11:9243 check
  server allocator02_9243 192.168.1.12:9243 check

listen ece_proxy_elastictransport_http_9300
  bind 192.168.1.10:9300
  mode tcp
  balance source
  option tcplog
  server allocator01_9300 192.168.1.11:9300 check
  server allocator02_9300 192.168.1.12:9300 check

listen ece_proxy_elastictransport_https_9343
  bind 192.168.1.10:9343
  mode tcp
  balance source
  option tcplog
  server allocator01_9343 192.168.1.11:9343 check
  server allocator02_9343 192.168.1.12:9343 check

listen stats
  bind 192.168.1.10:9090
  mode http
  stats uri /
  stats auth someuser:somepass

This is what we are doing with Nginx.

events {
	worker_connections 4096; ## Default: 1024
}

http {

	access_log /var/log/nginx/access2.log;
	error_log /var/log/nginx/error2.log;

	proxy_connect_timeout       600;
	proxy_send_timeout          600;
	proxy_read_timeout          600;
	send_timeout                600;

	upstream elastic-proxies {

		server 10.199.3.4:9200;
		server 10.199.3.5:9200;
		server 10.199.3.6:9200;
		keepalive 15;
	}

	upstream elastic-directors {

		server 10.199.4.17:12443;
		server 10.199.4.18:12443;
		server 10.199.4.19:12443;
	}

	server {

		listen 12443 ssl;
		ssl_certificate /etc/ssl/certs/cert.crt;
		ssl_certificate_key /etc/ssl/private/cert.key;
		include /etc/nginx/snippets/ssl-params.conf;

		location / {

			proxy_pass https://elastic-directors;
			proxy_http_version 1.1;
			proxy_set_header Connection "Keep-Alive";
			proxy_set_header Proxy-Connection "Keep-Alive";
			proxy_set_header Host $http_host;
		}
	}

	server {

		listen 9200;
		listen [::]:9200;

		location /probe {
			return 200;
		}
	}

	server {

		listen 9243 ssl;
		listen [::]:9243 ssl;
		ssl_certificate /etc/ssl/certs/cert.crt;
		ssl_certificate_key /etc/ssl/private/cert.key;
		include /etc/nginx/snippets/ssl-params.conf;

		location / {

			proxy_pass http://elastic-proxies;
			proxy_http_version 1.1;
			proxy_set_header Connection "Keep-Alive";
			proxy_set_header Proxy-Connection "Keep-Alive";
			proxy_set_header Host $http_host;
			client_max_body_size 200M;
		}
		location /probe {

			return 200;
		}
	}

	server {
		listen 443 ssl;
		server_name mycluster.mydomain.io;
		ssl_certificate /etc/ssl/certs/cert.crt;
		ssl_certificate_key /etc/ssl/private/cert.key;
		include /etc/nginx/snippets/ssl-params.conf;

		location / {

			proxy_pass http://oauth2-proxy-azuread:4180;
			proxy_http_version 1.1;
			proxy_set_header Connection "Keep-Alive";
			proxy_set_header Proxy-Connection "Keep-Alive";
			proxy_set_header X-Host myclusterid.mydomain.io;
			proxy_set_header Host $host;
		}
	}

	server {
		listen 443 ssl default_server;
		server_name _;
		ssl_certificate /etc/ssl/certs/cert.crt;
		ssl_certificate_key /etc/ssl/private/cert.key;
		include /etc/nginx/snippets/ssl-params.conf;
		return 418;
	}
} 
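
The /probe locations above give an upstream load balancer or monitoring check something cheap to hit at the Nginx layer without touching the ECE proxies. For example (the hostname is a placeholder):

curl -i  http://<nginx-host>:9200/probe
curl -ik https://<nginx-host>:9243/probe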

To get Azure Active Directory authentication in front of Kibana, we use OAuth2 Proxy.
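
For reference, a minimal OAuth2 Proxy invocation would look roughly like the following. All values are placeholders to adapt to your own Azure app registration; the listen address corresponds to the oauth2-proxy-azuread:4180 endpoint in the proxy_pass above:

# Sketch of an OAuth2 Proxy setup for Azure AD; every value here is a placeholder
oauth2-proxy \
  --provider=azure \
  --azure-tenant=<azure-tenant-id> \
  --client-id=<app-registration-client-id> \
  --client-secret=<app-registration-client-secret> \
  --http-address=0.0.0.0:4180 \
  --upstream=https://<kibana-cluster-id>.mydomain.io:9243/ \
  --redirect-url=https://mycluster.mydomain.io/oauth2/callback \
  --email-domain=mydomain.io \
  --cookie-secret=<random-base64-secret> \
  --cookie-secure=true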
