We are testing out Elasticsearch cross-cluster search.
Our setup is two ELK + Kafka clusters in Docker, each running a single node of every component: Elasticsearch, Logstash, Kibana and Kafka.
Below is the docker-compose file for setup 1:
version: '2'
services:
  elasticsearch:
    build:
      context: elasticsearch/
    volumes:
      - "./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro"
      - "./esdata:/usr/share/elasticsearch/data"
    ports:
      - "16022:16022"
      - "16021:16021"
    environment:
      ES_JAVA_OPTS: "-Xmx256m -Xms256m"
      discovery.type: zen
      discovery.zen.ping.unicast.hosts: elasticsearch
    networks:
      - elk
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
  logstash:
    build:
      context: logstash/
    volumes:
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro
    ports:
      - "5000:5000"
    environment:
      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
    networks:
      - elk
    depends_on:
      - elasticsearch
  kibana:
    build:
      context: kibana/
    volumes:
      - ./kibana/config/:/usr/share/kibana/config:ro
    ports:
      - "5601:5601"
    networks:
      - elk
    depends_on:
      - elasticsearch
  zookeeper:
    image: confluentinc/cp-zookeeper:latest
    networks:
      - elk
    environment:
      ZOOKEEPER_CLIENT_PORT: 32181
      ZOOKEEPER_TICK_TIME: 2000
    extra_hosts:
      - "default:127.0.0.1"
  kafka:
    image: confluentinc/cp-kafka:latest
    networks:
      - elk
    depends_on:
      - zookeeper
    ports:
      - "16020:16020"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:32181
      KAFKA_ADVERTISED_LISTENERS: EXT://<setup_1_ip>:16020, INT://kafka:9092
      KAFKA_INTER_BROKER_LISTENER_NAME: INT
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INT:PLAINTEXT, EXT:PLAINTEXT
    extra_hosts:
      - "default:127.0.0.1"
networks:
  elk:
    driver: bridge
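For reference, and assuming this file is saved as docker-compose.yml with the elasticsearch/, logstash/ and kibana/ build contexts next to it (an assumption, since only the compose content is shown), the stack is brought up and checked roughly like this:

docker-compose up -d --build
docker-compose logs -f elasticsearch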
The elasticsearch.yml file for setup 1 is as below:
---
# Default Elasticsearch configuration from elasticsearch-docker.
#
cluster.name: "docker-cluster_setup_1"
network.host: 0.0.0.0
network.publish_host: <setup_1_ip>
# minimum_master_nodes need to be explicitly set when bound on a public IP
# set to 1 to allow single node clusters
# Details: https://github.com/elastic/elasticsearch/pull/17288
#discovery.zen.minimum_master_nodes: 1
# Use single node discovery in order to disable production mode and avoid bootstrap checks
# see https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
#
discovery.type: zen
# Set a custom port for the node to node communication (9300 by default):
#
transport.tcp.port: 16022
# Set a custom port to listen for HTTP traffic:
#
http.port: 16021
search:
  remote:
    cluster_setup2:
      seeds: <setup_2_ip>:16022
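To verify from setup 1 that the remote cluster defined above is actually connected, the remote info API can be queried on the local HTTP port; this is only a diagnostic sketch and assumes an Elasticsearch version (6.1 or later) that exposes the _remote/info endpoint:

curl -X GET "localhost:16021/_remote/info?pretty"

A working connection should list cluster_setup2 with the configured seed, "connected": true and a non-zero number of connected nodes.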
The docker-compose file for setup 2 is as below:
version: '2'
services:
  elasticsearch:
    build:
      context: elasticsearch/
    volumes:
      - "./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro"
      - "./esdata:/usr/share/elasticsearch/data"
    ports:
      - "16022:16022"
      - "16021:16021"
    environment:
      ES_JAVA_OPTS: "-Xmx256m -Xms256m"
      discovery.type: zen
      discovery.zen.ping.unicast.hosts: elasticsearch
    networks:
      - elk
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
<REST SAME AS SETUP 1>
And the elasticsearch.yml file for setup 2 is as follows:
# Default Elasticsearch configuration from elasticsearch-docker.
#
cluster.name: "docker-cluster_setup2"
network.host: 0.0.0.0
network.publish_host: <setup_2_ip>
# minimum_master_nodes need to be explicitly set when bound on a public IP
# set to 1 to allow single node clusters
# Details: https://github.com/elastic/elasticsearch/pull/17288
#discovery.zen.minimum_master_nodes: 1
# Use single node discovery in order to disable production mode and avoid bootstrap checks
# see https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
#
discovery.type: zen
# Set a custom port for the node to node communication (9300 by default):
#
transport.tcp.port: 16022
# Set a custom port to listen for HTTP traffic:
#
http.port: 16021
search:
  remote:
    cluster_setup1:
      seeds: <setup_1_ip>:16022
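As a side note, the same remote can also be registered at runtime through the cluster settings API instead of elasticsearch.yml; a minimal sketch, assuming the search.remote.* setting names used by this Elasticsearch version (newer releases rename them to cluster.remote.*):

curl -X PUT "localhost:16021/_cluster/settings" -H 'Content-Type: application/json' -d'
{
  "persistent": {
    "search.remote.cluster_setup1.seeds": ["<setup_1_ip>:16022"]
  }
}
'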
When attempting cross-cluster search, I am able to connect from setup 2 to setup 1, whereas when I try from setup 1 to setup 2 I get a timeout error.
Query executed on setup 2
curl -X GET "localhost:16021/cluster_setup1:index1/_count" -H 'Content-Type: application/json' -d'
{
  "query": {
    "match_all": {}
  }
}
'
{"count":61070,"_shards":{"total":5,"successful":5,"skipped":0,"failed":0}}
Query executed on setup 1
curl -X GET "localhost:16021/cluster_setup2:index1/_count" -H 'Content-Type: application/json' -d'
{
  "query": {
    "match_all": {}
  }
}
'
{"error":{"root_cause":[{"type":"connect_transport_exception","reason":"[][<setup_2_ip>:16022] connect_timeout[30s]"}],"type":"transport_exception","reason":"unable to communicate with remote cluster [cluster_setup2]","caused_by":{"type":"connect_transport_exception","reason":"[][<setup_2_ip>:16022] connect_timeout[30s]"}},"status":500}
We are able to telnet to the corresponding IP:port in both directions.
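Since the cross-cluster connection is opened from inside the Elasticsearch container rather than from the host, it may also be worth repeating the reachability check from within the container itself. A rough sketch, assuming the service name from the compose file and that the image ships bash and coreutils:

docker-compose exec elasticsearch bash -c 'timeout 5 bash -c "</dev/tcp/<setup_2_ip>/16022" && echo reachable || echo unreachable'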