Hi,
You cannot sync automatically from prod -> dev and manually from dev -> prod.
Building a new cluster and syncing the data is not going to work if your cluster is RED; you need to figure out why the cluster is RED and fix that issue.
I would start by making the cluster config more explicit — set the cluster name and IP config so you get control over your configuration. My elasticsearch.yml looks like this:
# --- Cluster / node identity and paths ---
cluster.name: clog
node.name: tb-clog-esd1.tb.iss.local
path.data: /opt/elasticdb/data
path.logs: /var/log/elasticsearch
bootstrap.memory_lock: true
#
# Bind to the node's own address plus loopback.
network.host: ['10.80.3.11', '127.0.0.1']
#
# --- Zen discovery: explicit seed hosts and master quorum ---
discovery.zen.master_election.ignore_non_master_pings: true
discovery.zen.ping.unicast.hosts: ["10.80.3.10","10.80.3.11","10.80.3.12","10.80.3.13","10.80.3.14","10.80.3.15","10.80.3.16","10.80.3.17","10.80.3.18","10.80.3.19","10.80.3.20","10.80.3.21","10.80.3.22","10.80.3.23","10.80.3.24","10.80.3.25","10.80.3.26","10.80.3.27","10.80.3.28","10.80.3.29","10.80.3.30","10.80.3.4","10.80.3.5","10.80.3.6","10.80.3.7","10.80.3.8","10.80.3.9"]
discovery.zen.minimum_master_nodes: 2
# Delay recovery until enough data nodes have rejoined after a restart.
gateway.recover_after_time: 10m
gateway.recover_after_nodes: 12
gateway.expected_data_nodes: 20
# --- Node roles: data node only (not master-eligible) ---
node.master: false
node.data: true
node.ingest: true
node.ml: true
#
# --- Monitoring: ship stats to a dedicated monitoring cluster over HTTP ---
xpack.http.ssl.verification_mode: certificate
xpack.watcher.index.rest.direct_access: true
xpack.monitoring.enabled: true
xpack.monitoring.exporters:
  clog:
    type: http
    host: ["http://10.80.3.80:9200"]
    auth.username: "elastic"
    auth.password: "xxxxx"
xpack.monitoring.collection.indices: '*'
xpack.monitoring.collection.interval: 30s
# --- Reporting / email notification settings ---
xpack.notification.email.account:
  standard_account:
    profile: standard
    email_defaults:
      from: email@example.com
    smtp:
      auth: false
      starttls.enable: false
      host: smtp.host
      port: 25
# --- Transport (node-to-node) encryption ---
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.key: /etc/elasticsearch/certs/tb-clog-esd1.key
xpack.security.transport.ssl.certificate: /etc/elasticsearch/certs/tb-clog-esd1.crt
xpack.security.transport.ssl.certificate_authorities: [ "/etc/elasticsearch/certs/ca.crt" ]
#
# --- HTTP client encryption (currently disabled; certs staged for enabling later) ---
xpack.security.http.ssl.enabled: false
xpack.security.http.ssl.key: /etc/elasticsearch/certs/tb-clog-esd1.key
xpack.security.http.ssl.certificate: /etc/elasticsearch/certs/tb-clog-esd1.crt
xpack.security.http.ssl.certificate_authorities: [ "/etc/elasticsearch/certs/ca.crt" ]
#
# --- Security: native realm first, then LDAP ---
xpack.security.enabled: true
xpack:
  security:
    authc:
      realms:
        native1:
          type: native
          order: 0
        ldap1:
          type: ldap
          order: 1
          url: "ldap://ldapc.host:489"
          user_search:
            base_dn: "ou=people,dc=boss,dc=host"
            attribute: uid
          group_search:
            base_dn: "ou=groups,dc=boss,dc=host"
          files:
            role_mapping: "/etc/elasticsearch/role_mapping.yml"
          unmapped_groups_as_roles: false
#
# --- HTTP / transport tuning ---
http.cors.enabled: true
http.cors.allow-origin: "/.*/"
transport.tcp.compress: true
# Node attribute used for hot/warm shard allocation filtering.
node.attr.box_type: ssd
That said, without knowing the actual problem and with no log snippets, it is just guessing.
Good luck
Paul.