Hi, I want to store my Elasticsearch snapshots (backups) in S3-compatible storage. Below is the Elasticsearch service from my docker-compose file; the container runs and works well, and I added the following settings to its environment:
version: '3'

services:
  elasticsearch:
    image: focker.ir/elasticsearch/elasticsearch:8.13.4
    container_name: ${CONT_NAME}
    restart: unless-stopped
    hostname: ${NODE_NAME}
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 131072
        hard: 131072
      nproc: 8192
      fsize: -1
    network_mode: bridge
    ports:
      # HTTP/REST and transport — quoted so the port mapping is always a string
      - "9201:9201/tcp"
      - "9301:9301/tcp"
    volumes:
      # mkdir /var/lib/elasticsearch && chown -R 1000:1000 /var/lib/elasticsearch
      - $PWD/var/lib/elasticsearch:/usr/share/elasticsearch/data
      - $PWD/etc/certs:/usr/share/elasticsearch/config/certificates
      - $PWD/etc/elasticsearch/backup:/usr/share/elasticsearch/backup
    environment:
      ES_JAVA_OPTS: '-Xms12g -Xmx12g'
      ELASTIC_PASSWORD: ${ELASTIC_PASSWORD}
      cluster.name: elastiflow
      node.name: ${NODE_NAME}
      bootstrap.memory_lock: 'true'
      network.bind_host: '0.0.0.0'
      network.publish_host: ${NETWORK_PUBLISH_HOST}
      # numeric env values quoted — compose env vars are strings
      http.port: '9201'
      http.publish_port: '9201'
      transport.port: '9301'
      transport.publish_port: '9301'
      discovery.seed_hosts: '${ELASTICSEARCH_HOSTS_1},${ELASTICSEARCH_HOSTS_2},${ELASTICSEARCH_HOSTS_3}'
      # FIX: the original used the literal strings 'ES_NODE_NAME_1,...' with no
      # ${...} substitution, unlike discovery.seed_hosts above — the cluster
      # would wait for master-eligible nodes literally named "ES_NODE_NAME_1".
      cluster.initial_master_nodes: '${ES_NODE_NAME_1},${ES_NODE_NAME_2},${ES_NODE_NAME_3}'
      indices.query.bool.max_clause_count: '8192'
      search.max_buckets: '250000'
      action.destructive_requires_name: 'true'
      reindex.ssl.verification_mode: 'none'
      xpack.security.http.ssl.key: /usr/share/elasticsearch/config/certificates/node.key
      xpack.security.http.ssl.certificate: /usr/share/elasticsearch/config/certificates/node.pem
      # NOTE(review): certificate_authorities should point at the CA
      # *certificate*, but this filename looks like a private key
      # (root-ca-key.pem) — confirm the file actually holds the CA cert.
      xpack.security.http.ssl.certificate_authorities: /usr/share/elasticsearch/config/certificates/root-ca-key.pem
      xpack.security.http.ssl.verification_mode: 'none'
      xpack.security.http.ssl.enabled: 'true'
      xpack.security.transport.ssl.key: /usr/share/elasticsearch/config/certificates/node.key
      xpack.security.transport.ssl.certificate: /usr/share/elasticsearch/config/certificates/node.pem
      xpack.security.transport.ssl.certificate_authorities: /usr/share/elasticsearch/config/certificates/root-ca-key.pem
      xpack.security.transport.ssl.verification_mode: 'none'
      xpack.security.transport.ssl.enabled: 'true'
      xpack.monitoring.collection.enabled: 'true'
      xpack.monitoring.collection.interval: 30s
      xpack.security.enabled: 'true'
      xpack.security.audit.enabled: 'false'
      # path.repo is only required for "fs"-type repositories; it is harmless
      # but unused for an s3-type repository.
      path.repo: '/usr/share/elasticsearch/backup'
      # The endpoint URL already carries the https scheme; the separate
      # "protocol" client setting is redundant (and deprecated in 8.x).
      s3.client.default.endpoint: 'https://xxx.xxx.com'
      s3.client.default.protocol: 'https'
      # LIKELY FIX for "Unable to upload object ... using a single upload" /
      # "path is not accessible on master node": non-AWS S3-compatible stores
      # (Ceph RGW, MinIO, etc. behind nginx) usually require path-style
      # addressing, because virtual-hosted-style requests go to
      # <bucket>.xxx.xxx.com, which your endpoint/DNS/TLS cert may not serve.
      s3.client.default.path_style_access: 'true'
After the container is up, I run the following inside the container:
./bin/elasticsearch-keystore add s3.client.default.access_key
./bin/elasticsearch-keystore add s3.client.default.secret_key
and enter my access key and secret key. I can also see my bucket in S3:
aws s3 ls --endpoint-url https://xxx.xxx.com
2024-06-06 12:10:46 datacenter
I have also registered this snapshot repository:
curl -k -u 'elastic:mypassword' -X GET "https://x.x.x.x:9201/_snapshot/MY_AGG"
{"MY_AGG":{"type":"s3","settings":{"bucket":"datacenter","client":"default","base_path":"elasticsearch_snapshots","endpoint":"https://xxx.xxx.com","protocol":"https","region":"ams1"}}}
but when the repository is created/verified, I get this error:
{
"name": "ResponseError",
"message": "repository_verification_exception\n\tCaused by:\n\t\ti_o_exception: Unable to upload object [elasticsearch_snapshots/tests-b6qrbfGaQUOB0spM_IDTTQ/master.dat] using a single upload\n\tRoot causes:\n\t\trepository_verification_exception: [MY_AGG] path [elasticsearch_snapshots] is not accessible on master node"
}
and the verification status is "not connected".
Also, inside my container, after running
docker exec -it elastic_agg_1 /bin/bash
I get
curl -I https://xxx.xxx.com
HTTP/2 200
server: nginx
date: Thu, 06 Jun 2024 10:50:54 GMT
content-type: application/xml
x-amz-request-id: tx000009xxxxx238a-0xxxxx48e-16415a0a-xxx
strict-transport-security: max-age=31536000; includeSubDomains
Can anyone help me solve this problem?