Elasticsearch fails to start with `ERROR: Missing logging config file at xxx/log4j2.properties`.

es version: elasticsearch:8.18.0

My docker-compose YAML file:

  elastic:
    image: "${ELASTIC_IMAGE}"
    container_name: elastic
    read_only: true
    user: "${UID:-1000}:${GID:-1000}"  # Run as non-root user
    deploy:
      resources:
        limits:
          cpus: "2.0"
          memory: "4096M"
        reservations:
          cpus: "1.0"
          memory: "2048M"
    networks:
      - middleware
    environment:
      TZ: Asia/Shanghai
      ELASTIC_PASSWORD: "${ELASTIC_PASSWORD}"
      ES_JAVA_OPTS: "-Xms2g -Xmx2g -XX:+UseG1GC -XX:G1ReservePercent=25 -XX:+HeapDumpOnOutOfMemoryError"
      # All Elasticsearch settings below are passed as env vars, so the image's
      # built-in config files can be used unchanged.
      discovery.type: single-node
      ES_PATH_CONF: /usr/share/elasticsearch/config
      cluster.name: "es-server"
      node.name: "single-node"
      node.roles: "master,data,ingest"
      network.host: "0.0.0.0"
      http.port: "9200"
      path.logs: "/usr/share/elasticsearch/logs"
      xpack.security.enabled: "true"
      http.cors.enabled: "true"
      http.cors.allow-origin: "\"*\""
    ports:
      - "${ELASTIC_PORT}:9200"
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - ./data/elastic/data:/usr/share/elasticsearch/data:rw
      - ./data/elastic/logs:/usr/share/elasticsearch/logs:rw
      - ./data/elastic/plugins:/usr/share/elasticsearch/plugins:rw
      # NOTE: do NOT bind-mount an (initially empty) host directory over
      # /usr/share/elasticsearch/config — it shadows the image's default
      # elasticsearch.yml / jvm.options / log4j2.properties and is exactly what
      # triggers "Missing logging config file ... log4j2.properties".
      # If you need the config on the host, copy it out of the image first
      # (docker cp) and chown it to ${UID}:${GID} before mounting:
      # - ./data/elastic/config:/usr/share/elasticsearch/config:rw
    tmpfs:
      - /tmp:rw,exec
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      # ${ELASTIC_PASSWORD} is substituted by compose at parse time from .env.
      test: ["CMD", "sh", "-c", "curl -f -u elastic:${ELASTIC_PASSWORD} http://localhost:9200/_cluster/health?pretty"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 10s
    restart: always

# Shared bridge network for the middleware stack.
networks:
  middleware:
    driver: bridge

Now I have to manually generate and mount the log configuration file for it to start normally. However, due to security requirements, we maintain the Elasticsearch service as an ordinary (non-root) user. When the configuration files are bind-mounted this way, they end up owned by root rather than by the default ordinary user, which conflicts with our requirement that an ordinary user maintain the service.

[root@localhost ~]# ll /data/guodi/installmiddleware/data/elastic/config/
total 4
-rw-rw---- 1 guodi guodi 238 Jun  4 12:26 elasticsearch.keystore
-rwxr-xr-x 1 root  root    0 Jun  4 12:33 elasticsearch.yml
-rwxr-xr-x 1 root  root    0 Jun  4 12:33 jvm.options
-rwxr-xr-x 1 root  root    0 Jun  4 12:29 log4j2.properties


[guodi@localhost installmiddleware]$ docker-compose ps -a
NAME      IMAGE                  COMMAND                  SERVICE   CREATED              STATUS                        PORTS
elastic   elasticsearch:8.18.0   "/bin/tini -- /usr/l…"   elastic   About a minute ago   Up About a minute (healthy)   0.0.0.0:9200->9200/tcp, :::9200->9200/tcp, 9300/tcp
  elastic:
    image: "${ELASTIC_IMAGE}"
    container_name: elastic
    read_only: true
    user: "${UID:-1000}:${GID:-1000}"  # Run as non-root user
    deploy:
      resources:
        limits:
          cpus: "2.0"
          memory: "4096M"
        reservations:
          cpus: "1.0"
          memory: "2048M"
    networks:
      - middleware
    environment:
      TZ: Asia/Shanghai
      ELASTIC_PASSWORD: "${ELASTIC_PASSWORD}"
      ES_JAVA_OPTS: "-Xms2g -Xmx2g -XX:+UseG1GC -XX:G1ReservePercent=25 -XX:+HeapDumpOnOutOfMemoryError"
    ports:
      - "${ELASTIC_PORT}:9200"
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - ./data/elastic/data:/usr/share/elasticsearch/data:rw
      - ./data/elastic/logs:/usr/share/elasticsearch/logs:rw
      - ./data/elastic/plugins:/usr/share/elasticsearch/plugins:rw
      - ./data/elastic/config:/usr/share/elasticsearch/config:rw
      # Single-file bind mounts keep the HOST file's owner inside the container.
      # chown ./conf/* to the compose user (${UID:-1000}:${GID:-1000}) on the host
      # before starting and they will no longer appear root-owned. They are :ro,
      # so Elasticsearch never needs write access to them; only the config dir
      # above must stay writable (for elasticsearch.keystore).
      - ./conf/elasticsearch-log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
      - ./conf/elasticsearch-jvm.options:/usr/share/elasticsearch/config/jvm.options:ro
      - ./conf/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
    tmpfs:
      - /tmp:rw,exec
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      test: ["CMD", "sh", "-c", "curl -f -u elastic:${ELASTIC_PASSWORD} http://localhost:9200/_cluster/health?pretty"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 10s
    restart: always

Not sure if this will help since I have only run Kibana in a container, but I have this as part of my values.yaml file:

# Pod-level security context (Kibana Helm chart values.yaml).
podSecurityContext:
  fsGroup: 996          # Kibana group from the RPM package is 996.
  runAsNonRoot: true
  runAsUser: 998        # Kibana user from the RPM package is 998.
  runAsGroup: 996       # Kibana group from the RPM package is 996.
  seccompProfile:
    type: "RuntimeDefault"

# Container-level security context.
securityContext:
  allowPrivilegeEscalation: false
  capabilities:
    drop:
    - ALL
  readOnlyRootFilesystem: true

Maybe you can check what user and group your Elasticsearch runs as, and see whether you need something similar in your Helm chart?

Sorry — I ran this locally with docker-compose, not with a Helm chart.