Elastic Agent deploys successfully but has no data streams

Hi there!
I have a problem with Elastic Agent when running the ELK stack on Docker. I use two docker-compose files: one that creates the certificates and one that deploys ELK. The agent enrolls successfully [1], but Elasticsearch logs a bad_certificate error [2] and no data streams show up.

[1] Elastic Agent enrollment log:

 sudo elastic-agent enroll --url=https://192.168.1.200:8220 \
    --fleet-server-es=https://192.168.1.200:9992 \
    --fleet-server-service-token=AAEAAWVsYXN0aWMvZmxlZXQtc2VydmVyL3Rva2VuLTE2NjAyMDEwNDY5NjI6N0FRUXN2enZUaDZYUmdjbVd1eUViZw   \
    --fleet-server-policy=499b5aa7-d214-5b5d-838b-3cd76469844e \
    --certificate-authorities=/home/h4niz/secrets/certificate_authority/ca/ca.crt \
    --fleet-server-es-ca=/home/h4niz/secrets/certificate_authority/ca/ca.crt \
    --fleet-server-cert=/home/h4niz/secrets/certificates/elasticsearch/elasticsearch.crt \
    --fleet-server-cert-key=/home/h4niz/secrets/certificates/elasticsearch/elasticsearch.key

This will replace your current settings. Do you want to continue? [Y/n]:
2022-08-11T14:11:22.426+0700 INFO cmd/enroll_cmd.go:776 Fleet Server - Stopping
2022-08-11T14:12:24.445+0700 INFO cmd/enroll_cmd.go:757 Fleet Server - Running on policy with Fleet Server integration: 499b5aa7-d214-5b5d-838b-3cd76469844e; missing config fleet.agent.id (expected during bootstrap process)
2022-08-11T14:12:25.149+0700 INFO cmd/enroll_cmd.go:454 Starting enrollment to URL: https://192.168.1.200:8220/
2022-08-11T14:12:26.296+0700 INFO cmd/enroll_cmd.go:254 Successfully triggered restart on running Elastic Agent.
Successfully enrolled the Elastic Agent.
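
For reference, the agent's own view can be checked right after enrolling; this is a sketch, assuming the agent was installed as a systemd service:

# Ask the agent for its health and its connection state to Fleet
sudo elastic-agent status

# Watch the agent's logs for TLS errors on its side of the handshake
sudo journalctl -u elastic-agent -f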

[2] Elasticsearch error log:

elasticsearch | {"type": "server", "timestamp": "2022-08-11T08:38:31,188Z", "level": "WARN", "component": "o.e.h.AbstractHttpServerTransport", "cluster.name": "docker-cluster", "node.name": "5eaa2b31f9e6", "message": "caught exception while handling client http traffic, closing connection Netty4HttpChannel{localAddress=/172.30.0.3:9200, remoteAddress=/192.168.1.200:38796}", "cluster.uuid": "ifz7z072T7-TUVgrXVcIEw", "node.id": "KzOwV-ZxQRyaCFiiYelM0Q" ,
elasticsearch | "stacktrace": ["io.netty.handler.codec.DecoderException: javax.net.ssl.SSLHandshakeException: Received fatal alert: bad_certificate",
elasticsearch | "at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:477) ~[netty-codec-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:276) ~[netty-codec-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) [netty-transport-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) [netty-transport-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357) [netty-transport-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1410) [netty-transport-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379) [netty-transport-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365) [netty-transport-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:919) [netty-transport-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) [netty-transport-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:719) [netty-transport-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:620) [netty-transport-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:583) [netty-transport-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:493) [netty-transport-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:986) [netty-common-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) [netty-common-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at java.lang.Thread.run(Thread.java:833) [?:?]",
elasticsearch | "Caused by: javax.net.ssl.SSLHandshakeException: Received fatal alert: bad_certificate",
elasticsearch | "at sun.security.ssl.Alert.createSSLException(Alert.java:131) ~[?:?]",
elasticsearch | "at sun.security.ssl.Alert.createSSLException(Alert.java:117) ~[?:?]",
elasticsearch | "at sun.security.ssl.TransportContext.fatal(TransportContext.java:358) ~[?:?]",
elasticsearch | "at sun.security.ssl.Alert$AlertConsumer.consume(Alert.java:293) ~[?:?]",
elasticsearch | "at sun.security.ssl.TransportContext.dispatch(TransportContext.java:204) ~[?:?]",
elasticsearch | "at sun.security.ssl.SSLTransport.decode(SSLTransport.java:172) ~[?:?]",
elasticsearch | "at sun.security.ssl.SSLEngineImpl.decode(SSLEngineImpl.java:736) ~[?:?]",
elasticsearch | "at sun.security.ssl.SSLEngineImpl.readRecord(SSLEngineImpl.java:691) ~[?:?]",
elasticsearch | "at sun.security.ssl.SSLEngineImpl.unwrap(SSLEngineImpl.java:506) ~[?:?]",
elasticsearch | "at sun.security.ssl.SSLEngineImpl.unwrap(SSLEngineImpl.java:482) ~[?:?]",
elasticsearch | "at javax.net.ssl.SSLEngine.unwrap(SSLEngine.java:679) ~[?:?]",
elasticsearch | "at io.netty.handler.ssl.SslHandler$SslEngineType$3.unwrap(SslHandler.java:298) ~[netty-handler-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.handler.ssl.SslHandler.unwrap(SslHandler.java:1344) ~[netty-handler-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.handler.ssl.SslHandler.decodeJdkCompatible(SslHandler.java:1237) ~[netty-handler-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.handler.ssl.SslHandler.decode(SslHandler.java:1286) ~[netty-handler-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:507) ~[netty-codec-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:446) ~[netty-codec-4.1.66.Final.jar:4.1.66.Final]",
elasticsearch | "... 16 more"] }

To make sure my certificate is good, I connected to the Elasticsearch service with the CA using cURL:

curl -H "Authorization: Bearer AAEAAWVsYXN0aWMvZmxlZXQtc2VydmVyL3Rva2VuLTE2NjAyMDEwNDY5NjI6N0FRUXN2enZUaDZYUmdjbVd1eUViZw" --cacert /home/h4niz/secrets/certificate_authority/ca/ca.crt https://192.168.1.200:9992
{
  "name" : "5eaa2b31f9e6",
  "cluster_name" : "docker-cluster",
  "cluster_uuid" : "ifz7z072T7-TUVgrXVcIEw",
  "version" : {
    "number" : "7.17.1",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "e5acb99f822233d62d6444ce45a4543dc1c8059a",
    "build_date" : "2022-02-23T22:20:54.153567231Z",
    "build_snapshot" : false,
    "lucene_version" : "8.11.1",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}

Everything works correctly, and this request does not trigger a bad_certificate error in the Elasticsearch log.
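
The certificate files can also be cross-checked on disk; a sketch (the modulus comparison assumes an RSA key):

# The leaf certificate should verify against the CA handed to the agent
openssl verify -CAfile /home/h4niz/secrets/certificate_authority/ca/ca.crt \
  /home/h4niz/secrets/certificates/elasticsearch/elasticsearch.crt

# The certificate and key must belong together: the two digests must match
openssl x509 -noout -modulus -in /home/h4niz/secrets/certificates/elasticsearch/elasticsearch.crt | openssl md5
openssl rsa -noout -modulus -in /home/h4niz/secrets/certificates/elasticsearch/elasticsearch.key | openssl md5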

I searched Google with keywords like "elastic-agent has no data stream with tls enabled" and similar, but I could not solve the issue.

So, do you have any ideas for solving my problem?


Here are my docker-compose files:

  • Docker-compose file to create the CA and certificates
version: '3.7'

services:
  certs:
    container_name: certs
    image: docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION}
    command: bash ${ELASTIC_DIR}/config/setup.sh
    user: "0"
    volumes:
      - ./swag:/swag/
      - ./secrets:/secrets/
      - ./setup/setup.sh:${ELASTIC_DIR}/config/setup.sh
      - ./setup/instances.yml:${ELASTIC_DIR}/config/instances.yml:ro
    environment: 
      ELASTIC_PASSWORD: ${ELASTIC_PASSWORD}
      SUBDOMAIN: ${SUBDOMAIN}
      SUBFOLDER: ${SUBFOLDER}
      STAGING: ${STAGING}
    networks: 
      - elk

volumes:
  secrets:
    driver: local
  setup:
    driver: local

networks:
  elk:
    driver: bridge
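
The certificates come from ./setup/instances.yml mounted above, which follows the elasticsearch-certutil format. For illustration only (a sketch, not my exact file), an entry looks like:

instances:
  - name: elasticsearch
    dns:
      - elasticsearch
      - localhost
    ip:
      - 127.0.0.1
      - 192.168.1.200  # the host IP the agent dials must appear as a SAN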

  • Docker-compose file to deploy ELK with TheHive server.
version: '3.7'

secrets:
  ca.crt:
    file: ./secrets/certificate_authority/ca/ca.crt
  elasticsearch.keystore:
    file: ./secrets/elasticsearch.keystore
  elastic-stack-ca.p12:
    file: ./secrets/certificate_authority/elastic-stack-ca.p12
  elasticsearch.key:
    file: ./secrets/certificates/elasticsearch/elasticsearch.key
  elasticsearch.cert:
    file: ./secrets/certificates/elasticsearch/elasticsearch.crt
  kibana.key:
    file: ./secrets/certificates/kibana/kibana.key
  kibana.cert:
    file: ./secrets/certificates/kibana/kibana.crt
  logstash.pkcs8.key:
    file: ./secrets/certificates/logstash/logstash.pkcs8.key
  logstash.key:
    file: ./secrets/certificates/logstash/logstash.key
  logstash.p12:
    file: ./secrets/keystores/logstash/logstash.p12
  logstash.cert:
    file: ./secrets/certificates/logstash/logstash.crt
  filebeat.key:
    file: ./secrets/certificates/filebeat/filebeat.key
  filebeat.cert:
    file: ./secrets/certificates/filebeat/filebeat.crt
  metricbeat.key:
    file: ./secrets/certificates/metricbeat/metricbeat.key
  metricbeat.cert:
    file: ./secrets/certificates/metricbeat/metricbeat.crt
  packetbeat.key:
    file: ./secrets/certificates/packetbeat/packetbeat.key
  packetbeat.cert:
    file: ./secrets/certificates/packetbeat/packetbeat.crt

services:
  # The 'setup' service runs a one-off script which initializes the
  # 'logstash_internal' and 'kibana_system' users inside Elasticsearch with the
  # values of the passwords defined in the '.env' file.
  #
  # This task is only performed during the *initial* startup of the stack. On all
  # subsequent runs, the service simply returns immediately, without performing
  # any modification to existing users.
  setup:
    build:
      context: setup/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    init: true
    volumes:
      - setup:/state:Z
    environment:
      ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
      LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
      KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
    networks:
      - elk

  swag:
    image: linuxserver/swag
    container_name: swag
    cap_add:
      - NET_ADMIN
    environment:
      - PUID=2000
      - PGID=2000
      - TZ=${TIMEZONE}
      - URL=${DOMAIN}
      - SUBDOMAINS=${SUBDOMAIN}
      - VALIDATION=http
      - EMAIL=${EMAIL}
      - STAGING=false
    volumes:
      - ./swag:/config
    ports:
      - 443:443
      - 80:80
    restart: unless-stopped
    networks:
      - elk
    depends_on: 
      - elasticsearch
      - kibana

  elasticsearch:
    container_name: elasticsearch
    build:
      context: elasticsearch/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - elasticsearch:/usr/share/elasticsearch/data:z
      - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro,z 
    secrets:
      - source: elasticsearch.keystore
        target: ${ELASTIC_DIR}/config/elasticsearch.keystore
      - source: ca.crt
        target: ${ELASTIC_DIR}/config/ca.crt
      - source: elasticsearch.cert
        target: ${ELASTIC_DIR}/config/elasticsearch.crt
      - source: elasticsearch.key
        target: ${ELASTIC_DIR}/config/elasticsearch.key
    ports:
      - "9992:9200"
      - "9993:9300"
    environment:
      ES_JAVA_OPTS: -Xms512m -Xmx512m
      CONFIG_DIR: ${ELASTIC_DIR}/config
      # Bootstrap password.
      # Used to initialize the keystore during the initial startup of
      # Elasticsearch. Ignored on subsequent runs.
      ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
      # Use single node discovery in order to disable production mode and avoid bootstrap checks.
      # see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
      discovery.type: single-node
    healthcheck:
      # curl exit code 52 means "empty reply": the port answered, so treat it as
      # healthy. 'echo' always exits 0 and would make the check pass unconditionally,
      # so the script must 'exit' with the status instead.
      test: curl -s https://elasticsearch:9200 >/dev/null; if [[ $$? == 52 ]]; then exit 0; else exit 1; fi
      interval: 30s
      timeout: 10s
      retries: 5
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 200000
        hard: 200000
    networks:
      - elk

  logstash:
    container_name: logstash
    build:
      context: logstash/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro,Z
    secrets:
      - source: ca.crt
        target: ${LOGSTASH_DIR}/config/ca.crt
      - source: logstash.cert
        target: ${LOGSTASH_DIR}/config/logstash.crt
      - source: logstash.pkcs8.key
        target: ${LOGSTASH_DIR}/config/logstash.pkcs8.key
      - source: logstash.key
        target: ${LOGSTASH_DIR}/config/logstash.key
      - source: logstash.p12
        target: ${LOGSTASH_DIR}/config/logstash.p12
    ports:
      - "9954:5044"
      - "9950:5000/tcp"
      - "9950:5000/udp"
      - "9996:9600"
    environment:
      LS_JAVA_OPTS: -Xms256m -Xmx256m
      CONFIG_DIR: ${LOGSTASH_DIR}/config
      LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
    networks:
      - elk
    depends_on:
      - elasticsearch

  kibana:
    container_name: kibana
    build:
      context: kibana/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./kibana/config/kibana.yml:${KIBANA_DIR}/config/kibana.yml:ro
    ports:
      - "9956:5601"
    environment:
      CONFIG_DIR: ${KIBANA_DIR}/config
      ENCRYPTION_KEY: ${XPACK_ENCRYPTION_KEY}
      KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
    secrets:
      - source: ca.crt
        target: ${KIBANA_DIR}/config/ca.crt
      - source: kibana.cert
        target: ${KIBANA_DIR}/config/kibana.crt
      - source: kibana.key
        target: ${KIBANA_DIR}/config/kibana.key
    healthcheck:
      # Same fix as the elasticsearch healthcheck: 'exit' with the status
      # instead of 'echo', which always exits 0.
      test: curl -s https://kibana:5601 >/dev/null; if [[ $$? == 52 ]]; then exit 0; else exit 1; fi
      interval: 30s
      timeout: 10s
      retries: 5
    networks:
      - elk
    depends_on:
      - elasticsearch

  cortex:
    image: 'thehiveproject/cortex:latest'
    container_name: cortex
    restart: unless-stopped
    command:
      --job-directory ${JOB_DIRECTORY}
    environment:
      - 'JOB_DIRECTORY=${JOB_DIRECTORY}'
    volumes:
      - './cortex/application.conf:/etc/cortex/application.conf'
      - './cortex/jobs:${JOB_DIRECTORY}'
      - '/var/run/docker.sock:/var/run/docker.sock'
    ports:
      - '8891:9001'
    networks:
      - elk

  postgres:
    image: postgres
    container_name: PostgresDB
    restart: unless-stopped
    environment:
      - 'POSTGRES_USER=${POSTGRES_USER}'
      - 'POSTGRES_PASSWORD=${POSTGRES_PASSWORD}'
      - 'POSTGRES_DB=${POSTGRES_DB}'
    volumes:
      - './postgres/data:/var/lib/postgresql/data'
    networks:
      - elk

  n8n:
    image: n8nio/n8n
    container_name: n8n
    restart: unless-stopped
    environment:
      - DB_TYPE=postgresdb
      - DB_POSTGRESDB_HOST=postgres
      - DB_POSTGRESDB_PORT=5432
      - 'DB_POSTGRESDB_DATABASE=${POSTGRES_DB}'
      - 'DB_POSTGRESDB_USER=${POSTGRES_USER}'
      - 'DB_POSTGRESDB_PASSWORD=${POSTGRES_PASSWORD}'
      - 'N8N_BASIC_AUTH_ACTIVE=${N8N_BASIC_AUTH_ACTIVE}'
      - 'N8N_BASIC_AUTH_USER=${N8N_BASIC_AUTH_USER}'
      - 'N8N_BASIC_AUTH_PASSWORD=${N8N_BASIC_AUTH_PASSWORD}'
    ports:
      - '8878:5678'
    links:
      - postgres
    volumes:
      - './n8n/.n8n:/root/.n8n'
      - './n8n/workflows:/opt/workflows'
    networks:
      - elk
      - default
    command: n8n start

  thehive:
    image: 'thehiveproject/thehive4:latest'
    container_name: 'thehive4'
    ports:
      - '8890:9000'
    volumes:
      - './thehive/application.conf:/etc/thehive/application.conf'
      - './thehive/db:/opt/thp/thehive/db'
      - './thehive/data:/opt/thp/thehive/data'
      - './thehive/index:/opt/thp/thehive/index'
    networks:
      - elk
    command: '--no-config --no-config-secret'

  # elastic-agent:
  #   container_name: elastic-agent
  #   hostname: elastic-agent
  #   build:
  #     context: elastic-agent/
  #     args:
  #       ELK_VERSION: $ELK_VERSION
  #   restart: unless-stopped
  #   environment:
  #     FLEET_CA: '/ca.crt'
  #     ELK_VERSION: ${ELK_VERSION}
  #     KIBANA_HOST: "https://kibana:5601"
  #     ELASTICSEARCH_USERNAME: ${ELASTIC_USERNAME}
  #     ELASTICSEARCH_PASSWORD: ${ELASTIC_PASSWORD}
  #     ELASTICSEARCH_HOSTS: "https://elasticsearch:9200"
  #     FLEET_ENROLL_INSECURE: 1
  #     ENROLL_FORCE: 1
  #     PREFLIGHT_CHECK: 1
  #   secrets:
  #     - source: ca.crt
  #       target: /ca.crt
  #   ports:
  #     - "22:22" 
  #   networks:
  #     - elk
  #   depends_on: 
  #     - logstash

networks:
  elk:
    driver: bridge
  default: null

volumes:
  setup:
  elasticsearch:
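
The elasticsearch.yml mounted into the container isn't shown above; its TLS block points at the secret targets from the compose file, roughly like this (a sketch, assuming the standard 7.17 xpack settings; relative paths resolve against the config directory):

xpack.security.enabled: true
xpack.security.http.ssl.enabled: true
xpack.security.http.ssl.key: elasticsearch.key
xpack.security.http.ssl.certificate: elasticsearch.crt
xpack.security.http.ssl.certificate_authorities: [ ca.crt ]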

Any ideas? :frowning:
