Elastic Agent to Logstash SSL/TLS errors

I am having issues with Elastic Agent sending events to Logstash.

I have really tried to strip this back to basics with a new deployment (fresh Elasticsearch database) and only the minimum of settings.

I am running an on-premises implementation using Docker, with Compose to start up the containers.

My docker compose file is:

version: '3.2'
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.3.3
    restart: always
    container_name: elasticsearch
    hostname: elasticsearch
    environment:
      - node.name=elasticsearch
      - discovery.type=single-node
      - xpack.security.enabled=true
      - xpack.security.http.ssl.enabled=true
      - xpack.security.http.ssl.key=elasticsearch.key
      - xpack.security.http.ssl.certificate=elasticsearch.crt
      - xpack.security.http.ssl.certificate_authorities=ca.crt
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.key=elasticsearch.key
      - xpack.security.transport.ssl.certificate=elasticsearch.crt
      - xpack.security.transport.ssl.certificate_authorities=ca.crt
      - logger.level=WARN
      - TZ=Pacific/Auckland
      - ELASTIC_PASSWORD=xxx
    volumes:
      - ./certs/elasticsearch/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro
      - ./certs/elasticsearch/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro
      - ./certs/ca/ca.crt:/usr/share/elasticsearch/config/ca.crt:ro
      - es_data:/usr/share/elasticsearch/data
    ports:
      - "9200:9200"
    networks:
      - esnet
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "curl -s --cacert config/ca.crt https://elasticsearch:9200 | grep -q 'missing authentication credentials'",
        ]
      interval: 10s
      timeout: 10s
      retries: 120

  logstash:
#    image: docker.elastic.co/logstash/logstash:7.17.1
    build: ./logstash
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    container_name: logstash
    hostname: logstash
    environment:
      - TZ=Pacific/Auckland
      - xpack.monitoring.enabled=true
      - xpack.monitoring.elasticsearch.username="logstash_system"
      - xpack.monitoring.elasticsearch.password="xxx"
      - xpack.monitoring.elasticsearch.hosts="https://elasticsearch:9200"
      - xpack.security.transport.ssl.enabled=true
      - xpack.monitoring.elasticsearch.ssl.certificate_authority="/usr/share/logstash/config/ca.crt"
      - log.level=warn
    volumes:
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro
      - ./logstash/config/pipelines.yml:/usr/share/logstash/config/pipelines.yml:ro
      - ./logstash/config/jvm.options:/usr/share/logstash/config/jvm.options:ro
      - ./certs/ca/ca.crt:/usr/share/logstash/config/ca.crt:ro
      - ./certs/logstash/logstash.crt:/usr/share/logstash/config/logstash.crt:ro
      - ./certs/logstash/logstash.pkcs8.key:/usr/share/logstash/config/logstash.pkcs8.key:ro
      - ./logstash/mariadb-java-client-2.7.2.jar:/usr/share/logstash/mariadb-java-client-2.7.2.jar:ro
      - ./logstash/jdbc_tracking:/usr/share/logstash/jdbc_tracking:rw
    ports:
      - "8085:8085/tcp"
      - "5044:5044/tcp"
      - "514:5514/udp"
    networks:
      - esnet

  kibana:
    image: docker.elastic.co/kibana/kibana:8.3.3
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    container_name: kibana
    hostname: kibana
    environment:
      - SERVER_NAME=kibana
      - SERVER_BASEPATH=/kibana
      - SERVER_REWRITEBASEPATH=true
      - SERVER_PUBLICBASEURL=http://xxx:5601/kibana
      - XPACK_REPORTING_KIBANASERVER_HOSTNAME=kibana
      - XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=xx
      - ELASTICSEARCH_HOSTS=https://elasticsearch:9200
      - ELASTICSEARCH_SERVICEACCOUNTTOKEN=xx
      - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=/usr/share/kibana/config/ca.crt
      - TZ=Pacific/Auckland
    volumes:
      - ./certs/kibana/kibana.key:/usr/share/kibana/config/kibana.key:ro
      - ./certs/kibana/kibana.crt:/usr/share/kibana/config/kibana.crt:ro
      - ./certs/ca/ca.crt:/usr/share/kibana/config/ca.crt:ro
    ports:
      - "5601:5601"
    networks:
      - esnet

  elastic-agent:
    image: docker.elastic.co/beats/elastic-agent:8.3.3
    restart: always
    depends_on:
      elasticsearch:
        condition: service_healthy
    container_name: elastic-agent
    hostname: elastic-agent
    environment:
      - TZ=Pacific/Auckland
      - FLEET_SERVER_ENABLE=true
      - FLEET_URL=https://elastic-agent:8220
      - FLEET_INSECURE=true
      - FLEET_SERVER_ELASTICSEARCH_HOST=https://elasticsearch:9200
      - FLEET_SERVER_CERT=/usr/share/elastic-agent/config/elastic-agent.crt
      - FLEET_SERVER_CERT_KEY=/usr/share/elastic-agent/config/elastic-agent.key
      - FLEET_SERVER_ELASTICSEARCH_CA=/usr/share/elastic-agent/config/ca.crt
      - FLEET_SERVER_SERVICE_TOKEN=xxx
    volumes:
      - ./certs/elastic-agent/elastic-agent.key:/usr/share/elastic-agent/config/elastic-agent.key:ro
      - ./certs/elastic-agent/elastic-agent.crt:/usr/share/elastic-agent/config/elastic-agent.crt:ro
      - ./certs/ca/ca.crt:/usr/share/elastic-agent/config/ca.crt:ro
    ports:
      - "8220:8220"
      - "9001:9001/udp"
      - "9002:9002/udp"
    networks:
      - esnet

volumes:
  es_data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: "/opt/elk/elk_data"

networks:
  esnet:
    enable_ipv6: false
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 192.168.200.0/24

The certificates were all generated at the same time using the elasticsearch-certutil utility and are all signed by the same CA, roughly as sketched below.
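This is only a sketch, not the exact commands I ran (instance names and flags are approximate); the Logstash key is converted to PKCS#8 because the input plugin expects that format:

# Create the CA, then one cert per service signed by it (Logstash shown)
bin/elasticsearch-certutil ca --pem --out ca.zip
bin/elasticsearch-certutil cert --pem --ca-cert ca/ca.crt --ca-key ca/ca.key \
  --name logstash --dns logstash --out logstash.zip
# Convert the Logstash key to PKCS#8 for the elastic_agent/beats input
openssl pkcs8 -topk8 -nocrypt -in logstash.key -out logstash.pkcs8.key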

The Fleet Server output is configured as Logstash, with the certificates for the Elastic Agent added in the Fleet settings area of the Kibana GUI, along the lines of the sketch below.
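This is a sketch of what that output should render to in the agent policy (field names from the Elastic Agent Logstash output settings; the values stand in for the PEM contents pasted into the Fleet UI):

outputs:
  default:
    type: logstash
    hosts: ["logstash:5044"]
    ssl.certificate_authorities: ["<contents of ca.crt>"]
    ssl.certificate: "<contents of elastic-agent.crt>"
    ssl.key: "<contents of elastic-agent.key>"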

I get the following error in the Logstash logs (many, many times, so I am assuming it is logged for every event):

logstash         | Caused by: io.netty.handler.ssl.NotSslRecordException: not an SSL/TLS record: 325700000034324300004c80785eecbd07605c49721e8c3fd8a21ce52c67787cb24e36097675555757c341924f27cbd2ea2cdfae934ee7757557f512b72086c60cf76e75a673ce39e79c73ce39e79c73ce39c737 [... several kilobytes of hex payload truncated ...]
logstash         |      at io.netty.handler.ssl.SslHandler.decodeJdkCompatible(SslHandler.java:1213) ~[netty-all-4.1.65.Final.jar:4.1.65.Final]
logstash         |      at io.netty.handler.ssl.SslHandler.decode(SslHandler.java:1280) ~[netty-all-4.1.65.Final.jar:4.1.65.Final]
logstash         |      at io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:507) ~[netty-all-4.1.65.Final.jar:4.1.65.Final]
logstash         |      at io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:446) ~[netty-all-4.1.65.Final.jar:4.1.65.Final]
logstash         |      ... 17 more

The pipeline config in Logstash is:

input {
        elastic_agent {
                port => 5044
                ssl => true
                ssl_certificate_authorities => ["/usr/share/logstash/config/ca.crt"]
                ssl_certificate => "/usr/share/logstash/config/logstash.crt"
                ssl_key => "/usr/share/logstash/config/logstash.pkcs8.key"
                ssl_verify_mode => "peer"
        }
}

output {
        elasticsearch {
                hosts => "https://elasticsearch:9200"
                data_stream => true
                cacert => "/usr/share/logstash/config/ca.crt"
                user => "logstash_internal"
                password => "xxx"
        }
}
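For what it's worth, the TLS side of the input can be sanity-checked with something along these lines (host, port, and cert paths assumed from the compose file above, run from the compose project directory on the Docker host):

# Handshake test against the elastic_agent input on 5044
openssl s_client -connect localhost:5044 \
  -CAfile ./certs/ca/ca.crt \
  -cert ./certs/elastic-agent/elastic-agent.crt \
  -key ./certs/elastic-agent/elastic-agent.key </dev/null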

The strange thing is that it actually seems to work - the events end up in Elasticsearch, I can see the data streams updating on the Fleet Server page in Kibana, and I can see the events in the dashboards.

The message is just a warning, but it occurs so frequently that it is creating huge log volumes in Logstash.
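I could probably mask the noise with a logger override in log4j2.properties, along these lines (the logger package name is an assumption based on the beats input; this would only hide the symptom, not fix whatever is sending non-TLS data):

# Assumed log4j2.properties addition; mount it into the container like
# the other Logstash config files above
logger.beats.name = org.logstash.beats
logger.beats.level = ERROR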

Any help greatly appreciated.
