How to configure elastic stack, i want to migrate from Prometheus to elastic

hi everyone,
i am using elastic/eck-stack helm chart, my helm values are posted below,

i want to migrate away from my current prometheus monitoring solution, but my environment uses ServiceMonitors and PodMonitors heavily. i also have kafka, rabbitmq and redis in my cluster, and APM is enabled on my apps.

  1. i want to see kafka, rabbitmq, redis connections in APM for my event based services.
  2. how can i get servicemonitor and pod monitor metrics into elastic dynamically
  3. what is the best way to enrich my dashboards in kibana so that i get the same observability i currently have in grafana.
  4. any documentation on how to install and configure integrations using configuration as code ? i was only able to successfully INSTALL kubernetes, system, elastic_agent, fleet_server integrations but NOT CONFIGURE them using code .. any help in this part please .. any detailed documentation ?
  5. which one fits my requirements better, elastic_agent or metricbeat ?
# helm upgrade --install elastic-operator elastic/eck-operator -n elastic --create-namespace -f ./tolerations.yaml
# helm upgrade --install es elastic/eck-stack -n elastic --create-namespace -f ./eck.yaml

# ECK-managed Elasticsearch cluster: 3 combined master/data nodes with
# zone-aware shard allocation driven by the downward-node-labels annotation.
eck-elasticsearch:
  enabled: true
  version: 8.17.0
  fullnameOverride: elasticsearch
  annotations:
    # Copies the node's zone label onto the pod as an annotation so it can be
    # read through the downward API in the container env below.
    eck.k8s.elastic.co/downward-node-labels: "topology.kubernetes.io/zone"
  http:
    service:
      spec:
        # Restrict the HTTP service to data nodes of this cluster.
        selector:
          elasticsearch.k8s.elastic.co/cluster-name: elasticsearch
          elasticsearch.k8s.elastic.co/node-data: 'true'
  auth:
    # File-realm role definition mounted from the Secret below (roles.yml).
    roles:
      - secretName: logstash-elasticsearch-user-role
  nodeSets:
    - name: data
      count: 3
      podTemplate:
        spec:
          tolerations:
            - key: "CONSUMER"
              operator: "Equal"
              value: "devops"
              effect: "NoSchedule"
          nodeSelector:
            CONSUMER: devops
          containers:
            - name: elasticsearch
              env:
                # FIX: must be named ZONE (uppercase) so the `$ZONE`
                # substitution in `node.attr.zone` under `config:` resolves.
                # The original lowercase `zone` left $ZONE undefined and the
                # zone attribute empty.
                - name: ZONE
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.annotations['topology.kubernetes.io/zone']
              resources:
                requests:
                  cpu: 4
                  memory: 12Gi
                limits:
                  cpu: 4
                  memory: 12Gi
          affinity:
            podAntiAffinity:
              # NOTE(review): this keeps ES pods away from *prometheus* pods,
              # not away from each other — confirm that is intentional. Spread
              # between ES pods is handled by topologySpreadConstraints below.
              preferredDuringSchedulingIgnoredDuringExecution:
                - weight: 100
                  podAffinityTerm:
                    labelSelector:
                      matchLabels:
                        app.kubernetes.io/instance: prometheus
                    topologyKey: kubernetes.io/hostname
          topologySpreadConstraints:
            # Hard spread across hosts, soft spread across zones.
            - labelSelector:
                matchLabels:
                  elasticsearch.k8s.elastic.co/cluster-name: elasticsearch
              maxSkew: 1
              topologyKey: kubernetes.io/hostname
              whenUnsatisfiable: DoNotSchedule
            - labelSelector:
                matchLabels:
                  elasticsearch.k8s.elastic.co/cluster-name: elasticsearch
              maxSkew: 1
              topologyKey: topology.kubernetes.io/zone
              whenUnsatisfiable: ScheduleAnyway
          initContainers:
            # NOTE(review): with node.store.allow_mmap: false below this sysctl
            # init container is redundant — keep one or the other.
            - name: sysctl
              securityContext:
                privileged: true
                runAsUser: 0
              command: [ 'sh', '-c', 'sysctl -w vm.max_map_count=262144' ]
      config:
        node.roles: [ "master", "data", "ingest", "ml", "transform", "remote_cluster_client" ]
        cluster.routing.allocation.awareness.attributes: k8s_node_name,zone
        # Resolved from the ZONE container env var defined above.
        node.attr.zone: $ZONE
        node.store.allow_mmap: false
      volumeClaimTemplates:
        - metadata:
            name: elasticsearch-data
          spec:
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 5Gi
            # NOTE(review): EFS is NFS-backed; network filesystems are not
            # recommended for the Elasticsearch data path — confirm this is a
            # deliberate trade-off.
            storageClassName: efs-sc
  ingress:
    enabled: false

# Kibana with Fleet preconfiguration: packages and agent policies are declared
# here so Fleet is set up as code on startup.
eck-kibana:
  enabled: true
  fullnameOverride: kibana
  version: 8.17.0
  spec:
    count: 3
    elasticsearchRef:
      name: elasticsearch
    podTemplate:
      spec:
        containers:
          - name: kibana
            env:
              # Keep the Node.js heap below the 2.5Gi container limit.
              - name: NODE_OPTIONS
                value: "--max-old-space-size=2048"
            resources:
              requests:
                memory: 1Gi
                cpu: 0.5
              limits:
                memory: 2.5Gi
                cpu: 1
        affinity:
          podAntiAffinity:
            # Prefer one Kibana replica per host.
            preferredDuringSchedulingIgnoredDuringExecution:
              - weight: 100
                podAffinityTerm:
                  labelSelector:
                    matchLabels:
                      kibana.k8s.elastic.co/name: kibana
                  topologyKey: kubernetes.io/hostname
        nodeSelector:
          CONSUMER: devops
        tolerations:
          - key: "CONSUMER"
            operator: "Equal"
            value: "devops"
            effect: "NoSchedule"
    config:
      # FIX: kibana.autocompleteTimeout / kibana.autocompleteTerminateAfter
      # were removed in Kibana 8.0; the renamed unifiedSearch settings must be
      # used on 8.17, otherwise Kibana refuses to start on the unknown keys.
      unifiedSearch.autocomplete.valueSuggestions.timeout: 10000
      unifiedSearch.autocomplete.valueSuggestions.terminateAfter: 1000000
      xpack.fleet.agents.enabled: true
      xpack.fleet.enableDeleteUnenrolledAgents: true
      xpack.fleet.agents.elasticsearch.hosts: [ "https://elasticsearch-es-http.elastic.svc:9200" ]
      xpack.fleet.agents.fleet_server.hosts: [ "https://fleet-server-agent-http.elastic.svc:8220" ]
      xpack.fleet.packages:
        - name: apm
          version: latest
        - name: kubernetes
          version: latest
        - name: system
          version: latest
        - name: elastic_agent
          version: latest
        - name: fleet_server
          version: latest
        - name: kafka
          version: latest
        - name: prometheus
          version: latest
      xpack.fleet.agentPolicies:
        # Policy for the Fleet Server itself (referenced by eck-fleet-server).
        - name: Fleet Server on ECK policy Test1
          id: eck-fleet-test-1
          namespace: default
          is_managed: true
          monitoring_enabled:
            - logs
            - metrics
          unenroll_timeout: 900
          inactivity_timeout: 900
          package_policies:
            - name: fleet_server_1
              id: fleet_server-1
              package:
                name: fleet_server
        # Policy for the per-node agents (referenced by eck-agent).
        - name: Elastic Agent on ECK policy Test1
          id: elastic-agent-test-1
          namespace: default
          is_managed: true
          monitoring_enabled:
            - logs
            - metrics
          unenroll_timeout: 900
          inactivity_timeout: 900
          package_policies:
            - package:
                name: system
              name: system-1
            - package:
                name: kafka
              name: kafka-1
            - package:
                name: kubernetes
              name: kubernetes-test-1
            - package:
                name: prometheus
              name: prometheus-1


# Fleet-managed Elastic Agent DaemonSet (one agent per node).
eck-agent:
  enabled: true
  fullnameOverride: "elastic-agent"
  # FIX: removed the duplicate top-level `elasticsearchRefs: []` — the
  # sub-chart consumes it under `spec` (kept below); the top-level copy was dead.
  version: 8.17.0
  spec:
    kibanaRef:
      name: kibana
    # Empty on purpose: in fleet mode the agent takes its output from the
    # Fleet policy, not from a direct Elasticsearch reference.
    elasticsearchRefs: []
    fleetServerRef:
      name: fleet-server
    mode: fleet
    # FIX: was a bare `fleetServerEnabled:` which parses as null; this is the
    # regular agent, so state the boolean explicitly.
    fleetServerEnabled: false
    # Must match an id from xpack.fleet.agentPolicies in the Kibana section.
    policyID: elastic-agent-test-1
    daemonSet:
      podTemplate:
        spec:
          securityContext:
            runAsUser: 0
          containers:
            - name: agent
              env:
                - name: FLEET_INSECURE
                  value: "true"
              # Host log paths for container log collection.
              volumeMounts:
                - name: varlogcontainers
                  mountPath: /var/log/containers
                - name: varlogpods
                  mountPath: /var/log/pods
                - name: varlibdockercontainers
                  mountPath: /var/lib/docker/containers
          tolerations:
            # Run on every node, including tainted ones.
            - operator: "Exists"
          serviceAccountName: elastic-agent
          hostNetwork: true
          dnsPolicy: ClusterFirstWithHostNet
          automountServiceAccountToken: true
          volumes:
            - name: varlogcontainers
              hostPath:
                path: /var/log/containers
            - name: varlogpods
              hostPath:
                path: /var/log/pods
            - name: varlibdockercontainers
              hostPath:
                path: /var/lib/docker/containers

# Fleet Server deployment, enrolled into the eck-fleet-test-1 policy.
eck-fleet-server:
  enabled: true
  fullnameOverride: "fleet-server"
  version: 8.17.0
  spec:
    # FIX: kibanaRef / elasticsearchRefs / policyID belong under `spec` for the
    # eck-fleet-server sub-chart (matching the eck-agent section); at the top
    # level they are not rendered into the Agent resource.
    kibanaRef:
      name: kibana
    elasticsearchRefs:
      - name: elasticsearch
    # Must match the Fleet Server policy id declared in the Kibana section.
    policyID: eck-fleet-test-1
    deployment:
      replicas: 1
      podTemplate:
        spec:
          securityContext:
            runAsUser: 0
          nodeSelector:
            CONSUMER: devops
          tolerations:
            - key: "CONSUMER"
              operator: "Equal"
              value: "devops"
              effect: "NoSchedule"
          serviceAccountName: fleet-server
          automountServiceAccountToken: true

# Logstash: receives Beats traffic on 5044, enriches Kubernetes fields and
# writes per-namespace indices to Elasticsearch.
eck-logstash:
  enabled: true
  count: 3
  version: 8.17.0
  fullnameOverride: "logstash"
  podTemplate:
    spec:
      nodeSelector:
        CONSUMER: devops
      tolerations:
        - key: "CONSUMER"
          operator: "Equal"
          value: "devops"
          effect: "NoSchedule"
  elasticsearchRefs:
    # clusterName is the env-var prefix: it yields ECK_ES_HOSTS / ECK_ES_USER /
    # ECK_ES_PASSWORD / ECK_ES_SSL_CERTIFICATE_AUTHORITY used in the pipeline.
    - clusterName: eck
      name: elasticsearch
  volumeClaimTemplates:
    - metadata:
        name: logstash-data
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi
        storageClassName: efs-sc
  pipelines:
    - pipeline.id: main
      pipeline.workers: 2
      config.string: |
        input {
          beats {
            port => 5044
          }
        }

        filter {
          # Parse JSON app logs into a subfield; non-JSON messages pass through.
          json {
            source => "message"
            target => "parsed_json"
            skip_on_invalid_json => true
          }

          if [kubernetes] {
            mutate {
              add_field => {
                "k8s_namespace" => "%{[kubernetes][namespace]}"
                "k8s_pod_name"  => "%{[kubernetes][pod][name]}"
              }
            }

            # Index names below use k8s_namespace; '-' would read as a word
            # separator there, so normalize it to '_'.
            mutate {
              gsub => [
                "k8s_namespace", "-", "_"
              ]
            }
          }

          mutate {
            remove_field => ["host", "input", "agent", "ecs"]
          }

          mutate {
            add_field => {
              "beats_source_type" => "logs"
            }
          }
        }

        output {
          elasticsearch {
            # FIX: removed `doc_as_upsert => true` — that option only applies
            # with `action => "update"` and contradicted `action => "create"`.
            action => "create"
            user => "${ECK_ES_USER}"
            hosts => [ "${ECK_ES_HOSTS}" ]
            password => "${ECK_ES_PASSWORD}"
            # NOTE(review): if [kubernetes] is absent the index becomes the
            # literal "logs-%{k8s_namespace}" — consider a fallback value.
            index => "%{beats_source_type}-%{k8s_namespace}"
            cacert => "${ECK_ES_SSL_CERTIFICATE_AUTHORITY}"
          }

          # NOTE(review): rubydebug prints every event to the pod log; very
          # noisy for production — confirm this is wanted beyond debugging.
          stdout {
            codec => rubydebug
          }
        }
  services:
    - name: filebeat
      service:
        spec:
          type: ClusterIP
          ports:
            - port: 5044
              name: "filebeat"
              protocol: TCP
              targetPort: 5044

# Metricbeat DaemonSet: hints-based autodiscover plus static modules for
# Kubernetes (kube-state-metrics, kubelet, proxy, apiserver), system metrics
# and a Prometheus remote_write receiver.
eck-beats:
  enabled: true
  name: metricbeat
  type: metricbeat
  version: 8.17.0
  fullnameOverride: metricbeat
  elasticsearchRef:
    name: elasticsearch
  kibanaRef:
    name: kibana
  config:
    metricbeat:
      autodiscover:
        providers:
          - hints:
              default_config: {}
              # FIX: a real boolean, not the string "true".
              enabled: true
            node: ${NODE_NAME}
            type: kubernetes
      modules:
        # Accept Prometheus remote_write traffic (e.g. forwarded from an
        # existing Prometheus) on port 9201.
        - module: prometheus
          metricsets: ["remote_write"]
          host: "0.0.0.0"
          port: "9201"
          metrics_count: true
        # NOTE(review): this runs on every node of the DaemonSet, so each node
        # scrapes kube-state-metrics and produces duplicate documents —
        # consider autodiscover with leader election (unique: true) instead.
        - module: kubernetes
          hosts: [ "prometheus-kube-state-metrics.monitoring.svc:8080" ]
          period: 10s
          add_metadata: true
          metricsets:
            - state_namespace
            - state_node
            - state_deployment
            - state_daemonset
            - state_replicaset
            - state_pod
            - state_container
            - state_job
            - state_cronjob
            - state_resourcequota
            - state_statefulset
            - state_service
            - state_persistentvolume
            - state_persistentvolumeclaim
            - state_storageclass
        # kube-proxy metrics from the local node (works because hostNetwork
        # is enabled on the DaemonSet pod).
        - module: kubernetes
          metricsets:
            - proxy
          period: 10s
          host: ${NODE_NAME}
          hosts: ["localhost:10249"]
        - module: kubernetes
          metricsets:
            - apiserver
          hosts: [ "https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}" ]
          bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
          ssl.certificate_authorities:
            - /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
          period: 30s
        # NOTE(review): the event watcher also runs per node here, duplicating
        # every cluster event once per node — confirm, or scope to one pod.
        - module: kubernetes
          metricsets:
            - event
        - module: system
          period: 10s
          metricsets:
            - cpu
            - load
            - memory
            - network
            - process_summary
            # - process # if you enable this it will crash.
          process:
            include_top_n:
              by_cpu: 5
              by_memory: 5
          processes:
            - '.*'
        - module: system
          period: 1m
          metricsets:
            - filesystem
            - fsstat
          processors:
            # Drop pseudo/virtual filesystems.
            - drop_event:
                when:
                  regexp:
                    system:
                      filesystem:
                        mount_point: ^/(sys|cgroup|proc|dev|etc|host|lib)($|/)
        # Kubelet metrics from the local node's read API.
        - module: kubernetes
          period: 10s
          node: ${NODE_NAME}
          hosts:
            - https://${NODE_NAME}:10250
          bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
          ssl:
            # Kubelet serving certs are often self-signed per node.
            verification_mode: none
          metricsets:
            - node
            - system
            - pod
            - container
            - volume
    processors:
      - add_cloud_metadata: {}
      - add_host_metadata: {}
  # One Metricbeat pod per node; host namespaces and hostPath mounts give it
  # access to node-level cgroup, proc and Docker socket data.
  daemonSet:
    podTemplate:
      spec:
        tolerations:
          # Schedule on every node, including tainted ones.
          - operator: "Exists"
        serviceAccountName: metricbeat
        automountServiceAccountToken: true
        containers:
          - args:
              - -e
              - -c
              - /etc/beat.yml
              # Read host metrics from the mounted host filesystem.
              - --system.hostfs=/hostfs
            name: metricbeat
            resources:
              requests:
                cpu: 150m
                memory: 512Mi
              limits:
                cpu: 200m
                memory: 768Mi
            volumeMounts:
              - mountPath: /hostfs/sys/fs/cgroup
                name: cgroup
              # NOTE(review): assumes a Docker runtime — on containerd nodes
              # /var/run/docker.sock does not exist; confirm the runtime.
              - mountPath: /var/run/docker.sock
                name: dockersock
              - mountPath: /hostfs/proc
                name: proc
            env:
              # Node name via the downward API; consumed by ${NODE_NAME} in
              # the autodiscover/module config above.
              - name: NODE_NAME
                valueFrom:
                  fieldRef:
                    fieldPath: spec.nodeName
        dnsPolicy: ClusterFirstWithHostNet
        hostNetwork: true
        securityContext:
          runAsUser: 0
        terminationGracePeriodSeconds: 30
        volumes:
          - hostPath:
              path: /sys/fs/cgroup
            name: cgroup
          - hostPath:
              path: /var/run/docker.sock
            name: dockersock
          - hostPath:
              path: /proc
            name: proc
  # RBAC for the kubernetes module and autodiscover: list/watch core objects,
  # read node stats from the kubelet, and scrape /metrics endpoints.
  clusterRole:
    name: metricbeat
    rules:
      - apiGroups:
          - ""
        resources:
          - nodes
          - persistentvolumeclaims
          - namespaces
          - events
          - pods
        verbs:
          - get
          - list
          - watch
      # extensions/replicasets kept for compatibility with older API servers;
      # current clusters serve replicasets from apps (below).
      - apiGroups:
          - "extensions"
        resources:
          - replicasets
        verbs:
          - get
          - list
          - watch
      - apiGroups:
          - apps
        resources:
          - statefulsets
          - deployments
          - replicasets
        verbs:
          - get
          - list
          - watch
      # Kubelet stats API used by the node/pod/container/volume metricsets.
      - apiGroups:
          - ""
        resources:
          - nodes/stats
        verbs:
          - get
      # Non-resource /metrics endpoints (apiserver, proxy).
      - nonResourceURLs:
          - /metrics
        verbs:
          - get

  serviceAccount:
    name: metricbeat

  clusterRoleBinding:
    name: metricbeat
    subjects:
      - kind: ServiceAccount
        name: metricbeat
    roleRef:
      kind: ClusterRole
      name: metricbeat
      apiGroup: rbac.authorization.k8s.io

# APM Server receiving traces/metrics from instrumented apps on port 8200.
eck-apm-server:
  enabled: true
  version: 8.17.0
  count: 1
  elasticsearchRef:
    name: elasticsearch
  kibanaRef:
    name: kibana
  http:
    service:
      spec:
        ports:
          - name: http
            port: 8200
            targetPort: 8200

and this is the logstash-role secret resource yaml

# File-realm role consumed by the eck-elasticsearch `auth.roles` section above;
# grants the Logstash pipeline write access to its target indices.
apiVersion: v1
kind: Secret
metadata:
  name: logstash-elasticsearch-user-role
  namespace: elastic
stringData:
  # FIX: removed the duplicate "create_index" entry from the privileges list.
  # NOTE(review): "all" already subsumes every other index privilege listed —
  # consider trimming the list to least privilege.
  roles.yml: |-
    eck_logstash_user_role:
      cluster: [ "monitor", "manage_ilm", "read_ilm", "manage_logstash_pipelines", "manage_index_templates", "cluster:admin/ingest/pipeline/get"]
      indices:
      - names: [ "logstash", "logstash-*", "metricbeat-*", "metricbeat", "ecs-logstash", "ecs-logstash-*", "logs-*", "logs", "metrics-*", "metrics", "synthetics-*", "traces-*"]
        privileges: [ "manage", "create", "write", "index", "create_index", "read", "view_index_metadata", "auto_configure", "all" ]

Check out the documentation here: Run standalone Elastic Agent on ECK | Elastic Cloud on Kubernetes [2.16] | Elastic

but why should i consider standalone since i am deploying elastic-agent using the same elastic-stack chart and fleet ?

Sorry, wrong link. This is for fleet managed setup: Run Fleet-managed Elastic Agent on ECK | Elastic Cloud on Kubernetes [2.16] | Elastic

i have read those documents .. but i still can't find how to configure the integrations .. they only describe the initialization, not the configuration ..

There is documentation about the integration fields here: Elastic integrations | Elastic integrations | Elastic

The integration config should be put under package_policies for Fleet-managed agents, see an example here: Create an agent policy without using the UI | Fleet and Elastic Agent Guide [master] | Elastic

1 Like

i pasted the inputs from the integration's configuration page into the kibana section like this, as shown in the integration's copy-paste snippet, but the syntax in the link you provided does not match these inputs.

      xpack.fleet.packages:
        - name: apm
          version: latest
        - name: kubernetes
          version: latest
        - name: system
          version: latest
        - name: elastic_agent
          version: latest
        - name: fleet_server
          version: latest
        - name: kafka
          version: latest
        - name: prometheus
          version: latest
        - name: rabbitmq
          version: latest
        - name: redis
          version: latest
      xpack.fleet.agentPolicies:
        - name: Fleet Server on ECK policy Test1
          id: eck-fleet-test-1
          namespace: default
          is_managed: true
          monitoring_enabled:
            - logs
            - metrics
          unenroll_timeout: 900
          inactivity_timeout: 900
          package_policies:
            - name: fleet_server_1
              id: fleet_server-1
              package:
                name: fleet_server
        - name: Elastic Agent on ECK policy Test1
          id: elastic-agent-test-1
          namespace: default
          is_managed: true
          monitoring_enabled:
            - logs
            - metrics
          unenroll_timeout: 60
          inactivity_timeout: 60
          package_policies:
            - package:
                name: system
              name: system-1
            - package:
                name: kafka
              name: kafka-1
            - package:
                name: redis
              name: redis-1
              id: preconfigured-redis-1
              inputs:
                # Collect Redis application logs: Collecting application logs from Redis instances
                - id: redis-logfile
                  type: logfile
                  streams:
                    # Redis application logs: Collect Redis application logs
                    - id: logfile-redis.log
                      data_stream:
                        dataset: redis.log
                        type: logs
                      paths:
                        - /var/log/redis/redis-server.log*
                      tags:
                        - preserve_original_event
                        - redis-log
                      exclude_files:
                        - .gz$
                      exclude_lines:
                        - '^\s+[\-`(''.|_]'
                # Collect Redis slow logs: Collecting slow logs from Redis instances
                - id: redis-redis
                  type: redis
                  streams:
                    # Redis slow logs: Collect Redis slow logs
                    - id: redis-redis.slowlog
                      data_stream:
                        dataset: redis.slowlog
                        type: logs
                      hosts:
                        - 'redis-master.redis.svc:6379'
                      # password: <PASSWORD> # Password
                # Collect Redis metrics: Collecting info, key and keyspace metrics from Redis instances
                - id: redis-redis/metrics
                  type: redis/metrics
                  streams:
                    # Redis info metrics: Collect Redis info metrics
                    - id: redis/metrics-redis.info
                      data_stream:
                        dataset: redis.info
                        type: metrics
                      metricsets:
                        - info
                      hosts:
                        - 'redis-master.redis.svc:6379'
                      idle_timeout: 20s
                      maxconn: 10
                      network: tcp
                      period: 10s
                      # username: <USERNAME> # Username
                      # password: <PASSWORD> # Password
                    # Redis key metrics: Collect Redis key metrics
                    - id: redis/metrics-redis.key
                      data_stream:
                        dataset: redis.key
                        type: metrics
                      metricsets:
                        - key
                      hosts:
                        - 'redis-master.redis.svc:6379'
                      idle_timeout: 20s
                      key.patterns:
                        - limit: 20
                          pattern: '*'
                      maxconn: 10
                      network: tcp
                      period: 10s
                      # username: <USERNAME> # Username
                      # password: <PASSWORD> # Password
                    # Redis keyspace metrics: Collect Redis keyspace metrics
                    - id: redis/metrics-redis.keyspace
                      data_stream:
                        dataset: redis.keyspace
                        type: metrics
                      metricsets:
                        - keyspace
                      hosts:
                        - 'redis-master.redis.svc:6379'
                      idle_timeout: 20s
                      maxconn: 10
                      network: tcp
                      period: 10s
                      # username: <USERNAME> # Username
                      # password: <PASSWORD> # Password

i am getting the following error

    at /usr/share/kibana/node_modules/rxjs/dist/cjs/internal/operators/distinctUntilChanged.js:18:28
    at OperatorSubscriber._this._next (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/operators/OperatorSubscriber.js:33:21)
    at OperatorSubscriber.Subscriber.next (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Subscriber.js:51:18)
    at /usr/share/kibana/node_modules/rxjs/dist/cjs/internal/operators/map.js:10:24
    at OperatorSubscriber._this._next (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/operators/OperatorSubscriber.js:33:21)
    at OperatorSubscriber.Subscriber.next (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Subscriber.js:51:18)
    at ReplaySubject._subscribe (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/ReplaySubject.js:54:24)
    at ReplaySubject.Observable._trySubscribe (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Observable.js:41:25)
    at ReplaySubject.Subject._trySubscribe (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Subject.js:123:47)
    at /usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Observable.js:35:31
    at Object.errorContext (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/util/errorContext.js:22:9)
    at ReplaySubject.Observable.subscribe (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Observable.js:26:24)
    at /usr/share/kibana/node_modules/rxjs/dist/cjs/internal/operators/share.js:65:18
    at OperatorSubscriber.<anonymous> (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/util/lift.js:14:28)
    at /usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Observable.js:30:30
    at Object.errorContext (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/util/errorContext.js:22:9)
    at Observable.subscribe (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Observable.js:26:24)
    at /usr/share/kibana/node_modules/rxjs/dist/cjs/internal/operators/map.js:9:16
    at OperatorSubscriber.<anonymous> (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/util/lift.js:14:28)
    at /usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Observable.js:30:30
    at Object.errorContext (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/util/errorContext.js:22:9)
    at Observable.subscribe (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Observable.js:26:24)
    at /usr/share/kibana/node_modules/rxjs/dist/cjs/internal/operators/distinctUntilChanged.js:13:16
    at OperatorSubscriber.<anonymous> (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/util/lift.js:14:28)
    at /usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Observable.js:30:30
    at Object.errorContext (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/util/errorContext.js:22:9)
    at Observable.subscribe (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Observable.js:26:24)
    at /usr/share/kibana/node_modules/rxjs/dist/cjs/internal/operators/map.js:9:16
    at OperatorSubscriber.<anonymous> (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/util/lift.js:14:28)
    at /usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Observable.js:30:30
    at Object.errorContext (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/util/errorContext.js:22:9)
    at Observable.subscribe (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Observable.js:26:24)
    at /usr/share/kibana/node_modules/rxjs/dist/cjs/internal/operators/take.js:13:20
    at OperatorSubscriber.<anonymous> (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/util/lift.js:14:28)
    at /usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Observable.js:30:30
    at Object.errorContext (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/util/errorContext.js:22:9)
    at Observable.subscribe (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Observable.js:26:24)
    at /usr/share/kibana/node_modules/rxjs/dist/cjs/internal/operators/throwIfEmpty.js:11:16
    at SafeSubscriber.<anonymous> (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/util/lift.js:14:28)
    at /usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Observable.js:30:30
    at Object.errorContext (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/util/errorContext.js:22:9)
    at Observable.subscribe (/usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Observable.js:26:24)
    at /usr/share/kibana/node_modules/rxjs/dist/cjs/internal/Observable.js:86:19
    at new Promise (<anonymous>)

the same for both kafka and rabbitmq when i add configuration