Kibana on K8s behind Traefik giving Bad Gateway

Hi everyone,

I have deployed Kibana with X-Pack security enabled using the Helm chart on an on-premises Kubernetes cluster, with Traefik as the load balancer. Without X-Pack the Kibana UI is reachable on our private hostname, but with X-Pack enabled it only works via localhost; requests to the hostname return "Bad Gateway". Any inputs on how to fix this?
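
For context, Kibana itself responds fine on localhost, so I suspect the Bad Gateway comes from Traefik proxying plain HTTP to a backend that now serves HTTPS (I have server.ssl.enabled: true in the values below). A minimal sketch of what I think could tell Traefik to speak HTTPS to the backend, assuming Traefik v2's Kubernetes Ingress provider; the Service name kibana-kibana is taken from my ingress backend below:

# Hypothetical addition under the Kibana chart's service.annotations (sketch only):
# this annotation tells Traefik v2 to use HTTPS towards the pods behind this Service.
service:
  annotations:
    traefik.ingress.kubernetes.io/service.serversscheme: "https"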

Below is my Kibana values.yaml file:

---
elasticsearchHosts: "http://elasticsearch-master:9200"
#elasticsearchHosts: "https://elasticsearch.pte.sgre.one"
replicas: 1

# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs:
  - name: "NODE_OPTIONS"
    value: "--max-old-space-size=1800"
  - name: "ELASTICSEARCH_USERNAME"
    valueFrom:
      secretKeyRef:
        name: security-master-credentials
        key: username
  - name: "ELASTICSEARCH_PASSWORD"
    valueFrom:
      secretKeyRef:
        name: security-master-credentials
        key: password  
#  - name: MY_ENVIRONMENT_VAR
#    value: the_value_goes_here

# Allows you to load environment variables from kubernetes secret or config map
envFrom: []
# - secretRef:
#     name: env-secret
# - configMapRef:
#     name: config-map

# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts: 
  - name: elastic-certificate-pem
    secretName: elastic-certificate-pem
    path: /usr/share/kibana/config/certs
#  - name: kibana-keystore
#    secretName: kibana-keystore
#    path: /usr/share/kibana/data/kibana.keystore
#    subPath: kibana.keystore # optional

hostAliases: []
#- ip: "127.0.0.1"
#  hostnames:
#  - "foo.local"
#  - "bar.local"

image: "artifactory.pte.sgre.one:443/docker-local/kibana"
imageTag: "7.16.3"
imagePullPolicy: "IfNotPresent"

# additionals labels
labels: {}

podAnnotations:
  {}
  # iam.amazonaws.com/role: es-cluster

resources:
  requests:
    cpu: "2000m"
    memory: "3Gi"
  limits:
    cpu: "2000m"
    memory: "3Gi"

protocol: https

serverHost: "0.0.0.0"

healthCheckPath: "/app/kibana"

# Allows you to add any config files in /usr/share/kibana/config/
# such as kibana.yml
kibanaConfig: 
  kibana.yml: |
    server.host: "0.0.0.0"
    server.ssl:
      enabled: true
      key: /usr/share/kibana/config/certs/elastic-certificate.pem
      certificate: /usr/share/kibana/config/certs/elastic-certificate.pem
    xpack.security.encryptionKey: d5c503097193a7a5205fde00e19eef79
    elasticsearch.ssl:
      certificateAuthorities: /usr/share/kibana/config/certs/elastic-certificate.pem
      verificationMode: certificate
#   kibana.yml: |
#     key:
#       nestedkey: value

# If Pod Security Policy in use it may be required to specify security context as well as service account

podSecurityContext:
  fsGroup: 1000

securityContext:
  capabilities:
    drop:
      - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000

serviceAccount: ""

# Whether or not to automount the service account token in the pod. Normally, Kibana does not need this
automountToken: true

# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""

httpPort: 5601

extraVolumes:
  []
  # - name: extras
  #   emptyDir: {}

extraVolumeMounts:
  []
  # - name: extras
  #   mountPath: /usr/share/extras
  #   readOnly: true
  #
extraContainers: ""
# - name: dummy-init
#   image: busybox
#   command: ['echo', 'hey']

extraInitContainers: ""
# - name: dummy-init
#   image: busybox
#   command: ['echo', 'hey']

updateStrategy:
  type: "Recreate"

service:
  type: ClusterIP
  loadBalancerIP: ""
  port: 5601
  nodePort: ""
  labels: {}
  annotations:
    {}
    # cloud.google.com/load-balancer-type: "Internal"
    # service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
    # service.beta.kubernetes.io/azure-load-balancer-internal: "true"
    # service.beta.kubernetes.io/openstack-internal-load-balancer: "true"
    # service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true"
  loadBalancerSourceRanges:
    []
    # 0.0.0.0/0
  httpPortName: http

ingress:
  enabled: true
  hosts:
    - host: kibana.pte.sgre.one
      paths:
        - path: /
      backend:
        serviceName: kibana-kibana
        servicePort: 5601
  #tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

readinessProbe:
  failureThreshold: 3
  initialDelaySeconds: 10
  periodSeconds: 10
  successThreshold: 3
  timeoutSeconds: 5

imagePullSecrets: []
nodeSelector: {}
tolerations: []
affinity: {}

nameOverride: ""
fullnameOverride: ""

lifecycle:
  {}
  # preStop:
  #   exec:
  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
  # postStart:
  #   exec:
  #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]

# Deprecated - use only with versions < 6.6
elasticsearchURL: "" # "http://elasticsearch-master:9200"
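
Since Kibana is serving its UI with the elastic-certificate.pem mounted above, I assume that once Traefik does speak HTTPS to the backend it also has to either trust that certificate or skip verification. A rough sketch of what I have in mind, assuming Traefik v2.4+ with the Kubernetes CRD provider enabled (the name kibana-transport and the namespace default are placeholders of mine):

# Hypothetical ServersTransport so Traefik accepts Kibana's internally signed certificate.
apiVersion: traefik.containo.us/v1alpha1
kind: ServersTransport
metadata:
  name: kibana-transport
  namespace: default
spec:
  insecureSkipVerify: true  # or rootCAsSecrets pointing at the CA instead of skipping verification

# Referenced from the Kibana Service via the Traefik v2 ingress annotation, e.g.:
#   traefik.ingress.kubernetes.io/service.serverstransport: default-kibana-transport@kubernetescrd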

And this is my Traefik values.yaml file:

image:
  name: artifactory.pte.sgre.one:443/docker-virtual/traefik

# Create an IngressRoute for the dashboard
ingressRoute:
  dashboard:
    enabled: false

service:
  annotations:
    metallb.universe.tf/address-pool: ingress-pool
    traefik.ingress.kubernetes.io/router.entrypoints: web
    traefik.ingress.kubernetes.io/router.tls: "true"

ssl:
  enforce: true
  enabled: true
  performRedirect: true

ports:
  web:
    redirectTo: websecure
  websecure:
    tls:
      enabled: true
additionalArguments:
  - '--providers.file.filename=/etc/traefik/dynamic/traefik-cert.yaml'
volumes:
  - name: kibana-secret
    mountPath: "/etc/certs/kibana"
    type: secret
  - name: elasticsearch-secret
    mountPath: "/etc/certs/elasticsearch"
    type: secret
  - name: traefik-config
    mountPath: "/etc/traefik/dynamic"
    type: configMap
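
One more thing I am unsure about: as far as I understand the Traefik docs, the router.entrypoints / router.tls annotations I put on the Traefik Service above only act on Ingress objects, not on the Traefik Service itself. A rough sketch of moving them onto the Kibana Ingress through the chart values, assuming this chart version accepts an annotations map under ingress::

# Hypothetical annotations on the Kibana Ingress, set via the chart's values:
ingress:
  enabled: true
  annotations:
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
    traefik.ingress.kubernetes.io/router.tls: "true"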

Can anybody please help me with this? Any inputs would be appreciated.
