ECK and oauth-proxy

Hi, I have a small problem with my oauth-proxy on OpenShift. The proxy is up and running, but every time I try to log in with the OpenShift login I get this error in Elasticsearch and the login does not work.

{"@timestamp":"2024-04-15T07:04:46.873Z", "log.level": "WARN", "message":"Authentication to realm native1 failed - Password authentication failed for phihele", "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"elasticsearch.server","process.thread.name":"elasticsearch[monitoring-stack-es-master-0][transport_worker][T#1]","log.logger":"org.elasticsearch.xpack.security.authc.RealmsAuthenticator","trace.id":"517fabd79f326a210977533b6cefb6a0","elasticsearch.cluster.uuid":"rZE9BserSySyWz2hsxVJYw","elasticsearch.node.id":"sjl-4UtBTLK7p9KE654nLA","elasticsearch.node.name":"monitoring-stack-es-master-0","elasticsearch.cluster.name":"monitoring-stack"}

Here are my Elasticsearch Helm values:

            metadata:
              name: monitoring-stack
            securityContext:
              runAsUser: 1000880000
              runAsNonRoot: true
            podSecurityContext:
              fsGroup: 1000880000
              runAsUser: 1000880000
            sysctlInitContainer:
              enabled: false
            
            nodeSets:
              - name: master
                count: 1
                config:
                  node.roles: ["master"]
                  xpack.ml.enabled: false
                  node.store.allow_mmap: false
                  xpack.monitoring.collection.enabled: true
                  xpack.monitoring.elasticsearch.collection.enabled: false
                podTemplate:
                  spec:
                    containers:
                    - name: elasticsearch
                      resources:
                        limits:
                          memory: 4Gi
                          cpu: 2
                    tolerations:
                    - key: monitoring
                      effect: NoSchedule
                      value: reserved
                    - key: monitoring
                      effect: NoExecute
                      value: reserved
                volumeClaimTemplates:
                - metadata:
                    name: elasticsearch-data
                  spec:
                    accessModes:
                    - ReadWriteOnce
                    resources:
                      requests:
                        storage: 10Gi

              - name: data-nodes
                count: 2
                config:
                  node.roles: ["data", "ingest"]
                  node.store.allow_mmap: false    
                podTemplate:
                  spec:
                    containers:
                    - name: elasticsearch
                      resources:
                        limits:
                          memory: 4Gi
                          cpu: 2
                        requests:
                          cpu: 1
                          memory: 2Gi
                    tolerations:
                    - key: monitoring
                      effect: NoSchedule
                      value: reserved
                    - key: monitoring
                      effect: NoExecute
                      value: reserved
                volumeClaimTemplates:
                - metadata:
                    name: elasticsearch-data
                  spec:
                    accessModes: [ "ReadWriteOnce" ]
                    resources:
                      requests:
                        storage: 2Ti
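
In case it matters, this is roughly how the built-in elastic credentials can be pulled and tested directly against the cluster (secret and pod names follow the ECK defaults for this cluster; the namespace is an assumption taken from the elasticsearchRef below):

# ECK stores the built-in "elastic" password in <cluster>-es-elastic-user by default
PASSWORD=$(kubectl get secret monitoring-stack-es-elastic-user -n havdb-logging -o go-template='{{.data.elastic | base64decode}}')
# quick check against the master pod named in the log line above
kubectl exec -n havdb-logging monitoring-stack-es-master-0 -- curl -sk -u "elastic:$PASSWORD" "https://localhost:9200/_cluster/health?pretty"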

and here are my Kibana values:

eck-kibana:
            enabled: true
            fullnameOverride: monitoring-kibana
            metadata:
              name: kibana
            securityContext:
              runAsUser: 1000880000
              runAsNonRoot: true
            podSecurityContext:
              fsGroup: 1000880000
              runAsUser: 1000880000
            sysctlInitContainer:
              enabled: false
            spec:
              count: 1
              elasticsearchRef:
                name: monitoring-stack
                namespace: havdb-logging
              podTemplate:
                spec:
                  serviceAccountName: kibana
                  automountServiceAccountToken: true
                  containers:
                  - name: kibana
                    env:
                      - name: NODE_OPTIONS
                        value: "--max-old-space-size=2048"
                    resources:
                      requests:
                        memory: 2Gi
                        cpu: 1
                      limits:
                        memory: 4Gi
                        cpu: 4
                  # Deploy Application behind an oauth proxy
                  - name: oauth-proxy
                    image: quay.io/openshift/origin-oauth-proxy:4.13
                    imagePullPolicy: IfNotPresent 
                    args:
                      - -provider=openshift
                      - -https-address=:8888
                      - -http-address=
                      - -email-domain=*
                      - -upstream=http://localhost:5601
                      - -tls-cert=/etc/tls/private/tls.crt
                      - -tls-key=/etc/tls/private/tls.key
                      - -cookie-secret-file=/etc/proxy/secrets/session_secret
                      - -openshift-service-account=kibana
                      - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
                    ports:
                      - name: oauth-proxy
                        containerPort: 8888
                        protocol: TCP
                    volumeMounts:
                      - mountPath: /etc/tls/private
                        name: secret-monitoring-kibana-tls
                      - mountPath: /etc/proxy/secrets
                        name: secret-monitoring-kibana-proxy 
                  nodeSelector:
                      node-role.kubernetes.io/monitoring: ""
                  tolerations:
                    - key: monitoring
                      effect: NoSchedule
                      value: reserved
                    - key: monitoring
                      effect: NoExecute
                      value: reserved
                  volumes:
                    - name: secret-monitoring-kibana-tls
                      secret:
                        defaultMode: 420
                        secretName: monitoring-kibana-tls
                    - name: secret-monitoring-kibana-proxy
                      secret:
                        defaultMode: 420
                        secretName: monitoring-kibana-proxy
              http:
                service:
                  metadata:
                    annotations:
                      service.alpha.openshift.io/serving-cert-secret-name: monitoring-kibana-tls
                  spec:
                    type: ClusterIP
                    ports:
                    - name: kibana
                      port: 5601
                      protocol: TCP
                      targetPort: kibana
                    - name: proxy
                      port: 8888
                      protocol: TCP
                      targetPort: oauth-proxy
                tls:
                  selfSignedCertificate:
                    disabled: true
              
              config:
                server.publicBaseUrl: "https://monitoring-stack.apps.lab.xxx.xx.xx.net"
                monitoring.ui.ccs.enabled: false
                monitoring.kibana.collection.enabled: false
                monitoring.ui.container.elasticsearch.enabled: true
                xpack.monitoring.collection.enabled: false
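
In case it is relevant: the -openshift-service-account=kibana argument makes that ServiceAccount act as the OAuth client, so it needs the usual redirect annotation pointing at the Route. Roughly like this, where the Route name monitoring-kibana is only an assumption because the Route itself is not part of these values:

# Route name "monitoring-kibana" is an assumption; the Route is not shown in the values above
oc -n havdb-logging annotate serviceaccount kibana serviceaccounts.openshift.io/oauth-redirectreference.primary='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"monitoring-kibana"}}'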

Can someone tell me what I have done wrong or what I am missing?