Kibana container is dying

Hello all, I'm deploying a new cluster using Kibana as a Docker container. The Elasticsearch cluster is already running on EC2 instances, but the Kibana container is stuck in a loop: every 5 minutes the container dies. I used the default configuration.

    server.name: kibana-antik
    server.host: "0"
    elasticsearch.hosts: [ "http://elastic.example.net:9200" ]
    monitoring.ui.container.elasticsearch.enabled: true

but after some minutes the container logs show this and died:

{"type":"response","@timestamp":"2020-05-27T17:07:22Z","tags":[],"pid":6,"method":"get","statusCode":302,"req":{"url":"/","method":"get","headers":{"user-agent":"curl/7.29.0","host":"localhost:5601","accept":"*/*"},"remoteAddress":"127.0.0.1","userAgent":"127.0.0.1"},"res":{"statusCode":302,"responseTime":13,"contentLength":9},"message":"GET / 302 13ms - 9.0B"}
{"type":"response","@timestamp":"2020-05-27T17:07:52Z","tags":[],"pid":6,"method":"get","statusCode":302,"req":{"url":"/","method":"get","headers":{"user-agent":"curl/7.29.0","host":"localhost:5601","accept":"*/*"},"remoteAddress":"127.0.0.1","userAgent":"127.0.0.1"},"res":{"statusCode":302,"responseTime":7,"contentLength":9},"message":"GET / 302 7ms - 9.0B"}
{"type":"response","@timestamp":"2020-05-27T17:08:22Z","tags":[],"pid":6,"method":"get","statusCode":302,"req":{"url":"/","method":"get","headers":{"user-agent":"curl/7.29.0","host":"localhost:5601","accept":"*/*"},"remoteAddress":"127.0.0.1","userAgent":"127.0.0.1"},"res":{"statusCode":302,"responseTime":7,"contentLength":9},"message":"GET / 302 7ms - 9.0B"}
{"type":"response","@timestamp":"2020-05-27T17:08:52Z","tags":[],"pid":6,"method":"get","statusCode":302,"req":{"url":"/","method":"get","headers":{"user-agent":"curl/7.29.0","host":"localhost:5601","accept":"*/*"},"remoteAddress":"127.0.0.1","userAgent":"127.0.0.1"},"res":{"statusCode":302,"responseTime":9,"contentLength":9},"message":"GET / 302 9ms - 9.0B"}
{"type":"response","@timestamp":"2020-05-27T17:09:23Z","tags":[],"pid":6,"method":"get","statusCode":302,"req":{"url":"/","method":"get","headers":{"user-agent":"curl/7.29.0","host":"localhost:5601","accept":"*/*"},"remoteAddress":"127.0.0.1","userAgent":"127.0.0.1"},"res":{"statusCode":302,"responseTime":8,"contentLength":9},"message":"GET / 302 8ms - 9.0B"}
{"type":"response","@timestamp":"2020-05-27T17:09:53Z","tags":[],"pid":6,"method":"get","statusCode":302,"req":{"url":"/","method":"get","headers":{"user-agent":"curl/7.29.0","host":"localhost:5601","accept":"*/*"},"remoteAddress":"127.0.0.1","userAgent":"127.0.0.1"},"res":{"statusCode":302,"responseTime":7,"contentLength":9},"message":"GET / 302 7ms - 9.0B"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins-system"],"pid":6,"message":"Stopping all plugins."}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","watcher"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","monitoring"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","infra"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","graph"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","dataEnhanced"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","file_upload"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","ml"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","uptime"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","alertingBuiltins"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","apm"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","alerting"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","case"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","actions"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","spaces"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","remoteClusters"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","indexManagement"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","licenseManagement"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","transform"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","snapshotRestore"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","security"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","upgradeAssistant"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","canvas"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","painlessLab"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","searchprofiler"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","consoleExtensions"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","console"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","cloud"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","home"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","data"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","visualizations"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","expressions"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","bfetch"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","share"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","rollup"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","translations"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","apm_oss"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","kibanaLegacy"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","features"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","timelion"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","telemetryCollectionXpack"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","telemetry"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","telemetryCollectionManager"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","lens"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","ossTelemetry"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","metrics"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","usageCollection"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","code"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","eventLog"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","visTypeVega"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","encryptedSavedObjects"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","licensing"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","taskManager"],"pid":6,"message":"Stopping plugin"}
{"type":"log","@timestamp":"2020-05-27T17:09:54Z","tags":["info","plugins","siem"],"pid":6,"message":"Stopping plugin"}

Would it be possible to show the full logs?

It seems someone/something is repeatedly hitting Kibana with curl (note the `user-agent: curl` entries from 127.0.0.1 — likely a health check).

I would suggest enabling debug logging on Kibana (`logging.verbose: true`).

Hello, thank you for your answer.

The issue was sorted out; it was related to a different resource.

1 Like

This topic was automatically closed 28 days after the last reply. New replies are no longer allowed.