I started Logstash successfully but I don't see anything in Kibana

I have a problem with Logstash: it starts with no errors in the logs, but I don't see anything in Kibana.
Kibana and Elasticsearch are on another node; both run in Docker at version 8.5.3.

Logstash is on a separate node from the Kibana and Elasticsearch instance.
It is installed from the .deb package, version 7.5.2.

Logstash logs:

[2022-12-16T11:25:02,163][INFO ][logstash.runner          ] Starting Logstash {"logstash.version"=>"7.5.2"}
[2022-12-16T11:25:05,808][INFO ][org.reflections.Reflections] Reflections took 54 ms to scan 1 urls, producing 20 keys and 40 values
[2022-12-16T11:25:09,003][INFO ][logstash.outputs.elasticsearch][main] Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://elastic:xxxxxx@*******:9200/]}}
[2022-12-16T11:25:09,440][WARN ][logstash.outputs.elasticsearch][main] Restored connection to ES instance {:url=>"http://elastic:xxxxxx@********:9200/"}
[2022-12-16T11:25:09,525][INFO ][logstash.outputs.elasticsearch][main] ES Output version determined {:es_version=>8}
[2022-12-16T11:25:09,531][WARN ][logstash.outputs.elasticsearch][main] Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type {:es_version=>8}
[2022-12-16T11:25:09,588][INFO ][logstash.outputs.elasticsearch][main] New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>["http://********:9200"]}
[2022-12-16T11:25:09,664][INFO ][logstash.outputs.elasticsearch][main] Using default mapping template
[2022-12-16T11:25:09,733][INFO ][logstash.outputs.elasticsearch][main] Attempting to install template {:manage_template=>{"index_patterns"=>"logstash-*", "version"=>80001, "settings"=>{"index.refresh_interval"=>"5s", "number_of_shards"=>1, "index.lifecycle.name"=>"logstash-policy", "index.lifecycle.rollover_alias"=>"logstash"}, "mappings"=>{"dynamic_templates"=>[{"message_field"=>{"path_match"=>"message", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false}}}, {"string_fields"=>{"match"=>"*", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false, "fields"=>{"keyword"=>{"type"=>"keyword", "ignore_above"=>256}}}}}], "properties"=>{"@timestamp"=>{"type"=>"date"}, "@version"=>{"type"=>"keyword"}, "geoip"=>{"dynamic"=>true, "properties"=>{"ip"=>{"type"=>"ip"}, "location"=>{"type"=>"geo_point"}, "latitude"=>{"type"=>"half_float"}, "longitude"=>{"type"=>"half_float"}}}}}}}
[2022-12-16T11:25:09,836][WARN ][org.logstash.instrument.metrics.gauge.LazyDelegatingGauge][main] A gauge metric of an unknown type (org.jruby.specialized.RubyArrayOneObject) has been create for key: cluster_uuids. This may result in invalid serialization.  It is recommended to log an issue to the responsible developer/development team.
[2022-12-16T11:25:09,843][INFO ][logstash.javapipeline    ][main] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>8, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50, "pipeline.max_inflight"=>1000, "pipeline.sources"=>["/etc/logstash/conf.d/logstash-ehcos.conf"], :thread=>"#<Thread:0x4206d5d6 run>"}
[2022-12-16T11:25:10,153][INFO ][logstash.inputs.file     ][main] No sincedb_path set, generating one based on the "path" setting {:sincedb_path=>"/var/lib/logstash/plugins/inputs/file/.sincedb_3feb19d4a444dd4ad1171c743f654398", :path=>["/opt/ehcos-server/logs/ehcos-mule*.log"]}
[2022-12-16T11:25:10,201][INFO ][logstash.inputs.file     ][main] No sincedb_path set, generating one based on the "path" setting {:sincedb_path=>"/var/lib/logstash/plugins/inputs/file/.sincedb_434f34af7890f1e23f7c131792fad38f", :path=>["/opt/ehcos-server/logs/**/ehCS*.log"]}
[2022-12-16T11:25:10,207][INFO ][logstash.inputs.file     ][main] No sincedb_path set, generating one based on the "path" setting {:sincedb_path=>"/var/lib/logstash/plugins/inputs/file/.sincedb_4735d8287188199c63e5087acec18301", :path=>["/opt/ehcos-server/logs/querys*.log"]}
[2022-12-16T11:25:10,213][INFO ][logstash.inputs.file     ][main] No sincedb_path set, generating one based on the "path" setting {:sincedb_path=>"/var/lib/logstash/plugins/inputs/file/.sincedb_55d9daaa2bd1fe556ac82936ae4f71ae", :path=>["/opt/ehcos-server/logs/**/ehHIS*.log"]}
[2022-12-16T11:25:10,226][INFO ][logstash.javapipeline    ][main] Pipeline started {"pipeline.id"=>"main"}
[2022-12-16T11:25:10,263][INFO ][filewatch.observingtail  ][main] START, creating Discoverer, Watch with file and sincedb collections
[2022-12-16T11:25:10,280][INFO ][filewatch.observingtail  ][main] START, creating Discoverer, Watch with file and sincedb collections
[2022-12-16T11:25:10,301][INFO ][filewatch.observingtail  ][main] START, creating Discoverer, Watch with file and sincedb collections
[2022-12-16T11:25:10,301][INFO ][filewatch.observingtail  ][main] START, creating Discoverer, Watch with file and sincedb collections
[2022-12-16T11:25:10,369][INFO ][logstash.agent           ] Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
[2022-12-16T11:25:11,006][INFO ][logstash.agent           ] Successfully started Logstash API endpoint {:port=>9600}

My config:

input {
  file {
    type => "Clinic"
    path => "/opt/ehcos-server/logs/**/ehCS*.log"
    start_position => "beginning"
    codec => multiline {
      pattern => "%{TIME}.*"
      negate => "true"
      what => "previous"
    }
  }

  file {
    type => "PMG"
    path => "/opt/ehcos-server/logs/**/ehHIS*.log"
    start_position => "beginning"
    codec => multiline {
      pattern => "%{TIME}.*"
      negate => "true"
      what => "previous"
    }
  }

  file {
    type => "Mule"
    path => "/opt/ehcos-server/logs/ehcos-mule*.log"
    start_position => "beginning"
    codec => multiline {
      pattern => "%{TIME}.*"
      negate => "true"
      what => "previous"
    }
  }

  file {
    type => "Query"
    path => "/opt/ehcos-server/logs/querys*.log"
    start_position => "beginning"
    codec => multiline {
      pattern => "%{TIME}.*"
      negate => "true"
      what => "previous"
    }
  }
}

filter {
  # Drop INFO events, and ERROR events that do not mention an exception
  if ([message] =~ /ERROR/ and [message] !~ /Exception/) or [message] =~ /INFO/ {
    drop { }
  }

  if [type] == "Mule" {
    grok { match => [ "message", "%{TIME:time} %{LOGLEVEL:level}" ] }
  }

  if [type] == "Query" {
    # Replace everything up to "Method" with "method" so kv parsing starts there
    mutate {
      gsub => ["message", "^.*?Method", "method"]
    }

    kv {
      source => "message"
      field_split_pattern => ", "
      value_split => ":"
      exclude_keys => [ "Tables", "Success", "Type", "Batch", "QuerySize", "BatchSize", "Query", "Params" ]
    }

    mutate {
      rename => { "Name" => "tenant" }
      gsub => ["tenant", "^.*?jdbc/", ""]
      remove_field => [ "message" ]
    }
  }
  else {
    grok { match => [ "message", "%{TIME:time} %{LOGLEVEL:level} %{NOTSPACE:thread} %{NOTSPACE:tenant}" ] }
  }
}


output {
  elasticsearch {
    hosts => ["http://*******:9200"]
    user => "elastic"
    password => "******"
  }
}

There are a few things to check:

  • Already-read files. To re-read them from the beginning, add:
    Linux: sincedb_path => "/dev/null"
    Windows: sincedb_path => "NUL"
    or set your own path to track file processing.
  • Add a ruby debug output to the output section:
    stdout { codec => rubydebug }
  • Check that your conditionals (ifs) behave as expected; see the sketch after this list.
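
Putting those suggestions together, a minimal debug sketch for one of the inputs (the path and multiline pattern are taken from the config above; the 5-second auto_flush_interval is an arbitrary choice). sincedb_path => "/dev/null" forces a full re-read on every restart, and auto_flush_interval makes the multiline codec emit the last buffered event even when no new line arrives (without it, the final event of each file sits in the codec's buffer indefinitely):

input {
  file {
    type => "Mule"
    path => "/opt/ehcos-server/logs/ehcos-mule*.log"
    start_position => "beginning"
    # Re-read files from scratch on every restart while debugging
    sincedb_path => "/dev/null"
    codec => multiline {
      pattern => "%{TIME}.*"
      negate => "true"
      what => "previous"
      # Flush a buffered event after 5 s of inactivity, so the last
      # event of a file is not held back waiting for the next line
      auto_flush_interval => 5
    }
  }
}

output {
  # Print every event that survives the filters to stdout
  stdout { codec => rubydebug }
}

If events appear on stdout but not in Elasticsearch, the problem is on the output side; if nothing appears at all, look at the inputs (sincedb, file permissions) or at the drop conditional in the filter.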

Logs:

[2022-12-16T13:49:21,430][INFO ][logstash.runner          ] Starting Logstash {"logstash.version"=>"7.5.2"}
[2022-12-16T13:49:24,716][INFO ][org.reflections.Reflections] Reflections took 43 ms to scan 1 urls, producing 20 keys and 40 values
[2022-12-16T13:49:28,960][INFO ][logstash.outputs.elasticsearch][main] Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://elastic:xxxxxx@10.250.61.178:9200/]}}
[2022-12-16T13:49:29,271][WARN ][logstash.outputs.elasticsearch][main] Restored connection to ES instance {:url=>"http://elastic:xxxxxx@*******:9200/"}
[2022-12-16T13:49:29,338][INFO ][logstash.outputs.elasticsearch][main] ES Output version determined {:es_version=>8}
[2022-12-16T13:49:29,343][WARN ][logstash.outputs.elasticsearch][main] Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type {:es_version=>8}
[2022-12-16T13:49:29,391][INFO ][logstash.outputs.elasticsearch][main] New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>["http://******:9200"]}
[2022-12-16T13:49:29,473][INFO ][logstash.outputs.elasticsearch][main] Using default mapping template
[2022-12-16T13:49:29,538][INFO ][logstash.outputs.elasticsearch][main] Attempting to install template {:manage_template=>{"index_patterns"=>"logstash-*", "version"=>80001, "settings"=>{"index.refresh_interval"=>"5s", "number_of_shards"=>1, "index.lifecycle.name"=>"logstash-policy", "index.lifecycle.rollover_alias"=>"logstash"}, "mappings"=>{"dynamic_templates"=>[{"message_field"=>{"path_match"=>"message", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false}}}, {"string_fields"=>{"match"=>"*", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false, "fields"=>{"keyword"=>{"type"=>"keyword", "ignore_above"=>256}}}}}], "properties"=>{"@timestamp"=>{"type"=>"date"}, "@version"=>{"type"=>"keyword"}, "geoip"=>{"dynamic"=>true, "properties"=>{"ip"=>{"type"=>"ip"}, "location"=>{"type"=>"geo_point"}, "latitude"=>{"type"=>"half_float"}, "longitude"=>{"type"=>"half_float"}}}}}}}
[2022-12-16T13:49:29,648][WARN ][org.logstash.instrument.metrics.gauge.LazyDelegatingGauge][main] A gauge metric of an unknown type (org.jruby.specialized.RubyArrayOneObject) has been create for key: cluster_uuids. This may result in invalid serialization.  It is recommended to log an issue to the responsible developer/development team.
[2022-12-16T13:49:29,655][INFO ][logstash.javapipeline    ][main] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>8, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50, "pipeline.max_inflight"=>1000, "pipeline.sources"=>["/etc/logstash/conf.d/logstash-ehcos.conf"], :thread=>"#<Thread:0xabaeefc run>"}
[2022-12-16T13:49:30,051][INFO ][logstash.javapipeline    ][main] Pipeline started {"pipeline.id"=>"main"}
[2022-12-16T13:49:30,118][INFO ][filewatch.observingtail  ][main] START, creating Discoverer, Watch with file and sincedb collections
[2022-12-16T13:49:30,121][INFO ][filewatch.observingtail  ][main] START, creating Discoverer, Watch with file and sincedb collections
[2022-12-16T13:49:30,124][INFO ][filewatch.observingtail  ][main] START, creating Discoverer, Watch with file and sincedb collections
[2022-12-16T13:49:30,118][INFO ][filewatch.observingtail  ][main] START, creating Discoverer, Watch with file and sincedb collections
[2022-12-16T13:49:30,192][INFO ][logstash.agent           ] Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
[2022-12-16T13:49:30,865][INFO ][logstash.agent           ] Successfully started Logstash API endpoint {:port=>9600}

I added what you suggested, but I don't see much more info.

The strange thing is that if I do

cat ehcos-mule.log | nc -q0 192.****** 50000

then I do see the file in Kibana, and in the Logstash logs I can see the file being processed.

Port 50000 goes to the Logstash on the master server.
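
That would make sense if the Docker Logstash has a TCP input listening on that port. In a docker-elk style stack like the one below, the container's pipeline presumably contains something along these lines (an assumption; the actual pipeline files mounted from ./logstash/pipeline are not shown here):

input {
  beats {
    port => 5044
  }

  # Raw TCP input; this is what the nc test above reaches
  tcp {
    port => 50000
  }
}

If so, the nc test only proves that the Docker Logstash to Elasticsearch to Kibana path works; it does not exercise the file inputs on the .deb Logstash node.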

Docker Compose:


version: '3.7'

services:
  setup:
    build:
      context: setup/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    init: true
    volumes:
      - ./setup/entrypoint.sh:/entrypoint.sh:ro,Z
      - ./setup/helpers.sh:/helpers.sh:ro,Z
      - ./setup/roles:/roles:ro,Z
      - setup:/state:Z
    environment:
      ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
      LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
      KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
      METRICBEAT_INTERNAL_PASSWORD: ${METRICBEAT_INTERNAL_PASSWORD:-}
      FILEBEAT_INTERNAL_PASSWORD: ${FILEBEAT_INTERNAL_PASSWORD:-}
      HEARTBEAT_INTERNAL_PASSWORD: ${HEARTBEAT_INTERNAL_PASSWORD:-}
      MONITORING_INTERNAL_PASSWORD: ${MONITORING_INTERNAL_PASSWORD:-}
      BEATS_SYSTEM_PASSWORD: ${BEATS_SYSTEM_PASSWORD:-}
    networks:
      - elk
    depends_on:
      - elasticsearch

  elasticsearch:
    build:
      context: elasticsearch/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro,Z
      - elasticsearch:/usr/share/elasticsearch/data:Z
    ports:
      - 9200:9200
      - 9300:9300
    environment:
      node.name: elasticsearch
      ES_JAVA_OPTS: -Xms512m -Xmx512m
      ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
      discovery.type: single-node
      xpack.security.enabled: true
    networks:
      - elk

  kibana:
    build:
      context: kibana/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro,Z
    ports:
      - 80:5601
    environment:
      KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
      KIBANA_URL: ${KIBANA_URL}
    networks:
      - elk
    depends_on:
      - elasticsearch

  logstash:
    build:
      context: logstash/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro,Z
    ports:
      - 5044:5044
      - 50000:50000/tcp
      - 50000:50000/udp
      - 9600:9600
    environment:
      LS_JAVA_OPTS: -Xms256m -Xmx256m
      LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
    networks:
      - elk
    depends_on:
      - elasticsearch

networks:
  elk:
    driver: bridge

volumes:
  setup:
  elasticsearch:
