ryh
June 6, 2019, 10:12pm
1
Hi All,
I need your help. When I build my ELK stack with Filebeat and my app locally, the fields are parsed and the messages come through cleanly. But when I deploy the same stack with Ansible, things go completely wrong.
Below are my configs for a local build.
elasticsearch.yml
cluster.name: elasticsearch-master
node.name: elasticsearch-node
network.host: 0.0.0.0
# minimum_master_nodes need to be explicitly set when bound on a public IP
# set to 1 to allow single node clusters
# Details: https://github.com/elastic/elasticsearch/pull/17288
# discovery.zen.minimum_master_nodes: 1
#cluster.initial_master_nodes:
#- elasticsearch-node
discovery.type: single-node
bootstrap.memory_lock: true
elasticsearch dockerfile
FROM docker.elastic.co/elasticsearch/elasticsearch:7.1.1
COPY --chown=elasticsearch:elasticsearch elasticsearch.yml /usr/share/elasticsearch/config/
CMD ["elasticsearch", "-Elogger.level=INFO"]
filebeat.yml
filebeat.inputs:
- type: docker
  combine_partial: true
  containers:
    path: "/usr/share/dockerlogs/data"
    stream: "stdout"
    ids:
      - "*"
  exclude_files: ['\.gz$']
  # ignore_older: 10m

filebeat.config:
  modules:
    path: ${path.config}/modules.d/*.yml
    reload.enabled: false

processors:
- add_tags:
    tags: [development, mimidae]

output:
  logstash:
    hosts: 'logstash'

logging.files:
  path: /var/log/filebeat
  name: filebeat
  keepfiles: 7
  permissions: 0644

ssl.verification_mode: none
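To sanity-check this config, Filebeat has built-in test subcommands I can run inside the container (using the full binary path from the official image, and the container name filebeat from the compose file below):

docker exec filebeat /usr/share/filebeat/filebeat test config -c /usr/share/filebeat/filebeat.yml
docker exec filebeat /usr/share/filebeat/filebeat test output -c /usr/share/filebeat/filebeat.yml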
filebeat dockerfile
FROM docker.elastic.co/beats/filebeat:7.1.1
COPY filebeat.yml /usr/share/filebeat/filebeat.yml
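# Beats refuse to load a config file that is writable by group/other,
# hence the ownership and permission fixes below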
USER root
RUN chown -R root /usr/share/filebeat/
RUN chmod -R go-w /usr/share/filebeat/
logstash.conf (pipeline)
input {
  beats {
    port => 5044
    host => "0.0.0.0"
  }
}

filter {
  grok {
    match => { "message" => "%{USERNAME:service_name} %{USERNAME:username} \[%{LOGLEVEL:log_level}\] %{GREEDYDATA:log_message}" }
  }
}

output {
  elasticsearch {
    hosts => "elasticsearch"
    manage_template => false
    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
  }
}
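For reference, the grok pattern above expects lines shaped like service user [LEVEL] text; a hypothetical line would parse like this:

# hypothetical input line:
#   mimidae root [INFO] Still no SpitzTlmHk
# extracted fields:
#   service_name => "mimidae"
#   username     => "root"
#   log_level    => "INFO"
#   log_message  => "Still no SpitzTlmHk"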
logstash dockerfile
FROM docker.elastic.co/logstash/logstash:7.1.1
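# the stock image ships a default pipeline config; remove it so only the copied pipeline is loaded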
RUN rm -f /usr/share/logstash/pipeline/logstash.conf
COPY pipeline /usr/share/logstash/pipeline/
and here is my docker-compose
version: '3'
services:
  mimidae:
    hostname: mimidae
    image: gsw/mimidae:latest
    build:
      context: ../
      dockerfile: Dockerfile
    command: ["-c", "files/mimidae.json.example"]
    volumes:
      - "/opt/dicts/spitz/latest:/opt/dicts/spitz/latest"
  filebeat:
    hostname: filebeat
    image: filebeat/filebeat:latest
    build:
      context: filebeat
      dockerfile: Dockerfile
    volumes:
      # needed to access all docker logs (read only):
      - "/var/lib/docker/containers:/usr/share/dockerlogs/data:ro"
      # needed to access additional information about containers
      - "/var/run/docker.sock:/var/run/docker.sock"
    links:
      - logstash
  kibana:
    image: docker.elastic.co/kibana/kibana:7.1.1
    environment:
      - "LOGGING_QUIET=true"
    links:
      - elasticsearch
    ports:
      - 5601:5601
  logstash:
    hostname: logstash
    image: filebeat/logstash:latest
    build:
      context: logstash
      dockerfile: Dockerfile
    ports:
      - 5044:5044
    environment:
      LOG_LEVEL: error
    links:
      - elasticsearch
  elasticsearch:
    hostname: elasticsearch
    image: filebeat/elasticsearch:latest
    build:
      context: elasticsearch
      dockerfile: Dockerfile
    environment:
      - "ES_JAVA_OPTS=-Xms256m -Xmx256m"
    ports:
      - 9200:9200
      - 9300:9300
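Once the stack is up, I can confirm that documents are arriving with something like:

docker-compose up -d
curl -s 'localhost:9200/_cat/indices?v'
curl -s 'localhost:9200/filebeat-*/_count?pretty'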
warkolm
(Mark Walkom)
June 6, 2019, 10:35pm
2
Rather than deleting your question, it would be better if you could share your solution, as it may help others in the future with a similar problem.
ryh
June 6, 2019, 10:50pm
3
Below are the configs I use with Ansible.
filebeat tasks > main.yml
- name: ensure dockerlogs directory exists
  become: true
  file:
    state: directory
    path: /usr/share/filebeat/dockerlogs

- name: copy filebeat config file
  become: true
  copy:
    src: filebeat.yml
    dest: /usr/share/filebeat/filebeat.yml
    owner: root
    group: root

- name: create container network
  become: true
  docker_network:
    name: elk-network

- name: start filebeat
  become: true
  docker_container:
    name: filebeat
    image: "{{ filebeat_docker_image_name }}:{{ filebeat_docker_image_tag }}"
    state: started
    user: root
    restart: yes
    volumes:
      - /var/lib/docker/containers:/usr/share/dockerlogs/data:ro
      - /var/run/docker.sock:/var/run/docker.sock
      - /usr/share/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml
    restart_policy: unless-stopped
    networks:
      - name: elk-network
        links:
          - logstash
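After the play runs, I can check the deployed config and the container logs from the control node (elk here is a placeholder for my inventory group):

ansible elk -b -m command -a 'cat /usr/share/filebeat/filebeat.yml'
ansible elk -b -m command -a 'docker logs --tail 50 filebeat'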
filebeat.yml
filebeat.inputs:
- type: docker
  combine_partial: true
  containers:
    path: "/usr/share/dockerlogs/data"
    stream: "stdout"
    ids:
      - "*"
  exclude_files: ['\.gz$']

filebeat.config:
  modules:
    path: ${path.config}/modules.d/*.yml
    reload.enabled: false

processors:
- add_tags:
    tags: [development, mimidae]

output:
  logstash:
    hosts: 'logstash'

logging.files:
  path: /var/log/filebeat
  name: filebeat
  keepfiles: 7
  permissions: 0644

ssl.verification_mode: none
logstash tasks > main.yml
- name: ensure logstash config directory exists
  become: true
  file:
    state: directory
    path: "{{ logstash_backend_config_directory }}"

- name: copy logstash config file
  become: true
  copy:
    src: logstash.conf
    dest: "{{ logstash_backend_config_directory }}/logstash.conf"

- name: start logstash
  become: true
  docker_container:
    name: logstash
    image: "{{ logstash_docker_image_name }}:{{ logstash_docker_image_tag }}"
    state: started
    restart: yes
    volumes:
      - "{{ logstash_backend_config_directory }}/logstash.conf:{{ logstash_backend_config_directory }}/logstash.conf"
    published_ports:
      - 5044:5044
    env:
      LOG_LEVEL: error
    restart_policy: unless-stopped
    networks:
      - name: elk-network
        aliases:
          - logstash
        links:
          - elasticsearch
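To confirm that logstash and elasticsearch actually ended up on the same network, I can inspect it on the host:

docker network inspect elk-network --format '{{range .Containers}}{{.Name}} {{end}}'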
logstash.conf (pipeline)
input {
  beats {
    port => 5044
    host => "0.0.0.0"
  }
}

filter {
  grok {
    match => { "message" => "%{USERNAME:service_name} %{USERNAME:username} \[%{LOGLEVEL:log_level}\] %{GREEDYDATA:log_message}" }
  }
}

output {
  elasticsearch {
    hosts => "elasticsearch"
    manage_template => false
    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
  }
}
elasticsearch tasks > main.yml
- name: ensure elasticsearch config directory exists
  become: true
  file:
    state: directory
    path: /usr/share/elasticsearch/config

- name: copy elasticsearch config file
  become: true
  copy:
    src: elasticsearch.yml
    dest: /usr/share/elasticsearch/config/elasticsearch.yml

- name: start elasticsearch
  become: true
  docker_container:
    name: elasticsearch
    image: "{{ elasticsearch_docker_image_name }}:{{ elasticsearch_docker_image_tag }}"
    state: started
    user: root
    restart: yes
    published_ports:
      - 9200:9200
      - 9300:9300
    env:
      ES_JAVA_OPTS: "-Xms256m -Xmx256m"
    volumes:
      - /usr/share/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
    restart_policy: unless-stopped
    networks:
      - name: elk-network
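Once that task has run, a quick health check from the host:

curl -s 'localhost:9200/_cluster/health?pretty'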
elasticsearch config
cluster.name: elasticsearch-master
node.name: elasticsearch-node
network.host: 0.0.0.0
discovery.type: single-node
bootstrap.memory_lock: true
ryh
June 6, 2019, 10:54pm
4
I wasn't trying to hide the solution. I just had to gather my thoughts and ask the question properly.
When I run the following search against Elasticsearch:
{
  "query": {
    "exists": {
      "field": "message"
    }
  },
  "sort": [
    { "@timestamp": { "order": "desc" } }
  ]
}
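A query like this is typically sent with curl along these lines (the surrounding command was cut off above, and the filebeat-* index pattern is an assumption):

# hypothetical wrapper for the query above; the exact index was not shown
curl -s -H 'Content-Type: application/json' \
  'localhost:9200/filebeat-*/_search?pretty' \
  -d '{"query":{"exists":{"field":"message"}},"sort":[{"@timestamp":{"order":"desc"}}]}'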
Locally, I get the expected result:
{
  "_index" : "filebeat-7.1.1-2019.06.06",
  "_type" : "_doc",
  "_id" : "wbTvLmsBoGSZycNxpvum",
  "_score" : null,
  "_source" : {
    "log" : {
      "offset" : 21604,
      "file" : {
        "path" : "/usr/share/dockerlogs/data/2da681fe19d85df4c864f8acf7f4b70822b9bd17a99e126a33837354fac4f705/2da681fe19d85df4c864f8acf7f4b70822b9bd17a99e126a33837354fac4f705-json.log"
      }
    },
    "input" : {
      "type" : "docker"
    },
    "service_name" : "SOME_SERVICE_NAME",
    "stream" : "stdout",
    "tags" : [
      "development",
      "mimidae",
      "beats_input_codec_plain_applied"
    ],
    "username" : "root",
    "ecs" : {
      "version" : "1.0.0"
    },
    "log_level" : "INFO",
    "message" : "SOME CORRECT MESSAGE",
    "@timestamp" : "2019-06-06T22:36:21.876Z",
    "@version" : "1",
    "host" : {
      "name" : "filebeat"
    },
    "agent" : {
      "type" : "filebeat",
      "id" : "2df2eebd-e5ae-4063-a7c3-208d134c982f",
      "hostname" : "filebeat",
      "version" : "7.1.1",
      "ephemeral_id" : "7cf812fa-4b8d-446a-b982-9d70e1672d55"
    },
    "log_message" : "Still no SpitzTlmHk"
  },
  "sort" : [
    1559860581876
  ]
}
But with the Ansible deployment, the messages aren't parsed properly, so they don't show up in Elasticsearch.
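One thing I could try next is adding a stdout output to the pipeline to see what Logstash actually receives from Filebeat (a standard debugging step, not part of my configs above):

output {
  # print every incoming event to the Logstash container logs
  stdout { codec => rubydebug }
}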
Thanks in advance.