Hello,
After a lot of testing, I have a complete stack that works when everything runs in the same namespace: nginx, Filebeat, Logstash, Elasticsearch (ES), and Kibana.
Now I am trying to run the same components spread across two namespaces:
- namespace nsa: nginx, Filebeat, Logstash
- namespace esk: ES, Kibana
In the esk namespace everything is fine (manual data injection works, and so on).
In the nsa namespace, the Filebeat and Logstash pods stay stuck in "ContainerCreating".
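For reference, both namespaces already exist before anything else is deployed; they are plain Namespace objects, equivalent to:

apiVersion: v1
kind: Namespace
metadata:
  name: nsa
---
apiVersion: v1
kind: Namespace
metadata:
  name: esk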
I use:
- Windows 10 PRO
- WSL1 with Ubuntu 18.04
- Docker Desktop
Here are the main files I use:
filebeat.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: nsa
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    tags: ["nsa"]
    filebeat.config:
      modules:
        path: ${path.config}/modules.d/*.yml
        reload.enabled: false
    filebeat.autodiscover:
      providers:
        - type: kubernetes
          host: ${NODE_NAME}
          hints.enabled: true
          templates:
            - conditions.and:
                - contains.kubernetes.container.image: nginx
                - equals.kubernetes.namespace: nsa
              config:
                - module: nginx
                  access:
                    enabled: true
                    var.paths: ["/usr/share/filebeat/nginxlogs/access.log"]
                  error:
                    enabled: true
                    var.paths: ["/usr/share/filebeat/nginxlogs/error.log"]
    processors:
      - add_cloud_metadata:
      - add_host_metadata:
      - add_docker_metadata:
    output.logstash:
      hosts: ["logstash-nsa:5044"]
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: nsa
  labels:
    k8s-app: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: filebeat
          image: docker.elastic.co/beats/filebeat:7.8.0
          args: [
            "-c", "/etc/filebeat.yml",
            "-e",
          ]
          env:
            - name: ELASTICSEARCH_HOST
              value: elasticsearch-es-http
            - name: ELASTICSEARCH_PORT
              value: "9200"
            #- name: ELASTICSEARCH_USERNAME
            #  value: elastic
            #- name: ELASTICSEARCH_PASSWORD
            #  valueFrom:
            #    secretKeyRef:
            #      key: elastic
            #      name: elasticsearch-es-elastic-user
            - name: NODE_NAME
              # value: elasticsearch-es-elasticsearch-0
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          securityContext:
            runAsUser: 0
          resources:
            limits:
              memory: 200Mi
            requests:
              cpu: 100m
              memory: 100Mi
          volumeMounts:
            - name: config
              mountPath: /etc/filebeat.yml
              subPath: filebeat.yml
              readOnly: true
            - name: data
              mountPath: /usr/share/filebeat/data
            - name: varlibdockercontainers
              mountPath: /var/lib/docker/containers
              readOnly: true
            - name: varlog
              mountPath: /var/log
              readOnly: true
            #- name: es-certs
            #  mountPath: /mnt/elastic/tls.crt
            #  readOnly: true
            #  subPath: tls.crt
            - name: nginxlogs
              mountPath: /usr/share/filebeat/nginxlogs
      volumes:
        - name: config
          configMap:
            defaultMode: 0600
            name: filebeat-config
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
        - name: varlog
          hostPath:
            path: /var/log
        - name: data
          hostPath:
            path: /var/lib/filebeat-data
            type: DirectoryOrCreate
        #- name: es-certs
        #  secret:
        #    secretName: elasticsearch-es-http-certs-public
        - name: nginxlogs
          hostPath:
            path: /c/PATH/TO/PERSISTENT/VOLUME/nginx-data
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
  - kind: ServiceAccount
    name: filebeat
    namespace: nsa
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
  - apiGroups: [""]
    resources:
      - namespaces
      - pods
    verbs:
      - get
      - watch
      - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: nsa
  labels:
    k8s-app: filebeat
---
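A note on the output.logstash host above: Filebeat and Logstash both run in nsa, so the short Service name logstash-nsa is enough. If I ever move them into different namespaces, my understanding is that the host would have to carry the namespace of the Logstash Service (assuming the default cluster.local cluster domain), for example:

    output.logstash:
      hosts: ["logstash-nsa.nsa.svc.cluster.local:5044"]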
logstash.yaml
---
apiVersion: v1
kind: Service
metadata:
  namespace: nsa
  labels:
    app: logstash-nsa
  name: logstash-nsa
spec:
  ports:
    - name: "25826"
      port: 25826
      targetPort: 25826
    - name: "5044"
      port: 5044
      targetPort: 5044
  selector:
    app: logstash-nsa
status:
  loadBalancer: {}
---
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: nsa
  name: logstash-configmap-nsa
data:
  logstash.yml: |
    http.host: "0.0.0.0"
    path.config: /usr/share/logstash/pipeline
  logstash.conf: |
    input {
      beats {
        port => 5044
      }
    }
    filter {
      mutate { add_field => { "show" => "This data will be in the output" } }
      mutate { add_field => { "[@metadata][test1]" => "foo" } }
      mutate { add_field => { "[@metadata][test2]" => "bar" } }
      if [event][module] == "nginx-a" {
        if [fileset][name] == "access" {
          grok {
            match => { "message" => ["%{IPORHOST:[nginx][access][remote_ip]} - %{DATA:[nginx][access][user_name]} \[%{HTTPDATE:[nginx][access][time]}\] \"%{WORD:[nginx][access][method]} %{DATA:[nginx][access][url]} HTTP/%{NUMBER:[nginx][access][http_version]}\" %{NUMBER:[nginx][access][response_code]} %{NUMBER:[nginx][access][body_sent][bytes]} \"%{DATA:[nginx][access][referrer]}\" \"%{DATA:[nginx][access][agent]}\""] }
            remove_field => "message"
          }
          mutate {
            add_field => { "read_timestamp" => "%{@timestamp}" }
          }
          useragent {
            source => "[nginx][access][agent]"
            target => "[nginx][access][user_agent]"
            remove_field => "[nginx][access][agent]"
          }
          geoip {
            source => "[nginx][access][remote_ip]"
            target => "[nginx][access][geoip]"
          }
        }
        else if [fileset][name] == "error" {
          grok {
            match => { "message" => ["%{DATA:[nginx][error][time]} \[%{DATA:[nginx][error][level]}\] %{NUMBER:[nginx][error][pid]}#%{NUMBER:[nginx][error][tid]}: (\*%{NUMBER:[nginx][error][connection_id]} )?%{GREEDYDATA:[nginx][error][message]}"] }
            remove_field => "message"
          }
          mutate {
            rename => { "@timestamp" => "read_timestamp" }
          }
          date {
            match => [ "[nginx][error][time]", "YYYY/MM/dd H:m:s" ]
            remove_field => "[nginx][error][time]"
          }
        }
      }
    }
    output {
      if "access" in [fileset][name] {
        elasticsearch {
          index => "access-%{[@metadata][beat]}-%{[@metadata][test1]}-%{+YYYY.MM.dd-H.m}"
          namespace => "namespace_esk"
          hosts => [ "${ES_HOSTS}" ]
          #user => "${ES_USER}"
          #password => "${ES_PASSWORD}"
          #cacert => '/etc/logstash/certificates/ca.crt'
        }
      }
      if "error" in [fileset][name] {
        elasticsearch {
          index => "error-%{[@metadata][beat]}-%{[@metadata][test2]}-%{+YYYY.MM.dd-H.m}"
          namespace => "namespace_esk"
          hosts => [ "${ES_HOSTS}" ]
          #user => "${ES_USER}"
          #password => "${ES_PASSWORD}"
          #cacert => '/etc/logstash/certificates/ca.crt'
        }
      }
    }
---
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: logstash-nsa
  name: logstash-nsa
  namespace: nsa
spec:
  containers:
    - image: docker.elastic.co/logstash/logstash:7.8.0
      name: logstash
      ports:
        - containerPort: 25826
        - containerPort: 5044
      env:
        - name: ES_HOSTS
          value: "https://elasticsearch-es-http:9200"
        #- name: ES_USER
        #  value: "elastic"
        #- name: ES_PASSWORD
        #  valueFrom:
        #    secretKeyRef:
        #      name: elasticsearch-es-elastic-user
        #      key: elastic
      resources: {}
      volumeMounts:
        - name: config-volume
          mountPath: /usr/share/logstash/config
        - name: logstash-pipeline-volume
          mountPath: /usr/share/logstash/pipeline
        #- name: cert-ca
        #  mountPath: "/etc/logstash/certificates"
        #  readOnly: true
  restartPolicy: OnFailure
  volumes:
    - name: config-volume
      configMap:
        name: logstash-configmap-nsa
        items:
          - key: logstash.yml
            path: logstash.yml
    - name: logstash-pipeline-volume
      configMap:
        name: logstash-configmap-nsa
        items:
          - key: logstash.conf
            path: logstash.conf
    #- name: cert-ca
    #  secret:
    #    secretName: elasticsearch-es-http-certs-public
status: {}
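One thing I am not sure about in this file: ES_HOSTS still points at the short name elasticsearch-es-http, but Elasticsearch now lives in the esk namespace while this Pod runs in nsa. If the short name does not resolve across namespaces, the env entry would presumably need the fully qualified Service name instead (a sketch, assuming the default cluster.local cluster domain):

      env:
        - name: ES_HOSTS
          value: "https://elasticsearch-es-http.esk.svc.cluster.local:9200"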
elasticsearch.yaml
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: elasticsearch
  namespace: esk
spec:
  version: 7.8.0
  nodeSets:
    - name: elasticsearch
      count: 1
      config:
        node.store.allow_mmap: false
        node.master: true
        node.data: true
        node.ingest: true
        xpack.security.authc:
          anonymous:
            username: anonymous
            roles: superuser
            authz_exception: false
      podTemplate:
        metadata:
          labels:
            app: elasticsearch
        spec:
          initContainers:
            - name: sysctl
              securityContext:
                privileged: true
              command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
          containers:
            - name: elasticsearch
              resources:
                requests:
                  memory: 4Gi
                  cpu: 0.5
                limits:
                  memory: 4Gi
                  cpu: 1
              env:
                - name: ES_JAVA_OPTS
                  value: "-Xms2g -Xmx2g"
      volumeClaimTemplates:
        - metadata:
            name: elasticsearch-data
          spec:
            storageClassName: es-data
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 5Gi
...
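For completeness, the es-data StorageClass referenced in volumeClaimTemplates is defined separately; on Docker Desktop a hostpath-backed class of this kind looks roughly like the following sketch (assuming the built-in docker.io/hostpath provisioner):

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: es-data
provisioner: docker.io/hostpath
reclaimPolicy: Delete
volumeBindingMode: Immediate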