I am deploying the stack below to my GKE cluster (version 1.30.10-gke.1022000), and I am getting an error on the ingest-data pod: it cannot start due to changes to its StatefulSet.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: elastic-stack
namespace: argocd
annotations:
argocd.argoproj.io/sync-wave: "16"
spec:
destination:
namespace: elastic-stack
server: 'https://kubernetes.default.svc'
source:
repoURL: 'https://charts.getup.io/getupcloud'
targetRevision: 1.2.1
chart: templater
helm:
values: |
templates:
- |-
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
name: om30-qa
namespace: elastic-stack
spec:
version: 8.15.3
http:
tls:
selfSignedCertificate:
disabled: true
nodeSets:
- name: master
count: 1
config:
node.roles:
- master
- remote_cluster_client
node.store.allow_mmap: false
podTemplate:
spec:
tolerations:
- key: stack-elk
value: "true"
operator: Equal
effect: NoExecute
nodeSelector:
stack-elk: "true"
initContainers:
- name: sysctl
securityContext:
privileged: true
runAsUser: 0
command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
containers:
- name: elasticsearch
env:
- name: ES_JAVA_OPTS
value: -Xms3g -Xmx3g
resources:
requests:
memory: 4Gi
limits:
memory: 4Gi
volumeClaimTemplates:
- metadata:
name: elasticsearch-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
storageClassName: standard-rwo
- name: ingest-data
count: 1
config:
node.roles:
- data
- ingest
- ml
- transform
- remote_cluster_client
node.store.allow_mmap: false
podTemplate:
spec:
tolerations:
- key: stack-elk
value: "true"
operator: Equal
effect: NoExecute
nodeSelector:
stack-elk: "true"
initContainers:
- name: sysctl
securityContext:
privileged: true
runAsUser: 0
command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
containers:
- name: elasticsearch
env:
- name: ES_JAVA_OPTS
value: -Xms3g -Xmx3g
resources:
requests:
memory: 4Gi
limits:
memory: 4Gi
volumeClaimTemplates:
- metadata:
name: elasticsearch-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
storageClassName: standard-rwo
- |-
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
name: om30-qa
namespace: elastic-stack
spec:
config:
xpack.fleet.packages:
- name: apm
version: latest
xpack.security:
authc:
http.schemes: [basic]
providers:
anonymous.anonymous1:
order: 0
credentials:
username: "superadmin"
password: "Ksio287Ujswwuw72618js"
server:
publicBaseUrl: https://kibana.qa.teste.com
version: 8.15.3
count: 1
elasticsearchRef:
name: om30-qa
http:
tls:
selfSignedCertificate:
disabled: true
podTemplate:
spec:
containers:
- name: kibana
env:
- name: NODE_OPTIONS
value: "--max-old-space-size=2048"
resources:
limits:
memory: 2Gi
requests:
cpu: 500m
memory: 500Mi
readinessProbe:
failureThreshold: 3
httpGet:
path: /
port: 5601
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
tolerations:
- key: stack-elk
value: "true"
operator: Equal
effect: NoExecute
nodeSelector:
stack-elk: "true"
- |-
apiVersion: beat.k8s.elastic.co/v1beta1
kind: Beat
metadata:
name: om30-qa
namespace: elastic-stack
spec:
type: filebeat
version: 8.15.3
elasticsearchRef:
name: om30-qa
config:
filebeat.autodiscover:
providers:
- type: kubernetes
node: ${NODE_NAME}
hints.enabled: true
hints.default_config:
type: container
paths:
- /var/log/containers/*${data.kubernetes.container.id}.log
processors:
- decode_json_fields:
fields: ["message"]
process_array: false
max_depth: 1
target: ""
overwrite_keys: true
add_error_key: true
daemonSet:
podTemplate:
spec:
serviceAccount: filebeat-om30-qa
automountServiceAccountToken: true
tolerations:
- effect: NoSchedule
operator: Exists
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
securityContext:
runAsUser: 0
containers:
- name: filebeat
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: varlogcontainers
mountPath: /var/log/containers
- name: varlogpods
mountPath: /var/log/pods
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
volumes:
- name: varlogcontainers
hostPath:
path: /var/log/containers
- name: varlogpods
hostPath:
path: /var/log/pods
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- |-
apiVersion: apm.k8s.elastic.co/v1
kind: ApmServer
metadata:
name: apm-server
namespace: elastic-stack
spec:
config:
auth:
secret_token: nOn5rm4hrh055mmH98xDkthEhhMzThY
anonymous:
enabled: true
queue:
mem:
events: 4096
rum:
enabled: true
http:
service:
metadata:
annotations:
xexternal-dns.alpha.kubernetes.io/hostname: apm.o
xetworking.gke.io/internal-load-balancer-allow-global-access: "true"
xnetworking.gke.io/load-balancer-type: Internal
spec:
type: ClusterIP # LoadBalancer or NodePort
tls:
certificate: {}
selfSignedCertificate:
disabled: true
count: 1
podTemplate:
spec:
containers:
- name: apm-server
# Workaround to add the secret_token to the configuration file
command:
- sh
- -c
- |
sed -e s/secret_token/"auth:\n secret_token"/g config/config-secret/..data/apm-server.yml > config/apm-fixed-server.yml
exec apm-server run -e -c config/apm-fixed-server.yml
resources:
requests:
cpu: 500m
memory: 2000Mi
limits:
memory: 2000Mi
version: 8.15.3
elasticsearchRef:
name: om30-qa
kibanaRef:
name: om30-qa
- |-
apiVersion: v1
kind: ServiceAccount
metadata:
name: filebeat-om30-qa
namespace: elastic-stack
- |-
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: elastic-beat-autodiscover-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: elastic-beat-autodiscover
subjects:
- kind: ServiceAccount
name: filebeat-om30-qa
namespace: elastic-stack
- |-
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: elastic-beat-autodiscover
rules:
- apiGroups:
- ""
resources:
- nodes
- namespaces
- events
- pods
verbs:
- get
- list
- watch
- apiGroups: ["apps"]
resources:
- replicasets
verbs:
- get
- list
- watch
- apiGroups: ["batch"]
resources:
- jobs
verbs:
- get
- list
- watch
- |-
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: kb
namespace: elastic-stack
annotations:
nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth"
nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri"
external-dns.alpha.kubernetes.io/hostname: kibana.qa.teste.com
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_pass_request_headers on;
proxy_set_header Authorization "Basic c3VwZXJhZG1pbjpLc2lvMjg3VWpzd3d1dzcyNjE4anM="; # Base64 of the elastic user plus the password from the om30-qa-es-elastic-user secret, e.g.: echo -n "superadmin:Ksio287Ujswwuw72618js" | base64
spec:
ingressClassName: nginx
rules:
- host: kibana.qa.teste.com
http:
paths:
- backend:
service:
name: om30-qa-kb-http
port:
number: 5601
path: /
pathType: Prefix
project: default
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true
Error:
Update for 'om30-es-ingest-data' of type 'StatefulSet' in namespace 'elastic-system' failed with error Operation cannot be fulfilled on statefulsets.apps "om30-es-ingest-data": the object has been modified; please apply your changes to the latest version and try again
Changes detected in 'om30-es-ingest-data-es-transport-certs' of type 'SECRET' in namespace 'elastic-system', Updated 'om30-es-ingest-data' of type 'StatefulSet' in namespace 'elastic-system'
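From these messages it looks like Argo CD's self-heal and the ECK operator may both be updating the same StatefulSet. I have seen suggestions to tell Argo CD to ignore the fields owned by the operator; would something along these lines be the right direction, or is the problem elsewhere? This is only a sketch added to the Application spec above, and it assumes Argo CD 2.5+ and that the ECK operator's field manager is named elastic-operator (I have not verified that name):

# Sketch only: assumes the ECK operator registers its server-side apply
# field manager as "elastic-operator" and that Argo CD supports
# managedFieldsManagers (2.5+).
spec:
  ignoreDifferences:
    - group: apps
      kind: StatefulSet
      managedFieldsManagers:
        - elastic-operator
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
      - ServerSideApply=true
      - RespectIgnoreDifferences=true

The idea would be that fields owned by the operator's field manager are excluded from the diff, so self-heal stops reverting the operator's changes to the ingest-data StatefulSet.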