Copy
# Add the official Fluent Bit Helm repository and refresh the local chart index
helm repo add fluent https://fluent.github.io/helm-charts
helm repo update
# Install into the "logging" namespace using a custom values file (replace <filename>)
helm install fluent-bit fluent/fluent-bit --namespace logging -f <filename>
# extract the values.yaml before install using helm show values
helm show values fluent/fluent-bit > values.yaml
Check the installed version:
Copy
helm list -n logging
Replace 'fluent-bit' with your actual release name and 'logging' with your namespace.
Copy
# Apply an updated values file to the existing "fluent-bit" release
helm upgrade fluent-bit fluent/fluent-bit \
--namespace logging \
-f values-staging-eks-cluster.yaml
Copy
# install specific version
# --install makes this idempotent: installs the release if absent, upgrades it if present
helm upgrade --install fluent-bit fluent/fluent-bit \
--namespace logging \
--version 0.56.0 \
-f values-staging-eks-cluster.yaml
Customisation in Fluent Bit:
Production Changes:
Copy
# Summary of the production-hardening settings. Each group below belongs to a
# different section of the full values.yaml shown later on this page.

# --- [SERVICE] section: disk-backed buffering ---
# PERSISTENCE LAYER: This is critical for your 7-day concern
storage.path /var/log/fluent-bit/storage
storage.sync normal
storage.checksum off
# Limits how much memory is used to manage the disk backlog
storage.backpressure_mem_limit 60M

# --- [INPUT] tail section: remember read offsets across restarts ---
# DB is essential: It remembers the file offset (where it last read)
DB /var/log/fluent-bit/flb_kube.db
DB.locking true
# Use filesystem buffering to survive crashes/outages
storage.type filesystem
Refresh_Interval 10

# --- [FILTER] kubernetes section ---
# Recommended: buffer specifically for metadata lookups
Buffer_Size 32kb

# --- [OUTPUT] es section ---
Logstash_DateFormat %Y.%m.%d
Retry_Limit 20

# --- daemonSetVolumes addition: node-local state directory ---
- name: fluentbit-state
  hostPath:
    path: /var/lib/fluent-bit # Create this on your worker nodes
    type: DirectoryOrCreate

# --- daemonSetVolumeMounts addition: mount it where storage.path and DB point ---
- name: fluentbit-state
  mountPath: /var/log/fluent-bit # Maps storage.path and DB here
Copy
# Helm values override for the fluent/fluent-bit chart.
# NOTE(review): YAML indentation was lost in the original paste; restored here
# to valid chart-values structure. Diff against `helm show values fluent/fluent-bit`.
config:
  service: |
    [SERVICE]
        Daemon Off
        Flush {{ .Values.flush }}
        Log_Level {{ .Values.logLevel }}
        Parsers_File /fluent-bit/etc/parsers.conf
        Parsers_File /fluent-bit/etc/conf/custom_parsers.conf
        HTTP_Server On
        HTTP_Listen 0.0.0.0
        HTTP_Port {{ .Values.metricsPort }}
        Health_Check On
        # PERSISTENCE LAYER: This is critical for your 7-day concern
        storage.path /var/log/fluent-bit/storage
        storage.sync normal
        storage.checksum off
        # Limits how much memory is used to manage the disk backlog
        storage.backpressure_mem_limit 60M

  ## https://docs.fluentbit.io/manual/pipeline/inputs
  inputs: |
    [INPUT]
        Name tail
        Path /var/log/containers/*.log
        multiline.parser docker, cri
        Tag kube.*
        Mem_Buf_Limit 50MB
        Skip_Long_Lines On
        # DB is essential: It remembers the file offset (where it last read)
        DB /var/log/fluent-bit/flb_kube.db
        DB.locking true
        # Use filesystem buffering to survive crashes/outages
        storage.type filesystem
        Refresh_Interval 10

    # we are not forwarding node logs to kibana
    # [INPUT]
    #     Name systemd
    #     Tag host.*
    #     Systemd_Filter _SYSTEMD_UNIT=kubelet.service
    #     Read_From_Tail On

  ## https://docs.fluentbit.io/manual/pipeline/filters
  filters: |
    [FILTER]
        Name kubernetes
        Match kube.*
        Merge_Log On
        Keep_Log Off
        K8S-Logging.Parser On
        K8S-Logging.Exclude On
        # Recommended: buffer specifically for metadata lookups
        Buffer_Size 32kb

  ## https://docs.fluentbit.io/manual/pipeline/outputs
  outputs: |
    [OUTPUT]
        Name es
        Match kube.*
        Host elasticsearch.internal
        Port 9200
        # NOTE(review): do not commit real credentials; inject via a Secret / env var
        HTTP_User elastic
        HTTP_Passwd xxxxxxx
        # Enable Logstash format compatibility
        Logstash_Format On
        Logstash_Prefix apps-prod-cluster
        # Date format for a Daily index (Year.Month.Day)
        # Resulting Index Example: apps-prod-cluster-2024.07.15
        Logstash_DateFormat %Y.%m.%d
        # ElasticSearch 7.x/8.x compatibility (removes type mapping)
        Suppress_Type_Name On
        # this is required for python type of application where dots are used in keys
        Replace_Dots On
        # TLS Configuration (Required for AWS OpenSearch or Elastic Cloud)
        tls On
        tls.verify Off
        # CHANGED: Instead of "False" (infinite), we set a high limit.
        # This prevents a single "bad log" from blocking your
        # entire pipeline for 7 days.
        Retry_Limit 20
        # Buffer_Size should be "False" to let the engine
        # dynamically allocate based on the log size.
        Buffer_Size False
        # Buffer handling
        # Retry_Limit 5 # Stop retrying after 5 attempts so you don't block the queue
        # Buffer_Size 256KB # Increase buffer size for large Python stack traces

  ## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/upstream-servers
  ## This configuration is deprecated, please use `extraFiles` instead.
  upstream: {}

  ## https://docs.fluentbit.io/manual/pipeline/parsers
  customParsers: |
    [PARSER]
        Name docker_no_time
        Format json
        Time_Keep Off
        Time_Key time
        Time_Format %Y-%m-%dT%H:%M:%S.%L

  # This allows adding more files with arbitrary filenames to /fluent-bit/etc/conf by providing key/value pairs.
  # The key becomes the filename, the value becomes the file content.
  extraFiles: {}
  #  upstream.conf: |
  #    [UPSTREAM]
  #        upstream1
  #
  #    [NODE]
  #        name node-1
  #        host 127.0.0.1
  #        port 43000
  #  example.conf: |
  #    [OUTPUT]
  #        Name example
  #        Match foo.*
  #        Host bar

# The config volume is mounted by default, either to the existingConfigMap value, or the default of "fluent-bit.fullname"
volumeMounts:
  - name: config
    mountPath: /fluent-bit/etc/conf

daemonSetVolumes:
  - name: varlog
    hostPath:
      path: /var/log
  - name: varlibdockercontainers
    hostPath:
      path: /var/lib/docker/containers
  - name: etcmachineid
    hostPath:
      path: /etc/machine-id
      type: File
  # Node-local state dir backing storage.path and the tail DB (survives pod restarts)
  - name: fluentbit-state
    hostPath:
      path: /var/lib/fluent-bit # Create this on your worker nodes
      type: DirectoryOrCreate

daemonSetVolumeMounts:
  - name: varlog
    mountPath: /var/log
  - name: varlibdockercontainers
    mountPath: /var/lib/docker/containers
    readOnly: true
  - name: etcmachineid
    mountPath: /etc/machine-id
    readOnly: true
  - name: fluentbit-state
    mountPath: /var/log/fluent-bit # Maps storage.path and DB here
Copy
# Roll the updated values out to the running release
helm upgrade fluent-bit fluent/fluent-bit --namespace logging -f values.yaml
# Tail logs from all Fluent Bit DaemonSet pods to verify a clean startup
kubectl logs -l app.kubernetes.io/name=fluent-bit -n logging -f
EC2 server installation
Copy
# Install FluentBit
# NOTE(review): piping curl straight to sh runs unreviewed remote code as the
# current user — consider downloading, inspecting, then executing the script.
curl https://raw.githubusercontent.com/fluent/fluent-bit/master/install.sh | sh
Reference: https://docs.fluentbit.io/manual/2.1/installation/linux/amazon-linux
