Last active
October 27, 2018 21:56
-
-
Save AjeetK/766697835a93a286ef42e6cb8e6634a1 to your computer and use it in GitHub Desktop.
Fluentd Configmap For Kubernetes
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# ConfigMap holding the Fluentd configuration; each key under `data` is
# mounted as a separate config file into the Fluentd DaemonSet pods.
kind: ConfigMap
apiVersion: v1
metadata:
  name: fluentd-es-config-v0.1.5
  namespace: default
  labels:
    # Reconcile mode: the kube addon-manager owns this object and will
    # revert out-of-band edits — change it via the manifest, not kubectl edit.
    addonmanager.kubernetes.io/mode: Reconcile
data:
system.conf: |- | |
<system> | |
root_dir /tmp/fluentd-buffers/ | |
</system> | |
containers.input.conf: |- | |
<source> | |
@id fluentd-containers.log | |
@type tail | |
path /var/log/containers/*.log | |
pos_file /var/log/es-containers.log.pos | |
tag raw.kubernetes.* | |
read_from_head true | |
<parse> | |
@type multi_format | |
<pattern> | |
format json | |
time_key time | |
time_format %Y-%m-%dT%H:%M:%S.%NZ | |
</pattern> | |
<pattern> | |
format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/ | |
time_format %Y-%m-%dT%H:%M:%S.%N%:z | |
</pattern> | |
</parse> | |
</source> | |
# Detect exceptions in the log output and forward them as one log entry. | |
<match raw.kubernetes.**> | |
@id raw.kubernetes | |
@type detect_exceptions | |
remove_tag_prefix raw | |
message log | |
stream stream | |
multiline_flush_interval 5 | |
max_bytes 500000 | |
max_lines 1000 | |
</match> | |
system.input.conf: |- | |
# Example: | |
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081 | |
<source> | |
@id minion | |
@type tail | |
format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/ | |
time_format %Y-%m-%d %H:%M:%S | |
path /var/log/salt/minion | |
pos_file /var/log/salt.pos | |
tag salt | |
</source> | |
# Example: | |
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script | |
<source> | |
@id startupscript.log | |
@type tail | |
format syslog | |
path /var/log/startupscript.log | |
pos_file /var/log/es-startupscript.log.pos | |
tag startupscript | |
</source> | |
# Examples: | |
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json" | |
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404 | |
# TODO(random-liu): Remove this after cri container runtime rolls out. | |
<source> | |
@id docker.log | |
@type tail | |
format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/ | |
path /var/log/docker.log | |
pos_file /var/log/es-docker.log.pos | |
tag docker | |
</source> | |
# Example: | |
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal | |
<source> | |
@id etcd.log | |
@type tail | |
# Not parsing this, because it doesn't have anything particularly useful to | |
# parse out of it (like severities). | |
format none | |
path /var/log/etcd.log | |
pos_file /var/log/es-etcd.log.pos | |
tag etcd | |
</source> | |
# Multi-line parsing is required for all the kube logs because very large log | |
# statements, such as those that include entire object bodies, get split into | |
# multiple lines by glog. | |
# Example: | |
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537] | |
<source> | |
@id kubelet.log | |
@type tail | |
format multiline | |
multiline_flush_interval 5s | |
format_firstline /^\w\d{4}/ | |
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/ | |
time_format %m%d %H:%M:%S.%N | |
path /var/log/kubelet.log | |
pos_file /var/log/es-kubelet.log.pos | |
tag kubelet | |
</source> | |
# Example: | |
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed | |
<source> | |
@id kube-proxy.log | |
@type tail | |
format multiline | |
multiline_flush_interval 5s | |
format_firstline /^\w\d{4}/ | |
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/ | |
time_format %m%d %H:%M:%S.%N | |
path /var/log/kube-proxy.log | |
pos_file /var/log/es-kube-proxy.log.pos | |
tag kube-proxy | |
</source> | |
# Example: | |
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266] | |
<source> | |
@id kube-apiserver.log | |
@type tail | |
format multiline | |
multiline_flush_interval 5s | |
format_firstline /^\w\d{4}/ | |
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/ | |
time_format %m%d %H:%M:%S.%N | |
path /var/log/kube-apiserver.log | |
pos_file /var/log/es-kube-apiserver.log.pos | |
tag kube-apiserver | |
</source> | |
# Example: | |
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui | |
<source> | |
@id kube-controller-manager.log | |
@type tail | |
format multiline | |
multiline_flush_interval 5s | |
format_firstline /^\w\d{4}/ | |
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/ | |
time_format %m%d %H:%M:%S.%N | |
path /var/log/kube-controller-manager.log | |
pos_file /var/log/es-kube-controller-manager.log.pos | |
tag kube-controller-manager | |
</source> | |
# Example: | |
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312] | |
<source> | |
@id kube-scheduler.log | |
@type tail | |
format multiline | |
multiline_flush_interval 5s | |
format_firstline /^\w\d{4}/ | |
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/ | |
time_format %m%d %H:%M:%S.%N | |
path /var/log/kube-scheduler.log | |
pos_file /var/log/es-kube-scheduler.log.pos | |
tag kube-scheduler | |
</source> | |
# Example: | |
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf | |
<source> | |
@id glbc.log | |
@type tail | |
format multiline | |
multiline_flush_interval 5s | |
format_firstline /^\w\d{4}/ | |
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/ | |
time_format %m%d %H:%M:%S.%N | |
path /var/log/glbc.log | |
pos_file /var/log/es-glbc.log.pos | |
tag glbc | |
</source> | |
# Example: | |
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf | |
<source> | |
@id cluster-autoscaler.log | |
@type tail | |
format multiline | |
multiline_flush_interval 5s | |
format_firstline /^\w\d{4}/ | |
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/ | |
time_format %m%d %H:%M:%S.%N | |
path /var/log/cluster-autoscaler.log | |
pos_file /var/log/es-cluster-autoscaler.log.pos | |
tag cluster-autoscaler | |
</source> | |
# Logs from systemd-journal for interesting services. | |
# TODO(random-liu): Remove this after cri container runtime rolls out. | |
<source> | |
@id journald-docker | |
@type systemd | |
matches [{ "_SYSTEMD_UNIT": "docker.service" }] | |
<storage> | |
@type local | |
persistent true | |
path /var/log/journald-docker.pos | |
</storage> | |
read_from_head true | |
tag docker | |
</source> | |
<source> | |
@id journald-container-runtime | |
@type systemd | |
matches [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }] | |
<storage> | |
@type local | |
persistent true | |
path /var/log/journald-container-runtime.pos | |
</storage> | |
read_from_head true | |
tag container-runtime | |
</source> | |
<source> | |
@id journald-kubelet | |
@type systemd | |
matches [{ "_SYSTEMD_UNIT": "kubelet.service" }] | |
<storage> | |
@type local | |
persistent true | |
path /var/log/journald-kubelet.pos | |
</storage> | |
read_from_head true | |
tag kubelet | |
</source> | |
<source> | |
@id journald-node-problem-detector | |
@type systemd | |
matches [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }] | |
<storage> | |
@type local | |
persistent true | |
path /var/log/journald-node-problem-detector.pos | |
</storage> | |
read_from_head true | |
tag node-problem-detector | |
</source> | |
<source> | |
@id kernel | |
@type systemd | |
matches [{ "_TRANSPORT": "kernel" }] | |
<storage> | |
@type local | |
persistent true | |
path /var/log/kernel.pos | |
</storage> | |
<entry> | |
fields_strip_underscores true | |
fields_lowercase true | |
</entry> | |
read_from_head true | |
tag kernel | |
</source> | |
forward.input.conf: |- | |
# Takes the messages sent over TCP | |
<source> | |
@type forward | |
</source> | |
monitoring.conf: |- | |
# Prometheus Exporter Plugin | |
# input plugin that exports metrics | |
<source> | |
@type prometheus | |
</source> | |
<source> | |
@type monitor_agent | |
</source> | |
# input plugin that collects metrics from MonitorAgent | |
<source> | |
@type prometheus_monitor | |
<labels> | |
host ${hostname} | |
</labels> | |
</source> | |
# input plugin that collects metrics for output plugin | |
<source> | |
@type prometheus_output_monitor | |
<labels> | |
host ${hostname} | |
</labels> | |
</source> | |
# input plugin that collects metrics for in_tail plugin | |
<source> | |
@type prometheus_tail_monitor | |
<labels> | |
host ${hostname} | |
</labels> | |
</source> | |
output.conf: |- | |
# Enriches records with Kubernetes metadata | |
<filter kubernetes.**> | |
@type kubernetes_metadata | |
</filter> | |
<filter kubernetes.var.log.containers.myapp**.log> | |
@type parser | |
@id myapp_filter | |
key_name log | |
reserve_data true | |
remove_key_name_field true | |
#<parse> | |
# @type regexp | |
# expression /^(?<level>[^ ]*)[ \t]+\[(?<time>[^\]]*)\] \[(?<thread>[^\]]*)\] \[(?<request>[^\]]*)\] (?<class>[^ ]*): (?<message>.*)$/ | |
# time_format %Y-%m-%d %H:%M:%S,%L %z | |
#</parse> | |
<parse> | |
@type multiline | |
format_firstline /^[A-Z]/ | |
format1 /^(?<level>[^ ]*)[ \t]+\[(?<time>[^\]]*)\] \[(?<thread>[^\]]*)\] \[(?<request>[^\]]*)\] (?<class>[^ ]*): (?<message>.*)$/ | |
time_format %Y-%m-%d %H:%M:%S,%L %z | |
</parse> | |
</filter> | |
<match kubelet> | |
@type elasticsearch_dynamic | |
@id kubelet_out_es | |
log_level info | |
include_tag_key true | |
host "#{ENV['FLUENT_ELASTICSEARCH_HOST']}" | |
port "#{ENV['FLUENT_ELASTICSEARCH_PORT']}" | |
scheme "#{ENV['FLUENT_ELASTICSEARCH_SCHEME'] || 'http'}" | |
ssl_verify "#{ENV['FLUENT_ELASTICSEARCH_SSL_VERIFY'] || 'true'}" | |
reload_connections "#{ENV['FLUENT_ELASTICSEARCH_RELOAD_CONNECTIONS'] || 'true'}" | |
index_name fluentd-${tag_parts[0]+ "-" + Time.at(time).getlocal("+05:30").strftime(@logstash_dateformat)} | |
include_timestamp true | |
#logstash_prefix "#{ENV['FLUENT_ELASTICSEARCH_LOGSTASH_PREFIX'] || 'logstash'}" | |
#logstash_format true | |
<buffer> | |
flush_thread_count 8 | |
flush_interval 5s | |
chunk_limit_size 4M | |
queue_limit_length 32 | |
retry_max_interval 30 | |
retry_forever true | |
</buffer> | |
</match> | |
<match kube-apiserver> | |
@type elasticsearch_dynamic | |
@id apiserver_out_es | |
log_level info | |
include_tag_key true | |
host "#{ENV['FLUENT_ELASTICSEARCH_HOST']}" | |
port "#{ENV['FLUENT_ELASTICSEARCH_PORT']}" | |
scheme "#{ENV['FLUENT_ELASTICSEARCH_SCHEME'] || 'http'}" | |
ssl_verify "#{ENV['FLUENT_ELASTICSEARCH_SSL_VERIFY'] || 'true'}" | |
reload_connections "#{ENV['FLUENT_ELASTICSEARCH_RELOAD_CONNECTIONS'] || 'true'}" | |
index_name fluentd-${tag_parts[0]+ "-" + Time.at(time).getlocal("+05:30").strftime(@logstash_dateformat)} | |
include_timestamp true | |
#logstash_prefix "#{ENV['FLUENT_ELASTICSEARCH_LOGSTASH_PREFIX'] || 'logstash'}" | |
#logstash_format true | |
<buffer> | |
flush_thread_count 8 | |
flush_interval 5s | |
chunk_limit_size 4M | |
queue_limit_length 32 | |
retry_max_interval 30 | |
retry_forever true | |
</buffer> | |
</match> | |
<match **> | |
@type copy | |
<store> | |
@type elasticsearch_dynamic | |
@id out_es | |
#log_level info | |
include_tag_key true | |
host "#{ENV['FLUENT_ELASTICSEARCH_HOST']}" | |
port "#{ENV['FLUENT_ELASTICSEARCH_PORT']}" | |
scheme "#{ENV['FLUENT_ELASTICSEARCH_SCHEME'] || 'http'}" | |
ssl_verify "#{ENV['FLUENT_ELASTICSEARCH_SSL_VERIFY'] || 'true'}" | |
reload_connections "#{ENV['FLUENT_ELASTICSEARCH_RELOAD_CONNECTIONS'] || 'true'}" | |
index_name fluentd-${record['kubernetes']['container_name']}-${Time.at(time).getlocal("+05:30").strftime(@logstash_dateformat)} | |
include_timestamp true | |
#logstash_prefix "#{ENV['FLUENT_ELASTICSEARCH_LOGSTASH_PREFIX'] || 'logstash'}" | |
#logstash_format true | |
<buffer> | |
flush_thread_count 8 | |
flush_interval 5s | |
chunk_limit_size 2M | |
queue_limit_length 32 | |
retry_max_interval 30 | |
retry_forever true | |
</buffer> | |
</store> | |
# <store> | |
# @type gcs | |
# @id out_gcs | |
# project mys-gcp-project-id | |
# keyfile /etc/fluent/gcp-creds.json | |
# bucket myapp-logs | |
# object_key_format %{path}%{time_slice}_%{index}.%{file_extension} | |
# path logs/${tag}/%Y/%m/%d/ | |
# # if you want to use ${tag} or %Y/%m/%d/ like syntax in path / object_key_format, | |
# # need to specify tag for ${tag} and time for %Y/%m/%d in <buffer> argument. | |
# <buffer tag,time> | |
# @type file | |
# path /var/log/fluent/gcs | |
# timekey 1h # 1 hour partition | |
# timekey_wait 10m | |
# timekey_use_utc true # use utc | |
# </buffer> | |
# <format> | |
# @type json | |
# </format> | |
# </store> | |
</match> |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment