helm安装filebeat,常用配置,使用logstash过滤日志
来源:原创
时间:2024-09-02
作者:脚本小站
分类:云原生
安装filebeat到kubernetes:
wget https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz
tar -xf helm-v3.9.4-linux-amd64.tar.gz
cp ./linux-amd64/helm /usr/local/bin/helm
helm version
helm help
helm repo add elastic https://helm.elastic.co
helm pull elastic/filebeat --version 7.17.3
tar -xf filebeat-7.17.3.tgz
helm install filebeat . -n logging --create-namespace
helm upgrade filebeat . -n logging
helm uninstall filebeat -n logging
默认配置:
apiVersion: v1
data:
  filebeat.yml: |
    filebeat.inputs:
      - type: container
        paths:
          - /var/log/containers/*.log
        processors:
          - add_kubernetes_metadata:
              host: ${NODE_NAME}
              matchers:
                - logs_path:
                    logs_path: "/var/log/containers/"
    output.elasticsearch:
      host: '${NODE_NAME}'
      hosts: ["192.168.0.113:9200"]
kind: ConfigMap
metadata:
  annotations:
    meta.helm.sh/release-name: filebeat
    meta.helm.sh/release-namespace: logging
  labels:
    app: filebeat-filebeat
  name: filebeat-filebeat-daemonset-config
  namespace: logging
修改默认索引名称:修改values.yml文件
vim values.yaml
filebeatConfig:
  filebeat.yml: |
    filebeat.inputs:
      - type: container
        paths:
          - /var/log/containers/ingress-nginx-controller*.log
        processors:
          - add_kubernetes_metadata:
              host: ${NODE_NAME}
              matchers:
                - logs_path:
                    logs_path: "/var/log/containers/"
    output.elasticsearch:
      host: '${NODE_NAME}'
      hosts: '172.16.37.126:9200'
      index: "eksdev-ingress-nginx-%{+yyyy.MM.dd}"
    setup.ilm.enabled: false  # key point: with ILM enabled the custom index name is ignored
    setup.template:
      name: "eksdev-ingress-nginx"
      pattern: "eksdev-ingress-nginx-*"
收集特定目录/名称空间下的日志:
filebeatConfig:
  filebeat.yml: |
    filebeat.inputs:
      - type: container
        paths:
          # a path glob like this collects logs only from specific pods/namespaces
          - /var/log/containers/ingress-nginx-controller*.log
        processors:
          - add_kubernetes_metadata:
              host: ${NODE_NAME}
              matchers:
                - logs_path:
                    logs_path: "/var/log/containers/"
    output.elasticsearch:
      host: '${NODE_NAME}'
      hosts: '172.16.37.126:9200'
删除字段:
filebeatConfig:
  filebeat.yml: |
    filebeat.inputs:
      - type: container
        paths:
          - /var/log/containers/*.log
        processors:
          - add_kubernetes_metadata:
              host: ${NODE_NAME}
              matchers:
                - logs_path:
                    logs_path: "/var/log/containers/"
          - drop_fields:
              # fields to drop; some fields such as "agent" cannot be removed
              # here and have to be removed in logstash instead
              fields:
                - log
                - input
                - container
                - kubernetes
                - kubernetes.container.image
                - stream
    output.elasticsearch:
      host: '${NODE_NAME}'
      hosts: '172.16.37.126:9200'
      index: "ingress-nginx-%{+yyyy.MM.dd}"  # uses the template defined below
    setup.ilm.enabled: false
    setup.template:
      name: "ingress-nginx"  # in newer versions a template must be defined before a custom index name works
      pattern: "ingress-nginx-*"
helm中values.yml的配置:
filebeatConfig:
  filebeat.yml: |
    filebeat.inputs:
      - type: container
        paths:
          - /var/log/containers/ingress-nginx-controller*.log
        processors:
          - add_kubernetes_metadata:
              host: ${NODE_NAME}
              matchers:
                - logs_path:
                    logs_path: "/var/log/containers/"
          - drop_fields:
              fields:
                - log
                - input
                - container
                - kubernetes
                - kubernetes.container.image
                - stream
    output.elasticsearch:
      host: '${NODE_NAME}'
      hosts: '172.16.37.126:9200'
      index: "eksdev-ingress-nginx-%{+yyyy.MM.dd}"
    setup.ilm.enabled: false
    setup.template:
      name: "eksdev-ingress-nginx"
      pattern: "eksdev-ingress-nginx-*"
filebeat配置参考:
filebeat.inputs:
  - type: container
    paths:
      - /var/log/containers/*.log
    processors:
      - add_kubernetes_metadata:
          default_indexers.enabled: true
          default_matchers.enabled: true
          host: ${NODE_NAME}
          matchers:
            - logs_path:
                logs_path: "/var/log/containers/"
      # NOTE: the original used one "or:" mapping with three identical
      # "kubernetes.pod.name" keys — duplicate keys are invalid YAML and
      # most parsers silently keep only the last one. Each pattern needs
      # its own regexp condition; patterns are regular expressions, so
      # use ".*" rather than a shell-style "*" glob.
      - drop_event:
          when:
            or:
              - regexp:
                  kubernetes.pod.name: "filebeat-.*"
              - regexp:
                  kubernetes.pod.name: "external-dns.*"
              - regexp:
                  kubernetes.pod.name: "coredns-.*"
      - drop_fields:
          fields:
            - log
            - input
            - container.*
            - kubernetes.labels
            - kubernetes.node
            - kubernetes.pod.id
output.elasticsearch:
  host: '${NODE_NAME}'
  hosts: ["172.17.68.100:9200"]
  indices:
    - index: "efk-test-kube-system-%{+yyyy.MM.dd}"
      when.contains:
        kubernetes.namespace: "kube-system"
    - index: "efk-test-test-%{+yyyy.MM.dd}"
      when.contains:
        kubernetes.namespace: "test"
# disable ILM; when enabled, the indices settings above are ignored
setup.ilm.enabled: false
# index template name (fixed typo: was "setup.template.nameo")
setup.template.name: "efk-test"
# index template match pattern
setup.template.pattern: "efk-test-*"
# whether to overwrite an existing template
setup.template.overwrite: false
# shard / replica counts applied by the template
setup.template.settings:
  index.number_of_shards: 1
  index.number_of_replicas: 0
filebeat收集节点上面的日志配置:
filebeat.inputs:
  - type: filestream
    enabled: true
    paths:
      - /home/probe/code/finstepapi/finstep-probe-*.log
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
# Custom output index name: since 7.x a template must be defined
# before a custom index name can be set in the output section.
setup.template.name: "finstepapi-monitor"
setup.template.pattern: "finstepapi-monitor-*"
setup.template.overwrite: true
setup.template.enabled: true
setup.ilm.enabled: false
output.elasticsearch:
  hosts: ["10.159.0.55:9200"]
  index: "finstepapi-monitor-%{+yyyy.MM.dd}"
## console output for debugging
#output.console:
#  pretty: true
## add the two lines below to output only the message content
#  codec.format:
#    string: '%{[message]}'
# drop fields that are not needed
processors:
  - drop_fields:
      fields: ["log","host","input","agent","ecs"]
      ignore_missing: false
#  - add_host_metadata:
#      when.not.contains.tags: forwarded
#  - add_cloud_metadata: ~
#  - add_docker_metadata: ~
#  - add_kubernetes_metadata: ~
创建自定义索引名称注意项:
cnblogs.com/zyxnhr/p/12214706.html
安装es:
mkdir /data/elasticsearch/{logs,data} -pv
chmod 777 -R /data/elasticsearch/logs /data/elasticsearch/data
docker run -d --name elasticsearch \
-p 9200:9200 \
-p 9300:9300 \
-e "discovery.type=single-node" \
-e ES_JAVA_OPTS="-Xms256m -Xmx256m" \
-v /data/elasticsearch/logs:/usr/share/elasticsearch/logs \
-v /data/elasticsearch/data:/usr/share/elasticsearch/data \
elasticsearch:7.17.1
docker run \
-d --name kibana \
-p 5601:5601 \
kibana:7.17.1
mkdir -pv /data/kibana/config
docker cp kibana:/usr/share/kibana/config /data/kibana/
vim /data/kibana/config/kibana.yml
server.host: "0"
server.shutdownTimeout: "5s"
elasticsearch.hosts: [ "http://localhost:9200" ] # 记得修改ip,es端口是9200
monitoring.ui.container.elasticsearch.enabled: true
i18n.locale: "zh-CN"
docker stop kibana
docker rm kibana
docker run \
-d --name kibana \
-p 5601:5601 \
-v /data/kibana/config:/usr/share/kibana/config \
kibana:7.17.1
使用logstash过滤日志:使用下面的配置,filebeat配置不用改直接安装即可。
input {
  beats {
    port => 5044
  }
}

output {
  stdout {
    codec => rubydebug
  }
}

#output {
#  elasticsearch {
#    hosts => ["http://10.159.0.19:9200"]
#    index => "%{[@metadata][index]}"
#  }
#}

filter {
  # mutate {
  #   add_field => { "new_field" => "Hello, World!" }
  # }
  if [kubernetes][namespace] == "kube-system" and [kubernetes][deployment][name] == "ingress-nginx-controller" {
    mutate {
      add_field => { "[@metadata][index]" => "dev-%{[kubernetes][deployment][name]}-%{+YYYY.MM.dd}" }
      remove_field => ["[kubernetes][node]", "[kubernetes][namespace_labels]", "[kubernetes][labels]", "[kubernetes][namespace]", "[kubernetes][deployment]","[kubernetes][namespace_uid]", "[kubernetes][pod][uid]", "[kubernetes][pod][ip]", "[kubernetes][replicaset]", "[kubernetes][container]", "container", "agent", "tags", "ecs", "input", "host", "stream", "log", "@version"]
    }
    json {
      source => "message"
    }
  } else if [kubernetes][namespace] != "kube-system" and [kubernetes][deployment][name] {
    mutate {
      add_field => { "[@metadata][index]" => "dev-%{[kubernetes][deployment][name]}-%{+YYYY.MM.dd}" }
      remove_field => ["[kubernetes][node]", "[kubernetes][namespace_labels]", "[kubernetes][labels]", "[kubernetes][namespace]", "[kubernetes][deployment]","[kubernetes][namespace_uid]", "[kubernetes][pod][uid]", "[kubernetes][pod][ip]", "[kubernetes][replicaset]", "[kubernetes][container]", "container", "agent", "tags", "ecs", "input", "host", "stream", "log", "@version"]
    }
    # grok {
    #   match => { "message" => "%{COMBINEDAPACHELOG}" }
    # }
  } else {
    mutate {
      add_field => { "[@metadata][index]" => "dev-other-%{+YYYY.MM.dd}" }
      remove_field => ["[kubernetes][labels]"]
    }
  }
}

#output {
#  elasticsearch {
#    hosts => ["http://10.159.0.19:9200"]
#    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
#    #user => "elastic"
#    #password => "changeme"
#  }
#}
可以删除不需要的日志,也可作为临时调试删掉干扰的信息:
if [kubernetes][namespace] == "kube-system" {
  drop { }
}
logstash 处理 ingress-nginx-controller 的日志
logstash-configmap:配置文件中解析了nginx日志和删除了不必要的字段。
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-config
  namespace: logging
data:
  logstash.conf: |
    input {
      beats {
        port => 5000
      }
    }
    filter {
      grok {
        pattern_definitions => {
          "NGINX_EXTENDED" => "%{IPORHOST:client_ip} - %{USER:remote_user} \[%{HTTPDATE:timestamp}\] \"%{WORD:method} %{URIPATH:request_path}(?:\\?%{URIPARAM:query})? HTTP/%{NUMBER:http_version}\" %{NUMBER:status} %{NUMBER:body_bytes_sent} \"%{DATA:referer}\" \"%{DATA:user_agent}\" %{NUMBER:request_time} %{NUMBER:upstream_response_time} \[%{DATA:upstream_name}\] \[%{DATA:cache_status}\] %{IPORHOST:upstream_addr}:%{POSINT:upstream_port} %{NUMBER:upstream_bytes} %{NUMBER:upstream_time} %{NUMBER:upstream_status} %{DATA:request_id}"
        }
        match => { "message" => "%{NGINX_EXTENDED}" }
        overwrite => ["message"]
      }
      date {
        match => ["timestamp", "dd/MMM/yyyy:HH:mm:ss Z"]
        target => "@timestamp"
      }
      urldecode {
        field => "request_path"
      }
      urldecode {
        field => "query"
      }
      mutate {
        remove_field => ["agent", "host", "ecs"]
      }
      if "_grokparsefailure" in [tags] {
        mutate {
          add_tag => ["nginx_parse_failed"]
        }
      }
    }
    output {
      elasticsearch {
        hosts => ["http://172.16.37.126:9200"]
        index => "eksprd-nginx-%{+YYYY.MM.dd}"
      }
      stdout {
        codec => rubydebug
      }
    }
logstash-deployment:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash
  namespace: logging
spec:
  replicas: 1
  selector:
    matchLabels:
      app: logstash
  template:
    metadata:
      labels:
        app: logstash
    spec:
      containers:
        - name: logstash
          image: docker.elastic.co/logstash/logstash:7.15.1
          resources:
            requests:
              memory: 500Mi
              cpu: 100m
            limits:
              memory: 2Gi
              cpu: 2
          ports:
            - containerPort: 5000
          volumeMounts:
            - name: config
              mountPath: /usr/share/logstash/pipeline/
      volumes:
        - name: config
          configMap:
            name: logstash-config
---
apiVersion: v1
kind: Service
metadata:
  name: logstash
  namespace: logging
spec:
  ports:
    - port: 5000
      protocol: TCP
      targetPort: 5000
  selector:
    app: logstash
  type: ClusterIP
对应的filebeat的配置:
filebeatConfig:
  filebeat.yml: |
    filebeat.inputs:
      - type: container
        paths:
          - /var/log/containers/ingress-nginx-controller*.log
        processors:
          - add_kubernetes_metadata:
              host: ${NODE_NAME}
              matchers:
                - logs_path:
                    logs_path: "/var/log/containers/"
          - drop_fields:
              fields:
                - log
                - input
                - container
                - kubernetes
                - kubernetes.container.image
                - stream
    output.logstash:
      hosts: ["logstash:5000"]
      enabled: true
      loadbalance: true
      compression_level: 3
    setup.ilm.enabled: false
创建索引:

查看日志:

筛选日志:

参考:
cnblogs.com/whtjyt/p/17829241.html
