Logging is a critically important part of any system. In a computing environment it is fairly complex: logs come from many different sources, such as the operating system, application services, and business logic, all of which continuously produce logs of every kind. Applications in a Kubernetes cluster are also highly dynamic: as container orchestration proceeds, business containers are constantly being created, destroyed, migrated, and scaled in and out.
This makes it necessary to establish a centralized approach that aggregates data from all these different sources into one place.
Open-source solution: the ELK Stack.

https://github.com/elastic/elasticsearch
https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.8.14.rpm
Install on k8s-master.boysec.cn:
[root@k8s-master ~]# cd /tools/
[root@k8s-master tools]# wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.8.14.rpm
[root@k8s-master tools]# rpm -ivh elasticsearch-6.8.14.rpm
[root@k8s-master tools]# systemctl daemon-reload
[root@k8s-master tools]# systemctl enable elasticsearch.service

# vim /etc/elasticsearch/elasticsearch.yml
cluster.name: es.od.com
node.name: k8s-master.boysec.cn
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
bootstrap.memory_lock: true
network.host: 10.1.1.120
http.port: 9200

# vim /etc/elasticsearch/jvm.options
-Xms512m
-Xmx512m

cat > /etc/security/limits.d/es.conf <<EOF
elasticsearch hard nofile 65536
elasticsearch soft fsize unlimited
elasticsearch hard memlock unlimited
elasticsearch soft memlock unlimited
EOF

systemctl start elasticsearch.service
netstat -lnpt|grep 9200
tcp6 0 0 10.1.1.120:9200 :::* LISTEN 114845/java
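Once the port is listening, it is worth confirming the node actually answers; a quick check against the standard cluster-health API:

curl http://10.1.1.120:9200/_cluster/health?pretty
# expect a "status" of green or yellow on a fresh single-node install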
# Common error
cat /var/log/elasticsearch/es.od.com.log    # the log file is named after cluster.name
---
[1] bootstrap checks failed
[1]: memory locking requested for elasticsearch process but memory is not locked
----
# Fix
# Edit /usr/lib/systemd/system/elasticsearch.service and, under the [Service] section, add LimitMEMLOCK=infinity
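Equivalently, the change can live in a drop-in override rather than the packaged unit file; a minimal sketch:

systemctl edit elasticsearch.service    # opens an empty override.conf
# add these two lines, save, then reload and restart:
#   [Service]
#   LimitMEMLOCK=infinity
systemctl daemon-reload
systemctl restart elasticsearch.service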
With Elasticsearch up, preset an index template for the k8s* indices (0 replicas suits this single-node install):

curl -H "Content-Type:application/json" -XPUT http://10.1.1.120:9200/_template/k8s -d '{
"template" : "k8s*",
"index_patterns": ["k8s*"],
"settings": {
"number_of_shards": 5,
"number_of_replicas": 0
}
}'
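To confirm the template registered, it can be read back:

curl http://10.1.1.120:9200/_template/k8s?pretty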
Kafka was originally developed at LinkedIn. It is a distributed, partitioned, multi-replica, multi-subscriber log system coordinated through ZooKeeper (it can also serve as an MQ system), commonly used for web/nginx logs, access logs, message services, and so on. LinkedIn contributed it to the Apache Foundation in 2010, where it became a top-level open-source project. Its main application scenarios are log collection systems and messaging systems.
Official site | GitHub
On k8s-slave.boysec.cn:
[root@k8s-slave ~]# cd /tools/
[root@k8s-slave tools]# wget https://archive.apache.org/dist/kafka/2.2.0/kafka_2.12-2.2.0.tgz
[root@k8s-slave tools]# tar xf kafka_2.12-2.2.0.tgz -C /opt/
[root@k8s-slave tools]# ln -s /opt/kafka_2.12-2.2.0/ /opt/kafka

[root@k8s-slave kafka]# vim /opt/kafka/config/server.properties
log.dirs=/data/kafka/logs
zookeeper.connect=localhost:2181
log.flush.interval.messages=10000
log.flush.interval.ms=1000
delete.topic.enable=true
host.name=k8s-slave.boysec.cn
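Note: host.name is a legacy property in Kafka 2.x. If it ever gets dropped, the listener properties are the modern equivalent; a sketch assuming the same address and port:

listeners=PLAINTEXT://10.1.1.130:9092
advertised.listeners=PLAINTEXT://k8s-slave.boysec.cn:9092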
[root@k8s-slave kafka]# mkdir -p /data/kafka/logs
[root@k8s-slave kafka]# /opt/kafka/bin/kafka-server-start.sh -daemon /opt/kafka/config/server.properties
[root@k8s-slave kafka]# netstat -lntup |grep 9092
tcp6 0 0 10.1.1.130:9092 :::* LISTEN 103655/java
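The broker can be smoke-tested with the bundled CLI tools; a quick sketch (the topic name smoke-test is just a placeholder):

/opt/kafka/bin/kafka-topics.sh --zookeeper localhost:2181 --create --topic smoke-test --partitions 1 --replication-factor 1
/opt/kafka/bin/kafka-topics.sh --zookeeper localhost:2181 --list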
https://github.com/yahoo/kafka-manager
https://github.com/yahoo/kafka-manager/archive/2.0.0.2.tar.gz
On k8s-dns.boysec.cn:
mkdir /data/dockerfile/kafka-manager
cd /data/dockerfile/kafka-manager

vim /data/dockerfile/kafka-manager/Dockerfile
FROM hseeberger/scala-sbt
ENV ZK_HOSTS=10.1.1.130:2181 \
KM_VERSION=2.0.0.2
RUN mkdir -p /tmp && \
cd /tmp && \
wget https://github.com/yahoo/kafka-manager/archive/${KM_VERSION}.tar.gz && \
tar xf ${KM_VERSION}.tar.gz && \
cd /tmp/CMAK-${KM_VERSION} && \
sbt clean dist && \
unzip -d / ./target/universal/kafka-manager-${KM_VERSION}.zip && \
    rm -fr /tmp/${KM_VERSION}.tar.gz /tmp/CMAK-${KM_VERSION}
WORKDIR /kafka-manager-${KM_VERSION}
EXPOSE 9000
ENTRYPOINT ["./bin/kafka-manager","-Dconfig.file=conf/application.conf"]docker build . -t harbor.od.com/infra/kafka-manager:v2.0.0.2注意:直接下载镜像(本人已做出3.0.0.5版本kafaka-manager,以支持kafak2.4。使用请自己测试!)
docker pull wangxiansen/kafka-manager:v2.0.0.2 #wangxiansen/kafka-manager:v3.0.0.5
docker tag wangxiansen/kafka-manager:v2.0.0.2 harbor.od.com/infra/kafka-manager:v2.0.0.2
docker push harbor.od.com/infra/kafka-manager:v2.0.0.2

[root@k8s-dns ~]# mkdir /var/k8s-yaml/kafka-manager
[root@k8s-dns ~]# cd /var/k8s-yaml/kafka-manager

vim /var/k8s-yaml/kafka-manager/deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: kafka-manager
  namespace: infra
  labels:
    name: kafka-manager
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kafka-manager
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 1
  revisionHistoryLimit: 7
  progressDeadlineSeconds: 600
  template:
    metadata:
      labels:
        app: kafka-manager
    spec:
      containers:
      - name: kafka-manager
        image: harbor.od.com/infra/kafka-manager:v2.0.0.2
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9000
          protocol: TCP
        env:
        - name: ZK_HOSTS
          value: zk4.od.com:2181
        - name: APPLICATION_SECRET
          value: letmein
      imagePullSecrets:
      - name: harbor
      terminationGracePeriodSeconds: 30
      securityContext:
        runAsUser: 0

vim /var/k8s-yaml/kafka-manager/svc.yaml
kind: Service
apiVersion: v1
metadata:
  name: kafka-manager
  namespace: infra
spec:
  ports:
  - protocol: TCP
    port: 9000
    targetPort: 9000
  selector:
    app: kafka-manager

vim /var/k8s-yaml/kafka-manager/ingress.yaml
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: kafka-manager
  namespace: infra
spec:
  rules:
  - host: km.od.com
    http:
      paths:
      - path: /
        backend:
          serviceName: kafka-manager
          servicePort: 9000

[root@k8s-dns ~]# vi /var/named/chroot/etc/od.com.zone
...
km A 10.1.1.50
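Remember to bump the zone's serial number after editing. The zone file can then be validated and named reloaded; a sketch assuming the chrooted named used throughout this setup:

named-checkzone od.com /var/named/chroot/etc/od.com.zone
systemctl restart named
dig -t A km.od.com +short    # should print 10.1.1.50 once resolution works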
Execute on any k8s compute node:
kubectl apply -f http://k8s-yaml.od.com/kafka-manager/deployment.yaml
kubectl apply -f http://k8s-yaml.od.com/kafka-manager/svc.yaml
kubectl apply -f http://k8s-yaml.od.com/kafka-manager/ingress.yaml

Then open http://km.od.com/ in a browser.
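If the page does not come up, a quick check that the deployment rolled out, using the namespace and labels from the manifests above:

kubectl get pods -n infra -l app=kafka-manager -o wide
kubectl describe ingress kafka-manager -n infra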

https://www.elastic.co/downloads/beats/filebeat
[root@k8s-dns ~]# mkdir /data/dockerfile/filebeat
[root@k8s-dns ~]# cd /data/dockerfile/filebeat

vim /data/dockerfile/filebeat/Dockerfile
FROM debian:jessie
ENV FILEBEAT_VERSION=7.4.0 \
FILEBEAT_SHA1=c63bb1e16f7f85f71568041c78f11b57de58d497ba733e398fa4b2d071270a86dbab19d5cb35da5d3579f35cb5b5f3c46e6e08cdf840afb7c347777aae5c4e11
RUN set -x && \
apt-get update && \
apt-get install -y wget && \
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-${FILEBEAT_VERSION}-linux-x86_64.tar.gz -O /opt/filebeat.tar.gz && \
cd /opt && \
echo "${FILEBEAT_SHA1} filebeat.tar.gz" | sha512sum -c - && \
tar xzvf filebeat.tar.gz && \
cd filebeat-* && \
cp filebeat /bin && \
cd /opt && \
rm -rf filebeat* && \
apt-get purge -y wget && \
apt-get autoremove -y && \
apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
COPY docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]vim /data/dockerfile/filebeat/docker-entrypoint.sh
#!/bin/bash

ENV=${ENV:-"test"}
PROJ_NAME=${PROJ_NAME:-"no-define"}
MULTILINE=${MULTILINE:-"^\d{2}"}

cat > /etc/filebeat.yaml << EOF
filebeat.inputs:
- type: log
  fields_under_root: true
  fields:
    topic: logm-${PROJ_NAME}
  paths:
    - /logm/*.log
    - /logm/*/*.log
    - /logm/*/*/*.log
    - /logm/*/*/*/*.log
    - /logm/*/*/*/*/*.log
  scan_frequency: 120s
  max_bytes: 10485760
  multiline.pattern: '$MULTILINE'
  multiline.negate: true
  multiline.match: after
  multiline.max_lines: 100
- type: log
  fields_under_root: true
  fields:
    topic: logu-${PROJ_NAME}
  paths:
    - /logu/*.log
    - /logu/*/*.log
    - /logu/*/*/*.log
    - /logu/*/*/*/*.log
    - /logu/*/*/*/*/*.log
    - /logu/*/*/*/*/*/*.log
output.kafka:
  hosts: ["10.1.1.130:9092"]
  topic: k8s-fb-$ENV-%{[topic]}
  version: 2.0.0
  required_acks: 0
  max_message_bytes: 10485760
EOF

set -xe

# If the user doesn't provide any command, run filebeat;
# otherwise allow arbitrary commands such as bash.
if [[ "$1" == "" ]]; then
    exec filebeat -c /etc/filebeat.yaml
else
    exec "$@"
fi

docker build . -t harbor.od.com/infra/filebeat:v7.4.0
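Note that docker-entrypoint.sh must be executable before the build, or the container will fail to start. Once built, the rendered config can be inspected without shipping any logs; a sketch (PROJ_NAME=demo is just a placeholder):

chmod +x /data/dockerfile/filebeat/docker-entrypoint.sh    # run before docker build
docker run --rm -e PROJ_NAME=demo harbor.od.com/infra/filebeat:v7.4.0 cat /etc/filebeat.yaml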
docker push harbor.od.com/infra/filebeat:v7.4.0

Use the sidecar pattern to run filebeat alongside the application container:

vim /var/k8s-yaml/dubbo-demo-consumer/deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: dubbo-demo-consumer
  namespace: test
  labels:
    name: dubbo-demo-consumer
spec:
  replicas: 1
  selector:
    matchLabels:
      name: dubbo-demo-consumer
  template:
    metadata:
      labels:
        app: dubbo-demo-consumer
        name: dubbo-demo-consumer
    spec:
      containers:
      - name: dubbo-demo-consumer
        image: harbor.od.com/app/dubbo-demo-web:tomcat_20210902_2035
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8080
          protocol: TCP
        env:
        - name: C_OPTS
          value: -Denv=dev -Dapollo.meta=http://config-test.od.com
        volumeMounts:
        - mountPath: /opt/tomcat/logs
          name: logm
      - name: filebeat
        image: harbor.od.com/infra/filebeat:v7.4.0
        imagePullPolicy: IfNotPresent
        env:
        - name: ENV
          value: test
        - name: PROJ_NAME
          value: dubbo-demo-web
        volumeMounts:
        - mountPath: /logm
          name: logm
      volumes:
      - emptyDir: {}
        name: logm
      imagePullSecrets:
      - name: harbor
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      securityContext:
        runAsUser: 0
      schedulerName: default-scheduler
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 1
  revisionHistoryLimit: 7
  progressDeadlineSeconds: 600

Advantage of the sidecar pattern: the two containers run in the same pod, so they share the UTS, network, and IPC namespaces (same hostname, localhost networking, and inter-process communication), while each container keeps its own filesystem (mount namespace). That is exactly why the emptyDir volume logm is mounted into both containers: it is the one piece of filesystem they share, letting filebeat read the tomcat logs.
Execute on any k8s compute node:
kubectl apply -f http://k8s-yaml.od.com/dubbo-demo-consumer/deployment.yaml
docker ps|grep filebeat
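Once the pod is running and the application writes logs, the matching topic should appear on the broker; a quick check back on k8s-slave.boysec.cn (the name follows the k8s-fb-$ENV-%{[topic]} scheme above):

/opt/kafka/bin/kafka-topics.sh --zookeeper localhost:2181 --list | grep k8s-fb-test
# expect something like: k8s-fb-test-logm-dubbo-demo-web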
[root@k8s-dns ~]# docker pull logstash:6.8.13
[root@k8s-dns ~]# docker tag logstash:6.8.13 harbor.od.com/infra/logstash:v6.8.13
[root@k8s-dns ~]# docker push harbor.od.com/infra/logstash:v6.8.13

[root@k8s-dns ~]# mkdir /etc/logstash
vi /etc/logstash/logstash-test.conf
input {
  kafka {
    bootstrap_servers => "10.1.1.130:9092"
    client_id => "10.1.1.250"
    consumer_threads => 4
    group_id => "k8s_test"
    topics_pattern => "k8s-fb-test-.*"
  }
}
filter {
  json {
    source => "message"
  }
}
output {
  elasticsearch {
    hosts => ["10.1.1.120:9200"]
    index => "k8s-test-%{+YYYY.MM.dd}"
  }
}

vi /etc/logstash/logstash-prod.conf
input {
input {
  kafka {
    bootstrap_servers => "10.1.1.130:9092"
    client_id => "10.1.1.254"
    consumer_threads => 4
    group_id => "k8s_prod"
    topics_pattern => "k8s-fb-prod-.*"
  }
}
filter {
  json {
    source => "message"
  }
}
output {
  elasticsearch {
    hosts => ["10.1.1.120:9200"]
    index => "k8s-prod-%{+YYYY.MM.dd}"
  }
}

[root@k8s-dns ~]# docker run -d --name=logstash-test -v /etc/logstash:/etc/logstash harbor.od.com/infra/logstash:v6.8.13 -f /etc/logstash/logstash-test.conf
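The prod pipeline can be started the same way once prod topics exist, mirroring the command above:

docker run -d --name=logstash-prod -v /etc/logstash:/etc/logstash harbor.od.com/infra/logstash:v6.8.13 -f /etc/logstash/logstash-prod.conf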
[root@k8s-dns ~]# curl http://10.1.1.120:9200/_cat/indices?v
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
green open k8s-test-2021.03.04 j6jQSHRqSgm3s_LfXUVAbg 5 0 3 0 460b 460b

[root@k8s-dns ~]# docker pull kibana:6.8.13
[root@k8s-dns ~]# docker tag kibana:6.8.13 harbor.od.com/infra/kibana:v6.8.13
[root@k8s-dns ~]# docker push harbor.od.com/infra/kibana:v6.8.13

[root@k8s-dns ~]# mkdir /var/k8s-yaml/kibana
[root@k8s-dns ~]# cd /var/k8s-yaml/kibana

vim /var/k8s-yaml/kibana/deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: kibana
  namespace: infra
  labels:
    name: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      name: kibana
  template:
    metadata:
      labels:
        app: kibana
        name: kibana
    spec:
      containers:
      - name: kibana
        image: harbor.od.com/infra/kibana:v6.8.13
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 5601
          protocol: TCP
        env:
        - name: ELASTICSEARCH_URL
          value: http://10.1.1.120:9200
      imagePullSecrets:
      - name: harbor
      securityContext:
        runAsUser: 0
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 1
  revisionHistoryLimit: 7
  progressDeadlineSeconds: 600

vim /var/k8s-yaml/kibana/svc.yaml
kind: Service
apiVersion: v1
metadata:
  name: kibana
  namespace: infra
spec:
  ports:
  - protocol: TCP
    port: 5601
    targetPort: 5601
  selector:
    app: kibana

vim /var/k8s-yaml/kibana/ingress.yaml
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: kibana
  namespace: infra
spec:
  rules:
  - host: kibana.od.com
    http:
      paths:
      - path: /
        backend:
          serviceName: kibana
          servicePort: 5601

[root@k8s-dns ~]# vi /var/named/chroot/etc/od.com.zone
...
kibana A 10.1.1.50

Execute on any k8s compute node:
kubectl apply -f http://k8s-yaml.od.com/kibana/deployment.yaml
kubectl apply -f http://k8s-yaml.od.com/kibana/svc.yaml
kubectl apply -f http://k8s-yaml.od.com/kibana/ingress.yaml

Then open http://kibana.od.com in a browser.
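In Kibana, create index patterns for k8s-test-* and k8s-prod-* under Management > Index Patterns. The same can be scripted against the Kibana 6.x saved-objects API; a sketch for the test pattern:

curl -X POST http://kibana.od.com/api/saved_objects/index-pattern \
  -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
  -d '{"attributes":{"title":"k8s-test-*","timeFieldName":"@timestamp"}}'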
The separate test and prod index patterns are used to distinguish between the various environments.