1. Architecture diagram
Official packages can be downloaded from: https://www.elastic.co/cn/downloads/
2. Deploy ElasticSearch
cd /opt/src
tar xf elasticsearch-7.10.2-linux-x86_64.tar.gz -C /opt/
ln -s /opt/elasticsearch-7.10.2 /opt/elasticsearch
cd /opt/elasticsearch
mkdir -p /data/elasticsearch/{data,logs}
Configure elasticsearch.yml
[root@hdss-52 opt]# egrep -v "^#|^$" elasticsearch/config/elasticsearch.yml
cluster.name: es.kcwl.com
node.name: hdss-52.host.com
path.data: /data/elasticsearch/data
path.logs: /data/elasticsearch/logs
bootstrap.memory_lock: true
network.host: 172.16.90.52
http.port: 9200
discovery.seed_hosts: ["127.0.0.1"]
cluster.initial_master_nodes: ["hdss-52.host.com"]
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
Set the JVM parameters
elasticsearch]# vi config/jvm.options
# Adjust for your environment: set -Xms and -Xmx to the same value, roughly half of the machine's memory
-Xms512m
-Xmx512m
Create a regular (non-root) user
useradd -s /bin/bash -M es
chown -R es:es /opt/elasticsearch-7.10.2
chown -R es:es /data/elasticsearch/
Adjust the file descriptor and memory lock limits
vim /etc/security/limits.d/es.conf
es soft nofile 65536
es hard nofile 65536
es soft fsize unlimited
es hard memlock unlimited
es soft memlock unlimited
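Optionally verify that the limits take effect for the es user (using the same su form as the startup command below); this should show 65536 for open files and "unlimited" for max locked memory:
su -c "ulimit -Hn; ulimit -l" es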
Adjust the kernel parameters
sysctl -w vm.max_map_count=262144
echo "vm.max_map_count=262144" > /etc/sysctl.conf
sysctl -p
Start the ES service
su -c "/opt/elasticsearch/bin/elasticsearch -d" es
netstat -luntp|grep 9200
tcp6 0 0 172.16.90.52:9200 :::* LISTEN 15501/java
Adjust the ES log index template
curl -u elastic -XPUT http://172.16.90.52:9200/_template/k8s -H 'Content-Type: application/json' -d '{
  "index_patterns": ["k8s*"],
  "settings": {
    "number_of_shards": 5,
    "number_of_replicas": 0
  }
}'
Note: production would use replicas (e.g. 3), but this ES is a single node, so number_of_replicas is 0. Because xpack security is enabled, the request must be authenticated (-u elastic), so run it after the passwords are set in step 7.
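To confirm the template was stored, query it back (authenticated the same way):
curl -u elastic http://172.16.90.52:9200/_template/k8s?pretty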
3. Deploy Kafka
Kafka requires ZooKeeper, so deploy the ZooKeeper service first. Download from: https://archive.apache.org/dist/zookeeper/
tar zxvf /usr/local/src/zookeeper-3.4.14.tar.gz -C /opt/
ln -s /opt/zookeeper-3.4.14/ /opt/zookeeper
mkdir -pv /data/zookeeper/data /data/zookeeper/logs
Modify the config file:
cat /opt/zookeeper/conf/zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/data/zookeeper/data
dataLogDir=/data/zookeeper/logs
clientPort=2181
Start the service
/opt/zookeeper/bin/zkServer.sh start
netstat -lntp| grep 2181
tcp6 0 0 :::2181 :::* LISTEN 108043/java
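Besides checking the port, ZooKeeper's own scripts can confirm the node is serving (127.0.0.1:2181 matches the clientPort above):
/opt/zookeeper/bin/zkServer.sh status
/opt/zookeeper/bin/zkCli.sh -server 127.0.0.1:2181 ls /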
Deploy Kafka: download the package
cd /opt/src
wget https://archive.apache.org/dist/kafka/2.2.0/kafka_2.12-2.2.0.tgz
tar xf kafka_2.12-2.2.0.tgz -C /opt/
ln -s /opt/kafka_2.12-2.2.0/ /opt/kafka
cd /opt/kafka
mkdir /data/kafka/logs -p
Modify the configuration
[root@hdss-52 opt]# egrep -v "^#|^$" kafka/config/server.properties
broker.id=0
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/data/kafka/logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.flush.interval.messages=10000
log.flush.interval.ms=1000
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=localhost:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
delete.topic.enable=true
host.name=hdss-52.host.com
Start Kafka
bin/kafka-server-start.sh -daemon config/server.properties
netstat -luntp|grep 9092
tcp6 0 0 172.16.90.52:9092 :::* LISTEN 2026/java
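The Filebeat and Logstash configs below use a topic named shengxian. With auto topic creation it will appear on the first write, but you can create and inspect it explicitly with the CLI tools shipped with this Kafka version (run from /opt/kafka):
bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic shengxian
bin/kafka-topics.sh --list --zookeeper localhost:2181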
4. Deploy Logstash (via Docker here)
Pull the official image, then push it to your own registry:
docker pull docker.elastic.co/logstash/logstash:7.10.2
docker tag d0a2dac51fcb harbor.china95059.com.cn/infra/logstash:v7.10.2
docker push harbor.china95059.com.cn/infra/logstash:v7.10.2
Prepare the directory
mkdir /etc/logstash/
Create the config file for project A
[root@hdss-6 logstash]# cat logstash-sx.conf
input {
  kafka {
    bootstrap_servers => "172.16.90.52:9092"
    topics => ["shengxian"]
    group_id => "shengxian"
    codec => "json"
  }
}
filter {
  json {
    source => "message"
  }
}
output {
  if [filetype] == "app" {
    elasticsearch {
      hosts => ["http://172.16.90.52:9200"]
      user => "elastic"
      password => "lvcheng@2015"
      index => "sx_app-%{+YYYY.MM}"
    }
  }
  else if [filetype] == "web" {
    elasticsearch {
      hosts => ["http://172.16.90.52:9200"]
      user => "elastic"
      password => "lvcheng@2015"
      index => "sx_web-%{+YYYY.MM}"
    }
  }
}
Start Logstash
docker run -it -d --restart=always --name logstash-sx-prod -v /etc/logstash:/etc/logstash harbor.china95059.com.cn/infra/logstash:v7.10.2 -f /etc/logstash/logstash-sx.conf
If there is a second project, just start another container to consume its logs.
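To confirm Logstash is actually consuming from Kafka, inspect the consumer group it joins (shengxian, per group_id above); run from /opt/kafka on hdss-52:
bin/kafka-consumer-groups.sh --bootstrap-server 172.16.90.52:9092 --describe --group shengxian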
5. Deploy Kibana
tar zxvf kibana-7.10.2-linux-x86_64.tar.gz
mv kibana-7.10.2-linux-x86_64 kibana-7.10.2
mv kibana-7.10.2 ../
ln -s /opt/kibana-7.10.2/ /opt/kibana
Config file
egrep -v "^#|^$" /opt/kibana/config/kibana.yml
server.port: 5601
server.host: "172.16.90.6"
elasticsearch.hosts: ["http://172.16.90.52:9200"]
elasticsearch.username: "kibana"
elasticsearch.password: "123456"
i18n.locale: "zh-CN"
Start Kibana
nohup ./bin/kibana --allow-root &
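A quick check that Kibana came up on the configured address (port 5601 per kibana.yml above); note the status endpoint may redirect to the login page once security passwords are set:
netstat -lntp | grep 5601
curl -I http://172.16.90.6:5601/api/status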
6. Deploy Filebeat
Add a hosts entry for name resolution
cat /etc/hosts
122.224.207.90 hdss-52.host.com
Extract the package
tar zxvf filebeat-7.5.1-linux-x86_64.tar.gz
mv filebeat-7.5.1-linux-x86_64 filebeat-7.5.1
ln -s /opt/filebeat-7.5.1 /opt/filebeat
Write the config file
cat /opt/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  fields_under_root: true
  fields:
    filetype: web
  paths:
    - /home/project/shangye/log/shangye-web*.log
  scan_frequency: 120s
  max_bytes: 10485760
  multiline.pattern: '^\d{2}'
  multiline.negate: true
  multiline.match: after
  multiline.max_lines: 100
output.kafka:
  hosts: ["hdss-52.host.com:9092"]
  topic: shengxian
  version: 2.0.0
  required_acks: 0
  max_message_bytes: 10485760
Start the service
./filebeat -e -c /opt/filebeat/filebeat.yml
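Filebeat's built-in test subcommands can validate the config file and the Kafka output before (or after) starting:
./filebeat test config -c /opt/filebeat/filebeat.yml
./filebeat test output -c /opt/filebeat/filebeat.yml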
7. Set passwords for the ES built-in users
[root@node01 elasticsearch-7.7.0]# bin/elasticsearch-setup-passwords interactive
future versions of Elasticsearch will require Java 11; your Java version from [/opt/app/jdk1.8.0_181/jre] does not meet this requirement
Initiating the setup of passwords for reserved users elastic,apm_system,kibana,logstash_system,beats_system,remote_monitoring_user.
You will be prompted to enter passwords as the process progresses.
Please confirm that you would like to continue [y/N]y
Enter password for [elastic]:
Reenter password for [elastic]:
Enter password for [apm_system]:
Reenter password for [apm_system]:
Enter password for [kibana]:
Reenter password for [kibana]:
Enter password for [logstash_system]:
Reenter password for [logstash_system]:
Enter password for [beats_system]:
Reenter password for [beats_system]:
Enter password for [remote_monitoring_user]:
Reenter password for [remote_monitoring_user]:
Changed password for user [apm_system]
Changed password for user [kibana]
Changed password for user [logstash_system]
Changed password for user [beats_system]
Changed password for user [remote_monitoring_user]
Changed password for user [elastic]
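Once the passwords are set, authenticated requests should work; a quick check against the cluster (enter the elastic password when prompted):
curl -u elastic http://172.16.90.52:9200/_cat/nodes?v
curl -u elastic http://172.16.90.52:9200/_cluster/health?pretty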
At this point the deployment is complete.
Now add a domain name via an Nginx reverse proxy:
[root@hdss-6 conf.d]# cat kibana.com.cn.conf
server {
    listen 80;
    server_name kibana.com.cn;
    client_max_body_size 1000m;
    location / {
        proxy_pass http://172.16.90.6:5601;
    }
}
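After adding the vhost, test the syntax and reload Nginx. Clients that cannot resolve kibana.com.cn through DNS need a hosts entry pointing at the Nginx host (172.16.90.6 is assumed reachable from the client here; otherwise use hdss-6's public address):
nginx -t && nginx -s reload
echo "172.16.90.6 kibana.com.cn" >> /etc/hosts    # on the client machine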
Open http://kibana.com.cn in a browser.
Username: elastic, password: the one you set in step 7.