本文共 13298 字,大约阅读时间需要 44 分钟。
1:在两台代理服务器上部署nginx实现Apache负载均衡,地址分别是192.168.158.50/24,192.168.158.60/24;
网页轮询查看 2:在两台代理服务器上部署keepalived服务,实现双机热备,提供VIP地址 192.168.158.100/24,且验证地址漂移; 3:在两台web服务器(地址:192.168.158.30/24;192.168.158.40/24)上部署Apache服务,且实现日志的收集logstash功能,去收集access访问日志中的数据信息传递给elasticsearch集群; 4:构建两台elasticsearch服务器(地址:192.168.158.10/24;192.168.158.20/24)组建集群,存储logstash传输的日志信息; 5:部署一台kibana服务器(地址:可以使用ES中的任意一台或单独部署)实现日志信息的可视化功能;#准备yum源,yum安装rpm --import https://packages.elastic.co/GPG-KEY-elasticsearchcd /etc/yum.repos.d/vim elasticsearch.repo[elasticsearch-2.x]name=Elasticsearch repository for 2.x packagesbaseurl=http://packages.elastic.co/elasticsearch/2.x/centosgpgcheck=1gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearchenabled=1yum install elasticsearch -yyum install java -y (1.8版本) #安装Java,因为有JVM,日志收集最好的开发语言就是java
[root@node1 ~]# cd /etc/elasticsearch/[root@node1 elasticsearch]# lselasticsearch.yml log4j2.properties jvm.options scripts[root@node1 elasticsearch]# cp -p elasticsearch.yml elasticsearch.yml.bak ## 先把原配置文件做一下备份,再修改配置文件[root@node1 elasticsearch]# vim elasticsearch.yml17: cluster.name: elk-cluster ## 集群名称23: node.name: node1 ## 当前节点名33: path.data: /data/elk-data ## 数据存储的位置(目录不存在,需要创建)37: path.logs: /var/log/elasticsearch/ ## 日志文件存放的位置43: bootstrap.memory_lock: false ## true:允许内存中的数据交还给SWAP,flase:不允许内存中的数据交还给SWAP。 选择false,因为swap分区实在硬盘上建立的,如果内存不够,数据溢出,分到硬盘上的话,会影响速度55: network.host: 0.0.0.0 ## 监听地址,0.0.0.0表示所有网段59: http.port: 9200 ## ES端口号,外部通信的端口号 PS:9300是集群内部通信端口68: discovery.zen.ping.unicast.hosts: ["node1", "node2"] ## 群集中包含的节点名
#添加映射[root@node1 ~]# vi /etc/hosts192.168.158.10 node1192.168.158.20 node2[root@node2 ~]# vi /etc/hosts192.168.158.10 node1192.168.158.20 node2
[root@node1 elasticsearch]# mkdir -p /data/elk-data # 创建数据存放目录[root@node1 elasticsearch]# id elasticsearch ## ES的程序用户,安装的时候自动创建的用户uid=990(elasticsearch) gid=985(elasticsearch) 组=985(elasticsearch)[root@node1 elasticsearch]# chown elasticsearch.elasticsearch /data/elk-data/ ## 授权,交给用户 elasticsearch去管理[root@node1 elasticsearch]# systemctl start elasticsearch.service ## 开启服务[root@node1 elasticsearch]# netstat -anpt | grep 9200 # 过滤9200端口(外部访问集群端口)tcp6 0 0 :::9200 :::* LISTEN 15726/java
输入http://192.168.158.10:9200/_cluster/state?pretty
可视化工具,更方便查看集群信息,并管理集群,以node1为例
[root@node1 ~]# /usr/share/elasticsearch/bin/plugin install mobz/elasticsearch-head 安装位置/usr/share/elasticsearch/plugins/head
两台都装
1、安装Apahce服务(httpd)[root@apache ~]# yum -y install httpd[root@apache ~]# systemctl start httpd2、安装Java环境[root@apache ~]# java -version #如果没有装,安装yum -y install java-1.8.0openjdk version "1.8.0_181"OpenJDK Runtime Environment (build 1.8.0_181-b13)OpenJDK 64-Bit Server VM (build 25.181-b13, mixed mode)3、安装logstash上传logstash-5.5.1.rpm到/opt目录下[root@apache ~]# cd /opt[root@apache opt]# rpm -ivh logstash-5.5.1.rpm #安装logstash[root@apache opt]# systemctl start logstash.service #启动logstash[root@apache opt]# systemctl enable logstash.service[root@apache opt]# ln -s /usr/share/logstash/bin/logstash /usr/local/bin/ #建立logstash软连接
[root@apache opt]# cd /etc/logstash/conf.d/[root@apache conf.d]# touch apache_log.conf[root@apache conf.d]# vi apache_log.confinput { file{ path => "/etc/httpd/logs/access_log" type => "access" start_position => "beginning" } file{ path => "/etc/httpd/logs/error_log" type => "error" start_position => "beginning" } }output { if [type] == "access" { elasticsearch { hosts => ["192.168.158.10:9200"] index => "apache_access-%{+YYYY.MM.dd}" } } if [type] == "error" { elasticsearch { hosts => ["192.168.158.10:9200"] index => "apache_error-%{+YYYY.MM.dd}" } } }[root@apache conf.d]# /usr/share/logstash/bin/logstash -f apache_log.conf
[root@node1 ~]# mkdir -p /usr/local/kibana[root@node1 ~]# tar xzf kibana-4.3.1-linux-x64.tar.gz -C /usr/local/kibana#修改配置文件[root@node1 ~]# vi /usr/local/kibana/kibana-4.3.1-linux-x64/config/kibana.yml//2行 server.port: 5601 #服务端口//5行server.host: "0.0.0.0"//12行 ES地址elasticsearch.url: "http://192.168.158.10:9200" #和ES建立联系//20行 kibana.index: ".kibana" #建立索引[root@node1 ~]# yum install screen -y[root@node1 ~]# /usr/local/kibana/kibana-4.3.1-linux-x64/bin/kibana #启动kibana监听#ctrl+a+d 进行丢入后台
# Install nginx from source and register it as a SysV service.
# Run on BOTH proxy servers (192.168.158.50 and 192.168.158.60).
systemctl stop firewalld
setenforce 0
yum -y install gcc gcc-c++ make pcre-devel zlib-devel
tar xf nginx-1.12.2.tar.gz
cd nginx-1.12.2/
# Create the unprivileged runtime user referenced by --user/--group.
useradd -M -s /sbin/nologin nginx
./configure \
--prefix=/usr/local/nginx \
--user=nginx \
--group=nginx \
--with-http_stub_status_module
make && make install
ln -s /usr/local/nginx/sbin/nginx /usr/bin
# Init script. Quoted heredoc delimiter keeps $1/$PROG/$PIDF literal
# so they are expanded at service run time, not at write time.
cat > /etc/init.d/nginx <<'EOF'
#!/bin/bash
# chkconfig: 35 20 80
# description: nginx server
PROG="/usr/local/nginx/sbin/nginx"
PIDF="/usr/local/nginx/logs/nginx.pid"
case "$1" in
  start)
    $PROG
    ;;
  stop)
    # $PIDF holds a PID, so use kill (killall expects a process NAME).
    kill -s QUIT "$(cat "$PIDF")"
    ;;
  restart)
    $0 stop
    $0 start
    ;;
  reload)
    # HUP makes the nginx master re-read its configuration.
    kill -s HUP "$(cat "$PIDF")"
    ;;
  *)
    # Usage now matches the cases actually implemented above.
    echo "Usage: $0 {start|stop|restart|reload}"
    exit 1
esac
exit 0
EOF
chmod +x /etc/init.d/nginx
chkconfig --add nginx
service nginx start
netstat -ntap | grep nginx
修改配置文件,两台nginx都做
#gzip on; upstream apaches { #添加upstream字段,实现地址轮询 server 192.168.158.30:80; server 192.168.158.40:80; } server { listen 80; server_name localhost; #charset koi8-r; #access_log logs/host.access.log main; location / { root html; index index.html index.htm; proxy_pass http://apaches; #修改此行 proxy_redirect default; }
关闭防火墙即可访问
主机名 | 真实ip | 漂移地址 |
---|---|---|
LVS1 | 192.168.158.50 | (ens33:0)192.168.158.100 |
LVS2 | 192.168.158.60 | (ens33:0)192.168.158.100 |
WEB1 | 192.168.158.30 | (lo:0)192.168.158.100 |
WEB2 | 192.168.158.40 | (lo:0)192.168.158.100 |
//keepalived:双机热备要用到的,ipvsadm:调度管理要用[root@lvs01 ~]# yum install keepalived ipvsadm -y[root@lvs02 ~]# yum install keepalived ipvsadm -y
#尾行插入下段配置vim /etc/sysctl.confnet.ipv4.ip_forward = 1net.ipv4.conf.all.send_redirects = 0net.ipv4.conf.default.send_redirects = 0net.ipv4.conf.ens33.send_redirects = 0#重载配置,使之生效[root@lvs01 ~]# sysctl -pnet.ipv4.ip_forward = 1net.ipv4.conf.all.send_redirects = 0net.ipv4.conf.default.send_redirects = 0net.ipv4.conf.ens33.send_redirects = 0
虚拟网卡:ens33:0 真实网卡ens33
LVS1[root@lvs01 ~]# cd /etc/sysconfig/network-scripts/[root@lvs01 network-scripts]# cp -p ifcfg-ens33 ifcfg-ens33:0[root@lvs01 network-scripts]# vim ifcfg-ens33:0 //编辑虚拟IP地址DEVICE=ens33:0ONBOOT=yesIPADDR=192.168.158.100NETMASK=255.255.255.0[root@lvs01 network-scripts]# vim ifcfg-ens33TYPE=EthernetPROXY_METHOD=noneBROWSER_ONLY=noBOOTPROTO=static #修改为staticIPADDR=192.168.158.50 #LVS1真实地址,见前文规划表NETMASK=255.255.255.0GATEWAY=192.168.158.2DEFROUTE=yesIPV4_FAILURE_FATAL=noIPV6INIT=yesIPV6_AUTOCONF=yesIPV6_DEFROUTE=yesIPV6_FAILURE_FATAL=noIPV6_ADDR_GEN_MODE=stable-privacyNAME=ens33UUID=915032f7-b91a-4e95-8247-3bbabd595285DEVICE=ens33ONBOOT=yes
LVS2
[root@lvs02 network-scripts]# vim ifcfg-ens33:0 //编辑虚拟IP地址DEVICE=ens33:0ONBOOT=yesIPADDR=192.168.158.100NETMASK=255.255.255.0[root@lvs02 network-scripts]# vim ifcfg-ens33IPADDR=192.168.158.60 #LVS2真实地址,见前文规划表NETMASK=255.255.255.0GATEWAY=192.168.158.2
两台lvs都要配置
cd /etc/init.d/
# /etc/init.d/dr.sh — configure the LVS-DR director (both LVS hosts).
# NOTE: do NOT execute the script yet — keepalived/real servers must be
# prepared first, as the original walkthrough warns.
cat > dr.sh <<'EOF'
#!/bin/bash
# LVS-DR director control script: binds the VIP on ens33:0 and
# programs ipvsadm with round-robin forwarding to both web nodes.
GW=192.168.158.2
VIP=192.168.158.100
RIP1=192.168.158.30
RIP2=192.168.158.40
case "$1" in
start)
    /sbin/ipvsadm --save > /etc/sysconfig/ipvsadm
    systemctl start ipvsadm
    # /32 netmask keeps the VIP host-local; single broadcast option
    # (the flattened original repeated "broadcast $VIP" twice).
    /sbin/ifconfig ens33:0 $VIP broadcast $VIP netmask 255.255.255.255 up
    /sbin/route add -host $VIP dev ens33:0
    /sbin/ipvsadm -A -t $VIP:80 -s rr          # virtual service, round-robin
    /sbin/ipvsadm -a -t $VIP:80 -r $RIP1:80 -g # -g = direct-routing (DR) mode
    /sbin/ipvsadm -a -t $VIP:80 -r $RIP2:80 -g
    echo "ipvsadm starting --------------------[ok]"
    ;;
stop)
    /sbin/ipvsadm -C
    systemctl stop ipvsadm
    ifconfig ens33:0 down
    route del $VIP
    echo "ipvsadm stopped--------------[ok]"
    ;;
status)
    if [ ! -e /var/lock/subsys/ipvsadm ]; then
        echo "ipvsadm stopped----------------------"
        exit 1
    else
        echo "ipvsadm Running------------[ok]"
    fi
    ;;
*)
    # Fixed: the original printed "{start I stop I status }" — the
    # capital letter I instead of the pipe character.
    echo "Usage: $0 {start|stop|status}"
    exit 1
esac
exit 0
EOF
# 添加权限,注意:先不要执行脚本!!!
chmod +x dr.sh
两台LVS服务器都需完成上述各项配置
//开启dr服务配置[root@lvs01 network-scripts]# service dr.sh startipvsadm starting --------------------[ok]//关闭防火墙及SELinux防护功能[root@lvs01 network-scripts]# setenforce 0[root@lvs01 network-scripts]# systemctl restart network[root@lvs01 network-scripts]# systemctl stop firewalld[root@lvs02 network-scripts]# systemctl restart network
yum -y install httpdsystemctl stop firewalldsetenforce 0
cd /var/www/html/echo "this is web1
" > index.html //另一台web服务器输入内容:test02区分一下。cd /var/www/html/echo "this is web2
" > index.html
cd /etc/sysconfig/network-scripts/cp -p ifcfg-lo ifcfg-lo:0vim ifcfg-lo:0#内容全部删除,添加以下内容:DEVICE=lo:0IPADDR=192.168.158.100NETMASK=255.255.255.0ONBOOT=yes
cd /etc/init.d/
# /etc/init.d/web.sh — LVS-DR real-server script (both web hosts):
# binds the VIP on lo:0 and suppresses ARP replies for it so only the
# director answers ARP for the VIP.
cat > web.sh <<'EOF'
#!/bin/bash
VIP=192.168.158.100
case "$1" in
start)
    # /32 mask on loopback: VIP is reachable locally, never routed out.
    ifconfig lo:0 $VIP netmask 255.255.255.255 broadcast $VIP
    /sbin/route add -host $VIP dev lo:0
    # arp_ignore=1 / arp_announce=2: do not answer or advertise ARP
    # for the VIP — required for DR mode real servers.
    echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
    sysctl -p >/dev/null 2>&1
    echo "RealServer Start OK"
    ;;
stop)
    ifconfig lo:0 down
    # Fixed: original read "route del $VIP /dev/null 2>&1" — the missing
    # ">" made /dev/null an argument to route instead of a redirect.
    route del $VIP >/dev/null 2>&1
    echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
    echo "RealServer Stopped"
    ;;
*)
    echo "Usage: $0 {start|stop}"
    exit 1
esac
exit 0
EOF
chmod +x web.sh
cd /etc/sysconfig/network-scripts/vim ifcfg-ens33//修改为static模式,并添加以下内容:IPADDR=192.168.158.30 #另一台 web2 的IP地址为192.168.158.40NETMASK=255.255.255.0GATEWAY=192.168.158.2[root@web network-scripts]# systemctl stop firewalld.service [root@web network-scripts]# setenforce 0[root@web ~]# cd /etc/init.d/[root@web init.d]# service web.sh startRealServer Start OK
指定全局参数
# LVS1: vi /etc/keepalived/keepalived.conf  (delete the stock contents first)
! Configuration File for keepalived
global_defs {
    router_id LVS_01
}
vrrp_instance VI_1 {
    state MASTER               # on the standby (LVS2) use BACKUP
    interface ens33
    virtual_router_id 10       # must match on both nodes
    priority 100               # standby LVS2 must be lower, e.g. 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.158.100        # the floating VIP
    }
}
virtual_server 192.168.158.100 80 {    # VIP and service port
    delay_loop 6
    lb_algo rr                 # round-robin scheduling
    lb_kind DR                 # LVS direct-routing mode
    #persistence_timeout 50    # pin all TCP requests from one client to one real server
    protocol TCP
    real_server 192.168.158.30 80 {    # web1 node
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.158.40 80 {    # web2 node
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
LVS2
! Configuration File for keepalived
# LVS2: /etc/keepalived/keepalived.conf — standby node.
global_defs {
    router_id LVS_02           # must differ from LVS1's router_id
}
vrrp_instance VI_1 {
    state BACKUP               # standby role
    interface ens33
    virtual_router_id 10       # must match LVS1
    priority 90                # lower than the MASTER's 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.158.100        # the floating VIP
    }
}
virtual_server 192.168.158.100 80 {    # VIP and service port
    delay_loop 6
    lb_algo rr                 # round-robin scheduling
    lb_kind DR                 # LVS direct-routing mode
    #persistence_timeout 50    # pin all TCP requests from one client to one real server
    protocol TCP
    real_server 192.168.158.30 80 {    # web1 node
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.158.40 80 {    # web2 node
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
systemctl start keepalived停止LVS1的keepalived
systemctl stop keepalived虚拟地址已经漂移过去 以上项目就搭建完成了,有任何问题都可以在评论区留言噢
转载地址:http://nfdaf.baihongyu.com/