
Linux: implementing a dual-master model for nginx high availability


Implementing dual-master nginx high availability (Part 1)



Preparation: 7 hosts

client:
172.18.x.x

Directors (keepalived + nginx, each with an extra NIC in 172.18.x.x/16):
192.168.234.27
192.168.234.37

real_server:
192.168.234.47
192.168.234.57
192.168.234.67
192.168.234.77


Test results

[root@234c17 ~]# for i in {1..4};do curl www.a.com;curl www.b.com;sleep 1;done
234.57
234.77
234.47
234.67
234.57
234.77
234.47
234.67
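
For this test to work, the client has to resolve www.a.com and www.b.com to the two VIPs carried by the directors. The article does not show that step; a minimal sketch, assuming www.a.com maps to the first VIP (172.18.0.100) and www.b.com to the second (172.18.0.200), is an /etc/hosts entry on the client:

[root@234c17 ~]# cat >> /etc/hosts <<'EOF'
172.18.0.100 www.a.com
172.18.0.200 www.b.com
EOF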

Step 1: preparation

Install four CentOS nodes, configured as follows:

  1. lsh10: 192.168.3.10   # time-synchronization server, CentOS 7
  2. lsh11: CentOS 7, three IPs configured on one interface
     192.168.222.11  internal web1
     192.168.222.22  internal web2
     192.168.222.33  internal web3
  3. n1: 192.168.3.101   # keepalived server 1, CentOS 6
  4. n2: 192.168.3.102   # keepalived server 2, CentOS 6
     Two virtual router (VIP) addresses: 192.168.3.151 and 192.168.3.152

(Figure: dual-master Nginx high-availability cluster topology)

  • Disable SELinux on all nodes
vi /etc/selinux/config
SELINUX=disabled
  • Disable the firewall on all nodes
chkconfig --level 123456 iptables off
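
The chkconfig line above applies to the CentOS 6 nodes (n1 and n2). On the CentOS 7 nodes (lsh10 and lsh11) the equivalent would roughly be:

]# setenforce 0                    # turn SELinux off immediately (the config-file change takes effect on reboot)
]# systemctl stop firewalld
]# systemctl disable firewalld     # CentOS 7 ships firewalld instead of the iptables service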


Procedure:

Step 2: configure time synchronization on node lsh10

  • Configure the time server
]# yum -y install ntp ntpdate    # install the packages
]# vim /etc/ntp.conf   # edit the configuration file
server time.windows.com
server s2m.time.edu.cn
server 0.asia.pool.ntp.org
server 1.asia.pool.ntp.org
server 2.asia.pool.ntp.org
server 3.asia.pool.ntp.org
server 127.127.1.0 iburst  # local clock: used when the upstream servers are unreachable
restrict 192.168.3.0 mask 255.255.255.0 nomodify  # subnet allowed to synchronize from this server
]# systemctl start ntpd    # start the service
]# systemctl enable ntpd.service    # enable it at boot
  • Configure the other nodes to synchronize their clocks
]# yum -y install ntpdate
]# ntpdate 192.168.3.10    # one-shot time sync against lsh10
]# yum -y install chrony  # install the package
]# vim /etc/chrony.conf    # edit the configuration file
server 192.168.3.10 iburst    # synchronize with the local time server
]# chkconfig --level 35 chronyd on  # enable at boot on CentOS 6
]# systemctl enable chronyd.service  # enable at boot on CentOS 7
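
To check that synchronization actually works (a hedged sketch; the exact output varies):

]# ntpq -p               # on lsh10: list the upstream servers and their reachability
]# chronyc sources -v    # on the clients: 192.168.3.10 should show up, marked '^*' once selected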


Part 1: configure the four real servers first and install httpd for testing

[root@234c47 ~]# curl 192.168.234.47;curl 192.168.234.57;curl 192.168.234.67;curl 192.168.234.77
234.47
234.57
234.67
234.77
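
The article does not show how the real servers were prepared; a minimal sketch for one of them (192.168.234.47 here, repeated analogously on .57, .67 and .77, assuming CentOS 7) might be:

[root@234c47 ~]# yum -y install httpd
[root@234c47 ~]# echo 234.47 > /var/www/html/index.html   # each real server returns its own address
[root@234c47 ~]# systemctl enable --now httpd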

Step 3: configure node lsh11 as the internal web server (three virtual hosts)

  • Install httpd and write the virtual-host configuration
[root@localhost yum.repos.d]# yum -y install httpd
[root@localhost yum.repos.d]# cd /etc/httpd/conf.d/
[root@localhost conf.d]# vim vhosts.conf
[root@localhost conf.d]# httpd -t
AH00112: Warning: DocumentRoot [/data/web/vhost1] does not exist
AH00112: Warning: DocumentRoot [/data/web/vhost2] does not exist
AH00112: Warning: DocumentRoot [/data/web/vhost3] does not exist
AH00558: httpd: Could not reliably determine the server's fully qualified domain name, using localhost.localdomain. Set the 'ServerName' directive globally to suppress this message
Syntax OK
[root@localhost conf.d]# mkdir -pv /data/web/vhost{1,2,3}
mkdir: created directory ‘/data/web’
mkdir: created directory ‘/data/web/vhost1’
mkdir: created directory ‘/data/web/vhost2’
mkdir: created directory ‘/data/web/vhost3’
[root@localhost conf.d]# vim /data/web/vhost1/index.html
[root@localhost conf.d]# vim /data/web/vhost2/index.html
[root@localhost conf.d]# vim /data/web/vhost3/index.html
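
The contents written into the three index pages are not shown; judging from the curl output in the final test, they presumably are:

[root@localhost conf.d]# echo '<h1>www.11111.com</h1>' > /data/web/vhost1/index.html
[root@localhost conf.d]# echo '<h1>www.22222.com</h1>' > /data/web/vhost2/index.html
[root@localhost conf.d]# echo '<h1>www.33333.com</h1>' > /data/web/vhost3/index.html
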
  • Configure the virtual hosts; each of the three sites serves a different page
[root@localhost conf.d]# vim vhosts.conf  # virtual-host configuration
<VirtualHost 192.168.222.11:80>
ServerName 192.168.222.11
DocumentRoot "/data/web/vhost1"
<Directory "/data/web/vhost1">
Options FollowSymLinks
AllowOverride None
Require all granted
</Directory>
</VirtualHost>
<VirtualHost 192.168.222.22:80>
ServerName 192.168.222.22
DocumentRoot "/data/web/vhost2"
<Directory "/data/web/vhost2">
Options FollowSymLinks
AllowOverride None
Require all granted
</Directory>
</VirtualHost>
<VirtualHost 192.168.222.33:80>
ServerName 192.168.222.33
DocumentRoot "/data/web/vhost3"
<Directory "/data/web/vhost3">
Options FollowSymLinks
AllowOverride None
Require all granted
</Directory>
</VirtualHost>
  • Configure the Nginx reverse proxy on n1 (and identically on n2)
[root@n1 ~]# vim /etc/nginx/conf.d/default.conf
upstream websrvs {  # add this upstream block
     server 192.168.222.11:80;
     server 192.168.222.22:80;
     server 192.168.222.33:80;
}

location / {  # add the proxy_pass line inside the existing location block
     proxy_pass http://websrvs;
}
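
For context, the upstream block has to sit at the top level of /etc/nginx/conf.d/default.conf (i.e. in the http context) and the location block inside the default server; a sketch of the resulting file might look like this:

upstream websrvs {
    server 192.168.222.11:80;
    server 192.168.222.22:80;
    server 192.168.222.33:80;
}

server {
    listen      80;
    server_name localhost;

    location / {
        proxy_pass http://websrvs;    # round-robin across the three internal vhosts
    }
}
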
  • Start the httpd service and check its status.
[root@localhost conf.d]# systemctl start httpd
[root@localhost conf.d]# systemctl status httpd
● httpd.service - The Apache HTTP Server
   Loaded: loaded (/usr/lib/systemd/system/httpd.service; disabled; vendor preset: disabled)
   Active: active (running) since Wed 2018-04-11 04:29:34 EDT; 7s ago
     Docs: man:httpd(8)
           man:apachectl(8)
 Main PID: 1540 (httpd)
   Status: "Processing requests..."
   CGroup: /system.slice/httpd.service
           ├─1540 /usr/sbin/httpd -DFOREGROUND
           ├─1541 /usr/sbin/httpd -DFOREGROUND
           ├─1542 /usr/sbin/httpd -DFOREGROUND
           ├─1543 /usr/sbin/httpd -DFOREGROUND
           ├─1544 /usr/sbin/httpd -DFOREGROUND
           └─1545 /usr/sbin/httpd -DFOREGROUND

Apr 11 04:29:34 localhost.localdomain systemd[1]: Starting The Apache HTTP Server...
Apr 11 04:29:34 localhost.localdomain systemd[1]: Started The Apache HTTP Server.


Part 2: configure keepalived

Because this is a dual-master model, each director is MASTER for one VRRP instance and BACKUP for the other.

Step 4: install keepalived on n1 and n2

  • Install nginx on both nodes and start the service
]#yum install epel-release
]#yum -y install nginx    
[root@n2 ~]# service nginx start
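
keepalived itself also has to be installed on both n1 and n2; the command is not shown above, but on CentOS 6 it is simply:

]# yum -y install keepalived
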
  • Configure the keepalived.conf file on n1
[root@n1~]# vim /etc/keepalived/keepalived.conf 
! Configuration File for keepalived
global_defs { 
    notification_email { 
     root@localhost  # address that receives state-change mail
    } 
    notification_email_from keepalived@localhost 
    smtp_server 127.0.0.1  # mail server
    smtp_connect_timeout 30 
    router_id n1  # identifier of this physical node
    vrrp_mcast_group4 224.0.100.19  # multicast group address
   } 

   vrrp_script chk_down {    # adjust the priority based on whether a marker file exists
    script "/etc/keepalived/sleep.sh"    
    interval 2   # check interval in seconds
    weight -5   # subtract 5 from the priority when the script fails
   } 

   vrrp_script chk_nginx {    # adjust the priority based on whether the nginx process exists
    script "killall -0 nginx && exit 0 || exit 1"    
    interval 1   # check interval in seconds
    weight -5    # subtract 5 from the priority when the script fails
    fall 2    # 2 consecutive failures before the state changes
    rise 1    # 1 success to recover
   } 


   vrrp_instance VI_1 { 
    state MASTER  # initial state of this instance
    interface eth0   # physical interface the VRRP instance is bound to
    virtual_router_id 14   # virtual router ID (must match on both nodes)
    priority 100   # priority of this host, range 0-255
    advert_int 1   # VRRP advertisement interval
    authentication {   # simple string authentication
     auth_type PASS 
     auth_pass 571f97b2   # password, at most 8 characters
    } 
    virtual_ipaddress {   # virtual IP address
     192.168.3.151/24 dev eth0
    } 
    track_script { 
     chk_down 
     chk_nginx 
    } 
    notify_master "/etc/keepalived/notify.sh master"   # script run when this node becomes MASTER
    notify_backup "/etc/keepalived/notify.sh backup"   # script run when this node becomes BACKUP
    notify_fault "/etc/keepalived/notify.sh fault"     # script run when this node enters the FAULT state
   }

   vrrp_instance VI_2 { 
    state BACKUP
    interface eth0 
    virtual_router_id 15
    priority 96 
    advert_int 1 
    authentication { 
     auth_type PASS 
     auth_pass 33334444 
    } 
    virtual_ipaddress { 
     192.168.3.152/24 dev eth0
    } 
    track_script { 
     chk_down 
     chk_nginx 
    } 
    notify_master "/etc/keepalived/notify.sh master" 
    notify_backup "/etc/keepalived/notify.sh backup" 
    notify_fault "/etc/keepalived/notify.sh fault" 
   } 
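
The matching keepalived.conf on n2 is not shown in the article. Judging from the tcpdump output in the test below (node .102 advertises vrid 14 with priority 96 and vrid 15 with priority 100), it is the mirror image of n1's file; a sketch, in which global_defs and the two vrrp_script blocks are the same as on n1 apart from router_id:

   vrrp_instance VI_1 {
    state BACKUP                  # n2 is the backup for VIP 192.168.3.151
    interface eth0
    virtual_router_id 14
    priority 96                   # lower than n1's 100
    advert_int 1
    authentication {
     auth_type PASS
     auth_pass 571f97b2           # must match n1's password for vrid 14
    }
    virtual_ipaddress {
     192.168.3.151/24 dev eth0
    }
    track_script {
     chk_down
     chk_nginx
    }
    notify_master "/etc/keepalived/notify.sh master"
    notify_backup "/etc/keepalived/notify.sh backup"
    notify_fault "/etc/keepalived/notify.sh fault"
   }

   vrrp_instance VI_2 {
    state MASTER                  # n2 is the master for VIP 192.168.3.152
    interface eth0
    virtual_router_id 15
    priority 100                  # higher than n1's 96
    advert_int 1
    authentication {
     auth_type PASS
     auth_pass 33334444
    }
    virtual_ipaddress {
     192.168.3.152/24 dev eth0
    }
    track_script {
     chk_down
     chk_nginx
    }
    notify_master "/etc/keepalived/notify.sh master"
    notify_backup "/etc/keepalived/notify.sh backup"
    notify_fault "/etc/keepalived/notify.sh fault"
   }
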
  • Add the sleep.sh script
[root@n1~]# vim /etc/keepalived/sleep.sh  # checks whether a "down" marker file exists under /etc/keepalived/
#!/bin/bash
if [ -f /etc/keepalived/down ];then
    exit 1  # the down file exists: report failure
else
    exit 0  # not found: report success
fi
  • Write the notify.sh script
[root@n1 keepalived]# vim notify.sh
#!/bin/bash
#
contact='root@localhost'
notify() {
    local mailsubject="$(hostname) to be $1, vip floating"
    local mailbody="$(date +'%F %T'): vrrp transition, $(hostname) changed to be $1"
    echo "$mailbody" | mail -s "$mailsubject" $contact
 }
case $1 in
     master)  # when becoming MASTER, start the nginx service
         service nginx start    # n1/n2 run CentOS 6, so the SysV service command is used here
         notify master
         ;;
     backup)  # when becoming BACKUP, keep nginx running so both directors can proxy
         service nginx start
         notify backup
         ;;
     fault)  # when entering the FAULT state, stop the nginx service
         service nginx stop
         notify fault
         ;;
     *)
        echo "Usage: $(basename $0) {master|backup|fault}"
        exit 1
     ;;
esac

[root@n1 ~]# chmod +x /etc/keepalived/sleep.sh  # make the scripts executable
[root@n1 ~]# chmod +x /etc/keepalived/notify.sh
[root@n1 ~]# ll /etc/keepalived/
-rw-r--r-- 1 root root 1512 Apr 12 11:23 keepalived.conf
-rwxr-xr-x 1 root root  616 Apr 11 17:23 notify.sh
-rwxr-xr-x 1 root root   66 Apr 11 17:15 sleep.sh

Give sleep.sh and notify.sh execute permission, then copy both scripts to the same location on node n2 (a sketch follows).
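
A sketch of the copy step, assuming root SSH access from n1 to n2:

[root@n1 ~]# scp -p /etc/keepalived/{sleep.sh,notify.sh} 192.168.3.102:/etc/keepalived/
[root@n2 ~]# ll /etc/keepalived/   # verify that both scripts arrived and are executable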



1. Configure keepalived on host 234.27 (back to the 192.168.234.x setup)

[root@234c27 ~]# vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived

global_defs {
    notification_email {
      root@localhost
    }
    notification_email_from keepalived@localhost
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id kpone
    vrrp_mcast_group4 234.10.10.10  # multicast group address
 }
 vrrp_instance VI_1 {
     state MASTER
     interface ens33
     virtual_router_id 50
     priority 100
     advert_int 1
     authentication {
         auth_type PASS
         auth_pass 1111
     }
     virtual_ipaddress {
         172.18.0.100/16   # this VIP load-balances to real servers 192.168.234.47 and .57
     }
 }
vrrp_instance VI_2 {
     state BACKUP
     interface ens33
     virtual_router_id 51
     priority 80
     advert_int 1
     authentication {
         auth_type PASS
         auth_pass 2222
     }
     virtual_ipaddress {
         172.18.0.200/16   # this VIP load-balances to real servers 192.168.234.67 and .77
     }
}
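
The nginx reverse-proxy configuration on the two directors is not shown here. Based on the test results at the top (www.a.com answered by .47/.57, www.b.com by .67/.77), it presumably contains two upstream groups and two server blocks on both 234.27 and 234.37; the upstream names below are illustrative:

upstream websrvs_a {
    server 192.168.234.47:80;
    server 192.168.234.57:80;
}
upstream websrvs_b {
    server 192.168.234.67:80;
    server 192.168.234.77:80;
}
server {
    listen 80;
    server_name www.a.com;
    location / { proxy_pass http://websrvs_a; }
}
server {
    listen 80;
    server_name www.b.com;
    location / { proxy_pass http://websrvs_b; }
}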

Step 5: final test

  • Open two terminals on n1 and run the following commands, one in each, to monitor the traffic and the logs
    [root@n1 ~]# tcpdump -i eth0 -nn host 224.0.100.19
    [root@n1 ~]# tail -f /var/log/messages

  • Start the keepalived service on n1

[root@n1 ~]# service keepalived start
[root@n1 ~]# ip a l  # check the interface: VIPs .151 and .152 are both up
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:c4:68:e0 brd ff:ff:ff:ff:ff:ff
    inet 192.168.3.101/24 brd 192.168.3.255 scope global eth0
    inet 192.168.3.151/24 scope global secondary eth0
    inet 192.168.3.152/24 scope global secondary eth0
...

[root@n1 ~]# tcpdump -i eth0 -nn host 224.0.100.19
10:55:38.134230 IP 192.168.3.101 > 224.0.100.19: VRRPv2, Advertisement, vrid 15, prio 96, authtype simple, intvl 1s, length 16
10:55:39.134087 IP 192.168.3.101 > 224.0.100.19: VRRPv2, Advertisement, vrid 14, prio 100, authtype simple, intvl 1s, length 16
10:55:39.134793 IP 192.168.3.101 > 224.0.100.19: VRRPv2, Advertisement, vrid 15, prio 96, authtype simple, intvl 1s, length 16
10:55:40.136173 IP 192.168.3.101 > 224.0.100.19: VRRPv2, Advertisement, vrid 14, prio 100, authtype simple, intvl 1s, length 16

[root@n1 ~]# tail -f /var/log/messages
Apr 12 10:44:26 n1 Keepalived_vrrp[1323]: VRRP_Instance(VI_1) Transition to MASTER STATE
Apr 12 10:44:27 n1 Keepalived_vrrp[1323]: VRRP_Instance(VI_1) Entering MASTER STATE
Apr 12 10:44:28 n1 Keepalived_vrrp[1323]: VRRP_Instance(VI_2) Transition to MASTER STATE
Apr 12 10:44:29 n1 Keepalived_vrrp[1323]: VRRP_Instance(VI_2) Entering MASTER STATE

In the multicast traffic to 224.0.100.19 only node .101 is sending heartbeats, so both VI_1 and VI_2 on node .101 become MASTER and the virtual addresses .151 and .152 are both brought up on node .101.

  • Start the keepalived service on n2
[root@n2 keepalived]# service keepalived start
[root@n2 keepalived]# ip a l  # check the interface: VIP .152 is up
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:b8:49:7c brd ff:ff:ff:ff:ff:ff
    inet 192.168.3.102/24 brd 192.168.3.255 scope global eth0
    inet 192.168.3.152/24 scope global secondary eth0

14:19:12.883461 IP 192.168.3.101 > 224.0.100.19: VRRPv2, Advertisement, vrid 14, prio 100, authtype simple, intvl 1s, length 20
14:19:12.894791 IP 192.168.3.102 > 224.0.100.19: VRRPv2, Advertisement, vrid 15, prio 100, authtype simple, intvl 1s, length 20
14:19:13.886811 IP 192.168.3.101 > 224.0.100.19: VRRPv2, Advertisement, vrid 14, prio 100, authtype simple, intvl 1s, length 20
14:19:13.895772 IP 192.168.3.102 > 224.0.100.19: VRRPv2, Advertisement, vrid 15, prio 100, authtype simple, intvl 1s, length 20

Apr 12 13:35:53 n1 Keepalived_vrrp[43137]: VRRP_Instance(VI_2) Sending gratuitous ARPs on eth0 for 192.168.3.152
Apr 12 14:15:19 n1 Keepalived_vrrp[43137]: VRRP_Instance(VI_2) Received higher prio advert
Apr 12 14:15:19 n1 Keepalived_vrrp[43137]: VRRP_Instance(VI_2) Entering BACKUP STATE
Apr 12 14:15:19 n1 Keepalived_vrrp[43137]: VRRP_Instance(VI_2) removing protocol VIPs.
Apr 12 14:15:19 n1 Keepalived_healthcheckers[43136]: Netlink reflector reports IP 192.168.3.152 removed

Capturing again on n1, the multicast traffic now contains heartbeats from both .101 and .102. Node .101 still has the highest priority for vrid 14 (VIP .151), but node .102 now has the highest priority for vrid 15 (VIP .152), so VI_2 on node .101 drops to BACKUP and VIP .152 moves to node .102.

[root@lsh ~]# curl http://192.168.3.151
<h1>www.11111.com</h1>
[root@lsh ~]# curl http://192.168.3.151
<h1>www.22222.com</h1>
[root@lsh ~]# curl http://192.168.3.151
<h1>www.33333.com</h1>
[root@lsh ~]# curl http://192.168.3.152
<h1>www.11111.com</h1>
[root@lsh ~]# curl http://192.168.3.152
<h1>www.22222.com</h1>
[root@lsh ~]# curl http://192.168.3.152
<h1>www.33333.com</h1>

At this point, requests from the test host to either virtual address, .151 or .152, are reverse-proxied to the three different pages in turn.

  • Priority-lowering test on n1
[root@n1 ~]# touch /etc/keepalived/down
[root@n1 ~]# ip a l  # check the interface: VIP .151 has been removed
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:c4:68:e0 brd ff:ff:ff:ff:ff:ff
    inet 192.168.3.101/24 brd 192.168.3.255 scope global eth0
    inet6 fe80::20c:29ff:fec4:68e0/64 scope link 
       valid_lft forever preferred_lft forever

14:41:58.225281 IP 192.168.3.102 > 224.0.100.19: VRRPv2, Advertisement, vrid 14, prio 96, authtype simple, intvl 1s, length 20
14:41:58.268412 IP 192.168.3.102 > 224.0.100.19: VRRPv2, Advertisement, vrid 15, prio 100, authtype simple, intvl 1s, length 20
14:41:59.227515 IP 192.168.3.102 > 224.0.100.19: VRRPv2, Advertisement, vrid 14, prio 96, authtype simple, intvl 1s, length 20
14:41:59.269282 IP 192.168.3.102 > 224.0.100.19: VRRPv2, Advertisement, vrid 15, prio 100, authtype simple, intvl 1s, length 20

Apr 12 14:41:46 n1 Keepalived_vrrp[43137]: VRRP_Instance(VI_1) Received higher prio advert
Apr 12 14:41:46 n1 Keepalived_vrrp[43137]: VRRP_Instance(VI_1) Entering BACKUP STATE
Apr 12 14:41:46 n1 Keepalived_vrrp[43137]: VRRP_Instance(VI_1) removing protocol VIPs.
Apr 12 14:41:46 n1 Keepalived_healthcheckers[43136]: Netlink reflector reports IP 192.168.3.151 removed

Create the down file on n1. When a down file exists under /etc/keepalived/, the chk_down script fails and keepalived subtracts 5 from the priority of the tracked instances on n1: VI_1 (vrid 14, VIP .151) drops from 100 to 95. n2's priority for the same virtual router is 96, and since 95 < 96, VIP .151 fails over to n2.

[root@n2 keepalived]# ip a l
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:b8:49:7c brd ff:ff:ff:ff:ff:ff
    inet 192.168.3.102/24 brd 192.168.3.255 scope global eth0
    inet 192.168.3.152/24 scope global secondary eth0
    inet 192.168.3.151/24 scope global secondary eth0

Checking the interfaces on n2 at this point, both .151 and .152 now reside on that node.
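
To restore the original dual-master layout (not shown in the article), removing the marker file should be sufficient: chk_down succeeds again, n1's priority for vrid 14 returns to 100, and VIP .151 preempts back to n1:

[root@n1 ~]# rm -f /etc/keepalived/down
[root@n1 ~]# ip a l | grep 192.168.3.151   # the VIP should reappear on n1 within a few seconds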

2. Configure keepalived on host 234.37

[root@234c37 ~]# vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived

global_defs {
    notification_email {
      root@localhost
    }
    notification_email_from keepalived@localhost
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id kptwo  # router ID unique to this node
    vrrp_mcast_group4 234.10.10.10  # multicast group address
 }
 vrrp_instance VI_1 {
     state BACKUP
     interface ens33
     virtual_router_id 50
     priority 80
     advert_int 1
     authentication {
         auth_type PASS
         auth_pass 1111
     }
     virtual_ipaddress {
         172.18.0.100/16   # this VIP load-balances to real servers 192.168.234.47 and .57
     }
 }
vrrp_instance VI_2 {
     state MASTER
     interface ens33
     virtual_router_id 51
     priority 100
     advert_int 1
     authentication {
         auth_type PASS
         auth_pass 2222
     }
     virtual_ipaddress {
         172.18.0.200/16   # this VIP load-balances to real servers 192.168.234.67 and .77
     }
}
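
A quick sanity check of the dual-master split (a sketch; the exact interface listing will differ): each director should carry its own VIP, and stopping keepalived on one should move both VIPs to the other.

[root@234c27 ~]# ip a | grep 172.18.0        # expect 172.18.0.100 here
[root@234c37 ~]# ip a | grep 172.18.0        # expect 172.18.0.200 here
[root@234c27 ~]# systemctl stop keepalived   # simulate a failure on 234.27
[root@234c37 ~]# ip a | grep 172.18.0        # both VIPs should now be on 234.37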

With that, a simple dual-master model is up and running.
