32433221 posted on 2016-10-19 10:10:15

Building a Highly Available Load-Balancing Cluster with Keepalived + LVS-DR

Host environment: Red Hat Enterprise Linux 6.5, 64-bit
Test environment:
         Real server 1   IP 172.25.25.113   hostname: server3.example.com
         Real server 2   IP 172.25.25.114   hostname: server4.example.com
         Director 2      IP 172.25.25.112   hostname: server2.example.com
         Director 1      IP 172.25.25.111   hostname: server1.example.com
Firewall: disabled
Virtual IP (VIP): 172.25.25.200/24


An earlier post showed how to avoid a single point of failure by putting the director (DR) into a heartbeat-based high-availability (HA) cluster. This post covers another way to do the same thing: putting the director under keepalived. Since keepalived performs health checks on the back-end real servers itself, the director can be added as soon as keepalived is installed. The DR configuration is not repeated here; see the earlier posts if you are interested. This post starts straight from the keepalived installation.
The keepalived tarball can be downloaded from the official site, keepalived.org.
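The real-server side of the DR setup is assumed to be in place already, as described in the earlier posts. Purely for reference, a minimal sketch of the usual real-server preparation is shown below (the earlier posts may have used arptables instead; the sysctl variant here is only one common way to do it):
#on each real server (172.25.25.113 and 172.25.25.114): bind the VIP to lo and suppress ARP for it, so only the director answers ARP requests for the VIP
# ip addr add 172.25.25.200/32 dev lo
# echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
# echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
# echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
# echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce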
1. Install keepalived from source
# ls
keepalived-1.2.24.tar.gz
# tar zxf keepalived-1.2.24.tar.gz      #unpack
keepalived-1.2.24
keepalived-1.2.24.tar.gz
# cd keepalived-1.2.24
# ls
aclocal.m4   ChangeLog     CONTRIBUTORS  genhash     keepalived.spec.in  missing
ar-lib       compile       COPYING       INSTALL     lib                 README
AUTHOR       configure     depcomp       install-sh  Makefile.am         TODO
bin_install  configure.ac  doc           keepalived  Makefile.in
# ./configure --prefix=/usr/local/keepalived
If configure stops here with an error complaining that OpenSSL is not properly installed, install openssl-devel and run configure again:
# yum install openssl-devel.x86_64 -y
# ./configure --prefix=/usr/local/keepalived
# make
# make install
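Optionally, give the freshly installed binary a quick sanity check with its version flag:
# /usr/local/keepalived/sbin/keepalived -v    #prints the keepalived version and exits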
# cd /usr/local/
# ls
bin  etc  games  include  keepalived  lib  lib64  libexec  sbin  share  src
# scp -r keepalived/ 172.25.25.112:/usr/local/
root@172.25.25.112's password:
#create the symbolic links
# ln -s /usr/local/keepalived/etc/keepalived/ /etc/                      #symlink for the main configuration directory
# ln -s /usr/local/keepalived/etc/rc.d/init.d/keepalived /etc/init.d/    #symlink for the init script
# chmod +x /etc/init.d/keepalived                                        #make the script executable
# ln -s /usr/local/keepalived/etc/sysconfig/keepalived /etc/sysconfig/   #symlink for the sysconfig file
# ln -s /usr/local/keepalived/sbin/keepalived /sbin/                     #symlink for the binary
# /etc/init.d/keepalived start    #test: start the service
Starting keepalived:                                       
# /etc/init.d/keepalived stop
Stopping keepalived:                                       
#
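Optionally, the init script can also be registered with chkconfig so keepalived starts at boot (assuming the script's chkconfig header is intact, which it normally is in this release):
# chkconfig --add keepalived    #register the SysV init script
# chkconfig keepalived on       #enable it for the default runlevels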

2. Add the LVS-DR configuration to keepalived and test (on the directors)

1. Add the DR configuration to keepalived
# cd /etc/keepalived/
# ls
keepalived.conf  samples
# vim keepalived.conf    #edit the main configuration file
1 ! Configuration File for keepalived
2
3 global_defs {
4    notification_email {
5         root@localhost                                       #mail recipient
6    }
7    notification_email_from keepalived@server1.example.com    #mail sender
8    smtp_server 127.0.0.1                                     #local loopback
9    smtp_connect_timeout 30                                   #SMTP connection timeout
10    router_id LVS_DEVEL
11    vrrp_skip_check_adv_addr
12    vrrp_strict
13    vrrp_garp_interval 0
14    vrrp_gna_interval 0
15 }
16
17 vrrp_instance VI_1 {
18   state MASTER             #director 1 is the master
19   interface eth0           #bind to interface eth0
20   virtual_router_id 25     #virtual router id (1-254)
21   priority 100             #when keepalived starts, the priority values are compared; the node with the higher value becomes the master
22   advert_int 1
23   authentication {
24         auth_type PASS
25         auth_pass 1111
26   }
27   virtual_ipaddress {
28         172.25.25.200      #virtual IP
29   }
30 }
31
32 virtual_server 172.25.25.200 80 {    #virtual service
33   delay_loop 6
34   lb_algo rr
35   lb_kind DR               #direct routing
36 #    persistence_timeout 50    #session persistence timeout (commented out here)
37   protocol TCP
38
39   real_server 172.25.25.113 80 {     #real server
40         weight 1           #weight
41         TCP_CHECK {
42             connect_timeout 3
43             nb_get_retry 3
44             delay_before_retry 3
45         }
46   }
47   real_server 172.25.25.114 80 {     #real server
48         weight 1
49         TCP_CHECK {
50             connect_timeout 3
51             nb_get_retry 3
52             delay_before_retry 3
53         }
54   }
55
56 }
# /etc/init.d/keepalived start   #start (on director 1)
Starting keepalived:                                       

# scp keepalived.conf 172.25.25.112:/etc/keepalived/    #copy the file to director 2
root@172.25.25.112's password:
keepalived.conf                                 100% 1049   1.0KB/s   00:00
# vim keepalived.conf    #on director 2, edit the copied file and change the following lines
7    notification_email_from keepalived@server2.example.com    #mail sender: this host
18   state BACKUP      #backup node
21   priority 88       #any value lower than the master's will do
# /etc/init.d/keepalived start   #start (on director 2)
Starting keepalived:                                       
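keepalived logs through syslog, so on RHEL 6 the VRRP election can be followed in /var/log/messages on either director to confirm which node became MASTER and which stayed BACKUP, for example:
# tail -f /var/log/messages | grep -i keepalived    #watch the VRRP state transitions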

2. Test
# ipvsadm -l
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.25.25.200:http rr
  -> server3.example.com:http     Route   1      0          0
  -> 172.25.25.114:http           Route   1      0          0
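Because TCP_CHECK probes port 80 on each real server, a real server whose web service dies is removed from the table automatically. A quick way to see this (assuming, as in the earlier DR posts, that httpd is the service running on the real servers):
#on real server 1 (172.25.25.113): stop the web service
# /etc/init.d/httpd stop
#on the active director: after a few check intervals only 172.25.25.114 should remain in the table
# ipvsadm -ln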

#after refreshing on the client, real server 2 responds
# ip addr show   #check the addresses: the virtual IP is on director 1
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:ec:8b:36 brd ff:ff:ff:ff:ff:ff
    inet 172.25.25.111/24 brd 172.25.25.255 scope global eth0
    inet 172.25.25.200/32 scope global eth0   #virtual IP
    inet6 fe80::5054:ff:feec:8b36/64 scope link
       valid_lft forever preferred_lft forever

When keepalived on the master is stopped, the service fails over to the backup; when the master comes back, keepalived re-reads the configuration to decide which node is master and which is backup, and the service moves back to the master.
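A simple way to watch this from a client while running the tests below is to poll the VIP in a loop (hypothetical client host; it assumes the two real servers return pages that can be told apart):
#requests keep alternating between server3 and server4 even while the directors fail over
# while true; do curl -s http://172.25.25.200/; sleep 1; done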

#stop keepalived on director 1 and test
# /etc/init.d/keepalived stop
Stopping keepalived:                                       
#the service is still available

#after refreshing on the client, real server 2 still responds

# ip addr show    #check: the virtual IP is no longer on director 1
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:ec:8b:36 brd ff:ff:ff:ff:ff:ff
    inet 172.25.25.111/24 brd 172.25.25.255 scope global eth0
    inet6 fe80::5054:ff:feec:8b36/64 scope link
       valid_lft forever preferred_lft forever

# ip addr show    #check: the virtual IP is now on director 2
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:85:1a:3b brd ff:ff:ff:ff:ff:ff
    inet 172.25.25.112/24 brd 172.25.25.255 scope global eth0
    inet 172.25.25.200/32 scope global eth0   #virtual IP
    inet6 fe80::5054:ff:fe85:1a3b/64 scope link
       valid_lft forever preferred_lft forever

#start keepalived on director 1 again
# /etc/init.d/keepalived start
Starting keepalived:                                       
#the service keeps running normally

#after refreshing on the client, real server 2 responds as before

# ip addr show   #check: the virtual IP has moved back to director 1
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:ec:8b:36 brd ff:ff:ff:ff:ff:ff
    inet 172.25.25.111/24 brd 172.25.25.255 scope global eth0
    inet 172.25.25.200/32 scope global eth0   #virtual IP
    inet6 fe80::5054:ff:feec:8b36/64 scope link
       valid_lft forever preferred_lft forever
# ip addr show    #check: director 2 no longer holds the virtual IP
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:85:1a:3b brd ff:ff:ff:ff:ff:ff
    inet 172.25.25.112/24 brd 172.25.25.255 scope global eth0
    inet6 fe80::5054:ff:fe85:1a3b/64 scope link
       valid_lft forever preferred_lft forever
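The last test shows keepalived's default preemption: as soon as director 1 comes back, it reclaims the VIP. If that extra switch-over is unwanted, keepalived also supports nopreempt; a sketch of the changed vrrp_instance (both directors would then start as BACKUP and differ only in priority):
vrrp_instance VI_1 {
   state BACKUP          #nopreempt requires the initial state to be BACKUP
   nopreempt             #a recovered node does not take the VIP back
   interface eth0
   virtual_router_id 25
   priority 100          #keep the priorities different on the two directors
   advert_int 1
   authentication {
         auth_type PASS
         auth_pass 1111
   }
   virtual_ipaddress {
         172.25.25.200
   }
}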

