# Dispatch on the action argument: start|stop|status.
# Requires $SNS_VIP to be set (defined earlier in the script, outside this view).
case "$1" in
start)
    # LVS-DR real server setup: bind the VIP to loopback alias lo:0 with a
    # /32 mask so this host accepts packets addressed to the VIP.
    /sbin/ifconfig lo:0 "$SNS_VIP" netmask 255.255.255.255 broadcast "$SNS_VIP"
    /sbin/route add -host "$SNS_VIP" dev lo:0
    # Suppress ARP for the loopback-bound VIP so only the director
    # answers ARP requests for it (standard LVS-DR arp_ignore/arp_announce).
    echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
    sysctl -p >/dev/null 2>&1
    echo "RealServer Start OK"
    ;;
stop)
    /sbin/ifconfig lo:0 down
    # Mirror the 'add -host' above; errors ignored if the route is already gone.
    /sbin/route del -host "$SNS_VIP" >/dev/null 2>&1
    # Restore default ARP behaviour.
    echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
    echo "RealServer Stopped"
    ;;
status)
    # BUG FIX: the original used single quotes, which assigned the literal
    # command text to the variable instead of running it, so 'test -z' was
    # never true and status always reported "Running". Use $( ) command
    # substitution to actually check whether lo:0 carries the VIP.
    islothere=$(/sbin/ifconfig lo:0 | grep -- "$SNS_VIP")
    if test -z "$islothere" ; then
        echo "LVS_VIP is not Start for Real Server"
    else
        echo "LVS_VIP is Running on Real Server"
    fi
    ;;
*)
    echo "Usage: $0 {start|status|stop}"
    exit 1
    ;;
esac
配置完成后,为服务lvsrs添加可执行权限,然后就可以启动或关闭lvsrs了。启动后,可通过ifconfig命令查看VIP是否成功添加;注意此时VIP即使成功添加,也是ping不通的。
6、开启服务
sh lvs_real.sh start
service keepalived start
此时就可以ping通VIP了
7、拷贝lvs_real.sh 到每个节点
scp lvs_real.sh root@192.168.3.108:/etc/init.d
scp lvs_real.sh root@192.168.3.74:/etc/init.d
8、配置各项服务开机自动启动
lvs
cp /root/lvs_real.sh /etc/init.d/lvs_real.sh
vi /etc/init.d/lvs_real.sh
加入 # chkconfig: 2345 64 36
chkconfig --add lvs_real.sh
chkconfig lvs_real.sh on
chkconfig keepalived on
节点配置:
vi /etc/init.d/lvs_real.sh
加入 # chkconfig: 2345 64 36
chkconfig --add lvs_real.sh
chkconfig lvs_real.sh on
service iptables stop
chkconfig iptables off
三、测试
本方案是由一台负载调度器(Director Server)和两台Real Server组成的负载均衡集群,由LVS软件实现。正常情况下,用户请求通过VIP到达Director Server,然后由其根据负载均衡算法选择一台Real Server响应用户;当监测到某一台Real Server故障时,则将其剔除集群,不再提供服务,待恢复正常后,自动加入继续提供服务。
――负载均衡功能
在一台客户端机器上并发创建客户端连接,以模拟用户请求:
# mysqlslap -uroot -proot123 -h192.168.3.40 -P3306 --concurrency=100 --iterations=5 --create-schema='information_schema' --query='select count(*) from processlist;' --number-of-queries=10000 --debug-info
在此期间,查看两台Slave从库上的连接数:
# ipvsadm --list
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP localhost:mysql lc persistent 50
-> localhost:mysql Route 100 48 154
-> localhost:mysql Route 100 1 0
[iyunv@localhost ~]# ipvsadm --list
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP localhost:mysql lc persistent 50
-> localhost:mysql Route 100 49 253
-> localhost:mysql Route 100 1 0
[iyunv@localhost ~]# ipvsadm --list
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP localhost:mysql lc persistent 50
-> localhost:mysql Route 100 61 341
-> localhost:mysql Route 100 1 0
[iyunv@localhost ~]# ipvsadm --list
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP localhost:mysql lc persistent 50
-> localhost:mysql Route 100 32 370
-> localhost:mysql Route 100 1 0
从两台Slave从库上查询,每台从库均创建了多个连接,连接是动态变化的,所以连接数并不是绝对平均。
――故障转移功能
将node1上的MySQL实例关闭,模拟此节点故障,然后查看主Director Server上日志
[iyunv@localhost ~]# service mysql stop
Shutting down MySQL... [ OK ]
[iyunv@node1 ~]# tail /var/log/messages
Apr 4 00:01:22 localhost Keepalived_healthcheckers: TCP connection to [192.168.3.108:3306] failed !!!
Apr 4 00:01:22 localhost Keepalived_healthcheckers: Removing service [192.168.3.108:3306] from VS [192.168.3.40:3306]
日志显示,Keepalived检测到192.168.3.108:3306端口连接失败,则将其从VS [192.168.3.40:3306]中剔除了;此时若再次连接MySQL,会发现所有连接均在192.168.3.74上。
将node1上的MySQL实例重新启动,模拟节点恢复,然后查看主Director Server上日志
[iyunv@localhost ~]# service mysql start
Starting MySQL.. [ OK ]
[iyunv@node1 ~]# tail /var/log/messages
Apr 4 00:06:52 localhost Keepalived_healthcheckers: TCP connection to [192.168.3.108:3306] success.
Apr 4 00:06:52 localhost Keepalived_healthcheckers: Adding service [192.168.3.108:3306] to VS [192.168.3.40:3306]
可见,Real Server恢复后,Keepalived可立即监测到,此时自动将其添加到LVS集群中。