Advanced applications of keepalived
The previous post covered the VRRP protocol and basic keepalived usage; this one moves on to more advanced keepalived setups.
I. keepalived + LVS: a highly available, load-balanced httpd cluster
1. Environment
192.168.100.179  LVS (DR mode) + keepalived
192.168.100.180  LVS (DR mode) + keepalived
192.168.100.173  httpd1
192.168.100.175  httpd2
OS: CentOS-6.5-x86_64
VIP: 192.168.100.11
The topology is simple, so no diagram is included.
Make sure the clocks on all cluster nodes are synchronized.
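A one-shot sync is enough for a lab setup; for example, assuming ntpdate is installed and the nodes can reach a public NTP pool:
# ntpdate pool.ntp.org    # run on every node (in production, point ntpd at your own NTP server instead)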
2. Configuring the back-end real servers
To make this easy, we use a script that sets the required kernel ARP parameters and configures the VIP on each real server.
# cat lvs_dr.sh
#!/bin/bash
#
# Configure the DR-mode ARP kernel parameters and the VIP on a real server.
vip="192.168.100.11"

start() {
    # Do not answer ARP requests for the VIP and do not announce it.
    echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
    echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
    ifconfig lo:0 $vip netmask 255.255.255.255 broadcast $vip
    route add -host $vip dev lo:0
}

stop() {
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
    ifconfig lo:0 down
}

case $1 in
start)
    start
    ;;
stop)
    stop
    ;;
*)
    echo "Usage: $(basename $0) {start|stop}"
    exit 1
    ;;
esac
# bash lvs_dr.sh start
# ifconfig
eth0      Link encap:Ethernet  HWaddr 00:0C:29:6A:DC:8D
          inet addr:192.168.100.173  Bcast:192.168.100.255  Mask:255.255.255.0
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:67549 errors:0 dropped:0 overruns:0 frame:0
          TX packets:23518 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:4583906 (4.3 MiB)  TX bytes:2019642 (1.9 MiB)
lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:618 errors:0 dropped:0 overruns:0 frame:0
          TX packets:618 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:54373 (53.0 KiB)  TX bytes:54373 (53.0 KiB)
lo:0      Link encap:Local Loopback
          inet addr:192.168.100.11  Mask:255.255.255.255
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
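If you want to double-check that the kernel parameters took effect, they can be read back with sysctl (a quick sanity check, not part of the original procedure):
# sysctl net.ipv4.conf.all.arp_ignore net.ipv4.conf.all.arp_announce
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.all.arp_announce = 2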
# scp lvs_dr.sh 192.168.100.175:~    # copy the script to httpd2 and run it there as well
3. keepalived configuration
Node 179:
# cat keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
root@localhost
}
notification_email_from xiexiaojun
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id LVS_DEVEL
}
vrrp_script chk_mt_down {
script "[ -f /etc/keepalived/down ] && exit 1 || exit 0"
interval 1
weight -5
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass vi111
}
virtual_ipaddress {
192.168.100.11
}
track_script {
chk_mt_down
}
}
virtual_server 192.168.100.11 80 {
delay_loop 6
lb_algo rr
lb_kind DR
nat_mask 255.255.255.0
# persistence_timeout 50
protocol TCP
real_server 192.168.100.173 80 {
weight 1
HTTP_GET {
url {
path /
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 192.168.100.175 80 {
weight 1
HTTP_GET {
url {
path /
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
sorry_server 127.0.0.1 80    # fallback server: if every real server goes down, clients would otherwise just get an error page, which is unfriendly; instead we serve a maintenance page telling users when the site will be back.
}
Node 180:
# cat keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
root@localhost
}
notification_email_from xiexiaojun
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id LVS_DEVEL
}
vrrp_script chk_mt_down {
script "[ -f /etc/keepalived/down ] && exit 1 || exit 0"
interval 1
weight -5
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 51
priority 90
advert_int 1
authentication {
auth_type PASS
auth_pass vi111
}
virtual_ipaddress {
192.168.100.11
}
track_script {
chk_mt_down
}
}
virtual_server 192.168.100.11 80 {
delay_loop 6
lb_algo rr
lb_kind DR
nat_mask 255.255.255.0
# persistence_timeout 50
protocol TCP
real_server 192.168.100.173 80 {
weight 1
HTTP_GET {
url {
path /
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 192.168.100.175 80 {
weight 1
HTTP_GET {
url {
path /
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
sorry_server 127.0.0.1 80    # fallback server serving the maintenance page (same purpose as on node 179)
}
Check node 179:
# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:53:f6:29 brd ff:ff:ff:ff:ff:ff
    inet 192.168.100.179/24 brd 192.168.100.255 scope global eth0
    inet 192.168.100.11/32 scope global eth0
    inet6 fe80::20c:29ff:fe53:f629/64 scope link
       valid_lft forever preferred_lft forever
# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP  192.168.100.11:80 rr
  -> 192.168.100.173:80           Route   1      0          1
  -> 192.168.100.175:80           Route   1      0          0
As you can see, keepalived generated the LVS (ipvsadm) rules automatically from the configuration file.
Test: open 192.168.100.11 in a browser.
[Screenshots: two browser requests to 192.168.100.11, answered by httpd1 and httpd2 in turn]
Refreshing the page shows requests alternating between 173 and 175, i.e. round-robin is working.
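The same behaviour can also be checked from the command line of any client machine; curl simply fetches whatever page each back end serves:
# curl http://192.168.100.11/    # repeat a few times; the responses should alternate between 173 and 175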
Next, test what happens when both 173 and 175 are down:
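The simplest way to simulate this is to stop the web service on both real servers:
# service httpd stop    # run on both 192.168.100.173 and 192.168.100.175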
# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP  192.168.100.11:80 rr
  -> 127.0.0.1:80                 Local   1      0          8
Now access 192.168.100.11 from the browser again:
[Screenshot: the maintenance page served by the sorry_server at 192.168.100.11]
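For the sorry_server to be useful, something has to be listening on 127.0.0.1:80 on the director itself. A minimal sketch, assuming Apache httpd is installed on both director nodes (the original setup does not show this step):
# echo "The site is under maintenance and will be back at 20:00." > /var/www/html/index.html
# service httpd start
Do this on both 179 and 180 so the maintenance page is available no matter which node holds the VIP.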
4. Custom email notifications
By default, keepalived only sends mail when a back-end real server fails. How do we send a custom alert to a designated administrator when keepalived itself fails or when a master/backup transition happens?
Add the notification scripts to the vrrp_instance (virtual router) block of keepalived.conf:
notify_master "/etc/keepalived/notify.sh master"
notify_backup "/etc/keepalived/notify.sh backup"
notify_fault "/etc/keepalived/notify.sh fault"
# cat notify.sh
#!/bin/bash
# Author: MageEdu
# description: An example of notify script
#
vip=192.168.100.11
contact='root@localhost'
notify() {
mailsubject="`hostname` to be $1: $vip floating"
mailbody="`date '+%F %H:%M:%S'`: vrrp transition, `hostname` changed to be $1"
echo $mailbody | mail -s "$mailsubject" $contact
}
case "$1" in
master)
notify master
exit 0
;;
backup)
notify backup
exit 0
;;
fault)
notify fault
exit 0
;;
*)
echo "Usage: $(basename $0) {master|backup|fault}"
exit 1
;;
esac
# chmod +x notify.sh
# scp notify.sh 192.168.100.180:/etc/keepalived/
notify.sh                                     100%  618     0.6KB/s   00:00
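With the notify_* lines added to the vrrp_instance block on both nodes and notify.sh in place, an easy way to see the notification in action is to stop keepalived on the master and then read root's local mail on the backup (the service names below are the standard CentOS 6 init scripts):
# service keepalived stop     # on 179: it stops advertising, 180 becomes MASTER and runs notify.sh master
# mail                        # on 180: root@localhost should now have a "changed to be master" message
# service keepalived start    # on 179: it preempts and takes the VIP back, sending another round of mail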
II. keepalived + haproxy: a highly available, load-balanced httpd cluster
We can reuse the environment above and simply swap LVS for haproxy.
As noted earlier, when a node fails keepalived by itself only floats the IP address; unlike some other HA stacks it does not manage the service itself. So we use notify scripts to start (or restart) the service when a node takes over.
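The liveness check used later (the chk_haproxy vrrp_script) relies on killall -0, which only tests whether a process with that name exists without actually signalling it; you can see how it behaves by hand:
# killall -0 haproxy; echo $?
0     # 0 when haproxy is running; when it is not, killall reports "no process found" and returns non-zero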
1. haproxy configuration
Make a few small changes to the default haproxy configuration:
frontend  main *:80
#    acl url_static       path_beg       -i /static /images /javascript /stylesheets
#    acl url_static       path_end       -i .jpg .gif .png .css .js

#    use_backend static          if url_static
    default_backend             app

#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
#backend static
#    balance     roundrobin
#    server      static 127.0.0.1:80 check

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend app
    balance     roundrobin
    server  app1 192.168.100.173:80 check
    server  app2 192.168.100.175:80 check
#    server  app3 127.0.0.1:5003 check
#    server  app4 127.0.0.1:5004 check
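Before copying the file over, it is worth letting haproxy validate it (-c only checks the configuration and exits):
# haproxy -c -f /etc/haproxy/haproxy.cfg
Configuration file is valid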
# scp haproxy.cfg 192.168.100.180:/etc/haproxy/
haproxy.cfg                                   100% 3351     3.3KB/s   00:00
To keep the earlier LVS configuration from interfering, run the lvs_dr.sh script with "stop" on each back-end host to undo it:
# bash lvs_dr.sh stop    # on 192.168.100.173
# bash lvs_dr.sh stop    # on 192.168.100.175
2. keepalived configuration
Whichever of the two nodes (179 and 180) is currently the master should be running haproxy; on the backup it can be stopped (although leaving it running all the time also works). We also need a script that checks whether haproxy is actually running, so that a dead haproxy triggers a switch-over. (Note that the virtual_server section kept in the configs below is a leftover from the LVS setup; with haproxy now doing the load balancing it is best removed, since the IPVS rules it generates can intercept traffic to the VIP before haproxy ever sees it.)
The notify script, extended to switch haproxy as well:
# cat notify.sh
#!/bin/bash
# Author: MageEdu
# description: An example of notify script
#
vip=192.168.100.11
contact='root@localhost'
notify() {
mailsubject="`hostname` to be $1: $vip floating"
mailbody="`date '+%F %H:%M:%S'`: vrrp transition, `hostname` changed to be $1"
echo $mailbody | mail -s "$mailsubject" $contact
}
case "$1" in
master)
notify master
/etc/rc.d/init.d/haproxy start
exit 0
;;
backup)
notify backup
/etc/rc.d/init.d/haproxy restart    # Note: if this were "stop", then when haproxy dies on the master (priority -5),
                                    # the backup's haproxy would be stopped too, so its chk_haproxy would also fail
                                    # (priority -5 as well) and the VIP would never move.
exit 0
;;
fault)
notify fault
/etc/rc.d/init.d/haproxy stop
exit 0
;;
*)
echo "Usage: $(basename $0) {master|backup|fault}"
exit 1
;;
esac
Node 179:
# cat keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
root@localhost
}
notification_email_from xiexiaojun
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id LVS_DEVEL
}
vrrp_script chk_mt_down {
script "[ -f /etc/keepalived/down ] && exit 1 || exit 0"
interval 1
weight -5
}
vrrp_script chk_haproxy {
script "killall -0 haproxy &>/dev/null"
interval 1
weight -5
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass vi111
}
virtual_ipaddress {
192.168.100.11
}
track_script {
chk_mt_down
chk_haproxy
}
notify_master "/etc/keepalived/notify.sh master"
notify_backup "/etc/keepalived/notify.sh backup"
notify_fault "/etc/keepalived/notify.sh fault"
}
virtual_server 192.168.100.11 80 {
delay_loop 6
lb_algo rr
lb_kind DR
nat_mask 255.255.255.0
# persistence_timeout 50
protocol TCP
real_server 192.168.100.173 80 {
weight 1
HTTP_GET {
url {
path /
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 192.168.100.175 80 {
weight 1
HTTP_GET {
url {
path /
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
sorry_server 127.0.0.1 80
}
Node 180:
# cat keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
root@localhost
}
notification_email_from xiexiaojun
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id LVS_DEVEL
}
vrrp_script chk_mt_down {
script "[ -f /etc/keepalived/down ] && exit 1 || exit 0"
interval 1
weight -5
}
vrrp_script chk_haproxy {
script "killall -0 haproxy &>/dev/null"
interval 1
weight -5
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 51
priority 90
advert_int 1
authentication {
auth_type PASS
auth_pass vi111
}
virtual_ipaddress {
192.168.100.11
}
track_script {
chk_mt_down    # both scripts are tracked: if either of them fails, the node's priority is reduced accordingly
chk_haproxy
}
notify_master "/etc/keepalived/notify.sh master"
notify_backup "/etc/keepalived/notify.sh backup"
notify_fault "/etc/keepalived/notify.sh fault"
}
virtual_server 192.168.100.11 80 {
delay_loop 6
lb_algo rr
lb_kind DR
nat_mask 255.255.255.0
# persistence_timeout 50
protocol TCP
real_server 192.168.100.173 80 {
weight 1
HTTP_GET {
url {
path /
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 192.168.100.175 80 {
weight 1
HTTP_GET {
url {
path /
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
sorry_server 127.0.0.1 80
}
Testing:
[Screenshots: requests to 192.168.100.11 are again answered by the two back-end servers in turn]
To run a dual-master (active-active) setup, you only need to add a second virtual router (vrrp_instance) to each node; it is not demonstrated here, but see the previous post if anything is unclear. A rough sketch of the extra instance follows below.
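For reference, a minimal sketch of the extra instance on node 179, with the roles reversed relative to VI_1; the second VIP 192.168.100.12 and virtual_router_id 61 are made-up values for illustration, and node 180 would carry the mirror-image MASTER/priority settings:
vrrp_instance VI_2 {
    state BACKUP                 # 179 is the backup for this instance (180 would be MASTER)
    interface eth0
    virtual_router_id 61         # must differ from VI_1 and match on both nodes
    priority 90                  # lower than 180's priority (e.g. 100) for this instance
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass vi222
    }
    virtual_ipaddress {
        192.168.100.12           # second, hypothetical VIP
    }
    track_script {
        chk_mt_down
        chk_haproxy
    }
    notify_master "/etc/keepalived/notify.sh master"
    notify_backup "/etc/keepalived/notify.sh backup"
    notify_fault "/etc/keepalived/notify.sh fault"
}
With one VIP active on each node, clients can be spread across both addresses, for example via two DNS A records.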