设为首页 收藏本站
查看: 828|回复: 0

[经验分享] LVS+Keepalived实现负载均衡试验笔记

[复制链接]

尚未签到

发表于 2018-12-30 07:16:39 | 显示全部楼层 |阅读模式
  试验环境
  192.168.108.108    Master-LVS-Director
  
192.168.108.109    Backup-LVS-Director
  
192.168.108.180     VIP
  
192.168.108.161    RealServer1
  
192.168.108.162    RealServer2
  拓扑图
  

  安装ipvsadm
  
# yum install -y ipvsadm
  安装keepalived
  确认当前运行的内核
  
# uname -r
  
2.6.18-128.4.1.el5xen
  
# ls -1 /usr/src/kernels
  
2.6.18-128.4.1.el5-x86_64
  
2.6.18-128.el5-x86_64
  # wget http://www.keepalived.org/software/keepalived-1.1.17.tar.gz
  
# tar -xvzf keepalived-1.1.17.tar.gz
  
# cd keepalived-1.1.17
  
# ./configure --sysconfdir=/etc/ --sbindir=/usr/sbin/ --with-kernel-dir=/usr/src/kernels/2.6.18-128.4.1.el5-x86_64
  
Keepalived configuration
  
------------------------
  
Keepalived version: 1.1.17
  
Compiler: gcc
  
Compiler flags: -g -O2
  
Extra Lib : -lpopt -lssl -lcrypto
  
Use IPVS Framework: Yes
  
IPVS sync daemon support : Yes
  
Use VRRP Framework : Yes
  
Use LinkWatch: No
  
Use Debug flags: No
  
# make && make install
  配置keepalived  
  
# vi /etc/keepalived/keepalived.conf
  
下载 keepalived.conf
  


! Configuration File for keepalived  

  
# 全局定义
  
global_defs {
  
notification_email {
  
13810955300@139.com
  
}
  
notification_email_from root@experiment.jobkoo.com
  

  
#smtp主机地址
  
smtp_server 127.0.0.1
  
smtp_connect_timeout 30
  

  
#运行Keepalived服务器的一个标识。发邮件时显示在邮件标题中的信息
  
router_id LVS_MASTER
  
}
  

  
#VIP
  
vrrp_instance VI_1 {
  

  
#指定实例的初始状态(角色)。在两台router都启动时马上会根据priority的高低开始竞选
  
#高priority为Master
  
state MASTER
  

  
#VT_1 实例绑定的网卡
  
interface eth0
  

  
#VRID 标记(0-255)
  
virtual_router_id 51
  

  
#优先级,BACKUP的值一定要低于MASTER
  
priority 100
  

  
#检查间隔
  
advert_int 1
  

  
#设置认证
  
authentication {
  
#认证类型
  
auth_type PASS
  
#认证密码
  
auth_pass 1111
  
}
  

  
#VIP 这个IP在发生MASTER 到 BACKUP切换时会随之add或del,所以每台服务器上可以不绑定
  
#虚拟地址,而都放入virtual_ipaddress块中(可以多个),keepalived会自动使用ip地址进
  
#行绑定(不需要依赖ifcfg-eth0),利用ip add show eth0可以看到加入的VIP
  
virtual_ipaddress {
  
192.168.108.180
  
}
  
}
  

  
#定义virtual_server (HTTP | 80)
  
virtual_server 192.168.108.180 80 {
  
delay_loop 6            # service polling的delay时间
  
lb_algo wlc             # 调度算法
  
lb_kind DR              # LVS工作方式
  
persistence_timeout 50  # 会话保持时间
  
protocol TCP            # 协议类型(TCP|UDP)
  

  
#定义rs1,每一个rs都需要下面的一个配置段
  
real_server 192.168.108.161 80 {
  
weight 1            # 权值 默认1,0为失效
  
# inhibit_on_failure    # 在服务器健康检查失败后不从IPVS中删除而将其权值标记为0
  

  
# TCP方式的健康检查
  
TCP_CHECK {
  
connect_timeout 10      # 连接超时时间
  
nb_get_retry 3          # 重试次数
  
delay_before_retry 3    # 重试间隔
  
connect_port 80         # 健康检查端口
  
}
  
}
  

  
# 定义rs2
  
real_server 192.168.108.162 80 {
  
weight 1
  
TCP_CHECK {
  
connect_timeout 10
  
nb_get_retry 3
  
delay_before_retry 3
  
connect_port 80
  
}
  
}
  
}
  

  
# 定义virtual_server (HTTPS | 443)
  
virtual_server 192.168.108.180 443 {
  
delay_loop 6
  
lb_algo wlc
  
lb_kind DR
  
nat_mask 255.255.255.0
  
persistence_timeout 50
  
protocol TCP
  

  
real_server 192.168.108.161 443 {
  
weight 1
  
TCP_CHECK {
  
connect_timeout 10
  
nb_get_retry 3
  
delay_before_retry 3
  
connect_port 443
  
}
  
}
  

  
real_server 192.168.108.162 443 {
  
weight 1
  
TCP_CHECK {
  
connect_timeout 10
  
nb_get_retry 3
  
delay_before_retry 3
  
connect_port 443
  
}
  
}
  
}
  配置RS
  
为了方便起见我自己编写了一个启动脚本,如下:
  
下载 lvsRealServer.sh
  


?View Code BASH  

  
1
  
2
  
3
  
4
  
5
  
6
  
7
  
8
  
9
  
10
  
11
  
12
  
13
  
14
  
15
  
16
  
17
  
18
  
19
  
20
  
21
  
22
  
23
  
24
  
25
  
26
  
27
  
28
  
29
  
30
  
31
  
32
  
33
  
34
  
35
  
36
  
37
  
38
  
39
  
40
  
41
  
42
  
43
  
44
  
45
  
46
  
47
  
48
  
49
  
50
  
51
  

  

  

#!/bin/bash
# Description : RealServer start/stop helper for LVS.
# Written by  : Cooper
# Last Modify : 2009.08.21
#
# Configures this host as an LVS real server. With LVS_TYPE=DR (default) the
# VIP is bound to lo:0; any other value uses the tunl0 interface (LVS/TUN).
# In both modes the ARP sysctls are set so the real server does not answer
# ARP requests for the VIP (that is the director's job).
#
# Usage: lvsRealServer.sh {start|stop}

VIP=192.168.108.180
LVS_TYPE=DR

# Bind the VIP and suppress ARP replies for it.
startrs()
{
  echo "start LVS of REALServer"

  if [ "$LVS_TYPE" == "DR" ]; then
    # DR mode: host route + /32 alias on the loopback interface
    /sbin/ifconfig lo:0 "$VIP" broadcast "$VIP" netmask 255.255.255.255 up
    /sbin/route add -host "$VIP" dev lo:0
  else
    # Tunnel mode: bind the VIP on the IPIP tunnel interface
    /sbin/ifconfig tunl0 "$VIP" netmask 255.255.255.255 broadcast "$VIP" up
    /sbin/route add -host "$VIP" dev tunl0
  fi

  # arp_ignore=1 / arp_announce=2: do not advertise or answer ARP for the
  # VIP, so only the director receives client traffic for it
  echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
  echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
  echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
  echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
}

# Remove the VIP binding and restore default ARP behavior.
stoprs()
{
  if [ "$LVS_TYPE" == "DR" ]; then
    /sbin/ifconfig lo:0 down
    echo "close LVS Directorserver"
  else
    /sbin/ifconfig tunl0 down
    echo "close LVS Tunnel server"
  fi

  echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
  echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
  echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
  echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
}

# ============ Main ===========

case "${1:-}" in
  "start")
    startrs
    ;;
  "stop")
    stoprs
    ;;
  # BUGFIX: the default arm was written "*" (a quoted literal), so it only
  # matched the literal argument '*' and bad/missing arguments silently
  # exited 0. An unquoted * is the wildcard default.
  *)
    echo "Usage $0 {start|stop}"
    exit 1
    ;;
esac
  

  该脚本默认启动LVS/DR模式,通过修改脚本变量可以实现LVS/Tunnel模式的切换。
  运行脚本进行RS设置后执行相关的服务
  [root@rs-1]# sh lvsRealServer.sh start
  
[root@rs-1]# service httpd restart
  
rs-2执行相同的操作
  Master/Backup LVS启动keepalived 服务
  [root@Master-LVS]# service keepalived start
  
Backup-LVS 同样执行如上命令启动keepalived
  查看Master-LVS上eth0接口在启动keepalived前后变化
  启动keepalived之前
  
# ip add show eth0
  
6: eth0:  mtu 1500 qdisc noqueue
  
link/ether 00:1d:7d:3d:1c:63 brd ff:ff:ff:ff:ff:ff
  
inet 192.168.108.108/24 brd 192.168.108.255 scope global eth0
  
inet6 fe80::21d:7dff:fe3d:1c63/64 scope link
  
valid_lft forever preferred_lft forever
  启动之后
  
# ip add show eth0
  
6: eth0:  mtu 1500 qdisc noqueue
  
link/ether 00:1d:7d:3d:1c:63 brd ff:ff:ff:ff:ff:ff
  
inet 192.168.108.108/24 brd 192.168.108.255 scope global eth0
  
inet 192.168.108.180/32 scope global eth0
  
inet6 fe80::21d:7dff:fe3d:1c63/64 scope link
  
valid_lft forever preferred_lft forever
  查看LVS运行情况
  [root@Master-LVS]# ipvsadm -ln
  
IP Virtual Server version 1.2.1 (size=4096)
  
Prot LocalAddress:Port Scheduler Flags
  
-> RemoteAddress:Port           Forward Weight ActiveConn InActConn
  
TCP  192.168.108.180:443 wlc persistent 50
  
-> 192.168.108.161:443          Route   1      0          0
  
-> 192.168.108.162:443          Route   1      0          0
  
TCP  192.168.108.180:80 wlc persistent 50
  
-> 192.168.108.161:80           Route   1      0          0
  
-> 192.168.108.162:80           Route   1      0          0
  故障测试
  RS故障
  切换到其中的一台rs上,如192.168.108.161
  
# service httpd stop
  这时查看Master/Backup LVS上的的日志输出
  
[root@Master-LVS]# tail -f /var/log/messages

?View Code LOG  

  
1
  
2
  
3
  
4
  
5
  
6
  
7
  

  

  

  
Sep  3 11:08:01 experiment Keepalived_healthcheckers: TCP connection to [192.168.108.161:80] failed !!!
  
Sep  3 11:08:01 experiment Keepalived_healthcheckers: Removing service [192.168.108.161:80] from VS [192.168.108.180:80]
  
Sep  3 11:08:01 experiment Keepalived_healthcheckers: Remote SMTP server [127.0.0.1:25] connected.
  
Sep  3 11:08:01 experiment Keepalived_healthcheckers: TCP connection to [192.168.108.161:443] failed !!!
  
Sep  3 11:08:01 experiment Keepalived_healthcheckers: Removing service [192.168.108.161:443] from VS [192.168.108.180:443]
  
Sep  3 11:08:01 experiment Keepalived_healthcheckers: Remote SMTP server [127.0.0.1:25] connected.
  
Sep  3 11:08:01 experiment Keepalived_healthcheckers: SMTP alert successfully sent.
  

  [root@Backup-LVS]# tail -f /var/log/messages
  


?View Code LOG  

  
1
  
2
  
3
  
4
  
5
  
6
  
7
  

  

  

  
Sep  3 11:08:02 localhost Keepalived_healthcheckers: TCP connection to [192.168.108.161:443] failed !!!
  
Sep  3 11:08:02 localhost Keepalived_healthcheckers: Removing service [192.168.108.161:443] from VS [192.168.108.180:443]
  
Sep  3 11:08:02 localhost Keepalived_healthcheckers: Remote SMTP server [127.0.0.1:25] connected.
  
Sep  3 11:08:02 localhost Keepalived_healthcheckers: TCP connection to [192.168.108.161:80] failed !!!
  
Sep  3 11:08:02 localhost Keepalived_healthcheckers: Removing service [192.168.108.161:80] from VS [192.168.108.180:80]
  
Sep  3 11:08:02 localhost Keepalived_healthcheckers: Remote SMTP server [127.0.0.1:25] connected.
  
Sep  3 11:08:03 localhost Keepalived_healthcheckers: SMTP alert successfully sent.
  

  通过日志可以看出Master与Backup几乎同时感知了RS1服务器已经故障,并且从IPVS中移除故障rs(或者将其权值标记为0也就是不可用)。并且向指定的邮箱发送邮件,Master和Backup都会发送邮件,其邮件标题会根据router_id的值区分出Master和Backup
  Master LVS-Router故障
  停止Master-LVS的keepalived服务,人为造成故障
  
[root@Master-LVS]# service keepalived stop
  这时查看Backup-LVS的log信息
  
[root@Backup-LVS]# tail -f /var/log/messages

?View Code LOG  

  
1
  
2
  
3
  
4
  
5
  
6
  
7
  

  

  

  
Sep  3 11:23:28 localhost Keepalived_vrrp: VRRP_Instance(VI_1) Transition to MASTER STATE
  
Sep  3 11:23:29 localhost Keepalived_vrrp: VRRP_Instance(VI_1) Entering MASTER STATE
  
Sep  3 11:23:29 localhost Keepalived_vrrp: VRRP_Instance(VI_1) setting protocol VIPs.
  
Sep  3 11:23:29 localhost Keepalived_vrrp: VRRP_Instance(VI_1) Sending gratuitous ARPs on eth0 for 192.168.108.180
  
Sep  3 11:23:29 localhost Keepalived_vrrp: Netlink reflector reports IP 192.168.108.180 added
  
Sep  3 11:23:29 localhost Keepalived_healthcheckers: Netlink reflector reports IP 192.168.108.180 added
  
Sep  3 11:23:34 localhost Keepalived_vrrp: VRRP_Instance(VI_1) Sending gratuitous ARPs on eth0 for 192.168.108.180
  

  由日志可以看出,Backup-LVS监测到Master-LVS故障后立即将自己的身份切换为Master然后将VIP设置到自己的eth0端口上并发送ARP广播。
  现在我手动将Master-LVS的keepalived的服务起来,然后再查看Backup-LVS的log信息
  
[root@Master-LVS]# service keepalived start
  [root@Backup-LVS]# tail -f /var/log/messages

?View Code LOG  

  
1
  
2
  
3
  
4
  
5
  

  

  

  
Sep  3 11:30:44 localhost Keepalived_vrrp: VRRP_Instance(VI_1) Received higher prio advert
  
Sep  3 11:30:44 localhost Keepalived_vrrp: VRRP_Instance(VI_1) Entering BACKUP STATE
  
Sep  3 11:30:44 localhost Keepalived_vrrp: VRRP_Instance(VI_1) removing protocol VIPs.
  
Sep  3 11:30:44 localhost Keepalived_vrrp: Netlink reflector reports IP 192.168.108.180 removed
  
Sep  3 11:30:44 localhost Keepalived_healthcheckers: Netlink reflector reports IP 192.168.108.180 removed
  

  由Backup-LVS的日志可以看到,其检测到比自己优先级高的实例后将自己的身份切换成了Backup,然后将VIP从eth0端口移除,并发送心跳给Master-LVS。
  piranha与keepalived比较
  经过比较得知,piranha的主-备的地位是相同的,也就是说主故障后备就会代替主,经其地位从备切换为主,而当先前的主恢复正常后则先前的主便成了备,其不会主动切换自己的身份为主,当前的备可以检测到先前的主已经恢复但并不会主动将自己的身份修改为备。
  而keepalived则是主备分明的,其利用优先级的设置可以严格的制定主备身份。
  参考文章
  LVS-HOWTO
  
http://www.keepalived.org/documentation.html
  
http://bbs.linuxtone.org/thread-1077-1-1.html
  资源下载
  
Keepalived-UserGuide    CN EN
  出自:salogs.com



运维网声明 1、欢迎大家加入本站运维交流群:群②:261659950 群⑤:202807635 群⑦870801961 群⑧679858003
2、本站所有主题由该帖子作者发表,该帖子作者与运维网享有帖子相关版权
3、所有作品的著作权均归原作者享有,请您和我们一样尊重他人的著作权等合法权益。如果您对作品感到满意,请购买正版
4、禁止制作、复制、发布和传播具有反动、淫秽、色情、暴力、凶杀等内容的信息,一经发现立即删除。若您因此触犯法律,一切后果自负,我们对此不承担任何责任
5、所有资源均系网友上传或者通过网络收集,我们仅提供一个展示、介绍、观摩学习的平台,我们不对其内容的准确性、可靠性、正当性、安全性、合法性等负责,亦不承担任何法律责任
6、所有作品仅供您个人学习、研究或欣赏,不得用于商业或者其他用途,否则,一切后果均由您自己承担,我们对此不承担任何法律责任
7、如涉及侵犯版权等问题,请您及时通知我们,我们将立即采取措施予以解决
8、联系人Email:admin@iyunv.com 网址:www.yunweiku.com

所有资源均系网友上传或者通过网络收集,我们仅提供一个展示、介绍、观摩学习的平台,我们不对其承担任何法律责任,如涉及侵犯版权等问题,请您及时通知我们,我们将立即处理,联系人Email:kefu@iyunv.com,QQ:1061981298 本贴地址:https://www.yunweiku.com/thread-657378-1-1.html 上篇帖子: lvs架构的DR模式+keepalived高可用 下篇帖子: Haproxy+keepalived (Web负载均衡解决方案)
您需要登录后才可以回帖 登录 | 立即注册

本版积分规则

扫码加入运维网微信交流群X

扫码加入运维网微信交流群

扫描二维码加入运维网微信交流群,最新一手资源尽在官方微信交流群!快快加入我们吧...

扫描微信二维码查看详情

客服E-mail:kefu@iyunv.com 客服QQ:1061981298


QQ群⑦:运维网交流群⑦ QQ群⑧:运维网交流群⑧ k8s群:运维网kubernetes交流群


提醒:禁止发布任何违反国家法律、法规的言论与图片等内容;本站内容均来自个人观点与网络等信息,非本站认同之观点.


本站大部分资源是网友从网上搜集分享而来,其版权均归原作者及其网站所有,我们尊重他人的合法权益,如有内容侵犯您的合法权益,请及时与我们联系进行核实删除!



合作伙伴: 青云cloud

快速回复 返回顶部 返回列表