III. Client testing
1. First, update the hosts file (an internal DNS service works just as well, as long as the client can resolve the server names):
[root@tvm-test ~]# cat /etc/hosts |grep gluster
192.168.56.241 glusterfs-node01.office.test
192.168.56.242 glusterfs-node02.office.test
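If name resolution is in doubt, it can be verified from the client first (a minimal sketch; getent consults /etc/hosts as well as DNS):
# should print the two addresses configured above
getent hosts glusterfs-node01.office.test
getent hosts glusterfs-node02.office.test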
2. Install the "glusterfs-fuse" package so that mount recognizes the glusterfs filesystem type:
[root@tvm-test ~]# yum install glusterfs-fuse -y
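A quick sanity check before mounting (not strictly required): the glusterfs-fuse package provides the mount.glusterfs helper that mount -t glusterfs relies on.
rpm -q glusterfs-fuse
command -v mount.glusterfs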
3. Mount the volume:
[root@tvm-test ~]# mount -t glusterfs glusterfs-node01.office.test:/gv0 /mnt
[root@tvm-test ~]# df -h |grep mnt
glusterfs-node01.office.test:/gv0 1.0T 33M 1.0T 1% /mnt
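To make the mount survive reboots, an /etc/fstab entry along these lines can be used (a sketch; depending on the GlusterFS version the option is spelled backupvolfile-server or backup-volfile-servers, and it only matters at mount time, letting the client fetch the volume definition from node02 if node01 happens to be down):
# /etc/fstab
glusterfs-node01.office.test:/gv0  /mnt  glusterfs  defaults,_netdev,backupvolfile-server=glusterfs-node02.office.test  0 0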
4. Test writing files:
[root@tvm-test ~]# for i in `seq -w 1 100`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
Confirm the file count:
[root@tvm-test ~]# ls /mnt |wc -l
100
Then compare the file count on each node's brick:
[root@tvm-glusterfs-node01 ~]# ls /data/brick1/gv0/ |wc -l
100
[root@tvm-glusterfs-node02 ~]# ls /data/brick1/gv0/ |wc -l
100
Once more:
[root@tvm-test ~]# for i in `seq -w 1 234`; do cp -rp /var/log/messages /mnt/copy-test-again-$i; done
[root@tvm-test ~]# ls /mnt |wc -l
334
[root@tvm-glusterfs-node01 ~]# ls /data/brick1/gv0/ |wc -l
334
[root@tvm-glusterfs-node02 ~]# ls /data/brick1/gv0/ |wc -l
334
5. Summary
Clearly, with a "Type: Replicate" volume we have two copies of the data: node01 and node02 mirror each other, much like RAID 1.
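The replication layout can be confirmed on either node with gluster volume info; its output should list "Type: Replicate" along with both bricks.
gluster volume info gv0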
IV. Failure testing
1. A node loses network connectivity and goes offline
1) Current state
-----------
[root@tvm-glusterfs-node01 ~]# gluster peer status
Number of Peers: 1
Hostname: glusterfs-node02.office.test
Uuid: 35dbbb8e-d7b2-42f3-bea4-fb69dcc680fd
State: Peer in Cluster (Connected)
[root@tvm-glusterfs-node02 ~]# gluster peer status
Number of Peers: 1
Hostname: glusterfs-node01.office.test
Uuid: 43c1c156-07f8-4434-b8f0-627e26a1ffdc
State: Peer in Cluster (Connected)
[root@tvm-test ~]# mount -t glusterfs glusterfs-node01.office.test:/gv0 /mnt
[root@tvm-test ~]# df -h |grep mnt
glusterfs-node01.office.test:/gv0 1.0T 33M 1.0T 1% /mnt
The client holds connections to glusterd on node01 (port 24007) and to the brick port (49152) on both nodes:
[root@tvm-test ~]# ss -ant |grep -E "49152|24007" |grep 'ESTAB'
ESTAB 0 0 192.168.56.251:1022 192.168.56.241:24007
ESTAB 0 0 192.168.56.251:1017 192.168.56.241:49152
ESTAB 0 0 192.168.56.251:1016 192.168.56.242:49152
-----------
2) Take tvm-glusterfs-node01 offline (disconnect its NIC) and check the cluster state.
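One way to simulate the outage (a sketch; eth0 is only a guess at the interface name, and in a virtualized lab simply detaching the virtual NIC works just as well):
# run on tvm-glusterfs-node01; replace eth0 with the actual interface
ip link set eth0 down
With node01 unreachable, node02 now sees its peer as disconnected: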
[root@tvm-glusterfs-node02 ~]# gluster peer status
Number of Peers: 1
Hostname: glusterfs-node01.office.test
Uuid: 43c1c156-07f8-4434-b8f0-627e26a1ffdc
State: Peer in Cluster (Disconnected)
[root@tvm-glusterfs-node02 ~]# gluster volume status
Status of volume: gv0
Gluster process Port Online Pid
------------------------------------------------------------------------------
Brick glusterfs-node02.office.test:/data/brick1/gv0 49152 Y 1085
NFS Server on localhost 2049 Y 1090
Self-heal Daemon on localhost N/A Y 1095
Task Status of Volume gv0
------------------------------------------------------------------------------
There are no active volume tasks
3) Check access from the client:
[root@tvm-test ~]# ls /mnt/
ls: cannot access /mnt/: Transport endpoint is not connected
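The brief window in which the mount point throws errors is most likely governed by the client-side ping timeout (the network.ping-timeout volume option, which defaults to 42 seconds); lowering it shortens the hang after a node failure, at the cost of more false positives on a flaky network. A sketch of tuning it:
# run on any node that is still up; 10 seconds is just an example value
gluster volume set gv0 network.ping-timeout 10
# reconfigured options show up in the volume info output
gluster volume info gv0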
Retry from the client a short while later, and access has recovered on its own:
[root@tvm-test ~]# ls /mnt/ |wc -l
393
Interesting: no manual remount was needed. Now let's try writing files and check whether they land on node02:
[root@tvm-test ~]# for i in `seq -w 1 10`; do cp -rp /var/log/messages /mnt/is_write_on_node02-$i; done
Compare on node02:
[root@tvm-glusterfs-node02 ~]# ls /data/brick1/gv0/ |wc -l
403
[root@tvm-glusterfs-node02 ~]# ls /data/brick1/gv0/ |grep 'write'
is_write_on_node02-01
is_write_on_node02-02
is_write_on_node02-03
is_write_on_node02-04
is_write_on_node02-05
is_write_on_node02-06
is_write_on_node02-07
is_write_on_node02-08
is_write_on_node02-09
is_write_on_node02-10
Sure enough, the files were written to node02 (the only node still available).
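While node01 is still offline, the files that exist only on node02 are queued for self-heal; this can be inspected from node02 (heal info lists the entries each brick still needs to sync):
gluster volume heal gv0 info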
4) Bring node01 back online; the data should sync over. Immediately after it reconnects, node01 still shows the old count:
[root@tvm-glusterfs-node01 ~]# ls /data/brick1/gv0/ |wc -l
393
About one second later (this delay presumably depends on how much data needs to heal):
[root@tvm-glusterfs-node01 ~]# ls /data/brick1/gv0/ |wc -l
403
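Self-heal normally kicks in on its own once node01 reconnects, but it can also be triggered and verified by hand (a sketch):
# ask the self-heal daemon to process the pending entries immediately
gluster volume heal gv0
# once healing is done, this should report zero entries for both bricks
gluster volume heal gv0 info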
ZYXW. References
1. Official documentation
http://gluster.readthedocs.org/en/latest/Quick-Start-Guide/Quickstart/
2. Set up GlusterFS on two nodes
http://banoffeepiserver.com/glusterfs/set-up-glusterfs-on-two-nodes.html
3. Chapter 2: A reliable storage backend (in Chinese)
http://inthecloud.readthedocs.org/zh_CN/draft/posts/ch02.html