cd /etc/yum.repos.d/
wget http://download.gluster.org/pub/gluster/glusterfs/3.5/3.5.1/EPEL.repo/glusterfs-epel.repo
If that link does not work, just write the repo file yourself; it is simple enough, as follows:
[root@server203 yum.repos.d]# cat glusterfs-epel.repo
# Place this file in your /etc/yum.repos.d/ directory
[glusterfs-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-$releasever/$basearch/
enabled=1
skip_if_unavailable=1
gpgcheck=1
gpgkey=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/pub.key
[glusterfs-noarch-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes.
baseurl=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-$releasever/noarch
enabled=1
skip_if_unavailable=1
gpgcheck=1
gpgkey=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/pub.key
[glusterfs-source-epel]
name=GlusterFS is a clustered file-system capable of scaling to several petabytes. - Source
baseurl=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-$releasever/SRPMS
enabled=0
skip_if_unavailable=1
gpgcheck=1
gpgkey=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/pub.key
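With the repo file in place, installation should be the usual yum routine. The package names below are the ones published in the GlusterFS EPEL repo (on pure clients, glusterfs-fuse alone is enough):

yum install -y glusterfs-server glusterfs-fuse
glusterfs --version    # confirm the installed version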
[root@server203 ~]# gluster volume status
Status of volume: zhou
Gluster process                                 Port    Online  Pid
------------------------------------------------------------------------------
Brick server202:/data                           49153   Y       45883
Brick server203:/data                           49153   Y       45012
NFS Server on localhost                         2049    Y       45026
Self-heal Daemon on localhost                   N/A     Y       45030
NFS Server on server202                         2049    Y       45897
Self-heal Daemon on server202                   N/A     Y       45901

Task Status of Volume zhou
------------------------------------------------------------------------------
There are no active volume tasks
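For reference, a client mounts the volume by its name, not by a brick path. A minimal sketch for the volume shown above (/mnt/zhou is an example mount point; any server in the pool can serve as the mount target, since the client fetches the full brick list from it):

mkdir -p /mnt/zhou
mount -t glusterfs server203:/zhou /mnt/zhou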
--------------------------- Common problems and fixes ---------------------------
[root@server203 ~]# gluster peer status
Connection failed. Please check if gluster daemon is operational.
Fix: start the daemons first: /etc/init.d/glusterd start && /etc/init.d/glusterfsd start
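On a SysV-init CentOS like the machines in this walkthrough, it is also worth registering the init scripts so the daemons come back after a reboot:

chkconfig glusterd on
chkconfig glusterfsd on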
[root@server203 ~]# gluster peer probe server202
peer probe: failed: Probe returned with unknown errno 107
Fix: shut down the iptables firewall (errno 107 is ENOTCONN, i.e. the probe could not reach the peer).
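If switching iptables off entirely is too blunt, opening GlusterFS's ports is enough. A sketch for CentOS 6 iptables: 24007-24008 are the management ports, and each brick listens on its own port from 49152 upward (which matches the 49153 seen in the status output above); widen the brick range to cover however many bricks you run:

iptables -I INPUT -p tcp --dport 24007:24008 -j ACCEPT
iptables -I INPUT -p tcp --dport 49152:49160 -j ACCEPT   # one port per brick
service iptables save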
[root@New_server ~]# gluster volume remove-brick zhou server202:/data
WARNING: running remove-brick commands without an explicit option is deprecated, and will be removed in the next version of GlusterFS.
To forcibly remove a brick in the next version of GlusterFS, you will need to use "remove-brick force".
Whether you are creating a volume or removing a brick, just append force to the command.
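For the replicated volume in this walkthrough there is one more wrinkle: removing one of the two bricks also means dropping the replica count, so the full command would look roughly like this (replica 1 turns zhou into a plain distribute volume):

gluster volume remove-brick zhou replica 1 server202:/data force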
--------------------------- Volume management commands ---------------------------
Run help to see which management commands are available:
[root@New_server ~]# gluster volume help
volume info [all|<VOLNAME>] - list information of all volumes
volume create <NEW-VOLNAME> [stripe <COUNT>] [replica <COUNT>] [transport <tcp|rdma|tcp,rdma>] <NEW-BRICK>?<vg_name>... [force] - create a new volume of specified type with mentioned bricks
volume delete <VOLNAME> - delete volume specified by <VOLNAME>
volume start <VOLNAME> [force] - start volume specified by <VOLNAME>
volume stop <VOLNAME> [force] - stop volume specified by <VOLNAME>
volume add-brick <VOLNAME> [<stripe|replica> <COUNT>] <NEW-BRICK> ... [force] - add brick to volume <VOLNAME>
volume remove-brick <VOLNAME> [replica <COUNT>] <BRICK> ... [start|stop|status|commit|force] - remove brick from volume <VOLNAME>
volume rebalance <VOLNAME> [fix-layout] {start|stop|status} [force] - rebalance operations
volume replace-brick <VOLNAME> <BRICK> <NEW-BRICK> {start [force]|pause|abort|status|commit [force]} - replace-brick operations
volume set <VOLNAME> <KEY> <VALUE> - set options for volume <VOLNAME>
volume help - display help for the volume command
volume log rotate <VOLNAME> [BRICK] - rotate the log file for corresponding volume/brick
volume sync <HOSTNAME> [all|<VOLNAME>] - sync the volume information from a peer
volume reset <VOLNAME> [option] [force] - reset all the reconfigured options
volume profile <VOLNAME> {start|stop|info [nfs]} - volume profile operations
volume quota <VOLNAME> {enable|disable|list [<path> ...]|remove <path>| default-soft-limit <percent>} |
volume quota <VOLNAME> {limit-usage <path> <size> [<percent>]} |
volume quota <VOLNAME> {alert-time|soft-timeout|hard-timeout} {<time>} - quota translator specific operations
volume top <VOLNAME> {open|read|write|opendir|readdir|clear} [nfs|brick <brick>] [list-cnt <value>] |
volume top <VOLNAME> {read-perf|write-perf} [bs <size> count <count>] [brick <brick>] [list-cnt <value>] - volume top operations
volume status [all | <VOLNAME> [nfs|shd|<BRICK>|quotad]] [detail|clients|mem|inode|fd|callpool|tasks] - display status of all or specified volume(s)/brick
volume heal <VOLNAME> [{full | statistics {heal-count {replica <hostname:brickname>}} |info {healed | heal-failed | split-brain}}] - self-heal commands on volume specified by <VOLNAME>
volume statedump <VOLNAME> [nfs|quotad] [all|mem|iobuf|callpool|priv|fd|inode|history]... - perform statedump on bricks
volume list - list all volumes in cluster
volume clear-locks <VOLNAME> <path> kind {blocked|granted|all}{inode [range]|entry [basename]|posix [range]} - Clear locks held on path
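Pulling the commands together, an end-to-end session for the setup in this walkthrough would look roughly like this; the force on create follows the blanket advice above:

gluster peer probe server202
gluster volume create zhou replica 2 transport tcp server203:/data server202:/data force
gluster volume start zhou
gluster volume info zhou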