Pull the ceph image

docker pull ceph/daemon
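Pulling without a tag grabs latest, whose Ceph release changes over time. A minimal sketch of pinning and verifying the image (the latest-luminous tag here is only an illustration; substitute whichever release tag you actually want):

# Pin a specific release tag instead of relying on latest (tag name is illustrative)
docker pull ceph/daemon:latest-luminous
# Confirm the image is present locally
docker images ceph/daemon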
Prepare the disk

mkfs.xfs /dev/vdb1
mkdir -p /opt/ceph/osd/vdb
mount -o defaults,noatime,nodiratime,noexec,nodev,nobarrier /dev/vdb1 /opt/ceph/osd/vdb
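This mount does not survive a reboot. A minimal sketch of a matching /etc/fstab entry (device and mount point taken from above; note that the nobarrier option has been dropped from newer kernels, so check your xfs version supports it):

# Persist the OSD mount across reboots, with the same options as the manual mount
echo '/dev/vdb1 /opt/ceph/osd/vdb xfs defaults,noatime,nodiratime,noexec,nodev,nobarrier 0 0' >> /etc/fstab
# Verify the entry parses and mounts cleanly
mount -a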
Deploy the mon

docker run -d \
--name mon0 \
-e MON_NAME=mon0 \
-e MON_IP=10.111.252.165 \
-e CEPH_PUBLIC_NETWORK=10.111.252.128/25 \
-e CEPH_CLUSTER_NETWORK=192.168.10.128/25 \
--net=host \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph/:/var/lib/ceph/ \
-v /etc/localtime:/etc/localtime:ro \
ceph/daemon mon
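With --net=host the container shares the host's network namespace, so MON_IP must be an address that actually exists on the host. A quick sanity check once the container is running (standard docker/ceph commands; output will vary):

# Follow the mon's startup log
docker logs -f mon0
# Ask the mon for cluster status from inside the container
docker exec mon0 ceph -s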
Edit ceph.conf

[global]
fsid = 41cbd215-1c41-4a30-abd6-c597375f8930
mon initial members = mon0
mon host = 10.111.252.165
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
public network = 10.111.252.128/25
cluster network = 192.168.10.128/25
osd journal size = 100
osd pool default size = 3
osd pool default min size = 2
osd crush update on start = false
[mon]
mon initial members = mon0
mon host = 10.111.252.165
[mon.mon0]
host = mon0
mon addr = 10.111.252.165:6789
[client]
rbd default format = 2

After editing, restart the mon so the new settings take effect:

docker restart mon0
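To confirm the restarted mon picked up the new values, you can query its admin socket from inside the container (a sketch; the option name matches the ceph.conf key above):

# Query the running mon's view of a setting via the admin socket
docker exec mon0 ceph daemon mon.mon0 config get osd_pool_default_size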
Deploy the osd

docker run -d \
--name=osdvdb \
--net=host \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph/:/var/lib/ceph/ \
-v /opt/ceph/osd/vdb:/var/lib/ceph/osd \
-e OSD_TYPE=directory \
-v /etc/localtime:/etc/localtime:ro \
ceph/daemon osd
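OSD_TYPE=directory tells the entrypoint to run a directory-backed OSD on the bind-mounted path instead of consuming a raw device. Once it is up, the new OSD should appear in the OSD map (a sketch; run from any node with /etc/ceph populated):

# The new OSD should be counted as "up" and "in"
docker exec osdvdb ceph osd stat
docker exec osdvdb ceph osd tree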
Copy /etc/ceph to the remaining nodes and deploy OSDs on them

scp -r /etc/ceph root@10.111.252.222:/etc/
scp -r /var/lib/ceph root@10.111.252.222:/var/lib/
ssh 10.111.252.222 rm -rf /var/lib/ceph/mon/
ssh 10.111.252.222 rm -rf /var/lib/ceph/osd/
scp -r /etc/ceph root@10.111.252.231:/etc/
scp -r /var/lib/ceph root@10.111.252.231:/var/lib/
ssh 10.111.252.231 rm -rf /var/lib/ceph/mon/
ssh 10.111.252.231 rm -rf /var/lib/ceph/osd/
mkfs.xfs /dev/vdb1
mkdir -p /opt/ceph/osd/vdb
mount -o defaults,noatime,nodiratime,noexec,nodev,nobarrier \
/dev/vdb1 /opt/ceph/osd/vdb
docker run -d \
--name=osdvdb \
--net=host \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph/:/var/lib/ceph/ \
-v /opt/ceph/osd/vdb:/var/lib/ceph/osd \
-e OSD_TYPE=directory \
-v /etc/localtime:/etc/localtime:ro \
ceph/daemon osd
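The per-node commands above are identical apart from the IP, so they script easily. A minimal sketch, assuming passwordless SSH as root and that every node's data disk is /dev/vdb1 (the NODES list is illustrative):

# Prepare each remote node and start its OSD container
NODES="10.111.252.222 10.111.252.231"
for node in $NODES; do
  scp -r /etc/ceph root@$node:/etc/
  scp -r /var/lib/ceph root@$node:/var/lib/
  # Drop the copied mon/osd state, prepare the disk, then start the OSD
  ssh root@$node 'rm -rf /var/lib/ceph/mon/ /var/lib/ceph/osd/
    mkfs.xfs /dev/vdb1
    mkdir -p /opt/ceph/osd/vdb
    mount -o defaults,noatime,nodiratime,noexec,nodev,nobarrier /dev/vdb1 /opt/ceph/osd/vdb
    docker run -d --name=osdvdb --net=host \
      -v /etc/ceph:/etc/ceph \
      -v /var/lib/ceph/:/var/lib/ceph/ \
      -v /opt/ceph/osd/vdb:/var/lib/ceph/osd \
      -e OSD_TYPE=directory \
      -v /etc/localtime:/etc/localtime:ro \
      ceph/daemon osd'
done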
Check the ceph status

ceph -s
    cluster b448ce73-47eb-4a77-9a62-48ac7abc3218
     health HEALTH_OK
     monmap e1: 1 mons at {mon0=10.111.252.165:6789/0}
            election epoch 4, quorum 0 mon0
     osdmap e18: 3 osds: 3 up, 3 in
            flags sortbitwise
      pgmap v49: 64 pgs, 1 pools, 0 bytes data, 0 objects
            401 MB used, 149 GB / 149 GB avail
                  64 active+clean
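With HEALTH_OK, a quick end-to-end test is to create a pool and an RBD image in it (pool name, image name, and PG count below are just examples; the image format comes from the [client] section above):

# Create a small test pool (name and PG count are illustrative)
ceph osd pool create test 64 64
# Create a 1 GiB image; format 2 comes from "rbd default format" in ceph.conf
rbd create test/img0 --size 1024
rbd info test/img0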