Originally I planned to simply update the earlier article "Deploying the Mimic release of the Ceph distributed storage system", but the layout did not lend itself to that, so I wrote this follow-up instead. As before, all of the configuration and deployment below is done from the ceph01 node; the new node being added is ceph04.
1. Configure Ansible
# cat /etc/ansible/hosts | grep -v ^# | grep -v ^$
[node]
192.168.100.117
192.168.100.118
[new-node]
192.168.100.119
2. Configure hosts
# cat /etc/hosts
192.168.100.116 ceph01
192.168.100.117 ceph02
192.168.100.118 ceph03
192.168.100.119 ceph04
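A quick way to confirm the new entry resolves (an optional check, not part of the original steps):
# getent hosts ceph04
192.168.100.119     ceph04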
3. Copy the SSH key
# ssh-copy-id -i .ssh/id_rsa.pub root@ceph04
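With the key in place, it is worth confirming that Ansible can actually reach the new node before running any playbook; an ad-hoc ping (optional check) should report pong on success:
# ansible new-node -m ping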
4. Configure the node with Ansible
# vim ceph.yaml
- hosts: new-node
  remote_user: root
  tasks:
    - name: Disable SELinux
      lineinfile:
        path: /etc/selinux/config
        regexp: '^SELINUX='
        line: 'SELINUX=disabled'
    - name: Stop firewalld
      service:
        name: firewalld
        state: stopped
        enabled: no
- hosts: new-node
  remote_user: root
  tasks:
    - name: Copy hosts file
      copy: src=/etc/hosts dest=/etc/hosts
    - name: Copy EPEL repo
      copy: src=/etc/yum.repos.d/epel.repo dest=/etc/yum.repos.d/epel.repo
    - name: Import the Ceph release key
      rpm_key:
        state: present
        key: https://mirrors.aliyun.com/ceph/keys/release.asc
    - name: Copy Ceph repo
      copy: src=/etc/yum.repos.d/ceph.repo dest=/etc/yum.repos.d/ceph.repo
    - name: Clean yum cache
      command: yum clean all
      args:
        warn: no
    - name: Rebuild yum metadata cache
      command: yum makecache
      args:
        warn: no
- hosts: new-node
  remote_user: root
  tasks:
    - name: Install prerequisite packages
      yum:
        name: "{{ packages }}"
      vars:
        packages:
          - yum-plugin-priorities
          - snappy
          - leveldb
          - gdisk
          - python-argparse
          - gperftools-libs
          - ntp
          - ntpdate
          - ntp-doc
    - name: Start ntpdate
      service:
        name: ntpdate
        state: started
        enabled: yes
    - name: Start ntpd
      service:
        name: ntpd
        state: started
        enabled: yes
# ansible-playbook ceph.yaml
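One caveat: the lineinfile task only edits /etc/selinux/config, which takes effect at the next reboot. If a reboot is inconvenient, SELinux can be switched to permissive immediately with an ad-hoc command (an optional extra, not in the playbook above):
# ansible new-node -m command -a 'setenforce 0'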
5. Install Ceph on the node with ceph-deploy
# cd /etc/ceph/
# ceph-deploy install --release mimic ceph04
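To confirm the packages landed (optional check), query the Ceph version on the new node:
# ssh ceph04 ceph --version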
6. Add a monitor to the existing cluster
# ceph-deploy mon add ceph04 --address 192.168.100.119
# cat ceph.conf
[global]
fsid = 09f5d004-5759-4b54-8c4b-e3ebb0b416b2
public_network = 192.168.100.0/24
cluster_network = 192.168.100.0/24
mon_initial_members = ceph01, ceph02, ceph03, ceph04
mon_host = 192.168.100.116,192.168.100.117,192.168.100.118,192.168.100.119
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
# ceph mon stat
e2: 4 mons at {ceph01=192.168.100.116:6789/0,ceph02=192.168.100.117:6789/0,ceph03=192.168.100.118:6789/0,ceph04=192.168.100.119:6789/0}, election epoch 60, leader 0 ceph01, quorum 0,1,2,3 ceph01,ceph02,ceph03,ceph04
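For more detail than mon stat gives, the full monitor map and quorum state can be dumped as well (optional):
# ceph quorum_status --format json-pretty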
7. Copy the admin keyring to the node
# ceph-deploy admin ceph04
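Once the keyring is in place, cluster commands work from ceph04 itself; a quick check (optional) should return HEALTH_OK:
# ssh ceph04 ceph health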
8. Create the Ceph manager daemon
# ceph-deploy mgr create ceph04
# ceph -s|grep mgr
mgr: ceph01(active), standbys: ceph03, ceph02, ceph04
9. Create the OSD
# ceph-deploy osd create --data /dev/sdb ceph04
# ceph osd stat
4 osds: 4 up, 4 in; epoch: e53
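To see where the new OSD sits in the CRUSH hierarchy (optional check):
# ceph osd tree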
# ceph -s
  cluster:
    id:     09f5d004-5759-4b54-8c4b-e3ebb0b416b2
    health: HEALTH_OK

  services:
    mon: 4 daemons, quorum ceph01,ceph02,ceph03,ceph04
    mgr: ceph01(active), standbys: ceph03, ceph02, ceph04
    osd: 4 osds: 4 up, 4 in

  data:
    pools:   1 pools, 128 pgs
    objects: 64 objects, 136 MiB
    usage:   4.3 GiB used, 76 GiB / 80 GiB avail
    pgs:     128 active+clean

Note: if all you want is to add one more disk (that is, one more OSD), this step alone is sufficient.
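For example, assuming an existing node such as ceph02 has a spare disk /dev/sdc (a hypothetical device name here), adding it is the same single command:
# ceph-deploy osd create --data /dev/sdc ceph02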