378 发表于 2017-12-5 12:10:01

openstack 高可用(HA)


[*]使用ceph-deploy  1.1 磁盘分区
  1.2 安装ceph-deploy
  1.3 初始化ceph和mon
  1.4 配置osd
  1.5 创建pool
  1.6 添加用户key
  2.手工搭建
  2.1 安装ceph
  2.2 打开防火墙策略
  2.3 创建mon
  2.3 添加mon
  2.4 添加OSD
[*]使用ceph-deploy
  使用ceph官方源(不要使用centos内置ceph)
  1.1 磁盘分区
  获得你阵列的alignment参数
cat /sys/block/sdb/queue/optimal_io_size 1048576
cat /sys/block/sdb/queue/minimum_io_size 262144
cat /sys/block/sdb/alignment_offset 0
cat /sys/block/sdb/queue/physical_block_size 512
  把optimal_io_size的值与alignment_offset的值相加,之后除以physical_block_size的值.:(1048576 + 0) / 512 = 2048。
  1.2 安装ceph-deploy
target = node-1
  yum install ceph-deploy -y
  1.3 初始化ceph和mon
  target = node-1
  mkdir ~/ceph-deploy && cd ~/ceph-deploy
  ceph-deploy new node-1
编辑ceph.conf
  vim ceph.conf

  fsid = 6b2a4554-4f3d-4ae6-be57-6f8b0f9bd744
  mon_initial_members = node-1
  mon_host = 192.168.56.21
  auth_cluster_required = cephx
  auth_service_required = cephx
  auth_client_required = cephx
public_network=192.168.56.0/24
osd pool default size = 3
osd journal size = 1024
安装ceph包(会下载使用epel源)
  ceph-deploy install node-1 node-2 node-3
如无外网,可手工安装
  yum install ceph -y
初始化mon节点
  ceph-deploy mon create-initial
添加 Monitors
  ceph-deploy mon add node-2 node-3
把配置文件和 admin 密钥拷贝到管理节点和 Ceph 节点
  ceph-deploy admin node-1 node-2 node-3
  1.4 配置osd
  ceph-deploy disk zap node-3:vdb1
  ceph-deploy disk zap node-3:vdb2
为磁盘打osd标签
  sgdisk --typecode=1:4fbd7e29-9d25-41b8-afd0-062c0ceff05d /dev/vdb
  sgdisk --typecode=2:4fbd7e29-9d25-41b8-afd0-062c0ceff05d /dev/vdb
  blkid -p -o udev /dev/vdb2
  udevadm info -q property /dev/vdb1
prepare osd
  ceph-deploy osd prepare node-3:vdb1
  ceph-deploy osd prepare node-3:vdb2
activate osd
  ceph-deploy osd activate node-3:vdb1
  ceph-deploy osd activate node-3:vdb2
  1.5 创建pool
pool大小 = osd个数 * 200 / 副本数 * 比例
  ceph osd pool create vms 128
  ceph osd pool create volumes 128
  1.6 添加用户key

  ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
  ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
  ceph auth get-or-create client.nova mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
  ceph auth get-or-create client.glance >ceph.client.glance.keyring
  ceph auth get-or-create client.cinder >ceph.client.cinder.keyring
  ceph auth get-or-create client.nova >ceph.client.nova.keyring
  centos7.2
  newton需要使用Jewel
  2.手工搭建
  2.1 安装ceph
  yum install ceph
  2.2 打开防火墙策略
  在monitor节点
  iptables -I INPUT -p tcp --dport 6789 -j ACCEPT #仅在monitor上打开
  iptables -I INPUT -p tcp --match multiport --dports 6800:7300 -j ACCEPT #所有节点上打开
  在osd 节点添加 /etc/sysconfig/iptables文件,
  -A INPUT -p tcp -m multiport --dports 6789 -m comment --comment "010 ceph-mon allow" -j ACCEPT
  -A INPUT -p tcp -m multiport --dports 6800:7100 -m comment --comment "011 ceph-osd allow" -j ACCEPT
  2.3 创建mon
  ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
  ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *'
  ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
  monmaptool --create --add node-1 1.1.1.61 --fsid a7f64266-0894-4f1e-a635-d0aeaca0e993 /tmp/monmap
  mkdir /var/lib/ceph/mon/ceph-node-1
  chown -R ceph:ceph /var/lib/ceph/mon/ceph-node-1
  sudo -u ceph ceph-mon --cluster ceph --mkfs -i node-1 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
  touch /var/lib/ceph/mon/ceph-node-1/done
  systemctl enable ceph-mon@node-1
  systemctl start ceph-mon@node-1
  ceph -s
  2.3 添加mon
  mkdir /var/lib/ceph/mon/ceph-node-2
  chown -R ceph:ceph /var/lib/ceph/mon/ceph-node-2
  ceph auth get mon. -o /tmp/key-filename
  ceph mon getmap -o /tmp/map-filename
  monmaptool --print /tmp/map-filename
  sudo -u ceph ceph-mon -i node-2 --mkfs --monmap /tmp/map-filename --keyring /tmp/key-filename
  touch /var/lib/ceph/mon/ceph-node-2/done
  systemctl enable ceph-mon@node-2
  systemctl start ceph-mon@node-2
  2.4 添加OSD
在 node-1
  scp /var/lib/ceph/bootstrap-osd/ceph.keyring node-3:/var/lib/ceph/bootstrap-osd/ceph.keyring
清除OSD原有数据
  ceph-disk zap /dev/vdb
准备OSD,会自动启动osd
  ceph-disk -v prepare --fs-type xfs --cluster-uuid a7f64266-0894-4f1e-a635-d0aeaca0e993 /dev/vdb
  创建pools
  ceph osd pool delete rbd rbd --yes-i-really-really-mean-it
  ceph osd pool create vms 300
  ceph osd pool create images 40
  ceph osd pool create volumes 120
  创建用户

  ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
  ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
页: [1]
查看完整版本: openstack 高可用(HA)