mr923 posted on 2018-6-1 09:11:00

Integrating OpenStack Kilo with Ceph

  Environment:

  OpenStack (Kilo): one controller node and one compute node, installed through to the dashboard, with VMs creating normally (for the setup procedure see the official guide at http://docs.openstack.org/kilo/install-guide/install/yum/content/)
  Ceph: 3 machines in total, two storage nodes plus one deploy node (for the setup procedure see the official docs at http://ceph.com/)
  Next, install Cinder on the controller node. Run the following on the controller:
  ## Create the database and grant privileges
  # mysql
  Welcome to the MariaDB monitor.  Commands end with ; or \g.
  Your MariaDB connection id is 2439
  Server version: 5.5.47-MariaDB MariaDB Server
  

  Copyright (c) 2000, 2015, Oracle, MariaDB Corporation Ab and others.
  

  Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
  

  MariaDB [(none)]> CREATE DATABASE cinder;
  Query OK, 1 row affected (0.00 sec)
  

  MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \
  ->   IDENTIFIED BY 'awcloud';
  Query OK, 0 rows affected (0.15 sec)
  

  MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
  ->   IDENTIFIED BY 'awcloud';
  Query OK, 0 rows affected (0.01 sec)
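  As a quick sanity check, you can log in as the new cinder user and list its grants (using the awcloud password set above):
  # mysql -u cinder -pawcloud -e "SHOW GRANTS;"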
  ## Create the user, service entries, and endpoints
  # source admin-openrc.sh
  # openstack user create --password-prompt cinder
  # openstack role add --project service --user cinder admin
  # openstack service create --name cinder \
  >   --description "OpenStack Block Storage" volume
  # openstack service create --name cinderv2 \
  >   --description "OpenStack Block Storage" volumev2
  # openstack endpoint create \
  >   --publicurl http://controller:8776/v2/%\(tenant_id\)s \
  >   --internalurl http://controller:8776/v2/%\(tenant_id\)s \
  >   --adminurl http://controller:8776/v2/%\(tenant_id\)s \
  >   --region RegionOne \
  >   volume
  # openstack endpoint create \
  >   --publicurl http://controller:8776/v2/%\(tenant_id\)s \
  >   --internalurl http://controller:8776/v2/%\(tenant_id\)s \
  >   --adminurl http://controller:8776/v2/%\(tenant_id\)s \
  >   --region RegionOne \
  >   volumev2
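  To confirm everything registered correctly, you can list the service entries and endpoints:
  # openstack service list
  # openstack endpoint list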
  Install the Cinder service
  # yum install openstack-cinder python-cinderclient python-oslo-db -y
  Edit the configuration file
  # cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bk
  # vim /etc/cinder/cinder.conf
  # egrep -v "^#|^$" /etc/cinder/cinder.conf
  
  [DEFAULT]
  rpc_backend = rabbit
  auth_strategy = keystone
  my_ip = 192.168.8.199
  verbose = True

  [database]
  connection = mysql://cinder:awcloud@controller/cinder

  [keystone_authtoken]
  auth_uri = http://controller:5000
  auth_url = http://controller:35357
  auth_plugin = password
  project_domain_id = default
  user_domain_id = default
  project_name = service
  username = cinder
  password = awcloud

  [oslo_messaging_rabbit]
  rabbit_host = controller
  rabbit_userid = guest
  rabbit_password = guest

  [oslo_concurrency]
  lock_path = /var/lock/cinder
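  Before starting the services, the Kilo install guide referenced above also populates the Block Storage database; that step is:
  # su -s /bin/sh -c "cinder-manage db sync" cinder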
  Enable and start the services
  # systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
  # systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
  ### Configure the controller node as a Ceph client
  # yum install python-rbd ceph-common -y
  Copy the admin keyring and the Ceph configuration file to the controller node (run on the Ceph node):
  # scp ceph.client.admin.keyring ceph.conf 192.168.8.199:/etc/ceph/
  Now verify that the ceph command works on the controller node:
  # ceph -s
  cluster 3155ed83-9e92-43da-90f1-c7715148f48f
  health HEALTH_OK
  monmap e1: 1 mons at {node1=192.168.8.35:6789/0}
  election epoch 2, quorum 0 node1
  osdmap e47: 2 osds: 2 up, 2 in
  pgmap v1325: 64 pgs, 1 pools, 0 bytes data, 0 objects
  80896 kB used, 389 GB / 389 GB avail
  64 active+clean
  ## Create pools for Cinder, Glance, Nova, and Cinder backup
  # ceph osd pool create volumes 50
  pool 'volumes' created
  # ceph osd pool create images 50
  pool 'images' created
  # ceph osd pool create backups 50
  pool 'backups' created
  # ceph osd pool create vms 50
  pool 'vms' created
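  You can list the pools to confirm they were all created:
  # ceph osd lspools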
  Set up cephx authentication for the client users:
  # ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
  # ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
  # ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups'
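  To review the new users and the capabilities granted to them:
  # ceph auth list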
  ## Create the keyring files for each user
  # ceph auth get-or-create client.glance | tee /etc/ceph/ceph.client.glance.keyring
  [client.glance]
  key = AQANyXRXb5l7CRAA2yVyM92BIm+U3QDseZGqow==
  # chown glance:glance /etc/ceph/ceph.client.glance.keyring
  # ceph auth get-or-create client.cinder | sudo tee /etc/ceph/ceph.client.cinder.keyring
  [client.cinder]
  key = AQDkyHRXvOTwARAAbRha/MtmqPcJm0RF9jcrsQ==
  # sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
  # ceph auth get-or-create client.cinder-backup | sudo tee /etc/ceph/ceph.client.cinder-backup.keyring
  [client.cinder-backup]
  key = AQAVyXRXQDKFBRAAtY9DuiGGRSTBDu0MRckXbA==
  # chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring
  ## Copy the /etc/ceph/ceph.client.cinder.keyring file to the compute node
  # scp /etc/ceph/ceph.client.cinder.keyring compute:/etc/ceph/
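  The virsh secret-set-value step below reads the bare key from a file named client.cinder.key, so export the key to the compute node first (this follows the Ceph guide referenced at the end):
  # ceph auth get-key client.cinder | ssh compute tee client.cinder.key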
  ## Create the libvirt secret on the compute node
  # uuidgen
  457eb676-33da-42ec-9a8c-9293d545c337
  # cat > secret.xml <<EOF
  <secret ephemeral='no' private='no'>
  <uuid>457eb676-33da-42ec-9a8c-9293d545c337</uuid>
  <usage type='ceph'>
  <name>client.cinder secret</name>
  </usage>
  </secret>
  EOF
  # sudo virsh secret-define --file secret.xml
  # sudo virsh secret-set-value --secret 457eb676-33da-42ec-9a8c-9293d545c337 --base64 $(cat client.cinder.key)
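  You can confirm libvirt stored the secret, after which the temporary files can be removed (the Ceph guide cleans them up at this point):
  # sudo virsh secret-list
  # rm client.cinder.key secret.xml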
  ### Integrate Glance with Ceph
  # vi /etc/glance/glance-api.conf
  
  [DEFAULT]
  ...
  default_store=rbd
  rbd_store_user=glance
  rbd_store_pool=images
  show_image_direct_url=True
  # systemctl restart openstack-glance-api.service
  # systemctl restart openstack-glance-registry.service
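  To confirm Glance now writes to RBD, upload a test image and list the images pool. The cirros file name here is only an example; RBD-backed Glance works best with raw images, so convert qcow2 first:
  # qemu-img convert -f qcow2 -O raw cirros-0.3.4-x86_64-disk.img cirros-0.3.4-x86_64-disk.raw
  # glance image-create --name cirros-raw --disk-format raw --container-format bare --file cirros-0.3.4-x86_64-disk.raw
  # rbd ls images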
  ## Integrate Cinder with Ceph
  # vim /etc/cinder/cinder.conf
  
  [DEFAULT]
  volume_driver=cinder.volume.drivers.rbd.RBDDriver
  rbd_pool=volumes
  rbd_ceph_conf=/etc/ceph/ceph.conf
  rbd_flatten_volume_from_snapshot=false
  rbd_max_clone_depth=5
  glance_api_version=2
  rbd_user=cinder
  rbd_secret_uuid=457eb676-33da-42ec-9a8c-9293d545c337
  # systemctl restart openstack-cinder-api.service
  # systemctl restart openstack-cinder-volume.service
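  A quick way to verify the Cinder side is to create a small test volume (the name is arbitrary) and check that it shows up in the volumes pool:
  # cinder create --display-name test-vol 1
  # rbd ls volumes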
  ## Integrate Cinder Backup with Ceph
  # vim /etc/cinder/cinder.conf

  [DEFAULT]
  backup_driver=cinder.backup.drivers.ceph
  backup_ceph_conf=/etc/ceph/ceph.conf
  backup_ceph_user=cinder-backup
  backup_ceph_chunk_size=134217728
  backup_ceph_pool=backups
  backup_ceph_stripe_unit=0
  backup_ceph_stripe_count=0
  restore_discard_excess_bytes=true
  

  # systemctl restart openstack-cinder-backup.service
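  To exercise the backup path, back up an existing volume (substitute a real volume ID) and check the backups pool:
  # cinder backup-create <volume-id>
  # rbd ls backups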
  ## Integrate Nova with Ceph (on the compute node)
  # vim /etc/nova/nova.conf

  [libvirt]
  images_type=rbd
  images_rbd_pool=vms
  images_rbd_ceph_conf=/etc/ceph/ceph.conf
  rbd_user=cinder
  rbd_secret_uuid=457eb676-33da-42ec-9a8c-9293d545c337
  inject_password=false
  inject_key=false
  inject_partition=-2
  live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST"
  

  # systemctl restart openstack-nova-compute.service
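  To confirm ephemeral disks now land in the vms pool, boot a test instance and list that pool; the flavor, image, and net-id values below are examples for your environment:
  # nova boot --flavor m1.tiny --image cirros-raw --nic net-id=<net-uuid> test-vm
  # rbd ls vms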
  That completes the setup. Now verify it:
  Create a VM whose disk is backed by a Cinder volume; if you hit an error, check the volume log:
  # tailf /var/log/cinder/volume.log
  2016-06-24 03:21:00.458 58907 ERROR oslo_messaging.rpc.dispatcher Exception during message handling: Permission denied: '/var/lock/cinder'
  Check whether the directory exists:
  # ll /var/lock/cinder
  ls: cannot access /var/lock/cinder: No such file or directory
  ## Create the directory
  # mkdir /var/lock/cinder -p
  # chown cinder.cinder /var/lock/cinder/
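  After fixing the permissions, restart the Cinder services so they pick up the lock directory:
  # systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service openstack-cinder-volume.service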
  Create an instance with a volume, then verify with the rbd command:
  # rbd ls volumes
  volume-8a1ff9c3-0dbd-41d7-a46b-ebaa45bc2230
  The volumes behind newly created VMs now live in the Ceph cluster.
  

  References:
  http://docs.ceph.com/docs/master/rbd/rbd-openstack/
  http://docs.openstack.org/kilo/install-guide/install/yum/content/cinder-install-controller-node.html
  

  