Install OpenStack Kilo
Installing OpenStack Kilo (work in progress)
OS: CentOS Linux release 7.0.1406 (Core)
Virtual machines (must have Internet access)
node1 (management): 10.10.0.10, 192.168.74.211
node2 (network): 10.10.0.20, 10.20.0.20, 10.30.0.20, 192.168.74.212
node3 (compute): 10.10.0.30, 10.20.0.30, 192.168.74.213
Part 1: Environment Preparation
1. Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
2. Disable SELinux
vim /etc/selinux/config
...
SELINUX=disabled
setenforce 0
3. Time synchronization: the management node acts as the NTP server, and the other nodes sync time from it
Management node:
yum install ntp
vim /etc/ntp.conf
...
server 127.127.1.0 # local clock
fudge 127.127.1.0 stratum 10
systemctl enable ntpd.service
systemctl start ntpd.service
# ntpq -c peers
     remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
*LOCAL(0)        .LOCL.          10 l   57   64  177    0.000    0.000   0.000
On the other nodes, install ntp as well and simply point them at the management node as their time server (see the sketch below).
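A minimal client-side sketch for node2 and node3 (the iburst option is optional):
vim /etc/ntp.conf
...
server 10.10.0.10 iburst   # sync from the management node
systemctl enable ntpd.service
systemctl start ntpd.service
ntpq -c peers   # the management node should show up as a peer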
4. Yum repositories
Note: do this on every node.
Install the EPEL repository:
yum install http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm
Install the Kilo (RDO) repository:
yum install http://rdo.fedorapeople.org/openstack-kilo/rdo-release-kilo.rpm
Examples of the resulting repo files:
# cat media.repo
[media]
name=media
baseurl=http://vault.centos.org/7.0.1406/os/x86_64
enabled=1
gpgcheck=0
# cat rdo-release.repo
[openstack-kilo]
name=OpenStack Kilo Repository
baseurl=http://repos.fedorapeople.org/repos/openstack/openstack-kilo/el7/
skip_if_unavailable=0
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RDO-kilo
# cat epel.repo
[epel]
name=Extra Packages for Enterprise Linux 7 - $basearch
#baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch
mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
[epel-debuginfo]
name=Extra Packages for Enterprise Linux 7 - $basearch - Debug
#baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch/debug
mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-7&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=1
[epel-source]
name=Extra Packages for Enterprise Linux 7 - $basearch - Source
#baseurl=http://download.fedoraproject.org/pub/epel/7/SRPMS
mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-7&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=1
5. Set up mutual SSH trust between all nodes
vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.10.0.10 node1
10.10.0.20 node2
10.10.0.30 node3
ssh-keygen
cat id_rsa.pub >> authorized_keys # run ssh-keygen on every node, then collect each node's public key into this authorized_keys file
cat authorized_keys
ssh-rsa ----- root@node1
ssh-rsa ----- root@node2
ssh-rsa ----- root@node3
scp -r /root/.ssh node2:/root/
scp -r /root/.ssh node3:/root/
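A quick way to confirm the trust works from node1 (simple check, hostnames as defined in /etc/hosts above):
for h in node2 node3; do ssh $h hostname; done   # should print node2 and node3 without asking for a password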
Part 2: Install the Database Service
Management node:
yum install mariadb mariadb-server MySQL-python
vim /etc/my.cnf.d/mariadb_openstack.cnf
[mysqld]
bind-address = 10.10.0.10
default-storage-engine = innodb
innodb_file_per_table
collation-server = utf8_general_ci
init-connect = 'SET NAMES utf8'
character-set-server = utf8
systemctl enable mariadb.service
systemctl start mariadb.service
mysql_secure_installation
# set the root password, then answer Y to all remaining prompts
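Optionally confirm that MariaDB is listening on the management address (a quick sketch):
ss -tnlp | grep 3306   # mysqld should be bound to 10.10.0.10:3306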
Part 3: Install the Message Queue Service
yum install rabbitmq-server
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service
rabbitmqctl add_user openstack openstack
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
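Optionally verify the user and its permissions (sketch):
rabbitmqctl list_users         # openstack should be listed
rabbitmqctl list_permissions   # openstack should have ".*" for configure/write/read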
Part 4: Install Keystone
mysql -u root -p
MariaDB [(none)]> CREATE DATABASE keystone;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'keystone';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone';
MariaDB [(none)]> flush PRIVILEGES ;
yum install openstack-keystone httpd mod_wsgi python-openstackclient memcached python-memcached
systemctl enable memcached.service
systemctl start memcached.service
vim /etc/keystone/keystone.conf
[DEFAULT]
verbose = True
admin_token = openstack
[database]
connection = mysql://keystone:keystone@10.10.0.10/keystone
[revoke]
driver = keystone.contrib.revoke.backends.sql.Revoke
[token]
provider = keystone.token.providers.uuid.Provider
driver = keystone.token.persistence.backends.memcache.Token
su -s /bin/sh -c "keystone-manage db_sync" keystone
vim /etc/httpd/conf.d/wsgi-keystone.conf
Listen 5000
Listen 35357
<VirtualHost *:5000>
WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-public
WSGIScriptAlias / /var/www/cgi-bin/keystone/main
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
LogLevel info
ErrorLogFormat "%{cu}t %M"
ErrorLog /var/log/httpd/keystone-error.log
CustomLog /var/log/httpd/keystone-access.log combined
</VirtualHost>
<VirtualHost *:35357>
WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-admin
WSGIScriptAlias / /var/www/cgi-bin/keystone/admin
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
LogLevel info
ErrorLogFormat "%{cu}t %M"
ErrorLog /var/log/httpd/keystone-error.log
CustomLog /var/log/httpd/keystone-access.log combined
</VirtualHost>
mkdir -p /var/www/cgi-bin/keystone
curl http://git.openstack.org/cgit/openstack/keystone/plain/httpd/keystone.py?h=stable/kilo | tee /var/www/cgi-bin/keystone/main /var/www/cgi-bin/keystone/admin
chown -R keystone:keystone /var/www/cgi-bin/keystone
chmod 755 /var/www/cgi-bin/keystone/*
systemctl enable httpd.service
systemctl start httpd.service
export OS_TOKEN=openstack
export OS_URL=http://10.10.0.10:35357/v2.0
openstack user list
openstack service create --name keystone --description "OpenStack Identity" identity
openstack endpoint create --publicurl http://10.10.0.10:5000/v2.0 --internalurl http://10.10.0.10:5000/v2.0 --adminurl http://10.10.0.10:35357/v2.0 --region RegionOne identity
openstack project create --description "Admin Project" admin
openstack user create --password-prompt admin
openstack role create admin
openstack role add --project admin --user admin admin
openstack project create --description "Service Project" service
openstack project create --description "Demo Project" demo
openstack user create --password-prompt demo
openstack role create user
openstack role add --project demo --user demo user
unset OS_TOKEN OS_URL
openstack --os-auth-url http://10.10.0.10:35357 --os-project-name admin --os-username admin --os-auth-type password token issue
openstack --os-auth-url http://10.10.0.10:35357 --os-project-name admin --os-username admin --os-auth-type password project list
vim openrc.sh
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_PROJECT_NAME=admin
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://10.10.0.10:35357/v3
source openrc.sh
openstack token issue
openstack user list
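A matching credentials file for the demo user can be kept alongside (a sketch; the file name and password are assumptions, use whatever was entered at the --password-prompt):
vim demo-openrc.sh
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_PROJECT_NAME=demo
export OS_TENANT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=demo
export OS_AUTH_URL=http://10.10.0.10:5000/v3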
Part 5: Install the Glance Image Service
mysql -u root -p
MariaDB [(none)]> CREATE DATABASE glance;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'glance';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance';
MariaDB [(none)]> flush PRIVILEGES;
source openrc.sh
openstack user create --password-prompt glance
openstack role add --project service --user glance admin
openstack service create --name glance --description "OpenStack Image service" image
openstack endpoint create --publicurl http://10.10.0.10:9292 --internalurl http://10.10.0.10:9292 --adminurl http://10.10.0.10:9292 --region RegionOne image
yum install openstack-glance python-glance python-glanceclient
vim /etc/glance/glance-api.conf
[DEFAULT]
notification_driver = noop
verbose = True
[database]
connection = mysql://glance:glance@node1/glance
[keystone_authtoken]
auth_uri = http://10.10.0.10:5000
auth_url = http://10.10.0.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = glance
password = glance
[paste_deploy]
flavor = keystone
[glance_store]
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
vim /etc/glance/glance-registry.conf
[DEFAULT]
notification_driver = noop
verbose = True
[database]
connection = mysql://glance:glance@10.10.0.10/glance
[keystone_authtoken]
auth_uri = http://10.10.0.10:5000
auth_url = http://10.10.0.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = glance
password = glance
[paste_deploy]
flavor = keystone
su -s /bin/sh -c "glance-manage db_sync" glance
systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl start openstack-glance-api.service openstack-glance-registry.service
wget -P /tmp/images http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
glance image-create --name="cirros" --disk-format=qcow2 --container-format=bare --is-public=true < /tmp/images/cirros-0.3.4-x86_64-disk.img
glance image-update --property architecture=x86_64 --property os_distro=ubuntu --property os_version=0.3.3 --property vol_size=1 4aaaebae-4c34-45c9-9b7d-fb8911de7c6e
glance image-list
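To confirm the image actually landed in the filesystem store configured above (sketch):
ls -lh /var/lib/glance/images/   # one file per image, named by image ID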
Part 6: Install the Nova Compute Service
1. Management node
mysql -u root -p
MariaDB [(none)]> CREATE DATABASE nova;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova';
MariaDB [(none)]> flush PRIVILEGES;
openstack user create --password-prompt nova
openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --publicurl http://10.10.0.10:8774/v2/%\(tenant_id\)s --internalurl http://10.10.0.10:8774/v2/%\(tenant_id\)s --adminurl http://10.10.0.10:8774/v2/%\(tenant_id\)s --region RegionOne compute
yum install openstack-nova-api openstack-nova-cert openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler python-novaclient
vim /etc/nova/nova.conf
[DEFAULT]
my_ip = 10.10.0.10
rpc_backend = rabbit
auth_strategy = keystone
vncserver_listen = 10.10.0.10
vncserver_proxyclient_address = 10.10.0.10
verbose = True
network_api_class = nova.network.neutronv2.api.API
security_group_api = neutron
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
# nova-network settings left commented out, since Neutron is configured above
#network_api_class = nova.network.api.API
#security_group_api = nova
[database]
connection = mysql://nova:nova@10.10.0.10/nova
[glance]
host = node1
[keystone_authtoken]
auth_uri = http://10.10.0.10:5000
auth_url = http://10.10.0.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = nova
[neutron]
url = http://10.10.0.10:9696
auth_strategy = keystone
admin_auth_url = http://10.10.0.10:35357/v2.0
admin_tenant_name = service
admin_username = neutron
admin_password = neutron
service_metadata_proxy = True
metadata_proxy_shared_secret = 10.10.0.10
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_rabbit]
rabbit_host = 10.10.0.10
rabbit_userid = openstack
rabbit_password = openstack
su -s /bin/sh -c "nova-manage db sync" nova
systemctl enable openstack-nova-api.service openstack-nova-cert.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service openstack-nova-cert.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
2. Compute node
yum install openstack-nova-compute sysfsutils
# vim /etc/nova/nova.conf
[DEFAULT]
verbose = True
my_ip = 10.10.0.30
rpc_backend = rabbit
auth_strategy = keystone
vnc_enabled = True
vncserver_listen = 0.0.0.0
# the compute node's own management IP, so the noVNC proxy on node1 can reach it
vncserver_proxyclient_address = 10.10.0.30
novncproxy_base_url = http://10.10.0.10:6080/vnc_auto.html
network_api_class = nova.network.neutronv2.api.API
security_group_api = neutron
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[glance]
host = 10.10.0.10
[keystone_authtoken]
auth_uri = http://10.10.0.10:5000
auth_url = http://10.10.0.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = nova
[libvirt]
virt_type = qemu
[neutron]
url = http://10.10.0.10:9696
auth_strategy = keystone
admin_auth_url = http://10.10.0.10:35357/v2.0
admin_tenant_name = service
admin_username = neutron
admin_password = neutron
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_rabbit]
rabbit_host = 10.10.0.10
rabbit_userid = openstack
rabbit_password = openstack
egrep -c '(vmx|svm)' /proc/cpuinfo
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service
On the management node:
# nova service-list
+----+------------------+-------+----------+---------+-------+----------------------------+-----------------+
| Id | Binary           | Host  | Zone     | Status  | State | Updated_at                 | Disabled Reason |
+----+------------------+-------+----------+---------+-------+----------------------------+-----------------+
| 1  | nova-conductor   | node1 | internal | enabled | up    | 2015-12-28T22:43:53.000000 | -               |
| 2  | nova-consoleauth | node1 | internal | enabled | up    | 2015-12-28T22:43:53.000000 | -               |
| 3  | nova-cert        | node1 | internal | enabled | up    | 2015-12-28T22:43:52.000000 | -               |
| 4  | nova-scheduler   | node1 | internal | enabled | up    | 2015-12-28T22:43:54.000000 | -               |
| 5  | nova-compute     | node3 | nova     | enabled | up    | 2015-12-28T22:43:56.000000 | -               |
+----+------------------+-------+----------+---------+-------+----------------------------+-----------------+
Part 7: Neutron
1. Management node
mysql -u root -p
MariaDB [(none)]> CREATE DATABASE neutron;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'neutron';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron';
MariaDB [(none)]> flush PRIVILEGES;
openstack user create --password-prompt neutron
openstack role add --project service --user neutron admin
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --publicurl http://10.10.0.10:9696 --adminurl http://10.10.0.10:9696 --internalurl http://10.10.0.10:9696 --region RegionOne network
yum install openstack-neutron openstack-neutron-ml2 python-neutronclient
vim /etc/neutron/neutron.conf
[DEFAULT]
verbose = True
rpc_backend = rabbit
auth_strategy = keystone
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
nova_url = http://10.10.0.10:8774/v2
[keystone_authtoken]
auth_uri = http://10.10.0.10:5000
auth_url = http://10.10.0.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = neutron
[database]
connection = mysql://neutron:neutron@10.10.0.10/neutron
[nova]
auth_url = http://10.10.0.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = nova
password = nova
[oslo_concurrency]
lock_path = $state_path/lock
[oslo_messaging_rabbit]
rabbit_host = 10.10.0.10
rabbit_userid = openstack
rabbit_password = openstack
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,gre,vxlan
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
systemctl restart openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service
systemctl enable neutron-server.service
systemctl start neutron-server.service
2. Network node
# cat /etc/sysctl.conf
...
net.ipv4.ip_forward=1
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
sysctl -p
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch
vim /etc/neutron/neutron.conf
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
verbose = True
[keystone_authtoken]
auth_uri = http://10.10.0.10:5000
auth_url = http://10.10.0.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = neutron
[oslo_concurrency]
lock_path = $state_path/lock
[oslo_messaging_rabbit]
rabbit_host = 10.10.0.10
rabbit_userid = openstack
rabbit_password = openstack
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,gre,vxlan
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_flat]
flat_networks = external
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
[ovs]
local_ip = 10.20.0.20
bridge_mappings = external:br-ex
[agent]
tunnel_types = gre
vim /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
external_network_bridge =
router_delete_namespaces = True
verbose = True
vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
dhcp_delete_namespaces = True
dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
vim /etc/neutron/dnsmasq-neutron.conf
dhcp-option-force=26,1454
pkill dnsmasq
vim /etc/neutron/metadata_agent.ini
[DEFAULT]
auth_uri = http://10.10.0.10:5000
auth_url = http://10.10.0.10:35357
auth_region = RegionOne
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = neutron
nova_metadata_ip = 10.10.0.10
metadata_proxy_shared_secret = 10.10.0.10
verbose = True
systemctl enable openvswitch.service
systemctl start openvswitch.service
ovs-vsctl add-br br-ex
ovs-vsctl add-port br-ex eno33554992
ethtool -K eno33554992 gro off
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
cp /usr/lib/systemd/system/neutron-openvswitch-agent.service /usr/lib/systemd/system/neutron-openvswitch-agent.service.orig
sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
systemctl enable neutron-openvswitch-agent.service neutron-l3-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-ovs-cleanup.service
systemctl start neutron-openvswitch-agent.service neutron-l3-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
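Back on the management node, the agents should now register (a sketch of the expected check):
neutron agent-list   # the Open vSwitch, L3, DHCP and metadata agents on node2 should all show alive = :-)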
3. Compute node
vim /etc/sysctl.conf
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
sysctl -p
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch
vim /etc/neutron/neutron.conf
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
verbose = True
[keystone_authtoken]
auth_uri = http://10.10.0.10:5000
auth_url = http://10.10.0.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = neutron
[oslo_concurrency]
lock_path = $state_path/lock
[oslo_messaging_rabbit]
rabbit_host = 10.10.0.10
rabbit_userid = openstack
rabbit_password = openstack
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,gre,vxlan
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[securitygroup]
enable_security_group = True
enable_ipset = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
[ovs]
local_ip = 10.20.0.30
[agent]
tunnel_types = gre
systemctl enable openvswitch.service
systemctl start openvswitch.service
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
cp /usr/lib/systemd/system/neutron-openvswitch-agent.service /usr/lib/systemd/system/neutron-openvswitch-agent.service.orig
sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service
systemctl restart openstack-nova-compute.service
systemctl enable neutron-openvswitch-agent.service
systemctl start neutron-openvswitch-agent.service
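Once the agent is running on the compute node, it creates the integration and tunnel bridges; a quick check (sketch):
ovs-vsctl show   # br-int and br-tun should now exist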
4. Management node
neutron ext-list
neutron net-create ext-net --router:external --provider:physical_network external --provider:network_type flat
neutron subnet-create ext-net 192.168.124.0/24 --name ext-subnet --allocation-pool start=192.168.124.110,end=192.168.124.200 --disable-dhcp --gateway 192.168.124.1
neutron net-create demo-net
neutron subnet-create demo-net 192.168.1.0/24 --name demo-subnet --gateway 192.168.1.1
neutron router-create demo-router
neutron router-interface-add demo-router demo-subnet
neutron router-gateway-set demo-router ext-net
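The router gateway should now hold an address from the ext-subnet allocation pool; a basic check (sketch, assuming the router picked the first pool address 192.168.124.110):
neutron router-port-list demo-router   # shows the gateway port and its IP
ping -c 4 192.168.124.110              # run from a host on the external network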
Part 8: Install the Dashboard
yum install openstack-dashboard httpd mod_wsgi memcached python-memcached
vim /etc/openstack-dashboard/local_settings
...
OPENSTACK_HOST = "10.10.0.10"
ALLOWED_HOSTS = ['*']
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
...
setsebool -P httpd_can_network_connect on
chown -R apache:apache /usr/share/openstack-dashboard/static
systemctl enable httpd.service memcached.service
systemctl start httpd.service memcached.service
Log in at http://10.10.0.10/dashboard to verify.
Part 9: Install Ceph
Example yum repository file:
ceph.repo
[ceph]
name=ceph
baseurl=http://download.ceph.com/rpm/el7/x86_64/
enabled=1
gpgcheck=0
[ceph-noarch]
name=ceph noarch
baseurl=http://download.ceph.com/rpm/el7/noarch/
enabled=1
gpgcheck=0
[ceph-srpms]
name=ceph srpms
baseurl=http://download.ceph.com/rpm/el7/SRPMS/
enabled=1
gpgcheck=0
Install these dependency packages first:
yum install ftp://rpmfind.net/linux/centos/7.1.1503/extras/x86_64/Packages/python-werkzeug-0.9.1-2.el7.noarch.rpm
yum install ftp://195.220.108.108/linux/centos/7.1.1503/extras/x86_64/Packages/python-itsdangerous-0.23-2.el7.noarch.rpm
yum install ftp://195.220.108.108/linux/centos/7.1.1503/extras/x86_64/Packages/python-flask-0.10.1-4.el7.noarch.rpm
Install the packages:
yum install snappy leveldb gdisk python-argparse gperftools-libs ceph
mkdir /root/ceph
cd /root/ceph
vim /etc/ceph/ceph.conf
[global]
fsid = b74dadd9-c0cc-44da-b19a-e071b760a187
public network = 10.10.0.0/24
cluster network = 10.10.0.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
filestore xattr use omap = true
osd pool default size = 2
osd pool default min size = 1
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
mon osd full ratio = .80
mon osd nearfull ratio = .70
debug lockdep = 0/0
debug context = 0/0
debug crush = 0/0
debug buffer = 0/0
debug timer = 0/0
debug journaler = 0/0
debug osd = 0/0
debug optracker = 0/0
debug objclass = 0/0
debug filestore = 0/0
debug journal = 0/0
debug ms = 0/0
debug monc = 0/0
debug tp = 0/0
debug auth = 0/0
debug finisher = 0/0
debug heartbeatmap = 0/0
debug perfcounter = 0/0
debug asok = 0/0
debug throttle = 0/0
mon initial members = node1
mon host = 10.10.0.10:6789
[mon.node1]
host = node1
mon addr = 10.10.0.10:6789
mon data = /var/lib/ceph/mon/ceph-node1
Create the key files and the monitor map
Generate the monitor key:
ceph-authtool --create-keyring ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
Generate the admin key:
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'
Add the admin key into the monitor keyring:
ceph-authtool ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
Create the monitor map file:
monmaptool --create --add node1 10.10.0.10 --fsid b74dadd9-c0cc-44da-b19a-e071b760a187 /tmp/monmap
Create the first monitor
Create the monitor's data directory:
mkdir -p /var/lib/ceph/mon/ceph-node1
Initialize the monitor:
ceph-mon --mkfs -i node1 --monmap /tmp/monmap --keyring /root/ceph/ceph.mon.keyring
Start the monitor service:
service ceph start mon
------------------------------------------------------------------------------------------
Verifying that the monitors work
After all the monitors defined in the configuration file have been created, verify from any monitor node:
running ceph -s on any monitor node shows the cluster status; because no OSDs have been added yet, the cluster will report an unhealthy state.
-----------------------------------------------------------------------------------------
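For reference, the checks mentioned above (sketch):
ceph -s        # cluster status; a warning/error state is expected while there are no OSDs
ceph mon stat  # should show node1 in the monitor quorum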
Adding an OSD works as follows.
First, generate a UUID:
uuidgen
Run the command below; it returns a number, for example 3. That number is the OSD id Ceph assigns to you; remember it, it is used later.
ceph osd create 906f693a-acb0-4f24-8e2c-a8d7cdaec63f
Create a mount directory; the data partition will be mounted here, and it is recommended that the directory name match the assigned OSD id.
mkdir /var/lib/ceph/osd/ceph-3
Prepare your disk (note: partitioning is not strictly required).
# parted /dev/mapper/mpathc
mkfs -t xfs /dev/mapper/mpathcp1
Mount it:
mount -t xfs /dev/mapper/mpathcp1 /var/lib/ceph/osd/ceph-3
The key steps are here; run them carefully and adjust what needs adjusting. Several parameters must be set correctly: the assigned OSD id, the UUID, the weight, and the hostname.
ceph-osd -i 3 --mkfs --mkkey --osd-uuid 906f693a-acb0-4f24-8e2c-a8d7cdaec63f
ceph auth add osd.3 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-3/keyring
ceph osd crush add osd.3 1.0 host=node-6
ceph-disk activate /var/lib/ceph/osd/ceph-3
The last key step: add the mount to /etc/fstab
vim /etc/fstab
Example:
/dev/mapper/mpathcp1 /var/lib/ceph/osd/ceph-3 xfs defaults,noatime,inode64 0 0
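At this point the new OSD should appear in the CRUSH map and start reporting in (sketch):
ceph osd tree   # osd.3 should be listed under the host given in the crush add command, weight 1.0
ceph -s         # the osd count goes up; health improves once enough OSDs are in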
Closing notes
To change an OSD's weight:
ceph osd crush reweight osd.3 4
ceph osd pool create volumes 200 200 replicated
ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes'
ceph auth get-or-create client.cinder | tee /etc/ceph/ceph.client.cinder.keyring
With the Ceph cluster in place, move on to installing Cinder.
Part 10: Cinder
Here I install Cinder on the management node, with Ceph as its storage backend.
mysql -u root -p
MariaDB [(none)]> CREATE DATABASE cinder;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'cinder';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'cinder';
MariaDB [(none)]> flush PRIVILEGES;
openstack user create --password-prompt cinder
openstack role add --project service --user cinder admin
openstack service create --name cinder --description "OpenStack Block Storage" volume
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack endpoint create --publicurl http://10.10.0.10:8776/v2/%\(tenant_id\)s --internalurl http://10.10.0.10:8776/v2/%\(tenant_id\)s --adminurl http://10.10.0.10:8776/v2/%\(tenant_id\)s --region RegionOne volume
openstack endpoint create --publicurl http://10.10.0.10:8776/v2/%\(tenant_id\)s --internalurl http://10.10.0.10:8776/v2/%\(tenant_id\)s --adminurl http://10.10.0.10:8776/v2/%\(tenant_id\)s --region RegionOne volumev2
yum install openstack-cinder python-cinderclient python-oslo-db qemu
cp /usr/share/cinder/cinder-dist.conf /etc/cinder/cinder.conf
chown -R cinder:cinder /etc/cinder/cinder.conf
vim /etc/cinder/cinder.conf
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 10.10.0.10
verbose = True
volume_driver=cinder.volume.drivers.rbd.RBDDriver
rbd_secret_uuid=b74dadd9-c0cc-44da-b19a-e071b760a187
rbd_user=cinder
rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_max_clone_depth=5
rbd_pool=volumes
rados_connect_timeout=-1
rbd_flatten_volume_from_snapshot=False
rbd_store_chunk_size=4
#logdir = /var/log/cinder
#state_path = /var/lib/cinder
#lock_path = /var/lib/cinder/tmp
#volumes_dir = /etc/cinder/volumes
#iscsi_helper = lioadm
#rootwrap_config = /etc/cinder/rootwrap.conf
[database]
connection = mysql://cinder:cinder@10.10.0.10/cinder
[keystone_authtoken]
auth_uri = http://10.10.0.10:5000
auth_url = http://10.10.0.10:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = cinder
password = cinder
#admin_tenant_name = %SERVICE_TENANT_NAME%
#admin_user = %SERVICE_USER%
#admin_password = %SERVICE_PASSWORD%
#auth_host = 127.0.0.1
#auth_port = 35357
#auth_protocol = http
[oslo_messaging_rabbit]
rabbit_host = 10.10.0.10
rabbit_userid = openstack
rabbit_password = openstack
[oslo_concurrency]
lock_path = /var/lock/cinder
su -s /bin/sh -c "cinder-manage db sync" cinder
chown cinder.cinder -R /etc/ceph/
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service openstack-cinder-volume.service
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service openstack-cinder-volume.service
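A quick service check before creating a volume (sketch):
cinder service-list   # cinder-scheduler and cinder-volume should both report State = up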
Test the Ceph backend:
# cinder create 1
+---------------------+--------------------------------------+
| Property | Value |
+---------------------+--------------------------------------+
| attachments | [] |
| availability_zone | nova |
| bootable | false |
| created_at | 2015-12-30T05:16:33.325201 |
| display_description | None |
| display_name | None |
| encrypted | False |
| id | 6e698821-03e7-4298-b91a-768225eddc61 |
| metadata | {} |
| multiattach | false |
| size | 1 |
| snapshot_id | None |
| source_volid | None |
| status | creating |
| volume_type | None |
+---------------------+--------------------------------------+
# cinder list
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
| ID                                   | Status    | Display Name | Size | Volume Type | Bootable | Attached to |
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
| 6106f76d-d824-4ae1-87f3-2754989ea0ee | available | -            | 1    | -           | false    |             |
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+