1、基本环境
(1)ftp-1
从光盘安装vsftpd
先将CentOS光盘本地挂载到 /mnt 下,cd 到 /mnt/Packages 目录下,用 ls 找到 vsftpd 安装包
rpm -ivh vsftpd-2.2.2-11.el6_4.1.x86_64.rpm
service vsftpd restart
chkconfig vsftpd on
(2)yum
通过文件传输工具(如 SFTP/FTP)将两个 ISO 上传到 controller 的 /opt 目录下
[iyunv@controller ~]# cd /var/ftp
[iyunv@controller ftp]# mkdir centos openstack
[iyunv@controller media]# cd /opt
[iyunv@controller opt]# ls
CentOS-6.5-x86_64-bin.iso iaas-v1.2.iso
vi /etc/fstab
/opt/CentOS-6.5-x86_64-bin.iso /var/ftp/centos iso9660 defaults,ro,loop 0 0
/opt/iaas-v1.2.iso /var/ftp/openstack iso9660 defaults,ro,loop 0 0
[iyunv@controller ftp]# mount -a
[iyunv@controller ftp]# mount
...
/opt/CentOS-6.5-x86_64-bin.iso on /var/ftp/centos type iso9660 (ro,loop=/dev/loop0)
/opt/iaas-v1.2.iso on /var/ftp/openstack type iso9660 (ro,loop=/dev/loop1)
[iyunv@controller media]# cd /etc/yum.repos.d/
[iyunv@controller yum.repos.d]# mv * /opt
[iyunv@controller yum.repos.d]# vi yum.repo
[centos]
name=centos
baseurl=file:///var/ftp/centos
gpgcheck=0
[openstack]
name=openstack
baseurl=file:///var/ftp/openstack/iaas-repo
gpgcheck=0
yum clean all
yum makecache
(3)iptables
service iptables stop
chkconfig iptables off
(4)selinux
setenforce 0
vi /etc/sysconfig/selinux
将 SELINUX=enforcing 改为 SELINUX=permissive(或 disabled),使重启后仍然生效
[iyunv@controller ~]# getenforce
Permissive
(5)ftp-2
service vsftpd start
chkconfig vsftpd on
(6)hostname
[iyunv@controller ~]# hostname
controller
(7)network
vi /etc/sysconfig/network-scripts/ifcfg-eth0 #禁用 NetworkManager(NM_CONTROLLED=no);既然禁用了 NetworkManager 的 DHCP,就将 BOOTPROTO 设为 static 并写明静态 IP,更严谨省事
vi /etc/sysconfig/network-scripts/ifcfg-eth1
service network restart
(8)hosts
vi /etc/hosts
192.168.100.10 controller
192.168.100.20 compute
(9)ping
ping -c 4 controller
ping -c 4 compute
(10)ntp
yum install ntp -y
vi /etc/ntp.conf
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
restrict 192.168.100.0 mask 255.255.255.0 nomodify
server 127.127.1.0
fudge 127.127.1.0 stratum 10
service ntpd restart
chkconfig ntpd on
[iyunv@controller ~]# ntpstat
synchronised to local net at stratum 11
time correct to within 7948 ms
polling server every 64 s
[iyunv@controller ~]# ntpq -p
remote refid st t when poll reach delay offset jitter
==============================================================================
*LOCAL(0) .LOCL. 10 l 38 64 1 0.000 0.000 0.000
(11)MySQL
yum install mysql mysql-server MySQL-python -y
vi /etc/my.cnf
bind-address = 192.168.100.10
default-storage-engine = innodb
innodb_file_per_table #优化
collation-server = utf8_general_ci
init-connect = 'SET NAMES utf8'
character-set-server = utf8
service mysqld restart
chkconfig mysqld on
mysql_install_db
/usr/bin/mysqladmin -u root password '000000'
mysql_secure_installation
000000
n
y
n
y
y
(12)openstack packages
yum install yum-plugin-priorities openstack-utils openstack-selinux -y
yum upgrade -y
(13)Qpid
yum install qpid-cpp-server -y
vi /etc/qpidd.conf
auth=no
service qpidd restart
chkconfig qpidd on
2、认证服务Keystone
mysql -uroot -p000000
CREATE DATABASE keystone;
CREATE DATABASE glance;
CREATE DATABASE nova;
CREATE DATABASE neutron;
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '000000';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '000000';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '000000';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY '000000';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY '000000';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '000000';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY '000000';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '000000';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY '000000';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY '000000';
mysql> show databases;
+--------------------+
| Database |
+--------------------+
| information_schema |
| cinder |
| glance |
| keystone |
| mysql |
| neutron |
| nova |
+--------------------+
7 rows in set (0.00 sec)
exit
yum install openstack-keystone python-keystoneclient -y
openstack-config --set /etc/keystone/keystone.conf database connection mysql://keystone:000000@controller/keystone
su -s /bin/sh -c "keystone-manage db_sync" keystone
ADMIN_TOKEN=$(openssl rand -hex 10)
echo $ADMIN_TOKEN
openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_token $ADMIN_TOKEN
keystone-manage pki_setup --keystone-user keystone --keystone-group keystone
chown -R keystone:keystone /etc/keystone/ssl
chmod -R o-rwx /etc/keystone/ssl
service openstack-keystone restart
chkconfig openstack-keystone on
(crontab -l -u keystone 2>&1 | grep -q token_flush) || echo '@hourly /usr/bin/keystone-manage token_flush >/var/log/keystone/keystone-tokenflush.log 2>&1' >> /var/spool/cron/keystone
crontab -l -u keystone
export OS_SERVICE_TOKEN=$ADMIN_TOKEN
export OS_SERVICE_ENDPOINT=http://controller:35357/v2.0
keystone user-create --name=admin --pass=000000 --email=admin@localhost
keystone role-create --name=admin
keystone tenant-create --name=admin --description="Admin Tenant"
keystone user-role-add --user=admin --tenant=admin --role=admin
keystone user-role-add --user=admin --role=_member_ --tenant=admin
keystone user-create --name=user1 --pass=000000 --email=user1@localhost #这里建立user1是为了方便开发人员调试使用,给java开发的朋友用
keystone tenant-create --name=user1 --description="User1 Tenant"
keystone user-role-add --user=user1 --role=_member_ --tenant=user1
keystone tenant-create --name=service --description="Service Tenant"
keystone service-create --name=keystone --type=identity --description="OpenStack Identity"
keystone endpoint-create \
--service-id=$(keystone service-list | awk '/ identity / {print $2}') \
--publicurl=http://controller:5000/v2.0 \
--internalurl=http://controller:5000/v2.0 \
--adminurl=http://controller:35357/v2.0
unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT
keystone --os-username=admin --os-password=000000 --os-auth-url=http://controller:35357/v2.0 token-get
keystone --os-username=admin --os-password=000000 --os-tenant-name=admin --os-auth-url=http://controller:35357/v2.0 token-get
cd
vi admin-openrc.sh
export OS_USERNAME=admin
export OS_PASSWORD=000000
export OS_TENANT_NAME=admin
export OS_AUTH_URL=http://controller:35357/v2.0
source admin-openrc.sh
keystone token-get
[iyunv@controller images]# keystone user-list
+----------------------------------+--------+---------+------------------+
| id | name | enabled | email |
+----------------------------------+--------+---------+------------------+
| 9e83e1bca3a24abc842534f3bfd8c67b | admin | True | admin@localhost |
| 8afcb27107d742ba9cb7b1ffdc009487 | user1 | True | user1@localhost |
+----------------------------------+--------+---------+------------------+
keystone user-role-list --user admin --tenant admin
3、映像服务Glance
source admin-openrc.sh
yum install openstack-glance python-glanceclient -y
openstack-config --set /etc/glance/glance-api.conf database connection mysql://glance:000000@controller/glance
openstack-config --set /etc/glance/glance-registry.conf database connection mysql://glance:000000@controller/glance
su -s /bin/sh -c "glance-manage db_sync" glance
keystone user-create --name=glance --pass=000000 --email=glance@localhost
keystone user-role-add --user=glance --tenant=service --role=admin
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_host controller
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken admin_tenant_name service
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken admin_user glance
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken admin_password 000000
openstack-config --set /etc/glance/glance-api.conf paste_deploy flavor keystone
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_host controller
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken admin_tenant_name service
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken admin_user glance
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken admin_password 000000
openstack-config --set /etc/glance/glance-registry.conf paste_deploy flavor keystone
keystone service-create --name=glance --type=image --description="OpenStack Image Service"
keystone endpoint-create \
--service-id=$(keystone service-list | awk '/ image / {print $2}') \
--publicurl=http://controller:9292 \
--internalurl=http://controller:9292 \
--adminurl=http://controller:9292
service openstack-glance-api restart
service openstack-glance-registry restart
chkconfig openstack-glance-api on
chkconfig openstack-glance-registry on
[iyunv@controller ~]# cd /var/ftp/iaas/images/
[iyunv@controller images]# file centos_65_x86_6420140327.qcow2
centos_65_x86_6420140327.qcow2: Qemu Image, Format: Qcow , Version: 2
[iyunv@controller images]# glance image-create --name CentOS-6.5-x86_64 --disk-format qcow2 --container-format bare --is-public True --progress < centos_65_x86_6420140327.qcow2
[iyunv@controller images]# glance image-list
+--------------------------------------+-------------------+-------------+------------------+-----------+--------+
| ID | Name | Disk Format | Container Format | Size | Status |
+--------------------------------------+-------------------+-------------+------------------+-----------+--------+
| 48cb1672-3c92-4b72-9ac5-c3f5b68a5bfc | CentOS-6.5-x86_64 | qcow2 | bare | 305397760 | active |
+--------------------------------------+-------------------+-------------+------------------+-----------+--------+
4、计算服务Nova
cd
source admin-openrc.sh
yum install -y openstack-nova-api openstack-nova-cert openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler python-novaclient
openstack-config --set /etc/nova/nova.conf database connection mysql://nova:000000@controller/nova
openstack-config --set /etc/nova/nova.conf DEFAULT rpc_backend qpid
openstack-config --set /etc/nova/nova.conf DEFAULT qpid_hostname controller
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.100.10
openstack-config --set /etc/nova/nova.conf DEFAULT vncserver_listen 192.168.100.10
openstack-config --set /etc/nova/nova.conf DEFAULT vncserver_proxyclient_address 192.168.100.10
su -s /bin/sh -c "nova-manage db sync" nova
keystone user-create --name=nova --pass=000000 --email=nova@localhost
keystone user-role-add --user=nova --tenant=service --role=admin
openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_host controller
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/nova/nova.conf keystone_authtoken admin_user nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken admin_tenant_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken admin_password 000000
keystone service-create --name=nova --type=compute --description="OpenStack Compute"
keystone endpoint-create \
--service-id=$(keystone service-list | awk '/ compute / {print $2}') \
--publicurl=http://controller:8774/v2/%\(tenant_id\)s \
--internalurl=http://controller:8774/v2/%\(tenant_id\)s \
--adminurl=http://controller:8774/v2/%\(tenant_id\)s
service openstack-nova-api restart
service openstack-nova-cert restart
service openstack-nova-consoleauth restart
service openstack-nova-scheduler restart
service openstack-nova-conductor restart
service openstack-nova-novncproxy restart
chkconfig openstack-nova-api on
chkconfig openstack-nova-cert on
chkconfig openstack-nova-consoleauth on
chkconfig openstack-nova-scheduler on
chkconfig openstack-nova-conductor on
chkconfig openstack-nova-novncproxy on
[iyunv@controller ~]# nova image-list
+--------------------------------------+-------------------+--------+--------+
| ID | Name | Status | Server |
+--------------------------------------+-------------------+--------+--------+
| 48cb1672-3c92-4b72-9ac5-c3f5b68a5bfc | CentOS-6.5-x86_64 | ACTIVE | |
+--------------------------------------+-------------------+--------+--------+
[iyunv@controller ~]# nova service-list
+------------------+------------+----------+---------+-------+----------------------------+-----------------+
| Binary | Host | Zone | Status | State | Updated_at | Disabled Reason |
+------------------+------------+----------+---------+-------+----------------------------+-----------------+
| nova-cert | controller | internal | enabled | up | 2015-04-24T08:43:17.000000 | - |
| nova-consoleauth | controller | internal | enabled | up | 2015-04-24T08:43:12.000000 | - |
| nova-scheduler | controller | internal | enabled | up | 2015-04-24T08:43:18.000000 | - |
| nova-conductor | controller | internal | enabled | up | 2015-04-24T08:43:13.000000 | - |
+------------------+------------+----------+---------+-------+----------------------------+-----------------+
5、网络服务Neutron
source admin-openrc.sh
keystone user-create --name neutron --pass 000000 --email neutron@localhost
keystone user-role-add --user neutron --tenant service --role admin
keystone service-create --name neutron --type network --description "OpenStack Networking"
keystone endpoint-create \
--service-id $(keystone service-list | awk '/ network / {print $2}') \
--publicurl http://controller:9696 \
--adminurl http://controller:9696 \
--internalurl http://controller:9696
yum install -y openstack-neutron openstack-neutron-ml2 python-neutronclient openstack-neutron-openvswitch
openstack-config --set /etc/neutron/neutron.conf database connection mysql://neutron:000000@controller/neutron
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_host controller
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_tenant_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_user neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken admin_password 000000
openstack-config --set /etc/neutron/neutron.conf DEFAULT rpc_backend neutron.openstack.common.rpc.impl_qpid
openstack-config --set /etc/neutron/neutron.conf DEFAULT qpid_hostname controller
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_status_changes True
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_data_changes True
openstack-config --set /etc/neutron/neutron.conf DEFAULT nova_url http://controller:8774/v2
openstack-config --set /etc/neutron/neutron.conf DEFAULT nova_admin_username nova
openstack-config --set /etc/neutron/neutron.conf DEFAULT nova_admin_tenant_id $(keystone tenant-list | awk '/ service / { print $2 }')
openstack-config --set /etc/neutron/neutron.conf DEFAULT nova_admin_password 000000
openstack-config --set /etc/neutron/neutron.conf DEFAULT nova_admin_auth_url http://controller:35357/v2.0
vi /etc/sysctl.conf
net.ipv4.ip_forward = 1
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
sysctl -p
openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins router
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,gre
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types flat,gre
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers openvswitch
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_security_group True
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_gre tunnel_id_ranges 1:1000
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs local_ip 192.168.100.10
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs tunnel_type gre
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs enable_tunneling True
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks physnet1
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs flat_networks physnet1
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs bridge_mappings physnet1:br-ex
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
cp /etc/init.d/neutron-openvswitch-agent /etc/init.d/neutron-openvswitch-agent.orig
sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' /etc/init.d/neutron-openvswitch-agent
openstack-config --set /etc/nova/nova.conf DEFAULT network_api_class nova.network.neutronv2.api.API
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_url http://controller:9696
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_auth_strategy keystone
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_tenant_name service
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_username neutron
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_password 000000
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_auth_url http://controller:35357/v2.0
openstack-config --set /etc/nova/nova.conf DEFAULT linuxnet_interface_driver nova.network.linux_net.LinuxOVSInterfaceDriver
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
openstack-config --set /etc/nova/nova.conf DEFAULT security_group_api neutron
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT dhcp_driver neutron.agent.linux.dhcp.Dnsmasq
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT use_namespaces True
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT use_namespaces True
openstack-config --set /etc/nova/nova.conf DEFAULT service_neutron_metadata_proxy true
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_metadata_proxy_shared_secret METADATA_SECRET
service openstack-nova-api restart
vi /etc/sysconfig/network-scripts/ifcfg-br-ex
DEVICE=br-ex
ONBOOT=yes
NM_CONTROLLED=no
BOOTPROTO=static
DEFROUTE=yes
NAME="System br-ex"
IPADDR=192.168.200.10
GATEWAY=192.168.200.1
NETMASK=255.255.255.0
cp /etc/sysconfig/network-scripts/ifcfg-eth0 /etc/sysconfig/network-scripts/ifcfg-eth0.bak
cp /etc/sysconfig/network-scripts/ifcfg-eth1 /etc/sysconfig/network-scripts/ifcfg-eth1.bak
service openvswitch restart
chkconfig openvswitch on
ovs-vsctl add-br br-int
ovs-vsctl add-br br-ex
ovs-vsctl add-port br-ex eth1
ethtool -K eth1 gro off
service network restart
service neutron-server restart
service neutron-openvswitch-agent restart
service neutron-l3-agent restart
service neutron-dhcp-agent restart
service neutron-metadata-agent restart
chkconfig neutron-server on
chkconfig neutron-openvswitch-agent on
chkconfig neutron-l3-agent on
chkconfig neutron-dhcp-agent on
chkconfig neutron-metadata-agent on
[iyunv@controller ~]# neutron agent-list
+--------------------------------------+--------------------+------------+-------+----------------+
| id | agent_type | host | alive | admin_state_up |
+--------------------------------------+--------------------+------------+-------+----------------+
| 09f58d32-c470-4675-892e-76b014da0a7b | Open vSwitch agent | controller | :-) | True |
| 8c2198ba-b033-4faf-a891-9614e46c29af | DHCP agent | controller | :-) | True |
| 91a78d57-94e3-4e82-835b-67e86a5b14f2 | Metadata agent | controller | :-) | True |
| 95c83176-0867-4161-bdc3-bc455f6d92a4 | L3 agent | controller | :-) | True |
+--------------------------------------+--------------------+------------+-------+----------------+
source admin-openrc.sh
tenantID=`keystone tenant-list | grep service | awk '{print $2}'`
neutron net-create --tenant-id $tenantID flat-net --shared --provider:network_type flat --provider:physical_network physnet1
neutron subnet-create --tenant-id $tenantID --gateway 192.168.200.1 --name flat-subnet --allocation-pool start=192.168.200.100,end=192.168.200.200 flat-net 192.168.200.0/24
[iyunv@controller ~]# neutron net-list
+--------------------------------------+----------+-------------------------------------------------------+
| id | name | subnets |
+--------------------------------------+----------+-------------------------------------------------------+
| 201837d5-3f43-4828-9480-918dae7fc672 | flat-net | 269222fa-ba55-4712-bb32-6b7a1280bfe8 192.168.200.0/24 |
+--------------------------------------+----------+-------------------------------------------------------+
[iyunv@controller ~]# neutron subnet-list
+--------------------------------------+-------------+------------------+--------------------------------------------------------+
| id | name | cidr | allocation_pools |
+--------------------------------------+-------------+------------------+--------------------------------------------------------+
| 269222fa-ba55-4712-bb32-6b7a1280bfe8 | flat-subnet | 192.168.200.0/24 | {"start": "192.168.200.100", "end": "192.168.200.200"} |
+--------------------------------------+-------------+------------------+--------------------------------------------------------+
6、Dashboard
yum install -y memcached python-memcached mod_wsgi openstack-dashboard
vi /etc/openstack-dashboard/local_settings
TIME_ZONE = "Asia/Shanghai"
ALLOWED_HOSTS = ['*']
OPENSTACK_HOST = "controller"
setsebool -P httpd_can_network_connect on
vi /etc/httpd/conf/httpd.conf
ServerName 192.168.100.10:80
service httpd restart
service memcached restart
chkconfig httpd on
chkconfig memcached on
访问 http://192.168.100.10/dashboard 。如果出现无法访问控制台(VNC)的情况,可在本机 hosts 文件中添加一行 192.168.100.10 controller,或者把地址栏中的 controller 改为 192.168.100.10
启动云主机后
[iyunv@controller ~]# cp admin-openrc.sh user1-openrc.sh
[iyunv@controller ~]# vi user1-openrc.sh
export OS_USERNAME=user1
export OS_PASSWORD=000000
export OS_TENANT_NAME=user1
export OS_AUTH_URL=http://controller:35357/v2.0
[iyunv@controller ~]# nova list
+--------------------------------------+--------+--------+------------+-------------+--------------------------+
| ID | Name | Status | Task State | Power State | Networks |
+--------------------------------------+--------+--------+------------+-------------+--------------------------+
| 5c3fd4c3-e747-4748-a34a-cbc56fcc004b | centos | ACTIVE | - | Running | flat-net=192.168.200.101 |
+--------------------------------------+--------+--------+------------+-------------+--------------------------+
配置安全组规则,放行 ALL ICMP 和 SSH(TCP 22 端口),然后通过 SSH 登录云主机,用户名 root,密码 000000
[iyunv@host-192-168-200-101 ~]# hostname
host-192-168-200-101
[iyunv@host-192-168-200-101 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether fa:16:3e:30:2c:70 brd ff:ff:ff:ff:ff:ff
inet 192.168.200.101/24 brd 192.168.200.255 scope global eth0
inet6 fe80::f816:3eff:fe30:2c70/64 scope link
valid_lft forever preferred_lft forever
7、块存储服务Cinder
yum install openstack-cinder
openstack-config --set /etc/cinder/cinder.conf database connection mysql://cinder:000000@controller/cinder
su -s /bin/sh -c "cinder-manage db sync" cinder
source admin-openrc.sh
keystone user-create --name=cinder --pass=000000 --email=cinder@localhost
keystone user-role-add --user=cinder --tenant=service --role=admin
openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_host controller
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_user cinder
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_tenant_name service
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_password 000000
openstack-config --set /etc/cinder/cinder.conf DEFAULT rpc_backend qpid
openstack-config --set /etc/cinder/cinder.conf DEFAULT qpid_hostname controller
keystone service-create --name=cinder --type=volume --description="OpenStack Block Storage"
keystone endpoint-create \
--service-id=$(keystone service-list | awk '/ volume / {print $2}') \
--publicurl=http://controller:8776/v1/%\(tenant_id\)s \
--internalurl=http://controller:8776/v1/%\(tenant_id\)s \
--adminurl=http://controller:8776/v1/%\(tenant_id\)s
keystone service-create --name=cinderv2 --type=volumev2 --description="OpenStack Block Storage v2"
keystone endpoint-create \
--service-id=$(keystone service-list | awk '/ volumev2 / {print $2}') \
--publicurl=http://controller:8776/v2/%\(tenant_id\)s \
--internalurl=http://controller:8776/v2/%\(tenant_id\)s \
--adminurl=http://controller:8776/v2/%\(tenant_id\)s
service openstack-cinder-api start
service openstack-cinder-scheduler start
chkconfig openstack-cinder-api on
chkconfig openstack-cinder-scheduler on
[iyunv@controller ~]# source user1-openrc.sh
[iyunv@controller ~]# cinder create --display-name cindervolume 1
+---------------------+--------------------------------------+
| Property | Value |
+---------------------+--------------------------------------+
| attachments | [] |
| availability_zone | nova |
| bootable | false |
| created_at | 2015-04-24T10:10:26.869800 |
| display_description | None |
| display_name | cindervolume |
| encrypted | False |
| id | 8fe4b453-071c-4e95-b2a7-f6af44913f22 |
| metadata | {} |
| size | 1 |
| snapshot_id | None |
| source_volid | None |
| status | creating |
| volume_type | None |
+---------------------+--------------------------------------+
[iyunv@controller ~]# cinder list
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
| ID | Status | Display Name | Size | Volume Type | Bootable | Attached to |
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
| 8fe4b453-071c-4e95-b2a7-f6af44913f22 | available | cindervolume | 1 | None | false | |
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
[iyunv@controller ~]# cinder extend cindervolume 5
[iyunv@controller ~]# cinder list
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
| ID | Status | Display Name | Size | Volume Type | Bootable | Attached to |
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
| 8fe4b453-071c-4e95-b2a7-f6af44913f22 | available | cindervolume | 5 | None | false | |
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
8、监控服务Ceilometer(含 cinder 计量配置)
yum install openstack-ceilometer-api openstack-ceilometer-collector openstack-ceilometer-notification openstack-ceilometer-central openstack-ceilometer-alarm python-ceilometerclient mongodb-server mongodb
vi /etc/mongodb.conf
smallfiles = true
bind_ip = 192.168.100.10
service mongod start
chkconfig mongod on
mongo --host controller --eval '
db = db.getSiblingDB("ceilometer");
db.addUser({user: "ceilometer",
pwd: "CEILOMETER_DBPASS",
roles: [ "readWrite", "dbAdmin" ]})'
source admin-openrc.sh
openstack-config --set /etc/ceilometer/ceilometer.conf database connection mongodb://ceilometer:CEILOMETER_DBPASS@controller:27017/ceilometer
CEILOMETER_TOKEN=$(openssl rand -hex 10)
echo $CEILOMETER_TOKEN
openstack-config --set /etc/ceilometer/ceilometer.conf publisher metering_secret $CEILOMETER_TOKEN
openstack-config --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend ceilometer.openstack.common.rpc.impl_qpid
keystone user-create --name=ceilometer --pass=CEILOMETER_PASS --email=ceilometer@localhost
keystone user-role-add --user=ceilometer --tenant=service --role=admin
openstack-config --set /etc/ceilometer/ceilometer.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/ceilometer/ceilometer.conf \
keystone_authtoken auth_host controller
openstack-config --set /etc/ceilometer/ceilometer.conf \
keystone_authtoken admin_user ceilometer
openstack-config --set /etc/ceilometer/ceilometer.conf \
keystone_authtoken admin_tenant_name service
openstack-config --set /etc/ceilometer/ceilometer.conf \
keystone_authtoken auth_protocol http
openstack-config --set /etc/ceilometer/ceilometer.conf \
keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/ceilometer/ceilometer.conf \
keystone_authtoken admin_password CEILOMETER_PASS
openstack-config --set /etc/ceilometer/ceilometer.conf \
service_credentials os_auth_url http://controller:5000/v2.0
openstack-config --set /etc/ceilometer/ceilometer.conf \
service_credentials os_username ceilometer
openstack-config --set /etc/ceilometer/ceilometer.conf \
service_credentials os_tenant_name service
openstack-config --set /etc/ceilometer/ceilometer.conf \
service_credentials os_password CEILOMETER_PASS
keystone service-create --name=ceilometer --type=metering --description="Telemetry"
keystone endpoint-create \
--service-id=$(keystone service-list | awk '/ metering / {print $2}') \
--publicurl=http://controller:8777 \
--internalurl=http://controller:8777 \
--adminurl=http://controller:8777
service openstack-ceilometer-api start
service openstack-ceilometer-notification start
service openstack-ceilometer-central start
service openstack-ceilometer-collector start
service openstack-ceilometer-alarm-evaluator start
service openstack-ceilometer-alarm-notifier start
chkconfig openstack-ceilometer-api on
chkconfig openstack-ceilometer-notification on
chkconfig openstack-ceilometer-central on
chkconfig openstack-ceilometer-collector on
chkconfig openstack-ceilometer-alarm-evaluator on
chkconfig openstack-ceilometer-alarm-notifier on
openstack-config --set /etc/cinder/cinder.conf DEFAULT control_exchange cinder
openstack-config --set /etc/cinder/cinder.conf DEFAULT notification_driver cinder.openstack.common.notifier.rpc_notifier
service openstack-cinder-api restart
service openstack-cinder-scheduler restart
[iyunv@controller ~]# ceilometer meter-list
+------------+-------+-------+--------------------------------------+---------+----------------------------------+
| Name | Type | Unit | Resource ID | User ID | Project ID |
+------------+-------+-------+--------------------------------------+---------+----------------------------------+
| image | gauge | image | 48cb1672-3c92-4b72-9ac5-c3f5b68a5bfc | None | 1c5f572715374366b3c24d982c9ea0bd |
| image.size | gauge | B | 48cb1672-3c92-4b72-9ac5-c3f5b68a5bfc | None | 1c5f572715374366b3c24d982c9ea0bd |
+------------+-------+-------+--------------------------------------+---------+----------------------------------+
[iyunv@controller ~]# ceilometer statistics -m image -p 60
+--------+---------------------+---------------------+-----+-----+-----+-----+-------+----------+---------------------+---------------------+
| Period | Period Start | Period End | Max | Min | Avg | Sum | Count | Duration | Duration Start | Duration End |
+--------+---------------------+---------------------+-----+-----+-----+-----+-------+----------+---------------------+---------------------+
| 60 | 2015-04-24T11:56:36 | 2015-04-24T11:57:36 | 1.0 | 1.0 | 1.0 | 1.0 | 1 | 0.0 | 2015-04-24T11:56:36 | 2015-04-24T11:56:36 |
| 60 | 2015-04-24T12:05:36 | 2015-04-24T12:06:36 | 1.0 | 1.0 | 1.0 | 1.0 | 1 | 0.0 | 2015-04-24T12:06:33 | 2015-04-24T12:06:33 |
+--------+---------------------+---------------------+-----+-----+-----+-----+-------+----------+---------------------+---------------------+
样题.zip
522.51 KB, 下载次数: 0
拓扑以及环境部署
运维网声明
1、欢迎大家加入本站运维交流群:群②:261659950 群⑤:202807635 群⑦870801961 群⑧679858003
2、本站所有主题由该帖子作者发表,该帖子作者与运维网 享有帖子相关版权
3、所有作品的著作权均归原作者享有,请您和我们一样尊重他人的著作权等合法权益。如果您对作品感到满意,请购买正版
4、禁止制作、复制、发布和传播具有反动、淫秽、色情、暴力、凶杀等内容的信息,一经发现立即删除。若您因此触犯法律,一切后果自负,我们对此不承担任何责任
5、所有资源均系网友上传或者通过网络收集,我们仅提供一个展示、介绍、观摩学习的平台,我们不对其内容的准确性、可靠性、正当性、安全性、合法性等负责,亦不承担任何法律责任
6、所有作品仅供您个人学习、研究或欣赏,不得用于商业或者其他用途,否则,一切后果均由您自己承担,我们对此不承担任何法律责任
7、如涉及侵犯版权等问题,请您及时通知我们,我们将立即采取措施予以解决
8、联系人Email:admin@iyunv.com 网址:www.yunweiku.com