# Node inventory: eth0 = management network, eth1 = data (tunnel) network
controller  eth0 192.168.101.211  eth1 192.168.200.211
compute     eth0 192.168.101.212  eth1 192.168.200.212
network     eth0 192.168.101.213  eth1 192.168.200.213
# Base image: the minimal CentOS-7-x86_64-Minimal-1511.iso
# I: The following steps must be performed on all three servers
# Disable SELinux and stop iptables first
# 1. Install vim
yum install vim
# 2. Configure name resolution in /etc/hosts
vim /etc/hosts
192.168.101.211 controller
192.168.101.212 compute
192.168.101.213 network
# 3. Install net-tools (ifconfig, etc.) and update all packages
yum makecache && yum install net-tools -y&& yum update -y
# 4. Install and configure time synchronization (chrony)
yum install chrony -y
# Controller node: keep the default upstream "server" entries (or point them at a local NTP source) and allow the management subnet to sync from this node
vim /etc/chrony.conf
allow 192.168.101.0/24
# Other nodes: sync from the controller
vim /etc/chrony.conf
server controller iburst
# Common to all nodes
systemctl enable chronyd.service
systemctl start chronyd.service
# Keep the hardware clock in local time (a value of 0 means UTC)
timedatectl set-local-rtc 1
# Set the system timezone to Shanghai
timedatectl set-timezone Asia/Shanghai
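# To confirm synchronization is working, each node should list a reachable time source:
chronyc sources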
# II: Install the OpenStack repository and base packages (all nodes)
# Keep repo priorities so updates do not pull in conflicting packages
yum install yum-plugin-priorities -y
# Install the OpenStack Mitaka yum repository
yum install centos-release-openstack-mitaka -y
# Run on all nodes:
yum upgrade
yum install python-openstackclient -y
yum install openstack-selinux -y
# III: Deploy the MariaDB database
# Controller node:
yum install mariadb mariadb-server python2-PyMySQL -y
vim /etc/my.cnf.d/openstack.cnf
[mysqld]
# management network IP of the controller node
bind-address = 192.168.101.211
default-storage-engine = innodb
innodb_file_per_table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
systemctl enable mariadb.service
systemctl start mariadb.service
mysql_secure_installation
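# A quick sanity check: mysqld should now be listening on the management IP
ss -tnlp | grep 3306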
# IV: Deploy MongoDB
# Controller node:
yum install mongodb-server mongodb -y
vim /etc/mongod.conf
# management network IP of the controller node
bind_ip = 192.168.101.211
smallfiles = true
systemctl enable mongod.service
systemctl start mongod.service
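# A quick sanity check (assuming no auth has been enabled yet); this should print 1:
mongo --host 192.168.101.211 --eval 'db.serverStatus().ok'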
# V: Deploy the RabbitMQ message queue (to verify: http://192.168.101.211:15672, user guest, password guest)
# Controller node
yum install rabbitmq-server -y
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service
# Create the openstack user with password "password"
rabbitmqctl add_user openstack password
# Grant the new user full permissions
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
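# Note: the web UI on port 15672 mentioned above only appears after enabling the management plugin:
rabbitmq-plugins enable rabbitmq_management
systemctl restart rabbitmq-server.service
# Verify the user and its permissions:
rabbitmqctl list_users
rabbitmqctl list_permissions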
# VI: Deploy memcached (caches tokens for the keystone service)
# Controller node
yum install memcached python-memcached -y
systemctl enable memcached.service
systemctl start memcached.service
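# A quick sanity check: memcached should be listening on port 11211
ss -tnlp | grep 11211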
# Part 2: Deploying the identity service (keystone)
# I: Install and configure the service
# 1. Create the database and user
mysql -u root -p
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'password';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'password';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'controller' IDENTIFIED BY 'password';
flush privileges;
# 2. Install keystone, httpd and mod_wsgi
yum install openstack-keystone httpd mod_wsgi -y
# 3. Configure keystone
vim /etc/keystone/keystone.conf
[DEFAULT]
admin_token = password  # better: generate a random token with "openssl rand -hex 10"
[database]
connection = mysql+pymysql://keystone:password@controller/keystone
[token]
provider = fernet
#Token Provider:UUID, PKI, PKIZ, or Fernet
#http://blog.csdn.net/miss_yang_cloud/article/details/49633719
# 4. Populate the keystone database
su -s /bin/sh -c "keystone-manage db_sync" keystone
# 5. Initialize the fernet keys
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
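# The key repository should now exist:
ls /etc/keystone/fernet-keys/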
# 6. Configure the Apache service
vim /etc/httpd/conf/httpd.conf
ServerName controller
vim /etc/httpd/conf.d/wsgi-keystone.conf
Listen 5000
Listen 35357
<VirtualHost *:5000>
WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-public
WSGIScriptAlias / /usr/bin/keystone-wsgi-public
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
ErrorLogFormat "%{cu}t %M"
ErrorLog /var/log/httpd/keystone-error.log
CustomLog /var/log/httpd/keystone-access.log combined
<Directory /usr/bin>
Require all granted
</Directory>
</VirtualHost>
<VirtualHost *:35357>
WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-admin
WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
ErrorLogFormat "%{cu}t %M"
ErrorLog /var/log/httpd/keystone-error.log
CustomLog /var/log/httpd/keystone-access.log combined
<Directory /usr/bin>
Require all granted
</Directory>
</VirtualHost>
# 7. Start the service:
systemctl enable httpd.service
systemctl start httpd.service
# II: Create the service entity and API endpoints
# 1. Set admin environment variables; these grant the privileges used below
export OS_TOKEN=password
export OS_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
# 2. With those privileges, create the identity service entity (the service catalog)
openstack service create --name keystone --description "OpenStack Identity" identity
# 3. Create the three API endpoints for the service entity just created
openstack endpoint create --region RegionOne identity public http://controller:5000/v3
openstack endpoint create --region RegionOne identity internal http://controller:5000/v3
openstack endpoint create --region RegionOne identity admin http://controller:35357/v3
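# The three endpoints should now show up in the catalog:
openstack endpoint list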
# III: Create a domain, project (tenant), user and role, and associate the four
# Create a common domain:
openstack domain create --description "Default Domain" default
# Administrator: admin
openstack project create --domain default --description "Admin Project" admin
# use password: admin
openstack user create --domain default --password-prompt admin
openstack role create admin
openstack role add --project admin --user admin admin
# Regular user: demo
openstack project create --domain default --description "Demo Project" demo
# use password: demo
openstack user create --domain default --password-prompt demo
openstack role create user
openstack role add --project demo --user demo user
# Create the shared project "service" for all later services
# Note: every service deployed from here on needs four keystone operations: 1. create a project 2. create a user 3. create a role 4. associate them
# Since all later services share the single "service" project and the admin role,
# only operations 2 and 4 remain for each new service in practice.
openstack project create --domain default --description "Service Project" service
# IV: Verification:
vim /etc/keystone/keystone-paste.ini
# In the [pipeline:public_api], [pipeline:admin_api] and [pipeline:api_v3] sections,
# remove: admin_token_auth
unset OS_TOKEN OS_URL
openstack --os-auth-url http://controller:35357/v3 --os-project-domain-name default --os-user-domain-name default --os-project-name admin --os-username admin token issue
Password:   # enter the admin user's password: admin
+------------+-----------------------------------------------------------------+
| Field | Value |
+------------+-----------------------------------------------------------------+
| expires | 2016-10-06T11:13:54.248329Z |
| id | gAAAAABX9iPib_UNpY-8RZmatyjqnWlIz5rN3HknZ-OB260hzODXgzK8GO7him2 |
| | aAnpOTtLagMaKBxhej5FqLcbqrgyvzYwfG23eEGTZAWNxhCePNmnVVZSDDzalFE |
| | WhvbWb5BiPkVyoPJx2KxLmPsLsmUsHZmLOim5qqeAl5kg8CQNviob50Ls |
| project_id | a47533d927aa475a8bcef222c61421ee |
| user_id | acfaac29ce1d48958c2a1e22a3daab12 |
+------------+-----------------------------------------------------------------+
# V: Create client environment scripts
# Administrator: admin-openrc
vim admin-openrc
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
# Regular user demo: demo-openrc
vim demo-openrc
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=demo
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
# To test:
source admin-openrc
openstack token issue
# Part 3: Deploying the image service (glance)
# I: Install and configure the service
# 1. Create the database and user
mysql -u root -p
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'password';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'password';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'controller' IDENTIFIED BY 'password';
flush privileges;
# 2. Keystone operations:
# As noted above, all later services share the "service" project; each one needs its own user, the admin role, and the association
. admin-openrc
# use password: password
openstack user create --domain default --password-prompt glance
openstack role add --project service --user glance admin
# Create the service entity
openstack service create --name glance --description "OpenStack Image" image
# Create the endpoints
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292
# 3. Install the package
yum install openstack-glance -y
# 4. Edit the configuration:
vim /etc/glance/glance-api.conf
[database]
# This connection is used by db_sync to create the schema; without it the tables cannot be created.
# Leaving [database] unset in glance-api does not affect VM creation, but it breaks metadata definitions
# (log error: ERROR glance.api.v2.metadef_namespaces)
connection = mysql+pymysql://glance:password@controller/glance
[keystone_authtoken]
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = password
[paste_deploy]
flavor = keystone
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
vim /etc/glance/glance-registry.conf
[database]
# This connection is used by glance-registry to look up image metadata
connection = mysql+pymysql://glance:password@controller/glance
# Create the image store directory:
mkdir /var/lib/glance/images/
chown glance. /var/lib/glance/images/
# Populate the database (warnings mentioning "future" can be ignored):
su -s /bin/sh -c "glance-manage db_sync" glance
# Start the services:
systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl start openstack-glance-api.service openstack-glance-registry.service
# II: Verification:
. admin-openrc
wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
# (local mirror: wget http://172.16.209.100/cirros-0.3.4-x86_64-disk.img)
openstack image create "cirros" --file cirros-0.3.4-x86_64-disk.img --disk-format qcow2 --container-format bare --public
openstack image list
# Part 4: Deploying the compute service (nova)
# I: Controller node configuration
# 1. Create the databases and user
mysql -u root -p
CREATE DATABASE nova_api;
CREATE DATABASE nova;
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'password';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'password';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'controller' IDENTIFIED BY 'password';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'password';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'password';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'controller' IDENTIFIED BY 'password';
flush privileges;
# 2. Keystone operations
. admin-openrc
openstack user create --domain default --password-prompt nova
openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1/%\(tenant_id\)s
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1/%\(tenant_id\)s
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1/%\(tenant_id\)s
# 3. Install the packages:
yum install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler -y
# 4. Edit the configuration:
vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
rpc_backend = rabbit
auth_strategy = keystone
# management network IP
my_ip = 192.168.101.211
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api_database]
connection = mysql+pymysql://nova:password@controller/nova_api
[database]
connection = mysql+pymysql://nova:password@controller/nova
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = password
[keystone_authtoken]
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = password
[vnc]
# management network IP
vncserver_listen = 192.168.101.211
# management network IP
vncserver_proxyclient_address = 192.168.101.211
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
# 5. Populate the databases (warnings mentioning "future" can be ignored):
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage db sync" nova
# 6. Start the services
systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
# II: Compute node configuration
# 1. Install the packages:
yum install openstack-nova-compute libvirt-daemon-lxc -y
# 2. Edit the configuration:
vim /etc/nova/nova.conf
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
# management network IP of the compute node
my_ip = 192.168.101.212
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = password
[vnc]
enabled = True
vncserver_listen = 0.0.0.0
# management network IP of the compute node
vncserver_proxyclient_address = 192.168.101.212
# management network IP of the controller node
novncproxy_base_url = http://192.168.101.211:6080/vnc_auto.html
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
# 3. If deploying nova on a machine without hardware virtualization support, verify:
egrep -c '(vmx|svm)' /proc/cpuinfo
# if the result is 0, fall back to software emulation:
vim /etc/nova/nova.conf
[libvirt]
virt_type = qemu
# 4. Start the services
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service
# III: Verification
# On the controller node
source admin-openrc
openstack compute service list
[iyunv@controller ~]# openstack compute service list
+----+------------------+------------+----------+---------+-------+----------------------------+
| Id | Binary | Host | Zone | Status | State | Updated At |
+----+------------------+------------+----------+---------+-------+----------------------------+
| 1 | nova-scheduler | controller | internal | enabled | up | 2016-10-06T15:11:52.000000 |
| 2 | nova-conductor | controller | internal | enabled | up | 2016-10-06T15:11:52.000000 |
| 3 | nova-consoleauth | controller | internal | enabled | up | 2016-10-06T15:11:51.000000 |
| 6 | nova-compute | compute | nova | enabled | up | 2016-10-06T15:11:50.000000 |
+----+------------------+------------+----------+---------+-------+----------------------------+
# Part 5: Deploying the networking service (neutron)
# I: Controller node configuration
# 1. Create the database and user
mysql -u root -p
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'password';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'password';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'controller' IDENTIFIED BY 'password';
flush privileges;
# 2. Keystone operations
. admin-openrc
# use password: password
openstack user create --domain default --password-prompt neutron
openstack role add --project service --user neutron admin
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
# 3. Install the packages
yum install openstack-neutron openstack-neutron-ml2 python-neutronclient which -y
# 4. Configure the server component
vim /etc/neutron/neutron.conf
# Configure the following sections:
[DEFAULT]
core_plugin = ml2
service_plugins = router
# the next setting enables overlapping IP addresses
allow_overlapping_ips = True
rpc_backend = rabbit
auth_strategy = keystone
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = password
[database]
connection = mysql+pymysql://neutron:password@controller/neutron
[keystone_authtoken]
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = password
[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = password
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,vxlan,gre
tenant_network_types = vxlan
mechanism_drivers = openvswitch,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_ipset = True
vim /etc/nova/nova.conf
[neutron]
url = http://controller:9696
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = password
service_metadata_proxy = True
# must match metadata_proxy_shared_secret in metadata_agent.ini (set below on the network node)
metadata_proxy_shared_secret = password
# 5. Create the plugin symlink
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
# 6. Populate the database (warnings mentioning "future" can be ignored):
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
# 7. Restart the nova API service
systemctl restart openstack-nova-api.service
# 8. Start the neutron service
systemctl enable neutron-server.service
systemctl start neutron-server.service
# II: Network node configuration
# 1. Edit /etc/sysctl.conf:
net.ipv4.ip_forward=1
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
# 2. Apply the changes immediately:
sysctl -p
# 3. Install the packages
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch -y
# 4. Configure the components
vim /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
rpc_backend = rabbit
auth_strategy = keystone
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = password
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
# 5. Edit the Open vSwitch agent configuration:
vim /etc/neutron/plugins/ml2/openvswitch_agent.ini
[ovs]
# data network IP of the network node
local_ip=192.168.200.213
bridge_mappings=external:br-ex
[agent]
tunnel_types=gre,vxlan
l2_population=True
prevent_arp_spoofing=True
# 6. Configure the L3 agent. Edit:
vim /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver=neutron.agent.linux.interface.OVSInterfaceDriver
external_network_bridge=br-ex
# 7. Configure the DHCP agent. Edit:
vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver=neutron.agent.linux.interface.OVSInterfaceDriver
dhcp_driver=neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata=True
# 8. Configure the metadata agent. Edit:
vim /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_ip=controller
metadata_proxy_shared_secret=password
# 9. Start the services
# On the network node:
systemctl enable neutron-openvswitch-agent.service neutron-l3-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl start neutron-openvswitch-agent.service neutron-l3-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
# 10. Create the external bridge
# If you have a dedicated NIC for bridging, use this step with that NIC's name; if you must reuse a NIC, add it via the bridge-config method below
ovs-vsctl add-br br-ex
# eth2 here is the physical (uplink) NIC
ovs-vsctl add-port br-ex eth2
# Note: if NICs are limited and you want the network node's management NIC to be the physical NIC bound to br-ex,
# remove the IP from that NIC and create a config file for br-ex that carries the original management IP
ovs-vsctl add-br br-ex
[iyunv@network ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
TYPE=Ethernet
ONBOOT="yes"
BOOTPROTO="none"
[iyunv@network ~]# cat /etc/sysconfig/network-scripts/ifcfg-br-ex
DEVICE=br-ex
TYPE=Ethernet
ONBOOT="yes"
BOOTPROTO="none"
HWADDR=bc:ee:7b:78:7b:a7
IPADDR=192.168.101.213
GATEWAY=192.168.101.1
NETMASK=255.255.255.0
DNS1=202.106.0.20
DNS2=8.8.8.8
NM_CONTROLLED=no  # important: without this the interface may fail to come up
systemctl restart network
ovs-vsctl add-port br-ex eth0
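# Verify the bridge layout; br-ex should list the physical NIC (eth0 or eth2 above) as a port:
ovs-vsctl show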
# III: Compute node configuration
# 1. Edit:
vim /etc/sysctl.conf
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
# 2. Apply:
sysctl -p
# 3. Install the packages:
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch -y
# 4. Edit:
vim /etc/neutron/neutron.conf
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = password
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
# 5. Edit:
vim /etc/neutron/plugins/ml2/openvswitch_agent.ini
[ovs]
# data network IP of the compute node
local_ip = 192.168.200.212
#bridge_mappings = vlan:br-vlan
[agent]
tunnel_types = gre,vxlan
l2_population = True
prevent_arp_spoofing = True
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group = True
# 6. Edit:
vim /etc/nova/nova.conf
[neutron]
url = http://controller:9696
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = password
# 7. Start the services
systemctl enable neutron-openvswitch-agent.service
systemctl start neutron-openvswitch-agent.service
systemctl restart openstack-nova-compute.service
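# Back on the controller, all agents (L3, DHCP and metadata on the network node, plus one Open vSwitch agent each on the network and compute nodes) should report alive:
. admin-openrc
neutron agent-list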
# Part 6: Deploying the dashboard (horizon)
# On the controller node
# 1. Install the package
yum install openstack-dashboard -y
# 2. Configure
vim /etc/openstack-dashboard/local_settings
ALLOWED_HOSTS = ['*', ]
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2,
"volume": 2,
}
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "default"
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'controller:11211',
}
}
OPENSTACK_HOST = "controller"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
TIME_ZONE = "UTC"
# 3. Restart the services
systemctl enable httpd.service memcached.service
systemctl restart httpd.service memcached.service
# 4. Verify:
http://192.168.101.211/dashboard
# Summary:
# Only the API layer talks to keystone, so do not scatter keystone configuration everywhere.
# When an instance is built, nova-compute does the calling of the other services' APIs, so nothing extra needs configuring on the controller for those calls.
# ml2 is neutron's core plugin and only needs to be configured on the controller node.
# The network node only needs its agents configured.
# Besides accepting requests, each component's API also validates them:
#   the controller's nova.conf needs neutron's API and credentials, because "nova boot" must verify that the networks the user submits are valid;
#   the controller's neutron.conf needs nova's API and credentials, because deleting a network port means asking nova-api whether an instance is still using it;
#   the compute node's nova.conf needs neutron, because nova-compute asks neutron-server to create ports. "Port" here means a port on the virtual switch.
# Confused? Study how the OpenStack components communicate and how instance creation flows, or come take my course; most blog posts don't teach the real thing.
#
# Network troubleshooting:
# On the network node:
[iyunv@network ~]# ip netns show
qrouter-7096bd89-908a-4e9d-90dc-a539b024f1d5
qdhcp-8f4fb890-4328-4e87-a3c4-b4906e7e34fb
qdhcp-0b550e2d-7c6f-42fa-84f0-13a4f9a58c50
[iyunv@network ~]# ip netns exec qrouter-7096bd89-908a-4e9d-90dc-a539b024f1d5 bash
[iyunv@network ~]# ping www.baidu.com
PING www.a.shifen.com (61.135.169.125) 56(84) bytes of data.
64 bytes from 61.135.169.125: icmp_seq=1 ttl=56 time=7.01 ms
64 bytes from 61.135.169.125: icmp_seq=2 ttl=56 time=11.6 ms
# If the ping fails, exit the namespace and rebuild the bridges:
ovs-vsctl del-br br-ex
ovs-vsctl del-br br-int
ovs-vsctl del-br br-tun
ovs-vsctl add-br br-int
ovs-vsctl add-br br-ex
ovs-vsctl add-port br-ex eth0
systemctl restart neutron-openvswitch-agent.service neutron-l3-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service