564543 发表于 2016-7-22 09:46:58

openstack安装配置(三)

增加Networking - 前期准备(controller)

Networking又叫做Neutron,是Openstack必不可少的组件,它其实是网络虚拟化的实现工具,可以让我们模拟出路由器、交换机、网卡等网络设备。
Neutron支持两种网络模式,第一种是非常简单的网络架构,它仅支持让实例连接外网,不支持自定义网络、路由器以及浮动ip,只有管理员或者授权的用户有权限去管理网络。第二种网络功能比较强大,支持自定义网络管理,支持自建路由器并且也支持浮动ip。即使没有授权的用户也可以管理网络,支持用户自己配置和管理。
创建库、授权账号 mysql -uroot -ptn1Pi6Ytm
> CREATE DATABASE neutron;
> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost'    IDENTIFIED BY 'quidyOC50';
> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%'    IDENTIFIED BY 'quidyOC50';
执行脚本 source admin-openrc.sh
创建neutron用户(密码为mdcGVl29i)
openstack user create --domain default --password-prompt neutron
把admin角色添加到neutron用户里
openstack role add --project service --user neutron admin
创建neutron实例
openstack service create --name neutron   --description "OpenStack Networking" network
创建networking服务api终端
openstack endpoint create --region RegionOne   network public http://controller:9696
openstack endpoint create --region RegionOne   network internal http://controller:9696
openstack endpoint create --region RegionOne   network admin http://controller:9696

增加Networking - 配置(controller)

安装组件
yum install openstack-neutron openstack-neutron-ml2 \
openstack-neutron-linuxbridge python-neutronclient ebtables ipset -y
配置服务端组件
vim /etc/neutron/neutron.conf //更改或增加

core_plugin = ml2
service_plugins =
rpc_backend = rabbit
auth_strategy = keystone
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
nova_url = http://controller:8774/v2
verbose = True


connection = mysql://neutron:quidyOC50@controller/neutron


rabbit_host = controller
rabbit_userid = openstack
rabbit_password = o3NXovnz5


auth_uri = http://controller:5000
auth_url = http://controller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = mdcGVl29i


auth_url = http://controller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = nova
password = hsSNsqc43

lock_path = /var/lib/neutron/tmp
配置ml2 插件
vim /etc/neutron/plugins/ml2/ml2_conf.ini //更改或增加

type_drivers = flat,vlan
tenant_network_types =
mechanism_drivers = linuxbridge
extension_drivers = port_security


flat_networks = public

enable_ipset = True

编辑linux桥接agent
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini //增加或更改

physical_interface_mappings = public:eno16777736

enable_vxlan = False

prevent_arp_spoofing = True

enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

配置dhcp agent
vim /etc/neutron/dhcp_agent.ini //增加或更改

interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = True
verbose = True

增加Networking - 配置元数据agent(controller)

编辑配置文件
vim /etc/neutron/metadata_agent.ini //更改或增加

auth_uri = http://controller:5000
auth_url = http://controller:35357
auth_region = RegionOne
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = mdcGVl29i
nova_metadata_ip = controller
metadata_proxy_shared_secret = m8uhmQTu2
verbose = True
说明:需要删除掉配置文件里原有的 auth_url、auth_region、admin_tenant_name、admin_user、admin_password

增加Networking - compute使用网络(controller)

vim /etc/nova/nova.conf//更改或添加

url = http://controller:9696
auth_url = http://controller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = neutron
password = mdcGVl29i

service_metadata_proxy = True
metadata_proxy_shared_secret = m8uhmQTu2

增加Networking - 启动服务(controller)

创建ml2插件配置文件创建软连接
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
生成数据
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
--config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
重启compute api服务
systemctl restart openstack-nova-api.service
启动服务
systemctl enable neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service
systemctl start neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service
systemctl enable neutron-l3-agent.service
systemctl start neutron-l3-agent.service

增加Networking - 配置compute节点(compute)

安装组件 yum install -y openstack-neutron openstack-neutron-linuxbridge ebtables ipset
配置普通组件vim /etc/neutron/neutron.conf //更改或增加

rpc_backend = rabbit
auth_strategy = keystone
verbose = True

rabbit_host = controller
rabbit_userid = openstack
rabbit_password = o3NXovnz5

auth_uri = http://controller:5000
auth_url = http://controller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = mdcGVl29i

lock_path = /var/lib/neutron/tmp

配置linux桥接agent
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini

physical_interface_mappings = public:eno16777736


enable_vxlan = False


prevent_arp_spoofing = True


enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

配置compute使用网络
vim /etc/nova/nova.conf//更改或增加

url = http://controller:9696
auth_url = http://controller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = neutron
password = mdcGVl29i
启动服务
systemctl restart openstack-nova-compute.service   
systemctl enable neutron-linuxbridge-agent.service
systemctl start neutron-linuxbridge-agent.service

增加Networking - 验证配置(controller)

执行环境变量脚本
source admin-openrc.sh

列出所有的扩展
neutron ext-list

列出所有agent
neutron agent-list

agent type如下:
Linux bridge agent
Linux bridge agent
DHCP agent
Metadata agent
必须要有4个,否则说明上面的某个步骤配置有问题。

增加dashboard- horizon (controller)--图形化控制台

安装包 yum install -y openstack-dashboard
编辑配置文件
vim /etc/openstack-dashboard/local_settings //更改或增加
OPENSTACK_HOST = "controller"
ALLOWED_HOSTS = ['*', ]
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': '127.0.0.1:11211',
} }
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_API_VERSIONS = {
"identity": 3,
"volume": 2,
}
TIME_ZONE = "Asia/Chongqing"
重启服务systemctl restart httpd.service memcached.service
此时可以去访问了 http://controller/dashboard ,使用账号admin或者demo用户登陆即可,密码为前期准备的时候用mkpasswd生成的密码3qiVpzU2x,域为default

增加block storage - 前期准备 (controller)
block storage又叫做cinder,用来给openstack提供存储服务,比如我们在阿里云购买一台云主机,同时想购买容量大的磁盘,通常叫做云盘,这个云盘就是block storage。
创建库并授权cinder用户
mysql -uroot -ptn1Pi6Ytm
> CREATE DATABASE cinder;
> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost'    IDENTIFIED BY 'O3bwbpoZ3';
> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%'    IDENTIFIED BY 'O3bwbpoZ3';
执行初始化脚本
source admin-openrc.sh
创建cinder用户 (密码为hf8LX9bow)
openstack user create --domain default --password-prompt cinder
添加admin角色
openstack role add --project service --user cinder admin

创建cinder和cinderv2 实例
openstack service create --name cinder \
--description "OpenStack Block Storage" volume

openstack service create --name cinderv2 \
--description "OpenStack Block Storage" volumev2

创建块存储服务api终端
openstack endpoint create --region RegionOne volume public http://controller:8776/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne volume internal http://controller:8776/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne volume admin http://controller:8776/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne volumev2 public http://controller:8776/v2/%\(tenant_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://controller:8776/v2/%\(tenant_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://controller:8776/v2/%\(tenant_id\)s

增加block storage - 安装和配置 (controller)

安装包yum install -y openstack-cinder python-cinderclient
编辑配置文件vi /etc/cinder/cinder.conf//更改或增加

connection = mysql://cinder:O3bwbpoZ3@controller/cinder

rpc_backend = rabbit
auth_strategy = keystone
my_ip = 192.168.16.111
verbose = True

auth_uri = http://controller:5000
auth_url = http://controller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = cinder
password = hf8LX9bow


rabbit_host = controller
rabbit_userid = openstack
rabbit_password = o3NXovnz5

lock_path = /var/lib/cinder/tmp

同步数据su -s /bin/sh -c "cinder-manage db sync" cinder

配置compute使用块存储
vi /etc/nova/nova.conf

os_region_name=RegionOne

启动服务
systemctl restart openstack-nova-api.service
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service

增加block storage - 配置storage节点 (compute)       

我们理应需要再准备一台单独的机器来做storage服务的,但是为了节省资源,我们就拿compute节点和storage节点共用。这里需要为compute(storage)节点再增加一块磁盘(/dev/sdb)作为存储磁盘。
安装lvm yum install -y lvm2

启动服务
systemctl enable lvm2-lvmetad.service
systemctl start lvm2-lvmetad.service

创建物理卷 pvcreate /dev/sdb

创建卷组vgcreate cinder-volumes /dev/sdb

编辑配置文件   vi /etc/lvm/lvm.conf
devices {
      filter = [ "a/sdb/", "r/.*/"]
}
说明: 如果还有第三块磁盘,应该再加上
filter = [ "a/sda/", "a/sdb/", "r/.*/"]

安装包 yum install -y openstack-cinder targetcli python-oslo-policy

编辑配置文件   vi /etc/cinder/cinder.conf

rpc_backend = rabbit
auth_strategy = keystone
my_ip = 192.168.16.112
enabled_backends = lvm
glance_host = controller
verbose = True


connection = mysql://cinder:O3bwbpoZ3@controller/cinder


rabbit_host = controller
rabbit_userid = openstack
rabbit_password = o3NXovnz5


auth_uri = http://controller:5000
auth_url = http://controller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = cinder
password = hf8LX9bow


volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm


lock_path = /var/lib/cinder/tmp

增加block storage - 启动和验证

启动服务 (compute)
systemctl enable openstack-cinder-volume.service target.service
systemctl start openstack-cinder-volume.service target.service

验证操作(controller)
1. 执行初始化脚本
source admin-openrc.sh

2. 列出服务
cinder service-list

页: [1]
查看完整版本: openstack安装配置(三)