
OpenStack Installation and Configuration: Compute Node Setup

What a compute node mainly needs configured are the Nova and Neutron agents; the controller node depends on the compute nodes cooperating when it schedules and provisions resources. There is relatively little to configure on a compute node, but in a real production environment the number of compute nodes can be very large, at which point an automation tool such as Ansible or Puppet becomes necessary (see the sketch below). Enough talk; let's get straight into the configuration.
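For example, a single Ansible ad-hoc command (a minimal sketch, assuming an inventory group named compute that lists all the compute nodes) could install the Nova package on every node at once:

# ansible compute -m yum -a "name=openstack-nova-compute state=present"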
Compute node base configuration
# lscpu
Architecture:          x86_64
CPU op-mode(s):      32-bit, 64-bit
Byte Order:            Little Endian
CPU(s):                8
On-line CPU(s) list:   0-7
Thread(s) per core:    1
Core(s) per socket:    1
Socket(s):             8
NUMA node(s):          1
Vendor ID:             GenuineIntel
CPU family:            6
Model:               44
Model name:            Westmere E56xx/L56xx/X56xx (Nehalem-C)
Stepping:            1
CPU MHz:               2400.084
BogoMIPS:            4800.16
Virtualization:      VT-x
Hypervisor vendor:   KVM
Virtualization type:   full
L1d cache:             32K
L1i cache:             32K
L2 cache:            4096K
NUMA node0 CPU(s):   0-7

# free -h
              total        used        free      shared  buff/cache   available
Mem:            15G        142M         15G        8.3M        172M         15G
Swap:            0B          0B          0B
# lsblk
NAME            MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
sr0              11:0    1  1024M  0 rom
vda             252:0    0   400G  0 disk
├─vda1          252:1    0   500M  0 part /boot
└─vda2          252:2    0 399.5G  0 part
  ├─centos-root 253:0    0    50G  0 lvm  /
  ├─centos-swap 253:1    0   3.9G  0 lvm
  └─centos-data 253:2    0 345.6G  0 lvm  /data

# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
      inet 192.168.10.31  netmask 255.255.255.0  broadcast 192.168.10.255
      inet6 fe80::5054:ff:fe18:bb1b  prefixlen 64  scopeid 0x20<link>
      ether 52:54:00:18:bb:1b  txqueuelen 1000  (Ethernet)
      RX packets 16842  bytes 1460696 (1.3 MiB)
      RX errors 0  dropped 1416  overruns 0  frame 0
      TX packets 747  bytes 199340 (194.6 KiB)
      TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

eth1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
      inet 10.0.0.31  netmask 255.255.0.0  broadcast 10.0.255.255
      inet6 fe80::5054:ff:fe28:e0a7  prefixlen 64  scopeid 0x20<link>
      ether 52:54:00:28:e0:a7  txqueuelen 1000  (Ethernet)
      RX packets 16213  bytes 1360633 (1.2 MiB)
      RX errors 0  dropped 1402  overruns 0  frame 0
      TX packets 23  bytes 1562 (1.5 KiB)
      TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

eth2: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
      inet 111.40.215.9  netmask 255.255.255.240  broadcast 111.40.215.15
      inet6 fe80::5054:ff:fe28:e07a  prefixlen 64  scopeid 0x20<link>
      ether 52:54:00:28:e0:7a  txqueuelen 1000  (Ethernet)
      RX packets 40  bytes 2895 (2.8 KiB)
      RX errors 0  dropped 0  overruns 0  frame 0
      TX packets 24  bytes 1900 (1.8 KiB)
      TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
      inet 127.0.0.1  netmask 255.0.0.0
      inet6 ::1  prefixlen 128  scopeid 0x10<host>
      loop  txqueuelen 0  (Local Loopback)
      RX packets 841  bytes 44167 (43.1 KiB)
      RX errors 0  dropped 0  overruns 0  frame 0
      TX packets 841  bytes 44167 (43.1 KiB)
      TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

# getenforce
Disabled
# iptables -vnL
Chain INPUT (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target   prot opt in   out   source               destination         

Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target   prot opt in   out   source               destination         

Chain OUTPUT (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target   prot opt in   out   source               destination         
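The two checks above assume SELinux and the host firewall were already switched off during environment preparation. On a fresh node where they are still active, the usual steps look roughly like this (a sketch):

# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
# setenforce 0          //takes effect immediately; the config edit makes it permanent after a reboot
# systemctl stop firewalld
# systemctl disable firewalld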
# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.10.10        controller
192.168.10.20        block
192.168.10.31        compute1
192.168.10.32        compute2
#
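Before moving on, it is worth a quick check that the hosts entries resolve as expected, for example:

# ping -c 2 controller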

Configure the time synchronization service
# yum install -y chrony
# vim /etc/chrony.conf
# grep -v ^# /etc/chrony.conf | tr -s [[:space:]]
server controller iburst
stratumweight 0
driftfile /var/lib/chrony/drift
rtcsync
makestep 10 3
bindcmdaddress 127.0.0.1
bindcmdaddress ::1
keyfile /etc/chrony.keys
commandkey 1
generatecommandkey
noclientlog
logchange 0.5
logdir /var/log/chrony
# systemctl enable chronyd.service
# systemctl start chronyd.service
# chronyc sources
210 Number of sources = 1
MS Name/IP address         Stratum Poll Reach LastRx Last sample
===============================================================================
^* controller                   3   6    17    52    -15us[ -126us] +/-  138ms
#
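The ^* in front of controller means chrony has selected it as the synchronization source; if the offset looks suspicious, chronyc tracking prints a more detailed view:

# chronyc tracking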

Install the OpenStack client
# yum install -y python-openstackclient
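A quick way to confirm the client is usable:

# openstack --version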

Install and configure the Nova compute service
# yum install -y openstack-nova-compute
# cp /etc/nova/nova.conf{,.bak}
# vim /etc/nova/nova.conf
# grep -v ^# /etc/nova/nova.conf | tr -s [[:space:]]

[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 192.168.10.31
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver

[glance]
api_servers = http://controller:9292

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = NOVA_PASS

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS

[vnc]
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html

# egrep -c '(vmx|svm)' /proc/cpuinfo          //check whether the CPU supports hardware acceleration for virtual machines
8
#
If this check returns 0, refer to the section on enabling nested virtualization for KVM virtual machines in the OpenStack environment preparation article; the fallback sketch below also applies.
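If hardware acceleration really is unavailable, the standard fallback from the official install guide is to run instances under plain QEMU instead of KVM, by adding the following to the [libvirt] section of /etc/nova/nova.conf:

[libvirt]
virt_type = qemu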

# systemctl enable libvirtd.service openstack-nova-compute.service
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-compute.service to /usr/lib/systemd/system/openstack-nova-compute.service.
# systemctl start libvirtd.service openstack-nova-compute.service          //nova-compute opens no listening port on the compute node, so it can only be checked via its service status
# systemctl status libvirtd.service openstack-nova-compute.service
● libvirtd.service - Virtualization daemon
   Loaded: loaded (/usr/lib/systemd/system/libvirtd.service; enabled; vendor preset: enabled)
   Active: active (running) since Sun 2017-07-16 19:10:26 CST; 12min ago
   Docs: man:libvirtd(8)
         http://libvirt.org
Main PID: 1002 (libvirtd)
   CGroup: /system.slice/libvirtd.service
         └─1002 /usr/sbin/libvirtd

Jul 16 19:10:26 compute1 systemd: Starting Virtualization daemon...
Jul 16 19:10:26 compute1 systemd: Started Virtualization daemon.
Jul 16 19:21:06 compute1 systemd: Started Virtualization daemon.

● openstack-nova-compute.service - OpenStack Nova Compute Server
   Loaded: loaded (/usr/lib/systemd/system/openstack-nova-compute.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2017-07-16 19:21:11 CST; 1min 21s ago
Main PID: 1269 (nova-compute)
   CGroup: /system.slice/openstack-nova-compute.service
         └─1269 /usr/bin/python2 /usr/bin/nova-compute

Jul 16 19:21:06 compute1 systemd: Starting OpenStack Nova Compute Server...
Jul 16 19:21:11 compute1 nova-compute: /usr/lib/python2.7/site-packages/pkg_resources/__init__.py:187: RuntimeWarning: You have...
Jul 16 19:21:11 compute1 nova-compute: stacklevel=1,
Jul 16 19:21:11 compute1 systemd: Started OpenStack Nova Compute Server.
Hint: Some lines were ellipsized, use -l to show in full.
#

Go to the controller node to verify the compute service configuration.
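For reference, a minimal sketch of that check, run on the controller with admin credentials loaded (assuming the admin-openrc credentials script created during the controller setup):

# . admin-openrc
# openstack compute service list --service nova-compute

The nova-compute entry for compute1 should show its state as up.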

Install and configure the Neutron agent
Continue with the following steps only after the network configuration on the controller node has been completed.
# yum install -y openstack-neutron-linuxbridge ebtables ipset
# cp /etc/neutron/neutron.conf{,.bak}
# vim /etc/neutron/neutron.conf
# grep -v ^# /etc/neutron/neutron.conf | tr -s [[:space:]]

[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = NEUTRON_PASS

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS
#

Configure the Linux bridge agent
# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
# grep -v ^# /etc/neutron/plugins/ml2/linuxbridge_agent.ini | tr -s [[:space:]]

[linux_bridge]
physical_interface_mappings = provider:eth1

[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

[vxlan]
enable_vxlan = True
local_ip = 192.168.10.31
l2_population = True
#

Edit the Nova configuration file again and append the network settings (these go in the [neutron] section):
# vim /etc/nova/nova.conf
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = NEUTRON_PASS

Restart the compute service, then enable and start the Linux bridge agent service
# systemctl restart openstack-nova-compute.service
# systemctl enable neutron-linuxbridge-agent.service
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-linuxbridge-agent.service to /usr/lib/systemd/system/neutron-linuxbridge-agent.service.
# systemctl start neutron-linuxbridge-agent.service

Go to the controller node to verify the network service configuration.
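Again for reference, a minimal sketch of that check on the controller (assuming admin credentials are loaded as before); the Linux bridge agent on each compute node should appear in the output with its alive flag set:

# . admin-openrc
# openstack network agent list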
