在计算节点(compute01、compute02)安装配置Nova
1、安装组件
# yum install openstack-nova-compute -y 

2、修改配置文件/etc/nova/nova.conf
# cd /etc/nova/ && mv nova.conf nova.conf.source && cat nova.conf.source |grep -Ev "^#|^$" > nova.conf && chown nova:root nova.conf
# vim /etc/nova/nova.conf

在【DEFAULT】部分中,仅启用计算和元数据API
[DEFAULT]
# ...
enabled_apis = osapi_compute,metadata

在【DEFAULT】部分中,配置RabbitMQ消息队列访问
[DEFAULT]
# ...
transport_url = rabbit://openstack:openstack@controller:5672/

配置身份服务访问权限
[api]
# ...
auth_strategy = keystone

[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova

在【DEFAULT】部分,配置my_ip选项以使用计算节点的管理接口IP地址
[DEFAULT]
# ...
my_ip = 192.168.58.101
# my_ip = 192.168.58.102

在【DEFAULT】部分中,启用对网络服务的支持
[DEFAULT]
# ...
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver

在【vnc】部分中,配置VNC代理以使用控制器节点的管理接口IP地址
[vnc]
# ...
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://192.168.58.100:6080/vnc_auto.html

在【glance】部分中,配置镜像服务(Glance)API的位置
[glance]
# ...
api_servers = http://controller:9292
 
在【oslo_concurrency】部分中,配置锁路径
[oslo_concurrency]
# ...
lock_path = /var/lib/nova/tmp

在【placement】部分中,配置Placement服务的访问权限
[placement]
# ...
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement

3、判断计算节点是否支持虚拟机硬件加速
# egrep -c '(vmx|svm)' /proc/cpuinfo

上述命令返回值不是 0,则计算节点支持硬件加速,不需要进行下面的配置。
上述命令返回值是 0,则计算节点不支持硬件加速,并且必须配置 libvirt 为使用 QEMU 而不是 KVM,需要编辑/etc/nova/nova.conf 文件中的[libvirt]部分

编辑/etc/nova/nova.conf文件中的【libvirt】部分如下
[libvirt]
# ...
virt_type = qemu

4、安装并配置kvm组件
# yum install -y qemu-kvm-common-ev qemu-kvm-tools qemu-kvm-ev libvirt-daemon-kvm qemu-guest-agent qemu-img-ev libibverbs

修改计算节点配置文件/etc/libvirt/libvirtd.conf

# vim /etc/libvirt/libvirtd.conf
listen_tls = 0
listen_tcp = 1
tcp_port = "16509"
listen_addr = "192.168.58.101"
# compute02 节点使用:listen_addr = "192.168.58.102"
auth_tcp = "none"

修改计算节点配置文件/etc/sysconfig/libvirtd

# vim /etc/sysconfig/libvirtd
LIBVIRTD_CONFIG=/etc/libvirt/libvirtd.conf
LIBVIRTD_ARGS="--listen"

5、启动libvirtd、nova-compute服务,并设置成开机自启动
# systemctl enable libvirtd.service openstack-nova-compute.service
# systemctl start libvirtd.service openstack-nova-compute.service

6、查看virsh版本
# virsh version
根据库编译:libvirt 4.5.0
使用库:libvirt 4.5.0
使用的 API: QEMU 4.5.0
运行管理程序: QEMU 2.12.0

计算节点nova.conf

[DEFAULT]

my_ip = 192.168.58.101

# my_ip = 计算节点IP

use_neutron = true

firewall_driver = nova.virt.firewall.NoopFirewallDriver

enabled_apis = osapi_compute,metadata

transport_url = rabbit://openstack:openstack@controller:5672/

[api]

auth_strategy = keystone

[api_database]

[barbican]

[cache]

[cinder]

[compute]

[conductor]

[console]

[consoleauth]

[cors]

[database]

[devices]

[ephemeral_storage_encryption]

[filter_scheduler]

[glance]

api_servers = http://controller:9292

[guestfs]

[healthcheck]

[hyperv]

[ironic]

[key_manager]

[keystone]

[keystone_authtoken]

www_authenticate_uri = http://controller:5000/

auth_url = http://controller:5000/

memcached_servers = controller:11211

auth_type = password

project_domain_name = Default

user_domain_name = Default

project_name = service

username = nova

password = nova

[libvirt]

virt_type = qemu

[metrics]

[mks]

[neutron]

[notifications]

[osapi_v21]

[oslo_concurrency]

lock_path = /var/lib/nova/tmp

[oslo_messaging_amqp]

[oslo_messaging_kafka]

[oslo_messaging_notifications]

[oslo_messaging_rabbit]

[oslo_middleware]

[oslo_policy]

[pci]

[placement]

region_name = RegionOne

project_domain_name = Default

project_name = service

auth_type = password

user_domain_name = Default

auth_url = http://controller:5000/v3

username = placement

password = placement

[powervm]

[privsep]

[profiler]

[quota]

[rdp]

[remote_debug]

[scheduler]

[serial_console]

[service_user]

[spice]

[upgrade_levels]

[vault]

[vendordata_dynamic_auth]

[vmware]

[vnc]

enabled = true

server_listen = 0.0.0.0

server_proxyclient_address = $my_ip

novncproxy_base_url = http://192.168.58.100:6080/vnc_auto.html

[workarounds]

[wsgi]

[xenserver]

[xvp]

[zvm]