控制节点配置
1. 建库建用户
CREATE DATABASE neutron; GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY '123456'; GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY '123456'; flush privileges;
2. keystone相关
. admin-openrc openstack user create --domain default --password-prompt neutron +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | 135e691ebbb74fefb5086970eac74706 | | enabled | True | | id | 44c83659c24a4442bdd5a633ce0c20a6 | | name | neutron | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ openstack role add --project service --user neutron admin openstack service create --name neutron --description "OpenStack Networking" network +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Networking | | enabled | True | | id | d6bba9ae89234d44a6d9ecae8663a1d5 | | name | neutron | | type | network | +-------------+----------------------------------+ openstack endpoint create --region RegionOne network public http://controller01:9696 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 0bcd1c09ec7f43b3a5e795a667dd0718 | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | d6bba9ae89234d44a6d9ecae8663a1d5 | | service_name | neutron | | service_type | network | | url | http://controller01:9696 | +--------------+----------------------------------+ openstack endpoint create --region RegionOne network internal http://controller01:9696 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | d2e28de6e48749b2afef7a2ae40a2bb1 | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | d6bba9ae89234d44a6d9ecae8663a1d5 | | service_name | neutron | | service_type | network | | url | http://controller01:9696 | +--------------+----------------------------------+ openstack endpoint create 
--region RegionOne network admin http://controller01:9696 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 09d901455aee4e018523b212369690ed | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | d6bba9ae89234d44a6d9ecae8663a1d5 | | service_name | neutron | | service_type | network | | url | http://controller01:9696 | +--------------+----------------------------------+
3. 安装软件包
1 yum install openstack-neutron openstack-neutron-ml2 python-neutronclient which -y
4. 配置服务器组件
vim /etc/neutron/neutron.conf
# 编辑以下各配置段(注意主机名与keystone端点保持一致,统一使用controller01): [DEFAULT] core_plugin = ml2 service_plugins = router #下面配置:启用重叠IP地址功能 allow_overlapping_ips = True rpc_backend = rabbit auth_strategy = keystone notify_nova_on_port_status_changes = True notify_nova_on_port_data_changes = True [oslo_messaging_rabbit] rabbit_host = controller01 rabbit_userid = openstack rabbit_password = 123456 [database] connection = mysql+pymysql://neutron:123456@controller01/neutron [keystone_authtoken] auth_url = http://controller01:5000 memcached_servers = controller01:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = neutron password = 123456 [nova] auth_url = http://controller01:5000 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = nova password = 123456 [oslo_concurrency] lock_path = /var/lib/neutron/tmp [neutron] url = http://controller01:9696 auth_url = http://controller01:5000 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = neutron password = 123456 service_metadata_proxy = True metadata_proxy_shared_secret = 123456
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2] type_drivers = flat,vlan,vxlan,gre tenant_network_types = vxlan mechanism_drivers = openvswitch,l2population extension_drivers = port_security [ml2_type_flat] flat_networks = provider [ml2_type_vxlan] vni_ranges = 1:1000 [securitygroup] enable_ipset = True
vim /etc/nova/nova.conf
[neutron] url = http://controller01:9696 auth_url = http://controller01:5000 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = neutron password = 123456 service_metadata_proxy = True
vim /etc/neutron/metadata_agent.ini
nova_metadata_ip = controller01
metadata_proxy_shared_secret = 123456
5. 创建连接
1 ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
6. 同步数据库
此处会报一些关于future的问题,自行忽略
1 su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
7. 重启nova服务
1 systemctl restart openstack-nova-api.service
8. 启动neutron服务
1 systemctl enable neutron-server.service 2 systemctl start neutron-server.service
网络节点配置
1. 编辑配置文件
vim /etc/sysctl.conf
net.ipv4.ip_forward=1 net.ipv4.conf.all.rp_filter=0 net.ipv4.conf.default.rp_filter=0
2. 执行下列命令,立即生效
sysctl -p
3. 安装软件包
1 yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch -y
4.配置组件
vim /etc/neutron/neutron.conf
[DEFAULT] core_plugin = ml2 service_plugins = router allow_overlapping_ips = True rpc_backend = rabbit auth_strategy = keystone [database] connection = mysql+pymysql://neutron:123456@controller01/neutron [oslo_messaging_rabbit] rabbit_host = controller01 rabbit_userid = openstack rabbit_password = 123456 [oslo_concurrency] lock_path = /var/lib/neutron/tmp
vim /etc/neutron/plugins/ml2/openvswitch_agent.ini
[ovs] #下面ip为网络节点数据网络ip local_ip=10.0.0.1 bridge_mappings=external:br-ex [agent] tunnel_types=gre,vxlan #l2_population=True prevent_arp_spoofing=True
7. 配置L3代理
vim /etc/neutron/l3_agent.ini
[DEFAULT] interface_driver=neutron.agent.linux.interface.OVSInterfaceDriver external_network_bridge=br-ex
8. 配置DHCP代理
vim /etc/neutron/dhcp_agent.ini
[DEFAULT] interface_driver=neutron.agent.linux.interface.OVSInterfaceDriver dhcp_driver=neutron.agent.linux.dhcp.Dnsmasq enable_isolated_metadata=True
9. 配置元数据代理
vim /etc/neutron/metadata_agent.ini
[DEFAULT] nova_metadata_ip=controller01 metadata_proxy_shared_secret=123456
10. 启动服务(先启动服务再建网桥br-ex)
1 systemctl start neutron-openvswitch-agent.service neutron-l3-agent.service 2 systemctl start neutron-dhcp-agent.service neutron-metadata-agent.service 3 systemctl enable neutron-openvswitch-agent.service neutron-l3-agent.service 4 systemctl enable neutron-dhcp-agent.service neutron-metadata-agent.service
11.建网桥
方法一:
注意,如果网卡数量有限,想用网络节点的管理网络网卡作为br-ex绑定的物理网卡
那么需要将网络节点管理网络网卡ip去掉,建立br-ex的配置文件,ip使用原管理网ip
cat /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0 TYPE=Ethernet ONBOOT="yes" BOOTPROTO="none" NM_CONTROLLED=no
cat /etc/sysconfig/network-scripts/ifcfg-br-ex
DEVICE=br-ex TYPE=Ethernet ONBOOT="yes" BOOTPROTO="none" #HWADDR=bc:ee:7b:78:7b:a7 IPADDR=192.168.198.10 GATEWAY=192.168.198.1 NETMASK=255.255.255.0 DNS1=202.106.0.20 DNS2=8.8.8.8 NM_CONTROLLED=no #注意加上这一句否则网卡可能启动不成功
添加网桥设备
ovs-vsctl add-br br-ex ovs-vsctl add-port br-ex eth0 #要在network服务重启前将物理端口eth0加入网桥br-ex systemctl restart network # 重启网络时,务必保证eth0网卡没有ip或者干脆是down掉的状态,并且一定要NM_CONTROLLED=no,否则会无法启动服务
方法二:http://www.cnblogs.com/cq146637/p/8322064.html
计算节点配置
1. 优化系统内核参数
vim /etc/sysctl.conf
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
2. 修改参数立即生效
sysctl -p
3. 安装软件
1 yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch -y
4. 编辑配置文件
vim /etc/neutron/neutron.conf
[DEFAULT] rpc_backend = rabbit auth_strategy = keystone [oslo_messaging_rabbit] rabbit_host = controller01 rabbit_userid = openstack rabbit_password = 123456 [oslo_concurrency] lock_path = /var/lib/neutron/tmp
vim /etc/neutron/plugins/ml2/openvswitch_agent.ini
[ovs] #下面ip为计算节点数据网络ip local_ip = 10.0.0.2 #bridge_mappings = vlan:br-vlan [agent] tunnel_types = gre,vxlan l2_population = True #开启l2_population功能用于接收sdn控制器(一般放在控制节点)发来的(新建的vm)arp信息,这样就把arp信息推送到了每个中断设备(计算节点),减少了一大波初识arp广播流量(说初始是因为如果没有l2pop机制,一个vm对另外一个vm的arp广播一次后就缓存到本地了),好强大,详见https://assafmuller.com/2014/05/21/ovs-arp-responder-theory-and-practice/ arp_responder = True #开启br-tun的arp响应功能,这样br-tun就成了一个arp proxy,来自本节点对其他虚拟机而非物理主机的arp请求可以基于本地的br-tun轻松搞定,不能再牛逼了 prevent_arp_spoofing = True [securitygroup] firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver enable_security_group = True
vim /etc/nova/nova.conf
[neutron] url = http://controller01:9696 auth_url = http://controller01:5000 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionOne project_name = service username = neutron password = 123456
7.启动服务
1 systemctl enable neutron-openvswitch-agent.service 2 systemctl start neutron-openvswitch-agent.service 3 systemctl restart openstack-nova-compute.service
参考博客 http://blog.51cto.com/egon09/1839667