zoukankan      html  css  js  c++  java
  • 安装Rocky版OpenStack 1控制节点+1计算节点环境部署脚本

    在上一篇文章中叙述了具体的安装部署过程,在这里把相应的部署脚本写出来,供大家参考:

    一、执行部署的setup.sh脚本:

    #!/bin/bash
    
    ############################################################
    ##     Items to configure manually BEFORE running       ####
    ############################################################
    
    # Each server has two NICs: eth0 for the management network, eth1 for the provider network.
    # eth0 can be host-only with a static IP; eth1 bridged with DHCP so packages can be fetched.
    # To rename NICs, press 'e' at the installer boot menu and append: net.ifnames=0 biosdevname=0
    # Set the hostname on every server first.
    # Passwordless SSH from the controller to every other node must be configured beforehand.
    # Upload the OpenStack package repo and the local_settings file to /root on the controller.
    # Upload cirros-0.3.5-x86_64-disk.img to /root on the controller.
    
    
    ################################################################
    ##   Environment parameters: the IP address of each role    ####
    ################################################################
    
    # Edit ONLY the values below. They are expanded once into the generated
    # environment file, so there is a single source of truth (the original
    # script required keeping two copies in sync by hand).
    MGMTNETWORK=192.168.3.0/24
    CONTROLLER_IP=192.168.3.10
    COMPUTER1_IP=192.168.3.11
    BLOCK1_IP=192.168.3.11
    OBJECT1_IP=192.168.3.12
    
    CONTROLLER_NAME=controller
    COMPUTER1_NAME=compute
    BLOCK1_NAME=compute
    OBJECT1_NAME=object
    MYSQLUSERNAME=root
    MYSQLPASSWORD=root
    
    # Make sure the working directory exists before writing into it.
    mkdir -p /root/install
    
    # Unquoted heredoc delimiter: the $VARs above are expanded here, so the
    # generated environment file always matches the values set in this script.
    cat <<EOF > /root/install/environment
    #!/bin/bash
    
    MGMTNETWORK=$MGMTNETWORK
    CONTROLLER_IP=$CONTROLLER_IP
    COMPUTER1_IP=$COMPUTER1_IP
    BLOCK1_IP=$BLOCK1_IP
    OBJECT1_IP=$OBJECT1_IP
    
    CONTROLLER_NAME=$CONTROLLER_NAME
    COMPUTER1_NAME=$COMPUTER1_NAME
    BLOCK1_NAME=$BLOCK1_NAME
    OBJECT1_NAME=$OBJECT1_NAME
    MYSQLUSERNAME=$MYSQLUSERNAME
    MYSQLPASSWORD=$MYSQLPASSWORD
    EOF
    
    # Prepend the environment to each node script to build self-contained installers.
    cat /root/install/environment /root/install/controller.sh > /root/install/controller-install.sh
    cat /root/install/environment /root/install/compute.sh > /root/install/compute-install.sh
    # 755 is sufficient: world-writable (777) scripts executed as root are a security risk.
    chmod 755 /root/install/controller-install.sh
    chmod 755 /root/install/compute-install.sh
    
    ##################################################################
    ##########          Controller node installation            ######
    ##################################################################
    
    bash /root/install/controller-install.sh
    
    echo -------------------------------------------------------------
    echo                 controller  node install is OK!
    echo -------------------------------------------------------------
    
    
    #################################################################
    ##########           Compute node installation             ######
    #################################################################
    
    # Requires passwordless SSH from the controller to the compute node.
    scp /root/install/compute-install.sh root@$COMPUTER1_IP:/root
    ssh root@$COMPUTER1_IP bash /root/compute-install.sh
    
    echo ------------------------------------------------------------
    echo                  compute node install is OK! 
    echo ------------------------------------------------------------
    
    
    ################################################################
    ##########    Controller discovers the compute node       ######
    ################################################################
    
    cd /root
    # Source the admin credentials with an absolute path so this works
    # regardless of the current directory.
    . /root/admin
    openstack compute service list --service nova-compute
    su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
    # Fixed typo: "-t servie" -> "-t service".
    systemctl -t service | grep nova
    nova hypervisor-list
    openstack token issue
    openstack project list
    openstack endpoint list
    glance image-list
    openstack user list
    openstack compute service list
    openstack catalog list

    二、控制节点预安装controller.sh脚本

    ##############################################################
    ##########   Controller node environment preparation    ######
    ##############################################################
    
    #hostnamectl set-hostname $CONTROLLER_NAME
    
    # Host-name resolution for every node in the cluster.
    cat <<EOF >> /etc/hosts
    $CONTROLLER_IP    $CONTROLLER_NAME
    $COMPUTER1_IP    $COMPUTER1_NAME
    $BLOCK1_IP    $BLOCK1_NAME
    $OBJECT1_IP    $OBJECT1_NAME
    EOF
    
    ## Create the admin credentials (openrc) file.
    cat <<EOF > /root/admin
    export OS_PROJECT_DOMAIN_NAME=Default
    export OS_USER_DOMAIN_NAME=Default
    export OS_PROJECT_NAME=admin
    export OS_USERNAME=admin
    export OS_PASSWORD=ADMIN_PASS
    export OS_AUTH_URL=http://$CONTROLLER_IP:5000/v3
    export OS_IDENTITY_API_VERSION=3
    export OS_IMAGE_API_VERSION=2
    EOF
    # Credential files contain plaintext passwords: restrict them to root
    # (the original 777 made them world-readable and world-writable).
    chmod 600 /root/admin
    
    ## Create the demo credentials (openrc) file.
    cat <<EOF > /root/demo
    export OS_PROJECT_DOMAIN_NAME=Default
    export OS_USER_DOMAIN_NAME=Default
    export OS_PROJECT_NAME=myproject
    export OS_USERNAME=myuser
    export OS_PASSWORD=MYUSER_PASS
    export OS_AUTH_URL=http://$CONTROLLER_IP:5000/v3
    export OS_IDENTITY_API_VERSION=3
    export OS_IMAGE_API_VERSION=2
    EOF
    chmod 600 /root/demo
    
    ## Time synchronization: the controller serves NTP to the other nodes.
    yum install -y http://dl.fedoraproject.org/pub/epel/7Server/x86_64/Packages/e/epel-release-7-11.noarch.rpm
    yum install chrony -y
    cp /etc/chrony.conf /etc/chrony.conf.bak
    sed -i "/^server/d" /etc/chrony.conf
    echo "server $CONTROLLER_IP" >> /etc/chrony.conf
    echo "allow $MGMTNETWORK" >> /etc/chrony.conf
    echo "local stratum 10" >> /etc/chrony.conf
    
    systemctl restart chronyd
    systemctl enable chronyd
    systemctl status chronyd
    chronyc sources -v
    sleep 5
    
    ## Install RabbitMQ (the message queue used by all OpenStack services).
    yum install rabbitmq-server -y
    systemctl enable rabbitmq-server.service
    systemctl restart rabbitmq-server.service
    systemctl status rabbitmq-server.service
    
    # Create the "openstack" MQ user with full configure/write/read
    # permissions, and enable the management web UI plugin.
    rabbitmqctl add_user openstack RABBIT_PASS
    rabbitmqctl set_permissions openstack ".*" ".*" ".*"
    rabbitmq-plugins enable rabbitmq_management
    
    ## Install memcached (token cache for the keystone auth middleware).
    yum install memcached python-memcached -y
    cp /etc/sysconfig/memcached /etc/sysconfig/memcached.bak
    # Replace the loopback listen address so memcached is reachable on the
    # management IP by the other services.
    sed -i "s/::1/$CONTROLLER_IP/g" /etc/sysconfig/memcached
    systemctl enable memcached.service
    systemctl restart memcached.service
    systemctl status memcached.service
    
    ## Install the MariaDB database server.
    yum install mariadb mariadb-server python2-PyMySQL -y
    cat <<EOF > /etc/my.cnf.d/openstack.cnf
    [mysqld]
    bind-address = $CONTROLLER_IP
    default-storage-engine = innodb
    innodb_file_per_table = on
    max_connections = 4096
    collation-server = utf8_general_ci
    character-set-server = utf8
    EOF
    
    systemctl enable mariadb.service
    systemctl restart mariadb.service
    systemctl status mariadb.service
    
    ## Initialize MariaDB: automate mysql_secure_installation with expect.
    yum install expect -y
    mkdir -p /root/install
    # The heredoc delimiter is unquoted so $MYSQLPASSWORD expands; the \r
    # sequences pass through literally and are interpreted by expect's send.
    # (The original had lost the \r escapes, leaving raw newlines in the
    # send strings.)
    cat <<EOF > /root/install/mysqlinstall.sh
    #!/usr/bin/expect
    spawn mysql_secure_installation
    expect "Enter current password for root (enter for none):"
    send "\r"
    expect "Set root password? "
    send "Y\r"
    expect "New password: "
    send "$MYSQLPASSWORD\r"
    expect "Re-enter new password: "
    send "$MYSQLPASSWORD\r"
    expect "Remove anonymous users?"
    send "Y\r"
    expect "Disallow root login remotely?"
    send "n\r"
    expect "Remove test database and access to it?"
    send "Y\r"
    expect "Reload privilege tables now?"
    send "Y\r"
    interact
    EOF
    
    sleep 5
    chmod 755 /root/install/mysqlinstall.sh
    cd /root/install
    ./mysqlinstall.sh
    sleep 5
    
    ## Create the per-service databases and grant each service account
    ## access from localhost and from any host ('%').
    mysql -u$MYSQLUSERNAME -p$MYSQLPASSWORD <<EOF
    CREATE DATABASE keystone;
    CREATE DATABASE glance;
    CREATE DATABASE nova_api;
    CREATE DATABASE nova;
    CREATE DATABASE nova_cell0;
    CREATE DATABASE placement;
    CREATE DATABASE neutron;
    CREATE DATABASE cinder;
    GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'KEYSTONE_DBPASS';
    GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'KEYSTONE_DBPASS';
    GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS';
    GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';
    GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
    GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';
    GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
    GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';
    GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
    GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';
    GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY 'PLACEMENT_DBPASS';
    GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'PLACEMENT_DBPASS';
    GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'NEUTRON_DBPASS';
    GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'NEUTRON_DBPASS';
    GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'CINDER_DBPASS';
    GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'CINDER_DBPASS';
    EOF
    
    ## Download the cirros test image (pre-uploaded to /root instead).
    #wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
    
    ## Serve the local OpenStack yum repository over FTP to all nodes.
    yum install vsftpd -y
    systemctl restart vsftpd
    systemctl enable vsftpd
    systemctl status vsftpd
    mkdir -p /etc/yum.repos.d/save
    mv /etc/yum.repos.d/C* /etc/yum.repos.d/save
    mv /etc/yum.repos.d/epel* /etc/yum.repos.d/save
    mv /root/openstack /var/ftp/pub
    cat <<EOF > /etc/yum.repos.d/yum.repo
    [rocky]
    name=rocky-openstack
    baseurl=ftp://$CONTROLLER_IP/pub/openstack
    enable=true
    gpgcheck=0
    EOF
    yum clean all
    yum makecache
    
    ## Configure the eth1 (provider network) interface.
    # Fixed: DEVICE must be eth1 in ifcfg-eth1 (was eth0, which would
    # reconfigure the management NIC instead).
    cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-eth1
    TYPE=Ethernet
    BOOTPROTO=none
    DEVICE=eth1
    ONBOOT=yes
    EOF
    
    sleep 5
    echo ---------------------------------------------------------------
    echo                 controller node environment is OK!                                                                         
    echo ----------------------------------------------------------------
    sleep 5
    
    #####################################################################
    ##########      Controller node: keystone installation         ######
    #####################################################################
    
    yum install python-openstackclient -y
    yum install openstack-selinux -y
    yum install openstack-keystone httpd mod_wsgi -y
    cp /etc/keystone/keystone.conf /etc/keystone/keystone.conf.bak
    # '[' and ']' must be escaped in the sed address: unescaped,
    # "/^[database]$/" is a character class matching a single character
    # and never matches the "[database]" section header.
    sed -i "/^\[database\]$/aconnection = mysql+pymysql://keystone:KEYSTONE_DBPASS@$CONTROLLER_IP/keystone" /etc/keystone/keystone.conf
    sed -i "/^\[token\]$/aprovider = fernet" /etc/keystone/keystone.conf
    su -s /bin/sh -c "keystone-manage db_sync" keystone
    keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
    keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
    # Line continuations restored: without the trailing backslashes each
    # --bootstrap-* option was executed as a separate, invalid command.
    keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
        --bootstrap-admin-url http://$CONTROLLER_IP:5000/v3/ \
        --bootstrap-internal-url http://$CONTROLLER_IP:5000/v3/ \
        --bootstrap-public-url http://$CONTROLLER_IP:5000/v3/ \
        --bootstrap-region-id RegionOne
    echo "ServerName $CONTROLLER_NAME" >> /etc/httpd/conf/httpd.conf
    ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
    systemctl enable httpd.service
    systemctl restart httpd.service
    systemctl status httpd.service
    
    # Temporary admin environment, valid until /root/admin is sourced.
    export OS_USERNAME=admin
    export OS_PASSWORD=ADMIN_PASS
    export OS_PROJECT_NAME=admin
    export OS_USER_DOMAIN_NAME=Default
    export OS_PROJECT_DOMAIN_NAME=Default
    export OS_AUTH_URL=http://$CONTROLLER_IP:5000/v3
    export OS_IDENTITY_API_VERSION=3
    
    openstack domain create --description "An Example Domain" example
    openstack project create --domain default --description "Service Project" service
    openstack project create --domain default --description "Demo Project" myproject
    
    openstack user create --domain default --password MYUSER_PASS myuser
    openstack role create myrole
    openstack role add --project myproject --user myuser myrole
    # Verify token issuance for both the admin and the demo user.
    openstack --os-auth-url http://$CONTROLLER_IP:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name admin --os-username admin token issue
    openstack --os-auth-url http://$CONTROLLER_IP:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name myproject --os-username myuser token issue
    
    . /root/admin
    openstack token issue
    
    sleep 5
    echo ----------------------------------------------------------------
    echo                 controller node keystone is OK! 
    echo ----------------------------------------------------------------
    sleep 5
    
    
    #####################################################################
    ##########       Controller node: glance installation          ######
    #####################################################################
    
    . /root/admin
    openstack user create --domain default --password GLANCE_PASS glance
    openstack role add --project service --user glance admin
    openstack service create --name glance --description "OpenStack Image" image
    openstack endpoint create --region RegionOne image public http://$CONTROLLER_IP:9292
    openstack endpoint create --region RegionOne image internal http://$CONTROLLER_IP:9292
    openstack endpoint create --region RegionOne image admin http://$CONTROLLER_IP:9292
    yum install openstack-glance -y
    cp /etc/glance/glance-api.conf /etc/glance/glance-api.conf.bak
    # Section headers are matched with escaped brackets; multi-line append
    # text is joined with trailing backslashes, otherwise sed parses each
    # extra line as a (broken) command of its own.
    sed -i "/^\[database\]$/aconnection = mysql+pymysql://glance:GLANCE_DBPASS@$CONTROLLER_IP/glance" /etc/glance/glance-api.conf
    sed -i "/^\[keystone_authtoken\]$/awww_authenticate_uri = http://$CONTROLLER_IP:5000\\
    auth_url = http://$CONTROLLER_IP:5000\\
    memcached_servers = $CONTROLLER_IP:11211\\
    auth_type = password\\
    project_domain_name = Default\\
    user_domain_name = Default\\
    project_name = service\\
    username = glance\\
    password = GLANCE_PASS" /etc/glance/glance-api.conf
    sed -i "/^\[paste_deploy\]$/aflavor = keystone" /etc/glance/glance-api.conf
    sed -i "/^\[glance_store\]$/astores = file,http\\
    default_store = file\\
    filesystem_store_datadir = /var/lib/glance/images/" /etc/glance/glance-api.conf
    cp /etc/glance/glance-registry.conf /etc/glance/glance-registry.conf.bak
    sed -i "/^\[database\]$/aconnection = mysql+pymysql://glance:GLANCE_DBPASS@$CONTROLLER_IP/glance" /etc/glance/glance-registry.conf
    sed -i "/^\[keystone_authtoken\]$/awww_authenticate_uri = http://$CONTROLLER_IP:5000\\
    auth_url = http://$CONTROLLER_IP:5000\\
    memcached_servers = $CONTROLLER_IP:11211\\
    auth_type = password\\
    project_domain_name = Default\\
    user_domain_name = Default\\
    project_name = service\\
    username = glance\\
    password = GLANCE_PASS" /etc/glance/glance-registry.conf
    sed -i "/^\[paste_deploy\]$/aflavor = keystone" /etc/glance/glance-registry.conf
    su -s /bin/sh -c "glance-manage db_sync" glance
    systemctl enable openstack-glance-api.service openstack-glance-registry.service
    systemctl restart openstack-glance-api.service openstack-glance-registry.service
    systemctl status openstack-glance-api.service openstack-glance-registry.service
    
    sleep 5
    # Upload the cirros test image and verify that it is listed.
    openstack image create "cirros" --file /root/cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare --public
    openstack image list
    
    sleep 5
    echo -------------------------------------------------------------
    echo              controller node glance is OK!
    echo -------------------------------------------------------------
    sleep 5
    
    ##################################################################
    ##########        Controller node: nova installation        ######
    ##################################################################
    
    # Fixed: "./root/admin" executed the file; credentials must be sourced.
    . /root/admin
    openstack user create --domain default --password NOVA_PASS nova
    openstack role add --project service --user nova admin
    openstack service create --name nova --description "OpenStack Compute" compute
    openstack endpoint create --region RegionOne compute public http://$CONTROLLER_IP:8774/v2.1
    openstack endpoint create --region RegionOne compute internal http://$CONTROLLER_IP:8774/v2.1
    openstack endpoint create --region RegionOne compute admin http://$CONTROLLER_IP:8774/v2.1
    openstack user create --domain default --password PLACEMENT_PASS placement
    openstack role add --project service --user placement admin
    openstack service create --name placement --description "Placement API" placement
    openstack endpoint create --region RegionOne placement public http://$CONTROLLER_IP:8778
    openstack endpoint create --region RegionOne placement internal http://$CONTROLLER_IP:8778
    openstack endpoint create --region RegionOne placement admin http://$CONTROLLER_IP:8778
    yum install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api -y
    cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
    # Escaped section headers; multi-line append text continued with
    # trailing backslashes so every option line is appended by sed.
    sed -i "/^\[DEFAULT\]$/aenabled_apis = osapi_compute,metadata\\
    transport_url = rabbit://openstack:RABBIT_PASS@$CONTROLLER_IP\\
    my_ip = $CONTROLLER_IP\\
    use_neutron = true\\
    firewall_driver = nova.virt.firewall.NoopFirewallDriver" /etc/nova/nova.conf
    sed -i "/^\[api_database\]$/aconnection = mysql+pymysql://nova:NOVA_DBPASS@$CONTROLLER_IP/nova_api" /etc/nova/nova.conf
    sed -i "/^\[database\]$/aconnection = mysql+pymysql://nova:NOVA_DBPASS@$CONTROLLER_IP/nova" /etc/nova/nova.conf
    sed -i "/^\[placement_database\]$/aconnection = mysql+pymysql://placement:PLACEMENT_DBPASS@$CONTROLLER_IP/placement" /etc/nova/nova.conf
    sed -i "/^\[api\]$/aauth_strategy = keystone" /etc/nova/nova.conf
    sed -i "/^\[keystone_authtoken\]$/aauth_url = http://$CONTROLLER_IP:5000/v3\\
    memcached_servers = $CONTROLLER_IP:11211\\
    auth_type = password\\
    project_domain_name = default\\
    user_domain_name = default\\
    project_name = service\\
    username = nova\\
    password = NOVA_PASS" /etc/nova/nova.conf
    sed -i "/^\[vnc\]$/aenabled = true\\
    server_listen = $CONTROLLER_IP\\
    server_proxyclient_address = $CONTROLLER_IP" /etc/nova/nova.conf
    sed -i "/^\[glance\]$/aapi_servers = http://$CONTROLLER_IP:9292" /etc/nova/nova.conf
    sed -i "/^\[oslo_concurrency\]$/alock_path = /var/lib/nova/tmp" /etc/nova/nova.conf
    # Fixed: the option name had lost its leading "r" ("egion_name").
    sed -i "/^\[placement\]$/aregion_name = RegionOne\\
    project_domain_name = Default\\
    project_name = service\\
    auth_type = password\\
    user_domain_name = Default\\
    auth_url = http://$CONTROLLER_IP:5000/v3\\
    username = placement\\
    password = PLACEMENT_PASS" /etc/nova/nova.conf
    # Allow Apache to serve the placement API (known RDO/Rocky requirement).
    echo -e "
    
    <Directory /usr/bin>
    <IfVersion >= 2.4>
    Require all granted
    </IfVersion>
    <IfVersion < 2.4>
    Order allow,deny
    Allow from all
    </IfVersion>
    </Directory>" >> /etc/httpd/conf.d/00-nova-placement-api.conf
    systemctl restart httpd
    systemctl status httpd
    su -s /bin/sh -c "nova-manage api_db sync" nova
    su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
    su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
    su -s /bin/sh -c "nova-manage db sync" nova
    su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
    systemctl enable openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service openstack-nova-consoleauth.service
    systemctl restart openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service openstack-nova-consoleauth.service
    systemctl status openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service openstack-nova-consoleauth.service
    
    sleep 5
    echo ---------------------------------------------------------
    echo                 controller node nova is OK!
    echo ---------------------------------------------------------
    sleep 5
    
    
    ##############################################################
    ##########     Controller node: neutron installation    ######
    ##############################################################
    
    . /root/admin
    openstack user create --domain default --password NEUTRON_PASS neutron
    openstack role add --project service --user neutron admin
    openstack service create --name neutron --description "OpenStack Networking" network
    openstack endpoint create --region RegionOne network public http://$CONTROLLER_IP:9696
    openstack endpoint create --region RegionOne network internal http://$CONTROLLER_IP:9696
    openstack endpoint create --region RegionOne network admin http://$CONTROLLER_IP:9696
    yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y
    
    # In each grouped sed script below: comment and blank lines are removed
    # first, section headers are matched with escaped brackets, and the
    # multi-line append text is continued with trailing backslashes.
    cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
    sed -i "{
    /^#/d
    /^$/d
    /^\[database\]$/aconnection = mysql+pymysql://neutron:NEUTRON_DBPASS@$CONTROLLER_IP/neutron
    /^\[DEFAULT\]$/acore_plugin = ml2\\
    service_plugins = router\\
    allow_overlapping_ips = true\\
    transport_url = rabbit://openstack:RABBIT_PASS@$CONTROLLER_IP\\
    auth_strategy = keystone\\
    notify_nova_on_port_status_changes = true\\
    notify_nova_on_port_data_changes = true
    /^\[keystone_authtoken\]$/awww_authenticate_uri = http://$CONTROLLER_IP:5000\\
    auth_url = http://$CONTROLLER_IP:5000\\
    memcached_servers = $CONTROLLER_IP:11211\\
    auth_type = password\\
    project_domain_name = default\\
    user_domain_name = default\\
    project_name = service\\
    username = neutron\\
    password = NEUTRON_PASS
    /^\[oslo_concurrency\]$/alock_path = /var/lib/neutron/tmp
    /^\[nova\]$/aauth_url = http://$CONTROLLER_IP:5000\\
    auth_type = password\\
    project_domain_name = default\\
    user_domain_name = default\\
    region_name = RegionOne\\
    project_name = service\\
    username = nova\\
    password = NOVA_PASS
    }" /etc/neutron/neutron.conf
    
    cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.bak
    # Fixed: the option name had lost its leading "t" ("ype_drivers").
    sed -i "{
    /^#/d
    /^$/d
    /^\[ml2\]$/atype_drivers = flat,vlan,vxlan\\
    tenant_network_types = vxlan\\
    mechanism_drivers = linuxbridge,l2population\\
    extension_drivers = port_security
    /^\[ml2_type_flat\]$/aflat_networks = provider
    /^\[ml2_type_vxlan\]$/avni_ranges = 1:1000
    /^\[securitygroup\]$/aenable_ipset = true
    }" /etc/neutron/plugins/ml2/ml2_conf.ini
    
    cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
    sed -i "{
    /^#/d
    /^$/d
    /^\[linux_bridge\]$/aphysical_interface_mappings = provider:eth1
    /^\[vxlan\]$/aenable_vxlan = true\\
    local_ip = $CONTROLLER_IP\\
    l2_population = true
    /^\[securitygroup\]$/aenable_security_group = true\\
    firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
    }" /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    
    cp /etc/neutron/l3_agent.ini /etc/neutron/l3_agent.ini.bak
    sed -i "{
    /^#/d
    /^$/d
    /^\[DEFAULT\]$/ainterface_driver = linuxbridge
    }" /etc/neutron/l3_agent.ini
    
    cp /etc/neutron/dhcp_agent.ini /etc/neutron/dhcp_agent.ini.bak
    sed -i "{
    /^#/d
    /^$/d
    /^\[DEFAULT\]$/ainterface_driver = linuxbridge\\
    dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq\\
    enable_isolated_metadata = true
    }" /etc/neutron/dhcp_agent.ini
    
    cp /etc/neutron/metadata_agent.ini /etc/neutron/metadata_agent.ini.bak
    # Fixed: the option name had lost its leading "n" ("ova_metadata_host").
    sed -i "{
    /^#/d;
    /^$/d;
    /^\[DEFAULT\]$/anova_metadata_host = $CONTROLLER_IP\\
    metadata_proxy_shared_secret = METADATA_SECRET
    }" /etc/neutron/metadata_agent.ini
    
    # Point nova at neutron (the [neutron] section of nova.conf).
    sed -i "{
    /^#/d;
    /^$/d;
    /^\[neutron\]$/aurl = http://$CONTROLLER_IP:9696\\
    auth_url = http://$CONTROLLER_IP:5000\\
    auth_type = password\\
    project_domain_name = default\\
    user_domain_name = default\\
    region_name = RegionOne\\
    project_name = service\\
    username = neutron\\
    password = NEUTRON_PASS\\
    service_metadata_proxy = true\\
    metadata_proxy_shared_secret = METADATA_SECRET
    }" /etc/nova/nova.conf
    
    ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
    su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
    
    systemctl restart openstack-nova-api.service
    systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
    systemctl restart neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
    systemctl status neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
    
    ## Networking option 2 (self-service) also needs the layer-3 agent:
    systemctl enable neutron-l3-agent.service
    systemctl restart neutron-l3-agent.service
    systemctl status neutron-l3-agent.service
    
    sleep 5
    echo ----------------------------------------------------------
    echo               controller node neutron is OK!   
    echo ----------------------------------------------------------
    sleep 5
    
    ###############################################################
    ##########      Controller node: cinder installation     ######
    ###############################################################
    
    . /root/admin
    openstack user create --domain default --password CINDER_PASS cinder
    openstack role add --project service --user cinder admin
    openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
    openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
    # The endpoint URLs are quoted: unquoted "(" / ")" in %(project_id)s
    # are shell syntax errors.
    openstack endpoint create --region RegionOne volumev2 public "http://$CONTROLLER_IP:8776/v2/%(project_id)s"
    openstack endpoint create --region RegionOne volumev2 internal "http://$CONTROLLER_IP:8776/v2/%(project_id)s"
    openstack endpoint create --region RegionOne volumev2 admin "http://$CONTROLLER_IP:8776/v2/%(project_id)s"
    openstack endpoint create --region RegionOne volumev3 public "http://$CONTROLLER_IP:8776/v3/%(project_id)s"
    openstack endpoint create --region RegionOne volumev3 internal "http://$CONTROLLER_IP:8776/v3/%(project_id)s"
    openstack endpoint create --region RegionOne volumev3 admin "http://$CONTROLLER_IP:8776/v3/%(project_id)s"
    
    yum install openstack-cinder -y
    mv /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
    # Strip comments and blank lines from the packaged config.
    egrep -v "^#|^$" /etc/cinder/cinder.conf.bak > /etc/cinder/cinder.conf
    # Fixed: the option name had lost its leading "t" ("ransport_url").
    sed -i "/^\[DEFAULT\]$/atransport_url = rabbit://openstack:RABBIT_PASS@$CONTROLLER_IP\\
    auth_strategy = keystone\\
    my_ip = $CONTROLLER_IP" /etc/cinder/cinder.conf
    sed -i "/^\[database\]$/aconnection = mysql+pymysql://cinder:CINDER_DBPASS@$CONTROLLER_IP/cinder" /etc/cinder/cinder.conf
    sed -i "/^\[keystone_authtoken\]$/aauth_uri = http://$CONTROLLER_IP:5000\\
    auth_url = http://$CONTROLLER_IP:5000\\
    memcached_servers = $CONTROLLER_IP:11211\\
    auth_type = password\\
    project_domain_id = default\\
    user_domain_id = default\\
    project_name = service\\
    username = cinder\\
    password = CINDER_PASS" /etc/cinder/cinder.conf
    sed -i "/^\[oslo_concurrency\]$/alock_path = /var/lib/cinder/tmp" /etc/cinder/cinder.conf
    
    su -s /bin/sh -c "cinder-manage db sync" cinder
    sed -i "/^\[cinder\]$/aos_region_name = RegionOne"  /etc/nova/nova.conf
    
    systemctl restart openstack-nova-api.service
    systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
    systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service
    
    sleep 5
    echo ----------------------------------------------------------
    echo                 controller node cinder is OK!
    echo ----------------------------------------------------------
    sleep 5
    
    ###############################################################
    ##########     Controller node: horizon installation     ######
    ###############################################################
    
    yum install openstack-dashboard -y
    # Replace the stock config with the pre-edited local_settings that was
    # uploaded to /root before the run.
    mv /etc/openstack-dashboard/local_settings /etc/openstack-dashboard/local_settings.bak
    cp /root/local_settings /etc/openstack-dashboard/local_settings
    # Work around the dashboard WSGI application-group issue on RDO.
    echo "WSGIApplicationGroup %{GLOBAL}" >> /etc/httpd/conf.d/openstack-dashboard.conf
    systemctl restart httpd.service
    
    echo -------------------------------------------------------------
    echo                  controller node horizon is OK!   
    echo -------------------------------------------------------------

    三、计算节点部署compute.sh脚本

    #############################################################
    ##########    Compute node environment preparation     ######
    #############################################################
    
    #hostnamectl set-hostname $COMPUTER1_NAME
    
    cat <<EOF >> /etc/hosts
    $CONTROLLER_IP    $CONTROLLER_NAME
    $COMPUTER1_IP    $COMPUTER1_NAME
    $BLOCK1_IP    $BLOCK1_NAME
    $OBJECT1_IP    $OBJECT1_NAME
    EOF
    
    ## Time synchronization: follow the controller's chrony server.
    yum install -y http://dl.fedoraproject.org/pub/epel/7Server/x86_64/Packages/e/epel-release-7-11.noarch.rpm
    yum makecache
    yum install chrony -y
    cp /etc/chrony.conf /etc/chrony.conf.bak
    sed -i "/^server/d" /etc/chrony.conf
    echo "server $CONTROLLER_IP" >> /etc/chrony.conf
    systemctl restart chronyd
    systemctl enable chronyd
    systemctl status chronyd
    chronyc sources -v
    
    ## Configure the eth1 (provider network) interface.
    # Fixed: DEVICE must be eth1 in ifcfg-eth1 (was eth0, which would
    # reconfigure the management NIC instead).
    cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-eth1
    TYPE=Ethernet
    BOOTPROTO=none
    DEVICE=eth1
    ONBOOT=yes
    EOF
    
    
    ## Configure the local OpenStack yum repository served by the controller.
    yum clean all
    mkdir -p /etc/yum.repos.d/save
    mv /etc/yum.repos.d/C* /etc/yum.repos.d/save
    mv /etc/yum.repos.d/epel* /etc/yum.repos.d/save
    cat <<EOF > /etc/yum.repos.d/yum.repo
    [rocky]
    name=rocky-openstack
    baseurl=ftp://$CONTROLLER_IP/pub/openstack
    enable=true
    gpgcheck=0
    EOF
    yum clean all
    yum makecache
    
    sleep 5
    echo --------------------------------------------------------
    echo          compute node environment is OK!
    echo --------------------------------------------------------
    sleep 5
    
    #############################################################
    ##########         Compute node: nova installation     ######
    #############################################################
    
    yum install openstack-nova-compute -y
    cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
    # Escaped section headers; multi-line append text continued with
    # trailing backslashes so every option line is appended by sed.
    sed -i "/^\[DEFAULT\]$/aenabled_apis = osapi_compute,metadata\\
    transport_url = rabbit://openstack:RABBIT_PASS@$CONTROLLER_IP\\
    my_ip = $COMPUTER1_IP\\
    use_neutron = true\\
    firewall_driver = nova.virt.firewall.NoopFirewallDriver" /etc/nova/nova.conf
    sed -i "/^\[api\]$/aauth_strategy = keystone" /etc/nova/nova.conf
    sed -i "/^\[keystone_authtoken\]$/aauth_url = http://$CONTROLLER_IP:5000/v3\\
    memcached_servers = $CONTROLLER_IP:11211\\
    auth_type = password\\
    project_domain_name = default\\
    user_domain_name = default\\
    project_name = service\\
    username = nova\\
    password = NOVA_PASS" /etc/nova/nova.conf
    sed -i "/^\[vnc\]$/aenabled = true\\
    server_listen = 0.0.0.0\\
    server_proxyclient_address = $COMPUTER1_IP\\
    novncproxy_base_url = http://$CONTROLLER_IP:6080/vnc_auto.html" /etc/nova/nova.conf
    sed -i "/^\[glance\]$/aapi_servers = http://$CONTROLLER_IP:9292" /etc/nova/nova.conf
    # qemu (software emulation) because the lab runs nested in VMs.
    sed -i "/^\[libvirt\]$/avirt_type = qemu" /etc/nova/nova.conf
    sed -i "/^\[oslo_concurrency\]$/alock_path = /var/lib/nova/tmp" /etc/nova/nova.conf
    # Fixed: the option name had lost its leading "r" ("egion_name").
    sed -i "/^\[placement\]$/aregion_name = RegionOne\\
    project_domain_name = Default\\
    project_name = service\\
    auth_type = password\\
    user_domain_name = Default\\
    auth_url = http://$CONTROLLER_IP:5000/v3\\
    username = placement\\
    password = PLACEMENT_PASS" /etc/nova/nova.conf
    systemctl enable libvirtd.service openstack-nova-compute.service
    systemctl restart libvirtd.service openstack-nova-compute.service
    systemctl status libvirtd.service openstack-nova-compute.service
    
    sleep 5
    echo -------------------------------------------------------
    echo                  compute node nova is OK!
    echo -------------------------------------------------------
    sleep 5
    
    
    ############################################################
    ##########          计算节点neutron安装               ######
    ############################################################
    
    # Install the neutron linuxbridge agent and wire nova to neutron.
    yum install openstack-neutron-linuxbridge ebtables ipset -y
    
    cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
    # First strip comment and blank lines, then append per-section keys.
    # (The original mixed multi-line 'a' commands inside one { } script,
    # which GNU sed cannot parse; it also left section brackets unescaped
    # and lost the leading 't' of transport_url to a "\t" tab escape.)
    sed -i -e '/^#/d' -e '/^$/d' /etc/neutron/neutron.conf
    sed -i "/^\[DEFAULT\]$/atransport_url = rabbit://openstack:RABBIT_PASS@$CONTROLLER_IP\nauth_strategy = keystone" /etc/neutron/neutron.conf
    sed -i "/^\[keystone_authtoken\]$/awww_authenticate_uri = http://$CONTROLLER_IP:5000\nauth_url = http://$CONTROLLER_IP:5000\nmemcached_servers = $CONTROLLER_IP:11211\nauth_type = password\nproject_domain_name = default\nuser_domain_name = default\nproject_name = service\nusername = neutron\npassword = NEUTRON_PASS" /etc/neutron/neutron.conf
    sed -i "/^\[oslo_concurrency\]$/alock_path = /var/lib/neutron/tmp" /etc/neutron/neutron.conf
    
    cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
    sed -i -e '/^#/d' -e '/^$/d' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    # eth1 is the provider (business) network interface — see setup notes.
    sed -i "/^\[linux_bridge\]$/aphysical_interface_mappings = provider:eth1" /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    sed -i "/^\[vxlan\]$/aenable_vxlan = true\nlocal_ip = $COMPUTER1_IP\nl2_population = true" /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    sed -i "/^\[securitygroup\]$/aenable_security_group = true\nfirewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver" /etc/neutron/plugins/ml2/linuxbridge_agent.ini
    
    # Tell nova-compute how to reach neutron.
    sed -i -e '/^#/d' -e '/^$/d' /etc/nova/nova.conf
    sed -i "/^\[neutron\]$/aurl = http://$CONTROLLER_IP:9696\nauth_url = http://$CONTROLLER_IP:5000\nauth_type = password\nproject_domain_name = default\nuser_domain_name = default\nregion_name = RegionOne\nproject_name = service\nusername = neutron\npassword = NEUTRON_PASS" /etc/nova/nova.conf
    
    systemctl restart openstack-nova-compute.service
    systemctl enable neutron-linuxbridge-agent.service
    systemctl restart neutron-linuxbridge-agent.service
    systemctl status neutron-linuxbridge-agent.service
    
    sleep 5
    echo --------------------------------------------------------
    echo                  compute node neutron is OK!
    echo --------------------------------------------------------
    sleep 5
    
    #############################################################
    ##########               存储节点cinder安装            ######
    #############################################################
    
    # Storage node: LVM backend on /dev/sdb plus the cinder-volume service.
    yum install lvm2 device-mapper-persistent-data -y
    systemctl enable lvm2-lvmetad.service
    systemctl start lvm2-lvmetad.service
    
    pvcreate /dev/sdb
    vgcreate cinder-volumes /dev/sdb
    mv /etc/lvm/lvm.conf /etc/lvm/lvm.conf.bak
    egrep -v "^#|^$" /etc/lvm/lvm.conf.bak > /etc/lvm/lvm.conf
    # Only let LVM scan sdb; reject every other device.
    sed -i '/^devices {$/afilter = [ "a/sdb/", "r/.*/"]' /etc/lvm/lvm.conf
    
    yum install openstack-cinder targetcli python-keystone -y
    mv /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
    egrep -v "^#|^$" /etc/cinder/cinder.conf.bak > /etc/cinder/cinder.conf
    # Section brackets are escaped (/^\[database\]$/ etc.); unescaped they
    # are character classes and never match. Multi-line values use \n
    # escapes so GNU sed appends them in one 'a' command. Also fixed the
    # tab-eaten "ransport_url" key from the original "\t" escape.
    sed -i "/^\[database\]$/aconnection = mysql+pymysql://cinder:CINDER_DBPASS@$CONTROLLER_IP/cinder" /etc/cinder/cinder.conf
    sed -i "/^\[DEFAULT\]$/atransport_url = rabbit://openstack:RABBIT_PASS@$CONTROLLER_IP\nauth_strategy = keystone\nmy_ip = $BLOCK1_IP\nenabled_backends = lvm\nglance_api_servers = http://$CONTROLLER_IP:9292" /etc/cinder/cinder.conf
    sed -i "/^\[keystone_authtoken\]$/awww_authenticate_uri = http://$CONTROLLER_IP:5000\nauth_url = http://$CONTROLLER_IP:5000\nmemcached_servers = $CONTROLLER_IP:11211\nauth_type = password\nproject_domain_id = default\nuser_domain_id = default\nproject_name = service\nusername = cinder\npassword = CINDER_PASS" /etc/cinder/cinder.conf
    sed -i "/^\[oslo_concurrency\]$/alock_path = /var/lib/cinder/tmp" /etc/cinder/cinder.conf
    # The [lvm] backend section does not exist yet, so it is appended whole.
    cat <<EOF >> /etc/cinder/cinder.conf
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm
EOF
    
    systemctl enable openstack-cinder-volume.service target.service
    systemctl start openstack-cinder-volume.service target.service
    
    sleep 5
    echo ----------------------------------------------------------
    echo                  compute node cinder is OK! 
    echo -----------------------------------------------------------
    sleep 5
  • 相关阅读:
    Django-website 程序案例系列-3 URL详解
    Django-website 程序案例系列-1 最简单的web服务器
    c# 多维数组、交错数组(转化为DataTable)
    c++(重载等号=操作为深拷贝)
    c# 导入c++ dll
    Nhibernate HQL 匿名类(严格说是map的使用以及构造函数的使用
    spring.net 集成nhibernate配置文件(这里暴露了GetCurrentSession 对于 CurrentSession unbond thread这里给出了解决方法)
    hibernate mapping文件中 xmlns会导致linq to xml 查询不到对应的节点
    linq to xml
    xml 操作(动态添加 property属性 其他节点同理)
  • 原文地址:https://www.cnblogs.com/chenli90/p/10638163.html
Copyright © 2011-2022 走看看