CloudStack Learning - 1

    Environment preparation


     

    Virtual machine configuration used in the lab

    VMware Workstation
    Two virtual machines
    OS version: CentOS 6.6 x86_64
    Memory: 4 GB
    Network: both machines use NAT
    Disk: add an extra 50 GB disk after the OS is installed
    Extra: check the VT-x (hardware virtualization) option; see the quick check below
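    Before installing anything, it is worth confirming that hardware virtualization is actually exposed inside the agent VM. A quick check (a sketch, run after the OS is installed):

    # a non-zero count means the vmx (Intel) or svm (AMD) CPU flag is visible to the guest
    egrep -c '(vmx|svm)' /proc/cpuinfo
    # /dev/kvm should also appear once the kvm modules are loaded later on
    ls -l /dev/kvm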
    

    Background

    CloudStack is modeled on Amazon's cloud (AWS)
    GlusterFS is modeled on Google's distributed file system
    Hadoop likewise grew out of imitating Google's big-data systems
    CloudStack is written in Java
    OpenStack is written in Python

    CloudStack's master/agent architecture is similar to SaltStack's

    Downloading the packages

    The following RPM packages can be downloaded from the official repository:

    http://cloudstack.apt-get.eu/centos/6/4.8/

    Both the master and the agent need RPM packages. Note the URL path: the "6" is the CentOS 6 series and "4.8" is the CloudStack version.
    The usage package is for billing/metering and is not used here.
    The cli package is for calling the Amazon AWS-style API and is not used here either.
    This lab only uses three packages: management, common, and agent.

    Download the following packages:
    cloudstack-agent-4.8.0-1.el6.x86_64.rpm
    cloudstack-baremetal-agent-4.8.0-1.el6.x86_64.rpm
    cloudstack-cli-4.8.0-1.el6.x86_64.rpm
    cloudstack-common-4.8.0-1.el6.x86_64.rpm
    cloudstack-management-4.8.0-1.el6.x86_64.rpm
    cloudstack-usage-4.8.0-1.el6.x86_64.rpm
    

    The commands are as follows:

    [root@master1 ~]# mkdir /tools
    [root@master1 ~]# cd /tools/
    wget http://cloudstack.apt-get.eu/centos/6/4.8/cloudstack-agent-4.8.0-1.el6.x86_64.rpm
    wget http://cloudstack.apt-get.eu/centos/6/4.8/cloudstack-baremetal-agent-4.8.0-1.el6.x86_64.rpm
    wget http://cloudstack.apt-get.eu/centos/6/4.8/cloudstack-cli-4.8.0-1.el6.x86_64.rpm
    wget http://cloudstack.apt-get.eu/centos/6/4.8/cloudstack-common-4.8.0-1.el6.x86_64.rpm
    wget http://cloudstack.apt-get.eu/centos/6/4.8/cloudstack-management-4.8.0-1.el6.x86_64.rpm
    wget http://cloudstack.apt-get.eu/centos/6/4.8/cloudstack-usage-4.8.0-1.el6.x86_64.rpm
    

      

    Download the KVM system VM template. Only the master needs it; the template is used by the system VMs.
    (A newer build, systemvm64template-2016-05-18-4.7.1-kvm.qcow2.bz2, also exists, but this walkthrough uses the 4.6.0 template linked below.)

    http://cloudstack.apt-get.eu/systemvm/4.6/
    http://cloudstack.apt-get.eu/systemvm/4.6/systemvm64template-4.6.0-kvm.qcow2.bz2
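    The walkthrough later expects this template under /tools on the master; a download sketch:

    cd /tools/
    wget http://cloudstack.apt-get.eu/systemvm/4.6/systemvm64template-4.6.0-kvm.qcow2.bz2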
    

    

    Getting started
    Turn off iptables and SELinux

    sed  -i   's#SELINUX=enforcing#SELINUX=disabled#g'   /etc/selinux/config
    setenforce  0
     chkconfig iptables off 
    /etc/init.d/iptables  stop
    

    Configure static IP addresses on both machines.

    [root@master1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0
    DEVICE=eth0
    TYPE=Ethernet
    ONBOOT=yes
    BOOTPROTO=static
    IPADDR=192.168.145.151
    NETMASK=255.255.255.0
    GATEWAY=192.168.145.2
    DNS1=10.0.1.11
    [root@master1 ~]# 
    [root@agent1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0 
    DEVICE=eth0
    TYPE=Ethernet
    ONBOOT=yes
    BOOTPROTO=static
    IPADDR=192.168.145.152
    NETMASK=255.255.255.0
    GATEWAY=192.168.145.2
    DNS1=10.0.1.11
    [root@agent1 ~]# 
    

    Set the hostnames to master1 and agent1 respectively.

    Configure the hosts file:
    cat >>/etc/hosts<<EOF
    192.168.145.151 master1
    192.168.145.152 agent1
    EOF
    

    Configure NTP.

    yum  install ntp -y
    chkconfig ntpd on 
    /etc/init.d/ntpd start
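    Once ntpd has been running for a while, synchronization can be verified (a quick check; an asterisk in front of a peer means the clock is synced to it):

    ntpq -p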
    

    Check hostname --fqdn on both machines:

    [root@master1 ~]# hostname --fqdn
    master1
    [root@master1 ~]# 
    [root@agent1 ~]# hostname --fqdn
    agent1
    [root@agent1 ~]# 
    

    Install the EPEL repository on both machines; the default 163.com mirror can provide the epel-release package.

    yum   install  epel-release -y
    

    Install NFS on the master.
    This also pulls in the rpcbind dependency automatically.
    NFS acts as secondary storage: it serves VM ISO files to the agent and is where snapshots are stored.

    yum  install nfs-utils -y
    

    Configure NFS on the master to serve as secondary storage for the agent host.

    [root@master1 ~]# cat /etc/exports 
    /export/secondary   *(rw,async,no_root_squash,no_subtree_check)
    [root@master1 ~]# 
    

    Create the export directory on the master.

    [root@master1 ~]# mkdir /export/secondary  -p
    [root@master1 ~]#
    

    Do the same on the agent; note that the agent creates a primary directory instead.

    [root@agent1 ~]# mkdir /export/primary  -p
    [root@agent1 ~]# 
    

    Format the disk.
    On the master (no partitioning here; the whole device is used):

    [root@master1 ~]# mkfs.ext4 /dev/sdb
    mke2fs 1.41.12 (17-May-2010)
    /dev/sdb is entire device, not just one partition!
    Proceed anyway? (y,n) y
    Filesystem label=
    OS type: Linux
    Block size=4096 (log=2)
    Fragment size=4096 (log=2)
    Stride=0 blocks, Stripe width=0 blocks
    3276800 inodes, 13107200 blocks
    655360 blocks (5.00%) reserved for the super user
    First data block=0
    Maximum filesystem blocks=4294967296
    400 block groups
    32768 blocks per group, 32768 fragments per group
    8192 inodes per group
    Superblock backups stored on blocks: 
    	32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208, 
    	4096000, 7962624, 11239424
    
    Writing inode tables: done                            
    Creating journal (32768 blocks): done
    Writing superblocks and filesystem accounting information: done
    
    This filesystem will be automatically checked every 35 mounts or
    180 days, whichever comes first.  Use tune2fs -c or -i to override.
    [root@master1 ~]# 
    

    On the agent:

    [root@agent1 ~]# mkfs.ext4 /dev/sdb
    mke2fs 1.41.12 (17-May-2010)
    /dev/sdb is entire device, not just one partition!
    Proceed anyway? (y,n) y
    Filesystem label=
    OS type: Linux
    Block size=4096 (log=2)
    Fragment size=4096 (log=2)
    Stride=0 blocks, Stripe width=0 blocks
    3276800 inodes, 13107200 blocks
    655360 blocks (5.00%) reserved for the super user
    First data block=0
    Maximum filesystem blocks=4294967296
    400 block groups
    32768 blocks per group, 32768 fragments per group
    8192 inodes per group
    Superblock backups stored on blocks: 
    	32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208, 
    	4096000, 7962624, 11239424
    
    Writing inode tables: done                            
    Creating journal (32768 blocks): done
    Writing superblocks and filesystem accounting information: done
    
    This filesystem will be automatically checked every 37 mounts or
    180 days, whichever comes first.  Use tune2fs -c or -i to override.
    [root@agent1 ~]# 
    

    Mount the disks.

    On the master:
    [root@master1 ~]# echo "/dev/sdb   /export/secondary  ext4  defaults  0  0">>/etc/fstab
    [root@master1 ~]# mount -a
    [root@master1 ~]# df -h
    Filesystem      Size  Used Avail Use% Mounted on
    /dev/sda3        35G  2.3G   31G   7% /
    tmpfs           931M     0  931M   0% /dev/shm
    /dev/sda1       380M   33M  328M   9% /boot
    /dev/sdb         50G   52M   47G   1% /export/secondary
    [root@master1 ~]# 
    
    On the agent:
    [root@agent1 ~]# echo "/dev/sdb   /export/primary  ext4  defaults  0  0">>/etc/fstab
    [root@agent1 ~]# mount -a
    [root@agent1 ~]# df -h
    Filesystem      Size  Used Avail Use% Mounted on
    /dev/sda3        35G  2.1G   32G   7% /
    tmpfs           1.9G     0  1.9G   0% /dev/shm
    /dev/sda1       380M   33M  328M   9% /boot
    /dev/sdb         50G   52M   47G   1% /export/primary
    [root@agent1 ~]#
    

      

    Configuring NFS and iptables


     

    First open the official documentation for configuring NFS, iptables, and so on:

    http://docs.cloudstack.apache.org/projects/cloudstack-installation/en/4.9/qig.html

    Some companies allow iptables to be disabled and others require it enabled; here we configure NFS with iptables enabled.

    On CentOS 6.x, add the following NFS parameters. They already exist in the file but are commented out; here we simply append them to the end of the file.
    This is done on the master.

    LOCKD_TCPPORT=32803
    LOCKD_UDPPORT=32769
    MOUNTD_PORT=892
    RQUOTAD_PORT=875
    STATD_PORT=662
    STATD_OUTGOING_PORT=2020
    

    Here we simply append them to the end of the file:

    [root@master1 ~]# vim /etc/sysconfig/nfs
    [root@master1 ~]# tail -10 /etc/sysconfig/nfs 
    #
    # To enable RDMA support on the server by setting this to
    # the port the server should listen on
    #RDMA_PORT=20049 
    LOCKD_TCPPORT=32803
    LOCKD_UDPPORT=32769
    MOUNTD_PORT=892
    RQUOTAD_PORT=875
    STATD_PORT=662
    STATD_OUTGOING_PORT=2020
    [root@master1 ~]# 
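    The same append can also be done non-interactively with a heredoc instead of vim (a sketch, equivalent to the manual edit above):

    cat >>/etc/sysconfig/nfs<<EOF
    LOCKD_TCPPORT=32803
    LOCKD_UDPPORT=32769
    MOUNTD_PORT=892
    RQUOTAD_PORT=875
    STATD_PORT=662
    STATD_OUTGOING_PORT=2020
    EOF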
    


    Before-and-after comparison of the iptables rules, on the master:

    [root@master1 tools]# cat /etc/sysconfig/iptables
    # Firewall configuration written by system-config-firewall
    # Manual customization of this file is not recommended.
    *filter
    :INPUT ACCEPT [0:0]
    :FORWARD ACCEPT [0:0]
    :OUTPUT ACCEPT [0:0]
    -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
    -A INPUT -p icmp -j ACCEPT
    -A INPUT -i lo -j ACCEPT
    -A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT
    -A INPUT -j REJECT --reject-with icmp-host-prohibited
    -A FORWARD -j REJECT --reject-with icmp-host-prohibited
    COMMIT
    [root@master1 tools]# vim /etc/sysconfig/iptables
    

    Add the following rules.
    An extra rule for port 80 is included to save reconfiguring later; port 80 has nothing to do with NFS, it is for the HTTP image repository set up further below.

    -A INPUT  -m state --state NEW -p udp --dport 111 -j ACCEPT
    -A INPUT  -m state --state NEW -p tcp --dport 111 -j ACCEPT
    -A INPUT  -m state --state NEW -p tcp --dport 2049 -j ACCEPT
    -A INPUT  -m state --state NEW -p tcp --dport 32803 -j ACCEPT
    -A INPUT  -m state --state NEW -p udp --dport 32769 -j ACCEPT
    -A INPUT  -m state --state NEW -p tcp --dport 892 -j ACCEPT
    -A INPUT  -m state --state NEW -p udp --dport 892 -j ACCEPT
    -A INPUT  -m state --state NEW -p tcp --dport 875 -j ACCEPT
    -A INPUT  -m state --state NEW -p udp --dport 875 -j ACCEPT
    -A INPUT  -m state --state NEW -p tcp --dport 662 -j ACCEPT
    -A INPUT  -m state --state NEW -p udp --dport 662 -j ACCEPT
    -A INPUT  -m state --state NEW -p tcp --dport 80 -j ACCEPT
    

    The result after adding them:

    [root@master1 tools]# cat /etc/sysconfig/iptables
    # Firewall configuration written by system-config-firewall
    # Manual customization of this file is not recommended.
    *filter
    :INPUT ACCEPT [0:0]
    :FORWARD ACCEPT [0:0]
    :OUTPUT ACCEPT [0:0]
    -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
    -A INPUT -p icmp -j ACCEPT
    -A INPUT -i lo -j ACCEPT
    -A INPUT  -m state --state NEW -p udp --dport 111 -j ACCEPT
    -A INPUT  -m state --state NEW -p tcp --dport 111 -j ACCEPT
    -A INPUT  -m state --state NEW -p tcp --dport 2049 -j ACCEPT
    -A INPUT  -m state --state NEW -p tcp --dport 32803 -j ACCEPT
    -A INPUT  -m state --state NEW -p udp --dport 32769 -j ACCEPT
    -A INPUT  -m state --state NEW -p tcp --dport 892 -j ACCEPT
    -A INPUT  -m state --state NEW -p udp --dport 892 -j ACCEPT
    -A INPUT  -m state --state NEW -p tcp --dport 875 -j ACCEPT
    -A INPUT  -m state --state NEW -p udp --dport 875 -j ACCEPT
    -A INPUT  -m state --state NEW -p tcp --dport 662 -j ACCEPT
    -A INPUT  -m state --state NEW -p udp --dport 662 -j ACCEPT
    -A INPUT  -m state --state NEW -p tcp --dport 80 -j ACCEPT
    -A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT
    -A INPUT -j REJECT --reject-with icmp-host-prohibited
    -A FORWARD -j REJECT --reject-with icmp-host-prohibited
    COMMIT
    [root@master1 tools]# 
    

    Start the NFS service and iptables on the master.

    [root@master1 tools]# service iptables restart
    iptables: Applying firewall rules:                         [  OK  ]
    [root@master1 tools]# service rpcbind start
    [root@master1 tools]# service nfs start
    Starting NFS services:                                     [  OK  ]
    Starting NFS quotas:                                       [  OK  ]
    Starting NFS mountd:                                       [  OK  ]
    Starting NFS daemon:                                       [  OK  ]
    Starting RPC idmapd:                                       [  OK  ]
    [root@master1 tools]# chkconfig rpcbind on
    [root@master1 tools]# chkconfig nfs on
    [root@master1 tools]# 
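    A quick way to confirm that the fixed ports from /etc/sysconfig/nfs are actually in use (a sketch, run on the master; mountd should be on 892 and nlockmgr on 32803/32769 if the overrides took effect):

    rpcinfo -p localhost | egrep 'mountd|nlockmgr|nfs'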
    

    Check from the agent.
    If the showmount command is missing, install the NFS package:
    yum install nfs-utils -y

    Check whether the master is exporting NFS:

    [root@agent1 ~]# showmount -e 192.168.145.151
    Export list for 192.168.145.151:
    /export/secondary *
    [root@agent1 ~]# 
    

    Test whether it can be mounted:

    [root@agent1 ~]# mount -t nfs 192.168.145.151:/export/secondary /mnt
    [root@agent1 ~]# df -h
    Filesystem            Size  Used Avail Use% Mounted on
    /dev/sda3              35G  2.3G   31G   7% /
    tmpfs                 1.9G     0  1.9G   0% /dev/shm
    /dev/sda1             380M   33M  328M   9% /boot
    /dev/sdb               50G   52M   47G   1% /export/primary
    192.168.145.151:/export/secondary
                           50G   52M   47G   1% /mnt
    [root@agent1 ~]# 
    

    The test succeeded, so just unmount it; the above was only a test.

    [root@agent1 ~]# umount /mnt -lf
    [root@agent1 ~]# df -h
    Filesystem      Size  Used Avail Use% Mounted on
    /dev/sda3        35G  2.3G   31G   7% /
    tmpfs           1.9G     0  1.9G   0% /dev/shm
    /dev/sda1       380M   33M  328M   9% /boot
    /dev/sdb         50G   52M   47G   1% /export/primary
    [root@agent1 ~]# 
    

      

    Installing and configuring CloudStack


     

    Management server installation, on the master:

    [root@master1 tools]# ls
    cloudstack-agent-4.8.0-1.el6.x86_64.rpm
    cloudstack-baremetal-agent-4.8.0-1.el6.x86_64.rpm
    cloudstack-cli-4.8.0-1.el6.x86_64.rpm
    cloudstack-common-4.8.0-1.el6.x86_64.rpm
    cloudstack-management-4.8.0-1.el6.x86_64.rpm
    cloudstack-usage-4.8.0-1.el6.x86_64.rpm
    systemvm64template-4.6.0-kvm.qcow2.bz2
    
    
    [root@master1 tools]# yum install -y cloudstack-management-4.8.0-1.el6.x86_64.rpm cloudstack-common-4.8.0-1.el6.x86_64.rpm
    [root@master1 tools]# rpm -qa | grep cloudstack
    cloudstack-common-4.8.0-1.el6.x86_64
    cloudstack-management-4.8.0-1.el6.x86_64
    [root@master1 tools]# 
    

    Install mysql-server on the master.

    [root@master1 tools]# yum  install mysql-server -y
    

     

    Edit the MySQL configuration file and add the following parameters
    under the [mysqld] section:

    innodb_rollback_on_timeout=1
    innodb_lock_wait_timeout=600
    max_connections=350
    log-bin=mysql-bin
    binlog-format = 'ROW'
    

    The final result is as follows:

    [root@master1 tools]# vim /etc/my.cnf 
    [root@master1 tools]# cat /etc/my.cnf 
    [mysqld]
    datadir=/var/lib/mysql
    socket=/var/lib/mysql/mysql.sock
    user=mysql
    # Disabling symbolic-links is recommended to prevent assorted security risks
    symbolic-links=0
    innodb_rollback_on_timeout=1
    innodb_lock_wait_timeout=600
    max_connections=350
    log-bin=mysql-bin
    binlog-format = 'ROW'
    
    [mysqld_safe]
    log-error=/var/log/mysqld.log
    pid-file=/var/run/mysqld/mysqld.pid
    [root@master1 tools]# 
    

    Start the MySQL service and enable it at boot.

    [root@master1 tools]# service mysqld start
    Initializing MySQL database:  Installing MySQL system tables...
    OK
    Filling help tables...
    OK
    
    To start mysqld at boot time you have to copy
    support-files/mysql.server to the right place for your system
    
    PLEASE REMEMBER TO SET A PASSWORD FOR THE MySQL root USER !
    To do so, start the server, then issue the following commands:
    
    /usr/bin/mysqladmin -u root password 'new-password'
    /usr/bin/mysqladmin -u root -h master1 password 'new-password'
    
    Alternatively you can run:
    /usr/bin/mysql_secure_installation
    
    which will also give you the option of removing the test
    databases and anonymous user created by default.  This is
    strongly recommended for production servers.
    
    See the manual for more instructions.
    
    You can start the MySQL daemon with:
    cd /usr ; /usr/bin/mysqld_safe &
    
    You can test the MySQL daemon with mysql-test-run.pl
    cd /usr/mysql-test ; perl mysql-test-run.pl
    
    Please report any problems with the /usr/bin/mysqlbug script!
    
                                                               [  OK  ]
    Starting mysqld:                                           [  OK  ]
    [root@master1 tools]# chkconfig mysqld on
    [root@master1 tools]# 
    [root@master1 tools]# ls /var/lib/mysql/
    ibdata1  ib_logfile0  ib_logfile1  mysql  mysql.sock  test
    [root@master1 tools]# 
    

    Set the MySQL password; the first command sets it for localhost, the second grants a root account that can log in remotely.

    [root@master1 tools]# /usr/bin/mysqladmin -u root password '123456'
    [root@master1 tools]# mysql -uroot -p123456 -e "grant all on *.* to root@'%'  identified by '123456';"
    [root@master1 tools]# 
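    Optionally, confirm that the parameters added to /etc/my.cnf are in effect (a sketch, using the password set above):

    mysql -uroot -p123456 -e "SHOW VARIABLES WHERE Variable_name IN ('binlog_format','max_connections','innodb_lock_wait_timeout');"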
    


    Initialize the CloudStack database on the master.
    This command essentially imports data into MySQL (run it on the master), executing scripts that create the databases and tables.

    [root@master1 tools]# cloudstack-setup-databases cloud:123456@localhost --deploy-as=root:123456
    Mysql user name:cloud                                                           [ OK ]
    Mysql user password:******                                                      [ OK ]
    Mysql server ip:localhost                                                       [ OK ]
    Mysql server port:3306                                                          [ OK ]
    Mysql root user name:root                                                       [ OK ]
    Mysql root user password:******                                                 [ OK ]
    Checking Cloud database files ...                                               [ OK ]
    Checking local machine hostname ...                                             [ OK ]
    Checking SELinux setup ...                                                      [ OK ]
    Detected local IP address as 192.168.145.151, will use as cluster management server node IP[ OK ]
    Preparing /etc/cloudstack/management/db.properties                              [ OK ]
    Applying /usr/share/cloudstack-management/setup/create-database.sql             [ OK ]
    Applying /usr/share/cloudstack-management/setup/create-schema.sql               [ OK ]
    Applying /usr/share/cloudstack-management/setup/create-database-premium.sql     [ OK ]
    Applying /usr/share/cloudstack-management/setup/create-schema-premium.sql       [ OK ]
    Applying /usr/share/cloudstack-management/setup/server-setup.sql                [ OK ]
    Applying /usr/share/cloudstack-management/setup/templates.sql                   [ OK ]
    Processing encryption ...                                                       [ OK ]
    Finalizing setup ...                                                            [ OK ]
    
    CloudStack has successfully initialized database, you can check your database configuration in /etc/cloudstack/management/db.properties
    
    [root@master1 tools]# 
    

    Initialization is complete.
    The file below is modified automatically during initialization; we can take a look, but nothing in it needs changing.

    [root@master1 tools]# vim /etc/cloudstack/management/db.properties
    [root@master1 tools]# 
    

    Start the management server. Typing cl and pressing Tab shows the available commands:

    [root@master1 tools]# cl
    clean-binary-files                    cloudstack-set-guest-sshkey
    clear                                 cloudstack-setup-databases
    clock                                 cloudstack-setup-encryption
    clockdiff                             cloudstack-setup-management
    cloudstack-external-ipallocator.py    cloudstack-sysvmadm
    cloudstack-migrate-databases          cloudstack-update-xenserver-licenses
    cloudstack-sccs                       cls
    cloudstack-set-guest-password         
    [root@master1 tools]# cloudstack-setup-management 
    Starting to configure CloudStack Management Server:
    Configure Firewall ...        [OK]
    Configure CloudStack Management Server ...[OK]
    CloudStack Management Server setup is Done!
    [root@master1 tools]#
    

    The setup also configures the master's firewall: iptables is restarted with some of CloudStack's own ports added, as shown below. Ports 9090, 8250, and 8080 are new.

    [root@master1 tools]# head -10  /etc/sysconfig/iptables
    # Generated by iptables-save v1.4.7 on Sat Feb 11 20:07:43 2017
    *filter
    :INPUT ACCEPT [0:0]
    :FORWARD ACCEPT [0:0]
    :OUTPUT ACCEPT [0:0]
    -A INPUT -p tcp -m tcp --dport 9090 -j ACCEPT 
    -A INPUT -p tcp -m tcp --dport 8250 -j ACCEPT 
    -A INPUT -p tcp -m tcp --dport 8080 -j ACCEPT 
    -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT 
    -A INPUT -p icmp -j ACCEPT 
    [root@master1 tools]# 
    

    Below is its log; since the service runs on Tomcat underneath, the log is the usual catalina.out.
    The master is best given 16 GB of RAM so the JVM has enough memory and the service starts quickly.

    [root@master1 tools]# tail -f /var/log/cloudstack/management/catalina.out 
    INFO  [o.a.c.f.j.i.AsyncJobManagerImpl] (AsyncJobMgr-Heartbeat-1:ctx-2f0e1bd5) (logid:4db872cc) Begin cleanup expired async-jobs
    INFO  [o.a.c.f.j.i.AsyncJobManagerImpl] (AsyncJobMgr-Heartbeat-1:ctx-2f0e1bd5) (logid:4db872cc) End cleanup expired async-jobs
    INFO  [o.a.c.f.j.i.AsyncJobManagerImpl] (AsyncJobMgr-Heartbeat-1:ctx-fc8583d4) (logid:8724f870) Begin cleanup expired async-jobs
    INFO  [o.a.c.f.j.i.AsyncJobManagerImpl] (AsyncJobMgr-Heartbeat-1:ctx-fc8583d4) (logid:8724f870) End cleanup expired async-jobs
    

    Open the following address in a browser:
    http://192.168.145.151:8080/client/
    If the management page opens, the master-side installation is complete.
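    If no browser is handy, the management server can also be probed from the command line (a sketch; an HTTP response here means Tomcat is answering on 8080):

    curl -I http://192.168.145.151:8080/client/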

    Importing the system VM template

    CloudStack provides functionality through a set of system virtual machines, such as access to the VM console,
    various network services, and management of the resources in secondary storage.
    The next step imports the system VM template and deploys it to the secondary storage created earlier; the management server
    includes a script that handles the system VM template correctly.
    First locate the template file:

    [root@master1 tools]# ls /tools/systemvm64template-4.6.0-kvm.qcow2.bz2 
    /tools/systemvm64template-4.6.0-kvm.qcow2.bz2
    

    Run the following command on the master:

    /usr/share/cloudstack-common/scripts/storage/secondary/cloud-install-sys-tmplt \
    -m /export/secondary \
    -f /tools/systemvm64template-4.6.0-kvm.qcow2.bz2 \
    -h kvm -F
    

    This step imports the system VM template into secondary storage; the run looks like this:

    [root@master1 tools]# /usr/share/cloudstack-common/scripts/storage/secondary/cloud-install-sys-tmplt \
    > -m /export/secondary \
    > -f /tools/systemvm64template-4.6.0-kvm.qcow2.bz2 \
    > -h kvm -F
    Uncompressing to /usr/share/cloudstack-common/scripts/storage/secondary/0cc65968-4ff3-4b4c-b31e-7f1cf5d1959b.qcow2.tmp (type bz2)...could take a long time
    Moving to /export/secondary/template/tmpl/1/3///0cc65968-4ff3-4b4c-b31e-7f1cf5d1959b.qcow2...could take a while
    Successfully installed system VM template /tools/systemvm64template-4.6.0-kvm.qcow2.bz2 to /export/secondary/template/tmpl/1/3/
    [root@master1 tools]# 
    

    After a successful import, the template ends up here: one template file plus one template.properties file.

    [root@master1 tools]# cd /export/secondary/
    [root@master1 secondary]# ls
    lost+found  template
    [root@master1 secondary]# cd template/tmpl/1/3/
    [root@master1 3]# ls
    0cc65968-4ff3-4b4c-b31e-7f1cf5d1959b.qcow2  template.properties
    [root@master1 3]# ls
    0cc65968-4ff3-4b4c-b31e-7f1cf5d1959b.qcow2  template.properties
    [root@master1 3]# pwd
    /export/secondary/template/tmpl/1/3
    [root@master1 3]# 
    

    This is the template's properties file; it does not need modification.

    [root@master1 3]# cat template.properties 
    filename=0cc65968-4ff3-4b4c-b31e-7f1cf5d1959b.qcow2
    description=SystemVM Template
    checksum=
    hvm=false
    size=322954240
    qcow2=true
    id=3
    public=true
    qcow2.filename=0cc65968-4ff3-4b4c-b31e-7f1cf5d1959b.qcow2
    uniquename=routing-3
    qcow2.virtualsize=322954240
    virtualsize=322954240
    qcow2.size=322954240
    [root@master1 3]# 
    

      

    Install the CloudStack packages on the agent.
    On the agent:

    [root@agent1 tools]# yum install cloudstack-common-4.8.0-1.el6.x86_64.rpm cloudstack-agent-4.8.0-1.el6.x86_64.rpm -y
    

    This pulls in qemu-kvm, libvirt, and glusterfs as dependencies; they are all installed automatically.
    GlusterFS is already supported out of the box as a KVM storage backend.

    [root@agent1 ~]# rpm -qa | egrep "cloudstack|gluster|kvm|libvirt"
    glusterfs-client-xlators-3.7.5-19.el6.x86_64
    cloudstack-common-4.8.0-1.el6.x86_64
    cloudstack-agent-4.8.0-1.el6.x86_64
    glusterfs-libs-3.7.5-19.el6.x86_64
    glusterfs-3.7.5-19.el6.x86_64
    glusterfs-api-3.7.5-19.el6.x86_64
    libvirt-python-0.10.2-60.el6.x86_64
    libvirt-0.10.2-60.el6.x86_64
    libvirt-client-0.10.2-60.el6.x86_64
    qemu-kvm-0.12.1.2-2.491.el6_8.3.x86_64
    [root@agent1 ~]# 
    

    Virtualization configuration on the agent

    Configuring KVM
    For KVM there are two parts to configure: libvirt and QEMU.

    Configuring QEMU
    The QEMU side is relatively simple; only one item needs changing. Edit /etc/libvirt/qemu.conf,
    uncomment vnc_listen = "0.0.0.0", and (per the official docs) set security_driver = "none".
    Since security_driver is commented out by default (#security_driver = "selinux"),
    we leave it alone here and only uncomment vnc_listen = "0.0.0.0".
    (Some people say these parameters are changed automatically when the host is added; I have not verified that, so I change them by hand. See the one-liner below.)
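    A one-liner for the vnc_listen change (a sketch, assuming the stock commented line is still present in /etc/libvirt/qemu.conf):

    sed -i 's/^#vnc_listen.*/vnc_listen = "0.0.0.0"/' /etc/libvirt/qemu.conf
    grep '^vnc_listen' /etc/libvirt/qemu.conf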

    Configuring libvirt

    CloudStack manages virtual machines by calling libvirt.
    To support live migration, libvirt needs to listen for unencrypted TCP connections, and its attempt to
    advertise over multicast DNS needs to be disabled. Both are configured in /etc/libvirt/libvirtd.conf.
    (Some people say these parameters are changed automatically when the host is added; I have not verified that, so I change them by hand.)
    Set the following parameters.
    The docs ask us to uncomment them; simply appending them to the end of the file works just as well (on the agent):

    listen_tls = 0
    listen_tcp = 1
    tcp_port = "16059"
    auth_tcp = "none"
    mdns_adv = 0
    

    The command to make the change:

    cat>>/etc/libvirt/libvirtd.conf<<EOF
    listen_tls = 0
    listen_tcp = 1
    tcp_port = "16059"
    auth_tcp = "none"
    mdns_adv = 0
    EOF
    

    The session looks like this:

    [root@agent1 tools]# cat>>/etc/libvirt/libvirtd.conf<<EOF
    > listen_tls = 0
    > listen_tcp = 1
    > tcp_port = "16059"
    > auth_tcp = "none"
    > mdns_adv = 0
    > EOF
    [root@agent1 tools]# tail -5 /etc/libvirt/libvirtd.conf
    listen_tls = 0
    listen_tcp = 1
    tcp_port = "16059"
    auth_tcp = "none"
    mdns_adv = 0
    [root@agent1 tools]# 
    

    The following commented line in /etc/sysconfig/libvirtd also needs changing:
    #LIBVIRTD_ARGS="--listen"
    The docs say to uncomment it; here we change it to -l. Note that this is the letter l, as in listen, not the digit 1:
    LIBVIRTD_ARGS="-l"

    (Some people say these parameters are changed automatically when the host is added; I have not verified that, so I change them by hand.)
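    A one-liner for this change as well (a sketch, assuming the stock commented line shown above):

    sed -i 's/^#LIBVIRTD_ARGS="--listen"/LIBVIRTD_ARGS="-l"/' /etc/sysconfig/libvirtd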
    Check and verify:

    [root@agent1 tools]# grep LIBVIRTD_ARGS /etc/sysconfig/libvirtd
    # in LIBVIRTD_ARGS instead.
    LIBVIRTD_ARGS="-l"
    [root@agent1 tools]# 
    

    Restart the libvirt service and check that the KVM modules are loaded:

    [root@agent1 tools]# /etc/init.d/libvirtd restart
    Stopping libvirtd daemon:                                  [  OK  ]
    Starting libvirtd daemon:                                  [  OK  ]
    [root@agent1 tools]# lsmod | grep kvm
    kvm_intel              55496  0 
    kvm                   337772  1 kvm_intel
    [root@agent1 tools]# 
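    If lsmod shows nothing, the modules can be loaded by hand (a sketch; kvm_intel assumes an Intel CPU, use kvm_amd on AMD hosts):

    modprobe kvm
    modprobe kvm_intel
    lsmod | grep kvm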
    

    The KVM part of the configuration is complete.

    Working with the web UI



    The default credentials are admin/password.
    The UI language can be switched to Simplified Chinese.
    Open the following address in a browser:
    http://192.168.145.151:8080/client/

    CloudStack provides a web-based UI that both administrators and end users can use. What the UI shows
    depends on the credentials used at login. The UI works in most popular browsers, including IE7,
    IE8, IE9, Firefox, Chrome, and so on.
    The login address is:
    http://management-server-ip:8080/client/

     

    admin/password
    The language can be selected here.

     

    After logging in, the screen looks like this:

     

    Initial configuration through the UI
    On the web page, click to skip this step.

     

    Click Add Zone on the right.

     

    Select the basic network type.

     

    For DNS we could simply enter a public DNS server (we have not built our own DNS).
    My local LAN has a DNS server, so I entered that local DNS server here.
    For the network, just pick the first (default) option.

     

    Next.

     

    Enter eth0 here.
    (The advanced networking setup further distinguishes storage, management, and guest networks.)

     

    Edit the management and guest traffic below and set the traffic label to cloudbr0 for both.

     

    Click Next.
    Fill in the gateway and the other details shown above.
    The reserved IP range here is for the system VMs and the hosts.

     

    This range is for the ordinary guest KVM instances that will be created.

     

    The cluster name is up to you.

     

    The host entered here is the agent machine.
    root/root01

     

    Primary storage supports many protocols; look up RBD and Gluster if you are curious.
    RBD is Ceph's block storage.
    With NFS, if that machine goes down, all the KVM guests go down with it.
    With GlusterFS replicated volumes, the guests can be brought back up on another node.
    Here we point it at our own shared mount point, /export/primary.

     

    The final values are shown below; click Next.

     

    When you have time, look up what the options below mean.
    S3 is Amazon AWS's cloud object storage.

     

    The final secondary storage selection is as follows.

    Click Launch.
     

    The initialization process is fairly slow.

     

    You can click No first and enable the zone manually.

     

    Enable the zone.

    After the zone is enabled, the system creates two virtual machines:
    the console proxy VM, which proxies VNC access to your virtual machines, and
    the secondary storage VM, which handles VM images; templates are fetched through it.

     

    After enabling the zone,
    log in to the agent and look at the two system VMs:

    [root@agent1 cloudstack]# virsh list --all
     Id    Name                           State
    ----------------------------------------------------
     1     s-1-VM                         running
     2     v-2-VM                         running
    
    [root@agent1 cloudstack]# 
    

    The host now has a number of new vnet interfaces:

    [root@agent1 cloudstack]# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
        inet6 ::1/128 scope host 
           valid_lft forever preferred_lft forever
    2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
        link/ether 00:0c:29:ab:d5:a9 brd ff:ff:ff:ff:ff:ff
        inet6 fe80::20c:29ff:feab:d5a9/64 scope link 
           valid_lft forever preferred_lft forever
    3: virbr0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN 
        link/ether 52:54:00:ea:87:7d brd ff:ff:ff:ff:ff:ff
        inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
    4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN qlen 500
        link/ether 52:54:00:ea:87:7d brd ff:ff:ff:ff:ff:ff
    8: cloudbr0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN 
        link/ether 00:0c:29:ab:d5:a9 brd ff:ff:ff:ff:ff:ff
        inet 192.168.145.152/24 brd 192.168.145.255 scope global cloudbr0
        inet6 fe80::20c:29ff:feab:d5a9/64 scope link 
           valid_lft forever preferred_lft forever
    10: cloud0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN 
        link/ether fe:00:a9:fe:00:64 brd ff:ff:ff:ff:ff:ff
        inet 169.254.0.1/16 scope global cloud0
        inet6 fe80::f810:caff:fe2d:6be3/64 scope link 
           valid_lft forever preferred_lft forever
    11: vnet0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 500
        link/ether fe:00:a9:fe:00:64 brd ff:ff:ff:ff:ff:ff
        inet6 fe80::fc00:a9ff:fefe:64/64 scope link 
           valid_lft forever preferred_lft forever
    12: vnet1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 500
        link/ether fe:db:22:00:00:07 brd ff:ff:ff:ff:ff:ff
        inet6 fe80::fcdb:22ff:fe00:7/64 scope link 
           valid_lft forever preferred_lft forever
    13: vnet2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 500
        link/ether fe:20:e4:00:00:13 brd ff:ff:ff:ff:ff:ff
        inet6 fe80::fc20:e4ff:fe00:13/64 scope link 
           valid_lft forever preferred_lft forever
    14: vnet3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 500
        link/ether fe:00:a9:fe:00:6e brd ff:ff:ff:ff:ff:ff
        inet6 fe80::fc00:a9ff:fefe:6e/64 scope link 
           valid_lft forever preferred_lft forever
    15: vnet4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 500
        link/ether fe:92:68:00:00:08 brd ff:ff:ff:ff:ff:ff
        inet6 fe80::fc92:68ff:fe00:8/64 scope link 
           valid_lft forever preferred_lft forever
    16: vnet5: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 500
        link/ether fe:84:42:00:00:0f brd ff:ff:ff:ff:ff:ff
        inet6 fe80::fc84:42ff:fe00:f/64 scope link 
           valid_lft forever preferred_lft forever
    [root@agent1 cloudstack]# 
    

    Notice that eth0's IP address has been moved onto cloudbr0:

    [root@agent1 ~]# cat /etc/sysconfig/network-scripts/ifcfg- 
    ifcfg-cloudbr0  ifcfg-eth0      ifcfg-lo        
    [root@agent1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-cloudbr0 
    
    DEVICE=cloudbr0
    
    TYPE=Bridge
    
    ONBOOT=yes
    BOOTPROTO=static
    IPADDR=192.168.145.152
    NETMASK=255.255.255.0
    GATEWAY=192.168.145.2
    DNS1=10.0.1.11
    NM_CONTROLLED=no
    [root@agent1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0 
    DEVICE=eth0
    TYPE=Ethernet
    
    ONBOOT=yes
    BOOTPROTO=static
    IPADDR=192.168.145.152
    NETMASK=255.255.255.0
    GATEWAY=192.168.145.2
    DNS1=10.0.1.11
    NM_CONTROLLED=no
    BRIDGE=cloudbr0
    [root@agent1 ~]# ifconfig eth0
    eth0      Link encap:Ethernet  HWaddr 00:0C:29:AB:D5:A9  
              inet6 addr: fe80::20c:29ff:feab:d5a9/64 Scope:Link
              UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
              RX packets:9577 errors:0 dropped:0 overruns:0 frame:0
              TX packets:5449 errors:0 dropped:0 overruns:0 carrier:0
              collisions:0 txqueuelen:1000 
              RX bytes:1804345 (1.7 MiB)  TX bytes:646033 (630.8 KiB)
    
    [root@agent1 ~]# ifconfig cloudbr0
    cloudbr0  Link encap:Ethernet  HWaddr 00:0C:29:AB:D5:A9  
              inet addr:192.168.145.152  Bcast:192.168.145.255  Mask:255.255.255.0
              inet6 addr: fe80::20c:29ff:feab:d5a9/64 Scope:Link
              UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
              RX packets:2652 errors:0 dropped:0 overruns:0 frame:0
              TX packets:1343 errors:0 dropped:0 overruns:0 carrier:0
              collisions:0 txqueuelen:0 
              RX bytes:277249 (270.7 KiB)  TX bytes:190930 (186.4 KiB)
    
    [root@agent1 ~]# 
    

    System VMs are different from the ordinary VMs created on the host; they ship with the CloudStack platform to carry out some of its own tasks:

    1. Secondary Storage VM (SSVM): manages secondary-storage operations such as uploading and downloading
    template and root image files and storing snapshots and volumes. The first time a VM is created, the template is
    copied from secondary storage to primary storage and a snapshot is created automatically.
    Each zone can have multiple SSVMs; if an SSVM is deleted or stopped, it is rebuilt and started automatically.
    2. Console Proxy VM: presents the console in the web UI.
    3. Virtual router: created automatically when the first instance starts.

    The consoles of both system VMs can be opened.

    The default username and password are root/password.

    The .167 address is on the pod network and the .179 address is on the guest network.

    Below is the network situation of the other system VM.

    The template list includes a built-in CentOS 5.5 template, which cannot be used yet.

     

    It needs to be enabled.

     

    We allow it to be accessible to everyone.

    A prompt says the change only takes effect after restarting the service. After restarting, it downloads the CentOS 5.5 image from the Internet.
    [root@master1 3]# /etc/init.d/cloudstack-management restart
    Stopping cloudstack-management:                            [FAILED]
    Starting cloudstack-management:                            [  OK  ]
    [root@master1 3]# 
    [root@master1 3]# 
    [root@master1 3]# /etc/init.d/cloudstack-management restart
    Stopping cloudstack-management:                            [  OK  ]
    Starting cloudstack-management:                            [  OK  ]
    [root@master1 3]# 
    

    With the change above, it automatically downloads the built-in template.

     

    Templates can be uploaded from the local machine or added via a URL.
    Note that local upload currently has a bug.

     

    Log in to the web UI again.

     

    In some cases, when the zone's resources are in an alert state, new VMs cannot be created. We can adjust a parameter to allow overprovisioning.

    Change it to 3.

    mem.overprovisioning.factor
    Memory overprovisioning factor: available memory = total memory * factor; type: integer; default: 1 (no overprovisioning).
    For example, a host with 4 GB of RAM and a factor of 3 is scheduled as if it had 12 GB.

    The two sites below explain the global settings and are worth a read:
    http://www.chinacloudly.com/cloudstack%E5%85%A8%E5%B1%80%E9%85%8D%E7%BD%AE%E5%8F%82%E6%95%B0/
    http://blog.csdn.net/u011650565/article/details/41945433

    Restart the management service; the login page is unchanged.

     

    Then go to Infrastructure -> Clusters -> cluster -> Settings.

    Find the setting below

    and change it to 3.0 as well.

    Alert settings:

    For the allowed utilization, the 0.85 below can be raised to 0.99, because once utilization hits 0.85 no new VMs may be created.

     

    Now the figure changes; my VM does not have 8 GB, so this is overprovisioning at work.

     

    Creating templates and custom virtual machines



    CloudStack templates can be provided in two ways (see the qemu-img sketch below):
    1. A qcow2 or raw disk file built with KVM
    2. An ISO file uploaded directly as the template
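    For the first mode, a sketch of preparing a qcow2 template from an existing KVM disk (the file names are only examples; qemu-img comes with the qemu-kvm package already installed on the agent):

    # convert a raw disk image into a compressed qcow2 template
    qemu-img convert -O qcow2 -c /var/lib/libvirt/images/centos65-base.raw centos65-template.qcow2
    # inspect the result before uploading it as a template
    qemu-img info centos65-template.qcow2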

    Since nginx is popular in China, we use nginx here to host the image repository.

    [root@master1 ~]# yum install nginx -y
    

    We set up the firewall earlier, but at this point it can also simply be turned off.
    In a real environment it is better to run nginx on a separate server to take load off the master.

    [root@master1 ~]# /etc/init.d/iptables stop
    

      

    Start nginx.

    [root@master1 ~]# /etc/init.d/nginx   start
    Starting nginx:                                            [  OK  ]
    [root@master1 ~]# 
    

    In a browser, open the master's address, i.e. the server where nginx is installed:
    http://192.168.145.151/

    Edit the configuration file to turn it into a directory-listing server.

    Add the following directly below this access_log line:
        access_log  /var/log/nginx/access.log  main;
        autoindex on;              # enable directory listing
        autoindex_exact_size  on;  # show file sizes
        autoindex_localtime on;    # show file times in local time
    

    Double-check the result; the non-ASCII comments can be removed to avoid possible errors:

    [root@master1 ~]# sed -n  '23,26p' /etc/nginx/nginx.conf
        access_log  /var/log/nginx/access.log  main;
        autoindex on;
        autoindex_exact_size  on;
        autoindex_localtime on;
    [root@master1 ~]# 
    

    Check the syntax and restart nginx.

    [root@master1 ~]# /etc/init.d/nginx configtest
    nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
    nginx: configuration file /etc/nginx/nginx.conf test is successful
    [root@master1 ~]# /etc/init.d/nginx restart
    Stopping nginx:                                            [  OK  ]
    Starting nginx:                                            [  OK  ]
    [root@master1 ~]# 
    

      

    Go to /usr/share/nginx/html, delete all the files there,
    and upload the ISO file into /usr/share/nginx/html.
    Here we upload CentOS-6.5-x86_64-minimal.iso.

    [root@master1 tools]# cd /usr/share/nginx/html/
    [root@master1 html]# ls
    404.html  50x.html  index.html  nginx-logo.png  poweredby.png
    [root@master1 html]# rm -rf *
    [root@master1 html]# mv /tools/CentOS-6.5-x86_64-minimal.iso .
    [root@master1 html]# ls
    CentOS-6.5-x86_64-minimal.iso
    [root@master1 html]# 
    


    Refresh the nginx front page again:
    http://192.168.145.151/

    In the web UI, add the ISO.

    It is downloaded from the server automatically;
    downloading from itself to itself is of course fast.

     
     
    Creating a custom VM instance

     

    Here you can choose 20 GB.

    Affinity: leave the default.

    Network: leave the default.

    If you leave the name blank, a UUID is generated for it automatically.

     

    Click Launch.

    Creation is in progress. The first creation is slow because the template has to be fetched from secondary storage (the NFS server) onto primary storage;
    creating a second VM is much faster.

     

    Open the instance's console.

     

    Select the last option.

     

    root01

    VirtIO already shows up as the device label; Red Hat now fully backs KVM,
    and VirtIO support is built into the CentOS 6 kernel.

    The installation proceeds.

     

    After clicking reboot, detach the ISO here to avoid reinstalling.
    Once the machine is installed and rebooting, detach the ISO as quickly as possible, otherwise it boots from the ISO again.

     

    Set the NIC to ONBOOT=yes.

    After the new instance is installed, the virtual router view changes as well.

     

    In production, a cluster is usually 8-16, or at most around 24, hosts;
    roughly two racks of servers is a reasonable split.
    Beyond 24 hosts you can add another cluster, cluster2.

    Run ip a on the agent machine.
    cloudbr0 is the bridge we created,
    and vnet0 is a virtual device;
    each VM connects to a vnet device, which in turn is attached to the bridge.

    [root@agent1 cloudstack]# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
        inet6 ::1/128 scope host 
           valid_lft forever preferred_lft forever
    2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
        link/ether 00:0c:29:ab:d5:a9 brd ff:ff:ff:ff:ff:ff
        inet6 fe80::20c:29ff:feab:d5a9/64 scope link 
           valid_lft forever preferred_lft forever
    3: virbr0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN 
        link/ether 52:54:00:ea:87:7d brd ff:ff:ff:ff:ff:ff
        inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
    4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN qlen 500
        link/ether 52:54:00:ea:87:7d brd ff:ff:ff:ff:ff:ff
    8: cloudbr0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN 
        link/ether 00:0c:29:ab:d5:a9 brd ff:ff:ff:ff:ff:ff
        inet 192.168.145.152/24 brd 192.168.145.255 scope global cloudbr0
        inet6 fe80::20c:29ff:feab:d5a9/64 scope link 
           valid_lft forever preferred_lft forever
    10: cloud0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN 
        link/ether fe:00:a9:fe:00:64 brd ff:ff:ff:ff:ff:ff
        inet 169.254.0.1/16 scope global cloud0
        inet6 fe80::f810:caff:fe2d:6be3/64 scope link 
           valid_lft forever preferred_lft forever
    11: vnet0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 500
        link/ether fe:00:a9:fe:00:64 brd ff:ff:ff:ff:ff:ff
        inet6 fe80::fc00:a9ff:fefe:64/64 scope link 
           valid_lft forever preferred_lft forever
    12: vnet1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 500
        link/ether fe:db:22:00:00:07 brd ff:ff:ff:ff:ff:ff
        inet6 fe80::fcdb:22ff:fe00:7/64 scope link 
           valid_lft forever preferred_lft forever
    13: vnet2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 500
        link/ether fe:20:e4:00:00:13 brd ff:ff:ff:ff:ff:ff
        inet6 fe80::fc20:e4ff:fe00:13/64 scope link 
           valid_lft forever preferred_lft forever
    14: vnet3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 500
        link/ether fe:00:a9:fe:00:6e brd ff:ff:ff:ff:ff:ff
        inet6 fe80::fc00:a9ff:fefe:6e/64 scope link 
           valid_lft forever preferred_lft forever
    15: vnet4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 500
        link/ether fe:92:68:00:00:08 brd ff:ff:ff:ff:ff:ff
        inet6 fe80::fc92:68ff:fe00:8/64 scope link 
           valid_lft forever preferred_lft forever
    16: vnet5: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 500
        link/ether fe:84:42:00:00:0f brd ff:ff:ff:ff:ff:ff
        inet6 fe80::fc84:42ff:fe00:f/64 scope link 
           valid_lft forever preferred_lft forever
    17: vnet6: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 500
        link/ether fe:ba:5c:00:00:12 brd ff:ff:ff:ff:ff:ff
        inet6 fe80::fcba:5cff:fe00:12/64 scope link 
           valid_lft forever preferred_lft forever
    18: vnet7: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 500
        link/ether fe:00:a9:fe:01:4d brd ff:ff:ff:ff:ff:ff
        inet6 fe80::fc00:a9ff:fefe:14d/64 scope link 
           valid_lft forever preferred_lft forever
    19: vnet8: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 500
        link/ether fe:b1:ec:00:00:10 brd ff:ff:ff:ff:ff:ff
        inet6 fe80::fcb1:ecff:fe00:10/64 scope link 
           valid_lft forever preferred_lft forever
    [root@agent1 cloudstack]# 
    

    Network traffic flow: VM -> vnet -> cloudbr0 -> eth0.
    cloudbr0 has many devices bridged onto it:

    [root@agent1 cloudstack]# brctl show
    bridge name	bridge id		STP enabled	interfaces
    cloud0		8000.fe00a9fe0064	no		vnet0
    							vnet3
    							vnet7
    cloudbr0		8000.000c29abd5a9	no		eth0
    							vnet1
    							vnet2
    							vnet4
    							vnet5
    							vnet6
    							vnet8
    virbr0		8000.525400ea877d	yes		virbr0-nic
    [root@agent1 cloudstack]# 
    

    The master cannot reach that VM over the network:

    [root@master1 html]# ping 192.168.145.176
    PING 192.168.145.176 (192.168.145.176) 56(84) bytes of data.
    ^C
    --- 192.168.145.176 ping statistics ---
    23 packets transmitted, 0 received, 100% packet loss, time 22542ms
    
    [root@master1 html]# 
    

      

    Add the following security group rules:

    allow ICMP,

    allow all TCP,

    and open all egress rules as well.

    After modifying the security group, the ping goes through:

    [root@master1 html]# ping 192.168.145.176
    PING 192.168.145.176 (192.168.145.176) 56(84) bytes of data.
    64 bytes from 192.168.145.176: icmp_seq=1 ttl=64 time=3.60 ms
    64 bytes from 192.168.145.176: icmp_seq=2 ttl=64 time=1.88 ms
    64 bytes from 192.168.145.176: icmp_seq=3 ttl=64 time=1.46 ms
    ^C
    --- 192.168.145.176 ping statistics ---
    3 packets transmitted, 3 received, 0% packet loss, time 2087ms
    rtt min/avg/max/mdev = 1.463/2.316/3.605/0.927 ms
    [root@master1 html]# 
    

    For your own private cloud, the security group is opened up completely, much like the rules just added,
    plus an allow-all rule for UDP.

    Log in to the CentOS 6.5 instance and configure DNS:

    [root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0 
    DEVICE=eth0
    HWADDR=06:B1:EC:00:00:10
    TYPE=Ethernet
    UUID=5f46c5e2-5ac6-4bb9-b21d-fed7f49e7475
    ONBOOT=yes
    NM_CONTROLLED=yes
    BOOTPROTO=dhcp
    DNS1=10.0.1.11
    [root@localhost ~]# /etc/init.d/network restart
    [root@localhost ~]# ping www.baidu.com
    PING www.a.shifen.com (115.239.211.112) 56(84) bytes of data.
    64 bytes from 115.239.211.112: icmp_seq=1 ttl=128 time=4.10 ms
    ^C
    --- www.a.shifen.com ping statistics ---
    1 packets transmitted, 1 received, 0% packet loss, time 815ms
    rtt min/avg/max/mdev = 4.107/4.107/4.107/0.000 ms
    [root@localhost ~]# 
    

    Install openssh on the CentOS 6.5 instance.

    [root@localhost ~]# yum install -y openssh
    

      

    As a recap, look at what is now in secondary storage:

    [root@master1 html]# cd /export/secondary/
    [root@master1 secondary]# ls
    lost+found  snapshots  template  volumes
    [root@master1 secondary]# cd snapshots/
    [root@master1 snapshots]# ls
    [root@master1 snapshots]# cd ..
    [root@master1 secondary]# cd template/
    [root@master1 template]# ls
    tmpl
    [root@master1 template]# cd tmpl/2/201/
    [root@master1 201]# ls
    201-2-c27db1c6-f780-35c3-9c63-36a5330df298.iso  template.properties
    [root@master1 201]# cat template.properties 
    #
    #Sat Feb 11 15:18:59 UTC 2017
    filename=201-2-c27db1c6-f780-35c3-9c63-36a5330df298.iso
    id=201
    public=true
    iso.filename=201-2-c27db1c6-f780-35c3-9c63-36a5330df298.iso
    uniquename=201-2-c27db1c6-f780-35c3-9c63-36a5330df298
    virtualsize=417333248
    checksum=0d9dc37b5dd4befa1c440d2174e88a87
    iso.size=417333248
    iso.virtualsize=417333248
    hvm=true
    description=centos6.5
    iso=true
    size=417333248
    [root@master1 201]#
    

      

    Check on the agent:

    [root@agent1 cloudstack]# cd /export/primary/
    [root@agent1 primary]# ls
    0cc65968-4ff3-4b4c-b31e-7f1cf5d1959b  cf3dac7a-a071-4def-83aa-555b5611fb02
    1685f81b-9ac9-4b21-981a-f1b01006c9ef  f3521c3d-fca3-4527-984d-5ff208e05b5c
    99643b7d-aaf4-4c75-b7d6-832c060e9b77  lost+found
    

    Inside are the two system-created VMs, plus the VM we created ourselves, plus a virtual router:

    [root@agent1 primary]# virsh list --all
     Id    Name                           State
    ----------------------------------------------------
     1     s-1-VM                         running
     2     v-2-VM                         running
     3     r-4-VM                         running
     4     i-2-3-VM                       running
    
    [root@agent1 primary]# 
    

      
