  • One-click deployment of a highly available Kubernetes cluster

    Three master nodes and four worker nodes, all running CentOS 7.

    IP              ROLE
    172.60.0.226    master01
    172.60.0.86     master02
    172.60.0.106    master03
    172.60.0.227    node01
    172.60.0.228    node02
    172.60.0.44     node03
    172.60.0.46     node04

    The Kubernetes version installed here is 1.5.1 and the Docker version is 1.12.3.

    The three master nodes are made highly available with keepalived, following the reference architecture from the official Kubernetes HA documentation.

    The script content is as follows:

    # vim k8s-deploy.sh
     
    #!/bin/bash
    set -x
    set -e
     
    HTTP_SERVER=172.60.0.43:8000
    KUBE_HA=true
     
    KUBE_REPO_PREFIX=gcr.io/google_containers
    KUBE_ETCD_IMAGE=quay.io/coreos/etcd:v3.0.15
     
    root=$(id -u)
    if [ "$root" -ne 0 ] ;then
        echo must run as root
        exit 1
    fi
     
    kube::install_docker()
    {
        set +e
        docker info> /dev/null 2>&1
        i=$?
        set -e
        if [ $i -ne 0 ]; then
            curl -L http://$HTTP_SERVER/rpms/docker.tar.gz > /tmp/docker.tar.gz
            tar zxf /tmp/docker.tar.gz -C /tmp
            yum localinstall -y /tmp/docker/*.rpm
            systemctl enable docker.service && systemctl start docker.service
            kube::config_docker
        fi
        echo docker has been installed
        rm -rf /tmp/docker /tmp/docker.tar.gz
    }
     
    kube::config_docker()
    {
        setenforce 0 > /dev/null 2>&1 && sed -i -e 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
     
        sysctl -w net.bridge.bridge-nf-call-iptables=1
        sysctl -w net.bridge.bridge-nf-call-ip6tables=1
    cat <<EOF >>/etc/sysctl.conf
        net.bridge.bridge-nf-call-ip6tables = 1
        net.bridge.bridge-nf-call-iptables = 1
    EOF
     
        mkdir -p /etc/systemd/system/docker.service.d
    cat <<EOF >/etc/systemd/system/docker.service.d/10-docker.conf
    [Service]
        ExecStart=
        ExecStart=/usr/bin/dockerd -s overlay --selinux-enabled=false
    EOF
     
        systemctl daemon-reload && systemctl restart docker.service
    }
     
    kube::load_images()
    {
        mkdir -p /tmp/k8s
     
        images=(
            kube-apiserver-amd64_v1.5.1
            kube-controller-manager-amd64_v1.5.1
            kube-scheduler-amd64_v1.5.1
            kube-proxy-amd64_v1.5.1
            pause-amd64_3.0
            kube-discovery-amd64_1.0
            kubedns-amd64_1.9
            exechealthz-amd64_1.2
            kube-dnsmasq-amd64_1.4
            dnsmasq-metrics-amd64_1.0
            etcd_v3.0.15
            flannel-amd64_v0.7.0
        )
     
        for i in "${!images[@]}"; do
            ret=$(docker images | awk 'NR!=1{print $1"_"$2}'| grep $KUBE_REPO_PREFIX/${images[$i]} | wc -l)
            if [ $ret -lt 1 ];then
            curl -L http://$HTTP_SERVER/images/${images[$i]}.tar -o /tmp/k8s/${images[$i]}.tar
                docker load -i /tmp/k8s/${images[$i]}.tar
            fi
        done
     
        rm /tmp/k8s* -rf
    }
     
    kube::install_bin()
    {
        set +e
        which kubeadm > /dev/null 2>&1
        i=$?
        set -e
        if [ $i -ne 0 ]; then
            curl -L http://$HTTP_SERVER/rpms/k8s.tar.gz > /tmp/k8s.tar.gz
            tar zxf /tmp/k8s.tar.gz -C /tmp
            yum localinstall -y  /tmp/k8s/*.rpm
            rm -rf /tmp/k8s*
            systemctl enable kubelet.service && systemctl start kubelet.service && rm -rf /etc/kubernetes
        fi
    }
     
    kube::wait_apiserver()
    {
        until curl http://127.0.0.1:8080; do sleep 1; done
    }
     
    kube::disable_static_pod()
    {
        # remove the warning log in kubelet by dropping the static-pod manifest path from the kubeadm drop-in
        sed -i 's|--pod-manifest-path=/etc/kubernetes/manifests||g' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
        systemctl daemon-reload && systemctl restart kubelet.service
    }
     
    kube::get_env()
    {
      HA_STATE=$1
      [ $HA_STATE == "MASTER" ] && HA_PRIORITY=200 || HA_PRIORITY=`expr 200 - ${RANDOM} / 1000 + 1`
      KUBE_VIP=$(echo $2 |awk -F= '{print $2}')
      VIP_PREFIX=$(echo ${KUBE_VIP} | cut -d . -f 1,2,3)
      # the interface is detected differently for DHCP ('dynamic') and static ('global') addresses
      VIP_INTERFACE=$(ip addr show | grep ${VIP_PREFIX} | awk -F 'dynamic' '{print $2}' | head -1)
      [ -z "${VIP_INTERFACE}" ] && VIP_INTERFACE=$(ip addr show | grep ${VIP_PREFIX} | awk -F 'global' '{print $2}' | head -1)
      ###
      LOCAL_IP=$(ip addr show | grep ${VIP_PREFIX} | awk -F / '{print $1}' | awk -F ' ' '{print $2}' | head -1)
      MASTER_NODES=$(echo $3 | grep -oE '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
      MASTER_NODES_NO_LOCAL_IP=$(echo "${MASTER_NODES}" | sed -e 's/'${LOCAL_IP}'//g')
    }
     
    kube::install_keepalived()
    {
        kube::get_env $@
        set +e
        which keepalived > /dev/null 2>&1
        i=$?
        set -e
        if [ $i -ne 0 ]; then
            ip addr add ${KUBE_VIP}/32 dev ${VIP_INTERFACE}
            curl -L http://$HTTP_SERVER/rpms/keepalived.tar.gz > /tmp/keepalived.tar.gz
            tar zxf /tmp/keepalived.tar.gz -C /tmp
            yum localinstall -y  /tmp/keepalived/*.rpm
            rm -rf /tmp/keepalived*
            systemctl enable keepalived.service && systemctl start keepalived.service
            kube::config_keepalived
        fi
    }
     
    kube::config_keepalived()
    {
      echo "gen keepalived configuration"
    cat <<EOF >/etc/keepalived/keepalived.conf
    global_defs {
       router_id LVS_k8s
    }
     
    vrrp_script CheckK8sMaster {
        script "curl http://127.0.0.1:8080"
        interval 3
        timeout 9
        fall 2
        rise 2
    }
     
    vrrp_instance VI_1 {
        state ${HA_STATE}
        interface ${VIP_INTERFACE}
        virtual_router_id 61
        priority ${HA_PRIORITY}
        advert_int 1
        mcast_src_ip ${LOCAL_IP}
        nopreempt
        authentication {
            auth_type PASS
            auth_pass 378378
        }
        unicast_peer {
            ${MASTER_NODES_NO_LOCAL_IP}
        }
        virtual_ipaddress {
            ${KUBE_VIP}
        }
        track_script {
            CheckK8sMaster
        }
    }
     
    EOF
      modprobe ip_vs
      systemctl daemon-reload && systemctl restart keepalived.service
    }
     
    kube::save_master_ip()
    {
        set +e
        # ideally the etcd --endpoints should be parsed from $2; here etcdctl defaults to 127.0.0.1:2379
        [ ${KUBE_HA} == true ] && etcdctl mk ha_master ${LOCAL_IP}
        set -e
    }
     
    kube::copy_master_config()
    {
        local master_ip=$(etcdctl get ha_master)
        mkdir -p /etc/kubernetes
        scp -r root@${master_ip}:/etc/kubernetes/* /etc/kubernetes/
        systemctl start kubelet
    }
     
    kube::set_label()
    {
      until kubectl get no | grep `hostname`; do sleep 1; done
      kubectl label node `hostname` kubeadm.alpha.kubernetes.io/role=master
    }
     
    kube::master_up()
    {
        shift
     
        kube::install_docker
     
        kube::load_images
     
        kube::install_bin
     
        [ ${KUBE_HA} == true ] && kube::install_keepalived "MASTER" $@
     
        # store the master IP; master02 and master03 use it to copy the configuration
        kube::save_master_ip
     
        # --pod-network-cidr must be passed here, otherwise the flannel network will break later
        kubeadm init --use-kubernetes-version=v1.5.1 --pod-network-cidr=10.244.0.0/16 $@
     
        # uncomment to make the master nodes schedulable
        # kubectl taint nodes --all dedicated-
     
        echo -e "\033[32m Remember to record the token above; nodes need it to join the cluster! \033[0m"
     
        # install flannel network
        kubectl apply -f http://$HTTP_SERVER/network/kube-flannel.yaml --namespace=kube-system
     
        # show pods
        kubectl get pod --all-namespaces
    }
     
    kube::replica_up()
    {
        shift
     
        kube::install_docker
     
        kube::load_images
     
        kube::install_bin
     
        kube::install_keepalived "BACKUP" $@
     
        kube::copy_master_config
     
        kube::set_label
     
    }
     
    kube::node_up()
    {
        kube::install_docker
     
        kube::load_images
     
        kube::install_bin
     
        kube::disable_static_pod
     
        kubeadm join $@
    }
     
    kube::tear_down()
    {
        systemctl stop kubelet.service
        docker ps -aq|xargs -I '{}' docker stop {}
        docker ps -aq|xargs -I '{}' docker rm {}
        df |grep /var/lib/kubelet|awk '{ print $6 }'|xargs -I '{}' umount {}
        rm -rf /var/lib/kubelet && rm -rf /etc/kubernetes/ && rm -rf /var/lib/etcd
        yum remove -y kubectl kubeadm kubelet kubernetes-cni
        if [ ${KUBE_HA} == true ]
        then
          yum remove -y keepalived
          rm -rf /etc/keepalived/keepalived.conf
        fi
        rm -rf /var/lib/cni
        ip link del cni0
    }
     
    main()
    {
        case $1 in
        "m" | "master" )
            kube::master_up $@
            ;;
        "r" | "replica" )
            kube::replica_up $@
            ;;
        "j" | "join" )
            shift
            kube::node_up $@
            ;;
        "d" | "down" )
            kube::tear_down
            ;;
        *)
            echo "usage: $0 m[master] | r[replica] | j[join] token | d[down] "
            echo "       $0 master to setup master "
            echo "       $0 replica to setup replica master "
            echo "       $0 join   to join master with token "
        echo "       $0 down   to tear everything down, including all data! so be careful"
        echo "       unknown command $0 $@"
            ;;
        esac
    }
     
    main $@
    

    How to use the script

    1. Start an http-server on a separate machine to host the images, RPM packages, and other files; the script pulls everything from there.

    # nohup python -m SimpleHTTPServer &
    Serving HTTP on 0.0.0.0 port 8000 ...
    

    This is my http-server address: http://172.16.200.90:8000/
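
    Before deploying anything, it is worth checking that the cluster machines can actually reach these files. A minimal check against the address configured in the script (HTTP_SERVER=172.60.0.43:8000); a 200 response means the file is being served:

    # curl -sI http://172.60.0.43:8000/k8s-deploy.sh | head -1
    HTTP/1.0 200 OK

    The directory layout served by the http-server looks like this: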

    # tree 
    .
    ├── etcd
    │   ├── deploy-etcd.sh
    │   └── temp-etcd
    │       ├── etcd
    │       └── etcdctl
    ├── images
    │   ├── dnsmasq-metrics-amd64_1.0.tar
    │   ├── etcd_v3.0.15.tar
    │   ├── exechealthz-amd64_1.2.tar
    │   ├── flannel-git_0.7.0.tar
    │   ├── kube-apiserver-amd64_v1.5.1.tar
    │   ├── kube-controller-manager-amd64_v1.5.1.tar
    │   ├── kube-discovery-amd64_1.0.tar
    │   ├── kubedns-amd64_1.9.tar
    │   ├── kube-dnsmasq-amd64_1.4.tar
    │   ├── kube-proxy-amd64_v1.5.1.tar
    │   ├── kubernetes-dashboard-amd64.tar
    │   ├── kube-scheduler-amd64_v1.5.1.tar
    │   └── pause-amd64_3.0.tar
    ├── k8s-deploy.sh
    ├── network
    │   └── kube-flannel.yaml
    ├── nohup.out
    ├── README.md
    └── rpms
        ├── docker.tar.gz
        ├── haproxy.tar.gz
        ├── k8s.tar.gz
        └── keepalived.tar.gz
    

    2. Deploy master01 by running the script on master01.

    # curl -L http://172.60.0.43:8000/k8s-deploy.sh | bash -s master --api-advertise-addresses=172.60.0.87 --external-etcd-endpoints=http://172.60.0.226:2379,http://172.60.0.86:2379,http://172.60.0.106:2379
    
    172.60.0.43:8000 	is the http-server address
    
    --api-advertise-addresses 	is the VIP address
    
    --external-etcd-endpoints 	is the list of etcd cluster endpoints
    
    Record the token printed in the output; nodes need it when joining the cluster.
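
    Before moving on, a quick sanity check that the control plane on master01 is up; the script itself polls the same local insecure port 8080, so these should succeed once kubeadm init has finished:

    # curl http://127.0.0.1:8080/healthz
    ok
    # kubectl get componentstatuses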
    

    3. Deploy master02 and master03. Each of the two nodes first needs passwordless SSH access to master01 (a minimal sketch follows below). Then run the script on master02 and master03; when it finishes they automatically form a redundant setup with master01.
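
    A minimal sketch of setting up the SSH trust, run on master02 and again on master03 (this assumes root login to master01 at 172.60.0.226 is allowed; the script copies /etc/kubernetes from master01 over scp):

    # ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
    # ssh-copy-id root@172.60.0.226

    With the trust in place, run the script on each replica master: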

    # curl -L http://172.60.0.43:8000/k8s-deploy.sh | bash -s replica --api-advertise-addresses=172.60.0.87 --external-etcd-endpoints=http://172.60.0.226:2379,http://172.60.0.86:2379,http://172.60.0.106:2379
    

    After the steps above, the master nodes are highly available.
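
    To see which master currently holds the VIP, and to exercise a failover, something like the following works (assuming the VIP 172.60.0.87 used above). Run the first command on each master; exactly one of them should show the VIP, and stopping keepalived on that node should move the VIP to another master within a few seconds:

    # ip addr show | grep 172.60.0.87
    # systemctl stop keepalived && sleep 5 && systemctl start keepalived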

    4. Deploy the nodes. Simply run the script on each node.

    # curl -L http://172.60.0.43:8000/k8s-deploy.sh | bash -s join --token=3635d0.6d0caa140b219bc0 172.60.0.87
    
    The token here is the one recorded when master01 was deployed.
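
    If the join hangs or fails, the kubelet log on the node is usually the quickest way to see why (generic checks, not specific to this script):

    # systemctl status kubelet
    # journalctl -u kubelet -f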
    

    When joining the cluster you may hit a connection refused error; scaling kube-discovery to three replicas fixes it:

    # kubectl scale deployment --replicas 3 kube-discovery -n kube-system 
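
    Afterwards, confirm that three kube-discovery replicas are running, ideally spread across the three masters:

    # kubectl get deployment kube-discovery -n kube-system
    # kubectl get pod -n kube-system -o wide | grep kube-discovery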
    

    5. Once everything is done you have a complete highly available cluster.

    # kubectl get node
    NAME             STATUS         AGE
    kube-node02      Ready          22h
    kuber-master01   Ready,master   23h
    kuber-master02   Ready,master   23h
    kuber-master03   Ready,master   23h
    kuber-node01     Ready          23h
    kuber-node03     Ready          23h
    kuber-node04     Ready          23h
    
    # kubectl get pod --all-namespaces -o wide
    NAMESPACE     NAME                                     READY     STATUS    RESTARTS   AGE       IP             NODE
    kube-system   dummy-2088944543-191tw                   1/1       Running   0          1d        172.60.0.87    kuber-master01
    kube-system   kube-apiserver-kuber-master01            1/1       Running   0          1d        172.60.0.87    kuber-master01
    kube-system   kube-apiserver-kuber-master02            1/1       Running   0          23h       172.60.0.86    kuber-master02
    kube-system   kube-apiserver-kuber-master03            1/1       Running   0          23h       172.60.0.87    kuber-master03
    kube-system   kube-controller-manager-kuber-master01   1/1       Running   0          1d        172.60.0.87    kuber-master01
    kube-system   kube-controller-manager-kuber-master02   1/1       Running   0          23h       172.60.0.86    kuber-master02
    kube-system   kube-controller-manager-kuber-master03   1/1       Running   0          23h       172.60.0.87    kuber-master03
    kube-system   kube-discovery-1769846148-53vs5          1/1       Running   0          1d        172.60.0.87    kuber-master01
    kube-system   kube-discovery-1769846148-m18d0          1/1       Running   0          23h       172.60.0.87    kuber-master03
    kube-system   kube-discovery-1769846148-tf0m9          1/1       Running   0          23h       172.60.0.86    kuber-master02
    kube-system   kube-dns-2924299975-80fnn                4/4       Running   0          1d        10.244.0.2     kuber-master01
    kube-system   kube-flannel-ds-51db4                    2/2       Running   0          23h       172.60.0.87    kuber-master01
    kube-system   kube-flannel-ds-gsn3m                    2/2       Running   4          23h       172.60.0.227   kuber-node01
    kube-system   kube-flannel-ds-httmj                    2/2       Running   0          23h       172.60.0.86    kuber-master02
    kube-system   kube-flannel-ds-tq4xn                    2/2       Running   0          23h       172.60.0.87    kuber-master03
    kube-system   kube-flannel-ds-w206v                    2/2       Running   1          23h       172.60.0.44    kuber-node03
    kube-system   kube-flannel-ds-x1qv3                    2/2       Running   0          22h       172.60.0.228   kube-node02
    kube-system   kube-flannel-ds-xzn9l                    2/2       Running   1          23h       172.60.0.46    kuber-node04
    kube-system   kube-proxy-67m5m                         1/1       Running   0          23h       172.60.0.44    kuber-node03
    kube-system   kube-proxy-6gkm4                         1/1       Running   0          23h       172.60.0.86    kuber-master02
    kube-system   kube-proxy-7l8c8                         1/1       Running   0          1d        172.60.0.87    kuber-master01
    kube-system   kube-proxy-mb650                         1/1       Running   0          23h       172.60.0.87    kuber-master03
    kube-system   kube-proxy-nb24x                         1/1       Running   0          23h       172.60.0.46    kuber-node04
    kube-system   kube-proxy-qlwhj                         1/1       Running   0          22h       172.60.0.228   kube-node02
    kube-system   kube-proxy-rhwrw                         1/1       Running   1          23h       172.60.0.227   kuber-node01
    kube-system   kube-scheduler-kuber-master01            1/1       Running   0          1d        172.60.0.87    kuber-master01
    kube-system   kube-scheduler-kuber-master02            1/1       Running   0          23h       172.60.0.86    kuber-master02
    kube-system   kube-scheduler-kuber-master03            1/1       Running   0          23h       172.60.0.87    kuber-master03
    kube-system   kubernetes-dashboard-3000605155-s5f7t    1/1       Running   0          22h       10.244.6.2     kuber-node01
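
    As a final smoke test, a throwaway deployment confirms that scheduling and the flannel pod network work end to end (nginx is just an example image; any image reachable from the nodes will do):

    # kubectl run nginx-test --image=nginx --replicas=2
    # kubectl get pod -o wide | grep nginx-test
    # kubectl delete deployment nginx-test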
    
  • Original post: https://www.cnblogs.com/keithtt/p/6649995.html