zoukankan      html  css  js  c++  java
  • ubuntu系统kubernetes1.18多master集群架构部署

    ubuntu系统kubernetes1.18多master集群架构部署演练

    部署版本

    ubuntu版本 20.04
    docker版本 19.03.10
    kubernetes版本 1.18.2

    1 环境准备(所有主机)

    主机名 ip
    k8s-master01 10.0.0.101
    k8s-master02 10.0.0.102
    k8s-master03 10.0.0.103
    k8s-node01 10.0.0.104

    1.1 关闭防火墙

    # Disable the UFW firewall now and keep it from starting at boot.
    # Fix vs original: "ufw disabled" is not a valid ufw subcommand — the
    # correct verb is "disable".
    ufw disable
    systemctl disable ufw.service
    

    1.2 关闭swap分区

    # Turn swap off immediately and comment out any swap entries in /etc/fstab
    # so it stays off after reboot (the kubelet refuses to run with swap on).
    swapoff -a 
    sed -ri 's/.*swap.*/#&/' /etc/fstab
    

    1.3 安装依赖包

    # Install prerequisite packages.
    # Fix vs original: apt-transport-https was listed twice.
    apt install -y apt-transport-https ca-certificates curl gnupg-agent software-properties-common lrzsz net-tools
    
    

    1.4 添加系统内核变量

    # Persist the bridge-netfilter and IP-forwarding sysctls required by
    # kube-proxy and CNI plugins in a dedicated drop-in file.
    cat <<EOF > /etc/sysctl.d/k8s.conf
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward=1
    EOF
    # Reload every sysctl configuration file so the settings apply now.
    sysctl --system
    
    

    vim /etc/default/grub

    # 添加参数
    GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
    

    update-grub

    1.5 设置时间

    # Use the Shanghai timezone and restart rsyslog so new log entries are
    # stamped in the new timezone.
    timedatectl set-timezone Asia/Shanghai
    systemctl restart rsyslog
    
    

    1.6 添加内核参数

    # Apply the bridge-netfilter settings immediately (the br_netfilter module
    # must already be loaded for these /proc entries to exist — TODO confirm).
    echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
    echo 1 > /proc/sys/net/bridge/bridge-nf-call-ip6tables
    # Persist them in a sysctl drop-in.
    # Fix vs original: the original redirected into /etc/sysctl.conf with '>',
    # which discards the distribution's default contents; a drop-in is safer.
cat <<EOF > /etc/sysctl.d/99-kubernetes.conf
vm.swappiness = 0
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
    # 'sysctl -p' only reads /etc/sysctl.conf; --system also loads /etc/sysctl.d/*.
    sysctl --system
    

    1.7 修改rp_filter参数为 1,默认为 2(如果为 2,calico网络模式不支持)

    1、修改后的文件
    root@k8s-master01:~# cat /etc/sysctl.d/10-network-security.conf
    
    # Turn on Source Address Verification in all interfaces to
    # prevent some spoofing attacks.
    net.ipv4.conf.default.rp_filter=1
    net.ipv4.conf.all.rp_filter=1
    
    2、重置系统参数
    sysctl --system
    
    

    2 安装docker和kubernetes

    2.1 docker安装

    1、 添加docker源
    # Add Docker's official GPG key and the stable APT repository for the
    # running Ubuntu release.
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    
    2、查看可安装的历史版本
    apt  list docker-ce -a 
    
    3、安装docker指定版本
    # Pin docker-ce and docker-ce-cli to 19.03.10 (the focal build this guide
    # was validated against).
    apt install -y docker-ce=5:19.03.10~3-0~ubuntu-focal docker-ce-cli=5:19.03.10~3-0~ubuntu-focal containerd.io
    
    
    # Enable docker at boot and start it now.
    systemctl enable docker && systemctl start docker
    
    

    2.2 kubernetes安装

    1、添加源
    # Import the Aliyun mirror's GPG key for the Kubernetes APT repository.
    curl -s https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | sudo apt-key add -
    
    # Register the Aliyun Kubernetes repository (the kubernetes-xenial suite
    # also serves packages for later Ubuntu releases such as focal).
    tee /etc/apt/sources.list.d/kubernetes.list <<EOF 
    deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
    EOF
    
    apt-get update
    
    2、 查看可安装的版本
    apt list kubeadm -a 
    
    
    3、安装指定版本
    # Install kubeadm/kubectl/kubelet pinned to 1.18.2-00 to match this guide.
    apt install -y kubeadm=1.18.2-00 kubectl=1.18.2-00 kubelet=1.18.2-00
    
    
    4、添加配置文件
    vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 
    

    # NOTE(review): 原文此行为 Environment="cgroup-driver=systemd/cgroup-driver=cgroupfs",
    # 不是合法的 systemd 语法,疑似是一条被破坏的 sed 替换命令;其意图应为把
    # kubelet 的 cgroup 驱动改成与 docker 一致(cgroupfs),请核实后使用:
    sed -i 's/cgroup-driver=systemd/cgroup-driver=cgroupfs/g' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf

    5、 启动
    systemctl enable kubelet && systemctl start kubelet
    

    3 keepalived的安装

    3.1 安装keepalived

    在线安装

    # Online install: keepalived's library dependencies, then keepalived itself.
    sudo apt-get install -y  libssl-dev openssl libpopt-dev
    
    sudo apt-get install -y  keepalived
    
    

    上传离线包

    # Offline alternative: install the uploaded .deb packages directly
    # ('%3a' is the URL-encoded ':' of the package version epoch).
    dpkg -i ipvsadm_1%3a1.31-1_amd64.deb 
    dpkg -i keepalived_1%3a2.0.19-2_amd64.deb 
    
    

    3.2 编辑配置文件

    3.2.1 master01配置文件

    # keepalived config for master01. All three masters use state BACKUP with
    # nopreempt; master01 has the highest priority (100) so it normally holds
    # the VIP 10.0.0.200.
    # Fix vs original: 'lb_algo loadbalance' is not a valid IPVS scheduler
    # (valid values: rr|wrr|lc|wlc|lblc|sh|dh) — round-robin is used instead.
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id K8S-LIVE
}
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens32
    virtual_router_id 80
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        10.0.0.200
    }
}
virtual_server 10.0.0.200 6443 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    real_server 10.0.0.101 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.0.0.102 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.0.0.103 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
EOF
    

    3.2.2 master02配置文件

    # keepalived config for master02 (priority 50 — second in line for the VIP).
    # Fixes vs original: 'lb_algo loadbalance' is not a valid IPVS scheduler
    # (valid values: rr|wrr|lc|wlc|lblc|sh|dh), and 'lb_kind DR' and 'net_mask'
    # were fused onto a single line — each directive must be on its own line.
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id K8S-LIVE
}
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens32
    virtual_router_id 80
    priority 50
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        10.0.0.200
    }
}
virtual_server 10.0.0.200 6443 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    real_server 10.0.0.101 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.0.0.102 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.0.0.103 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
EOF
    
    

    3.2.3 master03配置文件

    # keepalived config for master03 (priority 30 — last in line for the VIP).
    # Fix vs original: 'lb_algo loadbalance' is not a valid IPVS scheduler
    # (valid values: rr|wrr|lc|wlc|lblc|sh|dh) — round-robin is used instead.
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id K8S-LIVE
}
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens32
    virtual_router_id 80
    priority 30
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        10.0.0.200
    }
}
virtual_server 10.0.0.200 6443 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    real_server 10.0.0.101 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.0.0.102 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.0.0.103 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
EOF
    
    

    启动keepalived

    systemctl enable keepalived && systemctl start keepalived
    
    
    观察设置的vip地址是否是正常在master01上
    
    

    4 kubernetes集群加入

    4.1 镜像导入

    上传镜像

    # Import the offline v1.18.2 control-plane images into the local docker
    # image cache so kubeadm does not have to pull from k8s.gcr.io.
    docker load -i   1-18-kube-apiserver.tar.gz
    docker load -i   1-18-kube-scheduler.tar.gz
    docker load -i   1-18-kube-controller-manager.tar.gz
    docker load -i   1-18-pause.tar.gz
    docker load -i   1-18-cordns.tar.gz
    docker load -i   1-18-etcd.tar.gz
    docker load -i   1-18-kube-proxy.tar.gz
    
    说明:
    pause版本是3.2,用到的镜像是k8s.gcr.io/pause:3.2
    etcd版本是3.4.3,用到的镜像是k8s.gcr.io/etcd:3.4.3-0        
    coredns版本是1.6.7,用到的镜像是k8s.gcr.io/coredns:1.6.7
    
    apiserver、scheduler、controller-manager、kube-proxy版本是1.18.2,用到的镜像分别是
    k8s.gcr.io/kube-apiserver:v1.18.2
    k8s.gcr.io/kube-controller-manager:v1.18.2
    k8s.gcr.io/kube-scheduler:v1.18.2
    k8s.gcr.io/kube-proxy:v1.18.2
    

    4.2 在master01节点上初始化k8s集群

    # Write the ClusterConfiguration AND the KubeProxyConfiguration into one
    # kubeadm config file; the '---' document separator must stay INSIDE the
    # here-doc.
    # Fix vs original: a stray first EOF right after 'podSubnet' closed the
    # here-doc early, leaving the kube-proxy document (and a second EOF,
    # executed as a bogus command) outside the generated file.
cat << EOF > kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.2
controlPlaneEndpoint: 10.0.0.200:6443
apiServer:
  certSANs:
  - 10.0.0.101
  - 10.0.0.102
  - 10.0.0.103
  - 10.0.0.104
  - 10.0.0.200
networking:
  podSubnet: 10.244.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOF
    
    

    4.2.1 初始化命令

    kubeadm init --config kubeadm-init.yaml |tee k8s-join.yaml
    

    4.2.2 在master1节点执行如下,这样才能有权限操作k8s资源

    # Copy the admin kubeconfig into this user's home so kubectl can reach the
    # cluster, and make the invoking user its owner.
    mkdir -p $HOME/.kube
    sudo cp -i  /etc/kubernetes/admin.conf  $HOME/.kube/config
    sudo chown $(id -u):$(id -g)  $HOME/.kube/config
    
    

    4.3 安装calico网络模式

    4.3.1 传入calico镜像

    # Import the offline calico images.
    docker load -i cni.tar.gz
    # NOTE(review): the original read 'calico-node.tar.' — the filename looks
    # truncated; assuming a .gz archive to match cni.tar.gz. Verify the actual
    # file name before running.
    docker load -i calico-node.tar.gz
    
    

    在master01节点执行

    kubectl apply -f calico.yaml
    

    4.4 把master1节点的证书拷贝到master2和master3上

    4.4.1 在master2和master3上创建证书存放目录

    # On master2 and master3: create the directories that will receive the
    # certificates and kubeconfig copied over from master1.
    cd /root && mkdir -p /etc/kubernetes/pki/etcd &&mkdir -p ~/.kube/
    

    4.4.2 执行shell脚本

    # Generate the certificate-sync script.
    # Fix vs original: the here-doc delimiter is now QUOTED ('EOF') so that
    # ${USER}, ${CONTROL_PLANE_IPS} and $host are written literally into
    # scp.sh instead of being expanded while the file is generated. With the
    # original unquoted EOF, ${CONTROL_PLANE_IPS} was empty at generation
    # time, so the generated for-loop iterated over nothing.
cat << 'EOF' > scp.sh
#!/bin/bash
USER=root
CONTROL_PLANE_IPS="10.0.0.102 10.0.0.103"
for host in ${CONTROL_PLANE_IPS}; do
	scp /etc/kubernetes/pki/ca.crt "${USER}"@$host:/etc/kubernetes/pki/
	scp /etc/kubernetes/pki/ca.key "${USER}"@$host:/etc/kubernetes/pki/
	scp /etc/kubernetes/pki/sa.key "${USER}"@$host:/etc/kubernetes/pki/
	scp /etc/kubernetes/pki/sa.pub "${USER}"@$host:/etc/kubernetes/pki/
	scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host:/etc/kubernetes/pki/
	scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host:/etc/kubernetes/pki/
	scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:/etc/kubernetes/pki/etcd/
	scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:/etc/kubernetes/pki/etcd/
	scp /etc/kubernetes/admin.conf "${USER}"@$host:/etc/kubernetes/
done
EOF
    

    4.5 master02 和master03加入集群节点

    --control-plane:这个参数表示加入到k8s集群的是master节点

      # Join this node as an additional control-plane member.
      # Fix vs original: the '\' line continuations were lost, so each option
      # line would have run as a separate (failing) command.
      kubeadm join 10.0.0.200:6443 --token gehni0.8zgnoew2cjrd1pz7 \
        --discovery-token-ca-cert-hash sha256:4967cb054bd5899af3e4b6ad3ab0c9f878b549ef7f72842d145b15f500e429ca \
        --control-plane
    
    

    在master2和master3上操作:

    # Copy the admin kubeconfig for the current user on master2/master3.
    mkdir -p "$HOME/.kube"
    sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
    # Fix vs original: there was no space between $(id -g) and $HOME, so chown
    # received one mangled argument and no file path.
    sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
    

    4.6 node节点加入集群

    # Join the worker node to the cluster through the keepalived VIP.
    # Fix vs original: the '\' line continuation was lost, so the second
    # option line would have run as a separate (failing) command.
    kubeadm join 10.0.0.200:6443 --token gehni0.8zgnoew2cjrd1pz7 \
        --discovery-token-ca-cert-hash sha256:4967cb054bd5899af3e4b6ad3ab0c9f878b549ef7f72842d145b15f500e429ca
    
    
    

    查看集群状态信息

    kubectl get nodes
    

    显示如下:

    NAME     STATUS   ROLES    AGE    VERSION
    master1  Ready    master   39m    v1.18.2
    master2  Ready    master   5m9s   v1.18.2
    master3  Ready    master   2m33s  v1.18.2
    

    踢出节点

    kubectl删除节点
    删除节点
    1、驱逐节点上的pod:kubectl drain k8s-master --delete-local-data --force --ignore-daemonsets
    2、删除节点:kubectl delete node 10.20.20.33
    
    
    
    
  • 相关阅读:
    bzoj3524: [Poi2014]Couriers(主席树)
    51nod 1275 连续子段的差异(twopointer+单调队列)
    51nod 1274 最长递增路径(DP)
    51nod 1273 旅行计划(思维题)
    51nod 1257 背包问题 V3(分数规划)
    CSS 几款比较常用的翻转特效
    css文字飞入效果
    jQuery使用方法
    数据库
    关系型数据库基本概念及MySQL简述
  • 原文地址:https://www.cnblogs.com/dinghc/p/14919823.html
Copyright © 2011-2022 走看看