zoukankan      html  css  js  c++  java
  • 三、升级kubeasz部署的kubernetes

    1、升级前实验环境

    OS: 
    root@harbor:~# cat /etc/issue
    Ubuntu 20.04.2 LTS 
     l
    
    root@harbor:~# uname -r
    5.4.0-81-generic
    
    IP分配:
    172.168.33.201 harbor.ywx.net  k8s-deploy
    172.168.33.202 haproxy01
    172.168.33.203 haproxy02
    172.168.33.204 ecd01
    172.168.33.205 ecd02
    172.168.33.206 ecd03
    172.168.33.207 k8s-master01
    172.168.33.208 k8s-master02
    172.168.33.209 k8s-master03
    172.168.33.210 k8s-node01
    172.168.33.211 k8s-node02
    172.168.33.212 k8s-node03
    
    VIP:
    172.168.33.50  api-server的VIP
    172.168.33.51
    172.168.33.52
    172.168.33.53
    
    Kubernetes: v1.21.0
    
    root@k8s-master01:/usr/local/src# kubectl get nodes
    NAME             STATUS                     ROLES    AGE     VERSION
    172.168.33.207   Ready,SchedulingDisabled   master   7d23h   v1.21.0
    172.168.33.208   Ready,SchedulingDisabled   master   7d23h   v1.21.0
    172.168.33.209   Ready,SchedulingDisabled   master   7d23h   v1.21.0
    172.168.33.210   Ready                      node     7d23h   v1.21.0
    172.168.33.211   Ready                      node     7d23h   v1.21.0
    172.168.33.212   Ready                      node     7d23h   v1.21.0
    

      

    kubeasz部署v1.21.0见:

    https://www.cnblogs.com/yaokaka/p/15308917.html

    2、升级kubernetes到v1.21.5

    1)下载kubernetes v1.21.5版本的二进制软件

    https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.21.md#downloads-for-v1215

    下载:
    Source Code: kubernetes.tar.gz	
    
    Client Binaries: kubernetes-client-linux-amd64.tar.gz
    
    Server Binaries: kubernetes-server-linux-amd64.tar.gz
    
    Node Binaries: kubernetes-node-linux-amd64.tar.gz
    
    #注意:不要下载错误的cpu架构
    root@k8s-master01:/usr/local/src# pwd
    /usr/local/src
    root@k8s-master01:/usr/local/src# ls
    kubernetes-client-linux-amd64.tar.gz  kubernetes-node-linux-amd64.tar.gz  kubernetes-server-linux-amd64.tar.gz  kubernetes.tar.gz
    
    root@k8s-master01:/usr/local/src# tar -xf kubernetes-client-linux-amd64.tar.gz 
    root@k8s-master01:/usr/local/src# tar -xf kubernetes-node-linux-amd64.tar.gz 
    root@k8s-master01:/usr/local/src# tar -xf kubernetes-server-linux-amd64.tar.gz 
    root@k8s-master01:/usr/local/src# tar -xf kubernetes.tar.gz 
    
    root@k8s-master01:/usr/local/src# ll kubernetes
    total 35284
    drwxr-xr-x 10 root root      176 Sep 16 05:27 ./
    drwxr-xr-x  3 root root      179 Sep 25 20:44 ../
    drwxr-xr-x  3 root root       49 Sep 16 05:27 LICENSES/
    -rw-r--r--  1 root root     3387 Sep 16 05:27 README.md
    drwxr-xr-x  2 root root        6 Sep 16 05:22 addons/
    drwxr-xr-x  3 root root       31 Sep 16 05:27 client/
    drwxr-xr-x  9 root root      311 Sep 16 05:27 cluster/
    drwxr-xr-x  2 root root       38 Sep 16 05:27 docs/
    drwxr-xr-x  3 root root       17 Sep 16 05:27 hack/
    -rw-r--r--  1 root root 36121220 Sep 16 05:22 kubernetes-src.tar.gz
    drwxr-xr-x  3 root root       17 Sep 16 05:21 node/
    drwxr-xr-x  3 root root       66 Sep 16 05:27 server/
    -rw-r--r--  1 root root        8 Sep 16 05:27 version
    

      

    2)升级master节点

    #在node节点操作,剔除k8s-master节点
    #第一步从所有的node节点的负载均衡中剔除k8s-master01节点
    #以k8s-node01为例:k8s-node02和k8s-node03操作一样
    root@k8s-node01:~# vim /etc/kube-lb/conf/kube-lb.conf
    user root;
    worker_processes 1;
    
    error_log  /etc/kube-lb/logs/error.log warn;
    
    events {
        worker_connections  3000;
    }
    
    stream {
        upstream backend {
            #server 172.168.33.207:6443    max_fails=2 fail_timeout=3s; #剔除k8s-master01节点
            server 172.168.33.208:6443    max_fails=2 fail_timeout=3s;
            server 172.168.33.209:6443    max_fails=2 fail_timeout=3s;
        }
    
        server {
            listen 127.0.0.1:6443;
            proxy_connect_timeout 1s;
            proxy_pass backend;
        }
    }
    
    #第二步所有node节点重启kube-lb
    root@k8s-node01:~# systemctl restart kube-lb.service
    
    #以上2步需要在所有node节点操作
    
    #在master节点上操作,升级master节点
    #以k8s-master01为例
    #第三步停止master节点上kube-apiserver kube-controller-manager kube-proxy kube-scheduler kubelet服务
    root@k8s-master01:~# systemctl  stop kube-apiserver kube-controller-manager kube-proxy kube-scheduler kubelet
    
    #第四步拷贝v1.21.5的kube-apiserver kube-controller-manager kube-proxy kube-scheduler kubelet二进制文件覆盖老版本的二进制文件
    root@k8s-master01:~# cd /usr/local/src/kubernetes/server/bin/
    root@k8s-master01:/usr/local/src/kubernetes/server/bin# cp kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet kubectl /usr/local/bin/
    
    #第五步重启master节点上kube-apiserver kube-controller-manager kube-proxy kube-scheduler kubelet服务
    root@k8s-master01:~# systemctl  start kube-apiserver kube-controller-manager kube-proxy kube-scheduler kubelet
    
    #在所有node节点操作
    #第六步将k8s-master01重新添加进所有node节点的kube-lb文件并重启kube-lb
    #以k8s-node01为例
    root@k8s-node01:~# vim /etc/kube-lb/conf/kube-lb.conf
    user root;
    worker_processes 1;
    
    error_log  /etc/kube-lb/logs/error.log warn;
    
    events {
        worker_connections  3000;
    }
    
    stream {
        upstream backend {
            server 172.168.33.207:6443    max_fails=2 fail_timeout=3s;
            server 172.168.33.208:6443    max_fails=2 fail_timeout=3s;
            server 172.168.33.209:6443    max_fails=2 fail_timeout=3s;
        }
    
        server {
            listen 127.0.0.1:6443;
            proxy_connect_timeout 1s;
            proxy_pass backend;
        }
    }
    
    root@k8s-node01:~# systemctl restart kube-lb.service 
    
    #第七步验证k8s-master01的版本信息
    root@k8s-master01:~# kubectl get nodes
    NAME             STATUS                     ROLES    AGE     VERSION
    172.168.33.207   Ready,SchedulingDisabled   master   7d23h   v1.21.5
    172.168.33.208   Ready,SchedulingDisabled   master   7d23h   v1.21.0
    172.168.33.209   Ready,SchedulingDisabled   master   7d23h   v1.21.0
    172.168.33.210   Ready                      node     7d23h   v1.21.0
    172.168.33.211   Ready                      node     7d23h   v1.21.0
    172.168.33.212   Ready                      node     7d23h   v1.21.0
    
    #第八步升级k8s-master02和k8s-master03
    #k8s-master02和k8s-master03升级步骤与k8s-master01一致
    #验证master节点
    root@k8s-master01:~# kubectl get nodes
    NAME             STATUS                     ROLES    AGE     VERSION
    172.168.33.207   Ready,SchedulingDisabled   master   7d23h   v1.21.5
    172.168.33.208   Ready,SchedulingDisabled   master   7d23h   v1.21.5
    172.168.33.209   Ready,SchedulingDisabled   master   7d23h   v1.21.5
    172.168.33.210   Ready                      node     7d23h   v1.21.0
    172.168.33.211   Ready                      node     7d23h   v1.21.0
    172.168.33.212   Ready                      node     7d23h   v1.21.0
    

      

    3)升级node节点

    #以k8s-node01为例,k8s-node02和k8s-node03同k8s-node01一样
    #第一步:驱逐该k8s-node01节点上的所有pod
    #在master节点上操作
    root@k8s-master01:~# kubectl drain 172.168.33.210
    ......
    There are pending nodes to be drained:
     172.168.33.210
    cannot delete DaemonSet-managed Pods (use --ignore-daemonsets to ignore): kube-system/calico-node-ccv26
    cannot delete Pods with local storage (use --delete-emptydir-data to override): kubernetes-dashboard/kubernetes-dashboard-79b875f7f8-phwtv
    #需要添加--ignore-daemonsets和--delete-emptydir-data来清除所有pod
    
    root@k8s-master01:~# kubectl drain 172.168.33.210 --force --ignore-daemonsets --delete-emptydir-data
    #清除DaemonSet-managed Pods和local storage
    
    root@k8s-master01:~# kubectl get nodes
    NAME             STATUS                     ROLES    AGE     VERSION
    172.168.33.207   Ready,SchedulingDisabled   master   7d23h   v1.21.5
    172.168.33.208   Ready,SchedulingDisabled   master   7d23h   v1.21.5
    172.168.33.209   Ready,SchedulingDisabled   master   7d23h   v1.21.5
    172.168.33.210   Ready,SchedulingDisabled   node     7d23h   v1.21.0 #k8s-node01不再调度pod
    172.168.33.211   Ready                      node     7d23h   v1.21.0
    172.168.33.212   Ready                      node     7d23h   v1.21.0
    
    
    
    #第二步:停止K8s-node01上的kubelet和kube-proxy服务
    root@k8s-node01:~# systemctl stop kubelet kube-proxy
    
    #第三步:把v1.21.5版本的kubelet和kube-proxy从k8s-master01上拷贝过来
    root@k8s-master01:/usr/local/src/kubernetes/server/bin# pwd
    /usr/local/src/kubernetes/server/bin
    root@k8s-master01:/usr/local/src/kubernetes/server/bin# scp kubelet kube-proxy 172.168.33.210:/usr/local/bin/
    
    #第四步:在k8s-node01上重启kubelet和kube-proxy服务
    root@k8s-node01:~# systemctl start kubelet kube-proxy
    
    #第五步:取消k8s-node01上不调度pod的设置
    root@k8s-master01:~# kubectl uncordon 172.168.33.210
    node/172.168.33.210 uncordoned
    root@k8s-master01:~# kubectl get nodes
    NAME             STATUS                     ROLES    AGE     VERSION
    172.168.33.207   Ready,SchedulingDisabled   master   7d23h   v1.21.5
    172.168.33.208   Ready,SchedulingDisabled   master   7d23h   v1.21.5
    172.168.33.209   Ready,SchedulingDisabled   master   7d23h   v1.21.5
    172.168.33.210   Ready                      node     7d23h   v1.21.5
    172.168.33.211   Ready                      node     7d23h   v1.21.0
    172.168.33.212   Ready                      node     7d23h   v1.21.0
    
    #第六步:升级k8s-node02和k8s-node03,操作与k8s-node01一样
    #第七步:验证
    root@k8s-master01:~# kubectl get nodes
    NAME             STATUS                     ROLES    AGE   VERSION
    172.168.33.207   Ready,SchedulingDisabled   master   8d    v1.21.5
    172.168.33.208   Ready,SchedulingDisabled   master   8d    v1.21.5
    172.168.33.209   Ready,SchedulingDisabled   master   8d    v1.21.5
    172.168.33.210   Ready                      node     8d    v1.21.5
    172.168.33.211   Ready                      node     8d    v1.21.5
    172.168.33.212   Ready                      node     8d    v1.21.5
    

      

    I have a dream so I study hard!!!
  • 相关阅读:
    蓝牙遐想
    BT stack浅入了解
    集合(set)
    字典练习
    数据类型--字典
    数据类型--元组
    字符串
    深浅copy
    python---list
    三种Div高度自适应的方法
  • 原文地址:https://www.cnblogs.com/yaokaka/p/15335719.html
Copyright © 2011-2022 走看看