  • Kubernetes Environment Setup Notes

    Master node

    Base environment

    OS: CentOS 7

    Set the hostname

    hostnamectl set-hostname master   # set the hostname to master
    hostnamectl --static              # show the static hostname
    

    Edit the hosts file with vim /etc/hosts and add the following entry

    127.0.0.1   master   # avoids a hostname warning during installation
    

    Disable the firewall

    systemctl stop firewalld.service
    systemctl disable firewalld.service
    

    Disable SELinux

    vim /etc/sysconfig/selinux  # set SELINUX=disabled
    
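
    A non-interactive equivalent of the manual edit above (a minimal sketch, assuming SELINUX is currently set to enforcing):

    # turn SELinux off for the running system and persist the change across reboots
    setenforce 0
    sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/sysconfig/selinux
    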

    Disable swap; starting with Kubernetes 1.8, the kubelet will not start if swap is enabled

    # remove this line from /etc/fstab: /dev/mapper/centos-swap swap                    swap    defaults        0 0
    swapoff -a
    cp /etc/fstab /etc/fstab_bak
    cat /etc/fstab_bak |grep -v swap > /etc/fstab
    cat /etc/fstab
    

    Install docker-ce

    Install a container runtime for CRI support; Docker CE 19.03 is recommended

    First add the stable Docker repository [necessary? the same repo is added again in the install step below]

    yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
    

    List the installable versions

    yum list docker-ce --showduplicates | sort -r
    

    Install docker-ce

    yum install -y yum-utils device-mapper-persistent-data lvm2
    yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
    yum makecache fast
    yum install -y --setopt=obsoletes=0 docker-ce-19.03.9-3.el7 
    systemctl start docker
    systemctl enable docker
    
    
    Installed:
      docker-ce.x86_64 3:19.03.9-3.el7                                                                               
    
    Installed as dependencies:
      audit-libs-python.x86_64 0:2.8.5-4.el7                      checkpolicy.x86_64 0:2.5-8.el7                     
      container-selinux.noarch 2:2.119.2-1.911c772.el7_8          containerd.io.x86_64 0:1.4.11-3.1.el7              
      docker-ce-cli.x86_64 1:20.10.10-3.el7                       docker-scan-plugin.x86_64 0:0.9.0-3.el7            
      libcgroup.x86_64 0:0.41-21.el7                              libseccomp.x86_64 0:2.3.1-4.el7                    
      libsemanage-python.x86_64 0:2.5-14.el7                      policycoreutils-python.x86_64 0:2.5-34.el7         
      python-IPy.noarch 0:0.75-6.el7                              setools-libs.x86_64 0:3.3.8-4.el7                  
    

    Check the Docker version

    docker -v
    Docker version 20.10.10, build b485636
    

    Set Docker's cgroup driver to systemd and restart Docker; this avoids the kubeadm init failure described later. The registry-mirrors entry is an Aliyun registry mirror accelerator address (replace xxxx with your own; note that JSON does not allow inline comments)

    cat > /etc/docker/daemon.json <<EOF
    {
      "exec-opts": ["native.cgroupdriver=systemd"],
      "log-driver": "json-file",
      "log-opts": {
        "max-size": "100m"
      },
      "storage-driver": "overlay2",
      "storage-opts": [
        "overlay2.override_kernel_check=true"
      ],
      "data-root": "/data/docker",
      "registry-mirrors": ["https://xxxx.mirror.aliyuncs.com"]
    }
    EOF
    
    systemctl daemon-reload
    systemctl restart docker
    

    Deploy Kubernetes with kubeadm

    Add the Kubernetes yum repository: vim /etc/yum.repos.d/kubernetes.repo

    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=1
    repo_gpgcheck=1
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    exclude=kube*
    

    Install and start kubeadm

    # Install. Note: check the version number installed here, because the version specified at kubeadm init must not be lower than the installed Kubernetes packages
    yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
    # Note: to pin a specific version, use the command below with kubelet-<version>
    yum install kubelet-1.14.1 kubeadm-1.14.1 kubectl-1.14.1 --disableexcludes=kubernetes
    
    Installed:
      kubeadm.x86_64 0:1.22.3-0            kubectl.x86_64 0:1.22.3-0            kubelet.x86_64 0:1.22.3-0           
    
    Installed as dependencies:
      conntrack-tools.x86_64 0:1.4.4-7.el7                    cri-tools.x86_64 0:1.19.0-0                            
      kubernetes-cni.x86_64 0:0.8.7-0                         libnetfilter_cthelper.x86_64 0:1.0.0-11.el7            
      libnetfilter_cttimeout.x86_64 0:1.0.0-7.el7             libnetfilter_queue.x86_64 0:1.0.2-2.el7_2              
      socat.x86_64 0:1.7.3.2-2.el7                           
    
    
    # enable and start kubelet
    systemctl enable kubelet.service && systemctl start kubelet.service
    

    Set the kubelet's cgroupDriver to systemd and restart it, to avoid the installation failure described below

    cat > /var/lib/kubelet/config.yaml <<EOF
    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    cgroupDriver: systemd
    EOF
    
    systemctl restart kubelet
    

    After starting kubelet.service, its status shows that the kubelet is not actually running. The cause is that the file "/var/lib/kubelet/config.yaml" does not exist. This can be ignored for now; kubeadm init will create the file.

    ➜  ~ systemctl status kubelet.service 
    ● kubelet.service - kubelet: The Kubernetes Node Agent
       Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
      Drop-In: /usr/lib/systemd/system/kubelet.service.d
               └─10-kubeadm.conf
       Active: activating (auto-restart) (Result: exit-code) since 日 2021-11-14 00:47:09 CST; 948ms ago
         Docs: https://kubernetes.io/docs/
      Process: 66843 ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS (code=exited, status=1/FAILURE)
     Main PID: 66843 (code=exited, status=1/FAILURE)
    
    11月 14 00:47:09 master systemd[1]: Unit kubelet.service entered failed state.
    11月 14 00:47:09 master systemd[1]: kubelet.service failed.
    
    journalctl -xefu kubelet
    -- Logs begin at 六 2021-11-13 23:20:49 CST. --
    11月 14 00:46:48 master systemd[1]: Started kubelet: The Kubernetes Node Agent.
    -- Subject: Unit kubelet.service has finished start-up
    -- Defined-By: systemd
    -- Support: http://lists.freedesktop.org/mailman/listinfo/systemd-devel
    -- 
    -- Unit kubelet.service has finished starting up.
    -- 
    -- The start-up result is done.
    11月 14 00:46:48 master kubelet[66587]: E1114 00:46:48.782436   66587 server.go:206] "Failed to load kubelet config file" err="failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" path="/var/lib/kubelet/config.yaml"
    11月 14 00:46:48 master systemd[1]: kubelet.service: main process exited, code=exited, status=1/FAILURE
    11月 14 00:46:48 master systemd[1]: Unit kubelet.service entered failed state.
    11月 14 00:46:48 master systemd[1]: kubelet.service failed.
    11月 14 00:46:58 master systemd[1]: kubelet.service holdoff time over, scheduling restart.
    11月 14 00:46:58 master systemd[1]: Stopped kubelet: The Kubernetes Node Agent.
    -- Subject: Unit kubelet.service has finished shutting down
    -- Defined-By: systemd
    -- Support: http://lists.freedesktop.org/mailman/listinfo/systemd-devel
    -- 
    -- Unit kubelet.service has finished shutting down.
    11月 14 00:46:58 master systemd[1]: Started kubelet: The Kubernetes Node Agent.
    -- Subject: Unit kubelet.service has finished start-up
    -- Defined-By: systemd
    -- Support: http://lists.freedesktop.org/mailman/listinfo/systemd-devel
    -- 
    -- Unit kubelet.service has finished starting up.
    -- 
    -- The start-up result is done.
    11月 14 00:46:58 master kubelet[66710]: E1114 00:46:58.894483   66710 server.go:206] "Failed to load kubelet config file" err="failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" path="/var/lib/kubelet/config.yaml"
    11月 14 00:46:58 master systemd[1]: kubelet.service: main process exited, code=exited, status=1/FAILURE
    11月 14 00:46:58 master systemd[1]: Unit kubelet.service entered failed state.
    11月 14 00:46:58 master systemd[1]: kubelet.service failed.
    11月 14 00:47:09 master systemd[1]: kubelet.service holdoff time over, scheduling restart.
    11月 14 00:47:09 master systemd[1]: Stopped kubelet: The Kubernetes Node Agent.
    -- Subject: Unit kubelet.service has finished shutting down
    -- Defined-By: systemd
    -- Support: http://lists.freedesktop.org/mailman/listinfo/systemd-devel
    
    

    Initialize Kubernetes on the master with kubeadm init

    Note: the kubernetes-version here must match the version installed above, otherwise init will fail

    # run only on the master; do not run this on worker nodes
    kubeadm init \
    --apiserver-advertise-address=172.16.223.2 \
    --image-repository registry.aliyuncs.com/google_containers \
    --kubernetes-version v1.22.3 \
    --pod-network-cidr=172.16.0.0/16
    
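
    Pulling the control plane images is the slowest part of init; they can be fetched ahead of time with kubeadm itself (the flags mirror the init command above):

    # pre-pull the control plane images from the Aliyun mirror
    kubeadm config images pull \
    --image-repository registry.aliyuncs.com/google_containers \
    --kubernetes-version v1.22.3
    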

    Command to reset the environment if something goes wrong

    echo y | kubeadm reset
    

    Issue 1

    ➜  kubernetes kubeadm init \                                        
    --apiserver-advertise-address=192.168.205.132 \
    --image-repository registry.aliyuncs.com/google_containers \
    --kubernetes-version v1.22.3 \
    --pod-network-cidr=192.168.0.0/16
    [init] Using Kubernetes version: v1.22.3
    [preflight] Running pre-flight checks
            [WARNING Hostname]: hostname "master" could not be reached
            [WARNING Hostname]: hostname "master": lookup master on 192.168.205.2:53: no such host
    [preflight] Pulling images required for setting up a Kubernetes cluster
    

    Reference: k8s hostname "" could not be reached (yzhao66, CSDN blog)
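
    In short, the warning disappears once the hostname resolves to a reachable address; a minimal sketch, reusing this host's IP from the kubeadm init command above as an example:

    # map the hostname "master" to this node's own IP so the preflight lookup succeeds
    echo "192.168.205.132 master" >> /etc/hosts
    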

    Issue 2

    ➜  kubernetes kubeadm init \
    > --apiserver-advertise-address=192.168.205.132 \
    > --image-repository registry.aliyuncs.com/google_containers \
    > --kubernetes-version v1.22.3 \
    > --pod-network-cidr=192.168.0.0/16
    [init] Using Kubernetes version: v1.22.3
    [preflight] Running pre-flight checks
            [WARNING Hostname]: hostname "master" could not be reached
            [WARNING Hostname]: hostname "master": lookup master on 192.168.205.2:53: no such host
    error execution phase preflight: [preflight] Some fatal errors occurred:
            [ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables contents are not set to 1
    [preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
    To see the stack trace of this error execute with --v=5 or higher
    

    Reference: ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables misconfiguration causes the kubeadm installation of k8s to fail (小侬, cnblogs.com)
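
    In short, the fix is to load the br_netfilter module and enable the bridge sysctls that kubeadm's preflight check expects; a minimal sketch:

    # make bridged traffic visible to iptables, then reload all sysctl settings
    modprobe br_netfilter
    cat > /etc/sysctl.d/k8s.conf <<EOF
    net.bridge.bridge-nf-call-iptables = 1
    net.bridge.bridge-nf-call-ip6tables = 1
    EOF
    sysctl --system
    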

    Issue 3

    [kubelet-check] It seems like the kubelet isn't running or healthy.
    [kubelet-check] The HTTP call equal to 'curl -sSL http://localhost:10248/healthz' failed with error: Get "http://localhost:10248/healthz": dial tcp [::1]:10248: connect: connection refused.
    [kubelet-check] It seems like the kubelet isn't running or healthy.
    [kubelet-check] The HTTP call equal to 'curl -sSL http://localhost:10248/healthz' failed with error: Get "http://localhost:10248/healthz": dial tcp [::1]:10248: connect: connection refused.
    [kubelet-check] It seems like the kubelet isn't running or healthy.
    [kubelet-check] The HTTP call equal to 'curl -sSL http://localhost:10248/healthz' failed with error: Get "http://localhost:10248/healthz": dial tcp [::1]:10248: connect: connection refused.
    [kubelet-check] It seems like the kubelet isn't running or healthy.
    [kubelet-check] The HTTP call equal to 'curl -sSL http://localhost:10248/healthz' failed with error: Get "http://localhost:10248/healthz": dial tcp [::1]:10248: connect: connection refused.
    [kubelet-check] It seems like the kubelet isn't running or healthy.
    [kubelet-check] The HTTP call equal to 'curl -sSL http://localhost:10248/healthz' failed with error: Get "http://localhost:10248/healthz": dial tcp [::1]:10248: connect: connection refused.
    
            Unfortunately, an error has occurred:
                    timed out waiting for the condition
    
            This error is likely caused by:
                    - The kubelet is not running
                    - The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
    
            If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
                    - 'systemctl status kubelet'
                    - 'journalctl -xeu kubelet'
    
            Additionally, a control plane component may have crashed or exited when started by the container runtime.
            To troubleshoot, list all containers using your preferred container runtimes CLI.
    
            Here is one example how you may list all Kubernetes containers running in docker:
                    - 'docker ps -a | grep kube | grep -v pause'
                    Once you have found the failing container, you can inspect its logs with:
                    - 'docker logs CONTAINERID'
    
    error execution phase wait-control-plane: couldn't initialize a Kubernetes cluster
    To see the stack trace of this error execute with --v=5 or higher
    
    
    
    tail /var/log/messages
    
    Nov 14 06:41:32 master kubelet: E1114 06:41:32.821090   32487 server.go:294] "Failed to run kubelet" err="failed to run Kubelet: misconfiguration: kubelet cgroup driver: \"systemd\" is different from docker cgroup driver: \"cgroupfs\""
    Nov 14 06:41:32 master systemd: kubelet.service: main process exited, code=exited, status=1/FAILURE
    Nov 14 06:41:32 master systemd: Unit kubelet.service entered failed state.
    Nov 14 06:41:32 master systemd: kubelet.service failed.
    

    Fix:

    Reference: minikube fails to start on Linux (It seems like the kubelet isn't running or healthy) (程序员欣宸, CSDN blog)

    Reference: kubeadm init error: kubelet cgroup driver "systemd" is different from docker cgroup driver "cgroupfs" (拾级而上, CSDN blog)
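
    In short, the log above means Docker is still running with the cgroupfs driver. Make sure /etc/docker/daemon.json contains the native.cgroupdriver=systemd entry shown earlier, restart Docker, verify the driver, and reset the failed attempt before running kubeadm init again; a minimal sketch:

    # apply the daemon.json change and confirm Docker now reports the systemd cgroup driver
    systemctl restart docker
    docker info | grep -i "cgroup driver"
    # clean up the failed init attempt, then re-run kubeadm init
    echo y | kubeadm reset
    systemctl restart kubelet
    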

    Successful initialization

    ➜  kubernetes kubeadm init \
    > --apiserver-advertise-address=192.168.205.132 \
    > --image-repository registry.aliyuncs.com/google_containers \
    > --kubernetes-version v1.22.3 \
    > --pod-network-cidr=192.168.0.0/16
    [init] Using Kubernetes version: v1.22.3
    [preflight] Running pre-flight checks
    [preflight] Pulling images required for setting up a Kubernetes cluster
    [preflight] This might take a minute or two, depending on the speed of your internet connection
    [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
    [certs] Using certificateDir folder "/etc/kubernetes/pki"
    [certs] Generating "ca" certificate and key
    [certs] Generating "apiserver" certificate and key
    [certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local master] and IPs [10.96.0.1 192.168.205.132]
    [certs] Generating "apiserver-kubelet-client" certificate and key
    [certs] Generating "front-proxy-ca" certificate and key
    [certs] Generating "front-proxy-client" certificate and key
    [certs] Generating "etcd/ca" certificate and key
    [certs] Generating "etcd/server" certificate and key
    [certs] etcd/server serving cert is signed for DNS names [localhost master] and IPs [192.168.205.132 127.0.0.1 ::1]
    [certs] Generating "etcd/peer" certificate and key
    [certs] etcd/peer serving cert is signed for DNS names [localhost master] and IPs [192.168.205.132 127.0.0.1 ::1]
    [certs] Generating "etcd/healthcheck-client" certificate and key
    [certs] Generating "apiserver-etcd-client" certificate and key
    [certs] Generating "sa" key and public key
    [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
    [kubeconfig] Writing "admin.conf" kubeconfig file
    [kubeconfig] Writing "kubelet.conf" kubeconfig file
    [kubeconfig] Writing "controller-manager.conf" kubeconfig file
    [kubeconfig] Writing "scheduler.conf" kubeconfig file
    [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
    [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
    [kubelet-start] Starting the kubelet
    [control-plane] Using manifest folder "/etc/kubernetes/manifests"
    [control-plane] Creating static Pod manifest for "kube-apiserver"
    [control-plane] Creating static Pod manifest for "kube-controller-manager"
    [control-plane] Creating static Pod manifest for "kube-scheduler"
    [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
    [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
    [apiclient] All control plane components are healthy after 5.502285 seconds
    [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
    [kubelet] Creating a ConfigMap "kubelet-config-1.22" in namespace kube-system with the configuration for the kubelets in the cluster
    [upload-certs] Skipping phase. Please see --upload-certs
    [mark-control-plane] Marking the node master as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
    [mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
    [bootstrap-token] Using token: rptyou.81yja96tim7smxi9
    [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
    [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
    [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
    [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
    [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
    [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
    [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
    [addons] Applied essential addon: CoreDNS
    [addons] Applied essential addon: kube-proxy
    
    Your Kubernetes control-plane has initialized successfully!
    
    To start using your cluster, you need to run the following as a regular user:
    
      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    Alternatively, if you are the root user, you can run:
    
      export KUBECONFIG=/etc/kubernetes/admin.conf
    
    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
      https://kubernetes.io/docs/concepts/cluster-administration/addons/
    
    Then you can join any number of worker nodes by running the following on each as root:
    
    kubeadm join 192.168.205.132:6443 --token rptyou.81yja96tim7smxi9 \
            --discovery-token-ca-cert-hash sha256:66abd99bf1e0d89ffc8639d7b4887986d17045017e443327b718e481a326f4fd 
    
    

    Check the kubelet status

    ➜  ~ systemctl status kubelet.service
    ● kubelet.service - kubelet: The Kubernetes Node Agent
       Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
      Drop-In: /usr/lib/systemd/system/kubelet.service.d
               └─10-kubeadm.conf
       Active: active (running) since 日 2021-11-14 06:52:39 CST; 29min ago
         Docs: https://kubernetes.io/docs/
     Main PID: 46239 (kubelet)
        Tasks: 15
       Memory: 44.6M
       CGroup: /system.slice/kubelet.service
               └─46239 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --ku...
    
    11月 14 07:22:10 master kubelet[46239]: I1114 07:22:10.520318   46239 cni.go:239] "Unable to upda...t.d"
    11月 14 07:22:14 master kubelet[46239]: E1114 07:22:14.244371   46239 kubelet.go:2337] "Container...zed"
    11月 14 07:22:15 master kubelet[46239]: I1114 07:22:15.521923   46239 cni.go:239] "Unable to upda...t.d"
    11月 14 07:22:19 master kubelet[46239]: E1114 07:22:19.252927   46239 kubelet.go:2337] "Container...zed"
    11月 14 07:22:20 master kubelet[46239]: I1114 07:22:20.522498   46239 cni.go:239] "Unable to upda...t.d"
    11月 14 07:22:24 master kubelet[46239]: E1114 07:22:24.261009   46239 kubelet.go:2337] "Container...zed"
    11月 14 07:22:25 master kubelet[46239]: I1114 07:22:25.524179   46239 cni.go:239] "Unable to upda...t.d"
    11月 14 07:22:29 master kubelet[46239]: E1114 07:22:29.268331   46239 kubelet.go:2337] "Container...zed"
    11月 14 07:22:30 master kubelet[46239]: I1114 07:22:30.524362   46239 cni.go:239] "Unable to upda...t.d"
    11月 14 07:22:34 master kubelet[46239]: E1114 07:22:34.276257   46239 kubelet.go:2337] "Container...zed"
    Hint: Some lines were ellipsized, use -l to show in full.
    
    

    Check the health of each component

    ➜  ~ kubectl get cs
    Warning: v1 ComponentStatus is deprecated in v1.19+
    NAME                 STATUS      MESSAGE                                                                                       ERROR
    scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused   
    controller-manager   Healthy     ok                                                                                            
    etcd-0               Healthy     {"health":"true","reason":""} 
    

    Fix

    Reference: scheduler Unhealthy Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connection refused (python基础, CSDN blog)
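
    In short, recent kubeadm releases start kube-scheduler with --port=0, which disables the insecure health endpoint on 10251 that kubectl get cs probes. Commenting the flag out in the static pod manifest restores it; a minimal sketch against the default kubeadm manifest:

    # the kubelet restarts the static pod automatically once the manifest changes
    sed -i 's/- --port=0/# - --port=0/' /etc/kubernetes/manifests/kube-scheduler.yaml
    kubectl get cs
    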

    Check the node status

    ➜  ~ kubectl get node
    NAME     STATUS   ROLES                  AGE     VERSION
    master   NotReady   control-plane,master   78m     v1.22.3
    

    Install the pod network (flannel)

    A pod network add-on must be installed for the cluster to work; without it, pods cannot communicate with each other. Kubernetes supports several options, and flannel is used here (see the note on the pod CIDR after the command below)

    kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
    
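
    One thing to watch: the stock kube-flannel.yml sets flannel's Network to 10.244.0.0/16. Since this cluster was initialized with --pod-network-cidr=192.168.0.0/16, the manifest's net-conf.json may need to be edited to match before applying; a sketch of that variant:

    # download the manifest, point flannel's Network at the cluster's pod CIDR, then apply
    curl -sLO https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
    sed -i 's#10.244.0.0/16#192.168.0.0/16#g' kube-flannel.yml
    kubectl apply -f kube-flannel.yml
    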

    Check the pod status; make sure all pods are Running

    ➜  ~ kubectl get pod --all-namespaces -o wide
    NAMESPACE     NAME                             READY   STATUS    RESTARTS   AGE     IP                NODE     NOMINATED NODE   READINESS GATES
    kube-system   coredns-7f6cbbb7b8-bds8r         1/1     Running   0          79m     192.168.0.2       master   <none>           <none>
    kube-system   coredns-7f6cbbb7b8-ng4cd         1/1     Running   0          79m     192.168.0.3       master   <none>           <none>
    kube-system   etcd-master                      1/1     Running   0          79m     192.168.205.132   master   <none>           <none>
    kube-system   kube-apiserver-master            1/1     Running   0          79m     192.168.205.132   master   <none>           <none>
    kube-system   kube-controller-manager-master   1/1     Running   0          43m     192.168.205.132   master   <none>           <none>
    kube-system   kube-flannel-ds-92nd8            1/1     Running   0          6m58s   192.168.205.133   node1    <none>           <none>
    kube-system   kube-flannel-ds-jxx2b            1/1     Running   0          20m     192.168.205.131   node3    <none>           <none>
    kube-system   kube-flannel-ds-whsvs            1/1     Running   0          35m     192.168.205.132   master   <none>           <none>
    kube-system   kube-flannel-ds-wzkmg            1/1     Running   0          6m28s   192.168.205.130   node2    <none>           <none>
    kube-system   kube-proxy-5wd2g                 1/1     Running   0          6m28s   192.168.205.130   node2    <none>           <none>
    kube-system   kube-proxy-dm474                 1/1     Running   0          79m     192.168.205.132   master   <none>           <none>
    kube-system   kube-proxy-qlwt7                 1/1     Running   0          6m58s   192.168.205.133   node1    <none>           <none>
    kube-system   kube-proxy-zqsn4                 1/1     Running   0          20m     192.168.205.131   node3    <none>           <none>
    kube-system   kube-scheduler-master            1/1     Running   0          44m     192.168.205.132   master   <none>           <none>
    

    Check the node status again; the status has changed to Ready

    ➜  ~ kubectl get node
    NAME     STATUS   ROLES                  AGE     VERSION
    master   Ready    control-plane,master   78m     v1.22.3
    

    At this point the master setup is complete

    Worker nodes

    Joining a worker node

    Before joining, a worker node needs kubeadm installed and started; just follow the master installation steps above

    Run the join command; it can be copied from the kubeadm init success output on the master

    kubeadm join 192.168.205.132:6443 --token rptyou.81yja96tim7smxi9 \
            --discovery-token-ca-cert-hash sha256:66abd99bf1e0d89ffc8639d7b4887986d17045017e443327b718e481a326f4fd 
    

    Once the join succeeds, the new nodes can be seen from the master

    ➜  ~ kubectl get node                        
    NAME     STATUS   ROLES                  AGE   VERSION
    master   Ready    control-plane,master   83m   v1.22.3
    node1    Ready    <none>                 10m   v1.22.3
    node2    Ready    <none>                 10m   v1.22.3
    node3    Ready    <none>                 24m   v1.22.3
    

    Removing a node

    After a node is deleted, it must run kubeadm reset before it can rejoin the cluster with kubeadm join (a safer removal sequence is sketched after the command below)

    [root@k8s-master testnginx]# kubectl delete node k8s-node    # k8s-node is the node name; this is not the only way to remove a node, other methods are omitted here
    
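
    A safer removal sequence drains the node first so its pods are evicted cleanly; a minimal sketch (the node name k8s-node is just an example):

    # on the master: evict workloads, then remove the node from the cluster
    kubectl drain k8s-node --ignore-daemonsets --delete-emptydir-data
    kubectl delete node k8s-node
    # on the removed node itself: clean up local state before any future re-join
    kubeadm reset
    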

    Regenerating the token

    If the token has expired when adding a node, regenerate it with the commands below (a one-step variant is shown after the listing)

    [root@k8s-master testnginx]# kubeadm token list
    TOKEN                     TTL       EXPIRES                     USAGES                   DESCRIPTION                                                EXTRA GROUPS
    uf2c4g.n7ibf1g8gxbkqz2z   23h       2019-04-03T15:28:40+08:00   authentication,signing   The default bootstrap token generated by 'kubeadm init'.   system:bootstrappers:kubeadm:default-node-token
    [root@k8s-master testnginx]# kubeadm token create
    w0r09e.e5olwz1rlhwvgo9p
    [root@k8s-master testnginx]# kubeadm token list
    TOKEN                     TTL       EXPIRES                     USAGES                   DESCRIPTION                                                EXTRA GROUPS
    uf2c4g.n7ibf1g8gxbkqz2z   23h       2019-04-03T15:28:40+08:00   authentication,signing   The default bootstrap token generated by 'kubeadm init'.   system:bootstrappers:kubeadm:default-node-token
    w0r09e.e5olwz1rlhwvgo9p   23h       2019-04-03T16:19:56+08:00   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
    [root@k8s-master testnginx]#
    
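
    Alternatively, kubeadm can print a complete, ready-to-use join command (a fresh token plus the CA cert hash) in one step:

    kubeadm token create --print-join-command
    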

    k8s HelloWorld

    Create an nginx-deployment.yaml with the following content (see the note on the API version after the manifest)

    [root@k8s-master testnginx]# cat nginx-deployment.yaml
    apiVersion: extensions/v1beta1
    kind: Deployment
    metadata:
      name: test-nginx
      namespace: test
    spec:
      replicas: 3
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - name: nginx
            image: yaohl0911/test_nginx
            ports:
            - containerPort: 80
    
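
    Note: the extensions/v1beta1 Deployment API was removed in Kubernetes 1.16, so the manifest above will be rejected by the v1.22.3 cluster built earlier. A minimal apps/v1 equivalent of the same Deployment is sketched below (apps/v1 also requires an explicit selector, and the test namespace must already exist, e.g. kubectl create namespace test):

    cat > nginx-deployment.yaml <<EOF
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: test-nginx
      namespace: test
    spec:
      replicas: 3
      selector:
        matchLabels:
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - name: nginx
            image: yaohl0911/test_nginx
            ports:
            - containerPort: 80
    EOF
    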

    Create the svc and pod, then check the pod status

    mkdir -p kubenetess/testnginx
    cd kubenetess/testnginx/
    vim nginx-deployment.yaml
    kubectl create -f nginx-deployment.yaml
    deployment.extensions/qf-test-nginx created
    kubectl get svc,pod
    
  • Original: https://www.cnblogs.com/yaohl0911/p/15579809.html