  • Deploying Kubernetes v1.18.2 with kubeadm

    Installing Kubernetes v1.18.2 with kubeadm

    References

    环境规划

    • CentOS Linux release 7.5.1804 (Core) // any CentOS 7 release should work
    • 4G/2C
    • master 192.168.3.104, node 192.168.3.105 // both hosts have internet access
    • docker-ce 18.06.3 kubernetes v1.18.2

    准备工作

    Do all of the following on every server:

    • selinux
    • hosts
    • firewalld
    • swap
    • ntp
    • yum

    selinux

    [root@c-3-104 ~]# getenforce 
    Disabled
    [root@c-3-104 ~]# setenforce 0
    setenforce: SELinux is disabled
    [root@c-3-104 ~]# cat /etc/selinux/config 
    # This file controls the state of SELinux on the system.
    # SELINUX= can take one of these three values:
    #     enforcing - SELinux security policy is enforced.
    #     permissive - SELinux prints warnings instead of enforcing.
    #     disabled - No SELinux policy is loaded.
    SELINUX=disabled
    # SELINUXTYPE= can take one of three values:
    #     targeted - Targeted processes are protected,
    #     minimum - Modification of targeted policy. Only selected processes are protected. 
    #     mls - Multi Level Security protection.
    SELINUXTYPE=targeted
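
    To reach this state without editing the file by hand, a minimal sketch (run on every node; setenforce is a no-op when SELinux is already disabled):

    setenforce 0 || true
    # persist across reboots
    sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config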
    

    hosts

    [root@c-3-104 ~]# cat /etc/hosts
    127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
    ::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
    192.168.3.104 c-3-104
    
    [root@c-3-105 ~]# cat /etc/hosts
    127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
    ::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
    192.168.3.105 c-3-105
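
    Each host above only carries its own entry; appending both names on both machines also lets the nodes resolve each other by hostname (a sketch, adjust IPs/names to your environment):

    cat >> /etc/hosts <<EOF
    192.168.3.104 c-3-104
    192.168.3.105 c-3-105
    EOF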
    
    

    firewalld

    [root@c-3-105 ~]# systemctl disable firewalld
    [root@c-3-105 ~]# 
    [root@c-3-105 ~]# systemctl stop  firewalld
    [root@c-3-105 ~]# 
    [root@c-3-105 ~]# systemctl status  firewalld
    ● firewalld.service
       Loaded: masked (/dev/null; bad)
       Active: inactive (dead)
    [root@c-3-105 ~]# 
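
    Note the status output shows the unit as masked; disable/stop alone do not do that. To reach the same end state (optional):

    systemctl mask firewalld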
    

    swap

    [root@c-3-105 ~]# swapoff -a
    [root@c-3-105 ~]# cat /etc/fstab 
    
    #
    # /etc/fstab
    # Created by anaconda on Sat Apr 11 11:14:30 2020
    #
    # Accessible filesystems, by reference, are maintained under '/dev/disk'
    # See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
    #
    UUID=d6c43c7e-ad98-4c7b-ae13-a0a21ad5ed8e /                       xfs     defaults        0 0
    UUID=05af1f32-2c02-4d59-b061-b63b20c4e878 /boot                   xfs     defaults        0 0
    #/dev/mapper/centos-swap swap                    swap    defaults        0 0
    [root@c-3-105 ~]# 
    [root@c-3-105 ~]# free -m
                  total        used        free      shared  buff/cache   available
    Mem:           3773          97        3532          11         142        3467
    Swap:             0           0           0
    [root@c-3-105 ~]# 
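
    The fstab edit can also be scripted; a rough sketch that comments out any uncommented swap entry:

    swapoff -a
    sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab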
    

    ntp

    [root@c-3-104 ~]# crontab -l
    */20 * * * * /usr/sbin/ntpdate ntpupdate.tencentyun.com &>/dev/null && /usr/sbin/hwclock -w
    [root@c-3-104 ~]# whereis ntpdate
    ntpdate: /usr/sbin/ntpdate /usr/share/man/man8/ntpdate.8.gz
    [root@c-3-104 ~]# 
    [root@c-3-104 ~]# date -R 
    Wed, 27 May 2020 12:08:47 +0800
    [root@c-3-104 ~]# /usr/sbin/ntpdate ntpupdate.tencentyun.com &>/dev/null && /usr/sbin/hwclock -w
    [root@c-3-104 ~]# date
    Wed May 27 12:08:52 CST 2020
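
    To reproduce this setup on a fresh node (a sketch; ntpupdate.tencentyun.com is just the server this environment happens to use, substitute your own):

    yum install -y ntpdate
    (crontab -l 2>/dev/null; echo '*/20 * * * * /usr/sbin/ntpdate ntpupdate.tencentyun.com &>/dev/null && /usr/sbin/hwclock -w') | crontab -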
    
    

    yum

    • base/epel
    • docker-ce (install sketch in the next section)
    • kubernetes (repo config below)

    [root@master01 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=1
    repo_gpgcheck=1
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    

    yum clean all && yum makecache

    docker-ce && kubeadm

    docker-ce binary install
    docker-ce yum install (sketch below)
    kubeadm
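
    A minimal yum-based docker-ce install matching the 18.06.3 version used here (a sketch; assumes the Aliyun mirror):

    yum install -y yum-utils
    yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    yum install -y docker-ce-18.06.3.ce
    systemctl enable docker && systemctl start docker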

    [root@c-3-104 ~]# yum list kubeadm --show-duplicate |grep 1.18
    kubeadm.x86_64                       1.18.0-0                         kubernetes
    kubeadm.x86_64                       1.18.1-0                         kubernetes
    kubeadm.x86_64                       1.18.2-0                         kubernetes
    kubeadm.x86_64                       1.18.3-0                         kubernetes
    [root@c-3-104 ~]# yum install -y kubeadm kubelet kubectl
    [root@c-3-104 ~]# systemctl  enable kubelet
    Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
    [root@c-3-104 ~]# 
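
    Note: installing without a version pins nothing, so yum pulled 1.18.3 here even though the cluster below is initialized at v1.18.2 (this is why kubectl get nodes later reports v1.18.3 kubelets). To install the matching version instead:

    yum install -y kubeadm-1.18.2-0 kubelet-1.18.2-0 kubectl-1.18.2-0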
    
    

    master

    kubeadm init

    kubeadm init --kubernetes-version=1.18.2 \
    	--apiserver-advertise-address=192.168.3.104 \
    	--image-repository registry.aliyuncs.com/google_containers \
    	--service-cidr=10.10.0.0/16 --pod-network-cidr=10.20.0.0/16
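
    Optionally pre-pull the control-plane images first so init itself is quicker (the preflight output below also suggests this):

    kubeadm config images pull --kubernetes-version=1.18.2 \
    	--image-repository registry.aliyuncs.com/google_containers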
    
    [root@c-3-104 ~]# kubeadm init --kubernetes-version=1.18.2 \
    > --apiserver-advertise-address=192.168.3.104 \
    > --image-repository registry.aliyuncs.com/google_containers \
    > --service-cidr=10.10.0.0/16 --pod-network-cidr=10.20.0.0/16
    W0527 13:13:50.045479   13148 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
    [init] Using Kubernetes version: v1.18.2
    [preflight] Running pre-flight checks
    	[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
    [preflight] Pulling images required for setting up a Kubernetes cluster
    [preflight] This might take a minute or two, depending on the speed of your internet connection
    [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
    [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
    [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
    [kubelet-start] Starting the kubelet
    [certs] Using certificateDir folder "/etc/kubernetes/pki"
    [certs] Generating "ca" certificate and key
    [certs] Generating "apiserver" certificate and key
    [certs] apiserver serving cert is signed for DNS names [c-3-104 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.10.0.1 192.168.3.104]
    [certs] Generating "apiserver-kubelet-client" certificate and key
    [certs] Generating "front-proxy-ca" certificate and key
    [certs] Generating "front-proxy-client" certificate and key
    [certs] Generating "etcd/ca" certificate and key
    [certs] Generating "etcd/server" certificate and key
    [certs] etcd/server serving cert is signed for DNS names [c-3-104 localhost] and IPs [192.168.3.104 127.0.0.1 ::1]
    [certs] Generating "etcd/peer" certificate and key
    [certs] etcd/peer serving cert is signed for DNS names [c-3-104 localhost] and IPs [192.168.3.104 127.0.0.1 ::1]
    [certs] Generating "etcd/healthcheck-client" certificate and key
    [certs] Generating "apiserver-etcd-client" certificate and key
    [certs] Generating "sa" key and public key
    [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
    [kubeconfig] Writing "admin.conf" kubeconfig file
    [kubeconfig] Writing "kubelet.conf" kubeconfig file
    [kubeconfig] Writing "controller-manager.conf" kubeconfig file
    [kubeconfig] Writing "scheduler.conf" kubeconfig file
    [control-plane] Using manifest folder "/etc/kubernetes/manifests"
    [control-plane] Creating static Pod manifest for "kube-apiserver"
    [control-plane] Creating static Pod manifest for "kube-controller-manager"
    W0527 13:15:55.300229   13148 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
    [control-plane] Creating static Pod manifest for "kube-scheduler"
    W0527 13:15:55.302269   13148 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
    [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
    [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
    [apiclient] All control plane components are healthy after 34.003709 seconds
    [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
    [kubelet] Creating a ConfigMap "kubelet-config-1.18" in namespace kube-system with the configuration for the kubelets in the cluster
    [upload-certs] Skipping phase. Please see --upload-certs
    [mark-control-plane] Marking the node c-3-104 as control-plane by adding the label "node-role.kubernetes.io/master=''"
    [mark-control-plane] Marking the node c-3-104 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
    [bootstrap-token] Using token: 78va9y.lbfgkkpjl9cwe5cs
    [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
    [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
    [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
    [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
    [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
    [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
    [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
    [addons] Applied essential addon: CoreDNS
    [addons] Applied essential addon: kube-proxy
    
    Your Kubernetes control-plane has initialized successfully!
    
    To start using your cluster, you need to run the following as a regular user:
    
      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
      https://kubernetes.io/docs/concepts/cluster-administration/addons/
    
    Then you can join any number of worker nodes by running the following on each as root:
    
    kubeadm join 192.168.3.104:6443 --token 78va9y.lbfgkkpjl9cwe5cs \
        --discovery-token-ca-cert-hash sha256:d2de3ae7c47a18ceca31deebcf1b0e5441cc6a7963e272dd381a8f600ef63820
    [root@c-3-104 ~]#
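
    The IsDockerSystemdCheck warning above is harmless in a lab, but it can be silenced by switching Docker to the systemd cgroup driver before running kubeadm init (a sketch; on an already-initialized node the kubelet's cgroup driver must be changed to match as well):

    cat > /etc/docker/daemon.json <<EOF
    {
      "exec-opts": ["native.cgroupdriver=systemd"]
    }
    EOF
    systemctl daemon-reload && systemctl restart docker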
    

    kubectl configuration

    [root@c-3-104 ~]# mkdir -p $HOME/.kube
    [root@c-3-104 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    [root@c-3-104 ~]# chown $(id -u):$(id -g) $HOME/.kube/config
    
    [root@c-3-104 ~]# source <(kubectl completion bash)
    [root@c-3-104 ~]# cat .bashrc   // note the completion source line appended at the end
    # .bashrc
    
    # User specific aliases and functions
    
    alias rm='rm -i'
    alias cp='cp -i'
    alias mv='mv -i'
    
    # Source global definitions
    if [ -f /etc/bashrc ]; then
    	. /etc/bashrc
    fi
    source <(kubectl completion bash)
    
    

    CNI: Calico network

    [root@c-3-104 ~]# kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
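
    One thing to check before applying: this cluster was initialized with --pod-network-cidr=10.20.0.0/16, while the manifest's default pool is 192.168.0.0/16. Depending on the Calico version you may need to point the pool at the pod CIDR (a fragile sed sketch; editing the YAML by hand is just as easy):

    wget https://docs.projectcalico.org/manifests/calico.yaml
    # uncomment CALICO_IPV4POOL_CIDR and set it to the pod CIDR
    sed -i -e 's|# - name: CALICO_IPV4POOL_CIDR|- name: CALICO_IPV4POOL_CIDR|' \
           -e 's|#   value: "192.168.0.0/16"|  value: "10.20.0.0/16"|' calico.yaml
    kubectl apply -f calico.yaml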
    
    

    Pod status

    [root@c-3-104 ~]# kubectl get pods -A
    NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
    kube-system   calico-kube-controllers-789f6df884-vw89h   1/1     Running   0          6m31s
    kube-system   calico-node-45wcn                          1/1     Running   0          6m31s
    kube-system   calico-node-8sxzh                          1/1     Running   0          6m31s
    kube-system   coredns-7ff77c879f-hppfr                   1/1     Running   0          13m
    kube-system   coredns-7ff77c879f-zztzk                   1/1     Running   0          13m
    kube-system   etcd-c-3-104                               1/1     Running   0          13m
    kube-system   kube-apiserver-c-3-104                     1/1     Running   0          13m
    kube-system   kube-controller-manager-c-3-104            1/1     Running   2          13m
    kube-system   kube-proxy-66wv4                           1/1     Running   0          13m
    kube-system   kube-proxy-pqzsq                           1/1     Running   0          11m
    kube-system   kube-scheduler-c-3-104                     1/1     Running   2          13m
    [root@c-3-104 ~]# kubectl get nodes
    NAME      STATUS   ROLES    AGE   VERSION
    c-3-104   Ready    master   13m   v1.18.3
    c-3-105   Ready    <none>   11m   v1.18.3
    [root@c-3-104 ~]# 
    
    

    kubernetes-dashboard

    [root@c-3-104 ~]# wget -k https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.1/aio/deploy/recommended.yaml
    --2020-05-27 13:33:21--  https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.1/aio/deploy/recommended.yaml
    Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.108.133
    Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.108.133|:443... connected.
    HTTP request sent, awaiting response... 200 OK
    Length: 7552 (7.4K) [text/plain]
    Saving to: ‘recommended.yaml’
    
    100%[==========================================================================================>] 7,552       --.-K/s   in 0s      
    
    2020-05-27 13:33:22 (15.4 MB/s) - ‘recommended.yaml’ saved [7552/7552]
    
    Converted 0 files in 0 seconds.
    

    Set NodePort / token-ttl

    [root@c-3-104 ~]# cat recommended.yaml 
    
    ---
    
    kind: Service
    apiVersion: v1
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      name: kubernetes-dashboard
      namespace: kubernetes-dashboard
    spec:
      type: NodePort   # added
      ports:
        - port: 443
          targetPort: 8443
          nodePort: 30000  # added
      selector:
        k8s-app: kubernetes-dashboard
    
    ---
    
    kind: Deployment
    apiVersion: apps/v1
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      name: kubernetes-dashboard
      namespace: kubernetes-dashboard
    spec:
      replicas: 1
      revisionHistoryLimit: 10
      selector:
        matchLabels:
          k8s-app: kubernetes-dashboard
      template:
        metadata:
          labels:
            k8s-app: kubernetes-dashboard
        spec:
          containers:
            - name: kubernetes-dashboard
              image: kubernetesui/dashboard:v2.0.1
              imagePullPolicy: Always
              ports:
                - containerPort: 8443
                  protocol: TCP
              args:
                - --auto-generate-certificates
                - --namespace=kubernetes-dashboard
            - --token-ttl=43200   # added
                # Uncomment the following line to manually specify Kubernetes API server Host
                # If not specified, Dashboard will attempt to auto discover the API server and connect
                # to it. Uncomment only if the default does not work.
                # - --apiserver-host=http://my-address:port
              volumeMounts:
                - name: kubernetes-dashboard-certs
                  mountPath: /certs
                  # Create on-disk volume to store exec logs
                - mountPath: /tmp
                  name: tmp-volume
              livenessProbe:
                httpGet:
                  scheme: HTTPS
                  path: /
                  port: 8443
                initialDelaySeconds: 30
                timeoutSeconds: 30
              securityContext:
                allowPrivilegeEscalation: false
                readOnlyRootFilesystem: true
                runAsUser: 1001
                runAsGroup: 2001
          volumes:
            - name: kubernetes-dashboard-certs
              secret:
                secretName: kubernetes-dashboard-certs
            - name: tmp-volume
              emptyDir: {}
          serviceAccountName: kubernetes-dashboard
          nodeSelector:
            "kubernetes.io/os": linux
          # Comment the following tolerations if Dashboard must not be deployed on master
          tolerations:
            - key: node-role.kubernetes.io/master
              effect: NoSchedule
    
    
    [root@c-3-104 ~]# kubectl create -f recommended.yaml
    
    [root@c-3-104 ~]# kubectl get pods -A
    NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE
    kube-system            calico-kube-controllers-789f6df884-vw89h     1/1     Running   0          25m
    kube-system            calico-node-45wcn                            1/1     Running   0          25m
    kube-system            calico-node-8sxzh                            1/1     Running   0          25m
    kube-system            coredns-7ff77c879f-hppfr                     1/1     Running   0          31m
    kube-system            coredns-7ff77c879f-zztzk                     1/1     Running   0          31m
    kube-system            etcd-c-3-104                                 1/1     Running   0          32m
    kube-system            kube-apiserver-c-3-104                       1/1     Running   0          32m
    kube-system            kube-controller-manager-c-3-104              1/1     Running   2          32m
    kube-system            kube-proxy-66wv4                             1/1     Running   0          31m
    kube-system            kube-proxy-pqzsq                             1/1     Running   0          29m
    kube-system            kube-scheduler-c-3-104                       1/1     Running   2          32m
    kubernetes-dashboard   dashboard-metrics-scraper-6b4884c9d5-dk4xh   1/1     Running   0          11m
    kubernetes-dashboard   kubernetes-dashboard-86467659d-sqpcs         1/1     Running   0          11m
    [root@c-3-104 ~]# kubectl get svc -A
    NAMESPACE              NAME                        TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                  AGE
    default                kubernetes                  ClusterIP   10.10.0.1      <none>        443/TCP                  32m
    kube-system            kube-dns                    ClusterIP   10.10.0.10     <none>        53/UDP,53/TCP,9153/TCP   32m
    kubernetes-dashboard   dashboard-metrics-scraper   ClusterIP   10.10.12.70    <none>        8000/TCP                 12m
    kubernetes-dashboard   kubernetes-dashboard        NodePort    10.10.116.89   <none>        443:30000/TCP            12m
    [root@c-3-104 ~]# 
    
    

    add rbac

    kubectl create clusterrolebinding serviceaccount-cluster-admin \
      --clusterrole=cluster-admin \
      --user=system:serviceaccount:kubernetes-dashboard:kubernetes-dashboard
    

    dashboard-token

    [root@c-3-104 ~]# kubectl get secret --namespace=kubernetes-dashboard
    NAME                               TYPE                                  DATA   AGE
    default-token-k2f2q                kubernetes.io/service-account-token   3      2m55s
    kubernetes-dashboard-certs         Opaque                                0      2m55s
    kubernetes-dashboard-csrf          Opaque                                1      2m55s
    kubernetes-dashboard-key-holder    Opaque                                2      2m55s
    kubernetes-dashboard-token-fj8ww   kubernetes.io/service-account-token   3      2m55s
    [root@c-3-104 ~]# 
    [root@c-3-104 ~]# kubectl describe secrets -n kubernetes-dashboard kubernetes-dashboard-token-fj8ww | grep token | awk 'NR==3{print $2}'
    eyJhbGciOiJSUzI1NiIsImtpZCI6IjVxRHpTMXRoS3dOWUZMRElBVC1YZ1NpZ1daVDI3dENtWUlpQTRUN2U1QTQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1majh3dyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImExZGQzODRjLWFkZTMtNDJkOS1hYzRhLWFkM2FmMzdmNTRmOSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.ai-vpMtUqRqrMQsUqnQ0HzIWMBV8KQ2kMNE75jlENvUMWMsM1d0V-ANISzkF_YIrZrUlG5Z6a27dLJFr01Sybiy5pqkGPrhiDbClBMO69lN25r5EQ1VBUDEJUpLKI-rGkXi2GnnPC_e8o57O-1yL9YB-ToXdLFdRFBNnbpNKFcMdz5ooBsgQTgXMC_zEbS6aYyAMBlV-1yoLmEuFUL8qHt01Yt-wI9blnikYF-7HksjEVjVhWeQfPDOv0VGVlwDjXOHNKIUQPU13mTFNrXOv52KiiPxrKhBaJJbbPpmsUrdi3bCbKL-QWuv6ch5rMTcX75z6auhk4PG0bqbnmxEm2g
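
    The grep/awk pipeline is brittle; an equivalent that reads the token straight from the ServiceAccount's secret (assumes the stock kubernetes-dashboard ServiceAccount):

    kubectl -n kubernetes-dashboard get secret \
      $(kubectl -n kubernetes-dashboard get sa kubernetes-dashboard -o jsonpath='{.secrets[0].name}') \
      -o jsonpath='{.data.token}' | base64 -d; echo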
    

    chrome
    https://192.168.3.104:30000/  // log in with the dashboard token above

    node

    [root@c-3-105 ~]# systemctl enable kubelet
    Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
    [root@c-3-105 ~]# 
    [root@c-3-105 ~]# kubeadm join 192.168.3.104:6443 --token 78va9y.lbfgkkpjl9cwe5cs \
    >     --discovery-token-ca-cert-hash sha256:d2de3ae7c47a18ceca31deebcf1b0e5441cc6a7963e272dd381a8f600ef63820
    W0527 13:18:21.305837   10959 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
    [preflight] Running pre-flight checks
    	[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
    [preflight] Reading configuration from the cluster...
    [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
    [kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
    [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
    [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
    [kubelet-start] Starting the kubelet
    [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
    
    This node has joined the cluster:
    * Certificate signing request was sent to apiserver and a response was received.
    * The Kubelet was informed of the new secure connection details.
    
    Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
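
    The bootstrap token from kubeadm init expires after 24 hours by default; if it has lapsed by the time a node joins, generate a fresh join command on the master:

    kubeadm token create --print-join-command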
    
    

    ERROR

    Missing CNI plugin

    [root@c-3-104 ~]# journalctl -u kubelet -f     // on the master
    -- Logs begin at Wed 2020-05-27 11:29:03 CST. --
    May 27 13:22:46 c-3-104 kubelet[14091]: E0527 13:22:46.217591   14091 kubelet.go:2187] Container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized
    May 27 13:22:49 c-3-104 kubelet[14091]: W0527 13:22:49.351884   14091 cni.go:237] Unable to update cni config: no networks found in /etc/cni/net.d
    May 27 13:22:51 c-3-104 kubelet[14091]: E0527 13:22:51.219407   14091 kubelet.go:2187] Container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized
    May 27 13:22:54 c-3-104 kubelet[14091]: W0527 13:22:54.352129   14091 cni.go:237] Unable to update cni config: no networks found in /etc/cni/net.d
    May 27 13:22:56 c-3-104 kubelet[14091]: E0527 13:22:56.222623   14091 kubelet.go:2187] Container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized
    May 27 13:22:59 c-3-104 kubelet[14091]: W0527 13:22:59.352454   14091 cni.go:237] Unable to update cni config: no networks found in /etc/cni/net.d
    May 27 13:23:01 c-3-104 kubelet[14091]: E0527 13:23:01.224719   14091 kubelet.go:2187] Container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized
    May 27 13:23:04 c-3-104 kubelet[14091]: W0527 13:23:04.353681   14091 cni.go:237] Unable to update cni config: no networks found in /etc/cni/net.d
    May 27 13:23:06 c-3-104 kubelet[14091]: E0527 13:23:06.229160   14091 kubelet.go:2187] Container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized
    May 27 13:23:09 c-3-104 kubelet[14091]: W0527 13:23:09.355189   14091 cni.go:237] Unable to update cni config: no networks found in /etc/cni/net.d
    

    Fix: deploy a CNI plugin, e.g.
    flannel
    calico
    weave
    ...

    [root@master01 ~]# kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
    

    Checking logs when something is wrong

    [root@c-3-104 ~]# kubectl get pods -A
    NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE
    kube-system            calico-kube-controllers-789f6df884-vw89h     1/1     Running   0          33m
    kube-system            calico-node-45wcn                            1/1     Running   0          33m
    kube-system            calico-node-8sxzh                            1/1     Running   0          33m
    kube-system            coredns-7ff77c879f-hppfr                     1/1     Running   0          40m
    kube-system            coredns-7ff77c879f-zztzk                     1/1     Running   0          40m
    kube-system            etcd-c-3-104                                 1/1     Running   0          40m
    kube-system            kube-apiserver-c-3-104                       1/1     Running   0          40m
    kube-system            kube-controller-manager-c-3-104              1/1     Running   2          40m
    kube-system            kube-proxy-66wv4                             1/1     Running   0          40m
    kube-system            kube-proxy-pqzsq                             1/1     Running   0          38m
    kube-system            kube-scheduler-c-3-104                       1/1     Running   2          40m
    kubernetes-dashboard   dashboard-metrics-scraper-6b4884c9d5-dk4xh   1/1     Running   0          20m
    kubernetes-dashboard   kubernetes-dashboard-86467659d-sqpcs         1/1     Running   0          20m
    [root@c-3-104 ~]# 
    [root@c-3-104 ~]# 
    [root@c-3-104 ~]# kubectl logs -f -n kubernetes-dashboard kubernetes-dashboard-86467659d-sqpcs
    
    [root@c-3-104 ~]# journalctl -u kubelet -f
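
    When a pod is stuck rather than merely logging errors, these are the usual next steps (a sketch; the pod name is the one from this cluster):

    kubectl describe pod -n kubernetes-dashboard kubernetes-dashboard-86467659d-sqpcs   # scheduling, image pulls, events
    kubectl get events -A --sort-by=.metadata.creationTimestamp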
    