  • Building a highly available Kubernetes cluster with kubeadm

    Environment preparation

    The operating system is Ubuntu 18.04.

    Host IP     Hostname    Docker version
    172.31.1.10 k8s-master1 19.03.15
    172.31.1.11 k8s-master2 19.03.15
    172.31.1.12 k8s-master3 19.03.15
    172.31.1.13 harbor 19.03.15
    172.31.1.14 haproxy1
    172.31.1.15 haproxy2
    172.31.1.16 k8s-node1 19.03.15
    172.31.1.17 k8s-node2 19.03.15
    172.31.1.18 k8s-node3 19.03.15

    Change each hostname, because Kubernetes identifies nodes by hostname.

    [root@long-ubuntu ~]# hostnamectl set-hostname k8s-master1.example.local
    
    [root@long-ubuntu ~]# hostnamectl set-hostname k8s-master2.example.local
    
    [root@long-ubuntu ~]# hostnamectl set-hostname k8s-master3.example.local
    
    root@k8s-ubuntu:~# hostnamectl set-hostname harbor.example.local
    
    root@k8s-ubuntu:~# hostnamectl set-hostname ha1.example.local
    
    [root@long-ubuntu ~]# hostnamectl set-hostname k8s-node1.example.local
    
    [root@long-ubuntu ~]# hostnamectl set-hostname k8s-node2.example.local
    
    [root@long-ubuntu ~]# hostnamectl set-hostname k8s-node3.example.local
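
    Since the cluster identifies nodes by hostname, it also helps to put the full host list into /etc/hosts on every machine. A sketch based on the table above (not shown in the original):

    cat >> /etc/hosts <<EOF
    172.31.1.10 k8s-master1.example.local k8s-master1
    172.31.1.11 k8s-master2.example.local k8s-master2
    172.31.1.12 k8s-master3.example.local k8s-master3
    172.31.1.13 harbor.example.local harbor
    172.31.1.14 ha1.example.local ha1
    172.31.1.15 ha2.example.local ha2
    172.31.1.16 k8s-node1.example.local k8s-node1
    172.31.1.17 k8s-node2.example.local k8s-node2
    172.31.1.18 k8s-node3.example.local k8s-node3
    EOF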
    

    One-click docker-ce installation on Ubuntu 18.04

    #!/bin/bash
    # Ubuntu Install docker-ce
    
    apt purge -y ufw lxd lxd-client lxcfs lxc-common
    
    apt install -y iproute2 ntpdate tcpdump telnet traceroute nfs-kernel-server \
      nfs-common lrzsz tree openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev \
      gcc openssh-server iotop unzip zip
    
    apt-get remove docker docker-engine docker.io
    
    apt-get install -y apt-transport-https ca-certificates curl gnupg2 software-properties-common
    
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    
    sudo add-apt-repository \
      "deb [arch=amd64] https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/ubuntu \
      $(lsb_release -cs) \
      stable"
    
    apt update
    
    apt install -y docker-ce=5:19.03.15~3-0~ubuntu-bionic docker-ce-cli=5:19.03.15~3-0~ubuntu-bionic
    
    sudo mkdir -p /etc/docker
    sudo tee /etc/docker/daemon.json <<-'EOF'
    {
      "registry-mirrors": ["https://rzd1bb7q.mirror.aliyuncs.com"]
    }
    EOF
    
    sudo systemctl daemon-reload
    sudo systemctl restart docker
    docker version
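
    kubeadm's preflight checks warn when Docker runs with the cgroupfs driver. A variant of the daemon.json step above that also selects the systemd cgroup driver (an optional tweak, not part of the original script):

    sudo tee /etc/docker/daemon.json <<-'EOF'
    {
      "registry-mirrors": ["https://rzd1bb7q.mirror.aliyuncs.com"],
      "exec-opts": ["native.cgroupdriver=systemd"]
    }
    EOF
    sudo systemctl restart docker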
    

    Remember to disable swap.

    Disable the firewall.

    Tune the kernel parameters and verify them:

    [root@long ~]# sysctl -a | grep forward
    net.ipv4.ip_forward = 1
    
    [root@long ~]# sysctl -a | grep bridge-nf-call
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
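
    A minimal sketch of these prep steps on each node (the install script above already purges ufw, so the firewall step may be a no-op in this setup):

    # Disable swap now and across reboots
    swapoff -a
    sed -ri 's/^([^#].*\sswap\s)/#\1/' /etc/fstab

    # Stop and disable the firewall if one is still present
    systemctl disable --now ufw

    # Enable forwarding and bridged-traffic filtering for kube-proxy/flannel
    modprobe br_netfilter
    cat <<EOF > /etc/sysctl.d/99-kubernetes.conf
    net.ipv4.ip_forward = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.bridge.bridge-nf-call-ip6tables = 1
    EOF
    sysctl --system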
    

    Installing keepalived + haproxy

    # 172.31.1.14
    [root@ha1 ~]# apt -y install keepalived haproxy
    

    Configure keepalived

    [root@ha1 ~]# find / -name "*keepalived*"
    
    # Copy the sample config
    [root@ha1 ~]# cp /usr/share/doc/keepalived/samples/keepalived.conf.vrrp /etc/keepalived/keepalived.conf
    

    Check whether the VIP is already in use

    [root@k8s-master1 ~]# ping 172.31.1.188
    PING 172.31.1.188 (172.31.1.188) 56(84) bytes of data.
    From 172.31.1.10 icmp_seq=1 Destination Host Unreachable
    From 172.31.1.10 icmp_seq=2 Destination Host Unreachable
    From 172.31.1.10 icmp_seq=3 Destination Host Unreachable
    
    # The replies above show the address is unused, so it can be configured as the VIP
    

    Edit the configuration

    [root@ha1 ~]# vim /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived
    
    global_defs {
       notification_email {
         acassen
       }
       notification_email_from Alexandre.Cassen@firewall.loc
       smtp_server 192.168.200.1
       smtp_connect_timeout 30
       router_id LVS_DEVEL
    }
    
    vrrp_instance VI_1 {
        state MASTER
        interface eth0
        garp_master_delay 10
        smtp_alert
        virtual_router_id 51
        priority 100
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1111
        }
        virtual_ipaddress {
            172.31.1.188 dev eth0 label eth0:1
        }
    }
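
    The original only configures ha1. For the second load balancer (ha2, 172.31.1.15), a BACKUP instance with a lower priority keeps the VIP available if ha1 fails. A sketch, assuming the same interface name; only state and priority differ:

    [root@ha2 ~]# vim /etc/keepalived/keepalived.conf
    vrrp_instance VI_1 {
        state BACKUP
        interface eth0
        garp_master_delay 10
        smtp_alert
        virtual_router_id 51
        priority 80
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1111
        }
        virtual_ipaddress {
            172.31.1.188 dev eth0 label eth0:1
        }
    }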
    

    Enable at boot

    [root@ha1 ~]# systemctl enable --now keepalived
    

    Verify

    [root@ha1 ~]# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
           valid_lft forever preferred_lft forever
        inet6 ::1/128 scope host
           valid_lft forever preferred_lft forever
    2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
        link/ether 00:0c:29:da:36:40 brd ff:ff:ff:ff:ff:ff
        inet 172.31.1.14/21 brd 172.31.7.255 scope global eth0
           valid_lft forever preferred_lft forever
        inet 172.31.1.188/32 scope global eth0:1
           valid_lft forever preferred_lft forever
        inet6 fe80::20c:29ff:feda:3640/64 scope link
           valid_lft forever preferred_lft forever
    

    Configure HAProxy

    [root@ha1 ~]# vim /etc/haproxy/haproxy.cfg
    
    listen stats
      mode http
      bind 0.0.0.0:9999
      stats enable
      log global
      stats uri /haproxy-status
      stats auth haadmin:123456
    
    listen k8s-m44-6443
      bind 172.31.1.188:6443
      mode tcp
      server 172.31.1.10 172.31.1.10:6443 check inter 2s fall 3 rise 5
      server 172.31.1.11 172.31.1.11:6443 check inter 2s fall 3 rise 5
      server 172.31.1.12 172.31.1.12:6443 check inter 2s fall 3 rise 5
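
    Before restarting, the configuration file can be validated (a standard haproxy flag):

    [root@ha1 ~]# haproxy -c -f /etc/haproxy/haproxy.cfg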
    

    Enable at boot

    [root@ha1 ~]# systemctl enable --now haproxy
    Synchronizing state of haproxy.service with SysV service script with /lib/systemd/systemd-sysv-install.
    Executing: /lib/systemd/systemd-sysv-install enable haproxy
    

    Set up Harbor

    First download docker-compose (make sure Docker is already installed).

    [root@harbor ~]# wget https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64
    
    [root@harbor ~]# cp docker-compose-Linux-x86_64 /usr/bin/docker-compose
    
    # Make it executable
    [root@harbor ~]# chmod +x /usr/bin/docker-compose
    
    # Check the version
    [root@harbor ~]# docker-compose version
    docker-compose version 1.29.2, build 5becea4c
    docker-py version: 5.0.0
    CPython version: 3.7.10
    OpenSSL version: OpenSSL 1.1.0l  10 Sep 2019
    

    Install Harbor

    # 172.31.1.13
    [root@harbor ~]# mkdir /apps

    [root@harbor ~]# cp harbor-offline-installer-v2.2.3.tgz /apps/
    [root@harbor ~]# cd /apps/
    [root@harbor apps]# ll
    total 500924
    drwxr-xr-x  2 root root      4096 Jul 24 01:38 ./
    drwxr-xr-x 26 root root      4096 Jul 24 01:38 ../
    -rw-r--r--  1 root root 512937171 Jul 24 01:38 harbor-offline-installer-v2.2.3.tgz

    # Unpack
    [root@harbor apps]# tar xf harbor-offline-installer-v2.2.3.tgz

    # Create the certificate directory inside the unpacked harbor directory
    [root@harbor apps]# cd harbor
    [root@harbor harbor]# mkdir certs
    

    Generate a self-signed certificate for Harbor HTTPS

    The following error may appear:

    [root@harbor certs]# openssl req -x509 -new -nodes -key harbor-ca.key  -subj "/CN=harbor.longxuan.vip" -days 7120 -out harbor-ca.crt
    Can't load /root/.rnd into RNG
    140654265754048:error:2406F079:random number generator:RAND_load_file:Cannot open file:../crypto/rand/randfile.c:88:Filename=/root/.rnd
    

    Fix: create the missing file in advance.

    # Remove the bad certificate
    [root@harbor certs]# rm -rf harbor-ca.crt
    
    [root@harbor certs]# touch /root/.rnd
    

    Regenerate the certificate

    [root@harbor certs]# openssl genrsa -out  harbor-ca.key
    Generating RSA private key, 2048 bit long modulus (2 primes)
    .............+++++
    ......................................................+++++
    e is 65537 (0x010001)
    
    [root@harbor certs]# openssl req -x509 -new -nodes -key harbor-ca.key  -subj "/CN=harbor.longxuan.vip" -days 7120 -out harbor-ca.crt
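
    Note: clients built with Go 1.15+ reject certificates that carry the hostname only in the CN field. If logins later fail with certificate errors on newer Docker releases, regenerating with a SAN should help. A variant of the command above (assumes OpenSSL 1.1.1+, which Ubuntu 18.04 ships, for -addext):

    [root@harbor certs]# openssl req -x509 -new -nodes -key harbor-ca.key \
      -subj "/CN=harbor.longxuan.vip" \
      -addext "subjectAltName=DNS:harbor.longxuan.vip" \
      -days 7120 -out harbor-ca.crt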
    

    Edit the Harbor config

    [root@harbor harbor]# vim harbor.yml
    hostname: harbor.longxuan.vip
    
    https:
      # https port for harbor, default is 443
      port: 443
      # The path of cert and key files for nginx
      certificate: /apps/harbor/certs/harbor-ca.crt
      private_key: /apps/harbor/certs/harbor-ca.key
    
    harbor_admin_password: 123456
    

    Run the installer

    [root@harbor harbor]# ./install.sh --with-trivy
    

    Test access from a browser at 172.31.1.13.

    Create the certificate directory (on every machine that will pull images via Docker); with a self-signed certificate this is the only way to make Docker trust the registry.

    [root@harbor harbor]# mkdir /etc/docker/certs.d/harbor.longxuan.vip -p
    

    Copy the certificate

    [root@harbor harbor]# cd certs/
    [root@harbor certs]# ll
    total 16
    drwxr-xr-x 2 root root 4096 Jul 24 01:50 ./
    drwxr-xr-x 4 root root 4096 Jul 24 01:56 ../
    -rw-r--r-- 1 root root 1139 Jul 24 01:50 harbor-ca.crt
    -rw------- 1 root root 1679 Jul 24 01:42 harbor-ca.key
    [root@harbor certs]# scp harbor-ca.crt 172.31.1.10:/etc/docker/certs.d/harbor.longxuan.vip/
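
    To distribute the certificate to every node in one pass, a loop like the following works (a sketch using the host IPs from the table above; assumes root SSH access to each node):

    [root@harbor certs]# for ip in 172.31.1.10 172.31.1.11 172.31.1.12 \
                                   172.31.1.16 172.31.1.17 172.31.1.18; do
        ssh $ip "mkdir -p /etc/docker/certs.d/harbor.longxuan.vip"
        scp harbor-ca.crt $ip:/etc/docker/certs.d/harbor.longxuan.vip/
    done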
    

    Test

    Every machine that pulls images via Docker needs to resolve the Harbor domain name, so set up name resolution first:

    [root@k8s-master1 ~]# echo "172.31.1.13 harbor.longxuan.vip"  >> /etc/hosts
    
    # Log in to the registry
    [root@k8s-master1 ~]# docker login harbor.longxuan.vip
    Username: admin
    Password:
    WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
    Configure a credential helper to remove this warning. See
    https://docs.docker.com/engine/reference/commandline/login/#credentials-store
    
    Login Succeeded
    

    Pull a small official image

    [root@k8s-master1 ~]# docker pull alpine
    Using default tag: latest
    latest: Pulling from library/alpine
    5843afab3874: Pull complete
    Digest: sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0
    Status: Downloaded newer image for alpine:latest
    docker.io/library/alpine:latest
    

    Tag it

    [root@k8s-master1 ~]# docker tag alpine harbor.longxuan.vip/baseimages/alpine
    

    Push it

    [root@k8s-master1 ~]# docker push harbor.longxuan.vip/baseimages/alpine
    The push refers to repository [harbor.longxuan.vip/baseimages/alpine]
    72e830a4dff5: Pushed
    latest: digest: sha256:1775bebec23e1f3ce486989bfc9ff3c4e951690df84aa9f926497d82f2ffca9d size: 528
    

    Check in the Harbor web UI that the image exists.

    Pull it from another node

    [root@k8s-master2 ~]# vim /etc/hosts
    [root@k8s-master2 ~]# docker login harbor.longxuan.vip
    Username: admin
    Password:
    WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
    Configure a credential helper to remove this warning. See
    https://docs.docker.com/engine/reference/commandline/login/#credentials-store
    
    Login Succeeded
    [root@k8s-master2 ~]# 
    [root@k8s-master2 ~]# docker pull harbor.longxuan.vip/baseimages/alpine
    Using default tag: latest
    latest: Pulling from baseimages/alpine
    5843afab3874: Pull complete
    Digest: sha256:1775bebec23e1f3ce486989bfc9ff3c4e951690df84aa9f926497d82f2ffca9d
    Status: Downloaded newer image for harbor.longxuan.vip/baseimages/alpine:latest
    harbor.longxuan.vip/baseimages/alpine:latest
    

    Install kubeadm (run on every node; do not skip a single step)

    # Add the Aliyun Kubernetes apt source
    [root@k8s-master1 ~]# apt-get update && apt-get install -y apt-transport-https
    
    [root@k8s-master1 ~]# curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
    
    [root@k8s-master1 ~]# cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
    deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
    EOF
    
    [root@k8s-master1 ~]# apt update
    

    Install on the master nodes

    [root@k8s-master1 ~]# apt-cache madison kubeadm
    
    # kubectl only needs to be installed where the cluster will be managed from
    [root@k8s-master1 ~]# apt install -y kubeadm=1.20.5-00 kubelet=1.20.5-00 kubectl=1.20.5-00
    

    Install on the worker nodes

    [root@k8s-node1 ~]# apt install -y kubeadm=1.20.5-00 kubelet=1.20.5-00
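
    To keep routine apt upgrades from bumping these pinned versions, holding the packages is common practice (not in the original; run on every node):

    apt-mark hold kubeadm kubelet kubectl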
    

    kubeadm command completion

    [root@k8s-master1 ~]# mkdir /data/scripts -p

    [root@k8s-master1 ~]# kubeadm completion bash > /data/scripts/kubeadm_completion.sh

    [root@k8s-master1 ~]# source /data/scripts/kubeadm_completion.sh

    # Load it on every login
    [root@k8s-master1 ~]# vim /etc/profile
    ...
    source /data/scripts/kubeadm_completion.sh

    [root@k8s-master1 ~]# chmod a+x /data/scripts/kubeadm_completion.sh
    

    kubectl command completion

    [root@k8s-master2 ~]# kubectl completion bash > /data/scripts/kubectl_completion.sh
    [root@k8s-master2 ~]# source /data/scripts/kubectl_completion.sh
    

    List the required images (default version)

    [root@k8s-master1 ~]# kubeadm config images list
    I0724 03:03:42.202676    8619 version.go:254] remote version is much newer: v1.21.3; falling back to: stable-1.20
    k8s.gcr.io/kube-apiserver:v1.20.9
    k8s.gcr.io/kube-controller-manager:v1.20.9
    k8s.gcr.io/kube-scheduler:v1.20.9
    k8s.gcr.io/kube-proxy:v1.20.9
    k8s.gcr.io/pause:3.2
    k8s.gcr.io/etcd:3.4.13-0
    k8s.gcr.io/coredns:1.7.0
    

    List the images for a specific version

    [root@k8s-master1 ~]# kubeadm config images list --kubernetes-version v1.20.5
    k8s.gcr.io/kube-apiserver:v1.20.5
    k8s.gcr.io/kube-controller-manager:v1.20.5
    k8s.gcr.io/kube-scheduler:v1.20.5
    k8s.gcr.io/kube-proxy:v1.20.5
    k8s.gcr.io/pause:3.2
    k8s.gcr.io/etcd:3.4.13-0
    k8s.gcr.io/coredns:1.7.0
    

    Pull them from a domestic mirror instead (a script; run it on every master to speed up installation)

    [root@k8s-master1 ~]# cat k8s-v1.20.5-install.sh
    #!/bin/bash
    # Images k8s-v1.20.5
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.20.5
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.20.5
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.20.5
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.20.5
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.13-0
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.7.0
    

    初始化

    # 172.31.1.10
    [root@k8s-master1 ~]# kubeadm init \
    --apiserver-advertise-address=172.31.1.10 \
    --control-plane-endpoint=172.31.1.188 \
    --apiserver-bind-port=6443 \
    --kubernetes-version=v1.20.5 \
    --pod-network-cidr=10.100.0.0/16 \
    --service-cidr=10.200.0.0/16 \
    --service-dns-domain=longxuan.local \
    --image-repository=registry.cn-hangzhou.aliyuncs.com/google_containers \
    --ignore-preflight-errors=swap
    

    On success, the output ends with the following:

    Your Kubernetes control-plane has initialized successfully!
    
    To start using your cluster, you need to run the following as a regular user:
    
      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    Alternatively, if you are the root user, you can run:
    
      export KUBECONFIG=/etc/kubernetes/admin.conf
    
    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
      https://kubernetes.io/docs/concepts/cluster-administration/addons/
    
    You can now join any number of control-plane nodes by copying certificate authorities
    and service account keys on each node and then running the following as root:
    
    # Join additional master nodes
      kubeadm join 172.31.1.188:6443 --token jumav2.xxqxqx8sm49qqpkb \
        --discovery-token-ca-cert-hash sha256:0ab1061fcfe2543fc53694513329b332cbc78ebf49600ecb40a0ee226cbd4b63 \
        --control-plane
    
    Then you can join any number of worker nodes by running the following on each as root:
    
    # Join worker nodes
    kubeadm join 172.31.1.188:6443 --token jumav2.xxqxqx8sm49qqpkb \
        --discovery-token-ca-cert-hash sha256:0ab1061fcfe2543fc53694513329b332cbc78ebf49600ecb40a0ee226cbd4b63
    

    Create the kubeconfig as instructed

    [root@k8s-master1 ~]# mkdir -p $HOME/.kube
    [root@k8s-master1 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    [root@k8s-master1 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
    

    If other master nodes need to run kubectl, do the following:

    # Create the directory
    [root@k8s-server2 ~]# mkdir /root/.kube/ -p
    
    # Copy the kubeconfig
    [root@k8s-server1 m44]# scp /root/.kube/config 172.31.1.11:/root/.kube/
    

    Download the network plugin manifest

    [root@k8s-master1 ~]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
    

    Modify the network manifest

    Pull the required image

    [root@k8s-master1 ~]# docker pull quay.io/coreos/flannel:v0.14.0
    

    Tag it

    [root@k8s-master1 ~]# docker tag quay.io/coreos/flannel:v0.14.0 harbor.longxuan.vip/baseimages/flannel:v0.14.0
    

    Push it to the Harbor registry

    [root@k8s-master1 ~]# docker push harbor.longxuan.vip/baseimages/flannel:v0.14.0
    

    Edit the network config. Note: no # comments may be placed inside the JSON blocks (cni-conf.json / net-conf.json) of this file, and the Network value in net-conf.json must match the pod CIDR passed to kubeadm init (10.100.0.0/16 here).

    ---
    apiVersion: policy/v1beta1
    kind: PodSecurityPolicy
    metadata:
      name: psp.flannel.unprivileged
      annotations:
        seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
        seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
        apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
        apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
    spec:
      privileged: false
      volumes:
      - configMap
      - secret
      - emptyDir
      - hostPath
      allowedHostPaths:
      - pathPrefix: "/etc/cni/net.d"
      - pathPrefix: "/etc/kube-flannel"
      - pathPrefix: "/run/flannel"
      readOnlyRootFilesystem: false
      # Users and groups
      runAsUser:
        rule: RunAsAny
      supplementalGroups:
        rule: RunAsAny
      fsGroup:
        rule: RunAsAny
      # Privilege Escalation
      allowPrivilegeEscalation: false
      defaultAllowPrivilegeEscalation: false
      # Capabilities
      allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
      defaultAddCapabilities: []
      requiredDropCapabilities: []
      # Host namespaces
      hostPID: false
      hostIPC: false
      hostNetwork: true
      hostPorts:
      - min: 0
        max: 65535
      # SELinux
      seLinux:
        # SELinux is unused in CaaSP
        rule: 'RunAsAny'
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: flannel
    rules:
    - apiGroups: ['extensions']
      resources: ['podsecuritypolicies']
      verbs: ['use']
      resourceNames: ['psp.flannel.unprivileged']
    - apiGroups:
      - ""
      resources:
      - pods
      verbs:
      - get
    - apiGroups:
      - ""
      resources:
      - nodes
      verbs:
      - list
      - watch
    - apiGroups:
      - ""
      resources:
      - nodes/status
      verbs:
      - patch
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: flannel
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: flannel
    subjects:
    - kind: ServiceAccount
      name: flannel
      namespace: kube-system
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: flannel
      namespace: kube-system
    ---
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: kube-flannel-cfg
      namespace: kube-system
      labels:
        tier: node
        app: flannel
    data:
      cni-conf.json: |
        {
          "name": "cbr0",
          "cniVersion": "0.3.1",
          "plugins": [
            {
              "type": "flannel",
              "delegate": {
                "hairpinMode": true,
                "isDefaultGateway": true
              }
            },
            {
              "type": "portmap",
              "capabilities": {
                "portMappings": true
              }
            }
          ]
        }
      net-conf.json: |
        {
          "Network": "10.100.0.0/16",  
          "Backend": {
            "Type": "vxlan"
          }
        }
    ---
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: kube-flannel-ds
      namespace: kube-system
      labels:
        tier: node
        app: flannel
    spec:
      selector:
        matchLabels:
          app: flannel
      template:
        metadata:
          labels:
            tier: node
            app: flannel
        spec:
          affinity:
            nodeAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:
                - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                    - linux
          hostNetwork: true
          priorityClassName: system-node-critical
          tolerations:
          - operator: Exists
            effect: NoSchedule
          serviceAccountName: flannel
          initContainers:
          - name: install-cni
            # image changed to the copy pushed to the Harbor registry
            image: harbor.longxuan.vip/baseimages/flannel:v0.14.0
            command:
            - cp
            args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
            volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
          containers:
          - name: kube-flannel
            # image changed to the copy pushed to the Harbor registry
            image: harbor.longxuan.vip/baseimages/flannel:v0.14.0
            command:
            - /opt/bin/flanneld
            args:
            - --ip-masq
            - --kube-subnet-mgr
            resources:
              requests:
                cpu: "100m"
                memory: "50Mi"
              limits:
                cpu: "100m"
                memory: "50Mi"
            securityContext:
              privileged: false
              capabilities:
                add: ["NET_ADMIN", "NET_RAW"]
            env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
          volumes:
          - name: run
            hostPath:
              path: /run/flannel
          - name: cni
            hostPath:
              path: /etc/cni/net.d
          - name: flannel-cfg
            configMap:
              name: kube-flannel-cfg
    

    Deploy the network plugin

    [root@k8s-master1 ~]# kubectl apply -f kube-flannel.yml
    
    podsecuritypolicy.policy/psp.flannel.unprivileged created
    clusterrole.rbac.authorization.k8s.io/flannel created
    clusterrolebinding.rbac.authorization.k8s.io/flannel created
    serviceaccount/flannel created
    configmap/kube-flannel-cfg created
    daemonset.apps/kube-flannel-ds created
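
    To watch the flannel DaemonSet roll out across the nodes (optional; the app=flannel label comes from the manifest above):

    [root@k8s-master1 ~]# kubectl get pod -n kube-system -l app=flannel -w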
    

    Check the nodes

    [root@k8s-master1 ~]# kubectl get node
    

    Delete a pod (the DaemonSet recreates it)

    [root@k8s-master1 ~]# kubectl delete pod kube-flannel-ds-ddpnj -n kube-system
    
    pod "kube-flannel-ds-ddpnj" deleted
    

    List all pods

    [root@k8s-master1 ~]# kubectl get pod -A
    NAMESPACE     NAME                                                READY   STATUS                  RESTARTS   AGE
    kube-system   coredns-54d67798b7-7t9pv                            1/1     Running                 0          73m
    kube-system   coredns-54d67798b7-znmkk                            1/1     Running                 0          73m
    kube-system   etcd-k8s-master2.example.local                      1/1     Running                 0          73m
    kube-system   kube-apiserver-k8s-master2.example.local            1/1     Running                 0          73m
    kube-system   kube-controller-manager-k8s-master2.example.local   1/1     Running                 0          73m
    kube-system   kube-flannel-ds-l7n5s                               1/1     Running                 0          6m1s
    kube-system   kube-flannel-ds-mcxtp                               1/1     Running                 0          25m
    kube-system   kube-proxy-8rrxj                                    1/1     Running                 0          73m
    kube-system   kube-proxy-rkt2m                                    1/1     Running                 0          25m
    kube-system   kube-scheduler-k8s-master2.example.local            1/1     Running                 0          73m
    

    Join the worker nodes

    [root@k8s-node1 ~]# kubeadm join 172.31.1.188:6443 --token jumav2.xxqxqx8sm49qqpkb \
        --discovery-token-ca-cert-hash sha256:0ab1061fcfe2543fc53694513329b332cbc78ebf49600ecb40a0ee226cbd4b63
    

    Join additional master nodes

    Generate a certificate key

    [root@k8s-master1 ~]# kubeadm init phase upload-certs --upload-certs
    I0724 07:55:01.996821   50433 version.go:254] remote version is much newer: v1.21.3; falling back to: stable-1.20
    [upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
    [upload-certs] Using certificate key:
    03c8ae8a4b1e298157011910e110d7acf4855c354710f227c692a0be8ac54617
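
    The bootstrap token printed by kubeadm init expires after 24 hours by default. If it has expired, a fresh worker join command can be generated at any time (standard kubeadm usage, not shown in the original):

    [root@k8s-master1 ~]# kubeadm token create --print-join-command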
    

    Command to join the other masters

    [root@k8s-master3 ~]# kubeadm join 172.31.1.188:6443 --token jumav2.xxqxqx8sm49qqpkb \
        --discovery-token-ca-cert-hash sha256:0ab1061fcfe2543fc53694513329b332cbc78ebf49600ecb40a0ee226cbd4b63 \
        --control-plane --certificate-key 03c8ae8a4b1e298157011910e110d7acf4855c354710f227c692a0be8ac54617
    

    The worker nodes join the cluster automatically, pull the images and start flannel, until every node eventually shows Ready on the master.

    On a single-master setup, to allow pods to run on the master node, remove the taint:

    [root@k8s-master1 ~]# kubectl taint nodes --all node-role.kubernetes.io/master-
    

    Create pods and test the cluster network

    [root@k8s-master1 ~]# kubectl run net-test1 --image=alpine sleep 60000
    pod/net-test1 created
    [root@k8s-master1 ~]# kubectl run net-test2 --image=alpine sleep 60000
    pod/net-test2 created
    [root@k8s-master1 ~]# kubectl run net-test3 --image=alpine sleep 60000
    pod/net-test3 created
    

    Check the pod IPs

    [root@k8s-master1 ~]# kubectl get pod -o wide
    NAME        READY   STATUS    RESTARTS   AGE     IP           NODE                      NOMINATED NODE   READINESS GATES
    net-test1   1/1     Running   0          2m34s   10.100.5.2   k8s-node3.example.local   <none>           <none>
    net-test2   1/1     Running   0          2m27s   10.100.2.2   k8s-node2.example.local   <none>           <none>
    net-test3   1/1     Running   0          2m22s   10.100.1.4   k8s-node1.example.local   <none>           <none>
    

    Test connectivity

    [root@k8s-master1 ~]# kubectl exec -it net-test1 sh
    kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
    / # ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
           valid_lft forever preferred_lft forever
    3: eth0@if6: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue state UP
        link/ether 7e:0c:85:29:c4:8c brd ff:ff:ff:ff:ff:ff
        inet 10.100.5.2/24 brd 10.100.5.255 scope global eth0
           valid_lft forever preferred_lft forever
    / # ping 10.100.2.2
    PING 10.100.2.2 (10.100.2.2): 56 data bytes
    64 bytes from 10.100.2.2: seq=0 ttl=62 time=0.979 ms
    64 bytes from 10.100.2.2: seq=1 ttl=62 time=0.600 ms
    
    # Verify external connectivity
    / # ping www.baidu.com
    PING www.baidu.com (110.242.68.4): 56 data bytes
    64 bytes from 110.242.68.4: seq=0 ttl=127 time=49.085 ms
    64 bytes from 110.242.68.4: seq=1 ttl=127 time=54.397 ms
    64 bytes from 110.242.68.4: seq=2 ttl=127 time=128.386 ms
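
    Cluster DNS can be verified from inside the pod as well (an extra check, not in the original; longxuan.local is the domain passed via --service-dns-domain):

    / # nslookup kubernetes.default.svc.longxuan.local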
    

    Troubleshooting

    If flannel fails to start, coredns cannot start either, because coredns depends on the network plugin (flannel here). Check the flannel logs for errors first, for example:

    [root@k8s-server1 m44]# kubectl logs -f kube-flannel-ds-bn9sd -n kube-system
    I0725 09:49:33.570637       1 main.go:520] Determining IP address of default interface
    I0725 09:49:33.571818       1 main.go:533] Using interface with name eth0 and address 172.18.8.149
    I0725 09:49:33.571867       1 main.go:550] Defaulting external address to interface address (172.18.8.149)
    W0725 09:49:33.572628       1 client_config.go:608] Neither --kubeconfig nor --master was specified.  Using the inClusterConfig.  This might not work.
    E0725 09:49:34.163087       1 main.go:251] Failed to create SubnetManager: error parsing subnet config: invalid character '#' looking for beginning of object key string
    

    Fix:

    # After removing the offending # comment from the JSON block, delete and re-apply the manifest
    [root@k8s-server1 m44]# kubectl delete -f kube-flannel.yaml
    
    [root@k8s-server1 m44]# kubectl apply -f kube-flannel.yaml
    