    Installing a single-master Kubernetes test cluster from binaries

    1. Environment Planning

    1.1 Lab environment plan

    Cluster role      IP               Hostname      Installed components
    Control node      192.168.40.180   k8s-master1   apiserver, controller-manager, scheduler, etcd, docker
    Worker node       192.168.40.181   k8s-node1     kubelet, kube-proxy, docker, calico, coredns

    Lab environment:

    • OS: CentOS 7.6
    • Specs: 4 GiB RAM / 4 vCPU / 100 GB disk
    • Network: VMware NAT mode

    Kubernetes network plan:

    • Kubernetes version: v1.20.7

    • Pod CIDR: 10.0.0.0/16

    • Service CIDR: 10.255.0.0/16

    1.2 Node initialization

    1) Configure a static IP address

    # Give the VM or physical machine a static IP address so it does not change after a reboot. The example below sets the static IP on the master1 host.
    ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0 
    TYPE=Ethernet
    BOOTPROTO=none
    NAME=eth0
    DEVICE=eth0
    ONBOOT=yes
    IPADDR=192.168.40.180    # change according to the environment plan
    NETMASK=255.255.255.0
    GATEWAY=192.168.40.2
    DNS1=223.5.5.5
    
    # Restart the network
    ~]# systemctl restart network
    
    # Test network connectivity
    ~]# ping baidu.com
    PING baidu.com (39.156.69.79) 56(84) bytes of data.
    64 bytes from 39.156.69.79 (39.156.69.79): icmp_seq=1 ttl=128 time=63.2 ms
    64 bytes from 39.156.69.79 (39.156.69.79): icmp_seq=2 ttl=128 time=47.3 ms
    

    2) Set the hostname

    ~]# hostnamectl set-hostname <hostname> && bash
    

    3) Configure the hosts file

    # On all machines
    cat >> /etc/hosts << EOF 
    192.168.40.180 k8s-master1
    192.168.40.181 k8s-node1 
    EOF
    
    # Test
    ~]# ping k8s-master1
    PING k8s-master1 (192.168.40.180) 56(84) bytes of data.
    64 bytes from k8s-master1 (192.168.40.180): icmp_seq=1 ttl=64 time=0.015 ms
    64 bytes from k8s-master1 (192.168.40.180): icmp_seq=2 ttl=64 time=0.047 ms
    

    4) Configure passwordless SSH between hosts

    # Generate an SSH key pair; press Enter at every prompt and leave the passphrase empty
    ssh-keygen -t rsa
    
    # Install the local SSH public key for the corresponding account on the remote hosts
    ssh-copy-id -i .ssh/id_rsa.pub k8s-master1
    ssh-copy-id -i .ssh/id_rsa.pub k8s-node1
    

    5) Stop and disable the firewalld firewall

    systemctl stop firewalld ; systemctl disable firewalld
    

    6) Disable SELinux

    # Disable temporarily
    setenforce 0
    # Disable permanently
    sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
    # Check
    getenforce
    

    7) Disable the swap partition

    # Disable temporarily
    swapoff -a
    # Disable permanently: comment out the swap mount in /etc/fstab
    sed -ri 's/.*swap.*/#&/' /etc/fstab
    # Note: on a cloned VM, also delete the UUID line
    

    8) Adjust kernel parameters

    # 1. Load the br_netfilter module
    modprobe br_netfilter
    
    # 2. Verify the module is loaded
    lsmod |grep br_netfilter
    
    # 3. Set the kernel parameters
    cat > /etc/sysctl.d/k8s.conf <<EOF
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward = 1
    EOF
    
    # 4. Apply the new kernel parameters
    sysctl -p /etc/sysctl.d/k8s.conf
    

    9) Configure the Aliyun yum repo

    # Back up
    mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
    
    # Download a new CentOS-Base.repo
    wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
    
    # Rebuild the yum cache
    yum clean all && yum makecache
    

    10) Configure time synchronization

    # Install the ntpdate command
    yum install ntpdate -y
    
    # Sync with a public NTP source
    ntpdate cn.pool.ntp.org
    
    # Turn the time sync into a cron job
    crontab -e
    * */1 * * * /usr/sbin/ntpdate   cn.pool.ntp.org
    
    # Restart the crond service
    service crond restart
    

    11) Install iptables

    # Install iptables
    yum install iptables-services -y
    
    # Disable iptables
    service iptables stop && systemctl disable iptables
    
    # Flush the firewall rules
    iptables -F
    

    12) Enable IPVS

    Without IPVS, kube-proxy falls back to iptables for packet forwarding, which is less efficient, so the official documentation recommends enabling IPVS.

    # Create the ipvs.modules file
    ~]# vim /etc/sysconfig/modules/ipvs.modules
    #!/bin/bash
    ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
    for kernel_module in ${ipvs_modules}; do
      # only load the module if modinfo can find it
      /sbin/modinfo -F filename ${kernel_module} > /dev/null 2>&1
      if [ $? -eq 0 ]; then
        /sbin/modprobe ${kernel_module}
      fi
    done
    
    # Run the script
    ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
    ip_vs_ftp              13079  0 
    nf_nat                 26787  1 ip_vs_ftp
    ip_vs_sed              12519  0 
    ip_vs_nq               12516  0 
    ip_vs_sh               12688  0 
    ip_vs_dh               12688  0 
    ip_vs_lblcr            12922  0 
    ip_vs_lblc             12819  0 
    ip_vs_wrr              12697  0 
    ip_vs_rr               12600  0 
    ip_vs_wlc              12519  0 
    ip_vs_lc               12516  0 
    ip_vs                 141092  22 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_lblcr,ip_vs_lblc
    nf_conntrack          133387  2 ip_vs,nf_nat
    libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
    

    13) Install base packages

    ~]# yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel wget vim ncurses-devel autoconf automake zlib-devel  python-devel epel-release openssh-server socat  ipvsadm conntrack ntpdate telnet rsync
    

    14) Install docker-ce

    ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    ~]# yum install docker-ce docker-ce-cli containerd.io -y
    ~]# systemctl start docker && systemctl enable docker.service && systemctl status docker
    

    15) Configure Docker registry mirrors

    # Note: set the Docker cgroup driver to systemd (the default is cgroupfs). The kubelet here is configured for systemd, and the two must match.
    ~]# tee /etc/docker/daemon.json << 'EOF'
    {
     "registry-mirrors":["https://rsbud4vc.mirror.aliyuncs.com","https://registry.docker-cn.com","https://docker.mirrors.ustc.edu.cn","https://dockerhub.azk8s.cn","http://hub-mirror.c.163.com","http://qtid6917.mirror.aliyuncs.com", "https://rncxm540.mirror.aliyuncs.com"],
      "exec-opts": ["native.cgroupdriver=systemd"]
    } 
    EOF
    
    ~]# systemctl daemon-reload && systemctl restart docker && systemctl status docker
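
    Optionally, confirm that the cgroup driver actually switched to systemd after the restart:

    ~]# docker info 2>/dev/null | grep -i "cgroup driver"
     Cgroup Driver: systemd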
    

    2. Deploying etcd

    etcd downloads: https://github.com/etcd-io/etcd/releases/

    2.1 Create the etcd working directories

    # Create the configuration directory and the certificate directory
    [root@k8s-master1 ~]# mkdir -p /etc/etcd
    [root@k8s-master1 ~]# mkdir -p /etc/etcd/ssl
    

    2.2 Install the cfssl certificate tooling

    [root@k8s-master1 ~]# mkdir /data/work -p && cd /data/work
    [root@k8s-master1 work]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
    [root@k8s-master1 work]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
    [root@k8s-master1 work]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
    [root@k8s-master1 work]# chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
    [root@k8s-master1 work]# mv cfssl_linux-amd64 /usr/local/bin/cfssl
    [root@k8s-master1 work]# mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
    [root@k8s-master1 work]# mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
    

    2.3 Configure the CA certificate

    1) Create the CA certificate signing request file

    [root@k8s-master1 work]# vim ca-csr.json 
    {
      "CN": "kubernetes",
      "key": {
          "algo": "rsa",
          "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "Hubei",
          "L": "Wuhan",
          "O": "k8s",
          "OU": "system"
        }
      ],
      "ca": {
              "expiry": "87600h"
      }
    }
    
    [root@k8s-master1 work]# cfssl gencert -initca ca-csr.json  | cfssljson -bare ca
    [root@k8s-master1 work]# ll
    total 16
    -rw-r--r-- 1 root root  997 Jul  8 13:37 ca.csr
    -rw-r--r-- 1 root root  252 Jul  8 13:37 ca-csr.json
    -rw------- 1 root root 1675 Jul  8 13:37 ca-key.pem
    -rw-r--r-- 1 root root 1346 Jul  8 13:37 ca.pem
    

    2) Create the CA config JSON file

    [root@k8s-master1 work]# vim ca-config.json 
    {
      "signing": {
          "default": {
              "expiry": "87600h"
            },
          "profiles": {
              "kubernetes": {
                  "usages": [
                      "signing",
                      "key encipherment",
                      "server auth",
                      "client auth"
                  ],
                  "expiry": "87600h"
              }
          }
      }
    }
    

    2.4 Generate the etcd certificate

    # etcd certificate request; replace the hosts IPs with your own master node IP
    [root@k8s-master1 work]# vim etcd-csr.json 
    {
      "CN": "etcd",
      "hosts": [
        "127.0.0.1",
        "192.168.40.180"
      ],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [{
        "C": "CN",
        "ST": "Hubei",
        "L": "Wuhan",
        "O": "k8s",
        "OU": "system"
      }]
    }
    
    [root@k8s-master1 work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson  -bare etcd
    
    [root@k8s-master1 work]# ls etcd*.pem
    etcd-key.pem  etcd.pem
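
    Optionally, the issued certificate can be inspected with the cfssl-certinfo tool installed earlier to confirm its SAN list and expiry:

    [root@k8s-master1 work]# cfssl-certinfo -cert etcd.pem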
    

    2.5 Deploy etcd

    1) Upload and unpack the package

    # Upload etcd-v3.4.13-linux-amd64.tar.gz to /data/work on the k8s-master1 node
    [root@k8s-master1 work]# pwd
    /data/work
    [root@k8s-master1 work]# tar -xf etcd-v3.4.13-linux-amd64.tar.gz
    [root@k8s-master1 work]# cp -p etcd-v3.4.13-linux-amd64/etcd* /usr/local/bin/
    

    2) Create the configuration file

    [root@k8s-master1 work]# vim etcd.conf 
    #[Member]
    ETCD_NAME="etcd1"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://192.168.40.180:2380"
    ETCD_LISTEN_CLIENT_URLS="https://192.168.40.180:2379,http://127.0.0.1:2379"
    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.40.180:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.40.180:2379"
    ETCD_INITIAL_CLUSTER="etcd1=https://192.168.40.180:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    

    Parameter notes:

    ETCD_NAME: node name, unique within the cluster

    ETCD_DATA_DIR: data directory

    ETCD_LISTEN_PEER_URLS: listen address for peer (cluster) traffic

    ETCD_LISTEN_CLIENT_URLS: listen address for client traffic

    ETCD_INITIAL_ADVERTISE_PEER_URLS: peer address advertised to the cluster

    ETCD_ADVERTISE_CLIENT_URLS: client address advertised to clients

    ETCD_INITIAL_CLUSTER: addresses of all cluster members

    ETCD_INITIAL_CLUSTER_TOKEN: cluster token

    ETCD_INITIAL_CLUSTER_STATE: initial cluster state; "new" for a new cluster, "existing" to join an existing cluster

    3) Create the systemd service file

    [root@k8s-master1 work]# vim etcd.service 
    [Unit]
    Description=Etcd Server
    After=network.target
    After=network-online.target
    Wants=network-online.target
     
    [Service]
    Type=notify
    EnvironmentFile=-/etc/etcd/etcd.conf
    WorkingDirectory=/var/lib/etcd/
    ExecStart=/usr/local/bin/etcd \
      --cert-file=/etc/etcd/ssl/etcd.pem \
      --key-file=/etc/etcd/ssl/etcd-key.pem \
      --trusted-ca-file=/etc/etcd/ssl/ca.pem \
      --peer-cert-file=/etc/etcd/ssl/etcd.pem \
      --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
      --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
      --peer-client-cert-auth \
      --client-cert-auth
    Restart=on-failure
    RestartSec=5
    LimitNOFILE=65536
     
    [Install]
    WantedBy=multi-user.target
    

    4) Copy the files into place

    [root@k8s-master1 work]# cp ca*.pem /etc/etcd/ssl/
    [root@k8s-master1 work]# cp etcd*.pem /etc/etcd/ssl/
    [root@k8s-master1 work]# cp etcd.conf /etc/etcd/
    [root@k8s-master1 work]# cp etcd.service /usr/lib/systemd/system/
    [root@k8s-master1 work]# mkdir -p /var/lib/etcd/default.etcd
    

    5) Start etcd

    [root@k8s-master1 work]# systemctl daemon-reload
    [root@k8s-master1 work]# systemctl enable etcd.service
    [root@k8s-master1 work]# systemctl start etcd.service
    [root@k8s-master1 work]# systemctl status etcd
    

    6) Check the status

    [root@k8s-master1 work]# export ETCDCTL_API=3
    [root@k8s-master1 work]# /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.40.180:2379  endpoint health
    +-----------------------------+--------+-------------+-------+
    |          ENDPOINT           | HEALTH |    TOOK     | ERROR |
    +-----------------------------+--------+-------------+-------+
    | https://192.168.40.180:2379 |   true | 27.087166ms |       |
    +-----------------------------+--------+-------------+-------+
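
    Optionally, the member list can also be printed with the same TLS flags:

    [root@k8s-master1 work]# /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.40.180:2379 member list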
    

    3. Deploying the Kubernetes Components

    Binary package downloads: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/

    3.1 Download the packages

    # Upload kubernetes-server-linux-amd64.tar.gz to /data/work on k8s-master1
    [root@k8s-master1 work]# tar zxvf kubernetes-server-linux-amd64.tar.gz
    [root@k8s-master1 work]# cd kubernetes/server/bin/
    [root@k8s-master1 bin]# cp kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/
    [root@k8s-master1 bin]# scp kubelet kube-proxy k8s-node1:/usr/local/bin/
    [root@k8s-master1 bin]# cd /data/work/
    [root@k8s-master1 work]# mkdir -p /etc/kubernetes/ 
    [root@k8s-master1 work]# mkdir -p /etc/kubernetes/ssl
    [root@k8s-master1 work]# mkdir /var/log/kubernetes
    

    3.2 Deploy the kube-apiserver component

    1) Create the token.csv file

    # Format: token,user name,UID,user group
    [root@k8s-master1 work]# cat > token.csv << EOF
    $(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
    EOF
    [root@k8s-master1 work]# cat token.csv 
    e4997d93ecf57d9f0b82fd2978137770,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
    

    2) Create the CSR request file and generate the certificate

    [root@k8s-master1 work]# vim kube-apiserver-csr.json 
    {
      "CN": "kubernetes",
      "hosts": [
        "127.0.0.1",
        "192.168.40.180",
        "192.168.40.181",
        "10.255.0.1",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
      ],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "Hubei",
          "L": "Wuhan",
          "O": "k8s",
          "OU": "system"
        }
      ]
    }
    
    [root@k8s-master1 work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver
    [root@k8s-master1 work]# ll kube-apiserver*
    -rw-r--r-- 1 root root 1245 Jul  8 14:21 kube-apiserver.csr
    -rw-r--r-- 1 root root  456 Jul  8 14:20 kube-apiserver-csr.json
    -rw------- 1 root root 1675 Jul  8 14:21 kube-apiserver-key.pem
    -rw-r--r-- 1 root root 1610 Jul  8 14:21 kube-apiserver.pem
    

    3) Create the kube-apiserver configuration file

    [root@k8s-master1 work]# vim kube-apiserver.conf 
    KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
      --anonymous-auth=false \
      --bind-address=192.168.40.180 \
      --secure-port=6443 \
      --advertise-address=192.168.40.180 \
      --insecure-port=0 \
      --authorization-mode=Node,RBAC \
      --runtime-config=api/all=true \
      --enable-bootstrap-token-auth \
      --service-cluster-ip-range=10.255.0.0/16 \
      --token-auth-file=/etc/kubernetes/token.csv \
      --service-node-port-range=30000-50000 \
      --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \
      --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
      --client-ca-file=/etc/kubernetes/ssl/ca.pem \
      --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
      --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
      --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
      --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --etcd-cafile=/etc/etcd/ssl/ca.pem \
      --etcd-certfile=/etc/etcd/ssl/etcd.pem \
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
      --etcd-servers=https://192.168.40.180:2379 \
      --enable-swagger-ui=true \
      --allow-privileged=true \
      --apiserver-count=3 \
      --audit-log-maxage=30 \
      --audit-log-maxbackup=3 \
      --audit-log-maxsize=100 \
      --audit-log-path=/var/log/kube-apiserver-audit.log \
      --event-ttl=1h \
      --alsologtostderr=true \
      --logtostderr=false \
      --log-dir=/var/log/kubernetes \
      --v=4"
    

    Configuration parameter notes:

    --logtostderr: enable logging to stderr

    --v: log verbosity level

    --log-dir: log directory

    --etcd-servers: etcd cluster addresses

    --bind-address: listen address

    --secure-port: https secure port

    --advertise-address: address advertised to the cluster

    --allow-privileged: allow privileged containers

    --service-cluster-ip-range: Service virtual IP range

    --enable-admission-plugins: admission control plugins

    --authorization-mode: authorization modes; enables RBAC authorization and Node self-management

    --enable-bootstrap-token-auth: enable the TLS bootstrap mechanism

    --token-auth-file: bootstrap token file

    --service-node-port-range: default port range for NodePort Services

    --kubelet-client-xxx: client certificate the apiserver uses to access kubelets

    --tls-xxx-file: apiserver https certificates

    --etcd-xxxfile: certificates for connecting to the etcd cluster

    --audit-log-xxx: audit log settings

    4) Create the systemd service file

    [root@k8s-master1 work]# vim kube-apiserver.service 
    [Unit]
    Description=Kubernetes API Server
    Documentation=https://github.com/kubernetes/kubernetes
    After=etcd.service
    Wants=etcd.service
     
    [Service]
    EnvironmentFile=-/etc/kubernetes/kube-apiserver.conf
    ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
    Restart=on-failure
    RestartSec=5
    Type=notify
    LimitNOFILE=65536
     
    [Install]
    WantedBy=multi-user.target
    

    5) Copy the files into place

    [root@k8s-master1 work]# cp ca*.pem /etc/kubernetes/ssl
    [root@k8s-master1 work]# cp kube-apiserver*.pem /etc/kubernetes/ssl/
    [root@k8s-master1 work]# cp token.csv /etc/kubernetes/
    [root@k8s-master1 work]# cp kube-apiserver.conf /etc/kubernetes/
    [root@k8s-master1 work]# cp kube-apiserver.service /usr/lib/systemd/system/
    

    6) Start and test

    [root@k8s-master1 work]# systemctl daemon-reload && systemctl enable kube-apiserver && systemctl start kube-apiserver
    [root@k8s-master1 work]# systemctl status kube-apiserver
    
    # The following response is expected
    [root@k8s-master1 work]# curl --insecure https://192.168.40.180:6443/
    {
      "kind": "Status",
      "apiVersion": "v1",
      "metadata": {
        
      },
      "status": "Failure",
      "message": "Unauthorized",
      "reason": "Unauthorized",
      "code": 401
    }
    

    3.3 Deploy the kubectl component

    1) Create the CSR request file and generate the certificate

    [root@k8s-master1 work]# vim admin-csr.json 
    {
      "CN": "admin",
      "hosts": [],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "Hubei",
          "L": "Wuhan",
          "O": "system:masters",             
          "OU": "system"
        }
      ]
    }
    
    [root@k8s-master1 work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
    [root@k8s-master1 work]# cp admin*.pem /etc/kubernetes/ssl/
    

    2) Create the kubeconfig file

    A kubeconfig is the configuration file for kubectl; it contains everything needed to reach the apiserver, such as the apiserver address, the CA certificate, and the client's own certificate.

    # 1. Set the cluster parameters
    [root@k8s-master1 work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.40.180:6443 --kubeconfig=kube.config
    
    # 2. Set the client credentials
    [root@k8s-master1 work]# kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config
    
    # 3. Set the context parameters
    [root@k8s-master1 work]# kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config
    
    # 4. Set the default context
    [root@k8s-master1 work]# kubectl config use-context kubernetes --kubeconfig=kube.config
    [root@k8s-master1 work]# mkdir ~/.kube -p
    [root@k8s-master1 work]# cp kube.config ~/.kube/config
    
    # 5. Grant the kubernetes user access to the kubelet API
    [root@k8s-master1 work]# kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes
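
    Optionally, verify the generated kubeconfig before relying on it:

    [root@k8s-master1 work]# kubectl config get-contexts --kubeconfig=kube.config
    [root@k8s-master1 work]# kubectl config view --kubeconfig=kube.config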
    

    3) Check the cluster component status

    [root@k8s-master1 work]# kubectl cluster-info
    Kubernetes control plane is running at https://192.168.40.180:6443
    
    [root@k8s-master1 work]# kubectl get componentstatuses
    Warning: v1 ComponentStatus is deprecated in v1.19+
    NAME                 STATUS      MESSAGE                                                                                       ERROR
    controller-manager   Unhealthy   Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused   
    scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused   
    etcd-0               Healthy     {"health":"true"}
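    # Note: controller-manager and scheduler show Unhealthy here because they have not been deployed yet; this is expected at this point.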
    
    [root@k8s-master1 work]# kubectl get all --all-namespaces
    NAMESPACE   NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
    default     service/kubernetes   ClusterIP   10.255.0.1   <none>        443/TCP 
    

    4) Configure kubectl command completion

    [root@k8s-master1 work]# yum install bash-completion -y
    [root@k8s-master1 work]# kubectl completion bash > /etc/bash_completion.d/kubectl
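
    Completion takes effect in new login shells; to load it in the current shell:

    [root@k8s-master1 work]# source /etc/bash_completion.d/kubectl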
    

    3.4 Deploy the kube-controller-manager component

    1) Create the CSR request file and generate the certificate

    [root@k8s-master1 work]# vim kube-controller-manager-csr.json 
    {
        "CN": "system:kube-controller-manager",
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "hosts": [
          "127.0.0.1",
          "192.168.40.180"
        ],
        "names": [
          {
            "C": "CN",
            "ST": "Hubei",
            "L": "Wuhan",
            "O": "system:kube-controller-manager",
            "OU": "system"
          }
        ]
    }
    
    [root@k8s-master1 work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
    [root@k8s-master1 work]# ll kube-controller-manager*.pem
    -rw------- 1 root root 1675 Jul  8 15:05 kube-controller-manager-key.pem
    -rw-r--r-- 1 root root 1480 Jul  8 15:05 kube-controller-manager.pem
    

    2) Create the kube-controller-manager kubeconfig

    # 1. Set the cluster parameters
    [root@k8s-master1 work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.40.180:6443 --kubeconfig=kube-controller-manager.kubeconfig
    
    # 2. Set the client credentials
    [root@k8s-master1 work]# kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig
    
    # 3. Set the context parameters
    [root@k8s-master1 work]# kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
    
    # 4. Set the default context
    [root@k8s-master1 work]# kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
    

    3) Create the configuration file

    [root@k8s-master1 work]# vim kube-controller-manager.conf 
    KUBE_CONTROLLER_MANAGER_OPTS="--port=0 \
      --secure-port=10252 \
      --bind-address=127.0.0.1 \
      --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
      --service-cluster-ip-range=10.255.0.0/16 \
      --cluster-name=kubernetes \
      --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
      --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
      --allocate-node-cidrs=true \
      --cluster-cidr=10.0.0.0/16 \
      --experimental-cluster-signing-duration=87600h \
      --root-ca-file=/etc/kubernetes/ssl/ca.pem \
      --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
      --leader-elect=true \
      --feature-gates=RotateKubeletServerCertificate=true \
      --controllers=*,bootstrapsigner,tokencleaner \
      --horizontal-pod-autoscaler-use-rest-clients=true \
      --horizontal-pod-autoscaler-sync-period=10s \
      --tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \
      --tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
      --use-service-account-credentials=true \
      --alsologtostderr=true \
      --logtostderr=false \
      --log-dir=/var/log/kubernetes \
      --v=2"
    

    4) Create the systemd service file

    [root@k8s-master1 work]# vim kube-controller-manager.service 
    [Unit]
    Description=Kubernetes Controller Manager
    Documentation=https://github.com/kubernetes/kubernetes
    [Service]
    EnvironmentFile=-/etc/kubernetes/kube-controller-manager.conf
    ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
    Restart=on-failure
    RestartSec=5
    [Install]
    WantedBy=multi-user.target
    

    5) Start the service

    [root@k8s-master1 work]# cp kube-controller-manager*.pem /etc/kubernetes/ssl/
    [root@k8s-master1 work]# cp kube-controller-manager.kubeconfig /etc/kubernetes/
    [root@k8s-master1 work]# cp kube-controller-manager.conf /etc/kubernetes/
    [root@k8s-master1 work]# cp kube-controller-manager.service /usr/lib/systemd/system/
    
    [root@k8s-master1 work]# systemctl daemon-reload 
    [root@k8s-master1 work]# systemctl enable kube-controller-manager
    [root@k8s-master1 work]# systemctl start kube-controller-manager
    [root@k8s-master1 work]# systemctl status kube-controller-manager
    

    3.5 Deploy the kube-scheduler component

    1) Create the CSR request and generate the certificate

    [root@k8s-master1 work]# vim kube-scheduler-csr.json 
    {
        "CN": "system:kube-scheduler",
        "hosts": [
          "127.0.0.1",
          "192.168.40.180"
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
          {
            "C": "CN",
            "ST": "Hubei",
            "L": "Wuhan",
            "O": "system:kube-scheduler",
            "OU": "system"
          }
        ]
    }
    
    [root@k8s-master1 work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
    [root@k8s-master1 work]# ll kube-scheduler*.pem
    -rw------- 1 root root 1679 Jul  8 15:21 kube-scheduler-key.pem
    -rw-r--r-- 1 root root 1456 Jul  8 15:21 kube-scheduler.pem
    

    2) Create the kube-scheduler kubeconfig

    # 1. Set the cluster parameters
    [root@k8s-master1 work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.40.180:6443 --kubeconfig=kube-scheduler.kubeconfig
    
    # 2. Set the client credentials
    [root@k8s-master1 work]# kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig
    
    # 3. Set the context parameters
    [root@k8s-master1 work]# kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
    
    # 4. Set the default context
    [root@k8s-master1 work]# kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
    

    3) Create the configuration file

    [root@k8s-master1 work]# vim kube-scheduler.conf 
    KUBE_SCHEDULER_OPTS="--address=127.0.0.1 \
    --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
    --leader-elect=true \
    --alsologtostderr=true \
    --logtostderr=false \
    --log-dir=/var/log/kubernetes \
    --v=2"
    

    4) Create the systemd service file

    [root@k8s-master1 work]# vim kube-scheduler.service 
    [Unit]
    Description=Kubernetes Scheduler
    Documentation=https://github.com/kubernetes/kubernetes
     
    [Service]
    EnvironmentFile=-/etc/kubernetes/kube-scheduler.conf
    ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
    Restart=on-failure
    RestartSec=5
     
    [Install]
    WantedBy=multi-user.target
    

    5) Start the service

    [root@k8s-master1 work]# cp kube-scheduler*.pem /etc/kubernetes/ssl/
    [root@k8s-master1 work]# cp kube-scheduler.kubeconfig /etc/kubernetes/
    [root@k8s-master1 work]# cp kube-scheduler.conf /etc/kubernetes/
    [root@k8s-master1 work]# cp kube-scheduler.service /usr/lib/systemd/system/
    
    [root@k8s-master1 work]# systemctl daemon-reload
    [root@k8s-master1 work]# systemctl enable kube-scheduler
    [root@k8s-master1 work]# systemctl start kube-scheduler
    [root@k8s-master1 work]# systemctl status kube-scheduler
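
    Optionally, confirm all three control-plane services are active after this step:

    [root@k8s-master1 work]# systemctl is-active kube-apiserver kube-controller-manager kube-scheduler
    active
    active
    active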
    

    3.6 Deploy the kubelet component

    1) Create kubelet-bootstrap.kubeconfig

    [root@k8s-master1 work]# BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/token.csv)
    [root@k8s-master1 work]# echo $BOOTSTRAP_TOKEN
    e4997d93ecf57d9f0b82fd2978137770
    
    [root@k8s-master1 work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.40.180:6443 --kubeconfig=kubelet-bootstrap.kubeconfig
    
    [root@k8s-master1 work]# kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig
    
    [root@k8s-master1 work]# kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
    
    [root@k8s-master1 work]# kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig
    
    [root@k8s-master1 work]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
    

    2) Create the kubelet.json configuration file

    [root@k8s-master1 work]# vim kubelet.json 
    {
      "kind": "KubeletConfiguration",
      "apiVersion": "kubelet.config.k8s.io/v1beta1",
      "authentication": {
        "x509": {
          "clientCAFile": "/etc/kubernetes/ssl/ca.pem"
        },
        "webhook": {
          "enabled": true,
          "cacheTTL": "2m0s"
        },
        "anonymous": {
          "enabled": false
        }
      },
      "authorization": {
        "mode": "Webhook",
        "webhook": {
          "cacheAuthorizedTTL": "5m0s",
          "cacheUnauthorizedTTL": "30s"
        }
      },
      "address": "192.168.40.181",
      "port": 10250,
      "readOnlyPort": 10255,
      "cgroupDriver": "systemd",
      "hairpinMode": "promiscuous-bridge",
      "serializeImagePulls": false,
      "featureGates": {
        "RotateKubeletClientCertificate": true,
        "RotateKubeletServerCertificate": true
      },
      "clusterDomain": "cluster.local.",
      "clusterDNS": ["10.255.0.2"]
    }
    

    3) Create the systemd service file

    [root@k8s-master1 work]# vim kubelet.service 
    [Unit]
    Description=Kubernetes Kubelet
    Documentation=https://github.com/kubernetes/kubernetes
    After=docker.service
    Requires=docker.service
    [Service]
    WorkingDirectory=/var/lib/kubelet
    ExecStart=/usr/local/bin/kubelet \
      --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
      --cert-dir=/etc/kubernetes/ssl \
      --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
      --config=/etc/kubernetes/kubelet.json \
      --network-plugin=cni \
      --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2 \
      --alsologtostderr=true \
      --logtostderr=false \
      --log-dir=/var/log/kubernetes \
      --v=2
    Restart=on-failure
    RestartSec=5
     
    [Install]
    WantedBy=multi-user.target
    

    4) Copy the files into place

    [root@k8s-node1 ~]# mkdir /etc/kubernetes/ssl -p
    [root@k8s-master1 work]# scp kubelet-bootstrap.kubeconfig kubelet.json k8s-node1:/etc/kubernetes/
    [root@k8s-master1 work]# scp  ca.pem k8s-node1:/etc/kubernetes/ssl/
    [root@k8s-master1 work]# scp  kubelet.service k8s-node1:/usr/lib/systemd/system/
    

    5) Start the service

    [root@k8s-node1 ~]# mkdir -p /var/lib/kubelet
    [root@k8s-node1 ~]# mkdir -p /var/log/kubernetes
    [root@k8s-node1 ~]# systemctl daemon-reload
    [root@k8s-node1 ~]# systemctl enable kubelet
    [root@k8s-node1 ~]# systemctl start kubelet
    [root@k8s-node1 ~]# systemctl status kubelet
    

    6) Approve the bootstrap request on the control node

    [root@k8s-master1 work]# kubectl get csr
    NAME                                                   AGE   SIGNERNAME                                    REQUESTOR           CONDITION
    node-csr-R1pT4yS6b9nHGRfgqFCuSWftfeYHyaxAPqesWoYJqKg   49s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
    [root@k8s-master1 work]# kubectl certificate approve node-csr-R1pT4yS6b9nHGRfgqFCuSWftfeYHyaxAPqesWoYJqKg
    certificatesigningrequest.certificates.k8s.io/node-csr-R1pT4yS6b9nHGRfgqFCuSWftfeYHyaxAPqesWoYJqKg approved
    [root@k8s-master1 work]# kubectl get csr
    NAME                                                   AGE   SIGNERNAME                                    REQUESTOR           CONDITION
    node-csr-R1pT4yS6b9nHGRfgqFCuSWftfeYHyaxAPqesWoYJqKg   76s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued
    
    [root@k8s-master1 work]# kubectl get nodes 
    NAME        STATUS     ROLES    AGE   VERSION
    k8s-node1   NotReady   <none>   19s   v1.20.7    # network plugin not installed yet
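
    Tip: if several nodes bootstrap at the same time, all pending CSRs can be approved in one go:

    [root@k8s-master1 work]# kubectl get csr -o name | xargs kubectl certificate approve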
    

    3.7 Deploy the kube-proxy component

    1) Create the CSR request and generate the certificate

    [root@k8s-master1 work]# vim kube-proxy-csr.json 
    {
      "CN": "system:kube-proxy",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "Hubei",
          "L": "Wuhan",
          "O": "k8s",
          "OU": "system"
        }
      ]
    }
    
    [root@k8s-master1 work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
    [root@k8s-master1 work]# ll kube-proxy*.pem
    -rw------- 1 root root 1679 Jul  8 15:59 kube-proxy-key.pem
    -rw-r--r-- 1 root root 1391 Jul  8 15:59 kube-proxy.pem
    

    2) Create the kubeconfig file

    [root@k8s-master1 work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.40.180:6443 --kubeconfig=kube-proxy.kubeconfig
    
    [root@k8s-master1 work]# kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
    
    [root@k8s-master1 work]# kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig
    
    [root@k8s-master1 work]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
    

    3) Create the kube-proxy configuration file

    [root@k8s-master1 work]# vim kube-proxy.yaml 
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    bindAddress: 192.168.40.181
    clientConnection:
      kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
    clusterCIDR: 192.168.40.0/24
    healthzBindAddress: 192.168.40.181:10256
    kind: KubeProxyConfiguration
    metricsBindAddress: 192.168.40.181:10249
    mode: "ipvs"
    

    4) Create the systemd service file

    [root@k8s-master1 work]# vim kube-proxy.service 
    [Unit]
    Description=Kubernetes Kube-Proxy Server
    Documentation=https://github.com/kubernetes/kubernetes
    After=network.target
     
    [Service]
    WorkingDirectory=/var/lib/kube-proxy
    ExecStart=/usr/local/bin/kube-proxy \
      --config=/etc/kubernetes/kube-proxy.yaml \
      --alsologtostderr=true \
      --logtostderr=false \
      --log-dir=/var/log/kubernetes \
      --v=2
    Restart=on-failure
    RestartSec=5
    LimitNOFILE=65536
     
    [Install]
    WantedBy=multi-user.target
    

    5) Copy the files into place

    [root@k8s-master1 work]# scp  kube-proxy.kubeconfig kube-proxy.yaml k8s-node1:/etc/kubernetes/
    [root@k8s-master1 work]# scp  kube-proxy.service k8s-node1:/usr/lib/systemd/system/
    

    6) Start the service

    [root@k8s-node1 ~]# mkdir -p /var/lib/kube-proxy
    [root@k8s-node1 ~]# systemctl daemon-reload
    [root@k8s-node1 ~]# systemctl enable kube-proxy
    [root@k8s-node1 ~]# systemctl restart kube-proxy
    [root@k8s-node1 ~]# systemctl status kube-proxy
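
    Since kube-proxy runs in ipvs mode, the ipvsadm tool installed with the base packages can optionally be used to confirm that IPVS virtual servers are being programmed:

    [root@k8s-node1 ~]# ipvsadm -Ln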
    

    3.8 Deploy Calico

    [root@k8s-master1 work]# cat calico.yaml 
    # Calico Version v3.5.3
    # https://docs.projectcalico.org/v3.5/releases#v3.5.3
    # This manifest includes the following component versions:
    #   calico/node:v3.5.3
    #   calico/cni:v3.5.3
    
    # This ConfigMap is used to configure a self-hosted Calico installation.
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: calico-config
      namespace: kube-system
    data:
      # Typha is disabled.
      typha_service_name: "none"
      # Configure the Calico backend to use.
      calico_backend: "bird"
    
      # Configure the MTU to use
      veth_mtu: "1440"
    
      # The CNI network configuration to install on each node.  The special
      # values in this config will be automatically populated.
      cni_network_config: |-
        {
          "name": "k8s-pod-network",
          "cniVersion": "0.3.0",
          "plugins": [
            {
              "type": "calico",
              "log_level": "info",
              "datastore_type": "kubernetes",
              "nodename": "__KUBERNETES_NODE_NAME__",
              "mtu": __CNI_MTU__,
              "ipam": {
                "type": "host-local",
                "subnet": "usePodCidr"
              },
              "policy": {
                  "type": "k8s"
              },
              "kubernetes": {
                  "kubeconfig": "__KUBECONFIG_FILEPATH__"
              }
            },
            {
              "type": "portmap",
              "snat": true,
              "capabilities": {"portMappings": true}
            }
          ]
        }
    
    ---
    
    # This manifest installs the calico/node container, as well
    # as the Calico CNI plugins and network config on
    # each master and worker node in a Kubernetes cluster.
    kind: DaemonSet
    apiVersion: apps/v1
    metadata:
      name: calico-node
      namespace: kube-system
      labels:
        k8s-app: calico-node
    spec:
      selector:
        matchLabels:
          k8s-app: calico-node
      updateStrategy:
        type: RollingUpdate
        rollingUpdate:
          maxUnavailable: 1
      template:
        metadata:
          labels:
            k8s-app: calico-node
          annotations:
            # This, along with the CriticalAddonsOnly toleration below,
            # marks the pod as a critical add-on, ensuring it gets
            # priority scheduling and that its resources are reserved
            # if it ever gets evicted.
            scheduler.alpha.kubernetes.io/critical-pod: ''
        spec:
          nodeSelector:
            beta.kubernetes.io/os: linux
          hostNetwork: true
          tolerations:
            # Make sure calico-node gets scheduled on all nodes.
            - effect: NoSchedule
              operator: Exists
            # Mark the pod as a critical add-on for rescheduling.
            - key: CriticalAddonsOnly
              operator: Exists
            - effect: NoExecute
              operator: Exists
          serviceAccountName: calico-node
          # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
          # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
          terminationGracePeriodSeconds: 0
          initContainers:
            # This container installs the Calico CNI binaries
            # and CNI network config file on each node.
            - name: install-cni
              image: quay.io/calico/cni:v3.5.3    # the image registry can be changed
              command: ["/install-cni.sh"]
              env:
                # Name of the CNI config file to create.
                - name: CNI_CONF_NAME
                  value: "10-calico.conflist"
                # The CNI network config to install on each node.
                - name: CNI_NETWORK_CONFIG
                  valueFrom:
                    configMapKeyRef:
                      name: calico-config
                      key: cni_network_config
                # Set the hostname based on the k8s node name.
                - name: KUBERNETES_NODE_NAME
                  valueFrom:
                    fieldRef:
                      fieldPath: spec.nodeName
                # CNI MTU Config variable
                - name: CNI_MTU
                  valueFrom:
                    configMapKeyRef:
                      name: calico-config
                      key: veth_mtu
                # Prevents the container from sleeping forever.
                - name: SLEEP
                  value: "false"
              volumeMounts:
                - mountPath: /host/opt/cni/bin
                  name: cni-bin-dir
                - mountPath: /host/etc/cni/net.d
                  name: cni-net-dir
          containers:
            # Runs calico/node container on each Kubernetes node.  This
            # container programs network policy and routes on each
            # host.
            - name: calico-node
              image: quay.io/calico/node:v3.5.3    # the image registry can be changed
              env:
                # Use Kubernetes API as the backing datastore.
                - name: DATASTORE_TYPE
                  value: "kubernetes"
                # Wait for the datastore.
                - name: WAIT_FOR_DATASTORE
                  value: "true"
                # Set based on the k8s node name.
                - name: NODENAME
                  valueFrom:
                    fieldRef:
                      fieldPath: spec.nodeName
                # Choose the backend to use.
                - name: CALICO_NETWORKING_BACKEND
                  valueFrom:
                    configMapKeyRef:
                      name: calico-config
                      key: calico_backend
                # Cluster type to identify the deployment type
                - name: CLUSTER_TYPE
                  value: "k8s,bgp"
                # Auto-detect the BGP IP address.
                - name: IP
                  value: "autodetect"
                - name: IP_AUTODETECTION_METHOD
                  value: "can-reach=192.168.40.181"
                # Enable IPIP
                - name: CALICO_IPV4POOL_IPIP
                  value: "Always"
                # Set MTU for tunnel device used if ipip is enabled
                - name: FELIX_IPINIPMTU
                  valueFrom:
                    configMapKeyRef:
                      name: calico-config
                      key: veth_mtu
                # The default IPv4 pool to create on startup if none exists. Pod IPs will be
                # chosen from this range. Changing this value after installation will have
                # no effect. This should fall within `--cluster-cidr`.
                - name: CALICO_IPV4POOL_CIDR
                  value: "10.0.0.0/16"
                # Disable file logging so `kubectl logs` works.
                - name: CALICO_DISABLE_FILE_LOGGING
                  value: "true"
                # Set Felix endpoint to host default action to ACCEPT.
                - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
                  value: "ACCEPT"
                # Disable IPv6 on Kubernetes.
                - name: FELIX_IPV6SUPPORT
                  value: "false"
                # Set Felix logging to "info"
                - name: FELIX_LOGSEVERITYSCREEN
                  value: "info"
                - name: FELIX_HEALTHENABLED
                  value: "true"
              securityContext:
                privileged: true
              resources:
                requests:
                  cpu: 250m
              livenessProbe:
                httpGet:
                  path: /liveness
                  port: 9099
                  host: localhost
                periodSeconds: 10
                initialDelaySeconds: 10
                failureThreshold: 6
              readinessProbe:
                exec:
                  command:
                  - /bin/calico-node
                  - -bird-ready
                  - -felix-ready
                periodSeconds: 10
              volumeMounts:
                - mountPath: /lib/modules
                  name: lib-modules
                  readOnly: true
                - mountPath: /run/xtables.lock
                  name: xtables-lock
                  readOnly: false
                - mountPath: /var/run/calico
                  name: var-run-calico
                  readOnly: false
                - mountPath: /var/lib/calico
                  name: var-lib-calico
                  readOnly: false
          volumes:
            # Used by calico/node.
            - name: lib-modules
              hostPath:
                path: /lib/modules
            - name: var-run-calico
              hostPath:
                path: /var/run/calico
            - name: var-lib-calico
              hostPath:
                path: /var/lib/calico
            - name: xtables-lock
              hostPath:
                path: /run/xtables.lock
                type: FileOrCreate
            # Used to install CNI.
            - name: cni-bin-dir
              hostPath:
                path: /opt/cni/bin
            - name: cni-net-dir
              hostPath:
                path: /etc/cni/net.d
    ---
    
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: calico-node
      namespace: kube-system
    
    ---
    # Create all the CustomResourceDefinitions needed for
    # Calico policy and networking mode.
    
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
       name: felixconfigurations.crd.projectcalico.org
    spec:
      scope: Cluster
      group: crd.projectcalico.org
      version: v1
      names:
        kind: FelixConfiguration
        plural: felixconfigurations
        singular: felixconfiguration
    ---
    
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: bgppeers.crd.projectcalico.org
    spec:
      scope: Cluster
      group: crd.projectcalico.org
      version: v1
      names:
        kind: BGPPeer
        plural: bgppeers
        singular: bgppeer
    
    ---
    
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: bgpconfigurations.crd.projectcalico.org
    spec:
      scope: Cluster
      group: crd.projectcalico.org
      version: v1
      names:
        kind: BGPConfiguration
        plural: bgpconfigurations
        singular: bgpconfiguration
    
    ---
    
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: ippools.crd.projectcalico.org
    spec:
      scope: Cluster
      group: crd.projectcalico.org
      version: v1
      names:
        kind: IPPool
        plural: ippools
        singular: ippool
    
    ---
    
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: hostendpoints.crd.projectcalico.org
    spec:
      scope: Cluster
      group: crd.projectcalico.org
      version: v1
      names:
        kind: HostEndpoint
        plural: hostendpoints
        singular: hostendpoint
    
    ---
    
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: clusterinformations.crd.projectcalico.org
    spec:
      scope: Cluster
      group: crd.projectcalico.org
      version: v1
      names:
        kind: ClusterInformation
        plural: clusterinformations
        singular: clusterinformation
    
    ---
    
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: globalnetworkpolicies.crd.projectcalico.org
    spec:
      scope: Cluster
      group: crd.projectcalico.org
      version: v1
      names:
        kind: GlobalNetworkPolicy
        plural: globalnetworkpolicies
        singular: globalnetworkpolicy
    
    ---
    
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: globalnetworksets.crd.projectcalico.org
    spec:
      scope: Cluster
      group: crd.projectcalico.org
      version: v1
      names:
        kind: GlobalNetworkSet
        plural: globalnetworksets
        singular: globalnetworkset
    
    ---
    
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: networkpolicies.crd.projectcalico.org
    spec:
      scope: Namespaced
      group: crd.projectcalico.org
      version: v1
      names:
        kind: NetworkPolicy
        plural: networkpolicies
        singular: networkpolicy
    ---
    
    # Include a clusterrole for the calico-node DaemonSet,
    # and bind it to the calico-node serviceaccount.
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1beta1
    metadata:
      name: calico-node
    rules:
      # The CNI plugin needs to get pods, nodes, and namespaces.
      - apiGroups: [""]
        resources:
          - pods
          - nodes
          - namespaces
        verbs:
          - get
      - apiGroups: [""]
        resources:
          - endpoints
          - services
        verbs:
          # Used to discover service IPs for advertisement.
          - watch
          - list
          # Used to discover Typhas.
          - get
      - apiGroups: [""]
        resources:
          - nodes/status
        verbs:
          # Needed for clearing NodeNetworkUnavailable flag.
          - patch
          # Calico stores some configuration information in node annotations.
          - update
      # Watch for changes to Kubernetes NetworkPolicies.
      - apiGroups: ["networking.k8s.io"]
        resources:
          - networkpolicies
        verbs:
          - watch
          - list
      # Used by Calico for policy information.
      - apiGroups: [""]
        resources:
          - pods
          - namespaces
          - serviceaccounts
        verbs:
          - list
          - watch
      # The CNI plugin patches pods/status.
      - apiGroups: [""]
        resources:
          - pods/status
        verbs:
          - patch
      # Calico monitors various CRDs for config.
      - apiGroups: ["crd.projectcalico.org"]
        resources:
          - globalfelixconfigs
          - felixconfigurations
          - bgppeers
          - globalbgpconfigs
          - bgpconfigurations
          - ippools
          - globalnetworkpolicies
          - globalnetworksets
          - networkpolicies
          - clusterinformations
          - hostendpoints
        verbs:
          - get
          - list
          - watch
      # Calico must create and update some CRDs on startup.
      - apiGroups: ["crd.projectcalico.org"]
        resources:
          - ippools
          - felixconfigurations
          - clusterinformations
        verbs:
          - create
          - update
      # Calico stores some configuration information on the node.
      - apiGroups: [""]
        resources:
          - nodes
        verbs:
          - get
          - list
          - watch
      # These permissions are only requried for upgrade from v2.6, and can
      # be removed after upgrade or on fresh installations.
      - apiGroups: ["crd.projectcalico.org"]
        resources:
          - bgpconfigurations
          - bgppeers
        verbs:
          - create
          - update
    ---
    apiVersion: rbac.authorization.k8s.io/v1beta1
    kind: ClusterRoleBinding
    metadata:
      name: calico-node
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: calico-node
    subjects:
    - kind: ServiceAccount
      name: calico-node
      namespace: kube-system
    ---
    
    [root@k8s-master1 work]# kubectl apply -f calico.yaml
    [root@k8s-master1 work]# kubectl get pods -n kube-system
    NAME                READY   STATUS    RESTARTS   AGE
    calico-node-t8q8z   1/1     Running   0          25s
    [root@k8s-master1 work]# kubectl get nodes
    NAME        STATUS   ROLES    AGE   VERSION
    k8s-node1   Ready    <none>   33m   v1.20.7
    

    Note: calico.yaml needs one adjustment:

    - name: IP_AUTODETECTION_METHOD
      value: "can-reach=192.168.40.181"  # 这个ip是k8s任何一个工作节点的ip都行
    

    3.9 Deploy the CoreDNS component

    [root@k8s-master1 work]# cat coredns.yaml 
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: coredns
      namespace: kube-system
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      labels:
        kubernetes.io/bootstrapping: rbac-defaults
      name: system:coredns
    rules:
      - apiGroups:
        - ""
        resources:
        - endpoints
        - services
        - pods
        - namespaces
        verbs:
        - list
        - watch
      - apiGroups:
        - discovery.k8s.io
        resources:
        - endpointslices
        verbs:
        - list
        - watch
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      annotations:
        rbac.authorization.kubernetes.io/autoupdate: "true"
      labels:
        kubernetes.io/bootstrapping: rbac-defaults
      name: system:coredns
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:coredns
    subjects:
    - kind: ServiceAccount
      name: coredns
      namespace: kube-system
    ---
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: coredns
      namespace: kube-system
    data:
      Corefile: |
        .:53 {
            errors
            health {
              lameduck 5s
            }
            ready
            kubernetes cluster.local in-addr.arpa ip6.arpa {
              fallthrough in-addr.arpa ip6.arpa
            }
            prometheus :9153
            forward . /etc/resolv.conf {
              max_concurrent 1000
            }
            cache 30
            loop
            reload
            loadbalance
        }
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: coredns
      namespace: kube-system
      labels:
        k8s-app: kube-dns
        kubernetes.io/name: "CoreDNS"
    spec:
      # replicas: not specified here:
      # 1. Default is 1.
      # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
      strategy:
        type: RollingUpdate
        rollingUpdate:
          maxUnavailable: 1
      selector:
        matchLabels:
          k8s-app: kube-dns
      template:
        metadata:
          labels:
            k8s-app: kube-dns
        spec:
          priorityClassName: system-cluster-critical
          serviceAccountName: coredns
          tolerations:
            - key: "CriticalAddonsOnly"
              operator: "Exists"
          nodeSelector:
            kubernetes.io/os: linux
          affinity:
             podAntiAffinity:
               preferredDuringSchedulingIgnoredDuringExecution:
               - weight: 100
                 podAffinityTerm:
                   labelSelector:
                     matchExpressions:
                       - key: k8s-app
                         operator: In
                         values: ["kube-dns"]
                   topologyKey: kubernetes.io/hostname
          containers:
          - name: coredns
            image: coredns/coredns:1.7.0
            imagePullPolicy: IfNotPresent
            resources:
              limits:
                memory: 170Mi
              requests:
                cpu: 100m
                memory: 70Mi
            args: [ "-conf", "/etc/coredns/Corefile" ]
            volumeMounts:
            - name: config-volume
              mountPath: /etc/coredns
              readOnly: true
            ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
            - containerPort: 9153
              name: metrics
              protocol: TCP
            securityContext:
              allowPrivilegeEscalation: false
              capabilities:
                add:
                - NET_BIND_SERVICE
                drop:
                - all
              readOnlyRootFilesystem: true
            livenessProbe:
              httpGet:
                path: /health
                port: 8080
                scheme: HTTP
              initialDelaySeconds: 60
              timeoutSeconds: 5
              successThreshold: 1
              failureThreshold: 5
            readinessProbe:
              httpGet:
                path: /ready
                port: 8181
                scheme: HTTP
          dnsPolicy: Default
          volumes:
            - name: config-volume
              configMap:
                name: coredns
                items:
                - key: Corefile
                  path: Corefile
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: kube-dns
      namespace: kube-system
      annotations:
        prometheus.io/port: "9153"
        prometheus.io/scrape: "true"
      labels:
        k8s-app: kube-dns
        kubernetes.io/cluster-service: "true"
        kubernetes.io/name: "CoreDNS"
    spec:
      selector:
        k8s-app: kube-dns
      clusterIP: 10.255.0.2
      ports:
      - name: dns
        port: 53
        protocol: UDP
      - name: dns-tcp
        port: 53
        protocol: TCP
      - name: metrics
        port: 9153
        protocol: TCP
        
        
    [root@k8s-master1 ~]# kubectl apply -f coredns.yaml
    [root@k8s-master1 work]# kubectl get pods -n kube-system
    NAME                       READY   STATUS    RESTARTS   AGE
    calico-node-t8q8z          1/1     Running   0          5m35s
    coredns-7bf4bd64bd-5bnj7   1/1     Running   0          9s
    [root@k8s-master1 work]# kubectl get svc -n kube-system
    NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
    kube-dns   ClusterIP   10.255.0.2   <none>        53/UDP,53/TCP,9153/TCP   22s
    

    3.10 Test the cluster by deploying a Tomcat service

    [root@k8s-master1 work]# cat tomcat.yaml 
    apiVersion: v1
    kind: Pod
    metadata:
      name: demo-pod
      namespace: default
      labels:
        app: myapp
        env: dev
    spec:
      containers:
      - name:  tomcat-pod-java
        ports:
        - containerPort: 8080
        image: tomcat:8.5-jre8-alpine
        imagePullPolicy: IfNotPresent
      - name: busybox
        image: busybox:latest
        command:
        - "/bin/sh"
        - "-c"
        - "sleep 3600"
    
    [root@k8s-master1 work]# cat tomcat-service.yaml 
    apiVersion: v1
    kind: Service
    metadata:
      name: tomcat
    spec:
      type: NodePort
      ports:
        - port: 8080
          nodePort: 30080
      selector:
        app: myapp
        env: dev
        
    [root@k8s-master1 work]# kubectl apply -f tomcat.yaml
    pod/demo-pod created
    [root@k8s-master1 work]# kubectl apply -f tomcat-service.yaml
    service/tomcat created
    [root@k8s-master1 work]# kubectl get pods
    NAME       READY   STATUS    RESTARTS   AGE
    demo-pod   2/2     Running   0          102s
    [root@k8s-master1 work]# kubectl get svc
    NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
    kubernetes   ClusterIP   10.255.0.1       <none>        443/TCP          125m
    tomcat       NodePort    10.255.164.138   <none>        8080:30080/TCP   112s
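
    Optionally, probe the NodePort from the command line before opening a browser; it should return an HTTP response from Tomcat:

    [root@k8s-master1 work]# curl -I http://192.168.40.181:30080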
    

    Browser access test:

    (screenshot: Tomcat page opened in a browser at http://192.168.40.181:30080)

    3.11 Verify that CoreDNS works

    # Use busybox 1.28 specifically; with the latest busybox image, nslookup fails to resolve service names and IPs
    [root@k8s-master1 ~]# kubectl run busybox --image busybox:1.28 --restart=Never --rm -it -- sh
    / # ping www.baidu.com
    PING www.baidu.com (39.156.66.18): 56 data bytes
    64 bytes from 39.156.66.18: seq=0 ttl=127 time=39.3 ms
    # The output above shows the pod has outbound network access
    
    / # nslookup kubernetes.default.svc.cluster.local
    Server:    10.255.0.2
    Address 1: 10.255.0.2 kube-dns.kube-system.svc.cluster.local
    
    Name:      kubernetes.default.svc.cluster.local
    Address 1: 10.255.0.1 kubernetes.default.svc.cluster.local
    
    / # nslookup tomcat.default.svc.cluster.local
    Server:    10.255.0.2
    Address 1: 10.255.0.2 kube-dns.kube-system.svc.cluster.local
    
    Name:      tomcat.default.svc.cluster.local
    Address 1: 10.255.164.138 tomcat.default.svc.cluster.local
    