  Notes on Deploying a Kubernetes 1.13 Cluster Offline on CentOS 7

    I. Overview

    This post mainly follows a deployment article from the Kubernetes Chinese community (CentOS 使用二进制部署 Kubernetes 1.13集群), recording the process in more detail for future reference.

    II. Deployment Environment

    1. Kubernetes version: 1.13, deployed from binaries

    Download links are available in the referenced article.

    2. Local deployment environment

    IP          Hostname    Kernel version              Components
    10.0.3.107  manager107  3.10.0-957.1.3.el7.x86_64   kube-apiserver, kube-scheduler, kube-controller-manager, etcd, kubelet, kube-proxy, flannel
    10.0.3.68   worker68    3.10.0-957.1.3.el7.x86_64   etcd, kubelet, kube-proxy, flannel
    10.0.3.80   worker80    3.10.0-957.1.3.el7.x86_64   etcd, kubelet, kube-proxy, flannel

    (The version column is the kernel release; note that etcd members run on all three nodes, as deployed in section 8 below.)

    3. Network layout

    See the referenced article (CentOS 使用二进制部署 Kubernetes 1.13集群).

    III. Kubernetes Installation and Configuration

    1. Create temporary directories

    # For etcd certificates and config files
    [root@manager107 ~]# mkdir -p /home/workspace/etcd
    # For k8s certificates and config files
    [root@manager107 ~]# mkdir -p /home/workspace/k8s
    # For k8s installation packages
    [root@manager107 ~]# mkdir -p /home/workspace/packages

    2. Disable the firewall, swap, and SELinux

    Run on all three servers:

    systemctl stop firewalld && systemctl disable firewalld
    setenforce 0
    swapoff -a && sysctl -w vm.swappiness=0
    vi /etc/selinux/config    # set SELINUX=disabled

    3. Install Docker
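    The original article leaves this step unelaborated. A minimal sketch for an offline install, assuming the Docker CE RPMs and their dependencies were downloaded in advance on an internet-connected machine and copied to a local directory (the path and package set below are assumptions):

    # Run on all three servers
    cd /home/workspace/packages/docker-rpms   # hypothetical directory holding the pre-downloaded RPMs
    yum localinstall -y *.rpm                 # installs docker-ce and its dependencies without network access
    systemctl enable docker && systemctl start docker
    docker version                            # confirm the daemon is up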

    4. Create installation directories

    [root@manager107 ~]# mkdir /k8s/etcd/{bin,cfg,ssl} -p
    [root@manager107 ~]# mkdir /k8s/kubernetes/{bin,cfg,ssl} -p

    5. Install and configure CFSSL

    [root@manager107 ~]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
    [root@manager107 ~]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
    [root@manager107 ~]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
    [root@manager107 ~]# chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
    [root@manager107 ~]# mv cfssl_linux-amd64 /usr/local/bin/cfssl
    [root@manager107 ~]# mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
    [root@manager107 ~]# mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
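    A quick sanity check that the tools are in place:

    [root@manager107 ~]# cfssl version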

    6. Create certificates

    [root@manager107 ~]# cd /home/workspace/etcd
    # Create the etcd CA signing config
    [root@manager107 etcd]# cat << EOF | tee ca-config.json
    {
      "signing": {
        "default": {
          "expiry": "87600h"
        },
        "profiles": {
          "www": {
             "expiry": "87600h",
             "usages": [
                "signing",
                "key encipherment",
                "server auth",
                "client auth"
            ]
          }
        }
      }
    }
    EOF
    # Create the etcd CA certificate signing request (CSR)
    [root@manager107 etcd]# cat << EOF | tee ca-csr.json
    {
        "CN": "etcd CA",
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "L": "Shenzhen",
                "ST": "Shenzhen"
            }
        ]
    }
    EOF
    # Create the etcd server CSR
    [root@manager107 etcd]# cat << EOF | tee server-csr.json
    {
        "CN": "etcd",
        "hosts": [
        "10.0.3.107",
        "10.0.3.68",
        "10.0.3.80"
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "L": "Shenzhen",
                "ST": "Shenzhen"
            }
        ]
    }
    EOF
    # Generate the etcd CA certificate and key, then sign the server certificate
    [root@manager107 etcd]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
    [root@manager107 etcd]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
    [root@manager107 etcd]# cd /home/workspace/k8s/
    # Create the Kubernetes CA signing config
    [root@manager107 k8s]# cat << EOF | tee ca-config.json
    {
      "signing": {
        "default": {
          "expiry": "87600h"
        },
        "profiles": {
          "kubernetes": {
             "expiry": "87600h",
             "usages": [
                "signing",
                "key encipherment",
                "server auth",
                "client auth"
            ]
          }
        }
      }
    }
    EOF
    [root@manager107 k8s]# cat << EOF | tee ca-csr.json
    {
        "CN": "kubernetes",
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "L": "Shenzhen",
                "ST": "Shenzhen",
                "O": "k8s",
                "OU": "System"
            }
        ]
    }
    EOF
    [root@manager107 k8s]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
    # Generate the API server certificate
    [root@manager107 k8s]# cat << EOF | tee server-csr.json
    {
        "CN": "kubernetes",
        "hosts": [
          "10.0.0.1",
          "127.0.0.1",
          "10.0.3.107",
          "kubernetes",
          "kubernetes.default",
          "kubernetes.default.svc",
          "kubernetes.default.svc.cluster",
          "kubernetes.default.svc.cluster.local"
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "L": "Shenzhen",
                "ST": "Shenzhen",
                "O": "k8s",
                "OU": "System"
            }
        ]
    }
    EOF
    [root@manager107 k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
    # Create the kube-proxy client certificate
    [root@manager107 k8s]# cat << EOF | tee kube-proxy-csr.json
    {
      "CN": "system:kube-proxy",
      "hosts": [],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "L": "Shenzhen",
          "ST": "Shenzhen",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }
    EOF
    [root@manager107 k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
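    Optionally, inspect a generated certificate with the cfssl-certinfo binary installed earlier to confirm its hosts and expiry:

    [root@manager107 k8s]# cfssl-certinfo -cert server.pem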

    7. SSH key authentication

    [root@manager107 ~]# ssh-keygen
    [root@manager107 ~]# ssh-copy-id 10.0.3.68
    [root@manager107 ~]# ssh-copy-id 10.0.3.80

    8. Deploy etcd

    [root@manager107 workspace]# cd /home/workspace/packages/k8s1.13-centos
    [root@manager107 k8s1.13-centos]# tar -xvf etcd-v3.3.10-linux-amd64.tar.gz
    [root@manager107 k8s1.13-centos]# cd etcd-v3.3.10-linux-amd64/
    [root@manager107 etcd-v3.3.10-linux-amd64]# cp etcd etcdctl /k8s/etcd/bin/
    [root@manager107 etcd-v3.3.10-linux-amd64]# vim /k8s/etcd/cfg/etcd
    #[Member]
    ETCD_NAME="etcd01"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://10.0.3.107:2380"
    ETCD_LISTEN_CLIENT_URLS="https://10.0.3.107:2379"
    
    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.3.107:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://10.0.3.107:2379"
    ETCD_INITIAL_CLUSTER="etcd01=https://10.0.3.107:2380,etcd02=https://10.0.3.68:2380,etcd03=https://10.0.3.80:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    # Create the etcd systemd unit file
    [root@manager107 etcd-v3.3.10-linux-amd64]# vim /usr/lib/systemd/system/etcd.service
    [Unit]
    Description=Etcd Server
    After=network.target
    After=network-online.target
    Wants=network-online.target
    
    [Service]
    Type=notify
    EnvironmentFile=/k8s/etcd/cfg/etcd
    ExecStart=/k8s/etcd/bin/etcd \
    --name=${ETCD_NAME} \
    --data-dir=${ETCD_DATA_DIR} \
    --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
    --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
    --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
    --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
    --initial-cluster=${ETCD_INITIAL_CLUSTER} \
    --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
    --initial-cluster-state=new \
    --cert-file=/k8s/etcd/ssl/server.pem \
    --key-file=/k8s/etcd/ssl/server-key.pem \
    --peer-cert-file=/k8s/etcd/ssl/server.pem \
    --peer-key-file=/k8s/etcd/ssl/server-key.pem \
    --trusted-ca-file=/k8s/etcd/ssl/ca.pem \
    --peer-trusted-ca-file=/k8s/etcd/ssl/ca.pem
    Restart=on-failure
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    # Copy the certificate files
    [root@manager107 etcd-v3.3.10-linux-amd64]# cd /home/workspace/etcd/
    [root@manager107 etcd]# cp ca*pem server*pem /k8s/etcd/ssl
    # Copy the binaries, certs, and config to nodes 68 and 80 (the /k8s directory must exist on the workers first)
    [root@manager107 etcd]# ssh 10.0.3.68 "mkdir -p /k8s"
    [root@manager107 etcd]# ssh 10.0.3.80 "mkdir -p /k8s"
    [root@manager107 etcd]# cd /k8s/
    [root@manager107 k8s]# scp -r etcd 10.0.3.68:/k8s/
    [root@manager107 k8s]# scp -r etcd 10.0.3.80:/k8s/
    [root@manager107 k8s]# scp /usr/lib/systemd/system/etcd.service  10.0.3.68:/usr/lib/systemd/system/etcd.service
    [root@manager107 k8s]# scp /usr/lib/systemd/system/etcd.service  10.0.3.80:/usr/lib/systemd/system/etcd.service
    # Edit the etcd config on node 68
    [root@worker68 ~]# vim /k8s/etcd/cfg/etcd
    #[Member]
    ETCD_NAME="etcd02"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://10.0.3.68:2380"
    ETCD_LISTEN_CLIENT_URLS="https://10.0.3.68:2379"
    
    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.3.68:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://10.0.3.68:2379"
    ETCD_INITIAL_CLUSTER="etcd01=https://10.0.3.107:2380,etcd02=https://10.0.3.68:2380,etcd03=https://10.0.3.80:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    # Edit the etcd config on node 80
    [root@worker80 ~]# vim /k8s/etcd/cfg/etcd
    #[Member]
    ETCD_NAME="etcd03"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://10.0.3.80:2380"
    ETCD_LISTEN_CLIENT_URLS="https://10.0.3.80:2379"
    
    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.3.80:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://10.0.3.80:2379"
    ETCD_INITIAL_CLUSTER="etcd01=https://10.0.3.107:2380,etcd02=https://10.0.3.68:2380,etcd03=https://10.0.3.80:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    # Start etcd on 80 (start all three members in quick succession; each waits for its peers to form the cluster)
    [root@worker80 ~]# systemctl daemon-reload
    [root@worker80 ~]# systemctl enable etcd
    [root@worker80 ~]# systemctl start etcd
    # Start etcd on 68
    [root@worker68 ~]# systemctl daemon-reload
    [root@worker68 ~]# systemctl enable etcd
    [root@worker68 ~]# systemctl start etcd
    # Start etcd on 107
    [root@manager107 ~]# systemctl daemon-reload
    [root@manager107 ~]# systemctl enable etcd
    [root@manager107 ~]# systemctl start etcd
    # Verify the cluster is running normally
    [root@manager107 ~]# /k8s/etcd/bin/etcdctl --ca-file=/k8s/etcd/ssl/ca.pem \
    --cert-file=/k8s/etcd/ssl/server.pem \
    --key-file=/k8s/etcd/ssl/server-key.pem \
    --endpoints="https://10.0.3.107:2379,https://10.0.3.68:2379,https://10.0.3.80:2379" \
    cluster-health
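    cluster-health should report all three members as healthy. The member list subcommand (same TLS flags) additionally shows each member's peer and client URLs:

    [root@manager107 ~]# /k8s/etcd/bin/etcdctl --ca-file=/k8s/etcd/ssl/ca.pem \
    --cert-file=/k8s/etcd/ssl/server.pem \
    --key-file=/k8s/etcd/ssl/server-key.pem \
    --endpoints="https://10.0.3.107:2379,https://10.0.3.68:2379,https://10.0.3.80:2379" \
    member list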

    9. Deploy the Flannel network

    # Write the cluster Pod network config into etcd
    [root@manager107 ssl]# cd /k8s/etcd/ssl/
    [root@manager107 ssl]# /k8s/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem \
    --key-file=server-key.pem \
    --endpoints="https://10.0.3.107:2379,https://10.0.3.68:2379,https://10.0.3.80:2379" \
    set /coreos.com/network/config '{ "Network": "172.20.0.0/16", "Backend": {"Type": "vxlan"}}'
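    Reading the key back confirms that flanneld will find its network config:

    [root@manager107 ssl]# /k8s/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem \
    --key-file=server-key.pem \
    --endpoints="https://10.0.3.107:2379,https://10.0.3.68:2379,https://10.0.3.80:2379" \
    get /coreos.com/network/config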
    # Extract and install
    [root@manager107 ssl]# cd /home/workspace/packages/k8s1.13-centos
    [root@manager107 k8s1.13-centos]# tar -xvf flannel-v0.10.0-linux-amd64.tar.gz
    [root@manager107 k8s1.13-centos]# mv flanneld mk-docker-opts.sh /k8s/kubernetes/bin/
    # Configure Flannel
    [root@manager107 k8s1.13-centos]# vim /k8s/kubernetes/cfg/flanneld
    FLANNEL_OPTIONS="--etcd-endpoints=https://10.0.3.107:2379,https://10.0.3.68:2379,https://10.0.3.80:2379 -etcd-cafile=/k8s/etcd/ssl/ca.pem -etcd-certfile=/k8s/etcd/ssl/server.pem -etcd-keyfile=/k8s/etcd/ssl/server-key.pem"
    # Create the flanneld systemd unit file
    [root@manager107 k8s1.13-centos]# vim /usr/lib/systemd/system/flanneld.service
    [Unit]
    Description=Flanneld overlay address etcd agent
    After=network-online.target network.target
    Before=docker.service
    
    [Service]
    Type=notify
    EnvironmentFile=/k8s/kubernetes/cfg/flanneld
    ExecStart=/k8s/kubernetes/bin/flanneld --ip-masq $FLANNEL_OPTIONS
    ExecStartPost=/k8s/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    # Configure Docker to start inside the flannel-assigned subnet
    [root@manager107 ~]# vim /usr/lib/systemd/system/docker.service
    [Unit]
    Description=Docker Application Container Engine
    Documentation=https://docs.docker.com
    BindsTo=containerd.service
    After=network-online.target firewalld.service
    Wants=network-online.target
    
    [Service]
    Type=notify
    # the default is not to use systemd for cgroups because the delegate issues still
    # exists and systemd currently does not support the cgroup feature set required
    # for containers run by docker
    EnvironmentFile=/run/flannel/subnet.env
    # ExecStart=/usr/bin/dockerd  -H tcp://0.0.0.0:2376 -H unix://
    ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
    ExecReload=/bin/kill -s HUP $MAINPID
    TimeoutSec=0
    RestartSec=2
    Restart=always
    
    # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
    # Both the old, and new location are accepted by systemd 229 and up, so using the old location
    # to make them work for either version of systemd.
    StartLimitBurst=3
    
    # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
    # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
    # this option work for either version of systemd.
    StartLimitInterval=60s
    
    # Having non-zero Limit*s causes performance problems due to accounting overhead
    # in the kernel. We recommend using cgroups to do container-local accounting.
    LimitNOFILE=infinity
    LimitNPROC=infinity
    LimitCORE=infinity
    
    # Comment TasksMax if your systemd version does not supports it.
    # Only systemd 226 and above support this option.
    TasksMax=infinity
    
    # set delegate yes so that systemd does not reset the cgroups of docker containers
    Delegate=yes
    
    # kill only the docker process, not all processes in the cgroup
    KillMode=process
    
    [Install]
    WantedBy=multi-user.target
    The docker.service files on worker68 and worker80 are identical to the one above; they are distributed to both workers with scp in the next step, so there is no need to edit them by hand on each node.
    # Copy the flanneld config and the flanneld/docker systemd unit files to all nodes
    [root@manager107 ~]# cd /k8s
    [root@manager107 k8s]# scp -r kubernetes 10.0.3.68:/k8s/kubernetes 
    [root@manager107 k8s]# scp -r kubernetes 10.0.3.80:/k8s/kubernetes 
    [root@manager107 k8s]# scp /k8s/kubernetes/cfg/flanneld 10.0.3.68:/k8s/kubernetes/cfg/flanneld
    [root@manager107 k8s]# scp /k8s/kubernetes/cfg/flanneld 10.0.3.80:/k8s/kubernetes/cfg/flanneld
    [root@manager107 k8s]# scp /usr/lib/systemd/system/docker.service  10.0.3.68:/usr/lib/systemd/system/docker.service
    [root@manager107 k8s]# scp /usr/lib/systemd/system/docker.service  10.0.3.80:/usr/lib/systemd/system/docker.service
    [root@manager107 k8s]# scp /usr/lib/systemd/system/flanneld.service  10.0.3.68:/usr/lib/systemd/system/flanneld.service
    [root@manager107 k8s]# scp /usr/lib/systemd/system/flanneld.service  10.0.3.80:/usr/lib/systemd/system/flanneld.service
    # Start flannel on 107, then restart docker to pick up the subnet options
    [root@manager107 ~]# systemctl daemon-reload
    [root@manager107 ~]# systemctl enable flanneld
    [root@manager107 ~]# systemctl start flanneld
    [root@manager107 ~]# systemctl restart docker
    # Start flannel on 68
    [root@worker68 ~]# systemctl daemon-reload
    [root@worker68 ~]# systemctl enable flanneld
    [root@worker68 ~]# systemctl start flanneld
    [root@worker68 ~]# systemctl restart docker
    # Start flannel on 80
    [root@worker80 ~]# systemctl daemon-reload
    [root@worker80 ~]# systemctl enable flanneld
    [root@worker80 ~]# systemctl start flanneld
    [root@worker80 ~]# systemctl restart docker
    # Verify it took effect: docker0 and flannel.1 should be in the same 172.20.0.0/16 network
    [root@manager107 ~]# ip add
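    The subnet flanneld leased for this host, and the options it hands to Docker, can also be read from the file generated by mk-docker-opts.sh:

    [root@manager107 ~]# cat /run/flannel/subnet.env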

    10. Deploy the master node

    The Kubernetes master node runs the following components:

    • kube-apiserver
    • kube-scheduler
    • kube-controller-manager

    kube-scheduler and kube-controller-manager can run in a clustered configuration: leader election picks one active process while the remaining processes block in standby.

    # Extract the binaries and copy them to the master node
    [root@manager107 ~]# cd /home/workspace/packages/k8s1.13-centos
    [root@manager107 k8s1.13-centos]# tar -xvf kubernetes-server-linux-amd64.tar.gz
    [root@manager107 k8s1.13-centos]# cd kubernetes/server/bin/
    [root@manager107 bin]# cp kube-scheduler kube-apiserver kube-controller-manager kubectl /k8s/kubernetes/bin/
    # Copy the certificates
    [root@manager107 bin]# cd /home/workspace/k8s/
    [root@manager107 k8s]# cp *pem /k8s/kubernetes/ssl/
    # Deploy the kube-apiserver component
    ## Create the TLS bootstrapping token
    [root@manager107 k8s]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
    e9ca0f3e1b66c9bef910b47171490c53
    [root@manager107 k8s]# vim /k8s/kubernetes/cfg/token.csv
    e9ca0f3e1b66c9bef910b47171490c53,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
    ## Create the apiserver config file
    [root@manager107 k8s]# vim /k8s/kubernetes/cfg/kube-apiserver
    KUBE_APISERVER_OPTS="--logtostderr=true \
    --v=4 \
    --etcd-servers=https://10.0.3.107:2379,https://10.0.3.68:2379,https://10.0.3.80:2379 \
    --bind-address=10.0.3.107 \
    --secure-port=6443 \
    --advertise-address=10.0.3.107 \
    --allow-privileged=true \
    --service-cluster-ip-range=10.0.0.0/24 \
    --enable-admission-plugins=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction \
    --authorization-mode=RBAC,Node \
    --enable-bootstrap-token-auth \
    --token-auth-file=/k8s/kubernetes/cfg/token.csv \
    --service-node-port-range=30000-50000 \
    --tls-cert-file=/k8s/kubernetes/ssl/server.pem \
    --tls-private-key-file=/k8s/kubernetes/ssl/server-key.pem \
    --client-ca-file=/k8s/kubernetes/ssl/ca.pem \
    --service-account-key-file=/k8s/kubernetes/ssl/ca-key.pem \
    --etcd-cafile=/k8s/etcd/ssl/ca.pem \
    --etcd-certfile=/k8s/etcd/ssl/server.pem \
    --etcd-keyfile=/k8s/etcd/ssl/server-key.pem"
    ## Create the kube-apiserver systemd unit file
    [root@manager107 k8s]# vim /usr/lib/systemd/system/kube-apiserver.service
    [Unit]
    Description=Kubernetes API Server
    Documentation=https://github.com/kubernetes/kubernetes
    
    [Service]
    EnvironmentFile=-/k8s/kubernetes/cfg/kube-apiserver
    ExecStart=/k8s/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    # Start the service
    [root@manager107 k8s]# systemctl daemon-reload
    [root@manager107 k8s]# systemctl enable kube-apiserver
    [root@manager107 k8s]# systemctl restart kube-apiserver
    # Check that the apiserver is running
    [root@manager107 k8s]# systemctl status kube-apiserver
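    As an extra check, the health endpoint can be queried directly, assuming the apiserver's default insecure localhost port 8080 is still enabled (the kube-scheduler and kube-controller-manager configs below rely on it):

    [root@manager107 k8s]# curl http://127.0.0.1:8080/healthz   # prints "ok" when healthy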
    # Deploy kube-scheduler
    ## Create the kube-scheduler config file
    [root@manager107 k8s]# vim  /k8s/kubernetes/cfg/kube-scheduler
    KUBE_SCHEDULER_OPTS="--logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect"
    ## Create the kube-scheduler systemd unit file
    [root@manager107 k8s]# vim /usr/lib/systemd/system/kube-scheduler.service
    [Unit]
    Description=Kubernetes Scheduler
    Documentation=https://github.com/kubernetes/kubernetes
    
    [Service]
    EnvironmentFile=-/k8s/kubernetes/cfg/kube-scheduler
    ExecStart=/k8s/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    ## Start the service
    [root@manager107 k8s]# systemctl daemon-reload
    [root@manager107 k8s]# systemctl enable kube-scheduler.service
    [root@manager107 k8s]# systemctl start kube-scheduler.service
    ## Check that kube-scheduler is running
    [root@manager107 k8s]# systemctl status kube-scheduler.service
    # Deploy kube-controller-manager
    ## Create the kube-controller-manager config file
    [root@manager107 k8s]# vim /k8s/kubernetes/cfg/kube-controller-manager
    KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \
    --v=4 \
    --master=127.0.0.1:8080 \
    --leader-elect=true \
    --address=127.0.0.1 \
    --service-cluster-ip-range=10.0.0.0/24 \
    --cluster-name=kubernetes \
    --cluster-signing-cert-file=/k8s/kubernetes/ssl/ca.pem \
    --cluster-signing-key-file=/k8s/kubernetes/ssl/ca-key.pem \
    --root-ca-file=/k8s/kubernetes/ssl/ca.pem \
    --service-account-private-key-file=/k8s/kubernetes/ssl/ca-key.pem"
    ## Create the kube-controller-manager systemd unit file
    [root@manager107 k8s]# vim /usr/lib/systemd/system/kube-controller-manager.service
    [Unit]
    Description=Kubernetes Controller Manager
    Documentation=https://github.com/kubernetes/kubernetes
    
    [Service]
    EnvironmentFile=/k8s/kubernetes/cfg/kube-controller-manager
    ExecStart=/k8s/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    ## Start the service
    [root@manager107 k8s]# systemctl daemon-reload
    [root@manager107 k8s]# systemctl enable kube-controller-manager
    [root@manager107 k8s]# systemctl start kube-controller-manager
    ## Check that kube-controller-manager is running
    [root@manager107 k8s]# systemctl status kube-controller-manager
    # Add the executable path /k8s/kubernetes/bin to the PATH variable
    [root@manager107 k8s]# vim /etc/profile
    PATH=/k8s/kubernetes/bin:$PATH:$HOME/bin
    [root@manager107 k8s]# source /etc/profile
    # Check the master component status
    [root@manager107 k8s]# kubectl get cs,nodes

    11. Deploy the worker nodes

    The Kubernetes worker nodes run the following components:

    • docker
    • kubelet
    • kube-proxy

    kubelet runs on every worker node: it receives requests from kube-apiserver, manages Pod containers, and executes interactive commands such as exec, run, and logs.

    On startup, kubelet automatically registers its node information with kube-apiserver, and the built-in cAdvisor collects and monitors the node's resource usage.

    For security, this document enables only the secure HTTPS port, which authenticates and authorizes requests and rejects unauthorized access (e.g., from apiserver or heapster).

    # Copy the kubelet and kube-proxy binaries to the worker nodes
    [root@manager107 bin]# cd /home/workspace/packages/k8s1.13-centos/kubernetes/server/bin
    [root@manager107 bin]# scp kubelet kube-proxy 10.0.3.68:/k8s/kubernetes/bin/
    [root@manager107 bin]# scp kubelet kube-proxy 10.0.3.80:/k8s/kubernetes/bin/
    # Create a working directory
    [root@manager107 bin]# mkdir /home/workspace/kubelet_bootstrap_config
    [root@manager107 bin]# cd /home/workspace/kubelet_bootstrap_config
    # Create the kubelet bootstrap kubeconfig file
    [root@manager107 kubelet_bootstrap_config]# vim environment.sh
    # Create the kubelet bootstrapping kubeconfig
    BOOTSTRAP_TOKEN=e9ca0f3e1b66c9bef910b47171490c53
    KUBE_APISERVER="https://10.0.3.107:6443"
    # Set cluster parameters
    kubectl config set-cluster kubernetes \
      --certificate-authority=/home/workspace/k8s/ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=bootstrap.kubeconfig
    
    # Set client authentication parameters
    kubectl config set-credentials kubelet-bootstrap \
      --token=${BOOTSTRAP_TOKEN} \
      --kubeconfig=bootstrap.kubeconfig
    
    # Set context parameters
    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kubelet-bootstrap \
      --kubeconfig=bootstrap.kubeconfig
    
    # Set the default context
    kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
    
    # Create the kube-proxy kubeconfig
    
    kubectl config set-cluster kubernetes \
      --certificate-authority=/home/workspace/k8s/ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=kube-proxy.kubeconfig
    
    kubectl config set-credentials kube-proxy \
      --client-certificate=/home/workspace/k8s/kube-proxy.pem \
      --client-key=/home/workspace/k8s/kube-proxy-key.pem \
      --embed-certs=true \
      --kubeconfig=kube-proxy.kubeconfig
    
    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kube-proxy \
      --kubeconfig=kube-proxy.kubeconfig
    
    kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
    [root@manager107 kubelet_bootstrap_config]# sh environment.sh
    # Copy bootstrap.kubeconfig and kube-proxy.kubeconfig to all nodes
    [root@manager107 kubelet_bootstrap_config]# cp bootstrap.kubeconfig kube-proxy.kubeconfig /k8s/kubernetes/cfg/
    [root@manager107 kubelet_bootstrap_config]# scp bootstrap.kubeconfig kube-proxy.kubeconfig 10.0.3.68:/k8s/kubernetes/cfg/
    [root@manager107 kubelet_bootstrap_config]# scp bootstrap.kubeconfig kube-proxy.kubeconfig 10.0.3.80:/k8s/kubernetes/cfg/
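    To confirm a generated kubeconfig points at the right API server before relying on it:

    [root@manager107 kubelet_bootstrap_config]# kubectl config view --kubeconfig=bootstrap.kubeconfig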
    # Create the kubelet parameter config file on 107
    [root@manager107 ~]# vim /k8s/kubernetes/cfg/kubelet.config
    kind: KubeletConfiguration
    apiVersion: kubelet.config.k8s.io/v1beta1
    address: 10.0.3.107
    port: 10250
    readOnlyPort: 10255
    cgroupDriver: cgroupfs
    clusterDNS: ["10.0.0.2"]
    clusterDomain: cluster.local.
    failSwapOn: false
    authentication:
      anonymous:
        enabled: true
    # Create the kubelet options file on 107
    [root@manager107 ~]# vim /k8s/kubernetes/cfg/kubelet
    KUBELET_OPTS="--logtostderr=true \
    --v=4 \
    --hostname-override=10.0.3.107 \
    --kubeconfig=/k8s/kubernetes/cfg/kubelet.kubeconfig \
    --bootstrap-kubeconfig=/k8s/kubernetes/cfg/bootstrap.kubeconfig \
    --config=/k8s/kubernetes/cfg/kubelet.config \
    --cert-dir=/k8s/kubernetes/ssl \
    --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
    # Create the kubelet systemd unit file on 107
    [root@manager107 ~]# vim /usr/lib/systemd/system/kubelet.service
    [Unit]
    Description=Kubernetes Kubelet
    After=docker.service
    Requires=docker.service
    
    [Service]
    EnvironmentFile=/k8s/kubernetes/cfg/kubelet
    ExecStart=/k8s/kubernetes/bin/kubelet $KUBELET_OPTS
    Restart=on-failure
    KillMode=process
    
    [Install]
    WantedBy=multi-user.target
    # Create the kubelet parameter config file on 68:
    [root@worker68 ~]# vim /k8s/kubernetes/cfg/kubelet.config
    kind: KubeletConfiguration
    apiVersion: kubelet.config.k8s.io/v1beta1
    address: 10.0.3.68
    port: 10250
    readOnlyPort: 10255
    cgroupDriver: cgroupfs
    clusterDNS: ["10.0.0.2"]
    clusterDomain: cluster.local.
    failSwapOn: false
    authentication:
      anonymous:
        enabled: true
    # Create the kubelet options file on 68
    [root@worker68 ~]# vim /k8s/kubernetes/cfg/kubelet
    KUBELET_OPTS="--logtostderr=true \
    --v=4 \
    --hostname-override=10.0.3.68 \
    --kubeconfig=/k8s/kubernetes/cfg/kubelet.kubeconfig \
    --bootstrap-kubeconfig=/k8s/kubernetes/cfg/bootstrap.kubeconfig \
    --config=/k8s/kubernetes/cfg/kubelet.config \
    --cert-dir=/k8s/kubernetes/ssl \
    --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
    # Create the kubelet systemd unit file on 68
    [root@worker68 ~]# vim /usr/lib/systemd/system/kubelet.service 
    [Unit]
    Description=Kubernetes Kubelet
    After=docker.service
    Requires=docker.service
    
    [Service]
    EnvironmentFile=/k8s/kubernetes/cfg/kubelet
    ExecStart=/k8s/kubernetes/bin/kubelet $KUBELET_OPTS
    Restart=on-failure
    KillMode=process
    
    [Install]
    WantedBy=multi-user.target
    # Create the kubelet parameter config file on 80
    [root@worker80 ~]# vim /k8s/kubernetes/cfg/kubelet.config
    kind: KubeletConfiguration
    apiVersion: kubelet.config.k8s.io/v1beta1
    address: 10.0.3.80
    port: 10250
    readOnlyPort: 10255
    cgroupDriver: cgroupfs
    clusterDNS: ["10.0.0.2"]
    clusterDomain: cluster.local.
    failSwapOn: false
    authentication:
      anonymous:
        enabled: true
    # Create the kubelet options file on 80
    [root@worker80 ~]# vim /k8s/kubernetes/cfg/kubelet
    KUBELET_OPTS="--logtostderr=true \
    --v=4 \
    --hostname-override=10.0.3.80 \
    --kubeconfig=/k8s/kubernetes/cfg/kubelet.kubeconfig \
    --bootstrap-kubeconfig=/k8s/kubernetes/cfg/bootstrap.kubeconfig \
    --config=/k8s/kubernetes/cfg/kubelet.config \
    --cert-dir=/k8s/kubernetes/ssl \
    --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
    # Create the kubelet systemd unit file on 80
    [root@worker80 ~]# vim /usr/lib/systemd/system/kubelet.service 
    [Unit]
    Description=Kubernetes Kubelet
    After=docker.service
    Requires=docker.service
    
    [Service]
    EnvironmentFile=/k8s/kubernetes/cfg/kubelet
    ExecStart=/k8s/kubernetes/bin/kubelet $KUBELET_OPTS
    Restart=on-failure
    KillMode=process
    
    [Install]
    WantedBy=multi-user.target
    # Bind the kubelet-bootstrap user to the system cluster role
    [root@manager107 ~]# kubectl create clusterrolebinding kubelet-bootstrap \
    --clusterrole=system:node-bootstrapper \
    --user=kubelet-bootstrap
    # Start the service on 107
    [root@manager107 ~]# systemctl daemon-reload
    [root@manager107 ~]# systemctl enable kubelet
    [root@manager107 ~]# systemctl start kubelet
    # Start the service on 68
    [root@worker68 ~]# systemctl daemon-reload
    [root@worker68 ~]# systemctl enable kubelet
    [root@worker68 ~]# systemctl start kubelet
    # Start the service on 80
    [root@worker80 ~]# systemctl daemon-reload
    [root@worker80 ~]# systemctl enable kubelet
    [root@worker80 ~]# systemctl start kubelet

    When kubelet starts for the first time it sends a certificate signing request (CSR) to kube-apiserver; Kubernetes adds the node to the cluster only after the request is approved.

    # List pending CSRs
    [root@manager107 ~]# kubectl get csr
    # Approve a CSR (the argument is the CSR name from the output above)
    [root@manager107 ~]# kubectl certificate approve <csr-name>
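    With three nodes it can be convenient to approve all pending requests at once (a shortcut, not from the original article):

    [root@manager107 ~]# kubectl get csr -o name | xargs kubectl certificate approve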
    # Check cluster status
    [root@manager107 ~]# kubectl get nodes
    NAME              STATUS     ROLES     AGE      VERSION
    node/10.0.3.107   Ready      master    20h      v1.13.0
    node/10.0.3.68    Ready      node      20h      v1.13.0
    node/10.0.3.80    Ready      node      20h      v1.13.0
    # Deploy the kube-proxy component
    # kube-proxy runs on every node; it watches the apiserver for changes to Services and Endpoints and creates routing rules to load-balance traffic across Service backends.
    # Create the kube-proxy config file on 107
    [root@manager107 ~]# vim /k8s/kubernetes/cfg/kube-proxy
    KUBE_PROXY_OPTS="--logtostderr=true \
    --v=4 \
    --hostname-override=10.0.3.107 \
    --cluster-cidr=10.0.0.0/24 \
    --kubeconfig=/k8s/kubernetes/cfg/kube-proxy.kubeconfig"
    # Create the kube-proxy systemd unit file on 107
    [root@manager107 ~]# vim /usr/lib/systemd/system/kube-proxy.service 
    [Unit]
    Description=Kubernetes Proxy
    After=network.target
    
    [Service]
    EnvironmentFile=/k8s/kubernetes/cfg/kube-proxy
    ExecStart=/k8s/kubernetes/bin/kube-proxy $KUBE_PROXY_OPTS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    # Create the kube-proxy config file on 68
    [root@worker68 ~]# vim /k8s/kubernetes/cfg/kube-proxy
    KUBE_PROXY_OPTS="--logtostderr=true \
    --v=4 \
    --hostname-override=10.0.3.68 \
    --cluster-cidr=10.0.0.0/24 \
    --kubeconfig=/k8s/kubernetes/cfg/kube-proxy.kubeconfig"
    # Create the kube-proxy systemd unit file on 68
    [root@worker68 ~]# vim /usr/lib/systemd/system/kube-proxy.service 
    [Unit]
    Description=Kubernetes Proxy
    After=network.target
    
    [Service]
    EnvironmentFile=/k8s/kubernetes/cfg/kube-proxy
    ExecStart=/k8s/kubernetes/bin/kube-proxy $KUBE_PROXY_OPTS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    # Create the kube-proxy config file on 80
    [root@worker80 ~]# vim /k8s/kubernetes/cfg/kube-proxy
    KUBE_PROXY_OPTS="--logtostderr=true \
    --v=4 \
    --hostname-override=10.0.3.80 \
    --cluster-cidr=10.0.0.0/24 \
    --kubeconfig=/k8s/kubernetes/cfg/kube-proxy.kubeconfig"
    # Create the kube-proxy systemd unit file on 80
    [root@worker80 ~]# vim /usr/lib/systemd/system/kube-proxy.service
    [Unit]
    Description=Kubernetes Proxy
    After=network.target
    
    [Service]
    EnvironmentFile=/k8s/kubernetes/cfg/kube-proxy
    ExecStart=/k8s/kubernetes/bin/kube-proxy $KUBE_PROXY_OPTS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    # Start the service on 107
    [root@manager107 ~]# systemctl daemon-reload
    [root@manager107 ~]# systemctl enable kube-proxy
    [root@manager107 ~]# systemctl start kube-proxy
    # Start the service on 68
    [root@worker68 ~]# systemctl daemon-reload
    [root@worker68 ~]# systemctl enable kube-proxy
    [root@worker68 ~]# systemctl start kube-proxy
    # Start the service on 80
    [root@worker80 ~]# systemctl daemon-reload
    [root@worker80 ~]# systemctl enable kube-proxy
    [root@worker80 ~]# systemctl start kube-proxy
    # Label the master and worker nodes
    [root@manager107 ~]# kubectl label node 10.0.3.107  node-role.kubernetes.io/master='master'
    [root@manager107 ~]# kubectl label node 10.0.3.68  node-role.kubernetes.io/node='node'
    [root@manager107 ~]# kubectl label node 10.0.3.80  node-role.kubernetes.io/node='node'
    # Check cluster status
    [root@manager107 ~]# kubectl get node,cs
    NAME              STATUS   ROLES    AGE   VERSION
    node/10.0.3.107   Ready    master   21h   v1.13.0
    node/10.0.3.68    Ready    node     21h   v1.13.0
    node/10.0.3.80    Ready    node     21h   v1.13.0
    
    NAME                                 STATUS    MESSAGE             ERROR
    componentstatus/scheduler            Healthy   ok
    componentstatus/controller-manager   Healthy   ok
    componentstatus/etcd-1               Healthy   {"health":"true"}
    componentstatus/etcd-0               Healthy   {"health":"true"}
    componentstatus/etcd-2               Healthy   {"health":"true"}
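    As a final functional check (an addition, not part of the original write-up), a throwaway deployment exercises scheduling, the pause image, and the flannel overlay; in an offline environment the nginx image must already be present locally or in a reachable registry:

    [root@manager107 ~]# kubectl run nginx --image=nginx --replicas=2
    [root@manager107 ~]# kubectl get pods -o wide           # pods should be Running with 172.20.x.x addresses
    [root@manager107 ~]# kubectl delete deployment nginx    # clean up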

    IV. References

    https://www.kubernetes.org.cn/4963.html
