  • Kubernetes Series 3: Installing a Kubernetes Environment from Binaries

    Installation environment:

    # The three nodes
    192.168.31.11  hostname: env11  role: Master / Node / etcd
    192.168.31.12  hostname: env12  role: Node / etcd
    192.168.31.13  hostname: env13  role: Node / etcd
    # Operating system version
    CentOS Linux release 7.4.1708 (Core)
    # Disable firewalld and SELinux on every node
    systemctl stop firewalld; systemctl disable firewalld
    setenforce 0; sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/sysconfig/selinux

    1. Environment initialization

    Perform the following initialization identically on all three nodes.

    # Add the Aliyun Docker repository
    cd /etc/yum.repos.d/
    wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    # Install Docker
    yum install -y docker-ce
    # Start Docker and enable it on boot
    systemctl start docker; systemctl enable docker
    # Prepare the deployment directories
    mkdir -p /opt/kubernetes/{cfg,bin,ssl,log}
    # Baidu Netdisk download for the binaries
    Link: https://pan.baidu.com/s/1PrunUVHfwL97jjc4PqL_Bw
    Extraction code: 4oeg
    # After extracting, copy the files into place
    Copy the files under k8s-v1.10.3/bin/, etcd-v3.3.18-linux-amd64, and flannel-v0.10.0-linux-amd64 into /opt/kubernetes/bin
    Copy the cni-plugins-amd64-v0.8.4 directory into /opt/kubernetes/bin and rename it to cni
    # Add the environment variable
    vim .bash_profile
    PATH=$PATH:$HOME/bin:/opt/kubernetes/bin
    source .bash_profile

    2. Create the CA certificate

    Since Kubernetes 1.8, the cluster components use TLS certificates to encrypt their communication, and every Kubernetes cluster needs its own independent CA. Three toolchains are commonly used to build the CA: easyrsa, openssl, and cfssl. This guide uses cfssl, currently the most widely used option and comparatively simple to configure: everything certificate-related is described in JSON files. The cfssl release used here is R1.2.

    2.1. Install CFSSL

    The official cfssl site is http://pkg.cfssl.org, from which the latest releases can be downloaded.

    wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
    wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
    wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
    chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
    mv cfssl_linux-amd64 /usr/local/bin/cfssl
    mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
    mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo

    2.2. Create the JSON config used to generate the CA

    # Create a temporary directory for the certificates
    [root@env11 ssl]# mkdir -p /opt/src/ssl
    [root@env11 ssl]# cd /opt/src/ssl
    [root@env11 ssl]# vim ca-config.json
    {
      "signing": {
        "default": {
          "expiry": "8760h"
        },
        "profiles": {
          "kubernetes": {
            "usages": [
                "signing",
                "key encipherment",
                "server auth",
                "client auth"
            ],
            "expiry": "8760h"
          }
        }
      }
    }

    2.3. Create the JSON config for the CA certificate signing request (CSR)

    [root@env11 ssl]# cd /opt/src/ssl
    [root@env11 ssl]# vim ca-csr.json
    {
      "CN": "kubernetes",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "BeiJing",
          "L": "BeiJing",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }

    2.4. Generate the CA certificate (ca.pem) and key (ca-key.pem)

    [root@env11 ssl]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
    [root@env11 ssl]# ls -l ca*
    -rw-r--r--. 1 root root  290 Apr 14 19:00 ca-config.json
    -rw-r--r--. 1 root root  208 Apr 14 19:00 ca-csr.json
    -rw-------. 1 root root 1679 Apr 14 12:06 ca-key.pem
    -rw-r--r--. 1 root root 1001 Apr 14 12:06 ca.csr
    -rw-r--r--. 1 root root 1359 Apr 14 12:06 ca.pem

    2.5. Distribute the certificates

    Copy the generated ca.csr, ca.pem, ca-key.pem, and ca-config.json to /opt/kubernetes/ssl on all three nodes.
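
    A minimal distribution sketch (assuming passwordless SSH from env11 to the other two nodes; run it from /opt/src/ssl):

    cp ca.csr ca.pem ca-key.pem ca-config.json /opt/kubernetes/ssl/
    for ip in 192.168.31.12 192.168.31.13; do
        scp ca.csr ca.pem ca-key.pem ca-config.json ${ip}:/opt/kubernetes/ssl/
    done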

    3. Deploy the etcd cluster

    All of Kubernetes' persistent state is stored as key-value pairs in etcd, which, much like ZooKeeper, provides a distributed coordination service. The Kubernetes components themselves are stateless precisely because all of the data lives in etcd. Since etcd supports clustering, it is deployed on all three hosts here.

    3.1. Create the etcd certificate signing request

    [root@env11 ssl]# cd /opt/src/ssl
    [root@env11 ssl]# vim etcd-csr.json      # the hosts list below contains the IPs of all etcd cluster members
    {
      "CN": "etcd",
      "hosts": [
        "127.0.0.1",
        "192.168.31.11",
        "192.168.31.12",
        "192.168.31.13"
      ],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "BeiJing",
          "L": "BeiJing",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }

    Generate the etcd certificate and private key:

    cfssl gencert -ca=/opt/src/ssl/ca.pem \
      -ca-key=/opt/src/ssl/ca-key.pem \
      -config=/opt/src/ssl/ca-config.json \
      -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
    [root@env11 ssl]# ls -l etcd*
    -rw-r--r--. 1 root root  299 Apr 14 19:06 etcd-csr.json
    -rw-------. 1 root root 1675 Apr 14 12:08 etcd-key.pem
    -rw-r--r--. 1 root root 1062 Apr 14 12:08 etcd.csr
    -rw-r--r--. 1 root root 1436 Apr 14 12:08 etcd.pem

    3.2. Distribute the certificates

    Copy the generated etcd.pem and etcd-key.pem to /opt/kubernetes/ssl on all three nodes.

    3.3. Create the etcd configuration file

    Port 2379 is used for client (external) communication and 2380 for peer (cluster-internal) communication.

    [root@env11 ~]# vim /opt/kubernetes/cfg/etcd.conf
    #[member]
    ETCD_NAME="etcd-node1"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    #ETCD_SNAPSHOT_COUNTER="10000"
    #ETCD_HEARTBEAT_INTERVAL="100"
    #ETCD_ELECTION_TIMEOUT="1000"
    ETCD_LISTEN_PEER_URLS="https://192.168.31.11:2380"
    ETCD_LISTEN_CLIENT_URLS="https://192.168.31.11:2379,https://127.0.0.1:2379"
    #ETCD_MAX_SNAPSHOTS="5"
    #ETCD_MAX_WALS="5"
    #ETCD_CORS=""
    #[cluster]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.31.11:2380"
    # if you use different ETCD_NAME (e.g. test),
    # set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
    ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.31.11:2380,etcd-node2=https://192.168.31.12:2380,etcd-node3=https://192.168.31.13:2380"
    ETCD_INITIAL_CLUSTER_STATE="new"
    ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.31.11:2379"
    #[security]
    CLIENT_CERT_AUTH="true"
    ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
    ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
    ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
    PEER_CLIENT_CERT_AUTH="true"
    ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
    ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
    ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"

    3.4. Create the etcd systemd service

    [root@env11 ~]# vim /etc/systemd/system/etcd.service
    [Unit]
    Description=Etcd Server
    After=network.target
     
    [Service]
    WorkingDirectory=/var/lib/etcd
    EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
    # set GOMAXPROCS to number of processors
    ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /opt/kubernetes/bin/etcd"
    Type=notify
     
    [Install]
    WantedBy=multi-user.target

    3.5. Distribute the configuration files

    Copy /opt/kubernetes/cfg/etcd.conf and /etc/systemd/system/etcd.service to the corresponding directories on all three machines, reload systemd, and enable etcd on boot.

    # Run on all three nodes
    [root@env11 ~]# systemctl daemon-reload
    [root@env11 ~]# systemctl enable etcd

    The copied /opt/kubernetes/cfg/etcd.conf must be adjusted for each node; the values to change are listed below (a distribution sketch follows the list).

    # Changes needed on env12
    ETCD_NAME="etcd-node2"
    ETCD_LISTEN_PEER_URLS="https://192.168.31.12:2380"
    ETCD_LISTEN_CLIENT_URLS="https://192.168.31.12:2379,https://127.0.0.1:2379"
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.31.12:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.31.12:2379"
    # Changes needed on env13
    ETCD_NAME="etcd-node3"
    ETCD_LISTEN_PEER_URLS="https://192.168.31.13:2380"
    ETCD_LISTEN_CLIENT_URLS="https://192.168.31.13:2379,https://127.0.0.1:2379"
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.31.13:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.31.13:2379"

    3.6. Create the etcd data directory and start etcd on all three nodes

    # Run on all three nodes
    [root@env11 ~]# mkdir /var/lib/etcd/
    [root@env11 ~]# systemctl start etcd
    [root@env11 ~]# netstat -tunlp | grep etcd
    tcp        0      0 192.168.31.11:2379      0.0.0.0:*               LISTEN      6002/etcd          
    tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      6002/etcd          
    tcp        0      0 192.168.31.11:2380      0.0.0.0:*               LISTEN      6002/etcd

    3.7. Verify the etcd cluster

    [root@env11 ~]# etcdctl --endpoints=https://192.168.31.11:2379 \
      --ca-file=/opt/kubernetes/ssl/ca.pem \
      --cert-file=/opt/kubernetes/ssl/etcd.pem \
      --key-file=/opt/kubernetes/ssl/etcd-key.pem cluster-health
    member f35b781c28383ca is healthy: got healthy result from https://192.168.31.11:2379
    member 18320ec3b6a86db4 is healthy: got healthy result from https://192.168.31.12:2379
    member 5f87ed09e484b6b3 is healthy: got healthy result from https://192.168.31.13:2379
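
    The membership can also be listed with the same TLS flags (an optional extra check using the etcd v2 API):

    etcdctl --endpoints=https://192.168.31.11:2379 \
      --ca-file=/opt/kubernetes/ssl/ca.pem \
      --cert-file=/opt/kubernetes/ssl/etcd.pem \
      --key-file=/opt/kubernetes/ssl/etcd-key.pem member list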

    4. Deploy the Master node

    4.1. Create the JSON config for the Kubernetes CSR

    [root@env11 ~]# cd /opt/src/ssl/
    [root@env11 ssl]# vim kubernetes-csr.json      # hosts must include 127.0.0.1, the Master IP, and the cluster service IP 10.1.0.1
    {
      "CN": "kubernetes",
      "hosts": [
        "127.0.0.1",
        "192.168.31.11",
        "10.1.0.1",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
      ],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "BeiJing",
          "L": "BeiJing",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }

    4.2. Generate the Kubernetes certificate and private key

    cfssl gencert -ca=/opt/src/ssl/ca.pem \
       -ca-key=/opt/src/ssl/ca-key.pem \
       -config=/opt/src/ssl/ca-config.json \
       -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes

    Copy the generated kubernetes-key.pem and kubernetes.pem to /opt/kubernetes/ssl on all three machines.

    4.3. Create the client token file used by kube-apiserver

    [root@env11 ssl]# vim /opt/kubernetes/ssl/bootstrap-token.csv
    bceaefa5f8d569895071fee2f77b5d3e,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
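
    The first field is simply a random 32-character hex token; a fresh one can be generated with, for example:

    head -c 16 /dev/urandom | od -An -t x | tr -d ' '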

    4.4. Create the basic username/password authentication file

    [root@env11 ssl]# vim /opt/kubernetes/ssl/basic-auth.csv
    admin,admin,1
    readonly,readonly,2

    4.5. Deploy the kube-apiserver systemd service

    [root@env11 ssl]# vim /usr/lib/systemd/system/kube-apiserver.service
    [Unit]
    Description=Kubernetes API Server
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    After=network.target
     
    [Service]
    ExecStart=/opt/kubernetes/bin/kube-apiserver \
      --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
      --bind-address=192.168.31.11 \
      --insecure-bind-address=127.0.0.1 \
      --authorization-mode=Node,RBAC \
      --runtime-config=rbac.authorization.k8s.io/v1 \
      --kubelet-https=true \
      --anonymous-auth=false \
      --basic-auth-file=/opt/kubernetes/ssl/basic-auth.csv \
      --enable-bootstrap-token-auth \
      --token-auth-file=/opt/kubernetes/ssl/bootstrap-token.csv \
      --service-cluster-ip-range=10.1.0.0/16 \
      --service-node-port-range=20000-40000 \
      --tls-cert-file=/opt/kubernetes/ssl/kubernetes.pem \
      --tls-private-key-file=/opt/kubernetes/ssl/kubernetes-key.pem \
      --client-ca-file=/opt/kubernetes/ssl/ca.pem \
      --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
      --etcd-cafile=/opt/kubernetes/ssl/ca.pem \
      --etcd-certfile=/opt/kubernetes/ssl/kubernetes.pem \
      --etcd-keyfile=/opt/kubernetes/ssl/kubernetes-key.pem \
      --etcd-servers=https://192.168.31.11:2379,https://192.168.31.12:2379,https://192.168.31.13:2379 \
      --enable-swagger-ui=true \
      --allow-privileged=true \
      --audit-log-maxage=30 \
      --audit-log-maxbackup=3 \
      --audit-log-maxsize=100 \
      --audit-log-path=/opt/kubernetes/log/api-audit.log \
      --event-ttl=1h \
      --v=2 \
      --logtostderr=false \
      --log-dir=/opt/kubernetes/log
    Restart=on-failure
    RestartSec=5
    Type=notify
    LimitNOFILE=65536
     
    [Install]
    WantedBy=multi-user.target

    4.6. Start kube-apiserver and enable it on boot

    [root@env11 ssl]# systemctl daemon-reload
    [root@env11 ssl]# systemctl enable kube-apiserver; systemctl start kube-apiserver
    [root@env11 ssl]# netstat -tunlp | grep kube-apiserver
    tcp        0      0 192.168.31.11:6443      0.0.0.0:*               LISTEN      6008/kube-apiserver
    tcp        0      0 127.0.0.1:8080          0.0.0.0:*               LISTEN      6008/kube-apiserver

    The output shows that the API server listens on the secure port 6443 and also on the local port 8080, which is used by kube-scheduler and kube-controller-manager. A quick local health check is sketched below.
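
    A minimal sanity check against the insecure local port (only reachable from the Master itself):

    curl http://127.0.0.1:8080/healthz
    # expected output: ok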

    4.7. Deploy the kube-controller-manager systemd service

    The controller manager consists of a set of controllers that watch the cluster state through the API server and keep the cluster in its desired state.

    [root@env11 ssl]# vim /usr/lib/systemd/system/kube-controller-manager.service
    [Unit]
    Description=Kubernetes Controller Manager
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
     
    [Service]
    ExecStart=/opt/kubernetes/bin/kube-controller-manager \
      --address=127.0.0.1 \
      --master=http://127.0.0.1:8080 \
      --allocate-node-cidrs=true \
      --service-cluster-ip-range=10.1.0.0/16 \
      --cluster-cidr=10.2.0.0/16 \
      --cluster-name=kubernetes \
      --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
      --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
      --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
      --root-ca-file=/opt/kubernetes/ssl/ca.pem \
      --leader-elect=true \
      --v=2 \
      --logtostderr=false \
      --log-dir=/opt/kubernetes/log
     
    Restart=on-failure
    RestartSec=5
     
    [Install]
    WantedBy=multi-user.target

    4.8. Start kube-controller-manager and enable it on boot

    [root@env11 ssl]# systemctl daemon-reload
    [root@env11 ssl]# systemctl enable  kube-controller-manager; systemctl start  kube-controller-manager
    [root@env11 ssl]# netstat -tunlp | grep kube-controll 
    tcp        0      0 127.0.0.1:10252         0.0.0.0:*               LISTEN      12112/kube-controll

    4.9. Deploy the kube-scheduler systemd service

    The scheduler assigns Pods to nodes: it watches kube-apiserver for Pods that have not yet been bound to a node and binds them according to its scheduling policy.

    [root@env11 ssl]# vim /usr/lib/systemd/system/kube-scheduler.service
    [Unit]
    Description=Kubernetes Scheduler
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
     
    [Service]
    ExecStart=/opt/kubernetes/bin/kube-scheduler \
      --address=127.0.0.1 \
      --master=http://127.0.0.1:8080 \
      --leader-elect=true \
      --v=2 \
      --logtostderr=false \
      --log-dir=/opt/kubernetes/log
     
    Restart=on-failure
    RestartSec=5
     
    [Install]
    WantedBy=multi-user.target

    4.10. Start kube-scheduler and enable it on boot

    [root@env11 ssl]# systemctl daemon-reload
    [root@env11 ssl]# systemctl enable  kube-scheduler; systemctl start  kube-scheduler
    [root@env11 ssl]# netstat -tunlp | grep kube-scheduler
    tcp        0      0 127.0.0.1:10251         0.0.0.0:*               LISTEN      5662/kube-scheduler

    4.11. Deploy the kubectl command-line tool

    With the kubectl binary already in place, create the admin certificate request.

    [root@env11 ssl]# vim admin-csr.json
    {
      "CN": "admin",
      "hosts": [],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "BeiJing",
          "L": "BeiJing",
          "O": "system:masters",
          "OU": "System"
        }
      ]
    }

    Generate the admin certificate and private key:

    cfssl gencert -ca=/opt/src/ssl/ca.pem \
       -ca-key=/opt/src/ssl/ca-key.pem \
       -config=/opt/src/ssl/ca-config.json \
       -profile=kubernetes admin-csr.json | cfssljson -bare admin
    [root@env11 ssl]# ls -l admin*
    -rw-r--r--. 1 root root  229 Apr 14 15:32 admin-csr.json
    -rw-------. 1 root root 1679 Apr 14 15:33 admin-key.pem
    -rw-r--r--. 1 root root 1009 Apr 14 15:33 admin.csr
    -rw-r--r--. 1 root root 1399 Apr 14 15:33 admin.pem
    # Copy the certificates into the local ssl directory
    cp admin*.pem /opt/kubernetes/ssl/

    Add the cluster to the kubeconfig file:

    kubectl config set-cluster kubernetes \
       --certificate-authority=/opt/kubernetes/ssl/ca.pem \
       --embed-certs=true \
       --server=https://192.168.31.11:6443

    Add the admin user to the kubeconfig file:

    kubectl config set-credentials admin \
       --client-certificate=/opt/kubernetes/ssl/admin.pem \
       --embed-certs=true \
       --client-key=/opt/kubernetes/ssl/admin-key.pem

    Add a context to the kubeconfig file:

    kubectl config set-context kubernetes \
       --cluster=kubernetes \
       --user=admin

    Set the default context:

    kubectl config use-context kubernetes

    Verify that cluster component status can be retrieved:

    [root@env11 ssl]# kubectl get cs
    NAME                 STATUS    MESSAGE              ERROR
    scheduler            Healthy   ok                  
    etcd-1               Healthy   {"health": "true"}  
    etcd-0               Healthy   {"health": "true"}  
    controller-manager   Healthy   ok                   
    etcd-2               Healthy   {"health": "true"}

    5. Deploy the Node components (all three machines also act as worker nodes)

    5.1. Deploy kubelet

    Create the role binding first. When kubelet starts, it sends a TLS bootstrap request to kube-apiserver, so the bootstrap token user must be bound to the system:node-bootstrapper role; only then does kubelet have permission to create the request.

    [root@env11]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

    Create the kubelet bootstrapping kubeconfig file and set the cluster parameters:

    cd /opt/src/ssl
    kubectl config set-cluster kubernetes \
       --certificate-authority=/opt/kubernetes/ssl/ca.pem \
       --embed-certs=true \
       --server=https://192.168.31.11:6443 \
       --kubeconfig=bootstrap.kubeconfig

    Set the client authentication parameters (the token must match the one in bootstrap-token.csv created in section 4.3):

    kubectl config set-credentials kubelet-bootstrap \
       --token=bceaefa5f8d569895071fee2f77b5d3e \
       --kubeconfig=bootstrap.kubeconfig

    Set the context parameters:

    kubectl config set-context default \
       --cluster=kubernetes \
       --user=kubelet-bootstrap \
       --kubeconfig=bootstrap.kubeconfig

    Select the default context:

    kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

    Distribute bootstrap.kubeconfig to /opt/kubernetes/cfg on all three nodes.

    5.2. Deploy kubelet with CNI support

    Configure CNI to use flannel as the backend plugin.

    [root@env11 ~]# mkdir -p /etc/cni/net.d
    [root@env11 ~]# vim /etc/cni/net.d/10-default.conf
    {
            "name": "flannel",
            "type": "flannel",
            "delegate": {
                "bridge": "docker0",
                "isDefaultGateway": true,
                "mtu": 1400
            }
    }

    Distribute /etc/cni/net.d/10-default.conf to the same directory on all three nodes, and create the kubelet data directory on each node.

    [root@env11 ~]# mkdir -p /var/lib/kubelet/

    Create the kubelet systemd service:

    [root@env11 ~]# vim /usr/lib/systemd/system/kubelet.service
    [Unit]
    Description=Kubernetes Kubelet
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    After=docker.service
    Requires=docker.service
     
    [Service]
    WorkingDirectory=/var/lib/kubelet
    ExecStart=/opt/kubernetes/bin/kubelet \
      --address=192.168.31.11 \
      --hostname-override=192.168.31.11 \
      --pod-infra-container-image=mirrorgooglecontainers/pause-amd64:3.0 \
      --experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
      --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
      --cert-dir=/opt/kubernetes/ssl \
      --network-plugin=cni \
      --cni-conf-dir=/etc/cni/net.d \
      --cni-bin-dir=/opt/kubernetes/bin/cni \
      --cluster-dns=10.1.0.2 \
      --cluster-domain=cluster.local. \
      --hairpin-mode hairpin-veth \
      --allow-privileged=true \
      --fail-swap-on=false \
      --v=2 \
      --logtostderr=false \
      --log-dir=/opt/kubernetes/log
    Restart=on-failure
    RestartSec=5
     
    [Install]
    WantedBy=multi-user.target

    Distribute /usr/lib/systemd/system/kubelet.service to the other two nodes, changing the IP addresses to match each node (sketched below), then start kubelet and enable it on boot.
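
    A minimal distribution sketch (assuming passwordless SSH; the sed swaps the node IP used in --address and --hostname-override):

    for i in 2 3; do
        sed "s/192.168.31.11/192.168.31.1${i}/g" /usr/lib/systemd/system/kubelet.service | \
            ssh 192.168.31.1${i} "cat > /usr/lib/systemd/system/kubelet.service"
    done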

    systemctl daemon-reload
    systemctl enable kubelet; systemctl start kubelet

    Check the CSR requests on env11:

    [root@env11 ssl]# kubectl get csr
    NAME                                                   AGE       REQUESTOR           CONDITION
    node-csr-GefhvoMO3iYaqsn-6PokIw0iC7n-TLH3MFezRD_cZPg   21s       kubelet-bootstrap   Pending
    node-csr-_QoxwxQe13lLzZ0VgSh7P604iXeve0X6EaGB-rxFj9Q   40s       kubelet-bootstrap   Pending

    Approve the kubelet TLS certificate requests:

    kubectl get csr|grep 'Pending' | awk 'NR>0{print $1}'| xargs kubectl certificate approve

    After the requests are approved, the nodes show a Ready status on env11:

    [root@env11 ssl]# kubectl get node
    NAME            STATUS    ROLES     AGE       VERSION
    192.168.31.12   Ready     <none>    62d       v1.10.3
    192.168.31.13   Ready     <none>    62d       v1.10.3

    5.3. Deploy kube-proxy

    Configure kube-proxy to use LVS (IPVS); install the required packages on all three nodes.

    yum install -y ipvsadm ipset conntrack

    Create the kube-proxy certificate request:

    [root@env11 ssl]# cd /opt/src/ssl/
    [root@env11 ssl]# vim kube-proxy-csr.json
    {
      "CN": "system:kube-proxy",
      "hosts": [],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "BeiJing",
          "L": "BeiJing",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }

    Generate the certificate:

    cfssl gencert -ca=/opt/src/ssl/ca.pem \
       -ca-key=/opt/src/ssl/ca-key.pem \
       -config=/opt/src/ssl/ca-config.json \
       -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

    Distribute kube-proxy.csr, kube-proxy.pem, and kube-proxy-key.pem to all Node machines.

    Create the kube-proxy kubeconfig file:

    kubectl config set-cluster kubernetes \
       --certificate-authority=/opt/kubernetes/ssl/ca.pem \
       --embed-certs=true \
       --server=https://192.168.31.11:6443 \
       --kubeconfig=kube-proxy.kubeconfig

    kubectl config set-credentials kube-proxy \
       --client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \
       --client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \
       --embed-certs=true \
       --kubeconfig=kube-proxy.kubeconfig

    kubectl config set-context default \
       --cluster=kubernetes \
       --user=kube-proxy \
       --kubeconfig=kube-proxy.kubeconfig

    kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

    Distribute kube-proxy.kubeconfig to /opt/kubernetes/cfg/ on all three nodes.

    Create the kube-proxy systemd service:

    # Create the data directory
    mkdir /var/lib/kube-proxy
    [root@env11 ssl]# vim /usr/lib/systemd/system/kube-proxy.service
    [Unit]
    Description=Kubernetes Kube-Proxy Server
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    After=network.target
     
    [Service]
    WorkingDirectory=/var/lib/kube-proxy
    ExecStart=/opt/kubernetes/bin/kube-proxy \
      --bind-address=192.168.31.11 \
      --hostname-override=192.168.31.11 \
      --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig \
      --masquerade-all \
      --feature-gates=SupportIPVSProxyMode=true \
      --proxy-mode=ipvs \
      --ipvs-min-sync-period=5s \
      --ipvs-sync-period=5s \
      --ipvs-scheduler=rr \
      --v=2 \
      --logtostderr=false \
      --log-dir=/opt/kubernetes/log
     
    Restart=on-failure
    RestartSec=5
    LimitNOFILE=65536
     
    [Install]
    WantedBy=multi-user.target

    Distribute /usr/lib/systemd/system/kube-proxy.service to all three nodes, changing the IP addresses to match each node.

    Start kube-proxy and enable it on boot:

    systemctl daemon-reload
    systemctl enable kube-proxy; systemctl start kube-proxy

    Check the IPVS state. A virtual server has been created: requests to 10.1.0.1:443 are forwarded to 192.168.31.11:6443, which is the API server's port.

    [root@env12 ~]# ipvsadm -Ln
    IP Virtual Server version 1.2.1 (size=4096)
    Prot LocalAddress:Port Scheduler Flags
      -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
    TCP  10.1.0.1:443 rr persistent 10800
      -> 192.168.31.11:6443           Masq    1      4          0

    The Kubernetes cluster itself is now deployed. Because Kubernetes does not ship its own Pod network, a third-party network plugin is required before Pods can be created.

    6. Deploy the Flannel network

    6.1. Create the flannel certificate request

    [root@env11 ~]# cd /opt/src/ssl/
    [root@env11 ssl]# vim flanneld-csr.json
    {
      "CN": "flanneld",
      "hosts": [],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "BeiJing",
          "L": "BeiJing",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }

    Generate the certificate:

    cfssl gencert -ca=/opt/src/ssl/ca.pem \
         -ca-key=/opt/src/ssl/ca-key.pem \
         -config=/opt/src/ssl/ca-config.json \
         -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
    [root@env11 ssl]# ll flannel*
    -rw-r--r--. 1 root root  221 Apr 14 15:58 flanneld-csr.json
    -rw-------. 1 root root 1679 Apr 14 15:58 flanneld-key.pem
    -rw-r--r--. 1 root root  997 Apr 14 15:58 flanneld.csr
    -rw-r--r--. 1 root root 1391 Apr 14 15:58 flanneld.pem

    6.2. Distribute certificates and files

    Copy flanneld-key.pem and flanneld.pem to /opt/kubernetes/ssl on all three nodes.

    Copy the flannel binaries and helper scripts flanneld, mk-docker-opts.sh, and remove-docker0.sh to /opt/kubernetes/bin on all three nodes.

    6.3. Create the flannel configuration file

    [root@env11 bin]# cat /opt/kubernetes/cfg/flannel
    FLANNEL_ETCD="-etcd-endpoints=https://192.168.31.11:2379,https://192.168.31.12:2379,https://192.168.31.13:2379"
    FLANNEL_ETCD_KEY="-etcd-prefix=/kubernetes/network"
    FLANNEL_ETCD_CAFILE="--etcd-cafile=/opt/kubernetes/ssl/ca.pem"
    FLANNEL_ETCD_CERTFILE="--etcd-certfile=/opt/kubernetes/ssl/flanneld.pem"
    FLANNEL_ETCD_KEYFILE="--etcd-keyfile=/opt/kubernetes/ssl/flanneld-key.pem"

    Distribute /opt/kubernetes/cfg/flannel to the /opt/kubernetes/cfg directory on all three nodes.

    6.4. Create the flannel systemd service

    [root@env11 bin]# cat /usr/lib/systemd/system/flannel.service
    [Unit]
    Description=Flanneld overlay address etcd agent
    After=network.target
    Before=docker.service
     
    [Service]
    EnvironmentFile=-/opt/kubernetes/cfg/flannel
    ExecStartPre=/opt/kubernetes/bin/remove-docker0.sh
    ExecStart=/opt/kubernetes/bin/flanneld ${FLANNEL_ETCD} ${FLANNEL_ETCD_KEY} ${FLANNEL_ETCD_CAFILE} ${FLANNEL_ETCD_CERTFILE} ${FLANNEL_ETCD_KEYFILE}
    ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker
     
    Type=notify
     
    [Install]
    WantedBy=multi-user.target
    RequiredBy=docker.service

    Distribute /usr/lib/systemd/system/flannel.service to all three nodes.

    6.5. Download the CNI plugins

    The CNI plugins are published at https://github.com/containernetworking/plugins/releases. Their final location is /opt/kubernetes/bin/cni, which was already populated when the archive was extracted during environment initialization.

    6.6. Create the etcd key for the Pod network

    This writes the Pod network configuration into etcd; flannel reads it from there and allocates a subnet to each node.

    # Write the key-value pair with etcdctl's mk command (etcd v2 API)
    /opt/kubernetes/bin/etcdctl \
    --ca-file /opt/kubernetes/ssl/ca.pem \
    --cert-file /opt/kubernetes/ssl/flanneld.pem \
    --key-file /opt/kubernetes/ssl/flanneld-key.pem \
    --no-sync -C https://192.168.31.11:2379,https://192.168.31.12:2379,https://192.168.31.13:2379 \
    mk /kubernetes/network/config '{ "Network": "10.2.0.0/16", "Backend": { "Type": "vxlan", "VNI": 1 }}' >/dev/null 2>&1
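
    To confirm the key was written, it can be read back with the same client certificates:

    /opt/kubernetes/bin/etcdctl \
    --ca-file /opt/kubernetes/ssl/ca.pem \
    --cert-file /opt/kubernetes/ssl/flanneld.pem \
    --key-file /opt/kubernetes/ssl/flanneld-key.pem \
    --no-sync -C https://192.168.31.11:2379 \
    get /kubernetes/network/config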

    6.7. Start flannel and enable it on boot

    Do this on all three nodes. Each node gains a flannel.1 interface, and each node is assigned a different subnet; a quick check is sketched after the commands.

    systemctl daemon-reload
    systemctl enable flannel; systemctl start flannel
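
    To inspect the interface and the subnet flannel assigned to this node (a minimal check; /run/flannel/subnet.env is flannel's default subnet file, an assumption if that path was changed):

    ip -o -4 addr show flannel.1
    cat /run/flannel/subnet.env      # FLANNEL_SUBNET is this node's slice of 10.2.0.0/16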

    6.8. Configure Docker to use Flannel

    [root@env11 ~]# vim /usr/lib/systemd/system/docker.service
    [Unit]
    Description=Docker Application Container Engine
    Documentation=https://docs.docker.com
    #BindsTo=containerd.service
    #After=network-online.target firewalld.service containerd.service
    # Make docker start after the flannel network
    After=network-online.target firewalld.service flannel.service
    Wants=network-online.target
    Requires=flannel.service
     
    [Service]
    Type=notify
    # the default is not to use systemd for cgroups because the delegate issues still
    # exists and systemd currently does not support the cgroup feature set required
    # for containers run by docker
    # Added: set docker0's IP address to the address allocated by flannel
    EnvironmentFile=-/run/flannel/docker
    #ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
    # Modified the startup command so the flannel-provided parameters take effect
    ExecStart=/usr/bin/dockerd $DOCKER_OPTS
    ExecReload=/bin/kill -s HUP $MAINPID
    TimeoutSec=0
    RestartSec=2
    Restart=always
     
    # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
    # Both the old, and new location are accepted by systemd 229 and up, so using the old location
    # to make them work for either version of systemd.
    StartLimitBurst=3
     
    # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
    # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
    # this option work for either version of systemd.
    StartLimitInterval=60s
     
    # Having non-zero Limit*s causes performance problems due to accounting overhead
    # in the kernel. We recommend using cgroups to do container-local accounting.
    LimitNOFILE=infinity
    LimitNPROC=infinity
    LimitCORE=infinity
     
    # Comment TasksMax if your systemd version does not supports it.
    # Only systemd 226 and above support this option.
    TasksMax=infinity
     
    # set delegate yes so that systemd does not reset the cgroups of docker containers
    Delegate=yes
     
    # kill only the docker process, not all processes in the cgroup
    KillMode=process
     
    [Install]
    WantedBy=multi-user.target

    Apply the same change on all three nodes, then restart Docker; docker0 then uses the IP address allocated by flannel (a restart-and-check sketch follows).
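
    A minimal restart-and-check sketch (the docker0 address should fall inside the node's flannel subnet):

    systemctl daemon-reload
    systemctl restart docker
    ip -o -4 addr show docker0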

    7. CoreDNS

    7.1. Deploy CoreDNS

    [root@env11 ~]# vim coredns.yaml
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: coredns
      namespace: kube-system
      labels:
          kubernetes.io/cluster-service: "true"
          addonmanager.kubernetes.io/mode: Reconcile
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      labels:
        kubernetes.io/bootstrapping: rbac-defaults
        addonmanager.kubernetes.io/mode: Reconcile
      name: system:coredns
    rules:
    - apiGroups:
      - ""
      resources:
      - endpoints
      - services
      - pods
      - namespaces
      verbs:
      - list
      - watch
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      annotations:
        rbac.authorization.kubernetes.io/autoupdate: "true"
      labels:
        kubernetes.io/bootstrapping: rbac-defaults
        addonmanager.kubernetes.io/mode: EnsureExists
      name: system:coredns
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:coredns
    subjects:
    - kind: ServiceAccount
      name: coredns
      namespace: kube-system
    ---
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: coredns
      namespace: kube-system
      labels:
          addonmanager.kubernetes.io/mode: EnsureExists
    data:
      Corefile: |
        .:53 {
            errors
            health
            kubernetes cluster.local. in-addr.arpa ip6.arpa {
                pods insecure
                upstream
                fallthrough in-addr.arpa ip6.arpa
            }
            prometheus :9153
            proxy . /etc/resolv.conf
            cache 30
        }
    ---
    apiVersion: extensions/v1beta1
    kind: Deployment
    metadata:
      name: coredns
      namespace: kube-system
      labels:
        k8s-app: coredns
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
        kubernetes.io/name: "CoreDNS"
    spec:
      replicas: 2
      strategy:
        type: RollingUpdate
        rollingUpdate:
          maxUnavailable: 1
      selector:
        matchLabels:
          k8s-app: coredns
      template:
        metadata:
          labels:
            k8s-app: coredns
        spec:
          serviceAccountName: coredns
          tolerations:
            - key: node-role.kubernetes.io/master
              effect: NoSchedule
            - key: "CriticalAddonsOnly"
              operator: "Exists"
          containers:
          - name: coredns
            image: coredns/coredns:1.0.6
            imagePullPolicy: IfNotPresent
            resources:
              limits:
                memory: 170Mi
              requests:
                cpu: 100m
                memory: 70Mi
            args: [ "-conf", "/etc/coredns/Corefile" ]
            volumeMounts:
            - name: config-volume
              mountPath: /etc/coredns
            ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
            livenessProbe:
              httpGet:
                path: /health
                port: 8080
                scheme: HTTP
              initialDelaySeconds: 60
              timeoutSeconds: 5
              successThreshold: 1
              failureThreshold: 5
          dnsPolicy: Default
          volumes:
            - name: config-volume
              configMap:
                name: coredns
                items:
                - key: Corefile
                  path: Corefile
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: coredns
      namespace: kube-system
      labels:
        k8s-app: coredns
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
        kubernetes.io/name: "CoreDNS"
    spec:
      selector:
        k8s-app: coredns
      clusterIP: 10.1.0.2
      ports:
      - name: dns
        port: 53
        protocol: UDP
      - name: dns-tcp
        port: 53
        protocol: TCP
    # kubectl apply -f coredns.yaml
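
    Once applied, a quick verification sketch (busybox:1.28 and the pod name dns-test are arbitrary choices for the test):

    kubectl get pods -n kube-system -l k8s-app=coredns
    kubectl get svc -n kube-system coredns
    # Resolve the kubernetes service through the cluster DNS (10.1.0.2)
    kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default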

