  • kubernetes (1): Installing a k8s Cluster from Binaries

    I. k8s Architecture Planning

    1. k8s logical architecture

    2. k8s lab topology

    3. The three k8s networks

    4. Host planning

    Hostname              IP           CPU (cores)  Memory (GB)  Role
    kjdow7-11.host.com    10.4.7.11    2            3            k8s proxy node 1
    kjdow7-12.host.com    10.4.7.12    2            3            k8s proxy node 2
    kjdow7-21.host.com    10.4.7.21    2            4            k8s compute node 1
    kjdow7-22.host.com    10.4.7.22    2            4            k8s compute node 2
    kjdow7-200.host.com   10.4.7.200   2            3            k8s ops node 1

    5. Software versions

    Component  Version                                   Notes
    harbor     1.7.5 or later                            private registry
    etcd       below 3.3.3 (3.1.x stable recommended)    datastore
    k8s        1.15 (recommended for production)         main k8s software

    harbor versions below 1.7.5 have a disclosed vulnerability that may allow privilege escalation to administrator.

    There are many k8s releases. Version 1.16 brought a qualitative change, dropping some legacy components and adding new features, but few companies run it yet and documentation is scarce, so it is not recommended for production for now.

    II. Installing and Deploying the k8s Cluster from Binaries

    1. k8s prerequisite: installing bind9

    In the lab environment the hosts need domain name resolution, so a local DNS server must be set up; the open-source Linux tool bind works well for this.

    #Pre-install these packages on every host
    yum install wget net-tools telnet tree nmap sysstat lrzsz dos2unix bind-utils -y
    

    1.1 Install bind9

    #Perform on host 7-11
    
    ##1. Install the package
    7-11 ~]# yum install bind -y
    ##2. Edit the main bind config
    7-11 ~]# vim /etc/named.conf
            listen-on port 53 { 10.4.7.11; }; ##default is 127.0.0.1; set the host IP (or any) so named serves more than loopback
            allow-query     { any; };    #allow queries from other machines
            forwarders      { 223.5.5.5; };       #forward unresolvable queries to this upstream
            recursion yes;                        #use recursive queries
            dnssec-enable no;
            dnssec-validation no;      
    7-11 ~]# named-checkconf      #check the config file for errors
    

    1.2 Configure bind9

    #Configure the zone declarations
    7-11 ~]# vim /etc/named.rfc1912.zones
    zone "host.com" IN {                      #host (infrastructure) domain
            type master;
            file "host.com.zone";
            allow-update { 10.4.7.11; };
    };
    
    zone "phc-dow.com" IN {                  #business domain
            type master;
            file "phc-dow.com.zone";
            allow-update { 10.4.7.11; };
    };
    
    #Create the zone data files
    [root@kjdow7-11 ~]# cat /var/named/host.com.zone 
    $ORIGIN  host.com.
    $TTL  600   ; 10 minutes
    @        IN SOA dns.host.com. dnsadmin.host.com. (
                                    2020010201   ; serial
    				10800        ; refresh (3 hours)
    				900          ; retry  (15 minutes)
    				604800       ; expire (1 week)
    				86400        ; minimum (1 day)
                    )
    			NS   dns.host.com.
    $TTL  60 ; 1 minute
    dns                A         10.4.7.11
    kjdow7-11           A         10.4.7.11
    kjdow7-12           A         10.4.7.12
    kjdow7-21           A         10.4.7.21
    kjdow7-22           A         10.4.7.22
    kjdow7-200          A         10.4.7.200
    7-11 ~]# vim /var/named/phc-dow.com.zone
    $ORIGIN  phc-dow.com.
    $TTL  600   ; 10 minutes
    @        IN SOA dns.phc-dow.com. dnsadmin.phc-dow.com. (
                                    2020010201   ; serial
                                    10800        ; refresh (3 hours)
                                    900          ; retry  (15 minutes)
                                    604800       ; expire (1 week)
                                    86400        ; minimum (1 day)
                    )
                            NS   dns.phc-dow.com.
    $TTL  60 ; 1 minute
    dns                A         10.4.7.11
    

    Note: bump the serial value by 1 every time you change a zone file.
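
    A minimal sketch of the edit-and-reload cycle (named-checkzone ships with the bind package; paths as above):

    #validate a zone file after editing it, then reload named
    7-11 ~]# named-checkzone host.com /var/named/host.com.zone
    7-11 ~]# named-checkzone phc-dow.com /var/named/phc-dow.com.zone
    7-11 ~]# systemctl restart named     #once named is running (started in 1.3); rndc reload also works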

    1.3 Start and verify DNS

    #Start the service
    7-11 ~]# systemctl start named
    7-11 ~]# systemctl enable named
    Created symlink from /etc/systemd/system/multi-user.target.wants/named.service to /usr/lib/systemd/system/named.service.
    [root@kjdow7-11 ~]# netstat -lntup | grep 53
    tcp        0      0 10.4.7.11:53            0.0.0.0:*               LISTEN      6898/named          
    tcp        0      0 127.0.0.1:953           0.0.0.0:*               LISTEN      6898/named          
    tcp6       0      0 ::1:53                  :::*                    LISTEN      6898/named          
    tcp6       0      0 ::1:953                 :::*                    LISTEN      6898/named          
    udp        0      0 10.4.7.11:53            0.0.0.0:*                           6898/named          
    udp        0      0 0.0.0.0:832             0.0.0.0:*                           5323/rpcbind        
    udp6       0      0 ::1:53                  :::*                                6898/named          
    udp6       0      0 :::832                  :::*                                5323/rpcbind   
    
    #Verify
    7-11 ~]# dig -t A kjdow7-22.host.com @10.4.7.11 +short
    10.4.7.22
    
    #Point the other hosts' DNS at 10.4.7.11 and they will resolve these names
     ~]# cat /etc/resolv.conf 
    # Generated by NetworkManager
    search host.com                           #short-name resolution
    nameserver 10.4.7.11
    
    

    2. k8s prerequisite: preparing the certificate-signing environment

    Perform on kjdow7-200

    2.1 Install cfssl

    7-200 ~]# curl -s -L -o /usr/bin/cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 
    7-200 ~]# curl -s -L -o /usr/bin/cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 
    7-200 ~]# curl -s -L -o /usr/bin/cfssl-certinfo https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 
    7-200 ~]# chmod +x /usr/bin/cfssl*
    

    2.2 Configuration

    Create the JSON config used to generate the CA certificate

    7-200 ~]# mkdir /opt/certs
    7-200 ~]# vim /opt/certs/ca-config.json
    {
        "signing": {
            "default": {
                "expiry": "175200h"
            },
            "profiles": {
                "server": {
                    "expiry": "175200h",
                    "usages": [
                        "signing",
                        "key encipherment",
                        "server auth"
                    ]
                },
                "client": {
                    "expiry": "175200h",
                    "usages": [
                        "signing",
                        "key encipherment",
                        "client auth"
                    ]
                },
                "peer": {
                    "expiry": "175200h",
                    "usages": [
                        "signing",
                        "key encipherment",
                        "server auth",
                        "client auth"
                    ]
                }
            }
        }
    }
    

    Certificate types
    client certificate: used by clients so the server can authenticate them, e.g. etcdctl, etcd proxy, fleetctl, the docker client
    server certificate: used by servers so clients can verify the server's identity, e.g. the docker daemon, kube-apiserver
    peer certificate: a dual-purpose certificate used for communication between etcd cluster members
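
    To confirm which usages actually landed in an issued certificate, cfssl's decoder can print it; a quick sketch (run once a certificate exists, e.g. the etcd-peer cert signed in section 5.1):

    #decode a certificate; the profile's "usages" appear as X509v3 Key Usage / Extended Key Usage
    7-200 certs]# cfssl-certinfo -cert etcd-peer.pem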

    Create the JSON config for the CA certificate signing request (CSR)

    /opt/certs/ca-csr.json
    {
        "CN": "kubernetes-ca",
        "hosts": [
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "ST": "shanghai",
                "L": "shanghai",
                "O": "phc-dow",
                "OU": "kjdow"
            }
        ],
        "ca": {
            "expiry": "175200h"
        }
    }
    

    CN: Common Name; browsers use this field to check whether a site is legitimate, so it usually carries the domain name. Very important.
    C: Country
    ST: State or province
    L: Locality (city)
    O: Organization Name (company)
    OU: Organization Unit Name (department)

    Generate the CA certificate and private key

    7-200 ~]# cd /opt/certs
    7-200 certs]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca  
    2019/01/18 09:31:19 [INFO] generating a new CA key and certificate from CSR
    2019/01/18 09:31:19 [INFO] generate received request
    2019/01/18 09:31:19 [INFO] received CSR
    2019/01/18 09:31:19 [INFO] generating key: rsa-2048
    2019/01/18 09:31:19 [INFO] encoded CSR
    2019/01/18 09:31:19 [INFO] signed certificate with serial number 345276964513449660162382535043012874724976422200
    
    7-200 certs]# ls -l
    -rw-r--r-- 1 root root  836 Jan 16 11:04 ca-config.json
    -rw-r--r-- 1 root root  332 Jan 16 11:10 ca-csr.json
    -rw------- 1 root root 1675 Jan 16 11:17 ca-key.pem
    -rw-r--r-- 1 root root 1001 Jan 16 11:17 ca.csr
    -rw-r--r-- 1 root root 1354 Jan 16 11:17 ca.pem
    

    This produces ca.pem, ca.csr, and ca-key.pem (the CA private key; guard it carefully).
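
    A quick sanity check on the new CA, assuming the openssl client is installed: the subject should carry the names fields above, and the validity should span the 175200h (20 years) configured in ca-csr.json.

    7-200 certs]# openssl x509 -noout -subject -dates -in ca.pem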

    3. k8s prerequisite: installing the docker environment

    Perform on kjdow7-21, kjdow7-22, and kjdow7-200

    3.1 Install docker

    curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
    
    

    This command adds the docker repository and installs the packages automatically.

    3.2 Configure docker

    ~]# mkdir /etc/docker /data/docker -p
    ~]# vi /etc/docker/daemon.json
    {
      "graph": "/data/docker",
      "storage-driver": "overlay",
    "registry-mirrors": ["https://dockerhub.azk8s.cn", "https://docker.mirrors.ustc.edu.cn"],
      "insecure-registries": ["registry.access.redhat.com","quay.io","harbor.phc-dow.com"],
      "bip": "172.7.21.1/24",
      "exec-opts": ["native.cgroupdriver=systemd"],
      "live-restore": true
    }
    
    

    Note: bip must follow the host IP; on 22 change bip to 172.7.22.1/24, and on 200 change it to 172.7.200.1/24.
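
    Since only bip changes per host, a small hypothetical helper can stamp daemon.json on each machine; a sketch, assuming the bridge octet mirrors the last octet of the host's 10.4.7.x address:

    #!/bin/sh
    #hypothetical helper: derive the docker bridge from the host's 10.4.7.x address
    SUFFIX=$(ip -4 addr show | awk -F'[ /]+' '/10\.4\.7\./{split($3,a,"."); print a[4]; exit}')
    sed -i "s#\"bip\": \".*\"#\"bip\": \"172.7.${SUFFIX}.1/24\"#" /etc/docker/daemon.json
    systemctl restart docker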

    Running docker info shows the following warnings:

    WARNING: bridge-nf-call-iptables is disabled
    WARNING: bridge-nf-call-ip6tables is disabled
    
    

    Solution:

    vi /etc/sysctl.conf       #append these two lines at the end
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    #then apply
    sysctl -p                #load the settings
    
    

    4. k8s prerequisite: setting up the harbor private registry

    Perform on kjdow7-200

    4.1 Download and install

    harbor download address

    7-200 src]# tar xf harbor-offline-installer-v1.8.5.tgz -C /opt
    7-200 src]# cd /opt
    [root@kjdow7-200 opt]# ls
    certs  containerd  harbor  src
    7-200 opt]# mv harbor harbor-v1.8.5
    7-200 opt]# ln -s harbor-v1.8.5 harbor
    
    

    4.2 Configuration

    7-200 harbor]# vim harbor.yml
    hostname: harbor.phc-dow.com
    http:
      # port for http, default is 80. If https enabled, this port will redirect to https port
      port: 180
    harbor_admin_password: 123456
    data_volume: /data/harbor
    log:
        location: /data/harbor/logs
        
    7-200 harbor]# mkdir /data/harbor/logs -p
    
    

    4.3 Install the docker-compose dependency

    7-200 harbor]# yum install docker-compose -y
    7-200 harbor]# rpm -qa docker-compose
    docker-compose-1.18.0-4.el7.noarch
    
    

    harbor's installer depends on docker-compose.

    4.4 Install harbor

    7-200 harbor]# ./install.sh
    7-200 harbor]# docker-compose ps
          Name                     Command               State             Ports          
    --------------------------------------------------------------------------------------
    harbor-core         /harbor/start.sh                 Up                               
    harbor-db           /entrypoint.sh postgres          Up      5432/tcp                 
    harbor-jobservice   /harbor/start.sh                 Up                               
    harbor-log          /bin/sh -c /usr/local/bin/ ...   Up      127.0.0.1:1514->10514/tcp
    harbor-portal       nginx -g daemon off;             Up      80/tcp                   
    nginx               nginx -g daemon off;             Up      0.0.0.0:180->80/tcp      
    redis               docker-entrypoint.sh redis ...   Up      6379/tcp                 
    registry            /entrypoint.sh /etc/regist ...   Up      5000/tcp                 
    registryctl         /harbor/start.sh                 Up  
    
    

    4.5 Configure internal DNS resolution for harbor

    7-11 ~]# vi /var/named/phc-dow.com.zone
                                    2020010202   ; serial     #bump serial by 1
    harbor             A         10.4.7.200                   #add an A record for the harbor name
    
    #check resolution
    7-11 ~]# dig -t A harbor.phc-dow.com @10.4.7.11 +short
    10.4.7.200
    
    

    4.6 Install and configure nginx

    7-200 harbor]# yum install nginx -y
    7-200 harbor]# vim /etc/nginx/conf.d/harbor.phc-dow.conf
    server {
        listen       80;
        server_name  harbor.phc-dow.com;
    
        client_max_body_size 1000m;
    
        location / {
            proxy_pass http://127.0.0.1:180;
        }
    }
    7-200 harbor]# nginx -t
    7-200 harbor]# systemctl start nginx
    
    

    4.7 Access the registry and push an image

    ##browse to harbor.phc-dow.com
    7-200 ~]# docker pull nginx:1.7.9
    7-200 ~]# docker images
    nginx                           1.7.9                      84581e99d807        4 years ago         91.7MB
    7-200 ~]# docker tag 84581e99d807 harbor.phc-dow.com/public/nginx:v1.7.9
    7-200 ~]# docker login harbor.phc-dow.com
    Username: admin
    Password: 
    7-200 ~]# docker push harbor.phc-dow.com/public/nginx:v1.7.9
    
    

    5. Installing the control-plane services: the etcd cluster

    5.1 Create the certificate files

    etcd members talk to each other over https, so certificate files are required; the certificate type is peer.

    Perform on kjdow7-200

    vim /opt/certs/etcd-peer-csr.json
    {
        "CN": "etcd-peer",
        "hosts": [
            "10.4.7.11",
            "10.4.7.12",
            "10.4.7.21",
            "10.4.7.22"
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "ST": "shanghai",
                "L": "shanghai",
                "O": "phc-dow",
                "OU": "kjdow"
            }
        ]
    }
    
    

    Put every IP that might ever run etcd into the hosts field; address ranges are not allowed, only individual IP addresses.
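
    Once the certificate is signed (next step), the SAN list can be checked against the plan; a sketch assuming openssl is installed:

    7-200 certs]# openssl x509 -noout -text -in etcd-peer.pem | grep -A1 'Subject Alternative Name'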

    Generate the etcd certificate and private key

    7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer etcd-peer-csr.json | cfssljson -bare etcd-peer
    2020/01/09 01:21:58 [INFO] generate received request
    2020/01/09 01:21:58 [INFO] received CSR
    2020/01/09 01:21:58 [INFO] generating key: rsa-2048
    2020/01/09 01:21:59 [INFO] encoded CSR
    2020/01/09 01:21:59 [INFO] signed certificate with serial number 18968919947780963988803677724704789922238134918
    2020/01/09 01:21:59 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
    websites. For more information see the Baseline Requirements for the Issuance and Management
    of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
    specifically, section 10.2.3 ("Information Requirements").
    ###check the generated certificate
    7-200 certs]# ls -l | grep etcd
    -rw-r--r-- 1 root root 1078 Jan  9 01:21 etcd-peer.csr
    -rw-r--r-- 1 root root  373 Jan  9 01:21 etcd-peer-csr.json
    -rw------- 1 root root 1675 Jan  9 01:21 etcd-peer-key.pem
    -rw-r--r-- 1 root root 1460 Jan  9 01:21 etcd-peer.pem
    
    
    

    Note: the CA certificate was issued earlier and is reused here.

    5.2 Set up etcd

    Perform on kjdow7-12, kjdow7-21, and kjdow7-22, setting up etcd on each.

    1) Download and install etcd

    etcd download address

    7-12 ~]# cd /opt/src
    7-12 src]# tar xf etcd-v3.1.20-linux-amd64.tar.gz -C /opt
    7-12 opt]# mv etcd-v3.1.20-linux-amd64 etcd-v3.1.20
    7-12 opt]# ln -s etcd-v3.1.20 etcd
    
    
    2) Install and configure
    #create the user
    [root@kjdow7-12 opt]# useradd -s /sbin/nologin -M etcd
    [root@kjdow7-12 opt]# id etcd
    uid=1000(etcd) gid=1000(etcd) groups=1000(etcd)
    ##create the data and log directories
    [root@kjdow7-12 opt]# mkdir -p /data/etcd /data/logs/etcd-server
    #create the certs directory and copy in the certificate and private key
    [root@kjdow7-12 opt]# mkdir -p /opt/etcd/certs
    [root@kjdow7-12 certs]# scp kjdow7-200:/opt/certs/ca.pem .
    [root@kjdow7-12 certs]# scp kjdow7-200:/opt/certs/etcd-peer.pem .
    [root@kjdow7-12 certs]# scp kjdow7-200:/opt/certs/etcd-peer-key.pem .
    [root@kjdow7-12 certs]# chown -R etcd.etcd /opt/etcd/certs/
    #grant ownership on the directories
    [root@kjdow7-12 certs]# chown -R etcd.etcd /data/etcd /data/logs/etcd-server/
    [root@kjdow7-12 certs]# chown -R etcd.etcd /opt/etcd-v3.1.20/
    
    
    3) Manage startup with supervisor
    ##create the etcd startup script
    [root@kjdow7-12 ~]# vim /opt/etcd/etcd-server-startup.sh
    #!/bin/sh
    ./etcd --name etcd-server-7-12 \
           --data-dir /data/etcd/etcd-server \
           --listen-peer-urls https://10.4.7.12:2380 \
           --listen-client-urls https://10.4.7.12:2379,http://127.0.0.1:2379 \
           --quota-backend-bytes 8000000000 \
           --initial-advertise-peer-urls https://10.4.7.12:2380 \
           --advertise-client-urls https://10.4.7.12:2379,http://127.0.0.1:2379 \
           --initial-cluster  etcd-server-7-12=https://10.4.7.12:2380,etcd-server-7-21=https://10.4.7.21:2380,etcd-server-7-22=https://10.4.7.22:2380 \
           --ca-file ./certs/ca.pem \
           --cert-file ./certs/etcd-peer.pem \
           --key-file ./certs/etcd-peer-key.pem \
           --client-cert-auth \
           --trusted-ca-file ./certs/ca.pem \
           --peer-ca-file ./certs/ca.pem \
           --peer-cert-file ./certs/etcd-peer.pem \
           --peer-key-file ./certs/etcd-peer-key.pem \
           --peer-client-cert-auth \
           --peer-trusted-ca-file ./certs/ca.pem \
           --log-output stdout
    [root@kjdow7-12 ~]# chmod +x /opt/etcd/etcd-server-startup.sh
    
    ##install and start supervisor
    [root@kjdow7-12 ~]# yum install supervisor -y
    [root@kjdow7-12 ~]# systemctl start supervisord
    [root@kjdow7-12 ~]# systemctl enable supervisord
    ##create the etcd-server supervisor config
    [root@kjdow7-12 ~]# vim /etc/supervisord.d/etcd-server.ini
    [program:etcd-server-7-12]
    command=/opt/etcd/etcd-server-startup.sh                        ; the program (relative uses PATH, can take args)
    numprocs=1                                                      ; number of processes copies to start (def 1)
    directory=/opt/etcd                                             ; directory to cwd to before exec (def no cwd)
    autostart=true                                                  ; start at supervisord start (default: true)
    autorestart=true                                                ; restart at unexpected quit (default: true)
    startsecs=22                                                    ; number of secs prog must stay running (def. 1)
    startretries=3                                                  ; max # of serial start failures (default 3)
    exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
    stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
    stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
    user=etcd                                                       ; setuid to this UNIX account to run the program
    redirect_stderr=false                                           ; redirect proc stderr to stdout (default false)
    stdout_logfile=/data/logs/etcd-server/etcd.stdout.log           ; stdout log path, NONE for none; default AUTO
    stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
    stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
    stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
    stdout_events_enabled=false                                     ; emit events on stdout writes (default false)
    stderr_logfile=/data/logs/etcd-server/etcd.stderr.log           ; stderr log path, NONE for none; default AUTO
    stderr_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
    stderr_logfile_backups=4                                        ; # of stderr logfile backups (default 10)
    stderr_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
    stderr_events_enabled=false                                     ; emit events on stderr writes (default false)
    
    

    Note: depending on the situation, the following two lines can be appended to the ini file:

    killasgroup=true  #when supervisord resorts to SIGKILL, send it to the child's entire process group; a managed child may have children of its own, and killing only the direct child would leave them orphaned. Defaults to false; optional.
    stopasgroup=true  #similar, but sends the stop signal (stopsignal) to the whole process group; setting it to true implies killasgroup=true. Defaults to false; optional.
    

    Note: the startup configuration differs slightly on each etcd host; adjust it when configuring the other nodes, as sketched below.
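
    For example, the kjdow7-21 script differs from the 7-12 one only in the member name and the local IPs (the supervisor ini's [program:...] name changes the same way); a sketch of the delta:

    #on kjdow7-21, relative to the 7-12 startup script:
    #  --name etcd-server-7-21
    #  --listen-peer-urls https://10.4.7.21:2380
    #  --listen-client-urls https://10.4.7.21:2379,http://127.0.0.1:2379
    #  --initial-advertise-peer-urls https://10.4.7.21:2380
    #  --advertise-client-urls https://10.4.7.21:2379,http://127.0.0.1:2379
    #(--initial-cluster is identical on every member)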

    ###start the etcd service and check it
    [root@kjdow7-12 ~]# supervisorctl update
    etcd-server-7-12: added process group
    [root@kjdow7-12 ~]# supervisorctl status
    etcd-server-7-12                 RUNNING   pid 8545, uptime 0:00:23
    
    
    4) Install on the other nodes

    Repeat steps 1 through 3, taking care to change the IP addresses in the configs.

    [root@kjdow7-21 certs]# supervisorctl status
    etcd-server-7-21                 RUNNING   pid 9232, uptime 0:00:30
    [root@kjdow7-22 ~]# supervisorctl status
    etcd-server-7-22                 RUNNING   pid 9282, uptime 0:00:30
    
    
    5) Check cluster health

    With all three members running, check the cluster:

    [root@kjdow7-12 ~]# /opt/etcd/etcdctl cluster-health
    member 988139385f78284 is healthy: got healthy result from http://127.0.0.1:2379
    member 5a0ef2a004fc4349 is healthy: got healthy result from http://127.0.0.1:2379
    member f4a0cb0a765574a8 is healthy: got healthy result from http://127.0.0.1:2379
    cluster is healthy
    [root@kjdow7-12 ~]# /opt/etcd/etcdctl member list
    988139385f78284: name=etcd-server-7-22 peerURLs=https://10.4.7.22:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.22:2379 isLeader=false
    5a0ef2a004fc4349: name=etcd-server-7-21 peerURLs=https://10.4.7.21:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.21:2379 isLeader=false
    f4a0cb0a765574a8: name=etcd-server-7-12 peerURLs=https://10.4.7.12:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.12:2379 isLeader=true
    
    

    6. Installing the control-plane services: apiserver

    Perform on kjdow7-21 and kjdow7-22

    6.1 Download the k8s package

    k8s download address

    [root@kjdow7-21 src]# tar xf kubernetes-server-linux-amd64-v1.15.4.tar.gz -C /opt
    [root@kjdow7-21 ~]# cd /opt
    [root@kjdow7-21 opt]# ls
    containerd  etcd  etcd-v3.1.20  kubernetes  src
    [root@kjdow7-21 opt]# mv kubernetes kubernetes-v1.15.4
    [root@kjdow7-21 opt]# ln -s kubernetes-v1.15.4 kubernetes
    [root@kjdow7-21 opt]# cd kubernetes
    [root@kjdow7-21 kubernetes]# ls
    addons  kubernetes-src.tar.gz  LICENSES  server
    [root@kjdow7-21 kubernetes]# ll
    total 27212
    drwxr-xr-x 2 root root        6 Oct 16 03:34 addons
    -rw-r--r-- 1 root root 26654123 Oct 16 03:34 kubernetes-src.tar.gz
    -rw-r--r-- 1 root root  1205293 Oct 16 03:34 LICENSES
    drwxr-xr-x 3 root root       17 Oct 16 03:30 server
    [root@kjdow7-21 kubernetes]# rm -f kubernetes-src.tar.gz    #this is the source tarball; delete it
    [root@kjdow7-21 kubernetes]# cd server/bin/
    [root@kjdow7-21 bin]# ls
    apiextensions-apiserver              cloud-controller-manager.tar  kube-apiserver             kube-controller-manager             kubectl     kube-proxy.docker_tag  kube-scheduler.docker_tag
    cloud-controller-manager             hyperkube                     kube-apiserver.docker_tag  kube-controller-manager.docker_tag  kubelet     kube-proxy.tar         kube-scheduler.tar
    cloud-controller-manager.docker_tag  kubeadm                       kube-apiserver.tar         kube-controller-manager.tar         kube-proxy  kube-scheduler         mounter
    [root@kjdow7-21 bin]# rm -f *.tar      #these image tarballs are only needed by kubeadm installs; delete them
    [root@kjdow7-21 bin]# rm -rf *_tag
    [root@kjdow7-21 bin]# ll
    total 885008
    -rwxr-xr-x 1 root root  43547104 Oct 16 03:34 apiextensions-apiserver
    -rwxr-xr-x 1 root root 100610080 Oct 16 03:34 cloud-controller-manager
    -rwxr-xr-x 1 root root 200726128 Oct 16 03:34 hyperkube
    -rwxr-xr-x 1 root root  40190400 Oct 16 03:34 kubeadm
    -rwxr-xr-x 1 root root 164571520 Oct 16 03:34 kube-apiserver
    -rwxr-xr-x 1 root root 116462560 Oct 16 03:34 kube-controller-manager
    -rwxr-xr-x 1 root root  42993696 Oct 16 03:34 kubectl
    -rwxr-xr-x 1 root root 119686160 Oct 16 03:34 kubelet
    -rwxr-xr-x 1 root root  36991584 Oct 16 03:34 kube-proxy
    -rwxr-xr-x 1 root root  38790240 Oct 16 03:34 kube-scheduler
    -rwxr-xr-x 1 root root   1648224 Oct 16 03:34 mounter
    [root@kjdow7-21 ~]# ln -s /opt/kubernetes/server/bin/kubectl /usr/bin/kubectl
    
    

    6.2 Cluster plan

    Hostname             Role               IP
    kjdow7-21.host.com   kube-apiserver     10.4.7.21
    kjdow7-22.host.com   kube-apiserver     10.4.7.22
    kjdow7-11.host.com   L4 load balancer   10.4.7.11
    kjdow7-12.host.com   L4 load balancer   10.4.7.12

    Note: 10.4.7.11 and 10.4.7.12 run nginx as a layer-4 load balancer, with keepalived holding a VIP of 10.4.7.10 that fronts the two kube-apiservers for high availability.

    The walkthrough below uses kjdow7-21.host.com as the example; the other compute node is deployed the same way.

    6.3 Issue certificates

    1) Issue the client certificate

    When apiserver talks to etcd, apiserver is the client and etcd is the server, so a client certificate is needed.

    Perform on kjdow7-200

    Create the JSON config for the certificate signing request (CSR)

    [root@kjdow7-200 ~]# vim /opt/certs/client-csr.json
    {
        "CN": "k8s-node",
        "hosts": [
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "ST": "shanghai",
                "L": "shanghai",
                "O": "phc-dow",
                "OU": "kjdow"
            }
        ]
    }
    
    

    Generate the client certificate and private key

    [root@kjdow7-200 ~]# cd /opt/certs/
    [root@kjdow7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client-csr.json | cfssljson -bare client
    2020/01/09 04:00:27 [INFO] generate received request
    2020/01/09 04:00:27 [INFO] received CSR
    2020/01/09 04:00:27 [INFO] generating key: rsa-2048
    2020/01/09 04:00:29 [INFO] encoded CSR
    2020/01/09 04:00:29 [INFO] signed certificate with serial number 24514806972076249411305307112862513929803875017
    2020/01/09 04:00:29 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
    websites. For more information see the Baseline Requirements for the Issuance and Management
    of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
    specifically, section 10.2.3 ("Information Requirements").
    ##check the generated certificate and key
    [root@kjdow7-200 certs]# ls -l|grep client
    -rw-r--r-- 1 root root 1005 Jan  9 04:00 client.csr
    -rw-r--r-- 1 root root  289 Jan  9 03:59 client-csr.json
    -rw------- 1 root root 1675 Jan  9 04:00 client-key.pem
    -rw-r--r-- 1 root root 1395 Jan  9 04:00 client.pem
    
    
    2) Issue the kube-apiserver certificate

    Other clients that connect to apiserver also need TLS; this is apiserver's server-side certificate.

    Create the JSON config for the certificate signing request (CSR)

    [root@kjdow7-200 certs]# vim /opt/certs/apiserver-csr.json
    {
        "CN": "apiserver",
        "hosts": [
            "127.0.0.1",
            "192.168.0.1",
            "kubernetes.default",
            "kubernetes.default.svc",
            "kubernetes.default.svc.cluster",
            "kubernetes.default.svc.cluster.local",
            "10.4.7.10",
            "10.4.7.21",
            "10.4.7.22",
            "10.4.7.23"
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "ST": "shanghai",
                "L": "shanghai",
                "O": "phc-dow",
                "OU": "kjdow"
            }
        ]
    }
    
    

    Generate the kube-apiserver certificate and private key

    [root@kjdow7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server apiserver-csr.json | cfssljson -bare apiserver
    2020/01/09 04:07:00 [INFO] generate received request
    2020/01/09 04:07:00 [INFO] received CSR
    2020/01/09 04:07:00 [INFO] generating key: rsa-2048
    2020/01/09 04:07:01 [INFO] encoded CSR
    2020/01/09 04:07:01 [INFO] signed certificate with serial number 93669080945735753983245798274605600027216431034
    2020/01/09 04:07:01 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
    websites. For more information see the Baseline Requirements for the Issuance and Management
    of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
    specifically, section 10.2.3 ("Information Requirements").
    [root@kjdow7-200 certs]# echo $?
    0
    [root@kjdow7-200 certs]# ls -l|grep apiserver
    -rw-r--r-- 1 root root 1257 Jan  9 04:07 apiserver.csr
    -rw-r--r-- 1 root root  571 Jan  9 04:05 apiserver-csr.json
    -rw------- 1 root root 1675 Jan  9 04:07 apiserver-key.pem
    -rw-r--r-- 1 root root 1623 Jan  9 04:07 apiserver.pem
    
    
    3) Copy the certificates to each compute node and create the config

    Perform on kjdow7-21 and kjdow7-22

    ##copy the certs and keys; note the private keys keep mode 600
    [root@kjdow7-21 certs]# mkdir -p /opt/kubernetes/server/bin/certs
    [root@kjdow7-21 certs]# cd /opt/kubernetes/server/bin/certs
    [root@kjdow7-21 certs]# scp kjdow7-200:/opt/certs/ca.pem .
    [root@kjdow7-21 certs]# scp kjdow7-200:/opt/certs/ca-key.pem .  
    [root@kjdow7-21 certs]# scp kjdow7-200:/opt/certs/client.pem .   
    [root@kjdow7-21 certs]# scp kjdow7-200:/opt/certs/client-key.pem .  
    [root@kjdow7-21 certs]# scp kjdow7-200:/opt/certs/apiserver.pem .   
    [root@kjdow7-21 certs]# scp kjdow7-200:/opt/certs/apiserver-key.pem .
    
    

    6.4 Configure and start the service

    Perform on kjdow7-21 and kjdow7-22

    [root@kjdow7-21 certs]# mkdir -p /opt/kubernetes/server/bin/conf
    [root@kjdow7-21 certs]# vi /opt/kubernetes/server/bin/conf/audit.yaml
    apiVersion: audit.k8s.io/v1beta1 # This is required.
    kind: Policy
    # Don't generate audit events for all requests in RequestReceived stage.
    omitStages:
      - "RequestReceived"
    rules:
      # Log pod changes at RequestResponse level
      - level: RequestResponse
        resources:
        - group: ""
          # Resource "pods" doesn't match requests to any subresource of pods,
          # which is consistent with the RBAC policy.
          resources: ["pods"]
      # Log "pods/log", "pods/status" at Metadata level
      - level: Metadata
        resources:
        - group: ""
          resources: ["pods/log", "pods/status"]
    
      # Don't log requests to a configmap called "controller-leader"
      - level: None
        resources:
        - group: ""
          resources: ["configmaps"]
          resourceNames: ["controller-leader"]
    
      # Don't log watch requests by the "system:kube-proxy" on endpoints or services
      - level: None
        users: ["system:kube-proxy"]
        verbs: ["watch"]
        resources:
        - group: "" # core API group
          resources: ["endpoints", "services"]
    
      # Don't log authenticated requests to certain non-resource URL paths.
      - level: None
        userGroups: ["system:authenticated"]
        nonResourceURLs:
        - "/api*" # Wildcard matching.
        - "/version"
    
      # Log the request body of configmap changes in kube-system.
      - level: Request
        resources:
        - group: "" # core API group
          resources: ["configmaps"]
        # This rule only applies to resources in the "kube-system" namespace.
        # The empty string "" can be used to select non-namespaced resources.
        namespaces: ["kube-system"]
    
      # Log configmap and secret changes in all other namespaces at the Metadata level.
      - level: Metadata
        resources:
        - group: "" # core API group
          resources: ["secrets", "configmaps"]
    
      # Log all other resources in core and extensions at the Request level.
      - level: Request
        resources:
        - group: "" # core API group
        - group: "extensions" # Version of group should NOT be included.
    
      # A catch-all rule to log all other requests at the Metadata level.
      - level: Metadata
        # Long-running requests like watches that fall under this rule will not
        # generate an audit event in RequestReceived.
        omitStages:
          - "RequestReceived"
    
    

    Create the startup script

    [root@kjdow7-21 certs]# vim /opt/kubernetes/server/bin/kube-apiserver.sh
    #!/bin/bash
    ./kube-apiserver \
      --apiserver-count 2 \
      --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
      --audit-policy-file ./conf/audit.yaml \
      --authorization-mode RBAC \
      --client-ca-file ./certs/ca.pem \
      --requestheader-client-ca-file ./certs/ca.pem \
      --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
      --etcd-cafile ./certs/ca.pem \
      --etcd-certfile ./certs/client.pem \
      --etcd-keyfile ./certs/client-key.pem \
      --etcd-servers https://10.4.7.12:2379,https://10.4.7.21:2379,https://10.4.7.22:2379 \
      --service-account-key-file ./certs/ca-key.pem \
      --service-cluster-ip-range 192.168.0.0/16 \
      --service-node-port-range 3000-29999 \
      --target-ram-mb=1024 \
      --kubelet-client-certificate ./certs/client.pem \
      --kubelet-client-key ./certs/client-key.pem \
      --log-dir  /data/logs/kubernetes/kube-apiserver \
      --tls-cert-file ./certs/apiserver.pem \
      --tls-private-key-file ./certs/apiserver-key.pem \
      --v 2
    
    

    Adjust permissions and create directories

    [root@kjdow7-21 certs]# chmod +x /opt/kubernetes/server/bin/kube-apiserver.sh
    [root@kjdow7-21 certs]# mkdir -p /data/logs/kubernetes/kube-apiserver
    
    

    Create the supervisor config

    [root@kjdow7-21 certs]# vim /etc/supervisord.d/kube-apiserver.ini
    [program:kube-apiserver-7-21]
    command=/opt/kubernetes/server/bin/kube-apiserver.sh            ; the program (relative uses PATH, can take args)
    numprocs=1                                                      ; number of processes copies to start (def 1)
    directory=/opt/kubernetes/server/bin                            ; directory to cwd to before exec (def no cwd)
    autostart=true                                                  ; start at supervisord start (default: true)
    autorestart=true                                                ; restart at unexpected quit (default: true)
    startsecs=22                                                    ; number of secs prog must stay running (def. 1)
    startretries=3                                                  ; max # of serial start failures (default 3)
    exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
    stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
    stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
    user=root                                                       ; setuid to this UNIX account to run the program
    redirect_stderr=false                                           ; redirect proc stderr to stdout (default false)
    stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log        ; stdout log path, NONE for none; default AUTO
    stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
    stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
    stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
    stdout_events_enabled=false                                     ; emit events on stdout writes (default false)
    stderr_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stderr.log        ; stderr log path, NONE for none; default AUTO
    stderr_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
    stderr_logfile_backups=4                                        ; # of stderr logfile backups (default 10)
    stderr_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
    stderr_events_enabled=false                                     ; emit events on stderr writes (default false)
    stopasgroup=true
    
    

    Start the service and check

    [root@kjdow7-21 bin]# supervisorctl update
    kube-apiserver-7-21: added process group
    [root@kjdow7-21 bin]# supervisorctl status
    etcd-server-7-21                 RUNNING   pid 9232, uptime 1:36:06
    kube-apiserver-7-21              RUNNING   pid 10804, uptime 0:00:35
    [root@kjdow7-21 ~]# netstat -lntup | grep kub
    tcp        0      0 127.0.0.1:8080          0.0.0.0:*               LISTEN      10805/./kube-apiser 
    tcp6       0      0 :::6443                 :::*                    LISTEN      10805/./kube-apiser 
    
    

    6.5 Install, start, and check kube-apiserver on all planned hosts

    Repeat steps 6.1 through 6.4, changing the addresses in the config files accordingly.

    [root@kjdow7-22 bin]# supervisorctl status
    etcd-server-7-22                 RUNNING   pid 9282, uptime 3:03:39
    kube-apiserver-7-22              RUNNING   pid 10895, uptime 0:05:02
    [root@kjdow7-22 bin]# netstat -lntup | grep kube
    tcp        0      0 127.0.0.1:8080          0.0.0.0:*               LISTEN      10896/./kube-apiser 
    tcp6       0      0 :::6443                 :::*                    LISTEN      10896/./kube-apiser
    
    

    7. Deploying the control-plane L4 reverse proxy for apiserver

    Perform on kjdow7-11 and kjdow7-12

    7.1 Install and configure nginx

    [root@kjdow7-11 ~]# yum install nginx -y
    #configure nginx as a layer-4 load balancer (the stream block sits at the top level of nginx.conf, outside http{})
    vim /etc/nginx/nginx.conf
    stream {
        upstream kube-apiserver {
            server 10.4.7.21:6443     max_fails=3 fail_timeout=30s;
            server 10.4.7.22:6443     max_fails=3 fail_timeout=30s;
        }
        server {
            listen 7443;
            proxy_connect_timeout 2s;
            proxy_timeout 900s;
            proxy_pass kube-apiserver;
        }
    }
    
    

    7.2 Install and configure keepalived

    [root@kjdow7-11 ~]# yum install keepalived -y
    #configure the check script on both 11 and 12
    [root@kjdow7-11 ~]# vi /etc/keepalived/check_port.sh
    #!/bin/bash
    #keepalived port-check script
    #usage: reference it from keepalived.conf like so:
    #vrrp_script check_port {                    #declare a vrrp_script
    #    script "/etc/keepalived/check_port.sh 7443" #port to monitor
    #    interval 2                              #check interval in seconds
    #}
    CHK_PORT=$1
    if [ -n "$CHK_PORT" ];then
            PORT_PROCESS=`ss -lnt|grep $CHK_PORT|wc -l`
            if [ $PORT_PROCESS -eq 0 ];then
                    echo "Port $CHK_PORT Is Not Used,End."
                    exit 1
            fi
    else
            echo "Check Port Cant Be Empty!"
    fi
    
    

    keepalived master: perform on kjdow7-11

    vim /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived
    
    global_defs {
       router_id 10.4.7.11
    
    }
    
    vrrp_script chk_nginx {
        script "/etc/keepalived/check_port.sh 7443"
        interval 2
        weight -20
    }
    
    vrrp_instance VI_1 {
        state MASTER
    interface eth1      #NIC to bind; adjust as needed
        virtual_router_id 251
        priority 100
        advert_int 1
        mcast_src_ip 10.4.7.11
        nopreempt
    
        authentication {
            auth_type PASS
            auth_pass 11111111
        }
        track_script {
             chk_nginx
        }
        virtual_ipaddress {
            10.4.7.10
        }
    }
    
    

    keepalived backup: perform on kjdow7-12

    vim /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived
    global_defs {
    	router_id 10.4.7.12
    }
    vrrp_script chk_nginx {
    	script "/etc/keepalived/check_port.sh 7443"
    	interval 2
    	weight -20
    }
    vrrp_instance VI_1 {
    	state BACKUP
	interface eth1      #NIC to bind; adjust as needed
    	virtual_router_id 251
    	mcast_src_ip 10.4.7.12
    	priority 90
    	advert_int 1
    	authentication {
    		auth_type PASS
    		auth_pass 11111111
    	}
    	track_script {
    		chk_nginx
    	}
    	virtual_ipaddress {
    		10.4.7.10
    	}
    }
    
    

    7.3 Start the services and check

    Perform on kjdow7-11 and kjdow7-12

    systemctl start keepalived
    systemctl start nginx
    systemctl enable nginx
    systemctl enable keepalived
    ip add | grep 10.4.7.10
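
    With both boxes up, the VIP should answer on the LB port; a quick smoke test, assuming /version is anonymously readable (as it is under the default RBAC bootstrap bindings):

    ~]# ss -lnt | grep 7443                       #nginx listening for apiserver traffic
    ~]# curl -k https://10.4.7.10:7443/version    #should return the apiserver version JSON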
    
    

    8. Installing the control-plane controller service: controller-manager

    Perform on kjdow7-21 and kjdow7-22

    8.1 Create the startup script

    [root@kjdow7-21 ~]# vim /opt/kubernetes/server/bin/kube-controller-manager.sh
    #!/bin/sh
    ./kube-controller-manager \
      --cluster-cidr 172.7.0.0/16 \
      --leader-elect true \
      --log-dir /data/logs/kubernetes/kube-controller-manager \
      --master http://127.0.0.1:8080 \
      --service-account-private-key-file ./certs/ca-key.pem \
      --service-cluster-ip-range 192.168.0.0/16 \
      --root-ca-file ./certs/ca.pem \
      --v 2
      
    ##adjust file permissions and create the log directory
    [root@kjdow7-21 ~]# cd /opt/kubernetes/server/bin
    [root@kjdow7-21 ~]# chmod +x /opt/kubernetes/server/bin/kube-controller-manager.sh
    [root@kjdow7-21 ~]# mkdir -p /data/logs/kubernetes/kube-controller-manager
    
    

    8.2 Create the supervisor config and start

    [root@kjdow7-21 ~]# vim /etc/supervisord.d/kube-conntroller-manager.ini
    [program:kube-controller-manager-7-21]
    command=/opt/kubernetes/server/bin/kube-controller-manager.sh                     ; the program (relative uses PATH, can take args)
    numprocs=1                                                                        ; number of processes copies to start (def 1)
    directory=/opt/kubernetes/server/bin                                              ; directory to cwd to before exec (def no cwd)
    autostart=true                                                                    ; start at supervisord start (default: true)
    autorestart=true                                                                  ; restart at unexpected quit (default: true)
    startsecs=22                                                                      ; number of secs prog must stay running (def. 1)
    startretries=3                                                                    ; max # of serial start failures (default 3)
    exitcodes=0,2                                                                     ; 'expected' exit codes for process (default 0,2)
    stopsignal=QUIT                                                                   ; signal used to kill process (default TERM)
    stopwaitsecs=10                                                                   ; max num secs to wait b4 SIGKILL (default 10)
    user=root                                                                         ; setuid to this UNIX account to run the program
    redirect_stderr=false                                                             ; redirect proc stderr to stdout (default false)
    stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controll.stdout.log  ; stdout log path, NONE for none; default AUTO
    stdout_logfile_maxbytes=64MB                                                      ; max # logfile bytes b4 rotation (default 50MB)
    stdout_logfile_backups=4                                                          ; # of stdout logfile backups (default 10)
    stdout_capture_maxbytes=1MB                                                       ; number of bytes in 'capturemode' (default 0)
    stdout_events_enabled=false                                                       ; emit events on stdout writes (default false)
    stderr_logfile=/data/logs/kubernetes/kube-controller-manager/controll.stderr.log  ; stderr log path, NONE for none; default AUTO
    stderr_logfile_maxbytes=64MB                                                      ; max # logfile bytes b4 rotation (default 50MB)
    stderr_logfile_backups=4                                                          ; # of stderr logfile backups (default 10)
    stderr_capture_maxbytes=1MB                                                       ; number of bytes in 'capturemode' (default 0)
    stderr_events_enabled=false                                                       ; emit events on stderr writes (default false)
    stopasgroup=true
    
    ##start the service and check
    [root@kjdow7-21 ~]# supervisorctl update
    kube-controller-manager-7-21: added process group
    [root@kjdow7-21 ~]# supervisorctl status
    etcd-server-7-21                 RUNNING   pid 9232, uptime 15:34:39
    kube-apiserver-7-21              RUNNING   pid 10804, uptime 13:59:08
    kube-controller-manager-7-21     RUNNING   pid 11922, uptime 0:00:27
    
    

    8.3 Install, start, and check kube-controller-manager on all planned hosts

    Deploy kube-controller-manager on the other host the same way, remembering to change the IP-specific parts of the config. kube-controller-manager only talks to the apiserver on the same machine, so that link is left unencrypted and no certificate needs to be issued; this is the usual production setup. If you do want kube-controller-manager to speak TLS to every other component in the cluster, issue it the same two certificate sets as for apiserver.
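
    The local plain-HTTP endpoint it relies on can be probed directly; a quick sketch:

    #the apiserver's insecure port answers on loopback only (see the netstat output in 6.4)
    [root@kjdow7-22 ~]# curl -s http://127.0.0.1:8080/healthz    #should print: ok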

    [root@kjdow7-22 ~]# supervisorctl update
    kube-controller-manager-7-22: added process group
    [root@kjdow7-22 ~]# supervisorctl status
    etcd-server-7-22                 RUNNING   pid 9282, uptime 15:11:30
    kube-apiserver-7-22              RUNNING   pid 10895, uptime 12:12:53
    kube-controller-manager-7-22     RUNNING   pid 11802, uptime 0:00:39
    
    

    9. Installing the control-plane scheduler service: kube-scheduler

    Perform on kjdow7-21 and kjdow7-22

    9.1 Create the startup script

    [root@kjdow7-21 ~]# vim /opt/kubernetes/server/bin/kube-scheduler.sh
    #!/bin/sh
    ./kube-scheduler \
      --leader-elect \
      --log-dir /data/logs/kubernetes/kube-scheduler \
      --master http://127.0.0.1:8080 \
      --v 2
    ###adjust file permissions and create the log directory
    [root@kjdow7-21 ~]# cd /opt/kubernetes/server/bin
    [root@kjdow7-21 ~]# chmod +x /opt/kubernetes/server/bin/kube-scheduler.sh
    [root@kjdow7-21 ~]# mkdir -p /data/logs/kubernetes/kube-scheduler
    
    

    9.2 Create the supervisor config

    [root@kjdow7-21 ~]# vim /etc/supervisord.d/kube-scheduler.ini
    [program:kube-scheduler-7-21]
    command=/opt/kubernetes/server/bin/kube-scheduler.sh                     ; the program (relative uses PATH, can take args)
    numprocs=1                                                               ; number of processes copies to start (def 1)
    directory=/opt/kubernetes/server/bin                                     ; directory to cwd to before exec (def no cwd)
    autostart=true                                                           ; start at supervisord start (default: true)
    autorestart=true                                                         ; restart at unexpected quit (default: true)
    startsecs=22                                                             ; number of secs prog must stay running (def. 1)
    startretries=3                                                           ; max # of serial start failures (default 3)
    exitcodes=0,2                                                            ; 'expected' exit codes for process (default 0,2)
    stopsignal=QUIT                                                          ; signal used to kill process (default TERM)
    stopwaitsecs=10                                                          ; max num secs to wait b4 SIGKILL (default 10)
    user=root                                                                ; setuid to this UNIX account to run the program
    redirect_stderr=false                                                    ; redirect proc stderr to stdout (default false)
    stdout_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stdout.log ; stdout log path, NONE for none; default AUTO
    stdout_logfile_maxbytes=64MB                                             ; max # logfile bytes b4 rotation (default 50MB)
    stdout_logfile_backups=4                                                 ; # of stdout logfile backups (default 10)
    stdout_capture_maxbytes=1MB                                              ; number of bytes in 'capturemode' (default 0)
    stdout_events_enabled=false                                              ; emit events on stdout writes (default false)
    stderr_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stderr.log ; stderr log path, NONE for none; default AUTO
    stderr_logfile_maxbytes=64MB                                             ; max # logfile bytes b4 rotation (default 50MB)
    stderr_logfile_backups=4                                                 ; # of stderr logfile backups (default 10)
    stderr_capture_maxbytes=1MB                                              ; number of bytes in 'capturemode' (default 0)
    stderr_events_enabled=false                                              ; emit events on stderr writes (default false)
    stopasgroup=true
    ###start the service and check
    [root@kjdow7-21 ~]# supervisorctl update
    kube-scheduler-7-21: added process group
    [root@kjdow7-21 ~]# supervisorctl status
    etcd-server-7-21                 RUNNING   pid 9232, uptime 15:50:38
    kube-apiserver-7-21              RUNNING   pid 10804, uptime 14:15:07
    kube-controller-manager-7-21     RUNNING   pid 11922, uptime 0:16:26
    kube-scheduler-7-21              RUNNING   pid 11950, uptime 0:00:33
    
    

    9.3 Install, start, and check kube-scheduler on all planned hosts

    Deploy kube-scheduler on the other host the same way, remembering to change the IP-specific parts of the config. Like the controller-manager, kube-scheduler only talks to the local apiserver, so no TLS and no certificate are needed; if you want it encrypted cluster-wide, issue certificates as for apiserver.

    [root@kjdow7-22 ~]# supervisorctl update
    kube-scheduler-7-22: added process group
    [root@kjdow7-22 ~]# supervisorctl status
    etcd-server-7-22                 RUNNING   pid 9282, uptime 15:25:47
    kube-apiserver-7-22              RUNNING   pid 10895, uptime 12:27:10
    kube-controller-manager-7-22     RUNNING   pid 11802, uptime 0:14:56
    kube-scheduler-7-22              RUNNING   pid 11825, uptime 0:00:29
    
    

    10. Check control-plane status

    [root@kjdow7-22 ~]# ln -s /opt/kubernetes/server/bin/kubectl /usr/bin/kubectl
    [root@kjdow7-22 ~]# kubectl get cs          #check the health of the cluster components
    NAME                 STATUS    MESSAGE              ERROR
    scheduler            Healthy   ok                   
    controller-manager   Healthy   ok                   
    etcd-0               Healthy   {"health": "true"}   
    etcd-2               Healthy   {"health": "true"}   
    etcd-1               Healthy   {"health": "true"} 
    
    

    11. Deploying the node service: kubelet

    Deploy on kjdow7-21 and kjdow7-22

    11.1 Issue the certificate

    Perform on kjdow7-200

    [root@kjdow7-200 certs]# vim /opt/certs/kubelet-csr.json
    {
        "CN": "kubelet-node",
        "hosts": [
        "127.0.0.1",
        "10.4.7.10",
        "10.4.7.21",
        "10.4.7.22",
        "10.4.7.23",
        "10.4.7.24",
        "10.4.7.25",
        "10.4.7.26",
        "10.4.7.27",
        "10.4.7.28"
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "ST": "shanghai",
                "L": "shanghai",
                "O": "phc-dow",
                "OU": "kjdow"
            }
        ]
    }
    
    

    Note: list every host IP that might ever run kubelet in the hosts field; only IP addresses are allowed, not subnets. If kubelet is later deployed on a host whose IP is not in the list, the certificate must be re-issued, swapped in, and the service restarted.

    ###generate the kubelet certificate and private key
    [root@kjdow7-200 certs]#  cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json | cfssljson -bare kubelet
    
    ###check the generated certificate and key
    [root@kjdow7-200 certs]#  ls -l|grep kubelet
    total 88
    -rw-r--r-- 1 root root  415 Jan 22 16:58 kubelet-csr.json
    -rw------- 1 root root 1679 Jan 22 17:00 kubelet-key.pem
    -rw-r--r-- 1 root root 1086 Jan 22 17:00 kubelet.csr
    -rw-r--r-- 1 root root 1456 Jan 22 17:00 kubelet.pem
    
    

    11.2 Copy the certificate to each compute node

    Perform on kjdow7-21 and kjdow7-22

    [root@kjdow7-21 certs]# scp kjdow7-200:/opt/certs/kubelet.pem .  
    [root@kjdow7-21 certs]# scp kjdow7-200:/opt/certs/kubelet-key.pem .
    [root@kjdow7-21 certs]# ls -l /opt/kubernetes/server/bin/certs
    total 40
    -rw------- 1 root root 1676 Jan 21 16:39 apiserver-key.pem
    -rw-r--r-- 1 root root 1599 Jan 21 16:36 apiserver.pem
    -rw------- 1 root root 1675 Jan 21 13:55 ca-key.pem
    -rw-r--r-- 1 root root 1354 Jan 21 13:50 ca.pem
    -rw------- 1 root root 1679 Jan 21 13:53 client-key.pem
    -rw-r--r-- 1 root root 1368 Jan 21 13:53 client.pem
    -rw------- 1 root root 1679 Jan 22 17:00 kubelet-key.pem
    -rw-r--r-- 1 root root 1456 Jan 22 17:00 kubelet.pem
    
    

    The last two files are the new copies; the others were copied over earlier.

    11.3 Create the kubelet.kubeconfig config file

    Perform on kjdow7-21; the generated file can be copied straight to the other nodes, so there is no need to repeat these steps there.

    1) set-cluster

    Note: work in the /opt/kubernetes/server/bin/conf directory

    [root@kjdow7-21 conf]# cd /opt/kubernetes/server/bin/conf
    [root@kjdow7-21 conf]# kubectl config set-cluster myk8s \
      --certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem \
      --embed-certs=true \
      --server=https://10.4.7.10:7443 \
      --kubeconfig=kubelet.kubeconfig
    
    Cluster "myk8s" set.
    
    
    2) set-credentials

    Note: work in the /opt/kubernetes/server/bin/conf directory

    [root@kjdow7-21 conf]# kubectl config set-credentials k8s-node --client-certificate=/opt/kubernetes/server/bin/certs/client.pem --client-key=/opt/kubernetes/server/bin/certs/client-key.pem --embed-certs=true --kubeconfig=kubelet.kubeconfig 
    
    User "k8s-node" set.
    
    
    3) set-context

    Note: work in the /opt/kubernetes/server/bin/conf directory

    [root@kjdow7-21 conf]# kubectl config set-context myk8s-context \
      --cluster=myk8s \
      --user=k8s-node \
      --kubeconfig=kubelet.kubeconfig
    
    Context "myk8s-context" created.
    
    
    4) use-context

    Note: work in the /opt/kubernetes/server/bin/conf directory

    [root@kjdow7-21 conf]# kubectl config use-context myk8s-context --kubeconfig=kubelet.kubeconfig
    
    Switched to context "myk8s-context".
    
    ###inspect the generated files
    [root@kjdow7-21 conf]# ll
    total 12
    -rw-r--r-- 1 root root 2223 Jan  9 04:29 audit.yaml
    -rw------- 1 root root 6279 Jan 10 01:07 kubelet.kubeconfig
    
    
    5) Authorize the k8s-node user with k8s-node.yaml

    Note: this step only needs to be run once.

    • Create the resource manifest
    [root@kjdow7-21 conf]# vim /opt/kubernetes/server/bin/conf/k8s-node.yaml
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: k8s-node
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:node
    subjects:
    - apiGroup: rbac.authorization.k8s.io
      kind: User
      name: k8s-node
    
    
    • Apply the resource manifest
    7-21 conf]# kubectl create -f k8s-node.yaml
    
    clusterrolebinding.rbac.authorization.k8s.io/k8s-node created
    
    

    This RBAC binding grants the k8s-node user the permissions of a cluster compute node. The resource is stored in etcd, so it does not need to be created again on the other nodes.

    • Check
    7-21 conf]# kubectl get clusterrolebinding k8s-node
    NAME           AGE
    k8s-node       3m
    7-21 conf]# kubectl get clusterrolebinding k8s-node -o yaml  #view the created object as yaml
    
    

    11.4 Prepare the pause base image

    Perform on kjdow7-200

    [root@kjdow7-200 ~]# docker pull kubernetes/pause
    Using default tag: latest
    latest: Pulling from kubernetes/pause
    4f4fb700ef54: Pull complete 
    b9c8ec465f6b: Pull complete 
    Digest: sha256:b31bfb4d0213f254d361e0079deaaebefa4f82ba7aa76ef82e90b4935ad5b105
    Status: Downloaded newer image for kubernetes/pause:latest
    docker.io/kubernetes/pause:latest
    
    ####inspect the image and push it to the registry
    [root@kjdow7-200 ~]# docker images | grep pause
    kubernetes/pause                  latest                     f9d5de079539        5 years ago         240kB
    [root@kjdow7-200 ~]# docker tag f9d5de079539 harbor.phc-dow.com/public/pause:latest
    [root@kjdow7-200 ~]# docker push harbor.phc-dow.com/public/pause:latest
    The push refers to repository [harbor.phc-dow.com/public/pause]
    5f70bf18a086: Mounted from public/nginx 
    e16a89738269: Pushed 
    latest: digest: sha256:b31bfb4d0213f254d361e0079deaaebefa4f82ba7aa76ef82e90b4935ad5b105 size: 938
    
    

    11.5 Create the kubelet startup script

    Perform on kjdow7-21 and kjdow7-22

    vim /opt/kubernetes/server/bin/kubelet.sh
    #!/bin/sh
    ./kubelet \
      --anonymous-auth=false \
      --cgroup-driver systemd \
      --cluster-dns 192.168.0.2 \
      --cluster-domain cluster.local \
      --runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice \
      --fail-swap-on="false" \
      --client-ca-file ./certs/ca.pem \
      --tls-cert-file ./certs/kubelet.pem \
      --tls-private-key-file ./certs/kubelet-key.pem \
      --hostname-override kjdow7-21.host.com \
      --image-gc-high-threshold 20 \
      --image-gc-low-threshold 10 \
      --kubeconfig ./conf/kubelet.kubeconfig \
      --log-dir /data/logs/kubernetes/kube-kubelet \
      --pod-infra-container-image harbor.phc-dow.com/public/pause:latest \
      --root-dir /data/kubelet
    
    

    Note: the kubelet startup script differs slightly per host; adjust it when deploying other nodes (see the sketch below).
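
    A sketch of the per-host delta: on kjdow7-22 only the hostname override changes, so after copying the script over, a hypothetical one-liner like this is enough:

    [root@kjdow7-22 bin]# sed -i 's/kjdow7-21.host.com/kjdow7-22.host.com/' /opt/kubernetes/server/bin/kubelet.sh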

    11.6 Check the config and permissions, create the log directories

    Perform on kjdow7-21 and kjdow7-22

    7-21 conf]# ls -l|grep kubelet.kubeconfig 
    -rw------- 1 root root 6471 Jan 22 17:33 kubelet.kubeconfig
    7-21 conf]# chmod +x /opt/kubernetes/server/bin/kubelet.sh
    7-21 conf]# mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet
    
    

    11.7 Create the supervisor config and start

    Perform on kjdow7-21 and kjdow7-22

    [root@kjdow7-21 conf]# vim /etc/supervisord.d/kube-kubelet.ini
    [program:kube-kubelet-7-21]
    command=/opt/kubernetes/server/bin/kubelet.sh                 ; the program (relative uses PATH, can take args)
    numprocs=1                                                        ; number of processes copies to start (def 1)
    directory=/opt/kubernetes/server/bin                              ; directory to cwd to before exec (def no cwd)
    autostart=true                                                    ; start at supervisord start (default: true)
    autorestart=true              									  ; retstart at unexpected quit (default: true)
    startsecs=22                  									  ; number of secs prog must stay running (def. 1)
    startretries=3                									  ; max # of serial start failures (default 3)
    exitcodes=0,2                 									  ; 'expected' exit codes for process (default 0,2)
    stopsignal=QUIT               									  ; signal used to kill process (default TERM)
    stopwaitsecs=10               									  ; max num secs to wait b4 SIGKILL (default 10)
    user=root                                                         ; setuid to this UNIX account to run the program
    redirect_stderr=false                                             ; redirect proc stderr to stdout (default false)
    stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log   ; stdout log path, NONE for none; default AUTO
    stdout_logfile_maxbytes=64MB                                      ; max # logfile bytes b4 rotation (default 50MB)
    stdout_logfile_backups=4                                          ; # of stdout logfile backups (default 10)
    stdout_capture_maxbytes=1MB                                       ; number of bytes in 'capturemode' (default 0)
    stdout_events_enabled=false                                       ; emit events on stdout writes (default false)
    stderr_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stderr.log   ; stderr log path, NONE for none; default AUTO
    stderr_logfile_maxbytes=64MB                                      ; max # logfile bytes b4 rotation (default 50MB)
    stderr_logfile_backups=4                                          ; # of stderr logfile backups (default 10)
    stderr_capture_maxbytes=1MB                                       ; number of bytes in 'capturemode' (default 0)
    stderr_events_enabled=false                                       ; emit events on stderr writes (default false)
    stopasgroup=true
    
    ###Start the service and verify
    [root@kjdow7-21 conf]# supervisorctl update
    kube-kubelet-7-21: added process group
    [root@kjdow7-21 conf]# supervisorctl status
    etcd-server-7-21                 RUNNING   pid 9232, uptime 22:17:49
    kube-apiserver-7-21              RUNNING   pid 10804, uptime 20:42:18
    kube-controller-manager-7-21     RUNNING   pid 12138, uptime 5:05:06
    kube-kubelet-7-21                RUNNING   pid 12683, uptime 0:00:40
    kube-scheduler-7-21              RUNNING   pid 12131, uptime 5:05:06
    
    ###Check the worker node
    [root@kjdow7-21 conf]# kubectl get node
    NAME                 STATUS   ROLES    AGE   VERSION
    kjdow7-21.host.com   Ready    <none>   55s   v1.15.5
    
    #Very important: the node must show STATUS Ready!
    
    

    11.8 Install, start, and check the kubelet service on all planned cluster hosts

    Deploy to the other cluster nodes by repeating the steps above. Step 11.3 differs slightly: it does not need to be redone; just copy the generated files to the corresponding locations, as sketched below.
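
    A minimal sketch of the copy step, run from kjdow7-22 (it assumes kjdow7-21 holds the kubelet.kubeconfig generated in 11.3 under the same directory layout):

    ```
    # Fetch the kubelet kubeconfig generated once on kjdow7-21 (step 11.3)
    cd /opt/kubernetes/server/bin/conf
    scp kjdow7-21:/opt/kubernetes/server/bin/conf/kubelet.kubeconfig .
    ```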

    [root@kjdow7-22 ~]# supervisorctl update
    kube-kubelet-7-22: added process group
    [root@kjdow7-22 ~]# supervisorctl status
    etcd-server-7-22                 RUNNING   pid 9282, uptime 22:00:28
    kube-apiserver-7-22              RUNNING   pid 10895, uptime 19:01:51
    kube-controller-manager-7-22     RUNNING   pid 11802, uptime 6:49:37
    kube-kubelet-7-22                RUNNING   pid 12417, uptime 0:00:29
    kube-scheduler-7-22              RUNNING   pid 11825, uptime 6:35:10
    
    

    11.9 Check the cluster status

    ###View node status
    [root@kjdow7-22 ~]# kubectl get node
    NAME                 STATUS   ROLES    AGE    VERSION
    kjdow7-21.host.com   Ready    <none>   12m    v1.15.5
    kjdow7-22.host.com   Ready    <none>   2m3s   v1.15.5
    
    ###Label the nodes
    [root@kjdow7-22 ~]# kubectl label node kjdow7-21.host.com node-role.kubernetes.io/master=
    node/kjdow7-21.host.com labeled
    [root@kjdow7-22 ~]# kubectl get node
    NAME                 STATUS   ROLES    AGE    VERSION
    kjdow7-21.host.com   Ready    master   13m    v1.15.5
    kjdow7-22.host.com   Ready    <none>   3m3s   v1.15.5
    [root@kjdow7-22 ~]# kubectl label node kjdow7-21.host.com node-role.kubernetes.io/node=
    node/kjdow7-21.host.com labeled
    [root@kjdow7-22 ~]# kubectl get node
    NAME                 STATUS   ROLES         AGE     VERSION
    kjdow7-21.host.com   Ready    master,node   13m     v1.15.5
    kjdow7-22.host.com   Ready    <none>        3m32s   v1.15.5
    [root@kjdow7-22 ~]# kubectl label node kjdow7-22.host.com node-role.kubernetes.io/master=
    node/kjdow7-22.host.com labeled
    [root@kjdow7-22 ~]# kubectl label node kjdow7-22.host.com node-role.kubernetes.io/node=
    node/kjdow7-22.host.com labeled
    [root@kjdow7-22 ~]# kubectl get node
    NAME                 STATUS   ROLES         AGE     VERSION
    kjdow7-21.host.com   Ready    master,node   13m     v1.15.5
    kjdow7-22.host.com   Ready    master,node   3m51s   v1.15.5
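
    The ROLES column is driven purely by these node-role labels; a label can be removed again with the trailing-dash form of kubectl label, for example:

    ```
    # Remove the master role label again (a trailing '-' deletes the label)
    kubectl label node kjdow7-22.host.com node-role.kubernetes.io/master-
    ```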
    
    

    12. Install and deploy the worker-node service: kube-proxy

    Deploy on kjdow7-21 and kjdow7-22

    12.1 Issue the kube-proxy certificate

    Perform the following on kjdow7-200

    vim /opt/certs/kube-proxy-csr.json
    {
        "CN": "system:kube-proxy",
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "ST": "shanghai",
                "L": "shanghai",
                "O": "phc-dow",
                "OU": "kjdow"
            }
        ]
    }
    
    ##Generate the kube-proxy certificate and private key
    7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json | cfssljson -bare kube-proxy-client
    
    ##Check the generated certificate and private key
    7-200 certs]# ls -l|grep kube-proxy
    -rw------- 1 root root 1679 Jan 22 17:31 kube-proxy-client-key.pem
    -rw-r--r-- 1 root root 1005 Jan 22 17:31 kube-proxy-client.csr
    -rw-r--r-- 1 root root 1383 Jan 22 17:31 kube-proxy-client.pem
    -rw-r--r-- 1 root root  268 Jan 22 17:23 kube-proxy-csr.json
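
    The CN of system:kube-proxy is what lets this certificate work without any extra RBAC objects: Kubernetes ships a bootstrap ClusterRoleBinding that grants the system:node-proxier ClusterRole to exactly that user. It can be checked from any node with a working kubectl:

    ```
    # Default bootstrap binding: ClusterRole system:node-proxier -> user system:kube-proxy
    kubectl get clusterrolebinding system:node-proxier -o yaml
    ```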
    
    

    12.2 Copy the certificates to each worker node

    Perform the following on kjdow7-21 and kjdow7-22

    [root@kjdow7-21 ~]# cd /opt/kubernetes/server/bin/certs/
    [root@kjdow7-21 certs]# scp kjdow7-200:/opt/certs/kube-proxy-client.pem . 
    [root@kjdow7-21 certs]# scp kjdow7-200:/opt/certs/kube-proxy-client-key.pem .
    7-21 certs]# ls -l /opt/kubernetes/server/bin/certs
    total 40
    -rw------- 1 root root 1676 Jan 21 16:39 apiserver-key.pem
    -rw-r--r-- 1 root root 1599 Jan 21 16:36 apiserver.pem
    -rw------- 1 root root 1675 Jan 21 13:55 ca-key.pem
    -rw-r--r-- 1 root root 1354 Jan 21 13:50 ca.pem
    -rw------- 1 root root 1679 Jan 21 13:53 client-key.pem
    -rw-r--r-- 1 root root 1368 Jan 21 13:53 client.pem
    -rw------- 1 root root 1679 Jan 22 17:00 kubelet-key.pem
    -rw-r--r-- 1 root root 1456 Jan 22 17:00 kubelet.pem
    -rw------- 1 root root 1679 Jan 22 17:31 kube-proxy-client-key.pem
    -rw-r--r-- 1 root root 1383 Jan 22 17:31 kube-proxy-client.pem
    
    

    Note: only the last two files (kube-proxy-client.pem and kube-proxy-client-key.pem) need to be copied.

    12.3 Create the kube-proxy.kubeconfig configuration

    Note: the file generated in this step only needs to be created once; afterwards, copy it to the other nodes.

    1) set-cluster

    Note: run in the conf directory

    7-21 conf]# cd /opt/kubernetes/server/bin/conf
    7-21 conf]# kubectl config set-cluster myk8s \
      --certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem \
      --embed-certs=true \
      --server=https://10.4.7.10:7443 \
      --kubeconfig=kube-proxy.kubeconfig
      
    Cluster "myk8s" set.
    
    
    2) set-credentials

    Note: run in the conf directory

    7-21 conf]# kubectl config set-credentials kube-proxy \
      --client-certificate=/opt/kubernetes/server/bin/certs/kube-proxy-client.pem \
      --client-key=/opt/kubernetes/server/bin/certs/kube-proxy-client-key.pem \
      --embed-certs=true \
      --kubeconfig=kube-proxy.kubeconfig
    
    User "kube-proxy" set.
    
    
    3) set-context

    Note: run in the conf directory

    7-21 conf]# kubectl config set-context myk8s-context \
      --cluster=myk8s \
      --user=kube-proxy \
      --kubeconfig=kube-proxy.kubeconfig
    
    Context "myk8s-context" created.
    
    
    4) use-context

    Note: run in the conf directory

    7-21 conf]# kubectl config use-context myk8s-context --kubeconfig=kube-proxy.kubeconfig
    
    Switched to context "myk8s-context".
    
    
    ##View the generated file
    7-21 conf]# ls -l|grep kube-proxy.kubeconfig    
    -rw------- 1 root root 6471 Jan 22 17:33 kube-proxy.kubeconfig
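
    A minimal sketch of distributing this file to the other node, assuming the same directory layout on kjdow7-22; kubectl config view confirms what was embedded without printing the certificate data in full:

    ```
    # From kjdow7-22: fetch the kubeconfig generated once on kjdow7-21
    cd /opt/kubernetes/server/bin/conf
    scp kjdow7-21:/opt/kubernetes/server/bin/conf/kube-proxy.kubeconfig .

    # Inspect it; embedded certificate data is redacted in the output
    kubectl config view --kubeconfig=kube-proxy.kubeconfig
    ```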
    
    

    12.4 Load the ipvs kernel modules

    Perform the following on kjdow7-21 and kjdow7-22

    vim /root/ipvs.sh
    #!/bin/bash
    # Directory holding the ipvs kernel modules for the running kernel
    ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
    for i in $(ls $ipvs_mods_dir | grep -o "^[^.]*")
    do
      # Load the module only if modinfo can resolve it
      /sbin/modinfo -F filename $i &>/dev/null
      if [ $? -eq 0 ];then
        /sbin/modprobe $i
      fi
    done
    
    

    This script loads every available ipvs-related kernel module.

    [root@kjdow7-21 ~]# lsmod | grep ip_vs
    [root@kjdow7-21 ~]# chmod +x /root/ipvs.sh 
    [root@kjdow7-21 ~]# sh /root/ipvs.sh 
    [root@kjdow7-21 ~]# lsmod | grep ip_vs
    ip_vs_wrr              12697  0 
    ip_vs_wlc              12519  0 
    ip_vs_sh               12688  0 
    ip_vs_sed              12519  0 
    ip_vs_rr               12600  0 
    ip_vs_pe_sip           12740  0 
    nf_conntrack_sip       33860  1 ip_vs_pe_sip
    ip_vs_nq               12516  0 
    ip_vs_lc               12516  0 
    ip_vs_lblcr            12922  0 
    ip_vs_lblc             12819  0 
    ip_vs_ftp              13079  0 
    ip_vs_dh               12688  0 
    ip_vs                 145497  24 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_pe_sip,ip_vs_lblcr,ip_vs_lblc
    nf_nat                 26787  3 ip_vs_ftp,nf_nat_ipv4,nf_nat_masquerade_ipv4
    nf_conntrack          133095  8 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_sip,nf_conntrack_ipv4
    libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
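
    Modules loaded this way do not survive a reboot. One hedged way to make the loading persistent on CentOS 7 (an addition to the original procedure, not part of it) is to run the script from rc.local:

    ```
    # Re-run the ipvs loader at boot; on CentOS 7 rc.local must be executable
    echo '/bin/bash /root/ipvs.sh' >> /etc/rc.d/rc.local
    chmod +x /etc/rc.d/rc.local
    ```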
    
    

    12.5 Create the kube-proxy startup script

    Perform the following on kjdow7-21 and kjdow7-22

    [root@kjdow7-21 ~]# vim /opt/kubernetes/server/bin/kube-proxy.sh
    #!/bin/sh
    ./kube-proxy \
      --cluster-cidr 172.7.0.0/16 \
      --hostname-override kjdow7-21.host.com \
      --proxy-mode=ipvs \
      --ipvs-scheduler=nq \
      --kubeconfig ./conf/kube-proxy.kubeconfig
      
    ##Check configuration, set permissions, create the log directory
    7-21 conf]# chmod +x /opt/kubernetes/server/bin/kube-proxy.sh
    7-21 conf]# mkdir -p /data/logs/kubernetes/kube-proxy
    
    

    Note: the kube-proxy startup script also differs slightly between cluster hosts (again --hostname-override); adjust it when deploying the other nodes, as sketched below.
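
    A minimal sketch for kjdow7-22, assuming the rest of the script is identical:

    ```
    # kjdow7-22: only the node name changes
    sed -i 's/--hostname-override kjdow7-21.host.com/--hostname-override kjdow7-22.host.com/' \
        /opt/kubernetes/server/bin/kube-proxy.sh
    ```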

    12.6 Create the supervisor configuration and start the service

    Perform the following on kjdow7-21 and kjdow7-22

    [root@kjdow7-21 ~]# vim /etc/supervisord.d/kube-proxy.ini
    [program:kube-proxy-7-21]
    command=/opt/kubernetes/server/bin/kube-proxy.sh                 ; the program (relative uses PATH, can take args)
    numprocs=1                                                           ; number of processes copies to start (def 1)
    directory=/opt/kubernetes/server/bin                                 ; directory to cwd to before exec (def no cwd)
    autostart=true                                                       ; start at supervisord start (default: true)
    autorestart=true                                                     ; restart at unexpected quit (default: true)
    startsecs=22                                                         ; number of secs prog must stay running (def. 1)
    startretries=3                                                       ; max # of serial start failures (default 3)
    exitcodes=0,2                                                        ; 'expected' exit codes for process (default 0,2)
    stopsignal=QUIT                                                      ; signal used to kill process (default TERM)
    stopwaitsecs=10                                                      ; max num secs to wait b4 SIGKILL (default 10)
    user=root                                                            ; setuid to this UNIX account to run the program
    redirect_stderr=false                                                ; redirect proc stderr to stdout (default false)
    stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log     ; stdout log path, NONE for none; default AUTO
    stdout_logfile_maxbytes=64MB                                         ; max # logfile bytes b4 rotation (default 50MB)
    stdout_logfile_backups=4                                             ; # of stdout logfile backups (default 10)
    stdout_capture_maxbytes=1MB                                          ; number of bytes in 'capturemode' (default 0)
    stdout_events_enabled=false                                          ; emit events on stdout writes (default false)
    stderr_logfile=/data/logs/kubernetes/kube-proxy/proxy.stderr.log     ; stderr log path, NONE for none; default AUTO
    stderr_logfile_maxbytes=64MB                                         ; max # logfile bytes b4 rotation (default 50MB)
    stderr_logfile_backups=4                                             ; # of stderr logfile backups (default 10)
    stderr_capture_maxbytes=1MB                                          ; number of bytes in 'capturemode' (default 0)
    stderr_events_enabled=false                                          ; emit events on stderr writes (default false)
    stopasgroup=true
    
    ##Start the service and verify
    [root@kjdow7-21 ~]# supervisorctl update
    kube-proxy-7-21: added process group
    [root@kjdow7-21 ~]# supervisorctl status
    etcd-server-7-21                 RUNNING   pid 9232, uptime 1 day, 21:43:47
    kube-apiserver-7-21              RUNNING   pid 10804, uptime 1 day, 20:08:16
    kube-controller-manager-7-21     RUNNING   pid 12138, uptime 1 day, 4:31:04
    kube-kubelet-7-21                RUNNING   pid 12683, uptime 23:26:38
    kube-proxy-7-21                  RUNNING   pid 30246, uptime 0:00:58
    kube-scheduler-7-21              RUNNING   pid 12131, uptime 1 day, 4:31:04
    
    

    12.7 Install, start, and check the kube-proxy service on all planned cluster hosts

    [root@kjdow7-22 ~]# supervisorctl update
    kube-proxy-7-22: added process group
    [root@kjdow7-22 ~]# supervisorctl status
    etcd-server-7-22                 RUNNING   pid 9282, uptime 1 day, 21:18:32
    kube-apiserver-7-22              RUNNING   pid 10895, uptime 1 day, 18:19:55
    kube-controller-manager-7-22     RUNNING   pid 11802, uptime 1 day, 6:07:41
    kube-kubelet-7-22                RUNNING   pid 12417, uptime 23:18:33
    kube-proxy-7-22                  RUNNING   pid 28635, uptime 0:01:08
    kube-scheduler-7-22              RUNNING   pid 11825, uptime 1 day, 5:53:14
    
    

    12.8 Verify ipvs functionality

    [root@kjdow7-21 ~]# yum install ipvsadm -y
    [root@kjdow7-21 ~]# ipvsadm -ln
    IP Virtual Server version 1.2.1 (size=4096)
    Prot LocalAddress:Port Scheduler Flags
      -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
    TCP  192.168.0.1:443 nq
      -> 10.4.7.21:6443               Masq    1      0          0         
      -> 10.4.7.22:6443               Masq    1      0          0 
    [root@kjdow7-21 ~]# kubectl get svc
    NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
    kubernetes   ClusterIP   192.168.0.1   <none>        443/TCP   2d
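
    The two outputs line up: the kubernetes service ClusterIP 192.168.0.1:443 is realized by ipvs as a virtual server using the nq (never-queue) scheduler configured in the startup script, forwarding to both apiservers. For debugging, per-connection ipvs state can also be listed:

    ```
    # Show current ipvs connection entries
    ipvsadm -lnc
    ```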
    
    

    13. Verify the kubernetes cluster

    On any worker node, create a resource manifest

    [root@kjdow7-21 ~]# vim /root/nginx-ds.yaml
    apiVersion: extensions/v1beta1
    kind: DaemonSet
    metadata:
      name: nginx-ds
    spec:
      template:
        metadata:
          labels:
            app: nginx-ds
        spec:
          containers:
          - name: my-nginx
            image: harbor.phc-dow.com/public/nginx:v1.7.9
            ports:
            - containerPort: 80
            
    [root@kjdow7-21 ~]# kubectl create -f nginx-ds.yaml 
    daemonset.extensions/nginx-ds created
    [root@kjdow7-21 ~]# kubectl get pods
    NAME             READY   STATUS    RESTARTS   AGE
    nginx-ds-ssdtm   1/1     Running   0          56s
    nginx-ds-xfsk4   1/1     Running   0          56s
    
    [root@kjdow7-21 ~]# kubectl get pods -o wide
    NAME             READY   STATUS    RESTARTS   AGE    IP           NODE                 NOMINATED NODE   READINESS GATES
    nginx-ds-ssdtm   1/1     Running   0          106s   172.7.21.2   kjdow7-21.host.com   <none>           <none>
    nginx-ds-xfsk4   1/1     Running   0          106s   172.7.22.2   kjdow7-22.host.com   <none>           <none>
    
    

    You can see that two pods are running, one per node (nginx-ds is a DaemonSet). From a given host, curl only reaches the pod on that same host, because cross-host pod-to-pod communication is not set up yet; the sketch below illustrates this.
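
    A quick check from kjdow7-21 (the pod IPs are taken from the kubectl get pods -o wide output above and will differ per run):

    ```
    # Reachable: pod on the same host
    curl -I 172.7.21.2

    # Fails/hangs: pod on the other host, no cross-host pod network yet
    curl -I 172.7.22.2
    ```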
