zoukankan      html  css  js  c++  java
  • kubernetes集群环境搭建(6)

    kubernetes集群环境搭建(6)

    一、kubernetes集群环境搭建

    1.1.1 安装部署node节点服务kubelet

    1 .安装部署kubelet服务主机规划

    主机 角色 ip
    hdss-21 kubelet 10.0.0.21
    hdss-22 kubelet 10.0.0.22
    1. 签发kubelet证书(hdss-201)
    创建生成证书签名请求(csr)的JSON配置文件
    [root@hdss-201.host.com ~]# cd /opt/certs/
    [root@hdss-201.host.com /opt/certs]# vim  kubelet-csr.json
    [root@hdss-201.host.com /opt/certs]# cat kubelet-csr.json
    {
        "CN": "k8s-kubelet",
        "hosts": [
        "127.0.0.1",
        "10.0.0.10",
        "10.0.0.21",
        "10.0.0.22",
        "10.0.0.23",
        "10.0.0.24",
        "10.0.0.25",
        "10.0.0.26",
        "10.0.0.27",
        "10.0.0.28"
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "ST": "guizjou",
                "L": "guiyang",
                "O": "od",
                "OU": "ops"
            }
        ]
    }
    #注意:添加node节点IP,多写一些可能安装使用的IP(预留kubelet ip),如果新node的ip不在证书内,需要重新签发证书,并拷贝至所有主机
    
    生成证书
    
    [root@hdss-201.host.com /opt/certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json | cfssl-json -bare kubelet
    2020/06/15 21:13:10 [INFO] generate received request
    2020/06/15 21:13:10 [INFO] received CSR
    2020/06/15 21:13:10 [INFO] generating key: rsa-2048
    2020/06/15 21:13:10 [INFO] encoded CSR
    2020/06/15 21:13:10 [INFO] signed certificate with serial number 175022088378942891662316182252757045162962070069
    2020/06/15 21:13:10 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
    websites. For more information see the Baseline Requirements for the Issuance and Management
    of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
    specifically, section 10.2.3 ("Information Requirements").
    [root@hdss-201.host.com /opt/certs]# ll
    total 84
    -rw-r--r-- 1 root root 1249 Jun 13 21:35 apiserver.csr
    -rw-r--r-- 1 root root  566 Jun 13 21:31 apiserver-csr.json
    -rw------- 1 root root 1679 Jun 13 21:35 apiserver-key.pem
    -rw-r--r-- 1 root root 1598 Jun 13 21:35 apiserver.pem
    -rw-r--r-- 1 root root  840 Jun 12 21:24 ca-config.json
    -rw-r--r-- 1 root root  993 Jun 10 21:49 ca.csr
    -rw-r--r-- 1 root root  345 Jun 10 21:48 ca-csr.json
    -rw------- 1 root root 1675 Jun 10 21:49 ca-key.pem
    -rw-r--r-- 1 root root 1346 Jun 10 21:49 ca.pem
    -rw-r--r-- 1 root root  993 Jun 13 21:23 client.csr
    -rw-r--r-- 1 root root  280 Jun 13 21:22 client-csr.json
    -rw------- 1 root root 1675 Jun 13 21:23 client-key.pem
    -rw-r--r-- 1 root root 1363 Jun 13 21:23 client.pem
    -rw-r--r-- 1 root root 1062 Jun 12 21:33 etcd-peer.csr
    -rw-r--r-- 1 root root  363 Jun 12 21:27 etcd-peer-csr.json
    -rw------- 1 root root 1679 Jun 12 21:33 etcd-peer-key.pem
    -rw-r--r-- 1 root root 1428 Jun 12 21:33 etcd-peer.pem
    -rw-r--r-- 1 root root 1115 Jun 15 21:13 kubelet.csr
    -rw-r--r-- 1 root root  451 Jun 15 21:13 kubelet-csr.json
    -rw------- 1 root root 1675 Jun 15 21:13 kubelet-key.pem
    -rw-r--r-- 1 root root 1468 Jun 15 21:13 kubelet.pem
    
    1. 拷贝证书、私钥
    #21
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# scp -rp hdss-201:/opt/certs/kubelet-key.pem /opt/kubernetes/server/bin/certs/
    root@hdss-201's password: 
    kubelet-key.pem                                                                                    100% 1675     1.3MB/s   00:00    
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# scp -rp hdss-201:/opt/certs/kubelet.pem /opt/kubernetes/server/bin/certs/
    root@hdss-201's password: 
    kubelet.pem                                                                                        100% 1468     2.0MB/s   00:00    
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# ll /opt/kubernetes/server/bin/certs/
    total 32
    -rw------- 1 root root 1679 Jun 13 21:49 apiserver-key.pem
    -rw-r--r-- 1 root root 1598 Jun 13 21:48 apiserver.pem
    -rw------- 1 root root 1675 Jun 13 21:47 ca-key.pem
    -rw-r--r-- 1 root root 1346 Jun 13 21:46 ca.pem
    -rw------- 1 root root 1675 Jun 13 21:48 client-key.pem
    -rw-r--r-- 1 root root 1363 Jun 13 21:48 client.pem
    -rw------- 1 root root 1675 Jun 15 21:13 kubelet-key.pem
    -rw-r--r-- 1 root root 1468 Jun 15 21:13 kubelet.pem
    #22操作相同
    
    1. 创建配置文件(分四步)
    #21
    1、set-cluster
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# cd config/
    [root@hdss-21.host.com /opt/kubernetes/server/bin/config]# kubectl config set-cluster myk8s \
    >     --certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem \
    >     --embed-certs=true \
    >     --server=https://10.0.0.10:7443 \
    >     --kubeconfig=kubelet.kubeconfig
    Cluster "myk8s" set.
    
    2、set-credentials
    [root@hdss-21.host.com /opt/kubernetes/server/bin/config]# kubectl config set-credentials k8s-node \
    >   --client-certificate=/opt/kubernetes/server/bin/certs/client.pem \
    >   --client-key=/opt/kubernetes/server/bin/certs/client-key.pem \
    >   --embed-certs=true \
    >   --kubeconfig=kubelet.kubeconfig
    User "k8s-node" set.
    
    3、set-context
    [root@hdss-21.host.com /opt/kubernetes/server/bin/config]# kubectl config set-context myk8s-context \
    >   --cluster=myk8s \
    >   --user=k8s-node \
    >   --kubeconfig=kubelet.kubeconfig
    Context "myk8s-context" created.
    
    4、use-context
    [root@hdss-21.host.com /opt/kubernetes/server/bin/config]# kubectl config use-context myk8s-context --kubeconfig=kubelet.kubeconfig
    Switched to context "myk8s-context".
    
    1. 创建资源配置文件,授予权限,角色绑定
    [root@hdss-21.host.com /opt/kubernetes/server/bin/config]# vi k8s-node.yaml
    [root@hdss-21.host.com /opt/kubernetes/server/bin/config]# cat k8s-node.yaml 
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: k8s-node
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:node
    subjects:
    - apiGroup: rbac.authorization.k8s.io
      kind: User
      name: k8s-node
      
    [root@hdss-21.host.com /opt/kubernetes/server/bin/config]# kubectl create -f k8s-node.yaml
    clusterrolebinding.rbac.authorization.k8s.io/k8s-node created
    
    [root@hdss-21.host.com /opt/kubernetes/server/bin/config]# kubectl get clusterrolebinding k8s-node #检查
    NAME       AGE
    k8s-node   42s
    [root@hdss-21.host.com /opt/kubernetes/server/bin/config]# kubectl get clusterrolebinding k8s-node -o yaml
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      creationTimestamp: "2020-06-15T13:50:20Z"
      name: k8s-node
      resourceVersion: "24418"
      selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/k8s-node
      uid: 6a991a6d-b922-4979-a7b2-534c47ff06da
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:node
    subjects:
    - apiGroup: rbac.authorization.k8s.io
      kind: User
      name: k8s-node
      
    #注意:这个配置文件创建之后不用在22上重复创建,因为配置已经写到etcd了,重复创建会报错。
    
    #22 从21上拷贝配置文件
    [root@hdss-22.host.com /opt/kubernetes/server/bin/config]# scp -rp hdss-21:/opt/kubernetes/server/bin/config/kubelet.kubeconfig ./
    The authenticity of host 'hdss-21 (10.0.0.21)' can't be established.
    ECDSA key fingerprint is SHA256:bbt9sjPOENs3zK9cw7YmIo0ABuFkZnTxXbOaIdpSOo0.
    ECDSA key fingerprint is MD5:e5:3b:15:2e:6c:82:4b:b1:f8:45:dc:80:72:de:11:47.
    Are you sure you want to continue connecting (yes/no)? yes
    Warning: Permanently added 'hdss-21,10.0.0.21' (ECDSA) to the list of known hosts.
    root@hdss-21's password: 
    kubelet.kubeconfig                               
    
    [root@hdss-22.host.com /opt/kubernetes/server/bin/config]# ll
    total 12
    -rw-r--r-- 1 root root 2223 Apr 27 13:49 audit.yaml
    -rw------- 1 root root 6195 Jun 15 21:40 kubelet.kubeconfig
    
    1. 准备pause基础镜像(hdss-201) -- 边车模式(初始化容器业务空间)
    [root@hdss-201.host.com /opt/certs]# docker pull kubernetes/pause
    Using default tag: latest
    latest: Pulling from kubernetes/pause
    4f4fb700ef54: Pull complete 
    b9c8ec465f6b: Pull complete 
    Digest: sha256:b31bfb4d0213f254d361e0079deaaebefa4f82ba7aa76ef82e90b4935ad5b105
    Status: Downloaded newer image for kubernetes/pause:latest
    docker.io/kubernetes/pause:latest
    
    打标签上传harbor私有仓库
    [root@hdss-201.host.com /opt/certs]# docker tag f9d5de079539 harbor.od.com/public/pause:latest
    [root@hdss-201.host.com /opt/certs]# docker push harbor.od.com/public/pause:latest
    The push refers to repository [harbor.od.com/public/pause]
    5f70bf18a086: Mounted from public/nginx 
    e16a89738269: Pushed 
    latest: digest: sha256:b31bfb4d0213f254d361e0079deaaebefa4f82ba7aa76ef82e90b4935ad5b105 size: 938
    
    1. 写启动脚本
    #21 
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# vim kubelet.sh 
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# chmod +x kubelet.sh 
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# ll kubelet.sh 
    -rwxr-xr-x 1 root root 674 Jun 15 22:31 kubelet.sh
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# cat kubelet.sh 
    #!/bin/sh
    # Start kubelet for node hdss-21.host.com; intended to be run (via supervisord)
    # from /opt/kubernetes/server/bin, hence the relative ./certs and ./config paths.
    # NOTE(review): trailing backslashes restored — without them only "./kubelet"
    # would execute, with every flag below silently ignored.
    ./kubelet \
      --anonymous-auth=false \
      --cgroup-driver systemd \
      --cluster-dns 192.168.0.2 \
      --cluster-domain cluster.local \
      --runtime-cgroups=/systemd/system.slice \
      --kubelet-cgroups=/systemd/system.slice \
      --fail-swap-on="false" \
      --client-ca-file ./certs/ca.pem \
      --tls-cert-file ./certs/kubelet.pem \
      --tls-private-key-file ./certs/kubelet-key.pem \
      --hostname-override hdss-21.host.com \
      --image-gc-high-threshold 20 \
      --image-gc-low-threshold 10 \
      --kubeconfig ./config/kubelet.kubeconfig \
      --log-dir /data/logs/kubernetes/kube-kubelet \
      --pod-infra-container-image harbor.od.com/public/pause:latest \
      --root-dir /data/kubelet
    
    创建数据目录和日志目录
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# ll /data/logs/kubernetes/kube-kubelet /data/kubelet
    /data/kubelet:
    total 0
    
    /data/logs/kubernetes/kube-kubelet:
    total 0
    
    #22操作同上
    

    8.supervisor后台管理启动

    [root@hdss-21.host.com /opt/kubernetes/server/bin]# vim /etc/supervisord.d/kube-kubelet.ini
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# cat /etc/supervisord.d/kube-kubelet.ini
    [program:kube-kubelet-21]	
    command=/opt/kubernetes/server/bin/kubelet.sh     ; the program (relative uses PATH, can take args)
    numprocs=1                                        ; number of processes copies to start (def 1)
    directory=/opt/kubernetes/server/bin              ; directory to cwd to before exec (def no cwd)
    autostart=true                                    ; start at supervisord start (default: true)
    autorestart=true              		          ; restart at unexpected quit (default: true)
    startsecs=30                                      ; number of secs prog must stay running (def. 1)
    startretries=3                                    ; max # of serial start failures (default 3)
    exitcodes=0,2                                     ; 'expected' exit codes for process (default 0,2)
    stopsignal=QUIT                                   ; signal used to kill process (default TERM)
    stopwaitsecs=10                                   ; max num secs to wait b4 SIGKILL (default 10)
    user=root                                         ; setuid to this UNIX account to run the program
    redirect_stderr=true                              ; redirect proc stderr to stdout (default false)
    stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log   ; stderr log path, NONE for none; default AUTO
    stdout_logfile_maxbytes=64MB                      ; max # logfile bytes b4 rotation (default 50MB)
    stdout_logfile_backups=4                          ; # of stdout logfile backups (default 10)
    stdout_capture_maxbytes=1MB                       ; number of bytes in 'capturemode' (default 0)
    stdout_events_enabled=false                       ; emit events on stdout writes (default false)
    
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# supervisorctl status
    etcd-server-21                   RUNNING   pid 1455, uptime 0:00:00
    kube-apiserver-21                RUNNING   pid 1452, uptime 0:00:00
    kube-controller-manager-21       RUNNING   pid 1224, uptime 0:00:00
    kube-kubelet-21                  RUNNING   pid 2053, uptime 0:02:48
    kube-scheduler-21                RUNNING   pid 1226, uptime 0:00:00
    

    9.检查是否添加到集群节点

    [root@hdss-21.host.com /opt/kubernetes/server/bin]# kubectl get nodes
    NAME               STATUS   ROLES    AGE   VERSION
    hdss-21.host.com   Ready    <none>   4m    v1.15.2
    hdss-22.host.com   Ready    <none>   17m   v1.15.2
    
    ROLES添加标签,设定节点角色,可同时加两个标签
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# kubectl label node hdss-21.host.com node-role.kubernetes.io/master=
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# kubectl label node hdss-22.host.com node-role.kubernetes.io/master=
    node/hdss-22.host.com labeled
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# kubectl label node hdss-21.host.com node-role.kubernetes.io/node=
    node/hdss-21.host.com labeled
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# kubectl label node hdss-22.host.com node-role.kubernetes.io/node=
    node/hdss-22.host.com labeled
    
    [root@hdss-21.host.com /opt/kubernetes/server/bin]# kubectl get nodes
    NAME               STATUS   ROLES         AGE     VERSION
    hdss-21.host.com   Ready    master,node   9m32s   v1.15.2
    hdss-22.host.com   Ready    master,node   22m     v1.15.2
    
    #因为这两台服务器既是master又是node,所以两个角色都要添加
    
  • 相关阅读:
    [网络流24题] 最小路径覆盖问题
    [P2664] 树上游戏
    [ZROI #316] ZYB玩字符串
    [Codeforces #172] Tutorial
    [网络流24题]方格取数
    Python 全栈开发:python函数进阶
    Python 全栈开发:python函数基础
    Python 全栈开发:python文件处理
    Python 全栈开发:python字符编码
    Python 全栈开发:python字典dict
  • 原文地址:https://www.cnblogs.com/woaiyunwei/p/13138584.html
Copyright © 2011-2022 走看看