  • Kubernetes (6) binary installation: installing the master node

    Download the installation packages

    1. Download the latest binary release

      cd /opt/k8s/work
      
      wget https://dl.k8s.io/v1.17.2/kubernetes-server-linux-amd64.tar.gz # currently not directly downloadable from mainland China; a proxy is needed
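      # optional: verify the download before unpacking; the checksum for this tarball is published in the Kubernetes v1.17.2 release notes
      sha512sum kubernetes-server-linux-amd64.tar.gz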
      tar -xzvf kubernetes-server-linux-amd64.tar.gz
      
      
    2. Install the corresponding k8s binaries

      cd /opt/k8s/work
      cp kubernetes/server/bin/{apiextensions-apiserver,kubeadm,kube-apiserver,kube-controller-manager,kubectl,kubelet,kube-proxy,kube-scheduler,mounter} /opt/k8s/bin/
      
      # distribute kubelet and kube-proxy to the worker node
      export node_ip=192.168.0.114
      scp kubernetes/server/bin/{kubelet,kube-proxy} root@${node_ip}:/opt/k8s/bin/
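      
      # with several worker nodes the same copy can be run in a loop; 192.168.0.115 below is only a placeholder for an extra worker
      for node_ip in 192.168.0.114 192.168.0.115
      do
        scp kubernetes/server/bin/{kubelet,kube-proxy} root@${node_ip}:/opt/k8s/bin/
      done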
      

    Configure kubectl

    kubectl talks to kube-apiserver securely over HTTPS; kube-apiserver authenticates and authorizes each kubectl request based on the client certificate it carries.

    kubectl will later be used for cluster administration, so the admin certificate created here carries the highest privileges.

    1. Create the admin certificate and private key

      1. Create the certificate signing request (CSR) file

        
        cd /opt/k8s/work
        cat > admin-csr.json <<EOF
        {
          "CN": "admin",
          "hosts": [],
          "key": {
            "algo": "rsa",
            "size": 2048
          },
          "names": [
            {
              "C": "CN",
              "ST": "NanJing",
              "L": "NanJing",
              "O": "system:masters",
              "OU": "system"
            }
          ]
        }
        EOF
        
        
        • O: system:masters: when kube-apiserver receives a client request that uses this certificate, it adds the group identity system:masters to the request;
        • the predefined ClusterRoleBinding cluster-admin binds the group system:masters to the role cluster-admin, which grants the highest privileges needed to operate the cluster (see the check below);
        • this certificate is only used by kubectl as a client certificate, so the hosts field is left empty;
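
        As a quick sanity check (only possible once kube-apiserver is running, later in this section), the predefined binding mentioned above can be inspected with:

        kubectl describe clusterrolebinding cluster-admin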
      2. Generate the certificate and private key

        cd /opt/k8s/work
        cfssl gencert -ca=/opt/k8s/work/ca.pem \
          -ca-key=/opt/k8s/work/ca-key.pem \
          -config=/opt/k8s/work/ca-config.json \
          -profile=kubernetes admin-csr.json | cfssljson -bare admin
        ls admin*
        
      3. Install the certificate

        cd /opt/k8s/work
        cp admin*.pem /etc/kubernetes/cert
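        # optional check: the subject of the issued certificate should show O = system:masters and CN = admin
        openssl x509 -noout -subject -in /etc/kubernetes/cert/admin.pem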
        
    2. Create the kubeconfig file

      cd /opt/k8s/work
      
      export KUBE_APISERVER=https://192.168.0.107:6443
      
      # set cluster parameters
      kubectl config set-cluster kubernetes \
        --certificate-authority=/etc/kubernetes/cert/ca.pem \
        --embed-certs=true \
        --server=${KUBE_APISERVER} \
        --kubeconfig=kubectl.kubeconfig
      
      # set client authentication parameters
      kubectl config set-credentials admin \
        --client-certificate=/etc/kubernetes/cert/admin.pem \
        --client-key=/etc/kubernetes/cert/admin-key.pem \
        --embed-certs=true \
        --kubeconfig=kubectl.kubeconfig
      
      # set context parameters
      kubectl config set-context kubernetes \
        --cluster=kubernetes \
        --user=admin \
        --kubeconfig=kubectl.kubeconfig
      
      # set the default context
      kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig
      
      
      • --certificate-authority: the root certificate used to verify the kube-apiserver certificate;
      • --client-certificate, --client-key: the admin certificate and private key generated above, used for HTTPS communication with kube-apiserver;
      • --embed-certs=true: embed the contents of ca.pem and admin.pem into the generated kubectl.kubeconfig file (the result can be reviewed as shown below);
      • --server: the address of kube-apiserver;
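
      The resulting file can be reviewed before distributing it; embedded certificate data is elided in the output:

      kubectl config view --kubeconfig=kubectl.kubeconfig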
    3. Distribute the kubeconfig file (any other user who needs to access Kubernetes also needs this file copied into their own home directory)

      cd /opt/k8s/work
      mkdir -p ~/.kube
      cp kubectl.kubeconfig ~/.kube/config
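      
      # hypothetical example for granting another local user (here called "ops") access; adjust the user and home path as needed
      mkdir -p /home/ops/.kube
      cp kubectl.kubeconfig /home/ops/.kube/config
      chown -R ops:ops /home/ops/.kube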
      
      
    4. Configure kubectl shell auto-completion

      root@master:/opt/k8s/work# apt install -y bash-completion
      root@master:/opt/k8s/work# locate bash_completion
      /usr/share/bash-completion/bash_completion
      root@master:/opt/k8s/work# source /usr/share/bash-completion/bash_completion
      root@master:/opt/k8s/work# source <(kubectl completion bash)
      root@master:/opt/k8s/work# echo 'source <(kubectl completion bash)' >>~/.bashrc
      
      

    Configure kube-apiserver

    1. Create the kubernetes-api certificate and private key

      1. Create the certificate signing request file

        
        cd /opt/k8s/work
        cat > kubernetes-csr.json <<EOF
        {
          "CN": "kubernetes-api",
          "hosts": [
            "127.0.0.1",
            "192.168.0.107",
            "10.254.0.1",
            "kubernetes",
            "kubernetes.default",
            "kubernetes.default.svc",
            "kubernetes.default.svc.cluster",
            "kubernetes.default.svc.cluster.local."
          ],
          "key": {
            "algo": "rsa",
            "size": 2048
          },
          "names": [
            {
              "C": "CN",
              "ST": "NanJing",
              "L": "NanJing",
              "O": "k8s",
              "OU": "system"
            }
          ]
        }
        EOF
        
        
        • The hosts field lists the IPs and DNS names that may be used to reach kube-apiserver: the loopback address, the master node IP (192.168.0.107), the first IP of the service CIDR (10.254.0.1, which later becomes the ClusterIP of the kubernetes service), and the in-cluster DNS names of that service;
      2. Generate the certificate and private key

        cd /opt/k8s/work
        cfssl gencert -ca=/opt/k8s/work/ca.pem \
          -ca-key=/opt/k8s/work/ca-key.pem \
          -config=/opt/k8s/work/ca-config.json \
          -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
        ls kubernetes*
        
      3. Install the certificate

        cd /opt/k8s/work
        cp kubernetes*.pem /etc/kubernetes/cert/
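        # optional check: the Subject Alternative Names of the issued certificate should match the hosts list above
        openssl x509 -noout -text -in /etc/kubernetes/cert/kubernetes.pem | grep -A 1 "Subject Alternative Name"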
        
        
    2. Create the kube-apiserver service unit file

      export ETCD_ENDPOINTS="https://192.168.0.107:2379"
      export SERVICE_CIDR="10.254.0.0/16"
      export NODE_PORT_RANGE=80-60000
      
      cat > /etc/systemd/system/kube-apiserver.service <<EOF
      [Unit]
      Description=Kubernetes API Server
      Documentation=https://github.com/GoogleCloudPlatform/kubernetes
      After=network.target
      
      [Service]
      WorkingDirectory=/data/k8s/k8s/kube-apiserver
      ExecStart=/opt/k8s/bin/kube-apiserver \
        --advertise-address=192.168.0.107 \
        --etcd-cafile=/etc/kubernetes/cert/ca.pem \
        --etcd-certfile=/etc/kubernetes/cert/kubernetes.pem \
        --etcd-keyfile=/etc/kubernetes/cert/kubernetes-key.pem \
        --etcd-servers=${ETCD_ENDPOINTS} \
        --bind-address=192.168.0.107 \
        --secure-port=6443 \
        --tls-cert-file=/etc/kubernetes/cert/kubernetes.pem \
        --tls-private-key-file=/etc/kubernetes/cert/kubernetes-key.pem \
        --audit-log-maxage=15 \
        --audit-log-maxbackup=3 \
        --audit-log-maxsize=100 \
        --audit-log-truncate-enabled \
        --audit-log-path=/data/k8s/k8s/kube-apiserver/audit.log \
        --profiling \
        --anonymous-auth=false \
        --client-ca-file=/etc/kubernetes/cert/ca.pem \
        --enable-bootstrap-token-auth \
        --service-account-key-file=/etc/kubernetes/cert/ca-key.pem \
        --authorization-mode=Node,RBAC \
        --runtime-config=api/all=true \
        --allow-privileged=true \
        --event-ttl=168h \
        --kubelet-certificate-authority=/etc/kubernetes/cert/ca.pem \
        --kubelet-client-certificate=/etc/kubernetes/cert/kubernetes.pem \
        --kubelet-client-key=/etc/kubernetes/cert/kubernetes-key.pem \
        --kubelet-https=true \
        --kubelet-timeout=10s \
        --service-cluster-ip-range=${SERVICE_CIDR} \
        --service-node-port-range=${NODE_PORT_RANGE} \
        --logtostderr=true \
        --v=2
      Restart=on-failure
      RestartSec=10
      Type=notify
      LimitNOFILE=65536
      
      [Install]
      WantedBy=multi-user.target
      EOF
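      
      Because the here-document above is unquoted, ${ETCD_ENDPOINTS}, ${SERVICE_CIDR} and ${NODE_PORT_RANGE} are expanded while the file is written; a quick grep confirms the generated unit contains the literal values:
      
      grep -E "etcd-servers|service-cluster-ip-range|service-node-port-range" /etc/systemd/system/kube-apiserver.service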
      
    3. Create the kube-apiserver working directory

      mkdir -p /data/k8s/k8s/kube-apiserver
      
    4. Start the kube-apiserver service

      systemctl daemon-reload && systemctl enable kube-apiserver && systemctl restart kube-apiserver
      
    5. Check the startup result

      systemctl status kube-apiserver |grep Active
      
      • Make sure the status is active (running); otherwise check the logs to find out why

      • If something is wrong, inspect it with the following command

        journalctl -u kube-apiserver
        
    6. Check the running status of kube-apiserver

      root@master:/opt/k8s/work# kubectl cluster-info
      Kubernetes master is running at https://192.168.0.107:6443
      
      To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
      
      root@master:/opt/k8s/work# kubectl get all --all-namespaces
      NAMESPACE   NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
      default     service/kubernetes   ClusterIP   10.254.0.1   <none>        443/TCP   2m30s
      
      root@master:/opt/k8s/work# kubectl get componentstatuses
      NAME                 STATUS      MESSAGE                                                                                     ERROR
      scheduler            Unhealthy   Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused
      controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: connect: connection refused
      etcd-0               Healthy     {"health":"true"}                                                                      
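      
      The Unhealthy entries for scheduler and controller-manager simply mean that those components are not running yet; they are configured later in this guide. As an additional check of the apiserver itself, its healthz endpoint can be queried directly with the admin client certificate, in the same style as the curl checks used for the other components later in this section (it should return ok):
      
      curl -s --cacert /opt/k8s/work/ca.pem --cert /opt/k8s/work/admin.pem --key /opt/k8s/work/admin-key.pem https://192.168.0.107:6443/healthz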
      

    Configure kube-controller-manager

    1. Create the kube-controller-manager certificate and private key

      1. Create the certificate signing request file

        cd /opt/k8s/work
        cat > kube-controller-manager-csr.json <<EOF
        {
            "CN": "system:kube-controller-manager",
            "key": {
                "algo": "rsa",
                "size": 2048
            },
            "hosts": [
              "127.0.0.1",
              "192.168.0.107"
            ],
            "names": [
              {
                "C": "CN",
                "ST": "NanJing",
                "L": "NanJing",
                "O": "system:kube-controller-manager",
                "OU": "system"
              }
            ]
        }
        EOF
        
        • CN and O are both system:kube-controller-manager; the built-in ClusterRoleBinding system:kube-controller-manager grants kube-controller-manager the permissions it needs to do its work (they can be inspected as shown below).
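
        The permissions granted by that built-in role can be listed once the cluster is up:

        kubectl describe clusterrole system:kube-controller-manager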
      2. Generate the certificate and private key

        cd /opt/k8s/work
        cfssl gencert -ca=/opt/k8s/work/ca.pem \
          -ca-key=/opt/k8s/work/ca-key.pem \
          -config=/opt/k8s/work/ca-config.json \
          -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
        ls kube-controller-manager*pem
        
      3. Install the certificate

        cd /opt/k8s/work
        cp kube-controller-manager*.pem /etc/kubernetes/cert/
        
    2. Create the kubeconfig file

      • kube-controller-manager uses this file to access the apiserver; it carries the apiserver address, the embedded CA certificate, and the kube-controller-manager client certificate, among other things
      cd /opt/k8s/work
      export KUBE_APISERVER=https://192.168.0.107:6443
      
      kubectl config set-cluster kubernetes \
        --certificate-authority=/opt/k8s/work/ca.pem \
        --embed-certs=true \
        --server="${KUBE_APISERVER}" \
        --kubeconfig=kube-controller-manager.kubeconfig
        
      kubectl config set-credentials system:kube-controller-manager \
        --client-certificate=kube-controller-manager.pem \
        --client-key=kube-controller-manager-key.pem \
        --embed-certs=true \
        --kubeconfig=kube-controller-manager.kubeconfig
      
      kubectl config set-context system:kube-controller-manager \
        --cluster=kubernetes \
        --user=system:kube-controller-manager \
        --kubeconfig=kube-controller-manager.kubeconfig
      
      kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
      
      
    3. Distribute the kubeconfig

      cd /opt/k8s/work
      cp kube-controller-manager.kubeconfig /etc/kubernetes/kube-controller-manager.kubeconfig
      
      
    4. Create the kube-controller-manager service unit file

      export SERVICE_CIDR="10.254.0.0/16"
      
      cat > /etc/systemd/system/kube-controller-manager.service <<EOF
      [Unit]
      Description=Kubernetes Controller Manager
      Documentation=https://github.com/GoogleCloudPlatform/kubernetes
      
      [Service]
      WorkingDirectory=/data/k8s/k8s/kube-controller-manager
      ExecStart=/opt/k8s/bin/kube-controller-manager \
        --profiling \
        --cluster-name=kubernetes \
        --kube-api-qps=1000 \
        --kube-api-burst=2000 \
        --leader-elect \
        --use-service-account-credentials \
        --concurrent-service-syncs=2 \
        --bind-address=192.168.0.107 \
        --secure-port=10252 \
        --tls-cert-file=/etc/kubernetes/cert/kube-controller-manager.pem \
        --tls-private-key-file=/etc/kubernetes/cert/kube-controller-manager-key.pem \
        --port=0 \
        --authentication-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
        --client-ca-file=/etc/kubernetes/cert/ca.pem \
        --authorization-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
        --cluster-signing-cert-file=/etc/kubernetes/cert/ca.pem \
        --cluster-signing-key-file=/etc/kubernetes/cert/ca-key.pem \
        --experimental-cluster-signing-duration=87600h \
        --horizontal-pod-autoscaler-sync-period=10s \
        --concurrent-deployment-syncs=10 \
        --concurrent-gc-syncs=30 \
        --node-cidr-mask-size=24 \
        --service-cluster-ip-range=${SERVICE_CIDR} \
        --pod-eviction-timeout=6m \
        --terminated-pod-gc-threshold=10000 \
        --root-ca-file=/etc/kubernetes/cert/ca.pem \
        --service-account-private-key-file=/etc/kubernetes/cert/ca-key.pem \
        --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
        --logtostderr=true \
        --v=2
      Restart=on-failure
      RestartSec=5
      
      [Install]
      WantedBy=multi-user.target
      EOF
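      
      As with the kube-apiserver unit, ${SERVICE_CIDR} is expanded while the file is written; this can be confirmed with:
      
      grep service-cluster-ip-range /etc/systemd/system/kube-controller-manager.service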
      
    5. Create the kube-controller-manager working directory

      mkdir -p /data/k8s/k8s/kube-controller-manager
      
    6. Start the kube-controller-manager service

      systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl restart kube-controller-manager
      
    7. Check the startup result

      systemctl status kube-controller-manager  |grep Active
      
      • Make sure the status is active (running); otherwise check the logs to find out why

      • If something is wrong, inspect it with the following command

        journalctl -u kube-controller-manager
        
        
    8. Check the running status of kube-controller-manager

      root@master:/opt/k8s/work# kubectl get endpoints kube-controller-manager --namespace=kube-system  -o yaml
      apiVersion: v1
      kind: Endpoints
      metadata:
        annotations:
          control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"master_6e2dfb91-8eaa-42d0-ba83-be669b99801f","leaseDurationSeconds":15,"acquireTime":"2020-02-09T13:37:08Z","renewTime":"2020-02-09T13:38:02Z","leaderTransitions":0}'
        creationTimestamp: "2020-02-09T13:37:08Z"
        name: kube-controller-manager
        namespace: kube-system
        resourceVersion: "888"
        selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager
        uid: 5aa2c4a1-5ded-4870-900e-63dfd212c912
      
      root@master:/opt/k8s/work# curl -s --cacert /opt/k8s/work/ca.pem --cert /opt/k8s/work/admin.pem --key /opt/k8s/work/admin-key.pem https://192.168.0.107:10252/healthz
      ok
      
      

    Configure kube-scheduler

    1. Create the kube-scheduler certificate and private key

      1. Create the certificate signing request file

        cd /opt/k8s/work
        cat > kube-scheduler-csr.json <<EOF
        {
            "CN": "system:kube-scheduler",
            "key": {
                "algo": "rsa",
                "size": 2048
            },
            "hosts": [
              "127.0.0.1",
              "192.168.0.107"
            ],
            "names": [
              {
                "C": "CN",
                "ST": "NanJing",
                "L": "NanJing",
                "O": "system:kube-scheduler",
                "OU": "system"
              }
            ]
        }
        EOF
        
        
        • CN and O are both system:kube-scheduler; the built-in ClusterRoleBinding system:kube-scheduler grants kube-scheduler the permissions it needs to do its work.
      2. Generate the certificate and private key

        cd /opt/k8s/work
        cfssl gencert -ca=/opt/k8s/work/ca.pem \
          -ca-key=/opt/k8s/work/ca-key.pem \
          -config=/opt/k8s/work/ca-config.json \
          -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
        ls kube-scheduler*pem
        
        
      3. Install the certificate

        cd /opt/k8s/work
        cp kube-scheduler*.pem /etc/kubernetes/cert/
        
        
    2. Create the kubeconfig file

      • kube-scheduler uses this file to access the apiserver; it carries the apiserver address, the embedded CA certificate, and the kube-scheduler client certificate, among other things
      cd /opt/k8s/work
      export KUBE_APISERVER=https://192.168.0.107:6443
      
      kubectl config set-cluster kubernetes \
        --certificate-authority=/opt/k8s/work/ca.pem \
        --embed-certs=true \
        --server="${KUBE_APISERVER}" \
        --kubeconfig=kube-scheduler.kubeconfig
        
      kubectl config set-credentials system:kube-scheduler \
        --client-certificate=kube-scheduler.pem \
        --client-key=kube-scheduler-key.pem \
        --embed-certs=true \
        --kubeconfig=kube-scheduler.kubeconfig
      
      kubectl config set-context system:kube-scheduler \
        --cluster=kubernetes \
        --user=system:kube-scheduler \
        --kubeconfig=kube-scheduler.kubeconfig
      
      kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
      
      
    3. Distribute the kubeconfig

      cd /opt/k8s/work
      cp kube-scheduler.kubeconfig /etc/kubernetes/kube-scheduler.kubeconfig
      
      
    4. Create the kube-scheduler configuration file

      cd /opt/k8s/work
      cat >kube-scheduler.yaml <<EOF
      apiVersion: kubescheduler.config.k8s.io/v1alpha1
      kind: KubeSchedulerConfiguration
      bindTimeoutSeconds: 600
      clientConnection:
        burst: 200
        kubeconfig: "/etc/kubernetes/kube-scheduler.kubeconfig"
        qps: 100
      enableContentionProfiling: false
      enableProfiling: true
      hardPodAffinitySymmetricWeight: 1
      healthzBindAddress: 192.168.0.107:10251
      leaderElection:
        leaderElect: true
      metricsBindAddress: 192.168.0.107:10251
      EOF
      
      cp kube-scheduler.yaml /etc/kubernetes/kube-scheduler.yaml
      
      • healthzBindAddress and metricsBindAddress expose a plain-HTTP endpoint on 192.168.0.107:10251, which the final check in this section queries; the TLS endpoint is served separately on the --secure-port (10259) set in the service unit below.
      
    5. Create the kube-scheduler service unit file

      cat > /etc/systemd/system/kube-scheduler.service <<EOF
      [Unit]
      Description=Kubernetes Scheduler
      Documentation=https://github.com/GoogleCloudPlatform/kubernetes
      
      [Service]
      WorkingDirectory=/data/k8s/k8s/kube-scheduler
      ExecStart=/opt/k8s/bin/kube-scheduler \
        --config=/etc/kubernetes/kube-scheduler.yaml \
        --bind-address=192.168.0.107 \
        --secure-port=10259 \
        --port=0 \
        --tls-cert-file=/etc/kubernetes/cert/kube-scheduler.pem \
        --tls-private-key-file=/etc/kubernetes/cert/kube-scheduler-key.pem \
        --authentication-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
        --client-ca-file=/etc/kubernetes/cert/ca.pem \
        --authorization-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
        --logtostderr=true \
        --v=2
      Restart=always
      RestartSec=5
      StartLimitInterval=0
      
      [Install]
      WantedBy=multi-user.target
      EOF
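      
      Optionally, the unit file can be linted before starting the service; this is only a syntax-level check of the systemd unit, not of the kube-scheduler flags themselves:
      
      systemd-analyze verify /etc/systemd/system/kube-scheduler.service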
      
    6. Create the kube-scheduler working directory

      mkdir -p /data/k8s/k8s/kube-scheduler
      
    7. Start the kube-scheduler service

      systemctl daemon-reload && systemctl enable kube-scheduler && systemctl restart kube-scheduler
      
    8. Check the startup result

      systemctl status kube-scheduler  |grep Active
      
      • Make sure the status is active (running); otherwise check the logs to find out why

      • If something is wrong, inspect it with the following command

        journalctl -u kube-scheduler
        
        
    9. Check the running status of kube-scheduler

      root@master:/opt/k8s/work# kubectl get endpoints kube-scheduler --namespace=kube-system  -o yaml
      apiVersion: v1
      kind: Endpoints
      metadata:
        annotations:
          control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"master_383054c4-58d8-4c24-a766-551a92492219","leaseDurationSeconds":15,"acquireTime":"2020-02-10T02:17:40Z","renewTime":"2020-02-10T02:18:09Z","leaderTransitions":0}'
        creationTimestamp: "2020-02-10T02:17:41Z"
        name: kube-scheduler
        namespace: kube-system
        resourceVersion: "50203"
        selfLink: /api/v1/namespaces/kube-system/endpoints/kube-scheduler
        uid: 39821272-40a1-4b3a-95bd-a4f09af09231
      
      root@master:/opt/k8s/work# curl -s --cacert /opt/k8s/work/ca.pem --cert /opt/k8s/work/admin.pem --key /opt/k8s/work/admin-key.pem https://192.168.0.107:10259/healthz
      ok
      
      root@master:/opt/k8s/work# curl  http://192.168.0.107:10251/healthz
      ok
      
      