  • Manually Deploying a Kubernetes HA Cluster

    Preface

    There are many ways to deploy a Kubernetes HA cluster (HA here meaning high availability of the master apiserver), for example VIP failover with keepalived, or load balancing with haproxy/nginx. This series of posts builds the cluster with haproxy and nginx load balancing. Since Kubernetes now has a large user base, many similar solutions exist online; if this article appears to copy one of them, feel free to contact me.

    I. Environment Preparation

    1.1 Host Environment

    IP Address       Hostname       Role                         Notes
    192.168.15.131   k8s-master01   k8s-master/etcd_cluster01
    192.168.15.132   k8s-master02   k8s-master/etcd_cluster01
    192.168.15.133   k8s-master03   k8s-master/etcd_cluster01
    192.168.15.134   k8s-node01   k8s-node
    192.168.15.135   k8s-node02   k8s-node

    Note: the hosts are named this way because the etcd cluster is deployed entirely for Kubernetes' use;

    1.2 Software Versions

    docker-ce 17.x
    kubernetes-1.7.3

    Install docker-ce

    # Remove old Docker versions
    yum remove docker docker-common docker-selinux docker-engine
    
    # Install docker-ce
    yum makecache
    yum install -y yum-utils
    
    # Configure the docker-ce yum repository
    yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
    yum install docker-ce -y
    
    # Configure a registry mirror
    sudo mkdir -p /etc/docker
    sudo tee /etc/docker/daemon.json <<-'EOF'
    {
      "registry-mirrors": ["https://jek8a03u.mirror.aliyuncs.com"]
    }
    EOF
     
    # Start docker
    systemctl start docker
    systemctl enable docker

    1.3 System Environment Changes

    # Change the hostnames (omitted)

    # Update the hosts file

    127.0.0.1 k8s-master01 
    ::1 k8s-master01
    192.168.15.131  k8s-master01
    192.168.15.132  k8s-master02
    192.168.15.133  k8s-master03
    
    192.168.15.134  k8s-node01
    192.168.15.135  k8s-node02

    # Disable SELinux and the firewall

    setenforce 0
    systemctl stop firewalld
    systemctl disable firewalld

    # Install required packages

    yum -y install ntpdate gcc git vim wget lrzsz

    # Configure periodic time sync (add to crontab)

    */5 * * * * /usr/sbin/ntpdate time.windows.com >/dev/null 2>&1

    1.4 Creating Certificates

    # Install the certificate tool cfssl

    wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
    wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
    wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
    
    chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
    mv cfssl_linux-amd64 /usr/local/bin/cfssl
    mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
    mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo 
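
    A quick sanity check that the binaries landed on the PATH (output illustrative):

    cfssl version
    # Version: 1.2.0
    # Revision: dev
    # Runtime: go1.6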

    II. Installing the etcd Cluster

    2.1 Creating etcd Certificates

    # Create the working directory

    mkdir /root/tls/etcd -p
    cd /root/tls/etcd
    

    # Create the CSR and signing-config files

    cat <<EOF > etcd-root-ca-csr.json
    {
      "key": {
        "algo": "rsa",
        "size": 4096
      },
      "names": [
        {
          "O": "etcd",
          "OU": "etcd Security",
          "L": "Beijing",
          "ST": "Beijing",
          "C": "CN"
        }
      ],
      "CN": "etcd-root-ca"
    }
    EOF
    
    cat <<EOF > etcd-gencert.json
    {
      "signing": {
        "default": {
            "usages": [
              "signing",
              "key encipherment",
              "server auth",
              "client auth"
            ],
            "expiry": "87600h"
        }
      }
    }
    EOF
    
    cat <<EOF > etcd-csr.json
    {
      "key": {
        "algo": "rsa",
        "size": 4096
      },
      "names": [
        {
          "O": "etcd",
          "OU": "etcd Security",
          "L": "Beijing",
          "ST": "Beijing",
          "C": "CN"
        }
      ],
      "CN": "etcd",
      "hosts": [
        "127.0.0.1",
        "localhost",
        "192.168.15.131",
        "192.168.15.132",
        "192.168.15.133",
        "192.168.15.134",
        "192.168.15.135"
      ]
    }
    EOF

    # Generate the certificates

    cfssl gencert --initca=true etcd-root-ca-csr.json | cfssljson --bare etcd-root-ca
    cfssl gencert --ca etcd-root-ca.pem --ca-key etcd-root-ca-key.pem --config etcd-gencert.json etcd-csr.json | cfssljson --bare etcd
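
    It is worth confirming the issued certificate covers every etcd node; a quick check (the "sans" field of the JSON output should list all the IPs from etcd-csr.json):

    cfssl-certinfo -cert etcd.pem
    # or, with openssl:
    openssl x509 -in etcd.pem -noout -subject -dates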
    

    2.2 Installing the etcd Service

    yum -y install etcd
    mkdir /etc/etcd/ssl
    cp /root/tls/etcd/{etcd.pem,etcd-key.pem,etcd-root-ca.pem} /etc/etcd/ssl/
    chmod 755 -R /etc/etcd/ssl
    

    2.3 Creating the etcd Configuration

    cat <<EOF > /etc/etcd/etcd.conf
    # [member]
    ETCD_NAME=etcd01
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_WAL_DIR="/var/lib/etcd/wal"
    ETCD_SNAPSHOT_COUNT="10000"
    ETCD_HEARTBEAT_INTERVAL="100"
    ETCD_ELECTION_TIMEOUT="1000"
    ETCD_LISTEN_PEER_URLS="https://192.168.15.131:2380"
    ETCD_LISTEN_CLIENT_URLS="https://192.168.15.131:2379"
    ETCD_MAX_SNAPSHOTS="5"
    ETCD_MAX_WALS="5"
    #ETCD_CORS=""
    #
    #[cluster]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.15.131:2380"
    # if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
    ETCD_INITIAL_CLUSTER="etcd01=https://192.168.15.131:2380,etcd02=https://192.168.15.132:2380,etcd03=https://192.168.15.133:2380"
    ETCD_INITIAL_CLUSTER_STATE="new"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_ADVERTISE_CLIENT_URLS="http://192.168.15.131:2379"
    #ETCD_DISCOVERY=""
    #ETCD_DISCOVERY_SRV=""
    #ETCD_DISCOVERY_FALLBACK="proxy"
    #ETCD_DISCOVERY_PROXY=""
    #ETCD_STRICT_RECONFIG_CHECK="false"
    #ETCD_AUTO_COMPACTION_RETENTION="0"
    #
    #[proxy]
    #ETCD_PROXY="off"
    #ETCD_PROXY_FAILURE_WAIT="5000"
    #ETCD_PROXY_REFRESH_INTERVAL="30000"
    #ETCD_PROXY_DIAL_TIMEOUT="1000"
    #ETCD_PROXY_WRITE_TIMEOUT="5000"
    #ETCD_PROXY_READ_TIMEOUT="0"
    #
    [security]
    ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
    ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
    ETCD_CLIENT_CERT_AUTH="true"
    ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-root-ca.pem"
    ETCD_AUTO_TLS="true"
    ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
    ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
    ETCD_PEER_CLIENT_CERT_AUTH="true"
    ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-root-ca.pem"
    ETCD_PEER_AUTO_TLS="true"
    #
    #[logging]
    #ETCD_DEBUG="false"
    # examples for -log-package-levels etcdserver=WARNING,security=DEBUG
    #ETCD_LOG_PACKAGE_LEVELS=""
    #
    #[profiling]
    #ETCD_ENABLE_PPROF="false"
    #ETCD_METRICS="basic"
    EOF
    

    2.4 Distributing Files to the Other Hosts and Starting the Service

    scp -r /etc/etcd 192.168.15.132:/etc/
    scp -r /etc/etcd 192.168.15.133:/etc/
    

    Note: this configuration file must be adapted on each host; change ETCD_NAME and the IP addresses to match, for example as sketched below.
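
    A per-host adaptation sketch for k8s-master02 (the _URLS= filter leaves ETCD_INITIAL_CLUSTER, which must keep all three members, untouched):

    sed -i 's/^ETCD_NAME=.*/ETCD_NAME=etcd02/' /etc/etcd/etcd.conf
    sed -i '/_URLS=/s/192.168.15.131/192.168.15.132/' /etc/etcd/etcd.conf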

    # Start the service

    systemctl daemon-reload
    systemctl enable etcd
    systemctl start etcd

    Note: in a cluster, at least two etcd members must be started at roughly the same time, otherwise startup errors are reported. For the remaining members, just copy over the certificates, configuration file, and unit file;

    # Check cluster health

    export ETCDCTL_API=3
    etcdctl --cacert=/etc/etcd/ssl/etcd-root-ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.15.131:2379,https://192.168.15.132:2379,https://192.168.15.133:2379 endpoint health
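
    Listing the members is another useful check; all three nodes should appear (same v3 API as above):

    etcdctl --cacert=/etc/etcd/ssl/etcd-root-ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.15.131:2379 member list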
    

    III. Deploying the Kubernetes Master Services

    3.1 Generating Kubernetes Certificates

    # Create the working directory

    mkdir /root/tls/k8s
    cd /root/tls/k8s/
    

    # Create the CSR and signing-config files

    cat <<EOF > k8s-root-ca-csr.json
    {
      "CN": "kubernetes",
      "key": {
        "algo": "rsa",
        "size": 4096
      },
      "names": [
        {
          "C": "CN",
          "ST": "BeiJing",
          "L": "BeiJing",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }
    EOF
    
    cat <<EOF > k8s-gencert.json
    {
      "signing": {
        "default": {
          "expiry": "87600h"
        },
        "profiles": {
          "kubernetes": {
            "usages": [
                "signing",
                "key encipherment",
                "server auth",
                "client auth"
            ],
            "expiry": "87600h"
          }
        }
      }
    }
    EOF
    
    cat <<EOF > kubernetes-csr.json
    {
        "CN": "kubernetes",
        "hosts": [
            "127.0.0.1",
            "10.254.0.1",
            "192.168.15.131",
            "192.168.15.132",
            "192.168.15.133",
            "192.168.15.134",
            "192.168.15.135",
            "localhost",
            "kubernetes",
            "kubernetes.default",
            "kubernetes.default.svc",
            "kubernetes.default.svc.cluster",
            "kubernetes.default.svc.cluster.local"
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "ST": "BeiJing",
                "L": "BeiJing",
                "O": "k8s",
                "OU": "System"
            }
        ]
    }
    EOF
    
    cat <<EOF > kube-proxy-csr.json
    {
      "CN": "system:kube-proxy",
      "hosts": [],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "BeiJing",
          "L": "BeiJing",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }
    EOF
    
    
    cat <<EOF > admin-csr.json
    {
      "CN": "admin",
      "hosts": [],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "BeiJing",
          "L": "BeiJing",
          "O": "system:masters",
          "OU": "System"
        }
      ]
    }
    EOF

    # Generate the certificates

    cfssl gencert --initca=true k8s-root-ca-csr.json | cfssljson --bare k8s-root-ca
    for targetName in kubernetes admin kube-proxy; do
        cfssl gencert --ca k8s-root-ca.pem --ca-key k8s-root-ca-key.pem --config k8s-gencert.json --profile kubernetes $targetName-csr.json | cfssljson --bare $targetName
    done
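
    Eight .pem files should now exist, a key pair per component plus the CA:

    ls *.pem
    # admin-key.pem  admin.pem  k8s-root-ca-key.pem  k8s-root-ca.pem
    # kube-proxy-key.pem  kube-proxy.pem  kubernetes-key.pem  kubernetes.pem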
    

    3.2 Installing Kubernetes from Binaries

    wget https://storage.googleapis.com/kubernetes-release/release/v1.7.3/kubernetes-server-linux-amd64.tar.gz
    tar zxf kubernetes-server-linux-amd64.tar.gz
    cp kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl} /usr/local/bin/

    3.3 Generating the Token and kubeconfig Files

    # Generate the bootstrap token

    export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
    cat > token.csv <<EOF
    ${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
    EOF
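
    The master configurations in section 3.4 reference these certificates and token.csv under /etc/kubernetes/ssl; the original does not show the copy step explicitly, so here is a sketch (assuming the certificates were generated in /root/tls/k8s as above):

    mkdir -p /etc/kubernetes/ssl
    cp /root/tls/k8s/*.pem /etc/kubernetes/ssl/
    cp token.csv /etc/kubernetes/ssl/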
    

    # Generate the bootstrap kubeconfig

    export KUBE_APISERVER="https://127.0.0.1:6443"
    
    ### Set cluster parameters
    kubectl config set-cluster kubernetes \
      --certificate-authority=k8s-root-ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=bootstrap.kubeconfig

    ### Set client authentication parameters
    kubectl config set-credentials kubelet-bootstrap \
      --token=${BOOTSTRAP_TOKEN} \
      --kubeconfig=bootstrap.kubeconfig

    ### Set context parameters
    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kubelet-bootstrap \
      --kubeconfig=bootstrap.kubeconfig

    ### Set the default context
    kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

    # Generate the kube-proxy kubeconfig

    ### Set cluster parameters
    kubectl config set-cluster kubernetes \
      --certificate-authority=k8s-root-ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=kube-proxy.kubeconfig
    ### Set client authentication parameters
    kubectl config set-credentials kube-proxy \
      --client-certificate=kube-proxy.pem \
      --client-key=kube-proxy-key.pem \
      --embed-certs=true \
      --kubeconfig=kube-proxy.kubeconfig
    ### Set context parameters
    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kube-proxy \
      --kubeconfig=kube-proxy.kubeconfig
    ### Set the default context
    kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
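
    Both bootstrap.kubeconfig and kube-proxy.kubeconfig are consumed by the node components in section 3.5, which expect them under /etc/kubernetes; a distribution sketch:

    scp bootstrap.kubeconfig kube-proxy.kubeconfig 192.168.15.134:/etc/kubernetes/
    scp bootstrap.kubeconfig kube-proxy.kubeconfig 192.168.15.135:/etc/kubernetes/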

    3.4 Deploying the Master Services

    # Generate the common config file

    The master needs four files: config, apiserver, controller-manager, and scheduler.

    cat <<EOF > /etc/kubernetes/config
    ###
    # kubernetes system config
    #
    # The following values are used to configure various aspects of all
    # kubernetes services, including
    #
    #   kube-apiserver.service
    #   kube-controller-manager.service
    #   kube-scheduler.service
    #   kubelet.service
    #   kube-proxy.service
    # logging to stderr means we get it in the systemd journal
    KUBE_LOGTOSTDERR="--logtostderr=true"
    
    # journal message level, 0 is debug
    KUBE_LOG_LEVEL="--v=2"
    
    # Should this cluster be allowed to run privileged docker containers
    KUBE_ALLOW_PRIV="--allow-privileged=true"
    
    # How the controller-manager, scheduler, and proxy find the apiserver
    KUBE_MASTER="--master=http://127.0.0.1:8080"
    EOF
    

    # Generate the apiserver configuration

    cat <<'EOF' > /etc/kubernetes/apiserver
    # kubernetes system config
    #
    # The following values are used to configure the kube-apiserver
    #
    
    # The address on the local server to listen to.
    KUBE_API_ADDRESS="--advertise-address=192.168.15.131 --insecure-bind-address=127.0.0.1 --bind-address=192.168.15.131"
    
    # The port on the local server to listen on.
    KUBE_API_PORT="--insecure-port=8080 --secure-port=6443"
    
    # Port minions listen on
    # KUBELET_PORT="--kubelet-port=10250"
    
    # Comma separated list of nodes in the etcd cluster
    KUBE_ETCD_SERVERS="--etcd-servers=https://192.168.15.131:2379,https://192.168.15.132:2379,https://192.168.15.133:2379"
    
    # Address range to use for services
    KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
    
    # default admission control policies
    KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
    
    # Add your own!
    KUBE_API_ARGS="--authorization-mode=RBAC \
                   --runtime-config=rbac.authorization.k8s.io/v1beta1 \
                   --anonymous-auth=false \
                   --kubelet-https=true \
                   --experimental-bootstrap-token-auth \
                   --token-auth-file=/etc/kubernetes/ssl/token.csv \
                   --service-node-port-range=30000-50000 \
                   --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
                   --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
                   --client-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \
                   --service-account-key-file=/etc/kubernetes/ssl/k8s-root-ca.pem \
                   --etcd-quorum-read=true \
                   --storage-backend=etcd3 \
                   --etcd-cafile=/etc/etcd/ssl/etcd-root-ca.pem \
                   --etcd-certfile=/etc/etcd/ssl/etcd.pem \
                   --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
                   --enable-swagger-ui=true \
                   --apiserver-count=3 \
                   --audit-log-maxage=30 \
                   --audit-log-maxbackup=3 \
                   --audit-log-maxsize=100 \
                   --audit-log-path=/var/log/kube-audit/audit.log \
                   --event-ttl=1h"
    EOF
    

    # Generate the controller-manager configuration

    cat <<'EOF' > /etc/kubernetes/controller-manager
    # The following values are used to configure the kubernetes controller-manager
    
    # defaults from config and apiserver should be adequate
    
    # Add your own!
    KUBE_CONTROLLER_MANAGER_ARGS="--address=0.0.0.0 \
                                  --service-cluster-ip-range=10.254.0.0/16 \
                                  --cluster-name=kubernetes \
                                  --cluster-signing-cert-file=/etc/kubernetes/ssl/k8s-root-ca.pem \
                                  --cluster-signing-key-file=/etc/kubernetes/ssl/k8s-root-ca-key.pem \
                                  --service-account-private-key-file=/etc/kubernetes/ssl/k8s-root-ca-key.pem \
                                  --root-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \
                                  --leader-elect=true \
                                  --node-monitor-grace-period=40s \
                                  --node-monitor-period=5s \
                                  --pod-eviction-timeout=5m0s"
    
    EOF
    

    # Generate the scheduler configuration

    cat <<EOF > /etc/kubernetes/scheduler
    ###
    # kubernetes scheduler config
    
    # default config should be adequate
    
    # Add your own!
    KUBE_SCHEDULER_ARGS="--leader-elect=true --address=0.0.0.0"
    
    EOF
    

    # Create the apiserver systemd unit

    cat <<'EOF' > /usr/lib/systemd/system/kube-apiserver.service  # quoted delimiter so the $KUBE_* references are written literally
    [Unit]
    Description=Kubernetes API Server
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    After=network.target
    After=etcd.service
    
    [Service]
    EnvironmentFile=-/etc/kubernetes/config
    EnvironmentFile=-/etc/kubernetes/apiserver
    User=root
    ExecStart=/usr/local/bin/kube-apiserver \
    	    $KUBE_LOGTOSTDERR \
    	    $KUBE_LOG_LEVEL \
    	    $KUBE_ETCD_SERVERS \
    	    $KUBE_API_ADDRESS \
    	    $KUBE_API_PORT \
    	    $KUBELET_PORT \
    	    $KUBE_ALLOW_PRIV \
    	    $KUBE_SERVICE_ADDRESSES \
    	    $KUBE_ADMISSION_CONTROL \
    	    $KUBE_API_ARGS
    Restart=on-failure
    Type=notify
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    EOF
    

    # Create the controller-manager systemd unit

    cat <<'EOF' > /usr/lib/systemd/system/kube-controller-manager.service
    [Unit]
    Description=Kubernetes Controller Manager
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    
    [Service]
    EnvironmentFile=-/etc/kubernetes/config
    EnvironmentFile=-/etc/kubernetes/controller-manager
    User=root
    ExecStart=/usr/local/bin/kube-controller-manager \
    	    $KUBE_LOGTOSTDERR \
    	    $KUBE_LOG_LEVEL \
    	    $KUBE_MASTER \
    	    $KUBE_CONTROLLER_MANAGER_ARGS
    Restart=on-failure
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    EOF
    

    # Create the kube-scheduler systemd unit

    cat <<'EOF' > /usr/lib/systemd/system/kube-scheduler.service
    [Unit]
    Description=Kubernetes Scheduler Plugin
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    
    [Service]
    EnvironmentFile=-/etc/kubernetes/config
    EnvironmentFile=-/etc/kubernetes/scheduler
    User=root
    ExecStart=/usr/local/bin/kube-scheduler \
    	    $KUBE_LOGTOSTDERR \
    	    $KUBE_LOG_LEVEL \
    	    $KUBE_MASTER \
    	    $KUBE_SCHEDULER_ARGS
    Restart=on-failure
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    EOF
    

    # Start the services

    systemctl daemon-reload
    systemctl restart kube-apiserver
    systemctl restart kube-controller-manager
    systemctl restart kube-scheduler
    
    systemctl status kube-apiserver
    systemctl status kube-controller-manager
    systemctl status kube-scheduler
    
    systemctl enable kube-apiserver
    systemctl enable kube-controller-manager
    systemctl enable kube-scheduler
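
    Once all three are up, the control plane should report healthy; a quick check (output illustrative):

    kubectl get componentstatuses
    # NAME                 STATUS    MESSAGE              ERROR
    # scheduler            Healthy   ok
    # controller-manager   Healthy   ok
    # etcd-0               Healthy   {"health": "true"}
    # etcd-1               Healthy   {"health": "true"}
    # etcd-2               Healthy   {"health": "true"}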
    

    3.5 Deploying the Node Services

    # Create the required directories

    mkdir -p /etc/kubernetes/ssl
    mkdir -p /var/lib/kubernetes
    mkdir -p /var/lib/kubelet    # kubelet.service below uses this as its WorkingDirectory
    cp kubernetes/server/bin/{kubelet,kubectl,kube-proxy} /usr/local/bin/
    

    # Generate the common config file

    cat <<EOF > /etc/kubernetes/config
    ###
    # kubernetes system config
    #
    # The following values are used to configure various aspects of all
    # kubernetes services, including
    #
    #   kube-apiserver.service
    #   kube-controller-manager.service
    #   kube-scheduler.service
    #   kubelet.service
    #   kube-proxy.service
    # logging to stderr means we get it in the systemd journal
    KUBE_LOGTOSTDERR="--logtostderr=true"
    
    # journal message level, 0 is debug
    KUBE_LOG_LEVEL="--v=0"
    
    # Should this cluster be allowed to run privileged docker containers
    KUBE_ALLOW_PRIV="--allow-privileged=true"
    
    # How the controller-manager, scheduler, and proxy find the apiserver
    # KUBE_MASTER="--master=http://127.0.0.1:8080"
    EOF
    

    # Generate the kubelet configuration

    cat <<'EOF' > /etc/kubernetes/kubelet
    ###
    # kubernetes kubelet (minion) config
    
    # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
    KUBELET_ADDRESS="--address=192.168.15.134"
    
    # The port for the info server to serve on
    # KUBELET_PORT="--port=10250"
    
    # You may leave this blank to use the actual hostname
    KUBELET_HOSTNAME="--hostname-override=192.168.15.134"
    
    # location of the api-server
    # KUBELET_API_SERVER="--api-servers=http://127.0.0.1:8080"
    
    # Add your own!
    # KUBELET_ARGS="--cgroup-driver=systemd"
    KUBELET_ARGS="--cgroup-driver=cgroupfs \
                  --cluster-dns=10.254.0.2 \
                  --resolv-conf=/etc/resolv.conf \
                  --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
                  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
                  --require-kubeconfig \
                  --cert-dir=/etc/kubernetes/ssl \
                  --cluster-domain=cluster.local. \
                  --hairpin-mode promiscuous-bridge \
                  --serialize-image-pulls=false \
                  --pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0"
    EOF
    

    # Generate the kube-proxy configuration

    cat <<'EOF' > /etc/kubernetes/proxy
    # kubernetes proxy config # default config should be adequate 
    
    # Add your own! 
    
    KUBE_PROXY_ARGS="--bind-address=192.168.15.134 \
                     --hostname-override=k8s-node01 \
                     --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig  \
                     --cluster-cidr=10.254.0.0/16"
    EOF

    Note: none of the config files here (including kubelet and proxy above) define an API Server address. kubelet and kube-proxy are started with the --require-kubeconfig option, which makes them read the API Server address from their *.kubeconfig files and ignore anything set in the config files, so an address set there would have no effect.

    # Create the ClusterRoleBinding

    Because kubelet uses TLS Bootstrapping, under the RBAC policy the kubelet-bootstrap user initially has no API access at all; a ClusterRoleBinding granting it the system:node-bootstrapper role must be created first. Run this on any master:

    kubectl create clusterrolebinding kubelet-bootstrap \
      --clusterrole=system:node-bootstrapper \
      --user=kubelet-bootstrap
    

    # Create the kubelet systemd unit

    cat << 'EOF' > /usr/lib/systemd/system/kubelet.service
    [Unit]
    Description=Kubernetes Kubelet Server
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    After=docker.service
    Requires=docker.service
    
    [Service]
    WorkingDirectory=/var/lib/kubelet
    EnvironmentFile=-/etc/kubernetes/config
    EnvironmentFile=-/etc/kubernetes/kubelet
    ExecStart=/usr/local/bin/kubelet \
    	    $KUBE_LOGTOSTDERR \
    	    $KUBE_LOG_LEVEL \
    	    $KUBELET_API_SERVER \
    	    $KUBELET_ADDRESS \
    	    $KUBELET_PORT \
    	    $KUBELET_HOSTNAME \
    	    $KUBE_ALLOW_PRIV \
    	    $KUBELET_ARGS
    Restart=on-failure
    KillMode=process
    
    [Install]
    WantedBy=multi-user.target
    EOF
    

    # Create the kube-proxy systemd unit

    cat << 'EOF' > /usr/lib/systemd/system/kube-proxy.service
    [Unit]
    Description=Kubernetes Kube-Proxy Server
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    After=network.target
    
    [Service]
    EnvironmentFile=-/etc/kubernetes/config
    EnvironmentFile=-/etc/kubernetes/proxy
    ExecStart=/usr/local/bin/kube-proxy \
    	    $KUBE_LOGTOSTDERR \
    	    $KUBE_LOG_LEVEL \
    	    $KUBE_MASTER \
    	    $KUBE_PROXY_ARGS
    Restart=on-failure
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    EOF
    

    # Create the nginx proxy

    The nginx proxy provides apiserver high availability without a separate front-end load balancer: each node runs a local TCP proxy that balances across all three apiservers.

    mkdir -p /etc/nginx
    cat << EOF > /etc/nginx/nginx.conf
    error_log stderr notice;
    
    worker_processes auto;
    events {
      multi_accept on;
      use epoll;
      worker_connections 1024;
    }
    
    stream {
        upstream kube_apiserver {
            least_conn;
            server 192.168.15.131:6443;
            server 192.168.15.132:6443;
            server 192.168.15.133:6443;
        }
    
        server {
            listen        0.0.0.0:6443;
            proxy_pass    kube_apiserver;
            proxy_timeout 10m;
            proxy_connect_timeout 1s;
        }
    }
    EOF
    chmod +r /etc/nginx/nginx.conf
    

    Start the nginx-proxy container (with --net=host, nginx binds port 6443 on the host directly, so no -p mapping is needed)

    docker run -d -v /etc/localtime:/etc/localtime -v /etc/nginx:/etc/nginx --name nginx-proxy --net=host --restart=always --memory=512M nginx:1.13.3-alpine
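
    A quick connectivity check through the local proxy; since anonymous access is disabled on the apiserver, an HTTP 401 response here means the TCP path works:

    curl -k https://127.0.0.1:6443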
    

    # Start the kubelet

    systemctl daemon-reload
    systemctl start kubelet
    systemctl status kubelet
    systemctl enable kubelet
    

    # Add the node to the Kubernetes cluster

    Because TLS Bootstrapping is used, the kubelet does not join the cluster immediately after starting; it first submits a certificate request. The log shows output like:

    Jul 19 14:15:31 docker4.node kubelet[18213]: I0719 14:15:31.810914   18213 feature_gate.go:144] feature gates: map[]
    Jul 19 14:15:31 docker4.node kubelet[18213]: I0719 14:15:31.811025   18213 bootstrap.go:58] Using bootstrap kubeconfig to generate TLS client cert, key and kubeconfig file
    

    At this point, simply approve the certificate request on a master:

    [root@localhost ~]# kubectl get csr
    NAME                                                   AGE       REQUESTOR           CONDITION
    node-csr-_xILhfT4Z5FLQsz8csi3tJKLwz0q02U3aTI8MmoHgQg   24s       kubelet-bootstrap   Pending
    
    [root@localhost ~]# kubectl certificate approve node-csr-_xILhfT4Z5FLQsz8csi3tJKLwz0q02U3aTI8MmoHgQg
    
    [root@localhost ~]# kubectl get node
    NAME             STATUS    AGE       VERSION
    192.168.15.131   Ready     27s       v1.7.3
    

    # Finally, start the kube-proxy component

    systemctl daemon-reload
    systemctl start kube-proxy
    systemctl enable kube-proxy
    systemctl status kube-proxy  
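
    In iptables mode, kube-proxy materializes services as NAT rules; a populated KUBE-SERVICES chain confirms it is working (a quick check):

    iptables -t nat -L KUBE-SERVICES -n | head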

    IV. Deploying the Calico Network

    4.1 Overview

    Calico is used as the network component. Deploying it is fairly straightforward: just create a few yml files; see https://docs.projectcalico.org/v2.3/getting-started/kubernetes/ for details. Calico has the following requirements:

    • kubelet must be configured to use a CNI network plugin (e.g. --network-plugin=cni);
    • kube-proxy must run in iptables proxy mode (the default since Kubernetes v1.2.0);
    • kube-proxy must not be started with the --masquerade-all flag, which conflicts with Calico policy;
    • the Kubernetes network policy plugin requires Kubernetes v1.3 or later;
    • with RBAC enabled, the appropriate accounts, roles, and bindings must be defined for the Calico components;

    Calico offers several installation methods; which one to use depends on how Kubernetes was installed:

    • Standard Hosted Install: installs Calico against an existing etcd cluster. This is the recommended hosted method for production;
    • Kubeadm Hosted Install: installs Calico together with a single-node etcd cluster. Recommended for getting started quickly, in combination with tools such as kubeadm;
    • Kubernetes Datastore: installs Calico without requiring its own etcd cluster;

    4.2 Installing the Calico Network

    Wait... before starting, check your kubelet configuration: is the --network-plugin=cni flag enabled? If not, add it now; otherwise pods will keep getting addresses from the docker0 subnet.

    wget http://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/hosted/calico.yaml
    sed -i 's@.*etcd_endpoints:.*@  etcd_endpoints: "https://192.168.15.131:2379,https://192.168.15.132:2379,https://192.168.15.133:2379"@gi' calico.yaml
    
    export ETCD_CERT=$(cat /etc/etcd/ssl/etcd.pem | base64 | tr -d '\n')
    export ETCD_KEY=$(cat /etc/etcd/ssl/etcd-key.pem | base64 | tr -d '\n')
    export ETCD_CA=$(cat /etc/etcd/ssl/etcd-root-ca.pem | base64 | tr -d '\n')
    
    
    sed -i "s@.*etcd-cert:.*@  etcd-cert: ${ETCD_CERT}@gi" calico.yaml
    sed -i "s@.*etcd-key:.*@  etcd-key: ${ETCD_KEY}@gi" calico.yaml
    sed -i "s@.*etcd-ca:.*@  etcd-ca: ${ETCD_CA}@gi" calico.yaml
    
    sed -i 's@.*etcd_ca:.*@  etcd_ca: "/calico-secrets/etcd-ca"@gi' calico.yaml
    sed -i 's@.*etcd_cert:.*@  etcd_cert: "/calico-secrets/etcd-cert"@gi' calico.yaml
    sed -i 's@.*etcd_key:.*@  etcd_key: "/calico-secrets/etcd-key"@gi' calico.yaml
    
    sed -i 's@192.168.0.0/16@10.254.64.0/18@gi' calico.yaml
    
    mkdir /data/kubernetes/calico -p
    mv calico.yaml /data/kubernetes/calico/

    Note: gcr.io is not reachable from mainland China; one workaround is to edit the hosts file, for example:

    61.91.161.217 gcr.io
    61.91.161.217 www.gcr.io
    61.91.161.217 packages.cloud.google.com
    

    # Start the pods

    kubectl create -f /data/kubernetes/calico/calico.yaml
    kubectl apply -f http://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/rbac.yaml
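
    The calico pods should reach Running once the images are pulled; a quick check:

    kubectl get pods -n kube-system -o wide | grep calico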
    

    Note: image pulls may be slow due to network conditions and can keep the pods from starting; consider pulling the images locally before creating the resources.

    # Verify the network

    cat << EOF > demo.deploy.yml
    apiVersion: apps/v1beta1
    kind: Deployment
    metadata:
      name: demo-deployment
    spec:
      replicas: 3
      template:
        metadata:
          labels:
            app: demo
        spec:
          containers:
          - name: demo
            image: mritd/demo
            ports:
            - containerPort: 80
    EOF
    kubectl create -f demo.deploy.yml
    kubectl get pods -o wide --all-namespaces
    

    Note: kubectl exec into a pod and ping a pod on a different node; at this point every node should hold routes to the pod IPs on the other nodes (see the sketch below). The full walkthrough is omitted here.
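
    Calico distributes those routes via BIRD, so they are visible in the kernel routing table; a quick look on any node (a sketch):

    ip route | grep bird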

    V. Deploying DNS

    There are currently two ways to deploy DNS: fully by hand, or via the Addon-manager. The Addon-manager feels somewhat cumbersome, so the DNS component is deployed by hand below.

    5.1 Deploying DNS

    The DNS component files live under the kubernetes addons directory; download them and make minor modifications;

    # Create the working directory

    mkdir /data/kubernetes/dns
    cd  /data/kubernetes/dns
    

    # Download the manifests

    wget https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.7/cluster/addons/dns/kubedns-cm.yaml
    wget https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.7/cluster/addons/dns/kubedns-sa.yaml
    wget https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.7/cluster/addons/dns/kubedns-svc.yaml.sed
    wget https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.7/cluster/addons/dns/kubedns-controller.yaml.sed
    mv kubedns-controller.yaml.sed kubedns-controller.yaml
    mv kubedns-svc.yaml.sed kubedns-svc.yaml
    

    # Adjust the configuration

    sed -i 's/$DNS_DOMAIN/cluster.local/gi' kubedns-controller.yaml
    sed -i 's/$DNS_SERVER_IP/10.254.0.2/gi' kubedns-svc.yaml
    

    Note: this "DNS_SERVER_IP" must match the --cluster-dns address set in the kubelet configuration; it is not arbitrary.

    # Create the resources (all the yml files were placed in the dns directory)

    kubectl create -f /data/kubernetes/dns
    

    # Verify

    ## Start an nginx pod

    cat > my-nginx.yaml << EOF
    apiVersion: extensions/v1beta1
    kind: Deployment
    metadata:
      name: my-nginx
    spec:
      replicas: 2
      template:
        metadata:
          labels:
            run: my-nginx
        spec:
          containers:
          - name: my-nginx
            image: nginx:1.7.9
            ports:
            - containerPort: 80
    EOF
    
    kubectl create -f my-nginx.yaml
    

    ## Expose the Deployment to create the my-nginx service

    kubectl expose deploy my-nginx
    
    [root@k8s-master01 ~]# kubectl get services --all-namespaces |grep my-nginx
    default       my-nginx     10.254.127.14   <none>        80/TCP          2s
    

    ## Start another pod and check whether its /etc/resolv.conf contains the --cluster-dns and --cluster-domain values configured on the kubelet, and whether the my-nginx service resolves to the Cluster IP 10.254.127.14 shown above;

    [root@k8s-master01 ~]# kubectl exec  nginx -i -t -- /bin/bash
    root@nginx:/# cat /etc/resolv.conf
    nameserver 10.254.0.2
    search default.svc.cluster.local. svc.cluster.local. cluster.local. localhost
    options ndots:5
    root@nginx:/# ping my-nginx
    PING my-nginx.default.svc.cluster.local (10.254.127.14): 48 data bytes
    ^C--- my-nginx.default.svc.cluster.local ping statistics ---
    3 packets transmitted, 0 packets received, 100% packet loss
    root@nginx:/# ping kubernetes
    PING kubernetes.default.svc.cluster.local (10.254.0.1): 48 data bytes
    ^C--- kubernetes.default.svc.cluster.local ping statistics ---
    5 packets transmitted, 0 packets received, 100% packet loss
    root@nginx:/# ping kube-dns.kube-system.svc.cluster.local
    PING kube-dns.kube-system.svc.cluster.local (10.254.0.2): 48 data bytes
    ^C--- kube-dns.kube-system.svc.cluster.local ping statistics ---
    3 packets transmitted, 0 packets received, 100% packet loss
    

    All of the names above resolve to IP addresses, so the DNS service is working; the pings themselves time out because service cluster IPs are virtual and do not answer ICMP, which does not affect name resolution.

    5.2 Autoscaling the DNS Service

    # Create the project directory

    mkdir /data/kubernetes/dns-autoscaler
    cd /data/kubernetes/dns-autoscaler/
    

    # Download the manifests

    wget https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.7/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler-rbac.yaml
    wget https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.7/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
    

    Then simply kubectl create -f the two files. The DNS autoscaler computes replicas = max( ceil( cores * 1/coresPerReplica ), ceil( nodes * 1/nodesPerReplica ) ). To adjust the number of DNS replicas (the load factors), just change the corresponding parameters in the ConfigMap; a worked example follows, and the official documentation has the full calculation details;
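
    For instance, assuming the manifest's default parameters of coresPerReplica=256 and nodesPerReplica=16, a cluster with 5 nodes and 20 cores in total gets a single kube-dns replica:

    replicas = max( ceil(20 * 1/256), ceil(5 * 1/16) ) = max(1, 1) = 1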

    # Edit the ConfigMap

    kubectl edit cm kube-dns-autoscaler --namespace=kube-system

    Note: at any point during cluster setup, image pulls may fail. If so, switch to a mirror registry or pull the images locally first. For users in mainland China, Aliyun's container registry and its registry mirror for Docker Hub images are recommended;
