zoukankan      html  css  js  c++  java
  • k8s-高可用多主master配置

    • 准备主机
    1. centos7镜像
    2. node1: 192.168.0.101
    3. node2: 192.168.0.102
    4. node3: 192.168.0.103
    5. vip: 192.168.0.104
    • 配置ssh免密 并修改/etc/hosts跟/etc/hostname
    • 配置所有节点的kubelet
    # Configure kubelet to use an image registry reachable from mainland China
    # Edit /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
    # and add the following line:
    Environment="KUBELET_EXTRA_ARGS=--pod-infra-container-image=registry.cn-shanghai.aliyuncs.com/gcr-k8s/pause-amd64:3.0"
    # Or insert it with one command (adds the Environment line just before ExecStart=):
    sed -i '/ExecStart=$/i Environment="KUBELET_EXTRA_ARGS=--pod-infra-container-image=registry.cn-shanghai.aliyuncs.com/gcr-k8s/pause-amd64:3.0"'  /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
    # Reload systemd so the drop-in takes effect
    systemctl daemon-reload
    • 修改环境变量
    # Reset the current user's kubectl credentials from the cluster admin kubeconfig
    rm -rf $HOME/.kube
    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    
    # Stop and disable firewalld (kube-proxy manages iptables rules itself)
    systemctl stop firewalld &&  systemctl disable firewalld
    
    setenforce  0 # disable SELinux for the current boot only
    sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux  # disable permanently via /etc/sysconfig/selinux
    sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
    
    
    swapoff -a # turn swap off for the current boot (kubelet refuses to start with swap on)
    sed -i 's/.*swap.*/#&/' /etc/fstab # disable permanently by commenting out swap entries in /etc/fstab
    
    
    # Let bridged traffic traverse iptables (required by most CNI network plugins)
    cat <<EOF >  /etc/sysctl.d/k8s.conf
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    EOF
    sysctl --system
    
    
    # Add the Aliyun Kubernetes yum mirror and install a pinned v1.11.0 toolchain
    cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
    enabled=1
    gpgcheck=1
    repo_gpgcheck=1
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    
    # Versions must match the kubernetesVersion used in the kubeadm configs below
    yum install -y kubelet-1.11.0 kubeadm-1.11.0 kubectl-1.11.0
    • 拉取镜像
    # Pre-pull the v1.11.0 control-plane images from the Aliyun mirror,
    # since the default k8s.gcr.io registry is unreachable from mainland China.
    images=(
      kube-proxy-amd64:v1.11.0
      kube-scheduler-amd64:v1.11.0
      kube-controller-manager-amd64:v1.11.0
      kube-apiserver-amd64:v1.11.0
      etcd-amd64:3.2.18
      coredns:1.1.3
      pause-amd64:3.1
      kubernetes-dashboard-amd64:v1.8.3
      k8s-dns-sidecar-amd64:1.14.9
      k8s-dns-kube-dns-amd64:1.14.9
      k8s-dns-dnsmasq-nanny-amd64:1.14.9
    )
    # Quote the array expansion so elements can never be split or glob-expanded (SC2068)
    for imageName in "${images[@]}"; do
      docker pull "registry.cn-hangzhou.aliyuncs.com/google_containers/${imageName}"
    done
    • 部署keepalived跟haproxy
    # Run on every master: haproxy load-balances the three apiservers on :8443,
    # keepalived floats the VIP 192.168.0.104 across the masters.
docker pull haproxy:1.7.8-alpine
mkdir /etc/haproxy
cat >/etc/haproxy/haproxy.cfg<<EOF
global
  log 127.0.0.1 local0 err
  maxconn 5000
  uid 99
  gid 99
  #daemon
  nbproc 1
  pidfile haproxy.pid
defaults
  mode http
  log 127.0.0.1 local0 err
  maxconn 5000
  retries 3
  timeout connect 5s
  timeout client 30s
  timeout server 30s
  timeout check 2s
listen admin_stats
  mode http
  bind 0.0.0.0:1080
  log 127.0.0.1 local0 err
  stats refresh 30s
  stats uri /haproxy-status
  stats realm Haproxy Statistics
  stats auth will:will
  stats hide-version
  stats admin if TRUE
frontend k8s-https
  bind 0.0.0.0:8443
  mode tcp
  #maxconn 50000
  default_backend k8s-https
backend k8s-https
  mode tcp
  balance roundrobin
  # NOTE(review): the original listed 172.16.2.71-73 here, which does not match
  # the hosts declared at the top of this guide; use the actual master addresses.
  server k8s-master01 192.168.0.101:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
  server k8s-master02 192.168.0.102:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
  server k8s-master03 192.168.0.103:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
EOF
# Start haproxy
docker run -d --name my-haproxy \
  -v /etc/haproxy:/usr/local/etc/haproxy:ro \
  -p 8443:8443 -p 1080:1080 \
  --restart always haproxy:1.7.8-alpine
# Start keepalived (set KEEPALIVED_INTERFACE to your NIC name, e.g. ens33).
# Fixed from the original: a missing closing quote after '192.168.0.101' in
# KEEPALIVED_UNICAST_PEERS, and an inline comment that truncated the command.
docker pull osixia/keepalived:1.4.4
docker run --net=host --cap-add=NET_ADMIN \
  -e KEEPALIVED_INTERFACE=ens33 \
  -e KEEPALIVED_VIRTUAL_IPS="#PYTHON2BASH:['192.168.0.104']" \
  -e KEEPALIVED_UNICAST_PEERS="#PYTHON2BASH:['192.168.0.101','192.168.0.102','192.168.0.103']" \
  -e KEEPALIVED_PASSWORD=admin \
  --name k8s-keepalived --restart always -d osixia/keepalived:1.4.4
    • 启动第一台master
    # Run on node1 only. After kubeadm finishes, start.log contains the
    # 'kubeadm join ... --token ...' command needed to add worker nodes.
    # NOTE(review): the original swapped the VIP and node1 addresses
    # (LOAD_BALANCER_DNS=192.168.0.101, CP0_IP=192.168.0.104); corrected to
    # match the host list and the second/third master scripts.
LOAD_BALANCER_DNS="192.168.0.104"
LOAD_BALANCER_PORT="8443"
CP0_HOSTNAME="node1"
CP0_IP="192.168.0.101"
cat >kubeadm-config.yaml<<EOF
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
kubernetesVersion: v1.11.0
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
apiServerCertSANs:
- "$LOAD_BALANCER_DNS"
- "node1"
- "node2"
- "node3"
- "192.168.0.101"
- "192.168.0.102"
- "192.168.0.103"
- "192.168.0.104"
- "127.0.0.1"
api:
    controlPlaneEndpoint: "$LOAD_BALANCER_DNS:$LOAD_BALANCER_PORT"
etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://$CP0_IP:2379"
      advertise-client-urls: "https://$CP0_IP:2379"
      listen-peer-urls: "https://$CP0_IP:2380"
      initial-advertise-peer-urls: "https://$CP0_IP:2380"
      initial-cluster: "$CP0_HOSTNAME=https://$CP0_IP:2380"
    serverCertSANs:
      - $CP0_HOSTNAME
      - $CP0_IP
    peerCertSANs:
      - $CP0_HOSTNAME
      - $CP0_IP
networking:
    # This CIDR is a Calico default. Substitute or remove for your CNI provider.
    podSubnet: "192.168.0.0/16"
EOF
kubeadm init --config kubeadm-config.yaml > start.log
# Distribute the CA material to the other two masters.
# NOTE(review): the original copied to node2 and node1 — but node1 is this
# host; the second copy must go to node3.
cd /etc/kubernetes && tar cvzf k8s-key.tgz admin.conf pki/ca.* pki/sa.* pki/front-proxy-ca.* pki/etcd/ca.*
scp k8s-key.tgz node2:~/
scp k8s-key.tgz node3:~/
ssh node2 'tar xf k8s-key.tgz -C /etc/kubernetes/'
ssh node3 'tar xf k8s-key.tgz -C /etc/kubernetes/'
# Point kubectl for the current user at the new cluster
rm -rf $HOME/.kube
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
    • 启动第二台master
    # Run on node2 (second master). Writes a kubeadm config whose etcd section
    # adds this node's member to the cluster already running on node1, behind
    # the load-balancer VIP.
    LOAD_BALANCER_DNS="192.168.0.104"
    LOAD_BALANCER_PORT="8443"
    CP0_HOSTNAME="node1"
    CP0_IP="192.168.0.101"
    CP1_HOSTNAME="node2"
    CP1_IP="192.168.0.102"
    cat >kubeadm-config.yaml<<EOF
    apiVersion: kubeadm.k8s.io/v1alpha2
    kind: MasterConfiguration
    kubernetesVersion: v1.11.0
    imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
    apiServerCertSANs:
    - "$LOAD_BALANCER_DNS"
    - "node1"
    - "node2"
    - "node3"
    - "192.168.0.101"
    - "192.168.0.102"
    - "192.168.0.103"
    - "192.168.0.104"
    - "127.0.0.1"
    api:
        controlPlaneEndpoint: "$LOAD_BALANCER_DNS:$LOAD_BALANCER_PORT"
    etcd:
      local:
        extraArgs:
          listen-client-urls: "https://127.0.0.1:2379,https://$CP1_IP:2379"
          advertise-client-urls: "https://$CP1_IP:2379"
          listen-peer-urls: "https://$CP1_IP:2380"
          initial-advertise-peer-urls: "https://$CP1_IP:2380"
          initial-cluster: "$CP0_HOSTNAME=https://$CP0_IP:2380,$CP1_HOSTNAME=https://$CP1_IP:2380"
          initial-cluster-state: existing
        serverCertSANs:
          - $CP1_HOSTNAME
          - $CP1_IP
        peerCertSANs:
          - $CP1_HOSTNAME
          - $CP1_IP
    networking:
        # This CIDR is a calico default. Substitute or remove for your CNI provider.
        podSubnet: "192.168.0.0/16"
    EOF
    
    # Generate this node's certs and kubelet config from kubeadm-config.yaml
    kubeadm alpha phase certs all --config kubeadm-config.yaml
    kubeadm alpha phase kubelet config write-to-disk --config kubeadm-config.yaml
    kubeadm alpha phase kubelet write-env-file --config kubeadm-config.yaml
    kubeadm alpha phase kubeconfig kubelet --config kubeadm-config.yaml
    systemctl start kubelet
    # Register this node as a new etcd member, via etcdctl inside the etcd pod
    # running on the first master
    KUBECONFIG=/etc/kubernetes/admin.conf kubectl exec -n kube-system etcd-${CP0_HOSTNAME} -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://${CP0_IP}:2379 member add ${CP1_HOSTNAME} https://${CP1_IP}:2380
    # Start the local etcd member and control-plane components, then label/taint
    # this node as a master
    kubeadm alpha phase etcd local --config kubeadm-config.yaml
    kubeadm alpha phase kubeconfig all --config kubeadm-config.yaml
    kubeadm alpha phase controlplane all --config kubeadm-config.yaml
    kubeadm alpha phase mark-master --config kubeadm-config.yaml
    • 部署第三台master
    # Run on node3 (third master). Same flow as the second master, with CP2 as
    # the new etcd member.
    # NOTE(review): fixed two errors from the original: 'piServerCertSANs' ->
    # 'apiServerCertSANs', and LOAD_BALANCER_DNS "192.168.91.100" ->
    # "192.168.0.104" (the VIP declared at the top of this guide).
LOAD_BALANCER_DNS="192.168.0.104"
LOAD_BALANCER_PORT="8443"
CP0_HOSTNAME="node1"
CP0_IP="192.168.0.101"
CP1_HOSTNAME="node2"
CP1_IP="192.168.0.102"
CP2_HOSTNAME="node3"
CP2_IP="192.168.0.103"
cat >kubeadm-config.yaml<<EOF
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
kubernetesVersion: v1.11.0
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
apiServerCertSANs:
- "$LOAD_BALANCER_DNS"
- "node1"
- "node2"
- "node3"
- "192.168.0.101"
- "192.168.0.102"
- "192.168.0.103"
- "192.168.0.104"
- "127.0.0.1"
api:
    controlPlaneEndpoint: "$LOAD_BALANCER_DNS:$LOAD_BALANCER_PORT"
etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://$CP2_IP:2379"
      advertise-client-urls: "https://$CP2_IP:2379"
      listen-peer-urls: "https://$CP2_IP:2380"
      initial-advertise-peer-urls: "https://$CP2_IP:2380"
      initial-cluster: "$CP0_HOSTNAME=https://$CP0_IP:2380,$CP1_HOSTNAME=https://$CP1_IP:2380,$CP2_HOSTNAME=https://$CP2_IP:2380"
      initial-cluster-state: existing
    serverCertSANs:
      - $CP2_HOSTNAME
      - $CP2_IP
    peerCertSANs:
      - $CP2_HOSTNAME
      - $CP2_IP
networking:
    # This CIDR is a Calico default. Substitute or remove for your CNI provider.
    podSubnet: "192.168.0.0/16"
EOF
# Generate certs/kubeconfigs and start kubelet, then add this node to the etcd
# cluster through the first master before bringing up the control plane.
kubeadm alpha phase certs all --config kubeadm-config.yaml
kubeadm alpha phase kubelet config write-to-disk --config kubeadm-config.yaml
kubeadm alpha phase kubelet write-env-file --config kubeadm-config.yaml
kubeadm alpha phase kubeconfig kubelet --config kubeadm-config.yaml
systemctl start kubelet
KUBECONFIG=/etc/kubernetes/admin.conf kubectl exec -n kube-system etcd-${CP0_HOSTNAME} -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://${CP0_IP}:2379 member add ${CP2_HOSTNAME} https://${CP2_IP}:2380
kubeadm alpha phase etcd local --config kubeadm-config.yaml
kubeadm alpha phase kubeconfig all --config kubeadm-config.yaml
kubeadm alpha phase controlplane all --config kubeadm-config.yaml
kubeadm alpha phase mark-master --config kubeadm-config.yaml
    • 部署网络可以使用calico或者flannel
    # Download the Calico v3.1 manifests (RBAC rules + CNI DaemonSet)
    wget https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
    
    wget https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
    # Run on each master; Calico is deployed as a DaemonSet onto every node.
    # Removing the master taint allows pods to schedule on the masters.
    kubectl taint  nodes --all node-role.kubernetes.io/master-
    kubectl apply -f rbac-kdd.yaml 
    kubectl apply -f calico.yaml
    # Check cluster status
    kubectl get pods --all-namespaces

    • 加入node节点
      # On each worker node, run the join command printed in start.log on the
      # first master (token and hash are cluster-specific examples)
        kubeadm join 192.168.0.104:8443 --token v8q1a2.9zb56bff4076tmin --discovery-token-ca-cert-hash sha256:94a1bc52ce95cb8a69c97528d81ca0ea2bde48947450b9c7b59225dcafe8cebc

        

  • 相关阅读:
    NYOJ-开灯问题
    cocos2dx 3.0 飞机大战
    Java 实现享元(Flyweight)模式
    MongoDB 操作手冊CRUD查询指针
    均值滤波
    cxf调用c#的webservice
    SharePoint 2013 术语和术语集介绍
    Unity3d 网络编程(二)(Unity3d内建网络各项參数介绍)
    linux服务器在运行210天左右宕机
    好的用户界面-界面设计的一些技巧
  • 原文地址:https://www.cnblogs.com/1ssqq1lxr/p/10071762.html
Copyright © 2011-2022 走看看