  • Kubernetes 1.14 High-Availability Cluster Deployment

    System configuration
    CentOS Linux release 7.6.1810 (Core)
    System update
    yum install epel-release -y
    yum update -y
    
    Kernel version: Linux 5.1.8-1.el7.elrepo.x86_64
    Kernel upgrade
    # Install the ELRepo yum repository
    rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
    rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
    
    # List available kernels
    yum --disablerepo=* --enablerepo=elrepo-kernel repolist
    yum --disablerepo=* --enablerepo=elrepo-kernel list kernel*
    
    
    # Install
    yum --enablerepo=elrepo-kernel install kernel-ml-devel kernel-ml -y
    
    
    # Set the default boot entry and regenerate the grub config
    grub2-set-default 0
    grub2-mkconfig -o /etc/grub2.cfg
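    # Note: grub2-set-default 0 assumes the newly installed kernel-ml is the first
    # grub menu entry, which is normally the case after the install above.
    # A quick sanity check before rebooting (a sketch; entry names vary by system):
    awk -F\' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg
    grub2-editenv list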
    
    
    # Remove the old kernel tools packages
    yum remove kernel-tools-libs.x86_64 kernel-tools.x86_64 -y
    
    # Install the new version
    yum --disablerepo=* --enablerepo=elrepo-kernel install -y kernel-ml-tools.x86_64
    
    
    # Reboot
    reboot
    
    # Check the kernel version
    uname -sr
    

      

    Tune kernel parameters
    cat <<EOF > /etc/sysctl.d/k8s.conf
    net.ipv4.tcp_keepalive_time = 600
    net.ipv4.tcp_keepalive_intvl = 30
    net.ipv4.tcp_keepalive_probes = 10
    net.ipv6.conf.all.disable_ipv6 = 1
    net.ipv6.conf.default.disable_ipv6 = 1
    net.ipv6.conf.lo.disable_ipv6 = 1
    net.ipv4.neigh.default.gc_stale_time = 120
    net.ipv4.conf.all.rp_filter = 0
    net.ipv4.conf.default.rp_filter = 0
    net.ipv4.conf.default.arp_announce = 2
    net.ipv4.conf.lo.arp_announce = 2
    net.ipv4.conf.all.arp_announce = 2
    net.ipv4.ip_forward = 1
    net.ipv4.tcp_max_tw_buckets = 5000
    net.ipv4.tcp_syncookies = 1
    net.ipv4.tcp_max_syn_backlog = 1024
    net.ipv4.tcp_synack_retries = 2
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.netfilter.nf_conntrack_max = 2310720
    fs.inotify.max_user_watches=89100
    fs.may_detach_mounts = 1
    fs.file-max = 52706963
    fs.nr_open = 52706963
    net.bridge.bridge-nf-call-arptables = 1
    # Prefer physical memory; fall back to swap only as a last resort
    vm.swappiness = 0
    vm.overcommit_memory=1
    vm.panic_on_oom=0
    EOF
    sysctl --system
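    Note: the net.bridge.* keys above require the br_netfilter kernel module; if it is not loaded, sysctl --system may complain that those keys do not exist. A minimal sketch to load it now and on every boot (assuming the standard module name br_netfilter):
    modprobe br_netfilter
    echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
    sysctl --system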
    

      

     

    Disable swap
    # Disable temporarily
    swapoff -a
    # Disable permanently
    sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
    
    Enable IPVS
    modprobe -- ip_vs
    modprobe -- ip_vs_rr
    modprobe -- ip_vs_wrr
    modprobe -- ip_vs_sh
    modprobe -- nf_conntrack_ipv4
    # Verify the modules are loaded
    lsmod | grep ip_vs
    # Load the modules automatically on boot
    cat <<EOF>> /etc/rc.local
    modprobe -- ip_vs
    modprobe -- ip_vs_rr
    modprobe -- ip_vs_wrr
    modprobe -- ip_vs_sh
    modprobe -- nf_conntrack_ipv4
    EOF
    chmod +x /etc/rc.d/rc.local
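    Note: on kernels 4.19 and newer (including the 5.x kernel installed above), nf_conntrack_ipv4 was merged into nf_conntrack, so the modprobe above may fail. A hedged fallback:
    modprobe nf_conntrack_ipv4 2>/dev/null || modprobe nf_conntrack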
    
    Disable SELinux
    # Disable temporarily
    setenforce 0
    # Disable permanently
    sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
    

     

    Disable postfix
    systemctl stop postfix
    systemctl disable postfix
    

      

    Disable the firewall
    systemctl stop firewalld
    systemctl disable firewalld
    

      

    Install docker-ce
    # Uninstall old versions
    yum remove docker \
                      docker-client \
                      docker-client-latest \
                      docker-common \
                      docker-latest \
                      docker-latest-logrotate \
                      docker-logrotate \
                      docker-engine
    
    
    
    
    # Install using the repository
    yum install -y yum-utils \
      device-mapper-persistent-data \
      lvm2


    yum-config-manager \
        --add-repo \
        https://download.docker.com/linux/centos/docker-ce.repo
    
    
    yum install docker-ce docker-ce-cli containerd.io -y
    
    # Start and enable the service
    systemctl start docker
    systemctl enable docker
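    Optional: the Kubernetes documentation recommends running Docker with the systemd cgroup driver. A hedged sketch of /etc/docker/daemon.json (kubeadm detects Docker's cgroup driver automatically, so this is a recommendation rather than a requirement):
    cat <<EOF > /etc/docker/daemon.json
    {
      "exec-opts": ["native.cgroupdriver=systemd"],
      "log-driver": "json-file",
      "log-opts": { "max-size": "100m" }
    }
    EOF
    systemctl restart docker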
    

      

    Other prerequisites
    Set up passwordless SSH between the nodes, add all hosts to /etc/hosts, and configure NTP time synchronization.
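    A minimal sketch of these steps, assuming the g-master01/g-master02/g-master03 hostnames and 172.17.1.201-203 addresses used later in this post (adjust to your environment):
    # Passwordless SSH from the deployment node to all masters
    ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
    for ip in 172.17.1.201 172.17.1.202 172.17.1.203; do ssh-copy-id root@$ip; done

    # Hostname resolution on every node
    cat <<EOF >> /etc/hosts
    172.17.1.201 g-master01
    172.17.1.202 g-master02
    172.17.1.203 g-master03
    EOF

    # NTP time synchronization (chrony is the CentOS 7 default)
    yum install -y chrony
    systemctl enable chronyd
    systemctl start chronyd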
    

      

    Configure keepalived and haproxy (master nodes)
    yum install -y socat keepalived haproxy ipvsadm
    systemctl enable haproxy
    systemctl enable keepalived
    

      

    haproxy configuration
    # /etc/haproxy/haproxy.cfg
    global
        log         127.0.0.1 local3
        chroot      /var/lib/haproxy
        pidfile     /var/run/haproxy.pid
        maxconn     32768
        user        haproxy
        group       haproxy
        daemon
        nbproc      1
        stats socket /var/lib/haproxy/stats
    
    defaults
        mode                    tcp
        log                     global
        option                  tcplog
        option                  dontlognull
        option                  redispatch
        retries                 3
        timeout queue           1m
        timeout connect         10s
        timeout client          1m
        timeout server          1m
        timeout check           10s
    
    listen stats
        mode   http
        bind :8888
        stats   enable
        stats   uri     /admin?stats
        stats   auth    admin:admin
        stats   admin   if TRUE
    
    frontend  k8s_https *:8443
        mode      tcp
        maxconn      2000
        default_backend     https_sri
    
    backend https_sri
        balance      roundrobin
        server master1-api 172.17.1.201:6443  check inter 10000 fall 2 rise 2 weight 1
        server master2-api 172.17.1.202:6443  check inter 10000 fall 2 rise 2 weight 1
        server master3-api 172.17.1.203:6443  check inter 10000 fall 2 rise 2 weight 1
    
    keepalived configuration
    # /etc/keepalived/keepalived.conf
    global_defs {
       router_id master01
    }
    
    vrrp_script check_haproxy {
        script /etc/keepalived/check_haproxy.sh
        interval 3
    }
    
    vrrp_instance VI_1 {
        state MASTER
        interface eth0
        virtual_router_id 99
        priority 100
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1111
        }
        virtual_ipaddress {
            172.17.1.200
        }
        track_script {   
            check_haproxy
        }
    }
    

      

    # /etc/keepalived/check_haproxy.sh
    #!/bin/bash
    # If haproxy is no longer running, stop keepalived so the VIP
    # fails over to another master node.
    NUM=$(ps -C haproxy --no-header | wc -l)
    if [ $NUM -eq 0 ];then
        systemctl stop keepalived
    fi
    
    
    # chmod 755 /etc/keepalived/check_haproxy.sh
    

      

    The keepalived configuration differs across the three master nodes:
    router_id is master01, master02, and master03 respectively
    state is MASTER, BACKUP, and BACKUP respectively
    priority is 100, 90, and 80 respectively
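    For example, starting from master01's file, the changes for master02 could be applied like this (a sketch; use master03/priority 80 on the third node):
    sed -i -e 's/router_id master01/router_id master02/' \
           -e 's/state MASTER/state BACKUP/' \
           -e 's/priority 100/priority 90/' /etc/keepalived/keepalived.conf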
    

      

    Kubernetes cluster deployment
    Install kubeadm, kubelet, and kubectl
    Install kubeadm, kubelet, and kubectl on all nodes. Note: kubectl is not strictly required on worker nodes.
    
    # Configure the yum repository
    cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=0
    repo_gpgcheck=0
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
           https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    # Install
    yum -y install kubeadm-1.14.0 kubelet-1.14.0 kubectl-1.14.0
    systemctl enable kubelet
    

      

    Initialize the masters
    # Generate a default configuration file
    kubeadm config print init-defaults > kubeadm.conf
    
    # Change imageRepository in the configuration file
    # imageRepository: k8s.gcr.io
    # for example:
    # imageRepository: docker.io/mirrorgooglecontainers

    # Run the following commands
    kubeadm config images list --config kubeadm.conf
    kubeadm config images pull --config kubeadm.conf
    kubeadm init --config kubeadm.conf    # use the per-node config files shown below
    

      

    master01
    kubeadm_master01.conf
    apiVersion: kubeadm.k8s.io/v1beta1
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 172.17.1.201
      bindPort: 6443
    ---
    apiVersion: kubeadm.k8s.io/v1beta1
    kind: ClusterConfiguration
    kubernetesVersion: v1.14.0
    controlPlaneEndpoint: "172.17.1.200:8443"
    imageRepository: registry.aliyuncs.com/google_containers
    apiServer:
      certSANs:
      - "g-master01"
      - "g-master02"
      - "g-master03"
      - 172.17.1.201
      - 172.17.1.202
      - 172.17.1.203
      - 172.17.1.200
    networking:
      podSubnet: "10.244.0.0/16"
      serviceSubnet: "10.96.0.0/12"
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    
    etcd:
      local:
        extraArgs:
          listen-client-urls: "https://127.0.0.1:2379,https://172.17.1.201:2379"
          advertise-client-urls: "https://172.17.1.201:2379"
          listen-peer-urls: "https://172.17.1.201:2380"
          initial-advertise-peer-urls: "https://172.17.1.201:2380"
          initial-cluster: "g-master01.bjyztj.sjhl=https://172.17.1.201:2380"
          initial-cluster-state: new
        serverCertSANs:
          - g-master01
          - 172.17.1.201
        peerCertSANs:
          - g-master01
          - 172.17.1.201
    ---
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    mode: ipvs
    

      

    # Initialize master01
    kubeadm init --config kubeadm_master01.conf
    # Configure kubectl to manage the cluster
    mkdir .kube
    cp -i /etc/kubernetes/admin.conf .kube/config
    
    
    kubectl get pods -n kube-system
    

      

    # Install the Flannel network plugin
    wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
    

      

    # Bind flannel to a specific network interface (edit kube-flannel.yml)
          containers:
          - name: kube-flannel
            image: quay.io/coreos/flannel:v0.11.0-amd64
            command:
            - /opt/bin/flanneld
            args:
            - --ip-masq
            - --kube-subnet-mgr
            - --iface=eth0        # add this line
    

      

    # Then apply the manifest (run only on master01)
    kubectl apply -f kube-flannel.yml
    

      

    kubectl get pods -n kube-system
    

      

    # Distribute the certificates to the other master nodes
    #!/bin/bash
    for index in 202 203; do
      ip=172.17.1.${index}
      ssh $ip "mkdir -p /etc/kubernetes/pki/etcd; mkdir -p ~/.kube/"
      scp /etc/kubernetes/pki/ca.crt $ip:/etc/kubernetes/pki/ca.crt
      scp /etc/kubernetes/pki/ca.key $ip:/etc/kubernetes/pki/ca.key
      scp /etc/kubernetes/pki/sa.key $ip:/etc/kubernetes/pki/sa.key
      scp /etc/kubernetes/pki/sa.pub $ip:/etc/kubernetes/pki/sa.pub
      scp /etc/kubernetes/pki/front-proxy-ca.crt $ip:/etc/kubernetes/pki/front-proxy-ca.crt
      scp /etc/kubernetes/pki/front-proxy-ca.key $ip:/etc/kubernetes/pki/front-proxy-ca.key
      scp /etc/kubernetes/pki/etcd/ca.crt $ip:/etc/kubernetes/pki/etcd/ca.crt
      scp /etc/kubernetes/pki/etcd/ca.key $ip:/etc/kubernetes/pki/etcd/ca.key
      scp /etc/kubernetes/admin.conf $ip:/etc/kubernetes/admin.conf
      scp /etc/kubernetes/admin.conf $ip:~/.kube/config
    done
    

      

    master02
    kubeadm_master02.conf
    apiVersion: kubeadm.k8s.io/v1beta1
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 172.17.1.202
      bindPort: 6443
    ---
    apiVersion: kubeadm.k8s.io/v1beta1
    kind: ClusterConfiguration
    kubernetesVersion: v1.14.0
    controlPlaneEndpoint: "172.17.1.200:8443"
    imageRepository: registry.aliyuncs.com/google_containers
    apiServer:
      certSANs:
      - "g-master01"
      - "g-master02"
      - "g-master03"
      - 172.17.1.201
      - 172.17.1.202
      - 172.17.1.203
      - 172.17.1.200
    networking:
      podSubnet: "10.244.0.0/16"
      serviceSubnet: "10.96.0.0/12"
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    
    etcd:
      local:
        extraArgs:
          listen-client-urls: "https://127.0.0.1:2379,https://172.17.1.202:2379"
          advertise-client-urls: "https://172.17.1.202:2379"
          listen-peer-urls: "https://172.17.1.202:2380"
          initial-advertise-peer-urls: "https://172.17.1.202:2380"
          initial-cluster: "g-master01.bjyztj.sjhl=https://172.17.1.201:2380,g-master02.bjyztj.sjhl=https://172.17.1.202:2380"
          initial-cluster-state: existing
        serverCertSANs:
          - g-master02
          - 172.17.1.202
        peerCertSANs:
          - g-master02
          - 172.17.1.202
    ---
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    mode: ipvs
    

      

    # Generate certificates
    kubeadm init phase certs all --config kubeadm_master02.conf
    # Configure etcd
    kubeadm init phase etcd local --config kubeadm_master02.conf
    # Generate the kubelet configuration file
    kubeadm init phase kubeconfig kubelet --config kubeadm_master02.conf
    # Start kubelet
    kubeadm init phase kubelet-start --config kubeadm_master02.conf
    # Add master02's etcd member to the cluster
    kubectl exec -n kube-system etcd-g-master01.bjyztj.sjhl -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://172.17.1.201:2379 member add master2 https://172.17.1.202:2380
    # Start kube-apiserver, kube-controller-manager, and kube-scheduler
    kubeadm init phase kubeconfig all --config kubeadm_master02.conf
    kubeadm init phase control-plane all --config kubeadm_master02.conf
    

      

    # Check node status
    kubectl get nodes
    # Mark the node as a control-plane (master) node
    kubeadm init phase mark-control-plane --config kubeadm_master02.conf
    # Check again
    kubectl get nodes
    

      

    master03
    kubeadm_master03.conf
    apiVersion: kubeadm.k8s.io/v1beta1
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 172.17.1.203
      bindPort: 6443
    ---
    apiVersion: kubeadm.k8s.io/v1beta1
    kind: ClusterConfiguration
    kubernetesVersion: v1.14.0
    controlPlaneEndpoint: "172.17.1.200:8443"
    imageRepository: registry.aliyuncs.com/google_containers
    apiServer:
      certSANs:
      - "g-master01"
      - "g-master02"
      - "g-master03"
      - 172.17.1.201
      - 172.17.1.202
      - 172.17.1.203
      - 172.17.1.200
    networking:
      podSubnet: "10.244.0.0/16"
      serviceSubnet: "10.96.0.0/12"
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    
    etcd:
      local:
        extraArgs:
          listen-client-urls: "https://127.0.0.1:2379,https://172.17.1.203:2379"
          advertise-client-urls: "https://172.17.1.203:2379"
          listen-peer-urls: "https://172.17.1.203:2380"
          initial-advertise-peer-urls: "https://172.17.1.203:2380"
          initial-cluster: "g-master01.bjyztj.sjhl=https://172.17.1.201:2380,g-master02.bjyztj.sjhl=https://172.17.1.202:2380,g-master03.bjyztj.sjhl=https://172.17.1.203:2380"
          initial-cluster-state: existing
        serverCertSANs:
          - g-master03
          - 172.17.1.203
        peerCertSANs:
          - g-master03
          - 172.17.1.203
    ---
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    mode: ipvs
    

      

    # Generate certificates
    kubeadm init phase certs all --config kubeadm_master03.conf
    # Configure etcd
    kubeadm init phase etcd local --config kubeadm_master03.conf
    # Generate the kubelet configuration file
    kubeadm init phase kubeconfig kubelet --config kubeadm_master03.conf
    # Start kubelet
    kubeadm init phase kubelet-start --config kubeadm_master03.conf
    # Add master03's etcd member to the cluster
    kubectl exec -n kube-system etcd-g-master01.bjyztj.sjhl -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://172.17.1.201:2379 member add master3 https://172.17.1.203:2380
    # Start kube-apiserver, kube-controller-manager, and kube-scheduler
    kubeadm init phase kubeconfig all --config kubeadm_master03.conf
    kubeadm init phase control-plane all --config kubeadm_master03.conf
    

      

    # Check node status
    kubectl get nodes
    # Mark the node as a control-plane (master) node
    kubeadm init phase mark-control-plane --config kubeadm_master03.conf
    # Check again
    kubectl get nodes
    

      

    Join worker nodes to the cluster
    The kubeadm init output on master01 already contains the join commands:
    Your Kubernetes control-plane has initialized successfully!
    
    To start using your cluster, you need to run the following as a regular user:
    
      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
      https://kubernetes.io/docs/concepts/cluster-administration/addons/
    
    You can now join any number of control-plane nodes by copying certificate authorities
    and service account keys on each node and then running the following as root:
    
      kubeadm join 172.17.1.200:8443 --token 45vifj.buiy0wjpjrortpjn \
        --discovery-token-ca-cert-hash sha256:f1285c8b72d22a5cf97f1713ad7ed258efcba275203ef42d8877d9d66902fee8 \
        --experimental-control-plane
    
    Then you can join any number of worker nodes by running the following on each as root:
    
    kubeadm join 172.17.1.200:8443 --token 45vifj.buiy0wjpjrortpjn \
        --discovery-token-ca-cert-hash sha256:f1285c8b72d22a5cf97f1713ad7ed258efcba275203ef42d8877d9d66902fee8
    

      

    Status checks
    kubectl get nodes
    kubectl cluster-info
    kubectl get cs
    kubectl exec -n kube-system etcd-g-master01.bjyztj.sjhl -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://172.17.1.201:2379 member list
    ipvsadm -ln
    kubectl logs --tail=10 kube-proxy-tqxlq -n kube-system
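    A quick check that the load-balanced API endpoint behind the VIP is reachable (a sketch; on 1.14 /healthz is normally readable anonymously, otherwise point kubectl at 172.17.1.200:8443 instead):
    curl -k https://172.17.1.200:8443/healthz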
    

      

      

     

      

      

     
