  • 9 -- k8s: Endpoints, Service Health Checks, High Availability

    I. Endpoints

    Endpoints is a Kubernetes resource object, stored in etcd, that records the access addresses of all Pods backing a Service; it is generated from the selector described in the Service's configuration.

    A Service fronts a group of Pods, and those Pods are exposed through Endpoints, the set of endpoints that actually implement the service. In other words, the link between a Service and its Pods is made through Endpoints.

    Endpoints are namespace-scoped resources; if an Endpoints object and a Service have the same name in the same namespace, they are associated automatically.
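
    A quick way to see this association on a live cluster is the default kubernetes Service and its identically named Endpoints object (command only; the output differs per cluster):

    kubectl get service,endpoints kubernetes -n default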

    1. Function one: load balancing with a Service

    [root@k8s ~]# kubectl describe svc
    Name:              kubernetes
    Namespace:         default
    Labels:            component=apiserver
                       provider=kubernetes
    Annotations:       <none>
    Selector:          <none>
    Type:              ClusterIP
    IP Family Policy:  SingleStack
    IP Families:       IPv4
    IP:                10.96.0.1
    IPs:               10.96.0.1
    Port:              https  443/TCP
    TargetPort:        6443/TCP
    Endpoints:         192.168.15.201:6443
    Session Affinity:  None
    Events:            <none>
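
    When a Service defines a selector (the kubernetes Service above shows Selector: <none> because the apiserver manages its endpoints directly), the endpoints controller maintains the Endpoints object automatically: every ready Pod that matches the selector is added as an address, and kube-proxy load-balances traffic across those addresses. A minimal sketch with illustrative names:

    kind: Service
    apiVersion: v1
    metadata:
      name: web # hypothetical Service name
      namespace: default
    spec:
      selector:
        app: web # every ready Pod labeled app=web becomes an endpoint
      ports:
        - port: 80
          targetPort: 80

    # kubectl get endpoints web   -> one <podIP>:80 entry per ready Pod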
    

    2. Function two: bringing an external service into the cluster

    Example

    # First create an "external" MySQL service on the host with Docker
    [root@k8s endpoints]# docker run -d -p 3306:3306 -e MYSQL_ROOT_PASSWORD=123456 mysql:5.7 
    c34bab6ad37f46bae59ef2ee712e8430c53142d30a53119e9912407fd540ad61
    # port 3306, root password as above
    
    kind: Endpoints
    apiVersion: v1
    metadata:
      namespace: default
      name: test-endpoints  # namespace-scoped resource, so a namespace can be set
    subsets:   # define the external addresses
      - addresses: # proxied IPs; more than one can be listed
          - ip: 192.168.15.201
        ports:
          - port: 3306 # the external service's port
            protocol: TCP
            name: http
    ---
    kind: Service
    apiVersion: v1
    metadata:
      name: test-endpoints # must match the Endpoints name above to be associated
      namespace: default  # same namespace as above
    spec:  # note: no selector here; the association is purely by name
      ports:
        - port: 3306
          targetPort: 3306
          protocol: TCP
          name: http
    ---
    kind: Deployment # provides a MySQL client Pod for testing
    apiVersion: apps/v1
    metadata:
      name: mysql
      namespace: default
    spec:
      selector:
        matchLabels:
          app: mysql-v1
      template:
        metadata:
          labels:
            app: mysql-v1
        spec:
          containers:
            - name: mysql
              image: mysql:5.7
              env:
                - name: MYSQL_ROOT_PASSWORD
                  value: "123456"
    
    # Deploy the endpoints manifest
    [root@k8s endpoints]# kubectl apply -f endpoints.yaml 
    endpoints/test-endpoints created
    service/test-endpoints created
    deployment.apps/mysql created
    [root@k8s endpoints]# kubectl get -f endpoints.yaml 
    NAME                       ENDPOINTS             AGE
    endpoints/test-endpoints   192.168.15.201:3306   8s
    
    NAME                     TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
    service/test-endpoints   ClusterIP   10.106.61.144   <none>        3306/TCP   8s
    
    NAME                    READY   UP-TO-DATE   AVAILABLE   AGE
    deployment.apps/mysql   1/1     1            1           8s
    
    # Exec into the deployed client Pod
    [root@k8s endpoints]# kubectl exec -it mysql-578666457d-g8856 -- bash
    # Connect to the cluster-internal Service IP
    root@mysql-578666457d-g8856:/# mysql -uroot -p123456 -h10.106.61.144
    mysql> create database db01;
    Query OK, 1 row affected (0.01 sec)
    
    mysql> show databases;
    +--------------------+
    | Database           |
    +--------------------+
    | information_schema |
    | db01               |
    | mysql              |
    | performance_schema |
    | sys                |
    +--------------------+
    5 rows in set (0.00 sec)
    
    # Enter the external MySQL container running in Docker
    [root@k8s endpoints]# docker exec -it c34bab6ad37f bash
    root@c34bab6ad37f:/# mysql -uroot -p123456
    mysql> show databases;
    +--------------------+
    | Database           |
    +--------------------+
    | information_schema |
    | db01               |
    | mysql              |
    | performance_schema |
    | sys                |
    +--------------------+
    5 rows in set (0.00 sec)
    # The db01 database created a moment ago shows up here, so the Endpoints object successfully proxied the external MySQL service
    

    II. Service Health Checks

    1. Configuration manifest

    ---
    kind: Deployment
    apiVersion: apps/v1
    metadata:
      name: test-deployment
    spec:
      selector:
        matchLabels:
          app: nginx-v1
      template:
        metadata:
          labels:
            app: nginx-v1
        spec:
          containers:
            - name: nginx
              image: nginx
              lifecycle: # callback hooks
                postStart: # runs right after the container starts
                  exec: # handler type 1 (most common); only ONE handler type may be set per hook
                    command:
                      - "/bin/sh"
                      - "-c"
                      - "touch /root/1.txt"
                  # httpGet: # handler type 2 (rarely used); the request must return 200 to count as success
                  #   port: 80
                  #   path: /
                  # tcpSocket: # handler type 3 (rarely used)
                  #   port: 80
                preStop: # runs before the container is terminated
                  exec:
                    command:
                      - "/bin/sh"
                      - "-c"
                      - "echo 123 > /root/1.txt"
              livenessProbe:
                exec:
                  command:
                    - "/bin/bash"
                    - "-c"
                    - "cat /usr/share/nginx/html/index.php" # the probed file must exist or the container is restarted (the stock nginx image ships index.html, not index.php)
                initialDelaySeconds: 0 # delay before the first probe
                periodSeconds: 3 # probe interval
                timeoutSeconds: 1 # probe timeout
                successThreshold: 1 # consecutive successes needed to pass
                failureThreshold: 3 # consecutive failures needed to fail
              readinessProbe:
                tcpSocket:
                  port: 80
                initialDelaySeconds: 30 # give large applications a longer delay
                periodSeconds: 1 # a sensitive readiness probe gives a better user experience
                timeoutSeconds: 1
                successThreshold: 3
                failureThreshold: 1
    

    2. Callback hooks -- lifecycle

    The startup hook is postStart, which supports three handler types: exec, httpGet, and tcpSocket (only one may be set per hook). exec is the most common; an httpGet handler counts as successful only when the request returns 200.

    The shutdown hook is preStop, with the same handler types.
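
    Because only one handler may be set per hook, the httpGet form has to replace exec rather than sit alongside it. A small sketch of that variant (the port, path, and the graceful nginx shutdown in preStop are illustrative, not from the original):

              lifecycle:
                postStart:
                  httpGet: # the request must return 200 to count as success
                    port: 80
                    path: /
                preStop:
                  exec: # let nginx drain in-flight requests before exiting
                    command: ["/bin/sh", "-c", "nginx -s quit; sleep 3"]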

    3. Liveness -- livenessProbe

    Liveness is usually checked with an exec handler; a typical production configuration looks like this:

              livenessProbe:
                exec:
                  command:
                    - "/bin/bash"
                    - "-c"
                    - "cat /usr/share/nginx/html/index.php"
                initialDelaySeconds: 0 # delay before the first probe; usually run immediately
                periodSeconds: 3 # probe interval: every three seconds
                timeoutSeconds: 1 # probe timeout
                successThreshold: 1 # consecutive successes needed to pass
                failureThreshold: 3 # consecutive failures needed to fail
    

    4. Readiness -- readinessProbe

    Readiness is usually checked by probing a port:

              readinessProbe:
                tcpSocket:
                  port: 80
                initialDelaySeconds: 30 # give large applications a longer delay
                periodSeconds: 1 # a sensitive readiness probe gives a better user experience
                timeoutSeconds: 1 # probe timeout
                successThreshold: 3 # three consecutive successes mark the Pod ready
                failureThreshold: 1 # a single failure removes the Pod from the Service endpoints
    

    III. Comprehensive Example -- Building WordPress

    1. Prepare the nginx files and build the image

    1. Create the directories
    [root@docter ~]# mkdir -p /blog/{php,nginx,mysql}
    [root@docter ~]# cd /blog/nginx/
    
    2. Write the nginx Dockerfile
    [root@docter nginx]# cat Dockerfile 
    FROM nginx
    ADD nginx.conf /etc/nginx/nginx.conf
    ADD default.conf /etc/nginx/conf.d/default.conf
    RUN groupadd www -g 666 && \
        useradd www -u 666 -g 666 -M -r -s /sbin/nologin
    ADD discuz /usr/share/nginx/html
    RUN chown -R www.www /usr/share/nginx/html
    WORKDIR /usr/share/nginx/html
    EXPOSE 80 443
    CMD nginx -g "daemon off;"
    
    3. Write nginx's default.conf
    [root@docter nginx]# cat default.conf 
    server {
        listen       80;
        listen  [::]:80;
        server_name  localhost;
        root   /usr/share/nginx/html;
        
        location / {
            index  index.php index.html index.htm;
        }
    
        location ~ \.php$ {
            root           /usr/share/nginx/html;
            fastcgi_pass   php:9000;
            fastcgi_index  index.php;
            fastcgi_param  SCRIPT_FILENAME  $document_root$fastcgi_script_name;
            include        fastcgi_params;
        }
    }
    
    4. Edit the main nginx configuration (nginx.conf)
    [root@docker1 nginx]# vim nginx.conf 
    user  www;
    worker_processes  auto;
    
    error_log  /var/log/nginx/error.log notice;
    pid        /var/run/nginx.pid;
    
    events {
        worker_connections  1024;
    }
    
    http {
        include       /etc/nginx/mime.types;
        default_type  application/octet-stream;
        log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
    
        access_log  /var/log/nginx/access.log  main;
    
        sendfile        on;
        #tcp_nopush     on;
    
        keepalive_timeout  65;
    
        gzip  on;
    
        include /etc/nginx/conf.d/*.conf;
    }
    
    5. Build the image
    # the tag targets an Aliyun registry
    [root@k8s-m-01 nginx]# docker build -t registry.cn-shanghai.aliyuncs.com/cdank8s/web:discuz-v1 .
    
    # Test run
    docker run -d --name nginx  registry.cn-shanghai.aliyuncs.com/cdank8s/web:discuz-v1
    

    2. Build the PHP image

    # Edit the PHP Dockerfile
    [root@docker php]# vim Dockerfile
    FROM centos:7
    RUN groupadd www -g 666 && \
        useradd www -u 666 -g 666 -M -r -s /sbin/nologin
    ADD php.repo /etc/yum.repos.d/php.repo
    RUN yum install -y php71w php71w-cli php71w-common php71w-devel php71w-embedded php71w-gd php71w-mcrypt php71w-mbstring php71w-pdo php71w-xml php71w-fpm php71w-mysqlnd php71w-opcache php71w-pecl-memcached php71w-pecl-redis php71w-pecl-mongodb php71w-bcmath 
    RUN sed -i 's#apache#www#' /etc/php-fpm.d/www.conf
    EXPOSE 9000
    WORKDIR /usr/share/nginx/html
    ADD discuz /usr/share/nginx/html
    RUN chown -R www.www /usr/share/nginx/html
    CMD php-fpm "-F"
    
    # Create php.repo
    [root@docker php]# vim php.repo
    [php-webtatic]
    name = PHP Repo
    baseurl = http://us-east.repo.webtatic.com/yum/el7/x86_64/
    gpgcheck = 0
    enabled = 1
    
    # Build the image
    [root@k8s-m-01 nginx]# docker build -t registry.cn-shanghai.aliyuncs.com/cdank8s/web:discuz-php-v1 .
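
    As a quick sanity check before the image goes into the cluster, it can be started locally (the container name php-test is illustrative):

    docker run -d --name php-test registry.cn-shanghai.aliyuncs.com/cdank8s/web:discuz-php-v1
    docker top php-test   # the php-fpm master and worker processes should be listed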
    

    3. Create the YAML manifests

    # Database service deployment
    # Create the database namespace
    apiVersion: v1
    kind: Namespace
    metadata:
      name: mysql
    ---
    # Create the database Deployment
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: mysql
      namespace: mysql
    spec:
      selector:
        matchLabels:
          app: mysql
      template:
        metadata:
          labels:
            app: mysql
        spec:
          containers:
            - name: mysql
              image: mysql:5.7
              env:
                - name: MYSQL_ROOT_PASSWORD
                  value: "123456"
                - name: MYSQL_DATABASE
                  value: wordpress
              livenessProbe: # liveness check
                exec:
                  command:
                    - "/bin/bash"
                    - "-c"
                    - "cat /etc/mysql/my.cnf"
                initialDelaySeconds: 0
                periodSeconds: 3
                timeoutSeconds: 1
                successThreshold: 1
                failureThreshold: 3
              readinessProbe: # readiness check
                tcpSocket:
                  port: 3306
                initialDelaySeconds: 20
                periodSeconds: 1
                successThreshold: 3
                failureThreshold: 1
                timeoutSeconds: 1
    ---
    # Service for the database
    apiVersion: v1
    kind: Service
    metadata:
      name: mysql
      namespace: mysql
    spec:
      selector:
        app: mysql
      ports:
        - port: 3306
          targetPort: 3306
      type: NodePort
    # Database deployment complete
    ---
    # Create the project namespace
    apiVersion: v1
    kind: Namespace
    metadata:
      name: wordpress
    ---
    # Create the project Deployment
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: wordpress
      namespace: wordpress
    spec:
      selector:
        matchLabels:
          app: wordpress
      template:
        metadata:
          labels:
            app: wordpress
        spec:
          containers:
            - name: php
              image: alvinos/php:wordpress-v2
              imagePullPolicy: Always
              livenessProbe:
                exec:
                  command:
                    - "/bin/bash"
                    - "-c"
                    - "ps -ef | grep php"
                initialDelaySeconds: 0
                periodSeconds: 3
                timeoutSeconds: 1
                successThreshold: 1
                failureThreshold: 1
              readinessProbe:
                tcpSocket:
                  port: 9000
                initialDelaySeconds: 20
                periodSeconds: 1
                timeoutSeconds: 1
                successThreshold: 3
                failureThreshold: 1
            - name: nginx
              image: alvinos/nginx:wordpress-v2
              imagePullPolicy: Always
              livenessProbe:
                exec:
                  command:
                    - "/bin/bash"
                    - "-c"
                    - "cat /etc/nginx/nginx.conf"
                initialDelaySeconds: 0
                periodSeconds: 3
                timeoutSeconds: 1
                successThreshold: 1
                failureThreshold: 1
              readinessProbe:
                tcpSocket:
                  port: 80
                initialDelaySeconds: 10
                periodSeconds: 1
                timeoutSeconds: 1
                successThreshold: 3
                failureThreshold: 1
    # Deployment complete
    ---
    # Service for the Deployment
    apiVersion: v1
    kind: Service
    metadata:
      name: wordpress
      namespace: wordpress
    spec:
      selector:
        app: wordpress
      ports:
        - port: 80
          targetPort: 80
          name: http
          nodePort: 30080
        - port: 443
          targetPort: 443
          name: https
      type: NodePort
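
    With everything in one file, deployment and verification is a single apply plus a few checks (the file name wordpress.yaml is assumed):

    kubectl apply -f wordpress.yaml
    kubectl get pods -n mysql
    kubectl get pods -n wordpress
    # the site should answer on any node at the fixed nodePort
    curl -I http://<node-ip>:30080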
    

    To change the cluster.local DNS domain, this is where it is set:
    [root@k8s wordpress]# grep -ro "cluster.local" /etc/kubernetes/
    /etc/kubernetes/manifests/kube-apiserver.yaml:cluster.local
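
    The cluster domain matters because cross-namespace Service names resolve through it. Using the standard Kubernetes DNS form <service>.<namespace>.svc.<cluster-domain> and the credentials from the manifest above, the WordPress Pods can reach the database like this:

    mysql -uroot -p123456 -hmysql.mysql.svc.cluster.local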
    

    IV. kubeadm API Server High Availability

    Software and system requirements

    Software         Version
    CentOS           CentOS Linux release 7.5 or later
    Docker           19.03.12
    Kubernetes       v1.21.3
    Flannel          v0.14.0
    Kernel-lt        kernel-lt-4.4.245-1.el7.elrepo.x86_64.rpm
    Kernel-lt-devel  kernel-lt-devel-4.4.245-1.el7.elrepo.x86_64.rpm

    Node plan

    • IPs in a 192.168 range are suggested, to avoid conflicts with the Kubernetes internal networks

    Host         IP             Spec   Kernel version
    k8s-master1  192.168.15.11  2C 2G  4.4+
    k8s-master2  192.168.15.12  2C 2G  4.4+
    k8s-master3  192.168.15.13  2C 2G  4.4+
    k8s-node1    192.168.15.14  2C 2G  4.4+
    k8s-node2    192.168.15.15  2C 2G  4.4+
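
    All nodes are assumed to resolve one another by hostname; a minimal sketch based on the table above:

    cat >> /etc/hosts << EOF
    192.168.15.11 k8s-master1
    192.168.15.12 k8s-master2
    192.168.15.13 k8s-master3
    192.168.15.14 k8s-node1
    192.168.15.15 k8s-node2
    EOF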

    1. Export the default init configuration for editing (on the m01 master)

    [root@localhost ~]# kubeadm config print init-defaults > init-config.yaml
    [root@localhost ~]# cat init-config.yaml 
    apiVersion: kubeadm.k8s.io/v1beta2
    bootstrapTokens:
    - groups:
      - system:bootstrappers:kubeadm:default-node-token
      token: abcdef.0123456789abcdef    # default token; expires after the 24h ttl
      ttl: 24h0m0s
      usages:
      - signing
      - authentication
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 1.2.3.4  # master node IP
      bindPort: 6443
    nodeRegistration:
      criSocket: /var/run/dockershim.sock
      name: node  # must match this node's hostname
      taints: null
    ---
    apiServer:
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta2
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controllerManager: {}
    dns:
      type: CoreDNS
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: k8s.gcr.io
    kind: ClusterConfiguration
    kubernetesVersion: 1.21.0
    networking:
      dnsDomain: cluster.local
      serviceSubnet: 10.96.0.0/12
    scheduler: {}
    

    2. Modify the configuration

    INIT_IP=`hostname -i`
    INIT_HOST=`hostname`
    cat > init-config.yaml << EOF
    apiVersion: kubeadm.k8s.io/v1beta2
    bootstrapTokens:
    - groups:
      - system:bootstrappers:kubeadm:default-node-token
      token: abcdef.0123456789abcdef
      ttl: 24h0m0s
      usages:
      - signing
      - authentication
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: ${INIT_IP} # this host's IP
      bindPort: 6443
    nodeRegistration:
      criSocket: /var/run/dockershim.sock
      name: ${INIT_HOST} # this host's hostname
      taints: 
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
    ---
    apiServer:
      certSANs:
        - 192.168.15.59 # the HA virtual IP (VIP)
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta2
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controlPlaneEndpoint: 192.168.15.59:8443   # the control-plane endpoint: VIP plus the haproxy port
    controllerManager: {}
    dns:
      type: CoreDNS
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: registry.cn-shanghai.aliyuncs.com/baim0os # your own image registry mirror
    kind: ClusterConfiguration
    kubernetesVersion: 1.21.3
    networking:
      dnsDomain: cluster.local
      podSubnet: 10.244.0.0/16
      serviceSubnet: 10.96.0.0/12
    scheduler: {}
    EOF
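
    Before the real initialization, the rendered configuration can be sanity-checked without changing the node, using kubeadm's dry-run mode:

    kubeadm init --config init-config.yaml --dry-run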
    

    3. Install the high-availability software

      # Install on all three master nodes
      # keepalived + haproxy
      [root@k8s-m-01 ~]# yum install -y keepalived haproxy
    
    # Modify the keepalived configuration
    # The values below differ from node to node
      mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf_bak
      cd /etc/keepalived
      KUBE_APISERVER_IP=`hostname -i`
      cat > /etc/keepalived/keepalived.conf <<EOF
      ! Configuration File for keepalived
      global_defs {
          router_id LVS_DEVEL
      }
      vrrp_script chk_kubernetes {
          script "/etc/keepalived/check_kubernetes.sh"
          interval 2
          weight -5
          fall 3
          rise 2
      }
      vrrp_instance VI_1 {
          state MASTER  # change to BACKUP on the m2 and m3 nodes
          interface eth0
          mcast_src_ip ${KUBE_APISERVER_IP}
          virtual_router_id 51
          priority 100  # priority; use 90 on m2 and 80 on m3
          advert_int 2
          authentication {
              auth_type PASS
              auth_pass K8SHA_KA_AUTH
          }
          virtual_ipaddress {
              192.168.15.59
          }
      }
    EOF
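
    The configuration above references /etc/keepalived/check_kubernetes.sh, which these steps never create. A minimal sketch of such a script (assumed behaviour: exit non-zero when the local apiserver stops answering, so keepalived lowers this node's priority):

    cat > /etc/keepalived/check_kubernetes.sh << 'SH'
    #!/bin/bash
    # probe the local kube-apiserver health endpoint; fail the check on timeout or refusal
    curl -sk --max-time 2 https://127.0.0.1:6443/healthz > /dev/null || exit 1
    SH
    chmod +x /etc/keepalived/check_kubernetes.sh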
    
    # Reload systemd and start keepalived
    [root@k8s-m-01 keepalived]# systemctl daemon-reload
    [root@k8s-m-01 /etc/keepalived]# systemctl enable --now keepalived
    # Verify that keepalived is running
    [root@k8s-m-01 keepalived]# systemctl status keepalived.service 
    ● keepalived.service - LVS and VRRP High Availability Monitor
       Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
       Active: active (running) since Sun 2021-08-01 14:48:23 CST; 27s ago
    [root@k8s-m-01 keepalived]# ip a |grep 116
        inet 172.16.1.116/32 scope global eth1
    
    # Modify the haproxy configuration
    # haproxy load-balances across the three apiservers
      cat > /etc/haproxy/haproxy.cfg <<EOF
      global
        maxconn  2000
        ulimit-n  16384
        log  127.0.0.1 local0 err
        stats timeout 30s
      defaults
        log global
        mode  http
        option  httplog
        timeout connect 5000
        timeout client  50000
        timeout server  50000
        timeout http-request 15s
        timeout http-keep-alive 15s
      frontend monitor-in
        bind *:33305
        mode http
        option httplog
        monitor-uri /monitor
      listen stats
        bind    *:8006
        mode    http
        stats   enable
        stats   hide-version
        stats   uri       /stats
        stats   refresh   30s
        stats   realm     Haproxy Statistics
        stats   auth      admin:admin
      frontend k8s-master
        bind 0.0.0.0:8443
        bind 127.0.0.1:8443
        mode tcp
        option tcplog
        tcp-request inspect-delay 5s
        default_backend k8s-master
      backend k8s-master
        mode tcp
        option tcplog
        option tcp-check
        balance roundrobin
        default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
        server m01    192.168.15.51:6443  check inter 2000 fall 2 rise 2 weight 100
        server m02    192.168.15.52:6443  check inter 2000 fall 2 rise 2 weight 100
        server m03    192.168.15.53:6443  check inter 2000 fall 2 rise 2 weight 100
    EOF
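
    A quick liveness check against the monitor frontend defined above (port 33305 and the /monitor URI come straight from the config; the stats page is at :8006/stats, admin:admin):

    curl http://127.0.0.1:33305/monitor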
    
    # Start haproxy
    [root@k8s-m-01 keepalived]# systemctl daemon-reload 
    [root@k8s-m-01 /etc/keepalived]# systemctl enable --now haproxy.service 
    # Check the service status
    [root@k8s-m-01 keepalived]# systemctl status haproxy.service
    ● haproxy.service - HAProxy Load Balancer
       Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
       Active: active (running) since Fri 2021-07-16 21:12:00 CST; 27s ago
    
    
    # List the images kubernetes needs
    # 1. Default image list
    [root@k8s-m-01 ~]# kubeadm config images list
    k8s.gcr.io/kube-apiserver:v1.21.3
    k8s.gcr.io/kube-controller-manager:v1.21.3
    k8s.gcr.io/kube-scheduler:v1.21.3
    k8s.gcr.io/kube-proxy:v1.21.3
    k8s.gcr.io/pause:3.4.1
    k8s.gcr.io/etcd:3.4.13-0
    k8s.gcr.io/coredns/coredns:v1.8.0
    quay.io/coreos/flannel:v0.14.0
    # 2. Image list from an Aliyun mirror repository
    [root@k8s-m-01 ~]# kubeadm config images list --image-repository=registry.cn-shanghai.aliyuncs.com/mmk8s
    registry.cn-shanghai.aliyuncs.com/mmk8s/kube-apiserver:v1.21.3
    registry.cn-shanghai.aliyuncs.com/mmk8s/kube-controller-manager:v1.21.3
    registry.cn-shanghai.aliyuncs.com/mmk8s/kube-scheduler:v1.21.3
    registry.cn-shanghai.aliyuncs.com/mmk8s/kube-proxy:v1.21.3
    registry.cn-shanghai.aliyuncs.com/mmk8s/pause:3.4.1
    registry.cn-shanghai.aliyuncs.com/mmk8s/etcd:3.4.13-0
    registry.cn-shanghai.aliyuncs.com/mmk8s/coredns:v1.8.0
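
    The images can be pre-pulled on every node so init and join do not stall on downloads; kubeadm supports this directly:

    kubeadm config images pull --image-repository=registry.cn-shanghai.aliyuncs.com/mmk8s --kubernetes-version=v1.21.3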
    

    4. Initialize the cluster (m01)

    kubeadm init --config init-config.yaml --upload-certs
    
    # Initialize the cluster
    [root@k8s-m-01 ~]#  kubeadm init --config init-config.yaml --upload-certs
    You can now join any number of the control-plane node running the following command on each as root:
    # copy this control-plane join command for the other master nodes
      kubeadm join 172.16.1.116:8443 --token abcdef.0123456789abcdef \
    	--discovery-token-ca-cert-hash sha256:3c24cf3218a243148f20c6804d3766d2b6cd5dadc620313d0cf2dcbfd1626c5d \
    	--control-plane --certificate-key 1e852aa82be85e8b1b4776cce3a0519b1d0b1f76e5633e5262e2436e8f165993
    # copy this worker join command for the node machines
    Then you can join any number of worker nodes by running the following on each as root:
    
    kubeadm join 172.16.1.116:8443 --token abcdef.0123456789abcdef \
    	--discovery-token-ca-cert-hash sha256:3c24cf3218a243148f20c6804d3766d2b6cd5dadc620313d0cf2dcbfd1626c5d 
    	
    # Regenerate the join command for worker nodes on a master if needed
    # (each run creates a fresh token; the CA cert hash stays the same)
    [root@k8s-m-01 ~]# kubeadm token create --print-join-command
    kubeadm join 172.16.1.116:8443 --token pfu0ek.ndis39t916v9clq1 --discovery-token-ca-cert-hash sha256:3c24cf3218a243148f20c6804d3766d2b6cd5dadc620313d0cf2dcbfd1626c5d 
    # After initialization completes, restart kubelet
    [root@k8s-m-01 ~]# systemctl restart kubelet.service
    
    # Label the worker nodes (run on the master01 node)
    [root@k8s-m-01 ~]# kubectl label nodes k8s-n-01 node-role.kubernetes.io/node=n01
    node/k8s-n-01 labeled
    [root@k8s-m-01 ~]# kubectl label nodes k8s-n-02 node-role.kubernetes.io/node=n02
    node/k8s-n-02 labeled
    [root@k8s-m-01 ~]# kubectl get node
    NAME       STATUS     ROLES                  AGE     VERSION
    k8s-m-01   Ready      control-plane,master   73m     v1.21.3
    k8s-m-02   Ready      control-plane,master   63m     v1.21.3
    k8s-m-03   Ready      control-plane,master   63m     v1.21.3
    k8s-n-01   Ready      node                   2m40s   v1.21.3
    k8s-n-02   Ready      node                   62m     v1.21.3
    
    # Set up kubectl access for the current user
    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    # If running as root, the admin kubeconfig can be exported instead (optional)
    # temporary
    [root@k8s-m-01 ~]# export KUBECONFIG=/etc/kubernetes/admin.conf
    # permanent
    [root@k8s-m-01 ~]# vim /etc/profile.d/kubernetes.sh
     export KUBECONFIG=/etc/kubernetes/admin.conf
    [root@k8s-m-01 ~]# source /etc/profile
    
    # Enable kubectl command completion (run on all nodes)
    yum install -y bash-completion
    source /usr/share/bash-completion/bash_completion
    source <(kubectl completion bash)
    echo "source <(kubectl completion bash)" >> ~/.bashrc
    
    

    Troubleshooting

    # 1. Joining a node to the cluster may fail with the following error:
    [ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables contents are not set to 1
    [preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
    To see the stack trace of this error execute with --v=5 or higher
    
    PS: make sure Docker is installed and running first, then retry the join!
    # Cause:
    The br_netfilter module is not loaded and the bridge-nf-call-iptables sysctl is not set to 1, so kubeadm's preflight check fails
    # Fix:
    Run the following three commands, then run the join command again:
    modprobe br_netfilter
    echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
    echo 1 > /proc/sys/net/ipv4/ip_forward
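
    These settings do not survive a reboot; the standard way to persist them is a sysctl drop-in:

    cat > /etc/sysctl.d/k8s.conf << EOF
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward = 1
    EOF
    sysctl --system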
    
    # 2. scheduler and controller-manager show Unhealthy in kubectl get cs
    [root@k8s-m-01 ~]# kubectl get cs
    Warning: v1 ComponentStatus is deprecated in v1.19+
    NAME                 STATUS      MESSAGE                                                                                       ERROR
    scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused   
    controller-manager   Unhealthy   Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused   
    etcd-0               Healthy     {"health":"true"}    
    1. Fix: comment out the --port=0 flag in the static Pod manifests, then restart kubelet
    [root@k8s-m-01 ~]# vim /etc/kubernetes/manifests/kube-controller-manager.yaml 
     #- --port=0
    [root@k8s-m-01 ~]# vim /etc/kubernetes/manifests/kube-scheduler.yaml 
     #- --port=0
     [root@k8s-m-01 ~]# systemctl restart kubelet.service
    
    2. Check the status again
    [root@k8s-m-01 ~]# kubectl get cs
    Warning: v1 ComponentStatus is deprecated in v1.19+
    NAME                 STATUS    MESSAGE             ERROR
    controller-manager   Healthy   ok                  
    scheduler            Healthy   ok                  
    etcd-0               Healthy   {"health":"true"}  
    

    5. Install the Calico network plugin

    Calico is a pure layer-3 networking solution that provides multi-host communication for OpenStack VMs and Docker containers. Unlike overlay networks such as flannel or libnetwork's overlay driver, it takes a pure layer-3 approach, replacing virtual switching with virtual routing: each virtual router advertises reachability information (routes) to the rest of the data center over BGP.

    # Download calico
    curl https://docs.projectcalico.org/manifests/calico.yaml -O
    
    # Deploy calico
    kubectl apply -f calico.yaml
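
    Once the Calico Pods are up, all nodes should move to Ready; assuming this calico.yaml generation installs into kube-system with the k8s-app=calico-node label, the rollout can be watched with:

    kubectl get pods -n kube-system -l k8s-app=calico-node
    kubectl get nodes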
    

    6. Run the join command on each node

    # Set the node roles
    kubectl label nodes n01 node-role.kubernetes.io/node=n01
    kubectl label nodes n02 node-role.kubernetes.io/node=n02
    
    # Check the cluster status
    [root@m01 ~]# kubectl get nodes
    NAME   STATUS   ROLES                  AGE     VERSION
    m01    Ready    control-plane,master   36m     v1.21.3
    m02    Ready    control-plane,master   6m47s   v1.21.3
    m03    Ready    control-plane,master   5m50s   v1.21.3
    n01    Ready    node                   5m      v1.21.3
    n02    Ready    node                   4m42s   v1.21.3
    