
    kubernetes v1.15.4 Deployment Guide

     

     

    Requirements

     

    The following configuration is recommended on Alibaba Cloud (your own virtual machines or a private cloud also work):

    • 3 ECS instances, 2 cores / 4 GB RAM each (burstable t5 instances, ecs.t5-c1m2.large or equivalent; about ¥0.4/hour per instance, no charge while stopped)

    • CentOS 7.6

     

    The installed software versions will be:

    • Kubernetes v1.15.4

      • calico 3.8.2

      • nginx-ingress 1.5.3

    • Docker 18.09.7

     

    Check CentOS / hostname

    # Run on both the master and worker nodes

    cat /etc/redhat-release

     

    # The output of hostname here will be this machine's node name in the Kubernetes cluster

    # localhost cannot be used as a node name

    hostname

     

    # Use the lscpu command to verify the CPU information

    # Architecture: x86_64    this guide does not support the arm architecture

    # CPU(s): 2               there must be at least 2 CPU cores

    lscpu
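
    The same requirement checks can be scripted. The following is an optional sketch that only encodes the requirements stated above (x86_64, at least 2 CPU cores, hostname not localhost):

    # Optional sketch: fail fast if a machine does not meet the requirements above
    [ "$(uname -m)" = "x86_64" ] || { echo "unsupported architecture: $(uname -m)"; exit 1; }
    [ "$(nproc)" -ge 2 ] || { echo "at least 2 CPU cores are required"; exit 1; }
    [ "$(hostname)" != "localhost" ] || { echo "localhost cannot be used as a node name"; exit 1; }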

     

    Set the hostname

    # Set the hostname

    hostnamectl set-hostname your-new-host-name

    # Check the result

    hostnamectl status

    # Add a hosts entry so the hostname resolves

    echo "127.0.0.1 $(hostname)" >> /etc/hosts

     

    Install docker / kubelet

    Run the following script as root on all nodes to install the software:

    • docker

    • nfs-utils

    • kubectl / kubeadm / kubelet

     

    #!/bin/bash
    
    # Run on both the master and worker nodes
    
    # Install docker
    # Reference documentation:
    # https://docs.docker.com/install/linux/docker-ce/centos/
    # https://docs.docker.com/install/linux/linux-postinstall/
    
    # Remove old versions
    yum remove -y docker \
    docker-client \
    docker-client-latest \
    docker-common \
    docker-latest \
    docker-latest-logrotate \
    docker-logrotate \
    docker-selinux \
    docker-engine-selinux \
    docker-engine
    
    # Set up the yum repository
    yum install -y yum-utils \
    device-mapper-persistent-data \
    lvm2
    yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    
    # Install and start docker
    yum install -y docker-ce-18.09.7 docker-ce-cli-18.09.7 containerd.io
    systemctl enable docker
    systemctl start docker
    
    # Install nfs-utils
    # nfs-utils must be installed before NFS network storage can be mounted
    yum install -y nfs-utils
    
    # Disable the firewall
    systemctl stop firewalld
    systemctl disable firewalld
    
    # Disable SELinux
    setenforce 0
    sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
    
    # Disable swap
    swapoff -a
    yes | cp /etc/fstab /etc/fstab_bak
    cat /etc/fstab_bak |grep -v swap > /etc/fstab
    
    # Modify /etc/sysctl.conf
    # If the settings already exist, update them
    sed -i "s#^net.ipv4.ip_forward.*#net.ipv4.ip_forward=1#g"  /etc/sysctl.conf
    sed -i "s#^net.bridge.bridge-nf-call-ip6tables.*#net.bridge.bridge-nf-call-ip6tables=1#g"  /etc/sysctl.conf
    sed -i "s#^net.bridge.bridge-nf-call-iptables.*#net.bridge.bridge-nf-call-iptables=1#g"  /etc/sysctl.conf
    # They may not exist yet; append them
    echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
    echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf
    echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
    # Apply the settings
    sysctl -p
    
    # Configure the Kubernetes yum repository
    cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
    enabled=1
    gpgcheck=0
    repo_gpgcheck=0
    gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
           http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    
    # Remove old versions
    yum remove -y kubelet kubeadm kubectl
    
    # Install kubelet, kubeadm and kubectl
    yum install -y kubelet-1.15.4 kubeadm-1.15.4 kubectl-1.15.4
    
    # Change the docker cgroup driver to systemd
    # # i.e. in /usr/lib/systemd/system/docker.service, change the line ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
    # # to ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd
    # Without this change, the following error may appear when adding worker nodes
    # [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". 
    # Please follow the guide at https://kubernetes.io/docs/setup/cri/
    sed -i "s#^ExecStart=/usr/bin/dockerd.*#ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd#g" /usr/lib/systemd/system/docker.service
    
    # Configure a docker registry mirror to make image downloads faster and more reliable
    # If your access to https://hub.docker.com is already fast and stable, you can skip this step
    curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io
    
    # Restart docker and start kubelet
    systemctl daemon-reload
    systemctl restart docker
    systemctl enable kubelet && systemctl start kubelet
    
    docker version
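
    After the script finishes, it is worth confirming that the expected versions were installed and that docker is now using the systemd cgroup driver. A quick check:

    # Verify installed versions and the cgroup driver
    docker info 2>/dev/null | grep -i "cgroup driver"   # expect: Cgroup Driver: systemd
    kubeadm version -o short                            # expect: v1.15.4
    kubelet --version                                   # expect: Kubernetes v1.15.4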

     

    
    

    Initialize the master node

     

    # Run only on the master node

     

    # Replace x.x.x.x with the master node's actual IP (use the internal/private IP)

    # export only takes effect in the current shell session; if you open a new shell window and want to continue the installation, re-run the export commands here

    export MASTER_IP=x.x.x.x

     

    # Replace apiserver.master with the dnsName you prefer (using the master's hostname as APISERVER_NAME is not recommended)

    export APISERVER_NAME=apiserver.master

     

    # The subnet for Kubernetes pods; it is created by Kubernetes during installation and does not have to exist in your physical network beforehand

    export POD_SUBNET=10.100.0.1/20

    echo "${MASTER_IP} ${APISERVER_NAME}" >> /etc/hosts

     

     

    #!/bin/bash
    
    # Run only on the master node
    
    # Full list of configuration options: https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2
    rm -f ./kubeadm-config.yaml
    cat <<EOF > ./kubeadm-config.yaml
    apiVersion: kubeadm.k8s.io/v1beta2
    kind: ClusterConfiguration
    kubernetesVersion: v1.15.4
    imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
    controlPlaneEndpoint: "${APISERVER_NAME}:6443"
    networking:
      serviceSubnet: "10.96.0.0/16"
      podSubnet: "${POD_SUBNET}"
      dnsDomain: "cluster.local"
    EOF
    
    # kubeadm init
    # Depending on your server's network speed, this takes 3 - 10 minutes
    kubeadm init --config=kubeadm-config.yaml --upload-certs
    
    # Configure kubectl
    rm -rf /root/.kube/
    mkdir /root/.kube/
    cp -i /etc/kubernetes/admin.conf /root/.kube/config
    
    # Install the calico network plugin
    # Reference: https://docs.projectcalico.org/v3.8/getting-started/kubernetes/
    rm -f calico.yaml
    wget https://docs.projectcalico.org/v3.8/manifests/calico.yaml
    sed -i "s#192.168.0.0/16#${POD_SUBNET}#" calico.yaml
    kubectl apply -f calico.yaml

     

    Check the result of the master initialization

     

    # Run only on the master node

     

    # Run the following command and wait 3-10 minutes, until all pods are in the Running state

    watch kubectl get pod -n kube-system -o wide

     

    # Check the master node's initialization result

    kubectl get nodes -o wide

     

     

     

    Initialize the worker nodes

    Get the join command parameters

    Run on the master node

    # Run only on the master node

    kubeadm token create --print-join-command

     

    Initialize the workers

    Run on all worker nodes

    # Run only on the worker nodes

    # Replace ${MASTER_IP} with the master node's actual IP

    # Replace ${APISERVER_NAME} with the APISERVER_NAME used when initializing the master node

    # The variables may expand to empty strings, because the worker nodes do not have these two environment variables set

    # In that case, manually add a "masterip masterhostname" line to the hosts file

    echo "${MASTER_IP} ${APISERVER_NAME}" >> /etc/hosts

     

    # Replace with the output of the kubeadm token create command on the master node

    kubeadm join apiserver.master:6443 --token mpfjma.4vjjg8flqihor4vt --discovery-token-ca-cert-hash sha256:6f7a8e40a810323672de5eee6f4d19aa2dbdb38411845a1bf5dd63485c43d303

     

     

    Check the join result

    Run on the master node

     

    kubectl get nodes

     

    NAME              STATUS   ROLES    AGE     VERSION
    demo-master-a-1   Ready    master   5m3s    v1.15.4
    demo-worker-a-1   Ready    <none>   2m26s   v1.15.4
    demo-worker-a-2   Ready    <none>   3m56s   v1.15.4
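
    If a node stays NotReady at this point, the usual cause is that the network plugin is still starting. Inspecting the calico pods and the node's conditions narrows it down:

    # Troubleshooting sketch for a NotReady node
    kubectl get pods -n kube-system -o wide | grep calico
    kubectl describe node demo-worker-a-1   # use the node name from the output above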

     

    Using an Ingress Controller

    Install the Ingress Controller

    Edit mandatory.yaml

    Replace the images with the Alibaba Cloud mirror; the manifest is based on:

    https://github.com/kubernetes/ingress-nginx/blob/master/deploy/static/mandatory.yaml

    apiVersion: v1
    kind: Namespace
    metadata:
      name: ingress-nginx
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
    
    ---
    
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: nginx-configuration
      namespace: ingress-nginx
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
    
    ---
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: tcp-services
      namespace: ingress-nginx
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
    
    ---
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: udp-services
      namespace: ingress-nginx
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
    
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: nginx-ingress-serviceaccount
      namespace: ingress-nginx
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
    
    ---
    apiVersion: rbac.authorization.k8s.io/v1beta1
    kind: ClusterRole
    metadata:
      name: nginx-ingress-clusterrole
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
    rules:
      - apiGroups:
          - ""
        resources:
          - configmaps
          - endpoints
          - nodes
          - pods
          - secrets
        verbs:
          - list
          - watch
      - apiGroups:
          - ""
        resources:
          - nodes
        verbs:
          - get
      - apiGroups:
          - ""
        resources:
          - services
        verbs:
          - get
          - list
          - watch
      - apiGroups:
          - ""
        resources:
          - events
        verbs:
          - create
          - patch
      - apiGroups:
          - "extensions"
          - "networking.k8s.io"
        resources:
          - ingresses
        verbs:
          - get
          - list
          - watch
      - apiGroups:
          - "extensions"
          - "networking.k8s.io"
        resources:
          - ingresses/status
        verbs:
          - update
    
    ---
    apiVersion: rbac.authorization.k8s.io/v1beta1
    kind: Role
    metadata:
      name: nginx-ingress-role
      namespace: ingress-nginx
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
    rules:
      - apiGroups:
          - ""
        resources:
          - configmaps
          - pods
          - secrets
          - namespaces
        verbs:
          - get
      - apiGroups:
          - ""
        resources:
          - configmaps
        resourceNames:
          # Defaults to "<election-id>-<ingress-class>"
          # Here: "<ingress-controller-leader>-<nginx>"
          # This has to be adapted if you change either parameter
          # when launching the nginx-ingress-controller.
          - "ingress-controller-leader-nginx"
        verbs:
          - get
          - update
      - apiGroups:
          - ""
        resources:
          - configmaps
        verbs:
          - create
      - apiGroups:
          - ""
        resources:
          - endpoints
        verbs:
          - get
    
    ---
    apiVersion: rbac.authorization.k8s.io/v1beta1
    kind: RoleBinding
    metadata:
      name: nginx-ingress-role-nisa-binding
      namespace: ingress-nginx
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: nginx-ingress-role
    subjects:
      - kind: ServiceAccount
        name: nginx-ingress-serviceaccount
        namespace: ingress-nginx
    
    ---
    apiVersion: rbac.authorization.k8s.io/v1beta1
    kind: ClusterRoleBinding
    metadata:
      name: nginx-ingress-clusterrole-nisa-binding
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: nginx-ingress-clusterrole
    subjects:
      - kind: ServiceAccount
        name: nginx-ingress-serviceaccount
        namespace: ingress-nginx
    
    ---
    
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx-ingress-controller
      namespace: ingress-nginx
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
    spec:
      replicas: 1
      selector:
        matchLabels:
          app.kubernetes.io/name: ingress-nginx
          app.kubernetes.io/part-of: ingress-nginx
      template:
        metadata:
          labels:
            app.kubernetes.io/name: ingress-nginx
            app.kubernetes.io/part-of: ingress-nginx
          annotations:
            prometheus.io/port: "10254"
            prometheus.io/scrape: "true"
        spec:
          # wait up to five minutes for the drain of connections
          terminationGracePeriodSeconds: 300
          serviceAccountName: nginx-ingress-serviceaccount
          nodeSelector:
            kubernetes.io/os: linux
          containers:
            - name: nginx-ingress-controller
              image: registry.aliyuncs.com/google_containers/nginx-ingress-controller:0.26.1
              args:
                - /nginx-ingress-controller
                - --configmap=$(POD_NAMESPACE)/nginx-configuration
                - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
                - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
                - --publish-service=$(POD_NAMESPACE)/ingress-nginx
                - --annotations-prefix=nginx.ingress.kubernetes.io
              securityContext:
                allowPrivilegeEscalation: true
                capabilities:
                  drop:
                    - ALL
                  add:
                    - NET_BIND_SERVICE
                # www-data -> 33
                runAsUser: 33
              env:
                - name: POD_NAME
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.name
                - name: POD_NAMESPACE
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.namespace
              ports:
                - name: http
                  containerPort: 80
                - name: https
                  containerPort: 443
              livenessProbe:
                failureThreshold: 3
                httpGet:
                  path: /healthz
                  port: 10254
                  scheme: HTTP
                initialDelaySeconds: 10
                periodSeconds: 10
                successThreshold: 1
                timeoutSeconds: 10
              readinessProbe:
                failureThreshold: 3
                httpGet:
                  path: /healthz
                  port: 10254
                  scheme: HTTP
                periodSeconds: 10
                successThreshold: 1
                timeoutSeconds: 10
              lifecycle:
                preStop:
                  exec:
                    command:
                      - /wait-shutdown
    
    ---
    

    Install nginx-ingress-controller

    kubectl apply -f mandatory.yaml
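
    Verify that the controller comes up before moving on:

    # Check that the ingress controller pod reaches the Running state
    kubectl get pods -n ingress-nginx -o wide
    kubectl get deployment nginx-ingress-controller -n ingress-nginx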

     

    Expose nginx-ingress ports 80 and 443

    Edit ingress-service.yaml

    https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/baremetal/service-nodeport.yaml

    apiVersion: v1
    kind: Service
    metadata:
      name: ingress-nginx
      namespace: ingress-nginx
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
    spec:
      type: NodePort
      ports:
        - name: http
          port: 80
          targetPort: 80
          protocol: TCP
          nodePort: 80   # HTTP requests are exposed externally on node port 80
        - name: https
          port: 443
          targetPort: 443
          protocol: TCP
          nodePort: 443  # HTTPS requests are exposed externally on node port 443
      selector:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
     
    ---
    
    Modify the nodePort port range

    The default NodePort range is 30000-32767, so the nodePort values 80 and 443 above require extending the range first.

    1. Edit kube-apiserver.yaml

    vim /etc/kubernetes/manifests/kube-apiserver.yaml

    2. Find the --service-cluster-ip-range line and insert the following line directly below it

    - --service-node-port-range=1-65535

    3. After changing the configuration, restart the kubelet (the kube-apiserver static pod is recreated automatically)

    systemctl daemon-reload

    systemctl restart kubelet

    systemctl status kubelet
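
    Once the kube-apiserver static pod has restarted, you can confirm the flag took effect (kubeadm labels the static pod with component=kube-apiserver):

    # Confirm the flag is present in the running apiserver pod
    kubectl -n kube-system get pod -l component=kube-apiserver -o yaml | grep service-node-port-range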

     

    Start the nginx-ingress Service

    kubectl apply -f ingress-service.yaml
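
    Afterwards, check that the Service holds the requested node ports and that the controller answers on port 80 (nginx returns 404 for unmatched requests, which confirms it is listening):

    # Verify the NodePort service and that the controller responds
    kubectl get svc ingress-nginx -n ingress-nginx
    curl -i http://x.x.x.x/   # replace x.x.x.x with any node IP; expect an HTTP 404 until routing rules exist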

     

    Define L7 routing rules in an Ingress

    Edit nfys-ingress.yaml

    apiVersion: extensions/v1beta1
    kind: Ingress
    metadata:
      name: ingress-for-nginx  # name of the Ingress, used only for identification
      annotations:
        kubernetes.io/ingress.class: "nginx"
    spec:
      rules:                      # L7 routing rules defined in the Ingress
      - http:
    #    host: local.test.kinglian.cn   # route by virtual hostname
          paths:                  # route by path
          - path: /
            backend:
              serviceName: nginx-service  # backend: the previously created nginx-service
              servicePort: 80
          - path: /admin
            backend:
              serviceName: uaas-service  # backend: the previously created uaas-service
              servicePort: 2019
          - path: /dims
            backend:
              serviceName: dims-service  # backend: the previously created dims-service
              servicePort: 2021
    
    ---
    

    Apply the routing rules

    kubectl apply -f nfys-ingress.yaml
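
    Then confirm the rules were registered:

    # Verify the Ingress object and test a routed path
    kubectl get ingress ingress-for-nginx
    curl -i http://x.x.x.x/admin   # replace x.x.x.x with any node IP; forwarded to uaas-service once that Service exists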

    Configure harbor registry login credentials

    1. First log in to harbor with docker

    The username and password are the ones registered on harbor, and the account needs pull permission for the repository in question, otherwise it cannot access it. Example: docker login hub.yxtc.com:8081. After logging in, a ~/.docker/config.json file is generated; its content looks like this:

    {
            "auths": {
                    "hub.yxtc.com:8081": {
                            "auth": "Y3I3Olh1MTIzNDU2MjU="
                    }
            }
    }     
    


    Here hub.yxtc.com:8081 is the address of the harbor server.

     

    2. Base64-encode config.json

    Command:

    cat ~/.docker/config.json |base64 -w 0
    ewoJImF1dGhzIjogewoJCSJyZWdpc3RyeS5raW5nbGlhbi5jbiI6IHsKCQkJImF1dGgiOiAiZW1oaGJtZHRhRHBhYldneE1Ea3pORFE1TWpFPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTguMDkuNyAobGludXgpIgoJfQp9
    

    3. Create the secret.yaml file

    File content:

    apiVersion: v1
    kind: Secret
    metadata:
      name: mysecret # referenced later by imagePullSecrets
    data:
      .dockerconfigjson: ewoJImF1dGhzIjogewoJCSJyZWdpc3RyeS5raW5nbGlhbi5jbiI6IHsKCQkJImF1dGgiOiAiZW1oaGJtZHRhRHBhYldneE1Ea3pORFE1TWpFPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTguMDkuNyAobGludXgpIgoJfQp9
    type: kubernetes.io/dockerconfigjson
    


     

    4. Create the secret

    Command:

    kubectl create -f secret.yaml
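
    A quick check that the secret exists and has the expected type:

    kubectl get secret mysecret -o yaml   # type should be kubernetes.io/dockerconfigjson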

     

    5. Create a pod

    Use imagePullSecrets to reference the secret; the relevant part of the pod's yaml looks like this:

      imagePullSecrets:
        - name: mysecret  
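
    The snippet above is only a fragment. For context, a minimal Pod pulling a private image from the harbor server might look like the following sketch, written as a heredoc in the style of the earlier scripts; the image name is hypothetical:

    # Minimal sketch of a Pod using imagePullSecrets (image name is illustrative)
    cat <<EOF | kubectl apply -f -
    apiVersion: v1
    kind: Pod
    metadata:
      name: private-image-demo
    spec:
      containers:
      - name: app
        image: hub.yxtc.com:8081/demo/app:latest   # hypothetical image on the harbor server
      imagePullSecrets:
      - name: mysecret   # the secret created in step 4
    EOF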
    

      

    Deploy a project

    After publishing as described below:

    1. One container will be running

    2. No ports will be exposed directly

    3. /opt inside the container will be mounted on an NFS share backed by /nfs-share/logs on the NFS server

    4. Requests must be routed to it through the ingress

     

    Edit uaas.yaml

    apiVersion: extensions/v1beta1     # depends on the k8s cluster version; kubectl api-versions lists the versions the cluster supports
    kind: Deployment        # the type of this object; here we use a Deployment
    metadata:               # metadata: basic attributes and information of the Deployment
      name: uaas-deployment        # name of the Deployment
      labels:           # labels flexibly select one or more resources; keys and values are user-defined and multiple pairs may be set
        app: uaas  # label this Deployment with key app and value uaas
    spec:           # the desired state of the Deployment, i.e. how it should run in k8s
      replicas: 1   # create one instance of the application
      selector:         # label selector, works together with the labels above
        matchLabels: # select resources labeled app: uaas
          app: uaas
      template:         # template for the Pods to select or create
        metadata:   # Pod metadata
          labels:   # Pod labels; the selector above matches Pods carrying these labels
            app: uaas
        spec:           # the desired Pod spec (what is deployed in the pod)
          containers:       # containers, the same kind of container as in docker
          - name: uaas     # container name
            image: registry.demo.cn/prod/nfys-uaas:20191108      # image used to create the container
            imagePullPolicy: Always # Always: always pull / IfNotPresent: pull only if absent (default) / Never: use local images only
            ports:
            - containerPort: 2019  # port the image listens on
            volumeMounts:
            - name: uaas-persistent-storage
              mountPath: /opt # mount path (inside the container)
          volumes:
          - name: uaas-persistent-storage
            nfs:
              path: /nfs-share/logs # path exported by the NFS server
              server: 192.168.1.61 #nfs-server 
          imagePullSecrets:
          - name: nfys-secret
    ---
    
    apiVersion: v1
    kind: Service
    metadata:
      name: uaas-service   # name of the Service
      labels:       # the Service's own labels
        app: uaas  # label this Service with key app and value uaas
    spec:       # definition of the Service: how it selects Pods and how it is accessed
      selector:         # label selector
        app: uaas  # select Pods labeled app: uaas
      ports:
      - name: uaas-port    # name of the port
        protocol: TCP           # protocol, TCP or UDP
        port: 2019            # other pods in the cluster reach the Service on this port
        targetPort: 2019      # requests are forwarded to this port on the matching Pods (the container's port)
    


    Deploy the project with uaas.yaml

    kubectl apply -f uaas.yaml
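
    After applying, verify the Deployment, the Service, and the NFS mount inside the container:

    # Verify the deployment and service
    kubectl get deployment uaas-deployment
    kubectl get svc uaas-service
    # Check that /opt is mounted from the NFS server (assumes the image contains df)
    kubectl exec -it $(kubectl get pod -l app=uaas -o jsonpath='{.items[0].metadata.name}') -- df -h /opt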

     

    Storage: Volumes

     

    Pod health checks

    LivenessProbe:

    Determines whether the container is alive, i.e. whether the Pod is in the running state. If the LivenessProbe detects that the container is unhealthy, the kubelet kills the container and then decides, according to the container's restart policy, whether to restart it. If a container does not define a LivenessProbe, the kubelet treats the probe as always returning success.

     

    ReadinessProbe:

    Determines whether the container has finished starting, i.e. whether its Ready condition is True and it can accept requests. If the ReadinessProbe fails, Ready becomes False and the controller removes the Pod's endpoint from the corresponding Service's endpoint list, so no further requests are routed to the Pod until a later probe succeeds.
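
    Both probe types are used by the nginx-ingress-controller deployed earlier (see the livenessProbe and readinessProbe sections of mandatory.yaml). You can inspect their configuration and current status on the running pod:

    # Show probe configuration and recent probe events for the ingress controller
    kubectl describe pod -n ingress-nginx -l app.kubernetes.io/name=ingress-nginx | grep -E -A 3 "Liveness|Readiness"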

     
