  • Controllers

    1. Deployment  2. StatefulSet  3. DaemonSet  4. Job  5. CronJob

    The relationship between Pods and controllers:
    1. Controllers are objects that manage and run containers on the cluster.
    2. They are associated with Pods through a label selector, i.e. via labels (see the sketch below).
    3. Through controllers, Pods gain application operations such as scaling and upgrading.
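    You can reproduce the label match a controller performs by hand. A small sketch, assuming the app: nginx-deploy label used by the Deployment below:

    # list the pods a controller with this selector would manage
    kubectl get pods -l app=nginx-deploy
    # show every pod together with its labels
    kubectl get pods --show-labels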

    1. Deployment

    Deploys stateless applications
    Manages Pods and ReplicaSets
    Provides rollout, replica configuration, rolling upgrade, and rollback features
    Offers declarative updates, e.g. updating only the image

    Use case: web services

    [root@master01 yaml_doc]# cat deploy-nginx.yaml 
    apiVersion: apps/v1
    kind: Deployment   # resource type: Deployment
    metadata:
      name: nginx-deployment
      namespace: default
      labels:
        app: nginx
    
    spec:
      replicas: 3
      selector:
        matchLabels:
          app: nginx-deploy
      template:
        metadata:
          labels:
            app: nginx-deploy
    
        spec:
          containers:
          - name: nginx
            image: 10.192.27.111/library/nginx:1.14
            imagePullPolicy: IfNotPresent
            command: [ "/bin/bash", "-ce", "tail -f /dev/null" ]
            ports:
            - containerPort: 80
    
    ---
    
    apiVersion: v1
    kind: Service
    metadata:
      name: nginx-service-mxxl
    spec:
      type: NodePort
      ports:
      - port: 80
        nodePort: 30080
      selector:
        app: nginx-deploy
    [root@master01 yaml_doc]# 
    [root@master01 yaml_doc]# kubectl create -f deploy-nginx.yaml 
    deployment.apps/nginx-deployment created
    service/nginx-service-mxxl created
    [root@master01 yaml_doc]# kubectl get all
    NAME                                    READY   STATUS    RESTARTS   AGE
    pod/nginx-deployment-76f6cdc8c4-9gbkz   1/1     Running   0          3m43s
    pod/nginx-deployment-76f6cdc8c4-bfw45   1/1     Running   0          3m43s
    pod/nginx-deployment-76f6cdc8c4-sbcrm   1/1     Running   0          3m43s
    
    NAME                         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
    service/kubernetes           ClusterIP   10.0.0.1     <none>        443/TCP        11d
    service/nginx-service-mxxl   NodePort    10.0.0.69    <none>        80:30080/TCP   3m43s
    
    NAME                               READY   UP-TO-DATE   AVAILABLE   AGE
    deployment.apps/nginx-deployment   3/3     3            3           3m43s
    
    NAME                                          DESIRED   CURRENT   READY   AGE
    replicaset.apps/nginx-deployment-76f6cdc8c4   3         3         3       3m43s
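    The "replica configuration" feature can be exercised directly: scaling the Deployment only changes the desired count, and the ReplicaSet converges on it. A sketch (the target count of 5 is arbitrary):

    # scale the Deployment from 3 to 5 replicas
    kubectl scale deployment/nginx-deployment --replicas=5
    kubectl get pods   # two extra pods appear next to the original three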

     A command worth learning

    [root@master01 yaml_doc]# kubectl rollout history deployment/nginx-deployment  # view rollout history
    deployment.extensions/nginx-deployment
    REVISION  CHANGE-CAUSE
    1         <none>
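    CHANGE-CAUSE is <none> because nothing has recorded a cause yet. A hedged sketch of a recorded update followed by a rollback (the 1.15 tag on the private registry is hypothetical):

    # update the image and record the command as the change cause
    kubectl set image deployment/nginx-deployment nginx=10.192.27.111/library/nginx:1.15 --record
    # roll back to the first revision if the update misbehaves
    kubectl rollout undo deployment/nginx-deployment --to-revision=1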

    2. StatefulSet

    Deploys stateful applications
    Gives each Pod an independent lifecycle; preserves Pod startup order and uniqueness
      1. Stable, unique network identifiers and persistent storage
      2. Ordered, graceful deployment, scaling, deletion, and termination
      3. Ordered rolling updates
    Use case: databases

    Creating a StatefulSet instance

    # delete all the previous pods first, to avoid interference
    [root@master01 yaml_doc]# kubectl delete -f . 
    service "nginx" deleted
    deployment.apps "nginx" deleted
    pod "mypod" deleted
    deployment.apps "nginx-deployment" deleted
    service "nginx-service" deleted
    pod "frontend" deleted
    pod "nginx" deleted
    pod "liveness-exec" deleted
    pod "pod-example" deleted
    [root@master01 yaml_doc]# 

    Because the StatefulSet controller must guarantee a unique network identifier for every Pod, it uses a headless service (which has no CLUSTER-IP, unlike an ordinary Service), so DNS resolution is required.

    1. Create CoreDNS in the kube-system namespace

    [root@master01 yaml_doc]# cat coredns.yaml 
    # Warning: This is a file generated from the base underscore template file: coredns.yaml.base
    
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: coredns
      namespace: kube-system
      labels:
          kubernetes.io/cluster-service: "true"
          addonmanager.kubernetes.io/mode: Reconcile
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      labels:
        kubernetes.io/bootstrapping: rbac-defaults
        addonmanager.kubernetes.io/mode: Reconcile
      name: system:coredns
    rules:
    - apiGroups:
      - ""
      resources:
      - endpoints
      - services
      - pods
      - namespaces
      verbs:
      - list
      - watch
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      annotations:
        rbac.authorization.kubernetes.io/autoupdate: "true"
      labels:
        kubernetes.io/bootstrapping: rbac-defaults
        addonmanager.kubernetes.io/mode: EnsureExists
      name: system:coredns
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:coredns
    subjects:
    - kind: ServiceAccount
      name: coredns
      namespace: kube-system
    ---
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: coredns
      namespace: kube-system
      labels:
          addonmanager.kubernetes.io/mode: EnsureExists
    data:
      Corefile: |
        .:53 {
            errors
            health
            kubernetes cluster.local in-addr.arpa ip6.arpa {
                pods insecure
                upstream
                fallthrough in-addr.arpa ip6.arpa
            }
            prometheus :9153
            proxy . /etc/resolv.conf
            cache 30
            loop
            reload
            loadbalance
        }
    ---
    apiVersion: extensions/v1beta1
    kind: Deployment
    metadata:
      name: coredns
      namespace: kube-system
      labels:
        k8s-app: kube-dns
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
        kubernetes.io/name: "CoreDNS"
    spec:
      # replicas: not specified here:
      # 1. In order to make Addon Manager do not reconcile this replicas parameter.
      # 2. Default is 1.
      # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
      strategy:
        type: RollingUpdate
        rollingUpdate:
          maxUnavailable: 1
      selector:
        matchLabels:
          k8s-app: kube-dns
      template:
        metadata:
          labels:
            k8s-app: kube-dns
          annotations:
            seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
        spec:
          serviceAccountName: coredns
          tolerations:
            - key: node-role.kubernetes.io/master
              effect: NoSchedule
            - key: "CriticalAddonsOnly"
              operator: "Exists"
          containers:
          - name: coredns
            image: 10.192.27.111/library/coredns:1.2.2
            imagePullPolicy: IfNotPresent
            resources:
              limits:
                memory: 170Mi
              requests:
                cpu: 100m
                memory: 70Mi
            args: [ "-conf", "/etc/coredns/Corefile" ]
            volumeMounts:
            - name: config-volume
              mountPath: /etc/coredns
              readOnly: true
            ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
            - containerPort: 9153
              name: metrics
              protocol: TCP
            livenessProbe:
              httpGet:
                path: /health
                port: 8080
                scheme: HTTP
              initialDelaySeconds: 60
              timeoutSeconds: 5
              successThreshold: 1
              failureThreshold: 5
            securityContext:
              allowPrivilegeEscalation: false
              capabilities:
                add:
                - NET_BIND_SERVICE
                drop:
                - all
              readOnlyRootFilesystem: true
          dnsPolicy: Default
          volumes:
            - name: config-volume
              configMap:
                name: coredns
                items:
                - key: Corefile
                  path: Corefile
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: kube-dns
      namespace: kube-system
      annotations:
        prometheus.io/port: "9153"
        prometheus.io/scrape: "true"
      labels:
        k8s-app: kube-dns
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
        kubernetes.io/name: "CoreDNS"
    spec:
      selector:
        k8s-app: kube-dns
      clusterIP: 10.0.0.2 
      ports:
      - name: dns
        port: 53
        protocol: UDP
      - name: dns-tcp
        port: 53
        protocol: TCP
    [root@master01 yaml_doc]# 
    [root@master01 yaml_doc]# kubectl get pods -n kube-system
    No resources found.
    
    [root@master01 yaml_doc]# kubectl create -f coredns.yaml 
    serviceaccount/coredns created
    clusterrole.rbac.authorization.k8s.io/system:coredns created
    clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
    configmap/coredns created
    deployment.extensions/coredns created
    service/kube-dns created
    
    [root@master01 yaml_doc]# kubectl get all -n kube-system  # in the kube-system namespace
    NAME                           READY   STATUS    RESTARTS   AGE
    pod/coredns-5c5d76fdbb-lrjtq   1/1     Running   0          2m53s  #coredns pod
    
    NAME               TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
    service/kube-dns   ClusterIP   10.0.0.2     <none>        53/UDP,53/TCP   2m53s   #coredns service
    
    NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
    deployment.apps/coredns   1/1     1            1           2m53s  # coredns controller
    
    NAME                                 DESIRED   CURRENT   READY   AGE
    replicaset.apps/coredns-5c5d76fdbb   1         1         1       2m53s  # its replica set
    [root@master01 yaml_doc]# 

    2. Create the StatefulSet controller

    [root@master01 yaml_doc]# cat sts.yaml 
    apiVersion: v1
    kind: Service
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      ports:
      - port: 80
        name: web
      clusterIP: None   # headless service: set to None (an IP is allocated by default)
      selector:
        app: nginx     # label used to match pods
    
    ---
    
    apiVersion: apps/v1beta1
    kind: StatefulSet    # resource type: StatefulSet controller
    metadata:
      name: nginx-statefulset
      namespace: default
    spec:
      selector:
        matchLabels:
          app: nginx # has to match .spec.template.metadata.labels (selector label)
      serviceName: nginx
      replicas: 3 # by default is 1
      template:
        metadata:
          labels:
            app: nginx # has to match .spec.selector.matchLabels (label to be matched)
        spec:
          terminationGracePeriodSeconds: 10
          containers:
          - name: nginx
            image: 10.192.27.111/library/nginx:latest
            command: [ "/bin/bash", "-ce", "tail -f /dev/null" ]
            ports:
            - containerPort: 80
    [root@master01 yaml_doc]# 
    [root@master01 yaml_doc]# kubectl create -f sts.yaml 
    service/nginx created
    statefulset.apps/nginx-statefulset created
    
    [root@master01 yaml_doc]# kubectl get all
    NAME                      READY   STATUS    RESTARTS   AGE
    pod/nginx-statefulset-0   1/1     Running   0          9s
    pod/nginx-statefulset-1   1/1     Running   0          8s
    pod/nginx-statefulset-2   1/1     Running   0          7s
    
    NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
    service/kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP   10d
    service/nginx        ClusterIP   None         <none>        80/TCP    9s  # CLUSTER-IP is None
    
    NAME                                 READY   AGE
    statefulset.apps/nginx-statefulset   3/3     9s
    # The StatefulSet controller must guarantee unique network identifiers, so it uses a headless service: no CLUSTER-IP, unlike an ordinary Service, hence the need for DNS resolution.
    [root@master01 yaml_doc]# 

    3. Create a test pod

    [root@master01 yaml_doc]# grep busybox *  # search the directory for files that mention busybox
    dns-test.yaml:    - name: busybox
    dns-test.yaml:      image: 10.192.27.111/project/busybox:latest
    grep: pod: Is a directory
    [root@master01 yaml_doc]#
    
    [root@master01 yaml_doc]# vim dns-test.yaml 
    apiVersion: v1
    kind: Pod
    metadata:
      name: dns-test
    spec:
      containers:
        - name: busybox
          image: busybox:1.28.4   # this version appears to be required; nslookup misbehaves in other busybox versions
          args:
            - /bin/sh
            - -c
            - sleep 360000
      restartPolicy: Never
    
    [root@master01 yaml_doc]# kubectl create -f dns-test.yaml 
    pod/dns-test created
    [root@master01 yaml_doc]# kubectl get pods
    NAME                  READY   STATUS    RESTARTS   AGE
    dns-test              1/1     Running   0          5m10s
    nginx-statefulset-0   1/1     Running   0          12m
    nginx-statefulset-1   1/1     Running   0          12m
    nginx-statefulset-2   1/1     Running   0          12m
    [root@master01 yaml_doc]# 
    [root@master01 yaml_doc]# kubectl exec -it dns-test sh  # open a shell inside the pod
    #########################################################################
    / # nslookup kubernetes  # resolve the kubernetes service name (points at the API server on the master)
    Server:    10.0.0.2        # the DNS server IP configured in coredns.yaml
    Address 1: 10.0.0.2 kube-dns.kube-system.svc.cluster.local
    
    Name:      kubernetes
    Address 1: 10.0.0.1 kubernetes.default.svc.cluster.local
    #########################################################################
    / # nslookup nginx-statefulset-0.nginx  # resolve a pod's stable DNS name
    Server:    10.0.0.2
    Address 1: 10.0.0.2 kube-dns.kube-system.svc.cluster.local
    
    Name:      nginx-statefulset-0.nginx
    Address 1: 172.17.65.2 nginx-statefulset-0.nginx.default.svc.cluster.local
    #########################################################################
    / # nslookup nginx-statefulset-1.nginx  # <pod-hostname>.<service-name>.<namespace>.svc.cluster.local
    Server:    10.0.0.2
    Address 1: 10.0.0.2 kube-dns.kube-system.svc.cluster.local
    
    Name:      nginx-statefulset-1.nginx
    Address 1: 172.17.39.2 nginx-statefulset-1.nginx.default.svc.cluster.local
    / # 
    #########################################################################

    How a StatefulSet differs from a Deployment: its Pods have identity!
    The three elements of identity:
      domain name     # example format: web-0.nginx.default.svc.cluster.local
      hostname        # <hostname>.<service-name>.<namespace>.svc.cluster.local
      storage (PVC)   # see the sketch below

    ClusterIP A record format: <service-name>.<namespace-name>.svc.cluster.local
    Headless (clusterIP: None) A record format: <statefulsetName-index>.<service-name>.<namespace>.svc.cluster.local
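    The storage leg of identity is not exercised above; it comes from volumeClaimTemplates, which give each replica its own PVC named after the pod. A minimal sketch, assuming a StorageClass named managed-nfs-storage exists in the cluster (the name is hypothetical):

    # appended to the StatefulSet spec in sts.yaml
      volumeClaimTemplates:
      - metadata:
          name: www
        spec:
          accessModes: [ "ReadWriteOnce" ]
          storageClassName: managed-nfs-storage   # assumed StorageClass
          resources:
            requests:
              storage: 1Gi

    The container then mounts it through a volumeMount named www; pod nginx-statefulset-0 would get the PVC www-nginx-statefulset-0 and keep it across restarts.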

    # run `hostname` in each of the three pods
    [root@master01 yaml_doc]# for i in 0 1 2;do kubectl exec nginx-statefulset-$i hostname; done
    nginx-statefulset-0
    nginx-statefulset-1
    nginx-statefulset-2
    [root@master01 yaml_doc]# 
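    Deleting one of these pods also demonstrates the stable identity: the controller recreates it under the same name rather than with a random suffix. A sketch:

    # delete the first pod; the StatefulSet recreates nginx-statefulset-0,
    # not a pod with a random name as a Deployment would
    kubectl delete pod nginx-statefulset-0
    kubectl get pods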

    3. DaemonSet

    Runs one Pod on every Node
    A newly joined Node automatically gets a Pod as well
    Use case: agents

     Official reference: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/

    [root@master01 yaml_doc]# cat ds.yaml 
    apiVersion: apps/v1  # API version
    kind: DaemonSet  # resource type
    metadata:
      name: nginx-deployment  # name of the controller
      labels:
        app: nginx  # label
    spec:
      selector:
        matchLabels:  # the controller matches its pods through labels
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - name: nginx
            image: 10.192.27.111/library/nginx:1.14
            imagePullPolicy: IfNotPresent
            command: [ "/bin/bash", "-ce", "tail -f /dev/null" ]
            ports:
            - containerPort: 80
    [root@master01 yaml_doc]# kubectl create -f ds.yaml 
    daemonset.apps/nginx-deployment created
    [root@master01 yaml_doc]# kubectl get all
    NAME                         READY   STATUS    RESTARTS   AGE
    pod/nginx-deployment-5vxfs   1/1     Running   0          8s
    pod/nginx-deployment-j6wt2   1/1     Running   0          8s
    
    NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
    service/kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP   11d
    
    NAME                              DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
    daemonset.apps/nginx-deployment   2         2         2       2            2           <none>          8s
    [root@master01 yaml_doc]# kubectl get pods -o wide  # no replica count, yet one pod is created on every node
    NAME                     READY   STATUS    RESTARTS   AGE   IP            NODE            NOMINATED NODE   READINESS GATES
    nginx-deployment-5vxfs   1/1     Running   0          36s   172.17.46.2   10.192.27.116   <none>           <none>
    nginx-deployment-j6wt2   1/1     Running   0          36s   172.17.43.2   10.192.27.115   <none>           <none>
    [root@master01 yaml_doc]# 
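    Only the two worker nodes got a pod here; a node tainted NoSchedule (a master, typically) is skipped unless the pod template tolerates the taint. A hedged sketch of what could be added under .spec.template.spec in ds.yaml:

        spec:
          tolerations:
          - key: node-role.kubernetes.io/master
            effect: NoSchedule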

    4. Job

    Jobs come in two flavors: one-off tasks (Job) and scheduled tasks (CronJob)
    A Job executes once, to completion

    Official reference: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
    Use cases: offline data processing, video transcoding, and similar batch work

    [root@master01 yaml_doc]# vim job.yaml
    apiVersion: batch/v1
    kind: Job
    metadata:
      name: pi
    spec:
      template:
        spec:
          containers:
          - name: pi
            image: perl
            command: ["perl",  "-Mbignum=bpi", "-wle", "print bpi(2000)"]
          restartPolicy: Never # do not restart on a normal exit
      backoffLimit: 4 # on abnormal exit, retry at most 4 times (the default is 6)
    [root@master01 yaml_doc]# kubectl  create -f job.yaml 
    job.batch/pi created
    [root@master01 yaml_doc]# kubectl  get pods
    NAME                     READY   STATUS              RESTARTS   AGE
    pi-mc98d                 0/1     ContainerCreating   0          8s
    [root@master01 yaml_doc]# kubectl  get job  # inspect the job
    NAME   COMPLETIONS   DURATION   AGE
    pi     0/1           18s        18s
    [root@master01 yaml_doc]# 
    [root@master01 yaml_doc]# kubectl  get pods
    NAME                     READY   STATUS      RESTARTS   AGE
    pi-mc98d                 0/1     Completed   0          5m8s  # task completed
    [root@master01 yaml_doc]# kubectl logs pi-mc98d  # the computed value of pi
    3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679821480865132823066470938446095505822317253594081... (2,000 digits in total; output trimmed)
    [root@master01 yaml_doc]# 
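    Beyond backoffLimit, a Job can fan out over several pods: completions sets how many successful runs are required, and parallelism how many run at once. A sketch (the name and counts are arbitrary):

    apiVersion: batch/v1
    kind: Job
    metadata:
      name: pi-parallel   # hypothetical name
    spec:
      completions: 6    # six successful pods needed in total
      parallelism: 2    # run two pods at a time
      template:
        spec:
          containers:
          - name: pi
            image: perl
            command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(100)"]
          restartPolicy: Never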

    5. CronJob

    Scheduled tasks, like Linux crontab.
    Runs jobs on a recurring schedule

    Official reference: https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/
    Use cases: notifications, backups

    [root@master01 yaml_doc]# cat cornjob.yaml 
    apiVersion: batch/v1beta1
    kind: CronJob
    metadata:
      name: hello
    spec:
      schedule: "*/1 * * * *"
      jobTemplate:
        spec:
          template:
            spec:
              imagePullSecrets:
              - name: registry-pull-secret
              containers:
              - name: hello
                image: 10.192.27.111/project/busybox:latest
                args:
                - /bin/sh
                - -c
                - date; echo Hello from the Kubernetes cluster
              restartPolicy: OnFailure
    [root@master01 yaml_doc]# 
    [root@master01 yaml_doc]# kubectl create -f cornjob.yaml 
    cronjob.batch/hello created
    
    [root@master01 yaml_doc]# kubectl get pods
    No resources found.
    [root@master01 yaml_doc]# kubectl get cronjob
    NAME    SCHEDULE      SUSPEND   ACTIVE   LAST SCHEDULE   AGE
    hello   */1 * * * *   False     1        9s              23s
    [root@master01 yaml_doc]# kubectl get pods
    NAME                     READY   STATUS      RESTARTS   AGE
    hello-1574405880-w6kng   0/1     Completed   0          35s
    [root@master01 yaml_doc]# kubectl logs hello-1574405880-w6kng
    Fri Nov 22 06:58:09 UTC 2019
    Hello from the Kubernetes cluster
    [root@master01 yaml_doc]# 

    A CronJob can also be created with the run command:

    [root@master01 yaml_doc]# kubectl run --generator=run-pod/v1 hello --schedule="*/1 * * * *" --restart=OnFailure --image=busybox -- /bin/sh -c "date; echo Hello from the Kubernetes cluster"
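    On newer kubectl versions the run generators are deprecated; kubectl create cronjob achieves the same thing:

    kubectl create cronjob hello --image=busybox --schedule="*/1 * * * *" -- /bin/sh -c "date; echo Hello from the Kubernetes cluster"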

    In the definition above, the part that tends to puzzle people is the schedule parameter, which defines how often the task runs. So what does schedule: "*/1 * * * *" actually mean?

    The expression uses standard cron syntax: five space-separated fields that mean, in order, minute, hour, day of month, month, and day of week (see the annotated sketch below).

    So "*/1 * * * *" here means: run once every minute.
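    Annotated for reference (standard cron field layout; the extra examples are illustrative):

    # ┌───────────── minute (0 - 59)
    # │ ┌───────────── hour (0 - 23)
    # │ │ ┌───────────── day of month (1 - 31)
    # │ │ │ ┌───────────── month (1 - 12)
    # │ │ │ │ ┌───────────── day of week (0 - 6, Sunday = 0)
    # * * * * *
    #
    # "*/1 * * * *"  -> every minute
    # "0 3 * * *"    -> every day at 03:00
    # "30 2 * * 0"   -> every Sunday at 02:30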

    A small problem

    The scheduled task piles up pods; how do we delete them quickly?

    [root@master01 yaml_doc]# kubectl get pods
    NAME                     READY   STATUS      RESTARTS   AGE
    hello-1574405880-w6kng   0/1     Completed   0          114m
    hello-1574405940-gzkbc   0/1     Completed   0          113m
    hello-1574406000-869k8   0/1     Completed   0          112m
    ...                      (one Completed pod per minute; ~105 similar lines trimmed)
    hello-1574412300-xb8zt   0/1     Completed   0          7m39s
    hello-1574412360-dmwk2   0/1     Completed   0          6m39s
    hello-1574412420-gz4fw   0/1     Completed   0          5m39s
    [root@master01 yaml_doc]# kubectl delete $(kubectl get pods  | awk '{print "pod/"$1}'|sed 1d)
    pod "hello-1574405880-w6kng" deleted
    pod "hello-1574405940-gzkbc" deleted
    pod "hello-1574406000-869k8" deleted
    pod "hello-1574406060-9dgfq" deleted
    pod "hello-1574406120-mvgq4" deleted
    pod "hello-1574406180-dj9g6" deleted
    pod "hello-1574406240-p8nsg" deleted
    pod "hello-1574406300-222gk" deleted
    pod "hello-1574406360-mz5d6" deleted
    pod "hello-1574406420-89hcq" deleted
    pod "hello-1574406480-c7q54" deleted
    pod "hello-1574406540-rk5v5" deleted
    pod "hello-1574406600-6jhxh" deleted
    pod "hello-1574406660-ltr6f" deleted
    pod "hello-1574406720-psbcq" deleted
    pod "hello-1574406780-ch7bj" deleted
    pod "hello-1574406840-mvhht" deleted
    pod "hello-1574406900-gbr8n" deleted
    pod "hello-1574406960-w468t" deleted
    pod "hello-1574407020-kvmqs" deleted
    pod "hello-1574407080-rpmzl" deleted
    pod "hello-1574407140-w2chf" deleted
    pod "hello-1574407200-t64kp" deleted
    pod "hello-1574407260-krp6p" deleted
    pod "hello-1574407320-bgwc2" deleted
    pod "hello-1574407380-jpvts" deleted
    pod "hello-1574407440-9z5zn" deleted
    pod "hello-1574407500-bl7f7" deleted
    pod "hello-1574407560-665j6" deleted
    pod "hello-1574407620-2dgzn" deleted
    pod "hello-1574407680-vh4cq" deleted
    pod "hello-1574407740-h9m5r" deleted
    pod "hello-1574407800-bblxr" deleted
    pod "hello-1574407860-b7wp7" deleted
    pod "hello-1574407920-hbh9h" deleted
    pod "hello-1574407980-9mx6w" deleted
    pod "hello-1574408040-g8fnp" deleted
    pod "hello-1574408100-lkjp9" deleted
    pod "hello-1574408160-zhnsw" deleted
    pod "hello-1574408220-jnffx" deleted
    pod "hello-1574408280-57flh" deleted
    pod "hello-1574408340-kk65n" deleted
    pod "hello-1574408400-986p2" deleted
    pod "hello-1574408460-bml4w" deleted
    pod "hello-1574408520-72226" deleted
    pod "hello-1574408580-kx8rz" deleted
    pod "hello-1574408640-cl92v" deleted
    pod "hello-1574408700-mwbgs" deleted
    pod "hello-1574408760-zds8k" deleted
    pod "hello-1574408820-zwqcw" deleted
    pod "hello-1574408880-zvgdc" deleted
    pod "hello-1574408940-kprls" deleted
    pod "hello-1574409000-wtr6j" deleted
    pod "hello-1574409060-hnkck" deleted
    pod "hello-1574409120-86jmj" deleted
    pod "hello-1574409180-xx54s" deleted
    pod "hello-1574409240-7psvz" deleted
    pod "hello-1574409300-8ldjp" deleted
    pod "hello-1574409360-7288z" deleted
    pod "hello-1574409420-56b8t" deleted
    pod "hello-1574409480-ckbk6" deleted
    pod "hello-1574409540-7q6zv" deleted
    pod "hello-1574409600-qm2f8" deleted
    pod "hello-1574409660-m9rqv" deleted
    pod "hello-1574409720-qcm45" deleted
    pod "hello-1574409780-v8t5j" deleted
    pod "hello-1574409840-2vlsw" deleted
    pod "hello-1574409900-tgjqv" deleted
    pod "hello-1574409960-hz7rp" deleted
    pod "hello-1574410020-hb29f" deleted
    pod "hello-1574410080-z6vr5" deleted
    pod "hello-1574410140-wk46p" deleted
    pod "hello-1574410200-d9gt6" deleted
    pod "hello-1574410260-fxqb5" deleted
    pod "hello-1574410320-vdbgv" deleted
    pod "hello-1574410380-f8k62" deleted
    pod "hello-1574410440-l6c48" deleted
    pod "hello-1574410500-gmzx8" deleted
    pod "hello-1574410560-5zfm2" deleted
    pod "hello-1574410620-c89cp" deleted
    pod "hello-1574410680-87rnv" deleted
    pod "hello-1574410740-7sdqw" deleted
    pod "hello-1574410800-h4x2d" deleted
    pod "hello-1574410860-ns5gs" deleted
    pod "hello-1574410920-x2ssf" deleted
    pod "hello-1574410980-vfqsq" deleted
    pod "hello-1574411040-2fd4n" deleted
    pod "hello-1574411100-qqz27" deleted
    pod "hello-1574411160-fd5nq" deleted
    pod "hello-1574411220-nknvq" deleted
    pod "hello-1574411280-x6cfb" deleted
    pod "hello-1574411340-zjfwr" deleted
    pod "hello-1574411400-9gst6" deleted
    pod "hello-1574411460-p4ltl" deleted
    pod "hello-1574411520-qwwds" deleted
    pod "hello-1574411580-bvvg5" deleted
    pod "hello-1574411640-65zlq" deleted
    pod "hello-1574411700-2js4q" deleted
    pod "hello-1574411760-lrm8j" deleted
    pod "hello-1574411820-qgmjc" deleted
    pod "hello-1574411880-m6hfc" deleted
    pod "hello-1574411940-xhdsp" deleted
    pod "hello-1574412000-vr9bs" deleted
    pod "hello-1574412060-94m7w" deleted
    pod "hello-1574412120-z2nbb" deleted
    pod "hello-1574412180-sbwf5" deleted
    pod "hello-1574412240-gxzfx" deleted
    pod "hello-1574412300-xb8zt" deleted
    pod "hello-1574412360-dmwk2" deleted
    [root@master01 yaml_doc]# 
    Reference: https://www.jianshu.com/p/104742c7bcf7
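    A tidier fix is to let the CronJob itself cap its history: successfulJobsHistoryLimit and failedJobsHistoryLimit bound how many finished Jobs (and their pods) are kept. A sketch of cornjob.yaml with the limits added:

    apiVersion: batch/v1beta1
    kind: CronJob
    metadata:
      name: hello
    spec:
      schedule: "*/1 * * * *"
      successfulJobsHistoryLimit: 3   # keep at most 3 completed jobs
      failedJobsHistoryLimit: 1       # keep at most 1 failed job
      jobTemplate:
        spec:
          template:
            spec:
              containers:
              - name: hello
                image: 10.192.27.111/project/busybox:latest
                args: ["/bin/sh", "-c", "date; echo Hello from the Kubernetes cluster"]
              restartPolicy: OnFailure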
