  • Pod-to-pod access

    root@ubuntu:~# nsenter -n --target  27134 
    root@ubuntu:~# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
           valid_lft forever preferred_lft forever
        inet6 ::1/128 scope host 
           valid_lft forever preferred_lft forever
    3: eth0@if641: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default 
        link/ether fa:66:b3:ab:05:9f brd ff:ff:ff:ff:ff:ff link-netnsid 0
        inet 10.244.0.19/24 brd 10.244.0.255 scope global eth0
           valid_lft forever preferred_lft forever
        inet6 fe80::f866:b3ff:feab:59f/64 scope link 
           valid_lft forever preferred_lft forever
    root@ubuntu:~# ping 10.244.0.20
    PING 10.244.0.20 (10.244.0.20) 56(84) bytes of data.
    64 bytes from 10.244.0.20: icmp_seq=1 ttl=64 time=0.259 ms
    64 bytes from 10.244.0.20: icmp_seq=2 ttl=64 time=0.092 ms
    ^C
    --- 10.244.0.20 ping statistics ---
    2 packets transmitted, 2 received, 0% packet loss, time 1001ms
    rtt min/avg/max/mdev = 0.092/0.175/0.259/0.084 ms
    root@ubuntu:~# ping   10.244.2.2
    PING 10.244.2.2 (10.244.2.2) 56(84) bytes of data.
    64 bytes from 10.244.2.2: icmp_seq=1 ttl=62 time=2.91 ms
    64 bytes from 10.244.2.2: icmp_seq=2 ttl=62 time=1.01 ms
    64 bytes from 10.244.2.2: icmp_seq=3 ttl=62 time=0.360 ms
    64 bytes from 10.244.2.2: icmp_seq=4 ttl=62 time=0.260 ms
    ^C
    --- 10.244.2.2 ping statistics ---
    4 packets transmitted, 4 received, 0% packet loss, time 3012ms
    rtt min/avg/max/mdev = 0.260/1.138/2.914/1.066 ms
    root@ubuntu:~# 
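    The PID passed to nsenter above (27134) is the pod's container process on the ubuntu node; the lookup itself is not shown. Since pods on this node run under containerd (see the containerd:// container ID later in the describe output), one way to find it is with crictl, roughly as below (a sketch; <container-id> is a placeholder):

    # list running containers, pick the one belonging to the pod of interest
    crictl ps
    # the runtime metadata contains the container's host PID
    crictl inspect <container-id> | grep -i '"pid"'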

    On node2 (cloud), find the container's PID with docker inspect and enter its network namespace:

    root@cloud:~# docker inspect c453b8f32678 | grep -i pid
                "Pid": 427496,
                "PidMode": "",
                "PidsLimit": null,
    root@cloud:~# nsenter -n --target  427496
    root@cloud:~# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
           valid_lft forever preferred_lft forever
    3: eth0@if13: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default 
        link/ether ba:78:36:f9:52:c7 brd ff:ff:ff:ff:ff:ff link-netnsid 0
        inet 10.244.2.2/24 brd 10.244.2.255 scope global eth0
           valid_lft forever preferred_lft forever
    root@cloud:~# ping 10.244.0.19
    PING 10.244.0.19 (10.244.0.19) 56(84) bytes of data.
    root@cloud:~# ping 10.244.0.19
    PING 10.244.0.19 (10.244.0.19) 56(84) bytes of data.
    ^C
    --- 10.244.0.19 ping statistics ---
    172 packets transmitted, 0 received, 100% packet loss, time 175082ms
    
    root@cloud:~# ping 10.244.0.19
    PING 10.244.0.19 (10.244.0.19) 56(84) bytes of data.
    
    
    
    ^C
    --- 10.244.0.19 ping statistics ---
    10 packets transmitted, 0 received, 100% packet loss, time 9193ms
    
    root@cloud:~# 
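    The cross-node ping from cloud back to the pod on ubuntu gets no replies at all. Before digging into the CNI setup, a quick check is to capture the overlay traffic on each node's uplink while the ping runs: flannel's VXLAN backend encapsulates pod traffic in UDP port 8472, while a Geneve tunnel (which Antrea uses, as seen below) rides on UDP port 6081. If the encapsulated packets never show up on the peer node, the two nodes are not speaking the same overlay. A sketch, with <uplink> as a placeholder for the node's physical interface:

    # watch for encapsulated pod traffic while the ping is running
    # VXLAN (flannel) -> UDP 8472, Geneve (Antrea) -> UDP 6081
    tcpdump -ni <uplink> 'udp port 8472 or udp port 6081'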

    It turns out that Antrea had been installed on the cloud node,

    while the pod at 10.244.0.19 (on the ubuntu node) was not set up by the Antrea CNI, so the two ends of the ping sit on different CNI datapaths and cross-node traffic never gets delivered.
    root@cloud:~# kubectl get pods -n kube-system | grep antrea
    antrea-agent-l5zg5                   2/2     Running   1          14h
    antrea-agent-rgvt5                   2/2     Running   0          20d
    antrea-controller-685ff89775-n7vzb   1/1     Running   0          20d
    root@cloud:~# 
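    To confirm which node each Antrea agent is actually running on, the same listing can be repeated with -o wide (a sketch; the output was not captured here):

    kubectl get pods -n kube-system -o wide | grep antrea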
    root@cloud:~#  kubectl exec -it  pods/antrea-agent-rgvt5 -c antrea-agent ovs-vsctl show   -n kube-system  
    kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead.
    67555ae4-7e45-4aa4-9298-c2a1c4803993
        Bridge br-int
            datapath_type: system
            Port antrea-gw0
                Interface antrea-gw0
                    type: internal
            Port web2-6d7-c9c7e9
                Interface web2-6d7-c9c7e9
            Port nginx-de-9f1eba
                Interface nginx-de-9f1eba
            Port antrea-tun0
                Interface antrea-tun0
                    type: geneve
                    options: {csum="true", key=flow, remote_ip=flow}
            Port mc1-36cb6d
                Interface mc1-36cb6d
            Port debian-6-177c0e
                Interface debian-6-177c0e
            Port nginx-de-8bf1e1
                Interface nginx-de-8bf1e1
        ovs_version: "2.14.0"
    root@cloud:~# 
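    The geneve tunnel port on br-int confirms that pods on this node are wired into Antrea's OVS bridge rather than flannel's VXLAN device. Another way to see which CNI a node will use is to look at the CNI config directory the container runtime reads; it normally takes the first file in name order. The file names below are typical defaults and only an assumption here:

    # check which CNI config the node's runtime will pick up
    ls /etc/cni/net.d/
    # e.g. 10-antrea.conflist vs 10-flannel.conflist (assumed default names)
    cat /etc/cni/net.d/*.conflist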
    root@cloud:~# kubectl get pods 
    NAME                                READY   STATUS             RESTARTS   AGE
    debian-6c44fc6956-ltsrt             0/1     CrashLoopBackOff   5059       17d
    mc1                                 2/2     Running            0          17d
    my-deployment-68bdbbb5cc-bbszv      0/1     ImagePullBackOff   0          36d
    my-deployment-68bdbbb5cc-nrst9      0/1     ImagePullBackOff   0          36d
    my-deployment-68bdbbb5cc-rlgzt      0/1     ImagePullBackOff   0          36d
    my-nginx-5dc4865748-jqx54           1/1     Running            2          36d
    my-nginx-5dc4865748-pcrbg           1/1     Running            2          36d
    nginx                               0/1     ImagePullBackOff   0          36d
    nginx-deployment-6b474476c4-r6z5b   1/1     Running            0          9d
    nginx-deployment-6b474476c4-w6xh9   1/1     Running            0          9d
    web2-6d784f67bf-4gqq2               1/1     Running            0          20d
    web2-worker-579fdc68dd-d8t6m        1/1     Running            0          13h
    root@cloud:~# kubectl get pods web2-6d784f67bf-4gqq2 -o wide
    NAME                    READY   STATUS    RESTARTS   AGE   IP           NODE     NOMINATED NODE   READINESS GATES
    web2-6d784f67bf-4gqq2   1/1     Running   0          20d   10.244.0.2   ubuntu   <none>           <none>
    root@cloud:~# kubectl get pods  web2-worker-579fdc68dd-d8t6m  -o wide
    NAME                           READY   STATUS    RESTARTS   AGE   IP           NODE    NOMINATED NODE   READINESS GATES
    web2-worker-579fdc68dd-d8t6m   1/1     Running   0          13h   10.244.2.2   cloud   <none>           <none>
    root@cloud:~# kubectl describe pods web2-6d784f67bf-4gqq2
    Name:         web2-6d784f67bf-4gqq2
    Namespace:    default
    Priority:     0
    Node:         ubuntu/10.10.16.82
    Start Time:   Fri, 28 May 2021 16:07:21 +0800
    Labels:       pod-template-hash=6d784f67bf
                  run=web2
    Annotations:  <none>
    Status:       Running
    IP:           10.244.0.2
    IPs:
      IP:           10.244.0.2
    Controlled By:  ReplicaSet/web2-6d784f67bf
    Containers:
      web2:
        Container ID:   containerd://ca0b5220695f95f4df64a22a0eeb73cf1e8d77da02f5cc00e44ae1221ba4722d
        Image:          nginx
        Image ID:       docker.io/library/nginx@sha256:bf2ccf1e3f83bbe1c99b2332e7573479dd5c8909f27bc03a712e0750ccbe551d
        Port:           8087/TCP
        Host Port:      0/TCP
        State:          Running
          Started:      Fri, 28 May 2021 16:07:22 +0800
        Ready:          True
        Restart Count:  0
        Environment:    <none>
        Mounts:
          /var/run/secrets/kubernetes.io/serviceaccount from default-token-ckv9x (ro)
    Conditions:
      Type              Status
      Initialized       True 
      Ready             True 
      ContainersReady   True 
      PodScheduled      True 
    Volumes:
      default-token-ckv9x:
        Type:        Secret (a volume populated by a Secret)
        SecretName:  default-token-ckv9x
        Optional:    false
    QoS Class:       BestEffort
    Node-Selectors:  <none>
    Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                     node.kubernetes.io/unreachable:NoExecute for 300s
    Events:          <none>

    Delete antrea.yml on the master node:

    root@ubuntu:~# kubectl delete  -f  ./antrea/build/yamls/antrea.yml
    customresourcedefinition.apiextensions.k8s.io "antreaagentinfos.clusterinformation.antrea.tanzu.vmware.com" deleted
    customresourcedefinition.apiextensions.k8s.io "antreaagentinfos.crd.antrea.io" deleted
    customresourcedefinition.apiextensions.k8s.io "antreacontrollerinfos.clusterinformation.antrea.tanzu.vmware.com" deleted
    customresourcedefinition.apiextensions.k8s.io "antreacontrollerinfos.crd.antrea.io" deleted
    customresourcedefinition.apiextensions.k8s.io "clustergroups.core.antrea.tanzu.vmware.com" deleted
    customresourcedefinition.apiextensions.k8s.io "clustergroups.crd.antrea.io" deleted
    customresourcedefinition.apiextensions.k8s.io "clusternetworkpolicies.crd.antrea.io" deleted
    customresourcedefinition.apiextensions.k8s.io "clusternetworkpolicies.security.antrea.tanzu.vmware.com" deleted
    customresourcedefinition.apiextensions.k8s.io "egresses.crd.antrea.io" deleted
    customresourcedefinition.apiextensions.k8s.io "externalentities.core.antrea.tanzu.vmware.com" deleted
    customresourcedefinition.apiextensions.k8s.io "externalentities.crd.antrea.io" deleted
    customresourcedefinition.apiextensions.k8s.io "networkpolicies.crd.antrea.io" deleted
    customresourcedefinition.apiextensions.k8s.io "networkpolicies.security.antrea.tanzu.vmware.com" deleted
    customresourcedefinition.apiextensions.k8s.io "tiers.crd.antrea.io" deleted
    customresourcedefinition.apiextensions.k8s.io "tiers.security.antrea.tanzu.vmware.com" deleted
    customresourcedefinition.apiextensions.k8s.io "traceflows.crd.antrea.io" deleted
    customresourcedefinition.apiextensions.k8s.io "traceflows.ops.antrea.tanzu.vmware.com" deleted

    On the worker node, the Antrea pods terminate and disappear:

    root@cloud:~# kubectl get pods -n kube-system | grep antrea
    antrea-agent-l5zg5               0/2     Terminating   1          14h
    root@cloud:~# kubectl get pods -n kube-system | grep antrea
    root@cloud:~# 

    Reinstall flannel:

    root@ubuntu:~# kubectl apply  -f  kube-flannel.yml 
    podsecuritypolicy.policy/psp.flannel.unprivileged created
    clusterrole.rbac.authorization.k8s.io/flannel created
    clusterrolebinding.rbac.authorization.k8s.io/flannel created
    serviceaccount/flannel created
    configmap/kube-flannel-cfg created
    daemonset.apps/kube-flannel-ds-amd64 created
    daemonset.apps/kube-flannel-ds-arm64 created
    daemonset.apps/kube-flannel-ds-arm created
    daemonset.apps/kube-flannel-ds-ppc64le created
    daemonset.apps/kube-flannel-ds-s390x created
    root@ubuntu:~#  kubectl get pods -n kube-system | grep flannel
    kube-flannel-ds-arm64-28rkj      1/1     Running   0          29s
    kube-flannel-ds-arm64-56rgm      1/1     Running   0          29s
    root@ubuntu:~# kubectl delete  -f   web-deployment-v2.yaml
    deployment.apps "web2" deleted
    root@ubuntu:~# kubectl delete  -f   web-deployment-worker.yaml 
    deployment.apps "web2-worker" deleted
    root@ubuntu:~# 
    root@ubuntu:~#  kubectl get pods -n kube-system | grep flannel
    kube-flannel-ds-arm64-28rkj      1/1     Running   0          89s
    kube-flannel-ds-arm64-56rgm      1/1     Running   0          89s
    root@ubuntu:~# 
    root@cloud:~# kubectl get pods -n kube-system | grep flannel
    kube-flannel-ds-arm64-28rkj      1/1     Running   0          19s
    kube-flannel-ds-arm64-56rgm      1/1     Running   0          19s
    root@cloud:~# 
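    With flannel running on both nodes and the old web2 deployments removed, recreating them should give each pod a flannel-managed interface, and the original cross-node ping can then be retried. A verification sketch (the pod names and IPs will differ after recreation, so <pod> and <peer-ip> are placeholders):

    kubectl apply -f web-deployment-v2.yaml
    kubectl apply -f web-deployment-worker.yaml
    kubectl get pods -o wide        # note each new pod's IP and node
    # ping a pod on the other node from inside one of the new pods
    kubectl exec -it <pod> -- ping -c 3 <peer-ip>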
  • Original article: https://www.cnblogs.com/dream397/p/14898196.html