  (Repost) Building a highly available Kubernetes 1.17.4 cluster with local nginx

    Original: https://www.cnblogs.com/uglyliu/p/12555858.html

    https://www.cnblogs.com/climbsnail/p/12821799.html

    While reading version 2.0 of the open-source project https://github.com/easzlab/kubeasz, I noticed it had dropped keepalived: apiserver high availability no longer depends on an external load balancer, using essentially the same principle as a local nginx proxy. I had long wanted to try this approach, finally got around to it today, and am writing it down here.

    1. Overview

    Build a highly available Kubernetes 1.17.4 cluster using a local nginx proxy on every node.

    2. Server versions and architecture

    OS: CentOS Linux release 7.7.1908 (Core)
    Kernel: 4.18.12-1.el7.elrepo.x86_64
    Kubernetes: v1.17.4  # latest stable at the time of writing
    Docker-ce: 19.03.8   # latest stable at the time of writing
    Network plugin: calico (3.8.7)  # latest stable at the time of writing
    Hardware: 4 CPU cores, 4 GB RAM
    

    3. Server role planning

    Node name       Role     IP            Installed software
    k8s-master-01   master   10.80.6.206   kubeadm, kubelet, kubectl, docker, apiserver, controller-manager, scheduler, nginx, etcd, kube-proxy
    k8s-master-02   master   10.80.6.207   kubeadm, kubelet, kubectl, docker, apiserver, controller-manager, scheduler, nginx, etcd, kube-proxy
    k8s-master-03   master   10.80.6.208   kubeadm, kubelet, kubectl, docker, apiserver, controller-manager, scheduler, nginx, etcd, kube-proxy
    k8s-node-01     node     10.80.6.209   kubeadm, kubelet, kubectl, docker, kube-proxy, nginx

    Pod subnet:     11.210.0.0/16
    Service subnet: 10.96.0.0/12

    4. Server initialization

    4.1 Disable SELinux, firewalld, and iptables (run on all machines)

    setenforce 0 &&
    sed -i 's/^SELINUX=.*$/SELINUX=disabled/' /etc/selinux/config &&
    getenforce
    
    systemctl stop firewalld &&
    systemctl daemon-reload &&
    systemctl disable firewalld &&
    systemctl daemon-reload &&
    systemctl status firewalld
    
    yum install -y iptables-services &&
    systemctl stop iptables &&
    systemctl disable iptables &&
    systemctl status iptables
    

    4.2 Add hosts entries on every server (run on all machines)

    cat >>/etc/hosts<<EOF
    10.80.6.206 k8s-master-01
    10.80.6.207 k8s-master-02
    10.80.6.208 k8s-master-03
    10.80.6.209 k8s-node-01
    EOF
    

    4.3 Switch to the Aliyun yum mirrors (run on all machines)

    yum install wget -y
    cp -r /etc/yum.repos.d /etc/yum.repos.d.bak
    rm -f /etc/yum.repos.d/*.repo
    wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo &&
    wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
    
    yum clean all && yum makecache
    

    4.4 Configure limits.conf (run on all machines)

    cat >> /etc/security/limits.conf <<EOF
    # End of file
    * soft nproc 10240000
    * hard nproc 10240000
    * soft nofile 10240000
    * hard nofile 10240000
    EOF
    

    4.5 Configure sysctl.conf (run on all machines)

    [ ! -e /etc/sysctl.conf_bk ] && /bin/mv /etc/sysctl.conf{,_bk}
    cat > /etc/sysctl.conf << EOF
    fs.file-max=20480000
    fs.nr_open=20480000
    net.ipv4.tcp_max_tw_buckets = 180000
    net.ipv4.tcp_sack = 1
    net.ipv4.tcp_window_scaling = 1
    net.ipv4.tcp_rmem = 4096 87380 4194304
    net.ipv4.tcp_wmem = 4096 16384 4194304
    net.ipv4.tcp_max_syn_backlog = 16384
    net.core.netdev_max_backlog = 32768
    net.core.somaxconn = 32768
    net.core.wmem_default = 8388608
    net.core.rmem_default = 8388608
    net.core.rmem_max = 16777216
    net.core.wmem_max = 16777216
    net.ipv4.tcp_timestamps = 0
    net.ipv4.tcp_fin_timeout = 20
    net.ipv4.tcp_synack_retries = 2
    net.ipv4.tcp_syn_retries = 2
    net.ipv4.tcp_syncookies = 1
    #net.ipv4.tcp_tw_len = 1
    net.ipv4.tcp_tw_reuse = 1
    net.ipv4.tcp_mem = 94500000 915000000 927000000
    net.ipv4.tcp_max_orphans = 3276800
    net.ipv4.ip_local_port_range = 1024 65000
    #net.nf_conntrack_max = 6553500
    #net.netfilter.nf_conntrack_max = 6553500
    #net.netfilter.nf_conntrack_tcp_timeout_close_wait = 60
    #net.netfilter.nf_conntrack_tcp_timeout_fin_wait = 120
    #net.netfilter.nf_conntrack_tcp_timeout_time_wait = 120
    #net.netfilter.nf_conntrack_tcp_timeout_established = 3600
    EOF
    sysctl -p
    

    4.6 Configure time synchronization (run on all machines)

    ntpdate -u pool.ntp.org
    crontab -e       # add a cron entry
    */15 * * * * /usr/sbin/ntpdate -u pool.ntp.org >/dev/null 2>&1
    

    4.7 Configure k8s.conf (run on all machines)

    cat <<EOF >  /etc/sysctl.d/k8s.conf
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_nonlocal_bind = 1
    net.ipv4.ip_forward = 1
    vm.swappiness=0
    EOF
    
    # Load the br_netfilter module and apply the settings
    modprobe br_netfilter &&
    sysctl -p /etc/sysctl.d/k8s.conf
    

    4.8 Disable swap (run on all machines)

    swapoff -a
    yes | cp /etc/fstab /etc/fstab_bak
    grep -v swap /etc/fstab_bak > /etc/fstab
    

    4.9 Upgrade the system kernel (run on all machines)

    yum update -y

    # The kernel-ml RPMs below are assumed to have been downloaded to /usr/local/src in advance
    rpm -ivh /usr/local/src/kernel-ml-4.18.12-1.el7.elrepo.x86_64.rpm
    rpm -ivh /usr/local/src/kernel-ml-devel-4.18.12-1.el7.elrepo.x86_64.rpm
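
    If the RPMs are not already staged, one alternative (a sketch, assuming outbound access to elrepo.org; note it installs whatever kernel-ml is current rather than 4.18.12 specifically) is to install straight from the ELRepo kernel repository:

    yum install -y https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
    yum --enablerepo=elrepo-kernel install -y kernel-ml kernel-ml-devel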
    
    # Check the current default kernel
    grub2-editenv list

    # Note: this prints an entry for every installed kernel
    cat /boot/grub2/grub.cfg | grep "menuentry "
    menuentry 'CentOS Linux (4.18.12-1.el7.elrepo.x86_64) 7 (Core)' --class centos --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-862.el7.x86_64-advanced-021a955b-781d-425a-8250-f39857437658'

    # Set the default kernel. That version must already be installed; use the exact
    # title printed by the grep above rather than copying this line blindly.
    grub2-set-default 'CentOS Linux (4.18.12-1.el7.elrepo.x86_64) 7 (Core)'

    # Verify the change took effect
    grub2-editenv list

    # Reboot to switch to the new kernel
    reboot
    
    

    4.10 Load the ipvs modules (run on all machines)

    cat > /etc/sysconfig/modules/ipvs.modules <<EOF
    #!/bin/bash
    modprobe -- ip_vs
    modprobe -- ip_vs_rr
    modprobe -- ip_vs_wrr
    modprobe -- ip_vs_sh
    modprobe -- nf_conntrack_ipv4
    EOF
    chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
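
    Side note: on kernels 4.19 and later, nf_conntrack_ipv4 was merged into nf_conntrack, so on such kernels the last modprobe line (and the lsmod filter) should reference nf_conntrack instead:

    modprobe -- nf_conntrack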
    

    4.11 Add the Kubernetes yum repo (run on all machines)

    cat << EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=1
    repo_gpgcheck=1
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    

    4.12 Install essential packages (on all servers)

    yum -y install wget vim iftop iotop net-tools nmon telnet lsof iptraf nmap httpd-tools lrzsz mlocate ntp ntpdate strace libpcap nethogs bridge-utils bind-utils nc nfs-utils rpcbind dnsmasq python python-devel tcpdump tree gcc gcc-c++ pcre pcre-devel openssl openssl-devel
    

    5. Install nginx (on all servers)

    useradd nginx -s /sbin/nologin -M
    wget http://nginx.org/download/nginx-1.16.1.tar.gz
    tar xvf nginx-1.16.1.tar.gz
    cd nginx-1.16.1
    
    # Note the --with-stream module: it is required for nginx's layer-4 (TCP) proxying
    ./configure --user=nginx --group=nginx --prefix=/usr/local/nginx --with-http_realip_module --with-http_stub_status_module --with-http_ssl_module --with-http_flv_module --with-http_gzip_static_module --with-cc-opt=-O3 --with-stream
    
    echo $?
    
    make && make install
    echo $?
    
    cp /usr/local/nginx/conf/nginx.conf /usr/local/nginx/conf/nginx.conf.bak
    > /usr/local/nginx/conf/nginx.conf
    
    vi /usr/local/nginx/conf/nginx.conf
    
    user nginx nginx;
    worker_processes auto;
    events {
        worker_connections  65536;
        use epoll;
    }
    error_log /var/log/nginx_error.log info;
    
    stream {
        upstream kube-servers {
            hash $remote_addr consistent;
            server k8s-master-01:6443 weight=5 max_fails=1 fail_timeout=3s;
            server k8s-master-02:6443 weight=5 max_fails=1 fail_timeout=3s;
            server k8s-master-03:6443 weight=5 max_fails=1 fail_timeout=3s;
        }
    
        server {
            listen 8443 reuseport;
            proxy_connect_timeout 3s;
        # increase the timeout for long-lived connections
            proxy_timeout 3000s;
            proxy_pass kube-servers;
        }
    }
    
    # Test the configuration syntax, then start nginx
    /usr/local/nginx/sbin/nginx -t
    /usr/local/nginx/sbin/nginx
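
    At this stage the apiservers are not running yet, so the proxy cannot be tested end to end, but a quick check confirms nginx is listening on 8443:

    ss -lntp | grep 8443

    The manual start above will not survive a reboot. A minimal systemd unit is sketched below (the paths match the --prefix used earlier; the PID file location is the default for that prefix):

    /usr/local/nginx/sbin/nginx -s quit   # stop the manually started instance first

    cat > /etc/systemd/system/nginx.service <<EOF
    [Unit]
    Description=Local nginx TCP proxy for kube-apiserver
    After=network.target

    [Service]
    Type=forking
    PIDFile=/usr/local/nginx/logs/nginx.pid
    ExecStartPre=/usr/local/nginx/sbin/nginx -t
    ExecStart=/usr/local/nginx/sbin/nginx
    ExecReload=/usr/local/nginx/sbin/nginx -s reload
    ExecStop=/usr/local/nginx/sbin/nginx -s quit

    [Install]
    WantedBy=multi-user.target
    EOF
    systemctl daemon-reload && systemctl enable --now nginx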
    

    6. Install docker (on all servers)

    yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    yum install docker-ce -y
    

    6.1 Configure the daemon.json file (on all servers)

    {
        "exec-opts": ["native.cgroupdriver=systemd"],
        "registry-mirrors":[
            "https://c6ai9izk.mirror.aliyuncs.com"
        ],
        "bip":"172.133.100.1/24",
        "max-concurrent-downloads":3,
        "data-root":"/data/docker",
        "log-driver":"json-file",
        "log-opts":{
            "max-size":"100m",
            "max-file":"1"
        },
        "max-concurrent-uploads":5,
        "storage-driver":"overlay2",
        "storage-opts": [
        "overlay2.override_kernel_check=true"
      ]
    }
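
    The JSON above belongs in /etc/docker/daemon.json, and since data-root points at /data/docker, that directory should exist before Docker starts:

    mkdir -p /etc/docker /data/docker
    vi /etc/docker/daemon.json    # paste in the JSON above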
    

    6.2 Start and verify the docker service

    systemctl enable docker &&
    systemctl restart docker &&
    systemctl status docker
    

    7. Deploy Kubernetes with kubeadm

    7.1 Configure kubernetes.repo (on every machine)

    Note: this rewrites the repo file added in section 4.11, this time with GPG checking disabled.

    cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=0
    repo_gpgcheck=0
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    

    7.2 Install the required packages (on all machines)

    yum install -y kubelet-1.17.4 kubeadm-1.17.4 kubectl-1.17.4 ipvsadm ipset
    
    # Enable kubelet at boot. Note: do not run `systemctl start kubelet` directly here; it will fail. kubelet comes up on its own once kubeadm init succeeds.
    systemctl enable kubelet
    

    7.3 Adjust the initialization configuration

    Run on one of the masters.

    Print the default configuration with kubeadm config print init-defaults > kubeadm-init.yaml, then adjust it for your environment.

    The fields that need changing are advertiseAddress, controlPlaneEndpoint, imageRepository, serviceSubnet, podSubnet, and kubernetesVersion.

    apiVersion: kubeadm.k8s.io/v1beta2
    bootstrapTokens:
    - groups:
      - system:bootstrappers:kubeadm:default-node-token
      token: abcdef.0123456789abcdef
      ttl: 24h0m0s
      usages:
      - signing
      - authentication
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 10.80.6.206
      bindPort: 6443
    nodeRegistration:
      criSocket: /var/run/dockershim.sock
      name: k8s-master-01
      taints:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
    ---
    apiServer:
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta2
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controllerManager: {}
    dns:
      type: CoreDNS
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
    kind: ClusterConfiguration
    kubernetesVersion: v1.17.4
    controlPlaneEndpoint: "127.0.0.1:8443"   # key to the local nginx mode: every node reaches the apiservers through its local proxy on 8443
    networking:
      dnsDomain: cluster.local
      serviceSubnet: 10.96.0.0/12
      podSubnet: 11.210.0.0/16
    scheduler: {}
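
    Optionally, the ipvs switch performed by hand in section 9 can be baked in at init time by appending a KubeProxyConfiguration document to kubeadm-init.yaml (a sketch; with this in place, the ConfigMap edit in section 9 becomes unnecessary):

    ---
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    mode: ipvs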
    

    7.4 Pre-pull the images (run on all three masters; remember to copy kubeadm-init.yaml to each)

    kubeadm config images pull --config kubeadm-init.yaml
    

    7.5 Initialize

    Run on one master:

    kubeadm init --config kubeadm-init.yaml --upload-certs   # --upload-certs stores the control-plane certificates in the cluster (a kubeadm-certs Secret), so there is no need to copy them to the other masters by hand
    

    After initialization succeeds, run the following in order:

    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    

    Then, on the other two masters, run the join command that k8s-master-01 printed, which looks like this:

    kubeadm join 127.0.0.1:8443 --token abcdef.0123456789abcdef     --discovery-token-ca-cert-hash sha256:12605c402e471271bdb11e460fd40bd130c534e73ad9b0b032909d99c79b7e29     --control-plane --certificate-key dbaf047f1383434c56ab6ca51cbce3f8184aa9ead0783664150edbca66c5c9ce
    

    On the worker node, run a command of the following form.

    Note: tokens have a limited lifetime. If the old token has expired, create a new one on a master with kubeadm token create --print-join-command.

    kubeadm join 127.0.0.1:8443 --token xoyjnz.cebt8jbfwg8dko11     --discovery-token-ca-cert-hash sha256:12605c402e471271bdb11e460fd40bd130c534e73ad9b0b032909d99c79b7e29
    

    8. Deploy the calico network plugin

    8.1 Download the calico.yaml file

    wget -c https://docs.projectcalico.org/v3.8/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
    

    8.2 Edit calico.yaml (adjust to your environment)

    Change the value under CALICO_IPV4POOL_CIDR (default 192.168.0.0/16) so that it matches the pod subnet:

            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            - name: CALICO_IPV4POOL_CIDR
              value: "11.210.0.0/16"
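
    The edit can also be scripted; a one-liner, assuming the default CIDR string is still present in the file:

    sed -i 's#192.168.0.0/16#11.210.0.0/16#' calico.yaml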
    

    8.3 Run kubectl apply -f calico.yaml

    [root@k8s-master-01 ~]# kubectl apply -f calico.yaml
    configmap/calico-config created
    customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
    clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
    clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
    clusterrole.rbac.authorization.k8s.io/calico-node created
    clusterrolebinding.rbac.authorization.k8s.io/calico-node created
    daemonset.extensions/calico-node created
    serviceaccount/calico-node created
    deployment.extensions/calico-kube-controllers created
    serviceaccount/calico-kube-controllers created
    

    8.4 Check the node status

    Before the network plugin is installed the nodes show NotReady; once calico is deployed they turn Ready, meaning the cluster is up and we can move on to verifying it.

    [root@k8s-master-01 ~]# kubectl get nodes
    NAME            STATUS   ROLES    AGE     VERSION
    k8s-master-01   Ready    master   4h29m   v1.17.4
    k8s-master-02   Ready    master   4h28m   v1.17.4
    k8s-master-03   Ready    master   4h27m   v1.17.4
    k8s-node-01     Ready    <none>   4h27m   v1.17.4
    

    9. Enable ipvs for kube-proxy (run on a single master)

    9.1 In the kube-system/kube-proxy ConfigMap, set mode: "ipvs" in config.conf

    kubectl edit cm kube-proxy -n kube-system
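
    kubectl edit opens an interactive editor; for a scripted change, the same edit can be applied non-interactively (a sketch, assuming the ConfigMap still contains the default mode: "" line):

    kubectl -n kube-system get cm kube-proxy -o yaml | sed 's/mode: ""/mode: "ipvs"/' | kubectl apply -f -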
    

    9.2 Then restart the kube-proxy pods on each node:

    [root@k8s-master-01 ~]# kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'
    pod "kube-proxy-8fpjb" deleted
    pod "kube-proxy-dqqxh" deleted
    pod "kube-proxy-mxvz2" deleted
    pod "kube-proxy-np9x9" deleted
    pod "kube-proxy-rtzcn" deleted
    

    9.3 Check the kube-proxy pod status

    [root@k8s-master-01 ~]# kubectl get pod -n kube-system | grep kube-proxy
    kube-proxy-4fhpg                           1/1     Running   0          81s
    kube-proxy-9f2x6                           1/1     Running   0          109s
    kube-proxy-cxl5m                           1/1     Running   0          89s
    kube-proxy-lvp9q                           1/1     Running   0          78s
    kube-proxy-v4mg8                           1/1     Running   0          99s
    

    9.4 Verify that ipvs is enabled

    The log line "Using ipvs Proxier" confirms that ipvs mode is active:

    [root@k8s-master-01 ~]# kubectl logs kube-proxy-mfv85 -n kube-system|grep ipvs
    I0323 10:27:25.224163       1 server_others.go:172] Using ipvs Proxier.
    

    10. Inspect the ipvs state

    [root@k8s-master-01 ~]# ipvsadm -L -n
    IP Virtual Server version 1.2.1 (size=4096)
    Prot LocalAddress:Port Scheduler Flags
      -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
    TCP  10.96.0.1:443 rr
      -> 10.80.6.206:6443             Masq    1      0          0         
      -> 10.80.6.207:6443             Masq    1      0          0         
      -> 10.80.6.208:6443             Masq    1      0          0         
    TCP  10.96.0.10:53 rr
      -> 11.210.151.129:53            Masq    1      0          0         
      -> 11.210.183.129:53            Masq    1      0          0         
    TCP  10.96.0.10:9153 rr
      -> 11.210.151.129:9153          Masq    1      0          0         
      -> 11.210.183.129:9153          Masq    1      0          0         
    UDP  10.96.0.10:53 rr
      -> 11.210.151.129:53            Masq    1      0          0         
      -> 11.210.183.129:53            Masq    1      0          0  
    

    11. Test the cluster by running a container

    [root@k8s-master-01 ~]# kubectl run nginx --image=nginx:1.14 --replicas=2
    kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
    deployment.apps/nginx created
    

    11.1 Check the nginx pods

    [root@k8s-master-01 ~]# kubectl get pods -o wide
    NAME                     READY   STATUS    RESTARTS   AGE    IP               NODE          NOMINATED NODE   READINESS GATES
    curl-69c656fd45-m5hh2    1/1     Running   1          4h2m   11.210.154.195   k8s-node-01   <none>           <none>
    nginx-5cf565498c-hhtzh   1/1     Running   0          4h4m   11.210.154.194   k8s-node-01   <none>           <none>
    nginx-5cf565498c-hmjlj   1/1     Running   0          4h4m   11.210.154.193   k8s-node-01   <none>           <none>
    

    11.2 Test nginx with curl

    [root@k8s-master-01 ~]# curl 11.210.154.194
    <!DOCTYPE html>
    <html>
    <head>
    <title>Welcome to nginx!</title>
    <style>
        body {
            width: 35em;
            margin: 0 auto;
            font-family: Tahoma, Verdana, Arial, sans-serif;
        }
    </style>
    </head>
    <body>
    <h1>Welcome to nginx!</h1>
    <p>If you see this page, the nginx web server is successfully installed and
    working. Further configuration is required.</p>
    
    <p>For online documentation and support please refer to
    <a href="http://nginx.org/">nginx.org</a>.<br/>
    Commercial support is available at
    <a href="http://nginx.com/">nginx.com</a>.</p>
    
    <p><em>Thank you for using nginx.</em></p>
    </body>
    </html>
    

    Seeing "Welcome to nginx" means the pod is serving traffic, which in turn indicates the cluster is working.
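
    Note that curling the pod IP bypasses the service layer. To also exercise the ipvs service path, expose the deployment and curl its ClusterIP (a quick sketch; nginx is the service name that kubectl expose derives from the deployment):

    kubectl expose deployment nginx --port=80
    curl $(kubectl get svc nginx -o jsonpath='{.spec.clusterIP}')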

    12. Test DNS

    Start a busybox pod and, once inside, run nslookup kubernetes.default:

    [root@k8s-master-01 ~]# kubectl run curl --image=radial/busyboxplus:curl -it
    kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
    If you don't see a command prompt, try pressing enter.
    [ root@curl-66bdcf564-njcqk:/ ]$ nslookup kubernetes.default
    Server:    10.96.0.10
    Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
    
    Name:      kubernetes.default
    Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local    # output like this means DNS is working
    

    At this point the highly available Kubernetes cluster backed by local nginx is fully deployed.

    13. Problems encountered

    I originally wanted the init config to mount the host time zone file so that the apiserver and controller-manager logs would not lag by 8 hours, but with that mount in place initialization failed. I have not solved it yet and will revisit the problem later.
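
    For reference, the usual way to mount the host time zone into the control-plane static pods is extraVolumes in the ClusterConfiguration. A sketch, not verified against this exact failure; note pathType: File, whose absence (the default is DirectoryOrCreate) is a common reason such an init fails:

    apiServer:
      extraVolumes:
      - name: localtime
        hostPath: /etc/localtime
        mountPath: /etc/localtime
        readOnly: true
        pathType: File
    controllerManager:
      extraVolumes:
      - name: localtime
        hostPath: /etc/localtime
        mountPath: /etc/localtime
        readOnly: true
        pathType: File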
    