  • k8s installation test record (v1.14.0)

    Node planning
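
    As used throughout the rest of this document: three master nodes (g-master01, g-master02, g-master03 at 172.17.1.201-203), a keepalived VIP of 172.17.1.200 with haproxy listening on port 8443 in front of the kube-apiservers, pod subnet 10.244.0.0/16 and service subnet 10.96.0.0/12.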

    System configuration

    CentOS Linux release 7.6.1810 (Core)

    • System update
    yum install epel-release -y
    yum update -y
    

    Kernel version: Linux 5.1.8-1.el7.elrepo.x86_64

    • Kernel upgrade
    # Install the ELRepo yum repository
    rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
    rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
    
    # List available kernel packages
    yum --disablerepo=* --enablerepo=elrepo-kernel repolist
    yum --disablerepo=* --enablerepo=elrepo-kernel list kernel*
    
    
    # Install the mainline kernel
    yum --enablerepo=elrepo-kernel install kernel-ml-devel kernel-ml -y
    
    
    # Set the default boot entry and regenerate the grub config
    grub2-set-default 0
    grub2-mkconfig -o /etc/grub2.cfg
    
    
    # Remove the old kernel tools packages
    yum remove kernel-tools-libs.x86_64 kernel-tools.x86_64 -y
    
    # Install the new kernel tools
    yum --disablerepo=* --enablerepo=elrepo-kernel install -y kernel-ml-tools.x86_64
    
    
    # Reboot
    reboot
    
    # Check the kernel version
    uname -sr
    
    

    Modify kernel parameters

    cat <<EOF > /etc/sysctl.d/k8s.conf
    net.ipv4.tcp_keepalive_time = 600
    net.ipv4.tcp_keepalive_intvl = 30
    net.ipv4.tcp_keepalive_probes = 10
    net.ipv6.conf.all.disable_ipv6 = 1
    net.ipv6.conf.default.disable_ipv6 = 1
    net.ipv6.conf.lo.disable_ipv6 = 1
    net.ipv4.neigh.default.gc_stale_time = 120
    net.ipv4.conf.all.rp_filter = 0
    net.ipv4.conf.default.rp_filter = 0
    net.ipv4.conf.default.arp_announce = 2
    net.ipv4.conf.lo.arp_announce = 2
    net.ipv4.conf.all.arp_announce = 2
    net.ipv4.ip_forward = 1
    net.ipv4.tcp_max_tw_buckets = 5000
    net.ipv4.tcp_syncookies = 1
    net.ipv4.tcp_max_syn_backlog = 1024
    net.ipv4.tcp_synack_retries = 2
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.netfilter.nf_conntrack_max = 2310720
    fs.inotify.max_user_watches=89100
    fs.may_detach_mounts = 1
    fs.file-max = 52706963
    fs.nr_open = 52706963
    net.bridge.bridge-nf-call-arptables = 1
    # prefer physical memory as much as possible; only fall back to swap afterwards
    vm.swappiness = 0
    vm.overcommit_memory=1
    vm.panic_on_oom=0
    EOF
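    
    # Note: the net.bridge.* keys above are only present once the br_netfilter module is loaded,
    # so load it now and on every boot (via systemd's /etc/modules-load.d) before applying the sysctls.
    modprobe br_netfilter
    echo br_netfilter > /etc/modules-load.d/br_netfilter.conf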
    sysctl --system
    

    Disable swap

    # Disable temporarily
    swapoff -a
    # Disable permanently
    sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
    

    Enable IPVS

    modprobe -- ip_vs
    modprobe -- ip_vs_rr
    modprobe -- ip_vs_wrr
    modprobe -- ip_vs_sh
    # on kernels >= 4.19 (such as the 5.1 kernel installed above) nf_conntrack_ipv4 is merged into nf_conntrack
    modprobe -- nf_conntrack_ipv4 || modprobe -- nf_conntrack
    # Verify the modules are loaded
    lsmod | grep ip_vs
    # Load the modules automatically at boot
    cat <<EOF >> /etc/rc.local
    modprobe -- ip_vs
    modprobe -- ip_vs_rr
    modprobe -- ip_vs_wrr
    modprobe -- ip_vs_sh
    modprobe -- nf_conntrack_ipv4 || modprobe -- nf_conntrack
    EOF
    chmod +x /etc/rc.d/rc.local
    

    Disable SELinux

    # Disable temporarily
    setenforce 0
    # Disable permanently
    sed -ri 's/^(SELINUX=)[a-z]*/\1disabled/' /etc/selinux/config
    

    Disable postfix

    systemctl stop postfix
    systemctl disable postfix
    

    Disable the firewall

    systemctl stop firewalld
    systemctl disable firewalld
    

    Install docker-ce

    # Uninstall old versions
    yum remove docker \
               docker-client \
               docker-client-latest \
               docker-common \
               docker-latest \
               docker-latest-logrotate \
               docker-logrotate \
               docker-engine
    
    
    
    
    # Install using the repository
    yum install -y yum-utils \
      device-mapper-persistent-data \
      lvm2
    
    
    yum-config-manager \
        --add-repo \
        https://download.docker.com/linux/centos/docker-ce.repo
    
    
    yum install docker-ce docker-ce-cli containerd.io -y
    
    # Start and enable the service
    systemctl start docker
    systemctl enable docker
    

    Other

    Passwordless SSH login between nodes, /etc/hosts entries, and NTP time synchronization.
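
    A minimal sketch of those three steps (hostnames and IPs follow the node plan above; adjust to your environment):

    # passwordless SSH from master01 to the other masters
    ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
    for ip in 172.17.1.202 172.17.1.203; do ssh-copy-id root@$ip; done
    
    # hosts entries on every node
    cat <<EOF >> /etc/hosts
    172.17.1.201 g-master01
    172.17.1.202 g-master02
    172.17.1.203 g-master03
    EOF
    
    # NTP time synchronization
    yum install -y chrony
    systemctl enable --now chronyd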
    

    Configure keepalived and haproxy (master nodes)

    yum install -y socat keepalived haproxy ipvsadm
    systemctl enable haproxy
    systemctl enable keepalived
    

    haproxy configuration

    # /etc/haproxy/haproxy.cfg
    global
        log         127.0.0.1 local3
        chroot      /var/lib/haproxy
        pidfile     /var/run/haproxy.pid
        maxconn     32768
        user        haproxy
        group       haproxy
        daemon
        nbproc      1
        stats socket /var/lib/haproxy/stats
    
    defaults
        mode                    tcp
        log                     global
        option                  tcplog
        option                  dontlognull
        option                  redispatch
        retries                 3
        timeout queue           1m
        timeout connect         10s
        timeout client          1m
        timeout server          1m
        timeout check           10s
    
    listen stats
        mode   http
        bind :8888
        stats   enable
        stats   uri     /admin?stats
        stats   auth    admin:admin
        stats   admin   if TRUE
    
    frontend  k8s_https *:8443
        mode      tcp
        maxconn      2000
        default_backend     https_sri
    
    backend https_sri
        balance      roundrobin
        server master1-api 172.17.1.201:6443  check inter 10000 fall 2 rise 2 weight 1
        server master2-api 172.17.1.202:6443  check inter 10000 fall 2 rise 2 weight 1
        server master3-api 172.17.1.203:6443  check inter 10000 fall 2 rise 2 weight 1
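
    Before (re)starting haproxy it is worth validating the file; haproxy's -c flag only checks the configuration and exits:

    # validate the configuration, then restart the service
    haproxy -c -f /etc/haproxy/haproxy.cfg
    systemctl restart haproxy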
    

    keepalived configuration

    # /etc/keepalived/keepalived.conf
    global_defs {
       router_id master01
    }
    
    vrrp_script check_haproxy {
        script "/etc/keepalived/check_haproxy.sh"
        interval 3
    }
    
    vrrp_instance VI_1 {
        state MASTER
        interface eth0
        virtual_router_id 99
        priority 100
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1111
        }
        virtual_ipaddress {
            172.17.1.200
        }
        track_script {   
            check_haproxy
        }
    }
    
    
    # /etc/keepalived/check_haproxy.sh
    #!/bin/bash
    NUM=`ps -C haproxy --no-header |wc -l`
    if [ $NUM -eq 0 ];then
        systemctl stop keepalived
    fi
    
    
    # chmod 755 /etc/keepalived/check_haproxy.sh
    
    The keepalived configuration differs between the three nodes (a sed sketch for generating the other two follows below):
    router_id is master01, master02 and master03 respectively
    state is MASTER, BACKUP and BACKUP respectively
    priority is 100, 90 and 80 respectively
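
    One way to derive the other two configs from master01's, as a sketch (assuming the file above has already been copied to the node):

    # on master02
    sed -i -e 's/router_id master01/router_id master02/' \
           -e 's/state MASTER/state BACKUP/' \
           -e 's/priority 100/priority 90/' /etc/keepalived/keepalived.conf
    # on master03 use router_id master03, state BACKUP and priority 80 instead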
    
    

    K8S cluster deployment

    Install kubeadm, kubelet and kubectl

    Install kubeadm, kubelet and kubectl on all nodes. Note: kubectl is not strictly required on worker nodes.

    # Configure the yum repository
    cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=0
    repo_gpgcheck=0
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
           https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    # Install
    yum -y install kubeadm-1.14.0 kubelet-1.14.0 kubectl-1.14.0
    systemctl enable kubelet
    
    
    

    Initialize the master nodes

    # Generate the default config file
    kubeadm config print init-defaults > kubeadm.conf
    
    # In the config file, change
    # imageRepository: k8s.gcr.io
    # to a reachable mirror, for example:
    # imageRepository: docker.io/mirrorgooglecontainers
    
    # Then run
    kubeadm config images list --config kubeadm.conf
    kubeadm config images pull --config kubeadm.conf
    kubeadm init --config kubeadm.conf   # adjust the config for each node as appropriate
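
    For example, a one-line way to make that imageRepository change (a sketch assuming the generated file still contains the default k8s.gcr.io value; the Aliyun mirror used later in this document also works as the target):

    sed -i 's#imageRepository: k8s.gcr.io#imageRepository: registry.aliyuncs.com/google_containers#' kubeadm.conf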
    

    Or:

    kubeadm config images list |sed -e 's/^/docker pull /g' -e 's#k8s.gcr.io#docker.io/mirrorgooglecontainers#g' |sh -x
    
    
    # generate the retag commands (changing the target repository to k8s.gcr.io), which yields the commands below
    docker images | grep mirrorgooglecontainers | awk '{print "docker tag",$1":"$2,$1":"$2}' | sed -e 's#mirrorgooglecontainers#k8s.gcr.io#2'
    
    docker tag  mirrorgooglecontainers/kube-proxy:v1.14.0 k8s.gcr.io/kube-proxy:v1.14.0
    docker tag  mirrorgooglecontainers/kube-apiserver:v1.14.0 k8s.gcr.io/kube-apiserver:v1.14.0
    docker tag  mirrorgooglecontainers/kube-controller-manager:v1.14.0 k8s.gcr.io/kube-controller-manager:v1.14.0
    docker tag  mirrorgooglecontainers/kube-scheduler:v1.14.0 k8s.gcr.io/kube-scheduler:v1.14.0
    docker tag  mirrorgooglecontainers/etcd:3.3.10 k8s.gcr.io/etcd:3.3.10
    docker tag  mirrorgooglecontainers/pause:3.1 k8s.gcr.io/pause:3.1
    
    docker images |grep mirrorgooglecontainers |awk '{print "docker rmi ", $1":"$2}' |sh -x
    
    docker pull coredns/coredns:1.3.1
    docker tag coredns/coredns:1.3.1 k8s.gcr.io/coredns:1.3.1
    docker rmi coredns/coredns:1.3.1
    
    
    kubeadm init --config kubeadm.conf   # adjust the config for each node as appropriate
    
    master01
    • kubeadm_master01.conf
    apiVersion: kubeadm.k8s.io/v1beta1
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 172.17.1.201
      bindPort: 6443
    ---
    apiVersion: kubeadm.k8s.io/v1beta1
    kind: ClusterConfiguration
    kubernetesVersion: v1.14.0
    controlPlaneEndpoint: "172.17.1.200:8443"
    imageRepository: registry.aliyuncs.com/google_containers
    apiServer:
      certSANs:
      - "g-master01"
      - "g-master02"
      - "g-master03"
      - 172.17.1.201
      - 172.17.1.202
      - 172.17.1.203
      - 172.17.1.200
    networking:
      podSubnet: "10.244.0.0/16"
      serviceSubnet: "10.96.0.0/12"
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    
    etcd:
      local:
        extraArgs:
          listen-client-urls: "https://127.0.0.1:2379,https://172.17.1.201:2379"
          advertise-client-urls: "https://172.17.1.201:2379"
          listen-peer-urls: "https://172.17.1.201:2380"
          initial-advertise-peer-urls: "https://172.17.1.201:2380"
          initial-cluster: "g-master01.bjyztj.sjhl=https://172.17.1.201:2380"
          initial-cluster-state: new
        serverCertSANs:
          - g-master01
          - 172.17.1.201
        peerCertSANs:
          - g-master01
          - 172.17.1.201
    ---
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    mode: ipvs
    
    # Initialize master01
    kubeadm init --config kubeadm_master01.conf
    # Configure kubectl to manage the cluster
    mkdir -p $HOME/.kube
    cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    
    
    kubectl get pods -n kube-system
    
    # Install the Flannel network plugin
    wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
    
    # Bind flannel to the correct network interface
          containers:
          - name: kube-flannel
            image: quay.io/coreos/flannel:v0.11.0-amd64
            command:
            - /opt/bin/flanneld
            args:
            - --ip-masq
            - --kube-subnet-mgr
            - --iface=eth0        # add this line
    
    # Then apply the manifest (run this on master01 only)
    kubectl apply -f kube-flannel.yml
    
    
    Or use Calico instead:
    wget https://docs.projectcalico.org/manifests/calico.yaml
    kubectl apply -f calico.yaml
    
    kubectl get pods -n kube-system
    
    # Distribute certificates to the other master nodes
    #!/bin/bash
    for index in 202 203; do
      ip=172.17.1.${index}
      ssh $ip "mkdir -p /etc/kubernetes/pki/etcd; mkdir -p ~/.kube/"
      scp /etc/kubernetes/pki/ca.crt $ip:/etc/kubernetes/pki/ca.crt
      scp /etc/kubernetes/pki/ca.key $ip:/etc/kubernetes/pki/ca.key
      scp /etc/kubernetes/pki/sa.key $ip:/etc/kubernetes/pki/sa.key
      scp /etc/kubernetes/pki/sa.pub $ip:/etc/kubernetes/pki/sa.pub
      scp /etc/kubernetes/pki/front-proxy-ca.crt $ip:/etc/kubernetes/pki/front-proxy-ca.crt
      scp /etc/kubernetes/pki/front-proxy-ca.key $ip:/etc/kubernetes/pki/front-proxy-ca.key
      scp /etc/kubernetes/pki/etcd/ca.crt $ip:/etc/kubernetes/pki/etcd/ca.crt
      scp /etc/kubernetes/pki/etcd/ca.key $ip:/etc/kubernetes/pki/etcd/ca.key
      scp /etc/kubernetes/admin.conf $ip:/etc/kubernetes/admin.conf
      scp /etc/kubernetes/admin.conf $ip:~/.kube/config
    done
    
    master02
    • kubeadm_master02.conf
    apiVersion: kubeadm.k8s.io/v1beta1
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 172.17.1.202
      bindPort: 6443
    ---
    apiVersion: kubeadm.k8s.io/v1beta1
    kind: ClusterConfiguration
    kubernetesVersion: v1.14.0
    controlPlaneEndpoint: "172.17.1.200:8443"
    imageRepository: registry.aliyuncs.com/google_containers
    apiServer:
      certSANs:
      - "g-master01"
      - "g-master02"
      - "g-master03"
      - 172.17.1.201
      - 172.17.1.202
      - 172.17.1.203
      - 172.17.1.200
    networking:
      podSubnet: "10.244.0.0/16"
      serviceSubnet: "10.96.0.0/12"
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    
    etcd:
      local:
        extraArgs:
          listen-client-urls: "https://127.0.0.1:2379,https://172.17.1.202:2379"
          advertise-client-urls: "https://172.17.1.202:2379"
          listen-peer-urls: "https://172.17.1.202:2380"
          initial-advertise-peer-urls: "https://172.17.1.202:2380"
          initial-cluster: "g-master01.bjyztj.sjhl=https://172.17.1.201:2380,g-master02.bjyztj.sjhl=https://172.17.1.202:2380"
          initial-cluster-state: existing
        serverCertSANs:
          - g-master02
          - 172.17.1.202
        peerCertSANs:
          - g-master02
          - 172.17.1.202
    ---
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    mode: ipvs
    
    # Generate certificates
    kubeadm init phase certs all --config kubeadm_master02.conf
    # Set up the local etcd member
    kubeadm init phase etcd local --config kubeadm_master02.conf
    # Generate the kubelet config
    kubeadm init phase kubeconfig kubelet --config kubeadm_master02.conf
    # Start the kubelet
    kubeadm init phase kubelet-start --config kubeadm_master02.conf
    # Add master02's etcd member to the cluster
    kubectl exec -n kube-system etcd-g-master01.bjyztj.sjhl -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://172.17.1.201:2379 member add master2 https://172.17.1.202:2380
    
    
    # On k8s 1.17, the etcd member is added like this instead:
    # kubectl exec -n kube-system  etcd-yzsjhl-evdc-promaster01.opi.com   -- etcdctl   --cacert="/etc/kubernetes/pki/etcd/ca.crt"   --cert="/etc/kubernetes/pki/etcd/peer.crt"  --key="/etc/kubernetes/pki/etcd/peer.key"  --endpoints=https://10.16.1.105:2379    member add master2 --peer-urls="https://10.16.1.106:2380"
    
    # Start kube-apiserver, kube-controller-manager and kube-scheduler
    kubeadm init phase kubeconfig all --config kubeadm_master02.conf
    kubeadm init phase control-plane all --config kubeadm_master02.conf
    
    # Check node status
    kubectl get nodes
    # Mark the node as a master
    kubeadm init phase mark-control-plane --config kubeadm_master02.conf
    # Check again
    kubectl get nodes
    
    master03
    • kubeadm_master03.conf
    apiVersion: kubeadm.k8s.io/v1beta1
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 172.17.1.203
      bindPort: 6443
    ---
    apiVersion: kubeadm.k8s.io/v1beta1
    kind: ClusterConfiguration
    kubernetesVersion: v1.14.0
    controlPlaneEndpoint: "172.17.1.200:8443"
    imageRepository: registry.aliyuncs.com/google_containers
    apiServer:
      certSANs:
      - "g-master01"
      - "g-master02"
      - "g-master03"
      - 172.17.1.201
      - 172.17.1.202
      - 172.17.1.203
      - 172.17.1.200
    networking:
      podSubnet: "10.244.0.0/16"
      serviceSubnet: "10.96.0.0/12"
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    
    etcd:
      local:
        extraArgs:
          listen-client-urls: "https://127.0.0.1:2379,https://172.17.1.203:2379"
          advertise-client-urls: "https://172.17.1.203:2379"
          listen-peer-urls: "https://172.17.1.203:2380"
          initial-advertise-peer-urls: "https://172.17.1.203:2380"
          initial-cluster: "g-master01.bjyztj.sjhl=https://172.17.1.201:2380,g-master02.bjyztj.sjhl=https://172.17.1.202:2380,g-master03.bjyztj.sjhl=https://172.17.1.203:2380"
          initial-cluster-state: existing
        serverCertSANs:
          - g-master03
          - 172.17.1.203
        peerCertSANs:
          - g-master03
          - 172.17.1.203
    ---
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    mode: ipvs
    
    # Generate certificates
    kubeadm init phase certs all --config kubeadm_master03.conf
    # Set up the local etcd member
    kubeadm init phase etcd local --config kubeadm_master03.conf
    # Generate the kubelet config
    kubeadm init phase kubeconfig kubelet --config kubeadm_master03.conf
    # Start the kubelet
    kubeadm init phase kubelet-start --config kubeadm_master03.conf
    # Add master03's etcd member to the cluster
    kubectl exec -n kube-system etcd-g-master01.bjyztj.sjhl -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://172.17.1.201:2379 member add master3 https://172.17.1.203:2380
    # On k8s 1.17, the etcd member is added like this instead:
    #kubectl exec -n kube-system  etcd-yzsjhl-evdc-promaster01.opi.com   -- etcdctl   --cacert="/etc/kubernetes/pki/etcd/ca.crt"   --cert="/etc/kubernetes/pki/etcd/peer.crt"  --key="/etc/kubernetes/pki/etcd/peer.key"  --endpoints=https://10.16.1.105:2379    member add master3 --peer-urls="https://10.16.1.107:2380"
    
    # Start kube-apiserver, kube-controller-manager and kube-scheduler
    kubeadm init phase kubeconfig all --config kubeadm_master03.conf
    kubeadm init phase control-plane all --config kubeadm_master03.conf
    
    # Check node status
    kubectl get nodes
    # Mark the node as a master
    kubeadm init phase mark-control-plane --config kubeadm_master03.conf
    # Check again
    kubectl get nodes
    

    Worker nodes join the cluster

    Your Kubernetes control-plane has initialized successfully!
    
    To start using your cluster, you need to run the following as a regular user:
    
      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
      https://kubernetes.io/docs/concepts/cluster-administration/addons/
    
    You can now join any number of control-plane nodes by copying certificate authorities
    and service account keys on each node and then running the following as root:
    
      kubeadm join 172.17.1.200:8443 --token 45vifj.buiy0wjpjrortpjn \
        --discovery-token-ca-cert-hash sha256:f1285c8b72d22a5cf97f1713ad7ed258efcba275203ef42d8877d9d66902fee8 \
        --experimental-control-plane
    
    Then you can join any number of worker nodes by running the following on each as root:
    
    kubeadm join 172.17.1.200:8443 --token 45vifj.buiy0wjpjrortpjn \
        --discovery-token-ca-cert-hash sha256:f1285c8b72d22a5cf97f1713ad7ed258efcba275203ef42d8877d9d66902fee8
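
    Bootstrap tokens expire (24 hours by default), so if the join commands above no longer work, a fresh worker join command can be printed on any master:

    kubeadm token create --print-join-command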
    

    Status checks

    kubectl get nodes
    kubectl cluster-info
    kubectl get cs
    kubectl exec -n kube-system etcd-g-master01.bjyztj.sjhl -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://172.17.1.201:2379 member list
    ipvsadm -ln
    kubectl logs --tail=10 kube-proxy-tqxlq -n kube-system
    

    Dashboard

    #!/bin/bash
    DASHBOARD_VERSION=v1.10.1
    HEAPSTER_VERSION=v1.5.4
    GRAFANA_VERSION=v5.0.4
    INFLUXDB_VERSION=v1.5.2
    username=registry.cn-hangzhou.aliyuncs.com/google_containers
    images=(
            kubernetes-dashboard-amd64:${DASHBOARD_VERSION}
            heapster-grafana-amd64:${GRAFANA_VERSION}
            heapster-amd64:${HEAPSTER_VERSION}
            heapster-influxdb-amd64:${INFLUXDB_VERSION}
            )
    for image in ${images[@]}
    do
    docker pull ${username}/${image}
    docker tag ${username}/${image} k8s.gcr.io/${image}
    docker rmi ${username}/${image}
    done
    
    wget http://pencil-file.oss-cn-hangzhou.aliyuncs.com/blog/kubernetes-dashboard.yaml
    kubectl apply -f kubernetes-dashboard.yaml
    # Check the dashboard pods; Running means it is working
    kubectl get pods -n kube-system
    
    # Create a serviceaccount for logging in to the dashboard
    kubectl create serviceaccount dashboard-admin -n kube-system
    
    # Create a clusterrolebinding
    kubectl create clusterrolebinding cluster-dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
     
    # Change the dashboard service type to NodePort (this can also be done in the yaml before deploying)
    kubectl patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}' -n kube-system
     
    # Find the exact secret name, dashboard-admin-token-xxxxx
    kubectl get secret -n kube-system | grep dashboard-admin-token
    
     
    # View the token
    kubectl describe secret dashboard-admin-token-jv6gk -n kube-system
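
    If you prefer to print the token in a single step, the secret name can be looked up from the ServiceAccount (a sketch; on 1.14 the token secret is created automatically):

    kubectl -n kube-system get secret \
      $(kubectl -n kube-system get sa dashboard-admin -o jsonpath='{.secrets[0].name}') \
      -o jsonpath='{.data.token}' | base64 -d; echo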
    
    # Find the NodePort the dashboard is mapped to
    kubectl get svc -n kube-system
    
    
    $ kubectl get svc -n kube-system
    NAME                   TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                  AGE
    kube-dns               ClusterIP   10.96.0.10       <none>        53/UDP,53/TCP,9153/TCP   17h
    kubernetes-dashboard   NodePort    10.105.254.197   <none>        443:32722/TCP            15h
    
    
    # Access the web UI
    https://172.17.1.200:32722
    
    