  • Kubernetes 1.7 Installation

      Why install 1.7 right after finishing Kubernetes 1.5? :( Because the Istio service mesh requires it, and yum stubbornly installs 1.5 no matter what, so the only way was to download the binaries and edit the configuration files by hand. Which is fine; it took two days to walk through the whole process again :((

    Since this touches so many pieces, I only update once I've made some progress, so things may be missing; I'll fill them in over time.

    Generating certificates

    $ wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
    $ chmod +x cfssl_linux-amd64
    $ sudo mv cfssl_linux-amd64 /usr/local/bin/cfssl
    
    $ wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
    $ chmod +x cfssljson_linux-amd64
    $ sudo mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
    
    $ wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
    $ chmod +x cfssl-certinfo_linux-amd64
    $ sudo mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
    
    $ export PATH=/usr/local/bin:$PATH
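
    A quick sanity check that the tools actually landed on the PATH (each should report a version or a path):

    $ cfssl version
    $ which cfssljson cfssl-certinfo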

     Create the CA files

    $ mkdir /root/ssl
    $ cd /root/ssl
    $ cfssl print-defaults config > config.json
    $ cfssl print-defaults csr > csr.json
    # cp config.json ca-config.json
    # cat ca-config.json
    {
        "signing": {
            "default": {
                "expiry": "87600h"
            },
            "profiles": {
                "kubernetes": {
                    "expiry": "87600h",
                    "usages": [
                        "signing",
                        "key encipherment",
                        "server auth",
                        "client auth"
                    ]
                }
            }
        }
    }
    # cp csr.json ca-csr.json
    # cat ca-csr.json
    {
        "CN": "kubernetes",
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "US",
                "L": "CA",
                "ST": "San Francisco",
                "O": "k8s",
                "OU": "System"
            }
        ]
    }

    Generate the certificate and private key

    [root@k8s-master ssl]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
    2018/05/04 15:59:17 [INFO] generating a new CA key and certificate from CSR
    2018/05/04 15:59:17 [INFO] generate received request
    2018/05/04 15:59:17 [INFO] received CSR
    2018/05/04 15:59:17 [INFO] generating key: rsa-2048
    2018/05/04 15:59:18 [INFO] encoded CSR
    2018/05/04 15:59:18 [INFO] signed certificate with serial number 695354289400364080274546358321124872024891978466
    [root@k8s-master ssl]# ls ca*
    ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem

    Create the Kubernetes certificate

    [root@k8s-master ssl]# cat kubernetes-csr.json 
    {
        "CN": "kubernetes",
        "hosts": [
          "127.0.0.1",
          "192.168.44.108",
          "192.168.44.109",
          "192.168.44.110",
          "10.254.0.1",
          "kubernetes",
          "kubernetes.default",
          "kubernetes.default.svc",
          "kubernetes.default.svc.cluster",
          "kubernetes.default.svc.cluster.local"
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "US",
                "L": "CA",
                "ST": "San Francisco",
                "O": "k8s",
                "OU": "System"
            }
        ]
    }
    [root@k8s-master ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
    2018/05/04 16:04:51 [INFO] generate received request
    2018/05/04 16:04:51 [INFO] received CSR
    2018/05/04 16:04:51 [INFO] generating key: rsa-2048
    2018/05/04 16:04:52 [INFO] encoded CSR
    2018/05/04 16:04:52 [INFO] signed certificate with serial number 506940293312398261575885326074037672650522516311
    2018/05/04 16:04:52 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
    websites. For more information see the Baseline Requirements for the Issuance and Management
    of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
    specifically, section 10.2.3 ("Information Requirements").
    
    
    [root@k8s-master ssl]# ls kubernetes*
    kubernetes.csr  kubernetes-csr.json  kubernetes-key.pem  kubernetes.pem

    Distribute the certificates

    [root@k8s-master ssl]# mkdir -p /etc/kubernetes/ssl
    [root@k8s-master ssl]# cp *.pem /etc/kubernetes/ssl
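
    The other nodes need the same certificates. A minimal sketch, assuming the two node IPs from the CSR above and root SSH access:

    $ for node in 192.168.44.109 192.168.44.110; do
        ssh root@${node} "mkdir -p /etc/kubernetes/ssl"
        scp /etc/kubernetes/ssl/*.pem root@${node}:/etc/kubernetes/ssl/
      done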

    Master Node

    • ETCD

    Download and install

    Download from https://github.com/coreos/etcd/releases

    tar -xvf etcd-v3.2.10-linux-amd64.tar.gz
    mv etcd-v3.2.10-linux-amd64/etcd* /usr/local/bin
    [root@k8s-master ~]# cat /etc/etcd/etcd.conf

    ETCD_NAME=etcd01
    ETCD_DATA_DIR="/var/lib/etcd"
    ETCD_LISTEN_PEER_URLS="https://192.168.44.108:2380"
    ETCD_LISTEN_CLIENT_URLS="https://192.168.44.108:2379,https://127.0.0.1:2379"
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.44.108:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.44.108:2379"
    ETCD_INITIAL_CLUSTER_STATE="new"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster1"
    ETCD_INITIAL_CLUSTER="etcd01=https://192.168.44.108:2380"

    [root@k8s-master ~]# cat /etc/systemd/system/etcd.service
    [Unit]
    Description=Etcd Server
    After=network.target
    After=network-online.target
    Wants=network-online.target

    [Service]
    Type=notify
    WorkingDirectory=/var/lib/etcd/
    EnvironmentFile=/etc/etcd/etcd.conf
    #User=etcd
    # set GOMAXPROCS to number of processors
    ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/local/bin/etcd --name="${ETCD_NAME}" --data-dir="${ETCD_DATA_DIR}" --listen-client-urls="${ETCD_LISTEN_CLIENT_URLS}" --listen-peer-urls="${ETCD_LISTEN_PEER_URLS}" --advertise-client-urls="${ETCD_ADVERTISE_CLIENT_URLS}" --cert-file=/etc/kubernetes/ssl/kubernetes.pem --key-file=/etc/kubernetes/ssl/kubernetes-key.pem --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem --trusted-ca-file=/etc/kubernetes/ssl/ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem --initial-cluster-token="${ETCD_INITIAL_CLUSTER_TOKEN}" --initial-cluster="${ETCD_INITIAL_CLUSTER}" --initial-cluster-state="${ETCD_INITIAL_CLUSTER_STATE}" "
    Restart=on-failure
    LimitNOFILE=65536

    [Install]
    WantedBy=multi-user.target
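
    With the unit file and /etc/etcd/etcd.conf in place, create the data directory (the WorkingDirectory must exist) and start etcd the usual systemd way:

    $ mkdir -p /var/lib/etcd
    $ systemctl daemon-reload
    $ systemctl enable etcd
    $ systemctl start etcd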

    Status after startup:

    [root@k8s-master ~]# systemctl status -l etcd
    ● etcd.service - Etcd Server
       Loaded: loaded (/etc/systemd/system/etcd.service; enabled; vendor preset: disabled)
       Active: active (running) since Fri 2018-05-04 16:20:04 CST; 2min 22s ago
     Main PID: 3779 (etcd)
       Memory: 105.3M
       CGroup: /system.slice/etcd.service
               └─3779 /usr/local/bin/etcd --name=etcd01 --data-dir=/var/lib/etcd --listen-client-urls=https://192.168.44.108:2379,https://127.0.0.1:2379 --listen-peer-urls=https://192.168.44.108:2380 --advertise-client-urls=https://192.168.44.108:2379 --cert-file=/etc/kubernetes/ssl/kubernetes.pem --key-file=/etc/kubernetes/ssl/kubernetes-key.pem --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem --trusted-ca-file=/etc/kubernetes/ssl/ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem --initial-cluster-token=etcd-cluster1 --initial-cluster=etcd01=https://192.168.44.108:2380 --initial-cluster-state=new
    
    May 04 16:20:04 k8s-master etcd[3779]: published {Name:etcd01 ClientURLs:[https://192.168.44.108:2379]} to cluster 8a776c225cc2b77d
    May 04 16:20:04 k8s-master etcd[3779]: ready to serve client requests
    May 04 16:20:04 k8s-master etcd[3779]: dialing to target with scheme: ""
    May 04 16:20:04 k8s-master etcd[3779]: could not get resolver for scheme: ""
    May 04 16:20:04 k8s-master etcd[3779]: serving client requests on 127.0.0.1:2379
    May 04 16:20:04 k8s-master systemd[1]: Started Etcd Server.
    May 04 16:20:04 k8s-master etcd[3779]: ready to serve client requests
    May 04 16:20:04 k8s-master etcd[3779]: dialing to target with scheme: ""
    May 04 16:20:04 k8s-master etcd[3779]: could not get resolver for scheme: ""
    May 04 16:20:04 k8s-master etcd[3779]: serving client requests on 192.168.44.108:2379

    Verify it:

    [root@k8s-master ~]# etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/kubernetes.pem --key-file=/etc/kubernetes/ssl/kubernetes-key.pem --endpoints=https://192.168.44.108:2379 cluster-health
    member 83fe4aa9dfd8208e is healthy: got healthy result from https://192.168.44.108:2379
    cluster is healthy
    
    
    [root@k8s-master ~]# etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/kubernetes.pem --key-file=/etc/kubernetes/ssl/kubernetes-key.pem --endpoints=https://192.168.44.108:2379 member list
    83fe4aa9dfd8208e: name=etcd01 peerURLs=http://192.168.44.108:2380 clientURLs=https://192.168.44.108:2379 isLeader=true

    • APIServer
    [root@k8s-master ~]# cat  /usr/lib/systemd/system/kube-apiserver.service
    [Unit]
    Description=Kubernetes API Server
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    Wants=etcd.service
    After=etcd.service
    
    [Service]
    EnvironmentFile=-/etc/kubernetes/config
    EnvironmentFile=-/etc/kubernetes/apiserver
    ExecStart=/usr/local/bin/kube-apiserver \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBE_ETCD_SERVERS \
            $KUBE_API_ADDRESS \
            $KUBE_API_PORT \
            $KUBELET_PORT \
            $KUBE_ALLOW_PRIV \
            $KUBE_SERVICE_ADDRESSES \
            $KUBE_ADMISSION_CONTROL \
            $KUBE_API_ARGS
    Restart=on-failure
    Type=notify
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    [root@k8s-master system]# cat /etc/kubernetes/config
    ###
    # kubernetes system config
    #
    # The following values are used to configure various aspects of all
    # kubernetes services, including
    #
    #   kube-apiserver.service
    #   kube-controller-manager.service
    #   kube-scheduler.service
    #   kubelet.service
    #   kube-proxy.service
    # logging to stderr means we get it in the systemd journal
    KUBE_LOGTOSTDERR="--logtostderr=true"
    
    # journal message level, 0 is debug
    KUBE_LOG_LEVEL="--v=0"
    
    # Should this cluster be allowed to run privileged docker containers
    KUBE_ALLOW_PRIV="--allow-privileged=false"
    
    # How the controller-manager, scheduler, and proxy find the apiserver
    KUBE_MASTER="--master=http://192.168.0.108:8080"

    [root@k8s-master ssl]# cat /etc/kubernetes/apiserver
    ###
    # kubernetes system config
    #
    # The following values are used to configure the kube-apiserver
    #

    # The address on the local server to listen to.
    KUBE_API_ADDRESS="--advertise-address=192.168.44.108 --bind-address=192.168.44.108 --insecure-bind-address=192.168.44.108"

    # The port on the local server to listen on.
    # KUBE_API_PORT="--port=8080"

    # Port minions listen on
    # KUBELET_PORT="--kubelet-port=10250"

    # Comma separated list of nodes in the etcd cluster
    KUBE_ETCD_SERVERS="--etcd-servers=https://192.168.44.108:2379"

    # Address range to use for services
    KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"

    # default admission control policies
    #KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ServiceAccount,SecurityContextDeny,ResourceQuota"
    KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ServiceAccount,ResourceQuota"

    # Add your own!
    KUBE_API_ARGS="--secure-port=443 --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem --client-ca-file=/etc/kubernetes/ssl/ca.pem --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem --etcd-cafile=/etc/kubernetes/ssl/ca.pem --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem"

    • Controller-manager
    [root@k8s-master ~]# cat /usr/lib/systemd/system/kube-controller-manager.service 
    [Unit]
    Description=Kubernetes Controller Manager
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    After=etcd.service
    After=kube-apiserver.service
    Requires=etcd.service
    Requires=kube-apiserver.service
    
    [Service]
    EnvironmentFile=-/etc/kubernetes/config
    EnvironmentFile=-/etc/kubernetes/controller-manager
    ExecStart=/usr/local/bin/kube-controller-manager \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBE_MASTER \
            $KUBE_CONTROLLER_MANAGER_ARGS
    Restart=on-failure
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target

    [root@k8s-master ssl]# cat /etc/kubernetes/controller-manager
    KUBE_CONTROLLER_MANAGER_ARGS="--address=127.0.0.1 --service-cluster-ip-range=10.254.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem --root-ca-file=/etc/kubernetes/ssl/ca.pem --leader-elect=true"

    • Scheduler
    [root@k8s-master ~]# cat /usr/lib/systemd/system/kube-scheduler.service 
    [Unit]
    Description=Kubernetes Scheduler Plugin
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    After=etcd.service
    After=kube-apiserver.service
    Requires=etcd.service
    Requires=kube-apiserver.service
    
    [Service]
    EnvironmentFile=-/etc/kubernetes/config
    EnvironmentFile=-/etc/kubernetes/scheduler
    ExecStart=/usr/local/bin/kube-scheduler \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBE_MASTER \
            $KUBE_SCHEDULER_ARGS
    Restart=on-failure
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    [root@k8s-master ~]# cat /etc/kubernetes/scheduler 
    ###
    # kubernetes scheduler config
    # default config should be adequate
    # Add your own!
    KUBE_SCHEDULER_ARGS="--address=127.0.0.1"
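
    With the three unit files and their EnvironmentFiles written, reload systemd and bring the control plane up in order (apiserver first, since the other two require it):

    $ systemctl daemon-reload
    $ for svc in kube-apiserver kube-controller-manager kube-scheduler; do
        systemctl enable ${svc} && systemctl start ${svc}
      done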

    Verify the components:

    [root@k8s-master ssl]# kubectl get componentstatuses
    NAME                 STATUS    MESSAGE              ERROR
    scheduler            Healthy   ok                   
    controller-manager   Healthy   ok                   
    etcd-0               Healthy   {"health": "true"}  

    Creating kubeconfig files

    export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
    cat > token.csv <<EOF
    ${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
    EOF
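
    Note that the API server only honors this bootstrap token if it is told where the file is; with the layout above that means appending --token-auth-file to KUBE_API_ARGS (the /etc/kubernetes/token.csv path is my assumption; adjust to wherever you keep the file):

    # appended to KUBE_API_ARGS in /etc/kubernetes/apiserver (assumed token.csv location)
    --token-auth-file=/etc/kubernetes/token.csv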

    Create the kubelet bootstrapping kubeconfig file

    $ cd /etc/kubernetes
    $ export KUBE_APISERVER="https://192.168.44.108:6443"
    
    $ kubectl config set-cluster kubernetes \
    --certificate-authority=/etc/kubernetes/ssl/ca.pem \
    --embed-certs=true \
    --server=${KUBE_APISERVER} \
    --kubeconfig=bootstrap.kubeconfig
    
    $ kubectl config set-credentials kubelet-bootstrap \
    --token=${BOOTSTRAP_TOKEN} \
    --kubeconfig=bootstrap.kubeconfig
    
    $ kubectl config set-context default \
    --cluster=kubernetes \
    --user=kubelet-bootstrap \
    --kubeconfig=bootstrap.kubeconfig
    
    $ kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
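
    To double-check what was written, dump the file back (the embedded certificate data is shown redacted):

    $ kubectl config view --kubeconfig=bootstrap.kubeconfig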

    Create the kube-proxy kubeconfig file

    $ export KUBE_APISERVER="https://192.168.44.108:6443"
    $ 
    $ kubectl config set-cluster kubernetes \
    --certificate-authority=/etc/kubernetes/ssl/ca.pem \
    --embed-certs=true \
    --server=${KUBE_APISERVER} \
    --kubeconfig=kube-proxy.kubeconfig
    $ 
    $ kubectl config set-credentials kube-proxy \
    --client-certificate=/etc/kubernetes/ssl/kubernetes.pem \
    --client-key=/etc/kubernetes/ssl/kubernetes-key.pem \
    --embed-certs=true \
    --kubeconfig=kube-proxy.kubeconfig
    $ 
    $ kubectl config set-context default \
    --cluster=kubernetes \
    --user=kube-proxy \
    --kubeconfig=kube-proxy.kubeconfig
    $ 
    $ kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

    Distribute to each node

    [root@k8s-master kubernetes]# scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.44.109:/etc/kubernetes/
    root@192.168.44.109's password: 
    bootstrap.kubeconfig                                                                                  100% 2176     2.1KB/s   00:00    
    kube-proxy.kubeconfig                                                                                 100% 6586     6.4KB/s   00:00    
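
    And likewise for the second node, assuming 192.168.44.110 is also a worker:

    [root@k8s-master kubernetes]# scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.44.110:/etc/kubernetes/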

    Worker Nodes

    • Kube-Proxy
    [root@node1 kubernetes]# cat  /usr/lib/systemd/system/kube-proxy.service 
    [Unit]
    Description=Kubernetes Kube-Proxy Server
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    After=network.target
    
    [Service]
    EnvironmentFile=-/etc/kubernetes/config
    EnvironmentFile=-/etc/kubernetes/proxy
    ExecStart=/usr/local/bin/kube-proxy \
                $KUBE_LOGTOSTDERR \
                $KUBE_LOG_LEVEL \
                $KUBE_MASTER \
                $KUBE_PROXY_ARGS
    Restart=on-failure
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    [root@node1 kubernetes]# cat /etc/kubernetes/config
    KUBE_LOGTOSTDERR="--logtostderr=true"
    KUBE_LOG_LEVEL="--v=0"
    KUBE_ALLOW_PRIV="--allow_privileged=false"
    KUBE_MASTER="--master=http://k8s-master:8080"
    [root@node1 kubernetes]# cat /etc/kubernetes/proxy
    KUBE_PROXY_ARGS=""

    • Kubelet
    [root@node1 kubernetes]# cat  /usr/lib/systemd/system/kubelet.service 
    [Unit]
    Description=Kubernetes Kubelet Server
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    After=docker.service
    Requires=docker.service
    
    [Service]
    WorkingDirectory=/var/lib/kubelet
    EnvironmentFile=-/etc/kubernetes/config
    EnvironmentFile=-/etc/kubernetes/kubelet
    ExecStart=/usr/local/bin/kubelet \
                $KUBE_LOGTOSTDERR \
                $KUBE_LOG_LEVEL \
                $KUBELET_API_SERVER \
                $KUBELET_ADDRESS \
                $KUBELET_PORT \
                $KUBELET_HOSTNAME \
                $KUBE_ALLOW_PRIV \
                $KUBELET_ARGS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    [root@node1 kubernetes]# cat /etc/kubernetes/kubelet

    KUBELET_ADDRESS="--address=0.0.0.0"
    KUBELET_PORT="--port=10250"
    KUBELET_HOSTNAME="--hostname_override=node1"
    KUBELET_API_SERVER="--api_servers=http://k8s-master:8080"
    KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=kubernetes/pause:latest"
    KUBELET_ARGS="--cgroup-driver=cgroupfs --cluster-dns=10.254.0.2 --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig --require-kubeconfig --cert-dir=/etc/kubernetes/ssl --cluster-domain=cluster.local. --hairpin-mode promiscuous-bridge --serialize-image-pulls=false"

    The kubelet work dir (/var/lib/kubelet, the WorkingDirectory above) is not created automatically and has to be made by hand before starting the node services, for example:
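
    # create the kubelet work dir, then start the node services (usual systemctl flow)
    $ mkdir -p /var/lib/kubelet
    $ systemctl daemon-reload
    $ systemctl enable kubelet kube-proxy
    $ systemctl start kubelet kube-proxy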

    Flannel:

    yum install flannel

    [root@k8s-master ~]# cat /etc/sysconfig/flanneld

    # Flanneld configuration options

    # etcd url location. Point this to the server where etcd runs
    FLANNEL_ETCD_ENDPOINTS="https://192.168.44.108:2379"

    # etcd config key. This is the configuration key that flannel queries
    # For address range assignment
    FLANNEL_ETCD_PREFIX="/atomic.io/network"

    # Any additional options that you want to pass
    #FLANNEL_OPTIONS=""
    FLANNEL_OPTIONS="-etcd-cafile=/etc/kubernetes/ssl/ca.pem -etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem -etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem"

     
    [root@k8s-master ~]# cat /usr/lib/systemd/system/flanneld.service 
    [Unit]
    Description=Flanneld overlay address etcd agent
    After=network.target
    After=network-online.target
    Wants=network-online.target
    After=etcd.service
    Before=docker.service
    
    [Service]
    Type=notify
    EnvironmentFile=/etc/sysconfig/flanneld
    EnvironmentFile=-/etc/sysconfig/docker-network
    ExecStart=/usr/bin/flanneld-start $FLANNEL_OPTIONS
    ExecStartPost=/usr/libexec/flannel/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    RequiredBy=docker.service

    On the master node:

    [root@k8s-master ~]# etcdctl mk /atomic.io/network/config '{ "Network": "10.1.0.0/16" }'
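
    flanneld will refuse to start until that key exists. Once it is set, a sketch of the usual sequence: read the key back (same TLS flags as the earlier health check), start flannel, then restart docker so it picks up the bridge options written by mk-docker-opts.sh:

    $ etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/kubernetes.pem --key-file=/etc/kubernetes/ssl/kubernetes-key.pem get /atomic.io/network/config
    $ systemctl daemon-reload
    $ systemctl enable flanneld
    $ systemctl start flanneld
    $ systemctl restart docker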

    Pulling images from docker.io

    [root@k8s-master1 ~]# cat  /etc/docker/daemon.json 
    {
      "registry-mirrors": ["https://registry.docker-cn.com"]
    }
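
    Docker must be restarted for the mirror to take effect; docker info should then list it under Registry Mirrors:

    $ systemctl restart docker
    $ docker info | grep -A1 -i mirror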

    Setting up the ServiceAccount method

     For details see http://www.cnblogs.com/ericnie/p/6894688.html

     Happy to report it still works!

    Generate the kubeconfig file for kubectl

    kubectl config set-cluster kubernetes \
    --certificate-authority=/srv/kubernetes/ca.crt \
    --embed-certs=true \
    --server=http://192.168.44.108:8080
    
    <<'COMMENT'
    Cluster "kubernetes" set.
    COMMENT
    
    # Set client authentication parameters
    kubectl config set-credentials admin \
    --client-certificate=/srv/kubernetes/server.cert \
    --embed-certs=true \
    --client-key=/srv/kubernetes/server.key
    
    <<'COMMENT'
    User "admin" set.
    COMMENT
    
    # Set context parameters
    kubectl config set-context kubernetes \
    --cluster=kubernetes \
    --user=admin
    
    <<'COMMENT'
    Context "kubernetes" set.
    COMMENT
    
    # Switch to the default context
    kubectl config use-context kubernetes

    Once finished, you will see the generated ~/.kube/config file.

    For details, see:

    https://o-my-chenjian.com/2017/04/26/Create-The-File-Of-Kubeconfig-For-K8s/

    Setting up KubeDNS

    When Istio starts up, it uses the cluster DNS to resolve the ingress address, so KubeDNS must be configured.

    For details, see:

    https://github.com/zhuchuangang/k8s-install-scripts/tree/master/yaml/kubedns

    [root@k8s-master kubedns]# cat kubedns-cm.yaml 
    # Copyright 2016 The Kubernetes Authors.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: kube-dns
      namespace: kube-system
      labels:
        addonmanager.kubernetes.io/mode: EnsureExists
    [root@k8s-master kubedns]# cat kubedns-sa.yaml 
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: kube-dns
      namespace: kube-system
      labels:
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
    [root@k8s-master kubedns]# cat kubedns-svc.yaml 
    # Copyright 2016 The Kubernetes Authors.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    
    # __MACHINE_GENERATED_WARNING__
    
    apiVersion: v1
    kind: Service
    metadata:
      name: kube-dns
      namespace: kube-system
      labels:
        k8s-app: kube-dns
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
        kubernetes.io/name: "KubeDNS"
    spec:
      selector:
        k8s-app: kube-dns
      clusterIP: 10.254.254.254
      ports:
      - name: dns
        port: 53
        protocol: UDP
      - name: dns-tcp
        port: 53
        protocol: TCP
    [root@k8s-master kubedns]# cat kubedns-controller.yaml 
    # Copyright 2016 The Kubernetes Authors.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    
    # Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
    # in sync with this file.
    
    # __MACHINE_GENERATED_WARNING__
    
    apiVersion: extensions/v1beta1
    kind: Deployment
    metadata:
      name: kube-dns
      namespace: kube-system
      labels:
        k8s-app: kube-dns
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
    spec:
      # replicas: not specified here:
      # 1. In order to make Addon Manager do not reconcile this replicas parameter.
      # 2. Default is 1.
      # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
      strategy:
        rollingUpdate:
          maxSurge: 10%
          maxUnavailable: 0
      selector:
        matchLabels:
          k8s-app: kube-dns
      template:
        metadata:
          labels:
            k8s-app: kube-dns
          annotations:
            scheduler.alpha.kubernetes.io/critical-pod: ''
        spec:
          tolerations:
          - key: "CriticalAddonsOnly"
            operator: "Exists"
          volumes:
          - name: kube-dns-config
            configMap:
              name: kube-dns
              optional: true
          containers:
          - name: kubedns
            image: registry.cn-hangzhou.aliyuncs.com/szss_k8s/k8s-dns-kube-dns-amd64:1.14.5
            resources:
              # TODO: Set memory limits when we've profiled the container for large
              # clusters, then set request = limit to keep this container in
              # guaranteed class. Currently, this container falls into the
              # "burstable" category so the kubelet doesn't backoff from restarting it.
              limits:
                memory: 170Mi
              requests:
                cpu: 100m
                memory: 70Mi
            livenessProbe:
              httpGet:
                path: /healthcheck/kubedns
                port: 10054
                scheme: HTTP
              initialDelaySeconds: 60
              timeoutSeconds: 5
              successThreshold: 1
              failureThreshold: 5
            readinessProbe:
              httpGet:
                path: /readiness
                port: 8081
                scheme: HTTP
              # we poll on pod startup for the Kubernetes master service and
              # only setup the /readiness HTTP server once that's available.
              initialDelaySeconds: 3
              timeoutSeconds: 5
            args:
            - --domain=cluster.local.
            - --dns-port=10053
            - --config-dir=/kube-dns-config
            - --v=2
    
            env:
            - name: PROMETHEUS_PORT
              value: "10055"
            ports:
            - containerPort: 10053
              name: dns-local
              protocol: UDP
            - containerPort: 10053
              name: dns-tcp-local
              protocol: TCP
            - containerPort: 10055
              name: metrics
              protocol: TCP
            volumeMounts:
            - name: kube-dns-config
              mountPath: /kube-dns-config
          - name: dnsmasq
            image: registry.cn-hangzhou.aliyuncs.com/szss_k8s/k8s-dns-dnsmasq-nanny-amd64:1.14.5
            livenessProbe:
              httpGet:
                path: /healthcheck/dnsmasq
                port: 10054
                scheme: HTTP
              initialDelaySeconds: 60
              timeoutSeconds: 5
              successThreshold: 1
              failureThreshold: 5
            args:
            - -v=2
            - -logtostderr
            - -configDir=/etc/k8s/dns/dnsmasq-nanny
            - -restartDnsmasq=true
            - --
            - -k
            - --cache-size=1000
            - --log-facility=-
            - --server=/cluster.local./127.0.0.1#10053
            - --server=/in-addr.arpa/127.0.0.1#10053
            - --server=/ip6.arpa/127.0.0.1#10053
            ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
            # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
            resources:
              requests:
                cpu: 150m
                memory: 20Mi
            volumeMounts:
            - name: kube-dns-config
              mountPath: /etc/k8s/dns/dnsmasq-nanny
          - name: sidecar
            image: registry.cn-hangzhou.aliyuncs.com/szss_k8s/k8s-dns-sidecar-amd64:1.14.5
            livenessProbe:
              httpGet:
                path: /metrics
                port: 10054
                scheme: HTTP
              initialDelaySeconds: 60
              timeoutSeconds: 5
              successThreshold: 1
              failureThreshold: 5
            args:
            - --v=2
            - --logtostderr
            - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local.,5,A
            - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local.,5,A
            ports:
            - containerPort: 10054
              name: metrics
              protocol: TCP
            resources:
              requests:
                memory: 20Mi
                cpu: 10m
          dnsPolicy: Default  # Don't use cluster DNS.
          serviceAccountName: kube-dns
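
    With the four manifests saved, load them in one go and watch the pod come up (filenames as above):

    [root@k8s-master kubedns]# kubectl create -f kubedns-cm.yaml -f kubedns-sa.yaml -f kubedns-svc.yaml -f kubedns-controller.yaml
    [root@k8s-master kubedns]# kubectl get pods -n kube-system -l k8s-app=kube-dns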

    If kube-dns won't start (I've run into this on CentOS), confirm the firewall is disabled and then reboot:

    systemctl disable firewalld
    
    reboot

    cluster-dns and cluster-domain also need to be configured in the kubelet:

    [root@node1 ~]# cat /etc/kubernetes/kubelet 
    KUBELET_ADDRESS="--address=0.0.0.0"
    KUBELET_PORT="--port=10250"
    KUBELET_HOSTNAME="--hostname_override=node1"
    KUBELET_API_SERVER="--api_servers=http://k8s-master:8080"
    KUBELET_ARGS="--cluster-dns=10.254.254.254 --cluster-domain=cluster.local --pod-infra-container-image=docker.io/kubernetes/pause:latest"