  • Installing Kubernetes (k8s) from binaries

    Install Docker on every node

    yum install -y yum-utils device-mapper-persistent-data lvm2

    yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

    yum install docker-ce -y

    Do not start Docker yet (it must pick up Flannel's network options first).

    Self-signed TLS certificates:

    mkdir ssl && cd ssl/

    wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64

    wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64

    wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

    chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64

    mv cfssl_linux-amd64 /usr/local/bin/cfssl

    mv cfssljson_linux-amd64 /usr/local/bin/cfssljson

    mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo

    ./certificate.sh
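    The certificate.sh script itself is not shown here; what follows is a minimal sketch of what such a script typically runs, assuming the usual cfssl JSON configs (ca-config.json, ca-csr.json, server-csr.json, admin-csr.json, kube-proxy-csr.json) sit in the same directory:

    # Sketch only - generate the CA, then sign the server, admin and kube-proxy certs
    cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
      -profile=kubernetes server-csr.json | cfssljson -bare server
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
      -profile=kubernetes admin-csr.json | cfssljson -bare admin
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
      -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy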

    ls | grep -v pem | xargs -i rm {}    # delete everything except the .pem files

    Remaining files:

    admin-key.pem  ca-key.pem  kube-proxy-key.pem  server-key.pem
    admin.pem      ca.pem      kube-proxy.pem      server.pem

    Deploy the etcd cluster

    tar xf etcd-v3.2.12-linux-amd64.tar.gz

    mkdir /opt/kubernetes/{ssl,bin,cfg} -p

    cp ssl/server*pem ssl/ca*.pem /opt/kubernetes/ssl/

    # /opt/kubernetes/ssl now contains: ca-key.pem  ca.pem  server-key.pem  server.pem

    scp -r /opt/kubernetes/ssl/* node1:/opt/kubernetes/ssl

    mv etcd-v3.2.12-linux-amd64/etcd /opt/kubernetes/bin/

    mv etcd-v3.2.12-linux-amd64/etcdctl /opt/kubernetes/bin/

    scp -r /opt/kubernetes/bin/* node1:/opt/kubernetes/bin

    scp -r /opt/kubernetes/cfg/etcd node1:/opt/kubernetes/cfg/

    vim /opt/kubernetes/cfg/etcd

    #[Member]

    ETCD_NAME="etcd01"

    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"

    ETCD_LISTEN_PEER_URLS="https://192.168.81.10:2380"

    ETCD_LISTEN_CLIENT_URLS="https://192.168.81.10:2379"

    #[Clustering]

    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.81.10:2380"

    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.81.10:2379"

    ETCD_INITIAL_CLUSTER="etcd01=https://192.168.81.10:2380,etcd02=https://192.168.81.20:2380,etcd03=https://192.168.81.30:2380"

    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"

    ETCD_INITIAL_CLUSTER_STATE="new"
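    The file above is for etcd01 on 192.168.81.10; etcd02 and etcd03 need their own ETCD_NAME and local IPs, while ETCD_INITIAL_CLUSTER stays identical on all members. A sketch of adapting the copied file on node1 (names and addresses assumed from this guide):

    # Rewrite the member name and local address on node1,
    # leaving the ETCD_INITIAL_CLUSTER* lines untouched
    ssh node1 "sed -i '/ETCD_INITIAL_CLUSTER/!{s/etcd01/etcd02/;s/192.168.81.10/192.168.81.20/}' /opt/kubernetes/cfg/etcd"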

    scp /usr/lib/systemd/system/etcd.service node1:/usr/lib/systemd/system

    vim /usr/lib/systemd/system/etcd.service

    [Unit]

    Description=Etcd Server

    After=network.target

    After=network-online.target

    Wants=network-online.target

    [Service]

    Type=notify

    EnvironmentFile=-/opt/kubernetes/cfg/etcd

    ExecStart=/opt/kubernetes/bin/etcd \
    --name=${ETCD_NAME} \
    --data-dir=${ETCD_DATA_DIR} \
    --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
    --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
    --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
    --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
    --initial-cluster=${ETCD_INITIAL_CLUSTER} \
    --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
    --initial-cluster-state=new \
    --cert-file=/opt/kubernetes/ssl/server.pem \
    --key-file=/opt/kubernetes/ssl/server-key.pem \
    --peer-cert-file=/opt/kubernetes/ssl/server.pem \
    --peer-key-file=/opt/kubernetes/ssl/server-key.pem \
    --trusted-ca-file=/opt/kubernetes/ssl/ca.pem \
    --peer-trusted-ca-file=/opt/kubernetes/ssl/ca.pem

    Restart=on-failure

    LimitNOFILE=65536

    [Install]

    WantedBy=multi-user.target

    Start the service

    systemctl start etcd

    systemctl enable etcd

    Add the binaries to PATH

    vim /etc/profile

    PATH=$PATH:/opt/kubernetes/bin

    source /etc/profile

    Check the cluster health

    cd ssl

    etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.81.10:2379,https://192.168.81.20:2379,https://192.168.81.30:2379" cluster-health

    Deploy the Flannel network

    vim /usr/lib/systemd/system/flanneld.service

    [Unit]

    Description=Flanneld overlay address etcd agent

    After=network-online.target network.target

    Before=docker.service

    [Service]

    Type=notify

    EnvironmentFile=/opt/kubernetes/cfg/flanneld

    ExecStart=/opt/kubernetes/bin/flanneld --ip-masq $FLANNEL_OPTIONS

    ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env

    Restart=on-failure

    [Install]

    WantedBy=multi-user.target
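    mk-docker-opts.sh translates the subnet Flannel leases to this node into Docker daemon flags. Once flanneld is running, /run/flannel/subnet.env typically looks like this (the subnet value is illustrative; it is whatever this node was allocated):

    cat /run/flannel/subnet.env
    DOCKER_NETWORK_OPTIONS=" --bip=172.17.17.1/24 --ip-masq=false --mtu=1450"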

    scp /usr/lib/systemd/system/flanneld.service node1:/usr/lib/systemd/system

    vim /usr/lib/systemd/system/docker.service

    Reference: https://cloud.tencent.com/developer/article/1405328

    [Unit]

    Description=Docker Application Container Engine

    Documentation=https://docs.docker.com

    After=network-online.target firewalld.service

    Wants=network-online.target

    [Service]

    Type=notify

    EnvironmentFile=/run/flannel/subnet.env

    ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS

    ExecReload=/bin/kill -s HUP $MAINPID

    LimitNOFILE=infinity

    LimitNPROC=infinity

    LimitCORE=infinity

    TimeoutStartSec=0

    Delegate=yes

    KillMode=process

    Restart=on-failure

    StartLimitBurst=3

    StartLimitInterval=60s

    [Install]

    WantedBy=multi-user.target

    scp /usr/lib/systemd/system/docker.service node1:/usr/lib/systemd/system

    cd /opt/kubernetes/cfg/ && vim flanneld

    FLANNEL_OPTIONS="--etcd-endpoints=https://192.168.81.10:2379,https://192.168.81.20:2379,https://192.168.81.30:2379

    -etcd-cafile=/opt/kubernetes/ssl/ca.pem

    -etcd-certfile=/opt/kubernetes/ssl/server.pem

    -etcd-keyfile=/opt/kubernetes/ssl/server-key.pem"

    scp /opt/kubernetes/cfg/flanneld node1:/opt/kubernetes/cfg/

    tar xf flannel-v0.9.1-linux-amd64.tar.gz

    mv flanneld mk-docker-opts.sh /opt/kubernetes/bin/

    scp /opt/kubernetes/bin/flanneld /opt/kubernetes/bin/mk-docker-opts.sh node1:/opt/kubernetes/bin/

    Write the Pod network configuration into etcd for flanneld to consume:

    cd /opt/kubernetes/ssl/

    /opt/kubernetes/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.81.10:2379,https://192.168.81.20:2379,https://192.168.81.30:2379" set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'
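    To confirm the key was written (same TLS flags and endpoints as above):

    /opt/kubernetes/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.81.10:2379,https://192.168.81.20:2379,https://192.168.81.30:2379" get /coreos.com/network/config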

    systemctl daemon-reload

    systemctl start flanneld.service

    systemctl enable flanneld

    systemctl start docker

    Configure a Docker registry mirror

    cat << EOF > /etc/docker/daemon.json

    {
      "registry-mirrors": ["https://registry.docker-cn.com"],
      "storage-driver": "devicemapper"
    }

    EOF

    systemctl daemon-reload

    systemctl restart docker
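    After the restart, docker0 should sit inside the same subnet that Flannel leased to this node; a quick check:

    ip -4 addr show flannel.1
    ip -4 addr show docker0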

    If Docker fails to start, see: https://blog.csdn.net/admin_1_1/article/details/81054712

    List the Flannel subnets allocated to each node

    etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.81.10:2379,https://192.168.81.20:2379,https://192.168.81.30:2379" ls /coreos.com/network/subnets

    Show the details of one Flannel subnet

    etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.81.10:2379,https://192.168.81.20:2379,https://192.168.81.30:2379" get /coreos.com/network/subnets/172.17.17.0-24

    Create the kubeconfig files for the nodes

    rm -rf /usr/bin/kubectl

    cd /opt/kubernetes/bin/ 

    rz kubectl

    chmod +x kubectl

    # Create the TLS bootstrapping token

    cd /root/ssl  && rz kubeconfig.sh 

    export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')

    cat > token.csv <<EOF

    ${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"

    EOF

    # Create the kubelet bootstrapping kubeconfig

    export KUBE_APISERVER="https://192.168.81.100:6443"   # virtual IP (VIP) of the HA master cluster

    # Set the cluster parameters

    kubectl config set-cluster kubernetes \
      --certificate-authority=./ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=bootstrap.kubeconfig

    # Set the client credentials

    kubectl config set-credentials kubelet-bootstrap \
      --token=${BOOTSTRAP_TOKEN} \
      --kubeconfig=bootstrap.kubeconfig

    # Set the context parameters

    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kubelet-bootstrap \
      --kubeconfig=bootstrap.kubeconfig

    # Switch to the default context

    kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

    # Create the kube-proxy kubeconfig

    kubectl config set-cluster kubernetes \
      --certificate-authority=./ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=kube-proxy.kubeconfig

    kubectl config set-credentials kube-proxy \
      --client-certificate=./kube-proxy.pem \
      --client-key=./kube-proxy-key.pem \
      --embed-certs=true \
      --kubeconfig=kube-proxy.kubeconfig

    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kube-proxy \
      --kubeconfig=kube-proxy.kubeconfig

    kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

    Result: bootstrap.kubeconfig and kube-proxy.kubeconfig
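    To sanity-check a generated file:

    kubectl config view --kubeconfig=bootstrap.kubeconfig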

    Deploy the master node

    mkdir master && cd master

    rz master.zip && unzip master.zip

    mv kube-controller-manager kube-scheduler kube-apiserver /opt/kubernetes/bin/

    chmod +x /opt/kubernetes/bin/*

    chmod +x *.sh

    vim apiserver.sh

    MASTER_ADDRESS=${1:-"0.0.0.0"}

    In the kube-apiserver config, set both --advertise-address and --bind-address to 0.0.0.0 (listen on all interfaces), in preparation for the master HA setup.
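    For orientation, a hypothetical excerpt of the /opt/kubernetes/cfg/kube-apiserver that apiserver.sh generates; the exact flag set depends on your script, and the addresses follow this guide:

    KUBE_APISERVER_OPTS="--etcd-servers=https://192.168.81.10:2379,https://192.168.81.20:2379,https://192.168.81.30:2379 \
      --bind-address=0.0.0.0 \
      --advertise-address=0.0.0.0 \
      --secure-port=6443 \
      --service-cluster-ip-range=10.10.10.0/24 \
      --token-auth-file=/opt/kubernetes/cfg/token.csv \
      --tls-cert-file=/opt/kubernetes/ssl/server.pem \
      --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
      --client-ca-file=/opt/kubernetes/ssl/ca.pem"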

    Install kube-apiserver

    ./apiserver.sh 0.0.0.0 https://192.168.81.10:2379,https://192.168.81.20:2379,https://192.168.81.30:2379

    cp token.csv /opt/kubernetes/cfg/

    Result: /usr/lib/systemd/system/kube-apiserver.service and /opt/kubernetes/cfg/kube-apiserver

    Install kube-controller-manager

    ./controller-manager.sh  127.0.0.1

    Install kube-scheduler

    ./scheduler.sh 127.0.0.1

    Verify the cluster status

    kubectl get cs
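    On a healthy cluster the output looks roughly like this (one etcd line per member):

    NAME                 STATUS    MESSAGE              ERROR
    scheduler            Healthy   ok
    controller-manager   Healthy   ok
    etcd-0               Healthy   {"health": "true"}
    etcd-1               Healthy   {"health": "true"}
    etcd-2               Healthy   {"health": "true"}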

    Deploy master high availability

    Reference: https://blog.51cto.com/ylw6006/2164981

    Copy the master's kube-apiserver, kube-controller-manager, and kube-scheduler binaries to the standby node

    scp -rp kube-apiserver kube-controller-manager kube-scheduler node1:/opt/kubernetes/bin/

    Copy the master's certificate files to the standby node (already done above).

    Copy the systemd unit files to the standby node

    scp /usr/lib/systemd/system/kube-apiserver.service node1:/usr/lib/systemd/system/

    scp /usr/lib/systemd/system/kube-scheduler.service node1:/usr/lib/systemd/system/

    scp /usr/lib/systemd/system/kube-controller-manager.service node1:/usr/lib/systemd/system/

    Copy the configuration files to the standby node

    scp token.csv node1:/opt/kubernetes/cfg/

    scp /opt/kubernetes/cfg/{kube-apiserver,kube-controller-manager,kube-scheduler} node1:/opt/kubernetes/cfg/

    Start the services on the standby node

    systemctl enable kube-apiserver

    systemctl enable kube-controller-manager

    systemctl enable kube-scheduler

    systemctl start kube-apiserver

    systemctl start kube-controller-manager

    systemctl start kube-scheduler

    Deploy the worker nodes

    Grant bootstrap authorization on the master

    kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

    mkdir node_package && cd node_package/

    rz node.zip && unzip node.zip

    mv kubelet kube-proxy /opt/kubernetes/bin/

    chmod +x /opt/kubernetes/bin/*

    chmod +x *sh

    Copy the kubeconfig files from the master

    cd /root/ssl/

    scp *kubeconfig node1:/opt/kubernetes/cfg/

    Install kubelet

    vim kubelet.sh

    ./kubelet.sh 192.168.81.20 10.10.10.2    # args: node IP, cluster DNS service IP

    journalctl -u kubelet -f
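    A hypothetical excerpt of the /opt/kubernetes/cfg/kubelet that kubelet.sh writes from those two arguments (flag names are standard for this Kubernetes generation; your script may differ):

    KUBELET_OPTS="--address=192.168.81.20 \
      --hostname-override=192.168.81.20 \
      --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
      --experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
      --cert-dir=/opt/kubernetes/ssl \
      --cluster-dns=10.10.10.2 \
      --cluster-domain=cluster.local \
      --allow-privileged=true"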

    Install kube-proxy

    vim proxy.sh

    ./proxy.sh 192.168.81.20    # arg: node IP
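    Similarly, a hypothetical excerpt of the generated /opt/kubernetes/cfg/kube-proxy:

    KUBE_PROXY_OPTS="--hostname-override=192.168.81.20 \
      --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"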

    Approve the node from the master

    kubectl get csr    # list pending node CSRs

    kubectl certificate approve node-csr-NcST9yP_lzVPXcc00p2g3KFfDMyqaKbTKONBEA_6IEw    # approve the node's certificate request so it can join

    kubectl get node    # list the nodes

    Test

    kubectl run web --image=nginx --replicas=1

    kubectl get pods

    kubectl expose deployment web --port=88 --target-port=80 --type=NodePort

    kubectl get svc

    If the container fails to run, see: https://blog.51cto.com/xingej/2117623

    Run the following on both the master and every node, or pods will not start:

    ln -s /usr/libexec/docker/docker-runc-current /usr/bin/docker-runc
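    With the service exposed, the assigned NodePort can be looked up and tested from any node:

    NODE_PORT=$(kubectl get svc web -o jsonpath='{.spec.ports[0].nodePort}')
    curl -I http://192.168.81.20:${NODE_PORT}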

    Deploy the dashboard

    mkdir UI && cd UI

    vim dashboard-rbac.yaml

    apiVersion: v1

    kind: ServiceAccount

    metadata:

      labels:

        k8s-app: kubernetes-dashboard

        addonmanager.kubernetes.io/mode: Reconcile

      name: kubernetes-dashboard

      namespace: kube-system

    ---

    kind: ClusterRoleBinding

    apiVersion: rbac.authorization.k8s.io/v1beta1

    metadata:

      name: kubernetes-dashboard-minimal

      namespace: kube-system

      labels:

        k8s-app: kubernetes-dashboard

        addonmanager.kubernetes.io/mode: Reconcile

    roleRef:

      apiGroup: rbac.authorization.k8s.io

      kind: ClusterRole

      name: cluster-admin

    subjects:

      - kind: ServiceAccount

        name: kubernetes-dashboard

        namespace: kube-system

    vim dashboard-deployment.yaml

    apiVersion: apps/v1beta2

    kind: Deployment

    metadata:

      name: kubernetes-dashboard

      namespace: kube-system

      labels:

        k8s-app: kubernetes-dashboard

        kubernetes.io/cluster-service: "true"

        addonmanager.kubernetes.io/mode: Reconcile

    spec:

      selector:

        matchLabels:

          k8s-app: kubernetes-dashboard

      template:

        metadata:

          labels:

            k8s-app: kubernetes-dashboard

          annotations:

            scheduler.alpha.kubernetes.io/critical-pod: ''

        spec:

          serviceAccountName: kubernetes-dashboard

          containers:

          - name: kubernetes-dashboard

            image: registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.7.1

            resources:

              limits:

                cpu: 100m

                memory: 300Mi

              requests:

                cpu: 100m

                memory: 100Mi

            ports:

            - containerPort: 9090

              protocol: TCP

            livenessProbe:

              httpGet:

                scheme: HTTP

                path: /

                port: 9090

              initialDelaySeconds: 30

              timeoutSeconds: 30

          tolerations:

          - key: "CriticalAddonsOnly"

            operator: "Exists"

    vim dashboard-service.yaml

    apiVersion: v1

    kind: Service

    metadata:

      name: kubernetes-dashboard

      namespace: kube-system

      labels:

        k8s-app: kubernetes-dashboard

        kubernetes.io/cluster-service: "true"

        addonmanager.kubernetes.io/mode: Reconcile

    spec:

      type: NodePort

      selector:

        k8s-app: kubernetes-dashboard

      ports:

      - port: 80

        targetPort: 9090
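    The three manifests still need to be applied:

    kubectl create -f dashboard-rbac.yaml -f dashboard-deployment.yaml -f dashboard-service.yaml
    kubectl get pods -n kube-system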

    Verify:

    kubectl get svc -n kube-system
