  • [k8s notes] Installing a highly available Kubernetes v1.20 cluster from binaries: the complete walkthrough

    1. Overall plan
    1.1 Server layout (the master nodes do not run kubelet)
    IP address      Hostname  Role                    Components installed
    192.168.1.180   master1   master, API entry point kube-apiserver, kube-controller-manager, kube-scheduler, etcd
    192.168.1.181   master2   master                  kube-apiserver, kube-controller-manager, kube-scheduler, etcd
    192.168.1.182   master3   master                  kube-apiserver, kube-controller-manager, kube-scheduler, etcd
    192.168.1.183   node1     node                    kubelet, kube-proxy
    192.168.1.184   node2     node                    kubelet, kube-proxy
    192.168.1.185   node3     node                    kubelet, kube-proxy

    1.2 Software versions
    Software                                                                        Version
    kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy   v1.20.2
    etcd                                                                            v3.4.13
    calico                                                                          v3.14
    coredns                                                                         v1.7.0
    Note: all files used in this document have also been uploaded to a Baidu netdisk; if any download below fails, fetch them from:
    https://pan.baidu.com/s/1nz3WoiYKQ1x8YCMUSV9L7A
    Extraction code: ufjw

    2. Environment setup
    2.1 Hostnames
    Add the cluster hosts to /etc/hosts on every machine:

    cat >> /etc/hosts << EOF
    192.168.1.180 master1
    192.168.1.181 master2
    192.168.1.182 master3
    192.168.1.183 node1
    192.168.1.184 node2
    192.168.1.185 node3
    EOF

    2.2 Disable the firewall and SELinux
    systemctl stop firewalld
    setenforce 0
    sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

    2.3 Disable swap
    swapoff -a && sed -i.bak 's|/dev/mapper/centos-swap|#/dev/mapper/centos-swap|g' /etc/fstab
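
    Optionally verify that swap is really off before moving on (the Swap line should show 0):
    free -m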

    2.4 Time synchronization
    yum install -y chrony
    systemctl start chronyd
    systemctl enable chronyd
    chronyc sources

    2.5 Kernel parameters
    cat > /etc/sysctl.d/k8s.conf << EOF
    net.ipv4.ip_forward = 1
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    EOF
    sysctl --system
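
    The two bridge-nf-call settings only take effect once the br_netfilter module is loaded. If sysctl --system reports them as unknown keys, loading the module first is a reasonable extra safeguard (skip it if the settings already applied cleanly):
    modprobe br_netfilter
    echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf
    sysctl --system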

    2.6 Load the IPVS modules
    modprobe -- ip_vs
    modprobe -- ip_vs_rr
    modprobe -- ip_vs_wrr
    modprobe -- ip_vs_sh
    modprobe -- nf_conntrack_ipv4
    lsmod | grep ip_vs
    lsmod | grep nf_conntrack_ipv4
    yum install -y ipvsadm
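
    These modprobe calls do not survive a reboot. One optional way to make them persistent is a modules-load.d drop-in:
    cat > /etc/modules-load.d/ipvs.conf << EOF
    ip_vs
    ip_vs_rr
    ip_vs_wrr
    ip_vs_sh
    nf_conntrack_ipv4
    EOF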

    2.7 Set up the working directory
    Every machine needs certificate files, component configuration files and systemd unit files. We generate all of them on master1 and then distribute them to the other machines. The following is done on master1:
    [root@master1 ~]# mkdir -p /data/work
    Note: this directory is where all configuration and certificate files are generated; every file-generation step below is run from here.
    [root@master1 ~]# ssh-keygen -t rsa -b 2048
    Note: distribute the public key to the other five machines so that master1 can log in to them without a password.
    Skipping the password-less setup also works, it is just more tedious: every transfer will prompt for a password.
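
    A sketch of distributing the key from master1 (ssh-copy-id prompts once for each host's root password; hostnames come from the /etc/hosts entries above):
    [root@master1 ~]# for i in master2 master3 node1 node2 node3; do ssh-copy-id -i ~/.ssh/id_rsa.pub root@$i; done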

    3. Build the etcd cluster
    3.1 Set up the etcd working directories
    Configuration directory and certificate directory:
    mkdir -p /etc/etcd
    mkdir -p /etc/etcd/ssl

    3.2 Install cfssl and create the etcd certificates
    If wget is slow or fails, all of these files are also in the Baidu netdisk linked above; download them there and upload them to the server.
    cd /data/work/
    wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
    wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
    wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

    chmod +x cfssl*
    mv cfssl_linux-amd64 /usr/local/bin/cfssl
    mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
    mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo

    3.3 CA signing request file
    [root@master1 work]# vim ca-csr.json
    {
    "CN": "kubernetes",
    "key": {
    "algo": "rsa",
    "size": 2048
    },
    "names": [
    {
    "C": "CN",
    "ST": "Hubei",
    "L": "Wuhan",
    "O": "k8s",
    "OU": "system"
    }
    ],
    "ca": {
    "expiry": "87600h"
    }
    }
    Note: CN (Common Name) is extracted from the certificate by kube-apiserver and used as the requesting user name; browsers use it to verify whether a site is legitimate. O (Organization) is extracted by kube-apiserver and used as the group the requesting user belongs to.

    3.4 CA certificate and signing profile
    [root@master1 work]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
    [root@master1 work]# vim ca-config.json
    {
    "signing": {
    "default": {
    "expiry": "87600h"
    },
    "profiles": {
    "kubernetes": {
    "usages": [
    "signing",
    "key encipherment",
    "server auth",
    "client auth"
    ],
    "expiry": "87600h"
    }
    }
    }
    }

    3.5 etcd signing request file (replace the IPs with your own)
    [root@master1 work]# vim etcd-csr.json
    {
    "CN": "etcd",
    "hosts": [
    "127.0.0.1",
    "192.168.1.180",
    "192.168.1.181",
    "192.168.1.182"
    ],
    "key": {
    "algo": "rsa",
    "size": 2048
    },
    "names": [{
    "C": "CN",
    "ST": "Hubei",
    "L": "Wuhan",
    "O": "k8s",
    "OU": "system"
    }]
    }

    3.6 Generate the etcd certificates
    [root@master1 work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
    [root@master1 work]# ls etcd*.pem
    etcd-key.pem etcd.pem

    3.7 Deploy the etcd cluster
    Download the etcd release:
    [root@master1 work]# wget https://github.com/etcd-io/etcd/releases/download/v3.4.13/etcd-v3.4.13-linux-amd64.tar.gz
    [root@master1 work]# tar -xf etcd-v3.4.13-linux-amd64.tar.gz
    [root@master1 work]# cp -p etcd-v3.4.13-linux-amd64/etcd* /usr/local/bin/
    [root@master1 work]# rsync -vaz etcd-v3.4.13-linux-amd64/etcd* master2:/usr/local/bin/
    [root@master1 work]# rsync -vaz etcd-v3.4.13-linux-amd64/etcd* master3:/usr/local/bin/

    Create the configuration file (replace the IPs with your own):
    [root@master1 work]# vim etcd.conf
    #[Member]
    ETCD_NAME="etcd1"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://192.168.1.180:2380"
    ETCD_LISTEN_CLIENT_URLS="https://192.168.1.180:2379,http://127.0.0.1:2379"

    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.180:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.180:2379"
    ETCD_INITIAL_CLUSTER="etcd1=https://192.168.1.180:2380,etcd2=https://192.168.1.181:2380,etcd3=https://192.168.1.182:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    Notes:
    ETCD_NAME: node name, unique within the cluster
    ETCD_DATA_DIR: data directory
    ETCD_LISTEN_PEER_URLS: peer (cluster) listen address
    ETCD_LISTEN_CLIENT_URLS: client listen address
    ETCD_INITIAL_ADVERTISE_PEER_URLS: peer address advertised to the cluster
    ETCD_ADVERTISE_CLIENT_URLS: client address advertised to clients
    ETCD_INITIAL_CLUSTER: addresses of all cluster members
    ETCD_INITIAL_CLUSTER_TOKEN: cluster token
    ETCD_INITIAL_CLUSTER_STATE: state when joining; "new" for a new cluster, "existing" when joining an existing one

    3.8 Create the etcd systemd unit
    [root@master1 work]# vim etcd.service
    [Unit]
    Description=Etcd Server
    After=network.target
    After=network-online.target
    Wants=network-online.target

    [Service]
    Type=notify
    EnvironmentFile=-/etc/etcd/etcd.conf
    WorkingDirectory=/var/lib/etcd/
    ExecStart=/usr/local/bin/etcd \
    --cert-file=/etc/etcd/ssl/etcd.pem \
    --key-file=/etc/etcd/ssl/etcd-key.pem \
    --trusted-ca-file=/etc/etcd/ssl/ca.pem \
    --peer-cert-file=/etc/etcd/ssl/etcd.pem \
    --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
    --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
    --peer-client-cert-auth \
    --client-cert-auth
    Restart=on-failure
    RestartSec=5
    LimitNOFILE=65536

    [Install]
    WantedBy=multi-user.target

    Copy the files into place and sync them to the other etcd nodes:
    [root@master1 work]# cp ca*.pem /etc/etcd/ssl/
    [root@master1 work]# cp etcd*.pem /etc/etcd/ssl/
    [root@master1 work]# cp etcd.conf /etc/etcd/
    [root@master1 work]# cp etcd.service /usr/lib/systemd/system/
    [root@master1 work]# for i in master2 master3;do rsync -vaz etcd.conf $i:/etc/etcd/;done
    [root@master1 work]# for i in master2 master3;do rsync -vaz etcd*.pem ca*.pem $i:/etc/etcd/ssl/;done
    [root@master1 work]# for i in master2 master3;do rsync -vaz etcd.service $i:/usr/lib/systemd/system/;done
    Note: on master2 and master3, change the etcd name and IP addresses in etcd.conf to that node's own values, and create the data directory /var/lib/etcd/default.etcd.
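
    A sketch of the adjustment on master2 (master3 is analogous with etcd3 and 192.168.1.182); editing the file by hand works just as well. The sed below deliberately leaves the ETCD_INITIAL_CLUSTER line untouched:
    [root@master2 ~]# sed -i -e 's/^ETCD_NAME=.*/ETCD_NAME="etcd2"/' -e '/^ETCD_INITIAL_CLUSTER=/!s/192.168.1.180/192.168.1.181/g' /etc/etcd/etcd.conf
    [root@master2 ~]# mkdir -p /var/lib/etcd/default.etcd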

    3.9 Start the etcd cluster
    Note: the first start may hang for a while because each member waits for the others; if it fails, start the etcd service on each node by hand.
    [root@master1 work]# mkdir -p /var/lib/etcd/default.etcd
    [root@master1 work]# systemctl daemon-reload
    [root@master1 work]# systemctl enable etcd.service
    [root@master1 work]# systemctl start etcd.service
    [root@master1 work]# systemctl status etcd

    Check the cluster health:
    [root@master1 work]# ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.1.180:2379,https://192.168.1.181:2379,https://192.168.1.182:2379 endpoint health
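
    Two more optional checks (standard etcdctl v3 subcommands): endpoint status shows which member is the leader, and member list shows the membership.
    [root@master1 work]# ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.1.180:2379,https://192.168.1.181:2379,https://192.168.1.182:2379 endpoint status
    [root@master1 work]# ETCDCTL_API=3 /usr/local/bin/etcdctl --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.1.180:2379 member list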

    4. Deploy the Kubernetes components
    4.1 Download the binaries
    [root@master1 work]# wget https://dl.k8s.io/v1.20.2/kubernetes-server-linux-amd64.tar.gz
    [root@master1 work]# tar -xf kubernetes-server-linux-amd64.tar.gz
    [root@master1 work]# cd kubernetes/server/bin/
    [root@master1 bin]# cp kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/
    [root@master1 bin]# rsync -vaz kube-apiserver kube-controller-manager kube-scheduler kubectl master2:/usr/local/bin/
    [root@master1 bin]# rsync -vaz kube-apiserver kube-controller-manager kube-scheduler kubectl master3:/usr/local/bin/
    [root@master1 bin]# for i in node1 node2 node3;do rsync -vaz kubelet kube-proxy $i:/usr/local/bin/;done
    [root@master1 bin]# cd /data/work/
    [root@master1 work]# mkdir -p /etc/kubernetes/      # configuration files for the Kubernetes components
    [root@master1 work]# mkdir -p /etc/kubernetes/ssl   # certificate files for the Kubernetes components
    [root@master1 work]# mkdir /var/log/kubernetes      # log directory for the Kubernetes components

    4.2 Deploy kube-apiserver
    Create the CSR file (replace the IPs with your own):
    [root@master1 work]# vim kube-apiserver-csr.json
    {
    "CN": "kubernetes",
    "hosts": [
    "127.0.0.1",
    "192.168.1.180",
    "192.168.1.181",
    "192.168.1.182",
    "192.168.1.183",
    "192.168.1.184",
    "192.168.1.185",
    "10.255.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
    ],
    "key": {
    "algo": "rsa",
    "size": 2048
    },
    "names": [
    {
    "C": "CN",
    "ST": "Hubei",
    "L": "Wuhan",
    "O": "k8s",
    "OU": "system"
    }
    ]
    }
    Note: if the hosts field is not empty, it must list every IP or domain name authorized to use this certificate. Since the certificate is used by the Kubernetes masters, all master IPs must be included, plus the first IP of the service network (the first address of the range passed to kube-apiserver via --service-cluster-ip-range, here 10.255.0.1).

    Generate the certificate and the bootstrap token file:
    [root@master1 work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver
    [root@master1 work]# cat > token.csv << EOF
    $(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
    EOF

    Create the configuration file (replace the IPs with your own):
    [root@master1 work]# vim kube-apiserver.conf
    KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
    --anonymous-auth=false \
    --bind-address=192.168.1.180 \
    --secure-port=6443 \
    --advertise-address=192.168.1.180 \
    --insecure-port=0 \
    --authorization-mode=Node,RBAC \
    --runtime-config=api/all=true \
    --enable-bootstrap-token-auth \
    --service-cluster-ip-range=10.255.0.0/16 \
    --token-auth-file=/etc/kubernetes/token.csv \
    --service-node-port-range=30000-50000 \
    --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \
    --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
    --client-ca-file=/etc/kubernetes/ssl/ca.pem \
    --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
    --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
    --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
    --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
    --service-account-issuer=https://kubernetes.default.svc.cluster.local \
    --etcd-cafile=/etc/etcd/ssl/ca.pem \
    --etcd-certfile=/etc/etcd/ssl/etcd.pem \
    --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
    --etcd-servers=https://192.168.1.180:2379,https://192.168.1.181:2379,https://192.168.1.182:2379 \
    --enable-swagger-ui=true \
    --allow-privileged=true \
    --apiserver-count=3 \
    --audit-log-maxage=30 \
    --audit-log-maxbackup=3 \
    --audit-log-maxsize=100 \
    --audit-log-path=/var/log/kube-apiserver-audit.log \
    --event-ttl=1h \
    --alsologtostderr=true \
    --logtostderr=false \
    --log-dir=/var/log/kubernetes \
    --v=4"
    Notes:
    --logtostderr / --log-dir / --v: logging destination, directory and verbosity
    --etcd-servers: etcd cluster endpoints
    --bind-address: listen address
    --secure-port: HTTPS port
    --advertise-address: address advertised to the cluster
    --allow-privileged: allow privileged containers
    --service-cluster-ip-range: virtual IP range for Services
    --enable-admission-plugins: admission control plugins
    --authorization-mode: enable RBAC authorization and Node self-management
    --enable-bootstrap-token-auth: enable the TLS bootstrap mechanism
    --token-auth-file: bootstrap token file
    --service-node-port-range: default port range for NodePort Services
    --kubelet-client-xxx: client certificate the apiserver uses to reach kubelets
    --tls-xxx-file: apiserver HTTPS certificates
    --etcd-xxxfile: certificates for connecting to etcd
    --audit-log-xxx: audit logging

    Create the systemd unit:
    [root@master1 work]# vim kube-apiserver.service
    [Unit]
    Description=Kubernetes API Server
    Documentation=https://github.com/kubernetes/kubernetes
    After=etcd.service
    Wants=etcd.service

    [Service]
    EnvironmentFile=-/etc/kubernetes/kube-apiserver.conf
    ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
    Restart=on-failure
    RestartSec=5
    Type=notify
    LimitNOFILE=65536

    [Install]
    WantedBy=multi-user.target

    Copy the files into place and sync them to the other master nodes:
    [root@master1 work]# cp ca*.pem /etc/kubernetes/ssl/
    [root@master1 work]# cp kube-apiserver*.pem /etc/kubernetes/ssl/
    [root@master1 work]# cp token.csv /etc/kubernetes/
    [root@master1 work]# cp kube-apiserver.conf /etc/kubernetes/
    [root@master1 work]# cp kube-apiserver.service /usr/lib/systemd/system/
    [root@master1 work]# rsync -vaz token.csv master2:/etc/kubernetes/
    [root@master1 work]# rsync -vaz token.csv master3:/etc/kubernetes/
    [root@master1 work]# rsync -vaz kube-apiserver*.pem master2:/etc/kubernetes/ssl/
    [root@master1 work]# rsync -vaz kube-apiserver*.pem master3:/etc/kubernetes/ssl/
    [root@master1 work]# rsync -vaz ca*.pem master2:/etc/kubernetes/ssl/
    [root@master1 work]# rsync -vaz ca*.pem master3:/etc/kubernetes/ssl/
    [root@master1 work]# rsync -vaz kube-apiserver.conf master2:/etc/kubernetes/
    [root@master1 work]# rsync -vaz kube-apiserver.conf master3:/etc/kubernetes/
    [root@master1 work]# rsync -vaz kube-apiserver.service master2:/usr/lib/systemd/system/
    [root@master1 work]# rsync -vaz kube-apiserver.service master3:/usr/lib/systemd/system/
    Note: on master2 and master3, change the IP addresses in kube-apiserver.conf to the local machine's own IP.
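
    Only --bind-address and --advertise-address are node-specific, so on master2 a targeted substitution such as the following should be enough (master3 uses 192.168.1.182); editing the file by hand is equally fine:
    [root@master2 ~]# sed -i 's/--bind-address=192.168.1.180/--bind-address=192.168.1.181/; s/--advertise-address=192.168.1.180/--advertise-address=192.168.1.181/' /etc/kubernetes/kube-apiserver.conf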

    Start the service and test it:
    [root@master1 work]# systemctl daemon-reload
    [root@master1 work]# systemctl enable kube-apiserver
    [root@master1 work]# systemctl start kube-apiserver
    [root@master1 work]# systemctl status kube-apiserver
    [root@master1 work]# curl --insecure https://192.168.1.180:6443/
    Any response at all means the apiserver is up.

    4.3 Deploy kubectl
    Create the CSR file:
    [root@master1 work]# vim admin-csr.json
    {
    "CN": "admin",
    "hosts": [],
    "key": {
    "algo": "rsa",
    "size": 2048
    },
    "names": [
    {
    "C": "CN",
    "ST": "Hubei",
    "L": "Wuhan",
    "O": "system:masters",
    "OU": "system"
    }
    ]
    }
    Notes: kube-apiserver uses RBAC to authorize client requests (kubelet, kube-proxy, Pods, and so on). It predefines a number of RoleBindings; in particular, cluster-admin binds the group system:masters to the cluster-admin role, which grants access to every API. O sets this certificate's group to system:masters: because the certificate is signed by our CA it passes authentication, and because its group is the pre-authorized system:masters it is granted access to all APIs. This admin certificate is later used to generate the administrator's kubeconfig; Kubernetes takes the certificate's CN field as the User and the O field as the Group. "O": "system:masters" is therefore required here, otherwise the later kubectl create clusterrolebinding step reports an error.

    Generate the certificate:
    [root@master1 work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
    [root@master1 work]# cp admin*.pem /etc/kubernetes/ssl/

    Create the kubeconfig file (this one matters):

    kubeconfig is kubectl's configuration file and contains everything needed to reach the apiserver: its address, the CA certificate and the client certificate. (If a later step complains that the kubeconfig path cannot be found, copy the file to the expected path by hand; otherwise ignore this.)
    1. Set the cluster parameters
    [root@master1 work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.1.180:6443 --kubeconfig=kube.config
    2. Set the client credentials
    [root@master1 work]# kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config
    3. Set the context
    [root@master1 work]# kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config
    4. Switch to the default context
    [root@master1 work]# kubectl config use-context kubernetes --kubeconfig=kube.config
    [root@master1 work]# mkdir ~/.kube
    [root@master1 work]# cp kube.config ~/.kube/config
    5. Grant the kubernetes user access to the kubelet API
    [root@master1 work]# kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes

    Check the cluster component status:
    [root@master1 work]# kubectl cluster-info
    [root@master1 work]# kubectl get componentstatuses
    [root@master1 work]# kubectl get all --all-namespaces

    Sync the kubectl config to the other master nodes:
    [root@master1 work]# rsync -vaz /root/.kube/config master2:/root/.kube/
    [root@master1 work]# rsync -vaz /root/.kube/config master3:/root/.kube/

    Configure kubectl command completion:
    [root@master1 work]# yum install -y bash-completion
    [root@master1 work]# source /usr/share/bash-completion/bash_completion
    [root@master1 work]# source <(kubectl completion bash)
    [root@master1 work]# kubectl completion bash > ~/.kube/completion.bash.inc
    [root@master1 work]# source '/root/.kube/completion.bash.inc'
    [root@master1 work]# source $HOME/.bash_profile

    4.4 Deploy kube-controller-manager
    Create the CSR file:
    [root@master1 work]# vim kube-controller-manager-csr.json
    {
    "CN": "system:kube-controller-manager",
    "key": {
    "algo": "rsa",
    "size": 2048
    },
    "hosts": [
    "127.0.0.1",
    "192.168.1.180",
    "192.168.1.181",
    "192.168.1.182"
    ],
    "names": [
    {
    "C": "CN",
    "ST": "Hubei",
    "L": "Wuhan",
    "O": "system:kube-controller-manager",
    "OU": "system"
    }
    ]
    }
    Note: the hosts list contains every kube-controller-manager node IP. CN and O are both system:kube-controller-manager; the built-in ClusterRoleBinding system:kube-controller-manager grants the permissions the controller manager needs.

    Generate the certificate:
    [root@master1 work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager

    Create the kube-controller-manager kubeconfig:
    1. Set the cluster parameters
    [root@master1 work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.1.180:6443 --kubeconfig=kube-controller-manager.kubeconfig
    2. Set the client credentials
    [root@master1 work]# kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig
    3. Set the context
    [root@master1 work]# kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
    4. Switch to the default context
    [root@master1 work]# kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

    Create kube-controller-manager.conf:
    [root@master1 work]# vim kube-controller-manager.conf
    KUBE_CONTROLLER_MANAGER_OPTS="--port=0 \
    --secure-port=10252 \
    --bind-address=127.0.0.1 \
    --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
    --service-cluster-ip-range=10.255.0.0/16 \
    --cluster-name=kubernetes \
    --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
    --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
    --allocate-node-cidrs=true \
    --cluster-cidr=10.0.0.0/16 \
    --experimental-cluster-signing-duration=87600h \
    --root-ca-file=/etc/kubernetes/ssl/ca.pem \
    --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
    --leader-elect=true \
    --feature-gates=RotateKubeletServerCertificate=true \
    --controllers=*,bootstrapsigner,tokencleaner \
    --horizontal-pod-autoscaler-use-rest-clients=true \
    --horizontal-pod-autoscaler-sync-period=10s \
    --tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \
    --tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
    --use-service-account-credentials=true \
    --alsologtostderr=true \
    --logtostderr=false \
    --log-dir=/var/log/kubernetes \
    --v=2"

    Create the systemd unit:
    [root@master1 work]# vim kube-controller-manager.service
    [Unit]
    Description=Kubernetes Controller Manager
    Documentation=https://github.com/kubernetes/kubernetes

    [Service]
    EnvironmentFile=-/etc/kubernetes/kube-controller-manager.conf
    ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
    Restart=on-failure
    RestartSec=5

    [Install]
    WantedBy=multi-user.target

    Copy the files into place and sync them to the other master nodes:
    [root@master1 work]# cp kube-controller-manager*.pem /etc/kubernetes/ssl/
    [root@master1 work]# cp kube-controller-manager.kubeconfig /etc/kubernetes/
    [root@master1 work]# cp kube-controller-manager.conf /etc/kubernetes/
    [root@master1 work]# cp kube-controller-manager.service /usr/lib/systemd/system/
    [root@master1 work]# rsync -vaz kube-controller-manager*.pem master2:/etc/kubernetes/ssl/
    [root@master1 work]# rsync -vaz kube-controller-manager*.pem master3:/etc/kubernetes/ssl/
    [root@master1 work]# rsync -vaz kube-controller-manager.kubeconfig kube-controller-manager.conf master2:/etc/kubernetes/
    [root@master1 work]# rsync -vaz kube-controller-manager.kubeconfig kube-controller-manager.conf master3:/etc/kubernetes/
    [root@master1 work]# rsync -vaz kube-controller-manager.service master2:/usr/lib/systemd/system/
    [root@master1 work]# rsync -vaz kube-controller-manager.service master3:/usr/lib/systemd/system/

    Start the service:
    [root@master1 work]# systemctl daemon-reload
    [root@master1 work]# systemctl enable kube-controller-manager
    [root@master1 work]# systemctl start kube-controller-manager
    [root@master1 work]# systemctl status kube-controller-manager

    4.5 Deploy kube-scheduler
    Create the CSR file:

    [root@master1 work]# vim kube-scheduler-csr.json
    {
    "CN": "system:kube-scheduler",
    "hosts": [
    "127.0.0.1",
    "192.168.1.180",
    "192.168.1.181",
    "192.168.1.182"
    ],
    "key": {
    "algo": "rsa",
    "size": 2048
    },
    "names": [
    {
    "C": "CN",
    "ST": "Hubei",
    "L": "Wuhan",
    "O": "system:kube-scheduler",
    "OU": "system"
    }
    ]
    }
    Note: the hosts list contains every kube-scheduler node IP. CN and O are both system:kube-scheduler; the built-in ClusterRoleBinding system:kube-scheduler grants the permissions the scheduler needs.

    Generate the certificate:
    [root@master1 work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
    Create the kube-scheduler kubeconfig:
    1. Set the cluster parameters
    [root@master1 work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.1.180:6443 --kubeconfig=kube-scheduler.kubeconfig
    2. Set the client credentials
    [root@master1 work]# kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig
    3. Set the context
    [root@master1 work]# kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
    4. Switch to the default context
    [root@master1 work]# kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig

    Create kube-scheduler.conf:
    [root@master1 work]# vim kube-scheduler.conf
    KUBE_SCHEDULER_OPTS="--address=127.0.0.1 \
    --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
    --leader-elect=true \
    --alsologtostderr=true \
    --logtostderr=false \
    --log-dir=/var/log/kubernetes \
    --v=2"

    Create the systemd unit:
    [root@master1 work]# vim kube-scheduler.service
    [Unit]
    Description=Kubernetes Scheduler
    Documentation=https://github.com/kubernetes/kubernetes

    [Service]
    EnvironmentFile=-/etc/kubernetes/kube-scheduler.conf
    ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
    Restart=on-failure
    RestartSec=5

    [Install]
    WantedBy=multi-user.target

    Copy the files into place and sync them to the other master nodes:
    [root@master1 work]# cp kube-scheduler*.pem /etc/kubernetes/ssl/
    [root@master1 work]# cp kube-scheduler.kubeconfig /etc/kubernetes/
    [root@master1 work]# cp kube-scheduler.conf /etc/kubernetes/
    [root@master1 work]# cp kube-scheduler.service /usr/lib/systemd/system/
    [root@master1 work]# rsync -vaz kube-scheduler*.pem master2:/etc/kubernetes/ssl/
    [root@master1 work]# rsync -vaz kube-scheduler*.pem master3:/etc/kubernetes/ssl/
    [root@master1 work]# rsync -vaz kube-scheduler.kubeconfig kube-scheduler.conf master2:/etc/kubernetes/
    [root@master1 work]# rsync -vaz kube-scheduler.kubeconfig kube-scheduler.conf master3:/etc/kubernetes/
    [root@master1 work]# rsync -vaz kube-scheduler.service master2:/usr/lib/systemd/system/
    [root@master1 work]# rsync -vaz kube-scheduler.service master3:/usr/lib/systemd/system/

    Start the service:
    [root@master1 work]# systemctl daemon-reload
    [root@master1 work]# systemctl enable kube-scheduler
    [root@master1 work]# systemctl start kube-scheduler
    [root@master1 work]# systemctl status kube-scheduler

    4.6 Deploy Docker
    Install Docker on the three worker nodes:
    [root@node1 ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
    [root@node1 ~]# yum install -y docker-ce
    [root@node1 ~]# systemctl enable docker
    [root@node1 ~]# systemctl start docker
    [root@node1 ~]# docker --version

    Configure the registry mirrors and the cgroup driver:
    [root@node1 ~]# cat > /etc/docker/daemon.json << EOF
    {
    "exec-opts": ["native.cgroupdriver=systemd"],
    "registry-mirrors": [
    "https://1nj0zren.mirror.aliyuncs.com",
    "https://kfwkfulq.mirror.aliyuncs.com",
    "https://2lqq34jg.mirror.aliyuncs.com",
    "https://pee6w651.mirror.aliyuncs.com",
    "http://hub-mirror.c.163.com",
    "https://docker.mirrors.ustc.edu.cn",
    "http://f1361db2.m.daocloud.io",
    "https://registry.docker-cn.com"
    ]
    }
    EOF
    [root@node1 ~]# systemctl restart docker
    [root@node1 ~]# docker info | grep "Cgroup Driver"

    Pull the dependency images:
    [root@node1 ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
    [root@node1 ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2 k8s.gcr.io/pause:3.2
    [root@node1 ~]# docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
    [root@node1 ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.7.0
    [root@node1 ~]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.7.0 k8s.gcr.io/coredns:1.7.0
    [root@node1 ~]# docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.7.0

    4.7 Deploy kubelet
    The following is done on master1.
    Create kubelet-bootstrap.kubeconfig:
    [root@master1 work]# BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/token.csv)
    1. Set the cluster parameters
    [root@master1 work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.1.180:6443 --kubeconfig=kubelet-bootstrap.kubeconfig
    2. Set the client credentials
    [root@master1 work]# kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig
    3. Set the context
    [root@master1 work]# kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
    4. Switch to the default context
    [root@master1 work]# kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig
    5. Create the role binding
    [root@master1 work]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

    Create the configuration file kubelet.json.
    "cgroupDriver" must match Docker's cgroup driver (the daemon.json above sets Docker to systemd, so make sure the two agree; check with docker info | grep "Cgroup Driver").

    Replace address with node1's own IP.
    [root@master1 work]# vim kubelet.json
    {
    "kind": "KubeletConfiguration",
    "apiVersion": "kubelet.config.k8s.io/v1beta1",
    "authentication": {
    "x509": {
    "clientCAFile": "/etc/kubernetes/ssl/ca.pem"
    },
    "webhook": {
    "enabled": true,
    "cacheTTL": "2m0s"
    },
    "anonymous": {
    "enabled": false
    }
    },
    "authorization": {
    "mode": "Webhook",
    "webhook": {
    "cacheAuthorizedTTL": "5m0s",
    "cacheUnauthorizedTTL": "30s"
    }
    },
    "address": "192.168.1.183",
    "port": 10250,
    "readOnlyPort": 10255,
    "cgroupDriver": "cgroupfs",
    "hairpinMode": "promiscuous-bridge",
    "serializeImagePulls": false,
    "featureGates": {
    "RotateKubeletClientCertificate": true,
    "RotateKubeletServerCertificate": true
    },
    "clusterDomain": "cluster.local.",
    "clusterDNS": ["10.255.0.2"]
    }

    Create the systemd unit kubelet.service:
    [root@master1 work]# vim kubelet.service
    [Unit]
    Description=Kubernetes Kubelet
    Documentation=https://github.com/kubernetes/kubernetes
    After=docker.service
    Requires=docker.service

    [Service]
    WorkingDirectory=/var/lib/kubelet
    ExecStart=/usr/local/bin/kubelet \
    --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
    --cert-dir=/etc/kubernetes/ssl \
    --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
    --config=/etc/kubernetes/kubelet.json \
    --network-plugin=cni \
    --pod-infra-container-image=k8s.gcr.io/pause:3.2 \
    --alsologtostderr=true \
    --logtostderr=false \
    --log-dir=/var/log/kubernetes \
    --v=2
    Restart=on-failure
    RestartSec=5

    [Install]
    WantedBy=multi-user.target
    Notes: --hostname-override: node display name, unique within the cluster; --network-plugin: enable CNI; --kubeconfig: an empty path that is generated automatically and later used to talk to the apiserver; --bootstrap-kubeconfig: used on first start to request a certificate from the apiserver; --config: configuration parameter file; --cert-dir: directory where kubelet certificates are generated; --pod-infra-container-image: image of the pause container that manages the Pod network.

    Sync the files to the worker nodes:
    [root@master1 work]# for i in node1 node2 node3;do rsync -vaz kubelet-bootstrap.kubeconfig kubelet.json $i:/etc/kubernetes/;done
    [root@master1 work]# for i in node1 node2 node3;do rsync -vaz ca.pem $i:/etc/kubernetes/ssl/;done
    [root@master1 work]# for i in node1 node2 node3;do rsync -vaz kubelet.service $i:/usr/lib/systemd/system/;done
    Note: on each worker node, change the address field in kubelet.json to that node's own IP, then start the kubelet service on every worker node as shown below.
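
    A per-node sketch for node2 (node1 keeps 192.168.1.183, node3 uses 192.168.1.185), assuming kubelet.json was synced with node1's address in it; editing the file by hand works just as well:
    [root@node2 ~]# sed -i 's/"address": "192.168.1.183"/"address": "192.168.1.184"/' /etc/kubernetes/kubelet.json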

    [root@node1 ~]# mkdir /var/lib/kubelet
    [root@node1 ~]# mkdir /var/log/kubernetes
    [root@node1 ~]# systemctl daemon-reload
    [root@node1 ~]# systemctl enable kubelet
    [root@node1 ~]# systemctl start kubelet
    [root@node1 ~]# systemctl status kubelet
    Once the kubelet service is running, go back to master1 and approve the bootstrap requests.

    The following shows the CSR requests submitted by the three worker nodes; approve each of them:
    [root@master1 work]# kubectl get csr
    [root@master1 work]# kubectl certificate approve node-csr-HlX3cExsZohWsu8Dd6Rp_ztFejmMdpzvti_qgxo4SAQ
    [root@master1 work]# kubectl certificate approve node-csr-oykYfnH_coRF2PLJH4fOHlGznOZUBPDg5BPZXDo2wgk
    [root@master1 work]# kubectl certificate approve node-csr-ytRB2fikhL6dykcekGg4BdD87o-zw9WPU44SZ1nFT50
    [root@master1 work]# kubectl get csr
    [root@master1 work]# kubectl get nodes
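
    If copying the individual CSR names is tedious, all pending requests can be approved in one go (kubectl get csr -o name feeds the resource names to kubectl certificate approve):
    [root@master1 work]# kubectl get csr -o name | xargs kubectl certificate approve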

    4.8 Deploy kube-proxy

    Create the CSR file:

    [root@master1 work]# vim kube-proxy-csr.json
    {
    "CN": "system:kube-proxy",
    "key": {
    "algo": "rsa",
    "size": 2048
    },
    "names": [
    {
    "C": "CN",
    "ST": "Hubei",
    "L": "Wuhan",
    "O": "k8s",
    "OU": "system"
    }
    ]
    }

    Generate the certificate:
    [root@master1 work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

    Create the kubeconfig file:
    [root@master1 work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.1.180:6443 --kubeconfig=kube-proxy.kubeconfig
    [root@master1 work]# kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
    [root@master1 work]# kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig
    [root@master1 work]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

    Create the kube-proxy configuration file:
    [root@master1 work]# vim kube-proxy.yaml
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    bindAddress: 192.168.1.183
    clientConnection:
      kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
    clusterCIDR: 192.168.0.0/16
    healthzBindAddress: 192.168.1.183:10256
    kind: KubeProxyConfiguration
    metricsBindAddress: 192.168.1.183:10249
    mode: "ipvs"

    Create the systemd unit:
    [root@master1 work]# vim kube-proxy.service
    [Unit]
    Description=Kubernetes Kube-Proxy Server
    Documentation=https://github.com/kubernetes/kubernetes
    After=network.target

    [Service]
    WorkingDirectory=/var/lib/kube-proxy
    ExecStart=/usr/local/bin/kube-proxy \
    --config=/etc/kubernetes/kube-proxy.yaml \
    --alsologtostderr=true \
    --logtostderr=false \
    --log-dir=/var/log/kubernetes \
    --v=2
    Restart=on-failure
    RestartSec=5
    LimitNOFILE=65536

    [Install]
    WantedBy=multi-user.target

    Sync the files to the worker nodes:
    [root@master1 work]# for i in node1 node2 node3;do rsync -vaz kube-proxy.kubeconfig kube-proxy.yaml $i:/etc/kubernetes/;done
    [root@master1 work]# for i in node1 node2 node3;do rsync -vaz kube-proxy.service $i:/usr/lib/systemd/system/;done

    Note: on each node, change the addresses in kube-proxy.yaml (bindAddress, healthzBindAddress, metricsBindAddress) to that node's own IP.
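
    A per-node sketch for node2 (node3 uses 192.168.1.185); the only occurrences of the node IP in this file are those three address fields, so a single substitution covers them:
    [root@node2 ~]# sed -i 's/192.168.1.183/192.168.1.184/g' /etc/kubernetes/kube-proxy.yaml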

    Start the service:
    [root@node1 ~]# mkdir -p /var/lib/kube-proxy
    [root@node1 ~]# systemctl daemon-reload
    [root@node1 ~]# systemctl enable kube-proxy
    [root@node1 ~]# systemctl restart kube-proxy
    [root@node1 ~]# systemctl status kube-proxy

    4.9 Deploy the Calico network plugin
    [root@master1 work]# wget https://docs.projectcalico.org/v3.14/manifests/calico.yaml
    [root@master1 work]# kubectl apply -f calico.yaml
    [root@master1 work]# kubectl get pods -A

    4.10 Deploy CoreDNS
    [root@master1 work]# wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed
    Edit the downloaded template in three places and save it as coredns.yaml:
    kubernetes cluster.local in-addr.arpa ip6.arpa
    forward . /etc/resolv.conf
    clusterIP: 10.255.0.2
    The relevant parts of the result look like this:
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: coredns
      namespace: kube-system
    data:
      Corefile: |
        .:53 {
            errors
            health {
                lameduck 5s
            }
            ready
            kubernetes cluster.local in-addr.arpa ip6.arpa {
                fallthrough in-addr.arpa ip6.arpa
            }
            prometheus :9153
            forward . /etc/resolv.conf {
                max_concurrent 1000
            }
            cache 30
            loop
            reload
            loadbalance
        }
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: kube-dns
      namespace: kube-system
      annotations:
        prometheus.io/port: "9153"
        prometheus.io/scrape: "true"
      labels:
        k8s-app: kube-dns
        kubernetes.io/cluster-service: "true"
        kubernetes.io/name: "CoreDNS"
    spec:
      selector:
        k8s-app: kube-dns
      clusterIP: 10.255.0.2
      ports:
      - name: dns
        port: 53
        protocol: UDP
      - name: dns-tcp
        port: 53
        protocol: TCP
      - name: metrics
        port: 9153
        protocol: TCP
    [root@master1 work]# kubectl apply -f coredns.yaml

    5. Deploy Nginx and verify the cluster
    5.1 Verify cluster access

    [root@master1 ~]# vim nginx.yaml
    ---
    apiVersion: v1
    kind: ReplicationController
    metadata:
      name: nginx-controller
    spec:
      replicas: 2
      selector:
        name: nginx
      template:
        metadata:
          labels:
            name: nginx
        spec:
          containers:
          - name: nginx
            image: nginx:1.19.6
            ports:
            - containerPort: 80
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: nginx-service-nodeport
    spec:
      ports:
      - port: 80
        targetPort: 80
        nodePort: 30001
        protocol: TCP
      type: NodePort
      selector:
        name: nginx
    [root@master1 ~]# kubectl apply -f nginx.yaml
    [root@master1 ~]# kubectl get svc
    [root@master1 ~]# kubectl get pods
    Note: only two of the nodes show up in the output here.

    In a browser, http://<any node IP>:30001 serves the nginx welcome page.
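
    The same check from the command line (any node IP works; node1 shown):
    [root@master1 ~]# curl -I http://192.168.1.183:30001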

    Verify CoreDNS
    [root@master1 ~]# vim busybox.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: busybox
      namespace: default
    spec:
      containers:
      - name: busybox
        image: busybox:1.28
        command:
        - sleep
        - "3600"
        imagePullPolicy: IfNotPresent
      restartPolicy: Always
    [root@master1 ~]# kubectl create -f busybox.yaml

    On the worker node running the busybox pod:
    [root@work1 ~]# docker exec -it 055207a7d2b2 /bin/sh
    / # cat /etc/resolv.conf
    nameserver 10.255.0.2
    search default.svc.cluster.local. svc.cluster.local. cluster.local. novalocal
    options ndots:5
    / # nslookup kubernetes.default
    Server: 10.255.0.2
    Address 1: 10.255.0.2 kube-dns.kube-system.svc.cluster.local

    Name: kubernetes.default
    Address 1: 10.255.0.1 kubernetes.default.svc.cluster.local
    / # nslookup nginx-service-nodeport
    Server: 10.255.0.2
    Address 1: 10.255.0.2 kube-dns.kube-system.svc.cluster.local

    Name: nginx-service-nodeport
    Address 1: 10.255.78.185 nginx-service-nodeport.default.svc.cluster.local
    / # nslookup baidu.com
    Server: 10.255.0.2
    Address 1: 10.255.0.2 kube-dns.kube-system.svc.cluster.local

    Name: baidu.com
    Address 1: 220.181.38.148
    Address 2: 39.156.69.79

    10.255.0.2 is CoreDNS's clusterIP, so CoreDNS is configured correctly.
    Internal Service names are resolved by CoreDNS itself.
    External names such as baidu.com are, per the CoreDNS configuration above, forwarded to the /etc/resolv.conf of the node the pod runs on.
    The cluster build is complete!
    Original article (Chinese): https://blog.csdn.net/ma726518972/article/details/115025198
