  • K8s Cluster: Cross-Region Deployment on Alibaba Cloud ECS

    Deploying a k8s cluster across regions on Alibaba Cloud servers

    Reference: https://blog.csdn.net/chen645800876/article/details/105833835

    Problem: the private networks of cloud servers in different regions are generally unreachable from one another, so nodes that register with their private IPs cannot communicate with each other.
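
    A quick way to confirm this, using the example addresses that appear later in this guide (replace with your own):

      # from work01: master01's private IP times out across regions
      ping -c 3 175.24.19.12
      # while its public IP answers
      ping -c 3 47.74.22.13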

    For convenience, the content of the referenced blog is extracted and consolidated here for next time. The approach described uses a virtual network interface plus kubeadm.

    Hostname    Spec       OS
    master01    2 core/4G  Alibaba Cloud Linux (kernel 4.19.91-21.al7.x86_64)
    work01      2 core/4G  Alibaba Cloud Linux (kernel 4.19.91-21.al7.x86_64)
    ....

    Recommendation: keep kernel versions and the like close to each other across nodes.

    Preparation

    • Open the required ports in the Alibaba Cloud security group console (a quick reachability check follows this list):

      • TCP 10250-10260: used by `kube-scheduler`, `kube-controller-manager`, `kube-proxy`, `kubelet`, etc.

      • TCP 6443: used by `kube-apiserver`

      • TCP 2379, 2380, 2381: used by etcd

      • UDP 8472: used by the flannel `vxlan` backend
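
      A rough check from another node once the rules are in place (a sketch; `nc` comes from the nmap-ncat package, and the IP is the example public IP):

      nc -vz 47.74.22.13 6443
      nc -vz 47.74.22.13 2379
      nc -vzu 47.74.22.13 8472   # UDP probe; UDP results are not fully reliable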

    • Tune kernel parameters

      cat > k8s.conf <<EOF
      # enable bridge netfilter (the br_netfilter module must be loaded; see the IPVS step below)
      net.bridge.bridge-nf-call-ip6tables = 1
      net.bridge.bridge-nf-call-iptables = 1
      # enable IP forwarding
      net.ipv4.ip_forward = 1
      # disable ipv6
      net.ipv6.conf.all.disable_ipv6=1
      EOF
      cp k8s.conf /etc/sysctl.d/k8s.conf
      sysctl -p /etc/sysctl.d/k8s.conf
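
      If sysctl -p reports that the net.bridge.* keys do not exist, load the bridge module first and re-apply:

      modprobe br_netfilter
      sysctl -p /etc/sysctl.d/k8s.conf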
      
    • Set the system time zone

      # set the system time zone to Asia/Shanghai
      timedatectl set-timezone Asia/Shanghai
      # write the current UTC time to the hardware clock
      timedatectl set-local-rtc 0
      # restart the services that depend on system time
      systemctl restart rsyslog
      systemctl restart crond
      
    • Disable services the cluster does not need

      # stop the mail service
      systemctl stop postfix && systemctl disable postfix
      
    • Configure rsyslogd and systemd-journald

      Two logging services run by default; let journald keep the logs and stop forwarding them to rsyslogd.

      mkdir /var/log/journal # directory for persisted logs
      mkdir /etc/systemd/journald.conf.d
      cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
      [Journal]
      # persist logs to disk
      Storage=persistent
      
      # compress rotated logs
      Compress=yes
      
      SyncIntervalSec=5m
      RateLimitInterval=30s
      RateLimitBurst=1000
      
      # cap total disk usage at 10G
      SystemMaxUse=10G
      
      # cap a single log file at 200M
      SystemMaxFileSize=200M
      
      # keep logs for 2 weeks
      MaxRetentionSec=2week
      
      # do not forward logs to syslog
      ForwardToSyslog=no
      
      EOF
      
      systemctl restart systemd-journald
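
      A quick check that journald picked up the new settings:

      systemctl status systemd-journald
      journalctl --disk-usage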
      
    • IPVS prerequisites

      IPVS forwards more efficiently than iptables, and its rules are also easier to read.

      # step1
      modprobe br_netfilter
      
      # step2
      cat > /etc/sysconfig/modules/ipvs.modules <<EOF
      #!/bin/bash
      modprobe -- ip_vs
      modprobe -- ip_vs_rr
      modprobe -- ip_vs_wrr
      modprobe -- ip_vs_sh
      # on kernels >= 4.19 this module was merged into nf_conntrack; use that name if this one fails
      modprobe -- nf_conntrack_ipv4
      EOF
      
      # step3
      chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
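
      kube-proxy's IPVS mode also expects the ipset userland package on each node; ipvsadm is optional but handy for inspecting the rules kube-proxy programs later:

      yum -y install ipset ipvsadm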
      
    • Install Docker

      # step 1: install the necessary system tools
      yum install -y yum-utils device-mapper-persistent-data lvm2
      # step 2: add the repository
      yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
      # step 3: refresh the cache and install Docker CE
      yum makecache fast
      yum -y install docker-ce
      # step 4: start the Docker service
      systemctl start docker
      
      # create the `/etc/docker` directory
      mkdir -p /etc/docker
      
      # configure the `daemon`
      cat > /etc/docker/daemon.json << EOF
      {
        "exec-opts": ["native.cgroupdriver=systemd"],
        "log-driver": "json-file",
        "log-opts": {
          "max-size": "100m"
        }
      }
      EOF
      
      # restart docker and enable it at boot
      systemctl daemon-reload && systemctl restart docker && systemctl enable docker
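
      Confirm the cgroup driver, since kubelet expects systemd here:

      docker info | grep -i 'cgroup driver'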
      
    • Disable the swap partition

      swapoff -a
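
      swapoff only lasts until the next reboot; to make it permanent, also comment out the swap entry in /etc/fstab:

      sed -ri 's/.*swap.*/#&/' /etc/fstab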
      
    • Configure a registry mirror

      For Docker client versions above 1.10.0.

      Enable the accelerator by editing the daemon config file /etc/docker/daemon.json. Note that the tee below overwrites the daemon.json written above, so keep the earlier keys:

      sudo mkdir -p /etc/docker
      sudo tee /etc/docker/daemon.json <<-'EOF'
      {
        "exec-opts": ["native.cgroupdriver=systemd"],
        "log-driver": "json-file",
        "log-opts": {
          "max-size": "100m"
        },
        "registry-mirrors": ["https://ckwu66hv.mirror.aliyuncs.com"]
      }
      EOF
      sudo systemctl daemon-reload
      sudo systemctl restart docker
      
    • Install kubeadm, kubelet, and kubectl

      Kubernetes v1.18.0 is used here.

      # add the repo
      cat <<EOF > /etc/yum.repos.d/kubernetes.repo
      [kubernetes]
      name=Kubernetes
      baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
      enabled=1
      gpgcheck=1
      repo_gpgcheck=1
      gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
      EOF
      
      # disable selinux (temporary; edit /etc/selinux/config to persist across reboots)
      setenforce 0
      
      # install kubelet, kubeadm, kubectl
      yum -y install kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0
      
      # enable kubelet at boot
      systemctl enable kubelet
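
      Verify the installed versions:

      kubeadm version -o short
      kubelet --version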
      
    • Create a virtual network interface (all nodes)

      # step1: note, substitute your own public IP below
      cat > /etc/sysconfig/network-scripts/ifcfg-eth0:1 <<EOF
      BOOTPROTO=static
      DEVICE=eth0:1
      IPADDR=<your public IP>
      PREFIX=32
      TYPE=Ethernet
      USERCTL=no
      ONBOOT=yes
      EOF
      # step2: restart networking (on CentOS 8, a reboot is needed instead)
      systemctl restart network
      # step3: check that the new IP shows up
      ip addr
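
      The alias does not appear as a separate device; look for the extra address under eth0:

      ip addr show eth0 | grep inet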
      
    • Modify the kubelet startup parameters (important; do this on every node)

      # this file exists once kubeadm is installed
      vim /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
      
      # Note: this step is critical; without it, nodes still register into the cluster with their private IPs
      # append --node-ip=<public IP> at the end of the ExecStart line below
      
      # Note: This dropin only works with kubeadm and kubelet v1.11+
      [Service]
      Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
      Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
      # This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
      EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
      # This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
      # the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
      EnvironmentFile=-/etc/sysconfig/kubelet
      ExecStart=
      ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS  $KUBELET_EXTRA_ARGS --node-ip=<public IP>
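
      After editing the drop-in, reload systemd so the change takes effect (kubeadm init/join starts kubelet, but restart it if it is already running):

      systemctl daemon-reload
      systemctl restart kubelet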
      
    • Initialize the master node with kubeadm (master node)

      List the images that need to be pulled with:

      kubeadm config images list
      

      Then run the following (note: the tags below are v1.18.10, while kubeadm-config.yaml pins kubernetesVersion: v1.18.0; keep the two consistent, e.g. pull the exact tags that `kubeadm config images list` printed):

      docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.18.10
      docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.18.10
      docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.18.10
      docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.18.10
      docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
      docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0
      docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.7
      docker pull quay.io/coreos/flannel:v0.13.0-rc2
      

      Re-tag them to the names kubeadm expects:

      docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.18.10 k8s.gcr.io/kube-apiserver:v1.18.10
      docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.18.10 k8s.gcr.io/kube-controller-manager:v1.18.10
      docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.18.10 k8s.gcr.io/kube-scheduler:v1.18.10
      docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.18.10 k8s.gcr.io/kube-proxy:v1.18.10
      docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2 k8s.gcr.io/pause:3.2
      docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0 k8s.gcr.io/etcd:3.4.3-0
      docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.7 k8s.gcr.io/coredns:1.6.7
      
      # step1: create the config file; note, replace the IPs below
      cat > kubeadm-config.yaml <<EOF
      apiVersion: kubeadm.k8s.io/v1beta2
      kind: ClusterConfiguration
      kubernetesVersion: v1.18.0
      apiServer:
        certSANs:    # list the hostname, IP, and VIP of every kube-apiserver node
        - master    # replace with your hostname
        - 47.74.22.13   # replace with your public IP
        - 175.24.19.12  # replace with your private IP
        - 10.96.0.1   # do not replace; this is the in-cluster API address, some services rely on it
      controlPlaneEndpoint: 47.74.22.13:6443 # replace with your public IP
      networking:
        podSubnet: 10.244.0.0/16
        serviceSubnet: 10.96.0.0/12
      ---
      # switch the default proxy mode to ipvs
      apiVersion: kubeproxy.config.k8s.io/v1alpha1
      kind: KubeProxyConfiguration
      featureGates:
        SupportIPVSProxyMode: true
      mode: ipvs
      EOF
      
      # step2: on a machine with 1 CPU core or 1G of RAM, append --ignore-preflight-errors=all or initialization will fail
      # note: on success, this step prints two important pieces of information
      kubeadm init --config=kubeadm-config.yaml
      
      # info 1: after a successful init, a kubeconfig is generated for talking to the API server; run the following
      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config
      
      # info 2: this command is used later to join worker nodes
      kubeadm join 47.74.22.13:6443 --token sdfs.dsfsdfsdfijdth \
          --discovery-token-ca-cert-hash sha256:sdfsdfsdfsdfsdfsdfsdfsdfg9a460f44b11805009124
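
      If this output is lost (the token expires after 24 hours by default), a fresh join command can be printed on the master:

      kubeadm token create --print-join-command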
      
    • Modify the kube-apiserver parameters (master node)

      # two changes: add --bind-address and modify --advertise-address
      vim /etc/kubernetes/manifests/kube-apiserver.yaml
      
      apiVersion: v1
      kind: Pod
      metadata:
        annotations:
          kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 47.74.22.13:6443
        creationTimestamp: null
        labels:
          component: kube-apiserver
          tier: control-plane
        name: kube-apiserver
        namespace: kube-system
      spec:
        containers:
        - command:
          - kube-apiserver
          - --advertise-address=47.74.22.13  # change to the public IP
          - --bind-address=0.0.0.0 # add this parameter
          - --allow-privileged=true
          - --authorization-mode=Node,RBAC
          - --client-ca-file=/etc/kubernetes/pki/ca.crt
          - --enable-admission-plugins=NodeRestriction
          - --enable-bootstrap-token-auth=true
          - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
          - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
          - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
          - --etcd-servers=https://127.0.0.1:2379
          - --insecure-port=0
          - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
          - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
          - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
          - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
          - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
          - --requestheader-allowed-names=front-proxy-client
          - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
          - --requestheader-extra-headers-prefix=X-Remote-Extra-
          - --requestheader-group-headers=X-Remote-Group
          - --requestheader-username-headers=X-Remote-User
          - --secure-port=6443
          - --service-account-key-file=/etc/kubernetes/pki/sa.pub
          - --service-cluster-ip-range=10.96.0.0/12
          - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
          - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
          image: k8s.gcr.io/kube-apiserver:v1.18.0
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 8
            httpGet:
              host: 175.24.19.12
              path: /healthz
              port: 6443
              scheme: HTTPS
            initialDelaySeconds: 15
            timeoutSeconds: 15
          name: kube-apiserver
          resources:
            requests:
              cpu: 250m
          volumeMounts:
          - mountPath: /etc/ssl/certs
            name: ca-certs
            readOnly: true
          - mountPath: /etc/pki
            name: etc-pki
            readOnly: true
          - mountPath: /etc/kubernetes/pki
            name: k8s-certs
            readOnly: true
        hostNetwork: true
        priorityClassName: system-cluster-critical
        volumes:
        - hostPath:
            path: /etc/ssl/certs
            type: DirectoryOrCreate
          name: ca-certs
        - hostPath:
            path: /etc/pki
            type: DirectoryOrCreate
          name: etc-pki
        - hostPath:
            path: /etc/kubernetes/pki
            type: DirectoryOrCreate
          name: k8s-certs
      status: {}
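
      kubelet restarts the static pod automatically when the manifest changes; confirm the apiserver comes back:

      kubectl -n kube-system get pods -o wide | grep kube-apiserver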
      
    • Join worker nodes to the cluster (worker nodes)

      Pull the required images first:

      docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.18.10
      docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.18.10
      docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
      docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.7
      docker pull quay.io/coreos/flannel:v0.13.0-rc2
      
      
      docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.18.10 k8s.gcr.io/kube-apiserver:v1.18.10
      docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.18.10 k8s.gcr.io/kube-proxy:v1.18.10
      docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2 k8s.gcr.io/pause:3.2
      docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.7 k8s.gcr.io/coredns:1.6.7
      

      Then join:

      kubeadm join 47.74.22.13:6443 --token sdfs.dsfsdfsdfijdth \
          --discovery-token-ca-cert-hash sha256:sdfsdfsdfsdfsdfsdfsdfsdfg9a460f44b118050091245c1d
      
    • Check that the nodes joined (master node)

      # on success, INTERNAL-IP shows the public IP for every node
      [root@master01 ~]# kubectl get nodes -o wide
      

    • Modify the flannel manifest and install it (master node)

      # download the flannel yaml manifest (or use the complete flannel.yaml in the next section), e.g.
      # wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
      
      # two changes are needed; first, under args, add
       args:
       - --public-ip=$(PUBLIC_IP) # add this parameter to declare the public IP
       - --iface=eth0             # add this parameter to bind the interface
       
       
       # then, under env, add
       env:
       - name: PUBLIC_IP     # add this environment variable
         valueFrom:
           fieldRef:
             fieldPath: status.podIP  # flannel runs with hostNetwork, so this resolves to the node IP, i.e. the public IP kubelet registered
      
      # create flannel
      kubectl apply -f flannel.yaml
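
      Confirm one flannel pod is Running per node:

      kubectl -n kube-system get pods -l app=flannel -o wide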
      
    • Complete flannel.yaml

      ---
      apiVersion: policy/v1beta1
      kind: PodSecurityPolicy
      metadata:
        name: psp.flannel.unprivileged
        annotations:
          seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
          seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
          apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
          apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
      spec:
        privileged: false
        volumes:
        - configMap
        - secret
        - emptyDir
        - hostPath
        allowedHostPaths:
        - pathPrefix: "/etc/cni/net.d"
        - pathPrefix: "/etc/kube-flannel"
        - pathPrefix: "/run/flannel"
        readOnlyRootFilesystem: false
        # Users and groups
        runAsUser:
          rule: RunAsAny
        supplementalGroups:
          rule: RunAsAny
        fsGroup:
          rule: RunAsAny
        # Privilege Escalation
        allowPrivilegeEscalation: false
        defaultAllowPrivilegeEscalation: false
        # Capabilities
        allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
        defaultAddCapabilities: []
        requiredDropCapabilities: []
        # Host namespaces
        hostPID: false
        hostIPC: false
        hostNetwork: true
        hostPorts:
        - min: 0
          max: 65535
        # SELinux
        seLinux:
          # SELinux is unused in CaaSP
          rule: 'RunAsAny'
      ---
      kind: ClusterRole
      apiVersion: rbac.authorization.k8s.io/v1beta1
      metadata:
        name: flannel
      rules:
      - apiGroups: ['extensions']
        resources: ['podsecuritypolicies']
        verbs: ['use']
        resourceNames: ['psp.flannel.unprivileged']
      - apiGroups:
        - ""
        resources:
        - pods
        verbs:
        - get
      - apiGroups:
        - ""
        resources:
        - nodes
        verbs:
        - list
        - watch
      - apiGroups:
        - ""
        resources:
        - nodes/status
        verbs:
        - patch
      ---
      kind: ClusterRoleBinding
      apiVersion: rbac.authorization.k8s.io/v1beta1
      metadata:
        name: flannel
      roleRef:
        apiGroup: rbac.authorization.k8s.io
        kind: ClusterRole
        name: flannel
      subjects:
      - kind: ServiceAccount
        name: flannel
        namespace: kube-system
      ---
      apiVersion: v1
      kind: ServiceAccount
      metadata:
        name: flannel
        namespace: kube-system
      ---
      kind: ConfigMap
      apiVersion: v1
      metadata:
        name: kube-flannel-cfg
        namespace: kube-system
        labels:
          tier: node
          app: flannel
      data:
        cni-conf.json: |
          {
            "name": "cbr0",
            "cniVersion": "0.3.1",
            "plugins": [
              {
                "type": "flannel",
                "delegate": {
                  "hairpinMode": true,
                  "isDefaultGateway": true
                }
              },
              {
                "type": "portmap",
                "capabilities": {
                  "portMappings": true
                }
              }
            ]
          }
        net-conf.json: |
          {
            "Network": "10.244.0.0/16",
            "Backend": {
              "Type": "vxlan"
            }
          }
      ---
      apiVersion: apps/v1
      kind: DaemonSet
      metadata:
        name: kube-flannel-ds
        namespace: kube-system
        labels:
          tier: node
          app: flannel
      spec:
        selector:
          matchLabels:
            app: flannel
        template:
          metadata:
            labels:
              tier: node
              app: flannel
          spec:
            affinity:
              nodeAffinity:
                requiredDuringSchedulingIgnoredDuringExecution:
                  nodeSelectorTerms:
                  - matchExpressions:
                    - key: kubernetes.io/os
                      operator: In
                      values:
                      - linux
            hostNetwork: true
            priorityClassName: system-node-critical
            tolerations:
            - operator: Exists
              effect: NoSchedule
            serviceAccountName: flannel
            initContainers:
            - name: install-cni
              image: quay.io/coreos/flannel:v0.13.0-rc2
              command:
              - cp
              args:
              - -f
              - /etc/kube-flannel/cni-conf.json
              - /etc/cni/net.d/10-flannel.conflist
              volumeMounts:
              - name: cni
                mountPath: /etc/cni/net.d
              - name: flannel-cfg
                mountPath: /etc/kube-flannel/
            containers:
            - name: kube-flannel
              image: quay.io/coreos/flannel:v0.13.0-rc2
              command:
              - /opt/bin/flanneld
              args:
              - --ip-masq
              - --public-ip=$(PUBLIC_IP) 
              - --iface=eth0
              - --kube-subnet-mgr
              resources:
                requests:
                  cpu: "100m"
                  memory: "50Mi"
                limits:
                  cpu: "100m"
                  memory: "50Mi"
              securityContext:
                privileged: false
                capabilities:
                  add: ["NET_ADMIN", "NET_RAW"]
              env:
              - name: PUBLIC_IP    
                valueFrom:          
                  fieldRef:          
                    fieldPath: status.podIP
              - name: POD_NAME
                valueFrom:
                  fieldRef:
                    fieldPath: metadata.name
              - name: POD_NAMESPACE
                valueFrom:
                  fieldRef:
                    fieldPath: metadata.namespace
              volumeMounts:
              - name: run
                mountPath: /run/flannel
              - name: flannel-cfg
                mountPath: /etc/kube-flannel/
            volumes:
            - name: run
              hostPath:
                path: /run/flannel
            - name: cni
              hostPath:
                path: /etc/cni/net.d
            - name: flannel-cfg
              configMap:
                name: kube-flannel-cfg
      
    • Check that the network is connected (master node)

      # check that all pods are in the Ready state
      kubectl get pods -o wide --all-namespaces
      ...
      
      # manually create a pod
      kubectl create deployment nginx --image=nginx
      
      # look up the pod's IP
      kubectl get pods -o wide
      
      # from the master or any other node, ping that IP to see whether it is reachable
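
      As an optional extra check, scale to two replicas so pods land on different nodes, then curl each pod IP from either node (<pod-ip> is whatever the previous command printed):

      kubectl scale deployment nginx --replicas=2
      kubectl get pods -o wide
      curl <pod-ip>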
      

    Barring surprises, that's it.

  • Original article: https://www.cnblogs.com/ljincheng/p/13796781.html