
    Installing a TiDB 4.0 Cluster on Kubernetes
    Author: Zhou Wanchun
    WeChat: lovemysql3306
    
    
    
    1. Import the required images (all nodes)
    (1) Pull the images on a machine with Internet access
    docker pull pingcap/pd:v4.0.8
    docker pull pingcap/tikv:v4.0.8
    docker pull pingcap/tidb:v4.0.8
    docker pull pingcap/tidb-binlog:v4.0.8
    docker pull pingcap/ticdc:v4.0.8
    docker pull pingcap/tiflash:v4.0.8
    docker pull pingcap/tidb-monitor-reloader:v1.0.1
    docker pull pingcap/tidb-monitor-initializer:v4.0.8
    docker pull grafana/grafana:6.0.1
    docker pull prom/prometheus:v2.18.1
    docker pull busybox:1.26.2
    docker pull quay.io/external_storage/local-volume-provisioner:v2.3.4
    docker pull pingcap/tidb-operator:v1.1.7
    docker pull pingcap/tidb-backup-manager:v1.1.7
    docker pull bitnami/kubectl:latest
    docker pull pingcap/advanced-statefulset:v0.3.3
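
    The pingcap/*:v4.0.8 images can also be pulled in a loop; a minimal sketch covering just those six (the operator, monitoring and helper images above still need individual pulls):
    # for img in pd tikv tidb tidb-binlog ticdc tiflash; do
      docker pull pingcap/${img}:v4.0.8
    done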
    
    (2) Save the images to tar files
    docker save -o local-volume-provisioner-v2.3.4.tar quay.io/external_storage/local-volume-provisioner:v2.3.4
    docker save -o tidb-operator-v1.1.7.tar pingcap/tidb-operator:v1.1.7
    docker save -o tidb-backup-manager-v1.1.7.tar pingcap/tidb-backup-manager:v1.1.7
    docker save -o bitnami-kubectl.tar bitnami/kubectl:latest
    docker save -o advanced-statefulset-v0.3.3.tar pingcap/advanced-statefulset:v0.3.3
    docker save -o pd-v4.0.8.tar pingcap/pd:v4.0.8
    docker save -o tikv-v4.0.8.tar pingcap/tikv:v4.0.8
    docker save -o tidb-v4.0.8.tar pingcap/tidb:v4.0.8
    docker save -o tidb-binlog-v4.0.8.tar pingcap/tidb-binlog:v4.0.8
    docker save -o ticdc-v4.0.8.tar pingcap/ticdc:v4.0.8
    docker save -o tiflash-v4.0.8.tar pingcap/tiflash:v4.0.8
    docker save -o tidb-monitor-reloader-v1.0.1.tar pingcap/tidb-monitor-reloader:v1.0.1
    docker save -o tidb-monitor-initializer-v4.0.8.tar pingcap/tidb-monitor-initializer:v4.0.8
    docker save -o grafana-6.0.1.tar grafana/grafana:6.0.1
    docker save -o prometheus-v2.18.1.tar prom/prometheus:v2.18.1
    docker save -o busybox-1.26.2.tar busybox:1.26.2
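
    Alternatively, everything in the local image cache can be saved in one loop; a minimal sketch (assumes all images above have been pulled; the generated file names use underscores, e.g. pingcap_pd_v4.0.8.tar, unlike the manual list above):
    # mkdir -p ./tidb-images
    # for img in $(docker images --format '{{.Repository}}:{{.Tag}}' | grep -v '<none>'); do
      docker save -o "./tidb-images/$(echo "$img" | tr '/:' '__').tar" "$img"
    done
    Copy the resulting tar files to the offline nodes before loading them.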
    
    (3) Load the images (every node)
    Note: this list also loads kube-scheduler (used by tidb-scheduler, see the kubeSchedulerImageName setting in step 5), mysqlclient (used by the TidbInitializer in step 8) and tiller tars that are not in the save list above; save them the same way if you need them (tiller is only required for Helm 2, not the Helm 3 used here).
    docker load -i advanced-statefulset-v0.3.3.tar
    docker load -i bitnami-kubectl.tar
    docker load -i busybox-1.26.2.tar
    docker load -i grafana-6.0.1.tar
    docker load -i kube-scheduler-v1.15.9.tar
    docker load -i kube-scheduler-v1.16.9.tar
    docker load -i local-volume-provisioner-v2.3.4.tar
    docker load -i mysqlclient-latest.tar
    docker load -i pd-v4.0.8.tar
    docker load -i prometheus-v2.18.1.tar
    docker load -i ticdc-v4.0.8.tar
    docker load -i tidb-backup-manager-v1.1.7.tar
    docker load -i tidb-binlog-v4.0.8.tar
    docker load -i tidb-monitor-initializer-v4.0.8.tar
    docker load -i tidb-monitor-reloader-v1.0.1.tar
    docker load -i tidb-operator-v1.1.7.tar
    docker load -i tidb-v4.0.8.tar
    docker load -i tiflash-v4.0.8.tar
    docker load -i tikv-v4.0.8.tar
    docker load -i tiller-v2.16.7.tar
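
    On each offline node, the whole directory of tar files can also be loaded in one pass; a minimal sketch, assuming the tars sit in the current directory:
    # for f in ./*.tar; do
      docker load -i "$f"
    done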
    
    
    
    2. Install the helm client (master node)
    # wget -c https://get.helm.sh/helm-v3.4.2-linux-amd64.tar.gz
    # tar -xf helm-v3.4.2-linux-amd64.tar.gz
    # cd linux-amd64/
    # chmod +x helm
    # cp -a helm /usr/local/bin/
    # helm version
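
    If the master node has Internet access, the PingCAP chart repository can also be registered directly (optional here, since step 5 below installs from a downloaded chart tarball):
    # helm repo add pingcap https://charts.pingcap.org/
    # helm repo update
    # helm search repo tidb-operator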
    
    
    
    3. Create persistent storage PVs (all nodes)
    (1) Format and mount the disk
    # mkfs.ext4 /dev/sdb
    # mkdir -p /mnt/disks
    # vim /etc/fstab
    /dev/sdb  /mnt/disks  ext4  defaults 0 0
    # mount -a
    # df -Th
    
    (2) Create the directories and bind-mount them
    # for i in `seq 50`; do 
      mkdir -p /tidb-disk/pv0$i && mkdir -p /mnt/disks/pv0$i
      mount --bind /tidb-disk/pv0$i /mnt/disks/pv0$i
    done
    
    # for i in `seq 50`; do
      echo /tidb-disk/pv0${i} /mnt/disks/pv0${i} none bind 0 0 | sudo tee -a /etc/fstab
    done
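
    A quick sanity check that all 50 bind mounts are active and recorded in /etc/fstab (the expected count is 50 on each node):
    # mount | grep -c '/mnt/disks/pv0'
    # grep -c '/mnt/disks/pv0' /etc/fstab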
    
    (3) Create the provisioner
    # wget -c https://raw.githubusercontent.com/pingcap/tidb-operator/v1.1.7/manifests/local-dind/local-volume-provisioner.yaml
    In the downloaded file, set hostDir and mountDir to the mount point prepared above:
        hostDir: /mnt/disks
        mountDir: /mnt/disks
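
    For reference, the setting lives in the ConfigMap that maps the storage class to the discovery directory; after editing it should look roughly like this (a sketch based on the v2.3.4 manifest, surrounding fields omitted):
    data:
      storageClassMap: |
        local-storage:
          hostDir: /mnt/disks
          mountDir: /mnt/disks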
    
    Install the provisioner
    # kubectl apply -f ./local-volume-provisioner.yaml
    
    Check the provisioner pods
    # kubectl get pods -n kube-system
    
    Check the PV status and storage class (PVs are cluster-scoped, so no namespace is needed)
    # kubectl get pv
    # kubectl get storageclass
    
    
    
    4. Create the TiDB CRDs (master node)
    # wget -c https://raw.githubusercontent.com/pingcap/tidb-operator/v1.1.7/manifests/crd.yaml
    # kubectl apply -f ./crd.yaml
    # kubectl get crd
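
    The pingcap.com CRDs (including tidbclusters.pingcap.com, tidbmonitors.pingcap.com and tidbinitializers.pingcap.com, which are used below) can be filtered out of the full list:
    # kubectl get crd | grep pingcap.com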
    
    
    
    5. Install tidb-operator (master node)
    # wget -c http://charts.pingcap.org/tidb-operator-v1.1.7.tgz
    # tar -xf tidb-operator-v1.1.7.tgz
    # vim ./tidb-operator/values.yaml
    kubeSchedulerImageName: k8s.gcr.io/kube-scheduler
    # kubectl create namespace tidb-admin
    # helm install tidb-operator ./tidb-operator --namespace tidb-admin -f ./tidb-operator/values.yaml
    # kubectl get pods --namespace tidb-admin -l app.kubernetes.io/instance=tidb-operator
    # kubectl get pods -n tidb-admin
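
    The Helm release itself can be checked as well:
    # helm ls -n tidb-admin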
    
    
    
    6. Configure CoreDNS (master node)
    # kubectl edit cm -n kube-system coredns
    Find the fallthrough line:
        remove the pro.cluster188 entry that follows fallthrough
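
    After the edit, the kubernetes block of the Corefile should look roughly like the stock one (a sketch; zone names depend on your cluster domain):
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }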
    
    # kubectl get pod -n kube-system -o wide | grep dns
    coredns-6c7c584fd5-5vnhl                    1/1     Running            0          39m   20.2.0.123    host1   <none>           <none>
    coredns-6c7c584fd5-5xsn5                    1/1     Running            0          39m   20.2.42.63    host2   <none>           <none>
    
    # kubectl delete pod coredns-6c7c584fd5-5vnhl -n kube-system
    pod "coredns-6c7c584fd5-5vnhl" deleted
    
    # kubectl delete pod coredns-6c7c584fd5-5xsn5 -n kube-system
    pod "coredns-6c7c584fd5-5xsn5" deleted
    
    Check again; two new coredns pods will appear
    # kubectl get pod -n kube-system -o wide | grep dns
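
    Instead of deleting the pods one by one, the whole deployment can be restarted in a single command (requires kubectl 1.15+):
    # kubectl rollout restart deployment coredns -n kube-system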
    
    
    
    7. Deploy the TiDB cluster (master node)
    # wget -c https://raw.githubusercontent.com/pingcap/tidb-operator/v1.1.7/examples/advanced/tidb-cluster.yaml
    # vim ./tidb-cluster.yaml    (edit as shown below)
    # cat ./tidb-cluster.yaml
    apiVersion: pingcap.com/v1alpha1
    kind: TidbCluster
    metadata:
      name: mycluster
      namespace: mycluster
    
    spec:
      version: "v4.0.8"
      timezone: UTC
      configUpdateStrategy: RollingUpdate
      hostNetwork: false
      imagePullPolicy: IfNotPresent
      helper:
        image: busybox:1.26.2
      enableDynamicConfiguration: true
    
      pd:
        baseImage: pingcap/pd
        config: {}
        replicas: 3
        requests:
          cpu: "100m"
          storage: 1Gi
        mountClusterClientSecret: false
        storageClassName: "local-storage"
    
      tidb:
        baseImage: pingcap/tidb
        replicas: 3
        requests:
          cpu: "100m"
        config: {}
        service:
          type: NodePort
          externalTrafficPolicy: Cluster
          mysqlNodePort: 30011
          statusNodePort: 30012
    
      tikv:
        baseImage: pingcap/tikv
        config: {}
        replicas: 3
        requests:
          cpu: "100m"
          storage: 1Gi
        mountClusterClientSecret: false
        storageClassName: "local-storage"
    
      tiflash:
        baseImage: pingcap/tiflash
        maxFailoverCount: 3
        replicas: 1
        storageClaims:
        - resources:
            requests:
              storage: 100Gi
          storageClassName: local-storage
    
      pump:
        baseImage: pingcap/tidb-binlog
        replicas: 1
        storageClassName: local-storage
        requests:
          storage: 30Gi
        schedulerName: default-scheduler
        config:
          addr: 0.0.0.0:8250
          gc: 7
          heartbeat-interval: 2
    
      ticdc:
        baseImage: pingcap/ticdc
        replicas: 3
        config:
          logLevel: info
    
      enablePVReclaim: false
      pvReclaimPolicy: Retain
      tlsCluster: {}
    
    # kubectl create namespace mycluster
    # kubectl apply -f ./tidb-cluster.yaml -n mycluster
    # kubectl get po -n mycluster
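
    The pods take a few minutes to come up; they can be watched, and the overall cluster status checked via the tc short name also used in step 11:
    # kubectl get pods -n mycluster -w
    # kubectl get tc mycluster -n mycluster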
    
    
    
    8. Initialize the TiDB cluster and set the root password (master node)
    # kubectl create secret generic tidb-secret --from-literal=root="PingCAP@TiDB4000" -n mycluster
    # wget -c https://raw.githubusercontent.com/pingcap/tidb-operator/master/manifests/initializer/tidb-initializer.yaml
    # cat ./tidb-initializer.yaml
    ---
    apiVersion: pingcap.com/v1alpha1
    kind: TidbInitializer
    metadata:
      name: mycluster
      namespace: mycluster
    spec:
      image: tnir/mysqlclient
      # imagePullPolicy: IfNotPresent
      cluster:
        namespace: mycluster
        name: mycluster
      initSql: |-
        create database app;
      # initSqlConfigMap: tidb-initsql
      passwordSecret: tidb-secret
      # permitHost: 172.6.5.8
      # resources:
      #   limits:
      #     cpu: 1000m
      #     memory: 500Mi
      #   requests:
      #     cpu: 100m
      #     memory: 50Mi
      # timezone: "Asia/Shanghai"
    
    # kubectl apply -f ./tidb-initializer.yaml --namespace=mycluster
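
    Once the initializer Job finishes, the new root password can be verified through the TiDB NodePort set above (30011); a minimal check, assuming a MySQL client is installed and <node-ip> is any Kubernetes node IP:
    # mysql -h <node-ip> -P 30011 -u root -p'PingCAP@TiDB4000' -e 'select tidb_version();'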
    
    
    
    9. Deploy monitoring (master node)
    # wget -c https://raw.githubusercontent.com/pingcap/tidb-operator/master/examples/basic/tidb-monitor.yaml
    # cat ./tidb-monitor.yaml
    apiVersion: pingcap.com/v1alpha1
    kind: TidbMonitor
    metadata:
      name: mycluster
    spec:
      clusters:
      - name: mycluster
      prometheus:
        baseImage: prom/prometheus
        version: v2.18.1
      grafana:
        baseImage: grafana/grafana
        version: 6.0.1
      initializer:
        baseImage: pingcap/tidb-monitor-initializer
        version: v4.0.8
      reloader:
        baseImage: pingcap/tidb-monitor-reloader
        version: v1.0.1
      imagePullPolicy: IfNotPresent
    
    # kubectl -n mycluster apply -f ./tidb-monitor.yaml
    tidbmonitor.pingcap.com/mycluster created
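
    Grafana can then be reached through the Service the monitor creates; a minimal sketch, assuming the default <monitor-name>-grafana Service name and Grafana's default port 3000:
    # kubectl get svc -n mycluster | grep grafana
    # kubectl port-forward -n mycluster svc/mycluster-grafana 3000:3000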
    
    
    
    10. Commands to inspect the TiDB cluster
    # kubectl get pods -n mycluster -o wide
    # kubectl get all -n mycluster
    # kubectl get svc -n mycluster
    
    
    
    11. Command to change the TiDB cluster configuration
    # kubectl edit tc mycluster -n mycluster
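
    For example, TiDB can be scaled out by raising spec.tidb.replicas; a minimal sketch using kubectl patch instead of an interactive edit:
    # kubectl patch tc mycluster -n mycluster --type merge -p '{"spec":{"tidb":{"replicas":4}}}'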