  • Using GlusterFS for Data Persistence in Kubernetes

    Stateful applications deployed in Kubernetes need persistent storage, so a storage backend is indispensable. Kubernetes supports many storage options; at my company we currently offer GlusterFS (deployed both containerized and on bare metal) and NFS for applications to choose from. This post is a quick hands-on of using GlusterFS as the data store for Kubernetes.

    Introduction

    GlusterFS is a scalable network file system. Compared with other distributed file systems it offers high scalability, high availability and high performance, and it can be scaled out horizontally. Because its design has no metadata server, the service has no single point of failure.

    Planning

    • 192.168.209.130 master-01
    • 192.168.209.131 master-02
    • 192.168.209.132 master-03

    These three machines act as the Kubernetes masters and also as the GlusterFS cluster nodes.

    Add hosts entries

    [root@master-01 gluster]# cat /etc/hosts
    192.168.209.130 master-01
    192.168.209.131 master-02
    192.168.209.132 master-03
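
    The same entries must exist on all three nodes. A minimal sketch for copying the file to the other two machines, assuming password-less root SSH between the nodes:

    # Push the hosts entries to the other two nodes.
    for h in master-02 master-03; do
      scp /etc/hosts root@"$h":/etc/hosts
    done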
    

    Install GlusterFS

    The default GlusterFS configuration file is /etc/glusterfs/glusterd.vol, and the default working directory is /var/lib/glusterd.

    [root@master-01 ~]#yum install -y centos-release-gluster glusterfs glusterfs-server glusterfs-fuse
    [root@master-01 ~]#systemctl start glusterd
    [root@master-01 ~]# systemctl status  glusterd
    ● glusterd.service - GlusterFS, a clustered file-system server
       Loaded: loaded (/usr/lib/systemd/system/glusterd.service; disabled; vendor preset: disabled)
       Active: active (running) since 日 2019-04-28 13:20:08 CST; 2min 57s ago
         Docs: man:glusterd(8)
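
    glusterd should be installed and running on all three nodes, not just master-01. If firewalld is active, the GlusterFS ports also need to be opened; a sketch (the 49152+ brick port range is the default on recent GlusterFS releases, adjust to your version):

    # Run on each of the three nodes.
    systemctl enable --now glusterd                       # start now and at boot
    firewall-cmd --permanent --add-port=24007-24008/tcp   # glusterd / management
    firewall-cmd --permanent --add-port=49152-49251/tcp   # brick processes
    firewall-cmd --reload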
    

    Cluster configuration

    [root@master-01 ~]# gluster peer probe master-01
    peer probe: success. Probe on localhost not needed   
    [root@master-01 ~]# gluster peer probe master-02
    peer probe: success. 
    [root@master-01 ~]# gluster peer probe master-03
    peer probe: success. 
    

    Create and start the volume

    [root@master-01 ~]# gluster volume create gv0 replica 3 master-01:/opt/data/gv0 master-02:/opt/data/gv0 master-03:/opt/data/gv0 force
    volume create: gv0: success: please start the volume to access data
    [root@master-01 ~]# gluster volume  start gv0
    volume start: gv0: success
    
    • By default GlusterFS refuses to create bricks on the / partition; since I have no separate data disk here, I used force to create the volume anyway.
    • The /opt/data/gv0 directory must exist on all three machines first (see the sketch below).
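
    A sketch for creating the brick directory on all three machines in one go, again assuming password-less root SSH from master-01:

    # Create the brick directory on every node before creating the volume.
    for h in master-01 master-02 master-03; do
      ssh root@"$h" "mkdir -p /opt/data/gv0"
    done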

    Check peer and volume information

    [root@master-01 ~]# gluster peer status
    Number of Peers: 2
    Hostname: master-02
    Uuid: 6f5a823c-4044-4dd9-95bc-129018b2eec0
    State: Peer in Cluster (Connected)
    Hostname: master-03
    Uuid: dfeb6d81-c90b-4628-82f2-e379ed0ceded
    State: Peer in Cluster (Connected)
    [root@master-01 ~]# gluster volume info
    Volume Name: gv0
    Type: Replicate
    Volume ID: 1891c9f8-8acb-48c2-94d9-29e2a625f9a0
    Status: Started
    Snapshot Count: 0
    Number of Bricks: 1 x 3 = 3
    Transport-type: tcp
    Bricks:
    Brick1: master-01:/opt/data/gv0
    Brick2: master-02:/opt/data/gv0
    Brick3: master-03:/opt/data/gv0
    Options Reconfigured:
    transport.address-family: inet
    nfs.disable: on
    performance.client-io-threads: off
    

    Check listening ports

    [root@master-01 ~]#  netstat -tunlp|grep glusterd
    tcp        0      0 0.0.0.0:24007           0.0.0.0:*               LISTEN      19533/glusterd
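
    glusterd itself listens on 24007; each brick process listens on its own port (typically 49152 and up). The per-volume view shows both:

    # Show each brick's PID and TCP port, plus the self-heal daemons.
    gluster volume status gv0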
    

    Test mounting

    To use or mount GlusterFS volumes from a Kubernetes cluster, every node that will mount them needs glusterfs-fuse installed.

    [root@harbor-01 harbor]# yum install -y  centos-release-gluster  glusterfs-fuse
    [root@master-01 ~]# mount -t glusterfs master-01:/gv0 /mnt
    [root@master-01 ~]# df -h
    ...
    master-01:/gv0            38G   13G   25G   34% /mnt
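
    To make the mount survive reboots without depending only on master-01, an fstab entry can be added; a sketch (backup-volfile-servers is a glusterfs-fuse mount option, verify it against your installed version):

    # Mount at boot, falling back to the other nodes for the volume file.
    echo 'master-01:/gv0 /mnt glusterfs defaults,_netdev,backup-volfile-servers=master-02:master-03 0 0' >> /etc/fstab
    mount -a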
    

    Common commands

    # Delete a volume
    gluster volume stop gv0
    gluster volume delete gv0
    # Remove a machine from the trusted pool
    gluster peer detach master-01
    # Restrict which networks may access glusterfs
    gluster volume set gv0 auth.allow 172.28.26.*
    gluster volume set gv0 auth.allow 192.168.222.1,192.168.*.*
    # Add new machines to the pool and then to the volume (with a replica count of 2,
    # bricks must be added in multiples of 2: 2, 4, 6, 8, ...)
    gluster peer probe master-02
    gluster peer probe master-03
    # Add bricks
    gluster volume add-brick gv0 replica 2 master-02:/data/gluster master-03:/data/gluster force
    # Remove bricks
    gluster volume remove-brick gv0 replica 2 master-02:/opt/data/gv0 master-03:/opt/data/gv0 start
    gluster volume remove-brick gv0 replica 2 master-02:/opt/data/gv0 master-03:/opt/data/gv0 status
    gluster volume remove-brick gv0 replica 2 master-02:/opt/data/gv0 master-03:/opt/data/gv0 commit
    # Note: when expanding or shrinking a volume, the number of bricks added or removed
    # must satisfy the requirements of the volume type.
    # After expanding or shrinking a volume, rebalance its data.
    gluster volume rebalance mamm-volume start|stop|status
    ###########################################################
    # Brick migration -- move data online between bricks
    # Start the migration
    gluster volume replace-brick gv0 master-02:/opt/data/gv0 master-02:/opt/test start force
    # Check migration status
    gluster volume replace-brick gv0 master-02:/opt/data/gv0 master-02:/opt/test status
    # Commit once the migration has finished
    gluster volume replace-brick gv0 master-02:/opt/data/gv0 master-02:/opt/test commit
    # If the machine has failed, force the commit
    gluster volume replace-brick gv0 master-02:/opt/data/gv0 master-02:/opt/test commit force
    ###########################################################
    # Trigger replica self-heal
    gluster volume heal mamm-volume         # heal only files that need it
    gluster volume heal mamm-volume full    # heal all files
    gluster volume heal mamm-volume info    # show heal details
    #####################################################
    # data-self-heal, metadata-self-heal and entry-self-heal
    # Enable or disable self-healing of file data, file metadata and directory
    # entries; all three are "on" by default.
    # Example of turning one of them off:
    gluster volume set gv0 entry-self-heal off
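
    Applied to this cluster, restricting gv0 to the node subnet from the planning section would look like this (a sketch using the auth.allow option listed above):

    # Only allow clients from the 192.168.209.0/24 node network to mount gv0.
    gluster volume set gv0 auth.allow 192.168.209.*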
    

    That completes the GlusterFS deployment; next we use it from Kubernetes.

    Create the Endpoints

    # Endpoints and Service manifest
    [root@master-01 gluster]# cat gluster-ep.yaml 
    apiVersion: v1
    kind: Endpoints
    metadata:
      name: glusterfs-cluster
    subsets:
    - addresses:
      - ip: 192.168.209.130
      - ip: 192.168.209.131
      - ip: 192.168.209.132
      ports:
      - port: 1990
        protocol: TCP
    ---
    kind: Service
    apiVersion: v1
    metadata:
      name: glusterfs-cluster
    spec:
      ports:
      - port: 1990
    # Create the resources
    [root@master-01 gluster]# kubectl  apply -f gluster-ep.yaml 
    endpoints/glusterfs-cluster created
    service/glusterfs-cluster created
    # Check the result
    [root@master-01 gluster]# kubectl  get ep,svc|grep glusterfs
    endpoints/glusterfs-cluster   192.168.209.130:1990,192.168.209.131:1990,192.168.209.132:1990   2m4s
    service/glusterfs-cluster   ClusterIP   10.254.120.33   <none>        1990/TCP       2m4s
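
    The Service defines no selector, so the manually created Endpoints object above is what supplies the GlusterFS node addresses; the PV in the next step references it by name. A quick way to double-check the wiring:

    # The addresses listed here are the GlusterFS nodes the kubelet will contact.
    kubectl describe endpoints glusterfs-cluster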
    
    

    Create the PV/PVC

    # Manifest
    [root@master-01 gluster]# cat gluster-pvc.yaml 
    ---
    apiVersion: v1
    kind: PersistentVolume   # pv
    metadata:
      name: test-pv
      namespace: default
      labels:
        alicloud-pvname: test-pv
    spec:     # PV attributes
      capacity:         # capacity
        storage: 2Gi   # storage size
      accessModes:    # access modes: ReadWriteOnce, ReadOnlyMany or ReadWriteMany
        - ReadWriteMany  
      glusterfs:
        endpoints: 'glusterfs-cluster'
        path: 'gv0'   
        readOnly: false
      persistentVolumeReclaimPolicy: Recycle  
    ---
    kind: PersistentVolumeClaim  # pvc
    apiVersion: v1
    metadata:
      name: test-pvc
      namespace: default
    spec:
      accessModes:
        - ReadWriteMany
      resources:
        requests:
          storage: 2Gi
      selector:
        matchLabels:
          alicloud-pvname: test-pv
    # Create the resources
    [root@master-01 gluster]# kubectl  apply -f gluster-pvc.yaml 
    persistentvolume/test-pv created
    persistentvolumeclaim/test-pvc created
    # Check the result
    [root@master-01 gluster]# kubectl  get pv,pvc
    NAME                     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM            STORAGECLASS   REASON   AGE
    persistentvolume/test-pv   2Gi        RWX            Recycle          Bound    default/test-pvc                           50s
    NAME                           STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
    persistentvolumeclaim/test-pvc   Bound    test-pv    2Gi        RWX                           50s
    

    Create the Pod

    # Manifest
    [root@master-01 gluster]# cat ng-deploy.yaml 
    ---
    apiVersion: apps/v1beta1
    kind: Deployment
    metadata:
      labels:
        run: nginx01
      name: nginx01
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: nginx-pod1  
      template:
        metadata:
          labels:
            app: nginx-pod1
        spec:
          containers:
          - name: nginx11
            image: nginx
            imagePullPolicy: Always
            volumeMounts:
            - mountPath: /usr/share/nginx/html   # mount path inside the container
              name: glu
          restartPolicy: Always
          volumes: 
          - name: glu   # define the volume
            persistentVolumeClaim:
              claimName: test-pvc    # use the PVC created above
    ---
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        app: nginx1
      name: nginx1
    spec:
      ports:
      - port: 80
        protocol: TCP
        targetPort: 80
        nodePort: 30003
      selector:
        app: nginx-pod1
      type: NodePort
    
    # Create
    [root@master-01 gluster]# kubectl  apply -f ng-deploy.yaml 
    deployment.apps/nginx01 created
    service/nginx1 created
    

    Check status

    [root@master-01 gluster]# kubectl  get pod,svc|grep nginx
    pod/nginx01-7df547bf8c-cxvht       1/1     Running       0          10m
    service/nginx1              NodePort    10.254.55.187   <none>        80:30003/TCP   79m
    
    [root@master-01 gluster]# kubectl  describe po nginx01-7df547bf8c-cxvht
    Name:               nginx01-7df547bf8c-cxvht
    ......
    Volumes:
      glu:
        Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
        ClaimName:  test-pvc
        ReadOnly:   false
      default-token-tb5bg:
        Type:        Secret (a volume populated by a Secret)
        SecretName:  default-token-tb5bg
    
    

    Test

    Create an index.html in the GlusterFS volume and write some content into it:

    [root@master-01 gluster]# vi  /opt/data/gv0/index.html
    [root@master-01 gluster]# kubectl  exec -ti nginx01-7df547bf8c-cxvht bash
    root@nginx01-7df547bf8c-cxvht:/# ls /usr/share/nginx/html/
    index.html
    root@nginx01-7df547bf8c-cxvht:/# cat  /usr/share/nginx/html/index.html 
    <h1>hello glusterfs</h1>
    

    Browser access test

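    Without a browser, the NodePort can also be checked from the command line (node IP from the planning section, port 30003 from the Service above):

    # Any of the three node IPs should return the page stored on the GlusterFS volume.
    curl http://192.168.209.130:30003/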

    Now even if the Pod dies, the data is not lost. One caveat: only adopt GlusterFS if someone on the team can provide technical backup for it; otherwise troubleshooting problems can be very painful.

  • Original post: https://www.cnblogs.com/guigujun/p/10789142.html