Installing and Configuring GlusterFS on CentOS 6.4

    Background:
    The project currently uses rsync for file synchronization. While evaluating distributed file systems as a replacement we tried MooseFS, but the results left something to be desired. After learning about GlusterFS we decided to give it a try: compared with MooseFS it seemed simpler to deploy, and since it has no metadata server there is no single point of failure, which is very appealing.

    Environment:
    OS: CentOS 6.4 x86_64 Minimal
    Servers: sc2-log1,sc2-log2,sc2-log3,sc2-log4
    Client: sc2-ads15
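
    Before probing peers, every node must be able to resolve the others' hostnames. A minimal sketch of /etc/hosts entries for all nodes, using hypothetical addresses from the 10.60.1.0/24 range that the access-control example later assumes (substitute your real IPs):

    # cat >> /etc/hosts <<'EOF'
    10.60.1.11 sc2-log1
    10.60.1.12 sc2-log2
    10.60.1.13 sc2-log3
    10.60.1.14 sc2-log4
    10.60.1.15 sc2-ads15
    EOF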

    Steps:
    1. Install the GlusterFS packages on sc2-log{1-4}:
    # wget -P /etc/yum.repos.d http://download.gluster.org/pub/gluster/glusterfs/LATEST/CentOS/glusterfs-epel.repo
    # yum install -y glusterfs-3.4.2-1.el6 glusterfs-server-3.4.2-1.el6 glusterfs-fuse-3.4.2-1.el6

    # /etc/init.d/glusterd start
    # chkconfig glusterd on
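
    Before moving on, it is worth confirming the daemon is running and opening the firewall between the nodes. A sketch for the stock CentOS 6 iptables setup: with GlusterFS 3.4 the management daemon listens on 24007/tcp and each brick is assigned a port from 49152/tcp upward, so widen the second range to cover the number of bricks per node.

    # service glusterd status
    # glusterfs --version
    # iptables -I INPUT -p tcp --dport 24007:24008 -j ACCEPT
    # iptables -I INPUT -p tcp --dport 49152:49160 -j ACCEPT
    # service iptables save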

    2. Configure the GlusterFS cluster from sc2-log1:
    [root@sc2-log1 ~]# gluster peer probe sc2-log1

    peer probe: success: on localhost not needed

    [root@sc2-log1 ~]# gluster peer probe sc2-log2

    peer probe: success

    [root@sc2-log1 ~]# gluster peer probe sc2-log3

    peer probe: success

    [root@sc2-log1 ~]# gluster peer probe sc2-log4

    peer probe: success

    [root@sc2-log1 ~]# gluster peer status

    Number of Peers: 3
    
    Hostname: sc2-log2
    Port: 24007
    Uuid: 399973af-bae9-4326-9cbd-b5b05e5d2927
    State: Peer in Cluster (Connected)
    
    Hostname: sc2-log3
    Port: 24007
    Uuid: 833a7b8d-e3b3-4099-baf9-416ee7213337
    State: Peer in Cluster (Connected)
    
    Hostname: sc2-log4
    Port: 24007
    Uuid: 54bf115a-0119-4021-af80-7a6bca137fd9
    State: Peer in Cluster (Connected)

    3. Create the data (brick) directories on sc2-log{1-4}:
    # mkdir -p /usr/local/share/{models,geoip,wurfl}
    # ls -l /usr/local/share/

    total 24
    drwxr-xr-x   2 root root 4096 Apr  1 12:19 geoip
    drwxr-xr-x   2 root root 4096 Apr  1 12:19 models
    drwxr-xr-x   2 root root 4096 Apr  1 12:19 wurfl
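
    These directories sit on the root filesystem, which is why the volume create commands in the next step need the force keyword. For production use, GlusterFS recommends a dedicated filesystem per brick; a minimal sketch, assuming a spare disk at the hypothetical device /dev/sdb1:

    # mkfs.xfs -i size=512 /dev/sdb1
    # mkdir -p /data/brick1
    # echo '/dev/sdb1 /data/brick1 xfs defaults 0 0' >> /etc/fstab
    # mount -a
    # mkdir -p /data/brick1/{models,geoip,wurfl}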

    4. Create the GlusterFS volumes from sc2-log1:
    [root@sc2-log1 ~]# gluster volume create models replica 4 sc2-log1:/usr/local/share/models sc2-log2:/usr/local/share/models sc2-log3:/usr/local/share/models sc2-log4:/usr/local/share/models force

    volume create: models: success: please start the volume to access data

    [root@sc2-log1 ~]# gluster volume create geoip replica 4 sc2-log1:/usr/local/share/geoip sc2-log2:/usr/local/share/geoip sc2-log3:/usr/local/share/geoip sc2-log4:/usr/local/share/geoip force

    volume create: geoip: success: please start the volume to access data

    [root@sc2-log1 ~]# gluster volume create wurfl replica 4 sc2-log1:/usr/local/share/wurfl sc2-log2:/usr/local/share/wurfl sc2-log3:/usr/local/share/wurfl sc2-log4:/usr/local/share/wurfl force

    volume create: wurfl: success: please start the volume to access data

    [root@sc2-log1 ~]# gluster volume start models

    volume start: models: success

    [root@sc2-log1 ~]# gluster volume start geoip

    volume start: geoip: success

    [root@sc2-log1 ~]# gluster volume start wurfl

    volume start: wurfl: success

    [root@sc2-log1 ~]# gluster volume info

    Volume Name: models
    Type: Replicate
    Volume ID: b29b22bd-6d8c-45c0-b199-91fa5a76801f
    Status: Started
    Number of Bricks: 1 x 4 = 4
    Transport-type: tcp
    Bricks:
    Brick1: sc2-log1:/usr/local/share/models
    Brick2: sc2-log2:/usr/local/share/models
    Brick3: sc2-log3:/usr/local/share/models
    Brick4: sc2-log4:/usr/local/share/models
     
    Volume Name: geoip
    Type: Replicate
    Volume ID: 69b0caa8-7c23-4712-beae-6f536b1cffa3
    Status: Started
    Number of Bricks: 1 x 4 = 4
    Transport-type: tcp
    Bricks:
    Brick1: sc2-log1:/usr/local/share/geoip
    Brick2: sc2-log2:/usr/local/share/geoip
    Brick3: sc2-log3:/usr/local/share/geoip
    Brick4: sc2-log4:/usr/local/share/geoip
     
    Volume Name: wurfl
    Type: Replicate
    Volume ID: c723a99d-eeab-4865-819a-c0926cf7b88a
    Status: Started
    Number of Bricks: 1 x 4 = 4
    Transport-type: tcp
    Bricks:
    Brick1: sc2-log1:/usr/local/share/wurfl
    Brick2: sc2-log2:/usr/local/share/wurfl
    Brick3: sc2-log3:/usr/local/share/wurfl
    Brick4: sc2-log4:/usr/local/share/wurfl
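
    To confirm that every brick process came up, and to see which TCP port each brick was assigned, check the volume status:

    # gluster volume status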

    5. Deploy the client on sc2-ads15 and mount the GlusterFS volumes:
    [root@sc2-ads15 ~]# wget -P /etc/yum.repos.d http://download.gluster.org/pub/gluster/glusterfs/LATEST/CentOS/glusterfs-epel.repo
    [root@sc2-ads15 ~]# yum install -y glusterfs-3.4.2-1.el6 glusterfs-fuse-3.4.2-1.el6
    [root@sc2-ads15 ~]# mkdir -p /mnt/{models,geoip,wurfl}
    [root@sc2-ads15 ~]# mount -t glusterfs -o ro sc2-log3:models /mnt/models/
    [root@sc2-ads15 ~]# mount -t glusterfs -o ro sc2-log3:geoip /mnt/geoip/
    [root@sc2-ads15 ~]# mount -t glusterfs -o ro sc2-log3:wurfl /mnt/wurfl/
    [root@sc2-ads15 ~]# df -h

    Filesystem            Size  Used Avail Use% Mounted on
    /dev/mapper/vg_t-lv_root
      59G  7.7G   48G  14% /
    tmpfs                 3.9G     0  3.9G   0% /dev/shm
    /dev/xvda1            485M   33M  428M   8% /boot
    sc2-log3:models        98G  8.6G   85G  10% /mnt/models
    sc2-log3:geoip         98G  8.6G   85G  10% /mnt/geoip
    sc2-log3:wurfl         98G  8.6G   85G  10% /mnt/wurfl
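
    The server named in the mount command is only contacted to fetch the volume layout; after that the client talks to all bricks directly. To make the mounts survive reboots without depending solely on sc2-log3 being up at boot, entries along these lines can go into /etc/fstab on the client (backupvolfile-server is an option of the mount.glusterfs helper; the fallback hostname here is just an example):

    sc2-log3:models /mnt/models glusterfs ro,_netdev,backupvolfile-server=sc2-log4 0 0
    sc2-log3:geoip  /mnt/geoip  glusterfs ro,_netdev,backupvolfile-server=sc2-log4 0 0
    sc2-log3:wurfl  /mnt/wurfl  glusterfs ro,_netdev,backupvolfile-server=sc2-log4 0 0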

    6. Read/write availability tests:
    Write data through the mount point on sc2-ads15 (remounting read-write first):
    [root@sc2-ads15 ~]# umount /mnt/models
    [root@sc2-ads15 ~]# mount -t glusterfs sc2-log3:models /mnt/models/
    [root@sc2-ads15 ~]# echo "This is sc2-ads15" > /mnt/models/hello.txt
    [root@sc2-ads15 ~]# mkdir /mnt/models/testdir
    Check the brick directory on sc2-log1:
    [root@sc2-log1 ~]# ls /usr/local/share/models/

    hello.txt testdir

    Result: the write succeeded.

    Write data directly into the brick directory on sc2-log1:
    [root@sc2-log1 ~]# echo "This is sc2-log1" > /usr/local/share/models/hello.2.txt
    [root@sc2-log1 ~]# mkdir /usr/local/share/models/test2
    Check the mount point on sc2-ads15:
    [root@sc2-ads15 ~]# ls /mnt/models
    [root@sc2-ads15 ~]# ls -l /mnt/models

    hello.txt testdir

    Result: the write failed; the files written directly to the brick never appeared on the client.

    Write data through a local mount point on sc2-log1:
    [root@sc2-log1 ~]# mount -t glusterfs sc2-log1:models /mnt/models/
    [root@sc2-log1 ~]# echo "This is sc2-log1" > /mnt/models/hello.3.txt
    [root@sc2-log1 ~]# mkdir /mnt/models/test3
    Check the mount point on sc2-ads15:
    [root@sc2-ads15 models]# ls /mnt/models

    hello.2.txt  hello.3.txt hello.txt  test2  test3  testdir

    Result: the write succeeded, and the data whose direct write had previously failed to propagate was now picked up as well.

    Conclusion:
    Writing directly into a brick directory bypasses GlusterFS, so the other nodes are never notified and the data is not replicated.
    The correct approach is to perform all reads and writes through a GlusterFS mount point.
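
    If files have already been written straight to a brick, triggering a full self-heal on the volume can usually bring the replicas back in line, and heal info shows what is still pending:

    # gluster volume heal models full
    # gluster volume heal models info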

    7. Other operational notes:
    Delete a GlusterFS volume:
    # gluster volume stop models
    # gluster volume delete models
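
    Deleting a volume leaves the data on the bricks, and GlusterFS also leaves extended attributes behind that make it refuse to reuse the same directory in a new volume. A sketch of clearing a brick directory for reuse (setfattr comes from the attr package):

    # setfattr -x trusted.glusterfs.volume-id /usr/local/share/models
    # setfattr -x trusted.gfid /usr/local/share/models
    # rm -rf /usr/local/share/models/.glusterfs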

    Remove a node from the cluster:
    # gluster peer detach sc2-log4

    Access control:
    # gluster volume set models auth.allow 10.60.1.*
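
    auth.allow takes a comma-separated list of addresses or wildcard patterns, and auth.reject is the complementary option, for example:

    # gluster volume set models auth.allow 10.60.1.*,10.60.2.100
    # gluster volume set models auth.reject 10.60.3.*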

    Add GlusterFS nodes and expand a volume:
    # gluster peer probe sc2-log5
    # gluster peer probe sc2-log6
    Note: on a replica-4 volume such as models, add-brick only accepts new bricks in multiples of the replica count, so two bricks alone would be rejected; see the sketch below.
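
    A sketch of growing models by one full replica set, assuming two further hypothetical nodes sc2-log7 and sc2-log8, followed by a rebalance so existing data spreads onto the new bricks:

    # gluster peer probe sc2-log7
    # gluster peer probe sc2-log8
    # gluster volume add-brick models sc2-log5:/data/gluster sc2-log6:/data/gluster sc2-log7:/data/gluster sc2-log8:/data/gluster
    # gluster volume rebalance models start
    # gluster volume rebalance models status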

    Migrate data from one brick to another (the start/status/commit workflow belongs to replace-brick; wait until status reports the migration as complete before committing):
    # gluster volume replace-brick models sc2-log1:/usr/local/share/models sc2-log5:/usr/local/share/models start
    # gluster volume replace-brick models sc2-log1:/usr/local/share/models sc2-log5:/usr/local/share/models status
    # gluster volume replace-brick models sc2-log1:/usr/local/share/models sc2-log5:/usr/local/share/models commit

    Repair volume data (for example after sc2-log1 has died):
    # gluster volume replace-brick models sc2-log1:/usr/local/share/models sc2-log5:/usr/local/share/models commit force
    # gluster volume heal models full

    Reference: http://www.tuicool.com/articles/zQVZ7z
Original post: https://www.cnblogs.com/meditator/p/5745889.html