  • 013 CephFS File System

    1. Introduction to the Ceph File System

    CephFS provides a POSIX-compliant file system that stores both its data and its metadata as objects in Ceph.

    CephFS relies on MDS nodes to coordinate access to the RADOS cluster.

    Metadata Server (MDS)

    The MDS manages metadata (file ownership, timestamps, modes, and so on), caches metadata and access permissions, and manages client caches to maintain cache coherence.

    A CephFS client first contacts a MON. After authenticating, it queries the active MDS for file metadata and then accesses the objects that make up files and directories by communicating directly with the OSDs.

    Ceph supports one active MDS per cluster with multiple standby MDS daemons. Running several active MDS daemons at once is also supported, but that feature is not yet GA.

    Ceph currently supports only one active CephFS file system per cluster.

    The CephFS snapshot feature is also not yet GA.

    The MDS will be deployed on ceph2.

    2. Deployment

    2.1 Install the MDS package

    [root@ceph2 ~]#  yum -y install ceph-mds

    Grant authorization:

    [root@ceph2 ~]# ceph auth get-or-create mds.ceph2 mon 'allow profile mds' osd 'allow rwx' mds 'allow' -o /etc/ceph/ceph.mds.ceph2.keyring

    [root@ceph2 ~]# systemctl restart ceph-mds@ceph2

    [root@ceph2 ~]# mkdir /var/lib/ceph/mds/ceph-ceph2

    [root@ceph2 ~]# mv /etc/ceph/ceph.mds.ceph2.keyring /var/lib/ceph/mds/ceph-ceph2/keyring

    [root@ceph2 ~]# chown ceph.ceph /var/lib/ceph/mds/ceph-ceph2/keyring

    [root@ceph2 ~]# /usr/bin/ceph-mds -f --cluster ceph --id ceph2  --setuser ceph --setgroup ceph

    starting mds.ceph2 at -

    [root@ceph2 ~]# systemctl restart ceph-mds@ceph2

    [root@ceph2 ~]# ps -ef |grep mds

    ceph      991855       1  0 17:06 ?        00:00:00 /usr/bin/ceph-mds -f --cluster ceph --id ceph2 --setuser ceph --setgroup ceph
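
    The transcript above only restarts the daemon for the current session. To have the MDS come back automatically after a reboot you would normally also enable the systemd unit; this step is not in the original notes, but it is standard systemd usage:

    [root@ceph2 ~]# systemctl enable ceph-mds@ceph2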

    2.2 Create the Ceph file system

    A CephFS file system requires two storage pools: one for CephFS data and one for CephFS metadata.

    [root@ceph2 ~]# ceph osd pool create cephfs_metadata  64 64
    pool 'cephfs_metadata' created
    [root@ceph2 ~]# ceph osd pool create cephfs_data  128 128
    pool 'cephfs_data' created

    Create a file system named cephfs:

    [root@ceph2 ~]# ceph fs new cephfs cephfs_metadata  cephfs_data
    new fs with metadata pool 14 and data pool 15
    [root@ceph2 ~]# ceph -s
      cluster:
        id:     35a91e48-8244-4e96-a7ee-980ab989d20d
        health: HEALTH_OK
     
      services:
        mon:        3 daemons, quorum ceph2,ceph3,ceph4
        mgr:        ceph4(active), standbys: ceph2, ceph3
        mds:        cephfs-1/1/1 up  {0=ceph2=up:active}
        osd:        9 osds: 9 up, 9 in
        rbd-mirror: 1 daemon active
     
      data:
        pools:   12 pools, 472 pgs
        objects: 213 objects, 240 MB
        usage:   1733 MB used, 133 GB / 134 GB avail
        pgs:     472 active+clean
     
      io:
        client:   409 B/s rd, 614 B/s wr, 0 op/s rd, 3 op/s wr
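
    As an extra verification step (not part of the original transcript), ceph fs ls lists the file system together with its metadata and data pools; on this cluster the output should look roughly like the following:

    [root@ceph2 ~]# ceph fs ls
    name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]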

    2.3 Check the file system status

    [root@ceph2 ~]# ceph fs status
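
    The original notes do not include the output here. A shorter one-line summary of the MDS state is also available (an extra check, not from the original), which at this point should report something close to the mds line already shown by ceph -s above (cephfs-1/1/1 up {0=ceph2=up:active}):

    [root@ceph2 ~]# ceph mds stat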

    2.4 Install the MDS on ceph3

    [root@ceph3 ~]# yum -y install ceph-mds
    [root@ceph3 ~]# mkdir /var/lib/ceph/mds/ceph-ceph3
    [root@ceph3 ~]# ceph auth  get-or-create  mds.ceph3  mon 'allow profile mds' osd 'allow rwx' mds 'allow' -o /var/lib/ceph/mds/ceph-ceph3/keyring
    [root@ceph3 ~]# chown ceph.ceph /var/lib/ceph/mds/ceph-ceph3/keyring
    [root@ceph3 ~]# systemctl restart ceph-mds@ceph3
    [root@ceph3 ~]# ps -ef|grep mds
    ceph       87716       1  0 17:21 ?        00:00:00 /usr/bin/ceph-mds -f --cluster ceph --id ceph3 --setuser ceph --setgroup ceph

     [root@ceph3 ~]# ceph fs status

    Note: by default, Ceph supports only one active MDS; the others act as standbys. Multiple active MDS daemons can be enabled as an experimental feature, but data integrity is not guaranteed for production use. A sketch of how this would be enabled follows below.
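
    If you did want to experiment with multiple active MDS daemons anyway, the general approach is sketched here. This was not performed in this experiment, and the exact flags depend on the Ceph release (older releases also require the allow_multimds flag before max_mds can be raised):

    [root@ceph2 ~]# ceph fs set cephfs allow_multimds true     # only needed on older releases
    [root@ceph2 ~]# ceph fs set cephfs max_mds 2               # two active MDS ranks; remaining daemons stay standby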

    2.5 Create an authorized user for mounting

    [root@ceph2 ~]# ceph auth get-or-create client.cephfs mon 'allow r' osd 'allow rw pool=cephfs_metadata,allow rw pool=cephfs_data' -o /etc/ceph/ceph.client.cephfs.keyring
    [root@ceph2 ~]# ll /etc/ceph/ceph.client.cephfs.keyring
    -rw-r--r-- 1 root root 64 Mar 26 17:30 /etc/ceph/ceph.client.cephfs.keyring
    [root@ceph2 ~]# scp /etc/ceph/ceph.client.cephfs.keyring root@ceph1:/etc/ceph/
    root@ceph1's password: 
    ceph.client.cephfs.keyring  

    2.6 Test on ceph1 that the keyring works

    [root@ceph1 ~]# ceph -s --id cephfs
      cluster:
        id:     35a91e48-8244-4e96-a7ee-980ab989d20d
        health: HEALTH_OK
     
      services:
        mon:        3 daemons, quorum ceph2,ceph3,ceph4
        mgr:        ceph4(active), standbys: ceph2, ceph3
        mds:        cephfs-1/1/1 up  {0=ceph2=up:active}, 1 up:standby   # one active, one standby
        osd:        9 osds: 9 up, 9 in
        rbd-mirror: 1 daemon active
     
      data:
        pools:   12 pools, 472 pgs
        objects: 217 objects, 240 MB
        usage:   1733 MB used, 133 GB / 134 GB avail
        pgs:     472 active+clean
     
      io:
        client:   0 B/s wr, 0 op/s rd, 0 op/s wr

    2.7 Mount as the cephfs user

    The first mount attempt hung, so update the user's capabilities and try again:

    [root@ceph2 ~]# cd /etc/ceph/
    [root@ceph2 ceph]# rm -rf ceph.client.cephfs.keyring 
    [root@ceph2 ceph]# ceph auth caps client.cephfs mon 'allow r' mds 'allow' osd 'allow rw pool=cephfs_metadata, allow rw pool=cephfs_data' -o /etc/ceph/ceph.client.cephfs.keyring
    updated caps for client.cephfs
    [root@ceph2 ceph]# ceph auth get-or-create client.cephfs -o /etc/ceph/ceph.client.cephfs.keyring 
    [root@ceph2 ceph]# cat /etc/ceph/ceph.client.cephfs.keyring 
    [client.cephfs]
        key = AQAl8Zlcdbt/DRAA3cHKjt2BFSCY7cmio6mrXw==
    [root@ceph2 ceph]# scp /etc/ceph/ceph.client.cephfs.keyring  ceph1:/etc/ceph/
    root@ceph1's password: 
    ceph.client.cephfs.keyring            

    2.8 Mount with ceph-fuse

    [root@ceph1 ~]# ceph-fuse --keyring  /etc/ceph/ceph.client.cephfs.keyring --name client.cephfs -m ceph2:6789,ceph3:6789,ceph4:6789  /mnt/cephfs/
    ceph-fuse[15243]: starting ceph client
    2019-03-26 17:57:26.351649 7f4a3242b040 -1 init, newargv = 0x55bc8e6bba40 newargc=9
    ceph-fuse[15243]: starting fuse
    [root@ceph1 ~]# df -hT
    Filesystem     Type            Size  Used Avail Use% Mounted on
    /dev/vda1      xfs              40G  1.7G   39G   5% /
    devtmpfs       devtmpfs        893M     0  893M   0% /dev
    tmpfs          tmpfs           920M     0  920M   0% /dev/shm
    tmpfs          tmpfs           920M   25M  896M   3% /run
    tmpfs          tmpfs           920M     0  920M   0% /sys/fs/cgroup
    /dev/rbd1      xfs             2.0G   33M  2.0G   2% /mnt/ceph2
    /dev/rbd0      xfs             2.0G   33M  2.0G   2% /mnt/ceph
    tmpfs          tmpfs           184M     0  184M   0% /run/user/0
    ceph-fuse      fuse.ceph-fuse   43G     0   43G   0% /mnt/cephfs     # mounted successfully
    [root@ceph1 ~]# cd /mnt/cephfs/
    [root@ceph1 cephfs]# touch 111                             # try writing data; it works
    [root@ceph1 cephfs]# echo  sucessfull >> 111
    [root@ceph1 cephfs]# cat 111
    sucessfull

    2.9 Make the mount persistent in /etc/fstab

    [root@ceph1 cephfs]# cd 
    [root@ceph1 ~]# umount /mnt/cephfs
    [root@ceph1 ~]# echo "id=cephfs,keyring=/etc/ceph/ceph.client.cephfs.keyring /mnt/cephfs fuse.ceph defaults,_netdev 0 0 " >> /etc/fstab 
    [root@ceph1 ~]# mount -a
    mount: special device /dev/rbd/rbd/testimg-copy does not exist
    ceph-fuse[15320]: starting ceph client
    2019-03-26 18:03:57.070081 7f25f0c5c040 -1 init, newargv = 0x55aa8aab30a0 newargc=11
    ceph-fuse[15320]: starting fuse
    [root@ceph1 ~]# df -hT
    Filesystem     Type            Size  Used Avail Use% Mounted on
    /dev/vda1      xfs              40G  1.7G   39G   5% /
    devtmpfs       devtmpfs        893M     0  893M   0% /dev
    tmpfs          tmpfs           920M     0  920M   0% /dev/shm
    tmpfs          tmpfs           920M   25M  896M   3% /run
    tmpfs          tmpfs           920M     0  920M   0% /sys/fs/cgroup
    /dev/rbd1      xfs             2.0G   33M  2.0G   2% /mnt/ceph2
    /dev/rbd0      xfs             2.0G   33M  2.0G   2% /mnt/ceph
    tmpfs          tmpfs           184M     0  184M   0% /run/user/0
    ceph-fuse      fuse.ceph-fuse   43G     0   43G   0% /mnt/cephfs     # success
    [root@ceph1 ~]# cd /mnt/cephfs/
    [root@ceph1 cephfs]# ll
    total 1
    -rw-r--r-- 1 root root 11 Mar 26 18:00 111

    2.10 Mount with the kernel client

    [root@ceph2 ceph]# ceph auth get-key  client.cephfs  -o /etc/ceph/cephfskey
    [root@ceph2 ceph]# ll /etc/ceph/cephfskey
    -rw-r--r-- 1 root root 40 Mar 26 18:08 /etc/ceph/cephfskey
    [root@ceph2 ceph]# scp /etc/ceph/cephfskey  ceph1:/etc/ceph/
    root@ceph1's password: 
    cephfskey      
    [root@ceph1 ~]# mount -t ceph  ceph2:6789,ceph3:6789,ceph4:6789:/  /mnt/cephfs -o name=cephfs,secretfile=/etc/ceph/cephfskey
    [root@ceph1 ~]# df -hT
    Filesystem                                                 Type      Size  Used Avail Use% Mounted on
    /dev/vda1                                                  xfs        40G  1.7G   39G   5% /
    devtmpfs                                                   devtmpfs  893M     0  893M   0% /dev
    tmpfs                                                      tmpfs     920M     0  920M   0% /dev/shm
    tmpfs                                                      tmpfs     920M   25M  896M   3% /run
    tmpfs                                                      tmpfs     920M     0  920M   0% /sys/fs/cgroup
    /dev/rbd1                                                  xfs       2.0G   33M  2.0G   2% /mnt/ceph2
    /dev/rbd0                                                  xfs       2.0G   33M  2.0G   2% /mnt/ceph
    tmpfs                                                      tmpfs     184M     0  184M   0% /run/user/0
    172.25.250.11:6789,172.25.250.12:6789,172.25.250.13:6789:/ ceph      135G  1.8G  134G   2% /mnt/cephfs
    [root@ceph1 ~]# echo "ceph2:6789,ceph3:6789,ceph4:6789:/ /mnt/cephfs ceph name=cephfs,secretfile=/etc/ceph/cephfskey,noatime,_netdev 0 0" >> /etc/fstab

    Note:

    When mounting with the kernel client, the mount may time out. Workaround:

    ceph osd crush tunables hammer

    ceph osd crush reweight-all
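
    If the timeout persists after adjusting the tunables, checking the client's kernel log helps confirm whether the problem really is a mismatch between the cluster's CRUSH tunables and the features supported by the kernel client (an additional troubleshooting step, not from the original notes):

    [root@ceph1 ~]# dmesg | grep -iE 'ceph|libceph' | tail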

    Experiment complete!


    Author's note: The content of this article comes mainly from teacher Yan Wei of Yutian Education, and I have verified all of the operations myself. If you wish to repost it, please first obtain permission from Yutian Education (http://www.yutianedu.com/) or from teacher Yan (https://www.cnblogs.com/breezey/). Thank you!
