  • rbd_rados command transcripts

    mimic or luminous

    rbd_rados

    sudo mount -t ceph 192.168.7.151:6789:/ /mnt -o name=admin,secret=AQBaPZNcCalvLRAAt4iyva3DHfb8NbOX4MxBAw==
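    (The line above is a CephFS kernel mount, kept here for contrast with the rbd commands below.) As a sketch, the key can also be read from a file instead of being pasted on the command line; the path /etc/ceph/admin.secret is an assumption:

    # assumed keyfile; it must contain only the base64 key, nothing else
    sudo mount -t ceph 192.168.7.151:6789:/ /mnt -o name=admin,secretfile=/etc/ceph/admin.secret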


    rbd
    ceph osd pool create rbdpool 64
    rbd pool init rbdpool
    rbd create --size 1024 rbdpool/rbdimage
    rbd ls rbdpool
    rbd info rbdpool/rbdimage
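    A couple of follow-on image operations, as a sketch (--size is in MiB by default, so 2048 means 2 GiB):

    rbd resize --size 2048 rbdpool/rbdimage                  # grow to 2 GiB
    rbd resize --size 1024 rbdpool/rbdimage --allow-shrink   # shrinking needs the extra flag
    rbd rm rbdpool/rbdimage                                  # delete the image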
    
    Unless specified, the rbd command will access the Ceph cluster using the ID admin.
    ceph auth get-or-create client.qemu mon 'profile rbd' osd 'profile rbd pool=vms, profile rbd-read-only pool=images'
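    To use that client.qemu identity, export its keyring somewhere the client can read it; a minimal sketch (the keyring path is an assumption, and the vms/images pools must already exist):

    ceph auth get client.qemu -o /etc/ceph/ceph.client.qemu.keyring
    rbd --id qemu ls vms      # full rbd access on pool vms
    rbd --id qemu ls images   # read-only, per the rbd-read-only profile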
    
    Ceph Block Device images are thin provisioned
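    Thin provisioning is easy to verify: a new image reports its full provisioned size but consumes nothing until written. A sketch against the image created above (output shown is illustrative):

    rbd du rbdpool/rbdimage
    # NAME     PROVISIONED USED
    # rbdimage 1GiB        0B     <- USED grows only as blocks are written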
    
    [root@ceph1 ceph]# rbd info rbdpool/rbdimage
    rbd image 'rbdimage':
            size 1 GiB in 256 objects
            order 22 (4 MiB objects)
            id: 12926b8b4567
            block_name_prefix: rbd_data.12926b8b4567
            format: 2
            features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
            op_features:
            flags:
            create_timestamp: Wed Mar 27 18:06:28 2019
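    The block_name_prefix is what ties the image to its backing RADOS objects; once data has been written, those objects appear in the pool. A sketch (output illustrative; one object per written 4 MiB chunk, up to 256 for this 1 GiB image):

    rados -p rbdpool ls | grep rbd_data.12926b8b4567
    # rbd_data.12926b8b4567.0000000000000000
    # rbd_data.12926b8b4567.0000000000000001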
    
    
    [root@ceph1 ceph]# rbd feature disable rbdpool/rbdimage object-map fast-diff deep-flatten
    [root@ceph1 ceph]# rbd device map rbdpool/rbdimage  --id admin
    /dev/rbd0
    [root@ceph1 ~]# lsblk
    NAME                                                                                                  MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
    sr0                                                                                                    11:0    1  918M  0 rom
    vda                                                                                                   253:0    0   50G  0 disk
    ├─vda1                                                                                                253:1    0  512M  0 part /boot
    ├─vda2                                                                                                253:2    0    8G  0 part [SWAP]
    └─vda3                                                                                                253:3    0 41.5G  0 part /
    vdb                                                                                                   253:16   0   60G  0 disk
    └─ceph--c087d78f--9bb1--49a5--97ad--437995ee0ae7-osd--block--da3283a7--adfe--43ad--8ebc--0853ee8900bb 252:0    0   60G  0 lvm
    rbd0                                                                                                  251:0    0    1G  0 disk
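    The feature disable step is what lets the map succeed: the kernel rbd client of this era does not support object-map, fast-diff, or deep-flatten. Once mapped, /dev/rbd0 behaves like any block device; a minimal sketch of using and releasing it (filesystem and mountpoint are assumptions):

    sudo mkfs.ext4 /dev/rbd0            # any filesystem works; ext4 assumed
    sudo mkdir -p /mnt/rbdimage
    sudo mount /dev/rbd0 /mnt/rbdimage
    sudo umount /mnt/rbdimage
    sudo rbd device unmap /dev/rbd0     # or: rbd device unmap rbdpool/rbdimage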
    

    root@cu-pve04:~# rbd info kycrbd/vm-111-disk-0
    rbd image 'vm-111-disk-0':
            size 50GiB in 12800 objects
            order 22 (4MiB objects)
            block_name_prefix: rbd_data.b52c6b8b4567
            format: 2
            features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
            flags:
            create_timestamp: Fri May 10 17:31:49 2019


    [root@ceph1 ~]# rbd device list
    id pool    image    snap device
    0  rbdpool rbdimage -    /dev/rbd0

    root@cu-pve04:~# rbd help du
    usage: rbd du [--pool <pool>] [--image <image>] [--snap <snap>]
                  [--format <format>] [--pretty-format]
                  [--from-snap <from-snap>]
                  <image-or-snap-spec>

    Show disk usage stats for pool, image or snapshot

    Positional arguments
      <image-or-snap-spec>  image or snapshot specification
                            (example: [<pool-name>/]<image-name>[@<snap-name>])

    Optional arguments
      -p [ --pool ] arg     pool name
      --image arg           image name
      --snap arg            snapshot name
      --format arg          output format (plain, json, or xml) [default: plain]
      --pretty-format       pretty formatting (json and xml)
      --from-snap arg       snapshot starting point

    root@cu-pve04:~# rbd du -p kycrbd
    NAME                     PROVISIONED    USED
    base-101-disk-0@__base__       50GiB 1.51GiB
    base-101-disk-0                50GiB      0B
    vm-100-disk-0                 100GiB 83.5GiB
    vm-102-disk-0                 100GiB 9.18GiB
    vm-103-disk-0                 100GiB 26.7GiB
    vm-104-disk-0                 100GiB 19.7GiB
    vm-105-disk-0                  50GiB 13.7GiB
    vm-106-disk-0                  50GiB 5.45GiB
    <TOTAL>                       550GiB  160GiB

    root@cu-pve04:~# rbd ls -l kycrbd
    NAME                       SIZE PARENT FMT PROT LOCK
    base-101-disk-0           50GiB          2
    base-101-disk-0@__base__  50GiB          2 yes
    vm-100-disk-0            100GiB          2      excl
    vm-102-disk-0            100GiB          2
    vm-103-disk-0            100GiB          2      excl
    vm-104-disk-0            100GiB          2      excl
    vm-105-disk-0             50GiB          2      excl
    vm-106-disk-0             50GiB          2      excl

    root@cu-pve04:~# rbd status -p kycrbd vm-100-disk-0
    Watchers:
            watcher=192.168.7.204:0/4098848611 client.301073 cookie=140228010702336

    root@cu-pve04:~# rbd info -p kycrbd vm-100-disk-0
    rbd image 'vm-100-disk-0':
            size 100GiB in 25600 objects
            order 22 (4MiB objects)
            block_name_prefix: rbd_data.422076b8b4567
            format: 2
            features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
            flags:
            create_timestamp: Sat May  4 20:38:07 2019

    root@cu-pve04:~# rbd ls kycrbd --format json --pretty-format
    [
        "base-101-disk-0",
        "vm-100-disk-0",
        "vm-102-disk-0",
        "vm-103-disk-0",
        "vm-104-disk-0",
        "vm-105-disk-0",
        "vm-106-disk-0"
    ]

    root@cu-pve04:~# rbd ls kycrbd --format xml --pretty-format
    <images>
       <name>base-101-disk-0</name>
       <name>vm-100-disk-0</name>
       <name>vm-102-disk-0</name>
       <name>vm-103-disk-0</name>
       <name>vm-104-disk-0</name>
       <name>vm-105-disk-0</name>
       <name>vm-106-disk-0</name>
    </images>

    root@cu-pve04:~# rbd ls kycrbd --format xml
    <images><name>base-101-disk-0</name><name>vm-100-disk-0</name><name>vm-102-disk-0</name><name>vm-103-disk-0</name><name>vm-104-disk-0</name><name>vm-105-disk-0</name><name>vm-106-disk-0</name></images>

    root@cu-pve04:~# rbd ls kycrbd --format json
    ["base-101-disk-0","vm-100-disk-0","vm-102-disk-0","vm-103-disk-0","vm-104-disk-0","vm-105-disk-0","vm-106-disk-0"]
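    The json output is the easiest to consume from scripts; a sketch, assuming jq is installed (field names match mimic-era output and may differ by release):

    rbd ls kycrbd --format json | jq -r '.[]'   # one image name per line
    rbd du -p kycrbd --format json | jq '.images[] | {name, provisioned_size, used_size}'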

    rados

    rados -h
    radosgw-admin -h
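    rados also ships a simple benchmark, handy for smoke-testing a new pool; a sketch (10-second write, then a sequential-read pass over the same objects, then cleanup):

    rados bench -p rbdpool 10 write --no-cleanup
    rados bench -p rbdpool 10 seq
    rados -p rbdpool cleanup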

    [root@ceph1 ~]# rados lspools
    mypool
    [root@ceph1 ~]# rados df
    POOL_NAME USED OBJECTS CLONES COPIES MISSING_ON_PRIMARY UNFOUND DEGRADED RD_OPS  RD WR_OPS  WR
    mypool     0 B       0      0      0                  0       0        0      0 0 B      0 0 B
    
    total_objects    0
    total_used       3.0 GiB
    total_avail      177 GiB
    total_space      180 GiB
    
    [root@ali-3 ~]# rados df
    POOL_NAME                              USED OBJECTS CLONES COPIES MISSING_ON_PRIMARY UNFOUND DEGRADED  RD_OPS     RD WR_OPS    WR
    pool-d83c6154956b44aea7639c7bd4c45c65 2001M   17460      0  52380                  0       0     1023 5775755 11819M 318825 6488M
    
    total_objects    17460
    total_used       44969M
    total_avail      53608G
    total_space      53652G
    
    
    [root@ali-3 ~]# rados -p pool-d83c6154956b44aea7639c7bd4c45c65  ls
    xbd_data.618177.0000000000000014
    
    
    
    [root@ceph1 ~]# ceph osd lspools
    1 mypool
    2 .rgw.root
    3 default.rgw.control
    4 default.rgw.meta
    5 default.rgw.log
    6 cfs_data
    7 cfs_meta
    8 rbdpool
    
    List the objects in a pool:
    [root@ceph1 ~]# rados -p cfs_data ls
    10000000005.00000000
    10000000006.00000000
    [root@ceph1 ~]# rados -p cfs_meta ls
    601.00000000
    602.00000000
    600.00000000
    603.00000000
    1.00000000.inode
    200.00000000
    200.00000001
    606.00000000
    607.00000000
    mds0_openfiles.0
    608.00000000
    500.00000001
    604.00000000
    500.00000000
    mds_snaptable
    605.00000000
    mds0_inotable
    100.00000000
    mds0_sessionmap
    609.00000000
    400.00000000
    100.00000000.inode
    1.00000000
    [root@ceph1 ~]# rados -p rbdpool ls
    rbd_directory
    rbd_id.rbdimage
    rbd_info
    rbd_header.12926b8b4567
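    Those rbd_* entries are the metadata objects the rbd CLI maintains. Plain objects can be pushed and pulled directly as well; a minimal sketch against mypool:

    rados -p mypool put testobj /etc/hosts     # store a file as object 'testobj'
    rados -p mypool ls                         # now shows testobj
    rados -p mypool get testobj /tmp/testobj   # read it back
    rados -p mypool rm testobj                 # remove it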



  • Original article: https://www.cnblogs.com/createyuan/p/10815413.html