ceph-pg

    Version: mimic

    https://192.168.1.5:8006/pve-docs/chapter-pveceph.html#pve_ceph_osds

    As a rule of thumb, for roughly 1 TiB of data, 1 GiB of memory will be used by an OSD. OSD caching will use additional memory.
    mon_command failed - pg_num 128 size 3 would mean 6147 total pgs, which exceeds max 6000 (mon_max_pg_per_osd 250 * num_in_osds 24)
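    The error means the requested pool would push the cluster past its PG budget: pg_num 128 with size 3 adds 128 × 3 = 384 PG replicas on top of the existing ones, for 6147 in total, while the monitor allows at most mon_max_pg_per_osd (250) × num_in_osds (24) = 6000. A minimal sketch of the two ways out, using standard mimic commands (the pool name "testpool" and the new limit 300 are placeholders):

    # see how many PGs each existing pool already has
    ceph osd lspools
    ceph osd pool get <pool> pg_num

    # option 1: ask for fewer PGs when creating the pool
    ceph osd pool create testpool 64 64

    # option 2: raise the per-OSD PG limit (mimic centralized config)
    ceph config set global mon_max_pg_per_osd 300
    # or inject it into the running monitors without a restart
    ceph tell mon.* injectargs '--mon_max_pg_per_osd=300'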
    
    [root@ali-1 dd]# ceph pg dump
    dumped all
    version 544167
    stamp 2019-03-20 10:28:24.077612
    last_osdmap_epoch 816
    last_pg_scan 816
    full_ratio 0.9
    nearfull_ratio 0.8
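    The full ceph pg dump is long; it can be limited to individual sections. A small sketch of the standard sub-sections (pgs_brief is used again further down in this post):

    ceph pg dump pgs_brief             # one line per PG: state, up/acting sets
    ceph pg dump pools                 # only the per-pool summary section
    ceph pg dump osds                  # only the per-OSD summary section
    ceph pg dump --format json-pretty  # machine-readable output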
    
    [root@ceph1 ~]# ceph pg ls
    PG   OBJECTS DEGRADED MISPLACED UNFOUND BYTES   LOG  STATE        STATE_STAMP                VERSION  REPORTED  UP        ACTING    SCRUB_STAMP                DEEP_SCRUB_STAMP
    1.0        0        0         0       0       0    2 active+clean 2019-03-28 02:42:54.430131     16'2     57:95 [1,2,0]p1 [1,2,0]p1 2019-03-28 02:42:54.430020 2019-03-28 02:42:54.430020
    1.1        0        0         0       0       0    0 active+clean 2019-03-27 20:42:33.846731      0'0     57:78 [2,0,1]p2 [2,0,1]p2 2019-03-27 20:42:33.846600 2019-03-27 20:42:33.846600
    1.2        0        0         0       0       0    0 active+clean 2019-03-27 20:02:31.853254      0'0     57:92 [1,0,2]p1 [1,0,2]p1 2019-03-27 20:02:31.853127 2019-03-21 18:53:07.286885
    1.3        0        0         0       0       0    0 active+clean 2019-03-28 01:04:29.499574      0'0     57:94 [0,1,2]p0 [0,1,2]p0 2019-03-28 01:04:29.499476 2019-03-21 18:53:07.286885
    1.4        0        0         0       0       0    0 active+clean 2019-03-28 10:17:42.694788      0'0     57:77 [2,1,0]p2 [2,1,0]p2 2019-03-28 10:17:42.694658 2019-03-21 18:53:07.286885
    1.5        0        0         0       0       0    0 active+clean 2019-03-28 14:33:49.922515      0'0     57:78 [2,0,1]p2 [2,0,1]p2 2019-03-28 14:33:49.922414 2019-03-21 18:53:07.286885
    1.6        0        0         0       0       0    0 active+clean 2019-03-28 08:33:08.897114      0'0     57:78 [2,1,0]p2 [2,1,0]p2 2019-03-28 08:33:08.897044 2019-03-25 19:51:32.716535
    1.7        0        0         0       0       0    0 active+clean 2019-03-27 21:37:16.417698      0'0     57:92 [1,2,0]p1 [1,2,0]p1 2019-03-27 21:37:16.417553 2019-03-22 23:05:53.863908
    2.0        1        0         0       0     337    1 active+clean 2019-03-27 15:07:09.127196     19'1    57:155 [1,2,0]p1 [1,2,0]p1 2019-03-27 15:07:09.127107 2019-03-22 15:05:32.211389
    2.1        0        0         0       0       0    0 active+clean 2019-03-27 20:55:41.958378      0'0     57:89 [0,2,1]p0 [0,2,1]p0 2019-03-27 20:55:41.958328 2019-03-27 20:55:41.958328
    2.2        0        0         0       0       0    0 active+clean 2019-03-28 03:09:45.117140      0'0     57:87 [1,0,2]p1 [1,0,2]p1 2019-03-28 03:09:45.117036 2019-03-28 03:09:45.117036
    2.3        0        0         0       0       0    0 active+clean 2019-03-27 08:54:17.944907      0'0     57:87 [1,0,2]p1 [1,0,2]p1 2019-03-27 08:54:17.944792 2019-03-26 05:44:21.586541
    2.4        0        0         0       0       0    0 active+clean 2019-03-27 23:42:52.040458      0'0     57:89 [0,2,1]p0 [0,2,1]p0 2019-03-27 23:42:52.040353 2019-03-22 15:05:32.211389
    2.5        0        0         0       0       0    0 active+clean 2019-03-27 14:26:15.908085      0'0     57:73 [2,0,1]p2 [2,0,1]p2 2019-03-27 14:26:15.908022 2019-03-22 15:05:32.211389
    2.6        1        0         0       0     736    2 active+clean 2019-03-28 15:00:22.282027     33'2    57:161 [0,2,1]p0 [0,2,1]p0 2019-03-28 15:00:22.281923 2019-03-26 05:39:41.395132
    2.7        2        0         0       0      92    4 active+clean 2019-03-27 17:09:39.415262     41'4    57:253 [1,2,0]p1 [1,2,0]p1 2019-03-27 17:09:39.415167 2019-03-27 17:09:39.415167
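    ceph pg ls can also be narrowed down by pool, OSD or problem state; a small sketch using the pool and OSD ids of this cluster:

    ceph pg ls-by-pool mypool      # PGs belonging to one pool
    ceph pg ls-by-osd 0            # PGs that have a replica on osd.0
    ceph pg ls-by-primary 1        # PGs whose primary is osd.1
    ceph pg dump_stuck unclean     # only PGs stuck in a non-clean state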
    
    
    [root@ceph1 rbdpool]# ceph pg map 8.13
    osdmap e55 pg 8.13 (8.13) -> up [0,2,1] acting [0,2,1]
    
    A PG id has the form {pool-num}.{pg-id}: the number before the dot is the pool id, and the hex value after it is the PG index within that pool. Pool ids can be listed with:
    ceph osd lspools
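    For example, pg 8.13 above belongs to pool 8 (rbdpool in the ceph df output further down), and the valid PG indexes of a pool run from 0 to pg_num - 1. A small sketch, assuming rbdpool is the pool of interest:

    ceph osd lspools                   # lists "<id> <name>" pairs, e.g. pool 8 = rbdpool
    ceph osd pool get rbdpool pg_num   # pg_num of the pool, so indexes are 0 .. pg_num-1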
    
    
    [root@ceph1 rbdpool]# ceph pg stat
    124 pgs: 124 active+clean; 56 GiB data, 172 GiB used, 8.4 GiB / 180 GiB avail
    [root@client mnt]# rm -rf a*
    Only after the delete above does the cleanup of the PGs begin, as the next snapshot shows:
    [root@ceph1 rbdpool]# ceph pg stat
    124 pgs: 124 active+clean; 2.5 MiB data, 3.5 GiB used, 177 GiB / 180 GiB avail; 8.7 KiB/s rd, 85 B/s wr, 479 op/s
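    To follow that cleanup while it runs, the status can be polled or streamed with standard commands, for example:

    ceph -w                    # stream cluster events and pgmap updates
    watch -n 2 ceph pg stat    # or re-run the one-line PG summary every 2 s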
    
    
    
    [root@ceph1 ~]# ceph pg dump
    dumped all
    version 41279
    stamp 2019-03-28 16:39:18.312134
    last_osdmap_epoch 0
    last_pg_scan 0
    PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES   LOG  DISK_LOG STATE        STATE_STAMP                VERSION  REPORTED  UP      UP_PRIMARY ACTING  ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP                LAST_DEEP_SCRUB DEEP_SCRUB_STAMP           SNAPTRIMQ_LEN
    8.3f          0                  0        0         0       0       0    0        0 active+clean 2019-03-27 18:06:27.945410      0'0     57:30 [0,1,2]          0 [0,1,2]              0        0'0 2019-03-27 17:58:20.276896             0'0 2019-03-27 17:58:20.276896             0
    8.3e          0                  0        0         0       0       0    0        0 active+clean 2019-03-27 18:06:27.967178      0'0     57:28 [2,1,0]          2 [2,1,0]              2        0'0 2019-03-27 17:58:20.276896             0'0 2019-03-27 17:58:20.276896             0
    8.3d          0                  0        0         0       0       0    0        0 active+clean 2019-03-27 18:06:27.946169      0'0     57:29 [0,2,1]          0 [0,2,1]              0        0'0 2019-03-27 17:58:20.276896             0'0 2019-03-27 17:58:20.276896             0
    8.3c          0                  0        0         0       0       0    0        0 active+clean 2019-03-27 18:06:27.954775      0'0     57:29 [1,2,0]          1 [1,2,0]              1        0'0 2019-03-27 17:58:20.276896             0'0 2019-03-27 17:58:20.276896             0
    8.3b          0                  0        0         0       0       0    0        0 active+clean 2019-03-27 18:06:27.958550      0'0     57:28 [1,2,0]          1 [1,2,0]              1        0'0 2019-03-27 17:58:20.276896             0'0 2019-03-27 17:58:20.276896             0
    8.3a          1                  0        0         0       0      19    2        2 active+clean 2019-03-27 18:06:27.968929     47'2     57:31 [2,0,1]          2 [2,0,1]              2        0'0 2019-03-27 17:58:20.276896             0'0 2019-03-27 17:58:20.276896             0
    8.39          0                  0        0         0       0       0    0        0 active+clean 2019-03-27 18:06:27.966700      0'0     57:28 [2,0,1]          2 [2,0,1]              2        0'0 2019-03-27 17:58:20.276896             0'0 2019-03-27 17:58:20.276896             0
    8.38          0                  0        0         0       0       0    0        0 active+clean 2019-03-27 18:06:27.946091      0'0     57:29 [0,2,1]          0 [0,2,1]              0        0'0 2019-03-27 17:58:20.276896             0'0 2019-03-27 17:58:20.276896             0
    
    
    
    
    (per-pool totals from the same dump: pool id, objects, missing_on_primary, degraded, misplaced, unfound, bytes, log, disk_log)
    8   4 0 0 0 0      35   429   429
    7  23 0 0 0 0 2497109  1421  1421
    6   2 0 0 0 0  151467 26355 26355
    5 207 0 0 0 0       0 24394 24394
    1   0 0 0 0 0       0     2     2
    2   4 0 0 0 0    1165     7     7
    3   8 0 0 0 0       0   301   301
    4   2 0 0 0 0     345     2     2
    
    sum 250 0 0 0 0 2650121 52911 52911
    OSD_STAT USED    AVAIL   TOTAL   HB_PEERS PG_SUM PRIMARY_PG_SUM
    2        1.3 GiB  59 GiB  60 GiB    [0,1]    124             42
    1        1.1 GiB  59 GiB  60 GiB    [0,2]    124             35
    0        1.1 GiB  59 GiB  60 GiB    [1,2]    124             47
    sum      3.5 GiB 177 GiB 180 GiB
    [root@ceph1 ~]#
    
    
    
    [root@ceph1 ~]# ceph df
    GLOBAL:
        SIZE        AVAIL       RAW USED     %RAW USED
        180 GiB     177 GiB      3.5 GiB          1.93
    POOLS:
        NAME                    ID     USED        %USED     MAX AVAIL     OBJECTS
        mypool                  1          0 B         0        56 GiB           0
        .rgw.root               2      1.1 KiB         0        56 GiB           4
        default.rgw.control     3          0 B         0        56 GiB           8
        default.rgw.meta        4        345 B         0        56 GiB           2
        default.rgw.log         5          0 B         0        56 GiB         207
        cfs_data                6      148 KiB         0        56 GiB           2
        cfs_meta                7      2.4 MiB         0        56 GiB          23
        rbdpool                 8         35 B         0        56 GiB           4
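    Note that MAX AVAIL is per pool and already accounts for the 3x replication, which is why it is roughly a third of the 177 GiB raw AVAIL. For more per-pool columns (quotas, read/write totals, raw used) the detailed variant can be used:

    ceph df detail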
    
    [root@ceph1 ~]# ceph pg 8.1 query
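    The query output is a large JSON document; the interesting top-level fields can be picked out with jq (a small sketch, assuming jq is installed; state, up and acting are top-level keys of the query output):

    ceph pg 8.1 query | jq '{state: .state, up: .up, acting: .acting}'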
    
    [root@ceph1 ~]# ceph osd map cfs_data secure
    osdmap e58 pool 'cfs_data' (6) object 'secure' -> pg 6.a67b1c61 (6.1) -> up ([2,1,0], p2) acting ([2,1,0], p2)
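    Here the object name 'secure' is hashed (a67b1c61), the hash is folded into the pool's pg_num to give PG 6.1, and CRUSH then maps 6.1 to OSDs [2,1,0] with osd.2 as primary. The object does not have to exist; the command only computes the mapping. A quick cross-check:

    ceph osd pool get cfs_data pg_num   # the pg_num the hash is folded into
    ceph pg map 6.1                     # should show the same up/acting set as above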
    
    
    ===========================================
    root@cu-pve05:/mnt/pve# ceph osd pool stats
    pool kyc_block01 id 5
      client io 0B/s rd, 0op/s rd, 0op/s wr
    
    pool cephfs_data id 6
      nothing is going on
    
    pool cephfs_metadata id 7
      nothing is going on
    
    pool system_disks id 9
      client io 0B/s rd, 576B/s wr, 0op/s rd, 0op/s wr
    
    pool data_disks id 10
      nothing is going on
    
    pool fs01 id 12
      nothing is going on
    
    root@cu-pve05:/mnt/pve#
    
    
    
    root@cu-pve05:/mnt/pve# ceph df
    GLOBAL:
        SIZE        AVAIL       RAW USED     %RAW USED
        52.4TiB     51.9TiB       528GiB          0.98
    POOLS:
        NAME                ID     USED        %USED     MAX AVAIL     OBJECTS
        kyc_block01         5       130GiB      0.77       16.4TiB       33265
        cephfs_data         6      6.62GiB      0.04       16.4TiB        1702
        cephfs_metadata     7       645KiB         0       16.4TiB          22
        system_disks        9      31.1GiB      0.19       16.4TiB        8353
        data_disks          10          0B         0       16.4TiB           0
        fs01                12      128MiB         0       16.4TiB           6
    root@cu-pve05:/mnt/pve#
    
    
    root@cu-pve05:/mnt/pve# ceph pg dump pgs_brief|grep ^9|wc -l
    dumped pgs_brief
    64
    The result above (64) is the pg_num of that pool (pool 9, system_disks) as created in PVE.
    
    PG_STAT STATE        UP        UP_PRIMARY ACTING    ACTING_PRIMARY
    9.21    active+clean [2,14,22]          2 [2,14,22]              2
    9.20    active+clean [0,18,11]          0 [0,18,11]              0
    9.27    active+clean [7,22,14]          7 [7,22,14]              7
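    Counting lines in pgs_brief only works because pool 9 is the sole pool whose id starts with 9 (a stricter pattern would be grep '^9\.'); reading the value directly from the pool is less fragile:

    ceph osd pool get system_disks pg_num   # should report "pg_num: 64" for this pool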
    
    ===============================
    root@cu-pve05:/mnt/pve# rados df
    POOL_NAME       USED    OBJECTS CLONES COPIES MISSING_ON_PRIMARY UNFOUND DEGRADED RD_OPS  RD      WR_OPS WR
    cephfs_data     7.14GiB    1834      0   5502                  0       0        0     695 1.73GiB   4874 12.9GiB
    cephfs_metadata  698KiB      22      0     66                  0       0        0      68  556KiB    528 1.01MiB
    fs01             128MiB       6      0     18                  0       0        0       0      0B     69  256MiB
    kyc_block01      133GiB   34056      0 102168                  0       0        0 1405587  524GiB 564006  223GiB
    system_disks    31.1GiB    8353    423  25059                  0       0        0  397980 59.3GiB 270495  109GiB
    
    total_objects    44271
    total_used       539GiB
    total_avail      51.9TiB
    total_space      52.4TiB
    
    COPIES = OBJECTS × 3, because these pools are replicated with size 3 (e.g. system_disks: 8353 × 3 = 25059).
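    The replica count behind that factor can be confirmed per pool:

    ceph osd pool get kyc_block01 size   # expected to report "size: 3" here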
    ===============================
    name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
    The metadata pool has 128 PGs and the data pool 512 PGs, i.e. a 1:4 metadata-to-data ratio of placement groups.
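    Both the filesystem-to-pool mapping and the PG counts can be read back directly (pool names taken from the line above):

    ceph fs ls                                 # name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data]
    ceph osd pool get cephfs_metadata pg_num   # 128
    ceph osd pool get cephfs_data pg_num       # 512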
    
    
                     SIZE      OBJECTS    OBJECT_SIZE
    vm-103-disk-0    32 GiB    8.19k      4 MiB
    8.19k objects × 4 MiB ≈ 32.76 GB, i.e. the 32 GiB image is fully covered by 4 MiB objects.
    8.19 + 0.643 = 8.833
    2.18 TiB per OSD × 8 OSDs per node = 17.44 TiB; × 3 nodes = 52.32 TiB,
    which matches the 52.39 TiB of raw capacity (shown as 52.4 TiB by ceph df).
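    Those per-image numbers come from the RBD layer and can be read back with the rbd tool. A small sketch, assuming the image lives in the system_disks pool used for VM disks:

    rbd info system_disks/vm-103-disk-0   # size, object count, object size (order 22 = 4 MiB)
    rbd du system_disks/vm-103-disk-0     # provisioned vs actually used space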
    
    PGs active+clean: 1921
    24 OSDs (3 nodes × 8 OSDs)

    With size 3 and three nodes, every node holds one replica of each PG, so each node carries all 1921 PGs. The per-OSD PG counts on one node are 236, 243, 235, 269, 222, 229, 239 and 248, which sum to 1921. On average that is 1921 × 3 / 24 ≈ 240 PGs per OSD, close to the default mon_max_pg_per_osd limit of 250 seen in the error at the top of this post.
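    Per-OSD PG counts like these can be read straight from the cluster; the PGS column holds the count:

    ceph osd df        # per-OSD size, use and a PGS column
    ceph osd df tree   # the same, grouped by host in the CRUSH tree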