root@cu-pve04:~# ceph fs get kycfs
Filesystem 'kycfs' (1)
fs_name kycfs
epoch 17
flags c
created 2019-04-30 20:52:48.957941
modified 2019-04-30 21:22:33.599472
tableserver 0
root 0
session_timeout 60
session_autoclose 300
max_file_size 1099511627776
last_failure 0
last_failure_osd_epoch 172
compat compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2}
max_mds 1
in 0
up {0=64099}
failed
damaged
stopped
data_pools [5]
metadata_pool 6
inline_data disabled
balancer
standby_count_wanted 1
64099: 192.168.7.205:6800/836062941 'cu-pve05' mds.0.12 up:active seq 31 (standby for rank -1 'pve')
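The filesystem runs a single active MDS (max_mds 1): rank 0 is held by cu-pve05 (GID 64099), with the other two nodes as standbys (shown below). max_file_size 1099511627776 is 1 TiB (2^40 bytes). If a second active rank were ever wanted, a luminous-era sketch would look like the following (not run on this cluster; depending on the flag state, allow_multimds may need enabling first):

    ceph fs set kycfs allow_multimds true   # pre-mimic gate flag for multiple active MDS
    ceph fs set kycfs max_mds 2             # promote one standby to a second active rank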
root@cu-pve04:~# ceph fs ls
name: kycfs, metadata pool: kycfs_metadata, data pools: [kycfs_data ]
root@cu-pve04:~# ceph fs status
kycfs - 9 clients
=====
+------+--------+----------+---------------+-------+-------+
| Rank | State | MDS | Activity | dns | inos |
+------+--------+----------+---------------+-------+-------+
| 0 | active | cu-pve05 | Reqs: 0 /s | 1693 | 1622 |
+------+--------+----------+---------------+-------+-------+
+----------------+----------+-------+-------+
| Pool | type | used | avail |
+----------------+----------+-------+-------+
| kycfs_metadata | metadata | 89.7M | 16.3T |
| kycfs_data | data | 14.0G | 16.3T |
+----------------+----------+-------+-------+
+-------------+
| Standby MDS |
+-------------+
| cu-pve04 |
| cu-pve06 |
+-------------+
MDS version: ceph version 12.2.12 (39cfebf25a7011204a9876d2950e4b28aba66d11) luminous (stable)
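cu-pve04 and cu-pve06 sit in standby; if cu-pve05 dies, one of them takes over rank 0. Failover can be rehearsed by bouncing the active daemon, assuming the Proxmox systemd unit is named after the host:

    systemctl stop ceph-mds@cu-pve05    # run on cu-pve05; a standby should go up:active
    systemctl start ceph-mds@cu-pve05   # the old active rejoins as a standby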
root@cu-pve04:~# ceph fsid
b5fd132b-9ff4-470a-9a14-172eb48dc973
root@cu-pve04:~# ceph health
HEALTH_OK
root@cu-pve04:~# ceph -s
  cluster:
    id:     b5fd132b-9ff4-470a-9a14-172eb48dc973
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum cu-pve04,cu-pve05,cu-pve06
    mgr: cu-pve04(active), standbys: cu-pve05, cu-pve06
    mds: kycfs-1/1/1 up {0=cu-pve05=up:active}, 2 up:standby
    osd: 24 osds: 24 up, 24 in

  data:
    pools:   3 pools, 1152 pgs
    objects: 46.35k objects, 176GiB
    usage:   550GiB used, 51.9TiB / 52.4TiB avail
    pgs:     1152 active+clean

  io:
    client: 0B/s rd, 43.5KiB/s wr, 0op/s rd, 6op/s wr
root@cu-pve04:~# ceph mgr module ls
{
    "enabled_modules": [
        "balancer",
        "dashboard",
        "restful",
        "status"
    ],
    "disabled_modules": [
        "influx",
        "localpool",
        "prometheus",
        "selftest",
        "zabbix"
    ]
}
root@cu-pve04:~# ceph mgr module enable dashboard
root@cu-pve04:~# ceph mgr services
{
    "dashboard": "http://cu-pve04.ka1che.com:7000/"
}
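The luminous dashboard binds to port 7000 on the active mgr by default and follows mgr failover. The documented way to change the bind address or port is via config-key; the values below are only examples:

    ceph config-key set mgr/dashboard/server_addr 0.0.0.0
    ceph config-key set mgr/dashboard/server_port 7000
    ceph mgr module disable dashboard && ceph mgr module enable dashboard   # reload the module to apply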
root@cu-pve04:~# ceph -v
ceph version 12.2.12 (39cfebf25a7011204a9876d2950e4b28aba66d11) luminous (stable)
root@cu-pve04:~# ceph mds versions
{
    "ceph version 12.2.12 (39cfebf25a7011204a9876d2950e4b28aba66d11) luminous (stable)": 3
}
root@cu-pve04:~# ceph mgr versions
{
    "ceph version 12.2.12 (39cfebf25a7011204a9876d2950e4b28aba66d11) luminous (stable)": 3
}
root@cu-pve04:~# ceph mon versions
{
    "ceph version 12.2.12 (39cfebf25a7011204a9876d2950e4b28aba66d11) luminous (stable)": 3
}
root@cu-pve04:~# ceph osd versions
{
    "ceph version 12.2.12 (39cfebf25a7011204a9876d2950e4b28aba66d11) luminous (stable)": 24
}
root@cu-pve04:~# ceph mon feature ls
all features
        supported: [kraken,luminous]
        persistent: [kraken,luminous]
on current monmap (epoch 3)
        persistent: [kraken,luminous]
        required: [kraken,luminous]
root@cu-pve04:~# ceph mds stat
kycfs-1/1/1 up {0=cu-pve05=up:active}, 2 up:standby
root@cu-pve04:~# ceph mon stat
e3: 3 mons at {cu-pve04=192.168.7.204:6789/0,cu-pve05=192.168.7.205:6789/0,cu-pve06=192.168.7.206:6789/0}, election epoch 22, leader 0 cu-pve04, quorum 0,1,2 cu-pve04,cu-pve05,cu-pve06
root@cu-pve04:~# ceph osd stat
24 osds: 24 up, 24 in
root@cu-pve04:~# ceph pg stat
1152 pgs: 1152 active+clean; 176GiB data, 550GiB used, 51.9TiB / 52.4TiB avail; 673B/s rd, 197KiB/s wr, 23op/s
root@cu-pve04:~# ceph node ls
{
    "mon": {
        "cu-pve04": [
            0
        ],
        "cu-pve05": [
            1
        ],
        "cu-pve06": [
            2
        ]
    },
    "osd": {
        "cu-pve04": [
            0,
            1,
            2,
            3,
            4,
            5,
            6,
            7
        ],
        "cu-pve05": [
            8,
            9,
            10,
            11,
            12,
            13,
            14,
            15
        ],
        "cu-pve06": [
            16,
            17,
            18,
            19,
            20,
            21,
            22,
            23
        ]
    },
    "mds": {
        "cu-pve04": [
            -1
        ],
        "cu-pve05": [
            0
        ],
        "cu-pve06": [
            -1
        ]
    }
}
root@cu-pve04:~# ceph osd crush rule ls
replicated_rule
root@cu-pve04:~# ceph osd crush rule dump
[
    {
        "rule_id": 0,
        "rule_name": "replicated_rule",
        "ruleset": 0,
        "type": 1,
        "min_size": 1,
        "max_size": 10,
        "steps": [
            {
                "op": "take",
                "item": -1,
                "item_name": "default"
            },
            {
                "op": "chooseleaf_firstn",
                "num": 0,
                "type": "host"
            },
            {
                "op": "emit"
            }
        ]
    }
]
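type 1 marks a replicated rule, and the chooseleaf_firstn step with type host places each replica on a distinct host, so a size-3 pool spans all three nodes. New rules do not require hand-editing the CRUSH map; for example (hypothetical rule name, and note every OSD in this cluster is class hdd anyway):

    ceph osd crush rule create-replicated rep-hdd default host hdd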
root@cu-pve04:~# ceph osd df tree
ID CLASS WEIGHT   REWEIGHT SIZE    USE     AVAIL   %USE VAR  PGS TYPE NAME
-1       52.39417        - 52.4TiB  550GiB 51.9TiB 1.03 1.00   - root default
-3       17.46472        - 17.5TiB  183GiB 17.3TiB 1.03 1.00   -     host cu-pve04
 0   hdd  2.18309  1.00000 2.18TiB 23.1GiB 2.16TiB 1.04 1.01 144         osd.0
 1   hdd  2.18309  1.00000 2.18TiB 20.2GiB 2.16TiB 0.90 0.88 126         osd.1
 2   hdd  2.18309  1.00000 2.18TiB 25.1GiB 2.16TiB 1.12 1.10 152         osd.2
 3   hdd  2.18309  1.00000 2.18TiB 27.0GiB 2.16TiB 1.21 1.18 153         osd.3
 4   hdd  2.18309  1.00000 2.18TiB 19.1GiB 2.16TiB 0.85 0.83 142         osd.4
 5   hdd  2.18309  1.00000 2.18TiB 25.1GiB 2.16TiB 1.12 1.09 160         osd.5
 6   hdd  2.18309  1.00000 2.18TiB 23.2GiB 2.16TiB 1.04 1.01 137         osd.6
 7   hdd  2.18309  1.00000 2.18TiB 20.6GiB 2.16TiB 0.92 0.90 138         osd.7
-5       17.46472        - 17.5TiB  183GiB 17.3TiB 1.03 1.00   -     host cu-pve05
 8   hdd  2.18309  1.00000 2.18TiB 27.0GiB 2.16TiB 1.21 1.18 160         osd.8
 9   hdd  2.18309  1.00000 2.18TiB 24.4GiB 2.16TiB 1.09 1.07 142         osd.9
10   hdd  2.18309  1.00000 2.18TiB 24.4GiB 2.16TiB 1.09 1.06 148         osd.10
11   hdd  2.18309  1.00000 2.18TiB 22.2GiB 2.16TiB 0.99 0.97 164         osd.11
12   hdd  2.18309  1.00000 2.18TiB 22.9GiB 2.16TiB 1.02 1.00 134         osd.12
13   hdd  2.18309  1.00000 2.18TiB 22.2GiB 2.16TiB 1.00 0.97 132         osd.13
14   hdd  2.18309  1.00000 2.18TiB 20.3GiB 2.16TiB 0.91 0.89 135         osd.14
15   hdd  2.18309  1.00000 2.18TiB 19.9GiB 2.16TiB 0.89 0.87 137         osd.15
-7       17.46472        - 17.5TiB  183GiB 17.3TiB 1.03 1.00   -     host cu-pve06
16   hdd  2.18309  1.00000 2.18TiB 22.9GiB 2.16TiB 1.03 1.00 141         osd.16
17   hdd  2.18309  1.00000 2.18TiB 23.3GiB 2.16TiB 1.04 1.02 148         osd.17
18   hdd  2.18309  1.00000 2.18TiB 26.0GiB 2.16TiB 1.16 1.13 154         osd.18
19   hdd  2.18309  1.00000 2.18TiB 21.0GiB 2.16TiB 0.94 0.92 141         osd.19
20   hdd  2.18309  1.00000 2.18TiB 25.4GiB 2.16TiB 1.14 1.11 151         osd.20
21   hdd  2.18309  1.00000 2.18TiB 18.8GiB 2.16TiB 0.84 0.82 140         osd.21
22   hdd  2.18309  1.00000 2.18TiB 24.4GiB 2.16TiB 1.09 1.06 134         osd.22
23   hdd  2.18309  1.00000 2.18TiB 21.5GiB 2.16TiB 0.96 0.94 143         osd.23
                     TOTAL 52.4TiB  550GiB 51.9TiB 1.03
MIN/MAX VAR: 0.82/1.18 STDDEV: 0.11
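Placement is well balanced: per-OSD utilization varies only between 0.82x and 1.18x of the mean (STDDEV 0.11). If it drifted, luminous offers a dry-run reweight plus the mgr balancer module; a sketch, not applied here:

    ceph osd test-reweight-by-utilization 110   # report-only: what would be reweighted above 110% of mean
    ceph balancer mode crush-compat
    ceph balancer on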
root@cu-pve04:~# ceph osd lspools
5 kycfs_data,6 kycfs_metadata,7 kycrbd,
root@cu-pve04:~# ceph osd perf
osd commit_latency(ms) apply_latency(ms)
23 0 0
22 1 1
9 1 1
8 0 0
7 0 0
6 1 1
5 2 2
4 1 1
0 2 2
1 0 0
2 0 0
3 0 0
10 1 1
11 7 7
12 0 0
13 2 2
14 1 1
15 0 0
16 0 0
17 4 4
18 0 0
19 11 11
20 0 0
21 0 0
root@cu-pve04:~# ceph osd pool get kycrbd all
size: 3
min_size: 2
crash_replay_interval: 0
pg_num: 512
pgp_num: 512
crush_rule: replicated_rule
hashpspool: true
nodelete: false
nopgchange: false
nosizechange: false
write_fadvise_dontneed: false
noscrub: false
nodeep-scrub: false
use_gmt_hitset: 1
auid: 0
fast_read: 0
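The "all" keyword dumps every readable option; a single key can be queried the same way, and most keys are writable with ceph osd pool set. For example, to guard the pool against accidental deletion (illustrative only, not applied here):

    ceph osd pool get kycrbd size
    ceph osd pool set kycrbd nodelete true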
(The next three commands were captured on a different cluster, hence the [root@ceph1 ceph]# prompt and the different pool IDs; they show how a CephFS and its two pools are created in the first place.)
[root@ceph1 ceph]# ceph osd pool create cfs_data 10
pool 'cfs_data' created
[root@ceph1 ceph]# ceph osd pool create cfs_meta 10
pool 'cfs_meta' created
[root@ceph1 ceph]# ceph fs new cefs cfs_meta cfs_data
new fs with metadata pool 7 and data pool 6
root@cu-pve04:~# ceph osd pool ls
kycfs_data
kycfs_metadata
kycrbd
root@cu-pve04:~# ceph osd pool ls detail
pool 5 'kycfs_data' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 512 pgp_num 512 last_change 156 flags hashpspool stripe_width 0 application cephfs
pool 6 'kycfs_metadata' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 128 pgp_num 128 last_change 156 flags hashpspool stripe_width 0 application cephfs
pool 7 'kycrbd' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 512 pgp_num 512 last_change 187 flags hashpspool stripe_width 0 application rbd
removed_snaps [1~3]
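The per-pool pg_num values add up to the cluster total from ceph -s: 512 (kycfs_data) + 128 (kycfs_metadata) + 512 (kycrbd) = 1152 PGs. With size 3 that is 1152 x 3 / 24 OSDs = 144 PG replicas per OSD on average, a figure that reappears below in ceph osd utilization.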
root@cu-pve04:~# ceph osd pool stats
pool kycfs_data id 5
client io 2.42KiB/s wr, 0op/s rd, 0op/s wr
pool kycfs_metadata id 6
client io 1.08KiB/s wr, 0op/s rd, 0op/s wr
pool kycrbd id 7
client io 0B/s rd, 357KiB/s wr, 0op/s rd, 25op/s wr
root@cu-pve04:~# ceph osd status
+----+----------+-------+-------+--------+---------+--------+---------+-----------+
| id | host | used | avail | wr ops | wr data | rd ops | rd data | state |
+----+----------+-------+-------+--------+---------+--------+---------+-----------+
| 0 | cu-pve04 | 23.1G | 2212G | 0 | 7071 | 0 | 0 | exists,up |
| 1 | cu-pve04 | 20.1G | 2215G | 0 | 12.8k | 0 | 0 | exists,up |
| 2 | cu-pve04 | 25.1G | 2210G | 0 | 6553 | 0 | 0 | exists,up |
| 3 | cu-pve04 | 27.0G | 2208G | 0 | 11.2k | 0 | 0 | exists,up |
| 4 | cu-pve04 | 19.0G | 2216G | 0 | 1948 | 0 | 0 | exists,up |
| 5 | cu-pve04 | 25.0G | 2210G | 0 | 16.5k | 0 | 0 | exists,up |
| 6 | cu-pve04 | 23.2G | 2212G | 0 | 16.0k | 0 | 0 | exists,up |
| 7 | cu-pve04 | 20.5G | 2214G | 1 | 16.0k | 0 | 0 | exists,up |
| 8 | cu-pve05 | 27.0G | 2208G | 3 | 85.2k | 0 | 0 | exists,up |
| 9 | cu-pve05 | 24.4G | 2211G | 0 | 3276 | 0 | 0 | exists,up |
| 10 | cu-pve05 | 24.3G | 2211G | 2 | 38.4k | 0 | 0 | exists,up |
| 11 | cu-pve05 | 22.2G | 2213G | 0 | 12.8k | 0 | 0 | exists,up |
| 12 | cu-pve05 | 22.8G | 2212G | 0 | 7035 | 0 | 0 | exists,up |
| 13 | cu-pve05 | 22.2G | 2213G | 1 | 16.1k | 0 | 0 | exists,up |
| 14 | cu-pve05 | 20.3G | 2215G | 0 | 12.8k | 0 | 0 | exists,up |
| 15 | cu-pve05 | 19.8G | 2215G | 0 | 3559 | 0 | 6 | exists,up |
| 16 | cu-pve06 | 22.9G | 2212G | 1 | 22.4k | 0 | 0 | exists,up |
| 17 | cu-pve06 | 23.3G | 2212G | 0 | 25.6k | 0 | 0 | exists,up |
| 18 | cu-pve06 | 25.9G | 2209G | 1 | 8192 | 0 | 0 | exists,up |
| 19 | cu-pve06 | 21.0G | 2214G | 0 | 352 | 0 | 0 | exists,up |
| 20 | cu-pve06 | 25.4G | 2210G | 2 | 24.2k | 0 | 0 | exists,up |
| 21 | cu-pve06 | 18.8G | 2216G | 0 | 6553 | 0 | 0 | exists,up |
| 22 | cu-pve06 | 24.3G | 2211G | 1 | 13.9k | 0 | 0 | exists,up |
| 23 | cu-pve06 | 21.4G | 2214G | 0 | 6553 | 0 | 0 | exists,up |
+----+----------+-------+-------+--------+---------+--------+---------+-----------+
root@cu-pve04:~# ceph osd tree
ID CLASS WEIGHT   TYPE NAME          STATUS REWEIGHT PRI-AFF
-1       52.39417 root default
-3       17.46472     host cu-pve04
 0   hdd  2.18309         osd.0          up  1.00000 1.00000
 1   hdd  2.18309         osd.1          up  1.00000 1.00000
 2   hdd  2.18309         osd.2          up  1.00000 1.00000
 3   hdd  2.18309         osd.3          up  1.00000 1.00000
 4   hdd  2.18309         osd.4          up  1.00000 1.00000
 5   hdd  2.18309         osd.5          up  1.00000 1.00000
 6   hdd  2.18309         osd.6          up  1.00000 1.00000
 7   hdd  2.18309         osd.7          up  1.00000 1.00000
-5       17.46472     host cu-pve05
 8   hdd  2.18309         osd.8          up  1.00000 1.00000
 9   hdd  2.18309         osd.9          up  1.00000 1.00000
10   hdd  2.18309         osd.10         up  1.00000 1.00000
11   hdd  2.18309         osd.11         up  1.00000 1.00000
12   hdd  2.18309         osd.12         up  1.00000 1.00000
13   hdd  2.18309         osd.13         up  1.00000 1.00000
14   hdd  2.18309         osd.14         up  1.00000 1.00000
15   hdd  2.18309         osd.15         up  1.00000 1.00000
-7       17.46472     host cu-pve06
16   hdd  2.18309         osd.16         up  1.00000 1.00000
17   hdd  2.18309         osd.17         up  1.00000 1.00000
18   hdd  2.18309         osd.18         up  1.00000 1.00000
19   hdd  2.18309         osd.19         up  1.00000 1.00000
20   hdd  2.18309         osd.20         up  1.00000 1.00000
21   hdd  2.18309         osd.21         up  1.00000 1.00000
22   hdd  2.18309         osd.22         up  1.00000 1.00000
23   hdd  2.18309         osd.23         up  1.00000 1.00000
root@cu-pve04:~# ceph osd utilization
avg 144
stddev 9.49561 (expected baseline 11.7473)
min osd.1 with 126 pgs (0.875 * mean)
max osd.11 with 164 pgs (1.13889 * mean)
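These figures are consistent with the pool math: 1152 PGs x 3 replicas / 24 OSDs = 144 average. The "expected baseline" is the stddev that purely random placement would produce, sqrt(144 x (1 - 1/24)) = sqrt(138) ≈ 11.7473, so the observed spread of 9.5 is actually tighter than random.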
root@cu-pve04:~# ceph pg dump sum
dumped sum
version 8480
stamp 2019-05-06 15:20:45.513442
last_osdmap_epoch 0
last_pg_scan 0
full_ratio 0
nearfull_ratio 0
PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG
sum 46354 0 0 0 0 188474766417 838378 838378
OSD_STAT USED AVAIL TOTAL
sum 550GiB 51.9TiB 52.4TiB
root@cu-pve04:~# ceph pg dump pools
dumped pools
POOLID OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG
7 41495 0 0 0 0 173343324074 792409 792409
6 45 0 0 0 0 96511173 18569 18569
5 4814 0 0 0 0 15034939056 27514 27514
root@cu-pve04:~# ceph pg dump osds
dumped osds
OSD_STAT USED AVAIL TOTAL HB_PEERS PG_SUM PRIMARY_PG_SUM
23 21.5GiB 2.16TiB 2.18TiB [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,22] 143 42
22 24.4GiB 2.16TiB 2.18TiB [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,21,23] 134 50
9 24.4GiB 2.16TiB 2.18TiB [0,1,2,3,4,5,6,7,8,10,16,17,18,19,20,21,22,23] 142 48
8 27.0GiB 2.16TiB 2.18TiB [0,1,2,3,4,5,6,7,9,16,17,18,19,20,21,22,23] 160 56
7 20.6GiB 2.16TiB 2.18TiB [6,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23] 138 45
6 23.2GiB 2.16TiB 2.18TiB [5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23] 137 44
5 25.1GiB 2.16TiB 2.18TiB [4,6,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23] 160 52
4 19.1GiB 2.16TiB 2.18TiB [3,5,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23] 142 50
0 23.2GiB 2.16TiB 2.18TiB [1,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23] 144 59
1 20.2GiB 2.16TiB 2.18TiB [0,2,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23] 126 42
2 25.1GiB 2.16TiB 2.18TiB [1,3,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23] 152 46
3 27.0GiB 2.16TiB 2.18TiB [2,4,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23] 153 54
10 24.4GiB 2.16TiB 2.18TiB [0,1,2,3,4,5,6,7,9,11,16,17,18,19,20,21,22,23] 148 52
11 22.2GiB 2.16TiB 2.18TiB [0,1,2,3,4,5,6,7,10,12,16,17,18,19,20,21,22,23] 164 69
12 22.9GiB 2.16TiB 2.18TiB [0,1,2,3,4,5,6,7,11,13,16,17,18,19,20,21,22,23] 134 30
13 22.2GiB 2.16TiB 2.18TiB [0,1,2,3,4,5,6,7,12,14,16,17,18,19,20,21,22,23] 132 48
14 20.3GiB 2.16TiB 2.18TiB [0,1,2,3,4,5,6,7,13,15,16,17,18,19,20,21,22,23] 135 40
15 19.9GiB 2.16TiB 2.18TiB [0,1,2,3,4,5,6,7,14,16,17,18,19,20,21,22,23] 137 36
16 22.9GiB 2.16TiB 2.18TiB [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,17,18,19,20] 141 47
17 23.3GiB 2.16TiB 2.18TiB [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,18] 148 47
18 26.0GiB 2.16TiB 2.18TiB [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,17,19] 154 43
19 21.0GiB 2.16TiB 2.18TiB [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,18,20] 141 43
20 25.4GiB 2.16TiB 2.18TiB [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,19,21] 151 60
21 18.8GiB 2.16TiB 2.18TiB [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,20,22] 140 49
sum 550GiB 51.9TiB 52.4TiB
root@cu-pve04:~# ceph pg map 7.1e8
osdmap e190 pg 7.1e8 (7.1e8) -> up [0,14,16] acting [0,14,16]
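PG 7.1e8 lives in pool 7 (kycrbd); its up and acting sets agree, with osd.0 as the primary. The mapping for any object can be computed without writing it (the object name below is arbitrary), and a PG can be inspected in depth:

    ceph osd map kycrbd test_object   # compute which PG and OSDs an object would map to
    ceph pg 7.1e8 query               # full peering and state detail for the PG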
root@cu-pve04:~# ceph status
  cluster:
    id:     b5fd132b-9ff4-470a-9a14-172eb48dc973
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum cu-pve04,cu-pve05,cu-pve06
    mgr: cu-pve04(active), standbys: cu-pve05, cu-pve06
    mds: kycfs-1/1/1 up {0=cu-pve05=up:active}, 2 up:standby
    osd: 24 osds: 24 up, 24 in

  data:
    pools:   3 pools, 1152 pgs
    objects: 46.35k objects, 176GiB
    usage:   550GiB used, 51.9TiB / 52.4TiB avail
    pgs:     1152 active+clean

  io:
    client: 0B/s rd, 290KiB/s wr, 0op/s rd, 15op/s wr
root@cu-pve04:~# ceph time-sync-status
{
    "time_skew_status": {
        "cu-pve04": {
            "skew": 0.000000,
            "latency": 0.000000,
            "health": "HEALTH_OK"
        },
        "cu-pve05": {
            "skew": 0.002848,
            "latency": 0.001070,
            "health": "HEALTH_OK"
        },
        "cu-pve06": {
            "skew": 0.002570,
            "latency": 0.001064,
            "health": "HEALTH_OK"
        }
    },
    "timechecks": {
        "epoch": 22,
        "round": 3536,
        "round_status": "finished"
    }
}
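Skew on cu-pve05/06 is under 3 ms, comfortably below the default mon_clock_drift_allowed of 0.05 s (50 ms) at which the monitors would start raising a clock-skew warning.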
root@cu-pve04:~# ceph versions
{
    "mon": {
        "ceph version 12.2.12 (39cfebf25a7011204a9876d2950e4b28aba66d11) luminous (stable)": 3
    },
    "mgr": {
        "ceph version 12.2.12 (39cfebf25a7011204a9876d2950e4b28aba66d11) luminous (stable)": 3
    },
    "osd": {
        "ceph version 12.2.12 (39cfebf25a7011204a9876d2950e4b28aba66d11) luminous (stable)": 24
    },
    "mds": {
        "ceph version 12.2.12 (39cfebf25a7011204a9876d2950e4b28aba66d11) luminous (stable)": 3
    },
    "overall": {
        "ceph version 12.2.12 (39cfebf25a7011204a9876d2950e4b28aba66d11) luminous (stable)": 33
    }
}