  • Ceph installation process

    Create the cluster
    [2019-03-20 18:35:04,232][ceph_deploy.conf][DEBUG ] found configuration file at: /home/sceph/.cephdeploy.conf
    [2019-03-20 18:35:04,233][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy new ceph1


    sudo /usr/sbin/ip link show
    sudo /usr/sbin/ip addr show
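
    ceph-deploy new probes the host network (hence the ip link / ip addr calls above) and writes a minimal ceph.conf plus ceph.mon.keyring into the working directory. A sketch of the generated ceph.conf, assuming ceph1 is 192.168.7.151 as in the scp command further down; the fsid is a random UUID:

    [global]
    fsid = <generated uuid>
    mon_initial_members = ceph1
    mon_host = 192.168.7.151
    auth_cluster_required = cephx
    auth_service_required = cephx
    auth_client_required = cephx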

    ========================================
    Install the Ceph packages

    [2019-03-21 10:36:07,664][ceph_deploy.conf][DEBUG ] found configuration file at: /home/sceph/.cephdeploy.conf
    [2019-03-21 10:36:07,666][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy install ceph1 ceph2 ceph3


    sudo yum clean all
    sudo yum -y install epel-release
    sudo yum -y install yum-plugin-priorities
    sudo rpm --import https://download.ceph.com/keys/release.asc
    sudo yum remove -y ceph-release
    sudo yum install -y https://download.ceph.com/rpm-mimic/el7/noarch/ceph-release-1-0.el7.noarch.rpm
    # ceph-deploy then ensures that /etc/yum.repos.d/ceph.repo carries a high priority
    sudo yum -y install ceph ceph-radosgw
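
    The "high priority" mentioned above comes from yum-plugin-priorities: ceph-deploy appends priority=1 to each section of /etc/yum.repos.d/ceph.repo, which looks roughly like this (the [Ceph-noarch] and [ceph-source] sections follow the same pattern):

    [Ceph]
    name=Ceph packages for $basearch
    baseurl=https://download.ceph.com/rpm-mimic/el7/$basearch
    enabled=1
    gpgcheck=1
    gpgkey=https://download.ceph.com/keys/release.asc
    priority=1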


    [2019-03-21 11:15:01,990][ceph1][INFO ] Running command: sudo ceph --version
    [2019-03-21 11:15:02,126][ceph1][DEBUG ] ceph version 13.2.5 (cbff874f9007f1869bfd3821b7e33b2a6ffd4988) mimic (stable)


    ====================================
    Create the mon
    [2019-03-21 15:29:30,161][ceph_deploy.conf][DEBUG ] found configuration file at: /home/sceph/.cephdeploy.conf
    [2019-03-21 15:29:30,161][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy mon create-initial


    sudo ceph-mon --cluster ceph --mkfs -i ceph1 --keyring /var/lib/ceph/tmp/ceph-ceph1.mon.keyring --setuser 167 --setgroup 167

    sudo systemctl enable ceph.target
    sudo systemctl enable ceph-mon@ceph1

    sudo systemctl start ceph-mon@ceph1
    sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.ceph1.asok mon_status
    (ceph-deploy polls mon_status like this until the monitor reports it has joined quorum)

    sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --admin-daemon=/var/run/ceph/ceph-mon.ceph1.asok mon_status
    sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph1/keyring auth get client.admin
    sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph1/keyring auth get client.bootstrap-mds
    sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph1/keyring auth get client.bootstrap-mgr
    sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph1/keyring auth get client.bootstrap-osd
    sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph1/keyring auth get client.bootstrap-rgw
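
    Those auth get calls are create-initial gathering keys. When it finishes, the admin and bootstrap keyrings should be sitting in the deploy directory, something like:

    [sceph@ceph1 ~]$ ls -1 *.keyring
    ceph.bootstrap-mds.keyring
    ceph.bootstrap-mgr.keyring
    ceph.bootstrap-osd.keyring
    ceph.bootstrap-rgw.keyring
    ceph.client.admin.keyring
    ceph.mon.keyring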


    ================================
    Push the configuration
    [2019-03-21 17:22:03,950][ceph_deploy.conf][DEBUG ] found configuration file at: /home/sceph/.cephdeploy.conf
    [2019-03-21 17:22:03,950][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy admin ceph1 ceph2 ceph3
    Pushing admin keys and conf to ceph3


    ================================
    Create the mgr
    [2019-03-21 17:26:53,722][ceph_deploy.conf][DEBUG ] found configuration file at: /home/sceph/.cephdeploy.conf
    [2019-03-21 17:26:53,723][ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy mgr create ceph2
    Deploying mgr, cluster ceph hosts ceph2:ceph2

    sudo ceph --cluster ceph --name client.bootstrap-mgr --keyring /var/lib/ceph/bootstrap-mgr/ceph.keyring auth get-or-create mgr.ceph2 mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o /var/lib/ceph/mgr/ceph-ceph2/keyring
    sudo systemctl enable ceph-mgr@ceph2
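
    ceph-deploy also starts the unit itself; done by hand, plus a check that the mgr registered with the mons:

    sudo systemctl start ceph-mgr@ceph2
    sudo ceph -s | grep mgr        # expect: mgr: ceph2(active)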

    ================================
    Create three OSDs (run from the admin node)
    ceph-deploy osd create --data /dev/vdb ceph1
    ceph-deploy osd create --data /dev/vdb ceph2
    ceph-deploy osd create --data /dev/vdb ceph3
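
    Each of these drives ceph-volume on the target host; the roughly equivalent manual invocation there would be:

    sudo ceph-volume lvm create --bluestore --data /dev/vdb
    sudo ceph-volume lvm list      # confirm the OSD was prepared and activated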

    ====================================
    Deployment done; check that the cluster status is healthy
    sudo ceph health
    sudo ceph -s
    ceph quorum_status --format json-pretty
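
    With everything up, ceph -s should report something like the following (one mon on ceph1, the mgr on ceph2, three OSDs; sizes match the rados df output below):

    cluster:
      id:     <fsid>
      health: HEALTH_OK
    services:
      mon: 1 daemons, quorum ceph1
      mgr: ceph2(active)
      osd: 3 osds: 3 up, 3 in
    data:
      usage:   3.0 GiB used, 177 GiB / 180 GiB avail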

    ====================================
    Practice: create a pool and store an object
    echo "11111" > my.txt
    ceph osd pool create mypool 8
    rados put m1 my.txt --pool=mypool
    rados -p mypool ls
    rados df
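
    A read-back from the cluster verifies the content, not just the listing (sketch; the output path is arbitrary):

    rados get m1 /tmp/m1.out --pool=mypool
    diff my.txt /tmp/m1.out && echo "round-trip OK"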

    Verify the object was actually stored
    [root@ceph1 ~]# rados -p mypool ls
    m1
    [root@ceph1 ~]# rados df
    POOL_NAME USED OBJECTS CLONES COPIES MISSING_ON_PRIMARY UNFOUND DEGRADED RD_OPS  RD WR_OPS    WR
    mypool    12 B       1      0      3                  0       0        0      0 0 B      1 1 KiB

    total_objects 1
    total_used 3.0 GiB
    total_avail 177 GiB
    total_space 180 GiB
    [root@ceph1 ~]# cat my.txt
    11111111111

    Locate the object
    [root@ceph1 ~]# ceph osd map mypool m1
    osdmap e16 pool 'mypool' (1) object 'm1' -> pg 1.c3958388 (1.0) -> up ([1,2,0], p1) acting ([1,2,0], p1)
    [root@ceph1 ~]#

    Delete the object
    [root@ceph1 ~]# rados rm m1 --pool=mypool
    Delete the pool
    [root@ceph1 ~]# ceph osd pool rm mypool
    Error EPERM: WARNING: this will *PERMANENTLY DESTROY* all data stored in pool mypool. If you are *ABSOLUTELY CERTAIN* that is what you want, pass the pool name *twice*, followed by --yes-i-really-really-mean-it.
    [root@ceph1 ~]# ceph osd pool rm mypool mypool --yes-i-really-really-mean-it
    Error EPERM: pool deletion is disabled; you must first set the mon_allow_pool_delete config option to true before you can destroy a pool
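
    To actually delete the pool, lift the monitor guard first; the same ceph config set mechanism used for the mgr below works for the mon (re-enable the guard afterwards):

    ceph config set mon mon_allow_pool_delete true
    ceph osd pool rm mypool mypool --yes-i-really-really-mean-it
    ceph config set mon mon_allow_pool_delete false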


    ==================================
    ceph mgr module enable dashboard
    ceph dashboard create-self-signed-cert

    Disable SSL
    ceph config set mgr mgr/dashboard/ssl false

    [root@ceph2 yum.repos.d]# ceph mgr services
    {
        "dashboard": "https://192.168.7.152:9800/"
    }
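
    The mimic dashboard still needs a login user before the UI is usable, and the non-default port 9800 shown above corresponds to the mgr config key here (sketch; username and password are placeholders):

    ceph config set mgr mgr/dashboard/server_port 9800
    ceph dashboard set-login-credentials admin <password>
    ceph mgr module disable dashboard && ceph mgr module enable dashboard   # restart the module to pick up the port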

    ===================================
    [sceph@ceph1 ~]$ ceph-deploy mds create ceph2
    =====================================

    [sceph@ceph1 ~]$ ceph-deploy --overwrite-conf rgw create ceph1

    radosgw-admin user create --uid=786 --display-name=fff --system
    radosgw-admin user info --uid=786
    ceph dashboard set-rgw-api-access-key Y2CAASJE4Z0HFAD26R1R
    ceph dashboard set-rgw-api-secret-key dJzSwCHeMkqhbksV0zcDnalPzBPjHz3wRG3aRlSN

    The object gateway then shows up in the dashboard.
    [root@ceph1 ceph]# systemctl status ceph-radosgw@ceph1
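
    Civetweb listens on port 7480 by default in mimic; a quick anonymous request should come back with a ListAllMyBucketsResult XML document:

    curl http://ceph1:7480/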

    =====================================
    vi /etc/ceph/ceph.conf
    [client.rgw.ceph1]
    rgw_frontends = civetweb port=9080


    systemctl restart ceph-radosgw.target
    systemctl -a


    #ceph-deploy --overwrite-conf config push ceph1
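
    After the conf is pushed and ceph-radosgw.target restarted, confirm civetweb rebound to the new port (sketch):

    sudo ss -tlnp | grep 9080
    curl http://ceph1:9080/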

    =======================================
    The mon's database is RocksDB:
    https://github.com/facebook/rocksdb/wiki/Administration-and-Data-Access-Tool
    It lives in /var/lib/ceph/mon/ceph-ceph1/store.db
    unzip rocksdb-master.zip
    cd rocksdb-master
    yum install gcc gcc-c++ perl
    make
    make install
    scp -r 192.168.7.151:/var/lib/ceph/mon/ceph-ceph1/store.db ~/
    cd ~/store.db/
    ~/rocksdb/rocksdb-master/ldb --db=. scan              # ldb expects the db directory, not a single .sst file
    ~/rocksdb/rocksdb-master/sst_dump -h
    ~/rocksdb/rocksdb-master/sst_dump --file=007031.sst --command=scan
    ~/rocksdb/rocksdb-master/sst_dump --file=007031.sst --command=scan --read_num=5
    ~/rocksdb/rocksdb-master/sst_dump --file=007031.sst --command=raw     # writes 007031_dump.txt
    less 007031_dump.txt

    =======================================

    [root@ceph1 rbdpool]# ceph pg map 8.13
    osdmap e55 pg 8.13 (8.13) -> up [0,2,1] acting [0,2,1]
    Both the up set and the acting set are [0,2,1], so osd.0 is the primary for this pg (the same mapping ceph osd map showed per object above).
