  • Integrating Ceph with OpenStack Cinder

    Ceph is already deployed in this environment; for the Ceph deployment itself, see the other posts on this blog.
    1 Install the Ceph client on the cinder-volume node
    yum install -y ceph-common
    Note: the glance node needs python-rbd installed (see the example below).
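    For example, on the node running glance-api (a minimal sketch, assuming the same yum-based CentOS environment as the cinder-volume node):
    yum install -y python-rbd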
    Once the Ceph client is installed on the cinder-volume node, the /etc/ceph/ directory is created, containing a single file:
    [root@controller2 etc]# cd ceph/
    [root@controller2 ceph]# ls -ls
    total 4
    4 -rwxr-xr-x. 1 root root 2910 Oct 31 2018 rbdmap
    2 On the Ceph cluster, create the volumes pool in advance, then create the cinder account and grant it permissions:
    ceph:
    ceph osd pool create volumes 128
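    The capability string below also references the vms and images pools (and their cache tiers); if Nova and Glance will use Ceph as well, those pools must exist too. A minimal sketch, assuming the same PG count as volumes:
    ceph osd pool create vms 128
    ceph osd pool create images 128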
     
    ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=volumes-cache, allow rwx pool=vms, allow rwx pool=vms-cache, allow rx pool=images, allow rx pool=images-cache'
    [client.cinder]
    key = AQCuYipewZ4IOhAArwkoAs8BxEAnxF1MAHIn8g==
     
    ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
    [client.glance]
    key = AQAVdm5dojfsLxAAAtt+eX7psQC7pXpisqsvBg==
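    To double-check the accounts and their capabilities, they can be queried back (an optional verification, not part of the original steps):
    ceph auth get client.cinder
    ceph auth get client.glance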
     
     
    Copy the generated keyrings to the nodes running glance-api and cinder-volume. glance-api usually runs on the controller node, and in this example cinder-volume runs on the controller node as well.
    ceph:
    [root@ceph1 ceph]# ceph auth get-or-create client.glance | ssh controller2 tee /etc/ceph/ceph.client.glance.keyring
    root@172.16.3.71's password:
    [client.glance]
    key = AQAVdm5dojfsLxAAAtt+eX7psQC7pXpisqsvBg==
    [root@ceph1 ceph]# ceph auth get-or-create client.cinder | ssh controller2 tee /etc/ceph/ceph.client.cinder.keyring
    root@172.16.3.71's password:
    [client.cinder]
    key = AQDOdW5do2jzEhAA/v/VYEBHOUk440mpP6GMBg==
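    The keyring files should be readable by the respective service users (this follows the upstream Ceph/OpenStack guide; the glance and cinder user names assume a packaged installation):
    [root@controller2 ~]# chown glance:glance /etc/ceph/ceph.client.glance.keyring
    [root@controller2 ~]# chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring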
    The node running the nova-compute service also needs the client.cinder keyring, so copy it to the compute node:
    [root@ceph1 ceph]# ceph auth get-or-create client.cinder | ssh compute1 tee /etc/ceph/ceph.client.cinder.keyring
    root@172.16.3.72's password:
    [client.cinder]
    key = AQDOdW5do2jzEhAA/v/VYEBHOUk440mpP6GMBg==
    The node running nova-compute must also store the client.cinder user's key in libvirt: when a Ceph-backed Cinder volume is attached to a VM instance, libvirt uses this key to access the Ceph cluster. Temporarily create a key file on the node running nova-compute:
    [root@ceph1 ceph]# ceph auth get-key client.cinder | ssh compute1 tee client.cinder.key
    root@172.16.3.72's password:
    AQDOdW5do2jzEhAA/v/VYEBHOUk440mpP6GMBg==
    On the compute node, add the key to libvirt as a secret, then delete the temporary file:
    compute1:
    [root@compute1 ceph]# uuidgen
    35bbf774-17df-407d-8b52-45bf93269b9a
    [root@compute1 ~]# cat > secret.xml <<EOF
    > <secret ephemeral='no' private='no'>
    > <uuid>35bbf774-17df-407d-8b52-45bf93269b9a</uuid>
    > <usage type='ceph'>
    > <name>client.cinder secret</name>
    > </usage>
    > </secret>
    > EOF
    [root@compute1 ~]# virsh secret-define --file secret.xml
    Secret 35bbf774-17df-407d-8b52-45bf93269b9a created
    [root@compute1 ~]# virsh secret-set-value --secret 35bbf774-17df-407d-8b52-45bf93269b9a --base64 $(cat client.cinder.key)
    Secret value set
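    The temporary files can now be deleted, completing the "then delete it" step described above:
    [root@compute1 ~]# rm -f client.cinder.key secret.xml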
     
    Note: to reset (remove) a secret, run virsh secret-undefine 8a9359b5-ed26-49e3-ad92-41170a675a7f
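    To confirm which secrets are currently registered with libvirt (an optional check):
    [root@compute1 ~]# virsh secret-list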
    3 Copy ceph.conf from the Ceph server to the /etc/ceph/ directory on both the cinder-volume and compute nodes.
    ceph:
    scp /etc/ceph/ceph.conf root@controller2:/etc/ceph/
    scp /etc/ceph/ceph.conf root@compute1:/etc/ceph/
    The configuration file in this environment is as follows, for reference:
    [root@controller2 ~]# cat /etc/ceph/ceph.conf
    [global]
    fsid = 6bbab2f3-f90c-439d-86d7-9c0f3603303c
    mon_initial_members = ceph1, ceph2, ceph3
    mon_host = 172.16.3.61,172.16.3.62,172.16.3.63
    auth_cluster_required = cephx
    auth_service_required = cephx
    auth_client_required = cephx
    mon clock drift allowed = 10
    mon clock drift warn backoff = 30
    osd pool default pg num = 64
    osd pool default pgp num = 64
    osd_crush_update_on_start = false
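    At this point, a quick sanity check from the client nodes confirms they can reach the cluster with the cinder key (an optional verification, not part of the original steps):
    [root@controller2 ~]# ceph -s --id cinder
    [root@controller2 ~]# rbd ls volumes --id cinder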
     
    4 On the cinder-volume node, add the following to /etc/cinder/cinder.conf:
    [DEFAULT]
    enabled_backends = lvm,rbd-1
    glance_api_version = 2
    [lvm]
    volume_backend_name = lvm
    [rbd-1]
    volume_driver = cinder.volume.drivers.rbd.RBDDriver
    volume_backend_name = rbd-1
    rbd_pool = volumes
    rbd_ceph_conf = /etc/ceph/ceph.conf
    rbd_flatten_volume_from_snapshot = false
    rbd_max_clone_depth = 5
    rbd_store_chunk_size = 4
    rados_connect_timeout = 5
    rbd_user = cinder
    rbd_secret_uuid = 35bbf774-17df-407d-8b52-45bf93269b9a
    report_discard_supported = True
    image_upload_use_cinder_backend = False
    The completed configuration file is as follows, for reference:
    [root@controller2 ~]# cat /etc/cinder/cinder.conf
    #wangjx
    [DEFAULT]
    transport_url = rabbit://openstack:21vianet@controller2
    auth_strategy = keystone
    my_ip = 10.160.20.22
    enabled_backends = lvm,rbd-1
    glance_api_version = 2
    glance_api_servers = http://controller2:9292
    [lvm]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    volume_group = cinder-volumes
    iscsi_protocol = iscsi
    iscsi_helper = lioadm
    volume_backend_name = lvm
    [rbd-1]
    volume_driver = cinder.volume.drivers.rbd.RBDDriver
    volume_backend_name = rbd-1
    rbd_pool = volumes
    rbd_ceph_conf = /etc/ceph/ceph.conf
    rbd_flatten_volume_from_snapshot = false
    rbd_max_clone_depth = 5
    rbd_store_chunk_size = 4
    rados_connect_timeout = 5
    rbd_user = cinder
    rbd_secret_uuid = 35bbf774-17df-407d-8b52-45bf93269b9a
    report_discard_supported = True
    image_upload_use_cinder_backend = False
    [backend]
    [backend_defaults]
    [barbican]
    [brcd_fabric_example]
    [cisco_fabric_example]
    [coordination]
    [cors]
    [database]
    connection = mysql+pymysql://cinder:21vianet@controller2/cinder
    [fc-zone-manager]
    [healthcheck]
    [key_manager]
    [keystone_authtoken]
    auth_uri = http://controller2:5000
    auth_url = http://controller2:5000
    memcached_servers = controller2:11211
    auth_type = password
    project_domain_id = default
    user_domain_id = default
    project_name = service
    username = cinder
    password = 21vianet
    [matchmaker_redis]
    [nova]
    [oslo_concurrency]
    lock_path = /var/lib/cinder/tmp
    [oslo_messaging_amqp]
    [oslo_messaging_kafka]
    [oslo_messaging_notifications]
    [oslo_messaging_rabbit]
    [oslo_messaging_zmq]
    [oslo_middleware]
    [oslo_policy]
    [oslo_reports]
    [oslo_versionedobjects]
    [profiler]
    [sample_remote_file_source]
    [service_user]
    [ssl]
    [vault]
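    After saving cinder.conf, restart the Cinder services so the new rbd-1 backend is loaded (service names assume a systemd/RDO-style install):
    [root@controller2 ~]# systemctl restart openstack-cinder-volume.service openstack-cinder-scheduler.service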
    5 Create a Cinder volume type named ceph1.
    [root@controller2 ceph]# cinder type-create ceph1
    +--------------------------------------+-------+-------------+-----------+
    | ID                                   | Name  | Description | Is_Public |
    +--------------------------------------+-------+-------------+-----------+
    | 7932f657-87a1-495b-ac24-4308197f51e4 | ceph1 | -           | True      |
    +--------------------------------------+-------+-------------+-----------+
    [root@controller2 ceph]# cinder type-key ceph1 set volume_backend_name=rbd-1
    Check whether the type took effect:
    [root@controller2 ceph]# openstack volume type list
    +--------------------------------------+-------+-----------+
    | ID                                   | Name  | Is Public |
    +--------------------------------------+-------+-----------+
    | 7932f657-87a1-495b-ac24-4308197f51e4 | ceph1 | True      |
    +--------------------------------------+-------+-----------+
    [root@controller2 ceph]# openstack volume type show ceph1
    +--------------------+--------------------------------------+
    | Field              | Value                                |
    +--------------------+--------------------------------------+
    | access_project_ids | None                                 |
    | description        | None                                 |
    | id                 | 7932f657-87a1-495b-ac24-4308197f51e4 |
    | is_public          | True                                 |
    | name               | ceph1                                |
    | properties         | volume_backend_name='rbd-1'          |
    | qos_specs_id       | None                                 |
    +--------------------+--------------------------------------+
    Create a Ceph-backed volume:
    [root@controller2 ceph]# openstack volume create disk0205ceph1 --type ceph1 --size 1
    +---------------------+--------------------------------------+
    | Field               | Value                                |
    +---------------------+--------------------------------------+
    | attachments         | []                                   |
    | availability_zone   | nova                                 |
    | bootable            | false                                |
    | consistencygroup_id | None                                 |
    | created_at          | 2020-02-05T12:10:10.000000           |
    | description         | None                                 |
    | encrypted           | False                                |
    | id                  | 6043b12f-6b7e-4256-a095-ede1d019d501 |
    | migration_status    | None                                 |
    | multiattach         | False                                |
    | name                | disk0205ceph1                        |
    | properties          |                                      |
    | replication_status  | None                                 |
    | size                | 1                                    |
    | snapshot_id         | None                                 |
    | source_volid        | None                                 |
    | status              | creating                             |
    | type                | ceph1                                |
    | updated_at          | None                                 |
    | user_id             | ccd5b0f0d5534366b27ae4fab3a82945     |
    +---------------------+--------------------------------------+
    [root@controller2 ceph]# openstack volume list
    +--------------------------------------+---------------+-----------+------+------------------------------------+
    | ID                                   | Name          | Status    | Size | Attached to                        |
    +--------------------------------------+---------------+-----------+------+------------------------------------+
    | 6043b12f-6b7e-4256-a095-ede1d019d501 | disk0205ceph1 | available | 1    |                                    |
    | 2f9f4c64-ded8-4ff0-b2b2-94319a60e8d4 | disk0205      | in-use    | 1    | Attached to vm0205lvm on /dev/vda  |
    +--------------------------------------+---------------+-----------+------+------------------------------------+
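    On the Ceph side, the new volume appears as an RBD image in the volumes pool, named after its ID (assuming Cinder's default volume-<UUID> naming convention):
    [root@controller2 ceph]# rbd info volumes/volume-6043b12f-6b7e-4256-a095-ede1d019d501 --id cinder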


     
  • Original article: https://www.cnblogs.com/cloud-datacenter/p/12266419.html