zoukankan      html  css  js  c++  java
  • 分布式存储ceph部署

    版本

    ceph版本 os支持版本 ceph-ansible支持版本
    pacific (16) CentOS 8 stable-6.0
    octopus (15) CentOS 8 & CentOS 7 stable-5.0

    admin

    hostname category hardware eth0 - public eth1 eth2 - cluster eth3 gateway
    vm-210 deploy core*4 / 4g / 20GB 192.168.100.210 10.0.100.210 10.0.110.210 10.0.120.210 192.168.100.1

    ceph

    hostname category hardware eth0 - public eth1 eth2 - cluster eth3 gateway
    vm-201 ceph-mon core*1 / 2g / 20GB 192.168.100.201 10.0.100.201 10.0.110.201 10.0.120.201 192.168.100.1
    vm-202 ceph-mon core*1 / 2g / 20GB 192.168.100.202 10.0.100.202 10.0.110.202 10.0.120.202 192.168.100.1
    vm-203 ceph-mon core*1 / 2g / 20GB 192.168.100.203 10.0.100.203 10.0.110.203 10.0.120.203 192.168.100.1
    vm-204 ceph-osd core*4 / 4g / 20GB,10GBx2,30GBx4 192.168.100.204 10.0.100.204 10.0.110.204 10.0.120.204 192.168.100.1
    vm-205 ceph-osd core*4 / 4g / 20GB,10GBx2,30GBx4 192.168.100.205 10.0.100.205 10.0.110.205 10.0.120.205 192.168.100.1
    vm-206 ceph-osd core*4 / 4g / 20GB,10GBx2,30GBx4 192.168.100.206 10.0.100.206 10.0.110.206 10.0.120.206 192.168.100.1

    在admin操作所有步骤

    1. 静态指向

    cat > /etc/hosts <<EOF
    127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
    ::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
    
    # admin
    192.168.100.210 vm-210
    
    # openstack
    192.168.100.211 vm-211
    192.168.100.212 vm-212
    192.168.100.213 vm-213
    192.168.100.214 vm-214
    192.168.100.215 vm-215
    192.168.100.216 vm-216
    192.168.100.217 vm-217
    192.168.100.218 vm-218
    192.168.100.219 vm-219
    
    # k8s
    192.168.100.191 vm-191
    192.168.100.192 vm-192
    192.168.100.193 vm-193
    192.168.100.194 vm-194
    192.168.100.195 vm-195
    192.168.100.196 vm-196
    192.168.100.197 vm-197
    192.168.100.207 vm-207
    192.168.100.198 vm-198
    
    # ceph
    192.168.100.201 vm-201
    192.168.100.202 vm-202
    192.168.100.203 vm-203
    192.168.100.204 vm-204
    192.168.100.205 vm-205
    192.168.100.206 vm-206
    EOF
    

    2. ssh信任登录

    ssh-keygen -b 2048 -t rsa -P '' -f ~/.ssh/id_rsa
    
    for i in {201..206}; do ssh-copy-id -i .ssh/id_rsa.pub vm-$i; done
    

    3. ceph-ansible

    git clone https://github.com/ceph/ceph-ansible.git
    cd ceph-ansible
    git checkout stable-5.0
    
    # 或者从镜像站下载  git clone -b stable-5.0 https://hub.fastgit.org/ceph/ceph-ansible.git
    
    pip3 install pip -U
    pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
    pip3 install -r requirements.txt 
    
    cp group_vars/all.yml.sample group_vars/all.yml
    cp group_vars/osds.yml.sample group_vars/osds.yml
    cp site.yml.sample site.yml
    cp site-container.yml.sample site-container.yml
    

    4. ansible hosts

    cat > inventory <<EOF
    [mons]
    vm-[201:203]
    
    [mgrs]
    vm-[201:203]
    
    [grafana-server]
    vm-[201:203]
    
    [rgws]
    vm-[201:203]
    
    [osds]
    vm-[204:206]
    EOF
    
    ansible -i inventory all -m copy -a 'src=/etc/hosts dest=/etc/hosts'
    ansible -i inventory osds -m shell -a 'pip3 install --index https://pypi.tuna.tsinghua.edu.cn/simple six pyyaml'
    ansible -i inventory mons -m shell -a 'pip3 install --index https://pypi.douban.com/simple pecan werkzeug && reboot'
    

    5. ceph-ansible group_vars/all.yml

    cat > group_vars/all.yml << EOF
    ---
    dummy:
    cluster: ceph
    
    mon_group_name: mons
    osd_group_name: osds
    rgw_group_name: rgws
    mds_group_name: mdss
    nfs_group_name: nfss
    rbdmirror_group_name: rbdmirrors
    client_group_name: clients
    iscsi_gw_group_name: iscsigws
    mgr_group_name: mgrs
    rgwloadbalancer_group_name: rgwloadbalancers
    grafana_server_group_name: grafana-server
    
    configure_firewall: false
    ceph_test: false
    upgrade_ceph_packages: False
    
    ceph_origin: repository
    ceph_repository: community
    ceph_mirror: https://mirrors.nju.edu.cn/ceph
    ceph_stable_key: "{{ ceph_mirror }}/keys/release.asc"
    ceph_stable_release: octopus
    ceph_stable_repo: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}"
    
    nfs_ganesha_stable: true
    nfs_ganesha_stable_branch: V2.8-stable
    nfs_ganesha_stable_deb_repo: "{{ ceph_mirror }}/nfs-ganesha/rpm-{{ nfs_ganesha_stable_branch }}/{{ ceph_stable_release }}"
    
    generate_fsid: true
    ceph_conf_key_directory: /etc/ceph
    cephx: true
    monitor_interface: eth0
    public_network: 192.168.100.0/24
    cluster_network: 10.0.110.0/24
    
    
    radosgw_interface: eth0
    radosgw_num_instances: 1
    
    dashboard_enabled: false
    dashboard_protocol: http
    dashboard_port: 8443
    dashboard_admin_user: admin
    dashboard_admin_user_ro: false
    dashboard_admin_password: admin
    grafana_admin_user: admin
    grafana_admin_password: admin
    
    ceph_conf_overrides:
        global:
          rbd_default_features: 7
          auth cluster required: cephx
          auth service required: cephx
          auth client required: cephx
          osd journal size: 2048
          osd pool default size: 3
          osd pool default min size: 1
          mon_pg_warn_max_per_osd: 1024
          osd pool default pg num: 128
          osd pool default pgp num: 128
          max open files: 131072
          osd_deep_scrub_randomize_ratio: 0.01
    
        mgr:
          mgr modules: dashboard
    
        mon:
          mon_allow_pool_delete: true
    
        client:
          rbd_cache: true
          rbd_cache_size: 335544320
          rbd_cache_max_dirty: 134217728
          rbd_cache_max_dirty_age: 10
    
        osd:
          osd mkfs type: xfs
          ms_bind_port_max: 7100
          osd_client_message_size_cap: 2147483648
          osd_crush_update_on_start: true
          osd_deep_scrub_stride: 131072
          osd_disk_threads: 4
          osd_map_cache_bl_size: 128
          osd_max_object_name_len: 256
          osd_max_object_namespace_len: 64
          osd_max_write_size: 1024
          osd_op_threads: 8
    
          osd_recovery_op_priority: 1
          osd_recovery_max_active: 1
          osd_recovery_max_single_start: 1
          osd_recovery_max_chunk: 1048576
          osd_recovery_threads: 1
          osd_max_backfills: 4
          osd_scrub_begin_hour: 23
          osd_scrub_end_hour: 7
    EOF
    

    6. ceph-ansible group_vars/osds.yml

    # ssd: sdb,sdg
    # sas: sdc,sdd,sde,sdf
    cat > group_vars/osds.yml <<EOF
    ---
    devices:
      - /dev/sdc
      - /dev/sdd
      - /dev/sde
      - /dev/sdf
    dedicated_devices:
       - /dev/sdb
    bluestore_wal_devices:
       - /dev/sdg
    
    osd_auto_discovery: false
    # NOTE(review): osd_scenario was removed in ceph-ansible stable-5.0 (lvm is the
    # only supported scenario), and "collocated" contradicts the dedicated_devices
    # list above — confirm this line is still honored by your ceph-ansible version.
    osd_scenario: collocated
    osd_objectstore: bluestore
    EOF
    

    7. 安装

    ansible -i inventory all -m ping
    ansible-playbook -i inventory site.yml
    

    8. ceph dashboard

    yum install -y ceph-mgr-dashboard
    
    ceph mgr module enable dashboard
    ceph dashboard create-self-signed-cert
    
    ceph config set mgr mgr/dashboard/server_addr vm-202
    ceph config set mgr mgr/dashboard/server_port 8443
    ceph config set mgr mgr/dashboard/ssl_server_port 8443
    
    echo 'password' > passwd
    ceph dashboard ac-user-create admin -i passwd administrator
    

    ceph -s 出现错误提示: mons are allowing insecure global_id reclaim
    ceph config set mon auth_allow_insecure_global_id_reclaim false

    ceph -s 出现错误提示:Module 'restful' has failed dependency: No module named 'pecan'
    pip3 install pecan werkzeug && reboot

  • 相关阅读:
    Math 方法的使用
    敏捷开发 故事墙
    利用OpenPOP开源组件实现接收电子邮件功能
    JDBC连接MySql数据库
    【转】"已成功与服务器建立连接,但是在登录前的握手期间发生错误"问题解决方案
    【转】如何让虚拟目录里面的webconfig不继承网站
    【转】JavaScript控制图片放大缩小
    【Wonder原创】CheckBoxList实现单选
    【杂记】ASP.Net Web开发
    【Wonder整理】防止重复提交并弹出半透明对话框
  • 原文地址:https://www.cnblogs.com/liujitao79/p/15222151.html
Copyright © 2011-2022 走看看