1. Deployment preparation:
(1) Configure static hostname resolution on all Ceph cluster nodes (including the client):
[root@zxw9 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.126.6 zxw6
192.168.126.7 zxw7
192.168.126.8 zxw8
192.168.126.9 zxw9
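The same hosts file must exist on every node; one way to push it out (a sketch, assuming root SSH between the nodes is available):
[root@zxw9 ~]# for h in zxw6 zxw7 zxw8; do scp /etc/hosts $h:/etc/hosts; done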
(2) Create a cent user with a password on every cluster node (including the client) by running:
useradd cent && echo "123" | passwd --stdin cent
[root@zxw9 ~]# echo -e 'Defaults:cent !requiretty
cent ALL = (root) NOPASSWD:ALL' | tee /etc/sudoers.d/ceph
chmod 440 /etc/sudoers.d/ceph
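The drop-in can be checked for syntax errors before it is relied on; visudo validates a single file with -cf:
visudo -cf /etc/sudoers.d/ceph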
(3) On the deploy node, switch to the cent user and set up passwordless SSH logins to every node, including the client:
[root@zxw9 ~]# su - cent
[cent@zxw9 ~]$
[cent@zxw9 ~]$ ssh-keygen
[cent@zxw9 ~]$ ssh-copy-id zxw6
[cent@zxw9 ~]$ ssh-copy-id zxw7
[cent@zxw9 ~]$ ssh-copy-id zxw8
(4) As the cent user on the deploy node, create the following file in cent's home directory:
[cent@zxw9 ~]$ vim .ssh/config
Host zxw9
Hostname zxw9
User cent
Host zxw8
Hostname zxw8
User cent
Host zxw7
Hostname zxw7
User cent
Host zxw6
Hostname zxw6
User cent
[cent@zxw9 ~]$ chmod 600 .ssh/config
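A quick way to confirm that passwordless login works through the config (each command should print the remote hostname without a password prompt):
[cent@zxw9 ~]$ for h in zxw6 zxw7 zxw8; do ssh $h hostname; done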
2. Configure a domestic Ceph mirror repository on all nodes:
vim /etc/yum.repos.d/ceph.repo
[Ceph]
name=Ceph packages for $basearch
baseurl=http://mirrors.163.com/ceph/rpm-jewel/el7/x86_64
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=http://mirrors.163.com/ceph/keys/release.asc
priority=1

[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.163.com/ceph/rpm-jewel/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=http://mirrors.163.com/ceph/keys/release.asc
priority=1

[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.163.com/ceph/rpm-jewel/el7/SRPMS
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=http://mirrors.163.com/ceph/keys/release.asc
priority=1
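The repo file then has to reach every node; one way to distribute it and refresh the yum cache (a sketch, run as root, assuming root SSH between nodes):
for h in zxw6 zxw7 zxw8; do scp /etc/yum.repos.d/ceph.repo $h:/etc/yum.repos.d/; done
yum clean all && yum makecache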
Move the old conflicting repo file out of the way, then install ceph-deploy on the deploy node:
[root@zxw9 ~]# mv rdo-release-yunwei.repo rdo-release-yunwei.repo.bak
[root@zxw9 ~]# yum localinstall ceph-deploy-1.5.39-0.noarch.rpm -y
Download the Ceph 10.2.11 (jewel) RPMs with the package-download script; every node installs the full set. The package set looks like this:
[root@zxw6 cephjrpm]# ls
ceph-10.2.11-0.el7.x86_64.rpm libcephfs_jni1-devel-10.2.11-0.el7.x86_64.rpm
ceph-base-10.2.11-0.el7.x86_64.rpm librados2-10.2.11-0.el7.x86_64.rpm
ceph-common-10.2.11-0.el7.x86_64.rpm librados2-devel-10.2.11-0.el7.x86_64.rpm
ceph-devel-compat-10.2.11-0.el7.x86_64.rpm libradosstriper1-10.2.11-0.el7.x86_64.rpm
cephfs-java-10.2.11-0.el7.x86_64.rpm libradosstriper1-devel-10.2.11-0.el7.x86_64.rpm
ceph-fuse-10.2.11-0.el7.x86_64.rpm librbd1-10.2.11-0.el7.x86_64.rpm
ceph-libs-compat-10.2.11-0.el7.x86_64.rpm librbd1-devel-10.2.11-0.el7.x86_64.rpm
ceph-mds-10.2.11-0.el7.x86_64.rpm librgw2-10.2.11-0.el7.x86_64.rpm
ceph-mon-10.2.11-0.el7.x86_64.rpm librgw2-devel-10.2.11-0.el7.x86_64.rpm
ceph-osd-10.2.11-0.el7.x86_64.rpm python-ceph-compat-10.2.11-0.el7.x86_64.rpm
ceph-radosgw-10.2.11-0.el7.x86_64.rpm python-cephfs-10.2.11-0.el7.x86_64.rpm
ceph-resource-agents-10.2.11-0.el7.x86_64.rpm python-rados-10.2.11-0.el7.x86_64.rpm
ceph-selinux-10.2.11-0.el7.x86_64.rpm python-rbd-10.2.11-0.el7.x86_64.rpm
ceph-test-10.2.11-0.el7.x86_64.rpm rbd-fuse-10.2.11-0.el7.x86_64.rpm
libcephfs1-10.2.11-0.el7.x86_64.rpm rbd-mirror-10.2.11-0.el7.x86_64.rpm
libcephfs1-devel-10.2.11-0.el7.x86_64.rpm rbd-nbd-10.2.11-0.el7.x86_64.rpm
libcephfs_jni1-10.2.11-0.el7.x86_64.rpm
On every node, as root, enter the directory holding the downloaded RPMs and install them all:
[root@zxw6 cephjrpm]# yum localinstall ./* -y
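Each node can then confirm what landed; the version string should report 10.2.11 (jewel):
[root@zxw6 cephjrpm]# ceph --version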
Back on the deploy node, switch to the cent user:
[root@zxw9 ~]# su - cent
Last login: Sat Aug 10 10:48:11 CST 2019 on pts/0
[cent@zxw9 ~]$
Create a working directory for ceph-deploy:
[cent@zxw9 ~]$ mkdir ceph
[cent@zxw9 ~]$ cd ceph/
[cent@zxw9 ceph]$ pwd
/home/cent/ceph
On the deploy node (as the cent user), create the new cluster:
[cent@zxw9 ceph]$ ceph-deploy new zxw6 zxw7 zxw8
[cent@zxw9 ceph]$ ls
ceph.conf ceph-deploy-ceph.log ceph.mon.keyring
[cent@zxw9 ceph]$ vim ceph.conf
Add the following:
[global]
fsid = 1538d7bd-e989-4e8a-a37f-f536881482f2
mon_initial_members = zxw6, zxw7, zxw8
mon_host = 192.168.126.6,192.168.126.7,192.168.126.8
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
osd_pool_default_size = 1            # number of replicas per object
osd_pool_default_min_size = 1        # minimum replicas required to serve I/O
mon_clock_drift_allowed = 2          # clock drift allowed between monitors (seconds)
mon_clock_drift_warn_backoff = 3     # backoff multiplier for clock-drift warnings
Optional parameters:
public_network = 192.168.254.0/24
cluster_network = 172.16.254.0/24
osd_pool_default_size = 3
osd_pool_default_min_size = 1
osd_pool_default_pg_num = 8
osd_pool_default_pgp_num = 8
osd_crush_chooseleaf_type = 1
[mon]
mon_clock_drift_allowed = 0.5
[osd]
osd_mkfs_type = xfs
osd_mkfs_options_xfs = -f
filestore_max_sync_interval = 5
filestore_min_sync_interval = 0.1
filestore_fd_cache_size = 655350
filestore_omap_header_cache_size = 655350
filestore_fd_cache_random = true
osd op threads = 8
osd disk threads = 4
filestore op threads = 8
max_open_files = 655350
From the deploy node, install the Ceph software on all nodes:
[cent@zxw9 ceph]$ ceph-deploy install zxw6 zxw7 zxw8 zxw9
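Since the RPMs were already installed from the local directory and the 163 mirror is configured, it may help to stop ceph-deploy from rewriting the repo files; ceph-deploy install supports a --no-adjust-repos flag for that:
[cent@zxw9 ceph]$ ceph-deploy install --no-adjust-repos zxw6 zxw7 zxw8 zxw9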
Initialize the monitors from the deploy node (as the cent user):
[cent@zxw9 ceph]$ ceph-deploy mon create-initial
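If the monitors reach quorum, ceph-deploy drops the bootstrap keyrings (ceph.client.admin.keyring, ceph.bootstrap-osd.keyring, and friends) into the working directory; listing them is a quick success check:
[cent@zxw9 ceph]$ ls *.keyring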
Add disks
Partition each data disk:
fdisk /dev/sdc
Format every partition as XFS:
mkfs.xfs /dev/sdd1
Note: before preparing, make an XFS filesystem on each partition; it gets mounted under /var/lib/ceph/osd, and the owner and group must be ceph:ceph.
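Partitioning and formatting six partitions by hand is tedious; a loop sketch from the deploy node, assuming blank disks named sdc and sdd on every node (parted handles the partitioning non-interactively):
for h in zxw6 zxw7 zxw8; do
  for d in sdc sdd; do
    ssh $h "sudo parted -s /dev/$d mklabel gpt mkpart primary xfs 0% 100% && sudo mkfs.xfs -f /dev/${d}1"
  done
done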
List a node's disks: ceph-deploy disk list zxw6
Zap (wipe) a node's disk: ceph-deploy disk zap zxw6:/dev/vdb1
From the deploy node (as cent), check each node's disk layout:
[cent@zxw9 ceph]$ ceph-deploy disk list zxw9
Prepare the partitions as OSDs:
[cent@zxw9 ceph]$ ceph-deploy osd prepare zxw6:/dev/sdd1 zxw6:/dev/sdc1
[cent@zxw9 ceph]$ ceph-deploy osd prepare zxw7:/dev/sdd1 zxw7:/dev/sdc1
[cent@zxw9 ceph]$ ceph-deploy osd prepare zxw8:/dev/sdd1 zxw8:/dev/sdc1
If this step fails with a permissions error, run the following on the affected node:
[root@zxw6 tmp]# chown -R ceph:ceph /var/lib/ceph/osd
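The same fix can be applied across all nodes in one pass from the deploy node:
[cent@zxw9 ceph]$ for h in zxw6 zxw7 zxw8; do ssh $h sudo chown -R ceph:ceph /var/lib/ceph/osd; done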
Mount all formatted OSD partitions under /var/lib/ceph/osd, then activate them:
[cent@zxw9 ceph]$ ceph-deploy osd activate zxw8:/dev/sdb1
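ceph-deploy accepts multiple host:partition pairs, so everything prepared above can be activated in one command (device names assumed to match the prepare step):
[cent@zxw9 ceph]$ ceph-deploy osd activate zxw6:/dev/sdd1 zxw6:/dev/sdc1 zxw7:/dev/sdd1 zxw7:/dev/sdc1 zxw8:/dev/sdd1 zxw8:/dev/sdc1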
Push the Ceph configuration and admin keyring to all nodes:
[cent@zxw9 ceph]$ ceph-deploy admin zxw9 zxw8 zxw7 zxw6
On all nodes, give the admin keyring 644 permissions:
chmod 644 /etc/ceph/ceph.client.admin.keyring
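Applied to all nodes in one pass (assuming the admin keyring was pushed everywhere in the previous step):
[cent@zxw9 ceph]$ for h in zxw6 zxw7 zxw8 zxw9; do ssh $h sudo chmod 644 /etc/ceph/ceph.client.admin.keyring; done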
Check the cluster status (viewable from any node):
ceph -s
cluster faec8d46-40c2-416d-b566-24962119cd36
health HEALTH_OK
monmap e1: 3 mons at {zxw6=192.168.126.6:6789/0,zxw7=192.168.126.7:6789/0,zxw8=192.168.126.8:6789/0}
election epoch 4, quorum 0,1,2 zxw6,zxw7,zxw8
osdmap e32: 8 osds: 6 up, 6 in
flags sortbitwise,require_jewel_osds
pgmap v96: 64 pgs, 1 pools, 0 bytes data, 0 objects
665 MB used, 17700 MB / 18366 MB avail
64 active+clean
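The osdmap line above shows 8 osds with only 6 up and in; ceph osd tree shows which OSDs are down and where they sit in the CRUSH hierarchy:
ceph osd tree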