# List the pools defined in the Ceph cluster
ceph osd lspools
# Show the Ceph user authentication entries (keys / caps)
ceph auth list
# Dump the Ceph monitor map — the monitor addresses are needed
# for the ceph-csi ConfigMap created below
ceph mon dump

# Working directory for the ceph-csi configuration files.
# -p: succeed if the directory already exists (makes the script re-runnable);
# abort if we cannot enter it, so later files are not written elsewhere.
mkdir -p /data/csi
cd /data/csi || exit 1
# ceph-csi reads the Ceph monitor addresses for the cluster from a ConfigMap
# stored in Kubernetes. Write the manifest non-interactively with a quoted
# heredoc (no shell expansion) instead of editing it with vi — the original
# pasted the YAML as bare shell lines, which is not executable.
# clusterID must be the Ceph cluster fsid; monitors come from `ceph mon dump`.
cat > csi-config-map.yaml <<'EOF'
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "af7a6791-5fc8-484e-b170-68828097d55b",
        "monitors": [
          "192.168.31.207:6789",
          "192.168.31.159:6789",
          "192.168.31.198:6789"
        ]
      }
    ]
metadata:
  name: ceph-csi-config
EOF
kubectl apply -f csi-config-map.yaml
kubectl get configmap
# Create the ceph-csi Secret.
# Print the admin entry (match plus 4 following lines) to obtain the key:
ceph auth list | grep -A 4 client.admin
# NOTE: a dedicated "kubernetes" Ceph user produced errors here, so the
# admin user is used instead.
# SECURITY: the userKey below is a cluster credential committed in plain
# text — replace it with your own key and keep this file out of version
# control / world-readable locations.
cat > csi-rbd-secret.yaml <<'EOF'
---
apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
  namespace: default
stringData:
  userID: admin
  userKey: AQC3PpVged1WHBAANe1mTE4mS/CGkplYdhxqVw==
EOF
kubectl apply -f csi-rbd-secret.yaml
kubectl get secret
kubectl get secret csi-rbd-secret -o yaml
# Decode the base64 value shown by `kubectl get secret -o yaml` ("admin")
echo YWRtaW4= | base64 -d
# Deploy the ceph-csi plugin.
# ServiceAccount and RBAC ClusterRole/ClusterRoleBinding objects. These do
# not normally need customization for a given Kubernetes environment, so
# they are applied straight from the upstream ceph-csi deployment YAML.
# Each download is checked: an unchecked wget would let kubectl apply a
# stale or missing file.
wget https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml || exit 1
kubectl apply -f csi-provisioner-rbac.yaml
wget https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-nodeplugin-rbac.yaml || exit 1
kubectl apply -f csi-nodeplugin-rbac.yaml
# Provisioner Deployment and node-plugin DaemonSet. NOTE: the container
# images they reference may be unreachable from some networks — a mirror
# or proxy may be required to pull them.
wget https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml || exit 1
kubectl apply -f csi-rbdplugin-provisioner.yaml
wget https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-rbdplugin.yaml || exit 1
kubectl apply -f csi-rbdplugin.yaml
# KMS configuration ConfigMap referenced by the rbdplugin manifests
wget https://raw.githubusercontent.com/ceph/ceph-csi/devel/examples/kms/vault/kms-config.yaml || exit 1
kubectl apply -f kms-config.yaml
kubectl get configmap
kubectl get secrets
# Create the StorageClass.
# A Kubernetes StorageClass defines a class of storage; multiple
# StorageClass objects can be created to map to different services.
# Make sure clusterID below matches the Ceph cluster fsid (see `ceph mon dump`).
cat > csi-rbd-sc.yaml <<'EOF'
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: af7a6791-5fc8-484e-b170-68828097d55b
  pool: kubernetes
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
  imageFormat: "2"
  imageFeatures: "layering"
reclaimPolicy: Delete
mountOptions:
  - discard
EOF
kubectl apply -f csi-rbd-sc.yaml
kubectl get sc
# Request a filesystem-mode PVC (backed by an RBD image) from the
# csi-rbd-sc StorageClass. Written with a heredoc so the script is
# runnable — the original showed the YAML as bare shell lines.
cat > filesystem-pvc.yaml <<'EOF'
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: filesystem-rbd-pvc
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-rbd-sc
EOF
kubectl apply -f filesystem-pvc.yaml
# Demo Pod that mounts the PVC created above into an nginx container.
cat > filesystem-pod.yaml <<'EOF'
---
apiVersion: v1
kind: Pod
metadata:
  name: csi-rbd-demo-pod
spec:
  containers:
    - name: web-server
      image: nginx
      volumeMounts:
        - name: mypvc
          mountPath: /var/lib/www/html
  volumes:
    - name: mypvc
      persistentVolumeClaim:
        claimName: filesystem-rbd-pvc
        readOnly: false
EOF
kubectl apply -f filesystem-pod.yaml
# Create a StatefulSet to test dynamic provisioning: volumeClaimTemplates
# gives each of the 3 replicas its own PVC from the csi-rbd-sc class.
# (The original had this heading as a bare non-comment line, which is a
# shell syntax error; the manifest is now written via heredoc.)
cat > statefulset-pod.yaml <<'EOF'
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  selector:
    matchLabels:
      app: nginx
  serviceName: "nginx"
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
    spec:
      terminationGracePeriodSeconds: 10
      containers:
        - name: nginx
          image: nginx
          ports:
            - containerPort: 80
              name: web
          volumeMounts:
            - name: www
              mountPath: /var/lib/www/html
  volumeClaimTemplates:
    - metadata:
        name: www
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: "csi-rbd-sc"
        resources:
          requests:
            storage: 1Gi
EOF
kubectl apply -f statefulset-pod.yaml