  • Creating dynamic storage with Ceph RBD in the Kubernetes harbor namespace

    [root@bs-k8s-ceph ~]# ceph osd pool create harbor 128
    Error ETIMEDOUT: crush test failed with -110: timed out during smoke test (5 seconds)
    // I didn't know how to fix this error at the time; after a short wait it simply worked again on retry
    [root@bs-k8s-ceph ~]# ceph osd pool create harbor 128
    pool 'harbor' created
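    // The smoke-test timeout above was transient and cleared on its own. If it persists,
    // a quick cluster health check can rule out a slow or flapping OSD; a hedged sketch
    // using standard Ceph CLI commands (not part of the original session):
    ceph -s                      # overall cluster health
    ceph osd tree                # per-OSD up/down state and weight
    ceph osd pool ls detail      # existing pools and their settings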
    [root@bs-k8s-ceph ceph]# ceph auth get-or-create client.harbor mon 'allow r' osd 'allow class-read, allow rwx pool=harbor' -o ceph.client.harbor.keyring
    [root@bs-k8s-ceph ceph]# ceph auth get client.harbor
    exported keyring for client.harbor
    [client.harbor]
        key = AQDoCklen6e4NxAAVXmy/PG+R5iH8fNzMhk6Jg==
        caps mon = "allow r"
        caps osd = "allow class-read, allow rwx pool=harbor"
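    // On Luminous and later, a new pool should also be tagged for RBD before images can
    // be created in it. A minimal sketch; both commands are standard Ceph/RBD CLI and
    // were not part of the original session:
    ceph osd pool application enable harbor rbd   # tag the pool for the rbd application
    rbd pool init harbor                          # pre-initialize the pool for rbd use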
        
    [root@bs-k8s-node01 ~]# ceph auth get-key client.admin | base64
    QVFDNmNVSmV2eU8yRnhBQVBxYzE5Mm5PelNnZk5acmg5aEFQYXc9PQ==
    [root@bs-k8s-node01 ~]# ceph auth get-key client.harbor | base64
    QVFEb0NrbGVuNmU0TnhBQVZYbXkvUEcrUjVpSDhmTnpNaGs2Smc9PQ==
    [root@bs-k8s-master01 ~]# kubectl get nodes
    The connection to the server 20.0.0.250:8443 was refused - did you specify the right host or port?
    [root@bs-hk-hk01 ~]# systemctl status haproxy
    ● haproxy.service - HAProxy Load Balancer
       Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
       Active: failed (Result: exit-code) since Sun 2020-02-16 17:16:43 CST; 12min ago
      Process: 1168 ExecStart=/usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy.pid (code=exited, status=134)
     Main PID: 1168 (code=exited, status=134)
    
    Feb 15 20:22:54 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202254 (1184) : Server k8s_api_nodes...ue.
    Feb 15 20:25:15 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202515 (1183) : Server k8s_api_nodes...ue.
    Feb 15 20:25:15 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202515 (1184) : Server k8s_api_nodes...ue.
    Feb 15 20:26:03 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202603 (1184) : Server k8s_api_nodes...ue.
    Feb 15 20:26:03 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202603 (1183) : Server k8s_api_nodes...ue.
    Feb 15 20:26:13 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202613 (1183) : Server k8s_api_nodes...ue.
    Feb 15 20:26:13 bs-hk-hk01 haproxy[1168]: [WARNING] 045/202613 (1184) : Server k8s_api_nodes...ue.
    Feb 16 17:16:43 bs-hk-hk01 systemd[1]: haproxy.service: main process exited, code=exited, st...n/a
    Feb 16 17:16:44 bs-hk-hk01 systemd[1]: Unit haproxy.service entered failed state.
    Feb 16 17:16:44 bs-hk-hk01 systemd[1]: haproxy.service failed.
    Hint: Some lines were ellipsized, use -l to show in full.
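    // Exit status 134 is 128+6, i.e. the haproxy process died on SIGABRT. Before
    // restarting, the configuration can be validated with the same check mode that
    // ExecStartPre runs; a hedged sketch, not part of the original session:
    haproxy -c -f /etc/haproxy/haproxy.cfg             # parse and validate the config only
    journalctl -u haproxy -l --no-pager | tail -n 50   # full, un-ellipsized log lines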
    
    [root@bs-hk-hk01 ~]# systemctl start haproxy
    [root@bs-hk-hk01 ~]# systemctl status haproxy
    ● haproxy.service - HAProxy Load Balancer
       Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
       Active: active (running) since Sun 2020-02-16 17:30:03 CST; 1s ago
      Process: 4196 ExecStartPre=/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -c -q (code=exited, status=0/SUCCESS)
     Main PID: 4212 (haproxy)
       CGroup: /system.slice/haproxy.service
               ├─4212 /usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy....
               ├─4216 /usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy....
               └─4217 /usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy....
    
    Feb 16 17:30:00 bs-hk-hk01 systemd[1]: Starting HAProxy Load Balancer...
    Feb 16 17:30:03 bs-hk-hk01 systemd[1]: Started HAProxy Load Balancer.
    Feb 16 17:30:04 bs-hk-hk01 haproxy[4212]: [WARNING] 046/173004 (4212) : config : 'option for...de.
    Feb 16 17:30:04 bs-hk-hk01 haproxy[4212]: [WARNING] 046/173004 (4212) : config : 'option for...de.
    Feb 16 17:30:04 bs-hk-hk01 haproxy[4212]: [WARNING] 046/173004 (4212) : Proxy 'stats': in mu...st.
    Feb 16 17:30:04 bs-hk-hk01 haproxy[4212]: [NOTICE] 046/173004 (4212) : New worker #1 (4216) forked
    Feb 16 17:30:04 bs-hk-hk01 haproxy[4212]: [NOTICE] 046/173004 (4212) : New worker #2 (4217) forked
    Hint: Some lines were ellipsized, use -l to show in full.
    [root@bs-hk-hk01 ~]# systemctl enable haproxy
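    // With haproxy running and enabled on boot, the 8443 frontend behind the earlier
    // "connection refused" error can be checked directly; a sketch assuming 20.0.0.250
    // is the load-balancer VIP shown above (anonymous /healthz access depends on the
    // cluster's RBAC defaults):
    ss -lntp | grep 8443                          # confirm haproxy listens on 8443
    curl -k https://20.0.0.250:8443/healthz       # apiserver should answer "ok" via the VIP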
    
    [root@bs-k8s-master01 ~]# kubectl get nodes
    NAME              STATUS   ROLES    AGE    VERSION
    bs-k8s-master01   Ready    master   7d6h   v1.17.2
    bs-k8s-master02   Ready    master   7d6h   v1.17.2
    bs-k8s-master03   Ready    master   7d6h   v1.17.2
    bs-k8s-node01     Ready    <none>   7d6h   v1.17.2
    bs-k8s-node02     Ready    <none>   7d6h   v1.17.2
    bs-k8s-node03     Ready    <none>   7d6h   v1.17.2
    [root@bs-k8s-master01 ~]# kubectl get pods --all-namespaces 
    NAMESPACE     NAME                                        READY   STATUS             RESTARTS   AGE
    default       rbd-provisioner-75b85f85bd-8ftdm            1/1     Running            11         7d6h
    kube-system   calico-node-4jxbp                           1/1     Running            4          7d6h
    kube-system   calico-node-7t9cj                           1/1     Running            7          7d6h
    kube-system   calico-node-cchgl                           1/1     Running            14         7d6h
    kube-system   calico-node-czj76                           1/1     Running            6          7d6h
    kube-system   calico-node-lxb2s                           1/1     Running            14         7d6h
    kube-system   calico-node-nmg9t                           1/1     Running            8          7d6h
    kube-system   coredns-7f9c544f75-bwx9p                    1/1     Running            4          7d6h
    kube-system   coredns-7f9c544f75-q58mr                    1/1     Running            3          7d6h
    kube-system   dashboard-metrics-scraper-6b66849c9-qtwzx   1/1     Running            2          7d5h
    kube-system   etcd-bs-k8s-master01                        1/1     Running            17         7d6h
    kube-system   etcd-bs-k8s-master02                        1/1     Running            7          7d6h
    kube-system   etcd-bs-k8s-master03                        1/1     Running            32         7d6h
    kube-system   kube-apiserver-bs-k8s-master01              1/1     Running            28         7d6h
    kube-system   kube-apiserver-bs-k8s-master02              1/1     Running            15         7d6h
    kube-system   kube-apiserver-bs-k8s-master03              1/1     Running            62         7d6h
    kube-system   kube-controller-manager-bs-k8s-master01     1/1     Running            32         7d6h
    kube-system   kube-controller-manager-bs-k8s-master02     1/1     Running            27         7d6h
    kube-system   kube-controller-manager-bs-k8s-master03     1/1     Running            31         7d6h
    kube-system   kube-proxy-26ffm                            1/1     Running            3          7d6h
    kube-system   kube-proxy-298tr                            1/1     Running            5          7d6h
    kube-system   kube-proxy-hzsmb                            1/1     Running            3          7d6h
    kube-system   kube-proxy-jb4sq                            1/1     Running            4          7d6h
    kube-system   kube-proxy-pt94r                            1/1     Running            4          7d6h
    kube-system   kube-proxy-wljwv                            1/1     Running            4          7d6h
    kube-system   kube-scheduler-bs-k8s-master01              1/1     Running            32         7d6h
    kube-system   kube-scheduler-bs-k8s-master02              1/1     Running            21         7d6h
    kube-system   kube-scheduler-bs-k8s-master03              1/1     Running            31         7d6h
    kube-system   kubernetes-dashboard-887cbd9c6-j7ptq        1/1     Running            22         7d5h
    [root@bs-k8s-master01 harbor]# pwd
    /data/k8s/harbor
    [root@bs-k8s-master01 rbd]# kubectl apply -f ceph-harbor-namespace.yaml
    namespace/harbor created
    [root@bs-k8s-master01 rbd]# kubectl get namespaces
    NAME              STATUS   AGE
    default           Active   7d8h
    harbor            Active   16s
    kube-node-lease   Active   7d8h
    kube-public       Active   7d8h
    kube-system       Active   7d8h
    [root@bs-k8s-master01 rbd]# cat ceph-harbor-namespace.yaml 
    ##########################################################################
    #Author:                     zisefeizhu
    #QQ:                         2********0
    #Date:                       2020-02-16
    #FileName:                   ceph-harbor-namespace.yaml
    #URL:                        https://www.cnblogs.com/zisefeizhu/
    #Description:                The test script
    #Copyright (C):              2020 All rights reserved
    ###########################################################################
    apiVersion: v1
    kind: Namespace
    metadata:
      name: harbor
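    // The same manifest can be generated instead of written by hand; a sketch
    // (kubectl 1.17 uses the boolean --dry-run flag, newer releases use --dry-run=client):
    kubectl create namespace harbor --dry-run -o yaml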
    [root@bs-k8s-master01 rbd]# kubectl apply -f external-storage-rbd-provisioner.yaml
    serviceaccount/rbd-provisioner created
    clusterrole.rbac.authorization.k8s.io/rbd-provisioner unchanged
    clusterrolebinding.rbac.authorization.k8s.io/rbd-provisioner configured
    role.rbac.authorization.k8s.io/rbd-provisioner created
    rolebinding.rbac.authorization.k8s.io/rbd-provisioner created
    deployment.apps/rbd-provisioner created
    [root@bs-k8s-master01 rbd]# kubectl get pods -n harbor -o wide
    NAME                               READY   STATUS    RESTARTS   AGE     IP             NODE            NOMINATED NODE   READINESS GATES
    rbd-provisioner-75b85f85bd-dhnr4   1/1     Running   0          3m48s   10.209.46.84   bs-k8s-node01   <none>           <none>
    [root@bs-k8s-master01 rbd]# cat external-storage-rbd-provisioner.yaml
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: rbd-provisioner
      namespace: harbor
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: rbd-provisioner
    rules:
      - apiGroups: [""]
        resources: ["persistentvolumes"]
        verbs: ["get", "list", "watch", "create", "delete"]
      - apiGroups: [""]
        resources: ["persistentvolumeclaims"]
        verbs: ["get", "list", "watch", "update"]
      - apiGroups: ["storage.k8s.io"]
        resources: ["storageclasses"]
        verbs: ["get", "list", "watch"]
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "update", "patch"]
      - apiGroups: [""]
        resources: ["endpoints"]
        verbs: ["get", "list", "watch", "create", "update", "patch"]
      - apiGroups: [""]
        resources: ["services"]
        resourceNames: ["kube-dns"]
        verbs: ["list", "get"]
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: rbd-provisioner
    subjects:
      - kind: ServiceAccount
        name: rbd-provisioner
        namespace: harbor
    roleRef:
      kind: ClusterRole
      name: rbd-provisioner
      apiGroup: rbac.authorization.k8s.io
    
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: Role
    metadata:
      name: rbd-provisioner
      namespace: harbor
    rules:
    - apiGroups: [""]
      resources: ["secrets"]
      verbs: ["get"]
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      name: rbd-provisioner
      namespace: harbor
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: rbd-provisioner
    subjects:
    - kind: ServiceAccount
      name: rbd-provisioner
      namespace: harbor
    
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: rbd-provisioner
      namespace: harbor
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: rbd-provisioner
      strategy:
        type: Recreate
      template:
        metadata:
          labels:
            app: rbd-provisioner
        spec:
          containers:
          - name: rbd-provisioner
            image: "quay.io/external_storage/rbd-provisioner:latest"
            env:
            - name: PROVISIONER_NAME
              value: ceph.com/rbd
          serviceAccount: rbd-provisioner
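    // (The deprecated serviceAccount field still works here; serviceAccountName is the
    // current spelling.) Before creating the StorageClass it is worth confirming that the
    // provisioner is healthy and registered under the PROVISIONER_NAME (ceph.com/rbd)
    // the class will reference; a hedged sketch, not part of the original session:
    kubectl -n harbor rollout status deployment/rbd-provisioner
    kubectl -n harbor logs deployment/rbd-provisioner | tail -n 20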
    [root@bs-k8s-master01 harbor]# kubectl apply -f ceph-harbor-secret.yaml
    secret/ceph-harbor-admin-secret created
    secret/ceph-harbor-harbor-secret created
    [root@bs-k8s-master01 harbor]# kubectl get secret -n harbor
    NAME                          TYPE                                  DATA   AGE
    ceph-harbor-admin-secret      kubernetes.io/rbd                     1      23s
    ceph-harbor-harbor-secret     kubernetes.io/rbd                     1      23s
    default-token-8k9gs           kubernetes.io/service-account-token   3      8m49s
    rbd-provisioner-token-mhl29   kubernetes.io/service-account-token   3      5m24s
    [root@bs-k8s-master01 harbor]# cat ceph-harbor-secret.yaml 
    ##########################################################################
    #Author:                     zisefeizhu
    #QQ:                         2********0
    #Date:                       2020-02-16
    #FileName:                   ceph-harbor-secret.yaml
    #URL:                        https://www.cnblogs.com/zisefeizhu/
    #Description:                The test script
    #Copyright (C):              2020 All rights reserved
    ###########################################################################
    apiVersion: v1
    kind: Secret
    metadata:
      name: ceph-harbor-admin-secret
      namespace: harbor
    data:
      key: QVFDNmNVSmV2eU8yRnhBQVBxYzE5Mm5PelNnZk5acmg5aEFQYXc9PQ==
    type: kubernetes.io/rbd
    ---
    apiVersion: v1
    kind: Secret
    metadata:
      name: ceph-harbor-harbor-secret
      namespace: harbor
    data:
      key: QVFEb0NrbGVuNmU0TnhBQVZYbXkvUEcrUjVpSDhmTnpNaGs2Smc9PQ==
    type: kubernetes.io/rbd
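    // Both key values are simply the base64-encoded ceph auth get-key output captured
    // earlier. The same secrets can be created without hand-encoding, since kubectl
    // base64-encodes literal values itself; a sketch assuming the ceph CLI is reachable
    // from the shell running kubectl:
    kubectl -n harbor create secret generic ceph-harbor-admin-secret \
      --type=kubernetes.io/rbd \
      --from-literal=key="$(ceph auth get-key client.admin)"
    kubectl -n harbor create secret generic ceph-harbor-harbor-secret \
      --type=kubernetes.io/rbd \
      --from-literal=key="$(ceph auth get-key client.harbor)"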
    [root@bs-k8s-master01 harbor]# kubectl apply -f ceph-harbor-storageclass.yaml
    storageclass.storage.k8s.io/ceph-harbor created
    [root@bs-k8s-master01 harbor]# kubectl get sc
    NAME          PROVISIONER    RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
    ceph-harbor   ceph.com/rbd   Retain          Immediate           false                  11s
    ceph-rbd      ceph.com/rbd   Retain          Immediate           false                  25h
    [root@bs-k8s-master01 harbor]# cat ceph-harbor-storageclass.yaml
    ##########################################################################
    #Author:                     zisefeizhu
    #QQ:                         2********0
    #Date:                       2020-02-16
    #FileName:                   ceph-harbor-storageclass.yaml
    #URL:                        https://www.cnblogs.com/zisefeizhu/
    #Description:                The test script
    #Copyright (C):              2020 All rights reserved
    ###########################################################################
    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: ceph-harbor
      annotations:
        storageclass.kubernetes.io/is-default-class: "false"
    provisioner: ceph.com/rbd
    reclaimPolicy: Retain
    parameters:
      monitors: 20.0.0.206:6789,20.0.0.207:6789,20.0.0.208:6789
      adminId: admin
      adminSecretName: ceph-harbor-admin-secret
      adminSecretNamespace: harbor
      pool: harbor
      fsType: xfs
      userId: harbor
      userSecretName: ceph-harbor-harbor-secret
      imageFormat: "2"
      imageFeatures: "layering"
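    // In this class, provisioner must match the PROVISIONER_NAME env var of the
    // rbd-provisioner deployment (ceph.com/rbd), otherwise PVCs stay Pending. The
    // adminId/adminSecretName pair is what the provisioner uses to create images; the
    // userId/userSecretName pair is what nodes use to map them. To re-check the
    // accepted parameters:
    kubectl describe sc ceph-harbor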
    [root@bs-k8s-master01 harbor]# kubectl apply -f ceph-harbor-pvc.yaml
    persistentvolumeclaim/pvc-ceph-harbor created
    [root@bs-k8s-master01 harbor]# kubectl get pv -n harbor
    NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                    STORAGECLASS   REASON   AGE
    pvc-494a130d-018c-4be3-9b31-e951cc4367a5   20Gi       RWO            Retain           Bound    default/wp-pv-claim      ceph-rbd                23h
    pvc-4df6a301-c9f3-4694-8271-d1d0184c00aa   1Gi        RWO            Retain           Bound    harbor/pvc-ceph-harbor   ceph-harbor             6s
    pvc-8ffa3182-a2f6-47d9-a71d-ff8e8b379a16   1Gi        RWO            Retain           Bound    default/ceph-pvc         ceph-rbd                26h
    pvc-ac7d3a09-123e-4614-886c-cded8822a078   20Gi       RWO            Retain           Bound    default/mysql-pv-claim   ceph-rbd                23h
    [root@bs-k8s-master01 harbor]# kubectl get pvc
    NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
    ceph-pvc         Bound    pvc-8ffa3182-a2f6-47d9-a71d-ff8e8b379a16   1Gi        RWO            ceph-rbd       26h
    mysql-pv-claim   Bound    pvc-ac7d3a09-123e-4614-886c-cded8822a078   20Gi       RWO            ceph-rbd       23h
    wp-pv-claim      Bound    pvc-494a130d-018c-4be3-9b31-e951cc4367a5   20Gi       RWO            ceph-rbd       23h
    [root@bs-k8s-master01 harbor]# kubectl get pvc -n harbor
    NAME              STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
    pvc-ceph-harbor   Bound    pvc-4df6a301-c9f3-4694-8271-d1d0184c00aa   1Gi        RWO            ceph-harbor    24s
    [root@bs-k8s-master01 harbor]# cat ceph-harbor-pvc.yaml 
    ##########################################################################
    #Author:                     zisefeizhu
    #QQ:                         2********0
    #Date:                       2020-02-16
    #FileName:                   ceph-harbor-pvc.yaml
    #URL:                        https://www.cnblogs.com/zisefeizhu/
    #Description:                The test script
    #Copyright (C):              2020 All rights reserved
    ###########################################################################
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: pvc-ceph-harbor
      namespace: harbor
    spec:
      storageClassName: ceph-harbor
      accessModes:
      - ReadWriteOnce
      resources:
        requests: 
          storage: 1Gi
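    // Once the claim binds, a matching RBD image should exist in the harbor pool.
    // Note that PVs are cluster-scoped, so the -n flag on kubectl get pv above is
    // accepted but has no effect. A verification sketch, run on the ceph node:
    kubectl -n harbor get pvc pvc-ceph-harbor
    rbd ls -p harbor                                   # images created by the provisioner
    rbd info harbor/$(rbd ls -p harbor | head -n 1)    # inspect the first listed image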
    
    // At this point, dynamic PV provisioning in the harbor namespace is complete