Kubernetes is hooked up to Ceph storage, but the PVC stays in Pending
Reference documentation: https://docs.ceph.com/en/reef/rbd/rbd-kubernetes/
Kubernetes version: v1.32.3
Ceph version: 18.2.7
The Ceph cluster was deployed with cephadm; its nodes are 192.168.0.31, 192.168.0.32 and 192.168.0.33.
The Kubernetes cluster nodes are 192.168.0.101, 192.168.0.102 and 192.168.0.103.
The environment is a local VMware lab with no network policies in place; there are also no NetworkPolicy resources in the Kubernetes cluster.
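For reference, basic TCP connectivity from the Kubernetes nodes to the Ceph monitors can be spot-checked as below. This is a generic sketch, not output from this environment; the monitor IPs and the msgr v1/v2 ports 6789/3300 are taken from the cluster details further down.

# run on each Kubernetes node, e.g. 192.168.0.101
for mon in 192.168.0.31 192.168.0.32 192.168.0.33; do
  nc -zv -w 3 "$mon" 6789   # msgr v1 port, the one listed in csi-config-map.yaml
  nc -zv -w 3 "$mon" 3300   # msgr v2 port, as shown in "ceph mon dump"
done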
PVC information:
root@knode1:~/git-project/tmp# kubectl get pvc
NAME            STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
raw-block-pvc   Pending                                      csi-rbd-sc     <unset>                 20m
root@knode1:~/git-project/tmp# kubectl describe pvc raw-block-pvc
Name: raw-block-pvc
Namespace: default
StorageClass: csi-rbd-sc
Status: Pending
Volume:
Labels: <none>
Annotations: volume.beta.kubernetes.io/storage-provisioner: rbd.csi.ceph.com
volume.kubernetes.io/storage-provisioner: rbd.csi.ceph.com
Finalizers: [kubernetes.io/pvc-protection]
Capacity:
Access Modes:
VolumeMode: Block
Used By: <none>
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning ProvisioningFailed 5m3s (x2 over 10m) rbd.csi.ceph.com_csi-rbdplugin-provisioner-db44f6c49-gml52_0bf3a6b7-337f-4072-b8ee-dfa7d18eb0d5 failed to provision volume with StorageClass "csi-rbd-sc": rpc error: code = DeadlineExceeded desc = stream terminated by RST_STREAM with error code: CANCEL
Normal ExternalProvisioning 2m29s (x62 over 17m) persistentvolume-controller Waiting for a volume to be created either by the external provisioner 'rbd.csi.ceph.com' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered.
Normal Provisioning 3s (x8 over 17m) rbd.csi.ceph.com_csi-rbdplugin-provisioner-db44f6c49-gml52_0bf3a6b7-337f-4072-b8ee-dfa7d18eb0d5 External provisioner is provisioning volume for claim "default/raw-block-pvc"
Warning ProvisioningFailed 3s (x5 over 15m) rbd.csi.ceph.com_csi-rbdplugin-provisioner-db44f6c49-gml52_0bf3a6b7-337f-4072-b8ee-dfa7d18eb0d5 failed to provision volume with StorageClass "csi-rbd-sc": rpc error: code = DeadlineExceeded desc = context deadline exceeded
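The events only surface the gRPC DeadlineExceeded; the underlying error from Ceph would normally be visible in the provisioner pod logs. A log check might look like the following (the container names csi-provisioner and csi-rbdplugin are assumptions based on the standard ceph-csi deployment manifests; the pod name is taken from the events above):

kubectl logs csi-rbdplugin-provisioner-db44f6c49-gml52 -c csi-provisioner --tail=50
kubectl logs csi-rbdplugin-provisioner-db44f6c49-gml52 -c csi-rbdplugin --tail=50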
ConfigMaps and other related manifests:
root@knode1:~/git-project/tmp# cat csi-config-map.yaml
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "6342545a-4a92-11f0-be56-000c29c48aad",
        "monitors": [
          "192.168.0.31:6789",
          "192.168.0.32:6789",
          "192.168.0.33:6789"
        ]
      }
    ]
metadata:
  name: ceph-csi-config
root@knode1:~/git-project/tmp# cat ceph-config-map.yaml
---
apiVersion: v1
kind: ConfigMap
data:
  ceph.conf: |
    [global]
    auth_cluster_required = cephx
    auth_service_required = cephx
    auth_client_required = cephx
  # keyring is a required key and its value should be empty
  keyring: |
metadata:
  name: ceph-config
root@knode1:~/git-project/tmp# cat csi-rbd-secret.yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
  namespace: default
stringData:
  userID: kubernetes
  userKey: AQDVIVFoUd8IJBAA+D4cR7CL5ICxagwWMvCV6Q==
root@knode1:~/git-project/tmp# cat csi-rbd-sc.yaml
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: 6342545a-4a92-11f0-be56-000c29c48aad
  pool: kubernetes
  imageFeatures: layering
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: default
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - discard
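The StorageClass references pool `kubernetes`; per the documentation linked above, that pool has to exist and be initialized for RBD. A quick check on a Ceph node could look like this (pool name taken from the StorageClass; not run here):

ceph osd pool ls detail | grep kubernetes   # pool should exist with application "rbd"
rbd pool init kubernetes                    # initializes the pool for RBD, as in the linked docs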
root@knode1:~/git-project/tmp# cat csi-kms-config-map.yaml
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    {}
metadata:
  name: ceph-csi-encryption-kms-config
root@knode1:~/git-project/tmp#
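For completeness, the following would confirm that the manifests above were actually applied and that the ceph-csi pods are running (all objects are assumed to live in the default namespace, as in the manifests):

kubectl get configmap ceph-csi-config ceph-config ceph-csi-encryption-kms-config
kubectl get secret csi-rbd-secret
kubectl get pods -o wide | grep csi-rbd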
Ceph cluster information:
root@ceph1:~# ceph -s
  cluster:
    id:     6342545a-4a92-11f0-be56-000c29c48aad
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph3,ceph2 (age 2h)
    mgr: ceph3.nryehs(active, since 2h), standbys: ceph1.chyiik
    osd: 6 osds: 6 up (since 2h), 6 in (since 2h)

  data:
    pools:   2 pools, 33 pgs
    objects: 11 objects, 449 KiB
    usage:   253 MiB used, 60 GiB / 60 GiB avail
    pgs:     33 active+clean
root@ceph1:~# ceph mon dump
epoch 3
fsid 6342545a-4a92-11f0-be56-000c29c48aad
last_changed 2025-06-16T10:14:11.100252+0000
created 2025-06-16T09:15:05.890378+0000
min_mon_release 18 (reef)
election_strategy: 1
0: [v2:192.168.0.31:3300/0,v1:192.168.0.31:6789/0] mon.ceph1
1: [v2:192.168.0.33:3300/0,v1:192.168.0.33:6789/0] mon.ceph3
2: [v2:192.168.0.32:3300/0,v1:192.168.0.32:6789/0] mon.ceph2
dumped monmap epoch 3
root@ceph1:~# ceph auth get client.kubernetes
[client.kubernetes]
key = AQDVIVFoUd8IJBAA+D4cR7CL5ICxagwWMvCV6Q==
caps mgr = "profile rbd pool=kubernetes"
caps mon = "profile rbd"
caps osd = "profile rbd pool=kubernetes"
root@ceph1:~#
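One further sanity check that could isolate the problem from the CSI driver is creating an RBD image directly from a Ceph node with the client.kubernetes credentials. The image name csi-test and the keyring path below are arbitrary examples, not part of this setup:

ceph auth get client.kubernetes -o /etc/ceph/ceph.client.kubernetes.keyring
rbd create kubernetes/csi-test --size 128 --id kubernetes --keyring /etc/ceph/ceph.client.kubernetes.keyring
rbd ls kubernetes --id kubernetes --keyring /etc/ceph/ceph.client.kubernetes.keyring
rbd rm kubernetes/csi-test --id kubernetes --keyring /etc/ceph/ceph.client.kubernetes.keyring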
Any help would be greatly appreciated.
