```bash
# Install Ceph
sudo apt-get install ceph ceph-common
```
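To confirm the installation, check the installed release; once a cluster has been bootstrapped, `ceph -s` reports its health:

```bash
ceph --version   # installed Ceph release
ceph -s          # cluster health summary (requires a bootstrapped cluster)
```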
```ini
# OSD tuning (ceph.conf)
[osd]
osd_memory_target = 4G            # adjust to the host's memory
bluestore_cache_size_hdd = 1G
bluestore_cache_size_ssd = 4G
osd_op_num_threads_per_shard = 4
osd_op_num_shards = 8
osd_recovery_max_active = 10
osd_max_backfills = 4
```
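On Ceph Mimic and later, the same values can also be pushed to a running cluster through the monitor config store instead of editing ceph.conf; a brief sketch:

```bash
# Apply OSD tuning at runtime via the centralized config store
ceph config set osd osd_memory_target 4G
ceph config set osd osd_max_backfills 4
# Verify the value a specific daemon is actually using
ceph config show osd.0 osd_memory_target
```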
```bash
# GlusterFS performance tuning
gluster volume set <VOLNAME> performance.cache-size 2GB
gluster volume set <VOLNAME> performance.io-thread-count 16
gluster volume set <VOLNAME> performance.read-ahead-page-count 16
gluster volume set <VOLNAME> performance.write-behind-window-size 4MB
gluster volume set <VOLNAME> network.ping-timeout 30
```
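To confirm which options are in effect on a volume (`gv0` is a placeholder name):

```bash
# Inspect current option values and volume status
gluster volume get gv0 all | grep -E 'performance|ping-timeout'
gluster volume info gv0
```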
```bash
# Replica count and minimum writable replicas per pool
ceph osd pool set <pool-name> size 3
ceph osd pool set <pool-name> min_size 2
```
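These commands assume the pool already exists; creating one first (the pool name and PG count here are illustrative):

```bash
# Create a replicated pool and tag it for RBD use
ceph osd pool create rbd 128 128 replicated
ceph osd pool application enable rbd rbd
```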
```bash
# Ceph failure domain: replicate across hosts
ceph osd crush rule create-replicated <rule-name> default host
```
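A new CRUSH rule takes effect only after a pool is pointed at it:

```bash
# Bind the rule to a pool
ceph osd pool set <pool-name> crush_rule <rule-name>
```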
```yaml
# Example Ceph RBD StorageClass
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-rbd
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: <ceph-cluster-id>
  pool: rbd
  imageFeatures: layering
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  # Mounting and online expansion also require these secrets:
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
  csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: default
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - discard
```
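A PersistentVolumeClaim that provisions through this class could look like the following (the claim name and size are illustrative):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ceph-rbd-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ceph-rbd
  resources:
    requests:
      storage: 10Gi
```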
```bash
# Add GlusterFS-backed storage (OpenShift)
oc create -f glusterfs-storage-class.yaml
```
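The referenced glusterfs-storage-class.yaml is not shown here; a minimal sketch using the in-tree kubernetes.io/glusterfs provisioner with a heketi REST endpoint (the resturl and secret names are placeholders for your environment):

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs-storage
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://heketi.example.com:8080"  # placeholder heketi endpoint
  restuser: admin
  secretName: heketi-secret
  secretNamespace: default
```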
```bash
# Ceph authentication: create a client for Kubernetes
ceph auth get-or-create client.k8s mon 'allow r' osd 'allow rwx pool=rbd'
```
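The generated key can be read back for use as `userKey` in the secret below:

```bash
ceph auth get-key client.k8s
```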
```bash
# Create the Kubernetes secret consumed by the RBD CSI driver
kubectl create secret generic csi-rbd-secret \
  --from-literal=userID=k8s \
  --from-literal=userKey=<key> \
  --type=rbd.csi.ceph.com
```
Finally, plan for data protection:

- Periodic snapshots, e.g. `rbd snap create <pool>/<image>@<snap>` (see the sketch after this list)
- An off-site backup strategy
- Regular restore testing
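As noted above, a minimal cron-able sketch that takes a dated snapshot and exports it for off-site copying (pool, image, and destination are placeholders):

```bash
#!/usr/bin/env bash
set -euo pipefail

POOL=rbd            # placeholder: pool holding the image
IMAGE=myimage       # placeholder: RBD image to protect
DEST=/backups       # placeholder: off-site staging directory
SNAP="backup-$(date +%Y%m%d)"

# Take a point-in-time snapshot, then export it to a file
rbd snap create "$POOL/$IMAGE@$SNAP"
rbd export "$POOL/$IMAGE@$SNAP" "$DEST/$IMAGE-$SNAP.img"
# For incrementals, rbd export-diff against the previous snapshot can be used instead
```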
With the configuration and tuning above, you can build a high-performance, highly available container storage solution on Linux that meets the needs of a production environment.