# https://github.com/rook/rook/blob/v1.19.1/deploy/charts/rook-ceph-cluster/values.yaml
toolbox:
  enabled: true
cephClusterSpec:
  mon:
    count: 1
    allowMultiplePerNode: true
  mgr:
    count: 1
    allowMultiplePerNode: true
  dashboard:
    port: 8080
    ssl: false
  logCollector:
    enabled: false
  resources:
    mgr:
      limits:
        cpu: 0
        memory: "1.5Gi"
      requests:
        cpu: 0
        memory: "512Mi"
    mon:
      limits:
        cpu: 0
        memory: "1Gi"
      requests:
        cpu: 0
        memory: "500Mi"
    osd:
      limits:
        cpu: 0
        memory: "4Gi"
      requests:
        cpu: 0
        memory: "1Gi"
    prepareosd:
      # limits: It is not recommended to set limits on the OSD prepare job,
      # since it's a one-time memory burst that must be allowed to complete
      # without an OOM kill. Note, however, that if a k8s limitRange
      # guardrail is defined external to Rook, the lack of a limit here may
      # result in a sync failure, in which case a limit should be added.
      # 1200Mi may suffice for OSDs up to 15Ti; for larger devices 2Gi may
      # be required. cf. https://github.com/rook/rook/pull/11103
      requests:
        cpu: 0
        memory: "500Mi"
    mgr-sidecar:
      limits:
        cpu: 0
        memory: "100Mi"
      requests:
        cpu: 0
        memory: "40Mi"
    crashcollector:
      limits:
        cpu: 0
        memory: "60Mi"
      requests:
        cpu: 0
        memory: "60Mi"
    logcollector:
      limits:
        cpu: 0
        memory: "1Gi"
      requests:
        cpu: 0
        memory: "100Mi"
    cleanup:
      limits:
        cpu: 0
        memory: "1Gi"
      requests:
        cpu: 0
        memory: "100Mi"
    exporter:
      limits:
        cpu: 0
        memory: "128Mi"
      requests:
        cpu: 0
        memory: "50Mi"
cephBlockPools:
  - name: ceph-blockpool
    spec:
      failureDomain: osd
      erasureCoded:
        dataChunks: 2
        codingChunks: 1
    storageClass:
      enabled: true
      name: ceph-block
      isDefault: true
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      parameters:
        # These secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "rook-ceph"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "rook-ceph"
        csi.storage.k8s.io/controller-publish-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-publish-secret-namespace: "rook-ceph"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
        csi.storage.k8s.io/node-stage-secret-namespace: "rook-ceph"
        csi.storage.k8s.io/fstype: ext4
cephObjectStores:
  - name: ceph-objectstore
    spec:
      metadataPool:
        failureDomain: osd
        replicated:
          size: 3
      dataPool:
        failureDomain: osd
        erasureCoded:
          dataChunks: 2
          codingChunks: 1
      preservePoolsOnDelete: true
      gateway:
        port: 80
        resources: {}
        instances: 1
        priorityClassName: system-cluster-critical
    storageClass:
      enabled: false
    ingress:
      enabled: false
    route:
      enabled: false
cephFileSystems: []
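# A minimal sketch of applying this file, assuming it is saved as
# values.yaml, the chart repo is registered as "rook-release", and the
# rook-ceph operator chart is already installed in the cluster:
#
#   helm repo add rook-release https://charts.rook.io/release
#   helm upgrade --install rook-ceph-cluster rook-release/rook-ceph-cluster \
#     --namespace rook-ceph -f values.yaml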