# https://github.com/rook/rook/blob/v1.19.1/deploy/charts/rook-ceph-cluster/values.yaml
toolbox:
  enabled: true
cephClusterSpec:
  mon:
    count: 1
    allowMultiplePerNode: true
  mgr:
    count: 1
    allowMultiplePerNode: true
  dashboard:
    port: 8080
    ssl: false
  logCollector:
    enabled: false
  resources:
    mgr:
      limits:
        cpu: 0
        memory: "1.5Gi"
      requests:
        cpu: 0
        memory: "512Mi"
    mon:
      limits:
        cpu: 0
        memory: "1Gi"
      requests:
        cpu: 0
        memory: "500Mi"
    osd:
      limits:
        cpu: 0
        memory: "4Gi"
      requests:
        cpu: 0
        memory: "1Gi"
    prepareosd:
      # limits: It is not recommended to set limits on the OSD prepare job
      # since it's a one-time burst for memory that must be allowed to
      # complete without an OOM kill. Note however that if a k8s
      # limitRange guardrail is defined external to Rook, the lack of
      # a limit here may result in a sync failure, in which case a
      # limit should be added. 1200Mi may suffice for up to 15Ti
      # OSDs ; for larger devices 2Gi may be required.
      # cf. https://github.com/rook/rook/pull/11103
      requests:
        cpu: 0
        memory: "500Mi"
    mgr-sidecar:
      limits:
        cpu: 0
        memory: "100Mi"
      requests:
        cpu: 0
        memory: "40Mi"
    crashcollector:
      limits:
        cpu: 0
        memory: "60Mi"
      requests:
        cpu: 0
        memory: "60Mi"
    logcollector:
      limits:
        cpu: 0
        memory: "1Gi"
      requests:
        cpu: 0
        memory: "100Mi"
    cleanup:
      limits:
        cpu: 0
        memory: "1Gi"
      requests:
        cpu: 0
        memory: "100Mi"
    exporter:
      limits:
        cpu: 0
        memory: "128Mi"
      requests:
        cpu: 0
        memory: "50Mi"
cephBlockPools:
  - name: ceph-blockpool
    spec:
      failureDomain: osd
      erasureCoded:
        dataChunks: 2
        codingChunks: 1
    storageClass:
      enabled: true
      name: ceph-block
      isDefault: true
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      parameters:
        # These secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "rook-ceph"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "rook-ceph"
        csi.storage.k8s.io/controller-publish-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-publish-secret-namespace: "rook-ceph"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
        csi.storage.k8s.io/node-stage-secret-namespace: "rook-ceph"
        csi.storage.k8s.io/fstype: ext4
cephObjectStores:
  - name: ceph-objectstore
    spec:
      metadataPool:
        failureDomain: osd
        replicated:
          size: 3
      dataPool:
        failureDomain: osd
        erasureCoded:
          dataChunks: 2
          codingChunks: 1
      preservePoolsOnDelete: true
      gateway:
        port: 80
        resources: {}
        instances: 1
        priorityClassName: system-cluster-critical
    storageClass:
      enabled: false
    ingress:
      enabled: false
    route:
      enabled: false
cephFileSystems: []