# From https://raw.githubusercontent.com/rook/rook/v1.18.9/deploy/charts/rook-ceph-cluster/values.yaml

# Installs a debugging toolbox deployment
toolbox:
  # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
  enabled: true
  # -- Toolbox image, defaults to the image used by the Ceph cluster

monitoring:
  # -- Enable Prometheus integration, will also create necessary RBAC rules to allow Operator to create ServiceMonitors.
  # Monitoring requires Prometheus to be pre-installed
  enabled: true
  # -- Whether to create the Prometheus rules for Ceph alerts
  createPrometheusRules: true
  # -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
  # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
  # deployed) to set rulesNamespaceOverride for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.

# All values below are taken from the CephCluster CRD
# -- Cluster configuration.
# @default -- See [below](#ceph-cluster-spec)
cephClusterSpec:
  dashboard:
    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
    # urlPrefix: /ceph-dashboard
    # serve the dashboard at the given port.
    port: 8080
    # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
    # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
    ssl: false

  # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/Cluster/ceph-cluster-crd.md#network-configuration-settings
  network:
    # enable host networking
    provider: host

  # enable log collector, daemons will log on files and rotate
  logCollector:
    enabled: false

  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  # tolerate taints with a key of 'storage-node'.
  placement:
    all:
      # TODO are snapshots easier if mgr/mon/mds run on a compute node?
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: storage-node
                  operator: In
                  values:
                    - "true"
      tolerations:
        - key: storage-node
          operator: Equal
          value: "true"
          effect: PreferNoSchedule

  resources:
    mgr:
      requests:
        cpu: 0
        memory: 3Gi
      limits:
        cpu: 0
        memory: 0
    mon:
      requests:
        cpu: 0
        memory: 1Gi
      limits:
        cpu: 0
        memory: 0
    osd:
      requests:
        cpu: 0
        memory: 0
      limits:
        cpu: 0
        # Ensure osd_memory_target reflects this
        # https://docs.ceph.com/en/latest/start/hardware-recommendations/#ram
        memory: "4Gi"
    prepareosd:
      # limits: It is not recommended to set limits on the OSD prepare job
      # since it's a one-time burst for memory that must be allowed to
      # complete without an OOM kill. Note however that if a k8s
      # limitRange guardrail is defined external to Rook, the lack of
      # a limit here may result in a sync failure, in which case a
      # limit should be added. 1200Mi may suffice for up to 15Ti
      # OSDs ; for larger devices 2Gi may be required.
      # cf. https://github.com/rook/rook/pull/11103
      requests:
        cpu: 0
        memory: "500Mi"
    mgr-sidecar:
      limits:
        cpu: 0
        memory: "100Mi"
      requests:
        cpu: 0
        memory: "40Mi"
    crashcollector:
      limits:
        cpu: 0
        memory: "60Mi"
      requests:
        cpu: 0
        memory: "60Mi"
    logcollector:
      limits:
        cpu: 0
        memory: "1Gi"
      requests:
        cpu: 0
        memory: "100Mi"
    cleanup:
      limits:
        cpu: 0
        memory: "1Gi"
      requests:
        cpu: 0
        memory: "100Mi"
    exporter:
      limits:
        cpu: 0
        memory: "128Mi"
      requests:
        cpu: 0
        memory: "50Mi"

# -- A list of CephBlockPool configurations to deploy
# @default -- See [below](#ceph-block-pools)
cephBlockPools:
  - name: ceph-blockpool
    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
    spec:
      failureDomain: host
      replicated:
        size: 3
      deviceClass: hdd
      # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
      # For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics
      enableRBDStats: true
    storageClass:
      enabled: true
      name: ceph-block
      isDefault: true
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      mountOptions: []
      # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
      allowedTopologies: []
      # - matchLabelExpressions:
      #     - key: rook-ceph-role
      #       values:
      #         - storage-node
      # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
      parameters:
        # (optional) mapOptions is a comma-separated list of map options.
        # For krbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
        # For nbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
        # mapOptions: lock_on_read,queue_depth=1024

        # (optional) unmapOptions is a comma-separated list of unmap options.
        # For krbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
        # For nbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
        # unmapOptions: force

        # RBD image format. Defaults to "2".
        imageFormat: "2"
        # RBD image features, equivalent to OR'd bitfield value: 63
        # Available for imageFormat: "2". Older releases of CSI RBD
        # support only the `layering` feature. The Linux kernel (KRBD) supports the
        # full feature complement as of 5.4
        imageFeatures: layering

        # These secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        # Specify the filesystem type of the volume. If not specified, csi-provisioner
        # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
        # in hyperconverged settings where the volume is mounted on the same node as the osds.
        csi.storage.k8s.io/fstype: ext4
  - name: ceph-blockpool-ssd
    spec:
      failureDomain: host
      replicated:
        size: 3
      deviceClass: ssd
      enableRBDStats: true
    storageClass:
      enabled: true
      name: ceph-block-ssd
      isDefault: false
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      mountOptions: []
      allowedTopologies: []
      parameters:
        imageFormat: "2"
        imageFeatures: layering
        # These secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/fstype: ext4

# -- A list of CephFileSystem configurations to deploy
# @default -- See [below](#ceph-file-systems)
cephFileSystems: []

# -- Settings for the filesystem snapshot class
# @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
cephFileSystemVolumeSnapshotClass:
  enabled: true
  name: ceph-filesystem
  isDefault: true
  deletionPolicy: Delete
  annotations: {}
  labels:
    velero.io/csi-volumesnapshot-class: "true"
  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
  parameters: {}

# -- Settings for the block pool snapshot class
# @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
cephBlockPoolsVolumeSnapshotClass:
  enabled: true
  name: ceph-block
  isDefault: false
  deletionPolicy: Delete
  annotations: {}
  labels:
    velero.io/csi-volumesnapshot-class: "true"
  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
  parameters: {}

# -- A list of CephObjectStore configurations to deploy
# @default -- See [below](#ceph-object-stores)
cephObjectStores: []