---
# From https://raw.githubusercontent.com/rook/rook/v1.18.9/deploy/charts/rook-ceph-cluster/values.yaml
# Installs a debugging toolbox deployment
toolbox:
  # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
  enabled: true
  # -- Toolbox image, defaults to the image used by the Ceph cluster
monitoring:
  # -- Enable Prometheus integration, will also create necessary RBAC rules to allow Operator to create ServiceMonitors.
  # Monitoring requires Prometheus to be pre-installed
  enabled: true
  # -- Whether to create the Prometheus rules for Ceph alerts
  createPrometheusRules: true
  # -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
  # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
  # deployed) to set rulesNamespaceOverride for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
# All values below are taken from the CephCluster CRD
# -- Cluster configuration.
# @default -- See [below](#ceph-cluster-spec)
cephClusterSpec:
  dashboard:
    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
    # urlPrefix: /ceph-dashboard
    # serve the dashboard at the given port.
    port: 8080
    # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
    # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
    ssl: false
  # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/Cluster/ceph-cluster-crd.md#network-configuration-settings
  network:
    # enable host networking
    provider: host
  # enable log collector, daemons will log on files and rotate
  logCollector:
    enabled: false
  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  # tolerate taints with a key of 'storage-node'.
  placement:
    all:
      nodeAffinity:
        # requiredDuringSchedulingIgnoredDuringExecution:
        #   nodeSelectorTerms:
        #     - matchExpressions:
        #         - key: storage-node
        #           operator: In
        #           values:
        #             - "true"
        # TODO are snapshots easier if mgr/mon/mds run on a compute node?
        # TODO does running on a compute node change cephfs mounting? I think it requires mon's port 6789
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1
            preference:
              matchExpressions:
                - key: storage-node
                  operator: DoesNotExist
      tolerations:
        - key: storage-node
          operator: Equal
          value: "true"
          effect: PreferNoSchedule
  resources:
    mgr:
      requests:
        cpu: 0
        memory: 0
      limits:
        cpu: 0
        memory: "2.5Gi"
    mon:
      requests:
        cpu: 0
        memory: 0
      limits:
        cpu: 0
        memory: "1Gi"
    osd:
      requests:
        cpu: 0
        memory: 0
      limits:
        cpu: 0
        # Ensure osd_memory_target reflects this
        # https://docs.ceph.com/en/latest/start/hardware-recommendations/#ram
        memory: "4Gi"
    prepareosd:
      # limits: It is not recommended to set limits on the OSD prepare job
      # since it's a one-time burst for memory that must be allowed to
      # complete without an OOM kill. Note however that if a k8s
      # limitRange guardrail is defined external to Rook, the lack of
      # a limit here may result in a sync failure, in which case a
      # limit should be added. 1200Mi may suffice for up to 15Ti
      # OSDs ; for larger devices 2Gi may be required.
      # cf. https://github.com/rook/rook/pull/11103
      requests:
        cpu: 0
        memory: "500Mi"
    mgr-sidecar:
      limits:
        cpu: 0
        memory: "100Mi"
      requests:
        cpu: 0
        memory: "40Mi"
    crashcollector:
      limits:
        cpu: 0
        memory: "60Mi"
      requests:
        cpu: 0
        memory: "60Mi"
    logcollector:
      limits:
        cpu: 0
        memory: "1Gi"
      requests:
        cpu: 0
        memory: "100Mi"
    cleanup:
      limits:
        cpu: 0
        memory: "1Gi"
      requests:
        cpu: 0
        memory: "100Mi"
    exporter:
      limits:
        cpu: 0
        memory: "128Mi"
      requests:
        cpu: 0
        memory: "50Mi"
  # https://github.com/rook/rook/blob/f244f47eeacc42c4e755b9bf21e88e0431ba4cac/Documentation/CRDs/Cluster/ceph-cluster-crd.md#ceph-config
  cephConfig:
    global:
      # https://silvenga.com/posts/ceph-and-deep-scrubs/
      # Schedule the next normal scrub in between 1-7 days.
      osd_scrub_min_interval: "86400"  # 1 day
      osd_scrub_interval_randomize_ratio: "7"  # 700%
      # No more delays, normal scrub after 14 days.
      osd_scrub_max_interval: "1209600"  # 14 days
      # No more waiting on a random 15% chance to deep-scrub, just deep-scrub.
      osd_deep_scrub_interval: "2419200"  # 28 days
    "osd.*":
      osd_memory_target_autotune: "false"
      osd_memory_target: "4294967296"
      osd_scrub_disable_reservation_queuing: "true"  # trying to help with OOMs https://tracker.ceph.com/issues/69078
# -- A list of CephBlockPool configurations to deploy
# @default -- See [below](#ceph-block-pools)
cephBlockPools:
  - name: ceph-blockpool
    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
    spec:
      failureDomain: host
      replicated:
        size: 3
      deviceClass: hdd
      # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
      # For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics
      enableRBDStats: true
    storageClass:
      enabled: true
      name: ceph-block
      isDefault: true
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      mountOptions: []
      # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
      allowedTopologies: []
      # - matchLabelExpressions:
      #     - key: rook-ceph-role
      #       values:
      #         - storage-node
      # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
      parameters:
        # (optional) mapOptions is a comma-separated list of map options.
        # For krbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
        # For nbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
        # mapOptions: lock_on_read,queue_depth=1024
        # (optional) unmapOptions is a comma-separated list of unmap options.
        # For krbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
        # For nbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
        # unmapOptions: force
        # RBD image format. Defaults to "2".
        imageFormat: "2"
        # RBD image features, equivalent to OR'd bitfield value: 63
        # Available for imageFormat: "2". Older releases of CSI RBD
        # support only the `layering` feature. The Linux kernel (KRBD) supports the
        # full feature complement as of 5.4
        imageFeatures: layering
        # These secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        # Specify the filesystem type of the volume. If not specified, csi-provisioner
        # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
        # in hyperconverged settings where the volume is mounted on the same node as the osds.
        csi.storage.k8s.io/fstype: ext4
  - name: ceph-blockpool-ssd
    spec:
      failureDomain: host
      replicated:
        size: 3
      deviceClass: ssd
      enableRBDStats: true
    storageClass:
      enabled: true
      name: ceph-block-ssd
      isDefault: false
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      mountOptions: []
      allowedTopologies: []
      parameters:
        imageFormat: "2"
        imageFeatures: layering
        # These secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/fstype: ext4
# -- A list of CephFileSystem configurations to deploy
# @default -- See [below](#ceph-file-systems)
cephFileSystems: []
# -- Settings for the filesystem snapshot class
# @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
cephFileSystemVolumeSnapshotClass:
  enabled: true
  name: ceph-filesystem
  isDefault: true
  deletionPolicy: Delete
  annotations: {}
  labels:
    velero.io/csi-volumesnapshot-class: "true"
  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
  parameters: {}
# -- Settings for the block pool snapshot class
# @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
cephBlockPoolsVolumeSnapshotClass:
  enabled: true
  name: ceph-block
  isDefault: false
  deletionPolicy: Delete
  annotations: {}
  labels:
    velero.io/csi-volumesnapshot-class: "true"
  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
  parameters: {}
# -- A list of CephObjectStore configurations to deploy
# @default -- See [below](#ceph-object-stores)
cephObjectStores: []