# rook-ceph-cluster-values.yaml
---
  1. # From https://raw.githubusercontent.com/rook/rook/v1.18.9/deploy/charts/rook-ceph-cluster/values.yaml
  2. # Installs a debugging toolbox deployment
  3. toolbox:
  4. # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
  5. enabled: true
  6. # -- Toolbox image, defaults to the image used by the Ceph cluster
  7. monitoring:
  8. # -- Enable Prometheus integration, will also create necessary RBAC rules to allow Operator to create ServiceMonitors.
  9. # Monitoring requires Prometheus to be pre-installed
  10. enabled: true
  11. # -- Whether to create the Prometheus rules for Ceph alerts
  12. createPrometheusRules: true
  13. # -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
  14. # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
  15. # deployed) to set rulesNamespaceOverride for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
  16. # All values below are taken from the CephCluster CRD
  17. # -- Cluster configuration.
  18. # @default -- See [below](#ceph-cluster-spec)
  19. cephClusterSpec:
  20. dashboard:
  21. # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
  22. # urlPrefix: /ceph-dashboard
  23. # serve the dashboard at the given port.
  24. port: 8080
  25. # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
  26. # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
  27. ssl: false
  28. # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/Cluster/ceph-cluster-crd.md#network-configuration-settings
  29. network:
  30. # enable host networking
  31. provider: host
  32. # enable log collector, daemons will log on files and rotate
  33. logCollector:
  34. enabled: false
  35. # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  36. # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  37. # tolerate taints with a key of 'storage-node'.
  38. placement:
  39. all:
  40. nodeAffinity:
  41. #requiredDuringSchedulingIgnoredDuringExecution:
  42. # nodeSelectorTerms:
  43. # - matchExpressions:
  44. # - key: storage-node
  45. # operator: In
  46. # values:
  47. # - "true"
  48. # TODO are snapshots easier if mgr/mon/mds run on a compute node?
  49. # TODO does running on a compute node change cephfs mounting? I think it requires mon's port 6789
  50. preferredDuringSchedulingIgnoredDuringExecution:
  51. - weight: 1
  52. preference:
  53. matchExpressions:
  54. - key: storage-node
  55. operator: DoesNotExist
  56. tolerations:
  57. - key: storage-node
  58. operator: Equal
  59. value: "true"
  60. effect: PreferNoSchedule
  61. resources:
  62. mgr:
  63. requests:
  64. cpu: 0
  65. memory: 0
  66. limits:
  67. cpu: 0
  68. memory: 2.5Gi
  69. mon:
  70. requests:
  71. cpu: 0
  72. memory: 0
  73. limits:
  74. cpu: 0
  75. memory: 1Gi
  76. osd:
  77. requests:
  78. cpu: 0
  79. memory: 0
  80. limits:
  81. cpu: 0
  82. # Ensure osd_memory_target reflects this
  83. # https://docs.ceph.com/en/latest/start/hardware-recommendations/#ram
  84. memory: "4Gi"
  85. prepareosd:
  86. # limits: It is not recommended to set limits on the OSD prepare job
  87. # since it's a one-time burst for memory that must be allowed to
  88. # complete without an OOM kill. Note however that if a k8s
  89. # limitRange guardrail is defined external to Rook, the lack of
  90. # a limit here may result in a sync failure, in which case a
  91. # limit should be added. 1200Mi may suffice for up to 15Ti
  92. # OSDs ; for larger devices 2Gi may be required.
  93. # cf. https://github.com/rook/rook/pull/11103
  94. requests:
  95. cpu: 0
  96. memory: "500Mi"
  97. mgr-sidecar:
  98. limits:
  99. cpu: 0
  100. memory: "100Mi"
  101. requests:
  102. cpu: 0
  103. memory: "40Mi"
  104. crashcollector:
  105. limits:
  106. cpu: 0
  107. memory: "60Mi"
  108. requests:
  109. cpu: 0
  110. memory: "60Mi"
  111. logcollector:
  112. limits:
  113. cpu: 0
  114. memory: "1Gi"
  115. requests:
  116. cpu: 0
  117. memory: "100Mi"
  118. cleanup:
  119. limits:
  120. cpu: 0
  121. memory: "1Gi"
  122. requests:
  123. cpu: 0
  124. memory: "100Mi"
  125. exporter:
  126. limits:
  127. cpu: 0
  128. memory: "128Mi"
  129. requests:
  130. cpu: 0
  131. memory: "50Mi"
  132. # https://github.com/rook/rook/blob/f244f47eeacc42c4e755b9bf21e88e0431ba4cac/Documentation/CRDs/Cluster/ceph-cluster-crd.md#ceph-config
  133. cephConfig:
  134. global:
  135. # https://silvenga.com/posts/ceph-and-deep-scrubs/
  136. # Schedule the next normal scrub in between 1-7 days.
  137. osd_scrub_min_interval: "86400" # 1 day
  138. osd_scrub_interval_randomize_ratio: "7" # 700%
  139. # No more delays, normal scrub after 14 days.
  140. osd_scrub_max_interval: "1209600" # 14 days
  141. # No more waiting on a random 15% chance to deep-scrub, just deep-scrub.
  142. osd_deep_scrub_interval: "2419200" # 28 days
  143. "osd.*":
  144. osd_memory_target_autotune: "false"
  145. #osd_memory_target: "4294967296" # 4Gi
  146. osd_memory_target: "4026531840" # 3.75Gi
  147. osd_scrub_disable_reservation_queuing: "true" # trying to help with OOMs https://tracker.ceph.com/issues/69078
  148. # -- A list of CephBlockPool configurations to deploy
  149. # @default -- See [below](#ceph-block-pools)
  150. cephBlockPools:
  151. - name: ceph-blockpool
  152. # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
  153. spec:
  154. failureDomain: host
  155. replicated:
  156. size: 3
  157. deviceClass: hdd
  158. # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
  159. # For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics
  160. enableRBDStats: true
  161. storageClass:
  162. enabled: true
  163. name: ceph-block
  164. isDefault: true
  165. reclaimPolicy: Delete
  166. allowVolumeExpansion: true
  167. volumeBindingMode: "Immediate"
  168. mountOptions: []
  169. # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
  170. allowedTopologies: []
  171. # - matchLabelExpressions:
  172. # - key: rook-ceph-role
  173. # values:
  174. # - storage-node
  175. # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
  176. parameters:
  177. # (optional) mapOptions is a comma-separated list of map options.
  178. # For krbd options refer
  179. # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
  180. # For nbd options refer
  181. # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
  182. # mapOptions: lock_on_read,queue_depth=1024
  183. # (optional) unmapOptions is a comma-separated list of unmap options.
  184. # For krbd options refer
  185. # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
  186. # For nbd options refer
  187. # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
  188. # unmapOptions: force
  189. # RBD image format. Defaults to "2".
  190. imageFormat: "2"
  191. # RBD image features, equivalent to OR'd bitfield value: 63
  192. # Available for imageFormat: "2". Older releases of CSI RBD
  193. # support only the `layering` feature. The Linux kernel (KRBD) supports the
  194. # full feature complement as of 5.4
  195. imageFeatures: layering
  196. # These secrets contain Ceph admin credentials.
  197. csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  198. csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
  199. csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  200. csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
  201. csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  202. csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
  203. # Specify the filesystem type of the volume. If not specified, csi-provisioner
  204. # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
  205. # in hyperconverged settings where the volume is mounted on the same node as the osds.
  206. csi.storage.k8s.io/fstype: ext4
  207. - name: ceph-blockpool-ssd
  208. spec:
  209. failureDomain: host
  210. replicated:
  211. size: 3
  212. deviceClass: ssd
  213. enableRBDStats: true
  214. storageClass:
  215. enabled: true
  216. name: ceph-block-ssd
  217. isDefault: false
  218. reclaimPolicy: Delete
  219. allowVolumeExpansion: true
  220. volumeBindingMode: "Immediate"
  221. mountOptions: []
  222. allowedTopologies: []
  223. parameters:
  224. imageFormat: "2"
  225. imageFeatures: layering
  226. # These secrets contain Ceph admin credentials.
  227. csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  228. csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
  229. csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  230. csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
  231. csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  232. csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
  233. csi.storage.k8s.io/fstype: ext4
  234. # -- A list of CephFileSystem configurations to deploy
  235. # @default -- See [below](#ceph-file-systems)
  236. cephFileSystems: []
  237. # -- Settings for the filesystem snapshot class
  238. # @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
  239. cephFileSystemVolumeSnapshotClass:
  240. enabled: true
  241. name: ceph-filesystem
  242. isDefault: true
  243. deletionPolicy: Delete
  244. annotations: {}
  245. labels:
  246. velero.io/csi-volumesnapshot-class: "true"
  247. # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
  248. parameters: {}
  249. # -- Settings for the block pool snapshot class
  250. # @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
  251. cephBlockPoolsVolumeSnapshotClass:
  252. enabled: true
  253. name: ceph-block
  254. isDefault: false
  255. deletionPolicy: Delete
  256. annotations: {}
  257. labels:
  258. velero.io/csi-volumesnapshot-class: "true"
  259. # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
  260. parameters: {}
  261. # -- A list of CephObjectStore configurations to deploy
  262. # @default -- See [below](#ceph-object-stores)
  263. cephObjectStores: []