rook-ceph-cluster-values.yaml

# From https://raw.githubusercontent.com/rook/rook/v1.18.9/deploy/charts/rook-ceph-cluster/values.yaml

# Installs a debugging toolbox deployment
toolbox:
  # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
  enabled: true
  # -- Toolbox image, defaults to the image used by the Ceph cluster
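  # A quick usage sketch (illustrative, not a chart value): once the toolbox pod
  # is running, check cluster health from it, assuming the default "rook-ceph"
  # namespace and deployment name:
  #   kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph status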

monitoring:
  # -- Enable Prometheus integration; this also creates the necessary RBAC rules to allow the Operator to create ServiceMonitors.
  # Monitoring requires Prometheus to be pre-installed
  enabled: true
  # -- Whether to create the Prometheus rules for Ceph alerts
  createPrometheusRules: true
  # -- The namespace in which to create the Prometheus rules, if different from the Rook cluster namespace.
  # If you have multiple Rook-Ceph clusters in the same k8s cluster, set the same rulesNamespaceOverride (ideally the
  # namespace where Prometheus is deployed) for all of them. Otherwise you will get duplicate alerts with multiple alert definitions.
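  # A hedged example (assumes Prometheus runs in a namespace named "monitoring"):
  # rulesNamespaceOverride: monitoring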

# All values below are taken from the CephCluster CRD
# -- Cluster configuration.
# @default -- See [below](#ceph-cluster-spec)
cephClusterSpec:
  dashboard:
    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
    # urlPrefix: /ceph-dashboard
    # serve the dashboard at the given port.
    port: 8080
    # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
    # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
    ssl: false
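    # For example, with ingress-nginx and `ssl: true`, the annotation would be
    # (illustrative; check your controller's documentation):
    #   nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"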

  # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/Cluster/ceph-cluster-crd.md#network-configuration-settings
  network:
    # enable host networking
    provider: host

  # enable log collector; daemons will log to files and rotate
  logCollector:
    enabled: false

  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The commented example under 'all' would have all services scheduled on kubernetes nodes labeled with 'storage-node=true' and
  # tolerate taints with a key of 'storage-node'.
  placement:
    all:
      # TODO: are snapshots easier if mgr/mon/mds run on a compute node?
      nodeAffinity:
        # requiredDuringSchedulingIgnoredDuringExecution:
        #   nodeSelectorTerms:
        #     - matchExpressions:
        #         - key: storage-node
        #           operator: In
        #           values:
        #             - "true"
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1  # `weight` is required for preferred scheduling terms
            preference:
              matchExpressions:
                - key: storage-node
                  operator: DoesNotExist
      tolerations:
        - key: storage-node
          operator: Equal
          value: "true"
          effect: PreferNoSchedule

  resources:
    mgr:
      requests:
        cpu: 0
        memory: 3Gi
      limits:
        cpu: 0
        memory: 0
    mon:
      requests:
        cpu: 0
        memory: 1Gi
      limits:
        cpu: 0
        memory: 0
    osd:
      requests:
        cpu: 0
        memory: 0
      limits:
        cpu: 0
        # Ensure osd_memory_target reflects this
        # https://docs.ceph.com/en/latest/start/hardware-recommendations/#ram
        memory: "4Gi"
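        # A hedged way to verify the resulting target from the toolbox
        # (assumes the toolbox above is enabled):
        #   kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph config get osd osd_memory_target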
    prepareosd:
      # limits: It is not recommended to set limits on the OSD prepare job
      #   since it's a one-time burst for memory that must be allowed to
      #   complete without an OOM kill. Note however that if a k8s
      #   limitRange guardrail is defined external to Rook, the lack of
      #   a limit here may result in a sync failure, in which case a
      #   limit should be added. 1200Mi may suffice for up to 15Ti
      #   OSDs; for larger devices 2Gi may be required.
      #   cf. https://github.com/rook/rook/pull/11103
      requests:
        cpu: 0
        memory: "500Mi"
    mgr-sidecar:
      limits:
        cpu: 0
        memory: "100Mi"
      requests:
        cpu: 0
        memory: "40Mi"
    crashcollector:
      limits:
        cpu: 0
        memory: "60Mi"
      requests:
        cpu: 0
        memory: "60Mi"
    logcollector:
      limits:
        cpu: 0
        memory: "1Gi"
      requests:
        cpu: 0
        memory: "100Mi"
    cleanup:
      limits:
        cpu: 0
        memory: "1Gi"
      requests:
        cpu: 0
        memory: "100Mi"
    exporter:
      limits:
        cpu: 0
        memory: "128Mi"
      requests:
        cpu: 0
        memory: "50Mi"

# -- A list of CephBlockPool configurations to deploy
# @default -- See [below](#ceph-block-pools)
cephBlockPools:
  - name: ceph-blockpool
    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
    spec:
      failureDomain: host
      replicated:
        size: 3
      deviceClass: hdd
      # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
      # For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics
      enableRBDStats: true
    storageClass:
      enabled: true
      name: ceph-block
      isDefault: true
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      mountOptions: []
      # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
      allowedTopologies: []
      # - matchLabelExpressions:
      #     - key: rook-ceph-role
      #       values:
      #         - storage-node
      # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
      parameters:
        # (optional) mapOptions is a comma-separated list of map options.
        # For krbd options refer to
        # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
        # For nbd options refer to
        # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
        # mapOptions: lock_on_read,queue_depth=1024

        # (optional) unmapOptions is a comma-separated list of unmap options.
        # For krbd options refer to
        # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
        # For nbd options refer to
        # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
        # unmapOptions: force

        # RBD image format. Defaults to "2".
        imageFormat: "2"

        # RBD image features, equivalent to OR'd bitfield value: 63
        # Available for imageFormat: "2". Older releases of CSI RBD
        # support only the `layering` feature. The Linux kernel (KRBD) supports the
        # full feature complement as of 5.4
        imageFeatures: layering

        # These secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        # Specify the filesystem type of the volume. If not specified, csi-provisioner
        # will set the default as `ext4`. Note that `xfs` is not recommended due to a potential deadlock
        # in hyperconverged settings where the volume is mounted on the same node as the OSDs.
        csi.storage.k8s.io/fstype: ext4
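
  # A minimal consumer sketch for the "ceph-block" class above (hypothetical PVC
  # name; kept as a comment so the chart does not render it):
  #   apiVersion: v1
  #   kind: PersistentVolumeClaim
  #   metadata:
  #     name: example-rbd-pvc
  #   spec:
  #     storageClassName: ceph-block
  #     accessModes: ["ReadWriteOnce"]
  #     resources:
  #       requests:
  #         storage: 10Gi
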
  - name: ceph-blockpool-ssd
    spec:
      failureDomain: host
      replicated:
        size: 3
      deviceClass: ssd
      enableRBDStats: true
    storageClass:
      enabled: true
      name: ceph-block-ssd
      isDefault: false
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      mountOptions: []
      allowedTopologies: []
      parameters:
        imageFormat: "2"
        imageFeatures: layering
        # These secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/fstype: ext4

# -- A list of CephFileSystem configurations to deploy
# @default -- See [below](#ceph-file-systems)
cephFileSystems: []

# -- Settings for the filesystem snapshot class
# @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
cephFileSystemVolumeSnapshotClass:
  enabled: true
  name: ceph-filesystem
  isDefault: true
  deletionPolicy: Delete
  annotations: {}
  labels:
    velero.io/csi-volumesnapshot-class: "true"
  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
  parameters: {}

# -- Settings for the block pool snapshot class
# @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
cephBlockPoolsVolumeSnapshotClass:
  enabled: true
  name: ceph-block
  isDefault: false
  deletionPolicy: Delete
  annotations: {}
  labels:
    velero.io/csi-volumesnapshot-class: "true"
  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
  parameters: {}
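
# A hedged usage sketch (hypothetical names) for the "ceph-block" snapshot class
# above, kept as a comment so the chart does not render it:
#   apiVersion: snapshot.storage.k8s.io/v1
#   kind: VolumeSnapshot
#   metadata:
#     name: example-rbd-snapshot
#   spec:
#     volumeSnapshotClassName: ceph-block
#     source:
#       persistentVolumeClaimName: example-rbd-pvc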

# -- A list of CephObjectStore configurations to deploy
# @default -- See [below](#ceph-object-stores)
cephObjectStores: []