# rook-ceph-cluster-values.yaml
  1. # From https://raw.githubusercontent.com/rook/rook/v1.18.9/deploy/charts/rook-ceph-cluster/values.yaml
  2. configOverride: |
  3. [osd]
  4. osd_memory_target_autotune = true
  5. # Installs a debugging toolbox deployment
  6. toolbox:
  7. # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
  8. enabled: true
  9. # -- Toolbox image, defaults to the image used by the Ceph cluster
  10. monitoring:
  11. # -- Enable Prometheus integration, will also create necessary RBAC rules to allow Operator to create ServiceMonitors.
  12. # Monitoring requires Prometheus to be pre-installed
  13. enabled: true
  14. # -- Whether to create the Prometheus rules for Ceph alerts
  15. createPrometheusRules: true
  16. # -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
  17. # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
  18. # deployed) to set rulesNamespaceOverride for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
  19. # All values below are taken from the CephCluster CRD
  20. # -- Cluster configuration.
  21. # @default -- See [below](#ceph-cluster-spec)
  22. cephClusterSpec:
  23. dashboard:
  24. # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
  25. # urlPrefix: /ceph-dashboard
  26. # serve the dashboard at the given port.
  27. port: 8080
  28. # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
  29. # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
  30. ssl: false
  31. # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/Cluster/ceph-cluster-crd.md#network-configuration-settings
  32. network:
  33. # enable host networking
  34. provider: host
  35. # enable log collector, daemons will log on files and rotate
  36. logCollector:
  37. enabled: false
  38. # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  39. # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  40. # tolerate taints with a key of 'storage-node'.
  41. placement:
  42. all:
  43. nodeAffinity:
  44. requiredDuringSchedulingIgnoredDuringExecution:
  45. nodeSelectorTerms:
  46. - matchExpressions:
  47. - key: storage-node
  48. operator: In
  49. values:
  50. - "true"
  51. # TODO are snapshots easier if mgr/mon/mds run on a compute node?
  52. # TODO does running on a compute node change cephfs mounting? I think it requires mon's port 6789
  53. preferredDuringSchedulingIgnoredDuringExecution:
  54. - weight: 1
  55. preference:
  56. matchExpressions:
  57. - key: storage-node
  58. operator: DoesNotExist
  59. tolerations:
  60. - key: storage-node
  61. operator: Equal
  62. value: "true"
  63. effect: PreferNoSchedule
  64. resources:
  65. mgr:
  66. requests:
  67. cpu: 0
  68. memory: 0
  69. limits:
  70. cpu: 0
  71. memory: 2.5Gi
  72. mon:
  73. requests:
  74. cpu: 0
  75. memory: 0
  76. limits:
  77. cpu: 0
  78. memory: 1Gi
  79. osd:
  80. requests:
  81. cpu: 0
  82. memory: 0
  83. limits:
  84. cpu: 0
  85. # Ensure osd_memory_target reflects this
  86. # https://docs.ceph.com/en/latest/start/hardware-recommendations/#ram
  87. memory: "4Gi"
  88. prepareosd:
  89. # limits: It is not recommended to set limits on the OSD prepare job
  90. # since it's a one-time burst for memory that must be allowed to
  91. # complete without an OOM kill. Note however that if a k8s
  92. # limitRange guardrail is defined external to Rook, the lack of
  93. # a limit here may result in a sync failure, in which case a
  94. # limit should be added. 1200Mi may suffice for up to 15Ti
  95. # OSDs ; for larger devices 2Gi may be required.
  96. # cf. https://github.com/rook/rook/pull/11103
  97. requests:
  98. cpu: 0
  99. memory: "500Mi"
  100. mgr-sidecar:
  101. limits:
  102. cpu: 0
  103. memory: "100Mi"
  104. requests:
  105. cpu: 0
  106. memory: "40Mi"
  107. crashcollector:
  108. limits:
  109. cpu: 0
  110. memory: "60Mi"
  111. requests:
  112. cpu: 0
  113. memory: "60Mi"
  114. logcollector:
  115. limits:
  116. cpu: 0
  117. memory: "1Gi"
  118. requests:
  119. cpu: 0
  120. memory: "100Mi"
  121. cleanup:
  122. limits:
  123. cpu: 0
  124. memory: "1Gi"
  125. requests:
  126. cpu: 0
  127. memory: "100Mi"
  128. exporter:
  129. limits:
  130. cpu: 0
  131. memory: "128Mi"
  132. requests:
  133. cpu: 0
  134. memory: "50Mi"
  135. # -- A list of CephBlockPool configurations to deploy
  136. # @default -- See [below](#ceph-block-pools)
  137. cephBlockPools:
  138. - name: ceph-blockpool
  139. # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
  140. spec:
  141. failureDomain: host
  142. replicated:
  143. size: 3
  144. deviceClass: hdd
  145. # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
  146. # For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics
  147. enableRBDStats: true
  148. storageClass:
  149. enabled: true
  150. name: ceph-block
  151. isDefault: true
  152. reclaimPolicy: Delete
  153. allowVolumeExpansion: true
  154. volumeBindingMode: "Immediate"
  155. mountOptions: []
  156. # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
  157. allowedTopologies: []
  158. # - matchLabelExpressions:
  159. # - key: rook-ceph-role
  160. # values:
  161. # - storage-node
  162. # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
  163. parameters:
  164. # (optional) mapOptions is a comma-separated list of map options.
  165. # For krbd options refer
  166. # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
  167. # For nbd options refer
  168. # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
  169. # mapOptions: lock_on_read,queue_depth=1024
  170. # (optional) unmapOptions is a comma-separated list of unmap options.
  171. # For krbd options refer
  172. # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
  173. # For nbd options refer
  174. # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
  175. # unmapOptions: force
  176. # RBD image format. Defaults to "2".
  177. imageFormat: "2"
  178. # RBD image features, equivalent to OR'd bitfield value: 63
  179. # Available for imageFormat: "2". Older releases of CSI RBD
  180. # support only the `layering` feature. The Linux kernel (KRBD) supports the
  181. # full feature complement as of 5.4
  182. imageFeatures: layering
  183. # These secrets contain Ceph admin credentials.
  184. csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  185. csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
  186. csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  187. csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
  188. csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  189. csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
  190. # Specify the filesystem type of the volume. If not specified, csi-provisioner
  191. # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
  192. # in hyperconverged settings where the volume is mounted on the same node as the osds.
  193. csi.storage.k8s.io/fstype: ext4
  194. - name: ceph-blockpool-ssd
  195. spec:
  196. failureDomain: host
  197. replicated:
  198. size: 3
  199. deviceClass: ssd
  200. enableRBDStats: true
  201. storageClass:
  202. enabled: true
  203. name: ceph-block-ssd
  204. isDefault: false
  205. reclaimPolicy: Delete
  206. allowVolumeExpansion: true
  207. volumeBindingMode: "Immediate"
  208. mountOptions: []
  209. allowedTopologies: []
  210. parameters:
  211. imageFormat: "2"
  212. imageFeatures: layering
  213. # These secrets contain Ceph admin credentials.
  214. csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  215. csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
  216. csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  217. csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
  218. csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  219. csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
  220. csi.storage.k8s.io/fstype: ext4
  221. # -- A list of CephFileSystem configurations to deploy
  222. # @default -- See [below](#ceph-file-systems)
  223. cephFileSystems: []
  224. # -- Settings for the filesystem snapshot class
  225. # @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
  226. cephFileSystemVolumeSnapshotClass:
  227. enabled: true
  228. name: ceph-filesystem
  229. isDefault: true
  230. deletionPolicy: Delete
  231. annotations: {}
  232. labels:
  233. velero.io/csi-volumesnapshot-class: "true"
  234. # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
  235. parameters: {}
  236. # -- Settings for the block pool snapshot class
  237. # @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
  238. cephBlockPoolsVolumeSnapshotClass:
  239. enabled: true
  240. name: ceph-block
  241. isDefault: false
  242. deletionPolicy: Delete
  243. annotations: {}
  244. labels:
  245. velero.io/csi-volumesnapshot-class: "true"
  246. # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
  247. parameters: {}
  248. # -- A list of CephObjectStore configurations to deploy
  249. # @default -- See [below](#ceph-object-stores)
  250. cephObjectStores: []