rook-ceph-operator-values.yaml

# From https://github.com/rook/rook/blob/release-1.9/deploy/charts/rook-ceph/values.yaml
# Default values for rook-ceph-operator
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
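
# A minimal usage sketch (the repo alias, release name, and namespace below are
# illustrative assumptions; adjust them to your environment):
#   helm repo add rook-release https://charts.rook.io/release
#   helm install --create-namespace --namespace rook-ceph rook-ceph \
#     rook-release/rook-ceph -f rook-ceph-operator-values.yaml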
image:
  repository: rook/ceph
  tag: v1.9.2
  pullPolicy: IfNotPresent

crds:
  # Whether the helm chart should create and update the CRDs. If false, the CRDs must be
  # managed independently with deploy/examples/crds.yaml.
  # **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
  # If the CRDs are deleted in this case, see the disaster recovery guide to restore them.
  # https://rook.github.io/docs/rook/latest/ceph-disaster-recovery.html#restoring-crds-after-deletion
  enabled: true

resources:
  limits:
    cpu: 500m
    memory: 256Mi
  requests:
    cpu: 100m
    memory: 128Mi

# Constrain the rook-ceph-operator Deployment to nodes with this label.
# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector:
  storage-node: "true"

# Tolerations for the rook-ceph-operator to allow it to run on nodes with particular taints
tolerations: []
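# For example (commented out; the taint key below is an assumption, match it
# to the taints actually present on your nodes):
# tolerations:
#   - key: storage-node
#     operator: Exists
#     effect: NoSchedule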

# Delay to use in node.kubernetes.io/unreachable toleration
unreachableNodeTolerationSeconds: 5

# Whether rook watches its current namespace for CRDs or the entire cluster, defaults to false
currentNamespaceOnly: false

## Annotations to be added to pod
annotations: {}

## The logging level for the operator: ERROR | WARNING | INFO | DEBUG
logLevel: INFO

## If true, create & use RBAC resources
##
rbacEnable: true

## If true, create & use PSP resources
##
pspEnable: true

# Set the priority class for the rook operator deployment if desired
# priorityClassName: class

## Settings for whether to disable the drivers or other daemons if they are not
## needed
csi:
  enableRbdDriver: true
  enableCephfsDriver: true
  enableGrpcMetrics: false
  # Set to true to enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
  # in some network configurations where the SDN does not provide access to an external cluster or
  # there is significant drop in read/write performance.
  enableCSIHostNetwork: true
  # set to false to disable deployment of snapshotter container in CephFS provisioner pod.
  enableCephfsSnapshotter: true
  # set to false to disable deployment of snapshotter container in RBD provisioner pod.
  enableRBDSnapshotter: true
  # set to false if SELinux is not enabled or is unavailable on the cluster nodes.
  enablePluginSelinuxHostMount: false
  # set to true to enable Ceph CSI PVC encryption support.
  enableCSIEncryption: false
  # (Optional) set a user-created priorityClassName for the csi plugin pods.
  pluginPriorityClassName: system-node-critical
  # (Optional) set a user-created priorityClassName for the csi provisioner pods.
  provisionerPriorityClassName: system-cluster-critical
  # (Optional) policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  rbdFSGroupPolicy: "ReadWriteOnceWithFSType"
  # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  cephFSFSGroupPolicy: "ReadWriteOnceWithFSType"
  # (Optional) policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  nfsFSGroupPolicy: "ReadWriteOnceWithFSType"
  # The OMAP generator generates the omap mapping between the PV name and the RBD image,
  # which helps CSI identify the RBD images for CSI operations.
  # CSI_ENABLE_OMAP_GENERATOR needs to be enabled when the RBD mirroring feature is used.
  # By default the OMAP generator is disabled; when enabled it is deployed as a
  # sidecar with the CSI provisioner pod. To enable it, set this to true.
  enableOMAPGenerator: false
  # Set replicas for the csi provisioner deployment.
  provisionerReplicas: 2
  # Set logging level for csi containers.
  # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
  #logLevel: 0
  # CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
  # Default value is RollingUpdate.
  #rbdPluginUpdateStrategy: OnDelete
  # CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
  # Default value is RollingUpdate.
  #cephFSPluginUpdateStrategy: OnDelete
  # CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
  # Default value is RollingUpdate.
  #nfsPluginUpdateStrategy: OnDelete
  # The CSI GRPC timeout value (in seconds). It should be >= 120. If this variable is not set or is an invalid value, it defaults to 150.
  grpcTimeoutInSeconds: 150
  # Allow starting an unsupported ceph-csi image
  allowUnsupportedVersion: false
  # CEPH CSI RBD provisioner resource requirement list. Put here the list of resource
  # requests and limits you want to apply to the provisioner pod.
  # csi-omap-generator resources will be applied only if enableOMAPGenerator is set to true.
  csiRBDProvisionerResource: |
    - name : csi-provisioner
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name : csi-resizer
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name : csi-attacher
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name : csi-snapshotter
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name : csi-rbdplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
    - name : csi-omap-generator
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
    - name : liveness-prometheus
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m
  # CEPH CSI RBD plugin resource requirement list. Put here the list of resource
  # requests and limits you want to apply to the plugin pod.
  csiRBDPluginResource: |
    - name : driver-registrar
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m
    - name : csi-rbdplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
    - name : liveness-prometheus
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m
  # CEPH CSI CephFS provisioner resource requirement list. Put here the list of resource
  # requests and limits you want to apply to the provisioner pod.
  csiCephFSProvisionerResource: |
    - name : csi-provisioner
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name : csi-resizer
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name : csi-attacher
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name : csi-snapshotter
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name : csi-cephfsplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
    - name : liveness-prometheus
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m
  # CEPH CSI CephFS plugin resource requirement list. Put here the list of resource
  # requests and limits you want to apply to the plugin pod.
  csiCephFSPluginResource: |
    - name : driver-registrar
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m
    - name : csi-cephfsplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
    - name : liveness-prometheus
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m
  # CEPH CSI NFS provisioner resource requirement list. Put here the list of resource
  # requests and limits you want to apply to the provisioner pod.
  csiNFSProvisionerResource: |
    - name : csi-provisioner
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name : csi-nfsplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
  # CEPH CSI NFS plugin resource requirement list. Put here the list of resource
  # requests and limits you want to apply to the plugin pod.
  csiNFSPluginResource: |
    - name : driver-registrar
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m
    - name : csi-nfsplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
  # Set provisionerTolerations and provisionerNodeAffinity for the provisioner pod.
  # The CSI provisioner would be best to start on the same nodes as the other ceph daemons.
  # provisionerTolerations:
  #    - key: key
  #      operator: Exists
  #      effect: NoSchedule
  # provisionerNodeAffinity: key1=value1,value2; key2=value3
  provisionerNodeAffinity: "storage-node=true"
  # Set pluginTolerations and pluginNodeAffinity for the plugin daemonset pods.
  # The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  pluginTolerations:
    - key: storage-node
      operator: Exists
      effect: NoSchedule
  # pluginNodeAffinity: key1=value1,value2; key2=value3
  #pluginNodeAffinity: "storage-node=true"
  #cephfsGrpcMetricsPort: 9091
  #cephfsLivenessMetricsPort: 9081
  #rbdGrpcMetricsPort: 9090
  #csiAddonsPort: 9070
  # Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
  # you may want to disable this setting. However, this will cause an issue during upgrades
  # with the FUSE client. See the upgrade guide: https://rook.io/docs/rook/v1.2/ceph-upgrade.html
  forceCephFSKernelClient: true
  #rbdLivenessMetricsPort: 9080
  #kubeletDirPath: /var/lib/kubelet
  #cephcsi:
  #  image: quay.io/cephcsi/cephcsi:v3.6.1
  #registrar:
  #  image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.5.0
  #provisioner:
  #  image: k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0
  #snapshotter:
  #  image: k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1
  #attacher:
  #  image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
  #resizer:
  #  image: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0
  # Labels to add to the CSI CephFS Deployments and DaemonSets Pods.
  #cephfsPodLabels: "key1=value1,key2=value2"
  # Labels to add to the CSI NFS Deployments and DaemonSets Pods.
  #nfsPodLabels: "key1=value1,key2=value2"
  # Labels to add to the CSI RBD Deployments and DaemonSets Pods.
  #rbdPodLabels: "key1=value1,key2=value2"
  # Enable the volume replication controller.
  # Before enabling, ensure the Volume Replication CRDs are created.
  # See https://rook.io/docs/rook/latest/ceph-csi-drivers.html#rbd-mirroring
  volumeReplication:
    enabled: false
    #image: "quay.io/csiaddons/volumereplication-operator:v0.3.0"
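  # One way to check for the CRDs before enabling (the CRD name is an assumption
  # based on the csi-addons project; verify against your installed version):
  #   kubectl get crd volumereplications.replication.storage.openshift.io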
  # Enable the CSIAddons sidecar.
  csiAddons:
    enabled: false
    #image: "quay.io/csiaddons/k8s-sidecar:v0.2.1"
  # Enable the nfs csi driver.
  nfs:
    enabled: false
    #image: "k8s.gcr.io/sig-storage/nfsplugin:v3.1.0"

enableDiscoveryDaemon: false
cephCommandsTimeoutSeconds: "15"

## if true, run rook operator on the host network
useOperatorHostNetwork: true

## Rook Discover configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
## tolerations: Array of tolerations in YAML format which will be added to agent deployment
## nodeAffinity: Set to labels of the node to match
# discover:
#   toleration: NoSchedule
#   tolerationKey: key
#   tolerations:
#   - key: key
#     operator: Exists
#     effect: NoSchedule
#   nodeAffinity: key1=value1,value2; key2=value3
#   podLabels: "key1=value1,key2=value2"

# In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins).
# Disable it here if you have similar issues.
# For more details see https://github.com/rook/rook/issues/2417
enableSelinuxRelabeling: true
disableAdmissionController: false
# Writing to the hostPath is required for the Ceph mon and osd pods. Given the restricted permissions in OpenShift with SELinux,
# the pod must run privileged in order to write to the hostPath volume; in that case this must be set to true.
hostpathRequiresPrivileged: false
# Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false
# Blacklist certain disks according to the regex provided.
discoverDaemonUdev:
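# For example (an illustrative pattern, not a recommendation; match it to the
# device names you actually want the discover daemon to skip):
# discoverDaemonUdev: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"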

# The imagePullSecrets option allows pulling images from a private registry. The option is passed to all service accounts.
# imagePullSecrets:
# - name: my-registry-secret

# Whether the OBC provisioner should watch on the operator namespace or not; if not, the namespace of the cluster will be used
enableOBCWatchOperatorNamespace: true

admissionController:
  # Set tolerations and nodeAffinity for the admission controller pod.
  # The admission controller would be best to start on the same nodes as the other ceph daemons.
  # tolerations:
  #    - key: key
  #      operator: Exists
  #      effect: NoSchedule
  # nodeAffinity: key1=value1,value2; key2=value3
  nodeAffinity: "storage-node=true"

monitoring:
  # requires Prometheus to be pre-installed
  # enabling will also create RBAC rules to allow the Operator to create ServiceMonitors
  enabled: true
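
# After install, a quick way to confirm the monitoring wiring (assumes the
# Prometheus Operator and its CRDs are already running in the cluster):
#   kubectl -n rook-ceph get servicemonitors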