---
# rook-ceph-cluster-values.yaml
  1. # https://github.com/rook/rook/blob/v1.19.1/deploy/charts/rook-ceph-cluster/values.yaml
  2. toolbox:
  3. enabled: true
  4. cephClusterSpec:
  5. mon:
  6. count: 1
  7. allowMultiplePerNode: true
  8. mgr:
  9. count: 1
  10. allowMultiplePerNode: true
  11. dashboard:
  12. port: 8080
  13. ssl: false
  14. logCollector:
  15. enabled: false
  16. resources:
  17. mgr:
  18. limits:
  19. cpu: 0
  20. memory: "1.5Gi"
  21. requests:
  22. cpu: 0
  23. memory: "512Mi"
  24. mon:
  25. limits:
  26. cpu: 0
  27. memory: "1Gi"
  28. requests:
  29. cpu: 0
  30. memory: "500Mi"
  31. osd:
  32. limits:
  33. cpu: 0
  34. memory: "4Gi"
  35. requests:
  36. cpu: 0
  37. memory: "1Gi"
  38. prepareosd:
  39. # limits: It is not recommended to set limits on the OSD prepare job
  40. # since it's a one-time burst for memory that must be allowed to
  41. # complete without an OOM kill. Note however that if a k8s
  42. # limitRange guardrail is defined external to Rook, the lack of
  43. # a limit here may result in a sync failure, in which case a
  44. # limit should be added. 1200Mi may suffice for up to 15Ti
  45. # OSDs ; for larger devices 2Gi may be required.
  46. # cf. https://github.com/rook/rook/pull/11103
  47. requests:
  48. cpu: 0
  49. memory: "500Mi"
  50. mgr-sidecar:
  51. limits:
  52. cpu: 0
  53. memory: "100Mi"
  54. requests:
  55. cpu: 0
  56. memory: "40Mi"
  57. crashcollector:
  58. limits:
  59. cpu: 0
  60. memory: "60Mi"
  61. requests:
  62. cpu: 0
  63. memory: "60Mi"
  64. logcollector:
  65. limits:
  66. cpu: 0
  67. memory: "1Gi"
  68. requests:
  69. cpu: 0
  70. memory: "100Mi"
  71. cleanup:
  72. limits:
  73. cpu: 0
  74. memory: "1Gi"
  75. requests:
  76. cpu: 0
  77. memory: "100Mi"
  78. exporter:
  79. limits:
  80. cpu: 0
  81. memory: "128Mi"
  82. requests:
  83. cpu: 0
  84. memory: "50Mi"
  85. cephBlockPools:
  86. - name: ceph-blockpool
  87. spec:
  88. failureDomain: osd
  89. erasureCoded:
  90. dataChunks: 2
  91. codingChunks: 1
  92. storageClass:
  93. enabled: true
  94. name: ceph-block
  95. isDefault: true
  96. reclaimPolicy: Delete
  97. allowVolumeExpansion: true
  98. volumeBindingMode: "Immediate"
  99. parameters:
  100. # These secrets contain Ceph admin credentials.
  101. csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  102. csi.storage.k8s.io/provisioner-secret-namespace: "rook-ceph"
  103. csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  104. csi.storage.k8s.io/controller-expand-secret-namespace: "rook-ceph"
  105. csi.storage.k8s.io/controller-publish-secret-name: rook-csi-rbd-provisioner
  106. csi.storage.k8s.io/controller-publish-secret-namespace: "rook-ceph"
  107. csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  108. csi.storage.k8s.io/node-stage-secret-namespace: "rook-ceph"
  109. csi.storage.k8s.io/fstype: ext4
  110. cephObjectStores:
  111. - name: ceph-objectstore
  112. spec:
  113. metadataPool:
  114. failureDomain: osd
  115. replicated:
  116. size: 3
  117. dataPool:
  118. failureDomain: osd
  119. erasureCoded:
  120. dataChunks: 2
  121. codingChunks: 1
  122. preservePoolsOnDelete: true
  123. gateway:
  124. port: 80
  125. resources: {}
  126. instances: 1
  127. priorityClassName: system-cluster-critical
  128. storageClass:
  129. enabled: false
  130. ingress:
  131. enabled: false
  132. route:
  133. enabled: false
  134. cephFileSystems: []