# https://github.com/rook/rook/blob/v1.19.1/deploy/charts/rook-ceph-cluster/values.yaml
toolbox:
  enabled: true

cephClusterSpec:
  mon:
    count: 2
    allowMultiplePerNode: true
  mgr:
    allowMultiplePerNode: true
  dashboard:
    port: 8080
    ssl: false
  logCollector:
    enabled: false
  resources:
    mgr:
      limits:
        cpu: 0
        memory: "1.5Gi"
      requests:
        cpu: 0
        memory: "512Mi"
    mon:
      limits:
        cpu: 0
        memory: "1Gi"
      requests:
        cpu: 0
        memory: "500Mi"
    osd:
      limits:
        cpu: 0
        memory: "4Gi"
      requests:
        cpu: 0
        memory: "1Gi"
    prepareosd:
      # limits: It is not recommended to set limits on the OSD prepare job
      # since it's a one-time burst for memory that must be allowed to
      # complete without an OOM kill. Note however that if a k8s
      # limitRange guardrail is defined external to Rook, the lack of
      # a limit here may result in a sync failure, in which case a
      # limit should be added. 1200Mi may suffice for up to 15Ti
      # OSDs; for larger devices 2Gi may be required.
      # cf. https://github.com/rook/rook/pull/11103
      requests:
        cpu: 0
        memory: "500Mi"
    mgr-sidecar:
      limits:
        cpu: 0
        memory: "100Mi"
      requests:
        cpu: 0
        memory: "40Mi"
    crashcollector:
      limits:
        cpu: 0
        memory: "60Mi"
      requests:
        cpu: 0
        memory: "60Mi"
    logcollector:
      limits:
        cpu: 0
        memory: "1Gi"
      requests:
        cpu: 0
        memory: "100Mi"
    cleanup:
      limits:
        cpu: 0
        memory: "1Gi"
      requests:
        cpu: 0
        memory: "100Mi"
    exporter:
      limits:
        cpu: 0
        memory: "128Mi"
      requests:
        cpu: 0
        memory: "50Mi"

cephBlockPools: []
cephFileSystems: []
cephObjectStores:
  - name: ceph-objectstore
    spec:
      metadataPool:
        failureDomain: osd
        replicated:
          size: 3
      dataPool:
        failureDomain: osd
        erasureCoded:
          dataChunks: 2
          codingChunks: 1
        parameters:
          bulk: "true"
      preservePoolsOnDelete: true
      gateway:
        port: 80
        resources:
          limits:
            memory: "2Gi"
          requests:
            cpu: "1000m"
            memory: "1Gi"
        instances: 1
        priorityClassName: system-cluster-critical
    storageClass:
      enabled: false
      # name: ceph-bucket
      # reclaimPolicy: Delete
      # volumeBindingMode: "Immediate"
      # annotations: {}
      # labels: {}
      # parameters:
      #   # note: objectStoreNamespace and objectStoreName are configured by the chart
      #   region: us-east-1
    ingress:
      enabled: false
    route:
      enabled: false