---
# rook-ceph-cluster-values.yaml
  1. # https://github.com/rook/rook/blob/v1.19.1/deploy/charts/rook-ceph-cluster/values.yaml
  2. toolbox:
  3. enabled: true
  4. cephClusterSpec:
  5. mon:
  6. count: 1
  7. allowMultiplePerNode: true
  8. mgr:
  9. count: 1
  10. allowMultiplePerNode: true
  11. modules:
  12. - name: pg_autoscaler
  13. enabled: false
  14. - name: rook
  15. enabled: true
  16. dashboard:
  17. port: 8080
  18. ssl: false
  19. logCollector:
  20. enabled: false
  21. resources:
  22. mgr:
  23. limits:
  24. cpu: 0
  25. memory: "1.5Gi"
  26. requests:
  27. cpu: 0
  28. memory: "512Mi"
  29. mon:
  30. limits:
  31. cpu: 0
  32. memory: "1Gi"
  33. requests:
  34. cpu: 0
  35. memory: "500Mi"
  36. osd:
  37. limits:
  38. cpu: 0
  39. memory: "4Gi"
  40. requests:
  41. cpu: 0
  42. memory: "1Gi"
  43. prepareosd:
  44. # limits: It is not recommended to set limits on the OSD prepare job
  45. # since it's a one-time burst for memory that must be allowed to
  46. # complete without an OOM kill. Note however that if a k8s
  47. # limitRange guardrail is defined external to Rook, the lack of
  48. # a limit here may result in a sync failure, in which case a
  49. # limit should be added. 1200Mi may suffice for up to 15Ti
  50. # OSDs ; for larger devices 2Gi may be required.
  51. # cf. https://github.com/rook/rook/pull/11103
  52. requests:
  53. cpu: 0
  54. memory: "500Mi"
  55. mgr-sidecar:
  56. limits:
  57. cpu: 0
  58. memory: "100Mi"
  59. requests:
  60. cpu: 0
  61. memory: "40Mi"
  62. crashcollector:
  63. limits:
  64. cpu: 0
  65. memory: "60Mi"
  66. requests:
  67. cpu: 0
  68. memory: "60Mi"
  69. logcollector:
  70. limits:
  71. cpu: 0
  72. memory: "1Gi"
  73. requests:
  74. cpu: 0
  75. memory: "100Mi"
  76. cleanup:
  77. limits:
  78. cpu: 0
  79. memory: "1Gi"
  80. requests:
  81. cpu: 0
  82. memory: "100Mi"
  83. exporter:
  84. limits:
  85. cpu: 0
  86. memory: "128Mi"
  87. requests:
  88. cpu: 0
  89. memory: "50Mi"
  90. cephBlockPools: []
  91. cephFileSystems: []
  92. cephObjectStores: []
  93. #- name: ceph-objectstore
  94. # spec:
  95. # metadataPool:
  96. # failureDomain: osd
  97. # replicated:
  98. # size: 3
  99. # dataPool:
  100. # failureDomain: osd
  101. # erasureCoded:
  102. # dataChunks: 2
  103. # codingChunks: 1
  104. # parameters:
  105. # bulk: "true"
  106. # preservePoolsOnDelete: true
  107. # gateway:
  108. # port: 80
  109. # resources: {}
  110. # instances: 1
  111. # priorityClassName: system-cluster-critical
  112. # storageClass:
  113. # enabled: false
  114. # #name: ceph-bucket
  115. # #reclaimPolicy: Delete
  116. # #volumeBindingMode: "Immediate"
  117. # #annotations: {}
  118. # #labels: {}
  119. # #parameters:
  120. # # # note: objectStoreNamespace and objectStoreName are configured by the chart
  121. # # region: us-east-1
  122. # ingress:
  123. # enabled: false
  124. # route:
  125. # enabled: false