# rook-ceph-cluster-values.yaml
  1. # https://github.com/rook/rook/blob/v1.19.1/deploy/charts/rook-ceph-cluster/values.yaml
  2. toolbox:
  3. enabled: true
  4. cephClusterSpec:
  5. mon:
  6. count: 1
  7. allowMultiplePerNode: true
  8. mgr:
  9. count: 1
  10. allowMultiplePerNode: true
  11. modules:
  12. - name: pg_autoscaler
  13. enabled: false
  14. dashboard:
  15. port: 8080
  16. ssl: false
  17. logCollector:
  18. enabled: false
  19. resources:
  20. mgr:
  21. limits:
  22. cpu: 0
  23. memory: "1.5Gi"
  24. requests:
  25. cpu: 0
  26. memory: "512Mi"
  27. mon:
  28. limits:
  29. cpu: 0
  30. memory: "1Gi"
  31. requests:
  32. cpu: 0
  33. memory: "500Mi"
  34. osd:
  35. limits:
  36. cpu: 0
  37. memory: "4Gi"
  38. requests:
  39. cpu: 0
  40. memory: "1Gi"
  41. prepareosd:
  42. # limits: It is not recommended to set limits on the OSD prepare job
  43. # since it's a one-time burst for memory that must be allowed to
  44. # complete without an OOM kill. Note however that if a k8s
  45. # limitRange guardrail is defined external to Rook, the lack of
  46. # a limit here may result in a sync failure, in which case a
  47. # limit should be added. 1200Mi may suffice for up to 15Ti
  48. # OSDs ; for larger devices 2Gi may be required.
  49. # cf. https://github.com/rook/rook/pull/11103
  50. requests:
  51. cpu: 0
  52. memory: "500Mi"
  53. mgr-sidecar:
  54. limits:
  55. cpu: 0
  56. memory: "100Mi"
  57. requests:
  58. cpu: 0
  59. memory: "40Mi"
  60. crashcollector:
  61. limits:
  62. cpu: 0
  63. memory: "60Mi"
  64. requests:
  65. cpu: 0
  66. memory: "60Mi"
  67. logcollector:
  68. limits:
  69. cpu: 0
  70. memory: "1Gi"
  71. requests:
  72. cpu: 0
  73. memory: "100Mi"
  74. cleanup:
  75. limits:
  76. cpu: 0
  77. memory: "1Gi"
  78. requests:
  79. cpu: 0
  80. memory: "100Mi"
  81. exporter:
  82. limits:
  83. cpu: 0
  84. memory: "128Mi"
  85. requests:
  86. cpu: 0
  87. memory: "50Mi"
  88. cephBlockPools: []
  89. cephFileSystems: []
  90. cephObjectStores: []
  91. #- name: ceph-objectstore
  92. # spec:
  93. # metadataPool:
  94. # failureDomain: osd
  95. # replicated:
  96. # size: 3
  97. # dataPool:
  98. # failureDomain: osd
  99. # erasureCoded:
  100. # dataChunks: 2
  101. # codingChunks: 1
  102. # parameters:
  103. # bulk: "true"
  104. # preservePoolsOnDelete: true
  105. # gateway:
  106. # port: 80
  107. # resources: {}
  108. # instances: 1
  109. # priorityClassName: system-cluster-critical
  110. # storageClass:
  111. # enabled: false
  112. # #name: ceph-bucket
  113. # #reclaimPolicy: Delete
  114. # #volumeBindingMode: "Immediate"
  115. # #annotations: {}
  116. # #labels: {}
  117. # #parameters:
  118. # # # note: objectStoreNamespace and objectStoreName are configured by the chart
  119. # # region: us-east-1
  120. # ingress:
  121. # enabled: false
  122. # route:
  123. # enabled: false