---
# Rook CephFilesystem "data": a CephFS instance with an SSD-backed metadata
# pool and an HDD-backed data pool, served by one active MDS plus a hot
# standby pinned to dedicated storage nodes.
# TODO move to the main helm values
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: data
  namespace: rook-ceph
spec:
  # Metadata pool: 3-way replicated on SSDs for low-latency MDS operations.
  metadataPool:
    replicated:
      size: 3
    deviceClass: ssd
  # Data pools: 3-way replicated on HDDs for bulk file contents.
  dataPools:
    - replicated:
        size: 3
      deviceClass: hdd
  metadataServer:
    activeCount: 1
    # Keep a standby-replay MDS warm for fast failover.
    activeStandby: true
    placement:
      # Schedule MDS pods only on nodes labeled storage-node=true.
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: storage-node
                  operator: In
                  values:
                    # Quoted: a bare true would parse as a YAML boolean,
                    # but label values must be strings.
                    - "true"
      # Tolerate the storage-node taint so MDS pods can land on those nodes.
      tolerations:
        - key: storage-node
          operator: Exists
    priorityClassName: system-cluster-critical
    resources:
      limits:
        cpu: "2"
        memory: 4Gi
      # requests.memory == limits.memory avoids MDS OOM eviction surprises;
      # CPU is allowed to burst from 1 to 2 cores.
      requests:
        cpu: "1"
        memory: 4Gi