---
# Rook CephFilesystem "data": replicated metadata pool on SSD, replicated
# data pool on HDD, single active MDS with a hot standby pinned to
# storage-node-labeled hosts.
# TODO move to the main helm values
# TODO isn't written much, could probably be EC
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: data
  namespace: rook-ceph
spec:
  # CephFS metadata is small but latency-sensitive — keep it on SSD.
  metadataPool:
    replicated:
      size: 3
    deviceClass: ssd
  # Bulk file data goes to HDD; 3-way replication (see EC TODO above).
  dataPools:
    - replicated:
        size: 3
      deviceClass: hdd
  metadataServer:
    activeCount: 1
    # Keep a standby-replay MDS warm for faster failover.
    activeStandby: true
    placement:
      # Only schedule MDS pods on nodes labeled storage-node=true,
      # and tolerate the matching storage-node taint on those nodes.
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: storage-node
                  operator: In
                  values:
                    # Quoted: label values are strings; bare true would
                    # be parsed as a YAML boolean.
                    - "true"
      tolerations:
        - key: storage-node
          operator: Exists
    priorityClassName: system-cluster-critical
    resources:
      limits:
        cpu: "2"
        memory: 4Gi
      requests:
        cpu: "1"
        memory: 4Gi