Selaa lähdekoodia

remove default ceph-filesystem

Josh Bicking 2 viikkoa sitten
vanhempi
sitoutus
eff2cd939f
1 muutettu tiedosto, jossa 59 lisäystä ja 59 poistoa
  1. 59 59
      argocd/rook/rook-ceph-cluster-values.yaml

+ 59 - 59
argocd/rook/rook-ceph-cluster-values.yaml

@@ -538,65 +538,65 @@ cephBlockPools:
 
 # -- A list of CephFileSystem configurations to deploy
 # @default -- See [below](#ceph-file-systems)
-cephFileSystems:
-  - name: ceph-filesystem
-    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
-    spec:
-      metadataPool:
-        replicated:
-          size: 3
-        deviceClass: ssd
-      dataPools:
-        - failureDomain: host
-          replicated:
-            size: 3
-          # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
-          name: data0
-          deviceClass: hdd
-      metadataServer:
-        activeCount: 1
-        activeStandby: true
-        placement:
-          nodeAffinity:
-            requiredDuringSchedulingIgnoredDuringExecution:
-              nodeSelectorTerms:
-                - matchExpressions:
-                  - key: storage-node
-                    operator: In
-                    values:
-                      - "true"
-          podAffinity:
-          podAntiAffinity:
-          topologySpreadConstraints:
-          tolerations:
-          - key: storage-node
-            operator: Equal
-            value: "true"
-            effect: PreferNoSchedule
-        priorityClassName: system-cluster-critical
-    storageClass:
-      enabled: true
-      isDefault: false
-      name: ceph-filesystem
-      # (Optional) specify a data pool to use, must be the name of one of the data pools above, 'data0' by default
-      pool: data0
-      reclaimPolicy: Delete
-      allowVolumeExpansion: true
-      volumeBindingMode: "Immediate"
-      mountOptions: []
-      # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration
-      parameters:
-        # The secrets contain Ceph admin credentials.
-        csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
-        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
-        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
-        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
-        csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
-        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
-        # Specify the filesystem type of the volume. If not specified, csi-provisioner
-        # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
-        # in hyperconverged settings where the volume is mounted on the same node as the osds.
-        csi.storage.k8s.io/fstype: ext4
+cephFileSystems: []
+  #- name: ceph-filesystem
+  #  # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
+  #  spec:
+  #    metadataPool:
+  #      replicated:
+  #        size: 3
+  #      deviceClass: ssd
+  #    dataPools:
+  #      - failureDomain: host
+  #        replicated:
+  #          size: 3
+  #        # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
+  #        name: data0
+  #        deviceClass: hdd
+  #    metadataServer:
+  #      activeCount: 1
+  #      activeStandby: true
+  #      placement:
+  #        nodeAffinity:
+  #          requiredDuringSchedulingIgnoredDuringExecution:
+  #            nodeSelectorTerms:
+  #              - matchExpressions:
+  #                - key: storage-node
+  #                  operator: In
+  #                  values:
+  #                    - "true"
+  #        podAffinity:
+  #        podAntiAffinity:
+  #        topologySpreadConstraints:
+  #        tolerations:
+  #        - key: storage-node
+  #          operator: Equal
+  #          value: "true"
+  #          effect: PreferNoSchedule
+  #      priorityClassName: system-cluster-critical
+  #  storageClass:
+  #    enabled: true
+  #    isDefault: false
+  #    name: ceph-filesystem
+  #    # (Optional) specify a data pool to use, must be the name of one of the data pools above, 'data0' by default
+  #    pool: data0
+  #    reclaimPolicy: Delete
+  #    allowVolumeExpansion: true
+  #    volumeBindingMode: "Immediate"
+  #    mountOptions: []
+  #    # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration
+  #    parameters:
+  #      # The secrets contain Ceph admin credentials.
+  #      csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
+  #      csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
+  #      csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
+  #      csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
+  #      csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
+  #      csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
+  #      # Specify the filesystem type of the volume. If not specified, csi-provisioner
+  #      # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
+  #      # in hyperconverged settings where the volume is mounted on the same node as the osds.
+  #      csi.storage.k8s.io/fstype: ext4
 
 # -- Settings for the filesystem snapshot class
 # @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)