
start upgrading rook cluster to v1.14.12

Josh Bicking 1 week ago
parent
commit
2a71ebaa2a
1 changed file with 3 additions and 508 deletions
  1. argocd/rook/rook-ceph-cluster-values.yaml (+3 −508)

+3 −508
argocd/rook/rook-ceph-cluster-values.yaml
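This commit trims the values file down to just the local overrides against the v1.14.12 chart defaults; everything that matched upstream is deleted. The chart version bump itself lives outside this file. Since it sits under argocd/, presumably an ArgoCD Application pins the chart; a hypothetical sketch, where everything except targetRevision is an illustrative assumption:

# Hypothetical ArgoCD Application pinning the rook-ceph-cluster chart.
# Names, project, and destination are assumptions; only targetRevision
# reflects this commit's upgrade.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: rook-ceph-cluster
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://charts.rook.io/release  # upstream Rook Helm repo
    chart: rook-ceph-cluster
    targetRevision: v1.14.12                 # bumped from v1.13.10
  destination:
    server: https://kubernetes.default.svc
    namespace: rook-ceph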

@@ -1,52 +1,9 @@
-# From https://raw.githubusercontent.com/rook/rook/v1.13.10/deploy/charts/rook-ceph-cluster/values.yaml
-# Default values for a single rook-ceph cluster
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-
-# -- Namespace of the main rook operator
-operatorNamespace: rook-ceph
-
-# -- The metadata.name of the CephCluster CR
-# @default -- The same as the namespace
-clusterName:
-
-# -- Optional override of the target kubernetes version
-kubeVersion:
-
-# -- Cluster ceph.conf override
-configOverride:
-# configOverride: |
-#   [global]
-#   mon_allow_pool_delete = true
-#   osd_pool_default_size = 3
-#   osd_pool_default_min_size = 2
-
+# From https://raw.githubusercontent.com/rook/rook/v1.14.12/deploy/charts/rook-ceph-cluster/values.yaml
 # Installs a debugging toolbox deployment
 toolbox:
   # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
   enabled: true
   # -- Toolbox image, defaults to the image used by the Ceph cluster
-  image: #quay.io/ceph/ceph:v18.2.2
-  # -- Toolbox tolerations
-  tolerations: []
-  # -- Toolbox affinity
-  affinity: {}
-  # -- Toolbox container security context
-  containerSecurityContext:
-    runAsNonRoot: true
-    runAsUser: 2016
-    runAsGroup: 2016
-    capabilities:
-      drop: ["ALL"]
-  # -- Toolbox resources
-  resources:
-    limits:
-      memory: "1Gi"
-    requests:
-      cpu: "100m"
-      memory: "128Mi"
-  # -- Set the priority class for the toolbox if desired
-  priorityClassName:
 
 monitoring:
   # -- Enable Prometheus integration, will also create necessary RBAC rules to allow Operator to create ServiceMonitors.
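The first hunk replaces the v1.13.10 header comment with the v1.14.12 one and drops everything that matched the chart defaults, including the commented configOverride example. If a cluster-wide ceph.conf override is ever needed again, it can be reintroduced as a top-level key; a sketch using the exact values from the deleted comment:

# Restored from the deleted default comments; purely illustrative.
configOverride: |
  [global]
  mon_allow_pool_delete = true
  osd_pool_default_size = 3
  osd_pool_default_min_size = 2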
@@ -57,96 +14,13 @@ monitoring:
   # -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
   # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
   # deployed) to set rulesNamespaceOverride for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
-  rulesNamespaceOverride:
-  # Monitoring settings for external clusters:
-  # externalMgrEndpoints: <list of endpoints>
-  # externalMgrPrometheusPort: <port>
-  # Scrape interval for prometheus
-  # interval: 10s
-  # allow adding custom labels and annotations to the prometheus rule
-  prometheusRule:
-    # -- Labels applied to PrometheusRule
-    labels: {}
-    # -- Annotations applied to PrometheusRule
-    annotations: {}
-
-# -- Create & use PSP resources. Set this to the same value as the rook-ceph chart.
-pspEnable: true
-
-# imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
-# imagePullSecrets:
-# - name: my-registry-secret
 
 # All values below are taken from the CephCluster CRD
 # -- Cluster configuration.
 # @default -- See [below](#ceph-cluster-spec)
 cephClusterSpec:
-  # This cluster spec example is for a converged cluster where all the Ceph daemons are running locally,
-  # as in the host-based example (cluster.yaml). For a different configuration such as a
-  # PVC-based cluster (cluster-on-pvc.yaml), external cluster (cluster-external.yaml),
-  # or stretch cluster (cluster-stretched.yaml), replace this entire `cephClusterSpec`
-  # with the specs from those examples.
-
-  # For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
-  cephVersion:
-    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
-    # v17 is Quincy, v18 is Reef.
-    # RECOMMENDATION: In production, use a specific version tag instead of the general v18 flag, which pulls the latest release and could result in different
-    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
-    # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v18.2.2-20240311
-    # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
-    image: quay.io/ceph/ceph:v18.2.4
-    # Whether to allow unsupported versions of Ceph. Currently `quincy`, and `reef` are supported.
-    # Future versions such as `squid` (v19) would require this to be set to `true`.
-    # Do not set to true in production.
-    allowUnsupported: false
-
-  # The path on the host where configuration files will be persisted. Must be specified.
-  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
-  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
-  dataDirHostPath: /var/lib/rook
-
-  # Whether or not upgrade should continue even if a check fails
-  # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
-  # Use at your OWN risk
-  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/v1.10/Upgrade/ceph-upgrade/
-  skipUpgradeChecks: false
-
-  # Whether or not continue if PGs are not clean during an upgrade
-  continueUpgradeAfterChecksEvenIfNotHealthy: false
-
-  # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
-  # If the timeout is exceeded and the OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one
-  # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then the operator would
-  # continue with the upgrade of an OSD even if it's not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
-  # The default wait timeout is 10 minutes.
-  waitTimeoutForHealthyOSDInMinutes: 10
-
-  mon:
-    # Set the number of mons to be started. Generally recommended to be 3.
-    # For highest availability, an odd number of mons should be specified.
-    count: 3
-    # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
-    # Mons should only be allowed on the same node for test environments where data loss is acceptable.
-    allowMultiplePerNode: false
-
-  mgr:
-    # When higher availability of the mgr is needed, increase the count to 2.
-    # In that case, one mgr will be active and one in standby. When Ceph updates which
-    # mgr is active, Rook will update the mgr services to match the active mgr.
-    count: 2
-    allowMultiplePerNode: false
-    modules:
-      # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
-      # are already enabled by other settings in the cluster CR.
-      - name: pg_autoscaler
-        enabled: true
-      - name: rook
-        enabled: true
 
-  # enable the ceph dashboard for viewing cluster status
   dashboard:
-    enabled: true
     # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
     # urlPrefix: /ceph-dashboard
     # serve the dashboard at the given port.
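With the cephVersion block deleted, the cluster now inherits the chart's default Ceph image. The removed comments recommend pinning an explicit tag in production rather than a floating v18; a sketch restoring the pin this file carried before the trim:

# Sketch: re-pin the Ceph image under cephClusterSpec if drift is a concern.
cephClusterSpec:
  cephVersion:
    image: quay.io/ceph/ceph:v18.2.4  # explicit Reef tag, per the deleted recommendation
    allowUnsupported: false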
@@ -155,81 +29,14 @@ cephClusterSpec:
     # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
     ssl: false
 
-  # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/ceph-cluster-crd.md#network-configuration-settings
+  # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/Cluster/ceph-cluster-crd.md#network-configuration-settings
   network:
-    connections:
-      # Whether to encrypt the data in transit across the wire to prevent eavesdropping on the data on the network.
-      # The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons will be encrypted.
-      # When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check.
-      # IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively for testing only,
-      # you can set the "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
-      # The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
-      encryption:
-        enabled: false
-      # Whether to compress the data in transit across the wire. The default is false.
-      # Requires Ceph Quincy (v17) or newer. Also see the kernel requirements above for encryption.
-      compression:
-        enabled: false
-      # Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
-      # and clients will be required to connect to the Ceph cluster with the v2 port (3300).
-      # Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer).
-      requireMsgr2: false
     # enable host networking
     provider: host
-  #   # EXPERIMENTAL: enable the Multus network provider
-  #   provider: multus
-  #   selectors:
-  #     # The selector keys are required to be `public` and `cluster`.
-  #     # Based on the configuration, the operator will do the following:
-  #     #   1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface
-  #     #   2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network'
-  #     #
-  #     # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
-  #     #
-  #     # public: public-conf --> NetworkAttachmentDefinition object name in Multus
-  #     # cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
-  #   # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
-  #   ipFamily: "IPv6"
-  #   # Ceph daemons to listen on both IPv4 and IPv6 networks
-  #   dualStack: false
-
-  # enable the crash collector for ceph daemon crash collection
-  crashCollector:
-    disable: false
-    # Uncomment daysToRetain to prune ceph crash entries older than the
-    # specified number of days.
-    # daysToRetain: 30
 
   # enable log collector, daemons will log on files and rotate
   logCollector:
     enabled: false
-    periodicity: daily # one of: hourly, daily, weekly, monthly
-    maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
-
-  # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
-  cleanupPolicy:
-    # Since cluster cleanup is destructive to data, confirmation is required.
-    # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
-    # This value should only be set when the cluster is about to be deleted. After the confirmation is set,
-    # Rook will immediately stop configuring the cluster and only wait for the delete command.
-    # If the empty string is set, Rook will not destroy any data on hosts during uninstall.
-    confirmation: ""
-    # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
-    sanitizeDisks:
-      # method indicates if the entire disk should be sanitized or simply ceph's metadata
-      # in both case, re-install is possible
-      # possible choices are 'complete' or 'quick' (default)
-      method: quick
-      # dataSource indicate where to get random bytes from to write on the disk
-      # possible choices are 'zero' (default) or 'random'
-      # using random sources will consume entropy from the system and will take much more time than the zero source
-      dataSource: zero
-      # iteration overwrite N times instead of the default (1)
-      # takes an integer value
-      iteration: 1
-    # allowUninstallWithVolumes defines how the uninstall should be performed
-    # If set to true, cephCluster deletion does not wait for the PVs to be deleted.
-    allowUninstallWithVolumes: false
 
   # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
   # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
@@ -249,35 +56,6 @@ cephClusterSpec:
           operator: Equal
           value: "true"
           effect: PreferNoSchedule
-  #   # The above placement information can also be specified for mon, osd, and mgr components
-  #   mon:
-  #   # Monitor deployments may contain an anti-affinity rule for avoiding monitor
-  #   # collocation on the same node. This is a required rule when host network is used
-  #   # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
-  #   # preferred rule with weight: 50.
-  #   osd:
-  #   mgr:
-  #   cleanup:
-
-  # annotations:
-  #   all:
-  #   mon:
-  #   osd:
-  #   cleanup:
-  #   prepareosd:
-  #   # If no mgr annotations are set, prometheus scrape annotations will be set by default.
-  #   mgr:
-
-  # labels:
-  #   all:
-  #   mon:
-  #   osd:
-  #   cleanup:
-  #   mgr:
-  #   prepareosd:
-  #   # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator.
-  #   # These labels can be passed as LabelSelector to Prometheus
-  #   monitoring:
 
   resources:
     mgr:
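The hunk above keeps the 'all' tolerations but deletes the commented per-daemon placement, annotations, and labels examples. If mons ever need to be pinned to storage nodes, a sketch modeled on the deleted comments (the storage-node label mirrors the kept tolerations):

# Sketch: schedule mons onto nodes labeled storage-node=true.
cephClusterSpec:
  placement:
    mon:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
            - matchExpressions:
                - key: storage-node
                  operator: In
                  values:
                    - "true"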
@@ -349,97 +127,6 @@ cephClusterSpec:
         cpu: 0
         memory: "50Mi"
 
-  # The option to automatically remove OSDs that are out and are safe to destroy.
-  removeOSDsIfOutAndSafeToRemove: false
-
-  # priority classes to apply to ceph resources
-  priorityClassNames:
-    mon: system-node-critical
-    osd: system-node-critical
-    mgr: system-cluster-critical
-
-  storage: # cluster level storage configuration and selection
-    useAllNodes: true
-    useAllDevices: true
-    # deviceFilter:
-    # config:
-    #   crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
-    #   metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
-    #   databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
-    #   osdsPerDevice: "1" # this value can be overridden at the node or device level
-    #   encryptedDevice: "true" # the default value for this option is "false"
-    # # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
-    # # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
-    # nodes:
-    #   - name: "172.17.4.201"
-    #     devices: # specific devices to use for storage can be specified for each node
-    #       - name: "sdb"
-    #       - name: "nvme01" # multiple osds can be created on high performance devices
-    #         config:
-    #           osdsPerDevice: "5"
-    #       - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
-    #     config: # configuration can be specified at the node level which overrides the cluster level config
-    #   - name: "172.17.4.301"
-    #     deviceFilter: "^sd."
-
-  # The section for configuring management of daemon disruptions during upgrade or fencing.
-  disruptionManagement:
-    # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
-    # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
-    # block eviction of OSDs by default and unblock them safely when drains are detected.
-    managePodBudgets: true
-    # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
-    # default DOWN/OUT interval) when it is draining. This is only relevant when  `managePodBudgets` is `true`. The default value is `30` minutes.
-    osdMaintenanceTimeout: 30
-    # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
-    # Operator will continue with the next drain if the timeout exceeds. It only works if `managePodBudgets` is `true`.
-    # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
-    pgHealthCheckTimeout: 0
-
-  # Configure the healthcheck and liveness probes for ceph pods.
-  # Valid values for daemons are 'mon', 'osd', 'status'
-  healthCheck:
-    daemonHealth:
-      mon:
-        disabled: false
-        interval: 45s
-      osd:
-        disabled: false
-        interval: 60s
-      status:
-        disabled: false
-        interval: 60s
-    # Change pod liveness probe, it works for all mon, mgr, and osd pods.
-    livenessProbe:
-      mon:
-        disabled: false
-      mgr:
-        disabled: false
-      osd:
-        disabled: false
-
-ingress:
-  # -- Enable an ingress for the ceph-dashboard
-  dashboard:
-    {}
-    # annotations:
-    #   external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
-    #   nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
-    # If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly
-    #   nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
-    #   nginx.ingress.kubernetes.io/server-snippet: |
-    #     proxy_ssl_verify off;
-    # host:
-    #   name: dashboard.example.com
-    #   path: "/ceph-dashboard(/|$)(.*)"
-    # tls:
-    # - hosts:
-    #     - dashboard.example.com
-    #   secretName: testsecret-tls
-    ## Note: Only one of ingress class annotation or the `ingressClassName:` can be used at a time
-    ## to set the ingress class
-    # ingressClassName: nginx
-
 # -- A list of CephBlockPool configurations to deploy
 # @default -- See [below](#ceph-block-pools)
 cephBlockPools:
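The largest removal in the hunk above is the storage section: useAllNodes and useAllDevices both defaulted to true, so the chart default now consumes every node and device. If device selection ever needs to be explicit, a sketch taken from the deleted per-node comments:

# Sketch from the deleted comments: name nodes and devices explicitly.
cephClusterSpec:
  storage:
    useAllNodes: false   # must be false when listing nodes below
    useAllDevices: false
    nodes:
      - name: "172.17.4.201"  # must match the node's kubernetes.io/hostname label
        devices:
          - name: "sdb"
          - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX"  # full udev paths also work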
@@ -532,69 +219,9 @@ cephBlockPools:
         csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
 
         csi.storage.k8s.io/fstype: ext4
-
 # -- A list of CephFileSystem configurations to deploy
 # @default -- See [below](#ceph-file-systems)
 cephFileSystems: []
-  #- name: ceph-filesystem
-  #  # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
-  #  spec:
-  #    metadataPool:
-  #      replicated:
-  #        size: 3
-  #      deviceClass: ssd
-  #    dataPools:
-  #      - failureDomain: host
-  #        replicated:
-  #          size: 3
-  #        # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
-  #        name: data0
-  #        deviceClass: hdd
-  #    metadataServer:
-  #      activeCount: 1
-  #      activeStandby: true
-  #      placement:
-  #        nodeAffinity:
-  #          requiredDuringSchedulingIgnoredDuringExecution:
-  #            nodeSelectorTerms:
-  #              - matchExpressions:
-  #                - key: storage-node
-  #                  operator: In
-  #                  values:
-  #                    - "true"
-  #        podAffinity:
-  #        podAntiAffinity:
-  #        topologySpreadConstraints:
-  #        tolerations:
-  #        - key: storage-node
-  #          operator: Equal
-  #          value: "true"
-  #          effect: PreferNoSchedule
-  #      priorityClassName: system-cluster-critical
-  #  storageClass:
-  #    enabled: true
-  #    isDefault: false
-  #    name: ceph-filesystem
-  #    # (Optional) specify a data pool to use, must be the name of one of the data pools above, 'data0' by default
-  #    pool: data0
-  #    reclaimPolicy: Delete
-  #    allowVolumeExpansion: true
-  #    volumeBindingMode: "Immediate"
-  #    mountOptions: []
-  #    # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration
-  #    parameters:
-  #      # The secrets contain Ceph admin credentials.
-  #      csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
-  #      csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
-  #      csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
-  #      csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
-  #      csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
-  #      csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
-  #      # Specify the filesystem type of the volume. If not specified, csi-provisioner
-  #      # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
-  #      # in hyperconverged settings where the volume is mounted on the same node as the osds.
-  #      csi.storage.k8s.io/fstype: ext4
-
 # -- Settings for the filesystem snapshot class
 # @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
 cephFileSystemVolumeSnapshotClass:
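cephFileSystems collapses to an empty list and its long commented example goes with it. A minimal entry reconstructed from those deleted comments, should CephFS be re-enabled later:

# Sketch reconstructed from the deleted cephFileSystems example.
cephFileSystems:
  - name: ceph-filesystem
    spec:
      metadataPool:
        replicated:
          size: 3
      dataPools:
        - name: data0        # conventional default data pool name
          failureDomain: host
          replicated:
            size: 3
      metadataServer:
        activeCount: 1
        activeStandby: true
    storageClass:
      enabled: true
      name: ceph-filesystem
      pool: data0            # must name one of the data pools above
      reclaimPolicy: Delete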
@@ -621,136 +248,4 @@ cephBlockPoolsVolumeSnapshotClass:
 
 # -- A list of CephObjectStore configurations to deploy
 # @default -- See [below](#ceph-object-stores)
-cephObjectStores:
-#   - name: ceph-objectstore
-#     # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
-#     spec:
-#       metadataPool:
-#         failureDomain: host
-#         replicated:
-#           size: 3
-#         deviceClass: ssd
-#       dataPool:
-#         failureDomain: host
-#         replicated:
-#           size: 3
-#           # erasureCoded:
-#           #   dataChunks: 2
-#           #   codingChunks: 1
-#         deviceClass: hdd
-#       preservePoolsOnDelete: true
-#       gateway:
-#         placement:
-#           nodeAffinity:
-#             requiredDuringSchedulingIgnoredDuringExecution:
-#               nodeSelectorTerms:
-#                 - matchExpressions:
-#                   - key: storage-node
-#                     operator: In
-#                     values:
-#                       - "true"
-#           podAffinity:
-#           podAntiAffinity:
-#           topologySpreadConstraints:
-#           tolerations:
-#           #- key: storage-node
-#           #  operator: Exists
-#         port: 6980
-#         resources:
-#           limits:
-#             cpu: "500m"
-#             memory: "2Gi"
-#           requests:
-#             cpu: "250m"
-#             memory: "1Gi"
-#         # securePort: 443
-#         # sslCertificateRef:
-#         instances: 1
-#         priorityClassName: system-cluster-critical
-#       healthCheck:
-#         bucket:
-#           interval: 60s
-#     storageClass:
-#       enabled: true
-#       name: ceph-bucket
-#       reclaimPolicy: Delete
-#       volumeBindingMode: "Immediate"
-#       # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration
-#       parameters:
-#         # note: objectStoreNamespace and objectStoreName are configured by the chart
-#         region: us-east-1
-#     ingress:
-#       # Enable an ingress for the ceph-objectstore
-#       enabled: false
-#       # annotations: {}
-#       # host:
-#       #   name: objectstore.example.com
-#       #   path: /
-#       # tls:
-#       # - hosts:
-#       #     - objectstore.example.com
-#       #   secretName: ceph-objectstore-tls
-#       # ingressClassName: nginx
-# cephECBlockPools are disabled by default, please remove the comments and set desired values to enable it
-#cephECBlockPools:
-#  # For erasure coded a replicated metadata pool is required.
-#  # https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded
-#  - name: ec-metadata-pool
-#    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
-#    spec:
-#      replicated:
-#        size: 2
-#  - name: ec-data-pool
-#    spec:
-#      failureDomain: osd
-#      erasureCoded:
-#        dataChunks: 2
-#        codingChunks: 1
-#      deviceClass: hdd
-
-# cephECStorageClass also is disabled by default, please remove the comments and set desired values to enable it
-# if cephECBlockPools are uncommented you must remove the comments of cephEcStorageClass as well
-#cephECStorageClass:
-#  name: rook-ceph-block
-#  parameters:
-#    # clusterID is the namespace where the rook cluster is running
-#    # If you change this namespace, also change the namespace below where the secret namespaces are defined
-#    clusterID: rook-ceph # namespace:cluster
-#
-#    # If you want to use erasure coded pool with RBD, you need to create
-#    # two pools. one erasure coded and one replicated.
-#    # You need to specify the replicated pool here in the `pool` parameter, it is
-#    # used for the metadata of the images.
-#    # The erasure coded pool must be set as the `dataPool` parameter below.
-#    dataPool: ec-data-pool
-#    pool: ec-metadata-pool
-#
-#    # (optional) mapOptions is a comma-separated list of map options.
-#    # For krbd options refer
-#    # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
-#    # For nbd options refer
-#    # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
-#    # mapOptions: lock_on_read,queue_depth=1024
-#
-#    # (optional) unmapOptions is a comma-separated list of unmap options.
-#    # For krbd options refer
-#    # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
-#    # For nbd options refer
-#    # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
-#    # unmapOptions: force
-#
-#    # RBD image format. Defaults to "2".
-#    imageFormat: "2"
-#
-#    # RBD image features, equivalent to OR'd bitfield value: 63
-#    # Available for imageFormat: "2". Older releases of CSI RBD
-#    # support only the `layering` feature. The Linux kernel (KRBD) supports the
-#    # full feature complement as of 5.4
-#    # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
-#    imageFeatures: layering
-#  allowVolumeExpansion: true
-#  reclaimPolicy: Delete
-
-# -- CSI driver name prefix for cephfs, rbd and nfs.
-# @default -- `namespace name where rook-ceph operator is deployed`
-csiDriverNamePrefix:
+cephObjectStores: []
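The final hunk empties cephObjectStores and drops the commented erasure-coding pools, the EC storage class, and csiDriverNamePrefix (which now falls back to the chart default, the operator's namespace). A minimal object store entry rebuilt from the deleted comments, in case RGW is wanted again:

# Sketch rebuilt from the deleted cephObjectStores example.
cephObjectStores:
  - name: ceph-objectstore
    spec:
      metadataPool:
        failureDomain: host
        replicated:
          size: 3
      dataPool:
        failureDomain: host
        replicated:
          size: 3
      preservePoolsOnDelete: true
      gateway:
        port: 6980
        instances: 1
    storageClass:
      enabled: true
      name: ceph-bucket
      reclaimPolicy: Delete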