2 Commits 716e2aba8f ... 9a273342f5

Author SHA1 Message Date
  Josh Bicking 9a273342f5 update rook to v1.11.11 11 months ago
  Josh Bicking ee44d611f4 update rook to v1.10.13 11 months ago
3 changed files with 592 additions and 226 deletions
  1. rook/rook-ceph-cluster-values.yaml (+228, -69)
  2. rook/rook-ceph-operator-values.yaml (+363, -156)
  3. shelly-plug-exporter.yaml (+1, -1)

+ 228 - 69
rook/rook-ceph-cluster-values.yaml

@@ -1,18 +1,21 @@
-# From https://raw.githubusercontent.com/rook/rook/release-1.9/deploy/charts/rook-ceph-cluster/values.yaml
+# From https://raw.githubusercontent.com/rook/rook/v1.11.11/deploy/charts/rook-ceph-cluster/values.yaml
+# KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --namespace rook-ceph rook-ceph-cluster --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f rook/rook-ceph-cluster-values.yaml --version 1.10.13
 # Default values for a single rook-ceph cluster
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.
 
-# Namespace of the main rook operator
+# -- Namespace of the main rook operator
 operatorNamespace: rook-ceph
 
-# The metadata.name of the CephCluster CR. The default name is the same as the namespace.
-# clusterName: rook-ceph
+# -- The metadata.name of the CephCluster CR
+# @default -- The same as the namespace
+clusterName:
 
-# Ability to override the kubernetes version used in rendering the helm chart
-# kubeVersion: 1.21
+# -- Optional override of the target kubernetes version
+kubeVersion:
 
-# Ability to override ceph.conf
+# -- Cluster ceph.conf override
+configOverride:
 # configOverride: |
 #   [global]
 #   mon_allow_pool_delete = true
@@ -21,10 +24,16 @@ operatorNamespace: rook-ceph
 
 # Installs a debugging toolbox deployment
 toolbox:
+  # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
   enabled: true
-  image: rook/ceph:v1.9.0.230.g6a87cb44a
+  # -- Toolbox image, defaults to the image used by the Ceph cluster
+  # image: quay.io/ceph/ceph:v17.2.6
+  image: rook/ceph:v1.9.0.230.g6a87cb44a # TODO probably safe to remove?
+  # -- Toolbox tolerations
   tolerations: []
+  # -- Toolbox affinity
   affinity: {}
+  # -- Toolbox resources
   resources:
     limits:
       cpu: "500m"
@@ -32,21 +41,30 @@ toolbox:
     requests:
       cpu: "100m"
       memory: "128Mi"
-  # Set the priority class for the toolbox if desired
-  # priorityClassName: class
+  # -- Set the priority class for the toolbox if desired
+  priorityClassName:
 
-# monitoring requires Prometheus to be pre-installed
 monitoring:
-  # enabling will also create RBAC rules to allow Operator to create ServiceMonitors
+  # -- Enable Prometheus integration, will also create necessary RBAC rules to allow Operator to create ServiceMonitors.
+  # Monitoring requires Prometheus to be pre-installed
   enabled: true
-  # whether to create the prometheus rules
+  # -- Whether to create the Prometheus rules for Ceph alerts
   createPrometheusRules: true
-  # the namespace in which to create the prometheus rules, if different from the rook cluster namespace
+  # -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
   # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
-  # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
+  # deployed) to set rulesNamespaceOverride for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
   rulesNamespaceOverride:
-
-# If true, create & use PSP resources. Set this to the same value as the rook-ceph chart.
+  # Monitoring settings for external clusters:
+  # externalMgrEndpoints: <list of endpoints>
+  # externalMgrPrometheusPort: <port>
+  # allow adding custom labels and annotations to the prometheus rule
+  prometheusRule:
+    # -- Labels applied to PrometheusRule
+    labels: {}
+    # -- Annotations applied to PrometheusRule
+    annotations: {}
+
+# -- Create & use PSP resources. Set this to the same value as the rook-ceph chart.
 pspEnable: true
 
 # imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
@@ -54,18 +72,27 @@ pspEnable: true
 # - name: my-registry-secret
 
 # All values below are taken from the CephCluster CRD
-# More information can be found at [Ceph Cluster CRD](/Documentation/ceph-cluster-crd.md)
+# -- Cluster configuration.
+# @default -- See [below](#ceph-cluster-spec)
 cephClusterSpec:
+  # This cluster spec example is for a converged cluster where all the Ceph daemons are running locally,
+  # as in the host-based example (cluster.yaml). For a different configuration such as a
+  # PVC-based cluster (cluster-on-pvc.yaml), external cluster (cluster-external.yaml),
+  # or stretch cluster (cluster-stretched.yaml), replace this entire `cephClusterSpec`
+  # with the specs from those examples.
+
+  # For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
   cephVersion:
     # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
-    # v15 is octopus, and v16 is pacific.
+    # v16 is Pacific, v17 is Quincy.
     # RECOMMENDATION: In production, use a specific version tag instead of the general v16 flag, which pulls the latest release and could result in different
     # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
     # If you want to be more precise, you can always use a timestamp tag such quay.io/ceph/ceph:v15.2.11-20200419
     # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
+    # image: quay.io/ceph/ceph:v17.2.6
     image: quay.io/ceph/ceph:v16.2.7
-    # Whether to allow unsupported versions of Ceph. Currently `octopus` and `pacific` are supported.
-    # Future versions such as `pacific` would require this to be set to `true`.
+    # Whether to allow unsupported versions of Ceph. Currently `pacific` and `quincy` are supported.
+    # Future versions such as `reef` (v18) would require this to be set to `true`.
     # Do not set to true in production.
     allowUnsupported: false
 
@@ -77,7 +104,7 @@ cephClusterSpec:
   # Whether or not upgrade should continue even if a check fails
   # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
   # Use at your OWN risk
-  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/latest/ceph-upgrade.html#ceph-version-upgrades
+  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/v1.10/Upgrade/ceph-upgrade/
   skipUpgradeChecks: false
 
   # Whether or not continue if PGs are not clean during an upgrade
@@ -85,7 +112,7 @@ cephClusterSpec:
 
   # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
   # If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one
-  # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then opertor would
+  # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then operator would
   # continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
   # The default wait timeout is 10 minutes.
   waitTimeoutForHealthyOSDInMinutes: 10
@@ -119,12 +146,30 @@ cephClusterSpec:
     # urlPrefix: /ceph-dashboard
     # serve the dashboard at the given port.
     # port: 8443
-    # serve the dashboard using SSL
+    # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
+    # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
     ssl: true
 
-  # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/ceph-cluster-crd.md#network-configuration-settings
+  # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/ceph-cluster-crd.md#network-configuration-settings
   network:
-  # enable host networking
+    connections:
+      # Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network.
+      # The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons will be encrypted.
+      # When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check.
+      # IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively for testing only,
+      # you can set the "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
+      # The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
+      encryption:
+        enabled: false
+      # Whether to compress the data in transit across the wire. The default is false.
+      # Requires Ceph Quincy (v17) or newer. Also see the kernel requirements above for encryption.
+      compression:
+        enabled: false
+      # Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
+      # and clients will be required to connect to the Ceph cluster with the v2 port (3300).
+      # Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer).
+      requireMsgr2: false
+    # enable host networking
     provider: host
   #   # EXPERIMENTAL: enable the Multus network provider
   #   provider: multus
@@ -151,9 +196,10 @@ cephClusterSpec:
     # daysToRetain: 30
 
   # enable log collector, daemons will log on files and rotate
-  # logCollector:
-  #   enabled: true
-  #   periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days.
+  #logCollector:
+  #  enabled: true
+  #  periodicity: daily # one of: hourly, daily, weekly, monthly
+  #  maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
 
   # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
   cleanupPolicy:
@@ -192,22 +238,22 @@ cephClusterSpec:
               - key: storage-node
                 operator: In
                 values:
-                  - "true"
+                - "true"
       podAffinity:
       podAntiAffinity:
       topologySpreadConstraints:
       tolerations:
       - key: storage-node
         operator: Exists
-    # The above placement information can also be specified for mon, osd, and mgr components
-    mon:
-    # Monitor deployments may contain an anti-affinity rule for avoiding monitor
-    # collocation on the same node. This is a required rule when host network is used
-    # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
-    # preferred rule with weight: 50.
-    osd:
-    mgr:
-    cleanup:
+  #   # The above placement information can also be specified for mon, osd, and mgr components
+  #   mon:
+  #   # Monitor deployments may contain an anti-affinity rule for avoiding monitor
+  #   # collocation on the same node. This is a required rule when host network is used
+  #   # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
+  #   # preferred rule with weight: 50.
+  #   osd:
+  #   mgr:
+  #   cleanup:
 
   # annotations:
   #   all:
@@ -229,7 +275,6 @@ cephClusterSpec:
   #   # These labels can be passed as LabelSelector to Prometheus
   #   monitoring:
 
-  # https://home.robusta.dev/blog/stop-using-cpu-limits
   resources:
     mgr:
       limits:
@@ -253,9 +298,14 @@ cephClusterSpec:
         cpu: 0
         memory: "1Gi"
     prepareosd:
-      limits:
-        cpu: 0
-        memory: "12Gi"
+      # limits: It is not recommended to set limits on the OSD prepare job
+      #         since it's a one-time burst for memory that must be allowed to
+      #         complete without an OOM kill.  Note however that if a k8s
+      #         limitRange guardrail is defined external to Rook, the lack of
+      #         a limit here may result in a sync failure, in which case a
+      #         limit should be added.  1200Mi may suffice for up to 15Ti
+      #         OSDs ; for larger devices 2Gi may be required.
+      #         cf. https://github.com/rook/rook/pull/11103
       requests:
         cpu: 0
         memory: "500Mi"
@@ -287,6 +337,13 @@ cephClusterSpec:
       requests:
         cpu: 0
         memory: "100Mi"
+    exporter:
+      limits:
+        cpu: 0
+        memory: "128Mi"
+      requests:
+        cpu: 0
+        memory: "50Mi"
 
   # The option to automatically remove OSDs that are out and are safe to destroy.
   removeOSDsIfOutAndSafeToRemove: false
@@ -309,7 +366,7 @@ cephClusterSpec:
     #   osdsPerDevice: "1" # this value can be overridden at the node or device level
     #   encryptedDevice: "true" # the default value for this option is "false"
     # # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
-    # # nodes below will be used as storage resources.  Each node's 'name' field should match their 'kubernetes.io/hostname' label.
+    # # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
     # nodes:
     #   - name: "172.17.4.201"
     #     devices: # specific devices to use for storage can be specified for each node
@@ -335,11 +392,6 @@ cephClusterSpec:
     # Operator will continue with the next drain if the timeout exceeds. It only works if `managePodBudgets` is `true`.
     # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
     pgHealthCheckTimeout: 0
-    # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
-    # Only available on OpenShift.
-    manageMachineDisruptionBudgets: false
-    # Namespace in which to watch for the MachineDisruptionBudgets.
-    machineDisruptionBudgetNamespace: openshift-machine-api
 
   # Configure the healthcheck and liveness probes for ceph pods.
   # Valid values for daemons are 'mon', 'osd', 'status'
@@ -364,35 +416,53 @@ cephClusterSpec:
         disabled: false
 
 ingress:
-  dashboard: {}
+  # -- Enable an ingress for the ceph-dashboard
+  dashboard:
+    {}
     # annotations:
-    #   kubernetes.io/ingress.class: nginx
-    #   external-dns.alpha.kubernetes.io/hostname: example.com
+    #   external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
     #   nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
     # If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly
     #   nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
     #   nginx.ingress.kubernetes.io/server-snippet: |
     #     proxy_ssl_verify off;
     # host:
-    #   name: example.com
+    #   name: dashboard.example.com
     #   path: "/ceph-dashboard(/|$)(.*)"
     # tls:
-    # ingressClassName:
-
+    # - hosts:
+    #     - dashboard.example.com
+    #   secretName: testsecret-tls
+    ## Note: Only one of ingress class annotation or the `ingressClassName:` can be used at a time
+    ## to set the ingress class
+    # ingressClassName: nginx
+
+# -- A list of CephBlockPool configurations to deploy
+# @default -- See [below](#ceph-block-pools)
 cephBlockPools:
   - name: ceph-blockpool
-    # see https://github.com/rook/rook/blob/master/Documentation/ceph-pool-crd.md#spec for available configuration
+    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
     spec:
       failureDomain: host
       replicated:
         size: 3
+      # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
+      # For reference: https://docs.ceph.com/docs/master/mgr/prometheus/#rbd-io-statistics
+      # enableRBDStats: true
     storageClass:
       enabled: true
       name: ceph-block
       isDefault: true
       reclaimPolicy: Delete
       allowVolumeExpansion: true
+      volumeBindingMode: "Immediate"
       mountOptions: []
+      # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
+      allowedTopologies: []
+      #        - matchLabelExpressions:
+      #            - key: rook-ceph-role
+      #              values:
+      #                - storage-node
       # see https://github.com/rook/rook/blob/master/Documentation/ceph-block.md#provision-storage for available configuration
       parameters:
         # (optional) mapOptions is a comma-separated list of map options.
@@ -411,23 +481,30 @@ cephBlockPools:
 
         # RBD image format. Defaults to "2".
         imageFormat: "2"
-        # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
+
+        # RBD image features, equivalent to OR'd bitfield value: 63
+        # Available for imageFormat: "2". Older releases of CSI RBD
+        # support only the `layering` feature. The Linux kernel (KRBD) supports the
+        # full feature complement as of 5.4
         imageFeatures: layering
-        # The secrets contain Ceph admin credentials.
+
+        # These secrets contain Ceph admin credentials.
         csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
-        csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
         csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
-        csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
         csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
-        csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
         # Specify the filesystem type of the volume. If not specified, csi-provisioner
         # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
         # in hyperconverged settings where the volume is mounted on the same node as the osds.
         csi.storage.k8s.io/fstype: ext4
 
+# -- A list of CephFileSystem configurations to deploy
+# @default -- See [below](#ceph-file-systems)
 cephFileSystems:
   - name: ceph-filesystem
-    # see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem-crd.md#filesystem-settings for available configuration
+    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
     spec:
       metadataPool:
         replicated:
@@ -436,7 +513,7 @@ cephFileSystems:
         - failureDomain: host
           replicated:
             size: 3
-          # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem-crd.md#pools
+          # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
           name: data0
       metadataServer:
         activeCount: 1
@@ -472,21 +549,24 @@ cephFileSystems:
       pool: data0
       reclaimPolicy: Delete
       allowVolumeExpansion: true
+      volumeBindingMode: "Immediate"
       mountOptions: []
       # see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem.md#provision-storage for available configuration
       parameters:
         # The secrets contain Ceph admin credentials.
         csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
-        csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
         csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
-        csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
         csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
-        csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
         # Specify the filesystem type of the volume. If not specified, csi-provisioner
         # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
         # in hyperconverged settings where the volume is mounted on the same node as the osds.
         csi.storage.k8s.io/fstype: ext4
 
+# -- Settings for the filesystem snapshot class
+# @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
 cephFileSystemVolumeSnapshotClass:
   enabled: false
   name: ceph-filesystem
@@ -494,9 +574,11 @@ cephFileSystemVolumeSnapshotClass:
   deletionPolicy: Delete
   annotations: {}
   labels: {}
-  # see https://rook.io/docs/rook/latest/ceph-csi-snapshot.html#cephfs-snapshots for available configuration
+  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
   parameters: {}
 
+# -- Settings for the block pool snapshot class
+# @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
 cephBlockPoolsVolumeSnapshotClass:
   enabled: false
   name: ceph-block
@@ -504,12 +586,14 @@ cephBlockPoolsVolumeSnapshotClass:
   deletionPolicy: Delete
   annotations: {}
   labels: {}
-  # see https://rook.io/docs/rook/latest/ceph-csi-snapshot.html#rbd-snapshots for available configuration
+  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
   parameters: {}
 
+# -- A list of CephObjectStore configurations to deploy
+# @default -- See [below](#ceph-object-stores)
 cephObjectStores:
   - name: ceph-objectstore
-    # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-store-crd.md#object-store-settings for available configuration
+    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
     spec:
       metadataPool:
         failureDomain: host
@@ -556,7 +640,82 @@ cephObjectStores:
       enabled: true
       name: ceph-bucket
       reclaimPolicy: Delete
+      volumeBindingMode: "Immediate"
       # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-bucket-claim.md#storageclass for available configuration
       parameters:
         # note: objectStoreNamespace and objectStoreName are configured by the chart
         region: us-east-1
+    ingress:
+      # Enable an ingress for the ceph-objectstore
+      enabled: false
+      # annotations: {}
+      # host:
+      #   name: objectstore.example.com
+      #   path: /
+      # tls:
+      # - hosts:
+      #     - objectstore.example.com
+      #   secretName: ceph-objectstore-tls
+      # ingressClassName: nginx
+
+# cephECBlockPools are disabled by default, please remove the comments and set desired values to enable it
+#cephECBlockPools:
+#  # For erasure coded a replicated metadata pool is required.
+#  # https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded
+#  - name: ec-metadata-pool
+#    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
+#    spec:
+#      replicated:
+#        size: 2
+#  - name: ec-data-pool
+#    spec:
+#      failureDomain: osd
+#      erasureCoded:
+#        dataChunks: 2
+#        codingChunks: 1
+#      deviceClass: hdd
+
+# cephECStorageClass also is disabled by default, please remove the comments and set desired values to enable it
+# if cephECBlockPools are uncommented you must remove the comments of cephEcStorageClass as well
+#cephECStorageClass:
+#  name: rook-ceph-block
+#  # Change "rook-ceph" provisioner prefix to match the operator namespace if needed
+#  provisioner: rook-ceph.rbd.csi.ceph.com # driver:namespace:operator
+#  parameters:
+#    # clusterID is the namespace where the rook cluster is running
+#    # If you change this namespace, also change the namespace below where the secret namespaces are defined
+#    clusterID: rook-ceph # namespace:cluster
+#
+#    # If you want to use erasure coded pool with RBD, you need to create
+#    # two pools. one erasure coded and one replicated.
+#    # You need to specify the replicated pool here in the `pool` parameter, it is
+#    # used for the metadata of the images.
+#    # The erasure coded pool must be set as the `dataPool` parameter below.
+#    dataPool: ec-data-pool
+#    pool: ec-metadata-pool
+#
+#    # (optional) mapOptions is a comma-separated list of map options.
+#    # For krbd options refer
+#    # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
+#    # For nbd options refer
+#    # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
+#    # mapOptions: lock_on_read,queue_depth=1024
+#
+#    # (optional) unmapOptions is a comma-separated list of unmap options.
+#    # For krbd options refer
+#    # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
+#    # For nbd options refer
+#    # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
+#    # unmapOptions: force
+#
+#    # RBD image format. Defaults to "2".
+#    imageFormat: "2"
+#
+#    # RBD image features, equivalent to OR'd bitfield value: 63
+#    # Available for imageFormat: "2". Older releases of CSI RBD
+#    # support only the `layering` feature. The Linux kernel (KRBD) supports the
+#    # full feature complement as of 5.4
+#    # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
+#    imageFeatures: layering
+#  allowVolumeExpansion: true
+#  reclaimPolicy: Delete
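The header comment added at the top of this file records the helm command used to apply it. A minimal sketch of re-running that command after this change, assuming the same kubeconfig path and rook-ceph namespace; the --version 1.11.11 pin is an assumption to match the new chart release, since the in-file comment still reads --version 1.10.13:

# Sketch only: kubeconfig path and namespace are taken from the in-file comment;
# the 1.11.11 chart pin is an assumption matching the commit message.
KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --namespace rook-ceph rook-ceph-cluster \
  --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster \
  -f rook/rook-ceph-cluster-values.yaml --version 1.11.11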

+ 363 - 156
rook/rook-ceph-operator-values.yaml

@@ -1,127 +1,202 @@
-# From https://github.com/rook/rook/blob/release-1.9/deploy/charts/rook-ceph/values.yaml
+# From https://github.com/rook/rook/blob/v1.11.11/deploy/charts/rook-ceph/values.yaml
+# export ROOK_OPERATOR_NAMESPACE=rook-ceph
+# export ROOK_CLUSTER_NAMESPACE=rook-ceph
+# KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --install --namespace rook-ceph rook-ceph rook-release/rook-ceph -f rook/rook-ceph-operator-values.yaml --version 1.10.13
 # Default values for rook-ceph-operator
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.
 
 image:
+  # -- Image
   repository: rook/ceph
-  tag: v1.9.2
+  # -- Image tag
+  # @default -- `master`
+  tag: v1.11.11
+  # -- Image pull policy
   pullPolicy: IfNotPresent
 
 crds:
-  # Whether the helm chart should create and update the CRDs. If false, the CRDs must be
+  # -- Whether the helm chart should create and update the CRDs. If false, the CRDs must be
   # managed independently with deploy/examples/crds.yaml.
   # **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
-  # If the CRDs are deleted in this case, see the disaster recovery guide to restore them.
-  # https://rook.github.io/docs/rook/latest/ceph-disaster-recovery.html#restoring-crds-after-deletion
+  # If the CRDs are deleted in this case, see
+  # [the disaster recovery guide](https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion)
+  # to restore them.
   enabled: true
 
+# -- Pod resource requests & limits
 resources:
   limits:
     cpu: 500m
-    memory: 256Mi
+    memory: 512Mi
   requests:
     cpu: 100m
     memory: 128Mi
 
-# Constraint rook-ceph-operator Deployment to nodes with label
+# -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment.
+nodeSelector: {}
+# Constraint rook-ceph-operator Deployment to nodes with label `disktype: ssd`.
 # For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
-nodeSelector:
-  storage-node: "true"
+#  disktype: ssd
 
-# Tolerations for the rook-ceph-operator to allow it to run on nodes with particular taints
+# -- List of Kubernetes [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to add to the Deployment.
 tolerations: []
 
-# Delay to use in node.kubernetes.io/unreachable toleration
+# -- Delay to use for the `node.kubernetes.io/unreachable` pod failure toleration to override
+# the Kubernetes default of 5 minutes
 unreachableNodeTolerationSeconds: 5
 
-# Whether rook watches its current namespace for CRDs or the entire cluster, defaults to false
+# -- Whether the operator should watch cluster CRD in its own namespace or not
 currentNamespaceOnly: false
 
-## Annotations to be added to pod
+# -- Pod annotations
 annotations: {}
 
-## The logging level for the operator: ERROR | WARNING | INFO | DEBUG
+# -- Global log level for the operator.
+# Options: `ERROR`, `WARNING`, `INFO`, `DEBUG`
 logLevel: INFO
 
-## If true, create & use RBAC resources
-##
+# -- If true, create & use RBAC resources
 rbacEnable: true
 
-## If true, create & use PSP resources
-##
+# -- If true, create & use PSP resources
 pspEnable: true
 
-# Set the priority class for the rook operator deployment if desired
-# priorityClassName: class
+# -- Set the priority class for the rook operator deployment if desired
+priorityClassName:
 
-## Settings for whether to disable the drivers or other daemons if they are not
-## needed
+# -- If true, loop devices are allowed to be used for osds in test clusters
+allowLoopDevices: false
+
+# Settings for whether to disable the drivers or other daemons if they are not
+# needed
 csi:
+  # -- Enable Ceph CSI RBD driver
   enableRbdDriver: true
+  # -- Enable Ceph CSI CephFS driver
   enableCephfsDriver: true
+  # -- Enable Ceph CSI GRPC Metrics
   enableGrpcMetrics: false
-  # Set to true to enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
+  # -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
   # in some network configurations where the SDN does not provide access to an external cluster or
-  # there is significant drop in read/write performance.
+  # there is significant drop in read/write performance
   enableCSIHostNetwork: true
-  # set to false to disable deployment of snapshotter container in CephFS provisioner pod.
+  # -- Enable Snapshotter in CephFS provisioner pod
   enableCephfsSnapshotter: true
-  # set to false to disable deployment of snapshotter container in RBD provisioner pod.
+  # -- Enable Snapshotter in NFS provisioner pod
+  enableNFSSnapshotter: true
+  # -- Enable Snapshotter in RBD provisioner pod
   enableRBDSnapshotter: true
-  # set to false if the selinux is not enabled or unavailable in cluster nodes.
+  # -- Enable Host mount for `/etc/selinux` directory for Ceph CSI nodeplugins
   enablePluginSelinuxHostMount: false
-  # set to true to enable Ceph CSI pvc encryption support.
+  # -- Enable Ceph CSI PVC encryption support
   enableCSIEncryption: false
 
-  # (Optional) set user created priorityclassName for csi plugin pods.
+  # -- PriorityClassName to be set on csi driver plugin pods
   pluginPriorityClassName: system-node-critical
 
-  # (Optional) set user created priorityclassName for csi provisioner pods.
+  # -- PriorityClassName to be set on csi driver provisioner pods
   provisionerPriorityClassName: system-cluster-critical
 
-  # (Optional) policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
+  # -- Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
   # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
-  rbdFSGroupPolicy: "ReadWriteOnceWithFSType"
+  rbdFSGroupPolicy: "File"
 
-  # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
+  # -- Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
   # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
-  cephFSFSGroupPolicy: "ReadWriteOnceWithFSType"
+  cephFSFSGroupPolicy: "File"
 
-  # (Optional) policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
+  # -- Policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
   # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
-  nfsFSGroupPolicy: "ReadWriteOnceWithFSType"
+  nfsFSGroupPolicy: "File"
 
-  # OMAP generator generates the omap mapping between the PV name and the RBD image
+  # -- OMAP generator generates the omap mapping between the PV name and the RBD image
   # which helps CSI to identify the rbd images for CSI operations.
-  # CSI_ENABLE_OMAP_GENERATOR need to be enabled when we are using rbd mirroring feature.
-  # By default OMAP generator is disabled and when enabled it will be deployed as a
+  # `CSI_ENABLE_OMAP_GENERATOR` needs to be enabled when we are using rbd mirroring feature.
+  # By default OMAP generator is disabled and when enabled, it will be deployed as a
   # sidecar with CSI provisioner pod, to enable set it to true.
   enableOMAPGenerator: false
 
-  # Set replicas for csi provisioner deployment.
+  # -- Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options.
+  # Set to "ms_mode=secure" when connections.encrypted is enabled in CephCluster CR
+  cephFSKernelMountOptions:
+
+  # -- Enable adding volume metadata on the CephFS subvolumes and RBD images.
+  # Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images.
+  # Hence enable metadata is false by default
+  enableMetadata: false
+
+  # -- Set replicas for csi provisioner deployment
   provisionerReplicas: 2
 
-  # Set logging level for csi containers.
+  # -- Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful
+  # in cases like for example, when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster
+  clusterName:
+
+  # -- Set logging level for cephCSI containers maintained by the cephCSI.
   # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
-  logLevel: 5
-  # CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
-  # Default value is RollingUpdate.
-  #rbdPluginUpdateStrategy: OnDelete
-  # CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
-  # Default value is RollingUpdate.
-  #cephFSPluginUpdateStrategy: OnDelete
-  # CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
-  # Default value is RollingUpdate.
-  #nfsPluginUpdateStrategy: OnDelete
-  # The CSI GRPC timeout value (in seconds). It should be >= 120. If this variable is not set or is an invalid value, it's default to 150.
+  logLevel: 0
+
+  # -- Set logging level for Kubernetes-csi sidecar containers.
+  # Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
+  # @default -- `0`
+  sidecarLogLevel:
+
+  # -- CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
+  # @default -- `RollingUpdate`
+  rbdPluginUpdateStrategy:
+
+  # -- A maxUnavailable parameter of CSI RBD plugin daemonset update strategy.
+  # @default -- `1`
+  rbdPluginUpdateStrategyMaxUnavailable:
+
+  # -- CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
+  # @default -- `RollingUpdate`
+  cephFSPluginUpdateStrategy:
+
+  # -- CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
+  # @default -- `RollingUpdate`
+  nfsPluginUpdateStrategy:
+
+  # -- Set GRPC timeout for csi containers (in seconds). It should be >= 120. If this value is not set or is invalid, it defaults to 150
   grpcTimeoutInSeconds: 150
 
-  # Allow starting unsupported ceph-csi image
+  # -- Allow starting an unsupported ceph-csi image
   allowUnsupportedVersion: false
-  # CEPH CSI RBD provisioner resource requirement list, Put here list of resource
-  # requests and limits you want to apply for provisioner pod
-  # csi-omap-generator resources will be applied only if enableOMAPGenerator is set to true
+
+  # -- The volume of the CephCSI RBD plugin DaemonSet
+  csiRBDPluginVolume:
+  #  - name: lib-modules
+  #    hostPath:
+  #      path: /run/booted-system/kernel-modules/lib/modules/
+  #  - name: host-nix
+  #    hostPath:
+  #      path: /nix
+
+  # -- The volume mounts of the CephCSI RBD plugin DaemonSet
+  csiRBDPluginVolumeMount:
+  #  - name: host-nix
+  #    mountPath: /nix
+  #    readOnly: true
+
+  # -- The volume of the CephCSI CephFS plugin DaemonSet
+  csiCephFSPluginVolume:
+  #  - name: lib-modules
+  #    hostPath:
+  #      path: /run/booted-system/kernel-modules/lib/modules/
+  #  - name: host-nix
+  #    hostPath:
+  #      path: /nix
+
+  # -- The volume mounts of the CephCSI CephFS plugin DaemonSet
+  csiCephFSPluginVolumeMount:
+  #  - name: host-nix
+  #    mountPath: /nix
+  #    readOnly: true
+
+  # -- CEPH CSI RBD provisioner resource requirement list
+  # csi-omap-generator resources will be applied only if `enableOMAPGenerator` is set to `true`
+  # @default -- see values.yaml
   csiRBDProvisionerResource: |
     - name : csi-provisioner
       resource:
@@ -179,8 +254,9 @@ csi:
         limits:
           memory: 256Mi
           cpu: 100m
-  # CEPH CSI RBD plugin resource requirement list, Put here list of resource
-  # requests and limits you want to apply for plugin pod
+
+  # -- CEPH CSI RBD plugin resource requirement list
+  # @default -- see values.yaml
   csiRBDPluginResource: |
     - name : driver-registrar
       resource:
@@ -206,8 +282,9 @@ csi:
         limits:
           memory: 256Mi
           cpu: 100m
-  # CEPH CSI CephFS provisioner resource requirement list, Put here list of resource
-  # requests and limits you want to apply for provisioner pod
+
+  # -- CEPH CSI CephFS provisioner resource requirement list
+  # @default -- see values.yaml
   csiCephFSProvisionerResource: |
     - name : csi-provisioner
       resource:
@@ -257,8 +334,9 @@ csi:
         limits:
           memory: 256Mi
           cpu: 100m
-  # CEPH CSI CephFS plugin resource requirement list, Put here list of resource
-  # requests and limits you want to apply for plugin pod
+
+  # -- CEPH CSI CephFS plugin resource requirement list
+  # @default -- see values.yaml
   csiCephFSPluginResource: |
     - name : driver-registrar
       resource:
@@ -284,8 +362,9 @@ csi:
         limits:
           memory: 256Mi
           cpu: 100m
-  # CEPH CSI NFS provisioner resource requirement list, Put here list of resource
-  # requests and limits you want to apply for provisioner pod
+
+  # -- CEPH CSI NFS provisioner resource requirement list
+  # @default -- see values.yaml
   csiNFSProvisionerResource: |
     - name : csi-provisioner
       resource:
@@ -303,132 +382,258 @@ csi:
         limits:
           memory: 1Gi
           cpu: 500m
-  # CEPH CSI NFS plugin resource requirement list, Put here list of resource
-  # requests and limits you want to apply for plugin pod
+    - name : csi-attacher
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+          cpu: 500m
+
+  # -- CEPH CSI NFS plugin resource requirement list
+  # @default -- see values.yaml
   csiNFSPluginResource: |
-   - name : driver-registrar
-     resource:
-       requests:
-         memory: 128Mi
-         cpu: 50m
-       limits:
-         memory: 256Mi
-         cpu: 100m
-   - name : csi-nfsplugin
-     resource:
-       requests:
-         memory: 512Mi
-         cpu: 250m
-       limits:
-         memory: 1Gi
-         cpu: 500m
-
-  # Set provisonerTolerations and provisionerNodeAffinity for provisioner pod.
+    - name : driver-registrar
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 50m
+        limits:
+          memory: 256Mi
+          cpu: 100m
+    - name : csi-nfsplugin
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+          cpu: 500m
+
+  # Set provisionerTolerations and provisionerNodeAffinity for provisioner pod.
   # The CSI provisioner would be best to start on the same nodes as other ceph daemons.
-  # provisionerTolerations:
+
+  # -- Array of tolerations in YAML format which will be added to CSI provisioner deployment
+  provisionerTolerations:
   #    - key: key
   #      operator: Exists
   #      effect: NoSchedule
-  # provisionerNodeAffinity: key1=value1,value2; key2=value3
-  provisionerNodeAffinity: "storage-node=true"
+
+  # -- The node labels for affinity of the CSI provisioner deployment [^1]
+  provisionerNodeAffinity: "storage-node=true" #key1=value1,value2; key2=value3
   # Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods.
   # The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
+
+  # -- Array of tolerations in YAML format which will be added to CephCSI plugin DaemonSet
   pluginTolerations:
-     - key: storage-node
-       operator: Exists
-       effect: NoSchedule
-  # pluginNodeAffinity: key1=value1,value2; key2=value3
-  #pluginNodeAffinity: "storage-node=true"
-  #cephfsGrpcMetricsPort: 9091
-  #cephfsLivenessMetricsPort: 9081
-  #rbdGrpcMetricsPort: 9090
-  #csiAddonsPort: 9070
-  # Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
+      - key: storage-node
+        operator: Exists
+        effect: NoSchedule
+
+  # -- The node labels for affinity of the CephCSI RBD plugin DaemonSet [^1]
+  pluginNodeAffinity: # key1=value1,value2; key2=value3
+
+  # -- Enable Ceph CSI Liveness sidecar deployment
+  enableLiveness: false
+
+  # -- CSI CephFS driver GRPC metrics port
+  # @default -- `9091`
+  cephfsGrpcMetricsPort:
+
+  # -- CSI CephFS driver metrics port
+  # @default -- `9081`
+  cephfsLivenessMetricsPort:
+
+  # -- Ceph CSI RBD driver GRPC metrics port
+  # @default -- `9090`
+  rbdGrpcMetricsPort:
+
+  # -- CSI Addons server port
+  # @default -- `9070`
+  csiAddonsPort:
+
+  # -- Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
   # you may want to disable this setting. However, this will cause an issue during upgrades
-  # with the FUSE client. See the upgrade guide: https://rook.io/docs/rook/v1.2/ceph-upgrade.html
+  # with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html)
   forceCephFSKernelClient: true
-  #rbdLivenessMetricsPort: 9080
-  #kubeletDirPath: /var/lib/kubelet
-  #cephcsi:
-    #image: quay.io/cephcsi/cephcsi:v3.6.1
-  #registrar:
-    #image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.5.0
-  #provisioner:
-    #image: k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0
-  #snapshotter:
-    #image: k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1
-  #attacher:
-    #image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
-  #resizer:
-    #image: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0
-  # Labels to add to the CSI CephFS Deployments and DaemonSets Pods.
-  #cephfsPodLabels: "key1=value1,key2=value2"
-  # Labels to add to the CSI NFS Deployments and DaemonSets Pods.
-  #nfsPodLabels: "key1=value1,key2=value2"
-  # Labels to add to the CSI RBD Deployments and DaemonSets Pods.
-  #rbdPodLabels: "key1=value1,key2=value2"
-  # Enable the volume replication controller.
-  # Before enabling, ensure the Volume Replication CRDs are created.
-  # See https://rook.io/docs/rook/latest/ceph-csi-drivers.html#rbd-mirroring
-  volumeReplication:
+
+  # -- Ceph CSI RBD driver metrics port
+  # @default -- `8080`
+  rbdLivenessMetricsPort:
+
+  serviceMonitor:
+    # -- Enable ServiceMonitor for Ceph CSI drivers
     enabled: false
-    #image: "quay.io/csiaddons/volumereplication-operator:v0.3.0"
-  # Enable the CSIAddons sidecar.
+    # -- Service monitor scrape interval
+    interval: 5s
+
+  # -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
+  # @default -- `/var/lib/kubelet`
+  kubeletDirPath:
+
+  cephcsi:
+    # -- Ceph CSI image
+    # @default -- `quay.io/cephcsi/cephcsi:v3.8.0`
+    image:
+
+  registrar:
+    # -- Kubernetes CSI registrar image
+    # @default -- `registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0`
+    image:
+
+  provisioner:
+    # -- Kubernetes CSI provisioner image
+    # @default -- `registry.k8s.io/sig-storage/csi-provisioner:v3.4.0`
+    image:
+
+  snapshotter:
+    # -- Kubernetes CSI snapshotter image
+    # @default -- `registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1`
+    image:
+
+  attacher:
+    # -- Kubernetes CSI Attacher image
+    # @default -- `registry.k8s.io/sig-storage/csi-attacher:v4.1.0`
+    image:
+
+  resizer:
+    # -- Kubernetes CSI resizer image
+    # @default -- `registry.k8s.io/sig-storage/csi-resizer:v1.7.0`
+    image:
+
+  # -- Image pull policy
+  imagePullPolicy: IfNotPresent
+
+  # -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods
+  cephfsPodLabels: #"key1=value1,key2=value2"
+
+  # -- Labels to add to the CSI NFS Deployments and DaemonSets Pods
+  nfsPodLabels: #"key1=value1,key2=value2"
+
+  # -- Labels to add to the CSI RBD Deployments and DaemonSets Pods
+  rbdPodLabels: #"key1=value1,key2=value2"
+
   csiAddons:
+    # -- Enable CSIAddons
     enabled: false
-    #image: "quay.io/csiaddons/k8s-sidecar:v0.2.1"
-  # Enable the nfs csi driver.
+    # -- CSIAddons Sidecar image
+    image: "quay.io/csiaddons/k8s-sidecar:v0.5.0"
+
   nfs:
+    # -- Enable the nfs csi driver
+    enabled: false
+
+  topology:
+    # -- Enable topology based provisioning
     enabled: false
-    #image: "k8s.gcr.io/sig-storage/nfsplugin:v3.1.0"
+    # NOTE: the value here serves as an example and needs to be
+    # updated with node labels that define domains of interest
+    # -- domainLabels define which node labels to use as domains
+    # for CSI nodeplugins to advertise their domains
+    domainLabels:
+    # - kubernetes.io/hostname
+    # - topology.kubernetes.io/zone
+    # - topology.rook.io/rack
+
+  readAffinity:
+    # -- Enable read affinity for RBD volumes. Recommended to
+    # set to true if running kernel 5.8 or newer.
+    # @default -- `false`
+    enabled: true
+    # -- Define which node labels to use
+    # as CRUSH location. This should correspond to the values set
+    # in the CRUSH map.
+    # @default -- labels listed [here](../CRDs/Cluster/ceph-cluster-crd.md#osd-topology)
+    crushLocationLabels:
+
+  # -- Whether to skip any attach operation altogether for CephFS PVCs. See more details
+  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+  # If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
+  # of pods using the CephFS PVC fast. **WARNING** It's highly discouraged to use this for
+  # CephFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+  cephFSAttachRequired: true
+  # -- Whether to skip any attach operation altogether for RBD PVCs. See more details
+  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+  # If set to false it skips the volume attachments and makes the creation of pods using the RBD PVC fast.
+  # **WARNING** It's highly discouraged to use this for RWO volumes as it can cause data corruption.
+  # csi-addons operations like Reclaimspace and PVC Keyrotation will also not be supported if set
+  # to false since we'll have no VolumeAttachments to determine which node the PVC is mounted on.
+  # Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+  rbdAttachRequired: true
+  # -- Whether to skip any attach operation altogether for NFS PVCs. See more details
+  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+  # If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
+  # of pods using the NFS PVC fast. **WARNING** It's highly discouraged to use this for
+  # NFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+  nfsAttachRequired: true
+
+# -- Enable discovery daemon
 enableDiscoveryDaemon: false
+
+# -- The timeout for ceph commands in seconds
 cephCommandsTimeoutSeconds: "15"
 
-## if true, run rook operator on the host network
+# -- If true, run rook operator on the host network
 useOperatorHostNetwork: false
 
+# -- If true, scale down the rook operator.
+# This is useful for administrative actions where the rook operator must be scaled down, while using gitops style tooling
+# to deploy your helm charts.
+scaleDownOperator: false
+
 ## Rook Discover configuration
 ## toleration: NoSchedule, PreferNoSchedule or NoExecute
 ## tolerationKey: Set this to the specific key of the taint to tolerate
 ## tolerations: Array of tolerations in YAML format which will be added to agent deployment
 ## nodeAffinity: Set to labels of the node to match
-# discover:
-#   toleration: NoSchedule
-#   tolerationKey: key
-#   tolerations:
-#   - key: key
-#     operator: Exists
-#     effect: NoSchedule
-#   nodeAffinity: key1=value1,value2; key2=value3
-#   podLabels: "key1=value1,key2=value2"
-
-# In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins).
-# Disable it here if you have similar issues.
-# For more details see https://github.com/rook/rook/issues/2417
-enableSelinuxRelabeling: true
-
-disableAdmissionController: false
-
-# Writing to the hostPath is required for the Ceph mon and osd pods. Given the restricted permissions in OpenShift with SELinux,
-# the pod must be running privileged in order to write to the hostPath volume, this must be set to true then.
+
+discover:
+  # -- Toleration for the discover pods.
+  # Options: `NoSchedule`, `PreferNoSchedule` or `NoExecute`
+  toleration:
+  # -- The specific key of the taint to tolerate
+  tolerationKey:
+  # -- Array of tolerations in YAML format which will be added to discover deployment
+  tolerations:
+  #   - key: key
+  #     operator: Exists
+  #     effect: NoSchedule
+  # -- The node labels for affinity of `discover-agent` [^1]
+  nodeAffinity: # key1=value1,value2; key2=value3
+  # -- Labels to add to the discover pods
+  podLabels: # "key1=value1,key2=value2"
+  # -- Add resources to discover daemon pods
+  resources:
+  #   - limits:
+  #       cpu: 500m
+  #       memory: 512Mi
+  #   - requests:
+  #       cpu: 100m
+  #       memory: 128Mi
+
+# -- Whether to disable the admission controller
+disableAdmissionController: true
+
+# -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
 hostpathRequiresPrivileged: false
 
-# Disable automatic orchestration when new devices are discovered.
+# -- Disable automatic orchestration when new devices are discovered.
 disableDeviceHotplug: false
 
-# Blacklist certain disks according to the regex provided.
+# -- Blacklist certain disks according to the regex provided.
 discoverDaemonUdev:
 
-# imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
-# imagePullSecrets:
+# -- imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
+imagePullSecrets:
 # - name: my-registry-secret
 
-# Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used
+# -- Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used
 enableOBCWatchOperatorNamespace: true
 
+# -- Set tolerations and nodeAffinity [^1] for admission controller pod.
+# The admission controller would be best to start on the same nodes as other ceph daemons.
 admissionController:
-  # Set tolerations and nodeAffinity for admission controller pod.
-  # The admission controller would be best to start on the same nodes as other ceph daemons.
   # tolerations:
   #    - key: key
   #      operator: Exists
@@ -436,7 +641,9 @@ admissionController:
   # nodeAffinity: key1=value1,value2; key2=value3
   nodeAffinity: "storage-node=true"
 
+# [^1]: `nodeAffinity` and `*NodeAffinity` options should have the format `"role=storage,rook; storage=ceph"` or `storage=;role=rook-example` or `storage=;` (_checks only for presence of key_)
+
 monitoring:
-  # requires Prometheus to be pre-installed
-  # enabling will also create RBAC rules to allow Operator to create ServiceMonitors
+  # -- Enable monitoring. Requires Prometheus to be pre-installed.
+  # Enabling will also create RBAC rules to allow Operator to create ServiceMonitors
   enabled: true
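The operator values file likewise carries its upgrade command in its header comment. A minimal sketch for this release, again assuming the same kubeconfig and namespace and pinning to 1.11.11 (the in-file comment still shows 1.10.13); Rook's upgrade guide generally has the operator chart upgraded before the cluster chart:

# Sketch only: environment and namespace come from the in-file comment; the 1.11.11 pin is assumed.
export ROOK_OPERATOR_NAMESPACE=rook-ceph
export ROOK_CLUSTER_NAMESPACE=rook-ceph
KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --install --namespace rook-ceph rook-ceph \
  rook-release/rook-ceph -f rook/rook-ceph-operator-values.yaml --version 1.11.11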

+ 1 - 1
shelly-plug-exporter.yaml

@@ -22,7 +22,7 @@ spec:
     spec:
       containers:
       - name: shelly-plug-exporter
-        image: jibby0/shelly-plug-exporter:24.2.0-fork
+        image: jibby0/shelly-plug-exporter:24.2.0-fork2
         ports:
         - containerPort: 8080
           name: metrics
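The last file only bumps the exporter image tag. A quick, hedged way to confirm the new image rolled out, assuming the Deployment is named shelly-plug-exporter and lives in the default namespace (neither is visible in this hunk):

# Hypothetical names: the deployment name and namespace are assumptions.
kubectl rollout status deployment/shelly-plug-exporter
kubectl get deployment shelly-plug-exporter \
  -o jsonpath='{.spec.template.spec.containers[0].image}'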