
update rook to v1.10.13

Josh Bicking · 3 months ago · commit ee44d611f4
3 changed files with 422 additions and 209 deletions:
  1. rook/rook-ceph-cluster-values.yaml (+112, -52)
  2. rook/rook-ceph-operator-values.yaml (+309, -156)
  3. shelly-plug-exporter.yaml (+1, -1)

rook/rook-ceph-cluster-values.yaml (+112, -52)

@@ -1,18 +1,21 @@
-# From https://raw.githubusercontent.com/rook/rook/release-1.9/deploy/charts/rook-ceph-cluster/values.yaml
+# From https://raw.githubusercontent.com/rook/rook/v1.10.13/deploy/charts/rook-ceph-cluster/values.yaml
+# KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --namespace rook-ceph rook-ceph-cluster --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f rook/rook-ceph-cluster-values.yaml --version 1.10.13
 # Default values for a single rook-ceph cluster
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.
 
-# Namespace of the main rook operator
+# -- Namespace of the main rook operator
 operatorNamespace: rook-ceph
 
-# The metadata.name of the CephCluster CR. The default name is the same as the namespace.
-# clusterName: rook-ceph
+# -- The metadata.name of the CephCluster CR
+# @default -- The same as the namespace
+clusterName:
 
-# Ability to override the kubernetes version used in rendering the helm chart
-# kubeVersion: 1.21
+# -- Optional override of the target kubernetes version
+kubeVersion:
 
-# Ability to override ceph.conf
+# -- Cluster ceph.conf override
+configOverride:
 # configOverride: |
 #   [global]
 #   mon_allow_pool_delete = true
@@ -21,10 +24,15 @@ operatorNamespace: rook-ceph
 
 # Installs a debugging toolbox deployment
 toolbox:
+  # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
   enabled: true
-  image: rook/ceph:v1.9.0.230.g6a87cb44a
+  # -- Toolbox image, defaults to the image used by the Ceph cluster
+  image: rook/ceph:v1.9.0.230.g6a87cb44a # TODO probably safe to remove?
+  # -- Toolbox tolerations
   tolerations: []
+  # -- Toolbox affinity
   affinity: {}
+  # -- Toolbox resources
   resources:
     limits:
       cpu: "500m"
@@ -32,21 +40,30 @@ toolbox:
     requests:
       cpu: "100m"
       memory: "128Mi"
-  # Set the priority class for the toolbox if desired
-  # priorityClassName: class
+  # -- Set the priority class for the toolbox if desired
+  priorityClassName:
 
-# monitoring requires Prometheus to be pre-installed
 monitoring:
-  # enabling will also create RBAC rules to allow Operator to create ServiceMonitors
+  # -- Enable Prometheus integration, will also create necessary RBAC rules to allow Operator to create ServiceMonitors.
+  # Monitoring requires Prometheus to be pre-installed
   enabled: true
-  # whether to create the prometheus rules
+  # -- Whether to create the Prometheus rules for Ceph alerts
   createPrometheusRules: true
-  # the namespace in which to create the prometheus rules, if different from the rook cluster namespace
+  # -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
   # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
   # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
   rulesNamespaceOverride:
-
-# If true, create & use PSP resources. Set this to the same value as the rook-ceph chart.
+  # Monitoring settings for external clusters:
+  # externalMgrEndpoints: <list of endpoints>
+  # externalMgrPrometheusPort: <port>
+  # allow adding custom labels and annotations to the prometheus rule
+  prometheusRule:
+    # -- Labels applied to PrometheusRule
+    labels: {}
+    # -- Annotations applied to PrometheusRule
+    annotations: {}
+
+# -- Create & use PSP resources. Set this to the same value as the rook-ceph chart.
 pspEnable: true

 # imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
@@ -54,18 +71,27 @@ pspEnable: true
 # - name: my-registry-secret

 # All values below are taken from the CephCluster CRD
-# More information can be found at [Ceph Cluster CRD](/Documentation/ceph-cluster-crd.md)
+# -- Cluster configuration.
+# @default -- See [below](#ceph-cluster-spec)
 cephClusterSpec:
+  # This cluster spec example is for a converged cluster where all the Ceph daemons are running locally,
+  # as in the host-based example (cluster.yaml). For a different configuration such as a
+  # PVC-based cluster (cluster-on-pvc.yaml), external cluster (cluster-external.yaml),
+  # or stretch cluster (cluster-stretched.yaml), replace this entire `cephClusterSpec`
+  # with the specs from those examples.
+
+  # For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
   cephVersion:
     # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
-    # v15 is octopus, and v16 is pacific.
+    # v16 is Pacific, v17 is Quincy.
     # RECOMMENDATION: In production, use a specific version tag instead of the general v16 flag, which pulls the latest release and could result in different
     # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
     # If you want to be more precise, you can always use a timestamp tag such quay.io/ceph/ceph:v15.2.11-20200419
     # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
+    #image: quay.io/ceph/ceph:v17.2.5
     image: quay.io/ceph/ceph:v16.2.7
-    # Whether to allow unsupported versions of Ceph. Currently `octopus` and `pacific` are supported.
-    # Future versions such as `pacific` would require this to be set to `true`.
+    # Whether to allow unsupported versions of Ceph. Currently `pacific` and `quincy` are supported.
+    # Future versions such as `reef` (v18) would require this to be set to `true`.
     # Do not set to true in production.
     allowUnsupported: false
 
@@ -77,7 +103,7 @@ cephClusterSpec:
   # Whether or not upgrade should continue even if a check fails
   # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
   # Use at your OWN risk
-  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/latest/ceph-upgrade.html#ceph-version-upgrades
+  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/v1.10/Upgrade/ceph-upgrade/
   skipUpgradeChecks: false

   # Whether or not continue if PGs are not clean during an upgrade
@@ -85,7 +111,7 @@ cephClusterSpec:
 
   # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
   # If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one
-  # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then opertor would
+  # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then operator would
   # continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
   # The default wait timeout is 10 minutes.
   waitTimeoutForHealthyOSDInMinutes: 10
@@ -119,12 +145,13 @@ cephClusterSpec:
     # urlPrefix: /ceph-dashboard
     # serve the dashboard at the given port.
     # port: 8443
-    # serve the dashboard using SSL
+    # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
+    # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
     ssl: true
 
-  # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/ceph-cluster-crd.md#network-configuration-settings
+  # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/ceph-cluster-crd.md#network-configuration-settings
   network:
-  # enable host networking
+    # enable host networking
     provider: host
   #   # EXPERIMENTAL: enable the Multus network provider
   #   provider: multus
@@ -153,7 +180,8 @@ cephClusterSpec:
   # enable log collector, daemons will log on files and rotate
   # logCollector:
   #   enabled: true
-  #   periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days.
+  #   periodicity: daily # one of: hourly, daily, weekly, monthly
+  #   maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
 
   # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
   cleanupPolicy:
@@ -192,7 +220,7 @@ cephClusterSpec:
               - key: storage-node
                 operator: In
                 values:
-                  - "true"
+                - "true"
       podAffinity:
       podAntiAffinity:
       topologySpreadConstraints:
@@ -229,7 +257,6 @@ cephClusterSpec:
   #   # These labels can be passed as LabelSelector to Prometheus
   #   monitoring:
 
-  # https://home.robusta.dev/blog/stop-using-cpu-limits
   resources:
     mgr:
       limits:
@@ -253,9 +280,14 @@ cephClusterSpec:
         cpu: 0
         memory: "1Gi"
     prepareosd:
-      limits:
-        cpu: 0
-        memory: "12Gi"
+      # limits: It is not recommended to set limits on the OSD prepare job
+      #         since it's a one-time burst for memory that must be allowed to
+      #         complete without an OOM kill.  Note however that if a k8s
+      #         limitRange guardrail is defined external to Rook, the lack of
+      #         a limit here may result in a sync failure, in which case a
+      #         limit should be added.  1200Mi may suffice for up to 15Ti
+      #         OSDs ; for larger devices 2Gi may be required.
+      #         cf. https://github.com/rook/rook/pull/11103
       requests:
         cpu: 0
         memory: "500Mi"
@@ -309,7 +341,7 @@ cephClusterSpec:
     #   osdsPerDevice: "1" # this value can be overridden at the node or device level
     #   encryptedDevice: "true" # the default value for this option is "false"
     # # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
-    # # nodes below will be used as storage resources.  Each node's 'name' field should match their 'kubernetes.io/hostname' label.
+    # # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
     # nodes:
     #   - name: "172.17.4.201"
     #     devices: # specific devices to use for storage can be specified for each node
@@ -364,24 +396,33 @@ cephClusterSpec:
         disabled: false

 ingress:
-  dashboard: {}
+  # -- Enable an ingress for the ceph-dashboard
+  dashboard:
+    {}
     # annotations:
-    #   kubernetes.io/ingress.class: nginx
-    #   external-dns.alpha.kubernetes.io/hostname: example.com
+    #   external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
     #   nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
+    #   kubernetes.io/ingress.class: nginx
     # If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly
     #   nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
     #   nginx.ingress.kubernetes.io/server-snippet: |
     #     proxy_ssl_verify off;
     # host:
-    #   name: example.com
+    #   name: dashboard.example.com
     #   path: "/ceph-dashboard(/|$)(.*)"
     # tls:
-    # ingressClassName:
-
+    # - hosts:
+    #     - dashboard.example.com
+    #   secretName: testsecret-tls
+    ## Note: Only one of ingress class annotation or the `ingressClassName:` can be used at a time
+    ## to set the ingress class
+    # ingressClassName: nginx
+
+# -- A list of CephBlockPool configurations to deploy
+# @default -- See [below](#ceph-block-pools)
 cephBlockPools:
   - name: ceph-blockpool
-    # see https://github.com/rook/rook/blob/master/Documentation/ceph-pool-crd.md#spec for available configuration
+    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
     spec:
       failureDomain: host
       replicated:
@@ -393,6 +434,12 @@ cephBlockPools:
       reclaimPolicy: Delete
       allowVolumeExpansion: true
       mountOptions: []
+      # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
+      allowedTopologies: []
+#        - matchLabelExpressions:
+#            - key: rook-ceph-role
+#              values:
+#                - storage-node
       # see https://github.com/rook/rook/blob/master/Documentation/ceph-block.md#provision-storage for available configuration
       parameters:
         # (optional) mapOptions is a comma-separated list of map options.
@@ -411,23 +458,30 @@ cephBlockPools:
 
         # RBD image format. Defaults to "2".
         imageFormat: "2"
-        # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
+
+        # RBD image features, equivalent to OR'd bitfield value: 63
+        # Available for imageFormat: "2". Older releases of CSI RBD
+        # support only the `layering` feature. The Linux kernel (KRBD) supports the
+        # full feature complement as of 5.4
         imageFeatures: layering
-        # The secrets contain Ceph admin credentials.
+
+        # These secrets contain Ceph admin credentials.
         csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
-        csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
         csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
-        csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
         csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
-        csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
         # Specify the filesystem type of the volume. If not specified, csi-provisioner
         # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
         # in hyperconverged settings where the volume is mounted on the same node as the osds.
         csi.storage.k8s.io/fstype: ext4
 
+# -- A list of CephFileSystem configurations to deploy
+# @default -- See [below](#ceph-file-systems)
 cephFileSystems:
   - name: ceph-filesystem
-    # see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem-crd.md#filesystem-settings for available configuration
+    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
     spec:
       metadataPool:
         replicated:
@@ -436,7 +490,7 @@ cephFileSystems:
         - failureDomain: host
           replicated:
             size: 3
-          # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem-crd.md#pools
+          # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
           name: data0
       metadataServer:
         activeCount: 1
@@ -477,16 +531,18 @@ cephFileSystems:
       parameters:
         # The secrets contain Ceph admin credentials.
         csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
-        csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
         csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
-        csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
         csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
-        csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
         # Specify the filesystem type of the volume. If not specified, csi-provisioner
         # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
         # in hyperconverged settings where the volume is mounted on the same node as the osds.
         csi.storage.k8s.io/fstype: ext4
 
+# -- Settings for the filesystem snapshot class
+# @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
 cephFileSystemVolumeSnapshotClass:
   enabled: false
   name: ceph-filesystem
@@ -494,9 +550,11 @@ cephFileSystemVolumeSnapshotClass:
   deletionPolicy: Delete
   annotations: {}
   labels: {}
-  # see https://rook.io/docs/rook/latest/ceph-csi-snapshot.html#cephfs-snapshots for available configuration
+  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
   parameters: {}
 
+# -- Settings for the block pool snapshot class
+# @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
 cephBlockPoolsVolumeSnapshotClass:
   enabled: false
   name: ceph-block
@@ -504,12 +562,14 @@ cephBlockPoolsVolumeSnapshotClass:
   deletionPolicy: Delete
   annotations: {}
   labels: {}
-  # see https://rook.io/docs/rook/latest/ceph-csi-snapshot.html#rbd-snapshots for available configuration
+  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
   parameters: {}
 
+# -- A list of CephObjectStore configurations to deploy
+# @default -- See [below](#ceph-object-stores)
 cephObjectStores:
   - name: ceph-objectstore
-    # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-store-crd.md#object-store-settings for available configuration
+    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
     spec:
       metadataPool:
         failureDomain: host

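With the toolbox enabled in these values, the cluster chart upgrade can be sanity-checked from inside the cluster once helm finishes. A minimal sketch, assuming the toolbox keeps its default rook-ceph-tools deployment name and the k3s kubeconfig path used in the comment at the top of this file:

export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
# the CephCluster CR should settle back to a healthy phase after the daemons restart
kubectl -n rook-ceph get cephcluster
# run ceph commands through the toolbox deployment
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph status
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph versions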
rook/rook-ceph-operator-values.yaml (+309, -156)

@@ -1,127 +1,199 @@
-# From https://github.com/rook/rook/blob/release-1.9/deploy/charts/rook-ceph/values.yaml
+# From https://github.com/rook/rook/blob/v1.10.13/deploy/charts/rook-ceph/values.yaml
+# export ROOK_OPERATOR_NAMESPACE=rook-ceph
+# export ROOK_CLUSTER_NAMESPACE=rook-ceph
+# KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --install --namespace rook-ceph rook-ceph rook-release/rook-ceph -f rook/rook-ceph-operator-values.yaml --version 1.10.13
 # Default values for rook-ceph-operator
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.

 image:
+  # -- Image
   repository: rook/ceph
-  tag: v1.9.2
+  # -- Image tag
+  # @default -- `master`
+  tag: v1.10.13
+  # -- Image pull policy
   pullPolicy: IfNotPresent

 crds:
-  # Whether the helm chart should create and update the CRDs. If false, the CRDs must be
+  # -- Whether the helm chart should create and update the CRDs. If false, the CRDs must be
   # managed independently with deploy/examples/crds.yaml.
   # **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
-  # If the CRDs are deleted in this case, see the disaster recovery guide to restore them.
-  # https://rook.github.io/docs/rook/latest/ceph-disaster-recovery.html#restoring-crds-after-deletion
+  # If the CRDs are deleted in this case, see
+  # [the disaster recovery guide](https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion)
+  # to restore them.
   enabled: true
 
+# -- Pod resource requests & limits
 resources:
   limits:
     cpu: 500m
-    memory: 256Mi
+    memory: 512Mi
   requests:
     cpu: 100m
     memory: 128Mi
 
-# Constraint rook-ceph-operator Deployment to nodes with label
-# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+# -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment.
 nodeSelector:
   storage-node: "true"
+# Constraint rook-ceph-operator Deployment to nodes with label `disktype: ssd`.
+# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+#  disktype: ssd
 
-# Tolerations for the rook-ceph-operator to allow it to run on nodes with particular taints
+# -- List of Kubernetes [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to add to the Deployment.
 tolerations: []
 
-# Delay to use in node.kubernetes.io/unreachable toleration
+# -- Delay to use for the `node.kubernetes.io/unreachable` pod failure toleration to override
+# the Kubernetes default of 5 minutes
 unreachableNodeTolerationSeconds: 5
 
-# Whether rook watches its current namespace for CRDs or the entire cluster, defaults to false
+# -- Whether the operator should watch cluster CRD in its own namespace or not
 currentNamespaceOnly: false
 
-## Annotations to be added to pod
+# -- Pod annotations
 annotations: {}
 
-## The logging level for the operator: ERROR | WARNING | INFO | DEBUG
+# -- Global log level for the operator.
+# Options: `ERROR`, `WARNING`, `INFO`, `DEBUG`
 logLevel: INFO
 
-## If true, create & use RBAC resources
-##
+# -- If true, create & use RBAC resources
 rbacEnable: true
 
-## If true, create & use PSP resources
-##
+# -- If true, create & use PSP resources
 pspEnable: true
 
-# Set the priority class for the rook operator deployment if desired
-# priorityClassName: class
+# -- Set the priority class for the rook operator deployment if desired
+priorityClassName:
 
-## Settings for whether to disable the drivers or other daemons if they are not
-## needed
+# -- If true, loop devices are allowed to be used for osds in test clusters
+allowLoopDevices: false
+
+# Settings for whether to disable the drivers or other daemons if they are not
+# needed
 csi:
+  # -- Enable Ceph CSI RBD driver
   enableRbdDriver: true
+  # -- Enable Ceph CSI CephFS driver
   enableCephfsDriver: true
+  # -- Enable Ceph CSI GRPC Metrics
   enableGrpcMetrics: false
-  # Set to true to enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
+  # -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
   # in some network configurations where the SDN does not provide access to an external cluster or
-  # there is significant drop in read/write performance.
+  # there is significant drop in read/write performance
   enableCSIHostNetwork: true
-  # set to false to disable deployment of snapshotter container in CephFS provisioner pod.
+  # -- Enable Snapshotter in CephFS provisioner pod
   enableCephfsSnapshotter: true
-  # set to false to disable deployment of snapshotter container in RBD provisioner pod.
+  # -- Enable Snapshotter in NFS provisioner pod
+  enableNFSSnapshotter: true
+  # -- Enable Snapshotter in RBD provisioner pod
   enableRBDSnapshotter: true
-  # set to false if the selinux is not enabled or unavailable in cluster nodes.
+  # -- Enable Host mount for `/etc/selinux` directory for Ceph CSI nodeplugins
   enablePluginSelinuxHostMount: false
-  # set to true to enable Ceph CSI pvc encryption support.
+  # -- Enable Ceph CSI PVC encryption support
   enableCSIEncryption: false
 
-  # (Optional) set user created priorityclassName for csi plugin pods.
+  # -- PriorityClassName to be set on csi driver plugin pods
   pluginPriorityClassName: system-node-critical
 
-  # (Optional) set user created priorityclassName for csi provisioner pods.
+  # -- PriorityClassName to be set on csi driver provisioner pods
   provisionerPriorityClassName: system-cluster-critical
 
-  # (Optional) policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
+  # -- Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
   # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
-  rbdFSGroupPolicy: "ReadWriteOnceWithFSType"
+  rbdFSGroupPolicy: "File"
 
-  # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
+  # -- Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
   # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
-  cephFSFSGroupPolicy: "ReadWriteOnceWithFSType"
+  cephFSFSGroupPolicy: "File"
 
-  # (Optional) policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
+  # -- Policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
   # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
-  nfsFSGroupPolicy: "ReadWriteOnceWithFSType"
+  nfsFSGroupPolicy: "File"
 
-  # OMAP generator generates the omap mapping between the PV name and the RBD image
+  # -- OMAP generator generates the omap mapping between the PV name and the RBD image
   # which helps CSI to identify the rbd images for CSI operations.
-  # CSI_ENABLE_OMAP_GENERATOR need to be enabled when we are using rbd mirroring feature.
-  # By default OMAP generator is disabled and when enabled it will be deployed as a
+  # `CSI_ENABLE_OMAP_GENERATOR` needs to be enabled when we are using rbd mirroring feature.
+  # By default OMAP generator is disabled and when enabled, it will be deployed as a
   # sidecar with CSI provisioner pod, to enable set it to true.
   enableOMAPGenerator: false
 
-  # Set replicas for csi provisioner deployment.
+  # -- Enable adding volume metadata on the CephFS subvolumes and RBD images.
+  # Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images.
+  # Hence enable metadata is false by default
+  enableMetadata: false
+
+  # -- Set replicas for csi provisioner deployment
   provisionerReplicas: 2
 
-  # Set logging level for csi containers.
+  # -- Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful
+  # in cases like for example, when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster
+  clusterName:
+
+  # -- Set logging level for cephCSI containers maintained by the cephCSI.
   # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
-  logLevel: 5
-  # CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
-  # Default value is RollingUpdate.
-  #rbdPluginUpdateStrategy: OnDelete
-  # CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
-  # Default value is RollingUpdate.
-  #cephFSPluginUpdateStrategy: OnDelete
-  # CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
-  # Default value is RollingUpdate.
-  #nfsPluginUpdateStrategy: OnDelete
-  # The CSI GRPC timeout value (in seconds). It should be >= 120. If this variable is not set or is an invalid value, it's default to 150.
+  logLevel: 0
+
+  # -- Set logging level for Kubernetes-csi sidecar containers.
+  # Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
+  # @default -- `0`
+  sidecarLogLevel:
+
+  # -- CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
+  # @default -- `RollingUpdate`
+  rbdPluginUpdateStrategy:
+
+  # -- A maxUnavailable parameter of CSI RBD plugin daemonset update strategy.
+  # @default -- `1`
+  rbdPluginUpdateStrategyMaxUnavailable:
+
+  # -- CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
+  # @default -- `RollingUpdate`
+  cephFSPluginUpdateStrategy:
+
+  # -- CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
+  # @default -- `RollingUpdate`
+  nfsPluginUpdateStrategy:
+
+  # -- Set GRPC timeout for csi containers (in seconds). It should be >= 120. If this value is not set or is invalid, it defaults to 150
   grpcTimeoutInSeconds: 150
 
-  # Allow starting unsupported ceph-csi image
+  # -- Allow starting an unsupported ceph-csi image
   allowUnsupportedVersion: false
-  # CEPH CSI RBD provisioner resource requirement list, Put here list of resource
-  # requests and limits you want to apply for provisioner pod
-  # csi-omap-generator resources will be applied only if enableOMAPGenerator is set to true
+
+  # -- The volume of the CephCSI RBD plugin DaemonSet
+  csiRBDPluginVolume:
+  #  - name: lib-modules
+  #    hostPath:
+  #      path: /run/booted-system/kernel-modules/lib/modules/
+  #  - name: host-nix
+  #    hostPath:
+  #      path: /nix
+
+  # -- The volume mounts of the CephCSI RBD plugin DaemonSet
+  csiRBDPluginVolumeMount:
+  #  - name: host-nix
+  #    mountPath: /nix
+  #    readOnly: true
+
+  # -- The volume of the CephCSI CephFS plugin DaemonSet
+  csiCephFSPluginVolume:
+  #  - name: lib-modules
+  #    hostPath:
+  #      path: /run/booted-system/kernel-modules/lib/modules/
+  #  - name: host-nix
+  #    hostPath:
+  #      path: /nix
+
+  # -- The volume mounts of the CephCSI CephFS plugin DaemonSet
+  csiCephFSPluginVolumeMount:
+  #  - name: host-nix
+  #    mountPath: /nix
+  #    readOnly: true
+
+  # -- CEPH CSI RBD provisioner resource requirement list
+  # csi-omap-generator resources will be applied only if `enableOMAPGenerator` is set to `true`
+  # @default -- see values.yaml
   csiRBDProvisionerResource: |
     - name : csi-provisioner
       resource:
@@ -179,8 +251,9 @@ csi:
         limits:
           memory: 256Mi
           cpu: 100m
-  # CEPH CSI RBD plugin resource requirement list, Put here list of resource
-  # requests and limits you want to apply for plugin pod
+
+  # -- CEPH CSI RBD plugin resource requirement list
+  # @default -- see values.yaml
   csiRBDPluginResource: |
     - name : driver-registrar
       resource:
@@ -206,8 +279,9 @@ csi:
         limits:
           memory: 256Mi
           cpu: 100m
-  # CEPH CSI CephFS provisioner resource requirement list, Put here list of resource
-  # requests and limits you want to apply for provisioner pod
+
+  # -- CEPH CSI CephFS provisioner resource requirement list
+  # @default -- see values.yaml
   csiCephFSProvisionerResource: |
     - name : csi-provisioner
       resource:
@@ -257,8 +331,9 @@ csi:
         limits:
           memory: 256Mi
           cpu: 100m
-  # CEPH CSI CephFS plugin resource requirement list, Put here list of resource
-  # requests and limits you want to apply for plugin pod
+
+  # -- CEPH CSI CephFS plugin resource requirement list
+  # @default -- see values.yaml
   csiCephFSPluginResource: |
     - name : driver-registrar
       resource:
@@ -284,8 +359,9 @@ csi:
         limits:
           memory: 256Mi
           cpu: 100m
-  # CEPH CSI NFS provisioner resource requirement list, Put here list of resource
-  # requests and limits you want to apply for provisioner pod
+
+  # -- CEPH CSI NFS provisioner resource requirement list
+  # @default -- see values.yaml
   csiNFSProvisionerResource: |
     - name : csi-provisioner
       resource:
@@ -303,88 +379,153 @@ csi:
         limits:
           memory: 1Gi
           cpu: 500m
-  # CEPH CSI NFS plugin resource requirement list, Put here list of resource
-  # requests and limits you want to apply for plugin pod
+
+  # -- CEPH CSI NFS plugin resource requirement list
+  # @default -- see values.yaml
   csiNFSPluginResource: |
-   - name : driver-registrar
-     resource:
-       requests:
-         memory: 128Mi
-         cpu: 50m
-       limits:
-         memory: 256Mi
-         cpu: 100m
-   - name : csi-nfsplugin
-     resource:
-       requests:
-         memory: 512Mi
-         cpu: 250m
-       limits:
-         memory: 1Gi
-         cpu: 500m
-
-  # Set provisonerTolerations and provisionerNodeAffinity for provisioner pod.
+    - name : driver-registrar
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 50m
+        limits:
+          memory: 256Mi
+          cpu: 100m
+    - name : csi-nfsplugin
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+          cpu: 500m
+
+  # Set provisionerTolerations and provisionerNodeAffinity for provisioner pod.
   # The CSI provisioner would be best to start on the same nodes as other ceph daemons.
-  # provisionerTolerations:
+
+  # -- Array of tolerations in YAML format which will be added to CSI provisioner deployment
+  provisionerTolerations:
   #    - key: key
   #      operator: Exists
   #      effect: NoSchedule
-  # provisionerNodeAffinity: key1=value1,value2; key2=value3
-  provisionerNodeAffinity: "storage-node=true"
+
+  # -- The node labels for affinity of the CSI provisioner deployment [^1]
+  provisionerNodeAffinity: "storage-node=true" #key1=value1,value2; key2=value3
   # Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods.
   # The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
+
+  # -- Array of tolerations in YAML format which will be added to CephCSI plugin DaemonSet
   pluginTolerations:
-     - key: storage-node
-       operator: Exists
-       effect: NoSchedule
-  # pluginNodeAffinity: key1=value1,value2; key2=value3
-  #pluginNodeAffinity: "storage-node=true"
-  #cephfsGrpcMetricsPort: 9091
-  #cephfsLivenessMetricsPort: 9081
-  #rbdGrpcMetricsPort: 9090
-  #csiAddonsPort: 9070
-  # Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
+      - key: storage-node
+        operator: Exists
+        effect: NoSchedule
+
+  # -- The node labels for affinity of the CephCSI RBD plugin DaemonSet [^1]
+  pluginNodeAffinity: # key1=value1,value2; key2=value3
+
+  # -- Enable Ceph CSI Liveness sidecar deployment
+  enableLiveness: false
+
+  # -- CSI CephFS driver GRPC metrics port
+  # @default -- `9091`
+  cephfsGrpcMetricsPort:
+
+  # -- CSI CephFS driver metrics port
+  # @default -- `9081`
+  cephfsLivenessMetricsPort:
+
+  # -- Ceph CSI RBD driver GRPC metrics port
+  # @default -- `9090`
+  rbdGrpcMetricsPort:
+
+  # -- CSI Addons server port
+  # @default -- `9070`
+  csiAddonsPort:
+
+  # -- Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
   # you may want to disable this setting. However, this will cause an issue during upgrades
-  # with the FUSE client. See the upgrade guide: https://rook.io/docs/rook/v1.2/ceph-upgrade.html
+  # with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html)
   forceCephFSKernelClient: true
-  #rbdLivenessMetricsPort: 9080
-  #kubeletDirPath: /var/lib/kubelet
-  #cephcsi:
-    #image: quay.io/cephcsi/cephcsi:v3.6.1
-  #registrar:
-    #image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.5.0
-  #provisioner:
-    #image: k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0
-  #snapshotter:
-    #image: k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1
-  #attacher:
-    #image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
-  #resizer:
-    #image: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0
-  # Labels to add to the CSI CephFS Deployments and DaemonSets Pods.
-  #cephfsPodLabels: "key1=value1,key2=value2"
-  # Labels to add to the CSI NFS Deployments and DaemonSets Pods.
-  #nfsPodLabels: "key1=value1,key2=value2"
-  # Labels to add to the CSI RBD Deployments and DaemonSets Pods.
-  #rbdPodLabels: "key1=value1,key2=value2"
-  # Enable the volume replication controller.
-  # Before enabling, ensure the Volume Replication CRDs are created.
-  # See https://rook.io/docs/rook/latest/ceph-csi-drivers.html#rbd-mirroring
-  volumeReplication:
-    enabled: false
-    #image: "quay.io/csiaddons/volumereplication-operator:v0.3.0"
-  # Enable the CSIAddons sidecar.
+
+  # -- Ceph CSI RBD driver metrics port
+  # @default -- `8080`
+  rbdLivenessMetricsPort:
+
+  # -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
+  # @default -- `/var/lib/kubelet`
+  kubeletDirPath:
+
+  cephcsi:
+    # -- Ceph CSI image
+    # @default -- `quay.io/cephcsi/cephcsi:v3.7.2`
+    image:
+
+  registrar:
+    # -- Kubernetes CSI registrar image
+    # @default -- `registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0`
+    image:
+
+  provisioner:
+    # -- Kubernetes CSI provisioner image
+    # @default -- `registry.k8s.io/sig-storage/csi-provisioner:v3.4.0`
+    image:
+
+  snapshotter:
+    # -- Kubernetes CSI snapshotter image
+    # @default -- `registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1`
+    image:
+
+  attacher:
+    # -- Kubernetes CSI Attacher image
+    # @default -- `registry.k8s.io/sig-storage/csi-attacher:v4.1.0`
+    image:
+
+  resizer:
+    # -- Kubernetes CSI resizer image
+    # @default -- `registry.k8s.io/sig-storage/csi-resizer:v1.7.0`
+    image:
+
+  # -- Image pull policy
+  imagePullPolicy: IfNotPresent
+
+  # -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods
+  cephfsPodLabels: #"key1=value1,key2=value2"
+
+  # -- Labels to add to the CSI NFS Deployments and DaemonSets Pods
+  nfsPodLabels: #"key1=value1,key2=value2"
+
+  # -- Labels to add to the CSI RBD Deployments and DaemonSets Pods
+  rbdPodLabels: #"key1=value1,key2=value2"
+
   csiAddons:
+    # -- Enable CSIAddons
     enabled: false
-    #image: "quay.io/csiaddons/k8s-sidecar:v0.2.1"
-  # Enable the nfs csi driver.
+    # -- CSIAddons Sidecar image
+    image: "quay.io/csiaddons/k8s-sidecar:v0.5.0"
+
   nfs:
+    # -- Enable the nfs csi driver
+    enabled: false
+
+  topology:
+    # -- Enable topology based provisioning
     enabled: false
-    #image: "k8s.gcr.io/sig-storage/nfsplugin:v3.1.0"
+    # NOTE: the value here serves as an example and needs to be
+    # updated with node labels that define domains of interest
+    # -- domainLabels define which node labels to use as domains
+    # for CSI nodeplugins to advertise their domains
+    domainLabels:
+    # - kubernetes.io/hostname
+    # - topology.kubernetes.io/zone
+    # - topology.rook.io/rack
+
+# -- Enable discovery daemon
 enableDiscoveryDaemon: false
+
+# -- The timeout for ceph commands in seconds
 cephCommandsTimeoutSeconds: "15"
 
-## if true, run rook operator on the host network
+# -- if true, run rook operator on the host network
 useOperatorHostNetwork: false
 
 ## Rook Discover configuration
@@ -392,43 +533,53 @@ useOperatorHostNetwork: false
 ## tolerationKey: Set this to the specific key of the taint to tolerate
 ## tolerations: Array of tolerations in YAML format which will be added to agent deployment
 ## nodeAffinity: Set to labels of the node to match
-# discover:
-#   toleration: NoSchedule
-#   tolerationKey: key
-#   tolerations:
-#   - key: key
-#     operator: Exists
-#     effect: NoSchedule
-#   nodeAffinity: key1=value1,value2; key2=value3
-#   podLabels: "key1=value1,key2=value2"
-
-# In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins).
-# Disable it here if you have similar issues.
-# For more details see https://github.com/rook/rook/issues/2417
-enableSelinuxRelabeling: true
-
-disableAdmissionController: false
-
-# Writing to the hostPath is required for the Ceph mon and osd pods. Given the restricted permissions in OpenShift with SELinux,
-# the pod must be running privileged in order to write to the hostPath volume, this must be set to true then.
+
+discover:
+  # -- Toleration for the discover pods.
+  # Options: `NoSchedule`, `PreferNoSchedule` or `NoExecute`
+  toleration:
+  # -- The specific key of the taint to tolerate
+  tolerationKey:
+  # -- Array of tolerations in YAML format which will be added to discover deployment
+  tolerations:
+  #   - key: key
+  #     operator: Exists
+  #     effect: NoSchedule
+  # -- The node labels for affinity of `discover-agent` [^1]
+  nodeAffinity: # key1=value1,value2; key2=value3
+  # -- Labels to add to the discover pods
+  podLabels: # "key1=value1,key2=value2"
+  # -- Add resources to discover daemon pods
+  resources:
+  #   - limits:
+  #       cpu: 500m
+  #       memory: 512Mi
+  #   - requests:
+  #       cpu: 100m
+  #       memory: 128Mi
+
+# -- Whether to disable the admission controller
+disableAdmissionController: true
+
+# -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
 hostpathRequiresPrivileged: false
 
-# Disable automatic orchestration when new devices are discovered.
+# -- Disable automatic orchestration when new devices are discovered.
 disableDeviceHotplug: false
 
-# Blacklist certain disks according to the regex provided.
+# -- Blacklist certain disks according to the regex provided.
 discoverDaemonUdev:
 
-# imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
-# imagePullSecrets:
+# -- imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
+imagePullSecrets:
 # - name: my-registry-secret
 
-# Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used
+# -- Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used
 enableOBCWatchOperatorNamespace: true
 
+# -- Set tolerations and nodeAffinity [^1] for admission controller pod.
+# The admission controller would be best to start on the same nodes as other ceph daemons.
 admissionController:
-  # Set tolerations and nodeAffinity for admission controller pod.
-  # The admission controller would be best to start on the same nodes as other ceph daemons.
   # tolerations:
   #    - key: key
   #      operator: Exists
@@ -436,7 +587,9 @@ admissionController:
   # nodeAffinity: key1=value1,value2; key2=value3
   nodeAffinity: "storage-node=true"
 
+# [^1]: `nodeAffinity` and `*NodeAffinity` options should have the format `"role=storage,rook; storage=ceph"` or `storage=;role=rook-example` or `storage=;` (_checks only for presence of key_)
+
 monitoring:
-  # requires Prometheus to be pre-installed
-  # enabling will also create RBAC rules to allow Operator to create ServiceMonitors
+  # -- Enable monitoring. Requires Prometheus to be pre-installed.
+  # Enabling will also create RBAC rules to allow Operator to create ServiceMonitors
   enabled: true

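Rook's Helm upgrade flow runs the operator chart upgrade before the cluster chart, so the new operator can reconcile the Ceph daemons. A rough way to confirm the new operator image and watch that reconcile, assuming the default rook-ceph namespace and the labels Rook applies to its deployments:

# confirm the operator picked up the v1.10.13 tag
kubectl -n rook-ceph get deploy rook-ceph-operator -o wide
# ceph daemon deployments carry the rook-version that last reconciled them
kubectl -n rook-ceph get deploy -l rook_cluster=rook-ceph --show-labels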
shelly-plug-exporter.yaml (+1, -1)

@@ -22,7 +22,7 @@ spec:
     spec:
       containers:
       - name: shelly-plug-exporter
-        image: jibby0/shelly-plug-exporter:24.2.0-fork
+        image: jibby0/shelly-plug-exporter:24.2.0-fork2
         ports:
         - containerPort: 8080
           name: metrics
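The exporter change is only an image tag bump; a sketch of rolling it out, assuming the manifest defines a Deployment named shelly-plug-exporter and is applied directly with kubectl:

kubectl apply -f shelly-plug-exporter.yaml
# wait for the 24.2.0-fork2 pods to become ready
kubectl rollout status deployment/shelly-plug-exporter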