Josh Bicking committed 3 months ago
commit 937c115fe7
2 changed files with 37 additions and 24 deletions
  1. rook/rook-ceph-cluster-values.yaml (+15, -14)
  2. rook/rook-ceph-operator-values.yaml (+22, -10)

+ 15 - 14
rook/rook-ceph-cluster-values.yaml

@@ -1,5 +1,5 @@
-# From https://raw.githubusercontent.com/rook/rook/v1.11.11/deploy/charts/rook-ceph-cluster/values.yaml
-# KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --namespace rook-ceph rook-ceph-cluster --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f rook/rook-ceph-cluster-values.yaml --version 1.10.13
+# From https://raw.githubusercontent.com/rook/rook/v1.12.11/deploy/charts/rook-ceph-cluster/values.yaml
+# KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --namespace rook-ceph rook-ceph-cluster --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f rook/rook-ceph-cluster-values.yaml --version 1.12.11
 # Default values for a single rook-ceph cluster
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.
@@ -27,8 +27,7 @@ toolbox:
   # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
   enabled: true
   # -- Toolbox image, defaults to the image used by the Ceph cluster
-  # image: quay.io/ceph/ceph:v17.2.6
-  image: rook/ceph:v1.9.0.230.g6a87cb44a # TODO probably safe to remove?
+  image: #quay.io/ceph/ceph:v17.2.6
   # -- Toolbox tolerations
   tolerations: []
   # -- Toolbox affinity
@@ -57,6 +56,8 @@ monitoring:
   # Monitoring settings for external clusters:
   # externalMgrEndpoints: <list of endpoints>
   # externalMgrPrometheusPort: <port>
+  # Scrape interval for prometheus
+  # interval: 5s
   # allow adding custom labels and annotations to the prometheus rule
   prometheusRule:
     # -- Labels applied to PrometheusRule
@@ -91,8 +92,8 @@ cephClusterSpec:
     # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
     # image: quay.io/ceph/ceph:v17.2.6
     image: quay.io/ceph/ceph:v16.2.7
-    # Whether to allow unsupported versions of Ceph. Currently `pacific` and `quincy` are supported.
-    # Future versions such as `reef` (v18) would require this to be set to `true`.
+    # Whether to allow unsupported versions of Ceph. Currently `pacific`, `quincy`, and `reef` are supported.
+    # Future versions such as `squid` (v19) would require this to be set to `true`.
     # Do not set to true in production.
     allowUnsupported: false
 
@@ -196,12 +197,12 @@ cephClusterSpec:
     # daysToRetain: 30
 
   # enable log collector, daemons will log on files and rotate
-  #logCollector:
-  #  enabled: true
-  #  periodicity: daily # one of: hourly, daily, weekly, monthly
-  #  maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
+  logCollector:
+    enabled: false
+    periodicity: daily # one of: hourly, daily, weekly, monthly
+    maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
 
-  # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
+  # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
   cleanupPolicy:
     # Since cluster cleanup is destructive to data, confirmation is required.
     # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
@@ -463,7 +464,7 @@ cephBlockPools:
       #            - key: rook-ceph-role
       #              values:
       #                - storage-node
-      # see https://github.com/rook/rook/blob/master/Documentation/ceph-block.md#provision-storage for available configuration
+      # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
       parameters:
         # (optional) mapOptions is a comma-separated list of map options.
         # For krbd options refer
@@ -551,7 +552,7 @@ cephFileSystems:
       allowVolumeExpansion: true
       volumeBindingMode: "Immediate"
       mountOptions: []
-      # see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem.md#provision-storage for available configuration
+      # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration
       parameters:
         # The secrets contain Ceph admin credentials.
         csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
@@ -641,7 +642,7 @@ cephObjectStores:
       name: ceph-bucket
       reclaimPolicy: Delete
       volumeBindingMode: "Immediate"
-      # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-bucket-claim.md#storageclass for available configuration
+      # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration
       parameters:
         # note: objectStoreNamespace and objectStoreName are configured by the chart
         region: us-east-1
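
After the cluster chart bump above, one way to confirm the rollout is to re-run the upgrade command from the file header and then check cluster health from the toolbox pod (enabled in this values file). This is a minimal sketch, assuming the toolbox Deployment keeps its default name rook-ceph-tools and everything runs in the rook-ceph namespace:

    # Apply the new cluster values (command taken from the file header above)
    KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --namespace rook-ceph rook-ceph-cluster \
      --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster \
      -f rook/rook-ceph-cluster-values.yaml --version 1.12.11

    # Check overall health, then confirm all daemons report the expected Ceph release
    kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph status
    kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph versions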

+ 22 - 10
rook/rook-ceph-operator-values.yaml

@@ -1,7 +1,7 @@
-# From https://github.com/rook/rook/blob/v1.11.11/deploy/charts/rook-ceph/values.yaml
+# From https://github.com/rook/rook/blob/v1.12.11/deploy/charts/rook-ceph/values.yaml
 # export ROOK_OPERATOR_NAMESPACE=rook-ceph
 # export ROOK_CLUSTER_NAMESPACE=rook-ceph
-# KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --install --namespace rook-ceph rook-ceph rook-release/rook-ceph -f rook/rook-ceph-operator-values.yaml --version 1.10.13
+# KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --install --namespace rook-ceph rook-ceph rook-release/rook-ceph -f rook/rook-ceph-operator-values.yaml --version 1.12.11
 # Default values for rook-ceph-operator
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.
@@ -11,7 +11,7 @@ image:
   repository: rook/ceph
   # -- Image tag
   # @default -- `master`
-  tag: v1.11.11
+  tag: v1.12.11
   # -- Image pull policy
   pullPolicy: IfNotPresent
 
@@ -59,6 +59,10 @@ logLevel: INFO
 # -- If true, create & use RBAC resources
 rbacEnable: true
 
+rbacAggregate:
+  # -- If true, create a ClusterRole aggregated to [user facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) for objectbucketclaims
+  enableOBCs: false
+
 # -- If true, create & use PSP resources
 pspEnable: true
 
@@ -154,6 +158,10 @@ csi:
   # @default -- `RollingUpdate`
   cephFSPluginUpdateStrategy:
 
+  # -- A maxUnavailable parameter of CSI cephFS plugin daemonset update strategy.
+  # @default -- `1`
+  cephFSPluginUpdateStrategyMaxUnavailable:
+
   # -- CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
   # @default -- `RollingUpdate`
   nfsPluginUpdateStrategy:
@@ -467,6 +475,8 @@ csi:
     enabled: false
     # -- Service monitor scrape interval
     interval: 5s
+    # -- ServiceMonitor additional labels
+    labels: {}
 
   # -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
   # @default -- `/var/lib/kubelet`
@@ -474,32 +484,32 @@ csi:
 
   cephcsi:
     # -- Ceph CSI image
-    # @default -- `quay.io/cephcsi/cephcsi:v3.8.0`
+    # @default -- `quay.io/cephcsi/cephcsi:v3.9.0`
     image:
 
   registrar:
     # -- Kubernetes CSI registrar image
-    # @default -- `registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0`
+    # @default -- `registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0`
     image:
 
   provisioner:
     # -- Kubernetes CSI provisioner image
-    # @default -- `registry.k8s.io/sig-storage/csi-provisioner:v3.4.0`
+    # @default -- `registry.k8s.io/sig-storage/csi-provisioner:v3.5.0`
     image:
 
   snapshotter:
     # -- Kubernetes CSI snapshotter image
-    # @default -- `registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1`
+    # @default -- `registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2`
     image:
 
   attacher:
     # -- Kubernetes CSI Attacher image
-    # @default -- `registry.k8s.io/sig-storage/csi-attacher:v4.1.0`
+    # @default -- `registry.k8s.io/sig-storage/csi-attacher:v4.3.0`
     image:
 
   resizer:
     # -- Kubernetes CSI resizer image
-    # @default -- `registry.k8s.io/sig-storage/csi-resizer:v1.7.0`
+    # @default -- `registry.k8s.io/sig-storage/csi-resizer:v1.8.0`
     image:
 
   # -- Image pull policy
@@ -518,7 +528,7 @@ csi:
     # -- Enable CSIAddons
     enabled: false
     # -- CSIAddons Sidecar image
-    image: "quay.io/csiaddons/k8s-sidecar:v0.5.0"
+    image: "quay.io/csiaddons/k8s-sidecar:v0.7.0"
 
   nfs:
     # -- Enable the nfs csi driver
@@ -570,6 +580,8 @@ csi:
 
 # -- Enable discovery daemon
 enableDiscoveryDaemon: false
+# -- Set the discovery daemon device discovery interval (default to 60m)
+discoveryDaemonInterval: 60m
 
 # -- The timeout for ceph commands in seconds
 cephCommandsTimeoutSeconds: "15"
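
For the operator side, the same pattern applies: run the helm upgrade from the header, then confirm the operator pod is running the new v1.12.11 tag and that the CSI pods were recreated with the bumped sidecar images. A minimal sketch, assuming the default pod labels app=rook-ceph-operator and app=csi-rbdplugin that the chart applies:

    # Apply the new operator values (command taken from the file header above)
    KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --install --namespace rook-ceph rook-ceph \
      rook-release/rook-ceph -f rook/rook-ceph-operator-values.yaml --version 1.12.11

    # Print the images actually running for the operator and the RBD CSI plugin pods
    kubectl -n rook-ceph get pods -l app=rook-ceph-operator \
      -o jsonpath='{.items[*].spec.containers[*].image}{"\n"}'
    kubectl -n rook-ceph get pods -l app=csi-rbdplugin \
      -o jsonpath='{.items[*].spec.containers[*].image}{"\n"}'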