Josh Bicking 3 months ago
parent
commit 9a273342f5
2 changed files with 185 additions and 32 deletions
  1. +125 -26 rook/rook-ceph-cluster-values.yaml
  2. +60 -6 rook/rook-ceph-operator-values.yaml

+125 -26
rook/rook-ceph-cluster-values.yaml

@@ -1,4 +1,4 @@
-# From https://raw.githubusercontent.com/rook/rook/v1.10.13/deploy/charts/rook-ceph-cluster/values.yaml
+# From https://raw.githubusercontent.com/rook/rook/v1.11.11/deploy/charts/rook-ceph-cluster/values.yaml
 # KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --namespace rook-ceph rook-ceph-cluster --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f rook/rook-ceph-cluster-values.yaml --version 1.10.13
 # Default values for a single rook-ceph cluster
 # This is a YAML-formatted file.
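Note: the helm upgrade command in the comment above still pins --version 1.10.13 even though the values header now points at the v1.11.11 chart. If the pinned chart version is meant to track the values file, the comment would presumably read:

    # KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --namespace rook-ceph rook-ceph-cluster --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f rook/rook-ceph-cluster-values.yaml --version 1.11.11
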
@@ -27,6 +27,7 @@ toolbox:
   # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
   enabled: true
   # -- Toolbox image, defaults to the image used by the Ceph cluster
+  # image: quay.io/ceph/ceph:v17.2.6
   image: rook/ceph:v1.9.0.230.g6a87cb44a # TODO probably safe to remove?
   # -- Toolbox tolerations
   tolerations: []
@@ -51,7 +52,7 @@ monitoring:
   createPrometheusRules: true
   # -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
   # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
-  # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
+  # deployed) to set rulesNamespaceOverride for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
   rulesNamespaceOverride:
   # Monitoring settings for external clusters:
   # externalMgrEndpoints: <list of endpoints>
@@ -88,7 +89,7 @@ cephClusterSpec:
     # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
     # If you want to be more precise, you can always use a timestamp tag such quay.io/ceph/ceph:v15.2.11-20200419
     # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
-    #image: quay.io/ceph/ceph:v17.2.5
+    # image: quay.io/ceph/ceph:v17.2.6
     image: quay.io/ceph/ceph:v16.2.7
     # Whether to allow unsupported versions of Ceph. Currently `pacific` and `quincy` are supported.
     # Future versions such as `reef` (v18) would require this to be set to `true`.
@@ -151,6 +152,23 @@ cephClusterSpec:
 
   # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/ceph-cluster-crd.md#network-configuration-settings
   network:
+    connections:
+      # Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network.
+      # The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons will be encrypted.
+      # When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check.
+      # IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively for testing only,
+      # you can set the "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
+      # The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
+      encryption:
+        enabled: false
+      # Whether to compress the data in transit across the wire. The default is false.
+      # Requires Ceph Quincy (v17) or newer. Also see the kernel requirements above for encryption.
+      compression:
+        enabled: false
+      # Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
+      # and clients will be required to connect to the Ceph cluster with the v2 port (3300).
+      # Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer).
+      requireMsgr2: false
     # enable host networking
     provider: host
   #   # EXPERIMENTAL: enable the Multus network provider
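Note: the new network.connections block keeps on-wire encryption, compression, and msgr2 enforcement at their defaults (all off). A minimal sketch of what enabling encryption might look like under cephClusterSpec, assuming the 5.11+ kernel the comments call for:

    network:
      connections:
        encryption:
          enabled: true
        compression:
          enabled: false
        requireMsgr2: false
      provider: host

Per the operator chart's own comment, csi.cephFSKernelMountOptions would then also need to be set to ms_mode=secure (see the second file below).
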
@@ -178,10 +196,10 @@ cephClusterSpec:
     # daysToRetain: 30
 
   # enable log collector, daemons will log on files and rotate
-  # logCollector:
-  #   enabled: true
-  #   periodicity: daily # one of: hourly, daily, weekly, monthly
-  #   maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
+  #logCollector:
+  #  enabled: true
+  #  periodicity: daily # one of: hourly, daily, weekly, monthly
+  #  maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
 
   # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
   cleanupPolicy:
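Note: the logCollector block above is only re-indented and stays commented out. If daemon log rotation were wanted, uncommenting it under cephClusterSpec would presumably look like:

    logCollector:
      enabled: true
      periodicity: daily   # one of: hourly, daily, weekly, monthly
      maxLogSize: 500M     # suffix may be 'M' or 'G'; must be at least 1M
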
@@ -227,15 +245,15 @@ cephClusterSpec:
       tolerations:
       - key: storage-node
         operator: Exists
-    # The above placement information can also be specified for mon, osd, and mgr components
-    mon:
-    # Monitor deployments may contain an anti-affinity rule for avoiding monitor
-    # collocation on the same node. This is a required rule when host network is used
-    # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
-    # preferred rule with weight: 50.
-    osd:
-    mgr:
-    cleanup:
+  #   # The above placement information can also be specified for mon, osd, and mgr components
+  #   mon:
+  #   # Monitor deployments may contain an anti-affinity rule for avoiding monitor
+  #   # collocation on the same node. This is a required rule when host network is used
+  #   # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
+  #   # preferred rule with weight: 50.
+  #   osd:
+  #   mgr:
+  #   cleanup:
 
   # annotations:
   #   all:
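Note: this change comments the per-daemon placement keys back out, so only the "all" placement (storage-node affinity and toleration) stays active. If mon-specific placement were ever needed again, a hypothetical re-enable under cephClusterSpec could look like:

    placement:
      mon:
        tolerations:
          - key: storage-node
            operator: Exists
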
@@ -319,6 +337,13 @@ cephClusterSpec:
       requests:
         cpu: 0
         memory: "100Mi"
+    exporter:
+      limits:
+        cpu: 0
+        memory: "128Mi"
+      requests:
+        cpu: 0
+        memory: "50Mi"
 
   # The option to automatically remove OSDs that are out and are safe to destroy.
   removeOSDsIfOutAndSafeToRemove: false
@@ -367,11 +392,6 @@ cephClusterSpec:
     # Operator will continue with the next drain if the timeout exceeds. It only works if `managePodBudgets` is `true`.
     # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
     pgHealthCheckTimeout: 0
-    # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
-    # Only available on OpenShift.
-    manageMachineDisruptionBudgets: false
-    # Namespace in which to watch for the MachineDisruptionBudgets.
-    machineDisruptionBudgetNamespace: openshift-machine-api
 
   # Configure the healthcheck and liveness probes for ceph pods.
   # Valid values for daemons are 'mon', 'osd', 'status'
@@ -402,7 +422,6 @@ ingress:
     # annotations:
     #   external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
     #   nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
-    #   kubernetes.io/ingress.class: nginx
     # If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly
     #   nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
     #   nginx.ingress.kubernetes.io/server-snippet: |
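Note: the dropped kubernetes.io/ingress.class annotation is deprecated in current Kubernetes releases in favor of an ingress class name. If the dashboard ingress needed a class, a hedged sketch (assuming the chart exposes ingressClassName under ingress.dashboard, as newer chart versions appear to) would be:

    ingress:
      dashboard:
        ingressClassName: nginx
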
@@ -427,19 +446,23 @@ cephBlockPools:
       failureDomain: host
       replicated:
         size: 3
+      # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
+      # For reference: https://docs.ceph.com/docs/master/mgr/prometheus/#rbd-io-statistics
+      # enableRBDStats: true
     storageClass:
       enabled: true
       name: ceph-block
       isDefault: true
       reclaimPolicy: Delete
       allowVolumeExpansion: true
+      volumeBindingMode: "Immediate"
       mountOptions: []
       # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
       allowedTopologies: []
-#        - matchLabelExpressions:
-#            - key: rook-ceph-role
-#              values:
-#                - storage-node
+      #        - matchLabelExpressions:
+      #            - key: rook-ceph-role
+      #              values:
+      #                - storage-node
       # see https://github.com/rook/rook/blob/master/Documentation/ceph-block.md#provision-storage for available configuration
       parameters:
         # (optional) mapOptions is a comma-separated list of map options.
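Note: volumeBindingMode: "Immediate" (added here and to the filesystem and object-store storage classes below) binds and provisions a PV as soon as the PVC is created. If topology-aware scheduling mattered, the usual alternative would be:

    storageClass:
      volumeBindingMode: "WaitForFirstConsumer"
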
@@ -526,6 +549,7 @@ cephFileSystems:
       pool: data0
       reclaimPolicy: Delete
       allowVolumeExpansion: true
+      volumeBindingMode: "Immediate"
       mountOptions: []
       # see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem.md#provision-storage for available configuration
       parameters:
@@ -616,7 +640,82 @@ cephObjectStores:
       enabled: true
       name: ceph-bucket
       reclaimPolicy: Delete
+      volumeBindingMode: "Immediate"
       # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-bucket-claim.md#storageclass for available configuration
       parameters:
         # note: objectStoreNamespace and objectStoreName are configured by the chart
         region: us-east-1
+    ingress:
+      # Enable an ingress for the ceph-objectstore
+      enabled: false
+      # annotations: {}
+      # host:
+      #   name: objectstore.example.com
+      #   path: /
+      # tls:
+      # - hosts:
+      #     - objectstore.example.com
+      #   secretName: ceph-objectstore-tls
+      # ingressClassName: nginx
+
+# cephECBlockPools are disabled by default, please remove the comments and set desired values to enable it
+#cephECBlockPools:
+#  # For erasure coded a replicated metadata pool is required.
+#  # https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded
+#  - name: ec-metadata-pool
+#    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
+#    spec:
+#      replicated:
+#        size: 2
+#  - name: ec-data-pool
+#    spec:
+#      failureDomain: osd
+#      erasureCoded:
+#        dataChunks: 2
+#        codingChunks: 1
+#      deviceClass: hdd
+
+# cephECStorageClass also is disabled by default, please remove the comments and set desired values to enable it
+# if cephECBlockPools are uncommented you must remove the comments of cephEcStorageClass as well
+#cephECStorageClass:
+#  name: rook-ceph-block
+#  # Change "rook-ceph" provisioner prefix to match the operator namespace if needed
+#  provisioner: rook-ceph.rbd.csi.ceph.com # driver:namespace:operator
+#  parameters:
+#    # clusterID is the namespace where the rook cluster is running
+#    # If you change this namespace, also change the namespace below where the secret namespaces are defined
+#    clusterID: rook-ceph # namespace:cluster
+#
+#    # If you want to use erasure coded pool with RBD, you need to create
+#    # two pools. one erasure coded and one replicated.
+#    # You need to specify the replicated pool here in the `pool` parameter, it is
+#    # used for the metadata of the images.
+#    # The erasure coded pool must be set as the `dataPool` parameter below.
+#    dataPool: ec-data-pool
+#    pool: ec-metadata-pool
+#
+#    # (optional) mapOptions is a comma-separated list of map options.
+#    # For krbd options refer
+#    # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
+#    # For nbd options refer
+#    # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
+#    # mapOptions: lock_on_read,queue_depth=1024
+#
+#    # (optional) unmapOptions is a comma-separated list of unmap options.
+#    # For krbd options refer
+#    # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
+#    # For nbd options refer
+#    # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
+#    # unmapOptions: force
+#
+#    # RBD image format. Defaults to "2".
+#    imageFormat: "2"
+#
+#    # RBD image features, equivalent to OR'd bitfield value: 63
+#    # Available for imageFormat: "2". Older releases of CSI RBD
+#    # support only the `layering` feature. The Linux kernel (KRBD) supports the
+#    # full feature complement as of 5.4
+#    # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
+#    imageFeatures: layering
+#  allowVolumeExpansion: true
+#  reclaimPolicy: Delete
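Note: the new cephObjectStores ingress block above is added disabled, and the erasure-coded pool and storage-class examples stay fully commented. If the object store were to be exposed, uncommenting the sample values would presumably look like (the hostname is only the placeholder from the comments):

    ingress:
      enabled: true
      host:
        name: objectstore.example.com
        path: /
      ingressClassName: nginx
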

+60 -6
rook/rook-ceph-operator-values.yaml

@@ -1,4 +1,4 @@
-# From https://github.com/rook/rook/blob/v1.10.13/deploy/charts/rook-ceph/values.yaml
+# From https://github.com/rook/rook/blob/v1.11.11/deploy/charts/rook-ceph/values.yaml
 # export ROOK_OPERATOR_NAMESPACE=rook-ceph
 # export ROOK_CLUSTER_NAMESPACE=rook-ceph
 # KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --install --namespace rook-ceph rook-ceph rook-release/rook-ceph -f rook/rook-ceph-operator-values.yaml --version 1.10.13
@@ -11,7 +11,7 @@ image:
   repository: rook/ceph
   # -- Image tag
   # @default -- `master`
-  tag: v1.10.13
+  tag: v1.11.11
   # -- Image pull policy
   pullPolicy: IfNotPresent
 
@@ -34,8 +34,7 @@ resources:
     memory: 128Mi
 
 # -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment.
-nodeSelector:
-  storage-node: "true"
+nodeSelector: {}
 # Constraint rook-ceph-operator Deployment to nodes with label `disktype: ssd`.
 # For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
 #  disktype: ssd
@@ -118,6 +117,10 @@ csi:
   # sidecar with CSI provisioner pod, to enable set it to true.
   enableOMAPGenerator: false
 
+  # -- Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options.
+  # Set to "ms_mode=secure" when connections.encrypted is enabled in CephCluster CR
+  cephFSKernelMountOptions:
+
   # -- Enable adding volume metadata on the CephFS subvolumes and RBD images.
   # Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images.
   # Hence enable metadata is false by default
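Note: per the comment just added, cephFSKernelMountOptions pairs with the cluster-side encryption switch. If network.connections.encryption were enabled in the cluster values above, this would presumably become:

    csi:
      cephFSKernelMountOptions: ms_mode=secure
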
@@ -379,6 +382,14 @@ csi:
         limits:
           memory: 1Gi
           cpu: 500m
+    - name : csi-attacher
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+          cpu: 500m
 
   # -- CEPH CSI NFS plugin resource requirement list
   # @default -- see values.yaml
@@ -451,13 +462,19 @@ csi:
   # @default -- `8080`
   rbdLivenessMetricsPort:
 
+  serviceMonitor:
+    # -- Enable ServiceMonitor for Ceph CSI drivers
+    enabled: false
+    # -- Service monitor scrape interval
+    interval: 5s
+
   # -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
   # @default -- `/var/lib/kubelet`
   kubeletDirPath:
 
   cephcsi:
     # -- Ceph CSI image
-    # @default -- `quay.io/cephcsi/cephcsi:v3.7.2`
+    # @default -- `quay.io/cephcsi/cephcsi:v3.8.0`
     image:
 
   registrar:
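Note: the new csi.serviceMonitor block is added disabled. With Prometheus rules already enabled in the cluster values, turning on CSI metrics scraping would presumably just be:

    csi:
      serviceMonitor:
        enabled: true
        interval: 5s
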
@@ -519,15 +536,52 @@ csi:
     # - topology.kubernetes.io/zone
     # - topology.rook.io/rack
 
+  readAffinity:
+    # -- Enable read affinity for RBD volumes. Recommended to
+    # set to true if running kernel 5.8 or newer.
+    # @default -- `false`
+    enabled: true
+    # -- Define which node labels to use
+    # as CRUSH location. This should correspond to the values set
+    # in the CRUSH map.
+    # @default -- labels listed [here](../CRDs/Cluster/ceph-cluster-crd.md#osd-topology)
+    crushLocationLabels:
+
+  # -- Whether to skip any attach operation altogether for CephFS PVCs. See more details
+  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+  # If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
+  # of pods using the CephFS PVC fast. **WARNING** It's highly discouraged to use this for
+  # CephFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+  cephFSAttachRequired: true
+  # -- Whether to skip any attach operation altogether for RBD PVCs. See more details
+  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+  # If set to false it skips the volume attachments and makes the creation of pods using the RBD PVC fast.
+  # **WARNING** It's highly discouraged to use this for RWO volumes as it can cause data corruption.
+  # csi-addons operations like Reclaimspace and PVC Keyrotation will also not be supported if set
+  # to false since we'll have no VolumeAttachments to determine which node the PVC is mounted on.
+  # Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+  rbdAttachRequired: true
+  # -- Whether to skip any attach operation altogether for NFS PVCs. See more details
+  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+  # If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
+  # of pods using the NFS PVC fast. **WARNING** It's highly discouraged to use this for
+  # NFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+  nfsAttachRequired: true
+
 # -- Enable discovery daemon
 enableDiscoveryDaemon: false
 
 # -- The timeout for ceph commands in seconds
 cephCommandsTimeoutSeconds: "15"
 
-# -- if true, run rook operator on the host network
+# -- If true, run rook operator on the host network
 useOperatorHostNetwork: false
 
+# -- If true, scale down the rook operator.
+# This is useful for administrative actions where the rook operator must be scaled down, while using gitops style tooling
+# to deploy your helm charts.
+scaleDownOperator: false
+
 ## Rook Discover configuration
 ## toleration: NoSchedule, PreferNoSchedule or NoExecute
 ## tolerationKey: Set this to the specific key of the taint to tolerate
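Note: the tail of this last hunk enables csi.readAffinity, keeps the attach-required flags at their safe true defaults, and adds scaleDownOperator: false. A hedged sketch of pinning read affinity to a topology label (the label name is only the example from the csi.topology comments above, not something this commit sets):

    csi:
      readAffinity:
        enabled: true
        crushLocationLabels:
          - topology.kubernetes.io/zone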