Josh Bicking 3 months ago
parent commit 260e4d3989
3 changed files with 94 additions and 90 deletions
  1. rook/ingress.yaml (+16 −0)
  2. rook/rook-ceph-cluster-values.yaml (+31 −25)
  3. rook/rook-ceph-operator-values.yaml (+47 −65)

+ 16 - 0
rook/ingress.yaml

@@ -0,0 +1,16 @@
+---
+apiVersion: traefik.containo.us/v1alpha1
+kind: IngressRoute
+metadata:
+  name: rook-ceph-dashboard
+  namespace: rook-ceph
+spec:
+  entryPoints:
+  - websecure
+  routes:
+  - kind: Rule
+    match: Host(`ceph.lan.jibby.org`)
+    services:
+    - kind: Service
+      name: rook-ceph-mgr-dashboard
+      port: 8080
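
This route terminates TLS on Traefik's `websecure` entrypoint and forwards plain HTTP to the dashboard, matching the `ssl: false` and `port: 8080` dashboard settings in the cluster values below. As a sketch only: if Traefik should also provision the certificate itself, the same IngressRoute can name a certificate resolver. The `letsencrypt` resolver below is a hypothetical name, not something configured in this commit:

---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: rook-ceph-dashboard
  namespace: rook-ceph
spec:
  entryPoints:
  - websecure
  routes:
  - kind: Rule
    match: Host(`ceph.lan.jibby.org`)
    services:
    - kind: Service
      name: rook-ceph-mgr-dashboard
      port: 8080
  tls:
    # Hypothetical resolver name; omit the tls block to use Traefik's default certificate store.
    certResolver: letsencrypt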

+ 31 - 25
rook/rook-ceph-cluster-values.yaml

@@ -1,5 +1,5 @@
-# From https://raw.githubusercontent.com/rook/rook/v1.12.11/deploy/charts/rook-ceph-cluster/values.yaml
-# KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --namespace rook-ceph rook-ceph-cluster --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f rook/rook-ceph-cluster-values.yaml --version 1.12.11
+# From https://raw.githubusercontent.com/rook/rook/v1.13.10/deploy/charts/rook-ceph-cluster/values.yaml
+# KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --namespace rook-ceph rook-ceph-cluster --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f rook/rook-ceph-cluster-values.yaml --version 1.13.10
 # Default values for a single rook-ceph cluster
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.
@@ -27,15 +27,21 @@ toolbox:
   # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
   enabled: true
   # -- Toolbox image, defaults to the image used by the Ceph cluster
-  image: quay.io/ceph/ceph:v17.2.7
+  image: #quay.io/ceph/ceph:v18.2.2
   # -- Toolbox tolerations
   tolerations: []
   # -- Toolbox affinity
   affinity: {}
+  # -- Toolbox container security context
+  containerSecurityContext:
+    runAsNonRoot: true
+    runAsUser: 2016
+    runAsGroup: 2016
+    capabilities:
+      drop: ["ALL"]
   # -- Toolbox resources
   resources:
     limits:
-      cpu: "500m"
       memory: "1Gi"
     requests:
       cpu: "100m"
@@ -57,7 +63,7 @@ monitoring:
   # externalMgrEndpoints: <list of endpoints>
   # externalMgrPrometheusPort: <port>
   # Scrape interval for prometheus
-  # interval: 5s
+  # interval: 10s
   # allow adding custom labels and annotations to the prometheus rule
   prometheusRule:
     # -- Labels applied to PrometheusRule
@@ -85,13 +91,14 @@ cephClusterSpec:
   # For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
   cephVersion:
     # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
-    # v16 is Pacific, v17 is Quincy.
-    # RECOMMENDATION: In production, use a specific version tag instead of the general v16 flag, which pulls the latest release and could result in different
+    # v17 is Quincy, v18 is Reef.
+    # RECOMMENDATION: In production, use a specific version tag instead of the general v18 flag, which pulls the latest release and could result in different
     # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
-    # If you want to be more precise, you can always use a timestamp tag such quay.io/ceph/ceph:v15.2.11-20200419
+    # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v18.2.2-20240311
     # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
+    # image: quay.io/ceph/ceph:v18.2.2
     image: quay.io/ceph/ceph:v17.2.7
-    # Whether to allow unsupported versions of Ceph. Currently `pacific`, `quincy`, and `reef` are supported.
+    # Whether to allow unsupported versions of Ceph. Currently `quincy` and `reef` are supported.
     # Future versions such as `squid` (v19) would require this to be set to `true`.
     # Do not set to true in production.
     allowUnsupported: false
@@ -145,10 +152,10 @@ cephClusterSpec:
     # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
     # urlPrefix: /ceph-dashboard
     # serve the dashboard at the given port.
-    # port: 8443
+    port: 8080
     # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
     # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
-    ssl: true
+    ssl: false
 
   # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/ceph-cluster-crd.md#network-configuration-settings
   network:
@@ -362,7 +369,6 @@ cephClusterSpec:
     #   crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
     #   metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
     #   databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
-    #   journalSizeMB: "1024"  # uncomment if the disks are 20 GB or smaller
     #   osdsPerDevice: "1" # this value can be overridden at the node or device level
     #   encryptedDevice: "true" # the default value for this option is "false"
     # # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
@@ -438,7 +444,6 @@ ingress:
     # ingressClassName: nginx
 
 # TODO an SSD blockpool
-
 # -- A list of CephBlockPool configurations to deploy
 # @default -- See [below](#ceph-block-pools)
 cephBlockPools:
@@ -450,7 +455,7 @@ cephBlockPools:
         size: 3
       deviceClass: hdd
       # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
-      # For reference: https://docs.ceph.com/docs/master/mgr/prometheus/#rbd-io-statistics
+      # For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics
       # enableRBDStats: true
     storageClass:
       enabled: true
@@ -470,16 +475,16 @@ cephBlockPools:
       parameters:
         # (optional) mapOptions is a comma-separated list of map options.
         # For krbd options refer
-        # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
+        # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
         # For nbd options refer
-        # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
+        # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
         # mapOptions: lock_on_read,queue_depth=1024
 
         # (optional) unmapOptions is a comma-separated list of unmap options.
         # For krbd options refer
-        # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
+        # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
         # For nbd options refer
-        # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
+        # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
         # unmapOptions: force
 
         # RBD image format. Defaults to "2".
@@ -664,7 +669,6 @@ cephObjectStores:
       #     - objectstore.example.com
       #   secretName: ceph-objectstore-tls
       # ingressClassName: nginx
-
 # cephECBlockPools are disabled by default, please remove the comments and set desired values to enable it
 #cephECBlockPools:
 #  # For erasure coded a replicated metadata pool is required.
@@ -686,8 +690,6 @@ cephObjectStores:
 # if cephECBlockPools are uncommented you must remove the comments of cephEcStorageClass as well
 #cephECStorageClass:
 #  name: rook-ceph-block
-#  # Change "rook-ceph" provisioner prefix to match the operator namespace if needed
-#  provisioner: rook-ceph.rbd.csi.ceph.com # driver:namespace:operator
 #  parameters:
 #    # clusterID is the namespace where the rook cluster is running
 #    # If you change this namespace, also change the namespace below where the secret namespaces are defined
@@ -703,16 +705,16 @@ cephObjectStores:
 #
 #    # (optional) mapOptions is a comma-separated list of map options.
 #    # For krbd options refer
-#    # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
+#    # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
 #    # For nbd options refer
-#    # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
+#    # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
 #    # mapOptions: lock_on_read,queue_depth=1024
 #
 #    # (optional) unmapOptions is a comma-separated list of unmap options.
 #    # For krbd options refer
-#    # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
+#    # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
 #    # For nbd options refer
-#    # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
+#    # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
 #    # unmapOptions: force
 #
 #    # RBD image format. Defaults to "2".
@@ -726,3 +728,7 @@ cephObjectStores:
 #    imageFeatures: layering
 #  allowVolumeExpansion: true
 #  reclaimPolicy: Delete
+
+# -- CSI driver name prefix for cephfs, rbd and nfs.
+# @default -- `namespace name where rook-ceph operator is deployed`
+csiDriverNamePrefix:
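
The `csiDriverNamePrefix` value added here controls the names the CSI drivers register under; left unset, it defaults to the operator namespace, which is why the `cephECStorageClass` comment removed above spelled the provisioner as `rook-ceph.rbd.csi.ceph.com` (`driver:namespace:operator`). A minimal sketch of a StorageClass referencing the resulting RBD driver name, assuming the default prefix in this repo's `rook-ceph` namespace (the class and pool names are hypothetical, and a working class also needs the CSI secret parameters the chart normally fills in):

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-block-hdd              # hypothetical class name
provisioner: rook-ceph.rbd.csi.ceph.com  # <csiDriverNamePrefix>.rbd.csi.ceph.com
parameters:
  clusterID: rook-ceph   # namespace where the Ceph cluster is running
  pool: replicapool      # hypothetical pool name
  imageFormat: "2"       # RBD image format, defaults to "2"
  imageFeatures: layering
reclaimPolicy: Delete
allowVolumeExpansion: true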

+ 47 - 65
rook/rook-ceph-operator-values.yaml

@@ -1,7 +1,7 @@
-# From https://github.com/rook/rook/blob/v1.12.11/deploy/charts/rook-ceph/values.yaml
+# From https://github.com/rook/rook/blob/v1.13.10/deploy/charts/rook-ceph/values.yaml
 # export ROOK_OPERATOR_NAMESPACE=rook-ceph
 # export ROOK_CLUSTER_NAMESPACE=rook-ceph
-# KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --install --namespace rook-ceph rook-ceph rook-release/rook-ceph -f rook/rook-ceph-operator-values.yaml --version 1.12.11
+# KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --install --namespace rook-ceph rook-ceph rook-release/rook-ceph -f rook/rook-ceph-operator-values.yaml --version 1.13.10
 # Default values for rook-ceph-operator
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.
@@ -11,7 +11,7 @@ image:
   repository: rook/ceph
   # -- Image tag
   # @default -- `master`
-  tag: v1.12.11
+  tag: v1.13.10
   # -- Image pull policy
   pullPolicy: IfNotPresent
 
@@ -27,10 +27,9 @@ crds:
 # -- Pod resource requests & limits
 resources:
   limits:
-    cpu: 500m
     memory: 512Mi
   requests:
-    cpu: 100m
+    cpu: 200m
     memory: 128Mi
 
 # -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment.
@@ -69,6 +68,13 @@ pspEnable: true
 # -- Set the priority class for the rook operator deployment if desired
 priorityClassName:
 
+# -- Set the container security context for the operator
+containerSecurityContext:
+  runAsNonRoot: true
+  runAsUser: 2016
+  runAsGroup: 2016
+  capabilities:
+    drop: ["ALL"]
 # -- If true, loop devices are allowed to be used for osds in test clusters
 allowLoopDevices: false
 
@@ -79,8 +85,6 @@ csi:
   enableRbdDriver: true
   # -- Enable Ceph CSI CephFS driver
   enableCephfsDriver: true
-  # -- Enable Ceph CSI GRPC Metrics
-  enableGrpcMetrics: false
   # -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
   # in some network configurations where the SDN does not provide access to an external cluster or
   # there is significant drop in read/write performance
@@ -146,6 +150,10 @@ csi:
   # @default -- `0`
   sidecarLogLevel:
 
+  # -- CSI driver name prefix for cephfs, rbd and nfs.
+  # @default -- `namespace name where rook-ceph operator is deployed`
+  csiDriverNamePrefix:
+
   # -- CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
   # @default -- `RollingUpdate`
   rbdPluginUpdateStrategy:
@@ -213,7 +221,6 @@ csi:
           cpu: 100m
         limits:
           memory: 256Mi
-          cpu: 200m
     - name : csi-resizer
       resource:
         requests:
@@ -221,7 +228,6 @@ csi:
           cpu: 100m
         limits:
           memory: 256Mi
-          cpu: 200m
     - name : csi-attacher
       resource:
         requests:
@@ -229,7 +235,6 @@ csi:
           cpu: 100m
         limits:
           memory: 256Mi
-          cpu: 200m
     - name : csi-snapshotter
       resource:
         requests:
@@ -237,15 +242,12 @@ csi:
           cpu: 100m
         limits:
           memory: 256Mi
-          cpu: 200m
     - name : csi-rbdplugin
       resource:
         requests:
           memory: 512Mi
-          cpu: 250m
         limits:
           memory: 1Gi
-          cpu: 500m
     - name : csi-omap-generator
       resource:
         requests:
@@ -253,7 +255,6 @@ csi:
           cpu: 250m
         limits:
           memory: 1Gi
-          cpu: 500m
     - name : liveness-prometheus
       resource:
         requests:
@@ -261,7 +262,6 @@ csi:
           cpu: 50m
         limits:
           memory: 256Mi
-          cpu: 100m
 
   # -- CEPH CSI RBD plugin resource requirement list
   # @default -- see values.yaml
@@ -273,7 +273,6 @@ csi:
           cpu: 50m
         limits:
           memory: 256Mi
-          cpu: 100m
     - name : csi-rbdplugin
       resource:
         requests:
@@ -281,7 +280,6 @@ csi:
           cpu: 250m
         limits:
           memory: 1Gi
-          cpu: 500m
     - name : liveness-prometheus
       resource:
         requests:
@@ -289,7 +287,6 @@ csi:
           cpu: 50m
         limits:
           memory: 256Mi
-          cpu: 100m
 
   # -- CEPH CSI CephFS provisioner resource requirement list
   # @default -- see values.yaml
@@ -301,7 +298,6 @@ csi:
           cpu: 100m
         limits:
           memory: 256Mi
-          cpu: 200m
     - name : csi-resizer
       resource:
         requests:
@@ -309,7 +305,6 @@ csi:
           cpu: 100m
         limits:
           memory: 256Mi
-          cpu: 200m
     - name : csi-attacher
       resource:
         requests:
@@ -317,7 +312,6 @@ csi:
           cpu: 100m
         limits:
           memory: 256Mi
-          cpu: 200m
     - name : csi-snapshotter
       resource:
         requests:
@@ -325,7 +319,6 @@ csi:
           cpu: 100m
         limits:
           memory: 256Mi
-          cpu: 200m
     - name : csi-cephfsplugin
       resource:
         requests:
@@ -333,7 +326,6 @@ csi:
           cpu: 250m
         limits:
           memory: 1Gi
-          cpu: 500m
     - name : liveness-prometheus
       resource:
         requests:
@@ -341,7 +333,6 @@ csi:
           cpu: 50m
         limits:
           memory: 256Mi
-          cpu: 100m
 
   # -- CEPH CSI CephFS plugin resource requirement list
   # @default -- see values.yaml
@@ -353,7 +344,6 @@ csi:
           cpu: 50m
         limits:
           memory: 256Mi
-          cpu: 100m
     - name : csi-cephfsplugin
       resource:
         requests:
@@ -361,7 +351,6 @@ csi:
           cpu: 250m
         limits:
           memory: 1Gi
-          cpu: 500m
     - name : liveness-prometheus
       resource:
         requests:
@@ -369,7 +358,6 @@ csi:
           cpu: 50m
         limits:
           memory: 256Mi
-          cpu: 100m
 
   # -- CEPH CSI NFS provisioner resource requirement list
   # @default -- see values.yaml
@@ -381,7 +369,6 @@ csi:
           cpu: 100m
         limits:
           memory: 256Mi
-          cpu: 200m
     - name : csi-nfsplugin
       resource:
         requests:
@@ -389,7 +376,6 @@ csi:
           cpu: 250m
         limits:
           memory: 1Gi
-          cpu: 500m
     - name : csi-attacher
       resource:
         requests:
@@ -397,7 +383,6 @@ csi:
           cpu: 250m
         limits:
           memory: 1Gi
-          cpu: 500m
 
   # -- CEPH CSI NFS plugin resource requirement list
   # @default -- see values.yaml
@@ -409,7 +394,6 @@ csi:
           cpu: 50m
         limits:
           memory: 256Mi
-          cpu: 100m
     - name : csi-nfsplugin
       resource:
         requests:
@@ -417,7 +401,6 @@ csi:
           cpu: 250m
         limits:
           memory: 1Gi
-          cpu: 500m
 
   # Set provisionerTolerations and provisionerNodeAffinity for provisioner pod.
   # The CSI provisioner would be best to start on the same nodes as other ceph daemons.
@@ -445,18 +428,10 @@ csi:
   # -- Enable Ceph CSI Liveness sidecar deployment
   enableLiveness: false
 
-  # -- CSI CephFS driver GRPC metrics port
-  # @default -- `9091`
-  cephfsGrpcMetricsPort:
-
   # -- CSI CephFS driver metrics port
   # @default -- `9081`
   cephfsLivenessMetricsPort:
 
-  # -- Ceph CSI RBD driver GRPC metrics port
-  # @default -- `9090`
-  rbdGrpcMetricsPort:
-
   # -- CSI Addons server port
   # @default -- `9070`
   csiAddonsPort:
@@ -474,42 +449,56 @@ csi:
     # -- Enable ServiceMonitor for Ceph CSI drivers
     enabled: false
     # -- Service monitor scrape interval
-    interval: 5s
+    interval: 10s
     # -- ServiceMonitor additional labels
     labels: {}
+    # -- Use a different namespace for the ServiceMonitor
+    namespace:
 
   # -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
   # @default -- `/var/lib/kubelet`
   kubeletDirPath:
 
+  # -- Duration in seconds that non-leader candidates will wait to force acquire leadership.
+  # @default -- `137s`
+  csiLeaderElectionLeaseDuration:
+
+  # -- Deadline in seconds that the acting leader will retry refreshing leadership before giving up.
+  # @default -- `107s`
+  csiLeaderElectionRenewDeadline:
+
+  # -- Retry period in seconds the LeaderElector clients should wait between tries of actions.
+  # @default -- `26s`
+  csiLeaderElectionRetryPeriod:
+
   cephcsi:
     # -- Ceph CSI image
-    # @default -- `quay.io/cephcsi/cephcsi:v3.9.0`
+    # @default -- `quay.io/cephcsi/cephcsi:v3.10.2`
     image:
 
   registrar:
     # -- Kubernetes CSI registrar image
-    # @default -- `registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0`
+    # @default -- `registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0`
     image:
 
   provisioner:
     # -- Kubernetes CSI provisioner image
-    # @default -- `registry.k8s.io/sig-storage/csi-provisioner:v3.5.0`
+    # @default -- `registry.k8s.io/sig-storage/csi-provisioner:v4.0.0`
     image:
 
   snapshotter:
     # -- Kubernetes CSI snapshotter image
-    # @default -- `registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2`
+    # @default -- `registry.k8s.io/sig-storage/csi-snapshotter:v7.0.1`
     image:
 
   attacher:
     # -- Kubernetes CSI Attacher image
-    # @default -- `registry.k8s.io/sig-storage/csi-attacher:v4.3.0`
+    # @default -- `registry.k8s.io/sig-storage/csi-attacher:v4.5.0`
     image:
 
   resizer:
     # -- Kubernetes CSI resizer image
-    # @default -- `registry.k8s.io/sig-storage/csi-resizer:v1.8.0`
+    # @default -- `registry.k8s.io/sig-storage/csi-resizer:v1.10.0`
     image:
 
   # -- Image pull policy
@@ -528,7 +517,7 @@ csi:
     # -- Enable CSIAddons
     enabled: false
     # -- CSIAddons Sidecar image
-    image: "quay.io/csiaddons/k8s-sidecar:v0.7.0"
+    image: "quay.io/csiaddons/k8s-sidecar:v0.8.0"
 
   nfs:
     # -- Enable the nfs csi driver
@@ -612,21 +601,26 @@ discover:
   #     operator: Exists
   #     effect: NoSchedule
   # -- The node labels for affinity of `discover-agent` [^1]
-  nodeAffinity: # key1=value1,value2; key2=value3
+  nodeAffinity:
+  #   key1=value1,value2; key2=value3
+  #
+  #   or
+  #
+  #   requiredDuringSchedulingIgnoredDuringExecution:
+  #     nodeSelectorTerms:
+  #       - matchExpressions:
+  #           - key: storage-node
+  #             operator: Exists
   # -- Labels to add to the discover pods
   podLabels: # "key1=value1,key2=value2"
   # -- Add resources to discover daemon pods
   resources:
   #   - limits:
-  #       cpu: 500m
   #       memory: 512Mi
   #   - requests:
   #       cpu: 100m
   #       memory: 128Mi
 
-# -- Whether to disable the admission controller
-disableAdmissionController: true
-
 # -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
 hostpathRequiresPrivileged: false
 
@@ -643,18 +637,6 @@ imagePullSecrets:
 # -- Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used
 enableOBCWatchOperatorNamespace: true
 
-# -- Set tolerations and nodeAffinity [^1] for admission controller pod.
-# The admission controller would be best to start on the same nodes as other ceph daemons.
-admissionController:
-  # tolerations:
-  #    - key: key
-  #      operator: Exists
-  #      effect: NoSchedule
-  # nodeAffinity: key1=value1,value2; key2=value3
-  nodeAffinity: "storage-node=true"
-
-# [^1]: `nodeAffinity` and `*NodeAffinity` options should have the format `"role=storage,rook; storage=ceph"` or `storage=;role=rook-example` or `storage=;` (_checks only for presence of key_)
-
 monitoring:
   # -- Enable monitoring. Requires Prometheus to be pre-installed.
   # Enabling will also create RBAC rules to allow Operator to create ServiceMonitors
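
Taken together with the CSI `serviceMonitor` block earlier in this file, the monitoring toggle here is what grants the operator RBAC to create ServiceMonitors. A minimal values sketch for enabling both, assuming the Prometheus Operator CRDs are already installed (these keys exist in this chart; the combination shown is illustrative, not part of this commit):

monitoring:
  # -- Enable monitoring. Requires Prometheus to be pre-installed.
  enabled: true
csi:
  serviceMonitor:
    # -- Enable ServiceMonitor for Ceph CSI drivers
    enabled: true
    # -- Service monitor scrape interval
    interval: 10s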