Преглед изворног кода

start upgrading rook to v1.14.12

Josh Bicking пре 1 недеље
родитељ
комит
07348e4b3d
2 измењених фајлова са 5 додато и 422 уклоњено
  1. 4 4
      argocd/applications.yaml
  2. 1 418
      argocd/rook/rook-ceph-operator-values.yaml

+ 4 - 4
argocd/applications.yaml

@@ -429,7 +429,7 @@ spec:
   project: default
   sources:
   - repoURL: https://charts.rook.io/release
-    targetRevision: '1.13.10'
+    targetRevision: '1.14.12'
     chart: rook-ceph
     helm:
       valueFiles:
@@ -442,7 +442,7 @@ spec:
       - CreateNamespace=true
       - ServerSideApply=true
     automated:
-      enabled: true
+      enabled: false
 ---
 apiVersion: argoproj.io/v1alpha1
 kind: Application
@@ -456,7 +456,7 @@ spec:
   project: default
   sources:
   - repoURL: https://charts.rook.io/release
-    targetRevision: '1.13.10'
+    targetRevision: '1.14.12'
     chart: rook-ceph-cluster
     helm:
       valueFiles:
@@ -472,7 +472,7 @@ spec:
       - CreateNamespace=true
       - ServerSideApply=true
     automated:
-      enabled: true
+      enabled: false
 ---
 apiVersion: argoproj.io/v1alpha1
 kind: Application

+ 1 - 418
argocd/rook/rook-ceph-operator-values.yaml

@@ -1,211 +1,6 @@
-# From https://github.com/rook/rook/blob/v1.13.10/deploy/charts/rook-ceph/values.yaml
-# Default values for rook-ceph-operator
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
+# https://github.com/rook/rook/blob/v1.14.12/deploy/charts/rook-ceph/values.yaml
 
-image:
-  # -- Image
-  repository: rook/ceph
-  # -- Image tag
-  # @default -- `master`
-  tag: v1.13.10
-  # -- Image pull policy
-  pullPolicy: IfNotPresent
-
-crds:
-  # -- Whether the helm chart should create and update the CRDs. If false, the CRDs must be
-  # managed independently with deploy/examples/crds.yaml.
-  # **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
-  # If the CRDs are deleted in this case, see
-  # [the disaster recovery guide](https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion)
-  # to restore them.
-  enabled: true
-
-# -- Pod resource requests & limits
-resources:
-  limits:
-    memory: 512Mi
-  requests:
-    cpu: 200m
-    memory: 128Mi
-
-# -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment.
-nodeSelector: {}
-# Constraint rook-ceph-operator Deployment to nodes with label `disktype: ssd`.
-# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
-#  disktype: ssd
-
-# -- List of Kubernetes [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to add to the Deployment.
-tolerations: []
-
-# -- Delay to use for the `node.kubernetes.io/unreachable` pod failure toleration to override
-# the Kubernetes default of 5 minutes
-unreachableNodeTolerationSeconds: 5
-
-# -- Whether the operator should watch cluster CRD in its own namespace or not
-currentNamespaceOnly: false
-
-# -- Pod annotations
-annotations: {}
-
-# -- Global log level for the operator.
-# Options: `ERROR`, `WARNING`, `INFO`, `DEBUG`
-logLevel: INFO
-
-# -- If true, create & use RBAC resources
-rbacEnable: true
-
-rbacAggregate:
-  # -- If true, create a ClusterRole aggregated to [user facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) for objectbucketclaims
-  enableOBCs: false
-
-# -- If true, create & use PSP resources
-pspEnable: true
-
-# -- Set the priority class for the rook operator deployment if desired
-priorityClassName:
-
-# -- Set the container security context for the operator
-containerSecurityContext:
-  runAsNonRoot: true
-  runAsUser: 2016
-  runAsGroup: 2016
-  capabilities:
-    drop: ["ALL"]
-# -- If true, loop devices are allowed to be used for osds in test clusters
-allowLoopDevices: false
-
-# Settings for whether to disable the drivers or other daemons if they are not
-# needed
 csi:
-  # -- Enable Ceph CSI RBD driver
-  enableRbdDriver: true
-  # -- Enable Ceph CSI CephFS driver
-  enableCephfsDriver: true
-  # -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
-  # in some network configurations where the SDN does not provide access to an external cluster or
-  # there is significant drop in read/write performance
-  enableCSIHostNetwork: true
-  # -- Enable Snapshotter in CephFS provisioner pod
-  enableCephfsSnapshotter: true
-  # -- Enable Snapshotter in NFS provisioner pod
-  enableNFSSnapshotter: true
-  # -- Enable Snapshotter in RBD provisioner pod
-  enableRBDSnapshotter: true
-  # -- Enable Host mount for `/etc/selinux` directory for Ceph CSI nodeplugins
-  enablePluginSelinuxHostMount: false
-  # -- Enable Ceph CSI PVC encryption support
-  enableCSIEncryption: false
-
-  # -- PriorityClassName to be set on csi driver plugin pods
-  pluginPriorityClassName: system-node-critical
-
-  # -- PriorityClassName to be set on csi driver provisioner pods
-  provisionerPriorityClassName: system-cluster-critical
-
-  # -- Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
-  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
-  rbdFSGroupPolicy: "File"
-
-  # -- Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
-  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
-  cephFSFSGroupPolicy: "File"
-
-  # -- Policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
-  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
-  nfsFSGroupPolicy: "File"
-
-  # -- OMAP generator generates the omap mapping between the PV name and the RBD image
-  # which helps CSI to identify the rbd images for CSI operations.
-  # `CSI_ENABLE_OMAP_GENERATOR` needs to be enabled when we are using rbd mirroring feature.
-  # By default OMAP generator is disabled and when enabled, it will be deployed as a
-  # sidecar with CSI provisioner pod, to enable set it to true.
-  enableOMAPGenerator: false
-
-  # -- Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options.
-  # Set to "ms_mode=secure" when connections.encrypted is enabled in CephCluster CR
-  cephFSKernelMountOptions:
-
-  # -- Enable adding volume metadata on the CephFS subvolumes and RBD images.
-  # Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images.
-  # Hence enable metadata is false by default
-  enableMetadata: false
-
-  # -- Set replicas for csi provisioner deployment
-  provisionerReplicas: 2
-
-  # -- Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful
-  # in cases like for example, when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster
-  clusterName:
-
-  # -- Set logging level for cephCSI containers maintained by the cephCSI.
-  # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
-  logLevel: 0
-
-  # -- Set logging level for Kubernetes-csi sidecar containers.
-  # Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
-  # @default -- `0`
-  sidecarLogLevel:
-
-  # -- CSI driver name prefix for cephfs, rbd and nfs.
-  # @default -- `namespace name where rook-ceph operator is deployed`
-  csiDriverNamePrefix:
-
-  # -- CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
-  # @default -- `RollingUpdate`
-  rbdPluginUpdateStrategy:
-
-  # -- A maxUnavailable parameter of CSI RBD plugin daemonset update strategy.
-  # @default -- `1`
-  rbdPluginUpdateStrategyMaxUnavailable:
-
-  # -- CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
-  # @default -- `RollingUpdate`
-  cephFSPluginUpdateStrategy:
-
-  # -- A maxUnavailable parameter of CSI cephFS plugin daemonset update strategy.
-  # @default -- `1`
-  cephFSPluginUpdateStrategyMaxUnavailable:
-
-  # -- CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
-  # @default -- `RollingUpdate`
-  nfsPluginUpdateStrategy:
-
-  # -- Set GRPC timeout for csi containers (in seconds). It should be >= 120. If this value is not set or is invalid, it defaults to 150
-  grpcTimeoutInSeconds: 150
-
-  # -- Allow starting an unsupported ceph-csi image
-  allowUnsupportedVersion: false
-
-  # -- The volume of the CephCSI RBD plugin DaemonSet
-  csiRBDPluginVolume:
-  #  - name: lib-modules
-  #    hostPath:
-  #      path: /run/booted-system/kernel-modules/lib/modules/
-  #  - name: host-nix
-  #    hostPath:
-  #      path: /nix
-
-  # -- The volume mounts of the CephCSI RBD plugin DaemonSet
-  csiRBDPluginVolumeMount:
-  #  - name: host-nix
-  #    mountPath: /nix
-  #    readOnly: true
-
-  # -- The volume of the CephCSI CephFS plugin DaemonSet
-  csiCephFSPluginVolume:
-  #  - name: lib-modules
-  #    hostPath:
-  #      path: /run/booted-system/kernel-modules/lib/modules/
-  #  - name: host-nix
-  #    hostPath:
-  #      path: /nix
-
-  # -- The volume mounts of the CephCSI CephFS plugin DaemonSet
-  csiCephFSPluginVolumeMount:
-  #  - name: host-nix
-  #    mountPath: /nix
-  #    readOnly: true
 
   # -- CEPH CSI RBD provisioner resource requirement list
   # csi-omap-generator resources will be applied only if `enableOMAPGenerator` is set to `true`
@@ -421,225 +216,13 @@ csi:
       operator: Equal
       value: "true"
       effect: PreferNoSchedule
-    - key: seedbox
-      operator: Equal
-      value: "true"
-      effect: NoSchedule
-
-  # -- The node labels for affinity of the CephCSI RBD plugin DaemonSet [^1]
-  pluginNodeAffinity: "" # key1=value1,value2; key2=value3
 
   # -- Enable Ceph CSI Liveness sidecar deployment
   enableLiveness: true
 
-  # -- CSI CephFS driver metrics port
-  # @default -- `9081`
-  cephfsLivenessMetricsPort:
-
-  # -- CSI Addons server port
-  # @default -- `9070`
-  csiAddonsPort:
-
-  # -- Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
-  # you may want to disable this setting. However, this will cause an issue during upgrades
-  # with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html)
-  forceCephFSKernelClient: true
-
-  # -- Ceph CSI RBD driver metrics port
-  # @default -- `8080`
-  rbdLivenessMetricsPort:
-
   serviceMonitor:
     # -- Enable ServiceMonitor for Ceph CSI drivers
     enabled: true
-    # -- Service monitor scrape interval
-    interval: 10s
-    # -- ServiceMonitor additional labels
-    labels: {}
-    # -- Use a different namespace for the ServiceMonitor
-    namespace:
-
-  # -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
-  # @default -- `/var/lib/kubelet`
-  kubeletDirPath:
-
-  # -- Duration in seconds that non-leader candidates will wait to force acquire leadership.
-  # @default -- `137s`
-  csiLeaderElectionLeaseDuration:
-
-  # -- Deadline in seconds that the acting leader will retry refreshing leadership before giving up.
-  # @default -- `107s`
-  csiLeaderElectionRenewDeadline:
-
-  # -- Retry period in seconds the LeaderElector clients should wait between tries of actions.
-  # @default -- `26s`
-  csiLeaderElectionRetryPeriod:
-
-  cephcsi:
-    # -- Ceph CSI image
-    # @default -- `quay.io/cephcsi/cephcsi:v3.10.2`
-    image:
-
-  registrar:
-    # -- Kubernetes CSI registrar image
-    # @default -- `registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0`
-    image:
-
-  provisioner:
-    # -- Kubernetes CSI provisioner image
-    # @default -- `registry.k8s.io/sig-storage/csi-provisioner:v4.0.0`
-    image:
-
-  snapshotter:
-    # -- Kubernetes CSI snapshotter image
-    # @default -- `registry.k8s.io/sig-storage/csi-snapshotter:v7.0.1`
-    image:
-
-  attacher:
-    # -- Kubernetes CSI Attacher image
-    # @default -- `registry.k8s.io/sig-storage/csi-attacher:v4.5.0`
-    image:
-
-  resizer:
-    # -- Kubernetes CSI resizer image
-    # @default -- `registry.k8s.io/sig-storage/csi-resizer:v1.10.0`
-    image:
-
-  # -- Image pull policy
-  imagePullPolicy: IfNotPresent
-
-  # -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods
-  cephfsPodLabels: #"key1=value1,key2=value2"
-
-  # -- Labels to add to the CSI NFS Deployments and DaemonSets Pods
-  nfsPodLabels: #"key1=value1,key2=value2"
-
-  # -- Labels to add to the CSI RBD Deployments and DaemonSets Pods
-  rbdPodLabels: #"key1=value1,key2=value2"
-
-  csiAddons:
-    # -- Enable CSIAddons
-    enabled: false
-    # -- CSIAddons Sidecar image
-    image: "quay.io/csiaddons/k8s-sidecar:v0.8.0"
-
-  nfs:
-    # -- Enable the nfs csi driver
-    enabled: false
-
-  topology:
-    # -- Enable topology based provisioning
-    enabled: false
-    # NOTE: the value here serves as an example and needs to be
-    # updated with node labels that define domains of interest
-    # -- domainLabels define which node labels to use as domains
-    # for CSI nodeplugins to advertise their domains
-    domainLabels:
-    # - kubernetes.io/hostname
-    # - topology.kubernetes.io/zone
-    # - topology.rook.io/rack
-
-  readAffinity:
-    # -- Enable read affinity for RBD volumes. Recommended to
-    # set to true if running kernel 5.8 or newer.
-    # @default -- `false`
-    enabled: true
-    # -- Define which node labels to use
-    # as CRUSH location. This should correspond to the values set
-    # in the CRUSH map.
-    # @default -- labels listed [here](../CRDs/Cluster/ceph-cluster-crd.md#osd-topology)
-    crushLocationLabels:
-
-  # -- Whether to skip any attach operation altogether for CephFS PVCs. See more details
-  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
-  # If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
-  # of pods using the CephFS PVC fast. **WARNING** It's highly discouraged to use this for
-  # CephFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
-  cephFSAttachRequired: true
-  # -- Whether to skip any attach operation altogether for RBD PVCs. See more details
-  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
-  # If set to false it skips the volume attachments and makes the creation of pods using the RBD PVC fast.
-  # **WARNING** It's highly discouraged to use this for RWO volumes as it can cause data corruption.
-  # csi-addons operations like Reclaimspace and PVC Keyrotation will also not be supported if set
-  # to false since we'll have no VolumeAttachments to determine which node the PVC is mounted on.
-  # Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
-  rbdAttachRequired: true
-  # -- Whether to skip any attach operation altogether for NFS PVCs. See more details
-  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
-  # If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
-  # of pods using the NFS PVC fast. **WARNING** It's highly discouraged to use this for
-  # NFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
-  nfsAttachRequired: true
-
-# -- Enable discovery daemon
-enableDiscoveryDaemon: false
-# -- Set the discovery daemon device discovery interval (default to 60m)
-discoveryDaemonInterval: 60m
-
-# -- The timeout for ceph commands in seconds
-cephCommandsTimeoutSeconds: "15"
-
-# -- If true, run rook operator on the host network
-useOperatorHostNetwork: false
-
-# -- If true, scale down the rook operator.
-# This is useful for administrative actions where the rook operator must be scaled down, while using gitops style tooling
-# to deploy your helm charts.
-scaleDownOperator: false
-
-## Rook Discover configuration
-## toleration: NoSchedule, PreferNoSchedule or NoExecute
-## tolerationKey: Set this to the specific key of the taint to tolerate
-## tolerations: Array of tolerations in YAML format which will be added to agent deployment
-## nodeAffinity: Set to labels of the node to match
-
-discover:
-  # -- Toleration for the discover pods.
-  # Options: `NoSchedule`, `PreferNoSchedule` or `NoExecute`
-  toleration:
-  # -- The specific key of the taint to tolerate
-  tolerationKey:
-  # -- Array of tolerations in YAML format which will be added to discover deployment
-  tolerations:
-  #   - key: key
-  #     operator: Exists
-  #     effect: NoSchedule
-  # -- The node labels for affinity of `discover-agent` [^1]
-  nodeAffinity:
-  #   key1=value1,value2; key2=value3
-  #
-  #   or
-  #
-  #   requiredDuringSchedulingIgnoredDuringExecution:
-  #     nodeSelectorTerms:
-  #       - matchExpressions:
-  #           - key: storage-node
-  #             operator: Exists
-  # -- Labels to add to the discover pods
-  podLabels: # "key1=value1,key2=value2"
-  # -- Add resources to discover daemon pods
-  resources:
-  #   - limits:
-  #       memory: 512Mi
-  #   - requests:
-  #       cpu: 100m
-  #       memory: 128Mi
-
-# -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
-hostpathRequiresPrivileged: false
-
-# -- Disable automatic orchestration when new devices are discovered.
-disableDeviceHotplug: false
-
-# -- Blacklist certain disks according to the regex provided.
-discoverDaemonUdev:
-
-# -- imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
-imagePullSecrets:
-# - name: my-registry-secret
-
-# -- Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used
-enableOBCWatchOperatorNamespace: true
 
 monitoring:
   # -- Enable monitoring. Requires Prometheus to be pre-installed.