# From https://github.com/rook/rook/blob/v1.13.10/deploy/charts/rook-ceph/values.yaml
# export ROOK_OPERATOR_NAMESPACE=rook-ceph
# export ROOK_CLUSTER_NAMESPACE=rook-ceph
# KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --install --namespace rook-ceph rook-ceph rook-release/rook-ceph -f rook/rook-ceph-operator-values.yaml --version 1.13.10
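# To verify the rollout afterwards (same kubeconfig; deployment name as created by the chart):
# KUBECONFIG=/etc/rancher/k3s/k3s.yaml kubectl -n rook-ceph rollout status deploy/rook-ceph-operator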
# Default values for rook-ceph-operator
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  # -- Image
  repository: rook/ceph
  # -- Image tag
  # @default -- `master`
  tag: v1.13.10
  # -- Image pull policy
  pullPolicy: IfNotPresent

crds:
  # -- Whether the helm chart should create and update the CRDs. If false, the CRDs must be
  # managed independently with deploy/examples/crds.yaml.
  # **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
  # If the CRDs are deleted in this case, see
  # [the disaster recovery guide](https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion)
  # to restore them.
  enabled: true

# -- Pod resource requests & limits
resources:
  limits:
    memory: 512Mi
  requests:
    cpu: 200m
    memory: 128Mi

# -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment.
nodeSelector: {}
# Constrain the rook-ceph-operator Deployment to nodes with the label `disktype: ssd`.
# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# disktype: ssd

# -- List of Kubernetes [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to add to the Deployment.
tolerations: []
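# To let the operator itself schedule onto tainted storage nodes, replace the empty list
# above with tolerations such as (matching the storage-node taint tolerated by the CSI plugins below):
# tolerations:
#   - key: storage-node
#     operator: Exists
#     effect: NoSchedule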
# -- Delay to use for the `node.kubernetes.io/unreachable` pod failure toleration to override
# the Kubernetes default of 5 minutes
unreachableNodeTolerationSeconds: 5

# -- Whether the operator should watch the cluster CRD in its own namespace or not
currentNamespaceOnly: false

# -- Pod annotations
annotations: {}

# -- Global log level for the operator.
# Options: `ERROR`, `WARNING`, `INFO`, `DEBUG`
logLevel: INFO

# -- If true, create & use RBAC resources
rbacEnable: true

rbacAggregate:
  # -- If true, create a ClusterRole aggregated to [user facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) for objectbucketclaims
  enableOBCs: false

# -- If true, create & use PSP resources
pspEnable: true

# -- Set the priority class for the rook operator deployment if desired
priorityClassName:
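# For example (commented out; system-cluster-critical is a Kubernetes built-in PriorityClass,
# but any existing PriorityClass name can be used):
# priorityClassName: system-cluster-critical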

# -- Set the container security context for the operator
containerSecurityContext:
  runAsNonRoot: true
  runAsUser: 2016
  runAsGroup: 2016
  capabilities:
    drop: ["ALL"]

# -- If true, loop devices are allowed to be used for osds in test clusters
allowLoopDevices: false

# Settings for whether to disable the drivers or other daemons if they are not
# needed
csi:
  # -- Enable Ceph CSI RBD driver
  enableRbdDriver: true
  # -- Enable Ceph CSI CephFS driver
  enableCephfsDriver: true
  # -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
  # in some network configurations where the SDN does not provide access to an external cluster or
  # there is a significant drop in read/write performance
  enableCSIHostNetwork: true
  # -- Enable Snapshotter in CephFS provisioner pod
  enableCephfsSnapshotter: true
  # -- Enable Snapshotter in NFS provisioner pod
  enableNFSSnapshotter: true
  # -- Enable Snapshotter in RBD provisioner pod
  enableRBDSnapshotter: true
  # -- Enable Host mount for `/etc/selinux` directory for Ceph CSI nodeplugins
  enablePluginSelinuxHostMount: false
  # -- Enable Ceph CSI PVC encryption support
  enableCSIEncryption: false
  # -- PriorityClassName to be set on csi driver plugin pods
  pluginPriorityClassName: system-node-critical
  # -- PriorityClassName to be set on csi driver provisioner pods
  provisionerPriorityClassName: system-cluster-critical
  # -- Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  rbdFSGroupPolicy: "File"
  # -- Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  cephFSFSGroupPolicy: "File"
  # -- Policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  nfsFSGroupPolicy: "File"
  # -- The OMAP generator generates the omap mapping between the PV name and the RBD image,
  # which helps CSI identify the RBD images for CSI operations.
  # `CSI_ENABLE_OMAP_GENERATOR` needs to be enabled when using the RBD mirroring feature.
  # The OMAP generator is disabled by default; when enabled (set to true), it is deployed as a
  # sidecar in the CSI provisioner pod.
  enableOMAPGenerator: false
  # -- Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options.
  # Set to "ms_mode=secure" when connections.encrypted is enabled in CephCluster CR
  cephFSKernelMountOptions:
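  # For example, when connections.encrypted is enabled in the CephCluster CR (commented out):
  # cephFSKernelMountOptions: ms_mode=secure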
  # -- Enable adding volume metadata on the CephFS subvolumes and RBD images.
  # Not all users need volume/snapshot details exposed as metadata on the CephFS subvolumes
  # and RBD images, so metadata is disabled by default.
  enableMetadata: false
  # -- Set replicas for the csi provisioner deployment
  provisionerReplicas: 2
  # -- Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This is useful
  # when, for example, two container orchestrator clusters (Kubernetes/OCP) are using a single Ceph cluster
  clusterName:
  # -- Set logging level for the cephCSI containers maintained by cephCSI.
  # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
  logLevel: 0
  # -- Set logging level for the Kubernetes CSI sidecar containers.
  # Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
  # @default -- `0`
  sidecarLogLevel:
  # -- CSI driver name prefix for cephfs, rbd and nfs.
  # @default -- `namespace name where rook-ceph operator is deployed`
  csiDriverNamePrefix:
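  # With the default prefix (the operator namespace, i.e. rook-ceph here), the CSI driver
  # names are expected to be rook-ceph.rbd.csi.ceph.com, rook-ceph.cephfs.csi.ceph.com and
  # rook-ceph.nfs.csi.ceph.com; StorageClasses reference these provisioner names.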
  # -- CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
  # @default -- `RollingUpdate`
  rbdPluginUpdateStrategy:
  # -- A maxUnavailable parameter of CSI RBD plugin daemonset update strategy.
  # @default -- `1`
  rbdPluginUpdateStrategyMaxUnavailable:
  # -- CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
  # @default -- `RollingUpdate`
  cephFSPluginUpdateStrategy:
  # -- A maxUnavailable parameter of CSI cephFS plugin daemonset update strategy.
  # @default -- `1`
  cephFSPluginUpdateStrategyMaxUnavailable:
  # -- CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
  # @default -- `RollingUpdate`
  nfsPluginUpdateStrategy:
  # -- Set GRPC timeout for csi containers (in seconds). It should be >= 120. If this value is not set or is invalid, it defaults to 150
  grpcTimeoutInSeconds: 150
  # -- Allow starting an unsupported ceph-csi image
  allowUnsupportedVersion: false
  # -- The volume of the CephCSI RBD plugin DaemonSet
  csiRBDPluginVolume:
  #  - name: lib-modules
  #    hostPath:
  #      path: /run/booted-system/kernel-modules/lib/modules/
  #  - name: host-nix
  #    hostPath:
  #      path: /nix
  # -- The volume mounts of the CephCSI RBD plugin DaemonSet
  csiRBDPluginVolumeMount:
  #  - name: host-nix
  #    mountPath: /nix
  #    readOnly: true
  # -- The volume of the CephCSI CephFS plugin DaemonSet
  csiCephFSPluginVolume:
  #  - name: lib-modules
  #    hostPath:
  #      path: /run/booted-system/kernel-modules/lib/modules/
  #  - name: host-nix
  #    hostPath:
  #      path: /nix
  # -- The volume mounts of the CephCSI CephFS plugin DaemonSet
  csiCephFSPluginVolumeMount:
  #  - name: host-nix
  #    mountPath: /nix
  #    readOnly: true
  # -- CEPH CSI RBD provisioner resource requirement list
  # csi-omap-generator resources will be applied only if `enableOMAPGenerator` is set to `true`
  # @default -- see values.yaml
  csiRBDProvisionerResource: |
    - name : csi-provisioner
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
    - name : csi-resizer
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
    - name : csi-attacher
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
    - name : csi-snapshotter
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
    - name : csi-rbdplugin
      resource:
        requests:
          memory: 512Mi
        limits:
          memory: 1Gi
    - name : csi-omap-generator
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
    - name : liveness-prometheus
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
  # -- CEPH CSI RBD plugin resource requirement list
  # @default -- see values.yaml
  csiRBDPluginResource: |
    - name : driver-registrar
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
    - name : csi-rbdplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
    - name : liveness-prometheus
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
  # -- CEPH CSI CephFS provisioner resource requirement list
  # @default -- see values.yaml
  csiCephFSProvisionerResource: |
    - name : csi-provisioner
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
    - name : csi-resizer
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
    - name : csi-attacher
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
    - name : csi-snapshotter
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
    - name : csi-cephfsplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
    - name : liveness-prometheus
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
  # -- CEPH CSI CephFS plugin resource requirement list
  # @default -- see values.yaml
  csiCephFSPluginResource: |
    - name : driver-registrar
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
    - name : csi-cephfsplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
    - name : liveness-prometheus
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
  # -- CEPH CSI NFS provisioner resource requirement list
  # @default -- see values.yaml
  csiNFSProvisionerResource: |
    - name : csi-provisioner
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
    - name : csi-nfsplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
    - name : csi-attacher
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
  # -- CEPH CSI NFS plugin resource requirement list
  # @default -- see values.yaml
  csiNFSPluginResource: |
    - name : driver-registrar
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
    - name : csi-nfsplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
  # Set provisionerTolerations and provisionerNodeAffinity for the provisioner pod.
  # The CSI provisioner is best started on the same nodes as the other Ceph daemons.
  # -- Array of tolerations in YAML format which will be added to CSI provisioner deployment
  provisionerTolerations:
  #  - key: key
  #    operator: Exists
  #    effect: NoSchedule
  # -- The node labels for affinity of the CSI provisioner deployment [^1]
  provisionerNodeAffinity: "storage-node=true" # key1=value1,value2; key2=value3
  # Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods.
  # The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
  # -- Array of tolerations in YAML format which will be added to CephCSI plugin DaemonSet
  pluginTolerations:
    - key: storage-node
      operator: Exists
      effect: NoSchedule
  # -- The node labels for affinity of the CephCSI RBD plugin DaemonSet [^1]
  pluginNodeAffinity: # key1=value1,value2; key2=value3
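  # Left unset so the plugin DaemonSet can run on every node; to restrict it, the same
  # "key=value" syntax as provisionerNodeAffinity above applies, e.g. (commented out):
  # pluginNodeAffinity: "storage-node=true"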
  # -- Enable Ceph CSI Liveness sidecar deployment
  enableLiveness: false
  # -- CSI CephFS driver metrics port
  # @default -- `9081`
  cephfsLivenessMetricsPort:
  # -- CSI Addons server port
  # @default -- `9070`
  csiAddonsPort:
  # -- Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
  # you may want to disable this setting. However, this will cause an issue during upgrades
  # with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html)
  forceCephFSKernelClient: true
  # -- Ceph CSI RBD driver metrics port
  # @default -- `8080`
  rbdLivenessMetricsPort:
  serviceMonitor:
    # -- Enable ServiceMonitor for Ceph CSI drivers
    enabled: false
    # -- Service monitor scrape interval
    interval: 10s
    # -- ServiceMonitor additional labels
    labels: {}
    # -- Use a different namespace for the ServiceMonitor
    namespace:
  # -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
  # @default -- `/var/lib/kubelet`
  kubeletDirPath:
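  # Left unset on the assumption that the k3s install targeted above keeps the kubelet
  # default of /var/lib/kubelet; set this only if the kubelet's --root-dir was changed.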
  # -- Duration in seconds that non-leader candidates will wait to force acquire leadership.
  # @default -- `137s`
  csiLeaderElectionLeaseDuration:
  # -- Deadline in seconds that the acting leader will retry refreshing leadership before giving up.
  # @default -- `107s`
  csiLeaderElectionRenewDeadline:
  # -- Retry period in seconds the LeaderElector clients should wait between tries of actions.
  # @default -- `26s`
  csiLeaderElectionRetryPeriod:
  cephcsi:
    # -- Ceph CSI image
    # @default -- `quay.io/cephcsi/cephcsi:v3.10.2`
    image:
  registrar:
    # -- Kubernetes CSI registrar image
    # @default -- `registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0`
    image:
  provisioner:
    # -- Kubernetes CSI provisioner image
    # @default -- `registry.k8s.io/sig-storage/csi-provisioner:v4.0.0`
    image:
  snapshotter:
    # -- Kubernetes CSI snapshotter image
    # @default -- `registry.k8s.io/sig-storage/csi-snapshotter:v7.0.1`
    image:
  attacher:
    # -- Kubernetes CSI Attacher image
    # @default -- `registry.k8s.io/sig-storage/csi-attacher:v4.5.0`
    image:
  resizer:
    # -- Kubernetes CSI resizer image
    # @default -- `registry.k8s.io/sig-storage/csi-resizer:v1.10.0`
    image:
  # -- Image pull policy
  imagePullPolicy: IfNotPresent
  # -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods
  cephfsPodLabels: # "key1=value1,key2=value2"
  # -- Labels to add to the CSI NFS Deployments and DaemonSets Pods
  nfsPodLabels: # "key1=value1,key2=value2"
  # -- Labels to add to the CSI RBD Deployments and DaemonSets Pods
  rbdPodLabels: # "key1=value1,key2=value2"
  csiAddons:
    # -- Enable CSIAddons
    enabled: false
    # -- CSIAddons Sidecar image
    image: "quay.io/csiaddons/k8s-sidecar:v0.8.0"
  nfs:
    # -- Enable the nfs csi driver
    enabled: false
  topology:
    # -- Enable topology based provisioning
    enabled: false
    # NOTE: the value here serves as an example and needs to be
    # updated with node labels that define domains of interest
    # -- domainLabels define which node labels to use as domains
    # for CSI nodeplugins to advertise their domains
    domainLabels:
    #  - kubernetes.io/hostname
    #  - topology.kubernetes.io/zone
    #  - topology.rook.io/rack
  readAffinity:
    # -- Enable read affinity for RBD volumes. Recommended to
    # set to true if running kernel 5.8 or newer.
    # @default -- `false`
    enabled: true
    # -- Define which node labels to use
    # as CRUSH location. This should correspond to the values set
    # in the CRUSH map.
    # @default -- labels listed [here](../CRDs/Cluster/ceph-cluster-crd.md#osd-topology)
    crushLocationLabels:
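    # For example (commented out), standard topology labels that match CRUSH map locations:
    # - kubernetes.io/hostname
    # - topology.kubernetes.io/zone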
  # -- Whether to skip any attach operation altogether for CephFS PVCs. See more details
  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
  # If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
  # of pods using the CephFS PVC fast. **WARNING** It's highly discouraged to use this for
  # CephFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
  cephFSAttachRequired: true
  # -- Whether to skip any attach operation altogether for RBD PVCs. See more details
  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
  # If set to false it skips the volume attachments and makes the creation of pods using the RBD PVC fast.
  # **WARNING** It's highly discouraged to use this for RWO volumes as it can cause data corruption.
  # csi-addons operations like ReclaimSpace and PVC key rotation will also not be supported if set
  # to false since there will be no VolumeAttachments to determine which node the PVC is mounted on.
  # Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
  rbdAttachRequired: true
  # -- Whether to skip any attach operation altogether for NFS PVCs. See more details
  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
  # If nfsAttachRequired is set to false it skips the volume attachments and makes the creation
  # of pods using the NFS PVC fast. **WARNING** It's highly discouraged to use this for
  # NFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
  nfsAttachRequired: true

# -- Enable discovery daemon
enableDiscoveryDaemon: false
# -- Set the discovery daemon device discovery interval (default to 60m)
discoveryDaemonInterval: 60m

# -- The timeout for ceph commands in seconds
cephCommandsTimeoutSeconds: "15"

# -- If true, run rook operator on the host network
useOperatorHostNetwork: false

# -- If true, scale down the rook operator.
# This is useful for administrative actions where the rook operator must be scaled down, while using gitops style tooling
# to deploy your helm charts.
scaleDownOperator: false

## Rook Discover configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
## tolerations: Array of tolerations in YAML format which will be added to agent deployment
## nodeAffinity: Set to labels of the node to match
discover:
  # -- Toleration for the discover pods.
  # Options: `NoSchedule`, `PreferNoSchedule` or `NoExecute`
  toleration:
  # -- The specific key of the taint to tolerate
  tolerationKey:
  # -- Array of tolerations in YAML format which will be added to discover deployment
  tolerations:
  #  - key: key
  #    operator: Exists
  #    effect: NoSchedule
  # -- The node labels for affinity of `discover-agent` [^1]
  nodeAffinity:
  # key1=value1,value2; key2=value3
  #
  # or
  #
  # requiredDuringSchedulingIgnoredDuringExecution:
  #   nodeSelectorTerms:
  #     - matchExpressions:
  #         - key: storage-node
  #           operator: Exists
  # -- Labels to add to the discover pods
  podLabels: # "key1=value1,key2=value2"
  # -- Add resources to discover daemon pods
  resources:
  #  limits:
  #    memory: 512Mi
  #  requests:
  #    cpu: 100m
  #    memory: 128Mi

# -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
hostpathRequiresPrivileged: false

# -- Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false

# -- Blacklist certain disks according to the regex provided.
discoverDaemonUdev:

# -- imagePullSecrets allows Docker images to be pulled from a private registry. The option is passed to all service accounts.
imagePullSecrets:
# - name: my-registry-secret

# -- Whether the OBC provisioner should watch the operator namespace or not; if not, the namespace of the cluster will be used
enableOBCWatchOperatorNamespace: true

monitoring:
  # -- Enable monitoring. Requires Prometheus to be pre-installed.
  # Enabling will also create RBAC rules to allow Operator to create ServiceMonitors
  enabled: true