# From https://github.com/rook/rook/blob/release-1.9/deploy/charts/rook-ceph/values.yaml
# Default values for rook-ceph-operator
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
  repository: rook/ceph
  tag: v1.9.2
  pullPolicy: IfNotPresent
crds:
  # Whether the helm chart should create and update the CRDs. If false, the CRDs must be
  # managed independently with deploy/examples/crds.yaml.
  # **WARNING** Only set during the first deployment. If disabled later, the cluster may be DESTROYED.
  # If the CRDs are deleted in this case, see the disaster recovery guide to restore them:
  # https://rook.github.io/docs/rook/latest/ceph-disaster-recovery.html#restoring-crds-after-deletion
  enabled: true
resources:
  limits:
    cpu: 500m
    memory: 256Mi
  requests:
    cpu: 100m
    memory: 128Mi
# Constrain the rook-ceph-operator Deployment to nodes with this label.
# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector:
  storage-node: "true"
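# A minimal sketch of satisfying the selector above: label each storage node
# (the node name is a placeholder):
#   kubectl label nodes <node-name> storage-node=true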
# Tolerations for the rook-ceph-operator to allow it to run on nodes with particular taints
tolerations: []
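# For example, to let the operator itself run on tainted storage nodes, mirroring the
# csi.pluginTolerations entry further below (a sketch; adjust keys to your node taints):
# tolerations:
#   - key: storage-node
#     operator: Exists
#     effect: NoSchedule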
# Delay to use in the node.kubernetes.io/unreachable toleration
unreachableNodeTolerationSeconds: 5
# Whether Rook watches only its current namespace for CRDs, or the entire cluster; defaults to false
currentNamespaceOnly: false
## Annotations to be added to the operator pod
annotations: {}
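## For example (an illustrative key and value only):
## annotations:
##   example.com/team: storage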
## The logging level for the operator: ERROR | WARNING | INFO | DEBUG
logLevel: INFO
## If true, create & use RBAC resources
##
rbacEnable: true
## If true, create & use PSP resources
##
pspEnable: true
# Set the priority class for the rook operator deployment if desired
# priorityClassName: class
## Settings for whether to disable the drivers or other daemons if they are not
## needed
csi:
  enableRbdDriver: true
  enableCephfsDriver: true
  enableGrpcMetrics: false
  # Set to true to enable host networking for the CSI CephFS and RBD nodeplugins. This may be necessary
  # in some network configurations where the SDN does not provide access to an external cluster, or
  # where there is a significant drop in read/write performance.
  enableCSIHostNetwork: true
  # Set to false to disable deployment of the snapshotter container in the CephFS provisioner pod.
  enableCephfsSnapshotter: true
  # Set to false to disable deployment of the snapshotter container in the RBD provisioner pod.
  enableRBDSnapshotter: true
  # Set to false if SELinux is not enabled or is unavailable on the cluster nodes.
  enablePluginSelinuxHostMount: false
  # Set to true to enable Ceph CSI PVC encryption support.
  enableCSIEncryption: false
  # (Optional) set a user-created priorityClassName for the CSI plugin pods.
  pluginPriorityClassName: system-node-critical
  # (Optional) set a user-created priorityClassName for the CSI provisioner pods.
  provisionerPriorityClassName: system-cluster-critical
  # (Optional) policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  rbdFSGroupPolicy: "ReadWriteOnceWithFSType"
  # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  cephFSFSGroupPolicy: "ReadWriteOnceWithFSType"
  # (Optional) policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
  # Supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  nfsFSGroupPolicy: "ReadWriteOnceWithFSType"
  # The OMAP generator creates the omap mapping between the PV name and the RBD image,
  # which helps CSI identify the RBD images for CSI operations.
  # CSI_ENABLE_OMAP_GENERATOR must be enabled when using the RBD mirroring feature.
  # The OMAP generator is disabled by default; when enabled, it is deployed as a
  # sidecar of the CSI provisioner pod. Set to true to enable it.
  enableOMAPGenerator: false
  # Set the number of replicas for the CSI provisioner deployment.
  provisionerReplicas: 2
  # Set the logging level for the CSI containers.
  # Supported values are 0 to 5: 0 for generally useful logs, 5 for trace-level verbosity.
  logLevel: 5
  # CSI RBD plugin daemonset update strategy; supported values are OnDelete and RollingUpdate.
  # The default value is RollingUpdate.
  #rbdPluginUpdateStrategy: OnDelete
  # CSI CephFS plugin daemonset update strategy; supported values are OnDelete and RollingUpdate.
  # The default value is RollingUpdate.
  #cephFSPluginUpdateStrategy: OnDelete
  # CSI NFS plugin daemonset update strategy; supported values are OnDelete and RollingUpdate.
  # The default value is RollingUpdate.
  #nfsPluginUpdateStrategy: OnDelete
  # The CSI gRPC timeout value (in seconds). It should be >= 120. If this variable is not set or is an invalid value, it defaults to 150.
  grpcTimeoutInSeconds: 150
  # Allow starting an unsupported ceph-csi image.
  allowUnsupportedVersion: false
  # Ceph CSI RBD provisioner resource requirements. List the resource
  # requests and limits to apply to the provisioner pod.
  # The csi-omap-generator resources are applied only if enableOMAPGenerator is set to true.
  csiRBDProvisionerResource: |
    - name: csi-provisioner
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name: csi-resizer
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name: csi-attacher
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name: csi-snapshotter
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name: csi-rbdplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
    - name: csi-omap-generator
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
    - name: liveness-prometheus
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m
  # Ceph CSI RBD plugin resource requirements. List the resource
  # requests and limits to apply to the plugin pod.
  csiRBDPluginResource: |
    - name: driver-registrar
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m
    - name: csi-rbdplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
    - name: liveness-prometheus
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m
  # Ceph CSI CephFS provisioner resource requirements. List the resource
  # requests and limits to apply to the provisioner pod.
  csiCephFSProvisionerResource: |
    - name: csi-provisioner
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name: csi-resizer
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name: csi-attacher
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name: csi-snapshotter
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name: csi-cephfsplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
    - name: liveness-prometheus
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m
  # Ceph CSI CephFS plugin resource requirements. List the resource
  # requests and limits to apply to the plugin pod.
  csiCephFSPluginResource: |
    - name: driver-registrar
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m
    - name: csi-cephfsplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
    - name: liveness-prometheus
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m
  # Ceph CSI NFS provisioner resource requirements. List the resource
  # requests and limits to apply to the provisioner pod.
  csiNFSProvisionerResource: |
    - name: csi-provisioner
      resource:
        requests:
          memory: 128Mi
          cpu: 100m
        limits:
          memory: 256Mi
          cpu: 200m
    - name: csi-nfsplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
  # Ceph CSI NFS plugin resource requirements. List the resource
  # requests and limits to apply to the plugin pod.
  csiNFSPluginResource: |
    - name: driver-registrar
      resource:
        requests:
          memory: 128Mi
          cpu: 50m
        limits:
          memory: 256Mi
          cpu: 100m
    - name: csi-nfsplugin
      resource:
        requests:
          memory: 512Mi
          cpu: 250m
        limits:
          memory: 1Gi
          cpu: 500m
  # Set provisionerTolerations and provisionerNodeAffinity for the provisioner pods.
  # The CSI provisioners are best started on the same nodes as the other Ceph daemons.
  # provisionerTolerations:
  #   - key: key
  #     operator: Exists
  #     effect: NoSchedule
  # provisionerNodeAffinity: key1=value1,value2; key2=value3
  provisionerNodeAffinity: "storage-node=true"
  # Set pluginTolerations and pluginNodeAffinity for the plugin daemonset pods.
  # The CSI plugins need to be started on all nodes where the clients need to mount the storage.
  pluginTolerations:
    - key: storage-node
      operator: Exists
      effect: NoSchedule
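  # The toleration above assumes the storage nodes carry a matching taint; a sketch
  # of applying one (the node name is a placeholder):
  #   kubectl taint nodes <node-name> storage-node=true:NoSchedule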
  # pluginNodeAffinity: key1=value1,value2; key2=value3
  #pluginNodeAffinity: "storage-node=true"
  #cephfsGrpcMetricsPort: 9091
  #cephfsLivenessMetricsPort: 9081
  #rbdGrpcMetricsPort: 9090
  #csiAddonsPort: 9070
  # Enable Ceph kernel clients on kernels < 4.17. If your kernel does not support quotas for CephFS,
  # you may want to disable this setting. However, this will cause an issue during upgrades
  # with the FUSE client. See the upgrade guide: https://rook.io/docs/rook/v1.2/ceph-upgrade.html
  forceCephFSKernelClient: true
  #rbdLivenessMetricsPort: 9080
  #kubeletDirPath: /var/lib/kubelet
  #cephcsi:
  #  image: quay.io/cephcsi/cephcsi:v3.6.1
  #registrar:
  #  image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.5.0
  #provisioner:
  #  image: k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0
  #snapshotter:
  #  image: k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1
  #attacher:
  #  image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
  #resizer:
  #  image: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0
  # Labels to add to the CSI CephFS Deployment and DaemonSet pods.
  #cephfsPodLabels: "key1=value1,key2=value2"
  # Labels to add to the CSI NFS Deployment and DaemonSet pods.
  #nfsPodLabels: "key1=value1,key2=value2"
  # Labels to add to the CSI RBD Deployment and DaemonSet pods.
  #rbdPodLabels: "key1=value1,key2=value2"
  # Enable the volume replication controller.
  # Before enabling, ensure the Volume Replication CRDs are created.
  # See https://rook.io/docs/rook/latest/ceph-csi-drivers.html#rbd-mirroring
  volumeReplication:
    enabled: false
    #image: "quay.io/csiaddons/volumereplication-operator:v0.3.0"
  # Enable the CSIAddons sidecar.
  csiAddons:
    enabled: false
    #image: "quay.io/csiaddons/k8s-sidecar:v0.2.1"
  # Enable the NFS CSI driver.
  nfs:
    enabled: false
    #image: "k8s.gcr.io/sig-storage/nfsplugin:v3.1.0"
enableDiscoveryDaemon: false
cephCommandsTimeoutSeconds: "15"
## If true, run the rook operator on the host network
useOperatorHostNetwork: false
## Rook Discover configuration
## toleration: NoSchedule, PreferNoSchedule or NoExecute
## tolerationKey: Set this to the specific key of the taint to tolerate
## tolerations: Array of tolerations in YAML format which will be added to the agent deployment
## nodeAffinity: Set to labels of the nodes to match
# discover:
#   toleration: NoSchedule
#   tolerationKey: key
#   tolerations:
#     - key: key
#       operator: Exists
#       effect: NoSchedule
#   nodeAffinity: key1=value1,value2; key2=value3
#   podLabels: "key1=value1,key2=value2"
# In some situations SELinux relabelling breaks (times out) on large filesystems, and it does not
# work with CephFS ReadWriteMany volumes (the last relabel wins).
# Disable it here if you have similar issues.
# For more details see https://github.com/rook/rook/issues/2417
enableSelinuxRelabeling: true
disableAdmissionController: false
# Writing to the hostPath is required for the Ceph mon and osd pods. Given the restricted permissions
# in OpenShift with SELinux, the pod must run privileged in order to write to the hostPath volume;
# in that case this must be set to true.
hostpathRequiresPrivileged: false
# Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false
# Blacklist certain disks according to the regex provided.
discoverDaemonUdev:
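# For example (an illustrative pattern only; adjust to your environment):
# discoverDaemonUdev: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"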
# The imagePullSecrets option allows pulling images from a private registry. It is passed to all service accounts.
# imagePullSecrets:
#   - name: my-registry-secret
# Whether the OBC provisioner should watch the operator namespace; if not, the namespace of the cluster is used.
enableOBCWatchOperatorNamespace: true
admissionController:
  # Set tolerations and nodeAffinity for the admission controller pod.
  # The admission controller is best started on the same nodes as the other Ceph daemons.
  # tolerations:
  #   - key: key
  #     operator: Exists
  #     effect: NoSchedule
  # nodeAffinity: key1=value1,value2; key2=value3
  nodeAffinity: "storage-node=true"
monitoring:
  # Requires Prometheus to be pre-installed.
  # Enabling will also create RBAC rules to allow the operator to create ServiceMonitors.
  enabled: true
|