@@ -1,18 +1,21 @@
-# From https://raw.githubusercontent.com/rook/rook/release-1.9/deploy/charts/rook-ceph-cluster/values.yaml
+# From https://raw.githubusercontent.com/rook/rook/v1.11.11/deploy/charts/rook-ceph-cluster/values.yaml
+# KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm upgrade --namespace rook-ceph rook-ceph-cluster --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f rook/rook-ceph-cluster-values.yaml --version 1.10.13
 # Default values for a single rook-ceph cluster
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.

-# Namespace of the main rook operator
+# -- Namespace of the main rook operator
 operatorNamespace: rook-ceph

-# The metadata.name of the CephCluster CR. The default name is the same as the namespace.
-# clusterName: rook-ceph
+# -- The metadata.name of the CephCluster CR
+# @default -- The same as the namespace
+clusterName:

-# Ability to override the kubernetes version used in rendering the helm chart
-# kubeVersion: 1.21
+# -- Optional override of the target kubernetes version
+kubeVersion:

-# Ability to override ceph.conf
+# -- Cluster ceph.conf override
+configOverride:
 # configOverride: |
 #   [global]
 #   mon_allow_pool_delete = true
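
A quick sanity check after an upgrade like the one in the comment above — same kubeconfig and release name assumed — is to have helm print the user-supplied values the release was rendered with:

  KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm get values --namespace rook-ceph rook-ceph-cluster
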
@@ -21,10 +24,16 @@ operatorNamespace: rook-ceph
 # Installs a debugging toolbox deployment
 toolbox:
+  # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
   enabled: true
-  image: rook/ceph:v1.9.0.230.g6a87cb44a
+  # -- Toolbox image, defaults to the image used by the Ceph cluster
+  # image: quay.io/ceph/ceph:v17.2.6
+  image: rook/ceph:v1.9.0.230.g6a87cb44a # TODO probably safe to remove?
+  # -- Toolbox tolerations
   tolerations: []
+  # -- Toolbox affinity
   affinity: {}
+  # -- Toolbox resources
   resources:
     limits:
       cpu: "500m"
@@ -32,21 +41,30 @@ toolbox:
     requests:
       cpu: "100m"
       memory: "128Mi"
-  # Set the priority class for the toolbox if desired
-  # priorityClassName: class
+  # -- Set the priority class for the toolbox if desired
+  priorityClassName:

-# monitoring requires Prometheus to be pre-installed
 monitoring:
-  # enabling will also create RBAC rules to allow Operator to create ServiceMonitors
+  # -- Enable Prometheus integration, will also create necessary RBAC rules to allow Operator to create ServiceMonitors.
+  # Monitoring requires Prometheus to be pre-installed
   enabled: true
-  # whether to create the prometheus rules
+  # -- Whether to create the Prometheus rules for Ceph alerts
   createPrometheusRules: true
-  # the namespace in which to create the prometheus rules, if different from the rook cluster namespace
+  # -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
   # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
-  # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
+  # deployed) to set rulesNamespaceOverride for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
   rulesNamespaceOverride:
-
-# If true, create & use PSP resources. Set this to the same value as the rook-ceph chart.
+  # Monitoring settings for external clusters:
+  # externalMgrEndpoints: <list of endpoints>
+  # externalMgrPrometheusPort: <port>
+  # allow adding custom labels and annotations to the prometheus rule
+  prometheusRule:
+    # -- Labels applied to PrometheusRule
+    labels: {}
+    # -- Annotations applied to PrometheusRule
+    annotations: {}
+
+# -- Create & use PSP resources. Set this to the same value as the rook-ceph chart.
 pspEnable: true

 # imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
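
If Prometheus only loads rules that match a label selector (the usual kube-prometheus-stack setup), the new prometheusRule block above is where that label goes. A minimal sketch — the release value is hypothetical and must match your Prometheus ruleSelector:

  monitoring:
    prometheusRule:
      labels:
        release: kube-prometheus-stack
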
@@ -54,18 +72,27 @@ pspEnable: true
 # - name: my-registry-secret

 # All values below are taken from the CephCluster CRD
-# More information can be found at [Ceph Cluster CRD](/Documentation/ceph-cluster-crd.md)
+# -- Cluster configuration.
+# @default -- See [below](#ceph-cluster-spec)
 cephClusterSpec:
+  # This cluster spec example is for a converged cluster where all the Ceph daemons are running locally,
+  # as in the host-based example (cluster.yaml). For a different configuration such as a
+  # PVC-based cluster (cluster-on-pvc.yaml), external cluster (cluster-external.yaml),
+  # or stretch cluster (cluster-stretched.yaml), replace this entire `cephClusterSpec`
+  # with the specs from those examples.
+
+  # For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
   cephVersion:
     # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
-    # v15 is octopus, and v16 is pacific.
+    # v16 is Pacific, v17 is Quincy.
     # RECOMMENDATION: In production, use a specific version tag instead of the general v16 flag, which pulls the latest release and could result in different
     # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
     # If you want to be more precise, you can always use a timestamp tag such quay.io/ceph/ceph:v15.2.11-20200419
     # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
+    # image: quay.io/ceph/ceph:v17.2.6
     image: quay.io/ceph/ceph:v16.2.7
-    # Whether to allow unsupported versions of Ceph. Currently `octopus` and `pacific` are supported.
-    # Future versions such as `pacific` would require this to be set to `true`.
+    # Whether to allow unsupported versions of Ceph. Currently `pacific` and `quincy` are supported.
+    # Future versions such as `reef` (v18) would require this to be set to `true`.
     # Do not set to true in production.
     allowUnsupported: false
@@ -77,7 +104,7 @@ cephClusterSpec:
   # Whether or not upgrade should continue even if a check fails
   # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
   # Use at your OWN risk
-  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/latest/ceph-upgrade.html#ceph-version-upgrades
+  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/v1.10/Upgrade/ceph-upgrade/
   skipUpgradeChecks: false

   # Whether or not continue if PGs are not clean during an upgrade
@@ -85,7 +112,7 @@ cephClusterSpec:
   # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
   # If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one
-  # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then opertor would
+  # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then operator would
   # continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
   # The default wait timeout is 10 minutes.
   waitTimeoutForHealthyOSDInMinutes: 10
@@ -119,12 +146,30 @@ cephClusterSpec:
     # urlPrefix: /ceph-dashboard
     # serve the dashboard at the given port.
     # port: 8443
-    # serve the dashboard using SSL
+    # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
+    # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
     ssl: true

-  # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/ceph-cluster-crd.md#network-configuration-settings
+  # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/ceph-cluster-crd.md#network-configuration-settings
   network:
-    # enable host networking
+    connections:
+      # Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network.
+      # The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons will be encrypted.
+      # When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check.
+      # IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively for testing only,
+      # you can set the "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
+      # The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
+      encryption:
+        enabled: false
+      # Whether to compress the data in transit across the wire. The default is false.
+      # Requires Ceph Quincy (v17) or newer. Also see the kernel requirements above for encryption.
+      compression:
+        enabled: false
+      # Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
+      # and clients will be required to connect to the Ceph cluster with the v2 port (3300).
+      # Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer).
+      requireMsgr2: false
+    # enable host networking
     provider: host
     # # EXPERIMENTAL: enable the Multus network provider
     # provider: multus
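
With requireMsgr2 left false, the mons keep announcing both the v1 (6789) and v2 (3300) ports; the toolbox enabled above can show what they actually expose (standard rook-ceph-tools deployment name assumed):

  kubectl --namespace rook-ceph exec deploy/rook-ceph-tools -- ceph mon dump
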
@@ -151,9 +196,10 @@ cephClusterSpec:
   #   daysToRetain: 30

   # enable log collector, daemons will log on files and rotate
-  # logCollector:
-  #   enabled: true
-  #   periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days.
+  #logCollector:
+  #  enabled: true
+  #  periodicity: daily # one of: hourly, daily, weekly, monthly
+  #  maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.

   # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
   cleanupPolicy:
@@ -192,22 +238,22 @@ cephClusterSpec:
               - key: storage-node
                 operator: In
                 values:
-                - "true"
+                  - "true"
     podAffinity:
     podAntiAffinity:
     topologySpreadConstraints:
     tolerations:
       - key: storage-node
         operator: Exists
-    # The above placement information can also be specified for mon, osd, and mgr components
-    mon:
-    # Monitor deployments may contain an anti-affinity rule for avoiding monitor
-    # collocation on the same node. This is a required rule when host network is used
-    # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
-    # preferred rule with weight: 50.
-    osd:
-    mgr:
-    cleanup:
+    # # The above placement information can also be specified for mon, osd, and mgr components
+    # mon:
+    # # Monitor deployments may contain an anti-affinity rule for avoiding monitor
+    # # collocation on the same node. This is a required rule when host network is used
+    # # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
+    # # preferred rule with weight: 50.
+    # osd:
+    # mgr:
+    # cleanup:

   # annotations:
   #   all:
@@ -229,7 +275,6 @@ cephClusterSpec:
   #   # These labels can be passed as LabelSelector to Prometheus
   #   monitoring:

-  # https://home.robusta.dev/blog/stop-using-cpu-limits
   resources:
     mgr:
       limits:
@@ -253,9 +298,14 @@ cephClusterSpec:
       cpu: 0
       memory: "1Gi"
     prepareosd:
-      limits:
-        cpu: 0
-        memory: "12Gi"
+      # limits: It is not recommended to set limits on the OSD prepare job
+      #         since it's a one-time burst for memory that must be allowed to
+      #         complete without an OOM kill. Note however that if a k8s
+      #         limitRange guardrail is defined external to Rook, the lack of
+      #         a limit here may result in a sync failure, in which case a
+      #         limit should be added. 1200Mi may suffice for up to 15Ti
+      #         OSDs ; for larger devices 2Gi may be required.
+      #         cf. https://github.com/rook/rook/pull/11103
       requests:
         cpu: 0
         memory: "500Mi"
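
If a LimitRange in the namespace does force a limit on the prepare job, it can be reinstated with the sizes the upstream comment suggests — a sketch only, not a recommendation:

  resources:
    prepareosd:
      limits:
        memory: "2Gi" # 1200Mi may suffice for OSDs up to ~15Ti; larger devices may need 2Gi
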
@@ -287,6 +337,13 @@ cephClusterSpec:
       requests:
         cpu: 0
         memory: "100Mi"
+    exporter:
+      limits:
+        cpu: 0
+        memory: "128Mi"
+      requests:
+        cpu: 0
+        memory: "50Mi"

   # The option to automatically remove OSDs that are out and are safe to destroy.
   removeOSDsIfOutAndSafeToRemove: false
@@ -309,7 +366,7 @@ cephClusterSpec:
     # osdsPerDevice: "1" # this value can be overridden at the node or device level
     # encryptedDevice: "true" # the default value for this option is "false"
     # # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
-    # # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
+    # # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
     # nodes:
     #   - name: "172.17.4.201"
     #     devices: # specific devices to use for storage can be specified for each node
@@ -335,11 +392,6 @@ cephClusterSpec:
   # Operator will continue with the next drain if the timeout exceeds. It only works if `managePodBudgets` is `true`.
   # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
   pgHealthCheckTimeout: 0
-  # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
-  # Only available on OpenShift.
-  manageMachineDisruptionBudgets: false
-  # Namespace in which to watch for the MachineDisruptionBudgets.
-  machineDisruptionBudgetNamespace: openshift-machine-api

   # Configure the healthcheck and liveness probes for ceph pods.
   # Valid values for daemons are 'mon', 'osd', 'status'
@@ -364,35 +416,53 @@ cephClusterSpec:
       disabled: false

 ingress:
-  dashboard: {}
+  # -- Enable an ingress for the ceph-dashboard
+  dashboard:
+    {}
     # annotations:
-    #   kubernetes.io/ingress.class: nginx
-    #   external-dns.alpha.kubernetes.io/hostname: example.com
+    #   external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
     #   nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
     #   If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly
     #   nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
     #   nginx.ingress.kubernetes.io/server-snippet: |
     #     proxy_ssl_verify off;
     # host:
-    #   name: example.com
+    #   name: dashboard.example.com
     #   path: "/ceph-dashboard(/|$)(.*)"
     # tls:
-    # ingressClassName:
-
+    # - hosts:
+    #     - dashboard.example.com
+    #   secretName: testsecret-tls
+    ## Note: Only one of ingress class annotation or the `ingressClassName:` can be used at a time
+    ## to set the ingress class
+    # ingressClassName: nginx
+
+# -- A list of CephBlockPool configurations to deploy
+# @default -- See [below](#ceph-block-pools)
 cephBlockPools:
   - name: ceph-blockpool
-    # see https://github.com/rook/rook/blob/master/Documentation/ceph-pool-crd.md#spec for available configuration
+    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
     spec:
       failureDomain: host
       replicated:
         size: 3
+      # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
+      # For reference: https://docs.ceph.com/docs/master/mgr/prometheus/#rbd-io-statistics
+      # enableRBDStats: true
     storageClass:
       enabled: true
       name: ceph-block
       isDefault: true
       reclaimPolicy: Delete
       allowVolumeExpansion: true
+      volumeBindingMode: "Immediate"
       mountOptions: []
+      # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
+      allowedTopologies: []
+      #  - matchLabelExpressions:
+      #      - key: rook-ceph-role
+      #        values:
+      #          - storage-node
       # see https://github.com/rook/rook/blob/master/Documentation/ceph-block.md#provision-storage for available configuration
       parameters:
         # (optional) mapOptions is a comma-separated list of map options.
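
Filled in, the commented dashboard skeleton above might look like this for an NGINX ingress fronting the SSL-enabled dashboard, replacing the empty {} (hostname and TLS secret name are placeholders):

  ingress:
    dashboard:
      annotations:
        nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
        nginx.ingress.kubernetes.io/server-snippet: |
          proxy_ssl_verify off;
      host:
        name: dashboard.example.com
        path: "/"
      tls:
        - hosts:
            - dashboard.example.com
          secretName: testsecret-tls
      ingressClassName: nginx
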
@@ -411,23 +481,30 @@ cephBlockPools:
         # RBD image format. Defaults to "2".
         imageFormat: "2"
-        # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
+
+        # RBD image features, equivalent to OR'd bitfield value: 63
+        # Available for imageFormat: "2". Older releases of CSI RBD
+        # support only the `layering` feature. The Linux kernel (KRBD) supports the
+        # full feature complement as of 5.4
         imageFeatures: layering
-        # The secrets contain Ceph admin credentials.
+
+        # These secrets contain Ceph admin credentials.
         csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
-        csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
         csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
-        csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
         csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
-        csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
         # Specify the filesystem type of the volume. If not specified, csi-provisioner
         # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
         # in hyperconverged settings where the volume is mounted on the same node as the osds.
         csi.storage.k8s.io/fstype: ext4

+# -- A list of CephFileSystem configurations to deploy
+# @default -- See [below](#ceph-file-systems)
 cephFileSystems:
   - name: ceph-filesystem
-    # see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem-crd.md#filesystem-settings for available configuration
+    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
     spec:
       metadataPool:
         replicated:
@@ -436,7 +513,7 @@ cephFileSystems:
         - failureDomain: host
           replicated:
             size: 3
-          # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem-crd.md#pools
+          # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
           name: data0
       metadataServer:
         activeCount: 1
@@ -472,21 +549,24 @@ cephFileSystems:
       pool: data0
       reclaimPolicy: Delete
       allowVolumeExpansion: true
+      volumeBindingMode: "Immediate"
       mountOptions: []
       # see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem.md#provision-storage for available configuration
       parameters:
         # The secrets contain Ceph admin credentials.
         csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
-        csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
         csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
-        csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
         csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
-        csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
         # Specify the filesystem type of the volume. If not specified, csi-provisioner
         # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
         # in hyperconverged settings where the volume is mounted on the same node as the osds.
         csi.storage.k8s.io/fstype: ext4

+# -- Settings for the filesystem snapshot class
+# @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
 cephFileSystemVolumeSnapshotClass:
   enabled: false
   name: ceph-filesystem
@@ -494,9 +574,11 @@ cephFileSystemVolumeSnapshotClass:
   deletionPolicy: Delete
   annotations: {}
   labels: {}
-  # see https://rook.io/docs/rook/latest/ceph-csi-snapshot.html#cephfs-snapshots for available configuration
+  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
   parameters: {}

+# -- Settings for the block pool snapshot class
+# @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
 cephBlockPoolsVolumeSnapshotClass:
   enabled: false
   name: ceph-block
@@ -504,12 +586,14 @@ cephBlockPoolsVolumeSnapshotClass:
   deletionPolicy: Delete
   annotations: {}
   labels: {}
-  # see https://rook.io/docs/rook/latest/ceph-csi-snapshot.html#rbd-snapshots for available configuration
+  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
   parameters: {}

+# -- A list of CephObjectStore configurations to deploy
+# @default -- See [below](#ceph-object-stores)
 cephObjectStores:
   - name: ceph-objectstore
-    # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-store-crd.md#object-store-settings for available configuration
+    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
     spec:
       metadataPool:
         failureDomain: host
@@ -556,7 +640,82 @@ cephObjectStores:
       enabled: true
       name: ceph-bucket
       reclaimPolicy: Delete
+      volumeBindingMode: "Immediate"
       # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-bucket-claim.md#storageclass for available configuration
       parameters:
         # note: objectStoreNamespace and objectStoreName are configured by the chart
         region: us-east-1
+    ingress:
+      # Enable an ingress for the ceph-objectstore
+      enabled: false
+      # annotations: {}
+      # host:
+      #   name: objectstore.example.com
+      #   path: /
+      # tls:
+      # - hosts:
+      #     - objectstore.example.com
+      #   secretName: ceph-objectstore-tls
+      # ingressClassName: nginx
+
+# cephECBlockPools are disabled by default, please remove the comments and set desired values to enable it
+#cephECBlockPools:
+#  # For erasure coded a replicated metadata pool is required.
+#  # https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded
+#  - name: ec-metadata-pool
+#    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
+#    spec:
+#      replicated:
+#        size: 2
+#  - name: ec-data-pool
+#    spec:
+#      failureDomain: osd
+#      erasureCoded:
+#        dataChunks: 2
+#        codingChunks: 1
+#      deviceClass: hdd
+
+# cephECStorageClass also is disabled by default, please remove the comments and set desired values to enable it
+# if cephECBlockPools are uncommented you must remove the comments of cephEcStorageClass as well
+#cephECStorageClass:
+#  name: rook-ceph-block
+#  # Change "rook-ceph" provisioner prefix to match the operator namespace if needed
+#  provisioner: rook-ceph.rbd.csi.ceph.com # driver:namespace:operator
+#  parameters:
+#    # clusterID is the namespace where the rook cluster is running
+#    # If you change this namespace, also change the namespace below where the secret namespaces are defined
+#    clusterID: rook-ceph # namespace:cluster
+#
+#    # If you want to use erasure coded pool with RBD, you need to create
+#    # two pools. one erasure coded and one replicated.
+#    # You need to specify the replicated pool here in the `pool` parameter, it is
+#    # used for the metadata of the images.
+#    # The erasure coded pool must be set as the `dataPool` parameter below.
+#    dataPool: ec-data-pool
+#    pool: ec-metadata-pool
+#
+#    # (optional) mapOptions is a comma-separated list of map options.
+#    # For krbd options refer
+#    # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
+#    # For nbd options refer
+#    # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
+#    # mapOptions: lock_on_read,queue_depth=1024
+#
+#    # (optional) unmapOptions is a comma-separated list of unmap options.
+#    # For krbd options refer
+#    # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
+#    # For nbd options refer
+#    # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
+#    # unmapOptions: force
+#
+#    # RBD image format. Defaults to "2".
+#    imageFormat: "2"
+#
+#    # RBD image features, equivalent to OR'd bitfield value: 63
+#    # Available for imageFormat: "2". Older releases of CSI RBD
+#    # support only the `layering` feature. The Linux kernel (KRBD) supports the
+#    # full feature complement as of 5.4
+#    # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
+#    imageFeatures: layering
+#    allowVolumeExpansion: true
+#    reclaimPolicy: Delete
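
Once the new values are applied, the toolbox enabled at the top of this file gives a quick health read-out (standard toolbox deployment name assumed):

  kubectl --namespace rook-ceph exec deploy/rook-ceph-tools -- ceph status
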