
start on backup rook

Josh Bicking 2 weeks ago
commit aa48aec402
2 changed files with 185 additions and 0 deletions
  1. backup/applications.yaml (+55 −0)
  2. backup/rook/rook-ceph-cluster-values.yaml (+130 −0)

backup/applications.yaml (+55 −0)

@@ -34,3 +34,58 @@ spec:
   syncPolicy:
     automated:
       enabled: true
+---
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: rook-ceph-operator
+  namespace: argocd
+spec:
+  destination:
+    namespace: rook-ceph
+    server: https://kubernetes.default.svc
+  project: default
+  source:
+    repoURL: https://charts.rook.io/release
+    targetRevision: '1.19.1'
+    chart: rook-ceph
+    #helm:
+    #  valueFiles:
+    #    - $values/backup/rook/rook-ceph-operator-values.yaml
+  #- repoURL: https://gogs.jibby.org/jhb2345/server
+  #  ref: values
+  #  targetRevision: HEAD
+  syncPolicy:
+    syncOptions:
+      - CreateNamespace=true
+    automated:
+      enabled: true
+---
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: rook-ceph-cluster
+  namespace: argocd
+spec:
+  destination:
+    namespace: rook-ceph
+    server: https://kubernetes.default.svc
+  project: default
+  sources:
+  - repoURL: https://charts.rook.io/release
+    targetRevision: '1.19.1'
+    chart: rook-ceph-cluster
+    helm:
+      valueFiles:
+        - $values/backup/rook/rook-ceph-cluster-values.yaml
+      parameters:
+        - name: "operatorNamespace"
+          value: rook-ceph
+  - repoURL: https://gogs.jibby.org/jhb2345/server
+    ref: values
+    targetRevision: HEAD
+  syncPolicy:
+    syncOptions:
+      - CreateNamespace=true
+    automated:
+      enabled: true

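The rook-ceph-cluster Application above uses Argo CD's multiple-sources feature: the second source carries ref: values, which lets the Helm source resolve $values/backup/rook/rook-ceph-cluster-values.yaml out of the git repo. The operator Application has the same wiring commented out, pending an operator values file. A minimal sketch of what backup/rook/rook-ceph-operator-values.yaml might hold (the file name comes from the commented-out reference; the keys below follow the rook-ceph operator chart's stock values.yaml and are illustrative, not part of this commit):

# backup/rook/rook-ceph-operator-values.yaml -- hypothetical sketch;
# this commit leaves the file uncreated and its reference commented out.
# Verify key names against the pinned chart version's values.yaml.
crds:
  enabled: true        # let the chart manage the Rook CRDs
logLevel: INFO
resources:             # keep the operator pod modest
  limits:
    memory: "512Mi"
  requests:
    cpu: "100m"
    memory: "128Mi"

If this file is added, the commented-out helm.valueFiles block and the values ref on the operator Application can be uncommented to match the cluster Application.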
backup/rook/rook-ceph-cluster-values.yaml (+130 −0)

@@ -0,0 +1,130 @@
+# https://github.com/rook/rook/blob/v1.19.1/deploy/charts/rook-ceph-cluster/values.yaml
+toolbox:
+  enabled: true
+
+cephClusterSpec:
+  mon:
+    count: 3
+    allowMultiplePerNode: true
+  mgr:
+    allowMultiplePerNode: true
+
+  dashboard:
+    port: 8080
+    ssl: false
+
+  logCollector:
+    enabled: false
+
+  resources:
+    mgr:
+      limits:
+        cpu: 0
+        memory: "1.5Gi"
+      requests:
+        cpu: 0
+        memory: "512Mi"
+    mon:
+      limits:
+        cpu: 0
+        memory: "1Gi"
+      requests:
+        cpu: 0
+        memory: "500Mi"
+    osd:
+      limits:
+        cpu: 0
+        memory: "4Gi"
+      requests:
+        cpu: 0
+        memory: "1Gi"
+    prepareosd:
+      # limits: It is not recommended to set limits on the OSD prepare job
+      #         since it's a one-time burst for memory that must be allowed to
+      #         complete without an OOM kill.  Note however that if a k8s
+      #         limitRange guardrail is defined external to Rook, the lack of
+      #         a limit here may result in a sync failure, in which case a
+      #         limit should be added.  1200Mi may suffice for up to 15Ti
+      #         OSDs ; for larger devices 2Gi may be required.
+      #         cf. https://github.com/rook/rook/pull/11103
+      requests:
+        cpu: 0
+        memory: "500Mi"
+    mgr-sidecar:
+      limits:
+        cpu: 0
+        memory: "100Mi"
+      requests:
+        cpu: 0
+        memory: "40Mi"
+    crashcollector:
+      limits:
+        cpu: 0
+        memory: "60Mi"
+      requests:
+        cpu: 0
+        memory: "60Mi"
+    logcollector:
+      limits:
+        cpu: 0
+        memory: "1Gi"
+      requests:
+        cpu: 0
+        memory: "100Mi"
+    cleanup:
+      limits:
+        cpu: 0
+        memory: "1Gi"
+      requests:
+        cpu: 0
+        memory: "100Mi"
+    exporter:
+      limits:
+        cpu: 0
+        memory: "128Mi"
+      requests:
+        cpu: 0
+        memory: "50Mi"
+
+cephBlockPools: []
+cephFileSystems: []
+cephObjectStores:
+  - name: ceph-objectstore
+    spec:
+      metadataPool:
+        failureDomain: osd
+        replicated:
+          size: 3
+      dataPool:
+        failureDomain: osd
+        erasureCoded:
+          dataChunks: 2
+          codingChunks: 1
+        parameters:
+          bulk: "true"
+      preservePoolsOnDelete: true
+      gateway:
+        port: 80
+        resources:
+          limits:
+            memory: "2Gi"
+          requests:
+            cpu: "1000m"
+            memory: "1Gi"
+        instances: 1
+        priorityClassName: system-cluster-critical
+    storageClass:
+      enabled: true
+      name: ceph-bucket
+      reclaimPolicy: Delete
+      volumeBindingMode: "Immediate"
+      annotations: {}
+      labels: {}
+      parameters:
+        # note: objectStoreNamespace and objectStoreName are configured by the chart
+        region: us-east-1
+    ingress:
+      enabled: false
+    route:
+      enabled: false
+