
updates and cephfs docs

Josh Bicking, 1 year ago
commit 30507da839
5 changed files, with 48 additions and 23 deletions
  1. README.md (+20 -10)
  2. nextcloud/values.yaml (+1 -1)
  3. seedbox_sync.py (+25 -10)
  4. temp-pvc-pod.yaml (+1 -1)
  5. vaultwarden.yaml (+1 -1)

README.md: +20 -10

@@ -37,16 +37,7 @@ TODO
  ceph osd metadata <id>
 
 
-## Sharing 1 CephFS instance between multiple PVCs
 
-https://github.com/rook/rook/blob/677d3fa47f21b07245e2e4ab6cc964eb44223c48/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md
-
-Create CephFilesystem
-Create SC backed by Filesystem & Pool
-Ensure the CSI subvolumegroup was created. If not, `ceph fs subvolumegroup create <fsname> csi`
-Create PVC without a specified PV: PV will be auto-created
-_Super important_: Set created PV to ReclaimPolicy: Retain
-Create a new, better-named PVC
 
 ## tolerations
 If your setup divides k8s nodes into ceph & non-ceph nodes (using a label, like `storage-node=true`), ensure labels & a toleration are set properly (`storage-node=false`, with a toleration checking for `storage-node`) so non-ceph nodes still run PV plugin Daemonsets.
@@ -55,7 +46,9 @@ Otherwise, any pod scheduled on a non-ceph node won't be able to mount ceph-back
 
 See rook-ceph-cluster-values.yaml->cephClusterSpec->placement for an example.
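For illustration, such a toleration on the plugin DaemonSet pod spec might look like the sketch below; the `storage-node` key is assumed to match your taints, and values/effects will differ per cluster:

    tolerations:
    - key: storage-node
      operator: Exists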
 
-## CephFS w/ EC backing pool
+## CephFS
+
+### EC backing pool
 
 EC-backed filesystems require a regular replicated pool as a default.
 
@@ -68,6 +61,23 @@ setfattr -n ceph.dir.layout.pool -v cephfs-erasurecoded /mnt/cephfs/my-erasure-c
 
 https://docs.ceph.com/en/quincy/cephfs/file-layouts/
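As a sketch (not taken from this repo) of a rook CephFilesystem with a replicated default data pool plus an EC pool; the filesystem name, pool names, and chunk counts are placeholders. Rook names the resulting data pools roughly `<fsname>-<poolname>`, which is where a pool like `cephfs-erasurecoded` would come from:

    apiVersion: ceph.rook.io/v1
    kind: CephFilesystem
    metadata:
      name: cephfs
      namespace: rook-ceph
    spec:
      metadataPool:
        replicated:
          size: 3
      dataPools:
      - name: default          # replicated default pool, required even for EC-backed filesystems
        replicated:
          size: 3
      - name: erasurecoded     # selected per-directory via ceph.dir.layout.pool
        erasureCoded:
          dataChunks: 2
          codingChunks: 1
      metadataServer:
        activeCount: 1
        activeStandby: true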
 
+### Sharing 1 CephFS instance between multiple PVCs
+
+https://github.com/rook/rook/blob/677d3fa47f21b07245e2e4ab6cc964eb44223c48/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md
+
+Create CephFilesystem
+Create SC backed by Filesystem & Pool
+Ensure the CSI subvolumegroup was created. If not, `ceph fs subvolumegroup create <fsname> csi`
+Create PVC without a specified PV: PV will be auto-created
+_Super important_: Set created PV to ReclaimPolicy: Retain
+Create a new, better-named PVC
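As a sketch of that last step, the better-named PVC binds to the retained PV explicitly; the storage class name, PV name, and size below are placeholders:

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: shared-media            # the new, better-named PVC
    spec:
      accessModes:
      - ReadWriteMany
      storageClassName: rook-cephfs # placeholder: the SC backed by the Filesystem & Pool
      volumeName: pvc-<uuid>        # the auto-created PV, now set to Retain
      resources:
        requests:
          storage: 100Gi

(If the original PVC was deleted first, the PV may also need its claimRef cleared before it will bind again.)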
+
+### Resizing a CephFS PVC
+Grow capacity->storage on the PV
+Grow resources->requests->storage on the PVC
+
+Verify the new limit: `getfattr -n ceph.quota.max_bytes /mnt/volumes/csi/csi-vol-<uuid>/<uuid>`
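As a sketch, the two fields being grown (only the changed fields are shown; names and sizes are placeholders):

    # On the PV:
    spec:
      capacity:
        storage: 200Gi
    # On the PVC:
    spec:
      resources:
        requests:
          storage: 200Gi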
+
 ## Crush rules for each pool
 
  for i in `ceph osd pool ls`; do echo $i: `ceph osd pool get $i crush_rule`; done

nextcloud/values.yaml: +1 -1

@@ -365,7 +365,7 @@ redis:
 ## ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html#cron
 ##
 cronjob:
-  enabled: false
+  enabled: true
 
   ## Cronjob sidecar resource requests and limits
   ## ref: http://kubernetes.io/docs/user-guide/compute-resources/

seedbox_sync.py: +25 -10

@@ -1,13 +1,21 @@
-# Usage: sonarr_sync.py my-seedbox /seedbox/path/to/data /local/working /local/metadata /local/data
-# Get all file names in HOST:HOST_DATA_PATH
-# Get all previously processed file names in LOCAL_METADATA_PATH
-# Diff the above to get newly added files
-# For each new file:
-#   Copy file to LOCAL_WORKING_PATH (used in case of transfer failure)
-#   Add file name to LOCAL_METADATA_PATH
-#   Move file to LOCAL_DATA_PATH
+# rsync files from a seedbox to a local machine, exactly once, over SSH.
+#
+# Why?
+#  sonarr requires that any Remote Path Mapping have a local path reflecting its contents. This can be done with NFS or SSHFS, but those are difficult to set up in containers, and get wonky when the remote server reboots.
+#  rsync over SSH + cron doesn't care if the remote machine reboots, + easily runs in a container.
+
+# How?
+#  Usage: seedbox_sync.py my-seedbox /seedbox/path/to/data /local/working /local/metadata /local/data
+#  - Get all file names in my-seedbox:/seedbox/path/to/data
+#  - Get all previously processed file names in /local/metadata
+#  - Diff the above to get newly added files
+#  - For each new file:
+#    - Copy file from my-seedbox to /local/working (used in case of transfer failure)
+#    - Add file name to /local/metadata
+#    - Move file to /local/data
 
 # */1 * * * * /usr/bin/run-one /usr/bin/python3 /path/to/seedbox_sync.py <seedbox host> /seedbox/path/to/completed/ /local/path/to/downloading /local/path/to/processed /local/path/to/ready 2>&1 | /usr/bin/logger -t seedbox
+# Or run it in a k8s cronjob.
 
 import subprocess
 import sys
@@ -23,6 +31,7 @@ r = subprocess.run(["ssh", host, "bash", "-c", f"IFS=$'\n'; ls {host_data_path}"
 
 available = {f for f in r.stdout.decode().split('\n') if f}
 
+# There are better ways to list a dir locally, but using bash + ls again avoids any possible formatting discrepancies.
 r = subprocess.run(["bash", "-c", f"IFS=$'\n'; ls {local_metadata_path}"], stdout=subprocess.PIPE, check=True)
 
 processed = {f for f in r.stdout.decode().split('\n') if f}
@@ -36,8 +45,14 @@ for new_file in new:
 
     print(f"Processing: {new_file}")
     subprocess.run(["rsync", "-rsvv", f'{host}:{host_data_path}/{new_file}', f'{local_working_path}'], check=True)
-    r = subprocess.run(["touch", f'{local_metadata_path}/{new_file}'], check=True)
+    subprocess.run(["touch", f'{local_metadata_path}/{new_file}'], check=True)
 
     print(f"Moving to ready: {new_file}")
-    subprocess.run(["rsync", "-r", f'{local_working_path}/{new_file}', f'{local_data_path}'], check=True)
+    try:
+        # rsync here is probably overkill
+        subprocess.run(["rsync", "-r", f'{local_working_path}/{new_file}', f'{local_data_path}'], check=True)
+    except:
+        subprocess.run(["rm", f'{local_metadata_path}/{new_file}'], check=True)
+        raise
+
     subprocess.run(["rm", "-rf", f'{local_working_path}/{new_file}'], check=True)

temp-pvc-pod.yaml: +1 -1

@@ -7,7 +7,7 @@ metadata:
 spec:
   containers:
   - name: nginx
-    image: nginx:1.14.2
+    image: nginx:1.25.1
     volumeMounts:
     - mountPath: /data
       name: data

vaultwarden.yaml: +1 -1

@@ -21,7 +21,7 @@ spec:
     spec:
       containers:
       - name: vaultwarden
-        image: vaultwarden/server:1.26.0
+        image: vaultwarden/server:1.28.1
         ports:
         - containerPort: 80
           name: http-web-svc