Josh Bicking 2 months ago
parent
commit
f2ce2e8ea6

+ 1 - 0
.gitignore

@@ -5,3 +5,4 @@ playbook.retry
 hosts
 secret-*.yaml
 credentials-*
+__pycache__

+ 6 - 1
README.md

@@ -307,6 +307,10 @@ sudo vi /etc/fstab
 none /ceph fuse.ceph ceph.id=admin,ceph.client_fs=data,x-systemd.requires=ceph.target,x-systemd.mount-timeout=5min,_netdev 0 0
 ```
 
+# nfs client
+```
+192.168.1.1:/data /nfs/seedbox nfs rw,soft 0 0
+```
 
 # disable mitigations
 https://unix.stackexchange.com/questions/554908/disable-spectre-and-meltdown-mitigations
@@ -399,5 +403,6 @@ This is a nice PVC option for simpler backup target setups.
 - [ ] explore anubis https://xeiaso.net/talks/2025/surreal-joy-homelab/
 - [ ] explore bitwarden secret integration (similar to 1password integration in https://xeiaso.net/talks/2025/surreal-joy-homelab/)
 - [ ] finish this writeup 🥺👉👈
-- [ ] node affinity + eviction: how do i limit non-rook pods running on rook nodes?
+- [ ] write up: node affinity + eviction, how i limit non-rook pods running on rook nodes
   - PreferNoSchedule taint on rook nodes
+- [ ] write up: seedbox VM & sharing the disk w/ NFS

+ 2 - 1
bazarr.yaml

@@ -20,7 +20,8 @@ spec:
     spec:
       containers:
       - name: bazarr
-        image: lscr.io/linuxserver/bazarr:development-v1.4.4-beta.24-ls617
+        image: lscr.io/linuxserver/bazarr:latest
+        imagePullPolicy: Always
         ports:
         - containerPort: 6767
           name: http-web-svc

+ 2 - 0
cloudflared.yaml

@@ -72,6 +72,8 @@ data:
       service: http://homeassistant-service.homeassistant.svc.cluster.local:8123
     - hostname: ntfy.jibby.org
       service: http://ntfy-service.ntfy.svc.cluster.local:80
+    - hostname: paperless.jibby.org
+      service: http://paperless-service.paperless.svc.cluster.local:8000
     # - hostname: mastodon.jibby.org
     #   service: http://mastodon-service.mastodon.svc.cluster.local:3000
     # - hostname: streaming-mastodon.jibby.org

+ 10 - 2
jellyfin.yaml

@@ -25,7 +25,8 @@ spec:
     spec:
       containers:
       - name: jellyfin
-        image: jellyfin/jellyfin:10.9.8
+        image: jellyfin/jellyfin:latest
+        imagePullPolicy: Always
         ports:
         - containerPort: 8096
           name: http-web-svc
@@ -44,6 +45,8 @@ spec:
           value: "1000"
         - name: PGID
           value: "1000"
+        - name: "MALLOC_TRIM_THRESHOLD_"
+          value: "131072"
         ## NVIDIA
         #- name: NVIDIA_DRIVER_CAPABILITIES
         #  value: "all"
@@ -62,6 +65,11 @@ spec:
         #resources:
         #  limits:
         #    nvidia.com/gpu: 1
+        resources:
+          requests:
+            memory: "0"
+          limits:
+            memory: "2Gi"
       affinity:
         nodeAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
@@ -81,7 +89,7 @@ spec:
       - name: tmpfs
         emptyDir:
           medium: Memory
-          sizeLimit: 6Gi
+          sizeLimit: 2Gi
       # Quicksync
       - name: "render-device"
         hostPath:

+ 1 - 1
lidarr-empty-folders.yaml

@@ -5,7 +5,7 @@ metadata:
   name: lidarr-empty-folders
   namespace: plex
 spec:
-  schedule: "*/1 * * * *"
+  schedule: "*/30 * * * *"
   successfulJobsHistoryLimit: 1
   failedJobsHistoryLimit: 1
   concurrencyPolicy: Forbid

+ 8 - 1
lidarr.yaml

@@ -16,11 +16,12 @@ spec:
       labels:
         app: lidarr
       annotations:
-        backup.velero.io/backup-volumes-excludes: plex,seedbox 
+        backup.velero.io/backup-volumes-excludes: plex,seedbox,scratch
     spec:
       containers:
       - name: lidarr
         image: lscr.io/linuxserver/lidarr:latest
+        imagePullPolicy: Always
         ports:
         - containerPort: 8686
           name: http-web-svc
@@ -38,6 +39,8 @@ spec:
           name: seedbox
         - mountPath: "/config"
           name: config
+        - mountPath: "/scratch"
+          name: scratch
         resources:
           requests:
             memory: "0"
@@ -53,6 +56,10 @@ spec:
       - name: config
         persistentVolumeClaim:
           claimName: lidarr-pvc
+      - name: scratch
+        nfs:
+          server: 172.16.69.52
+          path: /data/torrents
 ---
 apiVersion: v1
 kind: Service

+ 4 - 1
monitoring/grafana/grafana-deployment.yaml

@@ -18,8 +18,11 @@ spec:
         app: grafana
     spec:
       containers:
-      - env: []
+      - env:
+        - name: GF_DATABASE_WAL
+          value: "true"
         image: grafana/grafana:latest
+        imagePullPolicy: Always
         name: grafana
         ports:
         - containerPort: 3000

+ 1 - 1
nextcloud/values.yaml

@@ -22,7 +22,7 @@
 ##
 image:
   repository: nextcloud
-  tag: 29.0.9-apache
+  tag: 31.0.4-apache
   pullPolicy: IfNotPresent
   # pullSecrets:
   #   - myRegistrKeySecretName

+ 2 - 1
ooniprobe.yaml

@@ -26,6 +26,7 @@ spec:
       containers:
       - name: ooniprobe
         image: aaimio/ooniprobe:latest
+        imagePullPolicy: Always
         env:
         - name: informed_consent
           value: "true"
@@ -44,4 +45,4 @@ spec:
       volumes:
       - name: ooniprobe
         persistentVolumeClaim:
-          claimName: ooniprobe-pvc
+          claimName: ooniprobe-pvc

+ 21 - 0
paperless-pvc.yaml

@@ -0,0 +1,21 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: paperless
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: paperless-pvc
+  namespace: paperless
+  labels:
+    app: paperless
+spec:
+  storageClassName: ceph-block-ssd
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 10Gi
+

+ 84 - 0
paperless.yaml

@@ -0,0 +1,84 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: paperless
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: paperless
+  namespace: paperless
+spec:
+  strategy:
+    type: Recreate
+  selector:
+    matchLabels:
+      app: paperless
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: paperless
+    spec:
+      containers:
+      - name: paperless
+        image: ghcr.io/paperless-ngx/paperless-ngx:latest
+        imagePullPolicy: Always
+        ports:
+        - containerPort: 8000
+          name: http-web-svc
+        volumeMounts:
+        - mountPath: "/usr/src/paperless/data"
+          name: data
+          subPath: data
+        - mountPath: "/usr/src/paperless/media"
+          name: data
+          subPath: media
+        - mountPath: "/usr/src/paperless/export"
+          name: data
+          subPath: export
+        - mountPath: "/usr/src/paperless/consume"
+          name: data
+          subPath: consume
+        env:
+        - name: PAPERLESS_REDIS
+          valueFrom:
+            secretKeyRef:
+              name: paperless-secret
+              key: PAPERLESS_REDIS
+        - name: PAPERLESS_DBHOST
+          value: postgres-postgresql.postgres.svc.cluster.local
+        - name: PAPERLESS_URL
+          value: https://paperless.jibby.org
+        - name: POSTGRES_DB
+          value: paperless
+        - name: PAPERLESS_DBUSER
+          valueFrom:
+            secretKeyRef:
+              name: paperless-secret
+              key: PAPERLESS_DBUSER
+        - name: PAPERLESS_DBPASS
+          valueFrom:
+            secretKeyRef:
+              name: paperless-secret
+              key: PAPERLESS_DBPASS
+      volumes:
+      - name: data
+        persistentVolumeClaim:
+          claimName: paperless-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: paperless-service
+  namespace: paperless
+spec:
+  selector:
+    app: paperless
+  type: ClusterIP
+  ports:
+  - name: paperless-web-port
+    protocol: TCP
+    port: 8000
+    targetPort: http-web-svc

+ 1 - 0
prowlarr.yaml

@@ -19,6 +19,7 @@ spec:
       containers:
       - name: prowlarr
         image: lscr.io/linuxserver/prowlarr:latest
+        imagePullPolicy: Always
         ports:
         - containerPort: 9696
           name: http-web-svc

+ 1 - 1
qbittorrentvpn-pvc.yaml

@@ -11,4 +11,4 @@ spec:
     - ReadWriteOnce
   resources:
     requests:
-      storage: 500Mi
+      storage: 1Gi

+ 29 - 17
qbittorrentvpn.yaml

@@ -16,20 +16,25 @@ spec:
       labels:
         app: qbittorrentvpn
       annotations:
-        backup.velero.io/backup-volumes-excludes: seedbox,media,media2,data-ec
+        backup.velero.io/backup-volumes-excludes: seedbox,media,media2,data-ec,scratch
     spec:
       affinity:
         nodeAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
             nodeSelectorTerms:
             - matchExpressions:
-              - key: cluster-ingress
+              - key: seedbox
                 operator: In
                 values:
                 - "true"
+      tolerations:
+      - key: seedbox
+        operator: Equal
+        value: "true"
+        effect: NoSchedule
       containers:
       - name: qbittorrentvpn
-        image: binhex/arch-qbittorrentvpn:5.0.2-1-01
+        image: binhex/arch-qbittorrentvpn:5.1.0-1-01
         ports:
         - containerPort: 8080
           name: http-web-svc
@@ -49,11 +54,8 @@ spec:
           name: data-ec
         - mountPath: "/config"
           name: config
-        resources:
-          requests:
-            memory: "0"
-          limits:
-            memory: "4Gi"
+        - mountPath: "/scratch"
+          name: scratch
       volumes:
       - name: seedbox
         persistentVolumeClaim:
@@ -70,6 +72,10 @@ spec:
       - name: config
         persistentVolumeClaim:
           claimName: qbittorrentvpn-pvc
+      - name: scratch
+        hostPath:
+          path: /mnt/data/torrents
+          type: Directory
 ---
 apiVersion: apps/v1
 kind: Deployment
@@ -88,15 +94,6 @@ spec:
       labels:
         app: qbittorrentvpn-exporter
     spec:
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: cluster-ingress
-                operator: In
-                values:
-                - "true"
       containers:
       - name: qbittorrentvpn-exporter
         image: ghcr.io/esanchezm/prometheus-qbittorrent-exporter:v1.6.0
@@ -172,3 +169,18 @@ spec:
                 name: qbittorrentvpn-service
                 port:
                   number: 8080
+---
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  labels:
+    prometheus: qbittorrent
+    role: alert-rules
+  name: prometheus-qbittorrent-rules
+  namespace: plex
+spec:
+  groups:
+  - name: ./qbittorrent.rules
+    rules:
+    - alert: QbittorrentErroredTorrents
+      expr: sum(qbittorrent_torrents_count{status="error"}) > 0

+ 8 - 1
radarr.yaml

@@ -16,11 +16,12 @@ spec:
       labels:
         app: radarr
       annotations:
-        backup.velero.io/backup-volumes-excludes: plex,seedbox 
+        backup.velero.io/backup-volumes-excludes: plex,seedbox,scratch
     spec:
       containers:
       - name: radarr
         image: lscr.io/linuxserver/radarr:latest
+        imagePullPolicy: Always
         ports:
         - containerPort: 7878
           name: http-web-svc
@@ -36,6 +37,8 @@ spec:
           name: plex
         - mountPath: "/seedbox"
           name: seedbox
+        - mountPath: "/scratch"
+          name: scratch
         - mountPath: "/config"
           name: config
         resources:
@@ -53,6 +56,10 @@ spec:
       - name: config
         persistentVolumeClaim:
           claimName: radarr-pvc
+      - name: scratch
+        nfs:
+          server: 172.16.69.52
+          path: /data/torrents
 ---
 apiVersion: v1
 kind: Service

+ 5 - 1
rook/rook-ceph-operator-values.yaml

@@ -423,9 +423,13 @@ csi:
       operator: Equal
       value: "true"
       effect: PreferNoSchedule
+    - key: seedbox
+      operator: Equal
+      value: "true"
+      effect: NoSchedule
 
   # -- The node labels for affinity of the CephCSI RBD plugin DaemonSet [^1]
-  pluginNodeAffinity: "storage-node=true,false" # key1=value1,value2; key2=value3
+  pluginNodeAffinity: "" # key1=value1,value2; key2=value3
 
   # -- Enable Ceph CSI Liveness sidecar deployment
   enableLiveness: true

+ 95 - 0
seedbox-nfs.yaml

@@ -0,0 +1,95 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: nfs
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nfs-seedbox
+  namespace: nfs
+spec:
+  strategy:
+    type: Recreate
+  selector:
+    matchLabels:
+      app: nfs-seedbox
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: nfs-seedbox
+      annotations:
+        backup.velero.io/backup-volumes-excludes: data
+    spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: seedbox
+                operator: In
+                values:
+                - "true"
+      tolerations:
+      - key: seedbox
+        operator: Equal
+        value: "true"
+        effect: NoSchedule
+      hostNetwork: true
+      containers:
+      - name: nfs
+        image: erichough/nfs-server
+        ports:
+        - containerPort: 2049
+          name: nfs
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: "/data"
+          name: data
+        - mountPath: /etc/exports
+          name: exports
+          subPath: exports
+          readOnly: true
+      volumes:
+      - name: data
+        hostPath:
+          path: /mnt/data
+          type: Directory
+      - name: exports
+        configMap:
+          name: seedbox-nfs-exports
+          items:
+          - key: exports
+            path: exports
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: seedbox-nfs-exports
+  namespace: nfs
+data:
+  exports: |
+    /data 172.16.69.30(rw,no_subtree_check) 172.16.69.31(rw,no_subtree_check) 172.16.69.32(rw,no_subtree_check) 172.16.69.52(rw,no_subtree_check) 172.16.69.253(rw,no_subtree_check) 10.42.0.0/16(rw,no_subtree_check)
+
+# Testing NFS access from within a pod
+# ---
+# apiVersion: v1
+# kind: Pod
+# metadata:
+#   name: test-pd-2
+#   namespace: nfs
+# spec:
+#   containers:
+#   - image: nginx:1.25.1
+#     name: test-container
+#     volumeMounts:
+#     - mountPath: /my-nfs-data
+#       name: test-volume
+#   volumes:
+#   - name: test-volume
+#     nfs:
+#       server: 172.16.69.52
+#       path: /data

+ 8 - 1
sonarr.yaml

@@ -16,11 +16,12 @@ spec:
       labels:
         app: sonarr
       annotations:
-        backup.velero.io/backup-volumes-excludes: plex,seedbox 
+        backup.velero.io/backup-volumes-excludes: plex,seedbox,scratch
     spec:
       containers:
       - name: sonarr
         image: lscr.io/linuxserver/sonarr:latest
+        imagePullPolicy: Always
         ports:
         - containerPort: 8989
           name: http-web-svc
@@ -38,6 +39,8 @@ spec:
           name: seedbox
         - mountPath: "/config"
           name: config
+        - mountPath: "/scratch"
+          name: scratch
         resources:
           requests:
             memory: "0"
@@ -53,6 +56,10 @@ spec:
       - name: config
         persistentVolumeClaim:
           claimName: sonarr-pvc
+      - name: scratch
+        nfs:
+          server: 172.16.69.52
+          path: /data/torrents
 ---
 apiVersion: v1
 kind: Service

+ 1 - 0
tautulli.yaml

@@ -19,6 +19,7 @@ spec:
       containers:
       - name: tautulli
         image: linuxserver/tautulli:latest
+        imagePullPolicy: Always
         ports:
         - containerPort: 8181
           name: http-web-svc

+ 2 - 0
traefik/helmchartconfig.yaml

@@ -93,6 +93,8 @@ spec:
         volumeMounts:
           - name: data
             mountPath: /data
+      podAnnotations:
+        backup.velero.io/backup-volumes-excludes: data
 
 
     # ACME functionality is not supported when running Traefik as a DaemonSet