
cronjobs in k8s, add tautulli

Josh Bicking, 4 months ago
commit e64975518f

+ 2 - 0
lidarr-empty-folders.Dockerfile

@@ -0,0 +1,2 @@
+FROM python:3.11-alpine
+RUN pip install requests

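Note: the lidarr-empty-folders CronJob below references this image as jibby0/lidarr-empty-folders, so the Dockerfile has to be built and pushed out-of-band. A minimal sketch, assuming Docker Hub and the implicit latest tag:

docker build -f lidarr-empty-folders.Dockerfile -t jibby0/lidarr-empty-folders .
docker push jibby0/lidarr-empty-folders
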
+ 56 - 0
lidarr-empty-folders.yaml

@@ -0,0 +1,56 @@
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: lidarr-empty-folders
+  namespace: plex
+spec:
+  schedule: "*/1 * * * *"
+  successfulJobsHistoryLimit: 1
+  failedJobsHistoryLimit: 1
+  concurrencyPolicy: Forbid
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          labels:
+            app: lidarr-empty-folders
+          annotations:
+            backup.velero.io/backup-volumes-excludes: media
+        spec:
+          securityContext:
+            runAsUser: 1000
+            runAsGroup: 1000
+          restartPolicy: OnFailure
+          containers:
+          - name: lidarr-empty-folders
+            image: jibby0/lidarr-empty-folders
+            command:
+            - /bin/sh
+            - -c
+            - python3 /script/lidarr_empty_folders.py $LIDARR_HOST $LIDARR_API_KEY $LIDARR_MUSIC_PATH
+            env:
+              - name: LIDARR_HOST
+                value: https://lidarr.lan.jibby.org
+              - name: LIDARR_API_KEY
+                valueFrom:
+                  secretKeyRef:
+                    name: lidarr-empty-folders
+                    key: api-key
+              - name: LIDARR_MUSIC_PATH
+                value: /media/Music/
+            volumeMounts:
+            - mountPath: "/media"
+              name: media
+            - mountPath: /script
+              name: lidarr-empty-folders
+          volumes:
+          - name: media
+            persistentVolumeClaim:
+              claimName: plex-pvc
+          - name: lidarr-empty-folders
+            configMap:
+              name: lidarr-empty-folders
+              items:
+              - key: lidarr_empty_folders.py
+                path: lidarr_empty_folders.py

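The secretKeyRef above expects a lidarr-empty-folders secret with an api-key entry, which this commit doesn't create. A sketch of creating it, where the placeholder is the API key from Lidarr's Settings > General page:

kubectl -n plex create secret generic lidarr-empty-folders --from-literal=api-key=<lidarr API key>
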
+ 14 - 1
lidarr_empty_folders.py

@@ -1,6 +1,11 @@
+# Lidarr has trouble moving Music without a pre-existing artist folder.
+# 
 # */1 * * * * /usr/bin/run-one /usr/bin/python3 /path/to/lidarr_empty_folders.py <lidarr IP>:8686 <API key> /path/to/Music/ 2>&1 | /usr/bin/logger -t lidarr_empty_folders
+# Or run it in a k8s cronjob. See lidarr-empty-folders.yaml
+# kubectl -n plex create configmap lidarr-empty-folders --from-file=lidarr_empty_folders.py
 
 import requests
+from requests.adapters import HTTPAdapter, Retry
 import os
 import sys
 if len(sys.argv) != 4:
@@ -9,7 +14,15 @@ if len(sys.argv) != 4:
 
 lidarr_server, lidarr_api_key, music_folder = sys.argv[1:4]
 
-resp = requests.get(
+
+retries = Retry(total=10,
+                backoff_factor=1,
+                status_forcelist=[ 500, 502, 503, 504 ])
+s = requests.Session()
+# Mount the retrying adapter on both schemes; LIDARR_HOST is https in the
+# manifest, and a mount on 'http://' alone doesn't match https URLs.
+s.mount('http://', HTTPAdapter(max_retries=retries))
+s.mount('https://', HTTPAdapter(max_retries=retries))
+resp = s.get(
     f"{lidarr_server}/api/v1/artist",
     headers={"Authorization": f"Bearer {lidarr_api_key}"}
     )

+ 1 - 1
nextcloud/pvc.yaml

@@ -27,4 +27,4 @@ spec:
     - ReadWriteOnce
   resources:
     requests:
-      storage: 200Gi
+      storage: 500Gi

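Editing resources.requests.storage on a live PVC only expands it if the StorageClass has allowVolumeExpansion enabled; assuming this claim sits on the same ceph-block class used elsewhere in the repo, that can be checked before applying:

kubectl get sc ceph-block -o jsonpath='{.allowVolumeExpansion}'
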
+ 1 - 1
plex.yaml

@@ -23,7 +23,7 @@ spec:
     spec:
       containers:
       - name: plex
-        image: linuxserver/plex:version-1.40.1.8227-c0dd5a73e
+        image: linuxserver/plex:version-1.40.3.8555-fef15d30c
         # for debugging
         # command: ["/bin/sh"]
         # args: ["-c", "sleep 3600"]

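After an image bump like this, watching the rollout catches a bad tag early. A sketch, assuming the Deployment in plex.yaml is named plex:

kubectl -n plex rollout status deployment/plex
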
+ 2 - 0
rook/rook-ceph-cluster-values.yaml

@@ -109,6 +109,8 @@ cephClusterSpec:
       # are already enabled by other settings in the cluster CR.
       - name: pg_autoscaler
         enabled: true
+      - name: rook
+        enabled: true
 
   # enable the ceph dashboard for viewing cluster status
   dashboard:

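Once Rook reconciles the cluster CR, the new mgr module should show up as enabled. One way to confirm, assuming the standard rook-ceph-tools toolbox Deployment is installed:

kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph mgr module ls
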
+ 7 - 0
seedbox-sync.Dockerfile

@@ -0,0 +1,7 @@
+FROM python:3.11-alpine
+RUN apk update && \
+    apk add openssh bash rsync && \
+    apk cache clean
+# We need a real user to use SSH. https://superuser.com/questions/1761504/openssh-allow-nonexistent-user-to-login
+RUN addgroup -g 1000 nonroot && \
+    adduser -u 1000 nonroot -G nonroot -s /bin/bash -S

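The seedbox-sync CronJob below runs this image with runAsUser/runAsGroup 1000, which is why the Dockerfile creates a matching real user for SSH. A quick sanity check after building, expecting output along the lines of uid=1000(nonroot):

docker build -f seedbox-sync.Dockerfile -t jibby0/seedbox-sync .
docker run --rm --user 1000 jibby0/seedbox-sync id
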
+ 85 - 0
seedbox-sync.yaml

@@ -0,0 +1,85 @@
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: seedbox-sync
+  namespace: plex
+spec:
+  schedule: "*/1 * * * *"
+  successfulJobsHistoryLimit: 1
+  failedJobsHistoryLimit: 1
+  concurrencyPolicy: Forbid
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          labels:
+            app: seedbox-sync
+          annotations:
+            backup.velero.io/backup-volumes-excludes: media
+        spec:
+          restartPolicy: OnFailure
+          containers:
+          - name: seedbox-sync
+            image: jibby0/seedbox-sync
+            command:
+            - /bin/sh
+            - -c
+            # User 1000 runs the script to set folder permissions properly.
+            # We can't change a secret volume owner, so copy the files & chown
+            # them ourselves.
+            - |
+              mkdir /home/nonroot/.ssh &&
+              cp -Lr /ssh/config /ssh/known_hosts /ssh/seedbox_ecdsa /home/nonroot/.ssh &&
+              chmod 400 ~/.ssh/* &&
+              python3 /script/seedbox_sync.py $SEEDBOX_HOST $PATH_TO_COMPLETED $LOCAL_PATH_TO_DOWNLOADING $LOCAL_PATH_TO_PROCESSED $LOCAL_PATH_TO_READY
+            env:
+              - name: SEEDBOX_HOST
+                valueFrom:
+                  secretKeyRef:
+                    name: seedbox-sync
+                    key: seedbox-host
+              - name: PATH_TO_COMPLETED
+                valueFrom:
+                  secretKeyRef:
+                    name: seedbox-sync
+                    key: path-to-completed
+              - name: LOCAL_PATH_TO_DOWNLOADING
+                valueFrom:
+                  secretKeyRef:
+                    name: seedbox-sync
+                    key: local-path-to-downloading
+              - name: LOCAL_PATH_TO_PROCESSED
+                valueFrom:
+                  secretKeyRef:
+                    name: seedbox-sync
+                    key: local-path-to-processed
+              - name: LOCAL_PATH_TO_READY
+                valueFrom:
+                  secretKeyRef:
+                    name: seedbox-sync
+                    key: local-path-to-ready
+            volumeMounts:
+            - mountPath: "/media"
+              name: media
+            - mountPath: /script
+              name: seedbox-sync
+            - mountPath: /ssh
+              name: seedbox-sync-ssh
+            securityContext:
+              runAsUser: 1000
+              runAsGroup: 1000
+          volumes:
+          - name: media
+            persistentVolumeClaim:
+              claimName: plex-pvc
+          - name: seedbox-sync
+            configMap:
+              name: seedbox-sync
+              items:
+              - key: seedbox_sync.py
+                path: seedbox_sync.py
+          - name: seedbox-sync-ssh
+            secret:
+              secretName: seedbox-sync-ssh
+              defaultMode: 0777

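Both secrets referenced above are created out-of-band. A sketch with key names matching the secretKeyRefs and the SSH volume (all values are placeholders; config, known_hosts, and seedbox_ecdsa are the files the job copies into ~/.ssh):

kubectl -n plex create secret generic seedbox-sync \
  --from-literal=seedbox-host=<user@seedbox> \
  --from-literal=path-to-completed=<remote completed dir> \
  --from-literal=local-path-to-downloading=<local downloading dir> \
  --from-literal=local-path-to-processed=<local processed dir> \
  --from-literal=local-path-to-ready=<local ready dir>
kubectl -n plex create secret generic seedbox-sync-ssh \
  --from-file=config --from-file=known_hosts --from-file=seedbox_ecdsa
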
+ 3 - 1
seedbox_sync.py

@@ -15,7 +15,9 @@
 #    - Move file to /local/data
 
 # */1 * * * * /usr/bin/run-one /usr/bin/python3 /path/to/seedbox_sync.py <seedbox host> /seedbox/path/to/completed/ /local/path/to/downloading /local/path/to/processed /local/path/to/ready 2>&1 | /usr/bin/logger -t seedbox
-# Or run it in a k8s cronjob.
+# Or run it in a k8s cronjob. See seedbox-sync.yaml
+# kubectl -n plex create configmap seedbox-sync --from-file=seedbox_sync.py
+
 
 import subprocess
 import sys

+ 6 - 0
shelly-plug-exporter.yaml

@@ -35,6 +35,12 @@ spec:
               name: shelly-plug-exporter
               key: password
               optional: false
+        - name: SHELLY_REQUEST_TIMEOUT
+          value: 30s
+        - name: SERVER_TIMEOUT_READ
+          value: 30s
+        - name: SERVER_TIMEOUT_WRITE
+          value: 30s
 ---
 apiVersion: v1
 kind: Service

+ 14 - 0
tautulli-pvc.yaml

@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: tautulli-pvc
+  namespace: plex
+  labels:
+    app: tautulli
+spec:
+  storageClassName: ceph-block
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 10Gi

+ 67 - 0
tautulli.yaml

@@ -0,0 +1,67 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tautulli
+  namespace: plex
+spec:
+  selector:
+    matchLabels:
+      app: tautulli
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: tautulli
+    spec:
+      containers:
+      - name: tautulli
+        image: linuxserver/tautulli:2.14.3
+        ports:
+        - containerPort: 8181
+          name: http-web-svc
+        env:
+        - name: TZ
+          value: America/New_York
+        - name: PUID
+          value: "1000"
+        - name: PGID
+          value: "1000"
+        volumeMounts:
+        - mountPath: "/config"
+          name: config
+      volumes:
+      - name: config
+        persistentVolumeClaim:
+          claimName: tautulli-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: tautulli-service
+  namespace: plex
+spec:
+  selector:
+    app: tautulli
+  type: ClusterIP
+  ports:
+  - name: tautulli-web-port
+    protocol: TCP
+    port: 8181
+    targetPort: http-web-svc
+---
+apiVersion: traefik.containo.us/v1alpha1
+kind: IngressRoute
+metadata:
+  name: tautulli
+  namespace: plex
+spec:
+  entryPoints:
+  - websecure
+  routes:
+  - kind: Rule
+    match: Host(`tautulli.lan.jibby.org`)
+    services:
+    - kind: Service
+      name: tautulli-service
+      port: 8181

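A quick smoke test for the new Deployment, before DNS for tautulli.lan.jibby.org points at the ingress:

kubectl -n plex rollout status deployment/tautulli
kubectl -n plex port-forward svc/tautulli-service 8181:8181
# then browse http://localhost:8181
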
+ 1 - 1
traefik-helmchartconfig.yaml

@@ -88,7 +88,7 @@ spec:
         # Required to show real IP to proxied services
         externalTrafficPolicy: Local
 
-    # isn't necessary anymore, but reduces hairpinning
+    # pin pod to cluster-ingress node, so ServiceLB gives it the right external IP
     affinity:
       nodeAffinity:
         requiredDuringSchedulingIgnoredDuringExecution: