浏览代码

use ingress instead of ingressroute

Josh Bicking 3 天之前
父节点
当前提交
1dcdb5132b

+ 22 - 2
README.md

@@ -44,11 +44,18 @@ containerd really doesn't want you batch-deleting snapshots.
 
 https://github.com/k3s-io/k3s/issues/1905#issuecomment-820554037
 
+Run the below command a few times until it stops returning results:
+
+```
+sudo k3s ctr -n k8s.io i rm $(sudo k3s ctr -n k8s.io i ls -q)
+```
+
+
+This other command below has given me problems before, but may purge more images. Beware of `error unpacking image: failed to extract layer sha256:1021ef88c7974bfff89c5a0ec4fd3160daac6c48a075f74cff721f85dd104e68: failed to get reader from content store: content digest sha256:fbe1a72f5dcd08ba4ca3ce3468c742786c1f6578c1f6bb401be1c4620d6ff705: not found`. If a digest is reported as not found, re-pulling the affected image should restore it.
 ```
 for sha in $(sudo k3s ctr snapshot usage | awk '{print $1}'); do sudo k3s ctr snapshot rm $sha && echo $sha; done
 ```
 
-Run this a few times until it stops returning results.
 
 ## ingress
 
@@ -157,6 +164,19 @@ conn = connect()
 conn.upload_file('path/to/s3-bucket-listing/index.html', 'public', 'index.html', ExtraArgs={'ContentType': 'text/html'})
 ```
 
+## Imbalance of PGs across OSDs
+
+https://github.com/TheJJ/ceph-balancer
+
+See the README for how this balancing strategy compares to ceph's `balancer` module. 
+
+TLDR:
+```
+$ kubectl -n rook-ceph cp placementoptimizer.py <rook-ceph-tools pod>:/tmp/
+$ kubectl -n rook-ceph exec -it deployment/rook-ceph-tools -- bash
+$ python3 /tmp/placementoptimizer.py -v balance --max-pg-moves 10 | tee /tmp/balance-upmaps
+$ bash /tmp/balance-upmaps
+```
 
 # nvidia driver (on debian)
 
@@ -313,7 +333,7 @@ An A record for `lan.jibby.org` & `*.lan.jibby.org` points to an internal IP.
 
 To be safe, a middleware is included to filter out source IPs outside of the LAN network & k3s CIDR. See `traefik/middleware-lanonly.yaml`.
 
-Then, internal services can be exposed with an IngressRoute, as a subdomain of `lan.jibby.org`. See `sonarr.yaml`'s IngressRoute.
+Then, internal services can be exposed with an Ingress, as a subdomain of `lan.jibby.org`. See `examples/nginx`'s Ingress.
 
 # Backups
 

+ 16 - 14
bazarr.yaml

@@ -59,21 +59,23 @@ spec:
     port: 6767
     targetPort: http-web-svc
 ---
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
+apiVersion: networking.k8s.io/v1
+kind: Ingress
 metadata:
   name: bazarr
   namespace: plex
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.middlewares: kube-system-lanonly@kubernetescrd
 spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`bazarr.lan.jibby.org`)
-    services:
-    - kind: Service
-      name: bazarr-service
-      port: 6767
-    middlewares:
-    - name: lanonly
-      namespace: kube-system
+  rules:
+    - host: bazarr.lan.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: bazarr-service
+                port:
+                  number: 6767

+ 16 - 14
duplicati.yaml

@@ -105,21 +105,23 @@ spec:
               claimName: media2-pvc
           restartPolicy: OnFailure
 ---
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
+apiVersion: networking.k8s.io/v1
+kind: Ingress
 metadata:
   name: duplicati
   namespace: plex
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.middlewares: kube-system-lanonly@kubernetescrd
 spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`duplicati.lan.jibby.org`)
-    services:
-    - kind: Service
-      name: duplicati-service
-      port: 8200
-    middlewares:
-    - name: lanonly
-      namespace: kube-system
+  rules:
+    - host: duplicati.lan.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: duplicati-service
+                port:
+                  number: 8200

+ 15 - 13
examples/nginx/nginx.yaml

@@ -12,7 +12,6 @@ spec:
     ports:
       - containerPort: 80
         name: http-web-svc
-
 ---
 apiVersion: v1
 kind: Service
@@ -27,19 +26,22 @@ spec:
     protocol: TCP
     port: 80
     targetPort: http-web-svc
-
 ---
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
+apiVersion: networking.k8s.io/v1
+kind: Ingress
 metadata:
   name: nginx
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
 spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`nginx.example.com`)
-    services:
-    - kind: Service
-      name: nginx-service
-      port: 80
+  rules:
+    - host: nginx.example.com
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: nginx-service
+                port:
+                  number: 80

+ 0 - 16
gogs.yaml

@@ -67,19 +67,3 @@ spec:
     protocol: TCP
     port: 22
     targetPort: ssh-svc
----
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
-metadata:
-  name: gogs
-  namespace: gogs
-spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`gogs.jibby.org`)
-    services:
-    - kind: Service
-      name: gogs-service
-      port: 3000

+ 0 - 16
homeassistant.yaml

@@ -55,22 +55,6 @@ spec:
     port: 8123
     targetPort: http-web-svc
 ---
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
-metadata:
-  name: homeassistant
-  namespace: homeassistant
-spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`homeassistant.jibby.org`)
-    services:
-    - kind: Service
-      name: homeassistant-service
-      port: 8123
----
 apiVersion: apps/v1
 kind: Deployment
 metadata:

+ 18 - 14
jellyfin.yaml

@@ -34,8 +34,8 @@ spec:
           name: config
         - mountPath: "/media"
           name: media
-        #- mountPath: "/transcodes"
-        #  name: tmpfs
+        - mountPath: "/transcodes"
+          name: tmpfs
         # Quicksync
         - name: "render-device"
           mountPath: "/dev/dri/renderD128"
@@ -81,7 +81,7 @@ spec:
       - name: tmpfs
         emptyDir:
           medium: Memory
-          sizeLimit: 12Gi
+          sizeLimit: 6Gi
       # Quicksync
       - name: "render-device"
         hostPath:
@@ -102,18 +102,22 @@ spec:
     port: 8096
     targetPort: http-web-svc
 ---
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
+apiVersion: networking.k8s.io/v1
+kind: Ingress
 metadata:
   name: jellyfin
   namespace: plex
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
 spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`jellyfin.jibby.org`)
-    services:
-    - kind: Service
-      name: jellyfin-service
-      port: 8096
+  rules:
+    - host: jellyfin.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: jellyfin-service
+                port:
+                  number: 8096

+ 16 - 14
lidarr.yaml

@@ -69,21 +69,23 @@ spec:
     port: 8686
     targetPort: http-web-svc
 ---
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
+apiVersion: networking.k8s.io/v1
+kind: Ingress
 metadata:
   name: lidarr
   namespace: plex
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.middlewares: kube-system-lanonly@kubernetescrd
 spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`lidarr.lan.jibby.org`)
-    services:
-    - kind: Service
-      name: lidarr-service
-      port: 8686
-    middlewares:
-    - name: lanonly
-      namespace: kube-system
+  rules:
+    - host: lidarr.lan.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: lidarr-service
+                port:
+                  number: 8686

+ 65 - 0
magicmirror-pvc.yaml

@@ -0,0 +1,65 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+    name: magicmirror
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: magicmirror-pvc
+  namespace: magicmirror
+  labels:
+    app: magicmirror
+spec:
+  storageClassName: ceph-block-ssd
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 500Mi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: magicmirror-module-pvc
+  namespace: magicmirror
+  labels:
+    app: magicmirror
+spec:
+  storageClassName: ceph-block-ssd
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 500Mi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: magicmirror-css-pvc
+  namespace: magicmirror
+  labels:
+    app: magicmirror
+spec:
+  storageClassName: ceph-block-ssd
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 10Mi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: magicmirror-config-pvc
+  namespace: magicmirror
+  labels:
+    app: magicmirror
+spec:
+  storageClassName: ceph-block-ssd
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 10Mi

+ 87 - 0
magicmirror.yaml

@@ -0,0 +1,87 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+    name: magicmirror
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: magicmirror
+  namespace: magicmirror
+spec:
+  strategy:
+    type: Recreate
+  selector:
+    matchLabels:
+      app: magicmirror
+  template:
+    metadata:
+      labels:
+        app: magicmirror
+    spec:
+      containers:
+      - name: magicmirror
+        image: jibby0/magicmirror-gkeepapi:v2.29.0 
+        imagePullPolicy: Always
+        ports:
+        - containerPort: 8080
+          name: http
+        env:
+        - name: MM_SCENARIO
+          value: server
+        volumeMounts:
+        - name: config
+          mountPath: "/opt/magic_mirror/config"
+        - name: css
+          mountPath: "/opt/magic_mirror/css"
+        - name: modules
+          mountPath: "/opt/magic_mirror/modules"
+      volumes:
+      - name: config
+        persistentVolumeClaim:
+          claimName: magicmirror-config-pvc
+      - name: css
+        persistentVolumeClaim:
+          claimName: magicmirror-css-pvc
+      - name: modules
+        persistentVolumeClaim:
+          claimName: magicmirror-module-pvc
+      securityContext:
+        fsGroup: 1000
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: magicmirror-service
+  namespace: magicmirror
+  labels:
+    app: magicmirror
+spec:
+  selector:
+    app: magicmirror
+  ports:
+  - port: 8080
+    targetPort: 8080
+    name: http
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: magicmirror
+  namespace: magicmirror
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.middlewares: kube-system-lanonly@kubernetescrd
+spec:
+  rules:
+    - host: magicmirror.lan.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: magicmirror-service
+                port:
+                  number: 8080

+ 0 - 16
miniflux.yaml

@@ -56,19 +56,3 @@ spec:
     protocol: TCP
     port: 8080
     targetPort: http-web-svc
----
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
-metadata:
-  name: miniflux
-  namespace: miniflux
-spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`miniflux.jibby.org`)
-    services:
-    - kind: Service
-      name: miniflux-service
-      port: 8080

+ 21 - 0
monitoring/alertmanager-ingress.yaml

@@ -0,0 +1,21 @@
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: alertmanager-operated
+  namespace: monitoring
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.middlewares: kube-system-lanonly@kubernetescrd
+spec:
+  rules:
+    - host: alertmanager.lan.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: alertmanager-operated
+                port:
+                  number: 9093

+ 0 - 20
monitoring/alertmanager-ingressroute.yaml

@@ -1,20 +0,0 @@
----
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
-metadata:
-  name: alertmanager-operated
-  namespace: monitoring
-spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`alertmanager.lan.jibby.org`)
-    services:
-    - kind: Service
-      name: alertmanager-operated
-      port: 9093
-    middlewares:
-    - name: lanonly
-      namespace: kube-system
-

+ 21 - 0
monitoring/grafana/grafana-ingress.yaml

@@ -0,0 +1,21 @@
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: grafana
+  namespace: monitoring
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.middlewares: kube-system-lanonly@kubernetescrd
+spec:
+  rules:
+    - host: grafana.lan.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: grafana
+                port:
+                  number: 3000

+ 0 - 19
monitoring/grafana/grafana-ingressroute.yaml

@@ -1,19 +0,0 @@
----
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
-metadata:
-  name: grafana
-  namespace: monitoring
-spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`grafana.lan.jibby.org`)
-    services:
-    - kind: Service
-      name: grafana
-      port: 3000
-    middlewares:
-    - name: lanonly
-      namespace: kube-system

+ 15 - 11
monitoring/ntfy-alertmanager.yaml

@@ -44,19 +44,23 @@ spec:
     port: 80
     targetPort: http-web-svc
 ---
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
+apiVersion: networking.k8s.io/v1
+kind: Ingress
 metadata:
   name: ntfy-alertmanager
   namespace: monitoring
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
 spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`ntfy-alertmanager.jibby.org`)
-    services:
-    - kind: Service
-      name: ntfy-alertmanager
-      port: 80
+  rules:
+    - host: ntfy-alertmanager.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: ntfy-alertmanager
+                port:
+                  number: 80
 

+ 21 - 0
monitoring/prometheus/prometheus-ingress.yaml

@@ -0,0 +1,21 @@
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: prometheus
+  namespace: monitoring
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.middlewares: kube-system-lanonly@kubernetescrd
+spec:
+  rules:
+    - host: prometheus.lan.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: prometheus
+                port:
+                  number: 9090

+ 0 - 19
monitoring/prometheus/prometheus-ingressroute.yaml

@@ -1,19 +0,0 @@
----
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
-metadata:
-  name: prometheus
-  namespace: monitoring
-spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`prometheus.lan.jibby.org`)
-    services:
-    - kind: Service
-      name: prometheus
-      port: 9090
-    middlewares:
-    - name: lanonly
-      namespace: kube-system

+ 16 - 11
nextcloud/ingress.yaml

@@ -1,15 +1,20 @@
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
 metadata:
   name: nextcloud
   namespace: nextcloud
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
 spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`nextcloud.jibby.org`)
-    services:
-    - kind: Service
-      name: nextcloud
-      port: 8080
+  rules:
+    - host: nextcloud.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: nextcloud
+                port:
+                  number: 8080

+ 1 - 1
plex-pvc.yaml

@@ -42,4 +42,4 @@ spec:
     - ReadWriteMany
   resources:
     requests:
-      storage: 40Ti
+      storage: 40Ti

+ 16 - 12
plex.yaml

@@ -92,7 +92,7 @@ spec:
       - name: tmpfs
         emptyDir:
           medium: Memory
-          sizeLimit: 12Gi
+          sizeLimit: 6Gi
       # Quicksync
       - name: "render-device"
         hostPath:
@@ -113,18 +113,22 @@ spec:
     port: 32400
     targetPort: http-web-svc
 ---
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
+apiVersion: networking.k8s.io/v1
+kind: Ingress
 metadata:
   name: plex
   namespace: plex
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
 spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`plex.jibby.org`)
-    services:
-    - kind: Service
-      name: plex-service
-      port: 32400
+  rules:
+    - host: plex.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: plex-service
+                port:
+                  number: 32400

+ 17 - 15
prowlarr.yaml

@@ -18,7 +18,7 @@ spec:
     spec:
       containers:
       - name: prowlarr
-        image: lscr.io/linuxserver/prowlarr:nightly-1.21.1.4626-ls25
+        image: lscr.io/linuxserver/prowlarr:latest
         ports:
         - containerPort: 9696
           name: http-web-svc
@@ -52,21 +52,23 @@ spec:
     port: 9696
     targetPort: http-web-svc
 ---
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
+apiVersion: networking.k8s.io/v1
+kind: Ingress
 metadata:
   name: prowlarr
   namespace: plex
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.middlewares: kube-system-lanonly@kubernetescrd
 spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`prowlarr.lan.jibby.org`)
-    services:
-    - kind: Service
-      name: prowlarr-service
-      port: 9696
-    middlewares:
-    - name: lanonly
-      namespace: kube-system
+  rules:
+    - host: prowlarr.lan.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: prowlarr-service
+                port:
+                  number: 9696

+ 16 - 14
radarr.yaml

@@ -69,21 +69,23 @@ spec:
     port: 7878
     targetPort: http-web-svc
 ---
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
+apiVersion: networking.k8s.io/v1
+kind: Ingress
 metadata:
   name: radarr
   namespace: plex
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.middlewares: kube-system-lanonly@kubernetescrd
 spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`radarr.lan.jibby.org`)
-    services:
-    - kind: Service
-      name: radarr-service
-      port: 7878
-    middlewares:
-    - name: lanonly
-      namespace: kube-system
+  rules:
+    - host: radarr.lan.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: radarr-service
+                port:
+                  number: 7878

+ 3 - 3
rook/buckets/ceph-objectstore-ec-user-jibby.yaml

@@ -7,9 +7,9 @@ spec:
   store: ceph-objectstore-ec
   displayName: jibby
   quotas:
-    maxBuckets: 1
-    maxSize: 10G
-    maxObjects: 10000
+    maxBuckets: 2
+    maxSize: 28T
+    maxObjects: -1
   capabilities:
     user: "*"
     bucket: "*"

+ 16 - 11
rook/buckets/ingress.yaml

@@ -1,15 +1,20 @@
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
 metadata:
   name: s3
   namespace: rook-ceph
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
 spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`s3.jibby.org`)
-    services:
-    - kind: Service
-      name: rook-ceph-rgw-ceph-objectstore-ec
-      port: 6981
+  rules:
+    - host: s3.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: rook-ceph-rgw-ceph-objectstore-ec
+                port:
+                  number: 6981

+ 39 - 0
rook/data/data-ec-filesystem.yaml

@@ -0,0 +1,39 @@
+# TODO move to the main helm values
+apiVersion: ceph.rook.io/v1
+kind: CephFilesystem
+metadata:
+  name: data-ec
+  namespace: rook-ceph
+spec:
+  metadataPool:
+    replicated:
+      size: 3
+    deviceClass: ssd
+  dataPools:
+    - name: default
+      replicated:
+        size: 3
+      deviceClass: hdd
+    - name: erasurecoded
+      erasureCoded:
+        dataChunks: 2
+        codingChunks: 1
+      deviceClass: hdd
+  preserveFilesystemOnDelete: true
+  metadataServer:
+    activeCount: 1
+    activeStandby: true
+    placement:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+          - matchExpressions:
+            - key: storage-node
+              operator: In
+              values:
+              - "true"
+      tolerations:
+      - key: storage-node
+        operator: Exists
+    priorityClassName: system-cluster-critical
+

+ 15 - 0
rook/data/data-ec-pvc.yaml

@@ -0,0 +1,15 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: data-ec-pvc
+  namespace: plex
+spec:
+  storageClassName: data-ec-sc
+  volumeName: data-ec-static-pv
+  volumeMode: Filesystem
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 50Ti

+ 17 - 0
rook/data/data-ec-sc.yaml

@@ -0,0 +1,17 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: data-ec-sc
+parameters:
+  clusterID: rook-ceph
+  fsName: data-ec
+  pool: data-ec-erasurecoded
+  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
+  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
+  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
+  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+provisioner: rook-ceph.cephfs.csi.ceph.com
+reclaimPolicy: Delete
+allowVolumeExpansion: true

+ 28 - 0
rook/data/data-ec-static-pv.yaml

@@ -0,0 +1,28 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: data-ec-static-pv
+spec:
+  accessModes:
+  - ReadWriteMany
+  capacity:
+    storage: 20Ti
+  csi:
+    controllerExpandSecretRef:
+      name: rook-csi-cephfs-provisioner
+      namespace: rook-ceph
+    driver: rook-ceph.cephfs.csi.ceph.com
+    nodeStageSecretRef:
+      name: rook-csi-cephfs-node
+      namespace: rook-ceph
+    volumeAttributes:
+      clusterID: rook-ceph
+      fsName: data-ec
+      pool: data-ec-erasurecoded
+      storage.kubernetes.io/csiProvisionerIdentity: 1737727666651-6570-rook-ceph.cephfs.csi.ceph.com
+      subvolumeName: csi-vol-c06598f6-39eb-47c0-a812-a3f17c59e567
+      subvolumePath: /volumes/csi/csi-vol-c06598f6-39eb-47c0-a812-a3f17c59e567/8a27a294-0884-4778-b664-c2046449c937
+    volumeHandle: 0001-0009-rook-ceph-000000000000000a-c06598f6-39eb-47c0-a812-a3f17c59e567
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: data-ec-sc
+  volumeMode: Filesystem

+ 16 - 14
rook/ingress.yaml

@@ -1,19 +1,21 @@
 ---
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
+apiVersion: networking.k8s.io/v1
+kind: Ingress
 metadata:
   name: rook-ceph-dashboard
   namespace: rook-ceph
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.middlewares: kube-system-lanonly@kubernetescrd
 spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`ceph.lan.jibby.org`)
-    services:
-    - kind: Service
-      name: rook-ceph-mgr-dashboard
-      port: 8080
-    middlewares:
-    - name: lanonly
-      namespace: kube-system
+  rules:
+    - host: ceph.lan.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: rook-ceph-mgr-dashboard
+                port:
+                  number: 8080

+ 69 - 67
rook/rook-ceph-cluster-values.yaml

@@ -629,73 +629,75 @@ cephBlockPoolsVolumeSnapshotClass:
 # -- A list of CephObjectStore configurations to deploy
 # @default -- See [below](#ceph-object-stores)
 cephObjectStores:
-  - name: ceph-objectstore
-    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
-    spec:
-      metadataPool:
-        failureDomain: host
-        replicated:
-          size: 3
-        deviceClass: ssd
-      dataPool:
-        failureDomain: host
-        erasureCoded:
-          dataChunks: 2
-          codingChunks: 1
-        deviceClass: hdd
-      preservePoolsOnDelete: true
-      gateway:
-        placement:
-          nodeAffinity:
-            requiredDuringSchedulingIgnoredDuringExecution:
-              nodeSelectorTerms:
-                - matchExpressions:
-                  - key: storage-node
-                    operator: In
-                    values:
-                      - "true"
-          podAffinity:
-          podAntiAffinity:
-          topologySpreadConstraints:
-          tolerations:
-          - key: storage-node
-            operator: Exists
-        port: 6980
-        resources:
-          limits:
-            cpu: "500m"
-            memory: "2Gi"
-          requests:
-            cpu: "250m"
-            memory: "1Gi"
-        # securePort: 443
-        # sslCertificateRef:
-        instances: 1
-        priorityClassName: system-cluster-critical
-      healthCheck:
-        bucket:
-          interval: 60s
-    storageClass:
-      enabled: true
-      name: ceph-bucket
-      reclaimPolicy: Delete
-      volumeBindingMode: "Immediate"
-      # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration
-      parameters:
-        # note: objectStoreNamespace and objectStoreName are configured by the chart
-        region: us-east-1
-    ingress:
-      # Enable an ingress for the ceph-objectstore
-      enabled: false
-      # annotations: {}
-      # host:
-      #   name: objectstore.example.com
-      #   path: /
-      # tls:
-      # - hosts:
-      #     - objectstore.example.com
-      #   secretName: ceph-objectstore-tls
-      # ingressClassName: nginx
+#   - name: ceph-objectstore
+#     # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
+#     spec:
+#       metadataPool:
+#         failureDomain: host
+#         replicated:
+#           size: 3
+#         deviceClass: ssd
+#       dataPool:
+#         failureDomain: host
+#         replicated:
+#           size: 3
+#           # erasureCoded:
+#           #   dataChunks: 2
+#           #   codingChunks: 1
+#         deviceClass: hdd
+#       preservePoolsOnDelete: true
+#       gateway:
+#         placement:
+#           nodeAffinity:
+#             requiredDuringSchedulingIgnoredDuringExecution:
+#               nodeSelectorTerms:
+#                 - matchExpressions:
+#                   - key: storage-node
+#                     operator: In
+#                     values:
+#                       - "true"
+#           podAffinity:
+#           podAntiAffinity:
+#           topologySpreadConstraints:
+#           tolerations:
+#           #- key: storage-node
+#           #  operator: Exists
+#         port: 6980
+#         resources:
+#           limits:
+#             cpu: "500m"
+#             memory: "2Gi"
+#           requests:
+#             cpu: "250m"
+#             memory: "1Gi"
+#         # securePort: 443
+#         # sslCertificateRef:
+#         instances: 1
+#         priorityClassName: system-cluster-critical
+#       healthCheck:
+#         bucket:
+#           interval: 60s
+#     storageClass:
+#       enabled: true
+#       name: ceph-bucket
+#       reclaimPolicy: Delete
+#       volumeBindingMode: "Immediate"
+#       # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration
+#       parameters:
+#         # note: objectStoreNamespace and objectStoreName are configured by the chart
+#         region: us-east-1
+#     ingress:
+#       # Enable an ingress for the ceph-objectstore
+#       enabled: false
+#       # annotations: {}
+#       # host:
+#       #   name: objectstore.example.com
+#       #   path: /
+#       # tls:
+#       # - hosts:
+#       #     - objectstore.example.com
+#       #   secretName: ceph-objectstore-tls
+#       # ingressClassName: nginx
 # cephECBlockPools are disabled by default, please remove the comments and set desired values to enable it
 #cephECBlockPools:
 #  # For erasure coded a replicated metadata pool is required.

+ 1 - 2
rook/seedbox/seedbox-filesystem.yaml

@@ -14,8 +14,7 @@ spec:
         size: 2
       deviceClass: hdd
       parameters:
-        # had constant 50% misplaced objects when size=2, min_size=1
-        min_size: "2"
+        min_size: "1"
   metadataServer:
     activeCount: 1
     activeStandby: true

+ 14 - 0
rook/wait_for_ceph_repair.sh

@@ -0,0 +1,14 @@
+#!/bin/bash -ex
+
+RET=0
+while [ $RET -eq "0" ]; do
+	sleep 300
+	date
+	kubectl rook-ceph ceph status > /tmp/cephstatus
+	if [ $? -eq "0" ]; then
+		cat /tmp/cephstatus | grep -e 'objects misplaced' -e 'objects degraded'
+		RET=$?
+	fi
+done
+
+curl --fail -u ${NTFY_USER}:${NTFY_PASS} -d "no objects misplaced" ${NTFY_URL}

+ 0 - 6
shelly-plug-exporter.yaml

@@ -37,12 +37,6 @@ spec:
               name: shelly-plug-exporter
               key: password
               optional: false
-        - name: SHELLY_REQUEST_TIMEOUT
-          value: 30s
-        - name: SERVER_TIMEOUT_READ
-          value: 30s
-        - name: SERVER_TIMEOUT_WRITE
-          value: 30s
         - name: LOG_DEBUG
           value: "1"
         - name: LOG_JSON

+ 17 - 14
sonarr.yaml

@@ -69,21 +69,24 @@ spec:
     port: 8989
     targetPort: http-web-svc
 ---
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
+apiVersion: networking.k8s.io/v1
+kind: Ingress
 metadata:
   name: sonarr
   namespace: plex
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.middlewares: kube-system-lanonly@kubernetescrd
 spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`sonarr.lan.jibby.org`)
-    services:
-    - kind: Service
-      name: sonarr-service
-      port: 8989
-    middlewares:
-    - name: lanonly
-      namespace: kube-system
+  rules:
+    - host: sonarr.lan.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: sonarr-service
+                port:
+                  number: 8989
+

+ 17 - 15
tautulli.yaml

@@ -18,7 +18,7 @@ spec:
     spec:
       containers:
       - name: tautulli
-        image: linuxserver/tautulli:2.14.4
+        image: linuxserver/tautulli:latest
         ports:
         - containerPort: 8181
           name: http-web-svc
@@ -52,21 +52,23 @@ spec:
     port: 8181
     targetPort: http-web-svc
 ---
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
+apiVersion: networking.k8s.io/v1
+kind: Ingress
 metadata:
   name: tautulli
   namespace: plex
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.middlewares: kube-system-lanonly@kubernetescrd
 spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`tautulli.lan.jibby.org`)
-    services:
-    - kind: Service
-      name: tautulli-service
-      port: 8181
-    middlewares:
-    - name: lanonly
-      namespace: kube-system
+  rules:
+    - host: tautulli.lan.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: tautulli-service
+                port:
+                  number: 8181

+ 6 - 0
traefik/configmap.yaml

@@ -19,6 +19,8 @@ data:
           service: octoprint
           middlewares:
             - "lanonly"
+          entrypoints:
+            - "websecure"
       services:
         octoprint:
           loadBalancer:
@@ -32,6 +34,8 @@ data:
           service: pikvm
           middlewares:
             - "lanonly"
+          entrypoints:
+            - "websecure"
       services:
         pikvm:
           loadBalancer:
@@ -49,6 +53,8 @@ data:
           service: proxmox
           middlewares:
             - "lanonly"
+          entrypoints:
+            - "websecure"
       services:
         proxmox:
           loadBalancer:

+ 0 - 14
traefik/dashboard.yaml

@@ -1,14 +0,0 @@
-# TODO redo this as a .lan domain
-# k3s doesn't expose the traefik dashboard in a service by default
-apiVersion: v1
-kind: Service
-metadata:
-  name: traefik-dashboard
-spec:
-  ports:
-  - name: traefik
-    port: 9000
-    targetPort: 9000
-  selector:
-    app.kubernetes.io/name: traefik
-  type: ClusterIP

+ 16 - 1
traefik/helmchartconfig.yaml

@@ -27,7 +27,7 @@ spec:
       # Configuration for extra routers
       - "--providers.file.directory=/config"
 
-      - "--log.level=INFO"
+      #- "--log.level=INFO"
       # debug, uncomment for testing
       #- "--log.level=DEBUG"
       #- "--certificatesresolvers.letsencrypt.acme.caServer=https://acme-staging-v02.api.letsencrypt.org/directory"
@@ -51,6 +51,7 @@ spec:
             key: api-key
             optional: false
 
+    # TODO can this be something with ReadWriteMany?
     persistence:
       enabled: true
       storageClass: ceph-block-ssd
@@ -58,6 +59,20 @@ spec:
     metrics:
       prometheus:
         addServicesLabels: true
+    logs:
+      access:
+        enabled: true
+
+    ingressRoute:
+      dashboard:
+        enabled: false
+        #enabled: true
+        #matchRule: Host(`traefik.lan.jibby.org`)
+        # TODO auth https://doc.traefik.io/traefik/middlewares/http/basicauth/
+        #entryPoints: ["websecure"]
+        #middlewares:
+        #- name: lanonly
+        #  namespace: kube-system
 
     # Fix for acme.json file being changed to 660 from 600
     # This can manifest as the incredibly unhelpful "the router <router name> uses a non-existent resolver: <resolver name>"

+ 1 - 1
watchlistarr.yaml

@@ -30,7 +30,7 @@ spec:
           requests:
             memory: '0'
           limits:
-            memory: 500Mi
+            memory: 1Gi
       volumes:
       - name: watchlistarr-secret
         secret:

+ 36 - 20
whoami.yaml

@@ -41,27 +41,43 @@ spec:
     port: 80
     targetPort: http-web-svc
 ---
-apiVersion: traefik.containo.us/v1alpha1
-kind: IngressRoute
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: whoami-lan
+  namespace: whoami
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.middlewares: kube-system-lanonly@kubernetescrd
+spec:
+  rules:
+    - host: whoami.lan.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: whoami-service
+                port:
+                  number: 80
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
 metadata:
   name: whoami
   namespace: whoami
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
 spec:
-  entryPoints:
-  - websecure
-  routes:
-  - kind: Rule
-    match: Host(`whoami.jibby.org`)
-    services:
-    - kind: Service
-      name: whoami-service
-      port: 80
-  - kind: Rule
-    match: Host(`whoami.lan.jibby.org`)
-    services:
-    - kind: Service
-      name: whoami-service
-      port: 80
-    middlewares:
-    - name: lanonly
-      namespace: kube-system
+  rules:
+    - host: whoami.jibby.org
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: whoami-service
+                port:
+                  number: 80