
gpu sharing & object storage working

Josh Bicking · 2 years ago
commit 1440bee64a

+ 53 - 2
README.md

@@ -13,14 +13,50 @@ KUBECONFIG=/etc/rancher/k3s/k3s.yaml helm install --create-namespace --namespace
 
 ## things in the rook folder
 
-## reference
+## Sharing 1 CephFS instance between multiple PVCs
+
 https://github.com/rook/rook/blob/677d3fa47f21b07245e2e4ab6cc964eb44223c48/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md
 
+- Create a CephFilesystem
+- Create an SC backed by the filesystem & pool
+- Ensure the CSI subvolumegroup was created. If not: `ceph fs subvolumegroup create <fsname> csi`
+- Create a PVC without a specified PV: a PV will be auto-created
+- Set the created PV to `persistentVolumeReclaimPolicy: Retain` (patch sketch below)
+- Create a new, better-named PVC
+
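+Flipping the reclaim policy on the auto-created PV looks like this (the PV name here is hypothetical; use whatever name the provisioner generated):
+
+```sh
+# keep the backing volume when the throwaway PVC is deleted
+kubectl patch pv pvc-1234abcd -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
+```
+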
 If important data is on CephBlockPool-backed PVCs, don't forget to set the PV's persistentVolumeReclaimPolicy to `Retain`.
 
 ## tolerations
 If your setup divides k8s nodes into ceph & non-ceph nodes (using a label, like `storage-node=true`), ensure labels & a toleration are set properly (`storage-node=false`, with a toleration checking for `storage-node`) so non-ceph nodes still run PV plugin Daemonsets.
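+
+A sketch of the toleration side, as it would go in rook-ceph-operator-values.yaml (assuming the chart's `csi.pluginTolerations` knob, with the key matching the `storage-node` taint above):
+
+```yaml
+csi:
+  # let the CSI plugin DaemonSets schedule onto tainted storage nodes too
+  pluginTolerations:
+    - key: storage-node
+      operator: Exists
+```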
 
+## CephFS w/ EC backing pool
+
+EC-backed filesystems require a regular replicated pool as the default data pool:
+
+https://lists.ceph.io/hyperkitty/list/ceph-users@ceph.io/thread/Y6T7OVTC4XAAWMFTK3MYGC7TB6G47OCH/
+https://tracker.ceph.com/issues/42450
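+
+The shape that works (see rook/media2/media2-filesystem.yaml in this commit) is a replicated `default` pool listed first, with the EC pool second and selected via the SC's `pool` parameter:
+
+```yaml
+dataPools:
+  - name: default          # replicated pool; CephFS requires this as the default
+    replicated:
+      size: 3
+  - name: erasurecoded     # EC pool that actually backs the bulk data
+    erasureCoded:
+      dataChunks: 2
+      codingChunks: 1
+```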
+
+
+## ObjectStore
+
+If hostNetwork is enabled on the cluster, ensure rook-ceph-operator is not running with hostNetwork enabled. The operator doesn't need host network access to orchestrate the cluster, and running it with hostNetwork impedes orchestration of ObjectStores & associated resources.
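+
+The relevant operator chart value (flipped in rook-ceph-operator-values.yaml in this commit):
+
+```yaml
+## if true, run rook operator on the host network
+useOperatorHostNetwork: false
+```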
+
+## public s3-interface bucket listing w/ HTML
+
+This is great for setting up easy public downloads.
+
+- Create a user (rook/buckets/user-josh.yaml)
+- `kubectl -n rook-ceph get secret rook-ceph-object-user-ceph-objectstore-josh -o go-template='{{range $k,$v := .data}}{{printf "%s: " $k}}{{if not $v}}{{$v}}{{else}}{{$v | base64decode}}{{end}}{{"\n"}}{{end}}'`
+- Create bucket (rook/buckets/bucket.py::create_bucket)
+- Set policy (rook/buckets/bucket.py::set_public_read_policy)
+- Upload the file:
+```python
+from bucket import *
+conn = connect()
+conn.upload_file('path/to/s3-bucket-listing/index.html', 'public', 'index.html', ExtraArgs={'ContentType': 'text/html'})
+```
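+
+A quick sanity check that the listing is publicly reachable (endpoint per the cloudflared/ingress config in this commit):
+
+```sh
+curl -s https://s3.jibby.org/public/index.html | head
+```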
+
+
 # nvidia driver (on debian)
 curl -s -L https://nvidia.github.io/nvidia-container-runtime/gpgkey |   sudo apt-key add -
 distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
@@ -79,7 +115,22 @@ Ensure the pods on the namespace are Running.
 
 Test GPU passthrough by applying examples/cuda-pod.yaml, then exec-ing into it & running `nvidia-smi`.
 
-Currently, 1 GPU = 1 pod can use the GPU.
+## Sharing GPU
+
+https://github.com/NVIDIA/k8s-device-plugin#shared-access-to-gpus-with-cuda-time-slicing
+
+```yaml
+version: v1
+sharing:
+  timeSlicing:
+    renameByDefault: false
+    failRequestsGreaterThanOne: false
+    resources:
+    - name: nvidia.com/gpu
+      replicas: 5
+```
+
+$ helm upgrade -i nvdp nvdp/nvidia-device-plugin ... --set-file config.map.config=nvidia-device-plugin-config.yaml
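+
+If the config took, the node should now advertise 5 `nvidia.com/gpu` (matching `replicas` above). A quick check, with a hypothetical node name:
+
+```sh
+kubectl get node my-gpu-node -o jsonpath="{.status.allocatable['nvidia\.com/gpu']}"
+```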
 
 # ceph client
 

+ 2 - 0
cloudflared.yaml

@@ -76,4 +76,6 @@ data:
       service: http://plex-service.plex.svc.cluster.local:32400
     - hostname: jellyfin.jibby.org
       service: http://jellyfin-service.plex.svc.cluster.local:8096
+    - hostname: s3.jibby.org
+      service: http://rook-ceph-rgw-ceph-objectstore.rook-ceph.svc.cluster.local:6980
     - service: http_status:404

+ 16 - 1
jellyfin-pvc.yaml

@@ -11,4 +11,19 @@ spec:
     - ReadWriteOnce
   resources:
     requests:
-      storage: 50Gi
+      storage: 50Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: media2-pvc
+  namespace: plex
+spec:
+  storageClassName: media2-sc
+  volumeName: media2-static-pv
+  volumeMode: Filesystem
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 20Ti

+ 7 - 5
jellyfin.yaml

@@ -21,7 +21,7 @@ spec:
     spec:
       containers:
       - name: jellyfin
-        image: jellyfin/jellyfin:10.7.7
+        image: jellyfin/jellyfin:10.8.5
         ports:
         - containerPort: 8096
           name: http-web-svc
@@ -37,6 +37,8 @@ spec:
           value: "1000"
         - name: PGID
           value: "1000"
+        - name: NVIDIA_DRIVER_CAPABILITIES
+          value: "all"
         livenessProbe:
           httpGet:
             path: /web/index.html
@@ -45,9 +47,9 @@ spec:
           initialDelaySeconds: 10
           periodSeconds: 30
           timeoutSeconds: 10
-        #resources:
-        #  limits:
-        #    nvidia.com/gpu: 1
+        resources:
+          limits:
+            nvidia.com/gpu: 1
       affinity:
         nodeAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
@@ -63,7 +65,7 @@ spec:
           claimName: jellyfin-config-pvc
       - name: media
         persistentVolumeClaim:
-          claimName: plex-media-pvc
+          claimName: media2-pvc
       - name: tmpfs
         emptyDir:
           medium: Memory

+ 1 - 1
matrix.yaml

@@ -36,7 +36,7 @@ spec:
     spec:
       containers:
       - name: matrix
-        image: matrixdotorg/synapse:v1.55.2
+        image: matrixdotorg/synapse:v1.67.0
         ports:
         - containerPort: 8008
           name: http-web-svc

+ 8 - 0
nvidia-device-plugin-config.yaml

@@ -0,0 +1,8 @@
+version: v1
+sharing:
+  timeSlicing:
+    renameByDefault: false
+    failRequestsGreaterThanOne: false
+    resources:
+    - name: nvidia.com/gpu
+      replicas: 5

+ 7 - 65
plex-pvc.yaml

@@ -15,42 +15,13 @@ spec:
       storage: 50Gi
 ---
 apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: plex-media-metadata-static-pv
-spec:
-  accessModes:
-  - ReadWriteMany
-  capacity:
-    storage: 20Ti
-  csi:
-    controllerExpandSecretRef:
-      name: rook-csi-cephfs-provisioner
-      namespace: rook-ceph
-    driver: rook-ceph.cephfs.csi.ceph.com
-    nodeStageSecretRef:
-      name: rook-csi-cephfs-node
-      namespace: rook-ceph
-    volumeAttributes:
-      clusterID: rook-ceph
-      fsName: media
-      pool: media-data0
-      storage.kubernetes.io/csiProvisionerIdentity: 1657147448506-8081-rook-ceph.cephfs.csi.ceph.com
-      subvolumeName: csi-vol-9b2f40f9-0613-11ed-8662-4a986e7745e3
-      subvolumePath: /volumes/csi/csi-vol-9b2f40f9-0613-11ed-8662-4a986e7745e3/07b0a3bf-e458-4442-90df-f70aaa971da6
-    volumeHandle: 0001-0009-rook-ceph-0000000000000002-9b2f40f9-0613-11ed-8662-4a986e7745e3
-  persistentVolumeReclaimPolicy: Retain
-  storageClassName: media-sc
-  volumeMode: Filesystem
----
-apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
-  name: plex-media-metadata-pvc
+  name: plex-metadata-pvc
   namespace: plex
 spec:
-  storageClassName: media-sc
-  volumeName: plex-media-metadata-static-pv
+  storageClassName: plex-metadata-sc
+  volumeName: plex-metadata-static-pv
   volumeMode: Filesystem
   accessModes:
     - ReadWriteMany
@@ -59,45 +30,16 @@ spec:
       storage: 20Ti
 ---
 apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: plex-media-static-pv
-spec:
-  accessModes:
-  - ReadWriteMany
-  capacity:
-    storage: 20Ti
-  csi:
-    controllerExpandSecretRef:
-      name: rook-csi-cephfs-provisioner
-      namespace: rook-ceph
-    driver: rook-ceph.cephfs.csi.ceph.com
-    nodeStageSecretRef:
-      name: rook-csi-cephfs-node
-      namespace: rook-ceph
-    volumeAttributes:
-      clusterID: rook-ceph
-      fsName: media
-      pool: media-data0
-      storage.kubernetes.io/csiProvisionerIdentity: 1657147447431-8081-rook-ceph.cephfs.csi.ceph.com
-      subvolumeName: csi-vol-474d5ba4-fe4f-11ec-9369-b20c27405718
-      subvolumePath: /volumes/csi/csi-vol-474d5ba4-fe4f-11ec-9369-b20c27405718/ed910e5e-ebd1-40b5-9b58-464534002120
-    volumeHandle: 0001-0009-rook-ceph-0000000000000002-474d5ba4-fe4f-11ec-9369-b20c27405718
-  persistentVolumeReclaimPolicy: Retain
-  storageClassName: media-sc
-  volumeMode: Filesystem
----
-apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
-  name: plex-media-pvc
+  name: plex-pvc
   namespace: plex
 spec:
-  storageClassName: media-sc
-  volumeName: plex-media-static-pv
+  storageClassName: plex-sc
+  volumeName: plex-static-pv
   volumeMode: Filesystem
   accessModes:
-    - ReadWriteMany
+    - ReadOnlyMany
   resources:
     requests:
       storage: 20Ti

+ 4 - 2
plex.yaml

@@ -39,6 +39,8 @@ spec:
           value: "1000"
         - name: PGID
           value: "1000"
+        - name: NVIDIA_DRIVER_CAPABILITIES
+          value: "all"
         livenessProbe:
           httpGet:
             path: /web/index.html
@@ -56,10 +58,10 @@ spec:
           claimName: plex-config-pvc
       - name: media-metadata
         persistentVolumeClaim:
-          claimName: plex-media-metadata-pvc
+          claimName: plex-metadata-pvc
       - name: media
         persistentVolumeClaim:
-          claimName: plex-media-pvc
+          claimName: plex-pvc
       - name: tmpfs
         emptyDir:
           medium: Memory

+ 37 - 0
rook/buckets/bucket.py

@@ -0,0 +1,37 @@
+import boto3
+import json
+import os
+
+def connect():
+    # Credentials come from the CephObjectStoreUser secret dumped above.
+    access_key = os.environ["AWS_ACCESS_KEY_ID"]
+    secret_key = os.environ["AWS_SECRET_ACCESS_KEY"]
+
+    # Point boto3 at the RGW endpoint instead of AWS.
+    return boto3.client('s3', 'us-east-1',
+                        endpoint_url="https://s3.jibby.org",
+                        aws_access_key_id=access_key,
+                        aws_secret_access_key=secret_key)
+
+def create_bucket(bucket_name):
+    conn = connect()
+    conn.create_bucket(Bucket=bucket_name)
+
+def set_public_read_policy(bucket_name):
+    # Allow anonymous GetObject on objects and ListBucket on the bucket itself.
+    bucket_policy = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Sid": "AddPerm",
+                "Effect": "Allow",
+                "Principal": "*",
+                "Action": ["s3:GetObject", "s3:ListBucket"],
+                "Resource": [
+                    "arn:aws:s3:::{0}/*".format(bucket_name),
+                    "arn:aws:s3:::{0}".format(bucket_name),
+                ],
+            }
+        ],
+    }
+
+    conn = connect()
+    conn.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(bucket_policy))

+ 15 - 0
rook/buckets/ingress.yaml

@@ -0,0 +1,15 @@
+apiVersion: traefik.containo.us/v1alpha1
+kind: IngressRoute
+metadata:
+  name: s3
+  namespace: rook-ceph
+spec:
+  entryPoints:
+  - websecure
+  routes:
+  - kind: Rule
+    match: Host(`s3.jibby.org`)
+    services:
+    - kind: Service
+      name: rook-ceph-rgw-ceph-objectstore
+      port: 6980

+ 24 - 0
rook/buckets/s3-bucket-listing.html

@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<html>
+<head>
+  <title>S3 Bucket Listing Generator</title>
+</head>
+<body>
+  <div id="navigation"></div>
+  <div id="listing"></div>
+
+<script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
+<script type="text/javascript">
+  var S3BL_IGNORE_PATH = true;
+  //var BUCKET_NAME = 'public';
+  var BUCKET_URL = 'https://s3.jibby.org/public';
+  //var S3B_ROOT_DIR = 'public';
+  // var S3B_SORT = 'DEFAULT';
+  var EXCLUDE_FILE = 'index.html';  // change to array to exclude multiple files, regexp also supported e.g. /^(.*\/)?index.html$/ to exclude all index.html
+  // var AUTO_TITLE = true;
+  // var S3_REGION = 's3'; // for us-east-1
+</script>
+<script type="text/javascript" src="https://rufuspollock.github.io/s3-bucket-listing/list.js"></script>
+
+</body>
+</html>

+ 15 - 0
rook/buckets/user-josh.yaml

@@ -0,0 +1,15 @@
+apiVersion: ceph.rook.io/v1
+kind: CephObjectStoreUser
+metadata:
+  name: josh
+  namespace: rook-ceph
+spec:
+  store: ceph-objectstore
+  displayName: Josh
+  quotas:
+    maxBuckets: 1
+    maxSize: 10G
+    maxObjects: 10000
+  capabilities:
+    user: "*"
+    bucket: "*"

+ 25 - 0
rook/ceph-object-store.yaml

@@ -0,0 +1,25 @@
+apiVersion: ceph.rook.io/v1
+kind: CephObjectStore
+metadata:
+  name: ceph-objectstore
+  namespace: rook-ceph
+spec:
+  metadataPool:
+    failureDomain: host
+    replicated:
+      size: 3
+  dataPool:
+    failureDomain: host
+    erasureCoded:
+      dataChunks: 2
+      codingChunks: 1
+  preservePoolsOnDelete: true
+  gateway:
+    sslCertificateRef:
+    port: 6980
+    # securePort: 443
+    instances: 1
+  healthCheck:
+    bucket:
+      disabled: false
+      interval: 60s

+ 0 - 28
rook/media/media-static-pv.yaml

@@ -1,28 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: media-static-pv
-spec:
-  accessModes:
-  - ReadWriteMany
-  capacity:
-    storage: 20Ti
-  csi:
-    controllerExpandSecretRef:
-      name: rook-csi-cephfs-provisioner
-      namespace: rook-ceph
-    driver: rook-ceph.cephfs.csi.ceph.com
-    nodeStageSecretRef:
-      name: rook-csi-cephfs-node
-      namespace: rook-ceph
-    volumeAttributes:
-      clusterID: rook-ceph
-      fsName: media
-      pool: media-data0
-      storage.kubernetes.io/csiProvisionerIdentity: 1657147447431-8081-rook-ceph.cephfs.csi.ceph.com
-      subvolumeName: csi-vol-474d5ba4-fe4f-11ec-9369-b20c27405718
-      subvolumePath: /volumes/csi/csi-vol-474d5ba4-fe4f-11ec-9369-b20c27405718/ed910e5e-ebd1-40b5-9b58-464534002120
-    volumeHandle: 0001-0009-rook-ceph-0000000000000002-474d5ba4-fe4f-11ec-9369-b20c27405718
-  persistentVolumeReclaimPolicy: Retain
-  storageClassName: media-sc
-  volumeMode: Filesystem

+ 0 - 13
rook/media/plex-media-metadata/plex-media-metadata-base-pvc.yaml

@@ -1,13 +0,0 @@
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
-  name: plexmd-base-pvc
-  namespace: kube-system
-spec:
-  accessModes:
-    - ReadWriteMany
-  resources:
-    requests:
-      storage: 20Ti
-  storageClassName: media-sc
-  volumeMode: Filesystem

+ 0 - 28
rook/media/plex-media-metadata/plex-media-metadata-static-pv.yaml

@@ -1,28 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: plex-media-metadata-static-pv
-spec:
-  accessModes:
-  - ReadWriteMany
-  capacity:
-    storage: 20Ti
-  csi:
-    controllerExpandSecretRef:
-      name: rook-csi-cephfs-provisioner
-      namespace: rook-ceph
-    driver: rook-ceph.cephfs.csi.ceph.com
-    nodeStageSecretRef:
-      name: rook-csi-cephfs-node
-      namespace: rook-ceph
-    volumeAttributes:
-      clusterID: rook-ceph
-      fsName: media
-      pool: media-data0
-      storage.kubernetes.io/csiProvisionerIdentity: 1657147448506-8081-rook-ceph.cephfs.csi.ceph.com
-      subvolumeName: csi-vol-9b2f40f9-0613-11ed-8662-4a986e7745e3
-      subvolumePath: /volumes/csi/csi-vol-9b2f40f9-0613-11ed-8662-4a986e7745e3/07b0a3bf-e458-4442-90df-f70aaa971da6
-    volumeHandle: 0001-0009-rook-ceph-0000000000000002-9b2f40f9-0613-11ed-8662-4a986e7745e3
-  persistentVolumeReclaimPolicy: Retain
-  storageClassName: media-sc
-  volumeMode: Filesystem

+ 45 - 0
rook/media2/media2-filesystem.yaml

@@ -0,0 +1,45 @@
+apiVersion: ceph.rook.io/v1
+kind: CephFilesystem
+metadata:
+  name: media2
+  namespace: rook-ceph
+spec:
+  metadataPool:
+    replicated:
+      size: 3
+      deviceClass: ssd
+  dataPools:
+    - name: default
+      replicated:
+        size: 3
+      deviceClass: hdd
+    - name: erasurecoded
+      erasureCoded:
+        dataChunks: 2
+        codingChunks: 1
+      deviceClass: hdd
+  preserveFilesystemOnDelete: true
+  metadataServer:
+    activeCount: 1
+    activeStandby: true
+    placement:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+          - matchExpressions:
+            - key: storage-node
+              operator: In
+              values:
+              - "true"
+      tolerations:
+      - key: storage-node
+        operator: Exists
+    priorityClassName: system-cluster-critical
+    # 4GiB is recommended
+    resources:
+      limits:
+        cpu: "300m"
+        memory: 1Gi
+      requests:
+        cpu: "100m"
+        memory: 500Mi

+ 17 - 0
rook/media2/media2-sc.yaml

@@ -0,0 +1,17 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: media2-sc
+parameters:
+  clusterID: rook-ceph
+  fsName: media2
+  pool: media2-erasurecoded
+  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
+  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
+  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
+  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+provisioner: rook-ceph.cephfs.csi.ceph.com
+reclaimPolicy: Delete
+allowVolumeExpansion: true

+ 28 - 0
rook/media2/media2-static-pv.yaml

@@ -0,0 +1,28 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: media2-static-pv
+spec:
+  accessModes:
+  - ReadWriteMany
+  capacity:
+    storage: 20Ti
+  csi:
+    controllerExpandSecretRef:
+      name: rook-csi-cephfs-provisioner
+      namespace: rook-ceph
+    driver: rook-ceph.cephfs.csi.ceph.com
+    nodeStageSecretRef:
+      name: rook-csi-cephfs-node
+      namespace: rook-ceph
+    volumeAttributes:
+      clusterID: rook-ceph
+      fsName: media2
+      pool: media2-erasurecoded
+      storage.kubernetes.io/csiProvisionerIdentity: 1662922724498-8081-rook-ceph.cephfs.csi.ceph.com
+      subvolumeName: csi-vol-14a8ab27-3495-11ed-8f63-2e9244479848
+      subvolumePath: /volumes/csi/csi-vol-14a8ab27-3495-11ed-8f63-2e9244479848/7d1cbe20-dcdd-4f4f-b6b9-2424d6a2a9a2
+    volumeHandle: 0001-0009-rook-ceph-0000000000000005-14a8ab27-3495-11ed-8f63-2e9244479848
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: media2-sc
+  volumeMode: Filesystem

+ 17 - 7
rook/media/media-filesystem.yaml → rook/plex/plex-filesystem.yaml

@@ -1,15 +1,24 @@
 apiVersion: ceph.rook.io/v1
 kind: CephFilesystem
 metadata:
-  name: media
+  name: plex
   namespace: rook-ceph
 spec:
   metadataPool:
     replicated:
       size: 3
+      deviceClass: ssd
   dataPools:
-    - replicated:
-        size: 1
+    - name: default
+      replicated:
+        size: 3
+      deviceClass: hdd
+    - name: erasurecoded
+      erasureCoded:
+        dataChunks: 2
+        codingChunks: 1
+      deviceClass: hdd
+  preserveFilesystemOnDelete: true
   metadataServer:
     activeCount: 1
     activeStandby: true
@@ -26,10 +35,11 @@ spec:
       - key: storage-node
         operator: Exists
     priorityClassName: system-cluster-critical
+    # 4GiB is recommended
     resources:
       limits:
-        cpu: "2"
-        memory: 4Gi
+        cpu: "300m"
+        memory: 1Gi
       requests:
-        cpu: "1"
-        memory: 4Gi
+        cpu: "100m"
+        memory: 500Mi

+ 17 - 0
rook/plex/plex-metadata-sc.yaml

@@ -0,0 +1,17 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: plex-metadata-sc
+parameters:
+  clusterID: rook-ceph
+  fsName: plex
+  pool: plex-erasurecoded
+  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
+  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
+  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
+  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+provisioner: rook-ceph.cephfs.csi.ceph.com
+reclaimPolicy: Delete
+allowVolumeExpansion: true

+ 30 - 0
rook/plex/plex-metadata-static-pv.yaml

@@ -0,0 +1,30 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  annotations:
+    pv.kubernetes.io/provisioned-by: rook-ceph.cephfs.csi.ceph.com
+  name: plex-metadata-static-pv
+spec:
+  accessModes:
+  - ReadWriteMany
+  capacity:
+    storage: 20Ti
+  csi:
+    controllerExpandSecretRef:
+      name: rook-csi-cephfs-provisioner
+      namespace: rook-ceph
+    driver: rook-ceph.cephfs.csi.ceph.com
+    nodeStageSecretRef:
+      name: rook-csi-cephfs-node
+      namespace: rook-ceph
+    volumeAttributes:
+      clusterID: rook-ceph
+      fsName: plex
+      pool: plex-erasurecoded
+      storage.kubernetes.io/csiProvisionerIdentity: 1662922724498-8081-rook-ceph.cephfs.csi.ceph.com
+      subvolumeName: csi-vol-a24996ad-3789-11ed-8f63-2e9244479848
+      subvolumePath: /volumes/csi/csi-vol-a24996ad-3789-11ed-8f63-2e9244479848/4bf9ff78-453e-48aa-99d9-a06ffc94d7ff
+    volumeHandle: 0001-0009-rook-ceph-0000000000000006-a24996ad-3789-11ed-8f63-2e9244479848
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: plex-metadata-sc
+  volumeMode: Filesystem

+ 3 - 3
rook/media/media-sc.yaml → rook/plex/plex-sc.yaml

@@ -1,11 +1,11 @@
 apiVersion: storage.k8s.io/v1
 kind: StorageClass
 metadata:
-  name: media-sc
+  name: plex-sc
 parameters:
   clusterID: rook-ceph
-  fsName: media
-  pool: media-data0
+  fsName: plex
+  pool: plex-erasurecoded
   csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
   csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
   csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node

+ 30 - 0
rook/plex/plex-static-pv.yaml

@@ -0,0 +1,30 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  annotations:
+    pv.kubernetes.io/provisioned-by: rook-ceph.cephfs.csi.ceph.com
+  name: plex-static-pv
+spec:
+  accessModes:
+  - ReadWriteMany
+  capacity:
+    storage: 20Ti
+  csi:
+    controllerExpandSecretRef:
+      name: rook-csi-cephfs-provisioner
+      namespace: rook-ceph
+    driver: rook-ceph.cephfs.csi.ceph.com
+    nodeStageSecretRef:
+      name: rook-csi-cephfs-node
+      namespace: rook-ceph
+    volumeAttributes:
+      clusterID: rook-ceph
+      fsName: plex
+      pool: plex-erasurecoded
+      storage.kubernetes.io/csiProvisionerIdentity: 1662922724498-8081-rook-ceph.cephfs.csi.ceph.com
+      subvolumeName: csi-vol-75be7e58-3548-11ed-8f63-2e9244479848
+      subvolumePath: /volumes/csi/csi-vol-75be7e58-3548-11ed-8f63-2e9244479848/e9eef038-dd57-4e89-94ea-08011a6b6e74
+    volumeHandle: 0001-0009-rook-ceph-0000000000000006-75be7e58-3548-11ed-8f63-2e9244479848
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: plex-sc
+  volumeMode: Filesystem

+ 2 - 2
rook/rook-ceph-operator-values.yaml

@@ -104,7 +104,7 @@ csi:
 
   # Set logging level for csi containers.
   # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
-  #logLevel: 0
+  logLevel: 5
   # CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
   # Default value is RollingUpdate.
   #rbdPluginUpdateStrategy: OnDelete
@@ -385,7 +385,7 @@ enableDiscoveryDaemon: false
 cephCommandsTimeoutSeconds: "15"
 
 ## if true, run rook operator on the host network
-useOperatorHostNetwork: true
+useOperatorHostNetwork: false
 
 ## Rook Discover configuration
 ## toleration: NoSchedule, PreferNoSchedule or NoExecute