
add mastodon and sonarr

Josh Bicking, 2 years ago
parent commit 17b77eed0c
8 changed files with 612 additions and 3 deletions
  1. blog.yaml (+14 -0)
  2. cloudflared.yaml (+4 -1)
  3. elasticsearch/values.yaml (+364 -0)
  4. examples/secret.yaml (+1 -1)
  5. mastodon.yaml (+143 -0)
  6. plex-pvc.yaml (+1 -1)
  7. sonarr-pvc.yaml (+14 -0)
  8. sonarr.yaml (+71 -0)

blog.yaml (+14 -0)

@@ -77,3 +77,17 @@ spec:
     - kind: Service
       name: jekyll-service
       port: 80
+    middlewares:
+    - name: well-known-redirect
+# HACK: for mastodon
+---
+apiVersion: traefik.containo.us/v1alpha1
+kind: Middleware
+metadata:
+  name: well-known-redirect
+  namespace: blog
+spec:
+  redirectRegex:
+    regex: ^https://jibby.org/.well-known/webfinger(.*)
+    replacement: https://mastodon.jibby.org/.well-known/webfinger${1}
+    permanent: true
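
A quick way to sanity-check the redirect once this is deployed (the account name here is hypothetical, and it assumes the existing blog IngressRoute already serves jibby.org):

curl -sI "https://jibby.org/.well-known/webfinger?resource=acct:josh@jibby.org"
# should return a 301 with a Location header pointing at
# https://mastodon.jibby.org/.well-known/webfinger?resource=acct:josh@jibby.org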

cloudflared.yaml (+4 -1)

@@ -82,8 +82,11 @@ data:
       service: http://miniflux-service.miniflux.svc.cluster.local:8080
     - hostname: vaultwarden.jibby.org
       service: http://vaultwarden-service.vaultwarden.svc.cluster.local:80
-    # Rules can match the request's path to a regular expression:
     - hostname: vaultwarden.jibby.org
       path: /notifications/hub.*
       service: http://vaultwarden-service.vaultwarden.svc.cluster.local:3012
+    - hostname: mastodon.jibby.org
+      service: http://mastodon-service.mastodon.svc.cluster.local:3000
+    - hostname: streaming-mastodon.jibby.org
+      service: http://mastodon-service.mastodon.svc.cluster.local:4000
     - service: http_status:404
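
cloudflared reads this ingress config from the ConfigMap at startup, so the new hostnames only take effect after the tunnel pods restart. Assuming the tunnel runs as a Deployment named cloudflared in a cloudflared namespace (not shown in this commit), something like:

kubectl -n cloudflared rollout restart deployment/cloudflared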

elasticsearch/values.yaml (+364 -0)

@@ -0,0 +1,364 @@
+# helm repo add elastic https://helm.elastic.co
+# helm upgrade --install elasticsearch elastic/elasticsearch -n elasticsearch -f ~/server/elasticsearch/values.yaml --version 8.5.1
+
+---
+clusterName: "elasticsearch"
+nodeGroup: "master"
+
+# The service that non master groups will try to connect to when joining the cluster
+# This should be set to clusterName + "-" + nodeGroup for your master group
+masterService: ""
+
+# Elasticsearch roles that will be applied to this nodeGroup
+# These will be set as environment variables. E.g. node.roles=master
+# https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html#node-roles
+roles:
+  - master
+  - data
+  - data_content
+  - data_hot
+  - data_warm
+  - data_cold
+  - ingest
+  - ml
+  - remote_cluster_client
+  - transform
+
+replicas: 3
+minimumMasterNodes: 2
+
+esMajorVersion: ""
+
+# Allows you to add any config files in /usr/share/elasticsearch/config/
+# such as elasticsearch.yml and log4j2.properties
+esConfig:
+  # SSL is disabled: internal-only cluster, and self-signed certs in Mastodon are a pain
+  elasticsearch.yml: |
+    xpack.security.enabled: false
+    xpack.security.http.ssl.enabled: false
+    xpack.security.transport.ssl.enabled: false
+#    key:
+#      nestedkey: value
+#  log4j2.properties: |
+#    key = value
+
+createCert: false
+
+esJvmOptions: {}
+#  processors.options: |
+#    -XX:ActiveProcessorCount=3
+
+# Extra environment variables to append to this nodeGroup
+# This will be appended to the current 'env:' key. You can use any of the kubernetes env
+# syntax here
+extraEnvs: []
+#  - name: MY_ENVIRONMENT_VAR
+#    value: the_value_goes_here
+
+# Allows you to load environment variables from kubernetes secret or config map
+envFrom: []
+# - secretRef:
+#     name: env-secret
+# - configMapRef:
+#     name: config-map
+
+# Disable it to use your own elastic-credential Secret.
+secret:
+  enabled: true
+  password: "" # generated randomly if not defined
+
+# A list of secrets and their paths to mount inside the pod
+# This is useful for mounting certificates for security and for mounting
+# the X-Pack license
+secretMounts: []
+#  - name: elastic-certificates
+#    secretName: elastic-certificates
+#    path: /usr/share/elasticsearch/config/certs
+#    defaultMode: 0755
+
+hostAliases: []
+#- ip: "127.0.0.1"
+#  hostnames:
+#  - "foo.local"
+#  - "bar.local"
+
+image: "docker.elastic.co/elasticsearch/elasticsearch"
+imageTag: "8.5.1"
+imagePullPolicy: "IfNotPresent"
+
+podAnnotations: {}
+# iam.amazonaws.com/role: es-cluster
+
+# additionals labels
+labels: {}
+
+esJavaOpts: "" # example: "-Xmx1g -Xms1g"
+
+resources:
+  requests:
+    cpu: "1000m"
+    memory: "2Gi"
+  limits:
+    cpu: "1000m"
+    memory: "2Gi"
+
+initResources: {}
+# limits:
+#   cpu: "25m"
+#   # memory: "128Mi"
+# requests:
+#   cpu: "25m"
+#   memory: "128Mi"
+
+networkHost: "0.0.0.0"
+
+volumeClaimTemplate:
+  accessModes: ["ReadWriteOnce"]
+  resources:
+    requests:
+      storage: 30Gi
+  storageClassName: "ceph-block"
+
+rbac:
+  create: false
+  serviceAccountAnnotations: {}
+  serviceAccountName: ""
+  automountToken: true
+
+podSecurityPolicy:
+  create: false
+  name: ""
+  spec:
+    privileged: true
+    fsGroup:
+      rule: RunAsAny
+    runAsUser:
+      rule: RunAsAny
+    seLinux:
+      rule: RunAsAny
+    supplementalGroups:
+      rule: RunAsAny
+    volumes:
+      - secret
+      - configMap
+      - persistentVolumeClaim
+      - emptyDir
+
+persistence:
+  enabled: true
+  labels:
+    # Add default labels for the volumeClaimTemplate of the StatefulSet
+    enabled: false
+  annotations: {}
+
+extraVolumes: []
+# - name: extras
+#   emptyDir: {}
+
+extraVolumeMounts: []
+# - name: extras
+#   mountPath: /usr/share/extras
+#   readOnly: true
+
+extraContainers: []
+# - name: do-something
+#   image: busybox
+#   command: ['do', 'something']
+
+extraInitContainers: []
+# - name: do-something
+#   image: busybox
+#   command: ['do', 'something']
+
+# This is the PriorityClass settings as defined in
+# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
+priorityClassName: ""
+
+# By default this will make sure two pods don't end up on the same node
+# Changing this to a region would allow you to spread pods across regions
+antiAffinityTopologyKey: "kubernetes.io/hostname"
+
+# Hard means that by default pods will only be scheduled if there are enough nodes for them
+# and that they will never end up on the same node. Setting this to soft will do this "best effort"
+antiAffinity: "hard"
+
+# This is the node affinity settings as defined in
+# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
+nodeAffinity: {}
+
+# The default is to deploy all pods serially. By setting this to parallel all pods are started at
+# the same time when bootstrapping the cluster
+podManagementPolicy: "Parallel"
+
+# The environment variables injected by service links are not used, but can lead to slow Elasticsearch boot times when
+# there are many services in the current namespace.
+# If you experience slow pod startups you probably want to set this to `false`.
+enableServiceLinks: true
+
+protocol: http
+httpPort: 9200
+transportPort: 9300
+
+service:
+  enabled: true
+  labels: {}
+  labelsHeadless: {}
+  type: ClusterIP
+  # Consider that all endpoints are considered "ready" even if the Pods themselves are not
+  # https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec
+  publishNotReadyAddresses: false
+  nodePort: ""
+  annotations: {}
+  httpPortName: http
+  transportPortName: transport
+  loadBalancerIP: ""
+  loadBalancerSourceRanges: []
+  externalTrafficPolicy: ""
+
+updateStrategy: RollingUpdate
+
+# This is the max unavailable setting for the pod disruption budget
+# The default value of 1 will make sure that kubernetes won't allow more than 1
+# of your pods to be unavailable during maintenance
+maxUnavailable: 1
+
+podSecurityContext:
+  fsGroup: 1000
+  runAsUser: 1000
+
+securityContext:
+  capabilities:
+    drop:
+      - ALL
+  # readOnlyRootFilesystem: true
+  runAsNonRoot: true
+  runAsUser: 1000
+
+# How long to wait for elasticsearch to stop gracefully
+terminationGracePeriod: 120
+
+sysctlVmMaxMapCount: 262144
+
+readinessProbe:
+  failureThreshold: 3
+  initialDelaySeconds: 10
+  periodSeconds: 10
+  successThreshold: 3
+  timeoutSeconds: 5
+
+# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status
+clusterHealthCheckParams: "wait_for_status=green&timeout=1s"
+
+## Use an alternate scheduler.
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+schedulerName: ""
+
+imagePullSecrets: []
+nodeSelector: {}
+tolerations: []
+
+# Enabling this will publicly expose your Elasticsearch instance.
+# Only enable this if you have security enabled on your cluster
+ingress:
+  enabled: false
+  annotations: {}
+  # kubernetes.io/ingress.class: nginx
+  # kubernetes.io/tls-acme: "true"
+  className: "nginx"
+  pathtype: ImplementationSpecific
+  hosts:
+    - host: chart-example.local
+      paths:
+        - path: /
+  tls: []
+  #  - secretName: chart-example-tls
+  #    hosts:
+  #      - chart-example.local
+
+nameOverride: ""
+fullnameOverride: ""
+healthNameOverride: ""
+
+lifecycle: {}
+# preStop:
+#   exec:
+#     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
+# postStart:
+#   exec:
+#     command:
+#       - bash
+#       - -c
+#       - |
+#         #!/bin/bash
+#         # Add a template to adjust number of shards/replicas
+#         TEMPLATE_NAME=my_template
+#         INDEX_PATTERN="logstash-*"
+#         SHARD_COUNT=8
+#         REPLICA_COUNT=1
+#         ES_URL=http://localhost:9200
+#         while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done
+#         curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}'
+
+sysctlInitContainer:
+  enabled: true
+
+keystore: []
+
+networkPolicy:
+  ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
+  ## In order for a Pod to access Elasticsearch, it needs to have the following label:
+  ## {{ template "uname" . }}-client: "true"
+  ## Example for default configuration to access HTTP port:
+  ## elasticsearch-master-http-client: "true"
+  ## Example for default configuration to access transport port:
+  ## elasticsearch-master-transport-client: "true"
+
+  http:
+    enabled: false
+    ## if explicitNamespacesSelector is not set or set to {}, only client Pods being in the networkPolicy's namespace
+    ## and matching all criteria can reach the DB.
+    ## But sometimes, we want the Pods to be accessible to clients from other namespaces, in this case, we can use this
+    ## parameter to select these namespaces
+    ##
+    # explicitNamespacesSelector:
+    #   # Accept from namespaces with all those different rules (only from whitelisted Pods)
+    #   matchLabels:
+    #     role: frontend
+    #   matchExpressions:
+    #     - {key: role, operator: In, values: [frontend]}
+
+    ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed.
+    ##
+    # additionalRules:
+    #   - podSelector:
+    #       matchLabels:
+    #         role: frontend
+    #   - podSelector:
+    #       matchExpressions:
+    #         - key: role
+    #           operator: In
+    #           values:
+    #             - frontend
+
+  transport:
+    ## Note that all Elasticsearch Pods can talk to themselves using transport port even if enabled.
+    enabled: false
+    # explicitNamespacesSelector:
+    #   matchLabels:
+    #     role: frontend
+    #   matchExpressions:
+    #     - {key: role, operator: In, values: [frontend]}
+    # additionalRules:
+    #   - podSelector:
+    #       matchLabels:
+    #         role: frontend
+    #   - podSelector:
+    #       matchExpressions:
+    #         - key: role
+    #           operator: In
+    #           values:
+    #             - frontend
+
+tests:
+  enabled: true
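
With these values the chart exposes the cluster on a ClusterIP Service named elasticsearch-master (clusterName + "-" + nodeGroup). For Mastodon to use it for full-text search, the mastodon-secret would need roughly the following entries — a sketch, assuming the defaults above and no authentication since xpack.security is disabled:

ES_ENABLED=true
ES_HOST=elasticsearch-master.elasticsearch.svc.cluster.local
ES_PORT=9200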

examples/secret.yaml (+1 -1)

@@ -5,4 +5,4 @@ metadata:
   namespace: somenamespace
 type: Opaque
 data:
-  SOME_PASSWORD: MTIzNDU=  # echo -n "12345" | base64
+  SOME_PASSWORD: MTIzNDU=  # echo -n "12345" | base64 -w 0
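
The -w 0 flag keeps GNU base64 from wrapping long values across lines, which would corrupt the Secret data. An alternative that avoids manual encoding entirely is letting kubectl generate the manifest (secret name here is hypothetical):

kubectl -n somenamespace create secret generic some-secret \
  --from-literal=SOME_PASSWORD=12345 --dry-run=client -o yaml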

mastodon.yaml (+143 -0)

@@ -0,0 +1,143 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+    name: mastodon
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mastodon-web-pvc
+  namespace: mastodon
+  labels:
+    app: mastodon
+spec:
+  storageClassName: ceph-block
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 5Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: mastodon-sidekiq-pvc
+  namespace: mastodon
+  labels:
+    app: mastodon
+spec:
+  storageClassName: ceph-block
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 5Gi
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: mastodon
+  namespace: mastodon
+spec:
+  selector:
+    matchLabels:
+      app: mastodon
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: mastodon
+    spec:
+      containers:
+      - name: web
+        image: tootsuite/mastodon:v4.0.2
+        command: ["bash", "-c", "rm -f /mastodon/tmp/pids/server.pid; bundle exec rails s -p 3000"]
+        ports:
+        - containerPort: 3000
+          name: http-web-svc
+        envFrom:
+          - secretRef:
+              name: mastodon-secret
+        volumeMounts:
+        - mountPath: "/mastodon/public/system"
+          name: system
+        livenessProbe:
+          httpGet:
+            path: /health
+            port: 3000
+          failureThreshold: 10
+          initialDelaySeconds: 30
+          periodSeconds: 30
+          timeoutSeconds: 10
+      - name: streaming
+        image: tootsuite/mastodon:v4.0.2
+        command: ["node", "./streaming"]
+        ports:
+        - containerPort: 4000
+          name: http-stream-svc
+        envFrom:
+          - secretRef:
+              name: mastodon-secret
+        livenessProbe:
+          httpGet:
+            path: /api/v1/streaming/health
+            port: 4000
+          failureThreshold: 10
+          initialDelaySeconds: 30
+          periodSeconds: 30
+          timeoutSeconds: 10
+      - name: sidekiq
+        image: tootsuite/mastodon:v4.0.2
+        command: ["bundle", "exec", "sidekiq"]
+        envFrom:
+          - secretRef:
+              name: mastodon-secret
+        volumeMounts:
+        - mountPath: "/mastodon/public/system"
+          name: system
+      volumes:
+      - name: system
+        persistentVolumeClaim:
+          claimName: mastodon-web-pvc
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: mastodon-service
+  namespace: mastodon
+spec:
+  selector:
+    app: mastodon
+  type: ClusterIP
+  ports:
+  - name: mast-web-port
+    protocol: TCP
+    port: 3000
+    targetPort: http-web-svc
+  - name: mast-stream-port
+    protocol: TCP
+    port: 4000
+    targetPort: http-stream-svc
+---
+apiVersion: traefik.containo.us/v1alpha1
+kind: IngressRoute
+metadata:
+  name: mastodon
+  namespace: mastodon
+spec:
+  entryPoints:
+  - websecure
+  routes:
+  - kind: Rule
+    match: Host(`mastodon.jibby.org`)
+    services:
+    - kind: Service
+      name: mastodon-service
+      port: 3000
+  - kind: Rule
+    match: Host(`streaming.mastodon.jibby.org`)
+    services:
+    - kind: Service
+      name: mastodon-service
+      port: 4000
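
The Deployment references a mastodon-secret that is not part of this commit; it would carry the usual Mastodon environment (database, Redis, SMTP, SECRET_KEY_BASE, OTP_SECRET, VAPID keys). A rough sketch of first-time setup under that assumption:

# generate SECRET_KEY_BASE / OTP_SECRET and VAPID keys without needing a running pod
docker run --rm tootsuite/mastodon:v4.0.2 bundle exec rake secret
docker run --rm tootsuite/mastodon:v4.0.2 bundle exec rake mastodon:webpush:generate_vapid_key
# once the web container is up (and the database exists), apply migrations
kubectl -n mastodon exec deploy/mastodon -c web -- bundle exec rails db:migrate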

plex-pvc.yaml (+1 -1)

@@ -39,7 +39,7 @@ spec:
   volumeName: plex-static-pv
   volumeMode: Filesystem
   accessModes:
-    - ReadOnlyMany
+    - ReadWriteMany
   resources:
     requests:
       storage: 20Ti

sonarr-pvc.yaml (+14 -0)

@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: sonarr-pvc
+  namespace: plex
+  labels:
+    app: sonarr
+spec:
+  storageClassName: ceph-block
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi

sonarr.yaml (+71 -0)

@@ -0,0 +1,71 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: sonarr
+  namespace: plex
+spec:
+  selector:
+    matchLabels:
+      app: sonarr
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: sonarr
+    spec:
+      containers:
+      - name: sonarr
+        image: lscr.io/linuxserver/sonarr:4.0.0-v4
+        ports:
+        - containerPort: 8989
+          name: http-web-svc
+        command:
+        - "/bin/bash"
+        - "-c"
+        # Mount an sshfs connection to the seedbox
+        - "apk update && apk add sshfs && cp remote-secrets/REMOTE_KEY /key && chown 600 /key && mkdir /remote && chown 1000:1000 /remote && sshfs -o allow_other,default_permissions,uid=1000,gid=1000,umask=002 $REMOTE -o IdentityFile=/key -o StrictHostKeyChecking=no /remote && /init"
+        env:
+        - name: TZ
+          value: America/New_York
+        - name: PUID
+          value: "1000"
+        - name: PGID
+          value: "1000"
+        volumeMounts:
+        - mountPath: "/plex"
+          name: plex
+        - mountPath: "/config"
+          name: config
+        - name: remote-secret
+          mountPath: /remote-secrets
+        envFrom:
+        - secretRef:
+            name: sonarr-remote-secret
+        securityContext:
+          privileged: true  # required for sshfs mount
+      volumes:
+      - name: plex
+        persistentVolumeClaim:
+          claimName: plex-pvc
+      - name: config
+        persistentVolumeClaim:
+          claimName: sonarr-pvc
+      - name: remote-secret
+        secret:
+          secretName: sonarr-remote-secret
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: sonarr-service
+  namespace: plex
+spec:
+  selector:
+    app: sonarr
+  type: ClusterIP
+  ports:
+  - name: sonarr-web-port
+    protocol: TCP
+    port: 8989
+    targetPort: http-web-svc
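
No IngressRoute is added for Sonarr in this commit, so the UI stays cluster-internal; one way to reach it for initial setup is a port-forward:

kubectl -n plex port-forward svc/sonarr-service 8989:8989
# then browse to http://localhost:8989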