diff --git a/charts/myprecious/Chart.yaml b/charts/myprecious/Chart.yaml
index abd8dda15..15afdf0f9 100644
--- a/charts/myprecious/Chart.yaml
+++ b/charts/myprecious/Chart.yaml
@@ -390,11 +390,11 @@ dependencies:
condition: joplinserver.enabled
version: ~0.0.1
repository: oci://ghcr.io/elfhosted/charts
- - name: app-template
- version: ~0.2.1
- condition: rdtclientvpnpia.enabled
- repository: oci://ghcr.io/elfhosted/charts
- alias: rdtclientvpnpia
+ # - name: app-template
+ # version: ~0.2.1
+ # condition: rdtclientvpnpia.enabled
+ # repository: oci://ghcr.io/elfhosted/charts
+ # alias: rdtclientvpnpia
- name: app-template
condition: rclonemountalldebridpia.enabled
version: ~0.2.1
diff --git a/charts/myprecious/templates/configmaps/configmap-homer-config.yaml b/charts/myprecious/templates/configmaps/configmap-homer-config.yaml
index 3eaf1f756..91e161738 100644
--- a/charts/myprecious/templates/configmaps/configmap-homer-config.yaml
+++ b/charts/myprecious/templates/configmaps/configmap-homer-config.yaml
@@ -20,69 +20,69 @@ data:
while true
do
- # # disabled this until migrations are done - remove the next 2 lines to restore
-
- # # Once for all of /config...
- # # tree --du -hF -s /config/ -H https://{{ .Release.Name }}-filebrowser.{{ .Values.dns_domain }}/files/config -T "๐ง ElfHosted [/config] usage for {{ .Release.Name }}"> /www/assets/message/disk-usage-config.html
-
- # for STORAGE_PATH in $(ls /config); do
- # # And again per-app /config...
- # # disabled on 19 Feb 2024 - consumes too much I/O now that we have "infinite" streaming - @funkypenguin
- # tree --du -hF -s /config/${STORAGE_PATH} -H https://{{ .Release.Name }}-filebrowser.{{ .Values.dns_domain }}/files/config/${STORAGE_PATH} -T "๐ง ElfHosted [/config/${STORAGE_PATH}] usage for {{ .Release.Name }}"> /www/assets/message/disk-usage-config-${STORAGE_PATH}.html
-
- # df -Ph /config/${STORAGE_PATH} | sed s/%//g > /www/assets/message/disk_space_${STORAGE_PATH}.txt
- # PERCENTAGE_USED=$(cat /www/assets/message/disk_space_${STORAGE_PATH}.txt | sed s/%//g | awk '{print $5}' | tail -n1)
- # DISK_USED=$(cat /www/assets/message/disk_space_${STORAGE_PATH}.txt | awk '{print $3}' | tail -n1)
- # DISK_TOTAL=$(cat /www/assets/message/disk_space_${STORAGE_PATH}.txt | awk '{print $2}' | tail -n1)
- # DISK_FREE=$(cat /www/assets/message/disk_space_${STORAGE_PATH}.txt | awk '{print $4}' | tail -n1)
- # PERCENTAGE_FREE=$(echo $((100-${PERCENTAGE_USED})))
-
- # # Keep the highest percentage used number
- # if [[ ! -z ${EXISTING_USAGE+x} ]]; then
- # if [[ "$PERCENTAGE_USED" -gt "$EXISTING_USAGE" ]]; then
- # EXISTING_USAGE=$PERCENTAGE_USED
- # CONFIG_BIGGEST_CONSUMER=$STORAGE_PATH
- # CONFIG_DISK_FREE=$DISK_FREE
- # CONFIG_PERCENTAGE_USED=$PERCENTAGE_USED
- # CONFIG_DISK_TOTAL=$DISK_TOTAL
- # # echo -n "/config: Largest volume-per-app consumer is ${CONFIG_BIGGEST_CONSUMER} with ${PERCENTAGE_FREE}% (${DISK_FREE}) of ${DISK_TOTAL} remaining
" > /www/assets/message/message_details_config.html
- # echo -n "/config: Largest volume-per-app consumer is ${CONFIG_BIGGEST_CONSUMER} with ${PERCENTAGE_FREE}% (${DISK_FREE}) of ${DISK_TOTAL} remaining
" > /www/assets/message/message_details_config.html
- # fi
- # else
- # EXISTING_USAGE=$PERCENTAGE_USED
- # CONFIG_BIGGEST_CONSUMER=$STORAGE_PATH
- # CONFIG_DISK_FREE=$DISK_FREE
- # CONFIG_PERCENTAGE_USED=$PERCENTAGE_USED
- # CONFIG_DISK_TOTAL=$DISK_TOTAL
- # echo -n "/config: Largest volume-per-app consumer is ${CONFIG_BIGGEST_CONSUMER} with ${PERCENTAGE_FREE}% (${DISK_FREE}) of ${DISK_TOTAL} remaining
" > /www/assets/message/message_details_config.html
- # fi
- # done
+ # Once for all of /config, /backup, /logs...
+ tree --du -hF -s /config/ -H https://{{ .Release.Name }}-filebrowser.{{ .Values.dns_domain }}/files/config -T "🧝 ElfHosted [/config] usage for {{ .Release.Name }}"> /www/assets/message/disk-usage-config.html
+ tree --du -hF -s /backup/ -H https://{{ .Release.Name }}-filebrowser.{{ .Values.dns_domain }}/files/backup -T "🧝 ElfHosted [/backup] usage for {{ .Release.Name }}"> /www/assets/message/disk-usage-backup.html
+ tree --du -hF -s /logs/ -H https://{{ .Release.Name }}-filebrowser.{{ .Values.dns_domain }}/files/logs -T "🧝 ElfHosted [/logs] usage for {{ .Release.Name }}"> /www/assets/message/disk-usage-logs.html
+
+ for STORAGE_PATH in $(ls /config); do
+ # And again per-app /config...
+ # disabled on 19 Feb 2024 - consumes too much I/O now that we have "infinite" streaming - @funkypenguin
+ tree --du -hF -s /config/${STORAGE_PATH} -H https://{{ .Release.Name }}-filebrowser.{{ .Values.dns_domain }}/files/config/${STORAGE_PATH} -T "🧝 ElfHosted [/config/${STORAGE_PATH}] usage for {{ .Release.Name }}"> /www/assets/message/disk-usage-config-${STORAGE_PATH}.html
+
+ df -Ph /config/${STORAGE_PATH} | sed s/%//g > /www/assets/message/disk_space_${STORAGE_PATH}.txt
+ PERCENTAGE_USED=$(cat /www/assets/message/disk_space_${STORAGE_PATH}.txt | sed s/%//g | awk '{print $5}' | tail -n1)
+ DISK_USED=$(cat /www/assets/message/disk_space_${STORAGE_PATH}.txt | awk '{print $3}' | tail -n1)
+ DISK_TOTAL=$(cat /www/assets/message/disk_space_${STORAGE_PATH}.txt | awk '{print $2}' | tail -n1)
+ DISK_FREE=$(cat /www/assets/message/disk_space_${STORAGE_PATH}.txt | awk '{print $4}' | tail -n1)
+ PERCENTAGE_FREE=$(echo $((100-${PERCENTAGE_USED})))
+
+ # Keep the highest percentage used number
+ if [[ ! -z ${EXISTING_USAGE+x} ]]; then
+ if [[ "$PERCENTAGE_USED" -gt "$EXISTING_USAGE" ]]; then
+ EXISTING_USAGE=$PERCENTAGE_USED
+ CONFIG_BIGGEST_CONSUMER=$STORAGE_PATH
+ CONFIG_DISK_FREE=$DISK_FREE
+ CONFIG_PERCENTAGE_USED=$PERCENTAGE_USED
+ CONFIG_DISK_TOTAL=$DISK_TOTAL
+ # echo -n "/config: Largest volume-per-app consumer is ${CONFIG_BIGGEST_CONSUMER} with ${PERCENTAGE_FREE}% (${DISK_FREE}) of ${DISK_TOTAL} remaining
" > /www/assets/message/message_details_config.html
+ echo -n "/config: Largest volume-per-app consumer is ${CONFIG_BIGGEST_CONSUMER} with ${PERCENTAGE_FREE}% (${DISK_FREE}) of ${DISK_TOTAL} remaining
" > /www/assets/message/message_details_config.html
+ fi
+ else
+ EXISTING_USAGE=$PERCENTAGE_USED
+ CONFIG_BIGGEST_CONSUMER=$STORAGE_PATH
+ CONFIG_DISK_FREE=$DISK_FREE
+ CONFIG_PERCENTAGE_USED=$PERCENTAGE_USED
+ CONFIG_DISK_TOTAL=$DISK_TOTAL
+ echo -n "/config: Largest volume-per-app consumer is ${CONFIG_BIGGEST_CONSUMER} with ${PERCENTAGE_FREE}% (${DISK_FREE}) of ${DISK_TOTAL} remaining
" > /www/assets/message/message_details_config.html
+ fi
+ done
cat /dev/null > /www/assets/message/message_details.html
- # PERCENTAGE_USED=$(cat /www/assets/message/disk_space_percent_used)
- # if [[ "$PERCENTAGE_USED" -ge 90 || "$CONFIG_PERCENTAGE_USED" -ge 90 ]]; then
- # STYLE="is-danger"
- # ICON="fa fa-thermometer-full"
- # MESSAGE="Time to clean up! ๐งน"
- # elif [[ "$PERCENTAGE_USED" -ge 75 || "$CONFIG_PERCENTAGE_USED" -ge 75 ]]; then
- # STYLE="is-warning"
- # ICON="fa fa-thermometer-three-quarters"
- # MESSAGE="Getting full.. ๐"
- # elif [[ "$PERCENTAGE_USED" -ge 50 || "$CONFIG_PERCENTAGE_USED" -ge 50 ]]; then
- # STYLE="is-info"
- # ICON="fa fa-thermometer-half"
- # MESSAGE="Halfway there.."
- # elif [[ "$PERCENTAGE_USED" -ge 25 || "$CONFIG_PERCENTAGE_USED" -ge 25 ]]; then
- # STYLE="is-success"
- # ICON="fa fa-thermometer-quarter"
- # MESSAGE="Nice and tidy! ๐"
- # else
- # STYLE="is-success"
- # ICON="fa fa-thermometer-empty"
- # MESSAGE="Gigabytes everywhere! ๐ฅณ"
- # fi
+ PERCENTAGE_USED=$(cat /www/assets/message/disk_space_percent_used)
+ if [[ "$PERCENTAGE_USED" -ge 90 || "$CONFIG_PERCENTAGE_USED" -ge 90 ]]; then
+ STYLE="is-danger"
+ ICON="fa fa-thermometer-full"
+ MESSAGE="Time to clean up! 🧹"
+ elif [[ "$PERCENTAGE_USED" -ge 75 || "$CONFIG_PERCENTAGE_USED" -ge 75 ]]; then
+ STYLE="is-warning"
+ ICON="fa fa-thermometer-three-quarters"
+ MESSAGE="Getting full.. ๐"
+ elif [[ "$PERCENTAGE_USED" -ge 50 || "$CONFIG_PERCENTAGE_USED" -ge 50 ]]; then
+ STYLE="is-info"
+ ICON="fa fa-thermometer-half"
+ MESSAGE="Halfway there.."
+ elif [[ "$PERCENTAGE_USED" -ge 25 || "$CONFIG_PERCENTAGE_USED" -ge 25 ]]; then
+ STYLE="is-success"
+ ICON="fa fa-thermometer-quarter"
+ MESSAGE="Nice and tidy! ๐"
+ else
+ STYLE="is-success"
+ ICON="fa fa-thermometer-empty"
+ MESSAGE="Gigabytes everywhere! 🥳"
+ fi
STYLE="is-success"
ICON="fa fa-magic"
@@ -105,8 +105,8 @@ data:
COMPLIMENT=$(curl -s https://8768zwfurd.execute-api.us-east-1.amazonaws.com/v1/compliments | tr -d '"')
- # MESSAGE_DETAILS=$(cat /www/assets/message/message_details.html /www/assets/message/message_details_config.html )
- echo "{ \"title\":\"Welcome, {{ .Release.Name }}!\", \"content\":\"${COMPLIMENT}${ZURG_MESSAGE} ${RIVEN_MESSAGE}\",\"style\":\"${STYLE}\",\"icon\":\"${ICON}\" }" > /www/assets/message/message.json
+ MESSAGE_DETAILS=$(cat /www/assets/message/message_details.html /www/assets/message/message_details_config.html )
+ echo "{ \"title\":\"Welcome, {{ .Release.Name }}!\", \"content\":\"${COMPLIMENT}${MESSAGE_DETAILS}${ZURG_MESSAGE} ${RIVEN_MESSAGE}\",\"style\":\"${STYLE}\",\"icon\":\"${ICON}\" }" > /www/assets/message/message.json
sleep $(shuf -i 1800-5400 -n 1)
done
diff --git a/charts/myprecious/templates/pvcs-ssd/pvc-mediacovers.yaml b/charts/myprecious/templates/pvcs-ssd/pvc-mediacovers.yaml
deleted file mode 100644
index 54c03886b..000000000
--- a/charts/myprecious/templates/pvcs-ssd/pvc-mediacovers.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-{{ if .Values.radarr.enabled }}
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
- name: mediacovers
- annotations:
- helm.sh/resource-policy: delete
-spec:
- accessModes:
- - {{ .Values.storageclass.rwx.accessMode }}
- resources:
- requests:
- storage: 10Gi
- storageClassName: {{ .Values.storageclass.rwx.name }}
-{{ end }}
\ No newline at end of file
diff --git a/charts/myprecious/templates/pvcs/pvc-b2-mediacovers.yaml b/charts/myprecious/templates/pvcs/pvc-b2-mediacovers.yaml
deleted file mode 100644
index c1856a5d6..000000000
--- a/charts/myprecious/templates/pvcs/pvc-b2-mediacovers.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
- name: b2-mediacovers
- labels:
- velero.io/exclude-from-backup: "true"
-spec:
- accessModes:
- - ReadWriteMany
- resources:
- requests:
- storage: 1Gi
- storageClassName: rclone
- selector:
- matchLabels:
- name: rclone-{{ .Release.Name }}-b2-mediacovers
\ No newline at end of file
diff --git a/charts/myprecious/templates/pvs/pv-rclone-b2-mediacovers.yaml b/charts/myprecious/templates/pvs/pv-rclone-b2-mediacovers.yaml
deleted file mode 100644
index 3fff86406..000000000
--- a/charts/myprecious/templates/pvs/pv-rclone-b2-mediacovers.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: rclone-{{ .Release.Name }}-b2-mediacovers
- labels:
- name: rclone-{{ .Release.Name }}-b2-mediacovers
-spec:
- accessModes:
- - ReadWriteMany
- capacity:
- storage: 1Gi # doesn't matter, is used to match with the PVC
- storageClassName: rclone
- persistentVolumeReclaimPolicy: Delete
- csi:
- driver: csi-rclone
- volumeHandle: rclone-{{ .Release.Name }}-b2-mediacovers
- volumeAttributes:
- secretName: "{{ .Release.Name }}-b2-mediacovers"
diff --git a/charts/myprecious/templates/replicationdestinations/replicationdestination-symlinks.yaml b/charts/myprecious/templates/replicationdestinations/replicationdestination-symlinks.yaml
new file mode 100644
index 000000000..5181bb946
--- /dev/null
+++ b/charts/myprecious/templates/replicationdestinations/replicationdestination-symlinks.yaml
@@ -0,0 +1,29 @@
+apiVersion: volsync.backube/v1alpha1
+kind: ReplicationDestination
+metadata:
+ name: rd-symlinks
+spec:
+ trigger:
+ manual: runonce
+ restic:
+ repository: symlinks-volsync-restic-config
+ copyMethod: Snapshot
+ moverAffinity:
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: app.kubernetes.io/name
+ operator: In
+ values:
+ - gatus
+ topologyKey: "kubernetes.io/hostname"
+ cacheCapacity: 5Gi
+ capacity: 5Gi
+ cacheStorageClassName: topolvm-provisioner-thin
+ accessModes:
+ - ReadWriteOnce
+ storageClassName: topolvm-provisioner-thin
+ cacheAccessModes:
+ - ReadWriteOnce
+ volumeSnapshotClassName: topolvm-provisioner-thin
\ No newline at end of file
diff --git a/charts/myprecious/templates/replicationsources/replicationsource-symlinks.yaml b/charts/myprecious/templates/replicationsources/replicationsource-symlinks.yaml
new file mode 100644
index 000000000..2a9822978
--- /dev/null
+++ b/charts/myprecious/templates/replicationsources/replicationsource-symlinks.yaml
@@ -0,0 +1,41 @@
+apiVersion: volsync.backube/v1alpha1
+kind: ReplicationSource
+metadata:
+ name: rs-symlinks
+spec:
+ sourcePVC: symlinks
+ trigger:
+ schedule: "0 {{ mod .Values.userId 24 }} * * *"
+ restic:
+ unlock: {{ .Chart.Version }}
+ pruneIntervalDays: 7
+ repository: symlinks-volsync-restic-symlinks
+ retain:
+ daily: 2
+ copyMethod: Snapshot
+ moverAffinity:
+ podAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 50
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.elfhosted.com/role
+ operator: In
+ values:
+ - nodefinder # use nodefinder in the absence of zurg...
+ topologyKey: "kubernetes.io/hostname"
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app.elfhosted.com/name
+ operator: In
+ values:
+ - zurg # .. but prefer zurg
+ topologyKey: "kubernetes.io/hostname"
+ cacheCapacity: 5Gi
+ cacheStorageClassName: topolvm-provisioner-thin
+ cacheAccessModes:
+ - ReadWriteOnce
+ volumeSnapshotClassName: topolvm-provisioner-thin
diff --git a/charts/myprecious/templates/secrets/secret-rclone-b2-mediacovers.yaml b/charts/myprecious/templates/secrets/secret-rclone-b2-mediacovers.yaml
deleted file mode 100644
index 7f88e56ab..000000000
--- a/charts/myprecious/templates/secrets/secret-rclone-b2-mediacovers.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-apiVersion: v1
-kind: Secret
-metadata:
- name: {{ .Release.Name }}-b2-mediacovers
- namespace: csi-rclone
-type: Opaque
-stringData:
- remote: b2-mediacovers-{{ .Release.Name }}
- remotePath: elfhosted-mediacovers/{{ .Release.Name }}
- allow-non-empty: "true"
- allow-other: "true"
- uid: "568"
- gid: "568"
- # debugging RD issues
- vfs-cache-mode: full
- vfs-cache-max-age: 1h
- vfs-cache-max-size: 2G
- async-read: "true"
- vfs-read-ahead: 128M
- vfs-read-chunk-size: 8M
- vfs-read-chunk-size-limit: 2G
- vfs-fast-fingerprint: "true"
- no-modtime: "true"
- no-checksum: "true"
- use-mmap: "true"
- buffer-size: 16M
- dir-cache-time: 10s
- contimeout: 10s
- timeout: 10s
- configData: |
- [b2-mediacovers-{{ .Release.Name }}]
- type = b2
- account: {{ .Values.b2_mediacovers_account }}
- key: {{ .Values.b2_mediacovers_key }}
- hard_delete = true
\ No newline at end of file
diff --git a/charts/myprecious/templates/secrets/volsync/secret-volsync-symlinks.yaml b/charts/myprecious/templates/secrets/volsync/secret-volsync-symlinks.yaml
new file mode 100644
index 000000000..4c6494c1b
--- /dev/null
+++ b/charts/myprecious/templates/secrets/volsync/secret-volsync-symlinks.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: symlinks-volsync-restic-config
+type: Opaque
+stringData:
+ RESTIC_REPOSITORY: s3:{{ .Values.volsync.restic_repository }}/volsync/symlinks/{{ .Release.Namespace }}
+ RESTIC_PASSWORD: {{ .Values.volsync.restic_password }}
+ AWS_ACCESS_KEY_ID: {{ .Values.volsync.aws_access_key_id }}
+ AWS_SECRET_ACCESS_KEY: {{ .Values.volsync.aws_secret_access_key }}
diff --git a/charts/myprecious/values.yaml b/charts/myprecious/values.yaml
index 6b407d12e..04f4e899b 100644
--- a/charts/myprecious/values.yaml
+++ b/charts/myprecious/values.yaml
@@ -6,36 +6,36 @@ location:
enabled: false
imagemaid:
- enabled: false
+ enabled: false
# false by default
volsync:
enabled: false
- restic_repository:
- restic_password:
- aws_access_key_id:
+ restic_repository:
+ restic_password:
+ aws_access_key_id:
aws_secret_access_key:
# Set these to the default if nothing else is set
storageclass:
- rwx:
+ rwx:
name: ceph-filesystem-ssd
accessMode: ReadWriteMany
volumeSnapshotClassName: ceph-filesystem
- rwo:
+ rwo:
name: ceph-block-ssd
accessMode: ReadWriteOnce
volumeSnapshotClassName: ceph-block
# These control the egress bandwidth of the semi-dedi products
hobbit_streamer_podAnnotations: &hobbit_streamer_podAnnotations
- kubernetes.io/egress-bandwidth: "250M"
+ kubernetes.io/egress-bandwidth: "250M"
ranger_streamer_podAnnotations: &ranger_streamer_podAnnotations
- kubernetes.io/egress-bandwidth: "500M"
+ kubernetes.io/egress-bandwidth: "500M"
halfling_streamer_podAnnotations: &halfling_streamer_podAnnotations
- kubernetes.io/egress-bandwidth: "1000M"
+ kubernetes.io/egress-bandwidth: "1000M"
nazgul_streamer_podAnnotations: &nazgul_streamer_podAnnotations
- kubernetes.io/egress-bandwidth: "1000M"
+ kubernetes.io/egress-bandwidth: "1000M"
# These control the requests used to "anchor" a stack to a particular dedicated node. The following defaults can be overridden on a per-cluster basis:
hobbit_zurg_resources: &hobbit_zurg_resources
@@ -52,7 +52,7 @@ ranger_zurg_resources: &ranger_zurg_resources
memory: 30Mi
limits:
cpu: "4"
- memory: 4Gi
+ memory: 4Gi
halfling_zurg_resources: &halfling_zurg_resources
requests:
@@ -60,7 +60,7 @@ halfling_zurg_resources: &halfling_zurg_resources
memory: 30Mi
limits:
cpu: "8"
- memory: 4Gi
+ memory: 4Gi
nazgul_zurg_resources: &nazgul_zurg_resources
requests:
@@ -68,7 +68,7 @@ nazgul_zurg_resources: &nazgul_zurg_resources
memory: 30Mi
limits:
cpu: "16"
- memory: 4Gi
+ memory: 4Gi
# These allow us to manage RAM usage on streamers
@@ -78,7 +78,7 @@ hobbit_streamer_resources: &hobbit_streamer_resources
memory: 30Mi
limits:
cpu: "2"
- memory: 4Gi
+ memory: 4Gi
ranger_streamer_resources: &ranger_streamer_resources
requests:
@@ -112,26 +112,26 @@ tooling_image: &tooling_image ghcr.io/elfhosted/tooling:focal-20240530@sha256:45
# all RD pods have to exist with zurg - make this soft for now
standard_affinity: &standard_affinity
- podAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 50
- podAffinityTerm:
+ podAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 50
+ podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.elfhosted.com/role
operator: In
values:
- nodefinder # use nodefinder in the absense of zurg...
- topologyKey: "kubernetes.io/hostname"
- - weight: 100
- podAffinityTerm:
+ topologyKey: "kubernetes.io/hostname"
+ - weight: 100
+ podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.elfhosted.com/name
operator: In
values:
- zurg # .. but prefer zurg
- topologyKey: "kubernetes.io/hostname"
+ topologyKey: "kubernetes.io/hostname"
dedicated_affinity: &dedicated_affinity
nodeAffinity:
@@ -142,38 +142,38 @@ dedicated_affinity: &dedicated_affinity
operator: In
values:
- "true"
- podAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 50
- podAffinityTerm:
+ podAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 50
+ podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.elfhosted.com/role
operator: In
values:
- nodefinder # use nodefinder in the absense of zurg...
- topologyKey: "kubernetes.io/hostname"
- - weight: 100
- podAffinityTerm:
+ topologyKey: "kubernetes.io/hostname"
+ - weight: 100
+ podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.elfhosted.com/name
operator: In
values:
- zurg # .. but prefer zurg
- topologyKey: "kubernetes.io/hostname"
+ topologyKey: "kubernetes.io/hostname"
standard_tolerations: &standard_tolerations
# not using tolerations anymore
# - key: node-role.elfhosted.com/dedicated
-# operator: Exists
+# operator: Exists
# - key: node-role.elfhosted.com/hobbit
# operator: Exists
hobbit_tolerations: &hobbit_tolerations
# not using tolerations anymore
# - key: node-role.elfhosted.com/hobbit
-# operator: Exists
+# operator: Exists
# Set minimal requests so that pods can co-exist with streamers
hobbit_resources: &hobbit_resources
@@ -182,7 +182,7 @@ hobbit_resources: &hobbit_resources
memory: "16Mi"
limits:
cpu: "1"
- memory: 4Gi
+ memory: 4Gi
ranger_resources: &ranger_resources
requests:
@@ -190,98 +190,98 @@ ranger_resources: &ranger_resources
memory: "16Mi"
limits:
cpu: "2"
- memory: 8Gi
+ memory: 8Gi
volumespec_ephemeral_volume_1000g: &volumespec_ephemeral_volume_1000g
ephemeral:
volumeClaimTemplate:
metadata:
labels:
- velero.io/exclude-from-backup: "true"
+ velero.io/exclude-from-backup: "true"
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "topolvm-provisioner-thin"
resources:
requests:
- storage: 1000Gi
+ storage: 1000Gi
volumespec_ephemeral_volume_100g: &volumespec_ephemeral_volume_100g
ephemeral:
volumeClaimTemplate:
metadata:
labels:
- velero.io/exclude-from-backup: "true"
+ velero.io/exclude-from-backup: "true"
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "topolvm-provisioner-thin"
resources:
requests:
- storage: 100Gi
+ storage: 100Gi
volumespec_ephemeral_volume_1g: &volumespec_ephemeral_volume_1g
ephemeral:
volumeClaimTemplate:
metadata:
labels:
- velero.io/exclude-from-backup: "true"
+ velero.io/exclude-from-backup: "true"
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "topolvm-provisioner-thin"
resources:
requests:
- storage: 1Gi
+ storage: 1Gi
volumespec_ephemeral_volume_10g: &volumespec_ephemeral_volume_10g
ephemeral:
volumeClaimTemplate:
metadata:
labels:
- velero.io/exclude-from-backup: "true"
+ velero.io/exclude-from-backup: "true"
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "topolvm-provisioner-thin"
resources:
requests:
- storage: 10Gi
+ storage: 10Gi
volumespec_ephemeral_volume_50g: &volumespec_ephemeral_volume_50g
ephemeral:
volumeClaimTemplate:
metadata:
labels:
- velero.io/exclude-from-backup: "true"
+ velero.io/exclude-from-backup: "true"
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "topolvm-provisioner-thin"
resources:
requests:
- storage: 50Gi
+ storage: 50Gi
volumespec_ephemeral_volume_200g: &volumespec_ephemeral_volume_200g
ephemeral:
volumeClaimTemplate:
metadata:
labels:
- velero.io/exclude-from-backup: "true"
+ velero.io/exclude-from-backup: "true"
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "topolvm-provisioner-thin"
resources:
requests:
- storage: 200Gi
+ storage: 200Gi
volumespec_ephemeral_volume_500g: &volumespec_ephemeral_volume_500g
ephemeral:
volumeClaimTemplate:
metadata:
labels:
- velero.io/exclude-from-backup: "true"
+ velero.io/exclude-from-backup: "true"
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "topolvm-provisioner-thin"
resources:
requests:
- storage: 500Gi
+ storage: 500Gi
# And this makes the media / rclone mounts tidier.
rclonecustoma: &rclonecustoma
@@ -325,14 +325,14 @@ rclonemounts3b: &rclonemounts3b
volumeSpec:
persistentVolumeClaim:
claimName: s3-b
- mountPath: /storage/s3-b
+ mountPath: /storage/s3-b
rclonemounts3c: &rclonemounts3c
enabled: false
type: custom
volumeSpec:
persistentVolumeClaim:
claimName: rclone-s3-c
- mountPath: /storage/s3-c
+ mountPath: /storage/s3-c
gdrivea: &gdrivea
enabled: false
type: custom
@@ -367,21 +367,21 @@ rclonegdriveencrypteda: &rclonegdriveencrypteda
volumeSpec:
persistentVolumeClaim:
claimName: gdrive-encrypted-a
- mountPath: /storage/gdrive-encrypted-a
+ mountPath: /storage/gdrive-encrypted-a
rclonegdriveencryptedb: &rclonegdriveencryptedb
enabled: false
type: custom
volumeSpec:
persistentVolumeClaim:
claimName: gdrive-encrypted-b
- mountPath: /storage/gdrive-encrypted-b
+ mountPath: /storage/gdrive-encrypted-b
rclonegdriveencryptedc: &rclonegdriveencryptedc
enabled: false
type: custom
volumeSpec:
persistentVolumeClaim:
claimName: gdrive-encrypted-c
- mountPath: /storage/gdrive-encrypted-c
+ mountPath: /storage/gdrive-encrypted-c
onedrivea: &onedrivea
enabled: false
type: custom
@@ -430,14 +430,14 @@ rclonemountputio: &rclonemountputio
volumeSpec:
persistentVolumeClaim:
claimName: putio
- mountPath: /storage/putio
+ mountPath: /storage/putio
rclonemountpikpak: &rclonemountpikpak
enabled: false
type: custom
volumeSpec:
persistentVolumeClaim:
claimName: pikpak
- mountPath: /storage/pikpak
+ mountPath: /storage/pikpak
rclonemountssha: &rclonemountssha
enabled: false
type: custom
@@ -504,7 +504,7 @@ rclonestorageboxencrypteda: &rclonestorageboxencrypteda
persistentVolumeClaim:
claimName: storagebox-encrypted-a
mountPath: /storage/storagebox-encrypted-a
- subaccount: false
+ subaccount: false
rcloneb2encrypteda: &rcloneb2encrypteda
enabled: false
type: custom
@@ -520,7 +520,7 @@ rclonestorageboxencryptedb: &rclonestorageboxencryptedb
persistentVolumeClaim:
claimName: storagebox-encrypted-b
mountPath: /storage/storagebox-encrypted-b
- subaccount: false
+ subaccount: false
rclonestorageboxencryptedc: &rclonestorageboxencryptedc
enabled: false
type: custom
@@ -528,7 +528,7 @@ rclonestorageboxencryptedc: &rclonestorageboxencryptedc
persistentVolumeClaim:
claimName: storagebox-encrypted-c
mountPath: /storage/storagebox-encrypted-c
- subaccount: false
+ subaccount: false
rclonemountpremiumize: &rclonemountpremiumize
enabled: false
type: custom
@@ -542,7 +542,7 @@ rclonemounttorbox: &rclonemounttorbox
volumeSpec:
persistentVolumeClaim:
claimName: torbox
- mountPath: /storage/torbox
+ mountPath: /storage/torbox
b2: &b2
enabled: false
type: custom
@@ -577,7 +577,7 @@ rclonesmbencrypteda: &rclonesmbencrypteda
volumeSpec:
persistentVolumeClaim:
claimName: smb-encrypted-a
- mountPath: /storage/smb-encrypted-a
+ mountPath: /storage/smb-encrypted-a
rclonemountdebridlink: &rclonemountdebridlink
enabled: false
type: custom
@@ -601,7 +601,7 @@ storagemounts: &storagemounts
gdrived: *gdrived
rclonegdriveencrypteda: *rclonegdriveencrypteda
rclonegdriveencryptedb: *rclonegdriveencryptedb
- rclonegdriveencryptedc: *rclonegdriveencryptedc
+ rclonegdriveencryptedc: *rclonegdriveencryptedc
onedrivea: *onedrivea
rclonemountwebdava: *rclonemountwebdava
rclonemountrealdebridzurg: *rclonemountrealdebridzurg
@@ -615,8 +615,8 @@ storagemounts: &storagemounts
storageboxc: *storageboxc
rclonestorageboxencrypteda: *rclonestorageboxencrypteda
rclonestorageboxencryptedb: *rclonestorageboxencryptedb
- rclonestorageboxencryptedc: *rclonestorageboxencryptedc
- rcloneb2encrypteda: *rcloneb2encrypteda
+ rclonestorageboxencryptedc: *rclonestorageboxencryptedc
+ rcloneb2encrypteda: *rcloneb2encrypteda
rclonemountpremiumize: *rclonemountpremiumize
rclonemounttorbox: *rclonemounttorbox
b2: *b2
@@ -627,100 +627,21 @@ storagemounts: &storagemounts
tmp: &tmp
enabled: true
type: emptyDir
- mountPath: /tmp
+ mountPath: /tmp
symlinks: &symlinks
enabled: true
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: symlinks-ssd
- mountPath: /storage/symlinks
+ claimName: symlinks
+ mountPath: /storage/symlinks
backup: &backup
enabled: true
type: custom
volumeSpec:
persistentVolumeClaim:
claimName: backup
- mountPath: /storage/backup
-
-
-# These are used for containers (like codeserver) which need access to all **all** the '/config' volumes
-configmounts: &config_mounts
- config:
- enabled: true
- type: custom
- mountPath: /config
- volumeSpec:
- persistentVolumeClaim:
- claimName: config
- elfterm:
- enabled: true
- type: custom
- mountPath: /config/elfterm
- volumeSpec:
- persistentVolumeClaim:
- claimName: config-elfterm-ssd
- homer:
- enabled: true
- type: custom
- mountPath: /config/homer
- volumeSpec:
- persistentVolumeClaim:
- claimName: config-homer-ssd
- rclonebrowser:
- enabled: true # this is always on
- type: custom
- mountPath: /config/rclonebrowser/
- volumeSpec:
- persistentVolumeClaim:
- claimName: config-rclonebrowser-ssd
- recyclarr:
- enabled: true
- type: custom
- mountPath: /config/recyclarr/
- volumeSpec:
- persistentVolumeClaim:
- claimName: config-recyclarr-ssd
-
-# And this is for mounting the volumes into `/storage/config/`
-storageconfigmounts: &storageconfigmounts
- config:
- enabled: true
- type: custom
- mountPath: /storage/config/
- volumeSpec:
- persistentVolumeClaim:
- claimName: config
- elfterm:
- enabled: true
- type: custom
- mountPath: /storage/config/elfterm
- volumeSpec:
- persistentVolumeClaim:
- claimName: config-elfterm-ssd
- homer:
- enabled: true
- type: custom
- mountPath: /storage/config/homer
- volumeSpec:
- persistentVolumeClaim:
- claimName: config-homer-ssd
- rclonebrowser:
- enabled: true # this is always on
- type: custom
- mountPath: /storage/config/rclonebrowser/
- volumeSpec:
- persistentVolumeClaim:
- claimName: config-rclonebrowser-ssd
- recyclarr:
- enabled: true
- type: custom
- mountPath: /storage/config/recyclarr/
- volumeSpec:
- persistentVolumeClaim:
- claimName: config-recyclarr-ssd
-
# The entire bootstrap sidecar/additionalcontainer
default_resources: &default_resources
requests:
@@ -766,7 +687,7 @@ bootstrap_env: &bootstrap_env
valueFrom:
fieldRef:
fieldPath: metadata.labels['app.elfhosted.com/name']
-
+
migrate_data: &migrate_data
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -777,7 +698,7 @@ migrate_data: &migrate_data
if [[ ! -f /config/.migrated-20241007 ]]
then
- if [[ ! -z "$(ls -A /migration)" ]]
+ if [[ ! -z "$(ls -A /migration)" ]]
then
echo "Migrating from /migration/..."
cp -rfpv /migration/* /config/
@@ -785,8 +706,8 @@ migrate_data: &migrate_data
fi
else
echo "No migration necessary"
- fi
-
+ fi
+
volumeMounts:
- mountPath: /config
name: config
@@ -813,27 +734,27 @@ update_dns_on_init: &update_dns_on_init
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
- fieldPath: metadata.namespace
+ fieldPath: metadata.namespace
- name: ELF_APP_NAME
valueFrom:
fieldRef:
- fieldPath: metadata.labels['app.elfhosted.com/name']
+ fieldPath: metadata.labels['app.elfhosted.com/name']
- name: ELF_TENANT_NAME
valueFrom:
fieldRef:
- fieldPath: metadata.labels['app.kubernetes.io/instance']
+ fieldPath: metadata.labels['app.kubernetes.io/instance']
- name: CF_API_DOMAIN
- value: elfhosted.com
+ value: elfhosted.com
envFrom:
- secretRef:
name: cloudflare-api-token
volumeMounts:
- mountPath: /tooling-scripts
- name: tooling-scripts
+ name: tooling-scripts
securityContext:
seccompProfile:
type: RuntimeDefault
- readOnlyRootFilesystem: true
+ readOnlyRootFilesystem: true
# This is almost the same as the dns-update script, except we run it as a sidecar, waiting on pod termination
clean_up_dns_on_termination: &clean_up_dns_on_termination
@@ -920,9 +841,9 @@ bootstrap: &bootstrap
- mountPath: /config
name: config
- mountPath: /storage/backup
- name: backup
+ name: backup
- mountPath: /tmp
- name: tmp
+ name: tmp
env: *bootstrap_env
resources: *default_resources
securityContext: *default_securitycontext
@@ -996,7 +917,7 @@ bootstrap_elfbot: &bootstrap_elfbot
- mountPath: /config
name: config
- mountPath: /tmp
- name: tmp
+ name: tmp
env: *bootstrap_env
resources: *default_resources
securityContext: *default_securitycontext
@@ -1077,7 +998,7 @@ storagehub_bootstrap: &storagehub_bootstrap
- mountPath: /config
name: config
- mountPath: /tmp
- name: tmp
+ name: tmp
env: *bootstrap_env
resources: *default_resources
securityContext: *default_securitycontext
@@ -1155,7 +1076,7 @@ bootstrap_migration: &bootstrap_migration
touch /config/i-am-migrated
fi
fi
-
+
if [[ ! -f /config/i-am-bootstrapped ]]
then
@@ -1223,9 +1144,9 @@ bazarr4kexposed:
qbittorrentexposed:
enabled: false
rdtclientexposed:
- enabled: false
+ enabled: false
rdtclientalldebridexposed:
- enabled: false
+ enabled: false
delugeexposed:
enabled: false
rutorrentexposed:
@@ -1241,7 +1162,7 @@ mylarrexposed:
tunarrexposed:
enabled: false
threadfinexposed:
- enabled: false
+ enabled: false
zurgexposed:
enabled: false
elfassessment:
@@ -1254,19 +1175,19 @@ mattermostcustomdomain:
vaultwardencustomdomain:
enabled: false
jellyseerrcustomdomain:
- enabled: false
+ enabled: false
overseerrcustomdomain:
- enabled: false
+ enabled: false
plexcustomdomain:
- enabled: false
+ enabled: false
jellyfincustomdomain:
- enabled: false
+ enabled: false
embycustomdomain:
- enabled: false
+ enabled: false
stremiowebcustomdomain:
- enabled: false
+ enabled: false
pairdropcustomdomain:
- enabled: false
+ enabled: false
rutorrentgluetun: &rutorrent
enabled: false
@@ -1280,12 +1201,12 @@ rutorrentgluetun: &rutorrent
podLabels:
app.elfhosted.com/name: rutorrent
podAnnotations:
- kubernetes.io/egress-bandwidth: "150M"
+ kubernetes.io/egress-bandwidth: "150M"
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,rutorrent-config,rutorrent-gluetun-config,elfbot-rutorrent" # Reload the deployment every time the yaml config changes
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
# runAsUser: 568 # enforced in env vars
# runAsGroup: 568
@@ -1295,7 +1216,7 @@ rutorrentgluetun: &rutorrent
envFrom:
- configMapRef:
name: elfbot-rutorrent
- optional: true
+ optional: true
# we need the injected initcontainer to run as root, so we can't change the pod-level uid/gid
podSecurityContext:
seccompProfile:
@@ -1392,14 +1313,14 @@ rutorrentgluetun: &rutorrent
type: "custom"
volumeSpec:
configMap:
- name: dante-config
+ name: dante-config
tooling-scripts:
enabled: "true"
type: "custom"
volumeSpec:
configMap:
- name: tooling-scripts
- defaultMode: 0755
+ name: tooling-scripts
+ defaultMode: 0755
ingress:
main:
enabled: false
@@ -1423,7 +1344,7 @@ rutorrentgluetun: &rutorrent
PORT_FILE: /data/rtorrent/forwarded-port
WAN_IP_CMD: 'curl -s ifconfig.me'
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
@@ -1431,7 +1352,7 @@ rutorrentgluetun: &rutorrent
- mountPath: /config
name: config
subPath: rutorrent
- bootstrap:
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -1442,8 +1363,8 @@ rutorrentgluetun: &rutorrent
name: config
subPath: rutorrent
- mountPath: /tmp
- name: tmp
- update-dns: *update_dns_on_init
+ name: tmp
+ update-dns: *update_dns_on_init
setup:
image: *tooling_image
command:
@@ -1452,7 +1373,7 @@ rutorrentgluetun: &rutorrent
- |
set -x
set -e
-
+
# If the VPN_ENDPOINT_IP is set, but is not an IP address, then convert it to one
if [[ ! -z "$VPN_ENDPOINT_IP" ]]
then
@@ -1466,12 +1387,12 @@ rutorrentgluetun: &rutorrent
fi
volumeMounts:
- mountPath: /shared
- name: shared
+ name: shared
securityContext: *default_securitycontext
resources: *default_resources
envFrom:
- configMapRef:
- name: rutorrent-gluetun-config
+ name: rutorrent-gluetun-config
addons:
vpn: &rutorrent_addons_vpn
enabled: true
@@ -1511,7 +1432,7 @@ rutorrentgluetun: &rutorrent
dante:
image: ghcr.io/elfhosted/dante:v1.4.3
env: *bootstrap_env
- securityContext: *default_securitycontext
+ securityContext: *default_securitycontext
volumeMounts:
- mountPath: /tmp
name: tmp
@@ -1539,7 +1460,7 @@ rutorrentgluetun: &rutorrent
sleep 2
done
echo "VPN Connected, processing cookies..."
-
+
# If we have a cookie already, try to use it
if [[ -f /config/mam/saved.cookies ]]; then
curl -c /config/mam/saved.cookies -b /config/mam/saved.cookies https://t.myanonamouse.net/json/dynamicSeedbox.php -o /config/mam/mam_id-curl-output.log
@@ -1559,10 +1480,10 @@ rutorrentgluetun: &rutorrent
name: config
subPath: rutorrent
- mountPath: /shared
- name: shared
+ name: shared
resources: *default_resources
- securityContext: *default_securitycontext
- clean-up-dns: *clean_up_dns_on_termination
+ securityContext: *default_securitycontext
+ clean-up-dns: *clean_up_dns_on_termination
rutorrentpia:
<<: *rutorrent
@@ -1580,7 +1501,7 @@ rutorrentpia:
- configMapRef:
name: rutorrent-pia-config
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
@@ -1588,7 +1509,7 @@ rutorrentpia:
- mountPath: /config
name: config
subPath: rutorrent
- bootstrap:
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -1599,14 +1520,14 @@ rutorrentpia:
name: config
subPath: rutorrent
- mountPath: /tmp
- name: tmp
-
+ name: tmp
+
delugegluetun: &deluge
enabled: false
podLabels:
app.elfhosted.com/name: deluge
podAnnotations:
- kubernetes.io/egress-bandwidth: "150M"
+ kubernetes.io/egress-bandwidth: "150M"
sso:
enabled: true
automountServiceAccountToken: false
@@ -1629,15 +1550,15 @@ delugegluetun: &deluge
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-deluge,deluge-gluetun-config"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
persistence:
<<: *storagemounts
tmp:
enabled: true
type: custom
mountPath: /tmp
- volumeSpec: *volumespec_ephemeral_volume_100g
+ volumeSpec: *volumespec_ephemeral_volume_100g
config:
enabled: true
type: custom
@@ -1651,7 +1572,7 @@ delugegluetun: &deluge
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-deluge-ssd
+ claimName: config-deluge-ssd
shared:
enabled: true
mountPath: /shared
@@ -1678,7 +1599,7 @@ delugegluetun: &deluge
type: "custom"
volumeSpec:
configMap:
- name: dante-config
+ name: dante-config
ingress:
main:
enabled: false
@@ -1696,7 +1617,7 @@ delugegluetun: &deluge
envFrom:
- configMapRef:
name: elfbot-deluge
- optional: true
+ optional: true
extraEnvVars:
- name: PORT_FILE
valueFrom:
@@ -1705,15 +1626,15 @@ delugegluetun: &deluge
key: PORT_FILE
optional: true
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: deluge
- bootstrap:
+ subPath: deluge
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -1724,7 +1645,7 @@ delugegluetun: &deluge
name: config
subPath: deluge
- mountPath: /tmp
- name: tmp
+ name: tmp
setup:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -1767,12 +1688,12 @@ delugegluetun: &deluge
name: config
subPath: deluge
- mountPath: /shared
- name: shared
+ name: shared
env: *bootstrap_env
securityContext: *default_securitycontext
envFrom:
- configMapRef:
- name: deluge-gluetun-config
+ name: deluge-gluetun-config
resources:
requests:
cpu: 100m
@@ -1837,7 +1758,7 @@ delugepia:
<<: *deluge
controller:
annotations:
- configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,deluge-config,deluge-pia-config,elfbot-deluge" # Reload the deployment every time the yaml config changes
+ configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,deluge-config,deluge-pia-config,elfbot-deluge" # Reload the deployment every time the yaml config changes
addons:
vpn:
<<: *deluge_addons_vpn
@@ -1849,14 +1770,14 @@ delugepia:
- configMapRef:
name: deluge-pia-config
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: deluge
+ subPath: deluge
bootstrap: *bootstrap
setup:
image: *tooling_image
@@ -1900,9 +1821,9 @@ delugepia:
name: config
subPath: deluge
- mountPath: /shared
- name: shared
+ name: shared
env: *bootstrap_env
- securityContext: *default_securitycontext
+ securityContext: *default_securitycontext
additionalContainers:
deluge-web:
image: ghcr.io/geek-cookbook/deluge:2.1.1@sha256:448324e342c47020e4e9fbc236282ceb80ebebd7934a486a6f1e487a7e4034bf
@@ -1925,16 +1846,16 @@ delugepia:
dante:
image: ghcr.io/elfhosted/dante:v1.4.3
env: *bootstrap_env
- securityContext: *default_securitycontext
+ securityContext: *default_securitycontext
volumeMounts:
- mountPath: /tmp
- name: tmp
+ name: tmp
qbittorrentgluetun: &qbittorrent
podLabels:
app.elfhosted.com/name: qbittorrent
podAnnotations:
- kubernetes.io/egress-bandwidth: "150M"
+ kubernetes.io/egress-bandwidth: "150M"
enabled: false
sso:
enabled: true
@@ -1960,15 +1881,15 @@ qbittorrentgluetun: &qbittorrent
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-qbittorrent,qbittorrent-gluetun-config"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
persistence:
<<: *storagemounts
tmp:
enabled: true
type: custom
mountPath: /tmp
- volumeSpec: *volumespec_ephemeral_volume_100g
+ volumeSpec: *volumespec_ephemeral_volume_100g
config:
enabled: true
type: custom
@@ -1982,7 +1903,7 @@ qbittorrentgluetun: &qbittorrent
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-qbittorrent-ssd
+ claimName: config-qbittorrent-ssd
shared:
enabled: true
mountPath: /shared
@@ -2009,7 +1930,7 @@ qbittorrentgluetun: &qbittorrent
type: "custom"
volumeSpec:
configMap:
- name: dante-config
+ name: dante-config
ingress:
main:
enabled: false
@@ -2018,7 +1939,7 @@ qbittorrentgluetun: &qbittorrent
enabled: true # necessary for probes
ports:
http:
- port: 8080
+ port: 8080
nameOverride: spanky
env:
# -- Set the container timezone
@@ -2030,7 +1951,7 @@ qbittorrentgluetun: &qbittorrent
envFrom:
- configMapRef:
name: elfbot-qbittorrent
- optional: true
+ optional: true
extraEnvVars:
- name: PORT_FILE
valueFrom:
@@ -2039,14 +1960,14 @@ qbittorrentgluetun: &qbittorrent
key: PORT_FILE
optional: true
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: qbittorrent
+ subPath: qbittorrent
bootstrap:
<<: *bootstrap
volumeMounts:
@@ -2106,18 +2027,18 @@ qbittorrentgluetun: &qbittorrent
name: config
subPath: qbittorrent
- mountPath: /shared
- name: shared
+ name: shared
securityContext: *default_securitycontext
resources: *default_resources
envFrom:
- configMapRef:
- name: qbittorrent-gluetun-config
+ name: qbittorrent-gluetun-config
additionalContainers:
# Use this to provied proxied access to arrs
dante:
image: ghcr.io/elfhosted/dante:v1.4.3
env: *bootstrap_env
- securityContext: *default_securitycontext
+ securityContext: *default_securitycontext
volumeMounts:
- mountPath: /tmp
name: tmp
@@ -2145,7 +2066,7 @@ qbittorrentgluetun: &qbittorrent
sleep 2
done
echo "VPN Connected, processing cookies..."
-
+
# If we have a cookie already, try to use it
if [[ -f /config/mam/saved.cookies ]]; then
curl -c /config/mam/saved.cookies -b /config/mam/saved.cookies https://t.myanonamouse.net/json/dynamicSeedbox.php -o /config/mam/mam_id-curl-output.log
@@ -2167,7 +2088,7 @@ qbittorrentgluetun: &qbittorrent
- mountPath: /shared
name: shared
resources: *default_resources
- securityContext: *default_securitycontext
+ securityContext: *default_securitycontext
resources:
requests:
cpu: 100m
@@ -2184,7 +2105,7 @@ qbittorrentgluetun: &qbittorrent
gluetun:
image:
repository: ghcr.io/elfhosted/gluetun
- tag: 3.39.1@sha256:47688e70bd1519bcedaf48270328d85a5405496330787e53371d23fa590af4d3
+ tag: 3.39.1@sha256:47688e70bd1519bcedaf48270328d85a5405496330787e53371d23fa590af4d3
securityContext:
runAsUser: 0
capabilities:
@@ -2237,20 +2158,20 @@ qbittorrentpia:
dante:
image: ghcr.io/elfhosted/dante:v1.4.3
env: *bootstrap_env
- securityContext: *default_securitycontext
+ securityContext: *default_securitycontext
volumeMounts:
- mountPath: /tmp
name: tmp
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: qbittorrent
- bootstrap:
+ subPath: qbittorrent
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -2261,7 +2182,7 @@ qbittorrentpia:
name: config
subPath: qbittorrent
- mountPath: /tmp
- name: tmp
+ name: tmp
setup:
image: *tooling_image
command:
@@ -2309,7 +2230,7 @@ qbittorrentpia:
name: config
subPath: qbittorrent
- mountPath: /shared
- name: shared
+ name: shared
securityContext: *default_securitycontext
resources: *default_resources
@@ -2335,8 +2256,8 @@ nzbget:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-nzbget"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
automountServiceAccountToken: false
persistence:
<<: *storagemounts
@@ -2347,7 +2268,7 @@ nzbget:
subPath: nzbget
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
tmp:
enabled: true
type: custom
@@ -2371,7 +2292,7 @@ nzbget:
http:
port: 6789
initContainers:
- bootstrap:
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -2382,13 +2303,13 @@ nzbget:
name: config
subPath: nzbget
- mountPath: /tmp
- name: tmp
+ name: tmp
sabnzbd:
enabled: false
hostname: sabnzbd # required to prevent whitelisting requirement per https://sabnzbd.org/wiki/extra/hostname-check.html
podLabels:
- app.elfhosted.com/class: nzb
+ app.elfhosted.com/class: nzb
sso:
enabled: true
image:
@@ -2399,8 +2320,8 @@ sabnzbd:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-sabnzbd"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -2432,19 +2353,19 @@ sabnzbd:
subPath: sabnzbd
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
tmp:
enabled: true
type: custom
mountPath: /tmp
- volumeSpec: *volumespec_ephemeral_volume_500g
+ volumeSpec: *volumespec_ephemeral_volume_500g
elfbot:
enabled: "true"
type: "custom"
volumeSpec:
configMap:
name: elfbot-sabnzbd
- optional: true
+ optional: true
ingress:
main:
enabled: false
@@ -2455,15 +2376,15 @@ sabnzbd:
http:
port: 8080
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: sabnzbd
- bootstrap:
+ subPath: sabnzbd
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -2501,7 +2422,7 @@ sabnzbd:
name: config
subPath: sabnzbd
- mountPath: /tmp
- name: tmp
+ name: tmp
env: *bootstrap_env
securityContext: *default_securitycontext
resources: *default_resources
@@ -2542,7 +2463,7 @@ sabnzbd:
do
echo "Waiting for SIGTERM to backup queue from /tmp"
sleep infinity
- done
+ done
volumeMounts:
- mountPath: /config
name: config
@@ -2550,7 +2471,7 @@ sabnzbd:
name: tmp
env: *bootstrap_env
securityContext: *default_securitycontext
- resources: *default_resources
+ resources: *default_resources
env:
HOST_WHITELIST_ENTRIES: "{{ .Release.Name }}.sabnzbd.elfhosted.com"
@@ -2570,8 +2491,8 @@ tautulli:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-tautulli"
# Always prefer to cohabit with zurg
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -2625,15 +2546,15 @@ tautulli:
cpu: 1
memory: 1Gi
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: tautulli
- bootstrap:
+ subPath: tautulli
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -2644,7 +2565,7 @@ tautulli:
name: config
subPath: tautulli
- mountPath: /tmp
- name: tmp
+ name: tmp
setup:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -2678,8 +2599,8 @@ radarr: &app_radarr
enabled: false
podLabels:
app.elfhosted.com/name: radarr
- app.elfhosted.com/class: debrid
- priorityClassName: tenant-normal
+ app.elfhosted.com/class: debrid
+ priorityClassName: tenant-normal
image:
registry: ghcr.io
repository: elfhosted/radarr
@@ -2698,14 +2619,14 @@ radarr: &app_radarr
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-radarr" # Reload the deployment every time the rclones change
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
automountServiceAccountToken: false
envFrom:
- configMapRef:
name: radarr-env
- secretRef:
- name: radarr-env
+ name: radarr-env
persistence:
<<: *storagemounts
config:
@@ -2721,7 +2642,7 @@ radarr: &app_radarr
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-radarr-ssd
+ claimName: config-radarr-ssd
logs:
enabled: true
type: custom
@@ -2729,7 +2650,7 @@ radarr: &app_radarr
subPath: radarr
volumeSpec:
persistentVolumeClaim:
- claimName: logs
+ claimName: logs
backup:
enabled: true
type: custom
@@ -2737,7 +2658,7 @@ radarr: &app_radarr
subPath: radarr
volumeSpec:
persistentVolumeClaim:
- claimName: backup
+ claimName: backup
elfbot:
enabled: "true"
type: "custom"
@@ -2755,21 +2676,21 @@ radarr: &app_radarr
http:
port: 7878
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: radarr
+ subPath: radarr
- mountPath: /config/logs
name: logs
- subPath: radarr
+ subPath: radarr
- mountPath: /config/Backups
name: backup
- subPath: radarr
- bootstrap:
+ subPath: radarr
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -2780,7 +2701,7 @@ radarr: &app_radarr
name: config
subPath: radarr
- mountPath: /tmp
- name: tmp
+ name: tmp
setup:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -2793,13 +2714,13 @@ radarr: &app_radarr
# for symlink downloads and imports
mkdir -p /storage/symlinks/downloads/radarr
- mkdir -p /storage/symlinks/movies
+ mkdir -p /storage/symlinks/movies
volumeMounts:
- mountPath: /storage/symlinks
name: symlinks
resources: *default_resources
- securityContext: *default_securitycontext
+ securityContext: *default_securitycontext
resources:
requests:
cpu: 1m
@@ -2832,8 +2753,8 @@ radarr4k: &app_radarr4k
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-radarr4k" # Reload the deployment every time the rclones change
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
automountServiceAccountToken: false
envFrom:
- configMapRef:
@@ -2845,7 +2766,7 @@ radarr4k: &app_radarr4k
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-radarr4k-ssd
+ claimName: config-radarr4k-ssd
config:
enabled: true
type: custom
@@ -2853,7 +2774,7 @@ radarr4k: &app_radarr4k
subPath: radarr4k
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
elfbot:
enabled: "true"
type: "custom"
@@ -2868,7 +2789,7 @@ radarr4k: &app_radarr4k
subPath: radarr4k
volumeSpec:
persistentVolumeClaim:
- claimName: logs
+ claimName: logs
backup:
enabled: true
type: custom
@@ -2876,7 +2797,7 @@ radarr4k: &app_radarr4k
subPath: radarr4k
volumeSpec:
persistentVolumeClaim:
- claimName: backup
+ claimName: backup
ingress:
main:
enabled: false
@@ -2887,7 +2808,7 @@ radarr4k: &app_radarr4k
http:
port: 7878
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
@@ -2897,11 +2818,11 @@ radarr4k: &app_radarr4k
subPath: radarr4k
- mountPath: /config/logs
name: logs
- subPath: radarr4k
+ subPath: radarr4k
- mountPath: /config/Backups
name: backup
- subPath: radarr4k
- bootstrap:
+ subPath: radarr4k
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -2912,7 +2833,7 @@ radarr4k: &app_radarr4k
name: config
subPath: radarr4k
- mountPath: /tmp
- name: tmp
+ name: tmp
setup:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -2925,13 +2846,13 @@ radarr4k: &app_radarr4k
# for symlink downloads and imports
mkdir -p /storage/symlinks/downloads/radarr4k
- mkdir -p /storage/symlinks/movies-4k
+ mkdir -p /storage/symlinks/movies-4k
volumeMounts:
- mountPath: /storage/symlinks
name: symlinks
resources: *default_resources
- securityContext: *default_securitycontext
+ securityContext: *default_securitycontext
resources:
requests:
cpu: 1m
@@ -2950,8 +2871,8 @@ ombi:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-ombi"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -3000,15 +2921,15 @@ ombi:
http:
port: 5000
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: ombi
- bootstrap:
+ subPath: ombi
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -3019,7 +2940,7 @@ ombi:
name: config
subPath: ombi
- mountPath: /tmp
- name: tmp
+ name: tmp
resources:
requests:
cpu: 3m
@@ -3036,7 +2957,7 @@ scannarr: &app_scannarr
image:
registry: ghcr.io
repository: elfhosted/scannarr
- tag: rolling@sha256:c9cbc74b5dff7a25b5c32ecc0e081498d77c85af4725dcd9e5c9a74df35d865d
+ tag: rolling@sha256:c9cbc74b5dff7a25b5c32ecc0e081498d77c85af4725dcd9e5c9a74df35d865d
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -3051,11 +2972,11 @@ scannarr: &app_scannarr
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-scannarr"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
automountServiceAccountToken: false
persistence:
- tmp: *tmp
+ tmp: *tmp
sonarr-settings:
enabled: "true"
mountPath: "/app/settings_sonarr.json"
@@ -3063,7 +2984,7 @@ scannarr: &app_scannarr
type: "custom"
volumeSpec:
configMap:
- name: scannarr-config
+ name: scannarr-config
radarr-settings:
enabled: "true"
mountPath: "/app/settings_radarr.json"
@@ -3071,7 +2992,7 @@ scannarr: &app_scannarr
type: "custom"
volumeSpec:
configMap:
- name: scannarr-config
+ name: scannarr-config
ingress:
main:
enabled: false
@@ -3080,22 +3001,22 @@ scannarr: &app_scannarr
enabled: true
ports:
http:
- port: 9898 # doesn't matter this doesn,t actually use ports
+ port: 9898 # doesn't matter this doesn,t actually use ports
additionalContainers:
podinfo:
image: stefanprodan/podinfo # used to run probes from gatus
resources: *default_resources
-
-scannarr4k:
+
+scannarr4k:
<<: *app_scannarr
enabled: false
controller:
annotations:
- configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-scannarr4k"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-scannarr4k"
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
persistence:
- tmp: *tmp
+ tmp: *tmp
sonarr-settings:
enabled: "true"
mountPath: "/app/settings_sonarr.json"
@@ -3103,7 +3024,7 @@ scannarr4k:
type: "custom"
volumeSpec:
configMap:
- name: scannarr4k-config
+ name: scannarr4k-config
radarr-settings:
enabled: "true"
mountPath: "/app/settings_radarr.json"
@@ -3111,7 +3032,7 @@ scannarr4k:
type: "custom"
volumeSpec:
configMap:
- name: scannarr4k-config
+ name: scannarr4k-config
bazarr:
enabled: false
@@ -3132,8 +3053,8 @@ bazarr:
allowPrivilegeEscalation: false
runAsUser: 568
runAsGroup: 568
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
podSecurityContext:
seccompProfile:
type: RuntimeDefault
@@ -3142,7 +3063,7 @@ bazarr:
automountServiceAccountToken: false
envFrom:
- configMapRef:
- name: bazarr-config
+ name: bazarr-config
persistence:
<<: *storagemounts
migration:
@@ -3165,7 +3086,7 @@ bazarr:
volumeSpec:
configMap:
name: elfbot-bazarr
- optional: true
+ optional: true
ingress:
main:
enabled: false
@@ -3176,15 +3097,15 @@ bazarr:
http:
port: 6767
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: bazarr
- bootstrap:
+ subPath: bazarr
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -3195,7 +3116,7 @@ bazarr:
name: config
subPath: bazarr
- mountPath: /tmp
- name: tmp
+ name: tmp
resources:
requests:
cpu: 1m
@@ -3223,8 +3144,8 @@ bazarr4k:
allowPrivilegeEscalation: false
runAsUser: 568
runAsGroup: 568
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
podSecurityContext:
seccompProfile:
type: RuntimeDefault
@@ -3233,7 +3154,7 @@ bazarr4k:
automountServiceAccountToken: false
envFrom:
- configMapRef:
- name: bazarr4k-config
+ name: bazarr4k-config
persistence:
<<: *storagemounts
migration:
@@ -3256,7 +3177,7 @@ bazarr4k:
volumeSpec:
configMap:
name: elfbot-bazarr4k
- optional: true
+ optional: true
ingress:
main:
enabled: false
@@ -3267,15 +3188,15 @@ bazarr4k:
http:
port: 6767
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: bazarr4k
- bootstrap:
+ subPath: bazarr4k
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -3286,7 +3207,7 @@ bazarr4k:
name: config
subPath: bazarr4k
- mountPath: /tmp
- name: tmp
+ name: tmp
resources:
requests:
cpu: 1m
@@ -3307,10 +3228,10 @@ filebrowser:
kubernetes.io/egress-bandwidth: "5M" # filebrowser is not for streaming
# Always prefer to cohabit with zurg
affinity: *standard_affinity
- tolerations: *standard_tolerations
+ tolerations: *standard_tolerations
envFrom:
- configMapRef:
- name: filebrowser-env
+ name: filebrowser-env
priorityClassName: tenant-normal
securityContext:
seccompProfile:
@@ -3331,7 +3252,7 @@ filebrowser:
replicas: 1 # not sure we need 2 replicas anymore
strategy: Recreate
# rollingUpdate:
- # unavailable: 1
+ # unavailable: 1
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,filebrowser-elfbot-script,elfbot-filebrowser" # Reload the deployment every time the rclones change
# We will use this to alter configmaps to trigger pod restarts
@@ -3341,30 +3262,35 @@ filebrowser:
automountServiceAccountToken: true
persistence:
<<: *storagemounts
- <<: *storageconfigmounts
+ config:
+ enabled: true
+ type: custom
+ mountPath: /storage/config
+ volumeSpec:
+ persistentVolumeClaim:
+ claimName: config
logs:
enabled: true
type: custom
mountPath: /storage/logs
volumeSpec:
persistentVolumeClaim:
- claimName: logs
- elfterm:
+ claimName: logs
+ elfterm-migration:
enabled: true
type: custom
- mountPath: /home/elfie/.config/
volumeSpec:
persistentVolumeClaim:
- claimName: config-elfterm-ssd
+ claimName: config-elfterm-ssd
elfterm-state: # so auto-provisioning doesn't break
enabled: true
type: emptyDir
sizeLimit: 1Gi
- mouthPath: /home/elfie/.local/state
+ mouthPath: /home/elfie/.local/state
dummy-storage: # so auto-provisioning doesn't break
enabled: true
type: emptyDir
- sizeLimit: 1Gi
+ sizeLimit: 1Gi
elfbot:
enabled: true
type: emptyDir
@@ -3378,7 +3304,7 @@ filebrowser:
volumeSpec:
configMap:
name: filebrowser-elfbot-script
- defaultMode: 0755
+ defaultMode: 0755
elfbot-script-ucfirst:
enabled: "true"
mountPath: "/usr/local/bin/Elfbot" # make it easier for mobile users
@@ -3394,7 +3320,13 @@ filebrowser:
volumeSpec:
configMap:
name: recyclarr-config
- symlinks: *symlinks
+ symlinks: *symlinks
+ migration:
+ enabled: true
+ type: custom
+ volumeSpec:
+ persistentVolumeClaim:
+ claimName: symlinks-ssd
tmp: *tmp
service:
main:
@@ -3423,7 +3355,7 @@ filebrowser:
then
rm /tmp/filebrowser.db
fi
-
+
/filebrowser config init \
--disable-preview-resize \
@@ -3468,39 +3400,64 @@ filebrowser:
fi
volumeMounts:
- mountPath: /config/
- name: homer
+ name: config
subPath: recyclarr
- name: recyclarr-config
mountPath: "/bootstrap/"
- securityContext: *default_securitycontext
- # migrate-symlinks:
- # image: *tooling_image
- # imagePullPolicy: IfNotPresent
- # command:
- # - /bin/bash
- # - -c
- # - |
- # set -x
- # if [[ ! -f /storage/symlinks/.i-was-migrated-to-cephfs-b ]]
- # then
- # if [[ ! -z "$(ls -A /migration)" ]]
- # then
- # echo "Tar-migrating from /migration/..."
- # tar --exclude='.symlink_cache' --exclude='real-debrid-blackhole' --exclude='import' -cf - -C /migration/ . | tar xvmf - -C /storage/symlinks/
- # touch /storage/symlinks/.i-was-migrated-to-cephfs-b
- # fi
- # fi
- # volumeMounts:
- # - mountPath: /migration
- # name: migration
- # subPath: symlinks
- # - name: symlinks
- # mountPath: /storage/symlinks
- # securityContext: *default_securitycontext
+ securityContext: *default_securitycontext
+ migrate-symlinks:
+ image: *tooling_image
+ imagePullPolicy: IfNotPresent
+ command:
+ - /bin/bash
+ - -c
+ - |
+ set -x
+ if [[ ! -f /storage/symlinks/.migrated-to-topolvm ]]
+ then
+ if [[ ! -z "$(ls -A /migration)" ]]
+ then
+ echo "Tar-migrating from /migration/..."
+ tar --exclude='.symlink_cache' --exclude='real-debrid-blackhole' --exclude='import' -cf - -C /migration/ . | tar xvmf - -C /storage/symlinks/
+ touch /storage/symlinks/.migrated-to-topolvm
+ fi
+ fi
+ volumeMounts:
+ - mountPath: /migration
+ name: migration
+ - name: symlinks
+ mountPath: /storage/symlinks
+ securityContext: *default_securitycontext
+ migrate-elfterm:
+ image: *tooling_image
+ imagePullPolicy: IfNotPresent
+ command:
+ - /bin/bash
+ - -c
+ - |
+
+ if [[ ! -f /config/.migrated-20241007 ]]
+ then
+ if [[ ! -z "$(ls -A /migration)" ]]
+ then
+ echo "Migrating from /migration/..."
+ cp -rfpv /migration/* /config/
+ touch /config/.migrated-20241007
+ fi
+ else
+ echo "No migration necessary"
+ fi
+ volumeMounts:
+ - mountPath: /migration
+ name: elfterm-migration
+ - mountPath: /config
+ name: config
+ subPath: elfterm
+ securityContext: *default_securitycontext
additionalContainers:
# this container exists to watch for restarts requested by elfbot, and to use create configmaps to trigger restarts using reloader
elfbot:
- image: *tooling_image
+ image: *tooling_image
imagePullPolicy: IfNotPresent
command:
- /usr/bin/dumb-init
@@ -3516,7 +3473,7 @@ filebrowser:
echo "forcerestart requested, deleting $APP pod with --force.."
kubectl delete pod -l app.kubernetes.io/name=$APP --force
kubectl delete pod -l app.elfhosted.com/name=$APP --force
- else
+ else
# put the contents of the file into the configmap which will trigger the restart
echo command received for ${APP} : [$(cat /elfbot/$APP)]
@@ -3530,7 +3487,7 @@ filebrowser:
COMMAND=$(cat /elfbot/$APP)
# We separate key and value with an '=', but sometimes the value may contain __another__ '=' (like Plex preferences)
- sep='='
+ sep='='
case $COMMAND in
# If we are separated by an =
(*"$sep"*)
@@ -3583,7 +3540,7 @@ uptimekuma:
type: RuntimeDefault
fsGroup: 568
fsGroupChangePolicy: "OnRootMismatch"
- affinity: *standard_affinity
+ affinity: *standard_affinity
tolerations: *standard_tolerations
automountServiceAccountToken: false
persistence:
@@ -3613,7 +3570,7 @@ uptimekuma:
http:
port: 3001
initContainers:
- bootstrap:
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -3624,7 +3581,7 @@ uptimekuma:
name: config
subPath: uptimekuma
- mountPath: /tmp
- name: tmp
+ name: tmp
resources:
requests:
cpu: 1m
@@ -3640,12 +3597,12 @@ privatebin:
image:
repository: privatebin/fs
tag: 1.7.4
- priorityClassName:
+ priorityClassName:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-privatebin"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -3691,7 +3648,7 @@ privatebin:
limits:
cpu: 100m
memory: 128Mi
- ephemeral-storage: 100Mi # a safety net against node ephemeral space exhaustion
+ ephemeral-storage: 100Mi # a safety net against node ephemeral space exhaustion
config:
main:
discussion: false
@@ -3728,75 +3685,7 @@ privatebin:
limit: 10
# exemptedIp: "1.2.3.4,10.10.10/24"
-nzbhydra:
- enabled: false
- sso:
- enabled: true
- image:
- registry: ghcr.io
- repository: elfhosted/nzbhydra2
- tag: 7.2.3@sha256:1d5c9bd87f5a702509e0288a87d37accd1c4f0823c1a1d7e54e83a0cf0a4a01f
- priorityClassName:
- controller:
- annotations:
- configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-nzbhydra"
- securityContext:
- seccompProfile:
- type: RuntimeDefault
- readOnlyRootFilesystem: true
- allowPrivilegeEscalation: false
- runAsUser: 568
- runAsGroup: 568
- podSecurityContext:
- seccompProfile:
- type: RuntimeDefault
- fsGroup: 568
- fsGroupChangePolicy: "OnRootMismatch"
- probes:
- liveness:
- enabled: false
- startup:
- enabled: false
- readiness:
- enabled: false
- automountServiceAccountToken: false
- persistence:
- <<: *storagemounts
- config:
- enabled: true
- type: custom
- mountPath: /config/
- volumeSpec:
- persistentVolumeClaim:
- claimName: config-nzbhydra-ssd
- elfbot:
- enabled: "true"
- type: "custom"
- volumeSpec:
- configMap:
- name: elfbot-nzbhydra
- optional: true
- tmp: *tmp
- ingress:
- main:
- enabled: false
- service:
- main:
- enabled: true # necessary for probes
- ports:
- http:
- port: 5076
- resources:
- requests:
- cpu: 1m
- memory: 128Mi
- limits:
- cpu: 1
- memory: 1Gi
- initContainers:
- bootstrap: *bootstrap
-
-kapowarr:
+kapowarr:
enabled: false
sso:
enabled: true
@@ -3804,12 +3693,12 @@ kapowarr:
registry: ghcr.io
repository: elfhosted/kapowarr
tag: V1.0.0@sha256:09443693816a5152c0e6beeb0afe810e8bdf048c8a641009b2d2dea50140ce1e
- priorityClassName:
+ priorityClassName:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-kapowarr"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -3837,7 +3726,7 @@ kapowarr:
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-kapowarr-ssd
+ claimName: config-kapowarr-ssd
config:
enabled: true
type: custom
@@ -3845,12 +3734,12 @@ kapowarr:
subPath: kapowarr
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
temp-downloads:
enabled: true
type: emptyDir
mountPath: /app/temp_downloads
- sizeLimit: 10Gi
+ sizeLimit: 10Gi
elfbot:
enabled: "true"
type: "custom"
@@ -3876,15 +3765,15 @@ kapowarr:
cpu: 1
memory: 1Gi
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: kapowarr
- bootstrap:
+ subPath: kapowarr
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -3895,7 +3784,7 @@ kapowarr:
name: config
subPath: kapowarr
- mountPath: /tmp
- name: tmp
+ name: tmp
setup:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -3912,22 +3801,22 @@ kapowarr:
volumeMounts:
- mountPath: /storage/symlinks
- name: symlinks
+ name: symlinks
resources: *default_resources
- securityContext: *default_securitycontext
+ securityContext: *default_securitycontext
calibreweb:
enabled: false
- priorityClassName:
+ priorityClassName:
image:
repository: linuxserver/calibre-web
tag: nightly-version-0ff3f0f4
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-calibreweb"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -3963,13 +3852,14 @@ calibreweb:
volumeSpec:
persistentVolumeClaim:
claimName: config
- config-calibre:
- enabled: true
- type: custom
- mountPath: /calibre/
- volumeSpec:
- persistentVolumeClaim:
- claimName: config-calibre-ssd
+ # config-calibre:
+ # enabled: true
+ # type: custom
+ # mountPath: /calibre/
+ # subPath: calibre
+ # volumeSpec:
+ # persistentVolumeClaim:
+ # claimName: config
elfbot:
enabled: "true"
type: "custom"
@@ -3986,15 +3876,15 @@ calibreweb:
http:
port: 8083
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: calibreweb
- bootstrap:
+ subPath: calibreweb
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -4005,7 +3895,7 @@ calibreweb:
name: config
subPath: calibreweb
- mountPath: /tmp
- name: tmp
+ name: tmp
resources:
requests:
cpu: 1m
@@ -4018,7 +3908,7 @@ pyload:
enabled: false
sso:
enabled: true
- priorityClassName:
+ priorityClassName:
image:
repository: ghcr.io/geek-cookbook/pyload-ng
tag: 0.5.0b3.dev71@sha256:17b0414059c2aad0ae0318244a4f024f3e54851430ad6d44bedba260466c78d2
@@ -4029,8 +3919,8 @@ pyload:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-pyload"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -4071,15 +3961,15 @@ pyload:
persistentVolumeClaim:
claimName: config
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: pyload
- bootstrap:
+ subPath: pyload
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -4090,7 +3980,7 @@ pyload:
name: config
subPath: pyload
- mountPath: /tmp
- name: tmp
+ name: tmp
ingress:
main:
enabled: false
@@ -4119,8 +4009,8 @@ lazylibrarian:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-lazylibrarian"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -4158,8 +4048,8 @@ lazylibrarian:
ports:
http:
port: 5299
- initContainers:
- bootstrap:
+ initContainers:
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -4170,7 +4060,7 @@ lazylibrarian:
name: config
subPath: lazylibrarian
- mountPath: /tmp
- name: tmp
+ name: tmp
resources:
requests:
cpu: 1m
@@ -4203,8 +4093,8 @@ mylar:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-mylar" # Reload the deployment every time the rclones change
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
automountServiceAccountToken: false
persistence:
<<: *storagemounts
@@ -4234,7 +4124,7 @@ mylar:
http:
port: 8090
initContainers:
- bootstrap:
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -4245,7 +4135,7 @@ mylar:
name: config
subPath: mylar
- mountPath: /tmp
- name: tmp
+ name: tmp
resources:
requests:
cpu: 1m
@@ -4279,8 +4169,8 @@ komga:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-komga" # Reload the deployment every time the rclones change
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
persistence:
<<: *storagemounts
migration:
@@ -4314,15 +4204,15 @@ komga:
http:
port: 25600
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: komga
- bootstrap:
+ subPath: komga
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -4333,7 +4223,7 @@ komga:
name: config
subPath: komga
- mountPath: /tmp
- name: tmp
+ name: tmp
resources:
requests:
cpu: 1m
@@ -4361,8 +4251,8 @@ kavita:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-kavita" # Reload the deployment every time the rclones change
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
persistence:
<<: *storagemounts
migration:
@@ -4378,7 +4268,7 @@ kavita:
subPath: kavita
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
elfbot:
enabled: "true"
type: "custom"
@@ -4387,15 +4277,15 @@ kavita:
name: elfbot-kavita
optional: true
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: kavita
- bootstrap:
+ subPath: kavita
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -4406,7 +4296,7 @@ kavita:
name: config
subPath: kavita
- mountPath: /tmp
- name: tmp
+ name: tmp
ingress:
main:
enabled: false
@@ -4447,8 +4337,8 @@ calibre:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-calibre"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
automountServiceAccountToken: false
persistence:
<<: *storagemounts
@@ -4465,7 +4355,7 @@ calibre:
subPath: calibre
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
elfbot:
enabled: "true"
type: "custom"
@@ -4499,15 +4389,15 @@ calibre:
cpu: 1
memory: 4Gi
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: calibre
- bootstrap:
+ subPath: calibre
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -4518,17 +4408,17 @@ calibre:
name: config
subPath: calibre
- mountPath: /tmp
- name: tmp
+ name: tmp
envFrom:
- configMapRef:
- name: elfbot-calibre
+ name: elfbot-calibre
optional: true
sonarr: &app_sonarr
enabled: false
podLabels:
app.elfhosted.com/name: sonarr
- app.elfhosted.com/class: debrid
+ app.elfhosted.com/class: debrid
priorityClassName: tenant-normal
image:
registry: ghcr.io
@@ -4546,12 +4436,12 @@ sonarr: &app_sonarr
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-sonarr" # Reload the deployment every time the rclones change
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
automountServiceAccountToken: false
envFrom:
- configMapRef:
- name: sonarr-env
+ name: sonarr-env
persistence:
<<: *storagemounts
migration:
@@ -4559,7 +4449,7 @@ sonarr: &app_sonarr
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-sonarr-ssd
+ claimName: config-sonarr-ssd
config:
enabled: true
type: custom
@@ -4567,7 +4457,7 @@ sonarr: &app_sonarr
subPath: sonarr
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
logs:
enabled: true
type: custom
@@ -4575,7 +4465,7 @@ sonarr: &app_sonarr
subPath: sonarr
volumeSpec:
persistentVolumeClaim:
- claimName: logs
+ claimName: logs
backup:
enabled: true
type: custom
@@ -4583,14 +4473,14 @@ sonarr: &app_sonarr
subPath: sonarr
volumeSpec:
persistentVolumeClaim:
- claimName: backup
+ claimName: backup
elfbot:
enabled: "true"
type: "custom"
volumeSpec:
configMap:
name: elfbot-sonarr
- optional: true
+ optional: true
ingress:
main:
enabled: false
@@ -4599,23 +4489,23 @@ sonarr: &app_sonarr
enabled: true # necessary for probes
ports:
http:
- port: 8989
+ port: 8989
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: sonarr
+ subPath: sonarr
- mountPath: /config/logs
name: logs
- subPath: sonarr
+ subPath: sonarr
- mountPath: /config/Backups
name: backup
- subPath: sonarr
- bootstrap:
+ subPath: sonarr
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -4626,7 +4516,7 @@ sonarr: &app_sonarr
name: config
subPath: sonarr
- mountPath: /tmp
- name: tmp
+ name: tmp
setup:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -4643,9 +4533,9 @@ sonarr: &app_sonarr
volumeMounts:
- mountPath: /storage/symlinks
- name: symlinks
+ name: symlinks
resources: *default_resources
- securityContext: *default_securitycontext
+ securityContext: *default_securitycontext
resources:
requests:
cpu: 1m
@@ -4676,12 +4566,12 @@ sonarr4k: &app_sonarr4k
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-sonarr4k" # Reload the deployment every time the rclones change
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
automountServiceAccountToken: false
envFrom:
- configMapRef:
- name: sonarr4k-env
+ name: sonarr4k-env
persistence:
<<: *storagemounts
migration:
@@ -4689,7 +4579,7 @@ sonarr4k: &app_sonarr4k
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-sonarr4k-ssd
+ claimName: config-sonarr4k-ssd
config:
enabled: true
type: custom
@@ -4697,7 +4587,7 @@ sonarr4k: &app_sonarr4k
subPath: sonarr4k
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
logs:
enabled: true
type: custom
@@ -4705,7 +4595,7 @@ sonarr4k: &app_sonarr4k
subPath: sonarr4k
volumeSpec:
persistentVolumeClaim:
- claimName: logs
+ claimName: logs
backup:
enabled: true
type: custom
@@ -4713,14 +4603,14 @@ sonarr4k: &app_sonarr4k
subPath: sonarr4k
volumeSpec:
persistentVolumeClaim:
- claimName: backup
+ claimName: backup
elfbot:
enabled: "true"
type: "custom"
volumeSpec:
configMap:
name: elfbot-sonarr4k
- optional: true
+ optional: true
ingress:
main:
enabled: false
@@ -4729,9 +4619,9 @@ sonarr4k: &app_sonarr4k
enabled: true # necessary for probes
ports:
http:
- port: 8989
+ port: 8989
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
@@ -4741,11 +4631,11 @@ sonarr4k: &app_sonarr4k
subPath: sonarr4k
- mountPath: /config/logs
name: logs
- subPath: sonarr4k
+ subPath: sonarr4k
- mountPath: /config/Backups
name: backup
- subPath: sonarr4k
- bootstrap:
+ subPath: sonarr4k
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -4756,7 +4646,7 @@ sonarr4k: &app_sonarr4k
name: config
subPath: sonarr4k
- mountPath: /tmp
- name: tmp
+ name: tmp
setup:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -4773,9 +4663,9 @@ sonarr4k: &app_sonarr4k
volumeMounts:
- mountPath: /storage/symlinks
- name: symlinks
+ name: symlinks
resources: *default_resources
- securityContext: *default_securitycontext
+ securityContext: *default_securitycontext
resources:
requests:
cpu: 1m
@@ -4811,8 +4701,8 @@ resiliosync:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-resiliosync"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
podSecurityContext:
seccompProfile:
type: RuntimeDefault
@@ -4850,7 +4740,7 @@ resiliosync:
name: elfbot-resiliosync
optional: true
initContainers:
- bootstrap:
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -4861,7 +4751,7 @@ resiliosync:
name: config
subPath: resiliosync
- mountPath: /tmp
- name: tmp
+ name: tmp
ingress:
main:
enabled: false
@@ -4893,8 +4783,8 @@ prowlarr: &app_prowlarr
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-prowlarr"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
automountServiceAccountToken: false
persistence:
<<: *storagemounts
@@ -4903,7 +4793,7 @@ prowlarr: &app_prowlarr
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-prowlarr-ssd
+ claimName: config-prowlarr-ssd
config:
enabled: true
type: custom
@@ -4919,7 +4809,7 @@ prowlarr: &app_prowlarr
subPath: prowlarr
volumeSpec:
persistentVolumeClaim:
- claimName: logs
+ claimName: logs
backup:
enabled: true
type: custom
@@ -4927,7 +4817,7 @@ prowlarr: &app_prowlarr
subPath: prowlarr
volumeSpec:
persistentVolumeClaim:
- claimName: backup
+ claimName: backup
run: # used for s6-init with non-root
enabled: true
type: emptyDir
@@ -4950,21 +4840,21 @@ prowlarr: &app_prowlarr
http:
port: 9696
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: prowlarr
+ subPath: prowlarr
- mountPath: /config/logs
name: logs
- subPath: prowlarr
+ subPath: prowlarr
- mountPath: /config/Backups
name: backup
- subPath: prowlarr
- bootstrap:
+ subPath: prowlarr
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -4975,7 +4865,7 @@ prowlarr: &app_prowlarr
name: config
subPath: prowlarr
- mountPath: /tmp
- name: tmp
+ name: tmp
setup:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -4997,7 +4887,7 @@ prowlarr: &app_prowlarr
# Get custom torrent.io definition
mkdir -p /config/Definitions/Custom
-
+
if [ -f /config/Definitions/Custom/elfhosted-torrentio.yml ]; then rm /config/Definitions/Custom/elfhosted-torrentio.yml; fi
curl https://raw.githubusercontent.com/geek-cookbook/elfhosted-prowlarr-indexers/main/Custom/torrentio.yml > /config/Definitions/Custom/torrentio.yml
curl https://raw.githubusercontent.com/geek-cookbook/elfhosted-prowlarr-indexers/main/Custom/elfhosted-internal.yml > /config/Definitions/Custom/elfhosted-internal.yml
@@ -5043,12 +4933,12 @@ lidarr:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-lidarr" # Reload the deployment every time the rclones change
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
automountServiceAccountToken: false
envFrom:
- configMapRef:
- name: lidarr-config
+ name: lidarr-config
persistence:
<<: *storagemounts
config:
@@ -5064,7 +4954,7 @@ lidarr:
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-lidarr-ssd
+ claimName: config-lidarr-ssd
s6:
enabled: true
type: emptyDir
@@ -5076,7 +4966,7 @@ lidarr:
volumeSpec:
configMap:
name: elfbot-lidarr
- optional: true
+ optional: true
ingress:
main:
enabled: false
@@ -5087,15 +4977,15 @@ lidarr:
http:
port: 8686
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: lidarr
- bootstrap:
+ subPath: lidarr
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -5106,7 +4996,7 @@ lidarr:
name: config
subPath: lidarr
- mountPath: /tmp
- name: tmp
+ name: tmp
setup:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -5123,7 +5013,7 @@ lidarr:
volumeMounts:
- mountPath: /config
- name: config
+ name: config
subPath: lidarr
resources: *default_resources
securityContext: *default_securitycontext
@@ -5157,8 +5047,8 @@ navidrome:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-navidrome"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
automountServiceAccountToken: false
env:
ND_MUSICFOLDER: /tmp
@@ -5181,7 +5071,7 @@ navidrome:
name: elfbot-navidrome
optional: true
initContainers:
- bootstrap:
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -5192,7 +5082,7 @@ navidrome:
name: config
subPath: navidrome
- mountPath: /tmp
- name: tmp
+ name: tmp
ingress:
main:
enabled: false
@@ -5232,12 +5122,12 @@ readarr:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-readarr" # Reload the deployment every time the rclones change
- affinity: *standard_affinity
+ affinity: *standard_affinity
tolerations: *standard_tolerations
automountServiceAccountToken: false
envFrom:
- configMapRef:
- name: readarr-config
+ name: readarr-config
persistence:
<<: *storagemounts
migration:
@@ -5245,7 +5135,7 @@ readarr:
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-readarr-ssd
+ claimName: config-readarr-ssd
config:
enabled: true
type: custom
@@ -5253,7 +5143,7 @@ readarr:
subPath: readarr
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
logs:
enabled: true
type: custom
@@ -5261,7 +5151,7 @@ readarr:
subPath: readarr
volumeSpec:
persistentVolumeClaim:
- claimName: logs
+ claimName: logs
tmp-readarr-backup:
enabled: true
type: emptyDir
@@ -5273,7 +5163,7 @@ readarr:
volumeSpec:
configMap:
name: elfbot-readarr
- optional: true
+ optional: true
ingress:
main:
enabled: false
@@ -5284,15 +5174,15 @@ readarr:
http:
port: 8787
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: readarr
- bootstrap:
+ subPath: readarr
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -5303,7 +5193,7 @@ readarr:
name: config
subPath: readarr
- mountPath: /tmp
- name: tmp
+ name: tmp
setup:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -5315,10 +5205,10 @@ readarr:
set -e
# Set auth to external
- # sed -i "s|None|External|" /config/config.xml
+ # sed -i "s|None|External|" /config/config.xml
volumeMounts:
- mountPath: /config
- name: config
+ name: config
subPath: readarr
resources: *default_resources
securityContext: *default_securitycontext
@@ -5343,8 +5233,8 @@ readarraudio:
seccompProfile:
type: RuntimeDefault
readOnlyRootFilesystem: true
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
podSecurityContext:
seccompProfile:
type: RuntimeDefault
@@ -5355,7 +5245,7 @@ readarraudio:
automountServiceAccountToken: false
envFrom:
- configMapRef:
- name: readarraudio-config
+ name: readarraudio-config
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-readarraudio" # Reload the deployment every time the rclones change
@@ -5368,13 +5258,13 @@ readarraudio:
subPath: readarraudio
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
migration:
enabled: true
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-readarraudio-ssd
+ claimName: config-readarraudio-ssd
logs:
enabled: true
type: custom
@@ -5382,7 +5272,7 @@ readarraudio:
subPath: readarraudio
volumeSpec:
persistentVolumeClaim:
- claimName: logs
+ claimName: logs
tmp-readarr-backup:
enabled: true
type: emptyDir
@@ -5394,7 +5284,7 @@ readarraudio:
volumeSpec:
configMap:
name: elfbot-readarraudio
- optional: true
+ optional: true
ingress:
main:
enabled: false
@@ -5405,15 +5295,15 @@ readarraudio:
http:
port: 8787
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: readarraudio
- bootstrap:
+ subPath: readarraudio
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -5424,7 +5314,7 @@ readarraudio:
name: config
subPath: readarraudio
- mountPath: /tmp
- name: tmp
+ name: tmp
setup:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -5439,7 +5329,7 @@ readarraudio:
# sed -i "s|None|External|" /config/config.xml
volumeMounts:
- mountPath: /config
- name: config
+ name: config
subPath: readarraudio
resources: *default_resources
securityContext: *default_securitycontext
@@ -5459,7 +5349,7 @@ plex: &app_plex
app.elfhosted.com/class: debrid
podAnnotations:
kubernetes.io/egress-bandwidth: "150M" # tested with _kilos in Discord on a 97Mbit remux
- affinity: *standard_affinity
+ affinity: *standard_affinity
tolerations: *standard_tolerations
securityContext:
runAsUser: 568
@@ -5475,19 +5365,19 @@ plex: &app_plex
automountServiceAccountToken: false
controller:
annotations:
- configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-plex,elfbot-imagemaid,plex-config,imagemaid-env" # Reload the deployment every time the rclones change
+ configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-plex,elfbot-imagemaid,plex-config,imagemaid-env" # Reload the deployment every time the rclones change
image:
registry: ghcr.io
repository: elfhosted/plex
tag: rolling
persistence:
- <<: *storagemounts
+ <<: *storagemounts
migration:
enabled: true
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-plex-block
+ claimName: config-plex-block
config:
enabled: true
type: custom
@@ -5495,7 +5385,7 @@ plex: &app_plex
subPath: plex
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
transcode: # in case users use /tmp
enabled: true
mountPath: /transcode
@@ -5505,7 +5395,7 @@ plex: &app_plex
enabled: true
mountPath: /phototranscoder
type: emptyDir
- sizeLimit: 50Gi
+ sizeLimit: 50Gi
elfbot:
enabled: "true"
type: "custom"
@@ -5523,14 +5413,14 @@ plex: &app_plex
mountPath: /shared
type: emptyDir
volumeSpec:
- medium: Memory
+ medium: Memory
tooling-scripts:
enabled: "true"
type: "custom"
volumeSpec:
configMap:
- name: tooling-scripts
- defaultMode: 0755
+ name: tooling-scripts
+ defaultMode: 0755
ingress:
main:
enabled: false
@@ -5554,15 +5444,15 @@ plex: &app_plex
cpu: "2" # 1.5 works, but results in buffering when playback starts, see https://github.com/elfhosted/charts/issues/501
memory: 4Gi
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: plex
- bootstrap:
+ subPath: plex
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -5587,14 +5477,14 @@ plex: &app_plex
grep -q PlexOnlineToken /config/Library/Application\ Support/Plex\ Media\ Server/Preferences.xml || (
echo "Plex is not claimed yet, no point repairing" && exit 0)
- # If the DB directory exists, then repair
+ # If the DB directory exists, then repair
if [ -d "/config/Library/Application Support/Plex Media Server/Plug-in Support/Databases" ]; then
- /usr/local/bin/DBRepair.sh --sqlite /usr/lib/plexmediaserver --databases '/config/Library/Application Support/Plex Media Server/Plug-in Support/Databases' auto
+ /usr/local/bin/DBRepair.sh --sqlite /usr/lib/plexmediaserver --databases '/config/Library/Application Support/Plex Media Server/Plug-in Support/Databases' auto
fi
volumeMounts:
- mountPath: /config
- name: config
- subPath: plex
+ name: config
+ subPath: plex
setup:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -5616,7 +5506,7 @@ plex: &app_plex
name: config
subPath: plex
- mountPath: /phototranscoder
- name: phototranscoder
+ name: phototranscoder
# can't use default resources because the ephemeral limit kicks out /phototranscoder later
# resources: *default_resources
securityContext: *default_securitycontext
@@ -5650,7 +5540,7 @@ plex: &app_plex
securityContext:
seccompProfile:
type: RuntimeDefault
- readOnlyRootFilesystem: false
+ readOnlyRootFilesystem: false
additionalContainers:
clean-up-dns: *clean_up_dns_on_termination
speedtest:
@@ -5660,7 +5550,7 @@ plex: &app_plex
HTTP_PORT: "3002"
HTTPS_PORT: "3003"
imagemaid:
- image: ghcr.io/elfhosted/imagemaid:v1.1.1@sha256:8fb3b3b89431e4b0217ec268abbabfb67a970de3fb2d763f72847d558c1c8aa5
+ image: ghcr.io/elfhosted/imagemaid:v1.1.1@sha256:8fb3b3b89431e4b0217ec268abbabfb67a970de3fb2d763f72847d558c1c8aa5
volumeMounts:
- mountPath: /config
name: config
@@ -5671,7 +5561,7 @@ plex: &app_plex
envFrom:
- configMapRef:
name: imagemaid-env
- optional: true
+ optional: true
- configMapRef:
name: elfbot-imagemaid
optional: true
@@ -5684,7 +5574,7 @@ plex: &app_plex
tag: 3.39.1@sha256:47688e70bd1519bcedaf48270328d85a5405496330787e53371d23fa590af4d3
env:
FIREWALL_OUTBOUND_SUBNETS: 10.0.0.0/8
- DNS_KEEP_NAMESERVER: "on"
+ DNS_KEEP_NAMESERVER: "on"
HTTP_CONTROL_SERVER_PORT: "8000"
HTTP_CONTROL_SERVER_ADDRESS: ":8000"
VPN_TYPE: wireguard
@@ -5714,7 +5604,7 @@ plex: &app_plex
port: 32400
initialDelaySeconds: 30
periodSeconds: 10
- timeoutSeconds: 10
+ timeoutSeconds: 10
readiness:
custom: true
enabled: true
@@ -5742,12 +5632,12 @@ plexranger:
<<: *app_plex
podLabels:
app.elfhosted.com/name: plex
- app.elfhosted.com/class: dedicated
+ app.elfhosted.com/class: dedicated
podAnnotations:
- kubernetes.io/egress-bandwidth: "500M"
+ kubernetes.io/egress-bandwidth: "500M"
enabled: false
automountServiceAccountToken: false
- resources: *ranger_streamer_resources
+ resources: *ranger_streamer_resources
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-plex,plex-config"
@@ -5759,7 +5649,7 @@ jellyfin: &app_jellyfin
tag: 10.9.11@sha256:7d7d1024b821febf4dcb3c2c843c700986ecf743cc949423701ceadfb62ba9fb
enabled: false
podLabels:
- app.elfhosted.com/class: debrid
+ app.elfhosted.com/class: debrid
app.elfhosted.com/name: jellyfin
podAnnotations:
kubernetes.io/egress-bandwidth: "150M" # tested with _kilos in Discord on a 97Mbit remux
@@ -5780,9 +5670,9 @@ jellyfin: &app_jellyfin
automountServiceAccountToken: false
controller:
annotations:
- configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-jellyfin" # Reload the deployment every time the rclones change
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-jellyfin" # Reload the deployment every time the rclones change
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
persistence:
<<: *storagemounts
config:
@@ -5792,13 +5682,13 @@ jellyfin: &app_jellyfin
subPath: jellyfin
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
migration:
enabled: true
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-jellyfin-block
+ claimName: config-jellyfin-block
transcode: # in case users use /tmp
enabled: true
type: custom
@@ -5821,18 +5711,18 @@ jellyfin: &app_jellyfin
type: "custom"
volumeSpec:
configMap:
- name: tooling-scripts
- defaultMode: 0755
+ name: tooling-scripts
+ defaultMode: 0755
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: jellyfin
- bootstrap:
+ subPath: jellyfin
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -5843,8 +5733,8 @@ jellyfin: &app_jellyfin
name: config
subPath: jellyfin
- mountPath: /tmp
- name: tmp
- update-dns: *update_dns_on_init
+ name: tmp
+ update-dns: *update_dns_on_init
setup:
image: *tooling_image
command:
@@ -5860,10 +5750,10 @@ jellyfin: &app_jellyfin
fi
if [[ -f /config/transcoding-temp ]]; then
rm -rf /config/transcoding-temp
- fi
+ fi
if [[ -f /config/transcodes ]]; then
rm -rf /config/transcodes
- fi
+ fi
# Make symlinks for various variations of transcode paths to /transcode
ln -sf /transcode /config/transcodes
@@ -5879,9 +5769,9 @@ jellyfin: &app_jellyfin
- mountPath: /config
name: config
- mountPath: /transcode
- name: transcode
+ name: transcode
securityContext: *default_securitycontext
- resources: *default_resources
+ resources: *default_resources
ingress:
main:
enabled: false
@@ -5931,27 +5821,27 @@ jellyfin: &app_jellyfin
memory: 4Gi
envFrom:
- configMapRef:
- name: jellyfin-config
+ name: jellyfin-config
additionalContainers:
clean-up-dns: *clean_up_dns_on_termination
speedtest:
- image: openspeedtest/latest
+ image: openspeedtest/latest
jellyfixer:
image: quay.io/xsteadfastx/jellyfixer:latest
env:
JELLYFIXER_INTERNAL_URL: http://jellyfin:8096
JELLYFIXER_EXTERNAL_URL: https://{{ .Release.Name }}-jellyfin.elfhosted.com
-
+
jellyfinranger:
<<: *app_jellyfin
podLabels:
app.elfhosted.com/name: jellyfin
- app.elfhosted.com/class: dedicated
+ app.elfhosted.com/class: dedicated
podAnnotations:
- kubernetes.io/egress-bandwidth: "500M"
+ kubernetes.io/egress-bandwidth: "500M"
enabled: false
automountServiceAccountToken: false
- resources: *ranger_streamer_resources
+ resources: *ranger_streamer_resources
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-jellyfin,elfbot-all"
@@ -5969,7 +5859,7 @@ emby: &app_emby
app.elfhosted.com/class: debrid
app.elfhosted.com/name: emby
podAnnotations:
- kubernetes.io/egress-bandwidth: "150M" # tested with _kilos in Discord on a 97Mbit remux
+ kubernetes.io/egress-bandwidth: "150M" # tested with _kilos in Discord on a 97Mbit remux
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -5986,8 +5876,8 @@ emby: &app_emby
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-emby" # Reload the deployment every time the rclones change
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
persistence:
<<: *storagemounts
migration:
@@ -5995,7 +5885,7 @@ emby: &app_emby
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-emby-block
+ claimName: config-emby-block
config:
enabled: true
type: custom
@@ -6003,7 +5893,7 @@ emby: &app_emby
subPath: emby
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
transcode: # in case users use /tmp
enabled: true
type: custom
@@ -6026,18 +5916,18 @@ emby: &app_emby
type: "custom"
volumeSpec:
configMap:
- name: tooling-scripts
- defaultMode: 0755
+ name: tooling-scripts
+ defaultMode: 0755
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: emby
- bootstrap:
+ subPath: emby
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -6048,7 +5938,7 @@ emby: &app_emby
name: config
subPath: emby
- mountPath: /tmp
- name: tmp
+ name: tmp
update-dns: *update_dns_on_init
setup:
image: *tooling_image
@@ -6065,15 +5955,15 @@ emby: &app_emby
fi
if [[ -f /config/transcoding-temp ]]; then
rm -rf /config/transcoding-temp
- fi
+ fi
if [[ -f /config/transcodes ]]; then
rm -rf /config/transcodes
- fi
+ fi
# Make symlinks for various variations of transcode paths to /transcode
ln -sf /transcode /config/transcodes
ln -sf /transcode /config/transcode
- ln -sf /transcode /config/transcoding-temp
+ ln -sf /transcode /config/transcoding-temp
# Also keep cache in /transcode
mkdir -p /transcode/cache
@@ -6085,13 +5975,13 @@ emby: &app_emby
name: config
subPath: emby
- mountPath: /transcode
- name: transcode
+ name: transcode
securityContext: *default_securitycontext
- resources: *default_resources
+ resources: *default_resources
additionalContainers:
- clean-up-dns: *clean_up_dns_on_termination
+ clean-up-dns: *clean_up_dns_on_termination
speedtest:
- image: openspeedtest/latest
+ image: openspeedtest/latest
ingress:
main:
enabled: false
@@ -6138,18 +6028,18 @@ emby: &app_emby
memory: 1Gi
limits:
cpu: 2
- memory: 4Gi
+ memory: 4Gi
embyranger:
<<: *app_emby
podLabels:
app.elfhosted.com/name: emby
- app.elfhosted.com/class: dedicated
+ app.elfhosted.com/class: dedicated
podAnnotations:
- kubernetes.io/egress-bandwidth: "500M"
+ kubernetes.io/egress-bandwidth: "500M"
enabled: false
automountServiceAccountToken: false
- resources: *ranger_streamer_resources
+ resources: *ranger_streamer_resources
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-emby,elfbot-all"
@@ -6162,7 +6052,7 @@ homer:
readOnlyRootFilesystem: true
runAsNonRoot: false
runAsUser: 568
- runAsGroup: 568
+ runAsGroup: 568
podSecurityContext:
runAsNonRoot: false
seccompProfile:
@@ -6178,12 +6068,34 @@ homer:
enabled: false
service:
main:
- enabled: true # necessary for probes
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ enabled: true # necessary for probes
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
podLabels:
- app.elfhosted.com/role: nodefinder # let this be an anchor for replicationdestinations
+ app.elfhosted.com/role: nodefinder # let this be an anchor for replicationdestinations
persistence:
+ <<: *storagemounts
+ logs:
+ enabled: true
+ type: custom
+ mountPath: /logs
+ volumeSpec:
+ persistentVolumeClaim:
+ claimName: logs
+ config:
+ enabled: true
+ type: custom
+ mountPath: /config
+ volumeSpec:
+ persistentVolumeClaim:
+ claimName: config
+ backup:
+ enabled: true
+ type: custom
+ mountPath: /backup
+ volumeSpec:
+ persistentVolumeClaim:
+ claimName: backup
config-yml:
enabled: "true"
subPath: "config.yml"
@@ -6197,13 +6109,13 @@ homer:
type: "custom"
volumeSpec:
configMap:
- name: homer-config
+ name: homer-config
gatus-config:
enabled: "true"
type: "custom"
volumeSpec:
configMap:
- name: gatus-config
+ name: gatus-config
disk-usage:
enabled: "true"
mountPath: "/usr/local/bin/disk_usage.sh"
@@ -6211,19 +6123,17 @@ homer:
type: "custom"
volumeSpec:
configMap:
- name: homer-config
+ name: homer-config
message:
enabled: true
type: emptyDir
mountPath: /www/assets/message
- homer:
+ migration:
enabled: true
type: custom
- mountPath: /www/assets/backgrounds
- subPath: backgrounds
volumeSpec:
persistentVolumeClaim:
- claimName: config-homer-ssd
+ claimName: config-homer-ssd
command:
- /bin/bash
- /usr/local/bin/disk_usage.sh
@@ -6237,15 +6147,15 @@ homer:
subPath: "config.yml"
- mountPath: /www/assets/custom.css
name: custom-css
- subPath: "custom.css"
+ subPath: "custom.css"
- mountPath: /www/assets/message
name: message
- mountPath: /www/assets/backgrounds
- name: homer
- subPath: backgrounds
- readOnly: true
+ name: config
+ subPath: homer/backgrounds
+ readOnly: true
resources: *default_resources
- securityContext: *default_securitycontext
+ securityContext: *default_securitycontext
configmap:
config:
# -- Store homer configuration as a ConfigMap, but don't specify any config, since we'll supply our own
@@ -6264,9 +6174,18 @@ homer:
limits:
cpu: 100m
memory: 128Mi
+ initContainers:
+ a-migration:
+ <<: *migrate_data
+ volumeMounts:
+ - mountPath: /migration
+ name: migration
+ - mountPath: /config
+ name: config
+ subPath: homer
traefikforwardauth:
- affinity: *standard_affinity
+ affinity: *standard_affinity
tolerations: *standard_tolerations
whitelist: admin@elfhosted.com
priorityClassName: tenant-normal
@@ -6286,7 +6205,7 @@ traefikforwardauth:
image:
repository: ghcr.io/elfhosted/traefik-forward-auth
pullPolicy: IfNotPresent
- tag: 3.1.0@sha256:19cd990fae90c544100676bc049f944becc8c454639e57d20f6f48e27de90776
+ tag: 3.1.0@sha256:19cd990fae90c544100676bc049f944becc8c454639e57d20f6f48e27de90776
middleware:
# middleware.enabled -- Enable to deploy a preconfigured middleware
@@ -6325,8 +6244,8 @@ gatus:
seccompProfile:
type: RuntimeDefault
fsGroup: 568
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
automountServiceAccountToken: false
resources:
requests:
@@ -6347,7 +6266,7 @@ gatus:
controller:
# strategy: RollingUpdate
annotations:
- configmap.reloader.stakater.com/reload: "gatus-config"
+ configmap.reloader.stakater.com/reload: "gatus-config"
env:
GATUS_CONFIG_PATH: /config/config.yaml
SMTP_FROM: 'health@elfhosted.com'
@@ -6367,14 +6286,14 @@ gatus:
subPath: gatus
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
envFrom:
- secretRef:
name: gatus-smtp-config
configmap:
config:
# -- Store homer configuration as a ConfigMap, but don't specify any config, since we'll supply our own
- enabled: false
+ enabled: false
gotify:
sso:
@@ -6396,8 +6315,8 @@ gotify:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-gotify"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
automountServiceAccountToken: false
env:
GOTIFY_SERVER_PORT: 8080
@@ -6432,7 +6351,7 @@ gotify:
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-gotify-ssd
+ claimName: config-gotify-ssd
elfbot:
enabled: "true"
type: "custom"
@@ -6442,15 +6361,15 @@ gotify:
optional: true
tmp: *tmp # Avoids issues with readOnlyRootFilesystem
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: gotify
- bootstrap:
+ subPath: gotify
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -6461,13 +6380,13 @@ gotify:
name: config
subPath: gotify
- mountPath: /tmp
- name: tmp
+ name: tmp
bootstrap: *bootstrap
flaresolverr: &app_flaresolverr
- enabled: false
+ enabled: false
podLabels:
- app.elfhosted.com/name: flaresolverr
+ app.elfhosted.com/name: flaresolverr
image:
registry: ghcr.io
repository: elfhosted/flaresolverr
@@ -6478,29 +6397,29 @@ flaresolverr: &app_flaresolverr
type: RuntimeDefault
readOnlyRootFilesystem: false # makes node unhappy
affinity:
- podAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
- podAffinityTerm:
+ podAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.elfhosted.com/name
operator: In
values:
- zurg
- topologyKey: "kubernetes.io/hostname"
- - weight: 50
- podAffinityTerm:
+ topologyKey: "kubernetes.io/hostname"
+ - weight: 50
+ podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- kubernetesdashboard
- topologyKey: "kubernetes.io/hostname"
+ topologyKey: "kubernetes.io/hostname"
tolerations:
- key: node-role.elfhosted.com/dedicated
- operator: Exists
+ operator: Exists
- key: node-role.elfhosted.com/hobbit
operator: Exists
controller:
@@ -6519,7 +6438,7 @@ flaresolverr: &app_flaresolverr
mountPath: /shared
type: emptyDir
volumeSpec:
- medium: Memory
+ medium: Memory
podSecurityContext:
seccompProfile:
type: RuntimeDefault
@@ -6577,7 +6496,7 @@ flaresolverr: &app_flaresolverr
securityContext:
seccompProfile:
type: RuntimeDefault
- readOnlyRootFilesystem: false
+ readOnlyRootFilesystem: false
addons:
vpn:
enabled: true # in case we ever need it
@@ -6589,8 +6508,8 @@ flaresolverr: &app_flaresolverr
IPTABLES_BACKEND: nft
KILLSWITCH: "true"
LOCAL_NETWORK: 10.0.0.0/8
- NFTABLES: "1"
- VPNDNS: "0"
+ NFTABLES: "1"
+ VPNDNS: "0"
# HTTP_CONTROL_SERVER_PORT: "8000"
# HTTP_CONTROL_SERVER_ADDRESS: ":8000"
VPN_TYPE: wireguard
@@ -6748,14 +6667,14 @@ tunarr:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-tunarr"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
podSecurityContext:
seccompProfile:
type: RuntimeDefault
fsGroupChangePolicy: "OnRootMismatch"
supplementalGroups:
- - 993
+ - 993
automountServiceAccountToken: false
resources:
requests:
@@ -6795,27 +6714,27 @@ tunarr:
volumeSpec:
configMap:
name: elfbot-tunarr
- optional: true
+ optional: true
render-device:
enabled: "true"
type: hostPath
hostPath: "/dev/dri/renderD128"
- mountPath: "/dev/dri/renderD128"
+ mountPath: "/dev/dri/renderD128"
media: # in case users use /tmp
enabled: true
mountPath: /streams
type: emptyDir
- sizeLimit: 50Gi
+ sizeLimit: 50Gi
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: tunarr
- bootstrap:
+ subPath: tunarr
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -6826,7 +6745,7 @@ tunarr:
name: config
subPath: tunarr
- mountPath: /tmp
- name: tmp
+ name: tmp
threadfin:
enabled: false
@@ -6844,8 +6763,8 @@ threadfin:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-threadfin"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
podSecurityContext:
seccompProfile:
type: RuntimeDefault
@@ -6892,17 +6811,17 @@ threadfin:
volumeSpec:
configMap:
name: elfbot-tunarr
- optional: true
+ optional: true
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: threadfin
- bootstrap:
+ subPath: threadfin
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -6913,7 +6832,7 @@ threadfin:
name: config
subPath: threadfin
- mountPath: /tmp
- name: tmp
+ name: tmp
thelounge:
enabled: false
@@ -6926,8 +6845,8 @@ thelounge:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-thelounge"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -6977,8 +6896,8 @@ thelounge:
configMap:
name: elfbot-thelounge
optional: true
- initContainers:
- bootstrap:
+ initContainers:
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -6989,7 +6908,7 @@ thelounge:
name: config
subPath: thelounge
- mountPath: /tmp
- name: tmp
+ name: tmp
create-user:
image: ghcr.io/elfhosted/thelounge:4.4.3@sha256:92d172b18085509de57442a31d9f29d60e1f0f0dc3cf2d3c05e4406cfc543b70
imagePullPolicy: IfNotPresent
@@ -7043,13 +6962,13 @@ overseerr:
type: RuntimeDefault
readOnlyRootFilesystem: true
podLabels:
- app.elfhosted.com/name: overseerr
+ app.elfhosted.com/name: overseerr
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-overseerr"
# Always prefer to cohabit with zurg
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
podSecurityContext:
seccompProfile:
type: RuntimeDefault
@@ -7090,18 +7009,18 @@ overseerr:
type: "custom"
volumeSpec:
configMap:
- name: tooling-scripts
- defaultMode: 0755
+ name: tooling-scripts
+ defaultMode: 0755
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: overseerr
- bootstrap:
+ subPath: overseerr
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -7112,15 +7031,15 @@ overseerr:
name: config
subPath: overseerr
- mountPath: /tmp
- name: tmp
- update-dns: *update_dns_on_init
+ name: tmp
+ update-dns: *update_dns_on_init
# We do this so that we can override the /app/jellyseer/public path with our own, allowing the user to customize the branding
copy-branding:
image: ghcr.io/elfhosted/overseerr:1.33.2@sha256:51e1c93d68eb4aed7631ea6a10d5a5244841e3b66f04c5b1d3bc03cfa08a6729
command:
- /bin/bash
- -c
- - |
+ - |
mkdir -p /config/branding
cp --no-clobber -rf /app/overseerr/public/logo_* /config/branding
volumeMounts:
@@ -7128,7 +7047,7 @@ overseerr:
name: config
subPath: overseerr
resources: *default_resources
- securityContext: *default_securitycontext
+ securityContext: *default_securitycontext
ingress:
main:
enabled: false
@@ -7156,8 +7075,8 @@ overseerr:
- mountPath: /tmp
name: tmp
resources: *default_resources
- securityContext: *default_securitycontext
- clean-up-dns: *clean_up_dns_on_termination
+ securityContext: *default_securitycontext
+ clean-up-dns: *clean_up_dns_on_termination
jellyseerr:
enabled: false
@@ -7174,8 +7093,8 @@ jellyseerr:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-jellyseerr"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
podSecurityContext:
seccompProfile:
type: RuntimeDefault
@@ -7202,7 +7121,7 @@ jellyseerr:
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-jellyseerr-ssd
+ claimName: config-jellyseerr-ssd
elfbot:
enabled: "true"
type: "custom"
@@ -7216,21 +7135,21 @@ jellyseerr:
type: "custom"
volumeSpec:
configMap:
- name: tooling-scripts
- defaultMode: 0755
+ name: tooling-scripts
+ defaultMode: 0755
envFrom:
- configMapRef:
- name: jellyseerr-env
+ name: jellyseerr-env
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: jellyseerr
- bootstrap:
+ subPath: jellyseerr
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -7241,15 +7160,15 @@ jellyseerr:
name: config
subPath: jellyseerr
- mountPath: /tmp
- name: tmp
- update-dns: *update_dns_on_init
+ name: tmp
+ update-dns: *update_dns_on_init
# We do this so that we can override the /app/jellyseer/public path with our own, allowing the user to customize the branding
copy-branding:
image: ghcr.io/elfhosted/jellyseerr:1.9.2@sha256:a4023a0b8b5e919e179bad2c9bcfbb57ce51805ecf3e0dc9680498b5e4912418
command:
- /bin/bash
- -c
- - |
+ - |
mkdir -p /config/branding
cp --no-clobber -rf /app/overseerr/public/logo_* /config/branding/
volumeMounts:
@@ -7257,7 +7176,7 @@ jellyseerr:
name: config
subPath: jellyseerr
resources: *default_resources
- securityContext: *default_securitycontext
+ securityContext: *default_securitycontext
ingress:
main:
enabled: false
@@ -7283,10 +7202,10 @@ jellyseerr:
subPath: jellyseerr/branding
readOnly: true
- mountPath: /tmp
- name: tmp
+ name: tmp
resources: *default_resources
- securityContext: *default_securitycontext
- clean-up-dns: *clean_up_dns_on_termination
+ securityContext: *default_securitycontext
+ clean-up-dns: *clean_up_dns_on_termination
audiobookshelf:
enabled: false
@@ -7299,8 +7218,8 @@ audiobookshelf:
type: RuntimeDefault
readOnlyRootFilesystem: true
podLabels:
- app.elfhosted.com/name: audiobookshelf
- affinity: *standard_affinity
+ app.elfhosted.com/name: audiobookshelf
+ affinity: *standard_affinity
tolerations: *standard_tolerations
podSecurityContext:
seccompProfile:
@@ -7335,35 +7254,35 @@ audiobookshelf:
# never used, just satisfies startup scripts
metadata:
enabled: true
- type: emptyDir
- mountPath: /metadata
+ type: emptyDir
+ mountPath: /metadata
elfbot:
enabled: "true"
type: "custom"
volumeSpec:
configMap:
name: elfbot-audiobookshelf
- optional: true
+ optional: true
tooling-scripts:
enabled: "true"
type: "custom"
volumeSpec:
configMap:
- name: tooling-scripts
- defaultMode: 0755
+ name: tooling-scripts
+ defaultMode: 0755
env:
- METADATA_PATH: /config/metadata
- SOURCE: ElfHosted
+ METADATA_PATH: /config/metadata
+ SOURCE: ElfHosted
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: audiobookshelf
- bootstrap:
+ subPath: audiobookshelf
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -7374,8 +7293,8 @@ audiobookshelf:
name: config
subPath: audiobookshelf
- mountPath: /tmp
- name: tmp
- update-dns: *update_dns_on_init
+ name: tmp
+ update-dns: *update_dns_on_init
ingress:
main:
enabled: false
@@ -7393,8 +7312,8 @@ audiobookshelf:
cpu: 2
memory: 1Gi
additionalContainers:
- clean-up-dns: *clean_up_dns_on_termination
-
+ clean-up-dns: *clean_up_dns_on_termination
+
openbooks:
enabled: false
image:
@@ -7414,7 +7333,7 @@ openbooks:
fsGroup: 568
fsGroupChangePolicy: "OnRootMismatch"
persistence:
- <<: *storagemounts
+ <<: *storagemounts
command:
- /bin/bash
- -c
@@ -7435,13 +7354,13 @@ openbooks:
envFrom:
- configMapRef:
name: elfbot-openbooks
- optional: true
+ optional: true
automountServiceAccountToken: false
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-openbooks"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
ingress:
main:
enabled: false
@@ -7480,8 +7399,8 @@ vaultwarden:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-vaultwarden"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
automountServiceAccountToken: false
envFrom:
- configMapRef:
@@ -7505,8 +7424,8 @@ vaultwarden:
name: elfbot-vaultwarden
optional: true
tmp: *tmp
- initContainers:
- bootstrap:
+ initContainers:
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -7517,7 +7436,7 @@ vaultwarden:
name: config
subPath: vaultwarden
- mountPath: /tmp
- name: tmp
+ name: tmp
ingress:
main:
enabled: false
@@ -7552,8 +7471,8 @@ notifiarr:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-notifiarr"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
podSecurityContext:
seccompProfile:
type: RuntimeDefault
@@ -7609,15 +7528,15 @@ notifiarr:
name: elfbot-notifiarr
optional: true
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: notifiarr
- bootstrap:
+ subPath: notifiarr
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -7628,7 +7547,7 @@ notifiarr:
name: config
subPath: notifiarr
- mountPath: /tmp
- name: tmp
+ name: tmp
copy-example-config:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -7669,8 +7588,8 @@ shoko:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-shoko"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -7707,15 +7626,15 @@ shoko:
name: elfbot-shoko
optional: true
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: shoko
- bootstrap:
+ subPath: shoko
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -7726,7 +7645,7 @@ shoko:
name: config
subPath: shoko
- mountPath: /tmp
- name: tmp
+ name: tmp
ingress:
main:
enabled: false
@@ -7758,8 +7677,8 @@ filebot:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-filebot"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -7802,15 +7721,15 @@ filebot:
mountPath: /home/seedy
sizeLimit: 1Gi
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: filebot
- bootstrap:
+ subPath: filebot
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -7821,7 +7740,7 @@ filebot:
name: config
subPath: filebot
- mountPath: /tmp
- name: tmp
+ name: tmp
ingress:
main:
enabled: false
@@ -7837,7 +7756,7 @@ filebot:
memory: 16Mi
limits:
cpu: 2
- memory: 1Gi
+ memory: 1Gi
kometa: &app_kometa
enabled: false
@@ -7852,12 +7771,12 @@ kometa: &app_kometa
type: RuntimeDefault
readOnlyRootFilesystem: true
podLabels:
- app.elfhosted.com/name: kometa
+ app.elfhosted.com/name: kometa
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-kometa"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
podSecurityContext:
seccompProfile:
type: RuntimeDefault
@@ -7875,7 +7794,7 @@ kometa: &app_kometa
envFrom:
- configMapRef:
name: elfbot-kometa
- optional: true
+ optional: true
persistence:
<<: *storagemounts
migration:
@@ -7892,6 +7811,14 @@ kometa: &app_kometa
volumeSpec:
persistentVolumeClaim:
claimName: config
+ logsg:
+ enabled: true
+ type: custom
+ mountPath: /config/logs/
+ subPath: kometa
+ volumeSpec:
+ persistentVolumeClaim:
+ claimName: logs
elfbot:
enabled: "true"
type: "custom"
@@ -7906,15 +7833,15 @@ kometa: &app_kometa
configMap:
name: kometa-config
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: kometa
- bootstrap:
+ subPath: kometa
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -7925,7 +7852,7 @@ kometa: &app_kometa
name: config
subPath: kometa
- mountPath: /tmp
- name: tmp
+ name: tmp
setup:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -7978,15 +7905,15 @@ rcloneui:
image:
repository: ghcr.io/elfhosted/rclone
tag: 1.68.1@sha256:288b3d9d6a480b5751ccbc457e1837669e5eb2a2146ffcc7a0f0f1df45418793
- command:
+ command:
- /rcloneui.sh
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-rcloneui,elfhosted-user-config"
podAnnotations:
- kubernetes.io/egress-bandwidth: "40M"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ kubernetes.io/egress-bandwidth: "40M"
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -8006,20 +7933,27 @@ rcloneui:
type: emptyDir
mountPath: /home/elfie/.cache
sizeLimit: 1Gi
+ migration:
+ enabled: true
+ type: custom
+ volumeSpec:
+ persistentVolumeClaim:
+ claimName: config-rclonebrowser-ssd
config:
enabled: true
type: custom
mountPath: /config/
+ subPath: rclone
volumeSpec:
persistentVolumeClaim:
- claimName: config-rclonebrowser-ssd
+ claimName: config
rclone-remote-storage:
enabled: "true"
subPath: "rclone-remote-storage"
type: "custom"
volumeSpec:
configMap:
- name: rclonefm-config
+ name: rclonefm-config
elfbot:
enabled: "true"
type: "custom"
@@ -8028,7 +7962,26 @@ rcloneui:
name: elfbot-rclonebrowser
optional: true
initContainers:
- bootstrap: *bootstrap
+ a-migration:
+ <<: *migrate_data
+ volumeMounts:
+ - mountPath: /migration
+ name: migration
+ - mountPath: /config
+ name: config
+ subPath: rclone
+ bootstrap:
+ <<: *bootstrap
+ volumeMounts:
+ - mountPath: /etc/elfbot
+ name: elfbot
+ - mountPath: /storage/backup
+ name: backup
+ - mountPath: /config
+ name: config
+ subPath: rclone
+ - mountPath: /tmp
+ name: tmp
setup:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -8045,8 +7998,9 @@ rcloneui:
volumeMounts:
- mountPath: /config
name: config
+ subPath: rclone
- mountPath: /tmp # need this for cating into a file
- name: tmp
+ name: tmp
- mountPath: /rclone-remote-storage
subPath: rclone-remote-storage
name: rclone-remote-storage
@@ -8075,16 +8029,16 @@ rclonefm:
image:
repository: ghcr.io/elfhosted/rclone
tag: 1.68.1@sha256:288b3d9d6a480b5751ccbc457e1837669e5eb2a2146ffcc7a0f0f1df45418793
- command:
+ command:
- /rclonefm.sh
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-rclonefm,rclonefm-config"
podAnnotations:
- kubernetes.io/egress-bandwidth: "40M"
+ kubernetes.io/egress-bandwidth: "40M"
# Always prefer to cohabit with zurg
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -8103,14 +8057,15 @@ rclonefm:
enabled: true
type: emptyDir
mountPath: /home/elfie/.cache
- sizeLimit: 1Gi
+ sizeLimit: 1Gi
config:
enabled: true
type: custom
mountPath: /config/
+ subPath: rclone
volumeSpec:
persistentVolumeClaim:
- claimName: config-rclonebrowser-ssd
+ claimName: config
rclonefm-config:
enabled: "true"
mountPath: /var/lib/rclonefm/js/settings.js
@@ -8125,7 +8080,7 @@ rclonefm:
type: "custom"
volumeSpec:
configMap:
- name: rclonefm-config
+ name: rclonefm-config
elfbot:
enabled: "true"
type: "custom"
@@ -8134,7 +8089,18 @@ rclonefm:
name: elfbot-rclonefm
optional: true
initContainers:
- bootstrap: *bootstrap
+ bootstrap:
+ <<: *bootstrap
+ volumeMounts:
+ - mountPath: /etc/elfbot
+ name: elfbot
+ - mountPath: /storage/backup
+ name: backup
+ - mountPath: /config
+ name: config
+ subPath: rclone
+ - mountPath: /tmp
+ name: tmp
setup:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -8151,8 +8117,9 @@ rclonefm:
volumeMounts:
- mountPath: /config
name: config
+ subPath: rclone
- mountPath: /tmp # need this for cating into a file
- name: tmp
+ name: tmp
- mountPath: /rclone-remote-storage
subPath: rclone-remote-storage
name: rclone-remote-storage
@@ -8173,7 +8140,7 @@ rclonefm:
memory: 60Mi
limits:
cpu: 150m
- memory: 512Mi
+ memory: 512Mi
webdav: &webdav
enabled:
@@ -8181,10 +8148,10 @@ webdav: &webdav
podLabels:
app.elfhosted.com/name: webdav
podAnnotations:
- kubernetes.io/egress-bandwidth: "40M"
+ kubernetes.io/egress-bandwidth: "40M"
priorityClassName: tenant-normal
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
image:
repository: ghcr.io/elfhosted/rclone
tag: 1.68.1@sha256:288b3d9d6a480b5751ccbc457e1837669e5eb2a2146ffcc7a0f0f1df45418793
@@ -8194,7 +8161,7 @@ webdav: &webdav
securityContext:
seccompProfile:
type: RuntimeDefault
- readOnlyRootFilesystem: true
+ readOnlyRootFilesystem: true
command:
- /webdav.sh
podSecurityContext:
@@ -8210,10 +8177,16 @@ webdav: &webdav
name: webdav-config
- configMapRef:
name: elfbot-webdav
- optional: true
+ optional: true
persistence:
<<: *storagemounts
- <<: *storageconfigmounts
+ config:
+ enabled: true
+ type: custom
+ mountPath: /storage/config
+ volumeSpec:
+ persistentVolumeClaim:
+ claimName: config
elfbot:
enabled: "true"
type: "custom"
@@ -8226,12 +8199,12 @@ webdav: &webdav
type: "custom"
volumeSpec:
configMap:
- name: tooling-scripts
- defaultMode: 0755
+ name: tooling-scripts
+ defaultMode: 0755
initContainers:
update-dns: *update_dns_on_init
additionalContainers:
- clean-up-dns: *clean_up_dns_on_termination
+ clean-up-dns: *clean_up_dns_on_termination
service:
main:
enabled: false # necessary for probes
@@ -8255,14 +8228,14 @@ storagehub:
app.elfhosted.com/name: storagehub
controller:
annotations:
- configmap.reloader.stakater.com/reload: "storagehub-scripts,storagehub-env"
- secret.reloader.stakater.com/reload: ",storagehub-config,storagehub-env"
+ configmap.reloader.stakater.com/reload: "storagehub-scripts,storagehub-env"
+ secret.reloader.stakater.com/reload: ",storagehub-config,storagehub-env"
# affinity:
# podAffinity:
# # prefer to be located with zurg, if tolerations permit
- # preferredDuringSchedulingIgnoredDuringExecution:
- # - weight: 100
- # podAffinityTerm:
+ # preferredDuringSchedulingIgnoredDuringExecution:
+ # - weight: 100
+ # podAffinityTerm:
# labelSelector:
# matchExpressions:
# - key: app.elfhosted.com/name
@@ -8272,9 +8245,9 @@ storagehub:
# topologyKey: "kubernetes.io/hostname"
# tolerations:
# - key: node-role.elfhosted.com/download-only
- # operator: Exists
+ # operator: Exists
# - key: node-role.elfhosted.com/dedicated
- # operator: Exists
+ # operator: Exists
priorityClassName: tenant-critical
image:
repository: itsthenetwork/nfs-server-alpine
@@ -8284,12 +8257,12 @@ storagehub:
securityContext:
seccompProfile:
type: RuntimeDefault
- privileged: true
+ privileged: true
podSecurityContext:
seccompProfile:
type: RuntimeDefault
fsGroup: 568
- fsGroupChangePolicy: "OnRootMismatch"
+ fsGroupChangePolicy: "OnRootMismatch"
automountServiceAccountToken: false
persistence:
# # this is an ephemeral volume
@@ -8297,14 +8270,14 @@ storagehub:
# enabled: true
# type: custom
# mountPath: /storage
- # volumeSpec: *volumespec_ephemeral_volume_50g
+ # volumeSpec: *volumespec_ephemeral_volume_50g
# these are the persistent volumes we support currently
# needed for the migration
rclonemountrealdebridzurg: *rclonemountrealdebridzurg
# this is the old symlinks on HDD
# symlinks: *symlinks # these only get mounted on storagehub. Everything else accesses symlinks **through** storagehub
- # samba config
+ # samba config
# config:
# enabled: "true"
# subPath: "container.json"
@@ -8312,19 +8285,19 @@ storagehub:
# type: "custom"
# volumeSpec:
# secret:
- # secretName: storagehub-config
+ # secretName: storagehub-config
# storagehub-scripts:
# enabled: "true"
# type: "custom"
# volumeSpec:
# configMap:
- # name: storagehub-scripts
- # defaultMode: 0755
+ # name: storagehub-scripts
+ # defaultMode: 0755
tmp:
enabled: true
type: emptyDir
mountPath: /tmp
- sizeLimit: 1Gi
+ sizeLimit: 1Gi
service:
main:
enabled: false # necessary for probes
@@ -8344,10 +8317,10 @@ storagehub:
# envFrom:
# - configMapRef:
# name: storagehub-env
- # - secretRef:
+ # - secretRef:
# name: storagehub-env
# initContainers:
- # setup:
+ # setup:
# image: *tooling_image
# imagePullPolicy: IfNotPresent
# command:
@@ -8361,32 +8334,32 @@ storagehub:
# echo nothing
# # if [[ ! -f /storage/symlinks/i-was-migrated-to-storagehub ]]
# # then
- # # if [[ ! -z "$(ls -A /migration)" ]]
+ # # if [[ ! -z "$(ls -A /migration)" ]]
# # then
# # echo "Tar-migrating from /migration/..."
# # tar -cf - -C /migration/ . | tar xvmf - -C /storage/symlinks/
# # touch /storage/symlinks/i-was-migrated-to-storagehub
# # fi
- # # fi
-
+ # # fi
+
# envFrom:
# - configMapRef:
# name: storagehub-env
# - configMapRef:
- # name: elfhosted-user-config
+ # name: elfhosted-user-config
# - secretRef:
# name: storagehub-env
# volumeMounts:
# - mountPath: /storagehub-scripts
- # name: storagehub-scripts
+ # name: storagehub-scripts
# - mountPath: /home/elfie
# name: tmp
# - mountPath: /migration
# name: migration
# - mountPath: /storage/symlinks
- # name: symlinks
+ # name: symlinks
# resources: *default_resources
- # securityContext: *default_securitycontext
+ # securityContext: *default_securitycontext
# additionalContainers:
# backup-on-termination:
# image: *tooling_image
@@ -8399,12 +8372,12 @@ storagehub:
# - configMapRef:
# name: storagehub-env
# - configMapRef:
- # name: elfhosted-user-config
+ # name: elfhosted-user-config
# - secretRef:
# name: storagehub-env
# volumeMounts:
# - mountPath: /storagehub-scripts
- # name: storagehub-scripts
+ # name: storagehub-scripts
# - mountPath: /home/elfie
# name: tmp
# - mountPath: /ephemeral
@@ -8413,10 +8386,10 @@ storagehub:
# - mountPath: /persistent/plex
# name: persistent-plex
# - mountPath: /symlinks
- # name: symlinks
+ # name: symlinks
# terminationGracePeriodSeconds: "3600" # take up to an hour to backup
-webdavplus:
+webdavplus:
enabled: false
<<: *webdav
podLabels:
@@ -8425,7 +8398,7 @@ webdavplus:
kubernetes.io/egress-bandwidth: "150M"
envFrom:
- configMapRef:
- name: webdav-plus-config
+ name: webdav-plus-config
# This is basically a proxy webdav for alldebrid, so that we can use it over a VPN
alldebridwebdavpia:
@@ -8438,7 +8411,7 @@ alldebridwebdavpia:
- /usr/bin/dumb-init
- /bin/bash
- -c
- - |
+ - |
/usr/bin/rclone serve webdav \
--config /config/rclone.conf \
--addr :9999 \
@@ -8452,7 +8425,7 @@ alldebridwebdavpia:
--vfs-cache-max-size 10G \
-v \
alldebrid:
- envFrom: [] # nullify the inherited values
+ envFrom: [] # nullify the inherited values
persistence:
tmp: # in case users use /tmp
enabled: true
@@ -8472,8 +8445,8 @@ alldebridwebdavpia:
type: "custom"
volumeSpec:
configMap:
- name: tooling-scripts
- defaultMode: 0755
+ name: tooling-scripts
+ defaultMode: 0755
addons:
vpn:
enabled: true # in case we ever need it
@@ -8490,7 +8463,7 @@ alldebridwebdavpia:
add:
- NET_ADMIN
- SYS_MODULE
- config: # We have to set this to null so that we can override with our own config
+ config: # We have to set this to null so that we can override with our own config
jfa:
enabled: false
@@ -8507,8 +8480,8 @@ jfa:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-jfa"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
podSecurityContext:
seccompProfile:
type: RuntimeDefault
@@ -8546,15 +8519,15 @@ jfa:
mountPath: /tmp
sizeLimit: 1Gi
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: jfa
- bootstrap:
+ subPath: jfa
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -8565,7 +8538,7 @@ jfa:
name: config
subPath: jfa
- mountPath: /tmp
- name: tmp
+ name: tmp
ingress:
main:
enabled: false
@@ -8704,8 +8677,8 @@ mattermost:
enabled: false # probes can make helm fail/restart under some conditions. Either do or do not, there is no try
persistence:
enabled: true
- existingClaim: config-mattermost-ssd
- subPath: database
+ existingClaim: config
+ subPath: mattermost/database
resources:
requests:
cpu: 5m
@@ -8855,7 +8828,7 @@ mattermost:
capabilities:
drop:
- ALL
-
+
serviceAccount:
create: false
name:
@@ -8880,8 +8853,8 @@ syncthing:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-syncthing"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -8903,7 +8876,7 @@ syncthing:
subPath: syncthing
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
elfbot:
enabled: "true"
type: "custom"
@@ -8929,7 +8902,7 @@ syncthing:
cpu: 1
memory: 1Gi
initContainers:
- bootstrap:
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -8940,7 +8913,7 @@ syncthing:
name: config
subPath: syncthing
- mountPath: /tmp
- name: tmp
+ name: tmp
setup:
image: ghcr.io/elfhosted/syncthing:1.27.10@sha256:e97e468c112cafb92e89c1a00d6793d0f869e780b98830ad19de8b9a08071574
imagePullPolicy: IfNotPresent
@@ -8988,15 +8961,15 @@ rdtclient: &app_rdtclient
enabled: true
priorityClassName: tenant-bulk
podLabels:
- app.elfhosted.com/class: debrid
+ app.elfhosted.com/class: debrid
image:
repository: ghcr.io/elfhosted/rdtclient
tag: v2.0.86
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-rdtclient"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -9036,7 +9009,7 @@ rdtclient: &app_rdtclient
enabled: true
type: custom
mountPath: /data/downloads
- volumeSpec: *volumespec_ephemeral_volume_1g
+ volumeSpec: *volumespec_ephemeral_volume_1g
ingress:
main:
enabled: false
@@ -9056,15 +9029,15 @@ rdtclient: &app_rdtclient
memory: 2Gi
ephemeral-storage: 100Mi # a safety net against node ephemeral space exhaustion
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: rdtclient
- bootstrap:
+ subPath: rdtclient
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -9075,53 +9048,54 @@ rdtclient: &app_rdtclient
name: config
subPath: rdtclient
- mountPath: /tmp
- name: tmp
+ name: tmp
# Add a VPN option for rdtclient
rdtclientvpnpia:
enabled: false
- <<: *app_rdtclient
- podLabels:
- app.elfhosted.com/name: rdtclient-alldebrid
- persistence:
- <<: *storagemounts
- config:
- enabled: true
- type: custom
- mountPath: /data/db
- subPath: rdtclient-alldebrid
- volumeSpec:
- persistentVolumeClaim:
- claimName: config
- elfbot:
- enabled: "true"
- type: "custom"
- volumeSpec:
- configMap:
- name: elfbot-rdtclient-alldebrid
- optional: true
- download: # in case users use /tmp
- enabled: true
- type: custom
- mountPath: /data/downloads
- volumeSpec: *volumespec_ephemeral_volume_1g
- addons:
- vpn:
- enabled: true # in case we ever need it
- gluetun:
- image:
- repository: thrnz/docker-wireguard-pia
- tag: latest
- envFrom:
- - secretRef:
- name: rdtclient-pia-config
- securityContext:
- runAsUser: 0
- capabilities:
- add:
- - NET_ADMIN
- - SYS_MODULE
- config: # We have to set this to null so that we can override with our own config
+ # bring this back after migration
+ # <<: *app_rdtclient
+ # podLabels:
+ # app.elfhosted.com/name: rdtclient-alldebrid
+ # persistence:
+ # <<: *storagemounts
+ # config:
+ # enabled: true
+ # type: custom
+ # mountPath: /data/db
+ # subPath: rdtclient-alldebrid
+ # volumeSpec:
+ # persistentVolumeClaim:
+ # claimName: config
+ # elfbot:
+ # enabled: "true"
+ # type: "custom"
+ # volumeSpec:
+ # configMap:
+ # name: elfbot-rdtclient-alldebrid
+ # optional: true
+ # download: # in case users use /tmp
+ # enabled: true
+ # type: custom
+ # mountPath: /data/downloads
+ # volumeSpec: *volumespec_ephemeral_volume_1g
+ # addons:
+ # vpn:
+ # enabled: true # in case we ever need it
+ # gluetun:
+ # image:
+ # repository: thrnz/docker-wireguard-pia
+ # tag: latest
+ # envFrom:
+ # - secretRef:
+ # name: rdtclient-pia-config
+ # securityContext:
+ # runAsUser: 0
+ # capabilities:
+ # add:
+ # - NET_ADMIN
+ # - SYS_MODULE
+ # config: # We have to set this to null so that we can override with our own config
jdownloader:
enabled: false
@@ -9153,7 +9127,7 @@ jdownloader:
- configMapRef:
name: jdownloader-config
persistence:
- tmp: *tmp
+ tmp: *tmp
backup: *backup
config:
enabled: true
@@ -9168,7 +9142,7 @@ jdownloader:
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-jdownloader-ssd
+ claimName: config-jdownloader-ssd
elfbot:
enabled: "true"
type: "custom"
@@ -9193,15 +9167,15 @@ jdownloader:
cpu: 0.5
memory: 1Gi
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: jdownloader
- bootstrap:
+ subPath: jdownloader
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -9212,7 +9186,7 @@ jdownloader:
name: config
subPath: jdownloader
- mountPath: /tmp
- name: tmp
+ name: tmp
additionalContainers:
podinfo:
image: stefanprodan/podinfo # used to run probes from gatus
@@ -9226,8 +9200,8 @@ miniflux:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-miniflux,miniflux-config"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -9261,12 +9235,12 @@ miniflux:
database: miniflux
postgresPassword: miniflux
primary:
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
persistence:
enabled: true
- existingClaim: config-miniflux-ssd
- subPath: database
+ existingClaim: config
+ subPath: miniflux/database
resources:
requests:
cpu: 5m
@@ -9334,8 +9308,8 @@ joplinserver:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-joplinserver,joplinserver-config"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -9379,8 +9353,8 @@ joplinserver:
database: joplinserver
postgresPassword: joplinserver
primary:
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
persistence:
enabled: true
existingClaim: config
@@ -9449,8 +9423,8 @@ homepage:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-homepage,homepage-config,homepage-env"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -9466,7 +9440,7 @@ homepage:
serviceAccount:
create: true
name: homepage
- automountServiceAccountToken: true
+ automountServiceAccountToken: true
env:
PUID: 568
PGID: 568
@@ -9476,7 +9450,7 @@ homepage:
optional: true
- configMapRef:
name: homepage-env
- optional: true
+ optional: true
persistence:
<<: *storagemounts
migration:
@@ -9523,15 +9497,15 @@ homepage:
cpu: 250m # deliberately hobble the CPU in favor of GPU transcoding
memory: 1Gi
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: homepage
- bootstrap:
+ subPath: homepage
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -9542,7 +9516,7 @@ homepage:
name: config
subPath: homepage
- mountPath: /tmp
- name: tmp
+ name: tmp
copy-example-config:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -9608,18 +9582,18 @@ wallabag:
type: RuntimeDefault
readOnlyRootFilesystem: false
privileged: false
- # runtimeClassName: kata
+ # runtimeClassName: kata
podSecurityContext:
seccompProfile:
type: RuntimeDefault
- fsGroup: 568 # for the mounted volumes
+ fsGroup: 568 # for the mounted volumes
persistence:
config:
enabled: true
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-wallabag-ssd
+ claimName: config
ingress:
main:
enabled: false
@@ -9634,7 +9608,7 @@ wallabag:
cpu: 1m
memory: 32Mi
limits:
- cpu: 100m
+ cpu: 100m
memory: 1Gi
additionalContainers:
ui:
@@ -9642,10 +9616,10 @@ wallabag:
volumeMounts:
- mountPath: /var/www/wallabag/data
name: config
- subPath: data
+ subPath: wallabag/data
- mountPath: /var/www/wallabag/images
name: config
- subPath: images
+ subPath: wallabag/images
envFrom:
- configMapRef:
name: elfbot-wallbag
@@ -9654,15 +9628,15 @@ wallabag:
name: wallabag-config
securityContext:
seccompProfile:
- type: RuntimeDefault
+ type: RuntimeDefault
allowPrivilegeEscalation: false
resources:
requests:
cpu: 1m
memory: 100Mi
limits:
- cpu: 500m
- memory: 200Mi
+ cpu: 500m
+ memory: 200Mi
autoscan:
enabled: false
@@ -9673,8 +9647,8 @@ autoscan:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-autoscan"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -9704,7 +9678,7 @@ autoscan:
enabled: true # necessary for probes
ports:
http:
- port: 3030
+ port: 3030
persistence:
<<: *storagemounts
config:
@@ -9720,7 +9694,7 @@ autoscan:
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-autoscan-ssd
+ claimName: config-autoscan-ssd
example-config:
enabled: "true"
type: "custom"
@@ -9736,15 +9710,15 @@ autoscan:
optional: true
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: autoscan
- bootstrap:
+ subPath: autoscan
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -9755,7 +9729,7 @@ autoscan:
name: config
subPath: autoscan
- mountPath: /tmp
- name: tmp
+ name: tmp
copy-example-config:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -9794,8 +9768,8 @@ riven: &app_riven
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-riven,riven-env,riven-frontend-env"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -9811,10 +9785,10 @@ riven: &app_riven
envFrom:
- configMapRef:
name: riven-env
- - configMapRef:
+ - configMapRef:
name: elfbot-riven
- optional: true
- resources:
+ optional: true
+ resources:
requests:
cpu: 1m
memory: 20Mi
@@ -9831,7 +9805,7 @@ riven: &app_riven
http:
port: 3001
persistence:
- <<: *storagemounts
+ <<: *storagemounts
config:
enabled: true
type: custom
@@ -9847,13 +9821,13 @@ riven: &app_riven
subPath: riven
volumeSpec:
persistentVolumeClaim:
- claimName: logs
+ claimName: logs
migration:
enabled: true
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-riven-ssd
+ claimName: config-riven-ssd
elfbot:
enabled: "true"
type: "custom"
@@ -9865,17 +9839,17 @@ riven: &app_riven
enabled: "true"
type: emptyDir
volumeSpec:
- medium: Memory
+ medium: Memory
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
name: migration
- mountPath: /config
name: config
- subPath: riven
- bootstrap:
+ subPath: riven
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -9886,7 +9860,7 @@ riven: &app_riven
name: config
subPath: riven
- mountPath: /tmp
- name: tmp
+ name: tmp
setup-postgres:
image: *tooling_image
command:
@@ -9900,7 +9874,7 @@ riven: &app_riven
mkdir -p /config/postgresql/database
mkdir -p /config/postgresql/backups
chown elfie:elfie /config/postgresql -R
-
+
volumeMounts:
- mountPath: /config
name: config
@@ -9908,7 +9882,7 @@ riven: &app_riven
securityContext:
runAsUser: 0
seccompProfile:
- type: RuntimeDefault
+ type: RuntimeDefault
setup:
image: *tooling_image
command:
@@ -9957,7 +9931,7 @@ riven: &app_riven
value: postgres
volumeMounts:
- mountPath: /var/lib/postgresql/data
- name: config
+ name: config
subPath: riven/postgresql/database
- mountPath: /dev/shm
name: dev-shm
@@ -9987,8 +9961,8 @@ airdcpp: &app_airdcpp
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-airdcpp"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -10016,7 +9990,7 @@ airdcpp: &app_airdcpp
enabled: true # necessary for probes
ports:
http:
- port: 5600
+ port: 5600
env:
WAIT_FOR_VPN: "true"
PORT_FILE: /.airdcpp/forwarded-port
@@ -10024,7 +9998,7 @@ airdcpp: &app_airdcpp
liveness:
enabled: false
startup:
- enabled: false
+ enabled: false
persistence:
<<: *storagemounts
config:
@@ -10040,7 +10014,7 @@ airdcpp: &app_airdcpp
mountPath: /shared
type: emptyDir
volumeSpec:
- medium: Memory
+ medium: Memory
elfbot:
enabled: "true"
type: "custom"
@@ -10066,10 +10040,10 @@ airdcpp: &app_airdcpp
add:
- NET_ADMIN
- SYS_MODULE
- config: # We have to set this to null so that we can override with our own config
+ config: # We have to set this to null so that we can override with our own config
additionalVolumeMounts:
- mountPath: /config
- name: config
+ name: config
scripts:
up: |-
#!/bin/ash
@@ -10077,7 +10051,7 @@ airdcpp: &app_airdcpp
down: |-
#!/bin/ash
- echo "disconnected" > /shared/vpnstatus
+ echo "disconnected" > /shared/vpnstatus
airdcpppia:
enabled: false
@@ -10102,10 +10076,10 @@ airdcppgluetun:
add:
- NET_ADMIN
- SYS_MODULE
- config: # We have to set this to null so that we can override with our own config
+ config: # We have to set this to null so that we can override with our own config
additionalVolumeMounts:
- mountPath: /config
- name: config
+ name: config
scripts:
up: |-
#!/bin/ash
@@ -10113,7 +10087,7 @@ airdcppgluetun:
down: |-
#!/bin/ash
- echo "disconnected" > /shared/vpnstatus
+ echo "disconnected" > /shared/vpnstatus
jackett:
enabled: false
@@ -10129,8 +10103,8 @@ jackett:
type: RuntimeDefault
readOnlyRootFilesystem: true
# Always prefer to cohabit with zurg
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
podSecurityContext:
seccompProfile:
type: RuntimeDefault
@@ -10154,7 +10128,7 @@ jackett:
enabled: true # necessary for probes
ports:
http:
- port: 9117
+ port: 9117
persistence:
tmp: *tmp
backup: *backup
@@ -10190,9 +10164,9 @@ stremioserver: &app_stremioserver
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-stremio-server,stremio-server-env,stremio-server-config"
strategy:
rollingUpdate:
- unavailable: 1
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ unavailable: 1
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -10226,14 +10200,14 @@ stremioserver: &app_stremioserver
port: 11470
envFrom:
- configMapRef:
- name: stremio-server-env
+ name: stremio-server-env
persistence:
tmp: *tmp
render-device:
enabled: "true"
type: hostPath
hostPath: "/dev/dri/renderD128"
- mountPath: "/dev/dri/renderD128"
+ mountPath: "/dev/dri/renderD128"
config:
enabled: "true"
subPath: "server-settings.json"
@@ -10241,14 +10215,14 @@ stremioserver: &app_stremioserver
type: "custom"
volumeSpec:
configMap:
- name: stremio-server-config
+ name: stremio-server-config
tooling-scripts:
enabled: "true"
type: "custom"
volumeSpec:
configMap:
- name: tooling-scripts
- defaultMode: 0755
+ name: tooling-scripts
+ defaultMode: 0755
transcode: # in case users use /tmp
enabled: true
type: custom
@@ -10259,11 +10233,11 @@ stremioserver: &app_stremioserver
mountPath: /shared
type: emptyDir
volumeSpec:
- medium: Memory
+ medium: Memory
initContainers:
update-dns: *update_dns_on_init
additionalContainers:
- clean-up-dns: *clean_up_dns_on_termination
+ clean-up-dns: *clean_up_dns_on_termination
# Avoid 404s on the /casting endpoint
casting:
image: nginxinc/nginx-unprivileged
@@ -10275,7 +10249,7 @@ stremioserver: &app_stremioserver
- mountPath: /tmp
name: tmp
resources: *default_resources
- securityContext: *default_securitycontext
+ securityContext: *default_securitycontext
# Stremioserver with PIA
stremioserverpia:
@@ -10297,7 +10271,7 @@ stremioserverpia:
add:
- NET_ADMIN
- SYS_MODULE
- config: # We have to set this to null so that we can override with our own config
+ config: # We have to set this to null so that we can override with our own config
scripts:
up: |-
#!/bin/ash
@@ -10327,7 +10301,7 @@ stremioservergluetun:
add:
- NET_ADMIN
- SYS_MODULE
- config: # We have to set this to null so that we can override with our own config
+ config: # We have to set this to null so that we can override with our own config
scripts:
up: |-
#!/bin/ash
@@ -10335,7 +10309,7 @@ stremioservergluetun:
down: |-
#!/bin/ash
- echo "disconnected" > /shared/vpnstatus
+ echo "disconnected" > /shared/vpnstatus
stremioweb:
enabled: false
@@ -10350,9 +10324,9 @@ stremioweb:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-stremio-web"
strategy:
rollingUpdate:
- unavailable: 1
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ unavailable: 1
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -10396,9 +10370,9 @@ stremiojackett:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-stremio-jackett"
strategy:
rollingUpdate:
- unavailable: 1
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ unavailable: 1
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -10413,7 +10387,7 @@ stremiojackett:
automountServiceAccountToken: false
envFrom:
- configMapRef:
- name: stremio-jackett-env
+ name: stremio-jackett-env
resources:
requests:
cpu: 1m
@@ -10429,7 +10403,7 @@ stremiojackett:
enabled: true # necessary for probes
ports:
http:
- port: 3000
+ port: 3000
persistence:
pm2:
enabled: true
@@ -10440,7 +10414,7 @@ stremiojackett:
enabled: true
type: emptyDir
mountPath: /.npm
- sizeLimit: 1Gi
+ sizeLimit: 1Gi
elfbot:
enabled: "true"
type: "custom"
@@ -10456,15 +10430,15 @@ pairdrop:
tag: v1.10.10@sha256:c0188414adebb07c69a170c81256825f7ea83784f2f7ff491679fa0165b4425f
priorityClassName: tenant-normal
podLabels:
- app.elfhosted.com/name: pairdrop
+ app.elfhosted.com/name: pairdrop
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-pairdrop"
strategy:
rollingUpdate:
- unavailable: 1
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ unavailable: 1
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -10479,7 +10453,7 @@ pairdrop:
automountServiceAccountToken: false
# envFrom:
# - configMapRef:
- # name: pairdrop-env
+ # name: pairdrop-env
resources:
requests:
cpu: 1m
@@ -10495,8 +10469,8 @@ pairdrop:
enabled: true # necessary for probes
ports:
http:
- port: 3000
- persistence:
+ port: 3000
+ persistence:
elfbot:
enabled: "true"
type: "custom"
@@ -10516,9 +10490,9 @@ petio:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-petio"
strategy:
rollingUpdate:
- unavailable: 1
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ unavailable: 1
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -10530,7 +10504,7 @@ petio:
runAsGroup: 568
fsGroup: 568
fsGroupChangePolicy: "OnRootMismatch"
- automountServiceAccountToken: false
+ automountServiceAccountToken: false
resources:
requests:
cpu: 1m
@@ -10546,8 +10520,8 @@ petio:
enabled: true # necessary for probes
ports:
http:
- port: 7777
- persistence:
+ port: 7777
+ persistence:
backup: *backup
elfbot:
enabled: "true"
@@ -10562,7 +10536,7 @@ petio:
subPath: config
volumeSpec:
persistentVolumeClaim:
- claimName: config-petio-ssd
+ claimName: config-petio-ssd
config:
enabled: true
type: custom
@@ -10570,10 +10544,10 @@ petio:
subPath: petio/config
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
tmp: *tmp
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
@@ -10581,7 +10555,7 @@ petio:
- mountPath: /config
name: config
subPath: petio
- bootstrap:
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -10592,7 +10566,7 @@ petio:
name: config
subPath: petio
- mountPath: /tmp
- name: tmp
+ name: tmp
additionalContainers:
mongodb:
image: mongodb/mongodb-community-server:8.0.1-ubi8
@@ -10601,7 +10575,7 @@ petio:
subPath: petio/mongodb
mountPath: /data/db/
- name: tmp
- mountPath: /tmp
+ mountPath: /tmp
securityContext: *default_securitycontext
pgadmin:
@@ -10615,7 +10589,7 @@ pgadmin:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-pgadmin"
strategy:
rollingUpdate:
- unavailable: 1
+ unavailable: 1
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -10631,11 +10605,11 @@ pgadmin:
add:
- NET_BIND_SERVICE
drop:
- - ALL
+ - ALL
automountServiceAccountToken: false
envFrom:
- configMapRef:
- name: pgadmin-env
+ name: pgadmin-env
resources:
requests:
cpu: 1m
@@ -10651,15 +10625,16 @@ pgadmin:
enabled: true # necessary for probes
ports:
http:
- port: 80
+ port: 80
persistence:
config:
enabled: true
type: custom
mountPath: /data
+ subPath: pgadmin
volumeSpec:
persistentVolumeClaim:
- claimName: config-pgadmin-ssd
+ claimName: config
tmp: *tmp
redisinsight:
@@ -10673,7 +10648,7 @@ redisinsight:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-redisinsight"
strategy:
rollingUpdate:
- unavailable: 1
+ unavailable: 1
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -10687,9 +10662,9 @@ redisinsight:
fsGroupChangePolicy: "OnRootMismatch"
capabilities:
add:
- - IPC_LOCK
+ - IPC_LOCK
drop:
- - ALL
+ - ALL
automountServiceAccountToken: false
resources:
requests:
@@ -10714,7 +10689,7 @@ redisinsight:
mountPath: /data
volumeSpec:
persistentVolumeClaim:
- claimName: config-redisinsight-block
+ claimName: config-redisinsight-block
mongoexpress:
enabled: false
@@ -10727,7 +10702,7 @@ mongoexpress:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-mongoexpress"
strategy:
rollingUpdate:
- unavailable: 1
+ unavailable: 1
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -10757,9 +10732,9 @@ mongoexpress:
http:
port: 8081
envFrom:
- - configMapRef:
+ - configMapRef:
name: elfbot-mongoexpress
- optional: true
+ optional: true
annatar:
enabled: false
@@ -10772,9 +10747,9 @@ annatar:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-annatar,annatar-env"
strategy:
rollingUpdate:
- unavailable: 1
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ unavailable: 1
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -10789,9 +10764,9 @@ annatar:
automountServiceAccountToken: false
persistence:
tmp: *tmp
- redis:
+ redis:
enabled: true
- type: emptyDir
+ type: emptyDir
resources:
requests:
cpu: 1m
@@ -10807,18 +10782,18 @@ annatar:
enabled: true # necessary for probes
ports:
http:
- port: 8000
+ port: 8000
envFrom:
- configMapRef:
- name: annatar-env
- - configMapRef:
+ name: annatar-env
+ - configMapRef:
name: elfbot-annatar
- optional: true
+ optional: true
env:
- name: NAMESPACE
valueFrom:
fieldRef:
- fieldPath: metadata.namespace
+ fieldPath: metadata.namespace
addons:
vpn:
enabled: true # in case we ever need it
@@ -10833,8 +10808,8 @@ annatar:
LOC: de-frankfurt
PORT_FORWARDING: "0"
PORT_PERSIST: "1"
- NFTABLES: "1"
- VPNDNS: "0"
+ NFTABLES: "1"
+ VPNDNS: "0"
envFrom:
- secretRef:
name: annatar-vpn
@@ -10844,7 +10819,7 @@ annatar:
add:
- NET_ADMIN
- SYS_MODULE
- config: # We have to set this to null so that we can override with our own config
+ config: # We have to set this to null so that we can override with our own config
additionalContainers:
redis:
image: redis:alpine@sha256:de13e74e14b98eb96bdf886791ae47686c3c5d29f9d5f85ea55206843e3fce26
@@ -10860,7 +10835,7 @@ comet:
priorityClassName: tenant-normal
podAnnotations:
kubernetes.io/egress-bandwidth: "150M"
- podLabels:
+ podLabels:
app.elfhosted.com/name: comet
controller:
annotations:
@@ -10868,9 +10843,9 @@ comet:
secret.reloader.stakater.com/reload: "comet-env"
strategy:
rollingUpdate:
- unavailable: 1
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ unavailable: 1
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -10892,7 +10867,7 @@ comet:
type: "custom"
volumeSpec:
configMap:
- name: tooling-scripts
+ name: tooling-scripts
defaultMode: 0755
config:
enabled: true
@@ -10901,18 +10876,18 @@ comet:
subPath: comet
volumeSpec:
persistentVolumeClaim:
- claimName: config
- cache:
+ claimName: config
+ cache:
enabled: true
type: emptyDir
- mountPath: /.cache
+ mountPath: /.cache
elfbot:
enabled: "true"
type: "custom"
volumeSpec:
configMap:
name: elfbot-comet
- optional: true
+ optional: true
resources:
requests:
cpu: 1m
@@ -10928,19 +10903,19 @@ comet:
enabled: true # necessary for probes
ports:
http:
- port: 8000
+ port: 8000
envFrom:
- configMapRef:
- name: comet-env
+ name: comet-env
- secretRef:
- name: comet-env
- - configMapRef:
+ name: comet-env
+ - configMapRef:
name: elfbot-comet
- optional: true
+ optional: true
additionalContainers:
- clean-up-dns: *clean_up_dns_on_termination
+ clean-up-dns: *clean_up_dns_on_termination
speedtest:
- image: openspeedtest/latest
+ image: openspeedtest/latest
warp:
image: ghcr.io/elfhosted/warp:rolling@sha256:4e89fc07ff24c30ffc7f3804d7f3738d634c7b9aa4fecb4941618a366e43d8c9
securityContext:
@@ -10949,10 +10924,10 @@ comet:
privileged: false
capabilities:
add:
- - NET_ADMIN
+ - NET_ADMIN
envFrom:
- secretRef:
- name: comet-env
+ name: comet-env
volumeMounts:
- mountPath: /data
subPath: comet/warp
@@ -10969,9 +10944,9 @@ jackettio:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-jackettio,jackettio-env"
strategy:
rollingUpdate:
- unavailable: 1
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ unavailable: 1
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -10993,7 +10968,7 @@ jackettio:
subPath: jackettio
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
resources:
requests:
cpu: 1m
@@ -11009,13 +10984,13 @@ jackettio:
enabled: true # necessary for probes
ports:
http:
- port: 4000
+ port: 4000
envFrom:
- configMapRef:
- name: jackettio-env
- - configMapRef:
+ name: jackettio-env
+ - configMapRef:
name: elfbot-jackettio
- optional: true
+ optional: true
davio:
enabled: false
@@ -11029,7 +11004,7 @@ davio:
secret.reloader.stakater.com/reload: "davio-env"
strategy:
rollingUpdate:
- unavailable: 1
+ unavailable: 1
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -11044,7 +11019,7 @@ davio:
automountServiceAccountToken: false
persistence:
tmp: *tmp
- config:
+ config:
enabled: true
type: custom
mountPath: /config
@@ -11064,23 +11039,23 @@ davio:
enabled: true # necessary for probes
ports:
http:
- port: 4000
+ port: 4000
envFrom:
- configMapRef:
- name: davio-env
+ name: davio-env
- secretRef:
- name: davio-env
- - configMapRef:
+ name: davio-env
+ - configMapRef:
name: elfbot-davio
- optional: true
+ optional: true
mediafusion:
enabled: false
image:
repository: ghcr.io/elfhosted/mediafusion
tag: 4.0.1@sha256:ad36cff532436f08d638f0674d4c99dadf5ad3a011b264aa47dc15a07e3d555f
- podLabels:
- app.elfhosted.com/name: mediafusion
+ podLabels:
+ app.elfhosted.com/name: mediafusion
priorityClassName: tenant-normal
controller:
annotations:
@@ -11088,9 +11063,9 @@ mediafusion:
secret.reloader.stakater.com/reload: "mediafusion-env"
strategy:
rollingUpdate:
- unavailable: 1
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ unavailable: 1
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -11117,15 +11092,15 @@ mediafusion:
enabled: true # necessary for probes
ports:
http:
- port: 8000
+ port: 8000
envFrom:
- configMapRef:
- name: mediafusion-env
+ name: mediafusion-env
- configMapRef:
name: elfbot-mediafusion
optional: true
- secretRef:
- name: mediafusion-env
+ name: mediafusion-env
addons:
vpn:
enabled: true # in case we ever need it
@@ -11140,8 +11115,8 @@ mediafusion:
LOC: de-frankfurt
PORT_FORWARDING: "0"
PORT_PERSIST: "1"
- NFTABLES: "1"
- VPNDNS: "0"
+ NFTABLES: "1"
+ VPNDNS: "0"
envFrom:
- secretRef:
name: mediafusion-vpn
@@ -11151,15 +11126,15 @@ mediafusion:
add:
- NET_ADMIN
- SYS_MODULE
- config: # We have to set this to null so that we can override with our own config
+ config: # We have to set this to null so that we can override with our own config
mediaflowproxy:
enabled: false
image:
repository: ghcr.io/elfhosted/mediaflow-proxy
tag: 1.7.3@sha256:b02a01aa45716741b3f2679c06c3e8a85d3f8420a4e193feb27ac92f551286e9
- podLabels:
- app.elfhosted.com/name: mediaflow-proxy
+ podLabels:
+ app.elfhosted.com/name: mediaflow-proxy
priorityClassName: tenant-normal
controller:
annotations:
@@ -11168,9 +11143,9 @@ mediaflowproxy:
kubernetes.io/egress-bandwidth: "125M"
strategy:
rollingUpdate:
- unavailable: 1
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ unavailable: 1
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -11187,12 +11162,12 @@ mediaflowproxy:
type: "custom"
volumeSpec:
configMap:
- name: tooling-scripts
- defaultMode: 0755
+ name: tooling-scripts
+ defaultMode: 0755
initContainers:
update-dns: *update_dns_on_init
additionalContainers:
- clean-up-dns: *clean_up_dns_on_termination
+ clean-up-dns: *clean_up_dns_on_termination
resources:
requests:
cpu: 1m
@@ -11208,18 +11183,18 @@ mediaflowproxy:
enabled: true # necessary for probes
ports:
http:
- port: 8888
+ port: 8888
envFrom:
- configMapRef:
- name: mediaflow-proxy-env
+ name: mediaflow-proxy-env
- configMapRef:
name: elfbot-mediaflow-proxy
- optional: true
+ optional: true
stremiojackettcommunity: &stremiojackettcommunity
enabled: false
- podLabels:
- app.elfhosted.com/name: stremio-jackett-community
+ podLabels:
+ app.elfhosted.com/name: stremio-jackett-community
image:
repository: ghcr.io/elfhosted/stremio-jackett
tag: v4.1.6@sha256:62f993618240c950ca98e73e499985588e324717fc9cead481a8de8612222bd0
@@ -11230,9 +11205,9 @@ stremiojackettcommunity: &stremiojackettcommunity
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-stremio-jackett-community"
strategy:
rollingUpdate:
- unavailable: 1
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ unavailable: 1
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -11262,16 +11237,16 @@ stremiojackettcommunity: &stremiojackettcommunity
enabled: true # necessary for probes
ports:
http:
- port: 3000
+ port: 3000
envFrom:
- configMapRef:
- name: stremio-jackett-community-env
+ name: stremio-jackett-community-env
-stremiojackettcommunityalldebrid:
+stremiojackettcommunityalldebrid:
enabled: false
<<: *stremiojackettcommunity
- podLabels:
- app.elfhosted.com/name: stremio-jackett-community
+ podLabels:
+ app.elfhosted.com/name: stremio-jackett-community
addons:
vpn:
enabled: true # in case we ever need it
@@ -11286,8 +11261,8 @@ stremiojackettcommunityalldebrid:
LOC: de-frankfurt
PORT_FORWARDING: "0"
PORT_PERSIST: "1"
- NFTABLES: "1"
- VPNDNS: "0"
+ NFTABLES: "1"
+ VPNDNS: "0"
envFrom:
- secretRef:
name: stremio-jackett-community-vpn
@@ -11297,7 +11272,7 @@ stremiojackettcommunityalldebrid:
add:
- NET_ADMIN
- SYS_MODULE
- config: # We have to set this to null so that we can override with our own config
+ config: # We have to set this to null so that we can override with our own config
xtremio:
enabled: false
@@ -11310,9 +11285,9 @@ xtremio:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-xtremio"
strategy:
rollingUpdate:
- unavailable: 1
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ unavailable: 1
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -11326,7 +11301,7 @@ xtremio:
fsGroupChangePolicy: "OnRootMismatch"
automountServiceAccountToken: false
env:
- PORT: 3649
+ PORT: 3649
persistence:
tmp: *tmp
resources:
@@ -11344,11 +11319,11 @@ xtremio:
enabled: true # necessary for probes
ports:
http:
- port: 3649
+ port: 3649
envFrom:
- - configMapRef:
+ - configMapRef:
name: elfbot-xtremio
- optional: true
+ optional: true
shluflix:
enabled: false
@@ -11361,9 +11336,9 @@ shluflix:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-shluflix"
strategy:
rollingUpdate:
- unavailable: 1
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ unavailable: 1
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -11377,14 +11352,14 @@ shluflix:
fsGroupChangePolicy: "OnRootMismatch"
automountServiceAccountToken: false
env:
- PORT: 8001
+ PORT: 8001
persistence:
tmp: *tmp
npm:
enabled: true
type: emptyDir
mountPath: /home/elfie/.npm
- sizeLimit: 1Gi
+ sizeLimit: 1Gi
resources:
requests:
cpu: 1m
@@ -11400,11 +11375,11 @@ shluflix:
enabled: true # necessary for probes
ports:
http:
- port: 8001
+ port: 8001
envFrom:
- - configMapRef:
+ - configMapRef:
name: shluflix-env
- - configMapRef:
+ - configMapRef:
name: elfbot-shluflix
optional: true
@@ -11419,9 +11394,9 @@ stremify:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-stremify"
strategy:
rollingUpdate:
- unavailable: 1
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ unavailable: 1
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -11433,21 +11408,21 @@ stremify:
runAsGroup: 568
fsGroup: 568
fsGroupChangePolicy: "OnRootMismatch"
- automountServiceAccountToken: false
+ automountServiceAccountToken: false
persistence:
- tmp:
+ tmp:
enabled: true
type: emptyDir
mountPath: /tmp
- nuxt-node-modules:
+ nuxt-node-modules:
enabled: true
type: emptyDir
- mountPath: /nuxt/node_modules
- nuxt:
+ mountPath: /nuxt/node_modules
+ nuxt:
enabled: true
type: emptyDir
- mountPath: /nuxt/.nuxt
- nitro:
+ mountPath: /nuxt/.nuxt
+ nitro:
enabled: true
type: emptyDir
mountPath: /home/node/app/.nitro
@@ -11466,15 +11441,15 @@ stremify:
enabled: true # necessary for probes
ports:
http:
- port: 3000
+ port: 3000
envFrom:
- - configMapRef:
- name: stremify-env
- - configMapRef:
+ - configMapRef:
+ name: stremify-env
+ - configMapRef:
name: elfbot-stremify
- optional: true
- - secretRef:
- name: stremify-env
+ optional: true
+ - secretRef:
+ name: stremify-env
addons:
vpn:
enabled: true # in case we ever need it
@@ -11489,8 +11464,8 @@ stremify:
LOC: de-frankfurt
PORT_FORWARDING: "0"
PORT_PERSIST: "1"
- NFTABLES: "1"
- VPNDNS: "0"
+ NFTABLES: "1"
+ VPNDNS: "0"
envFrom:
- secretRef:
name: stremify-vpn
@@ -11500,7 +11475,7 @@ stremify:
add:
- NET_ADMIN
- SYS_MODULE
- config: # We have to set this to null so that we can override with our own config
+ config: # We have to set this to null so that we can override with our own config
recyclarr:
enabled: false
@@ -11513,9 +11488,9 @@ recyclarr:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-recyclarr"
strategy:
rollingUpdate:
- unavailable: 1
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ unavailable: 1
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -11543,13 +11518,20 @@ recyclarr:
enabled: true # necessary for probes
ports:
http:
- port: 9898
+ port: 9898
persistence:
<<: *storagemounts
config:
enabled: true
type: custom
mountPath: /config/
+ subPath: recyclarr
+ volumeSpec:
+ persistentVolumeClaim:
+ claimName: config
+ migration:
+ enabled: true
+ type: custom
volumeSpec:
persistentVolumeClaim:
claimName: config-recyclarr-ssd
@@ -11565,9 +11547,28 @@ recyclarr:
volumeSpec:
configMap:
name: elfbot-recyclarr
- optional: true
+ optional: true
initContainers:
- bootstrap: *bootstrap
+ a-migration:
+ <<: *migrate_data
+ volumeMounts:
+ - mountPath: /migration
+ name: migration
+ - mountPath: /config
+ name: config
+ subPath: recyclarr
+ bootstrap:
+ <<: *bootstrap
+ volumeMounts:
+ - mountPath: /etc/elfbot
+ name: elfbot
+ - mountPath: /storage/backup
+ name: backup
+ - mountPath: /config
+ name: config
+ subPath: recyclarr
+ - mountPath: /tmp
+ name: tmp
copy-example-config:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -11596,7 +11597,7 @@ recyclarr:
podinfo:
image: stefanprodan/podinfo # used to run probes from gatus
sync:
- image: ghcr.io/recyclarr/recyclarr:latest@sha256:1bf2436ed4749a4309765dd21643aac858dd436a536e37c25bb463513601e962
+ image: ghcr.io/recyclarr/recyclarr:latest@sha256:1bf2436ed4749a4309765dd21643aac858dd436a536e37c25bb463513601e962
command:
- /bin/bash
- -c
@@ -11608,7 +11609,7 @@ recyclarr:
name: config
envFrom:
- configMapRef:
- name: recyclarr-env
+ name: recyclarr-env
knightcrawler: &app_knightcrawler
enabled: false
@@ -11616,8 +11617,8 @@ knightcrawler: &app_knightcrawler
repository: ghcr.io/elfhosted/knightcrawler-addon
tag: v2.0.27@sha256:4095b77202ed089fe46f14b5b0d5eba6bb652cd094cffb96dc4f665bc850a30f
priorityClassName: tenant-normal
- podLabels:
- app.elfhosted.com/name: knightcrawler
+ podLabels:
+ app.elfhosted.com/name: knightcrawler
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,storage-changed,elfbot-knightcrawler,elfbot-torrentio,knightcrawler-env"
@@ -11625,8 +11626,8 @@ knightcrawler: &app_knightcrawler
strategy:
rollingUpdate:
unavailable: 1
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -11655,30 +11656,30 @@ knightcrawler: &app_knightcrawler
ports:
http:
port: 7000
- persistence:
+ persistence:
elfbot:
enabled: "true"
type: "custom"
volumeSpec:
configMap:
name: elfbot-knightcrawler
- optional: true
- npm:
+ optional: true
+ npm:
enabled: true
type: emptyDir
mountPath: /.npm
- pm2:
+ pm2:
enabled: true
mountPath: /.pm2
- type: emptyDir
+ type: emptyDir
envFrom:
- configMapRef:
- name: knightcrawler-env
+ name: knightcrawler-env
- secretRef:
- name: knightcrawler-env
+ name: knightcrawler-env
# Old naming convention
-torrentio:
+torrentio:
<<: *app_knightcrawler
enabled: false
@@ -11704,13 +11705,13 @@ knightcrawlervpn:
add:
- NET_ADMIN
- SYS_MODULE
- config: # We have to set this to null so that we can override with our own config
+ config: # We have to set this to null so that we can override with our own config
zurg: &app_zurg
enabled: false
podLabels:
app.elfhosted.com/class: debrid
- app.elfhosted.com/name: zurg
+ app.elfhosted.com/name: zurg
podAnnotations:
kubernetes.io/egress-bandwidth: "150M" # tested with _kilos in Discord on a 97Mbit remux
image:
@@ -11720,7 +11721,7 @@ zurg: &app_zurg
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-zurg,zurg-env,gluetun-config"
- strategy: Recreate
+ strategy: Recreate
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
@@ -11730,36 +11731,36 @@ zurg: &app_zurg
operator: In
values:
- "true"
- podAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 50
- podAffinityTerm:
+ podAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 50
+ podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.elfhosted.com/role
operator: In
values:
- nodefinder # use nodefinder in the absense of zurg...
- topologyKey: "kubernetes.io/hostname"
- - weight: 60
- podAffinityTerm:
+ topologyKey: "kubernetes.io/hostname"
+ - weight: 60
+ podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.elfhosted.com/name
operator: In
values:
- zurg # .. but prefer zurg
- topologyKey: "kubernetes.io/hostname"
+ topologyKey: "kubernetes.io/hostname"
namespaceSelector: {} # i.e., in the absense of any better signal, pick a node which already has zurg on it
- - weight: 70
- podAffinityTerm:
+ - weight: 70
+ podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.elfhosted.com/name
operator: In
values:
- zurg # .. but prefer zurg
- topologyKey: "kubernetes.io/hostname"
+ topologyKey: "kubernetes.io/hostname"
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -11787,7 +11788,7 @@ zurg: &app_zurg
enabled: true # necessary for probes
ports:
http:
- port: 9999
+ port: 9999
persistence:
tmp: *tmp
backup: *backup # to pin zurg to the node with the backup PVC
@@ -11830,13 +11831,13 @@ zurg: &app_zurg
type: "custom"
volumeSpec:
configMap:
- name: tooling-scripts
- defaultMode: 0755
+ name: tooling-scripts
+ defaultMode: 0755
envFrom:
- configMapRef:
- name: zurg-env # this is here so we can use env vars to detect whether to enable warp
+ name: zurg-env # this is here so we can use env vars to detect whether to enable warp
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
@@ -11844,7 +11845,7 @@ zurg: &app_zurg
- mountPath: /config
name: config
subPath: zurg
- bootstrap:
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -11855,7 +11856,7 @@ zurg: &app_zurg
name: config
subPath: zurg
- mountPath: /tmp
- name: tmp
+ name: tmp
copy-example-config:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -11879,13 +11880,13 @@ zurg: &app_zurg
if [[ ! -f /config/plex_update.sh ]];
then
cp /bootstrap/plex_update.sh /config/
- fi
+ fi
volumeMounts:
- mountPath: /config
name: config
subPath: zurg
- name: example-config
- mountPath: "/bootstrap/"
+ mountPath: "/bootstrap/"
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -11915,7 +11916,7 @@ zurg: &app_zurg
# privileged: false
# capabilities:
# add:
- # - NET_ADMIN
+ # - NET_ADMIN
# sysctls:
# - name: net.ipv6.conf.all.disable_ipv6
# value: "0"
@@ -11924,7 +11925,7 @@ zurg: &app_zurg
# volumeMounts:
# - mountPath: /data
# subPath: warp
- # name: config
+ # name: config
# envFrom:
# - configMapRef:
# name: zurg-env # this is here so we can use env vars to detect whether to enable warp
@@ -11945,8 +11946,8 @@ zurg: &app_zurg
DOT: "off"
FIREWALL_INPUT_PORTS: "9999" # 9999 is for zurg
HTTP_CONTROL_SERVER_PORT: "8000"
- HTTP_CONTROL_SERVER_ADDRESS: ":8000"
- HEALTH_SERVER_ADDRESS: "127.0.0.1:9991"
+ HTTP_CONTROL_SERVER_ADDRESS: ":8000"
+ HEALTH_SERVER_ADDRESS: "127.0.0.1:9991"
securityContext:
runAsUser: 0
capabilities:
@@ -11961,7 +11962,7 @@ zurg: &app_zurg
down: |-
#!/bin/ash
- echo "disconnected" > /shared/vpnstatus
+ echo "disconnected" > /shared/vpnstatus
probes:
startup:
spec:
@@ -12002,10 +12003,10 @@ zurgranger:
<<: *app_zurg
podLabels:
app.elfhosted.com/name: zurg
- app.elfhosted.com/class: dedicated
+ app.elfhosted.com/class: dedicated
podAnnotations:
- kubernetes.io/egress-bandwidth: "500M"
- enabled: false
+ kubernetes.io/egress-bandwidth: "500M"
+ enabled: false
automountServiceAccountToken: false
controller:
annotations:
@@ -12016,7 +12017,7 @@ zurgranger:
plexdebrid: &app_plexdebrid
enabled: false
podLabels:
- app.elfhosted.com/name: plexdebrid
+ app.elfhosted.com/name: plexdebrid
image:
repository: ghcr.io/elfhosted/plex-debrid
tag: rolling@sha256:0a287b77b8f38231bc55b33c4792addc666b8681789b79ff78b6f09219cbaaaa
@@ -12024,8 +12025,8 @@ plexdebrid: &app_plexdebrid
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-plex-debrid"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -12051,15 +12052,15 @@ plexdebrid: &app_plexdebrid
enabled: false
envFrom:
- secretRef:
- name: plex-debrid-env
+ name: plex-debrid-env
service:
main:
enabled: true # necessary for probes
ports:
http:
- port: 3001
+ port: 3001
persistence:
- tmp: *tmp
+ tmp: *tmp
backup: *backup
migration:
enabled: true
@@ -12089,7 +12090,7 @@ plexdebrid: &app_plexdebrid
configMap:
name: plexdebrid-config
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
@@ -12097,7 +12098,7 @@ plexdebrid: &app_plexdebrid
- mountPath: /config
name: config
subPath: plex-debrid
- bootstrap:
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -12108,7 +12109,7 @@ plexdebrid: &app_plexdebrid
name: config
subPath: plex-debrid
- mountPath: /tmp
- name: tmp
+ name: tmp
copy-example-config:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -12133,11 +12134,11 @@ plexdebrid: &app_plexdebrid
securityContext:
seccompProfile:
type: RuntimeDefault
- readOnlyRootFilesystem: true
+ readOnlyRootFilesystem: true
codeserver:
enabled: false
- # runtimeClassName: kata
+ # runtimeClassName: kata
image:
repository: ghcr.io/elfhosted/codeserver
tag: 4.93.1@sha256:0403390965ae2dc7fcc2649cfafe039496354451ba4fc77ee237e2be8537a6b2
@@ -12145,8 +12146,8 @@ codeserver:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-plex-codeserver"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
seccompProfile:
type: RuntimeDefault
@@ -12174,18 +12175,18 @@ codeserver:
enabled: true # necessary for probes
ports:
http:
- port: 8080
+ port: 8080
persistence:
- tmp: *tmp
+ tmp: *tmp
backup: *backup
- # <<: *config_mounts
config:
enabled: true
type: custom
mountPath: /home/coder
+ subPath: codeserver
volumeSpec:
persistentVolumeClaim:
- claimName: config-codeserver-ssd
+ claimName: config
elfbot:
enabled: "true"
type: "custom"
@@ -12198,9 +12199,20 @@ codeserver:
type: "custom"
volumeSpec:
configMap:
- name: codeserver-config
+ name: codeserver-config
initContainers:
- bootstrap: *bootstrap
+ bootstrap:
+ <<: *bootstrap
+ volumeMounts:
+ - mountPath: /etc/elfbot
+ name: elfbot
+ - mountPath: /storage/backup
+ name: backup
+ - mountPath: /config
+ name: config
+ subPath: codeserver
+ - mountPath: /tmp
+ name: tmp
copy-example-config:
image: *tooling_image
imagePullPolicy: IfNotPresent
@@ -12236,8 +12248,8 @@ doplarr: &app_doplarr
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-doplarr,doplarr-env"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
runAsUser: 568
runAsGroup: 568
@@ -12257,19 +12269,19 @@ doplarr: &app_doplarr
memory: 1Gi
ingress:
main:
- enabled: false
+ enabled: false
service:
main:
enabled: true # necessary for probes
ports:
http:
- port: 3001
- envFrom:
- - configMapRef:
+ port: 3001
+ envFrom:
+ - configMapRef:
name: elfbot-doplarr
- optional: true
+ optional: true
-requestrr:
+requestrr:
enabled: false
image:
repository: ghcr.io/elfhosted/requestrr
@@ -12278,8 +12290,8 @@ requestrr:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-requestrr,requestrr-env"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
runAsUser: 568
runAsGroup: 568
@@ -12291,13 +12303,14 @@ requestrr:
type: RuntimeDefault
automountServiceAccountToken: false
persistence:
+ tmp: *tmp
backup: *backup
migration:
enabled: true
type: custom
volumeSpec:
persistentVolumeClaim:
- claimName: config-requestrr-ssd
+ claimName: config-requestrr-ssd
config:
enabled: true
type: custom
@@ -12305,7 +12318,14 @@ requestrr:
subPath: requestrr
volumeSpec:
persistentVolumeClaim:
- claimName: config-requestrr-ssd
+ claimName: config
+ elfbot:
+ enabled: "true"
+ type: "custom"
+ volumeSpec:
+ configMap:
+ name: elfbot-requestrr
+ optional: true
resources:
requests:
cpu: 1m
@@ -12315,19 +12335,19 @@ requestrr:
memory: 1Gi
ingress:
main:
- enabled: false
+ enabled: false
service:
main:
enabled: true # necessary for probes
ports:
http:
- port: 4545
- envFrom:
- - configMapRef:
+ port: 4545
+ envFrom:
+ - configMapRef:
name: elfbot-requestrr
- optional: true
+ optional: true
initContainers:
- a-migration:
+ a-migration:
<<: *migrate_data
volumeMounts:
- mountPath: /migration
@@ -12335,7 +12355,7 @@ requestrr:
- mountPath: /config
name: config
subPath: requestrr
- bootstrap:
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -12346,7 +12366,7 @@ requestrr:
name: config
subPath: requestrr
- mountPath: /tmp
- name: tmp
+ name: tmp
debridlink: &app_davdebrid
enabled: false
@@ -12354,10 +12374,10 @@ debridlink: &app_davdebrid
repository: ghcr.io/elfhosted/davdebrid
tag: v1.1.0@sha256:7d8a4f9c9358067eff1a2a1e662111bdd9e6599e2f05e3460471409bbdbbd785
priorityClassName: tenant-normal
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
podLabels:
- app.elfhosted.com/name: debridlink
+ app.elfhosted.com/name: debridlink
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-debridlink,debridlink-env"
@@ -12380,13 +12400,13 @@ debridlink: &app_davdebrid
subPath: debridlink
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
envFrom:
- configMapRef:
- name: debridlink-env
+ name: debridlink-env
- configMapRef:
name: elfbot-debridlink
- optional: true
+ optional: true
resources:
requests:
cpu: 1m
@@ -12396,7 +12416,7 @@ debridlink: &app_davdebrid
memory: 100Mi
ingress:
main:
- enabled: false
+ enabled: false
service:
main:
enabled: true # necessary for probes
@@ -12404,10 +12424,10 @@ debridlink: &app_davdebrid
http:
port: 8080
-alldebrid:
+alldebrid:
<<: *app_davdebrid
podLabels:
- app.elfhosted.com/name: alldebrid
+ app.elfhosted.com/name: alldebrid
persistence:
config:
enabled: true
@@ -12416,7 +12436,7 @@ alldebrid:
subPath: alldebrid
volumeSpec:
persistentVolumeClaim:
- claimName: config
+ claimName: config
blackhole: &app_blackhole
enabled: false
@@ -12424,8 +12444,8 @@ blackhole: &app_blackhole
repository: ghcr.io/elfhosted/wests-blackhole-script
tag: v1.4.4@sha256:59c563324ef68bbdddf56cc48b32066556d0d84b193950c5adb50c017b18b615
priorityClassName: tenant-normal
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-blackhole,blackhole-env"
@@ -12442,13 +12462,13 @@ blackhole: &app_blackhole
automountServiceAccountToken: false
env:
BLACKHOLE_RADARR_PATH: "radarr"
- BLACKHOLE_SONARR_PATH: "sonarr"
+ BLACKHOLE_SONARR_PATH: "sonarr"
envFrom:
- configMapRef:
- name: blackhole-env
+ name: blackhole-env
- configMapRef:
name: elfbot-blackhole
- optional: true
+ optional: true
resources:
requests:
cpu: 1m
@@ -12458,15 +12478,15 @@ blackhole: &app_blackhole
memory: 100Mi
ingress:
main:
- enabled: false
+ enabled: false
service:
main:
enabled: true # necessary for probes
ports:
http:
- port: 3001
+ port: 3001
persistence:
- <<: *storagemounts
+ <<: *storagemounts
tmp: *tmp
config:
enabled: true
@@ -12477,10 +12497,10 @@ blackhole: &app_blackhole
persistentVolumeClaim:
claimName: logs
-blackhole4k:
+blackhole4k:
<<: *app_blackhole
persistence:
- <<: *storagemounts
+ <<: *storagemounts
tmp: *tmp
config:
enabled: true
@@ -12489,18 +12509,18 @@ blackhole4k:
subPath: blackhole4k
volumeSpec:
persistentVolumeClaim:
- claimName: logs
+ claimName: logs
env:
BLACKHOLE_RADARR_PATH: "radarr4k"
- BLACKHOLE_SONARR_PATH: "sonarr4k"
+ BLACKHOLE_SONARR_PATH: "sonarr4k"
-blackholetorbox:
+blackholetorbox:
<<: *app_blackhole
controller:
annotations:
- configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-blackholetorbox,blackholetorbox-env"
+ configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-blackholetorbox,blackholetorbox-env"
persistence:
- <<: *storagemounts
+ <<: *storagemounts
tmp: *tmp
config:
enabled: true
@@ -12509,24 +12529,24 @@ blackholetorbox:
subPath: blackholetorbox
volumeSpec:
persistentVolumeClaim:
- claimName: logs
+ claimName: logs
env:
BLACKHOLE_RADARR_PATH: "radarr"
- BLACKHOLE_SONARR_PATH: "sonarr"
+ BLACKHOLE_SONARR_PATH: "sonarr"
envFrom:
- configMapRef:
- name: blackholetorbox-env
+ name: blackholetorbox-env
- configMapRef:
name: elfbot-blackholetorbox
- optional: true
+ optional: true
-blackholetorbox4k:
+blackholetorbox4k:
<<: *app_blackhole
controller:
annotations:
- configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-blackholetorbox,blackholetorbox-env"
- persistence:
- <<: *storagemounts
+ configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-blackholetorbox,blackholetorbox-env"
+ persistence:
+ <<: *storagemounts
tmp: *tmp
config:
enabled: true
@@ -12535,16 +12555,16 @@ blackholetorbox4k:
subPath: blackholetorbox4k
volumeSpec:
persistentVolumeClaim:
- claimName: logs
+ claimName: logs
env:
BLACKHOLE_RADARR_PATH: "radarr4k"
- BLACKHOLE_SONARR_PATH: "sonarr4k"
+ BLACKHOLE_SONARR_PATH: "sonarr4k"
envFrom:
- configMapRef:
- name: blackholetorbox-env
+ name: blackholetorbox-env
- configMapRef:
name: elfbot-blackholetorbox
- optional: true
+ optional: true
channelsdvr:
enabled: false
@@ -12553,12 +12573,12 @@ channelsdvr:
tag: latest@sha256:f6b5a737781d679d14ca55f176425a2cb766add038471f9cfb1b32f8f26033da
priorityClassName: tenant-normal
podAnnotations:
- kubernetes.io/egress-bandwidth: "125M"
+ kubernetes.io/egress-bandwidth: "125M"
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-channelsdvr"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
runAsUser: 568
runAsGroup: 568
@@ -12586,14 +12606,14 @@ channelsdvr:
enabled: true # necessary for probes
ports:
http:
- port: 8089
+ port: 8089
persistence:
- <<: *storagemounts
+ <<: *storagemounts
render-device:
enabled: "true"
type: hostPath
hostPath: "/dev/dri/renderD128"
- mountPath: "/dev/dri/renderD128"
+ mountPath: "/dev/dri/renderD128"
tmp: *tmp
config:
enabled: true
@@ -12609,7 +12629,7 @@ channelsdvr:
volumeSpec:
configMap:
name: elfbot-channelsdvr
- optional: true
+ optional: true
initContainers:
bootstrap: *bootstrap
@@ -12623,8 +12643,8 @@ immich:
controller:
annotations:
configmap.reloader.stakater.com/reload: "elfbot-all,elfbot-immich,immich-env"
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
securityContext:
runAsUser: 568
runAsGroup: 568
@@ -12652,14 +12672,14 @@ immich:
enabled: true # necessary for probes
ports:
http:
- port: 3001
+ port: 3001
persistence:
- <<: *storagemounts
+ <<: *storagemounts
render-device:
enabled: "true"
type: hostPath
hostPath: "/dev/dri/renderD128"
- mountPath: "/dev/dri/renderD128"
+ mountPath: "/dev/dri/renderD128"
tmp: *tmp
config:
enabled: true
@@ -12677,22 +12697,22 @@ immich:
enabled: true
type: emptyDir
mountPath: /usr/src/app/upload/encoded-video
- sizeLimit: 1Gi
+ sizeLimit: 1Gi
elfbot:
enabled: "true"
type: "custom"
volumeSpec:
configMap:
name: elfbot-immich
- optional: true
+ optional: true
envFrom:
- configMapRef:
- name: immich-env
+ name: immich-env
- configMapRef:
name: elfbot-immich
- optional: true
+ optional: true
initContainers:
- bootstrap:
+ bootstrap:
<<: *bootstrap
volumeMounts:
- mountPath: /etc/elfbot
@@ -12703,24 +12723,24 @@ immich:
name: config
subPath: immich
- mountPath: /tmp
- name: tmp
+ name: tmp
additionalContainers:
- ml:
+ ml:
image: ghcr.io/immich-app/immich-machine-learning:v1.117.0
envFrom:
- configMapRef:
- name: immich-env
+ name: immich-env
resources:
requests:
cpu: 15m
memory: 200Mi
limits:
cpu: 500m
- memory: 4Gi
+ memory: 4Gi
database:
- image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
+ image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
env:
- POSTGRES_INITDB_ARGS: '--data-checksums'
+ POSTGRES_INITDB_ARGS: '--data-checksums'
POSTGRES_PASSWORD: immich
POSTGRES_USER: immich
POSTGRES_DB: immich
@@ -12734,19 +12754,19 @@ immich:
memory: 200Mi
limits:
cpu: 500m
- memory: 4Gi
+ memory: 4Gi
redis:
image: docker.io/redis:7.4-alpine@sha256:de13e74e14b98eb96bdf886791ae47686c3c5d29f9d5f85ea55206843e3fce26
envFrom:
- configMapRef:
- name: immich-env
+ name: immich-env
resources:
requests:
cpu: 15m
memory: 200Mi
limits:
cpu: 500m
- memory: 4Gi
+ memory: 4Gi
kubernetesdashboard:
@@ -12761,7 +12781,7 @@ kubernetesdashboard:
limits:
cpu: 1
memory: 256Mi
-
+
extraArgs:
- --enable-skip-login
- --enable-insecure-login
@@ -12772,8 +12792,8 @@ kubernetesdashboard:
## Note: If set to true, you may want to add --enable-insecure-login to extraArgs
protocolHttp: true
- affinity: *standard_affinity
- tolerations: *standard_tolerations
+ affinity: *standard_affinity
+ tolerations: *standard_tolerations
# Global dashboard settings
settings:
@@ -12781,7 +12801,7 @@ kubernetesdashboard:
clusterName: "ElfHosted"
# defaultNamespace: "{{ .Release.Namespace }}"
# namespaceFallbackList: [ "{{ .Release.Namespace }}" ]
-
+
## Max number of items that can be displayed on each list page
itemsPerPage: 10
## Number of seconds between every auto-refresh of logs
@@ -12825,13 +12845,13 @@ kubernetesdashboard:
# optional but disabled by default to prevent errors
-
+
gluetun:
enabled: false # just to avoid errors
cometproxystreaming:
enabled: false
mediafusionproxystreaming:
- enabled: false
+ enabled: false
elfassesment:
enabled: false
@@ -12859,47 +12879,47 @@ zurgnazgul:
plexhobbit:
<<: *app_plex
- podAnnotations: *hobbit_streamer_podAnnotations
+ podAnnotations: *hobbit_streamer_podAnnotations
resources: *hobbit_streamer_resources
jellyfinhobbit:
<<: *app_jellyfin
- podAnnotations: *hobbit_streamer_podAnnotations
+ podAnnotations: *hobbit_streamer_podAnnotations
resources: *hobbit_streamer_resources
embyhobbit:
<<: *app_emby
- podAnnotations: *hobbit_streamer_podAnnotations
+ podAnnotations: *hobbit_streamer_podAnnotations
resources: *hobbit_streamer_resources
plexhalfling:
<<: *app_plex
- podAnnotations: *halfling_streamer_podAnnotations
+ podAnnotations: *halfling_streamer_podAnnotations
resources: *halfling_streamer_resources
jellyfinhalfling:
<<: *app_jellyfin
- podAnnotations: *halfling_streamer_podAnnotations
+ podAnnotations: *halfling_streamer_podAnnotations
resources: *halfling_streamer_resources
embyhalfling:
<<: *app_emby
- podAnnotations: *halfling_streamer_podAnnotations
+ podAnnotations: *halfling_streamer_podAnnotations
resources: *halfling_streamer_resources
plexnazgul:
<<: *app_plex
- podAnnotations: *nazgul_streamer_podAnnotations
+ podAnnotations: *nazgul_streamer_podAnnotations
resources: *nazgul_streamer_resources
jellyfinnazgul:
<<: *app_jellyfin
- podAnnotations: *nazgul_streamer_podAnnotations
+ podAnnotations: *nazgul_streamer_podAnnotations
resources: *nazgul_streamer_resources
embynazgul:
<<: *app_emby
- podAnnotations: *nazgul_streamer_podAnnotations
+ podAnnotations: *nazgul_streamer_podAnnotations
resources: *nazgul_streamer_resources
# This file must end on a single newline