Automated Manifest Update: 2025-12-01 (#2155)

This PR contains newly rendered Kubernetes manifests automatically generated by the CI workflow.

Reviewed-on: #2155
Co-authored-by: gitea-bot <gitea-bot@alexlebens.net>
Co-committed-by: gitea-bot <gitea-bot@alexlebens.net>
This commit was merged in pull request #2155.
Merged on 2025-12-01 00:18:33 +00:00; committed by Alex Lebens.
parent ccf87c8c73
commit 5ac696fd05
46 changed files with 26816 additions and 0 deletions

View File

@@ -0,0 +1,658 @@
---
# Source: tdarr/templates/persistent-volume.yaml
# NFS-backed PersistentVolume shared by the tdarr server and node workloads.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: tdarr-nfs-storage
  # NOTE(review): PersistentVolumes are cluster-scoped; this namespace field
  # is ignored by the API server but kept as rendered by the chart.
  namespace: tdarr
  labels:
    app.kubernetes.io/name: tdarr-nfs-storage
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/part-of: tdarr
spec:
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs-client
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  nfs:
    path: /volume2/Storage
    server: synologybond.alexlebens.net
  mountOptions:
    - vers=4
    - minorversion=1
    - noac
---
# Source: tdarr/charts/tdarr/templates/common.yaml
# (A duplicated `---` separator from the chart render was removed here; it
# produced a stray empty YAML document.)
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: tdarr-server
  labels:
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: tdarr
    helm.sh/chart: tdarr-4.4.0
  annotations:
    # Keep the PVC (and its data) when the Helm release is uninstalled.
    helm.sh/resource-policy: keep
  namespace: tdarr
spec:
  accessModes:
    - "ReadWriteOnce"
  resources:
    requests:
      storage: "50Gi"
  storageClassName: "ceph-block"
---
# Source: tdarr/charts/tdarr/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: tdarr-config
  labels:
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: tdarr
    helm.sh/chart: tdarr-4.4.0
  annotations:
    # Keep the PVC (and its data) when the Helm release is uninstalled.
    helm.sh/resource-policy: keep
  namespace: tdarr
spec:
  accessModes:
    - "ReadWriteOnce"
  resources:
    requests:
      storage: "50Gi"
  storageClassName: "ceph-block"
---
# Source: tdarr/templates/persistent-volume-claim.yaml
# Claim statically bound (via volumeName) to the tdarr-nfs-storage PV above.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: tdarr-nfs-storage
  namespace: tdarr
  labels:
    app.kubernetes.io/name: tdarr-nfs-storage
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/part-of: tdarr
spec:
  volumeName: tdarr-nfs-storage
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
---
# Source: tdarr/charts/tdarr-exporter/templates/service.yaml
# NOTE(review): unlike the sibling resources, the exporter subchart does not
# render a namespace field; it is applied into the release namespace — confirm.
apiVersion: v1
kind: Service
metadata:
  name: tdarr-tdarr-exporter
  labels:
    helm.sh/chart: tdarr-exporter-1.1.7
    app.kubernetes.io/name: tdarr-exporter
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/version: "1.4.3"
    app.kubernetes.io/managed-by: Helm
spec:
  type: ClusterIP
  ports:
    - port: 9090
      targetPort: 9090
      protocol: TCP
      name: metrics
  selector:
    app.kubernetes.io/name: tdarr-exporter
    app.kubernetes.io/instance: tdarr
---
# Source: tdarr/charts/tdarr/templates/common.yaml
# Internal API endpoint used by tdarr nodes (see serverIP/serverPort env vars
# on the DaemonSet below).
apiVersion: v1
kind: Service
metadata:
  name: tdarr-api
  labels:
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: tdarr
    app.kubernetes.io/service: tdarr-api
    helm.sh/chart: tdarr-4.4.0
  namespace: tdarr
spec:
  type: ClusterIP
  ports:
    - port: 8266
      targetPort: 8266
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/controller: server
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/name: tdarr
---
# Source: tdarr/charts/tdarr/templates/common.yaml
# Web UI endpoint; exposed externally via the HTTPRoute below.
apiVersion: v1
kind: Service
metadata:
  name: tdarr-web
  labels:
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: tdarr
    app.kubernetes.io/service: tdarr-web
    helm.sh/chart: tdarr-4.4.0
  namespace: tdarr
spec:
  type: ClusterIP
  ports:
    - port: 8265
      targetPort: 8265
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/controller: server
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/name: tdarr
---
# Source: tdarr/charts/tdarr/templates/common.yaml
# Transcode worker DaemonSet: one tdarr node per host that advertises an
# Intel GPU (see nodeSelector), connecting back to the tdarr-api Service.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: tdarr-node
  labels:
    app.kubernetes.io/controller: node
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: tdarr
    helm.sh/chart: tdarr-4.4.0
  namespace: tdarr
spec:
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app.kubernetes.io/controller: node
      app.kubernetes.io/name: tdarr
      app.kubernetes.io/instance: tdarr
  template:
    metadata:
      # (An empty `annotations:` key rendered by the chart was dropped; a bare
      # key parses as null and adds nothing.)
      labels:
        app.kubernetes.io/controller: node
        app.kubernetes.io/instance: tdarr
        app.kubernetes.io/name: tdarr
    spec:
      enableServiceLinks: false
      serviceAccountName: default
      automountServiceAccountToken: true
      hostIPC: false
      hostNetwork: false
      hostPID: false
      dnsPolicy: ClusterFirst
      nodeSelector:
        # Schedule only on nodes where node-feature-discovery reports an
        # Intel GPU.
        intel.feature.node.kubernetes.io/gpu: "true"
      containers:
        - env:
            - name: TZ
              value: US/Central
            - name: PUID
              value: "1001"
            - name: PGID
              value: "1001"
            - name: UMASK_SET
              value: "002"
            - name: ffmpegVersion
              value: "6"
            - name: inContainer
              value: "true"
            - name: nodeName
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: serverIP
              value: tdarr-api
            - name: serverPort
              value: "8266"
          image: ghcr.io/haveagitgat/tdarr_node:2.58.02
          imagePullPolicy: IfNotPresent
          name: main
          resources:
            limits:
              gpu.intel.com/i915: 1
            requests:
              cpu: 10m
              gpu.intel.com/i915: 1
              memory: 512Mi
          volumeMounts:
            - mountPath: /mnt/store
              name: media
              readOnly: true
            - mountPath: /tcache
              name: node-cache
      volumes:
        - name: media
          persistentVolumeClaim:
            claimName: tdarr-nfs-storage
        - emptyDir: {}
          name: node-cache
---
# Source: tdarr/charts/tdarr-exporter/templates/deployment.yaml
# Prometheus exporter that scrapes the tdarr web API and serves /metrics.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tdarr-tdarr-exporter
  labels:
    helm.sh/chart: tdarr-exporter-1.1.7
    app.kubernetes.io/name: tdarr-exporter
    app.kubernetes.io/instance: tdarr
    # NOTE(review): version label says 1.4.3 but the image tag below is
    # 1.4.2 — chart-rendered mismatch, left as-is; confirm upstream.
    app.kubernetes.io/version: "1.4.3"
    app.kubernetes.io/managed-by: Helm
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app.kubernetes.io/name: tdarr-exporter
      app.kubernetes.io/instance: tdarr
  template:
    metadata:
      # (An empty `annotations:` key rendered by the chart was dropped; a bare
      # key parses as null and adds nothing.)
      labels:
        helm.sh/chart: tdarr-exporter-1.1.7
        app.kubernetes.io/name: tdarr-exporter
        app.kubernetes.io/instance: tdarr
        app.kubernetes.io/version: "1.4.3"
        app.kubernetes.io/managed-by: Helm
    spec:
      serviceAccountName: default
      securityContext: {}
      containers:
        - name: tdarr-exporter
          securityContext: {}
          image: "docker.io/homeylab/tdarr-exporter:1.4.2"
          imagePullPolicy: IfNotPresent
          ports:
            - name: metrics
              containerPort: 9090
              protocol: TCP
          env:
            - name: TDARR_URL
              value: "http://tdarr-web.tdarr:8265"
            - name: VERIFY_SSL
              value: "false"
            - name: LOG_LEVEL
              value: "info"
            - name: PROMETHEUS_PORT
              value: "9090"
            - name: PROMETHEUS_PATH
              value: "/metrics"
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: metrics
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 3
          readinessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: metrics
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 2
          startupProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: metrics
            initialDelaySeconds: 2
            periodSeconds: 5
            successThreshold: 1
            timeoutSeconds: 2
          resources:
            requests:
              cpu: 10m
              memory: 256Mi
---
# Source: tdarr/charts/tdarr/templates/common.yaml
# Single-replica tdarr server (UI on 8265, node API on 8266); Recreate
# strategy because the config/server PVCs are ReadWriteOnce.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tdarr-server
  labels:
    app.kubernetes.io/controller: server
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: tdarr
    helm.sh/chart: tdarr-4.4.0
  namespace: tdarr
spec:
  revisionHistoryLimit: 3
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app.kubernetes.io/controller: server
      app.kubernetes.io/name: tdarr
      app.kubernetes.io/instance: tdarr
  template:
    metadata:
      labels:
        app.kubernetes.io/controller: server
        app.kubernetes.io/instance: tdarr
        app.kubernetes.io/name: tdarr
    spec:
      enableServiceLinks: false
      serviceAccountName: default
      automountServiceAccountToken: true
      hostIPC: false
      hostNetwork: false
      hostPID: false
      dnsPolicy: ClusterFirst
      containers:
        - env:
            - name: TZ
              value: US/Central
            - name: PUID
              value: "1001"
            - name: PGID
              value: "1001"
            - name: UMASK_SET
              value: "002"
            - name: ffmpegVersion
              value: "6"
            - name: internalNode
              value: "false"
            - name: inContainer
              value: "true"
            - name: nodeName
              value: tdarr-server
            - name: serverIP
              value: 0.0.0.0
            - name: serverPort
              value: "8266"
            - name: webUIPort
              value: "8265"
          image: ghcr.io/haveagitgat/tdarr:2.58.02
          imagePullPolicy: IfNotPresent
          name: main
          resources:
            requests:
              cpu: 200m
              memory: 1Gi
          volumeMounts:
            - mountPath: /app/configs
              name: config
            - mountPath: /mnt/store
              name: media
              readOnly: true
            - mountPath: /app/server
              name: server
            - mountPath: /tcache
              name: server-cache
      volumes:
        - name: config
          persistentVolumeClaim:
            claimName: tdarr-config
        - name: media
          persistentVolumeClaim:
            claimName: tdarr-nfs-storage
        - name: server
          persistentVolumeClaim:
            claimName: tdarr-server
        - emptyDir: {}
          name: server-cache
---
# Source: tdarr/templates/external-secret.yaml
# Restic credentials for the tdarr-config backup, assembled from Vault.
# spec.target.template.data composes RESTIC_REPOSITORY from the fetched
# BUCKET_ENDPOINT; spec.data lists the individual Vault lookups.
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: tdarr-config-backup-secret
  namespace: tdarr
  labels:
    app.kubernetes.io/name: tdarr-config-backup-secret
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/part-of: tdarr
spec:
  secretStoreRef:
    kind: ClusterSecretStore
    name: vault
  target:
    template:
      mergePolicy: Merge
      engineVersion: v2
      data:
        RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/tdarr/tdarr-config"
  data:
    - secretKey: BUCKET_ENDPOINT
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/volsync/restic/config
        metadataPolicy: None
        property: S3_BUCKET_ENDPOINT
    - secretKey: RESTIC_PASSWORD
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/volsync/restic/config
        metadataPolicy: None
        property: RESTIC_PASSWORD
    - secretKey: AWS_DEFAULT_REGION
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/volsync/restic/config
        metadataPolicy: None
        property: AWS_DEFAULT_REGION
    - secretKey: AWS_ACCESS_KEY_ID
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /digital-ocean/home-infra/volsync-backups
        metadataPolicy: None
        property: access_key
    - secretKey: AWS_SECRET_ACCESS_KEY
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /digital-ocean/home-infra/volsync-backups
        metadataPolicy: None
        property: secret_key
---
# Source: tdarr/templates/external-secret.yaml
# Restic credentials for the tdarr-server backup; identical to the
# tdarr-config secret except for the repository path suffix.
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: tdarr-server-backup-secret
  namespace: tdarr
  labels:
    app.kubernetes.io/name: tdarr-server-backup-secret
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/part-of: tdarr
spec:
  secretStoreRef:
    kind: ClusterSecretStore
    name: vault
  target:
    template:
      mergePolicy: Merge
      engineVersion: v2
      data:
        RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/tdarr/tdarr-server"
  data:
    - secretKey: BUCKET_ENDPOINT
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/volsync/restic/config
        metadataPolicy: None
        property: S3_BUCKET_ENDPOINT
    - secretKey: RESTIC_PASSWORD
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/volsync/restic/config
        metadataPolicy: None
        property: RESTIC_PASSWORD
    - secretKey: AWS_DEFAULT_REGION
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/volsync/restic/config
        metadataPolicy: None
        property: AWS_DEFAULT_REGION
    - secretKey: AWS_ACCESS_KEY_ID
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /digital-ocean/home-infra/volsync-backups
        metadataPolicy: None
        property: access_key
    - secretKey: AWS_SECRET_ACCESS_KEY
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /digital-ocean/home-infra/volsync-backups
        metadataPolicy: None
        property: secret_key
---
# Source: tdarr/templates/http-route.yaml
# Gateway API route exposing the tdarr web UI at tdarr.alexlebens.net via
# the shared Traefik gateway.
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: http-route-tdarr
  namespace: tdarr
  labels:
    app.kubernetes.io/name: http-route-tdarr
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/part-of: tdarr
spec:
  parentRefs:
    - group: gateway.networking.k8s.io
      kind: Gateway
      name: traefik-gateway
      namespace: traefik
  hostnames:
    - tdarr.alexlebens.net
  rules:
    - matches:
        - path:
            type: PathPrefix
            value: /
      backendRefs:
        - group: ''
          kind: Service
          name: tdarr-web
          port: 8265
          weight: 100
---
# Source: tdarr/templates/replication-source.yaml
# VolSync nightly restic backup of the tdarr-config PVC, taken from a CSI
# snapshot so the live workload keeps the RWO volume mounted.
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
  name: tdarr-config-backup-source
  namespace: tdarr
  labels:
    app.kubernetes.io/name: tdarr-config-backup-source
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/part-of: tdarr
spec:
  sourcePVC: tdarr-config
  trigger:
    # Quoted so the cron expression is unambiguously a plain string.
    schedule: "0 4 * * *"
  restic:
    pruneIntervalDays: 7
    # Name of the Secret produced by the tdarr-config-backup-secret
    # ExternalSecret above.
    repository: tdarr-config-backup-secret
    retain:
      hourly: 1
      daily: 3
      weekly: 2
      monthly: 2
      yearly: 4
    copyMethod: Snapshot
    storageClassName: ceph-block
    volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: tdarr/templates/replication-source.yaml
# VolSync nightly restic backup of the tdarr-server PVC; mirrors the
# tdarr-config source above.
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
  name: tdarr-server-backup-source
  namespace: tdarr
  labels:
    app.kubernetes.io/name: tdarr-server-backup-source
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/part-of: tdarr
spec:
  sourcePVC: tdarr-server
  trigger:
    # Quoted so the cron expression is unambiguously a plain string.
    schedule: "0 4 * * *"
  restic:
    pruneIntervalDays: 7
    # Name of the Secret produced by the tdarr-server-backup-secret
    # ExternalSecret above.
    repository: tdarr-server-backup-secret
    retain:
      hourly: 1
      daily: 3
      weekly: 2
      monthly: 2
      yearly: 4
    copyMethod: Snapshot
    storageClassName: ceph-block
    volumeSnapshotClassName: ceph-blockpool-snapshot
---
# Source: tdarr/charts/tdarr-exporter/templates/servicemonitor.yaml
# Prometheus Operator scrape config targeting the exporter Service's
# `metrics` port in the tdarr namespace.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    helm.sh/chart: tdarr-exporter-1.1.7
    app.kubernetes.io/name: tdarr-exporter
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/version: "1.4.3"
    app.kubernetes.io/managed-by: Helm
  name: tdarr-tdarr-exporter
spec:
  endpoints:
    - interval: 1m
      path: /metrics
      port: metrics
      scrapeTimeout: 15s
  namespaceSelector:
    matchNames:
      - tdarr
  selector:
    matchLabels:
      app.kubernetes.io/name: tdarr-exporter
      app.kubernetes.io/instance: tdarr
---
# Source: tdarr/charts/tdarr-exporter/templates/tests/test-connection.yaml
# `helm test` hook: one-shot Pod that wgets the exporter's /healthz endpoint.
apiVersion: v1
kind: Pod
metadata:
  name: "tdarr-tdarr-exporter-test-connection"
  labels:
    helm.sh/chart: tdarr-exporter-1.1.7
    app.kubernetes.io/name: tdarr-exporter
    app.kubernetes.io/instance: tdarr
    app.kubernetes.io/version: "1.4.3"
    app.kubernetes.io/managed-by: Helm
  annotations:
    "helm.sh/hook": test
spec:
  containers:
    - name: wget
      image: "docker.io/busybox:1.36.1"
      command: ['wget']
      args: ['tdarr-tdarr-exporter:9090/healthz']
  restartPolicy: Never