chore: Update manifests after change

2025-12-02 03:12:25 +00:00
parent 693da9f62c
commit 5ca176e682
41 changed files with 207775 additions and 96 deletions

File diff suppressed because it is too large


@@ -0,0 +1,289 @@
---
# Source: backrest/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: backrest-nfs-storage
namespace: backrest
labels:
app.kubernetes.io/name: backrest-nfs-storage
app.kubernetes.io/instance: backrest
app.kubernetes.io/part-of: backrest
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: backrest/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: backrest-nfs-share
namespace: backrest
labels:
app.kubernetes.io/name: backrest-nfs-share
app.kubernetes.io/instance: backrest
app.kubernetes.io/part-of: backrest
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Share
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: backrest/charts/backrest/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: backrest-config
labels:
app.kubernetes.io/instance: backrest
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: backrest
helm.sh/chart: backrest-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: backrest
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "1Gi"
storageClassName: "ceph-block"
---
# Source: backrest/charts/backrest/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: backrest-data
labels:
app.kubernetes.io/instance: backrest
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: backrest
helm.sh/chart: backrest-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: backrest
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: backrest/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: backrest-nfs-storage
namespace: backrest
labels:
app.kubernetes.io/name: backrest-nfs-storage
app.kubernetes.io/instance: backrest
app.kubernetes.io/part-of: backrest
spec:
volumeName: backrest-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: backrest/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: backrest-nfs-share
namespace: backrest
labels:
app.kubernetes.io/name: backrest-nfs-share
app.kubernetes.io/instance: backrest
app.kubernetes.io/part-of: backrest
spec:
volumeName: backrest-nfs-share
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: backrest/charts/backrest/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: backrest
labels:
app.kubernetes.io/instance: backrest
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: backrest
app.kubernetes.io/service: backrest
helm.sh/chart: backrest-4.4.0
namespace: backrest
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 9898
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: backrest
app.kubernetes.io/name: backrest
---
# Source: backrest/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: garage-ps10rp
namespace: backrest
labels:
app.kubernetes.io/name: garage-ps10rp
app.kubernetes.io/instance: backrest
app.kubernetes.io/part-of: backrest
annotations:
tailscale.com/tailnet-fqdn: garage-ps10rp.boreal-beaufort.ts.net
spec:
externalName: placeholder
type: ExternalName
---
# Source: backrest/charts/backrest/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: backrest
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: backrest
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: backrest
helm.sh/chart: backrest-4.4.0
namespace: backrest
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: backrest
app.kubernetes.io/instance: backrest
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: backrest
app.kubernetes.io/name: backrest
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: America/Chicago
- name: BACKREST_DATA
value: /data
- name: BACKREST_CONFIG
value: /config/config.json
- name: XDG_CACHE_HOME
value: /cache
- name: TMPDIR
value: /tmp
image: garethgeorge/backrest:v1.10.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
volumeMounts:
- mountPath: /cache
name: cache
- mountPath: /config
name: config
- mountPath: /data
name: data
- mountPath: /mnt/share
name: share
readOnly: true
- mountPath: /mnt/storage
name: storage
readOnly: true
- mountPath: /tmp
name: tmp
volumes:
- emptyDir: {}
name: cache
- name: config
persistentVolumeClaim:
claimName: backrest-config
- name: data
persistentVolumeClaim:
claimName: backrest-data
- name: share
persistentVolumeClaim:
claimName: backrest-nfs-share
- name: storage
persistentVolumeClaim:
claimName: backrest-nfs-storage
- emptyDir: {}
name: tmp
---
# Source: backrest/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-backrest
namespace: backrest
labels:
app.kubernetes.io/name: http-route-backrest
app.kubernetes.io/instance: backrest
app.kubernetes.io/part-of: backrest
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- backrest.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: backrest
port: 80
weight: 100
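
The backrest manifests above bind each pre-created NFS PersistentVolume to its claim statically: the PVC names the PV via `spec.volumeName` and repeats the `nfs-client` storage class, so no dynamic provisioning is involved and the 1Gi capacity is only nominal for NFS. A minimal sketch of the same pattern for a hypothetical additional export (the name and path below are placeholders, not part of the rendered manifests):

```yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: backrest-nfs-example          # hypothetical
  namespace: backrest
spec:
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs-client        # must match the claim below
  capacity:
    storage: 1Gi                      # nominal; NFS does not enforce this size
  accessModes:
    - ReadWriteMany
  nfs:
    path: /volume2/Example            # hypothetical export
    server: synologybond.alexlebens.net
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: backrest-nfs-example          # hypothetical
  namespace: backrest
spec:
  volumeName: backrest-nfs-example    # pins the claim to the PV above
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
```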


@@ -0,0 +1,448 @@
---
# Source: blocky/charts/blocky/templates/common.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
name: blocky
labels:
app.kubernetes.io/instance: blocky
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: blocky
helm.sh/chart: blocky-4.4.0
namespace: blocky
data:
config.yml: |
upstreams:
init:
strategy: fast
groups:
default:
- tcp-tls:1.1.1.1:853
- tcp-tls:1.0.0.1:853
strategy: parallel_best
timeout: 2s
connectIPVersion: v4
customDNS:
filterUnmappedTypes: false
zone: |
$ORIGIN alexlebens.net.
$TTL 86400
;; Name Server
IN NS patryk.ns.cloudflare.com.
IN NS veda.ns.cloudflare.com.
IN NS dns1.
IN NS dns2.
IN NS dns3.
dns1 IN A 10.232.1.22
dns2 IN A 10.232.1.51
dns3 IN A 10.232.1.52
;; Computer Names
nw01un IN A 192.168.1.1 ; Unifi Gateway
ps08rp IN A 10.232.1.51 ; DNS
ps09rp IN A 10.232.1.52 ; DNS
ps02sn IN A 10.232.1.61 ; Synology Web
ps02sn-bond IN A 10.232.1.64 ; Synology Bond for Storage
pd05wd IN A 10.230.0.115 ; Desktop
pl02mc IN A 10.230.0.105 ; Laptop
dv01hr IN A 10.232.1.72 ; HD Homerun
dv02kv IN A 10.232.1.71 ; Pi KVM
it01ag IN A 10.232.1.83 ; Airgradient
it02ph IN A 10.232.1.85 ; Phillips Hue
it03tb IN A 10.232.1.81 ; TubesZB ZigBee
it04tb IN A 10.232.1.82 ; TubesZB Z-Wave
it05sp IN A 10.230.0.100 ; Shelly Plug
;; Common Names
synology IN CNAME ps02sn
synologybond IN CNAME ps02sn-bond
unifi IN CNAME nw01un
airgradient IN CNAME it01ag
hdhr IN CNAME dv01hr
pikvm IN CNAME dv02kv
;; Service Names
cl01tl IN A 10.232.1.11
cl01tl IN A 10.232.1.12
cl01tl IN A 10.232.1.13
cl01tl-api IN A 10.232.1.11
cl01tl-api IN A 10.232.1.12
cl01tl-api IN A 10.232.1.13
cl01tl-endpoint IN A 10.232.1.21
cl01tl-endpoint IN A 10.232.1.22
cl01tl-endpoint IN A 10.232.1.23
cl01tl-gateway IN A 10.232.1.200
traefik-cl01tl IN A 10.232.1.21
blocky IN A 10.232.1.22
plex-lb IN A 10.232.1.23
;; Application Names
actual IN CNAME traefik-cl01tl
alertmanager IN CNAME traefik-cl01tl
argo-workflows IN CNAME traefik-cl01tl
argocd IN CNAME traefik-cl01tl
audiobookshelf IN CNAME traefik-cl01tl
authentik IN CNAME traefik-cl01tl
backrest IN CNAME traefik-cl01tl
bazarr IN CNAME traefik-cl01tl
booklore IN CNAME traefik-cl01tl
ceph IN CNAME traefik-cl01tl
code-server IN CNAME traefik-cl01tl
ephemera IN CNAME traefik-cl01tl
garage-s3 IN CNAME traefik-cl01tl
garage-webui IN CNAME traefik-cl01tl
gatus IN CNAME traefik-cl01tl
gitea IN CNAME traefik-cl01tl
grafana IN CNAME traefik-cl01tl
harbor IN CNAME traefik-cl01tl
headlamp IN CNAME traefik-cl01tl
home IN CNAME traefik-cl01tl
home-assistant IN CNAME traefik-cl01tl
home-assistant-code-server IN CNAME traefik-cl01tl
hubble IN CNAME cl01tl-gateway
huntarr IN CNAME traefik-cl01tl
immich IN CNAME traefik-cl01tl
jellyfin IN CNAME traefik-cl01tl
jellystat IN CNAME traefik-cl01tl
kiwix IN CNAME traefik-cl01tl
komodo IN CNAME traefik-cl01tl
kronic IN CNAME traefik-cl01tl
lidarr IN CNAME traefik-cl01tl
lidatube IN CNAME traefik-cl01tl
listenarr IN CNAME traefik-cl01tl
mail IN CNAME traefik-cl01tl
n8n IN CNAME traefik-cl01tl
ntfy IN CNAME traefik-cl01tl
objects IN CNAME traefik-cl01tl
ollama IN CNAME traefik-cl01tl
omni-tools IN CNAME traefik-cl01tl
overseerr IN CNAME traefik-cl01tl
pgadmin IN CNAME traefik-cl01tl
photoview IN CNAME traefik-cl01tl
plex IN CNAME traefik-cl01tl
postiz IN CNAME traefik-cl01tl
prometheus IN CNAME traefik-cl01tl
prowlarr IN CNAME traefik-cl01tl
qbittorrent IN CNAME traefik-cl01tl
qui IN CNAME traefik-cl01tl
radarr IN CNAME traefik-cl01tl
radarr-4k IN CNAME traefik-cl01tl
radarr-anime IN CNAME traefik-cl01tl
radarr-standup IN CNAME traefik-cl01tl
searxng IN CNAME traefik-cl01tl
slskd IN CNAME traefik-cl01tl
sonarr IN CNAME traefik-cl01tl
sonarr-4k IN CNAME traefik-cl01tl
sonarr-anime IN CNAME traefik-cl01tl
stalwart IN CNAME traefik-cl01tl
tautulli IN CNAME traefik-cl01tl
tdarr IN CNAME traefik-cl01tl
tubearchivist IN CNAME traefik-cl01tl
vault IN CNAME traefik-cl01tl
whodb IN CNAME traefik-cl01tl
yamtrack IN CNAME traefik-cl01tl
blocking:
denylists:
sus:
- https://v.firebog.net/hosts/static/w3kbl.txt
ads:
- https://v.firebog.net/hosts/AdguardDNS.txt
- https://v.firebog.net/hosts/Admiral.txt
- https://v.firebog.net/hosts/Easylist.txt
- https://adaway.org/hosts.txt
priv:
- https://v.firebog.net/hosts/Easyprivacy.txt
- https://v.firebog.net/hosts/Prigent-Ads.txt
mal:
- https://v.firebog.net/hosts/Prigent-Crypto.txt
- https://osint.digitalside.it/Threat-Intel/lists/latestdomains.txt
pro:
- https://raw.githubusercontent.com/hagezi/dns-blocklists/main/wildcard/pro.plus.txt
allowlists:
sus:
- |
*.alexlebens.net
*.alexlebens.dev
*.boreal-beaufort.ts.net
ads:
- |
*.alexlebens.net
*.alexlebens.dev
*.boreal-beaufort.ts.net
priv:
- |
*.alexlebens.net
*.alexlebens.dev
*.boreal-beaufort.ts.net
mal:
- |
*.alexlebens.net
*.alexlebens.dev
*.boreal-beaufort.ts.net
pro:
- |
*.alexlebens.net
*.alexlebens.dev
*.boreal-beaufort.ts.net
clientGroupsBlock:
default:
- sus
- ads
- priv
- mal
- pro
blockType: zeroIp
blockTTL: 1m
loading:
refreshPeriod: 24h
downloads:
timeout: 60s
attempts: 5
cooldown: 10s
concurrency: 16
strategy: fast
maxErrorsPerSource: 5
caching:
minTime: 5m
maxTime: 30m
maxItemsCount: 0
prefetching: true
prefetchExpires: 2h
prefetchThreshold: 5
prefetchMaxItemsCount: 0
cacheTimeNegative: 30m
redis:
address: redis-replication-blocky-master.blocky:6379
required: true
prometheus:
enable: true
path: /metrics
queryLog:
type: console
logRetentionDays: 7
creationAttempts: 1
creationCooldown: 2s
flushInterval: 30s
minTlsServeVersion: 1.3
ports:
dns: 53
http: 4000
log:
level: info
format: text
timestamp: true
privacy: false
---
# Source: blocky/charts/blocky/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: blocky-dns-external
labels:
app.kubernetes.io/instance: blocky
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: blocky
app.kubernetes.io/service: blocky-dns-external
helm.sh/chart: blocky-4.4.0
annotations:
tailscale.com/expose: "true"
namespace: blocky
spec:
type: LoadBalancer
ports:
- port: 53
targetPort: 53
protocol: TCP
name: tcp
- port: 53
targetPort: 53
protocol: UDP
name: udp
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: blocky
app.kubernetes.io/name: blocky
---
# Source: blocky/charts/blocky/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: blocky-metrics
labels:
app.kubernetes.io/instance: blocky
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: blocky
app.kubernetes.io/service: blocky-metrics
helm.sh/chart: blocky-4.4.0
namespace: blocky
spec:
type: ClusterIP
ports:
- port: 4000
targetPort: 4000
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: blocky
app.kubernetes.io/name: blocky
---
# Source: blocky/charts/blocky/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: blocky
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: blocky
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: blocky
helm.sh/chart: blocky-4.4.0
namespace: blocky
spec:
revisionHistoryLimit: 3
replicas: 3
strategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: blocky
app.kubernetes.io/instance: blocky
template:
metadata:
annotations:
checksum/configMaps: 8a197f81daed9048c4565ecafc0c7ca534383a898e709a13c3441bc00bd12652
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: blocky
app.kubernetes.io/name: blocky
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
image: ghcr.io/0xerr0r/blocky:v0.28.2@sha256:5f84a54e4ee950c4ab21db905b7497476ece2f4e1a376d23ab8c4855cabddcba
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /app/config.yml
mountPropagation: None
name: config
readOnly: true
subPath: config.yml
volumes:
- configMap:
name: blocky
name: config
---
# Source: blocky/templates/redis-replication.yaml
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-blocky
namespace: blocky
labels:
app.kubernetes.io/name: redis-replication-blocky
app.kubernetes.io/instance: blocky
app.kubernetes.io/part-of: blocky
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.0.3
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.48.0
---
# Source: blocky/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: blocky
namespace: blocky
labels:
app.kubernetes.io/name: blocky
app.kubernetes.io/instance: blocky
app.kubernetes.io/part-of: blocky
spec:
selector:
matchLabels:
app.kubernetes.io/name: blocky
app.kubernetes.io/instance: blocky
endpoints:
- port: metrics
interval: 30s
scrapeTimeout: 10s
path: /metrics
---
# Source: blocky/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-blocky
namespace: blocky
labels:
app.kubernetes.io/name: redis-replication-blocky
app.kubernetes.io/instance: blocky
app.kubernetes.io/part-of: blocky
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s
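
The blocky ConfigMap applies all five denylist groups to every client through the `default` key of `clientGroupsBlock`. Blocky's configuration format also accepts client names, IPs, or CIDR ranges as keys there, which would allow per-client overrides; a sketch under that assumption (the subnet below is hypothetical):

```yaml
# Sketch only — not part of the rendered ConfigMap above.
clientGroupsBlock:
  default:              # everything else keeps the full set
    - sus
    - ads
    - priv
    - mal
    - pro
  10.230.0.0/24:        # hypothetical client subnet with lighter blocking
    - ads
    - mal
```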

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,247 @@
---
# Source: descheduler/charts/descheduler/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: descheduler
namespace: descheduler
labels:
app.kubernetes.io/name: descheduler
helm.sh/chart: descheduler-0.34.0
app.kubernetes.io/instance: descheduler
app.kubernetes.io/version: "0.34.0"
app.kubernetes.io/managed-by: Helm
---
# Source: descheduler/charts/descheduler/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: descheduler
namespace: descheduler
labels:
app.kubernetes.io/name: descheduler
helm.sh/chart: descheduler-0.34.0
app.kubernetes.io/instance: descheduler
app.kubernetes.io/version: "0.34.0"
app.kubernetes.io/managed-by: Helm
data:
policy.yaml: |
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: default
pluginConfig:
- args:
evictDaemonSetPods: false
evictLocalStoragePods: false
ignorePvcPods: true
name: DefaultEvictor
- name: RemoveDuplicates
- args:
nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution
name: RemovePodsViolatingNodeAffinity
- name: RemovePodsViolatingNodeTaints
- name: RemovePodsViolatingInterPodAntiAffinity
- name: RemovePodsViolatingTopologySpreadConstraint
- args:
targetThresholds:
cpu: 60
memory: 60
pods: 60
thresholds:
cpu: 20
memory: 20
pods: 20
name: LowNodeUtilization
plugins:
balance:
enabled:
- RemoveDuplicates
- RemovePodsViolatingTopologySpreadConstraint
- LowNodeUtilization
deschedule:
enabled:
- RemovePodsViolatingNodeTaints
- RemovePodsViolatingNodeAffinity
- RemovePodsViolatingInterPodAntiAffinity
---
# Source: descheduler/charts/descheduler/templates/clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: descheduler
labels:
app.kubernetes.io/name: descheduler
helm.sh/chart: descheduler-0.34.0
app.kubernetes.io/instance: descheduler
app.kubernetes.io/version: "0.34.0"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["create", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list", "delete"]
- apiGroups: [""]
resources: ["pods/eviction"]
verbs: ["create"]
- apiGroups: ["scheduling.k8s.io"]
resources: ["priorityclasses"]
verbs: ["get", "watch", "list"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["get", "watch", "list"]
---
# Source: descheduler/charts/descheduler/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: descheduler
labels:
app.kubernetes.io/name: descheduler
helm.sh/chart: descheduler-0.34.0
app.kubernetes.io/instance: descheduler
app.kubernetes.io/version: "0.34.0"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: descheduler
subjects:
- kind: ServiceAccount
name: descheduler
namespace: descheduler
---
# Source: descheduler/charts/descheduler/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/name: descheduler
helm.sh/chart: descheduler-0.34.0
app.kubernetes.io/instance: descheduler
app.kubernetes.io/version: "0.34.0"
app.kubernetes.io/managed-by: Helm
name: descheduler
namespace: descheduler
spec:
clusterIP: None
ports:
- name: http-metrics
port: 10258
protocol: TCP
targetPort: 10258
selector:
app.kubernetes.io/name: descheduler
app.kubernetes.io/instance: descheduler
type: ClusterIP
---
# Source: descheduler/charts/descheduler/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: descheduler
namespace: descheduler
labels:
app.kubernetes.io/name: descheduler
helm.sh/chart: descheduler-0.34.0
app.kubernetes.io/instance: descheduler
app.kubernetes.io/version: "0.34.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: descheduler
app.kubernetes.io/instance: descheduler
template:
metadata:
labels:
app.kubernetes.io/name: descheduler
app.kubernetes.io/instance: descheduler
annotations:
checksum/config: 827e11ad319ee1e4c515e25bf575e74c44a0a9fdac5317e6caf8798b1d282036
spec:
priorityClassName: system-cluster-critical
serviceAccountName: descheduler
containers:
- name: descheduler
image: "registry.k8s.io/descheduler/descheduler:v0.34.0"
imagePullPolicy: IfNotPresent
command:
- /bin/descheduler
args:
- --policy-config-file=/policy-dir/policy.yaml
- --descheduling-interval=5m
- --v=3
ports:
- containerPort: 10258
protocol: TCP
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 5
periodSeconds: 20
timeoutSeconds: 5
resources:
limits:
cpu: 500m
memory: 256Mi
requests:
cpu: 10m
memory: 64Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
volumes:
- name: policy-volume
configMap:
name: descheduler
---
# Source: descheduler/charts/descheduler/templates/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: descheduler-servicemonitor
namespace: descheduler
labels:
app.kubernetes.io/name: descheduler
helm.sh/chart: descheduler-0.34.0
app.kubernetes.io/instance: descheduler
app.kubernetes.io/version: "0.34.0"
app.kubernetes.io/managed-by: Helm
spec:
jobLabel: jobLabel
namespaceSelector:
matchNames:
- descheduler
selector:
matchLabels:
app.kubernetes.io/name: descheduler
app.kubernetes.io/instance: descheduler
endpoints:
- honorLabels: true
port: http-metrics
scheme: https
tlsConfig:
insecureSkipVerify: true
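
In the `LowNodeUtilization` args above, `thresholds` and `targetThresholds` are percentages of each node's allocatable resources, computed from pod requests: a node below all of the `thresholds` values counts as under-utilized, a node above any of the `targetThresholds` values counts as over-utilized, and the descheduler evicts from the latter so pods can reschedule onto the former. The annotated copy below only restates the rendered policy with that reading spelled out in comments (standard upstream semantics, no additional configuration):

```yaml
# Annotated restatement of the LowNodeUtilization args rendered above.
- name: LowNodeUtilization
  args:
    thresholds:          # below ALL of these => node is under-utilized
      cpu: 20            # percent of allocatable CPU, from pod requests
      memory: 20
      pods: 20
    targetThresholds:    # above ANY of these => node is over-utilized
      cpu: 60
      memory: 60
      pods: 60
```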

File diff suppressed because it is too large


@@ -0,0 +1,575 @@
---
# Source: eraser/charts/eraser/templates/eraser-controller-manager-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
helm.sh/chart: 'eraser'
name: eraser-controller-manager
namespace: 'eraser'
---
# Source: eraser/charts/eraser/templates/eraser-imagejob-pods-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
helm.sh/chart: 'eraser'
name: eraser-imagejob-pods
namespace: 'eraser'
---
# Source: eraser/charts/eraser/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: eraser-manager-config
namespace: "eraser"
data:
controller_manager_config.yaml: |
apiVersion: eraser.sh/v1alpha3
components:
collector:
enabled: true
image:
tag: v1.3.1
limit: {}
request:
cpu: 10m
memory: 128Mi
remover:
image:
tag: v1.3.1
limit: {}
request:
cpu: 10m
memory: 128Mi
scanner:
config: ""
enabled: false
image:
tag: v1.3.1
limit: {}
request:
cpu: 100m
memory: 128Mi
health: {}
kind: EraserConfig
leaderElection: {}
manager:
imageJob:
cleanup:
delayOnFailure: 24h
delayOnSuccess: 0s
successRatio: 1
logLevel: info
nodeFilter:
selectors:
- eraser.sh/cleanup.filter
- kubernetes.io/os=windows
type: exclude
otlpEndpoint: ""
priorityClassName: ""
profile:
enabled: false
port: 6060
pullSecrets: []
runtime:
address: unix:///run/containerd/containerd.sock
name: containerd
scheduling:
beginImmediately: true
repeatInterval: 24h
metrics: {}
webhook: {}
---
# Source: eraser/charts/eraser/templates/imagejobs.eraser.sh-customresourcedefinition.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.0
creationTimestamp: null
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
helm.sh/chart: 'eraser'
name: imagejobs.eraser.sh
spec:
group: eraser.sh
names:
kind: ImageJob
listKind: ImageJobList
plural: imagejobs
singular: imagejob
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: ImageJob is the Schema for the imagejobs API.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
status:
description: ImageJobStatus defines the observed state of ImageJob.
properties:
deleteAfter:
description: Time to delay deletion until
format: date-time
type: string
desired:
description: desired number of pods
type: integer
failed:
description: number of pods that failed
type: integer
phase:
description: job running, successfully completed, or failed
type: string
skipped:
description: number of nodes that were skipped e.g. because they are not a linux node
type: integer
succeeded:
description: number of pods that completed successfully
type: integer
required:
- desired
- failed
- phase
- skipped
- succeeded
type: object
type: object
served: true
storage: true
subresources:
status: {}
- deprecated: true
deprecationWarning: v1alpha1 of the eraser API has been deprecated. Please migrate to v1.
name: v1alpha1
schema:
openAPIV3Schema:
description: ImageJob is the Schema for the imagejobs API.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
status:
description: ImageJobStatus defines the observed state of ImageJob.
properties:
deleteAfter:
description: Time to delay deletion until
format: date-time
type: string
desired:
description: desired number of pods
type: integer
failed:
description: number of pods that failed
type: integer
phase:
description: job running, successfully completed, or failed
type: string
skipped:
description: number of nodes that were skipped e.g. because they are not a linux node
type: integer
succeeded:
description: number of pods that completed successfully
type: integer
required:
- desired
- failed
- phase
- skipped
- succeeded
type: object
type: object
served: true
storage: false
subresources:
status: {}
---
# Source: eraser/charts/eraser/templates/imagelists.eraser.sh-customresourcedefinition.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.0
creationTimestamp: null
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
helm.sh/chart: 'eraser'
name: imagelists.eraser.sh
spec:
group: eraser.sh
names:
kind: ImageList
listKind: ImageListList
plural: imagelists
singular: imagelist
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: ImageList is the Schema for the imagelists API.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: ImageListSpec defines the desired state of ImageList.
properties:
images:
description: The list of non-compliant images to delete if non-running.
items:
type: string
type: array
required:
- images
type: object
status:
description: ImageListStatus defines the observed state of ImageList.
properties:
failed:
description: Number of nodes that failed to run the job
format: int64
type: integer
skipped:
description: Number of nodes that were skipped due to a skip selector
format: int64
type: integer
success:
description: Number of nodes that successfully ran the job
format: int64
type: integer
timestamp:
description: Information when the job was completed.
format: date-time
type: string
required:
- failed
- skipped
- success
- timestamp
type: object
type: object
served: true
storage: true
subresources:
status: {}
- deprecated: true
deprecationWarning: v1alpha1 of the eraser API has been deprecated. Please migrate to v1.
name: v1alpha1
schema:
openAPIV3Schema:
description: ImageList is the Schema for the imagelists API.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: ImageListSpec defines the desired state of ImageList.
properties:
images:
description: The list of non-compliant images to delete if non-running.
items:
type: string
type: array
required:
- images
type: object
status:
description: ImageListStatus defines the observed state of ImageList.
properties:
failed:
description: Number of nodes that failed to run the job
format: int64
type: integer
skipped:
description: Number of nodes that were skipped due to a skip selector
format: int64
type: integer
success:
description: Number of nodes that successfully ran the job
format: int64
type: integer
timestamp:
description: Information when the job was completed.
format: date-time
type: string
required:
- failed
- skipped
- success
- timestamp
type: object
type: object
served: true
storage: false
subresources:
status: {}
---
# Source: eraser/charts/eraser/templates/eraser-manager-role-clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
helm.sh/chart: 'eraser'
name: eraser-manager-role
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- eraser.sh
resources:
- imagejobs
verbs:
- create
- delete
- get
- list
- watch
- apiGroups:
- eraser.sh
resources:
- imagejobs/status
verbs:
- get
- patch
- update
- apiGroups:
- eraser.sh
resources:
- imagelists
verbs:
- get
- list
- watch
- apiGroups:
- eraser.sh
resources:
- imagelists/status
verbs:
- get
- patch
- update
---
# Source: eraser/charts/eraser/templates/eraser-manager-rolebinding-clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
helm.sh/chart: 'eraser'
name: eraser-manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: eraser-manager-role
subjects:
- kind: ServiceAccount
name: eraser-controller-manager
namespace: 'eraser'
---
# Source: eraser/charts/eraser/templates/eraser-manager-role-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
helm.sh/chart: 'eraser'
name: eraser-manager-role
namespace: 'eraser'
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- get
- list
- update
- watch
- apiGroups:
- ""
resources:
- podtemplates
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
---
# Source: eraser/charts/eraser/templates/eraser-manager-rolebinding-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
helm.sh/chart: 'eraser'
name: eraser-manager-rolebinding
namespace: 'eraser'
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: eraser-manager-role
subjects:
- kind: ServiceAccount
name: eraser-controller-manager
namespace: 'eraser'
---
# Source: eraser/charts/eraser/templates/eraser-controller-manager-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
control-plane: controller-manager
helm.sh/chart: 'eraser'
name: eraser-controller-manager
namespace: 'eraser'
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
control-plane: controller-manager
helm.sh/chart: 'eraser'
template:
metadata:
labels:
app.kubernetes.io/instance: 'eraser'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eraser'
control-plane: controller-manager
helm.sh/chart: 'eraser'
spec:
affinity:
{}
containers:
- args:
- --config=/config/controller_manager_config.yaml
command:
- /manager
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: OTEL_SERVICE_NAME
value: eraser-manager
image: 'ghcr.io/eraser-dev/eraser-manager:v1.3.1'
imagePullPolicy: 'IfNotPresent'
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
name: manager
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
memory: 30Mi
requests:
cpu: 10m
memory: 30Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /config
name: eraser-manager-config
nodeSelector:
kubernetes.io/os: linux
priorityClassName: ''
serviceAccountName: eraser-controller-manager
terminationGracePeriodSeconds: 10
tolerations:
[]
volumes:
- configMap:
name: eraser-manager-config
name: eraser-manager-config
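
With the collector enabled and the scanner disabled, eraser prunes non-running images on the 24h schedule in `eraser-manager-config`; the `ImageList` CRD above also supports removing specific images on demand. A minimal sketch, assuming the upstream convention that the controller acts on a cluster-scoped `ImageList` named `imagelist` (the image references are placeholders):

```yaml
apiVersion: eraser.sh/v1
kind: ImageList
metadata:
  name: imagelist          # watched singleton by upstream convention (assumption)
spec:
  images:                  # non-compliant images, removed where not running
    - docker.io/library/alpine:3.18       # placeholder
    - ghcr.io/example/retired-app:v1.0.0  # placeholder
```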


@@ -0,0 +1,518 @@
---
# Source: external-dns/charts/external-dns-unifi/crds/dnsendpoints.externaldns.k8s.io.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: https://github.com/kubernetes-sigs/external-dns/pull/2007
name: dnsendpoints.externaldns.k8s.io
spec:
group: externaldns.k8s.io
names:
kind: DNSEndpoint
listKind: DNSEndpointList
plural: dnsendpoints
singular: dnsendpoint
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: |-
DNSEndpoint is a contract that a user-specified CRD must implement to be used as a source for external-dns.
The user-specified CRD should also have the status sub-resource.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: DNSEndpointSpec defines the desired state of DNSEndpoint
properties:
endpoints:
items:
description: Endpoint is a high-level way of a connection between a service and an IP
properties:
dnsName:
description: The hostname of the DNS record
type: string
labels:
additionalProperties:
type: string
description: Labels stores labels defined for the Endpoint
type: object
providerSpecific:
description: ProviderSpecific stores provider specific config
items:
description: ProviderSpecificProperty holds the name and value of a configuration which is specific to individual DNS providers
properties:
name:
type: string
value:
type: string
type: object
type: array
recordTTL:
description: TTL for the record
format: int64
type: integer
recordType:
description: RecordType type of record, e.g. CNAME, A, AAAA, SRV, TXT etc
type: string
setIdentifier:
description: Identifier to distinguish multiple records with the same name and type (e.g. Route53 records with routing policies other than 'simple')
type: string
targets:
description: The targets the DNS record points to
items:
type: string
type: array
type: object
type: array
type: object
status:
description: DNSEndpointStatus defines the observed state of DNSEndpoint
properties:
observedGeneration:
description: The generation observed by the external-dns controller.
format: int64
type: integer
type: object
type: object
served: true
storage: true
subresources:
status: {}
---
# Source: external-dns/charts/external-dns-unifi/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: external-dns-unifi
namespace: external-dns
labels:
helm.sh/chart: external-dns-unifi-1.19.0
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
app.kubernetes.io/version: "0.19.0"
app.kubernetes.io/managed-by: Helm
automountServiceAccountToken: true
---
# Source: external-dns/charts/external-dns-unifi/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: external-dns-unifi
labels:
helm.sh/chart: external-dns-unifi-1.19.0
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
app.kubernetes.io/version: "0.19.0"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: ["externaldns.k8s.io"]
resources: ["dnsendpoints"]
verbs: ["get","watch","list"]
- apiGroups: ["externaldns.k8s.io"]
resources: ["dnsendpoints/status"]
verbs: ["*"]
- apiGroups: ["gateway.networking.k8s.io"]
resources: ["gateways"]
verbs: ["get","watch","list"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get","watch","list"]
- apiGroups: ["gateway.networking.k8s.io"]
resources: ["httproutes"]
verbs: ["get","watch","list"]
- apiGroups: ["gateway.networking.k8s.io"]
resources: ["tlsroutes"]
verbs: ["get","watch","list"]
---
# Source: external-dns/charts/external-dns-unifi/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: external-dns-unifi-viewer
labels:
helm.sh/chart: external-dns-unifi-1.19.0
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
app.kubernetes.io/version: "0.19.0"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-dns-unifi
subjects:
- kind: ServiceAccount
name: external-dns-unifi
namespace: external-dns
---
# Source: external-dns/charts/external-dns-unifi/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: external-dns-unifi
namespace: external-dns
labels:
helm.sh/chart: external-dns-unifi-1.19.0
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
app.kubernetes.io/version: "0.19.0"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
ports:
- name: http
port: 7979
targetPort: http
protocol: TCP
- name: http-webhook
port: 8080
targetPort: http-webhook
protocol: TCP
---
# Source: external-dns/charts/external-dns-unifi/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns-unifi
namespace: external-dns
labels:
helm.sh/chart: external-dns-unifi-1.19.0
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
app.kubernetes.io/version: "0.19.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
strategy:
type: Recreate
template:
metadata:
labels:
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
spec:
automountServiceAccountToken: true
serviceAccountName: external-dns-unifi
securityContext:
fsGroup: 65534
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
containers:
- name: external-dns
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
image: registry.k8s.io/external-dns/external-dns:v0.19.0
imagePullPolicy: IfNotPresent
args:
- --log-level=info
- --log-format=text
- --interval=1m
- --source=ingress
- --source=crd
- --source=gateway-httproute
- --source=gateway-tlsroute
- --policy=sync
- --registry=txt
- --txt-owner-id=default
- --txt-prefix=k8s.
- --domain-filter=alexlebens.net
- --provider=webhook
- --ignore-ingress-tls-spec
ports:
- name: http
protocol: TCP
containerPort: 7979
livenessProbe:
failureThreshold: 2
httpGet:
path: /healthz
port: http
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
failureThreshold: 6
httpGet:
path: /healthz
port: http
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
- name: webhook
image: ghcr.io/kashalls/external-dns-unifi-webhook:v0.7.0
imagePullPolicy: IfNotPresent
env:
- name: UNIFI_HOST
value: https://192.168.1.1
- name: UNIFI_API_KEY
valueFrom:
secretKeyRef:
key: api-key
name: external-dns-unifi-secret
- name: LOG_LEVEL
value: debug
ports:
- name: http-webhook
protocol: TCP
containerPort: 8080
livenessProbe:
failureThreshold: 2
httpGet:
path: /healthz
port: http-webhook
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
failureThreshold: 6
httpGet:
path: /readyz
port: http-webhook
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
---
# Source: external-dns/templates/dns-endpoint.yaml
apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
name: external-device-names
namespace: external-dns
labels:
app.kubernetes.io/name: external-device-names
app.kubernetes.io/instance: external-dns
app.kubernetes.io/part-of: external-dns
spec:
endpoints:
# Unifi UDM
- dnsName: unifi.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 192.168.1.1
# Synology Web
- dnsName: synology.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.61
# Synology Storage
- dnsName: synologybond.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.64
# HD Homerun
- dnsName: hdhr.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.72
# Pi KVM
- dnsName: pikvm.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.71
---
# Source: external-dns/templates/dns-endpoint.yaml
apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
name: iot-device-names
namespace: external-dns
labels:
app.kubernetes.io/name: iot-device-names
app.kubernetes.io/instance: external-dns
app.kubernetes.io/part-of: external-dns
spec:
endpoints:
# Airgradient
- dnsName: it01ag.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.83
# Phillips Hue
- dnsName: it02ph.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.85
# TubesZB ZigBee
- dnsName: it03tb.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.81
# TubesZB Z-Wave
- dnsName: it04tb.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.82
---
# Source: external-dns/templates/dns-endpoint.yaml
apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
name: server-host-names
namespace: external-dns
labels:
app.kubernetes.io/name: server-host-names
app.kubernetes.io/instance: external-dns
app.kubernetes.io/part-of: external-dns
spec:
endpoints:
# Unifi Gateway
- dnsName: nw01un.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 192.168.1.1
# Synology
- dnsName: ps02sn.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.61
# Synology Storage
- dnsName: ps02sn-bond.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.64
# Raspberry Pi
- dnsName: ps08rp.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.51
# Raspberry Pi
- dnsName: ps09rp.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.52
---
# Source: external-dns/templates/dns-endpoint.yaml
apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
name: cluster-service-names
namespace: external-dns
labels:
app.kubernetes.io/name: cluster-service-names
app.kubernetes.io/instance: external-dns
app.kubernetes.io/part-of: external-dns
spec:
endpoints:
# Traefik Proxy
- dnsName: traefik-cl01tl.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.21
# Blocky DNS
- dnsName: blocky.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.22
# Plex load balancer
- dnsName: plex.alexlebens.net
recordTTL: 180
recordType: A
targets:
- 10.232.1.23
---
# Source: external-dns/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: external-dns-unifi-secret
namespace: external-dns
labels:
app.kubernetes.io/name: external-dns-unifi-secret
app.kubernetes.io/instance: external-dns
app.kubernetes.io/part-of: external-dns
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: api-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /unifi/auth/cl01tl
metadataPolicy: None
property: api-key
---
# Source: external-dns/charts/external-dns-unifi/templates/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: external-dns-unifi
namespace: external-dns
labels:
helm.sh/chart: external-dns-unifi-1.19.0
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
app.kubernetes.io/version: "0.19.0"
app.kubernetes.io/managed-by: Helm
spec:
jobLabel: app.kubernetes.io/instance
namespaceSelector:
matchNames:
- external-dns
selector:
matchLabels:
app.kubernetes.io/name: external-dns-unifi
app.kubernetes.io/instance: external-dns
endpoints:
- port: http
path: /metrics
- port: http-webhook
path: /metrics
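
The `DNSEndpoint` objects above publish only A records, but the CRD's `recordType` field also accepts other types such as CNAME, AAAA, and TXT. A sketch of a hypothetical alias record using the same mechanism (assuming the Unifi webhook provider handles CNAME records; the hostname below is a placeholder):

```yaml
apiVersion: externaldns.k8s.io/v1alpha1
kind: DNSEndpoint
metadata:
  name: example-aliases          # hypothetical object, not rendered above
  namespace: external-dns
spec:
  endpoints:
    - dnsName: files.alexlebens.net      # placeholder hostname
      recordTTL: 180
      recordType: CNAME
      targets:
        - ps02sn.alexlebens.net          # alias to the Synology host record above
```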

File diff suppressed because it is too large


@@ -0,0 +1,430 @@
---
# Source: garage/charts/garage/templates/common.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: garage
labels:
app.kubernetes.io/instance: garage
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: garage
helm.sh/chart: garage-4.4.0
namespace: garage
data:
garage.toml: |
replication_factor = 1
metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data"
metadata_snapshots_dir = "/var/lib/garage/snapshots"
db_engine = "lmdb"
metadata_auto_snapshot_interval = "6h"
compression_level = 3
rpc_bind_addr = "[::]:3901"
rpc_public_addr = "127.0.0.1:3901"
allow_world_readable_secrets = false
[s3_api]
s3_region = "us-east-1"
api_bind_addr = "[::]:3900"
root_domain = ".garage-s3.alexlebens.net"
[s3_web]
bind_addr = "[::]:3902"
root_domain = ".garage-s3.alexlebens.net"
[admin]
api_bind_addr = "[::]:3903"
metrics_require_token = true
---
# Source: garage/charts/garage/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: garage-data
labels:
app.kubernetes.io/instance: garage
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: garage
helm.sh/chart: garage-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: garage
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "800Gi"
storageClassName: "synology-iscsi-delete"
---
# Source: garage/charts/garage/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: garage-db
labels:
app.kubernetes.io/instance: garage
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: garage
helm.sh/chart: garage-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: garage
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: garage/charts/garage/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: garage-snapshots
labels:
app.kubernetes.io/instance: garage
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: garage
helm.sh/chart: garage-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: garage
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "50Gi"
storageClassName: "synology-iscsi-delete"
---
# Source: garage/charts/garage/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: garage-main
labels:
app.kubernetes.io/instance: garage
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: garage
app.kubernetes.io/service: garage-main
helm.sh/chart: garage-4.4.0
namespace: garage
spec:
type: ClusterIP
ports:
- port: 3903
targetPort: 3903
protocol: TCP
name: admin
- port: 3901
targetPort: 3901
protocol: TCP
name: rpc
- port: 3900
targetPort: 3900
protocol: TCP
name: s3
- port: 3902
targetPort: 3902
protocol: TCP
name: web
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: garage
app.kubernetes.io/name: garage
---
# Source: garage/charts/garage/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: garage-webui
labels:
app.kubernetes.io/instance: garage
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: garage
app.kubernetes.io/service: garage-webui
helm.sh/chart: garage-4.4.0
namespace: garage
spec:
type: ClusterIP
ports:
- port: 3909
targetPort: 3909
protocol: TCP
name: webui
selector:
app.kubernetes.io/controller: webui
app.kubernetes.io/instance: garage
app.kubernetes.io/name: garage
---
# Source: garage/charts/garage/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: garage-main
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: garage
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: garage
helm.sh/chart: garage-4.4.0
namespace: garage
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: garage
app.kubernetes.io/instance: garage
template:
metadata:
annotations:
checksum/configMaps: aecb65cb46684688a356974d7ecaec4abb2d4fed3f71863780e7f35505c7af02
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: garage
app.kubernetes.io/name: garage
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- envFrom:
- secretRef:
name: garage-token-secret
image: dxflrs/garage:v2.1.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /etc/garage.toml
mountPropagation: None
name: config
readOnly: true
subPath: garage.toml
- mountPath: /var/lib/garage/data
name: data
- mountPath: /var/lib/garage/meta
name: db
- mountPath: /var/lib/garage/snapshots
name: snapshots
volumes:
- configMap:
name: garage
name: config
- name: data
persistentVolumeClaim:
claimName: garage-data
- name: db
persistentVolumeClaim:
claimName: garage-db
- name: snapshots
persistentVolumeClaim:
claimName: garage-snapshots
---
# Source: garage/charts/garage/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: garage-webui
labels:
app.kubernetes.io/controller: webui
app.kubernetes.io/instance: garage
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: garage
helm.sh/chart: garage-4.4.0
namespace: garage
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: webui
app.kubernetes.io/name: garage
app.kubernetes.io/instance: garage
template:
metadata:
annotations:
checksum/configMaps: aecb65cb46684688a356974d7ecaec4abb2d4fed3f71863780e7f35505c7af02
labels:
app.kubernetes.io/controller: webui
app.kubernetes.io/instance: garage
app.kubernetes.io/name: garage
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: API_BASE_URL
value: http://garage-main.garage:3903
- name: S3_ENDPOINT_URL
value: http://garage-main.garage:3900
- name: API_ADMIN_KEY
valueFrom:
secretKeyRef:
key: GARAGE_ADMIN_TOKEN
name: garage-token-secret
image: khairul169/garage-webui:1.1.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /etc/garage.toml
mountPropagation: None
name: config
readOnly: true
subPath: garage.toml
volumes:
- configMap:
name: garage
name: config
---
# Source: garage/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: garage-token-secret
namespace: garage
labels:
app.kubernetes.io/name: garage-token-secret
app.kubernetes.io/instance: garage
app.kubernetes.io/part-of: garage
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: GARAGE_RPC_SECRET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/garage/token
metadataPolicy: None
property: rpc
- secretKey: GARAGE_ADMIN_TOKEN
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/garage/token
metadataPolicy: None
property: admin
- secretKey: GARAGE_METRICS_TOKEN
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/garage/token
metadataPolicy: None
property: metric
---
# Source: garage/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-garage-webui
namespace: garage
labels:
app.kubernetes.io/name: http-route-garage-webui
app.kubernetes.io/instance: garage
app.kubernetes.io/part-of: garage
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- garage-webui.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: garage-webui
port: 3909
weight: 100
---
# Source: garage/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-garage-s3
namespace: garage
labels:
app.kubernetes.io/name: http-route-garage-s3
app.kubernetes.io/instance: garage
app.kubernetes.io/part-of: garage
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- garage-s3.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: garage-main
port: 3900
weight: 100
---
# Source: garage/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: garage
namespace: garage
labels:
app.kubernetes.io/name: garage
app.kubernetes.io/instance: garage
app.kubernetes.io/part-of: garage
spec:
selector:
matchLabels:
app.kubernetes.io/name: garage
app.kubernetes.io/instance: garage
endpoints:
- port: admin
interval: 1m
scrapeTimeout: 30s
path: /metrics
bearerTokenSecret:
name: garage-token-secret
key: GARAGE_METRICS_TOKEN
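
The ExternalSecret above materializes a Kubernetes Secret, by default under the same name `garage-token-secret`, whose keys are consumed in three places: `envFrom` on the garage-main container, the `API_ADMIN_KEY` variable on the webui, and the ServiceMonitor's `bearerTokenSecret`. A sketch of the resulting Secret's shape (values are placeholders):

```yaml
# Shape of the Secret produced by the ExternalSecret above (values are placeholders).
apiVersion: v1
kind: Secret
metadata:
  name: garage-token-secret
  namespace: garage
type: Opaque
stringData:
  GARAGE_RPC_SECRET: "<rpc-secret>"        # consumed via envFrom on garage-main
  GARAGE_ADMIN_TOKEN: "<admin-token>"      # API_ADMIN_KEY for the webui
  GARAGE_METRICS_TOKEN: "<metrics-token>"  # bearer token for the ServiceMonitor
```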


@@ -0,0 +1,163 @@
---
# Source: generic-device-plugin/templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: generic-device-plugin
labels:
app.kubernetes.io/name: generic-device-plugin
app.kubernetes.io/instance: generic-device-plugin
app.kubernetes.io/part-of: generic-device-plugin
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged
---
# Source: generic-device-plugin/charts/generic-device-plugin/templates/common.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: generic-device-plugin
labels:
app.kubernetes.io/instance: generic-device-plugin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: generic-device-plugin
app.kubernetes.io/version: 0.15.0
helm.sh/chart: generic-device-plugin-0.20.1
namespace: generic-device-plugin
data:
config.yaml: |
devices:
- name: tun
groups:
- count: 1000
paths:
- path: /dev/net/tun
---
# Source: generic-device-plugin/charts/generic-device-plugin/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: generic-device-plugin
labels:
app.kubernetes.io/instance: generic-device-plugin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: generic-device-plugin
app.kubernetes.io/service: generic-device-plugin
app.kubernetes.io/version: 0.15.0
helm.sh/chart: generic-device-plugin-0.20.1
namespace: generic-device-plugin
spec:
type: ClusterIP
ports:
- port: 8080
targetPort: 8080
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: generic-device-plugin
app.kubernetes.io/name: generic-device-plugin
---
# Source: generic-device-plugin/charts/generic-device-plugin/templates/common.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: generic-device-plugin
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: generic-device-plugin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: generic-device-plugin
app.kubernetes.io/version: 0.15.0
helm.sh/chart: generic-device-plugin-0.20.1
namespace: generic-device-plugin
spec:
revisionHistoryLimit: 3
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: generic-device-plugin
app.kubernetes.io/instance: generic-device-plugin
template:
metadata:
annotations:
checksum/configMaps: 473a15a17751b0c136528e129767f6ed0871ca7522e5a6ccd90d041808571e81
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: generic-device-plugin
app.kubernetes.io/name: generic-device-plugin
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
priorityClassName: system-node-critical
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
containers:
- args:
- --config=/config/config.yaml
env:
- name: LISTEN
value: :8080
- name: PLUGIN_DIRECTORY
value: /var/lib/kubelet/device-plugins
- name: DOMAIN
value: devic.es
image: ghcr.io/squat/generic-device-plugin:latest@sha256:ef5deb09dcf7e577c8603857ae56ef479d91be6ab6f40dd2427166d510b0745f
imagePullPolicy: Always
name: main
securityContext:
privileged: true
volumeMounts:
- mountPath: /config
name: config
- mountPath: /dev
name: dev
- mountPath: /var/lib/kubelet/device-plugins
name: device-plugins
volumes:
- configMap:
name: generic-device-plugin-config
name: config
- hostPath:
path: /dev
name: dev
- hostPath:
path: /var/lib/kubelet/device-plugins
name: device-plugins
---
# Source: generic-device-plugin/charts/generic-device-plugin/templates/common.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: generic-device-plugin
labels:
app.kubernetes.io/instance: generic-device-plugin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: generic-device-plugin
app.kubernetes.io/version: 0.15.0
helm.sh/chart: generic-device-plugin-0.20.1
namespace: generic-device-plugin
spec:
jobLabel: "generic-device-plugin"
namespaceSelector:
matchNames:
- generic-device-plugin
selector:
matchLabels:
app.kubernetes.io/service: generic-device-plugin
app.kubernetes.io/name: generic-device-plugin
app.kubernetes.io/instance: generic-device-plugin
endpoints:
- interval: 30s
path: /metrics
port: http
scheme: http
scrapeTimeout: 10s
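# Illustrative usage (assumption; not rendered by the chart): with DOMAIN set to devic.es and the
# tun device declared in the ConfigMap above, the plugin presumably advertises devic.es/tun, which
# a workload would request with:
#   resources:
#     limits:
#       devic.es/tun: "1"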

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -68,30 +68,6 @@ spec:
# Source: immich/charts/immich/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: immich-machine-learning
labels:
app.kubernetes.io/instance: immich
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: immich
app.kubernetes.io/service: immich-machine-learning
helm.sh/chart: immich-4.4.0
namespace: immich
spec:
type: ClusterIP
ports:
- port: 3003
targetPort: 3003
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: machine-learning
app.kubernetes.io/instance: immich
app.kubernetes.io/name: immich
---
# Source: immich/charts/immich/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: immich-main
labels:
@@ -122,6 +98,30 @@ spec:
app.kubernetes.io/name: immich
---
# Source: immich/charts/immich/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: immich-machine-learning
labels:
app.kubernetes.io/instance: immich
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: immich
app.kubernetes.io/service: immich-machine-learning
helm.sh/chart: immich-4.4.0
namespace: immich
spec:
type: ClusterIP
ports:
- port: 3003
targetPort: 3003
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: machine-learning
app.kubernetes.io/instance: immich
app.kubernetes.io/name: immich
---
# Source: immich/charts/immich/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:

File diff suppressed because it is too large

@@ -0,0 +1,96 @@
---
# Source: kubernetes-cloudflare-ddns/charts/kubernetes-cloudflare-ddns/templates/common.yaml
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: kubernetes-cloudflare-ddns
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: kubernetes-cloudflare-ddns
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kubernetes-cloudflare-ddns
helm.sh/chart: kubernetes-cloudflare-ddns-4.4.0
namespace: kubernetes-cloudflare-ddns
spec:
suspend: false
concurrencyPolicy: Forbid
startingDeadlineSeconds: 90
timeZone: US/Central
schedule: "30 4 * * *"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
parallelism: 1
backoffLimit: 3
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: kubernetes-cloudflare-ddns
app.kubernetes.io/name: kubernetes-cloudflare-ddns
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
restartPolicy: Never
containers:
- envFrom:
- secretRef:
name: kubernetes-cloudflare-ddns-secret
image: kubitodev/kubernetes-cloudflare-ddns:2.0.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: kubernetes-cloudflare-ddns/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: kubernetes-cloudflare-ddns-secret
namespace: kubernetes-cloudflare-ddns
labels:
app.kubernetes.io/name: kubernetes-cloudflare-ddns-secret
app.kubernetes.io/instance: kubernetes-cloudflare-ddns
app.kubernetes.io/part-of: kubernetes-cloudflare-ddns
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: AUTH_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/alexlebens.net/ddns
metadataPolicy: None
property: token
- secretKey: NAME
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/alexlebens.net/ddns
metadataPolicy: None
property: name
- secretKey: RECORD_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/alexlebens.net/ddns
metadataPolicy: None
property: record-id
- secretKey: ZONE_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/alexlebens.net/ddns
metadataPolicy: None
property: zone-id
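# The CronJob above injects these four keys (AUTH_KEY, NAME, RECORD_ID, ZONE_ID) through envFrom
# and refreshes the Cloudflare DNS record once a day at 04:30 US/Central.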


@@ -0,0 +1,270 @@
---
# Source: local-path-provisioner/templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: local-path-provisioner
labels:
app.kubernetes.io/name: local-path-provisioner
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/part-of: local-path-provisioner
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged
---
# Source: local-path-provisioner/charts/local-path-provisioner/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: local-path-provisioner
namespace: local-path-provisioner
labels:
app.kubernetes.io/name: local-path-provisioner
helm.sh/chart: local-path-provisioner-0.0.33
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/version: "v0.0.32"
app.kubernetes.io/managed-by: Helm
imagePullSecrets:
---
# Source: local-path-provisioner/charts/local-path-provisioner/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: local-path-config
namespace: local-path-provisioner
labels:
app.kubernetes.io/name: local-path-provisioner
helm.sh/chart: local-path-provisioner-0.0.33
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/version: "v0.0.32"
app.kubernetes.io/managed-by: Helm
data:
config.json: |-
{
"nodePathMap": [
{
"node": "talos-2di-ktg",
"paths": [
"/var/local-path-provisioner"
]
},
{
"node": "talos-9vs-6hh",
"paths": [
"/var/local-path-provisioner"
]
},
{
"node": "talos-aoq-hpv",
"paths": [
"/var/local-path-provisioner"
]
}
]
}
setup: |-
#!/bin/sh
set -eu
mkdir -m 0777 -p "$VOL_DIR"
teardown: |-
#!/bin/sh
set -eu
rm -rf "$VOL_DIR"
helperPod.yaml: |-
apiVersion: v1
kind: Pod
metadata:
name: helper-pod
namespace: local-path-provisioner
spec:
priorityClassName: system-node-critical
tolerations:
- key: node.kubernetes.io/disk-pressure
operator: Exists
effect: NoSchedule
containers:
- name: helper-pod
image: busybox:1.37.0
imagePullPolicy: IfNotPresent
resources:
{}
---
# Source: local-path-provisioner/charts/local-path-provisioner/templates/storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local-path
labels:
app.kubernetes.io/name: local-path-provisioner
helm.sh/chart: local-path-provisioner-0.0.33
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/version: "v0.0.32"
app.kubernetes.io/managed-by: Helm
annotations:
storageclass.kubernetes.io/is-default-class: "false"
defaultVolumeType: "hostPath"
provisioner: cluster.local/local-path-provisioner
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
allowVolumeExpansion: true
---
# Source: local-path-provisioner/charts/local-path-provisioner/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: local-path-provisioner
labels:
app.kubernetes.io/name: local-path-provisioner
helm.sh/chart: local-path-provisioner-0.0.33
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/version: "v0.0.32"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: [""]
resources: ["nodes", "persistentvolumeclaims", "configmaps", "pods", "pods/log"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "patch", "update", "delete"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
---
# Source: local-path-provisioner/charts/local-path-provisioner/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: local-path-provisioner
labels:
app.kubernetes.io/name: local-path-provisioner
helm.sh/chart: local-path-provisioner-0.0.33
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/version: "v0.0.32"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: local-path-provisioner
subjects:
- kind: ServiceAccount
name: local-path-provisioner
namespace: local-path-provisioner
---
# Source: local-path-provisioner/charts/local-path-provisioner/templates/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: local-path-provisioner
namespace: local-path-provisioner
labels:
app.kubernetes.io/name: local-path-provisioner
helm.sh/chart: local-path-provisioner-0.0.33
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/version: "v0.0.32"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch", "create", "patch", "update", "delete"]
---
# Source: local-path-provisioner/charts/local-path-provisioner/templates/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: local-path-provisioner
namespace: local-path-provisioner
labels:
app.kubernetes.io/name: local-path-provisioner
helm.sh/chart: local-path-provisioner-0.0.33
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/version: "v0.0.32"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: local-path-provisioner
subjects:
- kind: ServiceAccount
name: local-path-provisioner
namespace: local-path-provisioner
---
# Source: local-path-provisioner/charts/local-path-provisioner/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: local-path-provisioner
namespace: local-path-provisioner
labels:
app.kubernetes.io/name: local-path-provisioner
helm.sh/chart: local-path-provisioner-0.0.33
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/version: "v0.0.32"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: local-path-provisioner
app.kubernetes.io/instance: local-path-provisioner
template:
metadata:
labels:
app.kubernetes.io/name: local-path-provisioner
helm.sh/chart: local-path-provisioner-0.0.33
app.kubernetes.io/instance: local-path-provisioner
app.kubernetes.io/version: "v0.0.32"
app.kubernetes.io/managed-by: Helm
spec:
serviceAccountName: local-path-provisioner
securityContext:
{}
containers:
- name: local-path-provisioner
securityContext:
{}
image: "rancher/local-path-provisioner:v0.0.32"
imagePullPolicy: IfNotPresent
command:
- local-path-provisioner
- --debug
- start
- --config
- /etc/config/config.json
- --service-account-name
- local-path-provisioner
- --provisioner-name
- cluster.local/local-path-provisioner
- --helper-image
- "busybox:1.37.0"
- --configmap-name
- local-path-config
volumeMounts:
- name: config-volume
mountPath: /etc/config/
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CONFIG_MOUNT_PATH
value: /etc/config/
resources:
{}
volumes:
- name: config-volume
configMap:
name: local-path-config
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- talos-2di-ktg
- talos-9vs-6hh
- talos-aoq-hpv
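# Note: nodePathMap above provisions volumes under /var/local-path-provisioner on each listed Talos
# node, and the Deployment's nodeAffinity keeps the provisioner pod on those same three nodes.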

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,211 @@
---
# Source: nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
chart: nfs-subdir-external-provisioner-4.0.18
heritage: Helm
app: nfs-subdir-external-provisioner
release: nfs
name: nfs-nfs-subdir-external-provisioner
---
# Source: nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/templates/storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
labels:
chart: nfs-subdir-external-provisioner-4.0.18
heritage: Helm
app: nfs-subdir-external-provisioner
release: nfs
name: nfs-client
annotations:
provisioner: cluster.local/nfs-nfs-subdir-external-provisioner
allowVolumeExpansion: true
reclaimPolicy: Delete
volumeBindingMode: Immediate
parameters:
archiveOnDelete: "true"
mountOptions:
- hard
- vers=4
- minorversion=1
---
# Source: nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/templates/persistentvolume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-nfs-nfs-subdir-external-provisioner
labels:
chart: nfs-subdir-external-provisioner-4.0.18
heritage: Helm
app: nfs-subdir-external-provisioner
release: nfs
nfs-subdir-external-provisioner: nfs-nfs-subdir-external-provisioner
spec:
capacity:
storage: 10Mi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: ""
mountOptions:
- hard
- vers=4
- minorversion=1
nfs:
server: 10.232.1.64
path: /volume2/Talos
---
# Source: nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/templates/persistentvolumeclaim.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: pvc-nfs-nfs-subdir-external-provisioner
labels:
chart: nfs-subdir-external-provisioner-4.0.18
heritage: Helm
app: nfs-subdir-external-provisioner
release: nfs
spec:
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
storageClassName: ""
selector:
matchLabels:
nfs-subdir-external-provisioner: nfs-nfs-subdir-external-provisioner
resources:
requests:
storage: 10Mi
---
# Source: nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/templates/clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
chart: nfs-subdir-external-provisioner-4.0.18
heritage: Helm
app: nfs-subdir-external-provisioner
release: nfs
name: nfs-nfs-subdir-external-provisioner-runner
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
# Source: nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/templates/clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
chart: nfs-subdir-external-provisioner-4.0.18
heritage: Helm
app: nfs-subdir-external-provisioner
release: nfs
name: run-nfs-nfs-subdir-external-provisioner
subjects:
- kind: ServiceAccount
name: nfs-nfs-subdir-external-provisioner
namespace: nfs
roleRef:
kind: ClusterRole
name: nfs-nfs-subdir-external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
# Source: nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/templates/role.yaml
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
chart: nfs-subdir-external-provisioner-4.0.18
heritage: Helm
app: nfs-subdir-external-provisioner
release: nfs
name: leader-locking-nfs-nfs-subdir-external-provisioner
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
# Source: nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/templates/rolebinding.yaml
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
chart: nfs-subdir-external-provisioner-4.0.18
heritage: Helm
app: nfs-subdir-external-provisioner
release: nfs
name: leader-locking-nfs-nfs-subdir-external-provisioner
subjects:
- kind: ServiceAccount
name: nfs-nfs-subdir-external-provisioner
namespace: nfs
roleRef:
kind: Role
name: leader-locking-nfs-nfs-subdir-external-provisioner
apiGroup: rbac.authorization.k8s.io
---
# Source: nfs-subdir-external-provisioner/charts/nfs-subdir-external-provisioner/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-nfs-subdir-external-provisioner
labels:
chart: nfs-subdir-external-provisioner-4.0.18
heritage: Helm
app: nfs-subdir-external-provisioner
release: nfs
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: nfs-subdir-external-provisioner
release: nfs
template:
metadata:
annotations:
labels:
app: nfs-subdir-external-provisioner
release: nfs
spec:
serviceAccountName: nfs-nfs-subdir-external-provisioner
securityContext:
{}
containers:
- name: nfs-subdir-external-provisioner
image: "registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2"
imagePullPolicy: IfNotPresent
securityContext:
{}
volumeMounts:
- name: nfs-subdir-external-provisioner-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: cluster.local/nfs-nfs-subdir-external-provisioner
- name: NFS_SERVER
value: 10.232.1.64
- name: NFS_PATH
value: /volume2/Talos
volumes:
- name: nfs-subdir-external-provisioner-root
persistentVolumeClaim:
claimName: pvc-nfs-nfs-subdir-external-provisioner
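# The provisioner mounts the NFS export 10.232.1.64:/volume2/Talos through the statically bound
# PV/PVC pair above (empty storageClassName plus a matching label selector ties them together).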

File diff suppressed because it is too large

@@ -0,0 +1,195 @@
---
# Source: ntfy/charts/ntfy/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: ntfy
labels:
app.kubernetes.io/instance: ntfy
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: ntfy
helm.sh/chart: ntfy-4.4.0
namespace: ntfy
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: ntfy/charts/ntfy/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: ntfy
labels:
app.kubernetes.io/instance: ntfy
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: ntfy
app.kubernetes.io/service: ntfy
helm.sh/chart: ntfy-4.4.0
namespace: ntfy
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 80
protocol: TCP
name: http
- port: 9090
targetPort: 9090
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: ntfy
app.kubernetes.io/name: ntfy
---
# Source: ntfy/charts/ntfy/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: ntfy
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: ntfy
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: ntfy
helm.sh/chart: ntfy-4.4.0
namespace: ntfy
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: ntfy
app.kubernetes.io/instance: ntfy
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: ntfy
app.kubernetes.io/name: ntfy
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- args:
- serve
env:
- name: TZ
value: US/Central
- name: NTFY_BASE_URL
value: https://ntfy.alexlebens.net
- name: NTFY_LISTEN_HTTP
value: :80
- name: NTFY_CACHE_FILE
value: /var/cache/ntfy/cache.db
- name: NTFY_CACHE_DURATION
value: 36h
- name: NTFY_CACHE_STARTUP_QUERIES
value: |
pragma journal_mode = WAL;
pragma synchronous = normal;
pragma temp_store = memory;
pragma busy_timeout = 15000;
vacuum;
- name: NTFY_BEHIND_PROXY
value: "true"
- name: NTFY_ATTACHMENT_CACHE_DIR
value: /var/cache/ntfy/attachments
- name: NTFY_ATTACHMENT_TOTAL_SIZE_LIMIT
value: 4G
- name: NTFY_ATTACHMENT_FILE_SIZE_LIMIT
value: 15M
- name: NTFY_ATTACHMENT_EXPIRY_DURATION
value: 36h
- name: NTFY_ENABLE_SIGNUP
value: "false"
- name: NTFY_ENABLE_LOGIN
value: "true"
- name: NTFY_ENABLE_RESERVATIONS
value: "false"
- name: NTFY_AUTH_FILE
value: /var/cache/ntfy/user.db
- name: NTFY_AUTH_DEFAULT_ACCESS
value: deny-all
- name: NTFY_METRICS_LISTEN_HTTP
value: :9090
- name: NTFY_LOG_LEVEL
value: info
image: binwiederhier/ntfy:v2.15.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /var/cache/ntfy
name: cache
volumes:
- name: cache
persistentVolumeClaim:
claimName: ntfy
---
# Source: ntfy/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-ntfy
namespace: ntfy
labels:
app.kubernetes.io/name: http-route-ntfy
app.kubernetes.io/instance: ntfy
app.kubernetes.io/part-of: ntfy
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- ntfy.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: ntfy
port: 80
weight: 100
---
# Source: ntfy/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: ntfy
namespace: ntfy
labels:
app.kubernetes.io/name: ntfy
app.kubernetes.io/instance: ntfy
app.kubernetes.io/part-of: ntfy
spec:
selector:
matchLabels:
app.kubernetes.io/name: ntfy
app.kubernetes.io/instance: ntfy
endpoints:
- port: metrics
interval: 3m
scrapeTimeout: 1m
path: /metrics
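# The metrics port scraped here is the :9090 listener enabled by NTFY_METRICS_LISTEN_HTTP in the
# Deployment above.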

File diff suppressed because it is too large

@@ -0,0 +1,309 @@
---
# Source: pgadmin4/charts/pgadmin4/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: pgadmin4-data
labels:
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: pgadmin
helm.sh/chart: pgadmin4-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: pgadmin
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: pgadmin4/charts/pgadmin4/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: pgadmin
labels:
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: pgadmin
app.kubernetes.io/service: pgadmin
helm.sh/chart: pgadmin4-4.4.0
namespace: pgadmin
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 80
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/name: pgadmin
---
# Source: pgadmin4/charts/pgadmin4/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: pgadmin
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: pgadmin
helm.sh/chart: pgadmin4-4.4.0
namespace: pgadmin
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: pgadmin
app.kubernetes.io/instance: pgadmin
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/name: pgadmin
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
initContainers:
- command:
- /bin/sh
- -ec
- |
/bin/chown -R 5050:5050 /var/lib/pgadmin
image: busybox:1.37.0
imagePullPolicy: IfNotPresent
name: init-chmod-data
resources:
requests:
cpu: 10m
memory: 128Mi
securityContext:
runAsUser: 0
volumeMounts:
- mountPath: /var/lib/pgadmin
name: data
containers:
- env:
- name: PGADMIN_CONFIG_ENHANCED_COOKIE_PROTECTION
value: "False"
- name: PGADMIN_DEFAULT_EMAIL
value: alexanderlebens@gmail.com
- name: PGADMIN_DEFAULT_PASSWORD
valueFrom:
secretKeyRef:
key: pgadmin-password
name: pgadmin-password-secret
envFrom:
- secretRef:
name: pgadmin-env-secret
image: dpage/pgadmin4:9.10
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
securityContext:
runAsGroup: 5050
runAsUser: 5050
volumeMounts:
- mountPath: /var/lib/pgadmin
name: data
volumes:
- name: data
persistentVolumeClaim:
claimName: pgadmin4-data
---
# Source: pgadmin4/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: pgadmin-password-secret
namespace: pgadmin
labels:
app.kubernetes.io/name: pgadmin-password-secret
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/part-of: pgadmin
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: pgadmin-password
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/pgadmin/auth
metadataPolicy: None
property: pgadmin-password
---
# Source: pgadmin4/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: pgadmin-env-secret
namespace: pgadmin
labels:
app.kubernetes.io/name: pgadmin-env-secret
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/part-of: pgadmin
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: PGADMIN_CONFIG_AUTHENTICATION_SOURCES
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/pgadmin/env
metadataPolicy: None
property: PGADMIN_CONFIG_AUTHENTICATION_SOURCES
- secretKey: PGADMIN_CONFIG_OAUTH2_AUTO_CREATE_USER
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/pgadmin/env
metadataPolicy: None
property: PGADMIN_CONFIG_OAUTH2_AUTO_CREATE_USER
- secretKey: PGADMIN_CONFIG_OAUTH2_CONFIG
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/pgadmin/env
metadataPolicy: None
property: PGADMIN_CONFIG_OAUTH2_CONFIG
---
# Source: pgadmin4/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: pgadmin-data-backup-secret
namespace: pgadmin
labels:
app.kubernetes.io/name: pgadmin-data-backup-secret
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/part-of: pgadmin
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/pgadmin/pgadmin-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: pgadmin4/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-pgadmin
namespace: pgadmin
labels:
app.kubernetes.io/name: http-route-pgadmin
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/part-of: pgadmin
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- pgadmin.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: pgadmin
port: 80
weight: 100
---
# Source: pgadmin4/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: pgadmin-data-backup-source
namespace: pgadmin
labels:
app.kubernetes.io/name: pgadmin-data-backup-source
app.kubernetes.io/instance: pgadmin
app.kubernetes.io/part-of: pgadmin
spec:
sourcePVC: pgadmin-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: pgadmin-data-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 5050
runAsGroup: 5050
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
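# VolSync runs the ReplicationSource above daily at 04:00, snapshotting pgadmin-data and pushing it
# to the restic repository <BUCKET_ENDPOINT>/pgadmin/pgadmin-data templated into
# pgadmin-data-backup-secret.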

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,303 @@
---
# Source: reloader/charts/reloader/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
meta.helm.sh/release-namespace: "reloader"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
release: "reloader"
app.kubernetes.io/name: reloader
app.kubernetes.io/instance: "reloader"
helm.sh/chart: "reloader-2.2.5"
chart: "reloader-2.2.5"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/version: "v1.4.10"
name: reloader-reloader
namespace: reloader
---
# Source: reloader/charts/reloader/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
meta.helm.sh/release-namespace: "reloader"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
release: "reloader"
app.kubernetes.io/name: reloader
app.kubernetes.io/instance: "reloader"
helm.sh/chart: "reloader-2.2.5"
chart: "reloader-2.2.5"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/version: "v1.4.10"
name: reloader-reloader-role
rules:
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- list
- get
- watch
- apiGroups:
- "apps"
resources:
- deployments
- daemonsets
- statefulsets
verbs:
- list
- get
- update
- patch
- apiGroups:
- "batch"
resources:
- cronjobs
verbs:
- list
- get
- apiGroups:
- "batch"
resources:
- jobs
verbs:
- create
- delete
- list
- get
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
# Source: reloader/charts/reloader/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
meta.helm.sh/release-namespace: "reloader"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
release: "reloader"
app.kubernetes.io/name: reloader
app.kubernetes.io/instance: "reloader"
helm.sh/chart: "reloader-2.2.5"
chart: "reloader-2.2.5"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/version: "v1.4.10"
name: reloader-reloader-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: reloader-reloader-role
subjects:
- kind: ServiceAccount
name: reloader-reloader
namespace: reloader
---
# Source: reloader/charts/reloader/templates/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations:
meta.helm.sh/release-namespace: "reloader"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
release: "reloader"
app.kubernetes.io/name: reloader
app.kubernetes.io/instance: "reloader"
helm.sh/chart: "reloader-2.2.5"
chart: "reloader-2.2.5"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/version: "v1.4.10"
name: reloader-reloader-metadata-role
namespace: reloader
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- list
- get
- watch
- create
- update
---
# Source: reloader/charts/reloader/templates/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations:
meta.helm.sh/release-namespace: "reloader"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
release: "reloader"
app.kubernetes.io/name: reloader
app.kubernetes.io/instance: "reloader"
helm.sh/chart: "reloader-2.2.5"
chart: "reloader-2.2.5"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/version: "v1.4.10"
name: reloader-reloader-metadata-role-binding
namespace: reloader
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: reloader-reloader-metadata-role
subjects:
- kind: ServiceAccount
name: reloader-reloader
namespace: reloader
---
# Source: reloader/charts/reloader/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
meta.helm.sh/release-namespace: "reloader"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
release: "reloader"
app.kubernetes.io/name: reloader
app.kubernetes.io/instance: "reloader"
helm.sh/chart: "reloader-2.2.5"
chart: "reloader-2.2.5"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/version: "v1.4.10"
group: com.stakater.platform
provider: stakater
version: v1.4.10
name: reloader-reloader
namespace: reloader
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
app: reloader-reloader
release: "reloader"
template:
metadata:
labels:
app: reloader-reloader
release: "reloader"
app.kubernetes.io/name: reloader
app.kubernetes.io/instance: "reloader"
helm.sh/chart: "reloader-2.2.5"
chart: "reloader-2.2.5"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/version: "v1.4.10"
group: com.stakater.platform
provider: stakater
version: v1.4.10
spec:
containers:
- image: "ghcr.io/stakater/reloader:v1.4.10"
imagePullPolicy: IfNotPresent
name: reloader-reloader
env:
- name: GOMAXPROCS
valueFrom:
resourceFieldRef:
resource: limits.cpu
divisor: '1'
- name: GOMEMLIMIT
valueFrom:
resourceFieldRef:
resource: limits.memory
divisor: '1'
- name: RELOADER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: RELOADER_DEPLOYMENT_NAME
value: reloader-reloader
ports:
- name: http
containerPort: 9090
livenessProbe:
httpGet:
path: /live
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
initialDelaySeconds: 10
readinessProbe:
httpGet:
path: /metrics
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
initialDelaySeconds: 10
securityContext:
{}
args:
- "--log-level=info"
securityContext:
runAsNonRoot: true
runAsUser: 65534
seccompProfile:
type: RuntimeDefault
serviceAccountName: reloader-reloader
---
# Source: reloader/charts/reloader/templates/podmonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
labels:
app: reloader-reloader
release: "reloader"
app.kubernetes.io/name: reloader
app.kubernetes.io/instance: "reloader"
helm.sh/chart: "reloader-2.2.5"
chart: "reloader-2.2.5"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
app.kubernetes.io/version: "v1.4.10"
name: reloader-reloader
namespace: reloader
spec:
podMetricsEndpoints:
- port: http
path: "/metrics"
honorLabels: true
jobLabel: reloader-reloader
namespaceSelector:
matchNames:
- reloader
selector:
matchLabels:
app: reloader-reloader
release: "reloader"

File diff suppressed because it is too large

@@ -2,6 +2,30 @@
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: s3-exporter-ceph-directus
labels:
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/service: s3-exporter-ceph-directus
helm.sh/chart: s3-exporter-4.4.0
namespace: s3-exporter
spec:
type: ClusterIP
ports:
- port: 9655
targetPort: 9655
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: ceph-directus
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/name: s3-exporter
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: s3-exporter-digital-ocean
labels:
@@ -72,30 +96,6 @@ spec:
app.kubernetes.io/name: s3-exporter
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: s3-exporter-ceph-directus
labels:
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/service: s3-exporter-ceph-directus
helm.sh/chart: s3-exporter-4.4.0
namespace: s3-exporter
spec:
type: ClusterIP
ports:
- port: 9655
targetPort: 9655
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: ceph-directus
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/name: s3-exporter
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment


@@ -41,30 +41,6 @@ spec:
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: searxng-api
labels:
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
app.kubernetes.io/service: searxng-api
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
type: ClusterIP
ports:
- port: 8080
targetPort: 8080
protocol: TCP
name: mail
selector:
app.kubernetes.io/controller: api
app.kubernetes.io/instance: searxng
app.kubernetes.io/name: searxng
---
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: searxng-browser
labels:
@@ -87,6 +63,30 @@ spec:
app.kubernetes.io/name: searxng
---
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: searxng-api
labels:
app.kubernetes.io/instance: searxng
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: searxng
app.kubernetes.io/service: searxng-api
helm.sh/chart: searxng-4.4.0
namespace: searxng
spec:
type: ClusterIP
ports:
- port: 8080
targetPort: 8080
protocol: TCP
name: mail
selector:
app.kubernetes.io/controller: api
app.kubernetes.io/instance: searxng
app.kubernetes.io/name: searxng
---
# Source: searxng/charts/searxng/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:

File diff suppressed because it is too large

@@ -0,0 +1,937 @@
---
# Source: stalwart/templates/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: stalwart
labels:
app.kubernetes.io/name: stalwart
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged
---
# Source: stalwart/charts/stalwart/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: stalwart-config
labels:
app.kubernetes.io/instance: stalwart
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart
helm.sh/chart: stalwart-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: stalwart
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "10Gi"
storageClassName: "ceph-block"
---
# Source: stalwart/charts/stalwart/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: stalwart
labels:
app.kubernetes.io/instance: stalwart
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart
app.kubernetes.io/service: stalwart
helm.sh/chart: stalwart-4.4.0
namespace: stalwart
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: http
- port: 143
targetPort: 143
protocol: TCP
name: imap
- port: 993
targetPort: 993
protocol: TCP
name: imaps
- port: 25
targetPort: 25
protocol: TCP
name: smtp
- port: 465
targetPort: 465
protocol: TCP
name: smtps
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: stalwart
app.kubernetes.io/name: stalwart
---
# Source: stalwart/charts/stalwart/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: stalwart
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: stalwart
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: stalwart
helm.sh/chart: stalwart-4.4.0
namespace: stalwart
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: stalwart
app.kubernetes.io/instance: stalwart
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: stalwart
app.kubernetes.io/name: stalwart
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- image: stalwartlabs/stalwart:v0.14.1
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /opt/stalwart
name: config
volumes:
- name: config
persistentVolumeClaim:
claimName: stalwart-config
---
# Source: stalwart/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: stalwart-postgresql-17-cluster
namespace: stalwart
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: stalwart-postgresql-17
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "stalwart-postgresql-17-external-backup"
serverName: "stalwart-postgresql-17-backup-1"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "stalwart-postgresql-17-garage-local-backup"
serverName: "stalwart-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "stalwart-postgresql-17-recovery"
serverName: stalwart-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 200m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: stalwart-postgresql-17-backup-1
externalClusters:
- name: stalwart-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "stalwart-postgresql-17-recovery"
serverName: stalwart-postgresql-17-backup-1
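# The Cluster above uses the barman-cloud plugin with two object stores (defined further below):
# WAL archiving goes to the local Garage store, while the DigitalOcean Spaces store is configured
# for external backups; bootstrap recovers serverName stalwart-postgresql-17-backup-1 from the
# stalwart-postgresql-17-recovery store.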
---
# Source: stalwart/templates/elasticsearch.yaml
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
name: elasticsearch-stalwart
namespace: stalwart
labels:
app.kubernetes.io/name: elasticsearch-stalwart
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
spec:
version: 8.18.0
auth:
fileRealm:
- secretName: stalwart-elasticsearch-secret
nodeSets:
- name: default
count: 1
config:
node.store.allow_mmap: false
volumeClaimTemplates:
- metadata:
name: elasticsearch-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
storageClassName: ceph-block
---
# Source: stalwart/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: stalwart-elasticsearch-secret
namespace: stalwart
labels:
app.kubernetes.io/name: stalwart-elasticsearch-secret
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: username
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/stalwart/elasticsearch
metadataPolicy: None
property: username
- secretKey: password
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/stalwart/elasticsearch
metadataPolicy: None
property: password
- secretKey: roles
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/stalwart/elasticsearch
metadataPolicy: None
property: roles
---
# Source: stalwart/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: stalwart-config-backup-secret
namespace: stalwart
labels:
app.kubernetes.io/name: stalwart-config-backup-secret
app.kubernetes.io/instance: stalwart
app.kubernetes.io/version: v0.11.8
app.kubernetes.io/component: backup
app.kubernetes.io/part-of: stalwart
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/stalwart/stalwart-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
# Source: stalwart/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: stalwart-postgresql-17-cluster-backup-secret
namespace: stalwart
labels:
app.kubernetes.io/name: stalwart-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: stalwart
app.kubernetes.io/version: v0.11.8
app.kubernetes.io/component: database
app.kubernetes.io/part-of: stalwart
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: stalwart/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: stalwart-postgresql-17-cluster-backup-secret-garage
namespace: stalwart
labels:
app.kubernetes.io/name: stalwart-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: stalwart/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-stalwart
namespace: stalwart
labels:
app.kubernetes.io/name: http-route-stalwart
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- stalwart.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: stalwart
port: 80
weight: 100
---
# Source: stalwart/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "stalwart-postgresql-17-external-backup"
namespace: stalwart
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: stalwart-postgresql-17
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/stalwart/stalwart-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: stalwart-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: stalwart-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: stalwart/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "stalwart-postgresql-17-garage-local-backup"
namespace: stalwart
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: stalwart-postgresql-17
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/stalwart/stalwart-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: stalwart-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: stalwart-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: stalwart-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: stalwart/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "stalwart-postgresql-17-recovery"
namespace: stalwart
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: stalwart-postgresql-17
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/stalwart/stalwart-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: stalwart-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: stalwart-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: stalwart/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: stalwart-postgresql-17-alert-rules
namespace: stalwart
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: stalwart-postgresql-17
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/stalwart-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
          summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
            CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
            risk of data loss and downtime if the primary instance fails.
            The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
            will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
            This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
            instances. The replaced instance may need some time to catch up with the cluster primary instance.
            This alarm will always trigger if your cluster is configured to run with only 1 instance. In this
            case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="stalwart"} - cnpg_pg_replication_is_wal_receiver_up{namespace="stalwart"}) < 1
for: 5m
labels:
severity: critical
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
            This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
            need some time to catch up with the cluster primary instance.
            This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="stalwart"} - cnpg_pg_replication_is_wal_receiver_up{namespace="stalwart"}) < 2
for: 5m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="stalwart", pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="stalwart", pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="stalwart", pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="stalwart", pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="stalwart",pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="stalwart", pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster has a query running for longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="stalwart", persistentvolumeclaim=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="stalwart",pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "stalwart/stalwart-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="stalwart", pod=~"stalwart-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: stalwart
cnpg_cluster: stalwart-postgresql-17-cluster
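# Note (added for clarity, not part of the rendered chart output): the two connection alerts above fire at
# 80% and 95% of max_connections. A simplified form of the same expression, with the threshold and the pod
# regex dropped, can be run ad hoc in Prometheus to inspect current usage per pod:
#
#   sum by (pod) (cnpg_backends_total{namespace="stalwart"})
#     / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="stalwart"}) * 100
#
# With PostgreSQL's default max_connections of 100, the warning threshold corresponds to roughly 80 busy
# backends and the critical threshold to roughly 95.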
---
# Source: stalwart/templates/redis-replication.yaml
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-stalwart
namespace: stalwart
labels:
app.kubernetes.io/name: redis-replication-stalwart
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.0.3
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.48.0
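# Note: the redisExporter sidecar enabled above is what the ServiceMonitor "redis-replication-stalwart"
# later in this file scrapes; that ServiceMonitor selects the operator-managed service by its
# redis_setup_type: replication label and targets the port named "redis-exporter".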
---
# Source: stalwart/templates/replication-source.yaml
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: stalwart-config-backup-source
namespace: stalwart
labels:
app.kubernetes.io/name: stalwart-config-backup-source
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
spec:
sourcePVC: stalwart-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: stalwart-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
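# The "repository" field above references a Secret that is not rendered in this file. VolSync's restic
# mover expects the repository location and credentials in that Secret; a minimal sketch, assuming an
# S3-backed restic repository (all values below are placeholders, not the real configuration):
#
#   apiVersion: v1
#   kind: Secret
#   metadata:
#     name: stalwart-config-backup-secret
#     namespace: stalwart
#   stringData:
#     RESTIC_REPOSITORY: s3:https://<s3-endpoint>/<bucket>/stalwart-config
#     RESTIC_PASSWORD: <restic-encryption-password>
#     AWS_ACCESS_KEY_ID: <access-key-id>
#     AWS_SECRET_ACCESS_KEY: <secret-access-key>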
---
# Source: stalwart/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "stalwart-postgresql-17-daily-backup-scheduled-backup"
namespace: stalwart
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: stalwart-postgresql-17
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: stalwart-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "stalwart-postgresql-17-external-backup"
---
# Source: stalwart/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "stalwart-postgresql-17-live-backup-scheduled-backup"
namespace: stalwart
labels:
helm.sh/chart: postgres-17-cluster-6.16.0
app.kubernetes.io/name: stalwart-postgresql-17
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: stalwart-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "stalwart-postgresql-17-garage-local-backup"
---
# Source: stalwart/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-stalwart
namespace: stalwart
labels:
app.kubernetes.io/name: redis-replication-stalwart
app.kubernetes.io/instance: stalwart
app.kubernetes.io/part-of: stalwart
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s

File diff suppressed because it is too large


@@ -0,0 +1,476 @@
---
# Source: talos/templates/service-account.yaml
apiVersion: talos.dev/v1alpha1
kind: ServiceAccount
metadata:
name: talos-backup-secrets
namespace: talos
labels:
app.kubernetes.io/name: talos-backup-secrets
app.kubernetes.io/instance: talos
app.kubernetes.io/part-of: talos
spec:
roles:
- os:etcd:backup
---
# Source: talos/templates/secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: talos-backup-secrets
namespace: talos
labels:
app.kubernetes.io/name: talos-backup-secrets
app.kubernetes.io/instance: talos
app.kubernetes.io/part-of: talos
annotations:
kubernetes.io/service-account.name: talos-backup-secrets
---
# Source: talos/charts/etcd-backup/templates/common.yaml
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: talos
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: talos
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: talos
helm.sh/chart: etcd-backup-4.4.0
namespace: talos
spec:
suspend: false
concurrencyPolicy: Forbid
startingDeadlineSeconds: 90
timeZone: US/Central
schedule: "0 2 * * *"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
parallelism: 1
backoffLimit: 3
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: talos
app.kubernetes.io/name: talos
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
restartPolicy: Never
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- command:
- /talos-backup
env:
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: talos-etcd-backup-secret
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: talos-etcd-backup-secret
- name: AWS_REGION
value: nyc3
- name: CUSTOM_S3_ENDPOINT
value: https://nyc3.digitaloceanspaces.com
- name: BUCKET
value: talos-backups-bee8585f7b8a4d0239c9b823
- name: S3_PREFIX
value: cl01tl/etcd
- name: CLUSTER_NAME
value: cl01tl
- name: AGE_X25519_PUBLIC_KEY
valueFrom:
secretKeyRef:
key: AGE_X25519_PUBLIC_KEY
name: talos-etcd-backup-secret
- name: USE_PATH_STYLE
value: "false"
image: ghcr.io/siderolabs/talos-backup:v0.1.0-beta.3@sha256:05c86663b251a407551dc948097e32e163a345818117eb52c573b0447bd0c7a7
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 128Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /var/run/secrets/talos.dev
mountPropagation: None
name: secret
readOnly: true
- mountPath: /.talos
name: talos
- mountPath: /tmp
name: tmp
workingDir: /tmp
- args:
- -ec
- |
export DATE_RANGE=$(date -d @$(( $(date +%s) - 1209600 )) +%Y-%m-%dT%H:%M:%SZ);
export FILE_MATCH="$BUCKET/cl01tl/etcd/cl01tl-$DATE_RANGE.snap.age"
echo ">> Running S3 prune for Talos backup repository"
echo ">> Backups prior to '$DATE_RANGE' will be removed"
echo ">> Backups to be removed:"
s3cmd ls ${BUCKET}/cl01tl/etcd/ |
awk -v file_match="$FILE_MATCH" '$4 < file_match {print $4}'
echo ">> Deleting ..."
s3cmd ls ${BUCKET}/cl01tl/etcd/ |
awk -v file_match="$FILE_MATCH" '$4 < file_match {print $4}' |
while read file; do
s3cmd del "$file";
done;
echo ">> Completed S3 prune for Talos backup repository"
command:
- /bin/sh
env:
- name: BUCKET
valueFrom:
secretKeyRef:
key: BUCKET
name: talos-etcd-backup-secret
image: d3fk/s3cmd:latest@sha256:7bdbd33bb3d044884598898b9e9b383385759fbd6ebf52888700bd9b0e0fab91
imagePullPolicy: IfNotPresent
name: s3-prune
resources:
requests:
cpu: 100m
memory: 128Mi
volumeMounts:
- mountPath: /root/.s3cfg
mountPropagation: None
name: s3cmd-config
readOnly: true
subPath: .s3cfg
volumes:
- name: s3cmd-config
secret:
secretName: talos-etcd-backup-secret
- name: secret
secret:
secretName: talos-backup-secrets
- emptyDir:
medium: Memory
name: talos
- emptyDir:
medium: Memory
name: tmp
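# Taken together, the two containers above write and prune age-encrypted etcd snapshots. Based on the env
# and the prune script, objects are expected under the key pattern
#   <BUCKET>/cl01tl/etcd/cl01tl-<timestamp>.snap.age
# and the s3-prune step deletes anything that sorts before a cutoff of "now minus 1209600 seconds",
# i.e. snapshots older than 14 days.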
---
# Source: talos/charts/etcd-defrag/templates/common.yaml
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: etcd-defrag-defrag-1
labels:
app.kubernetes.io/controller: defrag-1
app.kubernetes.io/instance: talos
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: talos
helm.sh/chart: etcd-defrag-4.4.0
namespace: talos
spec:
suspend: false
concurrencyPolicy: Forbid
startingDeadlineSeconds: 90
timeZone: US/Central
schedule: "0 0 * * 0"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
parallelism: 1
backoffLimit: 3
template:
metadata:
labels:
app.kubernetes.io/controller: defrag-1
app.kubernetes.io/instance: talos
app.kubernetes.io/name: talos
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
restartPolicy: Never
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- args:
- etcd
- defrag
- -n
- 10.232.1.11
env:
- name: TALOSCONFIG
value: /tmp/.talos/config
image: ghcr.io/siderolabs/talosctl:v1.11.5
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 128Mi
volumeMounts:
- mountPath: /tmp/.talos/config
mountPropagation: None
name: talos-config-1
readOnly: true
subPath: config
volumes:
- name: talos-config-1
secret:
secretName: talos-etcd-defrag-secret
---
# Source: talos/charts/etcd-defrag/templates/common.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: etcd-defrag-defrag-2
labels:
app.kubernetes.io/controller: defrag-2
app.kubernetes.io/instance: talos
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: talos
helm.sh/chart: etcd-defrag-4.4.0
namespace: talos
spec:
suspend: false
concurrencyPolicy: Forbid
startingDeadlineSeconds: 90
timeZone: US/Central
schedule: "10 0 * * 0"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
parallelism: 1
backoffLimit: 3
template:
metadata:
labels:
app.kubernetes.io/controller: defrag-2
app.kubernetes.io/instance: talos
app.kubernetes.io/name: talos
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
restartPolicy: Never
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- args:
- etcd
- defrag
- -n
- 10.232.1.12
env:
- name: TALOSCONFIG
value: /tmp/.talos/config
image: ghcr.io/siderolabs/talosctl:v1.11.5
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 128Mi
volumeMounts:
- mountPath: /tmp/.talos/config
mountPropagation: None
name: talos-config-2
readOnly: true
subPath: config
volumes:
- name: talos-config-2
secret:
secretName: talos-etcd-defrag-secret
---
# Source: talos/charts/etcd-defrag/templates/common.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: etcd-defrag-defrag-3
labels:
app.kubernetes.io/controller: defrag-3
app.kubernetes.io/instance: talos
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: talos
helm.sh/chart: etcd-defrag-4.4.0
namespace: talos
spec:
suspend: false
concurrencyPolicy: Forbid
startingDeadlineSeconds: 90
timeZone: US/Central
schedule: "20 0 * * 0"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
parallelism: 1
backoffLimit: 3
template:
metadata:
labels:
app.kubernetes.io/controller: defrag-3
app.kubernetes.io/instance: talos
app.kubernetes.io/name: talos
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
restartPolicy: Never
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- args:
- etcd
- defrag
- -n
- 10.232.1.13
env:
- name: TALOSCONFIG
value: /tmp/.talos/config
image: ghcr.io/siderolabs/talosctl:v1.11.5
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 100m
memory: 128Mi
volumeMounts:
- mountPath: /tmp/.talos/config
mountPropagation: None
name: talos-config-3
readOnly: true
subPath: config
volumes:
- name: talos-config-3
secret:
secretName: talos-etcd-defrag-secret
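# Each of the three defrag CronJobs above is a scheduled talosctl run against a single control-plane
# node. A roughly equivalent manual invocation (a sketch, assuming the same mounted talosconfig):
#
#   TALOSCONFIG=/tmp/.talos/config talosctl etcd defrag -n 10.232.1.11   # .12 / .13 for the other jobs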
---
# Source: talos/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: talos-etcd-backup-secret
namespace: talos
labels:
app.kubernetes.io/name: talos-etcd-backup-secret
app.kubernetes.io/instance: talos
app.kubernetes.io/part-of: talos
annotations:
kubernetes.io/service-account.name: talos-backup-secrets
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/etcd-backup
metadataPolicy: None
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/etcd-backup
metadataPolicy: None
property: AWS_SECRET_ACCESS_KEY
- secretKey: .s3cfg
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/etcd-backup
metadataPolicy: None
property: s3cfg
- secretKey: BUCKET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/etcd-backup
metadataPolicy: None
property: BUCKET
- secretKey: AGE_X25519_PUBLIC_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/talos/etcd-backup
metadataPolicy: None
property: AGE_X25519_PUBLIC_KEY
---
# Source: talos/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: talos-etcd-defrag-secret
namespace: talos
labels:
app.kubernetes.io/name: talos-etcd-defrag-secret
app.kubernetes.io/instance: talos
app.kubernetes.io/part-of: talos
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: config
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/talos/etcd-defrag
metadataPolicy: None
property: config


@@ -111,30 +111,6 @@ spec:
# Source: tdarr/charts/tdarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: tdarr-api
labels:
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
app.kubernetes.io/service: tdarr-api
helm.sh/chart: tdarr-4.4.0
namespace: tdarr
spec:
type: ClusterIP
ports:
- port: 8266
targetPort: 8266
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: server
app.kubernetes.io/instance: tdarr
app.kubernetes.io/name: tdarr
---
# Source: tdarr/charts/tdarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: tdarr-web
labels:
@@ -157,6 +133,30 @@ spec:
app.kubernetes.io/name: tdarr
---
# Source: tdarr/charts/tdarr/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: tdarr-api
labels:
app.kubernetes.io/instance: tdarr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: tdarr
app.kubernetes.io/service: tdarr-api
helm.sh/chart: tdarr-4.4.0
namespace: tdarr
spec:
type: ClusterIP
ports:
- port: 8266
targetPort: 8266
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: server
app.kubernetes.io/instance: tdarr
app.kubernetes.io/name: tdarr
---
# Source: tdarr/charts/tdarr/templates/common.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:


@@ -0,0 +1,209 @@
---
# Source: unpackerr/templates/persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: unpackerr-nfs-storage
namespace: unpackerr
labels:
app.kubernetes.io/name: unpackerr-nfs-storage
app.kubernetes.io/instance: unpackerr
app.kubernetes.io/part-of: unpackerr
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac
---
# Source: unpackerr/templates/persistent-volume-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: unpackerr-nfs-storage
namespace: unpackerr
labels:
app.kubernetes.io/name: unpackerr-nfs-storage
app.kubernetes.io/instance: unpackerr
app.kubernetes.io/part-of: unpackerr
spec:
volumeName: unpackerr-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
---
# Source: unpackerr/charts/unpackerr/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: unpackerr
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: unpackerr
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: unpackerr
helm.sh/chart: unpackerr-4.4.0
namespace: unpackerr
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: unpackerr
app.kubernetes.io/instance: unpackerr
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: unpackerr
app.kubernetes.io/name: unpackerr
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: TZ
value: US/Central
- name: UN_WEBSERVER_METRICS
value: "true"
- name: UN_SONARR_0_URL
value: http://sonarr.sonarr:80
- name: UN_SONARR_0_PATHS_0
value: /mnt/store/Torrent/FINISHED/COMPLETED
- name: UN_SONARR_1_URL
value: http://sonarr-4k.sonarr-4k:80
- name: UN_SONARR_1_PATHS_0
value: /mnt/store/Torrent/FINISHED/COMPLETED
- name: UN_SONARR_2_URL
value: http://sonarr-anime.sonarr-anime:80
- name: UN_SONARR_2_PATHS_0
value: /mnt/store/Torrent/FINISHED/COMPLETED
- name: UN_RADARR_0_URL
value: http://radarr.radarr:80
- name: UN_RADARR_0_PATHS_0
value: /mnt/store/Torrent/FINISHED/COMPLETED
- name: UN_RADARR_1_URL
value: http://radarr-4k.radarr-4k:80
- name: UN_RADARR_1_PATHS_0
value: /mnt/store/Torrent/FINISHED/COMPLETED
- name: UN_RADARR_2_URL
value: http://radarr-anime.radarr-anime:80
- name: UN_RADARR_2_PATHS_0
value: /mnt/store/Torrent/FINISHED/COMPLETED
- name: UN_RADARR_3_URL
value: http://radarr-standup.radarr-standup:80
- name: UN_RADARR_3_PATHS_0
value: /mnt/store/Torrent/FINISHED/COMPLETED
- name: UN_LIDARR_0_URL
value: http://lidarr.lidarr:80
- name: UN_LIDARR_0_PATHS_0
value: /mnt/store/Torrent/FINISHED/COMPLETED
envFrom:
- secretRef:
name: unpackerr-key-secret
image: golift/unpackerr:0.14.5
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /mnt/store
name: storage
volumes:
- name: storage
persistentVolumeClaim:
claimName: unpackerr-nfs-storage
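# The UN_<APP>_<N>_* variables above follow unpackerr's indexed naming convention; each instance's API
# key is supplied separately through envFrom from the unpackerr-key-secret ExternalSecret below
# (UN_SONARR_0_API_KEY, UN_RADARR_0_API_KEY, and so on). As a sketch, wiring up a hypothetical additional
# Radarr would add UN_RADARR_4_URL and UN_RADARR_4_PATHS_0 here plus UN_RADARR_4_API_KEY in that secret.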
---
# Source: unpackerr/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: unpackerr-key-secret
namespace: unpackerr
labels:
app.kubernetes.io/name: unpackerr-key-secret
app.kubernetes.io/instance: unpackerr
app.kubernetes.io/part-of: unpackerr
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: UN_SONARR_0_API_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/sonarr4/key
metadataPolicy: None
property: key
- secretKey: UN_SONARR_1_API_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/sonarr4-4k/key
metadataPolicy: None
property: key
- secretKey: UN_SONARR_2_API_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/sonarr4-anime/key
metadataPolicy: None
property: key
- secretKey: UN_RADARR_0_API_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/radarr5/key
metadataPolicy: None
property: key
- secretKey: UN_RADARR_1_API_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/radarr5-4k/key
metadataPolicy: None
property: key
- secretKey: UN_RADARR_2_API_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/radarr5-anime/key
metadataPolicy: None
property: key
- secretKey: UN_RADARR_3_API_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/radarr5-standup/key
metadataPolicy: None
property: key
- secretKey: UN_LIDARR_0_API_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/lidarr2/key
metadataPolicy: None
property: key

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,105 @@
---
# Source: whodb/charts/whodb/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: whodb
labels:
app.kubernetes.io/instance: whodb
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: whodb
app.kubernetes.io/service: whodb
helm.sh/chart: whodb-4.4.0
namespace: whodb
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: whodb
app.kubernetes.io/name: whodb
---
# Source: whodb/charts/whodb/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: whodb
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: whodb
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: whodb
helm.sh/chart: whodb-4.4.0
namespace: whodb
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: whodb
app.kubernetes.io/instance: whodb
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: whodb
app.kubernetes.io/name: whodb
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: WHODB_OLLAMA_HOST
value: ollama-server-2.ollama
- name: WHODB_OLLAMA_PORT
value: "11434"
image: clidey/whodb:0.80.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 256Mi
---
# Source: whodb/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-whodb
namespace: whodb
labels:
app.kubernetes.io/name: http-route-whodb
app.kubernetes.io/instance: whodb
app.kubernetes.io/part-of: whodb
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- whodb.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: whodb
port: 80
weight: 100
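# A quick way to sanity-check the Service wiring above without going through the Gateway is a local
# port-forward (illustrative commands, assuming kubectl access to the cluster):
#
#   kubectl -n whodb port-forward svc/whodb 8080:80
#   curl -I http://localhost:8080/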