commit 35b77bb0df
2024-05-22 12:49:28 -05:00
219 changed files with 9997 additions and 0 deletions


@@ -0,0 +1,11 @@
apiVersion: v2
name: cert-manager
version: 1.0.0
sources:
- https://github.com/cert-manager/cert-manager
- https://github.com/cert-manager/cert-manager/tree/master/deploy/charts/cert-manager
dependencies:
- name: cert-manager
version: v1.14.5
repository: https://charts.jetstack.io
appVersion: v1.14.3


@@ -0,0 +1,21 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-issuer
spec:
acme:
email: alexanderlebens@gmail.com
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: letsencrypt-issuer-account-key
solvers:
- selector:
dnsZones:
- "alexlebens.net"
- "*.alexlebens.net"
dns01:
cloudflare:
email: alexanderlebens@gmail.com
apiTokenSecretRef:
name: cloudflare-api-token
key: api-token
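Ingresses elsewhere in this commit request certificates from this issuer through the cert-manager.io/cluster-issuer annotation. A minimal sketch of that usage (the host and backend service below are placeholders, not resources from this commit):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-issuer
spec:
  ingressClassName: traefik
  rules:
    - host: example.alexlebens.net
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: example
                port:
                  number: 80
  tls:
    - secretName: example-secret-tls
      hosts:
        - example.alexlebens.net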


@@ -0,0 +1,11 @@
cert-manager:
installCRDs: true
replicaCount: 2
prometheus:
enabled: true
servicemonitor:
enabled: true
honorLabels: true
cainjector:
enabled: true
replicaCount: 2


@@ -0,0 +1,13 @@
apiVersion: v2
name: intel-device-plugins
version: 1.0.0
sources:
- https://github.com/intel/intel-device-plugins-for-kubernetes
dependencies:
- name: intel-device-plugins-operator
version: 0.30.0
repository: https://intel.github.io/helm-charts/
- name: intel-device-plugins-gpu
version: 0.30.0
repository: https://intel.github.io/helm-charts/
appVersion: 0.29.0


@@ -0,0 +1,28 @@
intel-device-plugins-operator:
nodeSelector:
kubernetes.io/arch: amd64
manager:
image:
hub: intel
kubeRbacProxy:
image:
hub: gcr.io
resources:
limits:
cpu: 100m
memory: 120Mi
requests:
cpu: 100m
memory: 100Mi
intel-device-plugins-gpu:
name: gpudeviceplugin
image:
hub: intel
sharedDevNum: 5
logLevel: 2
resourceManager: false
enableMonitoring: true
allocationPolicy: "none"
nodeSelector:
intel.feature.node.kubernetes.io/gpu: 'true'
nodeFeatureRule: false
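With sharedDevNum set to 5, the GPU plugin advertises each device five times, so up to five pods can share one GPU. A workload consumes it by requesting the gpu.intel.com/i915 extended resource exposed by the Intel GPU plugin; a minimal sketch (pod name and image are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: gpu-consumer
spec:
  containers:
    - name: app
      image: example/transcoder:latest
      resources:
        limits:
          gpu.intel.com/i915: 1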


@@ -0,0 +1,11 @@
apiVersion: v2
name: kube-prometheus-stack
version: 1.0.0
sources:
- https://github.com/prometheus/prometheus
- https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack
dependencies:
- name: kube-prometheus-stack
version: 58.5.3
repository: https://prometheus-community.github.io/helm-charts
appVersion: v0.72.0


@@ -0,0 +1,37 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: alertmanager-config-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Release.Name }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: discord_webhook
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /discord/alertmanager
metadataPolicy: None
property: webhook
- secretKey: pushover_token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /pushover/key
metadataPolicy: None
property: alertmanager_key
- secretKey: user_key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /pushover/key
metadataPolicy: None
property: user_key


@@ -0,0 +1,140 @@
kube-prometheus-stack:
crds:
enabled: false
defaultRules:
create: true
rules:
kubeControllerManager: false
kubeSchedulerAlerting: false
kubeSchedulerRecording: false
global:
rbac:
create: true
createAggregateClusterRoles: true
alertmanager:
enabled: true
config:
route:
group_by: ["namespace","alertname"]
group_wait: 30s
group_interval: 5m
repeat_interval: 24h
receiver: discord
routes:
- receiver: "null"
matchers:
- alertname = "Watchdog"
- receiver: 'pushover'
group_wait: 10s
group_interval: 5m
repeat_interval: 24h
matchers:
- severity = "critical"
receivers:
- name: "null"
- name: discord
discord_configs:
- send_resolved: true
webhook_url: https://discord.com/api/webhooks/1215465356315983922/CSaWG3SygslTNQo0uw07FB-0eKGl9nw2kDAqbAfH7JMe1ExVin8UvjlP4qkJoEyjDawz
- name: pushover
pushover_configs:
- send_resolved: true
user_key_file: /etc/alertmanager/secrets/alertmanager-config-secret/user_key
token_file: /etc/alertmanager/secrets/alertmanager-config-secret/pushover_token
ingress:
enabled: true
ingressClassName: traefik
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
cert-manager.io/cluster-issuer: letsencrypt-issuer
hosts:
- alertmanager.alexlebens.net
tls:
- secretName: alertmanager-secret-tls
hosts:
- alertmanager.alexlebens.net
alertmanagerSpec:
secrets:
- alertmanager-config-secret
replicas: 1
externalUrl: https://alertmanager.alexlebens.net
grafana:
enabled: false
kubeApiServer:
tlsConfig:
insecureSkipVerify: true
kubeControllerManager:
enabled: false
kubeScheduler:
enabled: false
kubeProxy:
enabled: false
kube-state-metrics:
selfMonitor:
enabled: true
nodeExporter:
operatingSystems:
darwin:
enabled: false
prometheusOperator:
admissionWebhooks:
enabled: true
namespaces:
releaseNamespace: true
additional:
- kube-system
- argocd
- authentik
- cert-manager
- cloudnative-pg
- freshrss
- gitea
- grafana
- home-assistant
- local-static-provisioner
- loki
- matrix-synapse
- outline
- rook-ceph
- speedtest-exporter
- unpoller
- vault
- vikunja
- lidarr2
- qbittorrent
- radarr5
- radarr5-4k
- readarr-audiobooks
- readarr-books
- sonarr4
- sonarr4-4k
- tdarr
prometheus:
ingress:
enabled: true
ingressClassName: traefik
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
cert-manager.io/cluster-issuer: letsencrypt-issuer
hosts:
- prometheus.alexlebens.net
tls:
- secretName: prometheus-secret-tls
hosts:
- prometheus.alexlebens.net
prometheusSpec:
scrapeInterval: 30s
retention: 30d
externalUrl: https://prometheus.alexlebens.net
serviceMonitorSelectorNilUsesHelmValues: false
podMonitorSelectorNilUsesHelmValues: false
storageSpec:
volumeClaimTemplate:
spec:
storageClassName: nfs-client
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
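Because serviceMonitorSelectorNilUsesHelmValues and podMonitorSelectorNilUsesHelmValues are false, this Prometheus scrapes any ServiceMonitor found in the namespaces watched by the operator (the prometheusOperator.namespaces list above), not only those created by this release. A minimal ServiceMonitor sketch (the app name and port are placeholders; the namespace is one from the list above):

apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: example-app
  namespace: grafana
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: example-app
  endpoints:
    - port: metrics
      interval: 30s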


@@ -0,0 +1,11 @@
apiVersion: v2
name: kubernetes-cloudflare-ddns
version: 0.0.1
sources:
- https://github.com/kubitodev/kubernetes-cloudflare-ddns
- https://github.com/kubitodev/helm/tree/main/charts/kubernetes-cloudflare-ddns
dependencies:
- name: kubernetes-cloudflare-ddns
version: 1.0.6
repository: https://charts.kubito.dev
appVersion: "1.0.1"


@@ -0,0 +1,44 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: kubernetes-cloudflare-ddns-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Release.Name }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: AUTH_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/alexlebens-net
metadataPolicy: None
property: auth-key
- secretKey: NAME
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/alexlebens-net
metadataPolicy: None
property: name
- secretKey: RECORD_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/alexlebens-net
metadataPolicy: None
property: record-id
- secretKey: ZONE_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/alexlebens-net
metadataPolicy: None
property: zone-id


@@ -0,0 +1,12 @@
kubernetes-cloudflare-ddns:
image:
repository: kubitodev/kubernetes-cloudflare-ddns
tag: "2.0.0"
cron:
job:
schedule: '"0 0 * * *"'
successfulJobsHistoryLimit: 1
pod:
restartPolicy: OnFailure
secret:
existingSecret: kubernetes-cloudflare-ddns-secret


@@ -0,0 +1,14 @@
apiVersion: v2
name: loki
version: 1.0.0
sources:
- https://github.com/grafana/loki
- https://github.com/grafana/helm-charts
dependencies:
- name: loki
version: 6.5.2
repository: https://grafana.github.io/helm-charts
- name: promtail
version: 6.15.5
repository: https://grafana.github.io/helm-charts
appVersion: 3.0.0


@@ -0,0 +1,48 @@
loki:
deploymentMode: SingleBinary
loki:
auth_enabled: true
commonConfig:
replication_factor: 1
limits_config:
allow_structured_metadata: false
storage:
type: filesystem
schemaConfig:
configs:
- from: "2024-01-11"
store: boltdb-shipper
object_store: filesystem
schema: v13
index:
period: 24h
enterprise:
enabled: false
gateway:
enabled: true
basicAuth:
enabled: false
singleBinary:
replicas: 1
persistence:
enableStatefulSetAutoDeletePVC: true
enabled: true
size: 10Gi
storageClass: ceph-block
write:
replicas: 0
read:
replicas: 0
backend:
replicas: 0
promtail:
daemonset:
enabled: true
serviceMonitor:
enabled: true
prometheusRule:
enabled: false
config:
clients:
- url: http://loki-gateway/loki/api/v1/push
tenant_id: 1


@@ -0,0 +1,11 @@
apiVersion: v2
name: metallb
version: 1.0.0
sources:
- https://github.com/metallb/metallb
- https://github.com/metallb/metallb/tree/main/charts/metallb
dependencies:
- name: metallb
version: 0.14.5
repository: https://metallb.github.io/metallb
appVersion: 0.15.1


@@ -0,0 +1,16 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: default
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Release.Name }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: network
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
addresses:
- 192.168.1.17/32
- 192.168.1.16/32
- 192.168.1.15/32
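An IPAddressPool by itself does not announce anything; MetalLB also needs an advertisement object before it answers for these addresses. Assuming layer-2 mode (this is a sketch, not a file from this commit), an L2Advertisement bound to the pool above would be:

apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb
spec:
  ipAddressPools:
    - default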


@@ -0,0 +1,8 @@
apiVersion: v1
kind: Namespace
metadata:
name: metallb
labels:
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged
pod-security.kubernetes.io/audit: privileged


@@ -0,0 +1,15 @@
metallb:
controller:
metrics:
enabled: true
serviceMonitor:
enabled: true
speaker:
metrics:
enabled: true
serviceMonitor:
enabled: true
labels:
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/warn: privileged


@@ -0,0 +1,10 @@
apiVersion: v2
name: node-feature-discovery
version: 1.0.0
sources:
- https://github.com/kubernetes-sigs/node-feature-discovery
dependencies:
- name: node-feature-discovery
version: 0.15.4
repository: https://kubernetes-sigs.github.io/node-feature-discovery/charts
appVersion: 0.15.1


@@ -0,0 +1,256 @@
node-feature-discovery:
enableNodeFeatureApi: true
master:
enable: true
config: ### <NFD-MASTER-CONF-START-DO-NOT-REMOVE>
# noPublish: false
# autoDefaultNs: true
# extraLabelNs: ["added.ns.io","added.kubernetes.io","intel.com","devicetree.org"]
# denyLabelNs: ["denied.ns.io","denied.kubernetes.io"]
# resourceLabels: ["vendor-1.com/feature-1","vendor-2.io/feature-2"]
# enableTaints: false
# labelWhiteList: "foo"
# resyncPeriod: "2h"
# klog:
# addDirHeader: false
# alsologtostderr: false
# logBacktraceAt:
# logtostderr: true
# skipHeaders: false
# stderrthreshold: 2
# v: 0
# vmodule:
## NOTE: the following options are not dynamically run-time configurable
## and require a nfd-master restart to take effect after being changed
# logDir:
# logFile:
# logFileMaxSize: 1800
# skipLogHeaders: false
# leaderElection:
# leaseDuration: 15s
# # this value has to be lower than leaseDuration and greater than retryPeriod*1.2
# renewDeadline: 10s
# # this value has to be greater than 0
# retryPeriod: 2s
# nfdApiParallelism: 10
### <NFD-MASTER-CONF-END-DO-NOT-REMOVE>
port: 8080
metricsPort: 8081
instance:
featureApi:
resyncPeriod:
denyLabelNs: []
extraLabelNs: []
resourceLabels: []
enableTaints: false
crdController: null
featureRulesController: null
nfdApiParallelism: null
deploymentAnnotations: {}
replicaCount: 1
podSecurityContext: {}
# fsGroup: 2000
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
readOnlyRootFilesystem: true
runAsNonRoot: true
# runAsUser: 1000
serviceAccount:
create: true
name:
rbac:
create: true
service:
type: ClusterIP
port: 8080
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
tolerations:
- key: "node-role.kubernetes.io/control-plane"
operator: "Equal"
value: ""
effect: "NoSchedule"
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: "node-role.kubernetes.io/control-plane"
operator: In
values: [""]
worker:
enable: true
config: ### <NFD-WORKER-CONF-START-DO-NOT-REMOVE>
#core:
# labelWhiteList:
# noPublish: false
# sleepInterval: 60s
# featureSources: [all]
# labelSources: [all]
# klog:
# addDirHeader: false
# alsologtostderr: false
# logBacktraceAt:
# logtostderr: true
# skipHeaders: false
# stderrthreshold: 2
# v: 0
# vmodule:
## NOTE: the following options are not dynamically run-time configurable
## and require a nfd-worker restart to take effect after being changed
# logDir:
# logFile:
# logFileMaxSize: 1800
# skipLogHeaders: false
sources:
cpu:
cpuid:
attributeWhitelist:
- "AVX512BW"
- "AVX512CD"
- "AVX512DQ"
- "AVX512F"
- "AVX512VL"
kernel:
configOpts:
- "NO_HZ"
- "X86"
- "DMI"
usb:
deviceClassWhitelist:
- "02"
- "03"
- "0e"
- "ef"
- "fe"
- "ff"
deviceLabelFields:
- "vendor"
- "device"
- "class"
pci:
deviceClassWhitelist:
- "0200"
- "01"
- "08"
- "0300"
- "0302"
deviceLabelFields:
- "vendor"
- "device"
- "class"
custom:
- # Intel integrated GPU
name: "intel-gpu"
labels:
intel.feature.node.kubernetes.io/gpu: 'true'
matchOn:
- pciId:
class: ["0300"]
vendor: ["8086"]
- # Google Coral USB Accelerator
name: google.coral
labels:
google.feature.node.kubernetes.io/coral: "true"
matchFeatures:
- feature: usb.device
matchExpressions:
vendor: { op: In, value: ["1a6e", "18d1"] }
- # Aeotec Z-Stick Gen5+
name: aeotec.zwave
labels:
aeotec.feature.node.kubernetes.io/zwave: "true"
matchFeatures:
- feature: usb.device
matchExpressions:
class: { op: In, value: ["02"] }
vendor: { op: In, value: ["0658"] }
device: { op: In, value: ["0200"] }
### <NFD-WORKER-CONF-END-DO-NOT-REMOVE>
metricsPort: 8081
podSecurityContext: {}
# fsGroup: 2000
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
readOnlyRootFilesystem: true
runAsNonRoot: true
# runAsUser: 1000
serviceAccount:
create: true
name:
rbac:
create: true
mountUsrSrc: false
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
topologyUpdater:
config: ### <NFD-TOPOLOGY-UPDATER-CONF-START-DO-NOT-REMOVE>
## key = node name, value = list of resources to be excluded.
## use * to exclude from all nodes.
## an example of what the exclude list should look like
#excludeList:
# node1: [cpu]
# node2: [memory, example/deviceA]
# *: [hugepages-2Mi]
### <NFD-TOPOLOGY-UPDATER-CONF-END-DO-NOT-REMOVE>
enable: true
createCRDs: true
serviceAccount:
create: true
name:
rbac:
create: true
metricsPort: 8081
updateInterval: 60s
watchNamespace: "*"
kubeletStateDir: /var/lib/kubelet
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
readOnlyRootFilesystem: true
runAsUser: 0
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
gc:
enable: true
replicaCount: 1
serviceAccount:
create: true
name:
rbac:
create: true
interval: 1h
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
metricsPort: 8081
tls:
enable: false
certManager: false
prometheus:
enable: false
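The custom worker rules above publish node labels such as intel.feature.node.kubernetes.io/gpu, google.feature.node.kubernetes.io/coral, and aeotec.feature.node.kubernetes.io/zwave, which workloads target with a nodeSelector (the intel-device-plugins values earlier in this commit already do this for the GPU label). A sketch pinning a pod to a Coral-equipped node (pod name and image are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: coral-consumer
spec:
  nodeSelector:
    google.feature.node.kubernetes.io/coral: "true"
  containers:
    - name: app
      image: example/detector:latest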


@@ -0,0 +1,11 @@
apiVersion: v2
name: reloader
version: 1.0.0
sources:
- https://github.com/stakater/Reloader
- https://github.com/stakater/Reloader/blob/master/deployments/kubernetes/chart/reloader/Chart.yaml
dependencies:
- name: reloader
version: 1.0.97
repository: https://stakater.github.io/stakater-charts
appVersion: 1.0.80


@@ -0,0 +1,5 @@
reloader:
reloader:
serviceMonitor:
enabled: true
namespace: reloader
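Reloader only acts on workloads that opt in through annotations; these values merely enable its ServiceMonitor. A sketch of a Deployment that is rolled automatically when its mounted ConfigMaps or Secrets change, using the upstream reloader.stakater.com/auto annotation (the workload itself is a placeholder):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: example
  annotations:
    reloader.stakater.com/auto: "true"
spec:
  replicas: 1
  selector:
    matchLabels:
      app: example
  template:
    metadata:
      labels:
        app: example
    spec:
      containers:
        - name: app
          image: example/app:latest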


@@ -0,0 +1,11 @@
apiVersion: v2
name: speedtest-exporter
version: 1.0.0
sources:
- https://github.com/MiguelNdeCarvalho/speedtest-exporter
- https://gitlab.com/alexander-chernov/helm/speedtest-exporter
dependencies:
- name: speedtest-exporter
version: 0.1.1
repository: https://charts.alekc.dev
appVersion: v3.5.4


@@ -0,0 +1,16 @@
speedtest-exporter:
image:
repository: ghcr.io/miguelndecarvalho/speedtest-exporter
tag: v3.5.4
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
serviceMonitor:
enabled: true
namespace: speedtest-exporter
interval: "60m"
scrapeTimeout: "2m"


@@ -0,0 +1,11 @@
apiVersion: v2
name: tailscale-operator
version: 1.0.0
sources:
- https://github.com/tailscale/tailscale/tree/main/cmd/k8s-operator/deploy
- https://github.com/tailscale/tailscale/tree/main/cmd/k8s-operator/deploy/chart
dependencies:
- name: tailscale-operator
version: 1.66.3
repository: https://pkgs.tailscale.com/helmcharts
appVersion: v1.64.2


@@ -0,0 +1,30 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: operator-oauth
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: operator-oauth
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: client_id
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /tailscale/operator/oauth
metadataPolicy: None
property: clientId
- secretKey: client_secret
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /tailscale/operator/oauth
metadataPolicy: None
property: clientSecret


@@ -0,0 +1,22 @@
tailscale-operator:
oauth: {}
installCRDs: true
operatorConfig:
defaultTags:
- "tag:k8s-operator"
image:
repo: tailscale/k8s-operator
tag: v1.64.2
pullPolicy: Always
logging: info
hostname: tailscale-operator-cl01tl
nodeSelector:
kubernetes.io/os: linux
proxyConfig:
image:
repo: tailscale/tailscale
tag: v1.64.2
defaultTags: "tag:k8s"
firewallMode: auto
apiServerProxyConfig:
mode: "false"
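With apiServerProxyConfig.mode set to "false", the operator only exposes workloads, not the API server. Individual Services are placed on the tailnet by annotation; a sketch, assuming the upstream tailscale.com/expose and tailscale.com/hostname annotations (the Service, selector, and ports are placeholders):

apiVersion: v1
kind: Service
metadata:
  name: example
  annotations:
    tailscale.com/expose: "true"
    tailscale.com/hostname: example-cl01tl
spec:
  selector:
    app: example
  ports:
    - port: 80
      targetPort: 8080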


@@ -0,0 +1,11 @@
apiVersion: v2
name: traefik
version: 1.0.0
sources:
- https://github.com/traefik/traefik
- https://github.com/traefik/traefik-helm-chart
dependencies:
- name: traefik
version: 28.0.0
repository: https://traefik.github.io/charts
appVersion: v3.0.0


@@ -0,0 +1,19 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: traefik-certificate
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Release.Name }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretName: traefik-secret-tls
dnsNames:
- "alexlebens.net"
- "*.alexlebens.net"
issuerRef:
name: letsencrypt-issuer
kind: ClusterIssuer


@@ -0,0 +1,82 @@
traefik:
deployment:
kind: DaemonSet
ingressClass:
enabled: true
isDefaultClass: true
ingressRoute:
dashboard:
enabled: true
matchRule: (Host(`traefik-cl01tl.alexlebens.net`) && (PathPrefix(`/api/`) || PathPrefix(`/dashboard/`)))
entryPoints: ["websecure"]
providers:
kubernetesCRD:
allowCrossNamespace: true
allowEmptyServices: true
kubernetesIngress:
allowEmptyServices: true
publishedService:
enabled: true
metrics:
service:
enabled: true
globalArguments: []
ports:
web:
expose:
default: true
exposedPort: 80
redirectTo:
port: websecure
priority: 10
forwardedHeaders:
trustedIPs:
- 10.0.0.0/8
- 172.16.0.0/16
- 192.168.0.0/16
- fc00::/7
insecure: false
proxyProtocol:
trustedIPs:
- 10.0.0.0/8
- 172.16.0.0/16
- 192.168.0.0/16
- fc00::/7
insecure: false
websecure:
port: 8443
expose:
default: true
exposedPort: 443
forwardedHeaders:
trustedIPs:
- 10.0.0.0/8
- 172.16.0.0/16
- 192.168.0.0/16
- fc00::/7
insecure: false
proxyProtocol:
trustedIPs:
- 10.0.0.0/8
- 172.16.0.0/16
- 192.168.0.0/16
- fc00::/7
insecure: false
tls:
enabled: true
metrics:
expose:
default: false
tlsStore:
default:
defaultCertificate:
secretName: traefik-secret-tls
service:
enabled: true
type: LoadBalancer
annotations:
metallb.universe.tf/allow-shared-ip: "external"
externalIPs:
- 192.168.1.17
- 192.168.1.16
- 192.168.1.15
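Since kubernetesCRD is enabled (with allowCrossNamespace), routes can also be declared as Traefik IngressRoute objects instead of annotated Ingresses. A minimal sketch for the websecure entrypoint, terminating TLS with the default certificate configured above (host and backend service are placeholders):

apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: example
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`example.alexlebens.net`)
      kind: Rule
      services:
        - name: example
          port: 80
  tls: {}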


@@ -0,0 +1,11 @@
apiVersion: v2
name: unpoller
version: 1.0.0
sources:
- https://github.com/unpoller/unpoller
- https://github.com/homeylab/helm-charts/tree/main/charts/unpoller
dependencies:
- name: unpoller
version: 2.1.0
repository: https://homeylab.github.io/helm-charts/
appVersion: "v2.10.0"


@@ -0,0 +1,30 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: unpoller-unifi-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Release.Name }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: UP_UNIFI_CONTROLLER_0_USER
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /unifi/auth
metadataPolicy: None
property: user
- secretKey: UP_UNIFI_CONTROLLER_0_PASS
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /unifi/auth
metadataPolicy: None
property: password


@@ -0,0 +1,33 @@
unpoller:
replicaCount: 1
image:
name: unpoller/unpoller
tag: "v2.10.0"
updateStrategy:
type: Recreate
service:
type: ClusterIP
port: 9130
protocol: TCP
name: metrics
metrics:
enabled: true
serviceMonitor:
enabled: true
interval: 30s
scrapeTimeout: 10s
prometheusRule:
enabled: true
existingSecret: unpoller-unifi-secret
settings:
unifi:
config:
url: https://unifi.alexlebens.net/
save_sites: true
verify_ssl: false
prometheus:
namespace: unpoller
http_listen: "0.0.0.0:9130"
unpoller:
debug: false
quiet: false