Automated Manifest Update #2174

Merged
alexlebens merged 1 commit from auto/update-manifests into manifests 2025-12-02 02:23:25 +00:00
12 changed files with 32249 additions and 0 deletions

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,308 @@
---
# Source: headlamp/charts/headlamp/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: headlamp
namespace: headlamp
labels:
helm.sh/chart: headlamp-0.38.0
app.kubernetes.io/name: headlamp
app.kubernetes.io/instance: headlamp
app.kubernetes.io/version: "0.38.0"
app.kubernetes.io/managed-by: Helm
---
# Source: headlamp/templates/service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: headlamp-admin
namespace: headlamp
labels:
app.kubernetes.io/name: headlamp-admin
app.kubernetes.io/instance: headlamp
app.kubernetes.io/part-of: headlamp
---
# Source: headlamp/charts/headlamp/templates/plugin-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: headlamp-plugin-config
namespace: headlamp
labels:
helm.sh/chart: headlamp-0.38.0
app.kubernetes.io/name: headlamp
app.kubernetes.io/instance: headlamp
app.kubernetes.io/version: "0.38.0"
app.kubernetes.io/managed-by: Helm
data:
plugin.yml: |
plugins:
- name: cert-manager
source: https://artifacthub.io/packages/headlamp/headlamp-plugins/headlamp_cert-manager
version: 0.1.0
- name: trivy
source: https://artifacthub.io/packages/headlamp/headlamp-trivy/headlamp_trivy
version: 0.3.1
installOptions:
parallel: true
maxConcurrent: 2
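# Note: this plugin.yml is read by the headlamp-plugin sidecar in the Deployment below,
# which installs the listed plugins with @headlamp-k8s/pluginctl into the shared plugins volume.
# A minimal check of the rendered config, assuming kubectl access to the headlamp namespace:
#   kubectl -n headlamp get configmap headlamp-plugin-config -o jsonpath='{.data.plugin\.yml}'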
---
# Source: headlamp/charts/headlamp/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: headlamp-admin
labels:
helm.sh/chart: headlamp-0.38.0
app.kubernetes.io/name: headlamp
app.kubernetes.io/instance: headlamp
app.kubernetes.io/version: "0.38.0"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: headlamp
namespace: headlamp
---
# Source: headlamp/templates/cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cluster-admin-oidc
namespace: headlamp
labels:
app.kubernetes.io/name: cluster-admin-oidc
app.kubernetes.io/instance: headlamp
app.kubernetes.io/part-of: headlamp
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: User
name: alexanderlebens@gmail.com
apiGroup: rbac.authorization.k8s.io
- kind: ServiceAccount
name: headlamp-admin
namespace: headlamp
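# Note: this binding grants cluster-admin both to the OIDC user above and to the
# headlamp-admin ServiceAccount. A hypothetical way to mint a token for that
# ServiceAccount when logging in to Headlamp manually (assumes kubectl v1.24+):
#   kubectl -n headlamp create token headlamp-admin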
---
# Source: headlamp/charts/headlamp/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: headlamp
namespace: headlamp
labels:
helm.sh/chart: headlamp-0.38.0
app.kubernetes.io/name: headlamp
app.kubernetes.io/instance: headlamp
app.kubernetes.io/version: "0.38.0"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: headlamp
app.kubernetes.io/instance: headlamp
---
# Source: headlamp/charts/headlamp/templates/deployment.yaml
# This block extracts values from the env and, when they are non-empty, uses them in the deployment.
apiVersion: apps/v1
kind: Deployment
metadata:
name: headlamp
namespace: headlamp
labels:
helm.sh/chart: headlamp-0.38.0
app.kubernetes.io/name: headlamp
app.kubernetes.io/instance: headlamp
app.kubernetes.io/version: "0.38.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 2
selector:
matchLabels:
app.kubernetes.io/name: headlamp
app.kubernetes.io/instance: headlamp
template:
metadata:
labels:
app.kubernetes.io/name: headlamp
app.kubernetes.io/instance: headlamp
spec:
serviceAccountName: headlamp
automountServiceAccountToken: true
securityContext:
{}
containers:
- name: headlamp
securityContext:
privileged: false
runAsGroup: 101
runAsNonRoot: true
runAsUser: 100
image: "ghcr.io/headlamp-k8s/headlamp:v0.38.0"
imagePullPolicy: IfNotPresent
# Check if externalSecret is enabled
envFrom:
- secretRef:
name: headlamp-oidc-secret
args:
- "-in-cluster"
- "-watch-plugins-changes"
- "-plugins-dir=/headlamp/plugins"
- "-oidc-client-id=$(OIDC_CLIENT_ID)"
- "-oidc-client-secret=$(OIDC_CLIENT_SECRET)"
- "-oidc-idp-issuer-url=$(OIDC_ISSUER_URL)"
- "-oidc-scopes=$(OIDC_SCOPES)"
ports:
- name: http
containerPort: 4466
protocol: TCP
livenessProbe:
httpGet:
path: "/"
port: http
readinessProbe:
httpGet:
path: "/"
port: http
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- name: plugins-dir
mountPath: /headlamp/plugins
- name: headlamp-plugin
image: node:lts-alpine
command: ["/bin/sh", "-c"]
args:
- |
if [ -f "/config/plugin.yml" ]; then
echo "Installing plugins from config..."
cat /config/plugin.yml
# Use a writable cache directory
export NPM_CONFIG_CACHE=/tmp/npm-cache
# Use a writable config directory
export NPM_CONFIG_USERCONFIG=/tmp/npm-userconfig
mkdir -p /tmp/npm-cache /tmp/npm-userconfig
npx --yes @headlamp-k8s/pluginctl@latest install --config /config/plugin.yml --folderName /headlamp/plugins --watch
fi
volumeMounts:
- name: plugins-dir
mountPath: /headlamp/plugins
- name: plugin-config
mountPath: /config
resources:
null
securityContext:
readOnlyRootFilesystem: false
runAsNonRoot: false
runAsUser: 0
volumes:
- name: plugins-dir
emptyDir: {}
- name: plugin-config
configMap:
name: headlamp-plugin-config
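# Note: the $(OIDC_*) references in the container args are supplied via envFrom from the
# headlamp-oidc-secret Secret, which the ExternalSecret below populates from Vault at
# /authentik/oidc/headlamp. The headlamp-plugin sidecar runs as root with a writable
# filesystem so pluginctl can write into the plugins-dir emptyDir shared with the main container.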
---
# Source: headlamp/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: headlamp-oidc-secret
namespace: headlamp
labels:
app.kubernetes.io/name: headlamp-oidc-secret
app.kubernetes.io/instance: headlamp
app.kubernetes.io/part-of: headlamp
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: OIDC_CLIENT_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/headlamp
metadataPolicy: None
property: client
- secretKey: OIDC_CLIENT_SECRET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/headlamp
metadataPolicy: None
property: secret
- secretKey: OIDC_ISSUER_URL
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/headlamp
metadataPolicy: None
property: issuer
- secretKey: OIDC_SCOPES
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/headlamp
metadataPolicy: None
property: scopes
- secretKey: OIDC_VALIDATOR_ISSUER_URL
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/headlamp
metadataPolicy: None
property: validator-issuer-url
- secretKey: OIDC_VALIDATOR_CLIENT_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/headlamp
metadataPolicy: None
property: validator-client-id
---
# Source: headlamp/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: https-route-headlamp
namespace: headlamp
labels:
app.kubernetes.io/name: https-route-headlamp
app.kubernetes.io/instance: headlamp
app.kubernetes.io/part-of: headlamp
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- headlamp.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: headlamp
port: 80
weight: 100


@@ -0,0 +1,945 @@
---
# Source: komodo/charts/komodo/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: komodo-cache
labels:
app.kubernetes.io/instance: komodo
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: komodo
helm.sh/chart: komodo-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: komodo
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: komodo/charts/komodo/templates/common.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: komodo-syncs
labels:
app.kubernetes.io/instance: komodo
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: komodo
helm.sh/chart: komodo-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: komodo
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "5Gi"
storageClassName: "ceph-block"
---
# Source: komodo/charts/komodo/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: komodo-ferretdb-2
labels:
app.kubernetes.io/instance: komodo
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: komodo
app.kubernetes.io/service: komodo-ferretdb-2
helm.sh/chart: komodo-4.4.0
namespace: komodo
spec:
type: ClusterIP
ports:
- port: 27017
targetPort: 27017
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: ferretdb-2
app.kubernetes.io/instance: komodo
app.kubernetes.io/name: komodo
---
# Source: komodo/charts/komodo/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: komodo-main
labels:
app.kubernetes.io/instance: komodo
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: komodo
app.kubernetes.io/service: komodo-main
helm.sh/chart: komodo-4.4.0
namespace: komodo
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 9120
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: komodo
app.kubernetes.io/name: komodo
---
# Source: komodo/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: komodo-periphery-ps10rp
namespace: komodo
labels:
app.kubernetes.io/name: komodo-periphery-ps10rp
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
annotations:
tailscale.com/tailnet-fqdn: komodo-periphery-ps10rp.boreal-beaufort.ts.net
spec:
externalName: placeholder
type: ExternalName
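# Note: the externalName "placeholder" is expected to be rewritten by the Tailscale
# Kubernetes operator, which reads the tailscale.com/tailnet-fqdn annotation and points
# this Service at an egress proxy for komodo-periphery-ps10rp.boreal-beaufort.ts.net.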
---
# Source: komodo/charts/komodo/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: komodo-ferretdb-2
labels:
app.kubernetes.io/controller: ferretdb-2
app.kubernetes.io/instance: komodo
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: komodo
helm.sh/chart: komodo-4.4.0
namespace: komodo
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: ferretdb-2
app.kubernetes.io/name: komodo
app.kubernetes.io/instance: komodo
template:
metadata:
labels:
app.kubernetes.io/controller: ferretdb-2
app.kubernetes.io/instance: komodo
app.kubernetes.io/name: komodo
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: FERRETDB_POSTGRESQL_URL
valueFrom:
secretKeyRef:
key: uri
name: komodo-postgresql-17-fdb-cluster-app
image: ghcr.io/ferretdb/ferretdb:2.7.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
---
# Source: komodo/charts/komodo/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: komodo-main
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: komodo
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: komodo
helm.sh/chart: komodo-4.4.0
namespace: komodo
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: komodo
app.kubernetes.io/instance: komodo
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: komodo
app.kubernetes.io/name: komodo
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: COMPOSE_LOGGING_DRIVER
value: local
- name: KOMODO_HOST
value: https://komodo.alexlebens.net
- name: KOMODO_TITLE
value: Komodo
- name: PASSKEY
valueFrom:
secretKeyRef:
key: passkey
name: komodo-secret
- name: KOMODO_MONITORING_INTERVAL
value: 15-sec
- name: KOMODO_RESOURCE_POLL_INTERVAL
value: 5-min
- name: KOMODO_PASSKEY
valueFrom:
secretKeyRef:
key: passkey
name: komodo-secret
- name: KOMODO_WEBHOOK_SECRET
valueFrom:
secretKeyRef:
key: webhook
name: komodo-secret
- name: KOMODO_JWT_SECRET
valueFrom:
secretKeyRef:
key: jwt
name: komodo-secret
- name: KOMODO_LOCAL_AUTH
value: "true"
- name: KOMODO_ENABLE_NEW_USERS
value: "true"
- name: KOMODO_DISABLE_NON_ADMIN_CREATE
value: "true"
- name: KOMODO_TRANSPARENT_MODE
value: "false"
- name: PERIPHERY_SSL_ENABLED
value: "false"
- name: DB_USERNAME
valueFrom:
secretKeyRef:
key: user
name: komodo-postgresql-17-fdb-cluster-app
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: komodo-postgresql-17-fdb-cluster-app
- name: KOMODO_DATABASE_URI
value: mongodb://$(DB_USERNAME):$(DB_PASSWORD)@komodo-ferretdb-2.komodo:27017/komodo
- name: KOMODO_OIDC_ENABLED
value: "true"
- name: KOMODO_OIDC_PROVIDER
value: http://authentik-server.authentik/application/o/komodo/
- name: KOMODO_OIDC_REDIRECT_HOST
value: https://authentik.alexlebens.net
- name: KOMODO_OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
key: oidc-client-id
name: komodo-secret
- name: KOMODO_OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: oidc-client-secret
name: komodo-secret
- name: KOMODO_OIDC_USE_FULL_EMAIL
value: "true"
image: ghcr.io/moghtech/komodo-core:1.19.5
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /repo-cache
name: cache
- mountPath: /syncs
name: syncs
volumes:
- name: cache
persistentVolumeClaim:
claimName: komodo-cache
- name: syncs
persistentVolumeClaim:
claimName: komodo-syncs
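# Note: KOMODO_DATABASE_URI is assembled from the DB_USERNAME and DB_PASSWORD entries
# defined earlier in the same env list ($(VAR) references expand to those values) and
# points at the komodo-ferretdb-2 Service, so Komodo speaks MongoDB to FerretDB while
# the data lives in the CNPG PostgreSQL cluster defined below.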
---
# Source: komodo/charts/postgresql-17-fdb-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: komodo-postgresql-17-fdb-cluster
namespace: komodo
labels:
helm.sh/chart: postgresql-17-fdb-cluster-6.16.0
app.kubernetes.io/name: komodo-postgresql-17-fdb
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/ferretdb/postgres-documentdb:17-0.106.0-ferretdb-2.5.0"
imagePullPolicy: IfNotPresent
postgresUID: 999
postgresGID: 999
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "komodo-postgresql-17-fdb-external-backup"
serverName: "komodo-postgresql-17-fdb-backup-2"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "komodo-postgresql-17-fdb-garage-local-backup"
serverName: "komodo-postgresql-17-fdb-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "komodo-postgresql-17-fdb-recovery"
serverName: komodo-postgresql-17-fdb-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: true
enablePDB: true
postgresql:
shared_preload_libraries:
- pg_cron
- pg_documentdb_core
- pg_documentdb
pg_hba:
- host ferretDB postgres localhost trust
- host ferretDB ferret localhost trust
parameters:
cron.database_name: ferretDB
documentdb.enableBypassDocumentValidation: "true"
documentdb.enableCompact: "true"
documentdb.enableLetAndCollationForQueryMatch: "true"
documentdb.enableNowSystemVariable: "true"
documentdb.enableSchemaValidation: "true"
documentdb.enableSortbyIdPushDownToPrimaryKey: "true"
documentdb.enableUserCrud: "true"
documentdb.maxUserLimit: "100"
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
initdb:
database: ferretDB
owner: ferret
postInitApplicationSQL:
- create extension if not exists pg_cron;
- create extension if not exists documentdb cascade;
- grant documentdb_admin_role to ferret;
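# Note: the initdb bootstrap creates the ferretDB database owned by ferret and installs the
# pg_cron and documentdb extensions that FerretDB v2 requires. A hypothetical sanity check,
# assuming the usual CNPG pod naming (komodo-postgresql-17-fdb-cluster-1):
#   kubectl -n komodo exec -it komodo-postgresql-17-fdb-cluster-1 -- psql -d ferretDB -c '\dx'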
---
# Source: komodo/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: komodo-secret
namespace: komodo
labels:
app.kubernetes.io/name: komodo-secret
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: passkey
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/komodo/config
metadataPolicy: None
property: passkey
- secretKey: jwt
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/komodo/config
metadataPolicy: None
property: jwt
- secretKey: webhook
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/komodo/config
metadataPolicy: None
property: webhook
- secretKey: oidc-client-id
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/komodo
metadataPolicy: None
property: client
- secretKey: oidc-client-secret
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/komodo
metadataPolicy: None
property: secret
---
# Source: komodo/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: komodo-postgresql-17-fdb-cluster-backup-secret
namespace: komodo
labels:
app.kubernetes.io/name: komodo-postgresql-17-fdb-cluster-backup-secret
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: komodo/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: komodo-postgresql-17-cluster-backup-secret-garage
namespace: komodo
labels:
app.kubernetes.io/name: komodo-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: komodo/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: https-route-komodo
namespace: komodo
labels:
app.kubernetes.io/name: https-route-komodo
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- komodo.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: komodo-main
port: 80
weight: 100
---
# Source: komodo/charts/postgresql-17-fdb-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "komodo-postgresql-17-fdb-external-backup"
namespace: komodo
labels:
helm.sh/chart: postgresql-17-fdb-cluster-6.16.0
app.kubernetes.io/name: komodo-postgresql-17-fdb
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/komodo/komodo-postgresql-17-fdb-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: komodo-postgresql-17-fdb-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: komodo-postgresql-17-fdb-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: komodo/charts/postgresql-17-fdb-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "komodo-postgresql-17-fdb-garage-local-backup"
namespace: komodo
labels:
helm.sh/chart: postgresql-17-fdb-cluster-6.16.0
app.kubernetes.io/name: komodo-postgresql-17-fdb
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/komodo/komodo-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: komodo-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: komodo-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: komodo-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: komodo/charts/postgresql-17-fdb-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "komodo-postgresql-17-fdb-recovery"
namespace: komodo
labels:
helm.sh/chart: postgresql-17-fdb-cluster-6.16.0
app.kubernetes.io/name: komodo-postgresql-17-fdb
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/komodo/komodo-postgresql-17-fdb-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: komodo-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: komodo-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
---
# Source: komodo/charts/postgresql-17-fdb-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: komodo-postgresql-17-fdb-alert-rules
namespace: komodo
labels:
helm.sh/chart: postgresql-17-fdb-cluster-6.16.0
app.kubernetes.io/name: komodo-postgresql-17-fdb
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/komodo-postgresql-17-fdb
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
instances. The replaced instance may need some time to catch up with the cluster primary instance.
This alarm will always trigger if your cluster is configured to run with only 1 instance. In that
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="komodo"} - cnpg_pg_replication_is_wal_receiver_up{namespace="komodo"}) < 1
for: 5m
labels:
severity: critical
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with less than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="komodo"} - cnpg_pg_replication_is_wal_receiver_up{namespace="komodo"}) < 2
for: 5m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="komodo", pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="komodo", pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="komodo", pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="komodo", pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="komodo",pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="komodo", pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster query is taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="komodo", persistentvolumeclaim=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="komodo",pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same zone.
description: |-
CloudNativePG Cluster "komodo/komodo-postgresql-17-fdb-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="komodo", pod=~"komodo-postgresql-17-fdb-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: komodo
cnpg_cluster: komodo-postgresql-17-fdb-cluster
---
# Source: komodo/charts/postgresql-17-fdb-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "komodo-postgresql-17-fdb-daily-backup-scheduled-backup"
namespace: komodo
labels:
helm.sh/chart: postgresql-17-fdb-cluster-6.16.0
app.kubernetes.io/name: komodo-postgresql-17-fdb
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: komodo-postgresql-17-fdb-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "komodo-postgresql-17-fdb-external-backup"
---
# Source: komodo/charts/postgresql-17-fdb-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "komodo-postgresql-17-fdb-live-backup-scheduled-backup"
namespace: komodo
labels:
helm.sh/chart: postgresql-17-fdb-cluster-6.16.0
app.kubernetes.io/name: komodo-postgresql-17-fdb
app.kubernetes.io/instance: komodo
app.kubernetes.io/part-of: komodo
app.kubernetes.io/version: "6.16.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: komodo-postgresql-17-fdb-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "komodo-postgresql-17-fdb-garage-local-backup"


@@ -0,0 +1,228 @@
---
# Source: kronic/charts/kronic/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: kronic
labels:
helm.sh/chart: kronic-0.1.7
app.kubernetes.io/name: kronic
app.kubernetes.io/instance: kronic
app.kubernetes.io/version: "v0.1.4"
app.kubernetes.io/managed-by: Helm
---
# Source: kronic/charts/kronic/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: kronic-0.1.7
app.kubernetes.io/name: kronic
app.kubernetes.io/instance: kronic
app.kubernetes.io/version: "v0.1.4"
app.kubernetes.io/managed-by: Helm
name: kronic
rules:
- apiGroups:
- ""
resources:
- pods
- events
- pods/log
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- batch
resources:
- jobs
- cronjobs
- cronjobs/status
verbs:
- "*"
---
# Source: kronic/charts/kronic/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: kronic-0.1.7
app.kubernetes.io/name: kronic
app.kubernetes.io/instance: kronic
app.kubernetes.io/version: "v0.1.4"
app.kubernetes.io/managed-by: Helm
name: kronic
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kronic
subjects:
- kind: ServiceAccount
name: kronic
namespace: "kronic"
---
# Source: kronic/charts/kronic/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: kronic
labels:
helm.sh/chart: kronic-0.1.7
app.kubernetes.io/name: kronic
app.kubernetes.io/instance: kronic
app.kubernetes.io/version: "v0.1.4"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: kronic
app.kubernetes.io/instance: kronic
---
# Source: kronic/charts/kronic/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: kronic
labels:
helm.sh/chart: kronic-0.1.7
app.kubernetes.io/name: kronic
app.kubernetes.io/instance: kronic
app.kubernetes.io/version: "v0.1.4"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: kronic
app.kubernetes.io/instance: kronic
template:
metadata:
labels:
app.kubernetes.io/name: kronic
app.kubernetes.io/instance: kronic
spec:
serviceAccountName: kronic
securityContext:
{}
containers:
- name: kronic
securityContext:
{}
image: "ghcr.io/mshade/kronic:v0.1.4"
imagePullPolicy: IfNotPresent
env:
- name: KRONIC_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: KRONIC_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: kronic-config-secret
key: password
- name: KRONIC_ADMIN_USERNAME
value: "kronic"
- name: KRONIC_ALLOW_NAMESPACES
value: "gitea,vault,talos,libation,kubernetes-cloudflare-ddns"
- name: KRONIC_NAMESPACE_ONLY
value: ""
ports:
- name: http
containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: http
readinessProbe:
httpGet:
path: /healthz
port: http
resources:
limits:
cpu: 1
memory: 1024Mi
requests:
cpu: 10m
memory: 256Mi
---
# Source: kronic/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: kronic-config-secret
namespace: kronic
labels:
app.kubernetes.io/name: kronic-config-secret
app.kubernetes.io/instance: kronic
app.kubernetes.io/part-of: kronic
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: password
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/kronic/auth
metadataPolicy: None
property: password
---
# Source: kronic/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: https-route-kronic
namespace: kronic
labels:
app.kubernetes.io/name: https-route-kronic
app.kubernetes.io/instance: kronic
app.kubernetes.io/part-of: kronic
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- kronic.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: kronic
port: 80
weight: 100
---
# Source: kronic/charts/kronic/templates/tests/test-connection.yaml
apiVersion: v1
kind: Pod
metadata:
name: "kronic-test-connection"
labels:
helm.sh/chart: kronic-0.1.7
app.kubernetes.io/name: kronic
app.kubernetes.io/instance: kronic
app.kubernetes.io/version: "v0.1.4"
app.kubernetes.io/managed-by: Helm
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['kronic:80/healthz']
restartPolicy: Never
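# Note: the helm.sh/hook: test annotation marks this Pod as a Helm test hook that wgets the
# kronic Service's /healthz endpoint. A hypothetical one-off equivalent without Helm:
#   kubectl -n kronic run kronic-healthz --rm -it --restart=Never --image=busybox -- wget -qO- kronic:80/healthz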

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,559 @@
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: s3-exporter-digital-ocean
labels:
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/service: s3-exporter-digital-ocean
helm.sh/chart: s3-exporter-4.4.0
namespace: s3-exporter
spec:
type: ClusterIP
ports:
- port: 9655
targetPort: 9655
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: digital-ocean
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/name: s3-exporter
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: s3-exporter-garage-local
labels:
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/service: s3-exporter-garage-local
helm.sh/chart: s3-exporter-4.4.0
namespace: s3-exporter
spec:
type: ClusterIP
ports:
- port: 9655
targetPort: 9655
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: garage-local
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/name: s3-exporter
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: s3-exporter-garage-remote
labels:
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/service: s3-exporter-garage-remote
helm.sh/chart: s3-exporter-4.4.0
namespace: s3-exporter
spec:
type: ClusterIP
ports:
- port: 9655
targetPort: 9655
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: garage-remote
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/name: s3-exporter
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: s3-exporter-ceph-directus
labels:
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/service: s3-exporter-ceph-directus
helm.sh/chart: s3-exporter-4.4.0
namespace: s3-exporter
spec:
type: ClusterIP
ports:
- port: 9655
targetPort: 9655
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: ceph-directus
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/name: s3-exporter
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: s3-exporter-ceph-directus
labels:
app.kubernetes.io/controller: ceph-directus
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: s3-exporter
helm.sh/chart: s3-exporter-4.4.0
namespace: s3-exporter
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: ceph-directus
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/instance: s3-exporter
template:
metadata:
labels:
app.kubernetes.io/controller: ceph-directus
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/name: s3-exporter
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: S3_NAME
value: ceph-directus
- name: S3_ENDPOINT
valueFrom:
secretKeyRef:
key: BUCKET_HOST
name: s3-ceph-directus-secret
- name: S3_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: s3-ceph-directus-secret
- name: S3_SECRET_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: s3-ceph-directus-secret
- name: S3_REGION
value: us-east-1
- name: LOG_LEVEL
value: info
- name: S3_FORCE_PATH_STYLE
value: "true"
image: molu8bits/s3bucket_exporter:1.0.2
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 64Mi
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: s3-exporter-digital-ocean
labels:
app.kubernetes.io/controller: digital-ocean
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: s3-exporter
helm.sh/chart: s3-exporter-4.4.0
namespace: s3-exporter
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: digital-ocean
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/instance: s3-exporter
template:
metadata:
labels:
app.kubernetes.io/controller: digital-ocean
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/name: s3-exporter
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: S3_NAME
value: digital-ocean
- name: S3_ENDPOINT
value: https://nyc3.digitaloceanspaces.com
- name: S3_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: s3-do-home-infra-secret
- name: S3_SECRET_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: s3-do-home-infra-secret
- name: S3_REGION
valueFrom:
secretKeyRef:
key: AWS_REGION
name: s3-do-home-infra-secret
- name: LOG_LEVEL
value: info
- name: S3_FORCE_PATH_STYLE
value: "false"
image: molu8bits/s3bucket_exporter:1.0.2
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 64Mi
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: s3-exporter-garage-local
labels:
app.kubernetes.io/controller: garage-local
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: s3-exporter
helm.sh/chart: s3-exporter-4.4.0
namespace: s3-exporter
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: garage-local
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/instance: s3-exporter
template:
metadata:
labels:
app.kubernetes.io/controller: garage-local
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/name: s3-exporter
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: S3_NAME
value: garage-local
- name: S3_ENDPOINT
value: http://garage-main.garage:3900
- name: S3_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: s3-garage-secret
- name: S3_SECRET_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: s3-garage-secret
- name: S3_REGION
value: us-east-1
- name: LOG_LEVEL
value: debug
- name: S3_FORCE_PATH_STYLE
value: "true"
image: molu8bits/s3bucket_exporter:1.0.2
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 64Mi
---
# Source: s3-exporter/charts/s3-exporter/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: s3-exporter-garage-remote
labels:
app.kubernetes.io/controller: garage-remote
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: s3-exporter
helm.sh/chart: s3-exporter-4.4.0
namespace: s3-exporter
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: garage-remote
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/instance: s3-exporter
template:
metadata:
labels:
app.kubernetes.io/controller: garage-remote
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/name: s3-exporter
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: S3_NAME
value: garage-remote
- name: S3_ENDPOINT
value: https://garage-ps10rp.boreal-beaufort.ts.net:3900
- name: S3_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: s3-garage-secret
- name: S3_SECRET_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: s3-garage-secret
- name: S3_REGION
value: us-east-1
- name: LOG_LEVEL
value: debug
- name: S3_FORCE_PATH_STYLE
value: "true"
image: molu8bits/s3bucket_exporter:1.0.2
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 64Mi
---
# Source: s3-exporter/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: s3-do-home-infra-secret
namespace: s3-exporter
labels:
app.kubernetes.io/name: s3-do-home-infra-secret
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/part-of: s3-exporter
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/all-access
metadataPolicy: None
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/all-access
metadataPolicy: None
property: AWS_SECRET_ACCESS_KEY
- secretKey: AWS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/prometheus-exporter
metadataPolicy: None
property: AWS_REGION
---
# Source: s3-exporter/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: s3-ceph-directus-secret
namespace: s3-exporter
labels:
app.kubernetes.io/name: s3-ceph-directus-secret
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/part-of: s3-exporter
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/directus/ceph
metadataPolicy: None
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/directus/ceph
metadataPolicy: None
property: AWS_SECRET_ACCESS_KEY
- secretKey: BUCKET_HOST
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/directus/ceph
metadataPolicy: None
property: BUCKET_HOST
---
# Source: s3-exporter/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: s3-garage-secret
namespace: s3-exporter
labels:
app.kubernetes.io/name: s3-garage-secret
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/part-of: s3-exporter
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/s3-exporter
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/s3-exporter
metadataPolicy: None
property: ACCESS_SECRET_KEY
---
# Source: s3-exporter/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: s3-exporter-digital-ocean
namespace: s3-exporter
labels:
app.kubernetes.io/name: s3-exporter-digital-ocean
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/part-of: s3-exporter
spec:
selector:
matchLabels:
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/service: s3-exporter-digital-ocean
endpoints:
- port: metrics
interval: 5m
scrapeTimeout: 120s
path: /metrics
---
# Source: s3-exporter/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: s3-exporter-ceph-directus
namespace: s3-exporter
labels:
app.kubernetes.io/name: s3-exporter-ceph-directus
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/part-of: s3-exporter
spec:
selector:
matchLabels:
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/service: s3-exporter-ceph-directus
endpoints:
- port: metrics
interval: 5m
scrapeTimeout: 120s
path: /metrics
---
# Source: s3-exporter/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: s3-exporter-garage-local
namespace: s3-exporter
labels:
app.kubernetes.io/name: s3-exporter-garage-local
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/part-of: s3-exporter
spec:
selector:
matchLabels:
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/service: s3-exporter-garage-local
endpoints:
- port: metrics
interval: 5m
scrapeTimeout: 120s
path: /metrics
---
# Source: s3-exporter/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: s3-exporter-garage-remote
namespace: s3-exporter
labels:
app.kubernetes.io/name: s3-exporter-garage-remote
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/part-of: s3-exporter
spec:
selector:
matchLabels:
app.kubernetes.io/name: s3-exporter
app.kubernetes.io/instance: s3-exporter
app.kubernetes.io/service: s3-exporter-garage-remote
endpoints:
- port: metrics
interval: 5m
scrapeTimeout: 120s
path: /metrics
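# Note: each ServiceMonitor above selects its matching Service by the app.kubernetes.io/service
# label and scrapes /metrics on the "metrics" port every 5 minutes. A hypothetical manual check
# for one exporter, assuming kubectl access:
#   kubectl -n s3-exporter port-forward svc/s3-exporter-garage-local 9655:9655
#   curl -s http://localhost:9655/metrics | head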


@@ -0,0 +1,185 @@
---
# Source: shelly-plug/charts/shelly-plug/templates/common.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: shelly-plug
labels:
app.kubernetes.io/instance: shelly-plug
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: shelly-plug
helm.sh/chart: shelly-plug-4.4.0
annotations:
helm.sh/resource-policy: keep
namespace: shelly-plug
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "1Gi"
storageClassName: "ceph-block"
---
# Source: shelly-plug/charts/shelly-plug/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: shelly-plug
labels:
app.kubernetes.io/instance: shelly-plug
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: shelly-plug
app.kubernetes.io/service: shelly-plug
helm.sh/chart: shelly-plug-4.4.0
namespace: shelly-plug
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 80
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: shelly-plug
app.kubernetes.io/name: shelly-plug
---
# Source: shelly-plug/charts/shelly-plug/templates/common.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: shelly-plug
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: shelly-plug
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: shelly-plug
helm.sh/chart: shelly-plug-4.4.0
namespace: shelly-plug
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: shelly-plug
app.kubernetes.io/instance: shelly-plug
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: shelly-plug
app.kubernetes.io/name: shelly-plug
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
initContainers:
- command:
- /bin/sh
- -ec
- |
cd /var/www/html
if [ -d ".git" ]; then
echo "Git repository found. Pulling latest changes..."
git pull
else
echo "Not a git repository. Initializing ..."
git init
git remote add origin https://github.com/geerlingguy/shelly-plug-prometheus.git
git fetch origin
git checkout origin/master -ft
fi
image: alpine/git:latest
imagePullPolicy: IfNotPresent
name: init-fetch-repo
resources:
requests:
cpu: 10m
memory: 128Mi
securityContext:
runAsUser: 0
volumeMounts:
- mountPath: /var/www/html
name: script
containers:
- env:
- name: SHELLY_HOSTNAME
value: it05sp.alexlebens.net
- name: SHELLY_GENERATION
value: "2"
envFrom:
- secretRef:
name: shelly-plug-config-secret
image: php:8.4.15-apache-bookworm
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 64Mi
volumeMounts:
- mountPath: /var/www/html
name: script
volumes:
- name: script
persistentVolumeClaim:
claimName: shelly-plug
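# Note: the init container clones (or updates) geerlingguy/shelly-plug-prometheus into the
# PVC-backed /var/www/html, and the php:apache main container serves that script with the
# SHELLY_HTTP_* credentials from the ExternalSecret below; the Service exposes it on port 80
# as "metrics" and the ServiceMonitor scrapes /metrics from it.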
---
# Source: shelly-plug/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: shelly-plug-config-secret
namespace: shelly-plug
labels:
app.kubernetes.io/name: shelly-plug-config-secret
app.kubernetes.io/instance: shelly-plug
app.kubernetes.io/part-of: shelly-plug
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: SHELLY_HTTP_USERNAME
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /shelly-plug/auth/it05sp
metadataPolicy: None
property: SHELLY_HTTP_USERNAME
- secretKey: SHELLY_HTTP_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /shelly-plug/auth/it05sp
metadataPolicy: None
property: SHELLY_HTTP_PASSWORD
---
# Source: shelly-plug/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: shelly-plug
namespace: shelly-plug
labels:
app.kubernetes.io/name: shelly-plug
app.kubernetes.io/instance: shelly-plug
app.kubernetes.io/part-of: shelly-plug
spec:
selector:
matchLabels:
app.kubernetes.io/name: shelly-plug
app.kubernetes.io/instance: shelly-plug
endpoints:
- port: metrics
interval: 30s
scrapeTimeout: 10s
path: /metrics

File diff suppressed because it is too large


@@ -0,0 +1,151 @@
---
# Source: unpoller/charts/unpoller/templates/common.yaml
apiVersion: v1
kind: Service
metadata:
name: unpoller
labels:
app.kubernetes.io/instance: unpoller
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: unpoller
app.kubernetes.io/service: unpoller
helm.sh/chart: unpoller-4.4.0
namespace: unpoller
spec:
type: ClusterIP
ports:
- port: 9130
targetPort: 9130
protocol: TCP
name: metrics
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: unpoller
app.kubernetes.io/name: unpoller
---
# Source: unpoller/charts/unpoller/templates/common.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: unpoller
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: unpoller
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: unpoller
helm.sh/chart: unpoller-4.4.0
namespace: unpoller
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: unpoller
app.kubernetes.io/instance: unpoller
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: unpoller
app.kubernetes.io/name: unpoller
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: UP_UNIFI_CONTROLLER_0_SAVE_ALARMS
value: "false"
- name: UP_UNIFI_CONTROLLER_0_SAVE_ANOMALIES
value: "false"
- name: UP_UNIFI_CONTROLLER_0_SAVE_DPI
value: "false"
- name: UP_UNIFI_CONTROLLER_0_SAVE_EVENTS
value: "false"
- name: UP_UNIFI_CONTROLLER_0_SAVE_IDS
value: "false"
- name: UP_UNIFI_CONTROLLER_0_SAVE_SITES
value: "true"
- name: UP_UNIFI_CONTROLLER_0_URL
value: https://unifi.alexlebens.net/
- name: UP_UNIFI_CONTROLLER_0_VERIFY_SSL
value: "false"
- name: UP_INFLUXDB_DISABLE
value: "true"
- name: UP_PROMETHEUS_HTTP_LISTEN
value: 0.0.0.0:9130
- name: UP_PROMETHEUS_NAMESPACE
value: unpoller
- name: UP_POLLER_DEBUG
value: "false"
- name: UP_POLLER_QUIET
value: "false"
envFrom:
- secretRef:
name: unpoller-unifi-secret
image: ghcr.io/unpoller/unpoller:v2.15.4
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 64Mi
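# Note: the UP_UNIFI_CONTROLLER_0_* variables configure unpoller's first (index 0) UniFi
# controller; UP_UNIFI_CONTROLLER_0_USER and _PASS are injected via envFrom from the
# unpoller-unifi-secret below, and UP_PROMETHEUS_HTTP_LISTEN matches the 9130 metrics port
# exposed by the Service and scraped by the ServiceMonitor.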
---
# Source: unpoller/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: unpoller-unifi-secret
namespace: unpoller
labels:
app.kubernetes.io/name: unpoller-unifi-secret
app.kubernetes.io/instance: unpoller
app.kubernetes.io/part-of: unpoller
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: UP_UNIFI_CONTROLLER_0_USER
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /unifi/auth/cl01tl
metadataPolicy: None
property: user
- secretKey: UP_UNIFI_CONTROLLER_0_PASS
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /unifi/auth/cl01tl
metadataPolicy: None
property: password
---
# Source: unpoller/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: unpoller
namespace: unpoller
labels:
app.kubernetes.io/name: unpoller
app.kubernetes.io/instance: unpoller
app.kubernetes.io/part-of: unpoller
spec:
selector:
matchLabels:
app.kubernetes.io/name: unpoller
app.kubernetes.io/instance: unpoller
endpoints:
- port: metrics
interval: 30s
scrapeTimeout: 10s
path: /metrics