Compare commits

1 Commit

Commit 28625c7388: Update php Docker tag to v8.5.0 (2025-12-03 22:44:09 +00:00)
All checks were successful:
- renovate/stability-days: Updates have met minimum release age requirement
- lint-test-helm / helm-lint (pull_request): Successful in 17s
563 changed files with 12945 additions and 8007 deletions


@@ -0,0 +1,86 @@
name: lint-test-docker
on:
pull_request:
branches:
- main
paths:
- 'hosts/**'
jobs:
docker-lint:
runs-on: ubuntu-js
steps:
- name: Checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Check Branch Exists
id: check-branch-exists
uses: GuillaumeFalourd/branch-exists@v1.1
with:
branch: "${{ github.base_ref }}"
- name: Branch Does Not Exist
if: steps.check-branch-exists.outputs.exists == 'false'
run: echo "Branch ${{ github.base_ref }} was not found, likely already merged"
- name: Set up Node.js
if: steps.check-branch-exists.outputs.exists == 'true'
uses: actions/setup-node@v6
with:
node-version: '24'
- name: Lint Docker Compose
if: steps.check-branch-exists.outputs.exists == 'true'
run: |
set -e # Exit immediately if a command exits with a non-zero status.
TARGET_BRANCH="origin/${{ github.base_ref }}"
echo ">> Target branch for diff is: $TARGET_BRANCH"
CHANGED_FILES=$(git diff --name-only "$TARGET_BRANCH" -- 'hosts/**')
echo ">> Found changed files:"
echo "$CHANGED_FILES"
# For each changed file, find its parent chart directory (the one with compose.yaml).
# Then, create a unique list of those directories.
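# Illustrative example (hypothetical path): a change to hosts/host01/someapp/.env walks up
# until a directory containing compose.yaml is found, e.g. hosts/host01/someapp, so that
# directory appears once in CHANGED_COMPOSE even if several of its files changed.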
CHANGED_COMPOSE=$(echo "$CHANGED_FILES" | while read -r file; do
dir=$(dirname "$file")
while [[ "$dir" != "." && ! -f "$dir/compose.yaml" ]]; do
dir=$(dirname "$dir")
done
if [[ "$dir" != "." ]]; then
echo "$dir"
fi
done | sort -u)
if [[ -z "$CHANGED_COMPOSE" ]]; then
echo ">> Could not determine changed compose files. This will happen if only files outside a compose file were changed."
exit 0
fi
echo ">> Running dclint on changed compose files:"
echo "$CHANGED_COMPOSE"
echo "$CHANGED_COMPOSE" | while read -r compose; do
echo ">> Linting $compose ..."
npx dclint $compose
done
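# The same check can be reproduced locally before pushing, e.g. (hypothetical path):
#   npx dclint hosts/host01/someapp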
- name: ntfy Failed
uses: niniyas/ntfy-action@master
if: failure()
with:
url: '${{ secrets.NTFY_URL }}'
topic: '${{ secrets.NTFY_TOPIC }}'
title: 'Test Failure - Infrastructure'
priority: 3
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,failed
details: 'Docker linting on Pull Request for Infrastructure has failed!'
icon: 'https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png'
actions: '[{"action": "view", "label": "Open Gitea", "url": "https://gitea.alexlebens.dev/alexlebens/infrastructure/actions?workflow=lint-test-docker-pull.yaml", "clear": true}]'
image: true


@@ -0,0 +1,74 @@
name: lint-test-docker
on:
push:
branches:
- main
paths:
- 'hosts/**'
jobs:
docker-lint:
runs-on: ubuntu-js
steps:
- name: Checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Set up Node.js
uses: actions/setup-node@v6
with:
node-version: '24'
- name: Lint Docker Compose
run: |
set -e # Exit immediately if a command exits with a non-zero status.
TARGET_BRANCH="origin/main"
echo ">> Target branch for diff is: $TARGET_BRANCH"
CHANGED_FILES=$(git diff --name-only "$TARGET_BRANCH" -- 'hosts/**')
echo ">> Found changed files:"
echo "$CHANGED_FILES"
# For each changed file, find its parent chart directory (the one with compose.yaml).
# Then, create a unique list of those directories.
CHANGED_COMPOSE=$(echo "$CHANGED_FILES" | while read -r file; do
dir=$(dirname "$file")
while [[ "$dir" != "." && ! -f "$dir/compose.yaml" ]]; do
dir=$(dirname "$dir")
done
if [[ "$dir" != "." ]]; then
echo "$dir"
fi
done | sort -u)
if [[ -z "$CHANGED_COMPOSE" ]]; then
echo ">> Could not determine changed compose files. This will happen if only files outside a compose file were changed."
exit 0
fi
echo ">> Running dclint on changed compose files:"
echo "$CHANGED_COMPOSE"
echo "$CHANGED_COMPOSE" | while read -r compose; do
echo ">> Linting $compose ..."
npx dclint $compose
done
- name: ntfy Failed
uses: niniyas/ntfy-action@master
if: failure()
with:
url: '${{ secrets.NTFY_URL }}'
topic: '${{ secrets.NTFY_TOPIC }}'
title: 'Test Failure - Infrastructure'
priority: 4
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,failed
details: 'Docker linting on Push for Infrastructure has failed!'
icon: 'https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png'
actions: '[{"action": "view", "label": "Open Gitea", "url": "https://gitea.alexlebens.dev/alexlebens/infrastructure/actions?workflow=lint-test-docker-push.yaml", "clear": true}]'
image: true


@@ -1,141 +0,0 @@
name: lint-test-docker
on:
pull_request:
branches:
- main
paths:
- 'hosts/**'
push:
branches:
- main
paths:
- 'hosts/**'
env:
BASE_BRANCH: "origin/${{ gitea.base_ref }}"
jobs:
lint-docker-compose:
runs-on: ubuntu-js
steps:
- name: Checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Check Branch Exists
id: check-branch-exists
if: github.event_name == 'pull_request'
uses: GuillaumeFalourd/branch-exists@v1.1
with:
branch: "${{ github.base_ref }}"
- name: Report Branch Exists
id: branch-exists
if: github.event_name == 'push' || steps.check-branch-exists.outputs.exists == 'true' && github.event_name == 'pull_request'
run: |
if [ ${{ github.event_name == 'push' }} ]; then
echo ">> Action is from a push event, will continue with linting"
else
echo ">> Branch ${{ gitea.base_ref }} exists, will continue with linting"
fi
echo "----"
echo "exists=true" >> $GITEA_OUTPUT
- name: Set up Node.js
if: steps.branch-exists.outputs.exists == 'true'
uses: actions/setup-node@v6
with:
node-version: '24'
- name: Check Directories for Changes
id: check-dir-changes
if: steps.branch-exists.outputs.exists == 'true'
run: |
CHANGED_COMPOSE=()
echo ">> Target branch for diff is: ${BASE_BRANCH}"
if [ "${{ github.event_name }}" == "pull_request" ]; then
echo ""
echo ">> Checking for changes in a pull request ..."
GIT_DIFF=$(git diff --name-only "${BASE_BRANCH}" | xargs -I {} dirname {} | sort -u)
else
echo ""
echo ">> Checking for changes from a push ..."
GIT_DIFF=$(git diff --name-only ${{ gitea.event.before }}..HEAD | xargs -I {} dirname {} | sort -u)
fi
if [ -n "${GIT_DIFF}" ]; then
echo ""
echo ">> Changes detected:"
echo "$GIT_DIFF"
for path in $GIT_DIFF; do
if echo "$path" | grep -q -E "hosts/[^/]+/[^/]+"; then
echo ""
echo ">> Adding path: $path"
CHANGED_COMPOSE+=$(echo "$path")
CHANGED_COMPOSE+=$(echo " ")
fi
done
else
echo ""
echo ">> No changes detected"
fi
if [ -n "${CHANGED_COMPOSE}" ]; then
echo ""
echo ">> Compose to Lint:"
echo "$(echo "${CHANGED_COMPOSE}" | sort -u)"
echo "----"
echo "changes-detected=true" >> $GITEA_OUTPUT
echo "compose-dir<<EOF" >> $GITEA_OUTPUT
echo "$(echo "${CHANGED_COMPOSE}" | sort -u)" >> $GITEA_OUTPUT
echo "EOF" >> $GITEA_OUTPUT
else
echo ""
echo ">> Did not find any docker compose files to lint"
echo "----"
echo "changes-detected=false" >> $GITEA_OUTPUT
fi
- name: Lint Docker Compose
if: steps.check-dir-changes.outputs.changes-detected == 'true'
env:
CHANGED_COMPOSE: ${{ steps.check-dir-changes.outputs.compose-dir }}
run: |
echo ">> Running dclint on changed compose files:"
echo "$CHANGED_COMPOSE"
for compose in $CHANGED_COMPOSE; do
echo ">> Linting $compose ..."
npx dclint $compose
done
- name: ntfy Failed
uses: niniyas/ntfy-action@master
if: failure()
with:
url: '${{ secrets.NTFY_URL }}'
topic: '${{ secrets.NTFY_TOPIC }}'
title: 'Test Failure - Infrastructure'
priority: 3
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,failed
details: 'Docker linting on Pull Request for Infrastructure has failed!'
icon: 'https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png'
actions: '[{"action": "view", "label": "Open Gitea", "url": "https://gitea.alexlebens.dev/alexlebens/infrastructure/actions?workflow=lint-test-docker-pull.yaml", "clear": true}]'
image: true


@@ -0,0 +1,94 @@
name: lint-test-helm
on:
pull_request:
branches:
- main
paths:
- 'clusters/**'
jobs:
helm-lint:
runs-on: ubuntu-js
steps:
- name: Checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Check Branch Exists
id: check-branch-exists
uses: GuillaumeFalourd/branch-exists@v1.1
with:
branch: ${{ github.base_ref }}
- name: Branch Does Not Exist
if: steps.check-branch-exists.outputs.exists == 'false'
run: echo "Branch ${{ github.base_ref }} was not found, likely already merged"
- name: Set up Helm
if: steps.check-branch-exists.outputs.exists == 'true'
uses: azure/setup-helm@v4
with:
token: ${{ secrets.GITEA_TOKEN }}
version: v3.19.2
- name: Lint Helm Chart
if: steps.check-branch-exists.outputs.exists == 'true'
run: |
set -e # Exit immediately if a command exits with a non-zero status.
TARGET_BRANCH="origin/${{ github.base_ref }}"
echo ">> Target branch for diff is: $TARGET_BRANCH"
CHANGED_FILES=$(git diff --name-only "$TARGET_BRANCH" -- 'clusters/**')
echo ">> Found changed files:"
echo "$CHANGED_FILES"
# For each changed file, find its parent chart directory (the one with Chart.yaml).
# Then, create a unique list of those directories.
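# Illustrative example (hypothetical chart name): a change to clusters/cl01tl/helm/somechart/values.yaml
# walks up to clusters/cl01tl/helm/somechart (the directory holding Chart.yaml), so each affected
# chart is linted once regardless of how many of its files changed.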
CHANGED_CHARTS=$(echo "$CHANGED_FILES" | while read -r file; do
dir=$(dirname "$file")
while [[ "$dir" != "." && ! -f "$dir/Chart.yaml" ]]; do
dir=$(dirname "$dir")
done
if [[ "$dir" != "." ]]; then
echo "$dir"
fi
done | sort -u)
if [[ -z "$CHANGED_CHARTS" ]]; then
echo ">> Could not determine changed charts. This could happen if only files outside a chart were changed."
exit 0
fi
echo ">> Running helm lint on changed charts:"
echo "$CHANGED_CHARTS"
echo "$CHANGED_CHARTS" | while read -r chart; do
helm dependency list --max-col-width 120 $chart 2> /dev/null \
| tail +2 | head -n -1 \
| awk '{ print "helm repo add " $1 " " $3 }' \
| while read cmd; do echo "$cmd" | sh; done || true
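# The pipeline above reads the chart's dependency table (tail/head strip the header and
# trailing line) and turns NAME ($1) and REPOSITORY ($3) of each row into a repo-add
# command, e.g. a dependency on app-template from https://bjw-s-labs.github.io/helm-charts/
# becomes: helm repo add app-template https://bjw-s-labs.github.io/helm-charts/
# OCI dependencies are not valid "helm repo add" targets, which appears to be why the
# whole loop is guarded with "|| true".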
echo ">> Building dependency for "$chart" ..."
helm dependency build "$chart"
echo ">> Linting $chart..."
helm lint "$chart"
done
- name: ntfy Failed
uses: niniyas/ntfy-action@master
if: failure()
with:
url: '${{ secrets.NTFY_URL }}'
topic: '${{ secrets.NTFY_TOPIC }}'
title: 'Test Failure - Infrastructure'
priority: 3
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,failed
details: 'Helm linting on Pull Request for Infrastructure has failed!'
icon: 'https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png'
actions: '[{"action": "view", "label": "Open Gitea", "url": "https://gitea.alexlebens.dev/alexlebens/infrastructure/actions?workflow=lint-test-helm-pull.yaml", "clear": true}]'
image: true


@@ -0,0 +1,80 @@
name: lint-test-helm
on:
push:
branches:
- main
paths:
- 'clusters/**'
jobs:
helm-lint:
runs-on: ubuntu-js
steps:
- name: Checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@v4
with:
token: ${{ secrets.GITEA_TOKEN }}
version: v3.19.2
- name: Lint Helm Chart
run: |
TARGET_BRANCH="origin/main"
echo ">> Target branch for diff is: $TARGET_BRANCH"
CHANGED_FILES=$(git diff --name-only "$TARGET_BRANCH" -- 'clusters/**')
echo ">> Found changed files:"
echo "$CHANGED_FILES"
# For each changed file, find its parent chart directory (the one with Chart.yaml).
# Then, create a unique list of those directories.
CHANGED_CHARTS=$(echo "$CHANGED_FILES" | while read -r file; do
dir=$(dirname "$file")
while [[ "$dir" != "." && ! -f "$dir/Chart.yaml" ]]; do
dir=$(dirname "$dir")
done
if [[ "$dir" != "." ]]; then
echo "$dir"
fi
done | sort -u)
if [[ -z "$CHANGED_CHARTS" ]]; then
echo ">> Could not determine changed charts. This could happen if only files outside a chart were changed."
exit 0
fi
echo ">> Running helm lint on changed charts:"
echo "$CHANGED_CHARTS"
echo "$CHANGED_CHARTS" | while read -r chart; do
helm dependency list --max-col-width 120 $chart 2> /dev/null \
| tail +2 | head -n -1 \
| awk '{ print "helm repo add " $1 " " $3 }' \
| while read cmd; do echo "$cmd" | sh; done || true
echo ">> Building dependency for "$chart" ..."
helm dependency build "$chart"
echo ">> Linting $chart..."
helm lint "$chart"
done
- name: ntfy Failed
uses: niniyas/ntfy-action@master
if: failure()
with:
url: '${{ secrets.NTFY_URL }}'
topic: '${{ secrets.NTFY_TOPIC }}'
title: 'Test Failure - Infrastructure'
priority: 4
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,failed
details: 'Helm linting on Push for Infrastructure has failed!'
icon: 'https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png'
actions: '[{"action": "view", "label": "Open Gitea", "url": "https://gitea.alexlebens.dev/alexlebens/infrastructure/actions?workflow=lint-test-helm-push.yaml", "clear": true}]'
image: true


@@ -1,188 +0,0 @@
name: lint-test-helm
on:
pull_request:
branches:
- main
paths:
- 'clusters/cl01tl/helm/**'
push:
branches:
- main
paths:
- 'clusters/cl01tl/helm/**'
env:
CLUSTER: cl01tl
BASE_BRANCH: "origin/${{ gitea.base_ref }}"
jobs:
lint-helm:
runs-on: ubuntu-js
steps:
- name: Checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Check Branch Exists
id: check-branch-exists
if: github.event_name == 'pull_request'
uses: GuillaumeFalourd/branch-exists@v1.1
with:
branch: ${{ gitea.base_ref }}
- name: Report Branch Exists
id: branch-exists
if: github.event_name == 'push' || steps.check-branch-exists.outputs.exists == 'true' && github.event_name == 'pull_request'
run: |
if [ ${{ github.event_name == 'push' }} ]; then
echo ">> Action is from a push event, will continue with linting"
else
echo ">> Branch ${{ gitea.base_ref }} exists, will continue with linting"
fi
echo "----"
echo "exists=true" >> $GITEA_OUTPUT
- name: Set up Helm
if: steps.branch-exists.outputs.exists == 'true'
uses: azure/setup-helm@v4
with:
token: ${{ secrets.GITEA_TOKEN }}
version: v3.19.2
cache: true
- name: Check Directories for Changes
id: check-dir-changes
if: steps.branch-exists.outputs.exists == 'true'
run: |
CHANGED_CHARTS=()
echo ">> Target branch for diff is: ${BASE_BRANCH}"
if [ "${{ github.event_name }}" == "pull_request" ]; then
echo ""
echo ">> Checking for changes in a pull request ..."
GIT_DIFF=$(git diff --name-only "${BASE_BRANCH}" | xargs -I {} dirname {} | sort -u)
else
echo ""
echo ">> Checking for changes from a push ..."
GIT_DIFF=$(git diff --name-only ${{ gitea.event.before }}..HEAD | xargs -I {} dirname {} | sort -u)
fi
if [ -n "${GIT_DIFF}" ]; then
echo ""
echo ">> Changes detected:"
echo "$GIT_DIFF"
for path in $GIT_DIFF; do
if echo "$path" | grep -q -E "clusters/[^/]+/helm/[^/]+"; then
echo ""
echo ">> Adding path: $path"
CHANGED_CHARTS+=$(echo "$path" | awk -F '/' '{print $4}')
CHANGED_CHARTS+=$(echo "\n")
fi
done
else
echo ""
echo ">> No changes detected"
fi
if [ -n "${CHANGED_CHARTS}" ]; then
echo ""
echo ">> Chart to Lint:"
echo "$(echo "${CHANGED_CHARTS}" | sort -u)"
echo "----"
echo "changes-detected=true" >> $GITEA_OUTPUT
echo "chart-dir<<EOF" >> $GITEA_OUTPUT
echo "$(echo "${CHANGED_CHARTS}" | sort -u)" >> $GITEA_OUTPUT
echo "EOF" >> $GITEA_OUTPUT
else
echo ""
echo ">> Did not find any helm charts files to lint"
echo "----"
echo "changes-detected=false" >> $GITEA_OUTPUT
fi
- name: Add Repositories
if: steps.check-dir-changes.outputs.changes-detected == 'true'
env:
CHANGED_CHARTS: ${{ steps.check-dir-changes.outputs.chart-dir }}
run: |
echo ">> Adding repositories for chart dependencies ..."
for dir in ${CHANGED_CHARTS}; do
helm dependency list --max-col-width 120 clusters/${CLUSTER}/helm/$dir 2> /dev/null \
| tail +2 | head -n -1 \
| awk '{ print "helm repo add " $1 " " $3 }' \
| while read cmd; do
if [[ "$cmd" == "*oci://*" ]]; then
echo ">> Ignoring OCI repo"
else
echo ">> Command: $cmd"
echo "$cmd" | sh;
fi
done || true
done
if helm repo list | tail +2 | read -r; then
echo ""
echo ">> Update repository cache ..."
helm repo update
fi
echo "----"
- name: Lint Helm Chart
if: steps.check-dir-changes.outputs.changes-detected == 'true'
env:
CHANGED_CHARTS: ${{ steps.check-dir-changes.outputs.chart-dir }}
run: |
echo ">> Running linting on changed charts ..."
for dir in ${CHANGED_CHARTS}; do
chart_path=clusters/${CLUSTER}/helm/$dir
chart_name=$(basename "$chart_path")
if [ -f "$chart_path/Chart.yaml" ]; then
cd $chart_path
echo ""
echo ">> Building helm dependency ..."
helm dependency build --skip-refresh
echo ""
echo ">> Linting helm ..."
helm lint --namespace "$chart_name"
else
echo ""
echo ">> Directory $chart_path does not contain a Chart.yaml. Skipping ..."
echo ""
fi
done
- name: ntfy Failed
uses: niniyas/ntfy-action@master
if: failure()
with:
url: '${{ secrets.NTFY_URL }}'
topic: '${{ secrets.NTFY_TOPIC }}'
title: 'Test Failure - Infrastructure'
priority: 3
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,failed
details: 'Helm linting on Pull Request for Infrastructure has failed!'
icon: 'https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png'
actions: '[{"action": "view", "label": "Open Gitea", "url": "https://gitea.alexlebens.dev/alexlebens/infrastructure/actions?workflow=lint-test-helm-pull.yaml", "clear": true}]'
image: true


@@ -38,13 +38,6 @@ jobs:
with:
token: ${{ secrets.GITEA_TOKEN }}
version: v3.17.2 # Pending https://github.com/helm/helm/pull/30743
cache: true
- name: Configure Kubeconfig
uses: azure/k8s-set-context@v4
with:
method: kubeconfig
kubeconfig: ${{ secrets.KUBECONFIG }}
- name: Prepare Manifest Branch
id: prepare-manifest-branch
@@ -72,14 +65,13 @@ jobs:
RENDER_DIR=()
echo ">> Checking for changes from HEAD^..HEAD ..."
GIT_DIFF=$(git diff --name-only HEAD^..HEAD | xargs -I {} dirname {} | sort -u | grep -E "clusters/[^/]+/helm/[^/]+")
GIT_DIFF=$(git diff --name-only HEAD^..HEAD | xargs -I {} dirname {} | sort -u | grep "clusters/cl01tl/helm/")
if [ -n "${GIT_DIFF}" ]; then
echo ">> Changes detected:"
echo "$GIT_DIFF"
for path in $GIT_DIFF; do
RENDER_DIR+=$(echo "$path" | awk -F '/' '{print $4}')
RENDER_DIR+=$(echo " ")
done
else
@@ -89,13 +81,13 @@ jobs:
if [ -n "${RENDER_DIR}" ]; then
echo ">> Directories to Render:"
echo "$(echo "${RENDER_DIR}" | sort -u)"
echo "$(printf "%s\n" "${RENDER_DIR[@]}" | sort -u)"
echo "----"
echo "changes-detected=true" >> $GITEA_OUTPUT
echo "render-dir<<EOF" >> $GITEA_OUTPUT
echo "$(echo "${RENDER_DIR}" | sort -u)" >> $GITEA_OUTPUT
echo "$(printf "%s\n" "${RENDER_DIR[@]}" | sort -u)" >> $GITEA_OUTPUT
echo "EOF" >> $GITEA_OUTPUT
else
echo "changes-detected=false" >> $GITEA_OUTPUT
@@ -113,13 +105,7 @@ jobs:
helm dependency list --max-col-width 120 ${MAIN_DIR}/clusters/${CLUSTER}/helm/$dir 2> /dev/null \
| tail +2 | head -n -1 \
| awk '{ print "helm repo add " $1 " " $3 }' \
| while read cmd; do
if [[ "$cmd" == "*oci://*" ]]; then
echo ">> Ignoring OCI repo"
else
echo "$cmd" | sh;
fi
done || true
| while read cmd; do echo "$cmd" | sh; done || true
done
if helm repo list | tail +2 | read -r; then
@@ -161,23 +147,15 @@ jobs:
chart_path=${MAIN_DIR}/clusters/${CLUSTER}/helm/$dir
chart_name=$(basename "$chart_path")
echo ""
echo ""
echo ">> Rendering chart: $chart_name"
echo ">> Chart path $chart_path"
if [ -f "$chart_path/Chart.yaml" ]; then
OUTPUT_FOLDER="${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name/"
TEMPLATE=""
mkdir -p ${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name
OUTPUT_FILE="${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name/$chart_name.yaml"
cd $chart_path
echo ""
echo ">> Updating helm dependency ..."
helm dependency update --skip-refresh
echo ""
echo ">> Building helm dependency ..."
helm dependency build --skip-refresh
@@ -188,36 +166,36 @@ jobs:
echo ""
echo ">> Rendering templates ..."
case "$chart_name" in
"stack")
echo ""
echo ">> Special Rendering for stack into argocd namespace ..."
TEMPLATE=$(helm template $chart_name ./ --namespace argocd --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
echo ">> Special Rendering for stack ..."
helm template stack ./ --namespace argocd --include-crds > "$OUTPUT_FILE"
;;
"cilium" | "coredns" | "metrics-server" |"prometheus-operator-crds")
echo ""
echo ">> Special Rendering for $chart_name into kube-system namespace ..."
TEMPLATE=$(helm template $chart_name ./ --namespace kube-system --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
"cilium")
echo ">> Special Rendering for cilium ..."
helm template cilium ./ --namespace kube-system --include-crds > "$OUTPUT_FILE"
;;
"coredns")
echo ">> Special Rendering for coredns ..."
helm template coredns ./ --namespace kube-system --include-crds > "$OUTPUT_FILE"
;;
"metrics-server")
echo ">> Special Rendering for metrics-server ..."
helm template metrics-server ./ --namespace kube-system --include-crds > "$OUTPUT_FILE"
;;
"prometheus-operator-crds")
echo ">> Special Rendering for prometheus-operator-crds ..."
helm template prometheus-operator-crds ./ --namespace kube-system --include-crds > "$OUTPUT_FILE"
;;
*)
echo ""
echo ">> Standard Rendering for $chart_name ..."
TEMPLATE=$(helm template "$chart_name" ./ --namespace "$chart_name" --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
helm template "$chart_name" ./ --namespace "$chart_name" --include-crds > "$OUTPUT_FILE"
;;
esac
echo ""
echo ">> Formating rendered template ..."
echo "$TEMPLATE" | yq '... comments=""' | yq 'select(. != null)' | yq -s '"'"$OUTPUT_FOLDER"'" + .kind + "-" + .metadata.name + ".yaml"'
# Strip comments again to ensure formatting correctness
for file in "$OUTPUT_FOLDER"/*; do
yq -i '... comments=""' $file
done
echo ""
echo ">> Manifests for $chart_name rendered to $OUTPUT_FOLDER"
ls $OUTPUT_FOLDER
echo ">> Manifests for $chart_name rendered to $OUTPUT_FILE"
echo ""
else
echo ""
@@ -234,16 +212,13 @@ jobs:
run: |
cd ${MANIFEST_DIR}
GIT_CHANGES=$(git status --porcelain)
if [ -n "$GIT_CHANGES" ]; then
if git status --porcelain | grep -q .; then
echo ">> Changes detected"
git status --porcelain
echo "changes-detected=true" >> $GITEA_OUTPUT
else
echo ">> No changes detected, skipping PR creation"
exit 0
fi
echo "----"
@@ -283,9 +258,9 @@ jobs:
PAYLOAD=$( jq -n \
--arg head "${BRANCH_NAME}" \
--arg base "${BASE_BRANCH}" \
--arg title "Automated Manifest Update - Automerge" \
--arg title "Automated Manifest Update" \
--arg body "This PR contains newly rendered Kubernetes manifests automatically generated by the CI workflow. This is expected to be automerged." \
'{head: $head, base: $base, title: $title, body: $body}' )
'{head: $head, base: $base, title: $title, body: $body}' )
echo ">> Creating PR from branch ${BRANCH_NAME} into ${BASE_BRANCH}"
echo ">> With Endpoint of:"
@@ -317,15 +292,10 @@ jobs:
if [ "$HTTP_STATUS" == "201" ]; then
echo ">> Pull Request created successfully!"
PR_URL=$(cat response_body.json | jq -r .html_url)
echo ">> Pull Request URL: $PR_URL"
echo "pull-request-url=${PR_URL}" >> $GITEA_OUTPUT
PR_NUMBER=$(cat response_body.json | jq -r .number)
echo ">> Pull Request Number: $PR_NUMBER"
echo "pull-request-number=${PR_NUMBER}" >> $GITEA_OUTPUT
PR_ID=$(cat response_body.json | jq -r .id)
echo "pull-request-id=${PR_ID}" >> $GITEA_OUTPUT
echo "pull-request-operation=created" >> $GITEA_OUTPUT
elif [ "$HTTP_STATUS" == "422" ]; then
@@ -348,17 +318,18 @@ jobs:
GITEA_TOKEN: ${{ secrets.BOT_TOKEN }}
GITEA_URL: ${{ secrets.REPO_URL }}
BRANCH_NAME: ${{ steps.prepare-manifest-branch.outputs.BRANCH_NAME }}
PR_NUMBER: ${{ steps.create-pull-request.outputs.pull-request-number }}
PR_ID: ${{ steps.prepare-manifest-branch.outputs.pull-request-id }}
run: |
cd ${MANIFEST_DIR}
API_ENDPOINT="${GITEA_URL}/api/v1/repos/${{ gitea.repository }}/pulls/${PR_NUMBER}/merge"
API_ENDPOINT="${GITEA_URL}/api/v1/repos/${{ gitea.repository }}/pulls/${PR_ID}/merge"
PAYLOAD=$( jq -n \
--arg Do "merge" \
'{Do: $Do}' )
--arg delete_branch_after_merge "true" \
'{Do: $Do, delete_branch_after_merge: $delete_branch_after_merge}' )
echo ">> Merging PR with ID: ${PR_NUMBER}"
echo ">> Merging PR with ID: ${PR_ID}"
echo ">> With Endpoint of:"
echo "$API_ENDPOINT"
echo ">> With Payload of:"


@@ -1,9 +1,6 @@
name: render-manifests-dispatch
on:
schedule:
- cron: '0 3 * * *'
workflow_dispatch:
env:
@@ -35,13 +32,6 @@ jobs:
with:
token: ${{ secrets.GITEA_TOKEN }}
version: v3.17.2 # Pending https://github.com/helm/helm/pull/30743
cache: true
- name: Configure Kubeconfig
uses: azure/k8s-set-context@v4
with:
method: kubeconfig
kubeconfig: ${{ secrets.KUBECONFIG }}
- name: Prepare Manifest Branch
run: |
@@ -77,13 +67,13 @@ jobs:
if [ -n "${RENDER_DIR}" ]; then
echo ">> Directories to Render:"
echo "$(echo "${RENDER_DIR}" | sort -u)"
echo "$(printf "%s\n" "${RENDER_DIR[@]}" | sort -u)"
echo "----"
echo "changes-detected=true" >> $GITEA_OUTPUT
echo "render-dir<<EOF" >> $GITEA_OUTPUT
echo "$(echo "${RENDER_DIR}" | sort -u)" >> $GITEA_OUTPUT
echo "$(printf "%s\n" "${RENDER_DIR[@]}" | sort -u)" >> $GITEA_OUTPUT
echo "EOF" >> $GITEA_OUTPUT
else
echo "changes-detected=false" >> $GITEA_OUTPUT
@@ -101,13 +91,7 @@ jobs:
helm dependency list --max-col-width 120 ${MAIN_DIR}/clusters/${CLUSTER}/helm/$dir 2> /dev/null \
| tail +2 | head -n -1 \
| awk '{ print "helm repo add " $1 " " $3 }' \
| while read cmd; do
if [[ "$cmd" == "*oci://*" ]]; then
echo ">> Ignoring OCI repo"
else
echo "$cmd" | sh;
fi
done || true
| while read cmd; do echo "$cmd" | sh; done || true
done
if helm repo list | tail +2 | read -r; then
@@ -117,6 +101,24 @@ jobs:
echo "----"
- name: Remove Changed Manifest Files
if: steps.check-dir-changes.outputs.changes-detected == 'true'
env:
RENDER_DIR: ${{ steps.check-dir-changes.outputs.render-dir }}
run: |
cd ${MANIFEST_DIR}
echo ">> Remove manfiest files and rebuild from source ..."
for dir in ${RENDER_DIR}; do
chart_path=${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$dir
echo "$chart_path"
rm -rf $chart_path/*
done
echo "----"
- name: Render Helm Manifests
id: render-manifests
if: steps.check-dir-changes.outputs.changes-detected == 'true'
@@ -131,23 +133,15 @@ jobs:
chart_path=${MAIN_DIR}/clusters/${CLUSTER}/helm/$dir
chart_name=$(basename "$chart_path")
echo ""
echo ""
echo ">> Rendering chart: $chart_name"
echo ">> Chart path $chart_path"
if [ -f "$chart_path/Chart.yaml" ]; then
OUTPUT_FOLDER="${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name/"
TEMPLATE=""
mkdir -p ${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name
OUTPUT_FILE="${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name/$chart_name.yaml"
cd $chart_path
echo ""
echo ">> Updating helm dependency ..."
helm dependency update --skip-refresh
echo ""
echo ">> Building helm dependency ..."
helm dependency build --skip-refresh
@@ -158,36 +152,36 @@ jobs:
echo ""
echo ">> Rendering templates ..."
case "$chart_name" in
"stack")
echo ""
echo ">> Special Rendering for stack into argocd namespace ..."
TEMPLATE=$(helm template $chart_name ./ --namespace argocd --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
echo ">> Special Rendering for stack ..."
helm template stack ./ --namespace argocd --include-crds > "$OUTPUT_FILE"
;;
"cilium" | "coredns" | "metrics-server" |"prometheus-operator-crds")
echo ""
echo ">> Special Rendering for $chart_name into kube-system namespace ..."
TEMPLATE=$(helm template $chart_name ./ --namespace kube-system --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
"cilium")
echo ">> Special Rendering for cilium ..."
helm template cilium ./ --namespace kube-system --include-crds > "$OUTPUT_FILE"
;;
"coredns")
echo ">> Special Rendering for coredns ..."
helm template coredns ./ --namespace kube-system --include-crds > "$OUTPUT_FILE"
;;
"metrics-server")
echo ">> Special Rendering for metrics-server ..."
helm template metrics-server ./ --namespace kube-system --include-crds > "$OUTPUT_FILE"
;;
"prometheus-operator-crds")
echo ">> Special Rendering for prometheus-operator-crds ..."
helm template prometheus-operator-crds ./ --namespace kube-system --include-crds > "$OUTPUT_FILE"
;;
*)
echo ""
echo ">> Standard Rendering for $chart_name ..."
TEMPLATE=$(helm template "$chart_name" ./ --namespace "$chart_name" --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
helm template "$chart_name" ./ --namespace "$chart_name" --include-crds > "$OUTPUT_FILE"
;;
esac
echo ""
echo ">> Formating rendered template ..."
echo "$TEMPLATE" | yq '... comments=""' | yq 'select(. != null)' | yq -s '"'"$OUTPUT_FOLDER"'" + .kind + "-" + .metadata.name + ".yaml"'
# Strip comments again to ensure formatting correctness
for file in "$OUTPUT_FOLDER"/*; do
yq -i '... comments=""' $file
done
echo ""
echo ">> Manifests for $chart_name rendered to $OUTPUT_FOLDER"
ls $OUTPUT_FOLDER
echo ">> Manifests for $chart_name rendered to $OUTPUT_FILE"
echo ""
else
echo ""
@@ -204,16 +198,13 @@ jobs:
run: |
cd ${MANIFEST_DIR}
GIT_CHANGES=$(git status --porcelain)
if [ -n "$GIT_CHANGES" ]; then
if git status --porcelain | grep -q .; then
echo ">> Changes detected"
git status --porcelain
echo "changes-detected=true" >> $GITEA_OUTPUT
else
echo ">> No changes detected, skipping PR creation"
exit 0
fi
echo "----"
@@ -309,7 +300,7 @@ jobs:
--arg assignee "${ASSIGNEE}" \
--arg title "Automated Manifest Update" \
--arg body "This PR contains newly rendered Kubernetes manifests automatically generated by the CI workflow." \
'{head: $head, base: $base, assignee: $assignee, title: $title, body: $body}' )
'{head: $head, base: $base, assignee: $assignee, title: $title, body: $body}' )
echo ">> Creating PR from branch ${HEAD_BRANCH} into ${BASE_BRANCH}"
echo ">> With Endpoint of:"


@@ -39,13 +39,6 @@ jobs:
with:
token: ${{ secrets.GITEA_TOKEN }}
version: v3.17.2 # Pending https://github.com/helm/helm/pull/30743
cache: true
- name: Configure Kubeconfig
uses: azure/k8s-set-context@v4
with:
method: kubeconfig
kubeconfig: ${{ secrets.KUBECONFIG }}
- name: Prepare Manifest Branch
run: |
@@ -76,15 +69,14 @@ jobs:
RENDER_DIR=()
echo ">> Checking for changes from HEAD^..HEAD ..."
GIT_DIFF=$(git diff --name-only HEAD^..HEAD | xargs -I {} dirname {} | sort -u | grep -E "clusters/[^/]+/helm/[^/]+")
echo ">> Checking for changes ..."
GIT_DIFF=$(git diff --name-only ${{ gitea.event.before }}..HEAD | xargs -I {} dirname {} | sort -u | grep "clusters/cl01tl/helm/")
if [ -n "${GIT_DIFF}" ]; then
echo ">> Changes detected:"
echo "$GIT_DIFF"
for path in $GIT_DIFF; do
RENDER_DIR+=$(echo "$path" | awk -F '/' '{print $4}')
RENDER_DIR+=$(echo " ")
done
else
@@ -94,13 +86,13 @@ jobs:
if [ -n "${RENDER_DIR}" ]; then
echo ">> Directories to Render:"
echo "$(echo "${RENDER_DIR}" | sort -u)"
echo "$(printf "%s\n" "${RENDER_DIR[@]}" | sort -u)"
echo "----"
echo "changes-detected=true" >> $GITEA_OUTPUT
echo "render-dir<<EOF" >> $GITEA_OUTPUT
echo "$(echo "${RENDER_DIR}" | sort -u)" >> $GITEA_OUTPUT
echo "$(printf "%s\n" "${RENDER_DIR[@]}" | sort -u)" >> $GITEA_OUTPUT
echo "EOF" >> $GITEA_OUTPUT
else
echo "changes-detected=false" >> $GITEA_OUTPUT
@@ -118,13 +110,7 @@ jobs:
helm dependency list --max-col-width 120 ${MAIN_DIR}/clusters/${CLUSTER}/helm/$dir 2> /dev/null \
| tail +2 | head -n -1 \
| awk '{ print "helm repo add " $1 " " $3 }' \
| while read cmd; do
if [[ "$cmd" == "*oci://*" ]]; then
echo ">> Ignoring OCI repo"
else
echo "$cmd" | sh;
fi
done || true
| while read cmd; do echo "$cmd" | sh; done || true
done
if helm repo list | tail +2 | read -r; then
@@ -166,23 +152,15 @@ jobs:
chart_path=${MAIN_DIR}/clusters/${CLUSTER}/helm/$dir
chart_name=$(basename "$chart_path")
echo ""
echo ""
echo ">> Rendering chart: $chart_name"
echo ">> Chart path $chart_path"
if [ -f "$chart_path/Chart.yaml" ]; then
OUTPUT_FOLDER="${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name/"
TEMPLATE=""
mkdir -p ${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name
OUTPUT_FILE="${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name/$chart_name.yaml"
cd $chart_path
echo ""
echo ">> Updating helm dependency ..."
helm dependency update --skip-refresh
echo ""
echo ">> Building helm dependency ..."
helm dependency build --skip-refresh
@@ -193,36 +171,36 @@ jobs:
echo ""
echo ">> Rendering templates ..."
case "$chart_name" in
"stack")
echo ""
echo ">> Special Rendering for stack into argocd namespace ..."
TEMPLATE=$(helm template $chart_name ./ --namespace argocd --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
echo ">> Special Rendering for stack ..."
helm template stack ./ --namespace argocd --include-crds > "$OUTPUT_FILE"
;;
"cilium" | "coredns" | "metrics-server" |"prometheus-operator-crds")
echo ""
echo ">> Special Rendering for $chart_name into kube-system namespace ..."
TEMPLATE=$(helm template $chart_name ./ --namespace kube-system --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
"cilium")
echo ">> Special Rendering for cilium ..."
helm template cilium ./ --namespace kube-system --include-crds > "$OUTPUT_FILE"
;;
"coredns")
echo ">> Special Rendering for coredns ..."
helm template coredns ./ --namespace kube-system --include-crds > "$OUTPUT_FILE"
;;
"metrics-server")
echo ">> Special Rendering for metrics-server ..."
helm template metrics-server ./ --namespace kube-system --include-crds > "$OUTPUT_FILE"
;;
"prometheus-operator-crds")
echo ">> Special Rendering for prometheus-operator-crds ..."
helm template prometheus-operator-crds ./ --namespace kube-system --include-crds > "$OUTPUT_FILE"
;;
*)
echo ""
echo ">> Standard Rendering for $chart_name ..."
TEMPLATE=$(helm template "$chart_name" ./ --namespace "$chart_name" --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
helm template "$chart_name" ./ --namespace "$chart_name" --include-crds > "$OUTPUT_FILE"
;;
esac
echo ""
echo ">> Formating rendered template ..."
echo "$TEMPLATE" | yq '... comments=""' | yq 'select(. != null)' | yq -s '"'"$OUTPUT_FOLDER"'" + .kind + "-" + .metadata.name + ".yaml"'
# Strip comments again to ensure formatting correctness
for file in "$OUTPUT_FOLDER"/*; do
yq -i '... comments=""' $file
done
echo ""
echo ">> Manifests for $chart_name rendered to $OUTPUT_FOLDER"
ls $OUTPUT_FOLDER
echo ">> Manifests for $chart_name rendered to $OUTPUT_FILE"
echo ""
else
echo ""
@@ -239,16 +217,13 @@ jobs:
run: |
cd ${MANIFEST_DIR}
GIT_CHANGES=$(git status --porcelain)
if [ -n "$GIT_CHANGES" ]; then
if git status --porcelain | grep -q .; then
echo ">> Changes detected"
git status --porcelain
echo "changes-detected=true" >> $GITEA_OUTPUT
else
echo ">> No changes detected, skipping PR creation"
exit 0
fi
echo "----"
@@ -344,7 +319,7 @@ jobs:
--arg assignee "${ASSIGNEE}" \
--arg title "Automated Manifest Update" \
--arg body "This PR contains newly rendered Kubernetes manifests automatically generated by the CI workflow." \
'{head: $head, base: $base, assignee: $assignee, title: $title, body: $body}' )
'{head: $head, base: $base, assignee: $assignee, title: $title, body: $body}' )
echo ">> Creating PR from branch ${HEAD_BRANCH} into ${BASE_BRANCH}"
echo ">> With Endpoint of:"

View File

@@ -18,7 +18,6 @@ env:
jobs:
render-manifests-push:
runs-on: ubuntu-js
if: gitea.event.commits[0].author.username != 'renovate-bot'
steps:
- name: Checkout Main
uses: actions/checkout@v6
@@ -32,20 +31,36 @@ jobs:
ref: manifests
path: infrastructure-manifests
- name: Check if Commit is from Push
id: check-push-type
run: |
echo "">> Checking if Commit is from Push ..."
git checkout ${{ gitea.event.after }}
PARENT_COUNT=$(git rev-list --parents -n 1 ${{ gitea.event.after }} | wc -w)
echo ">> Commit SHA: ${{ gitea.event.after }}"
echo ">> Word Count (Parent Count + 1): $PARENT_COUNT"
if [ "$PARENT_COUNT" -eq 2 ]; then
echo ">> Commit detected to be a direct commit to main"
fi
echo "parent-count=$PARENT_COUNT" >> "$GITHUB_OUTPUT"
- name: Run CI for Direct Pushes Only
if: steps.check-push-type.outputs.parent-count == '2'
run: |
echo "This is a direct push (non-merge). Running CI/Tests in Gitea."
- name: Set up Helm
uses: azure/setup-helm@v4
if: steps.check-push-type.outputs.parent-count == '2'
with:
token: ${{ secrets.GITEA_TOKEN }}
version: v3.17.2 # Pending https://github.com/helm/helm/pull/30743
cache: true
- name: Configure Kubeconfig
uses: azure/k8s-set-context@v4
with:
method: kubeconfig
kubeconfig: ${{ secrets.KUBECONFIG }}
- name: Prepare Manifest Branch
if: steps.check-push-type.outputs.parent-count == '2'
run: |
cd ${MANIFEST_DIR}
@@ -69,20 +84,20 @@ jobs:
- name: Check which Directories have Changes
id: check-dir-changes
if: steps.check-push-type.outputs.parent-count == '2'
run: |
cd ${MAIN_DIR}
RENDER_DIR=()
echo ">> Checking for changes ..."
GIT_DIFF=$(git diff --name-only ${{ gitea.event.before }}..HEAD | xargs -I {} dirname {} | sort -u | grep -E "clusters/[^/]+/helm/[^/]+")
GIT_DIFF=$(git diff --name-only ${{ gitea.event.before }}..HEAD | xargs -I {} dirname {} | sort -u | grep "clusters/cl01tl/helm/")
if [ -n "${GIT_DIFF}" ]; then
echo ">> Changes detected:"
echo "$GIT_DIFF"
for path in $GIT_DIFF; do
RENDER_DIR+=$(echo "$path" | awk -F '/' '{print $4}')
RENDER_DIR+=$(echo " ")
done
else
@@ -92,13 +107,13 @@ jobs:
if [ -n "${RENDER_DIR}" ]; then
echo ">> Directories to Render:"
echo "$(echo "${RENDER_DIR}" | sort -u)"
echo "$(printf "%s\n" "${RENDER_DIR[@]}" | sort -u)"
echo "----"
echo "changes-detected=true" >> $GITEA_OUTPUT
echo "render-dir<<EOF" >> $GITEA_OUTPUT
echo "$(echo "${RENDER_DIR}" | sort -u)" >> $GITEA_OUTPUT
echo "$(printf "%s\n" "${RENDER_DIR[@]}" | sort -u)" >> $GITEA_OUTPUT
echo "EOF" >> $GITEA_OUTPUT
else
echo "changes-detected=false" >> $GITEA_OUTPUT
@@ -116,13 +131,7 @@ jobs:
helm dependency list --max-col-width 120 ${MAIN_DIR}/clusters/${CLUSTER}/helm/$dir 2> /dev/null \
| tail +2 | head -n -1 \
| awk '{ print "helm repo add " $1 " " $3 }' \
| while read cmd; do
if [[ "$cmd" == "*oci://*" ]]; then
echo ">> Ignoring OCI repo"
else
echo "$cmd" | sh;
fi
done || true
| while read cmd; do echo "$cmd" | sh; done || true
done
if helm repo list | tail +2 | read -r; then
@@ -164,23 +173,15 @@ jobs:
chart_path=${MAIN_DIR}/clusters/${CLUSTER}/helm/$dir
chart_name=$(basename "$chart_path")
echo ""
echo ""
echo ">> Rendering chart: $chart_name"
echo ">> Chart path $chart_path"
if [ -f "$chart_path/Chart.yaml" ]; then
OUTPUT_FOLDER="${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name/"
TEMPLATE=""
mkdir -p ${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name
OUTPUT_FILE="${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name/$chart_name.yaml"
cd $chart_path
echo ""
echo ">> Updating helm dependency ..."
helm dependency update --skip-refresh
echo ""
echo ">> Building helm dependency ..."
helm dependency build --skip-refresh
@@ -191,36 +192,36 @@ jobs:
echo ""
echo ">> Rendering templates ..."
case "$chart_name" in
"stack")
echo ""
echo ">> Special Rendering for stack into argocd namespace ..."
TEMPLATE=$(helm template $chart_name ./ --namespace argocd --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
echo ">> Special Rendering for stack ..."
helm template stack ./ --namespace argocd --include-crds > "$OUTPUT_FILE"
;;
"cilium" | "coredns" | "metrics-server" |"prometheus-operator-crds")
echo ""
echo ">> Special Rendering for $chart_name into kube-system namespace ..."
TEMPLATE=$(helm template $chart_name ./ --namespace kube-system --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
"cilium")
echo ">> Special Rendering for cilium ..."
helm template cilium ./ --namespace kube-system --include-crds > "$OUTPUT_FILE"
;;
"coredns")
echo ">> Special Rendering for coredns ..."
helm template coredns ./ --namespace kube-system --include-crds > "$OUTPUT_FILE"
;;
"metrics-server")
echo ">> Special Rendering for metrics-server ..."
helm template metrics-server ./ --namespace kube-system --include-crds > "$OUTPUT_FILE"
;;
"prometheus-operator-crds")
echo ">> Special Rendering for prometheus-operator-crds ..."
helm template prometheus-operator-crds ./ --namespace kube-system --include-crds > "$OUTPUT_FILE"
;;
*)
echo ""
echo ">> Standard Rendering for $chart_name ..."
TEMPLATE=$(helm template "$chart_name" ./ --namespace "$chart_name" --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
helm template "$chart_name" ./ --namespace "$chart_name" --include-crds > "$OUTPUT_FILE"
;;
esac
echo ""
echo ">> Formating rendered template ..."
echo "$TEMPLATE" | yq '... comments=""' | yq 'select(. != null)' | yq -s '"'"$OUTPUT_FOLDER"'" + .kind + "-" + .metadata.name + ".yaml"'
# Strip comments again to ensure formatting correctness
for file in "$OUTPUT_FOLDER"/*; do
yq -i '... comments=""' $file
done
echo ""
echo ">> Manifests for $chart_name rendered to $OUTPUT_FOLDER"
ls $OUTPUT_FOLDER
echo ">> Manifests for $chart_name rendered to $OUTPUT_FILE"
echo ""
else
echo ""
@@ -237,16 +238,13 @@ jobs:
run: |
cd ${MANIFEST_DIR}
GIT_CHANGES=$(git status --porcelain)
if [ -n "$GIT_CHANGES" ]; then
if git status --porcelain | grep -q .; then
echo ">> Changes detected"
git status --porcelain
echo "changes-detected=true" >> $GITEA_OUTPUT
else
echo ">> No changes detected, skipping PR creation"
exit 0
fi
echo "----"
@@ -342,7 +340,7 @@ jobs:
--arg assignee "${ASSIGNEE}" \
--arg title "Automated Manifest Update" \
--arg body "This PR contains newly rendered Kubernetes manifests automatically generated by the CI workflow." \
'{head: $head, base: $base, assignee: $assignee, title: $title, body: $body}' )
'{head: $head, base: $base, assignee: $assignee, title: $title, body: $body}' )
echo ">> Creating PR from branch ${HEAD_BRANCH} into ${BASE_BRANCH}"
echo ">> With Endpoint of:"

.gitignore

@@ -1,4 +1,3 @@
/**/archive/
/**/charts/
/**/manifests/
/**/tmpcharts*/


@@ -2,12 +2,6 @@
GitOps defined infrastructure for the alexlebens.net domain.
## Stack-cl01tl
https://argocd.alexlebens.net/api/badge?name=stack-cl01tl&revision=true&showAppName=true
App-of-Apps Application for cl01tl
## License
This project is licensed under the terms of the Apache 2.0 License.


@@ -1,9 +1,6 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.0
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
digest: sha256:ab1b7688f41127681b66c0673c713b651c58ac7c06f123d53ea871b6df9cda9f
generated: "2026-01-07T16:02:51.781699411Z"
version: 4.4.0
digest: sha256:b5d823171e1b4dc1d3856f782f0c67cbb5d49e4fa170df2f21b06303c7aff7f5
generated: "2025-11-30T21:05:19.732832-06:00"


@@ -16,11 +16,6 @@ dependencies:
- name: app-template
alias: actual
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.0
- name: volsync-target
alias: volsync-target-data
version: 0.7.0
repository: oci://harbor.alexlebens.net/helm-charts
version: 4.4.0
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/actual-budget.png
# renovate: github=actualbudget/actual
appVersion: 25.12.0
appVersion: 25.11.0


@@ -0,0 +1,55 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: actual-data-backup-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: actual-data-backup-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/actual/actual-data"
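# Note: the backticks make Helm emit the inner "{{ .BUCKET_ENDPOINT }}" literally, leaving it
# for the External Secrets v2 template engine to resolve at sync time from the BUCKET_ENDPOINT
# key fetched below; only the "/actual/actual-data" suffix is fixed here.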
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key


@@ -0,0 +1,28 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-actual
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: http-route-actual
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- actual.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: actual
port: 80
weight: 100


@@ -0,0 +1,25 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: actual-data-backup-source
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: actual-data-backup-source
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
sourcePVC: actual-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: actual-data-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot


@@ -9,7 +9,7 @@ actual:
main:
image:
repository: ghcr.io/actualbudget/actual
tag: 26.1.0
tag: 25.11.0
pullPolicy: IfNotPresent
env:
- name: TZ
@@ -42,27 +42,6 @@ actual:
port: 80
targetPort: 5006
protocol: HTTP
route:
main:
kind: HTTPRoute
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- actual.alexlebens.net
rules:
- backendRefs:
- group: ''
kind: Service
name: actual
port: 80
weight: 100
matches:
- path:
type: PathPrefix
value: /
persistence:
data:
forceRename: actual-data
@@ -75,13 +54,3 @@ actual:
main:
- path: /data
readOnly: false
volsync-target-data:
pvcTarget: actual-data
local:
enabled: true
schedule: 0 8 * * *
remote:
enabled: false
external:
enabled: true
schedule: 0 9 * * *


@@ -1,12 +1,12 @@
dependencies:
- name: argo-workflows
repository: https://argoproj.github.io/argo-helm
version: 0.47.0
version: 0.45.28
- name: argo-events
repository: https://argoproj.github.io/argo-helm
version: 2.4.19
version: 2.4.17
- name: postgres-cluster
repository: oci://harbor.alexlebens.net/helm-charts
version: 7.4.5
digest: sha256:c36845d5688e28e6f6c6b6f2e17b40514f2adb937f2c6077deadfec9e6b294fe
generated: "2026-01-14T21:30:10.440164554Z"
version: 6.16.0
digest: sha256:b00fd479a9d9e606661b3799182c8e24395b4f531f8d2bda87bdc5db16a8d66c
generated: "2025-12-01T19:55:40.18149-06:00"


@@ -18,15 +18,14 @@ maintainers:
- name: alexlebens
dependencies:
- name: argo-workflows
version: 0.47.0
version: 0.45.28
repository: https://argoproj.github.io/argo-helm
- name: argo-events
version: 2.4.19
version: 2.4.17
repository: https://argoproj.github.io/argo-helm
- name: postgres-cluster
alias: postgres-18-cluster
version: 7.4.5
alias: postgres-17-cluster
version: 6.16.0
repository: oci://harbor.alexlebens.net/helm-charts
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/argo-cd.png
# renovate: github=argoproj/argo-workflows
appVersion: v3.7.6
appVersion: v3.6.7


@@ -26,3 +26,70 @@ spec:
key: /authentik/oidc/argo-workflows
metadataPolicy: None
property: client
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: argo-workflows-postgresql-17-cluster-backup-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: argo-workflows-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: argo-workflows-postgresql-17-cluster-backup-secret-garage
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: argo-workflows-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION


@@ -1,10 +1,10 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: argo-workflows
name: http-route-argo-workflows
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: argo-workflows
app.kubernetes.io/name: http-route-argo-workflows
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:


@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: garage-ps10rp
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: garage-ps10rp
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
annotations:
tailscale.com/tailnet-fqdn: garage-ps10rp.boreal-beaufort.ts.net
spec:
externalName: placeholder
type: ExternalName
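# Presumably rewritten by the Tailscale operator: given the tailnet-fqdn annotation above, the
# operator is expected to point this ExternalName Service at an egress proxy for
# garage-ps10rp.boreal-beaufort.ts.net, so "placeholder" is intentional and never used as-is.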


@@ -9,15 +9,15 @@ argo-workflows:
nodeStatusOffLoad: true
archive: true
postgresql:
host: argo-workflows-postgresql-18-cluster-rw
host: argo-workflows-postgresql-17-cluster-rw
port: 5432
database: app
tableName: app
userNameSecret:
name: argo-workflows-postgresql-18-cluster-app
name: argo-workflows-postgresql-17-cluster-app
key: username
passwordSecret:
name: argo-workflows-postgresql-18-cluster-app
name: argo-workflows-postgresql-17-cluster-app
key: password
ssl: false
sslMode: disable
@@ -59,6 +59,20 @@ argo-workflows:
useStaticCredentials: true
artifactRepository:
archiveLogs: false
s3: {}
# accessKeySecret:
# name: "{{ .Release.Name }}-minio"
# key: accesskey
# secretKeySecret:
# name: "{{ .Release.Name }}-minio"
# key: secretkey
# insecure: true
# bucket:
# endpoint:
# region:
# encryptionOptions:
# enableEncryption: true
argo-events:
controller:
resources:
@@ -75,33 +89,53 @@ argo-events:
requests:
cpu: 10m
memory: 128Mi
postgres-18-cluster:
postgres-17-cluster:
mode: recovery
cluster:
storage:
storageClass: local-path
walStorage:
storageClass: local-path
monitoring:
enabled: true
prometheusRule:
enabled: true
recovery:
method: objectStore
objectStore:
destinationPath: s3://postgres-backups/cl01tl/argo-workflows/argo-workflows-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
index: 1
endpointCredentials: argo-workflows-postgresql-17-cluster-backup-secret-garage
backup:
objectStore:
- name: garage-local
- name: external
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/argo-workflows/argo-workflows-postgresql-17-cluster
index: 1
destinationBucket: postgres-backups
externalSecretCredentialPath: /garage/home-infra/postgres-backups
retentionPolicy: "30d"
isWALArchiver: false
- name: garage-local
destinationPath: s3://postgres-backups/cl01tl/argo-workflows/argo-workflows-postgresql-17-cluster
index: 1
endpointURL: http://garage-main.garage:3900
endpointCredentials: argo-workflows-postgresql-17-cluster-backup-secret-garage
endpointCredentialsIncludeRegion: true
retentionPolicy: "3d"
isWALArchiver: true
# - name: garage-remote
# destinationPath: s3://postgres-backups/cl01tl/argo-workflows/argo-workflows-postgresql-17-cluster
# index: 1
# destinationBucket: postgres-backups
# externalSecretCredentialPath: /garage/home-infra/postgres-backups
# retentionPolicy: "90d"
# endpointURL: https://garage-ps10rp.boreal-beaufort.ts.net:3900
# endpointCredentials: argo-workflows-postgresql-17-cluster-backup-secret-garage
# endpointCredentialsIncludeRegion: true
# retentionPolicy: "30d"
# data:
# compression: bzip2
# - name: external
# index: 1
# endpointURL: https://nyc3.digitaloceanspaces.com
# destinationBucket: postgres-backups-ce540ddf106d186bbddca68a
# externalSecretCredentialPath: /garage/home-infra/postgres-backups
# isWALArchiver: false
scheduledBackups:
- name: daily-backup
suspend: false
schedule: "0 0 0 * * *"
backupName: external
- name: live-backup
suspend: false
immediate: true
@@ -109,11 +143,5 @@ postgres-18-cluster:
backupName: garage-local
# - name: weekly-backup
# suspend: true
# immediate: true
# schedule: "0 0 4 * * SAT"
# backupName: garage-remote
# - name: daily-backup
# suspend: true
# immediate: true
# schedule: "0 0 0 * * *"
# backupName: external


@@ -1,6 +1,6 @@
dependencies:
- name: argo-cd
repository: https://argoproj.github.io/argo-helm
version: 9.3.4
digest: sha256:006518c10fc1636a5b0398de90d4a7687ae6c0bf4626c41d11b2bc3ad48ff416
generated: "2026-01-14T23:02:04.617990687Z"
version: 9.1.5
digest: sha256:07f7f6d369af426cdd213ddbc58373a4e5b4f54724efd4612662d7da0315232d
generated: "2025-12-02T21:27:41.876154-06:00"

View File

@@ -15,8 +15,7 @@ maintainers:
- name: alexlebens
dependencies:
- name: argo-cd
version: 9.3.4
version: 9.1.5
repository: https://argoproj.github.io/argo-helm
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/argo-cd.png
# renovate: github=argoproj/argo-cd
appVersion: v3.2.1
appVersion: 3.0.0


@@ -50,39 +50,39 @@ spec:
metadataPolicy: None
property: token
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: argocd-gitea-repo-infrastructure-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: argocd-gitea-repo-infrastructure-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: type
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/argocd/credentials/repo/infrastructure
metadataPolicy: None
property: type
- secretKey: url
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/argocd/credentials/repo/infrastructure
metadataPolicy: None
property: url
- secretKey: sshPrivateKey
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/argocd/credentials/repo/infrastructure
metadataPolicy: None
property: sshPrivateKey
# ---
# apiVersion: external-secrets.io/v1
# kind: ExternalSecret
# metadata:
# name: argocd-gitea-repo-infrastructure-secret
# namespace: {{ .Release.Namespace }}
# labels:
# app.kubernetes.io/name: argocd-gitea-repo-infrastructure-secret
# app.kubernetes.io/instance: {{ .Release.Name }}
# app.kubernetes.io/part-of: {{ .Release.Name }}
# spec:
# secretStoreRef:
# kind: ClusterSecretStore
# name: vault
# data:
# - secretKey: type
# remoteRef:
# conversionStrategy: Default
# decodingStrategy: None
# key: /cl01tl/argocd/credentials/repo/infrastructure
# metadataPolicy: None
# property: type
# - secretKey: url
# remoteRef:
# conversionStrategy: Default
# decodingStrategy: None
# key: /cl01tl/argocd/credentials/repo/infrastructure
# metadataPolicy: None
# property: url
# - secretKey: sshPrivateKey
# remoteRef:
# conversionStrategy: Default
# decodingStrategy: None
# key: /cl01tl/argocd/credentials/repo/infrastructure
# metadataPolicy: None
# property: sshPrivateKey


@@ -0,0 +1,28 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-argocd
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: http-route-argocd
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- argocd.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: argocd-server
port: 80
weight: 100


@@ -25,10 +25,21 @@ argo-cd:
id: authentik
params:
server.insecure: true
controller.diff.server.side: true
rbac:
policy.csv: |
g, ArgoCD Admins, role:admin
cmp:
create: true
plugins:
cdk8s:
init:
command: [cdk8s]
args: [import]
generate:
command: [cdk8s, synth]
args: [--stdout]
discover:
fileName: "*.go"
controller:
replicas: 1
metrics:
@@ -49,7 +60,7 @@ argo-cd:
enabled: true
auth: false
redisSecretInit:
enabled: false
enabled: true
server:
replicas: 2
extensions:
@@ -65,22 +76,34 @@ argo-cd:
enabled: true
serviceMonitor:
enabled: true
httproute:
enabled: true
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- argocd.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
ingress:
enabled: false
repoServer:
replicas: 2
extraContainers:
- name: cmp-cdk8s
command:
- /var/run/argocd/argocd-cmp-server
image: ghcr.io/akuity/cdk8s-cmp-typescript:1.0
securityContext:
runAsNonRoot: true
runAsUser: 999
volumeMounts:
- mountPath: /var/run/argocd
name: var-files
- mountPath: /home/argocd/cmp-server/plugins
name: plugins
- mountPath: /home/argocd/cmp-server/config/plugin.yaml
subPath: cdk8s.yaml
name: argocd-cmp-cm
- mountPath: /tmp
name: cmp-tmp
volumes:
- name: argocd-cmp-cm
configMap:
name: argocd-cmp-cm
- name: cmp-tmp
emptyDir: {}
metrics:
enabled: true
serviceMonitor:
@@ -256,7 +279,7 @@ argo-cd:
- description: Application has degraded
send:
- app-health-degraded
when: app.status.health.status == 'Degraded'
      when: app.status.health.status == 'Degraded' and time.Now().Sub(time.Parse(app.status.health.lastTransitionTime)).Minutes() >= 15
trigger.on-sync-failed: |
- description: Application syncing has failed
send:

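The `configs.cmp.plugins.cdk8s` block above is what feeds the `cmp-cdk8s` sidecar added to the repo server: the chart renders each plugin entry into the `argocd-cmp-cm` ConfigMap, and the sidecar mounts that entry at `/home/argocd/cmp-server/config/plugin.yaml` via `subPath: cdk8s.yaml`. A rough sketch of the rendered ConfigMap entry, assuming the chart's usual ConfigManagementPlugin layout:

apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cmp-cm
data:
  cdk8s.yaml: |
    apiVersion: argoproj.io/v1alpha1
    kind: ConfigManagementPlugin
    metadata:
      name: cdk8s
    spec:
      init:
        command: [cdk8s]
        args: [import]
      generate:
        command: [cdk8s, synth]
        args: [--stdout]
      discover:
        fileName: "*.go"

With `discover.fileName: "*.go"`, any Application whose source directory contains Go files is matched by this plugin and rendered with `cdk8s synth --stdout` instead of Helm or Kustomize.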
View File

@@ -1,12 +1,6 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.0
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
digest: sha256:9ad54a69753a4bbfe53d1d7ed12551e89fe7bb8f90d96c1d3ff06ba200bc5202
generated: "2026-01-07T16:03:05.987463264Z"
version: 4.4.0
digest: sha256:f3a9990542f24965fadad0b5493059b78cdc3fae91c8214577fa6f41ca5f7de3
generated: "2025-11-30T21:05:21.317114-06:00"

View File

@@ -18,15 +18,6 @@ dependencies:
- name: app-template
alias: audiobookshelf
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.0
- name: volsync-target
alias: volsync-target-config
version: 0.7.0
repository: oci://harbor.alexlebens.net/helm-charts
- name: volsync-target
alias: volsync-target-metadata
version: 0.7.0
repository: oci://harbor.alexlebens.net/helm-charts
version: 4.4.0
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/audiobookshelf.png
# renovate: github=advplyr/audiobookshelf
appVersion: 2.31.0
appVersion: 2.21.0

View File

@@ -19,3 +19,117 @@ spec:
key: /cl01tl/audiobookshelf/apprise
metadataPolicy: None
property: ntfy-url
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: audiobookshelf-config-backup-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: audiobookshelf-config-backup-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/audiobookshelf/audiobookshelf-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: audiobookshelf-metadata-backup-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: audiobookshelf-metadata-backup-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/audiobookshelf/audiobookshelf-metadata"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key

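The unusual quoting in `RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/..."` is a two-stage template: Helm evaluates the outer expression and emits the backtick-quoted string verbatim, so the manifest applied to the cluster still contains a literal `{{ .BUCKET_ENDPOINT }}`, which the External Secrets v2 template engine then fills in from the keys it fetched. With `mergePolicy: Merge`, the fetched keys are kept alongside the templated one, so the resulting Secret (named after the ExternalSecret by default) ends up roughly like the sketch below; every value is a placeholder, not real data:

apiVersion: v1
kind: Secret
metadata:
  name: audiobookshelf-config-backup-secret
stringData:
  BUCKET_ENDPOINT: s3:https://bucket.example-endpoint            # hypothetical value pulled from Vault
  RESTIC_REPOSITORY: s3:https://bucket.example-endpoint/audiobookshelf/audiobookshelf-config
  RESTIC_PASSWORD: <redacted>
  AWS_DEFAULT_REGION: <redacted>
  AWS_ACCESS_KEY_ID: <redacted>
  AWS_SECRET_ACCESS_KEY: <redacted>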
View File

@@ -0,0 +1,28 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-audiobookshelf
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: http-route-audiobookshelf
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- audiobookshelf.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: audiobookshelf
port: 80
weight: 100

View File

@@ -1,5 +1,24 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: audiobookshelf-nfs-storage-backup
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: audiobookshelf-nfs-storage-backup
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
volumeMode: Filesystem
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: audiobookshelf-nfs-storage
namespace: {{ .Release.Namespace }}

View File

@@ -0,0 +1,52 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: audiobookshelf-config-backup-source
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: audiobookshelf-config-backup-source
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
sourcePVC: audiobookshelf-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: audiobookshelf-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: audiobookshelf-metadata-backup-source
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: audiobookshelf-metadata-backup-source
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
sourcePVC: audiobookshelf-metadata
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: audiobookshelf-metadata-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot

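These two ReplicationSources take nightly restic snapshots of the config and metadata PVCs and push them to the repositories defined by the backup secrets above. Restore is not covered by this changeset; as a hedged sketch (name, namespace, trigger, and copyMethod are assumptions), a matching VolSync ReplicationDestination to pull the config repository back into the existing claim would look roughly like:

apiVersion: volsync.backube/v1alpha1
kind: ReplicationDestination
metadata:
  name: audiobookshelf-config-restore          # hypothetical name
  namespace: audiobookshelf                    # assumed release namespace
spec:
  trigger:
    manual: restore-once                       # one-shot restore, re-run by changing the string
  restic:
    repository: audiobookshelf-config-backup-secret
    destinationPVC: audiobookshelf-config      # restore in place into the PVC the app already uses
    copyMethod: Direct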
View File

@@ -0,0 +1,19 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: audiobookshelf-apprise
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: audiobookshelf-apprise
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
endpoints:
- port: apprise
interval: 30s
scrapeTimeout: 15s
path: /metrics
selector:
matchLabels:
app.kubernetes.io/name: audiobookshelf
app.kubernetes.io/instance: {{ .Release.Name }}

View File

@@ -9,7 +9,7 @@ audiobookshelf:
main:
image:
repository: ghcr.io/advplyr/audiobookshelf
tag: 2.32.1
tag: 2.31.0
pullPolicy: IfNotPresent
env:
- name: TZ
@@ -21,7 +21,7 @@ audiobookshelf:
apprise-api:
image:
repository: caronc/apprise
tag: 1.3.0
tag: 1.2.6
pullPolicy: IfNotPresent
env:
- name: TZ
@@ -57,43 +57,8 @@ audiobookshelf:
port: 8000
targetPort: 8000
protocol: HTTP
serviceMonitor:
main:
selector:
matchLabels:
app.kubernetes.io/name: audiobookshelf
app.kubernetes.io/instance: audiobookshelf
serviceName: '{{ include "bjw-s.common.lib.chart.names.fullname" $ }}'
endpoints:
- port: apprise
scheme: http
path: /metrics
interval: 30s
scrapeTimeout: 15s
route:
main:
kind: HTTPRoute
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- audiobookshelf.alexlebens.net
rules:
- backendRefs:
- group: ''
kind: Service
name: audiobookshelf
port: 80
weight: 100
matches:
- path:
type: PathPrefix
value: /
persistence:
config:
forceRename: audiobookshelf-config
storageClass: ceph-block
accessMode: ReadWriteOnce
size: 2Gi
@@ -104,7 +69,6 @@ audiobookshelf:
- path: /config
readOnly: false
metadata:
forceRename: audiobookshelf-metadata
storageClass: ceph-block
accessMode: ReadWriteOnce
size: 10Gi
@@ -114,6 +78,13 @@ audiobookshelf:
main:
- path: /metadata
readOnly: false
backup:
existingClaim: audiobookshelf-nfs-storage-backup
advancedMounts:
main:
main:
- path: /metadata/backups
readOnly: false
audiobooks:
existingClaim: audiobookshelf-nfs-storage
advancedMounts:
@@ -121,23 +92,3 @@ audiobookshelf:
main:
- path: /mnt/store/
readOnly: false
volsync-target-config:
pvcTarget: audiobookshelf-config
local:
enabled: true
schedule: 2 8 * * *
remote:
enabled: false
external:
enabled: true
schedule: 2 9 * * *
volsync-target-metadata:
pvcTarget: audiobookshelf-metadata
local:
enabled: true
schedule: 4 8 * * *
remote:
enabled: false
external:
enabled: true
schedule: 4 9 * * *

View File

@@ -1,15 +1,12 @@
dependencies:
- name: authentik
repository: https://charts.goauthentik.io/
version: 2025.10.3
version: 2025.10.2
- name: cloudflared
repository: oci://harbor.alexlebens.net/helm-charts
version: 2.1.6
version: 1.23.1
- name: postgres-cluster
repository: oci://harbor.alexlebens.net/helm-charts
version: 7.4.5
- name: redis-replication
repository: oci://harbor.alexlebens.net/helm-charts
version: 1.0.1
digest: sha256:b9fe766e223b1f65430c64c02f94c1f97ad7149a5a9acc7bab8339615706293f
generated: "2026-01-08T23:01:41.173354097Z"
version: 6.16.0
digest: sha256:2c3f84ecf3c8cabb7aa7e7e8a1b38cced65156c1e1db9b984154f514c33c9f69
generated: "2025-12-03T06:02:07.501444093Z"

View File

@@ -21,18 +21,15 @@ maintainers:
- name: alexlebens
dependencies:
- name: authentik
version: 2025.10.3
version: 2025.10.2
repository: https://charts.goauthentik.io/
- name: cloudflared
alias: cloudflared
repository: oci://harbor.alexlebens.net/helm-charts
version: 2.1.6
version: 1.23.1
- name: postgres-cluster
alias: postgres-18-cluster
version: 7.4.5
repository: oci://harbor.alexlebens.net/helm-charts
- name: redis-replication
version: 1.0.1
alias: postgres-17-cluster
version: 6.16.0
repository: oci://harbor.alexlebens.net/helm-charts
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/authentik.png
# renovate: github=goauthentik/authentik
appVersion: 2025.10.2
appVersion: 2025.4.1

View File

@@ -19,3 +19,93 @@ spec:
key: /cl01tl/authentik/key
metadataPolicy: None
property: key
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: authentik-cloudflared-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: authentik-cloudflared-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/authentik
metadataPolicy: None
property: token
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: authentik-postgresql-17-cluster-backup-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: authentik-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: authentik-postgresql-17-cluster-backup-secret-garage
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: authentik-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION

View File

@@ -0,0 +1,28 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-authentik
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: http-route-authentik
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- authentik.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: authentik-server
port: 80
weight: 100

View File

@@ -0,0 +1,32 @@
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-authentik
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: redis-replication-authentik
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.0.3
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.48.0

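The values further down enable Sentinel for this replication group, but the RedisReplication CR above only provisions the primary and its replicas. With the OT-Container-Kit operator implied by this apiVersion, Sentinel is a separate resource that references the replication by name; a hedged sketch (resource name, namespace, and image tag are assumptions, not taken from this changeset):

apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisSentinel
metadata:
  name: redis-sentinel-authentik                           # hypothetical name
  namespace: authentik                                     # assumed release namespace
spec:
  clusterSize: 3
  redisSentinelConfig:
    redisReplicationName: redis-replication-authentik      # must match the RedisReplication above
  kubernetesConfig:
    image: quay.io/opstree/redis-sentinel:v8.0.3           # assumed tag, mirroring the redis image
    imagePullPolicy: IfNotPresent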
View File

@@ -0,0 +1,19 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-authentik
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: redis-replication-authentik
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s

View File

@@ -9,22 +9,22 @@ authentik:
- name: AUTHENTIK_POSTGRESQL__HOST
valueFrom:
secretKeyRef:
name: authentik-postgresql-18-cluster-app
name: authentik-postgresql-17-cluster-app
key: host
- name: AUTHENTIK_POSTGRESQL__NAME
valueFrom:
secretKeyRef:
name: authentik-postgresql-18-cluster-app
name: authentik-postgresql-17-cluster-app
key: dbname
- name: AUTHENTIK_POSTGRESQL__USER
valueFrom:
secretKeyRef:
name: authentik-postgresql-18-cluster-app
name: authentik-postgresql-17-cluster-app
key: user
- name: AUTHENTIK_POSTGRESQL__PASSWORD
valueFrom:
secretKeyRef:
name: authentik-postgresql-18-cluster-app
name: authentik-postgresql-17-cluster-app
key: password
authentik:
redis:
@@ -36,23 +36,8 @@ authentik:
enabled: true
serviceMonitor:
enabled: true
route:
main:
enabled: true
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
hostnames:
- authentik.alexlebens.net
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
httpsRedirect: false
matches:
- path:
type: PathPrefix
value: /
ingress:
enabled: false
worker:
name: worker
replicas: 1
@@ -63,52 +48,61 @@ authentik:
enabled: false
redis:
enabled: false
postgres-18-cluster:
cloudflared:
existingSecretName: authentik-cloudflared-secret
postgres-17-cluster:
mode: recovery
cluster:
storage:
storageClass: local-path
walStorage:
storageClass: local-path
monitoring:
enabled: true
prometheusRule:
enabled: true
recovery:
method: objectStore
objectStore:
destinationPath: s3://postgres-backups/cl01tl/authentik/authentik-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
index: 1
endpointCredentials: authentik-postgresql-17-cluster-backup-secret-garage
backup:
objectStore:
- name: garage-local
- name: external
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/authentik/authentik-postgresql-17-cluster
index: 1
destinationBucket: postgres-backups
externalSecretCredentialPath: /garage/home-infra/postgres-backups
retentionPolicy: "30d"
isWALArchiver: false
- name: garage-local
destinationPath: s3://postgres-backups/cl01tl/authentik/authentik-postgresql-17-cluster
index: 1
endpointURL: http://garage-main.garage:3900
endpointCredentials: authentik-postgresql-17-cluster-backup-secret-garage
endpointCredentialsIncludeRegion: true
retentionPolicy: "3d"
isWALArchiver: true
# - name: garage-remote
# destinationPath: s3://postgres-backups/cl01tl/authentik/authentik-postgresql-17-cluster
# index: 1
# destinationBucket: postgres-backups
# externalSecretCredentialPath: /garage/home-infra/postgres-backups
# retentionPolicy: "90d"
# endpointURL: https://garage-ps10rp.boreal-beaufort.ts.net:3900
# endpointCredentials: authentik-postgresql-17-cluster-backup-secret-garage
# retentionPolicy: "30d"
# data:
# compression: bzip2
# - name: external
# index: 1
# endpointURL: https://nyc3.digitaloceanspaces.com
# destinationBucket: postgres-backups-ce540ddf106d186bbddca68a
# externalSecretCredentialPath: /garage/home-infra/postgres-backups
# isWALArchiver: false
# jobs: 2
scheduledBackups:
- name: daily-backup
suspend: false
schedule: "0 0 0 * * *"
backupName: external
- name: live-backup
suspend: false
immediate: true
schedule: "0 0 0 * * *"
backupName: garage-local
# - name: weekly-backup
# suspend: true
# immediate: true
# suspend: false
# schedule: "0 0 4 * * SAT"
# backupName: garage-remote
# - name: daily-backup
# suspend: true
# immediate: true
# schedule: "0 0 0 * * *"
# backupName: external
redis-replication:
existingSecret:
enabled: false
redisReplication:
clusterSize: 3
sentinel:
enabled: true

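Two details in the `postgres-17-cluster` values are easy to misread. `mode: recovery` bootstraps the cluster from the object store defined under `recovery.objectStore` (the in-cluster garage endpoint) instead of running initdb, and the scheduled backups then archive back to the stores listed under `backup.objectStore`. Assuming the wrapper chart passes `scheduledBackups[].schedule` straight through to CloudNativePG ScheduledBackup resources, the schedule is a six-field cron expression with a leading seconds field:

# six-field CNPG cron:   sec  min  hour  day-of-month  month  day-of-week
schedule: "0 0 0 * * *"      # 00:00:00 every day    (the live-backup job above)
schedule: "0 0 4 * * SAT"    # 04:00:00 on Saturdays (the commented-out garage-remote job)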
View File

@@ -1,12 +1,6 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.0
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
digest: sha256:816f9485a6ecce1710004fcf8b5422d519f7d7a02576ae5767bb3a6f365fcdcc
generated: "2026-01-07T16:03:18.566019576Z"
version: 4.4.0
digest: sha256:aa797b99d6d8b7aafe142811938408b7f234df6d429a7e076196337cc63876cb
generated: "2025-12-01T20:25:09.888407-06:00"

View File

@@ -16,15 +16,6 @@ dependencies:
- name: app-template
alias: backrest
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.0
- name: volsync-target
alias: volsync-target-config
version: 0.7.0
repository: oci://harbor.alexlebens.net/helm-charts
- name: volsync-target
alias: volsync-target-data
version: 0.7.0
repository: oci://harbor.alexlebens.net/helm-charts
version: 4.4.0
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/backrest.png
# renovate: github=garethgeorge/backrest
appVersion: v1.10.1

View File

@@ -0,0 +1,28 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-backrest
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: http-route-backrest
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- backrest.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: backrest
port: 80
weight: 100

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: garage-ps10rp
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: garage-ps10rp
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
annotations:
tailscale.com/tailnet-fqdn: garage-ps10rp.boreal-beaufort.ts.net
spec:
externalName: placeholder
type: ExternalName

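The `externalName: placeholder` value is deliberate. With the Tailscale Kubernetes operator, the `tailscale.com/tailnet-fqdn` annotation asks the operator to stand up an egress proxy for `garage-ps10rp.boreal-beaufort.ts.net` and rewrite this ExternalName Service to point at it, giving in-cluster workloads a stable Service DNS name for the remote garage node. A hedged usage sketch (namespace placeholder and port are assumptions carried over from the backup values elsewhere in this changeset):

# hypothetical S3 endpoint for a client running in the same namespace as this Service
endpoint: http://garage-ps10rp.<release-namespace>.svc.cluster.local:3900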
View File

@@ -33,30 +33,8 @@ backrest:
port: 80
targetPort: 9898
protocol: TCP
route:
main:
kind: HTTPRoute
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- backrest.alexlebens.net
rules:
- backendRefs:
- group: ''
kind: Service
name: backrest
port: 80
weight: 100
matches:
- path:
type: PathPrefix
value: /
persistence:
data:
forceRename: backrest-data
storageClass: ceph-block
accessMode: ReadWriteOnce
size: 10Gi
@@ -67,7 +45,6 @@ backrest:
- path: /data
readOnly: false
config:
forceRename: backrest-config
storageClass: ceph-block
accessMode: ReadWriteOnce
size: 1Gi
@@ -105,23 +82,3 @@ backrest:
main:
- path: /mnt/share
readOnly: true
volsync-target-data:
pvcTarget: backrest-data
local:
enabled: true
schedule: 6 8 * * *
remote:
enabled: false
external:
enabled: true
schedule: 6 9 * * *
volsync-target-config:
pvcTarget: backrest-config
local:
enabled: true
schedule: 8 8 * * *
remote:
enabled: false
external:
enabled: true
schedule: 8 9 * * *

View File

@@ -1,9 +1,6 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.0
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
digest: sha256:de894b87644bc7fe271df181dc27c9c4aff6716c7d61afe897776a90c8d1156f
generated: "2026-01-07T16:03:30.651964699Z"
version: 4.4.0
digest: sha256:c6f6d1f2fb9fedf54094920737a6f0bd1a2ab89f0a4122966ca98f6c9d3f11fa
generated: "2025-11-30T21:05:22.694344-06:00"

View File

@@ -18,11 +18,6 @@ dependencies:
- name: app-template
alias: bazarr
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.0
- name: volsync-target
alias: volsync-target-config
version: 0.7.0
repository: oci://harbor.alexlebens.net/helm-charts
version: 4.4.0
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/bazarr.png
# renovate: github=linuxserver/bazarr
appVersion: 1.5.3
appVersion: 1.5.2

View File

@@ -0,0 +1,55 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: bazarr-config-backup-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: bazarr-config-backup-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/bazarr/bazarr-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key

View File

@@ -0,0 +1,28 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-bazarr
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: http-route-bazarr
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- bazarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: bazarr
port: 80
weight: 100

View File

@@ -0,0 +1,30 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: bazarr-config-backup-source
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: bazarr-config-backup-source
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
sourcePVC: bazarr-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: bazarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot

View File

@@ -15,7 +15,7 @@ bazarr:
main:
image:
repository: ghcr.io/linuxserver/bazarr
tag: 1.5.4@sha256:7d0a091a63889ce1e4ac4c90595ebd2c50ba5a5df7039a4f4d2be6c2aed6d4ae
tag: 1.5.3@sha256:ec11e988e8e13411c994a4d9f43ed9b97409aa92c1da54d9f23926c3da7c2032
pullPolicy: IfNotPresent
env:
- name: TZ
@@ -36,27 +36,6 @@ bazarr:
port: 80
targetPort: 6767
protocol: HTTP
route:
main:
kind: HTTPRoute
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- bazarr.alexlebens.net
rules:
- backendRefs:
- group: ''
kind: Service
name: bazarr
port: 80
weight: 100
matches:
- path:
type: PathPrefix
value: /
persistence:
config:
forceRename: bazarr-config
@@ -76,18 +55,3 @@ bazarr:
main:
- path: /mnt/store
readOnly: false
volsync-target-config:
pvcTarget: bazarr-config
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
local:
enabled: true
schedule: 10 8 * * *
remote:
enabled: false
external:
enabled: true
schedule: 10 9 * * *

View File

@@ -1,9 +1,6 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.0
- name: redis-replication
repository: oci://harbor.alexlebens.net/helm-charts
version: 1.0.1
digest: sha256:e9dde2592347a72ab8b7154efcd12654fb0865c4e791c5bbea27f56b390a6b50
generated: "2026-01-08T15:47:39.239057-06:00"
version: 4.4.0
digest: sha256:0009729bcf7f1941401b767fd4ae952b7a8d44f80053090b4a9224de912a14ef
generated: "2025-12-01T20:25:13.511406-06:00"

View File

@@ -16,10 +16,6 @@ dependencies:
- name: app-template
alias: blocky
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.0
- name: redis-replication
version: 1.0.1
repository: oci://harbor.alexlebens.net/helm-charts
version: 4.4.0
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/blocky.png
# renovate: github=0xerr0r/blocky
appVersion: v0.28.2
appVersion: v0.25

View File

@@ -0,0 +1,32 @@
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-blocky
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: redis-replication-blocky
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.0.3
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.48.0

View File

@@ -0,0 +1,40 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: blocky
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: blocky
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
selector:
matchLabels:
app.kubernetes.io/name: blocky
app.kubernetes.io/instance: {{ .Release.Name }}
endpoints:
- port: metrics
interval: 30s
scrapeTimeout: 10s
path: /metrics
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-blocky
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: redis-replication-blocky
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s

View File

@@ -96,9 +96,11 @@ blocky:
cl01tl-endpoint IN A 10.232.1.22
cl01tl-endpoint IN A 10.232.1.23
cl01tl-gateway IN A 10.232.1.200
traefik-cl01tl IN A 10.232.1.21
blocky IN A 10.232.1.22
cilium-cl01tl IN A 10.232.1.23
plex-lb IN A 10.232.1.23
;; Application Names
actual IN CNAME traefik-cl01tl
@@ -113,8 +115,6 @@ blocky:
ceph IN CNAME traefik-cl01tl
code-server IN CNAME traefik-cl01tl
ephemera IN CNAME traefik-cl01tl
feishin IN CNAME traefik-cl01tl
fladder IN CNAME traefik-cl01tl
garage-s3 IN CNAME traefik-cl01tl
garage-webui IN CNAME traefik-cl01tl
gatus IN CNAME traefik-cl01tl
@@ -125,23 +125,25 @@ blocky:
home IN CNAME traefik-cl01tl
home-assistant IN CNAME traefik-cl01tl
home-assistant-code-server IN CNAME traefik-cl01tl
hubble IN CNAME traefik-cl01tl
hubble IN CNAME cl01tl-gateway
huntarr IN CNAME traefik-cl01tl
immich IN CNAME traefik-cl01tl
jellyfin IN CNAME traefik-cl01tl
jellystat IN CNAME traefik-cl01tl
kiwix IN CNAME traefik-cl01tl
komodo IN CNAME traefik-cl01tl
kronic IN CNAME traefik-cl01tl
lidarr IN CNAME traefik-cl01tl
lidatube IN CNAME traefik-cl01tl
listenarr IN CNAME traefik-cl01tl
mail IN CNAME traefik-cl01tl
navidrome IN CNAME traefik-cl01tl
n8n IN CNAME traefik-cl01tl
ntfy IN CNAME traefik-cl01tl
objects IN CNAME traefik-cl01tl
ollama IN CNAME traefik-cl01tl
omni-tools IN CNAME traefik-cl01tl
overseerr IN CNAME traefik-cl01tl
pgadmin IN CNAME traefik-cl01tl
photoview IN CNAME traefik-cl01tl
plex IN CNAME traefik-cl01tl
postiz IN CNAME traefik-cl01tl
@@ -154,7 +156,6 @@ blocky:
radarr-anime IN CNAME traefik-cl01tl
radarr-standup IN CNAME traefik-cl01tl
searxng IN CNAME traefik-cl01tl
seerr IN CNAME traefik-cl01tl
slskd IN CNAME traefik-cl01tl
sonarr IN CNAME traefik-cl01tl
sonarr-4k IN CNAME traefik-cl01tl
@@ -166,7 +167,6 @@ blocky:
vault IN CNAME traefik-cl01tl
whodb IN CNAME traefik-cl01tl
yamtrack IN CNAME traefik-cl01tl
yubal-playlist IN CNAME traefik-cl01tl
blocking:
denylists:
@@ -182,47 +182,35 @@ blocky:
- https://v.firebog.net/hosts/Prigent-Ads.txt
mal:
- https://v.firebog.net/hosts/Prigent-Crypto.txt
- https://osint.digitalside.it/Threat-Intel/lists/latestdomains.txt
pro:
- https://raw.githubusercontent.com/hagezi/dns-blocklists/main/wildcard/pro.plus.txt
oisd:
- https://big.oisd.nl/domainswild
allowlists:
sus:
- |
*.alexlebens.net
*.alexlebens.dev
*.boreal-beaufort.ts.net
*.discord.com
ads:
- |
*.alexlebens.net
*.alexlebens.dev
*.boreal-beaufort.ts.net
*.discord.com
priv:
- |
*.alexlebens.net
*.alexlebens.dev
*.boreal-beaufort.ts.net
*.discord.com
mal:
- |
*.alexlebens.net
*.alexlebens.dev
*.boreal-beaufort.ts.net
*.discord.com
pro:
- |
*.alexlebens.net
*.alexlebens.dev
*.boreal-beaufort.ts.net
*.discord.com
oisd:
- |
*.alexlebens.net
*.alexlebens.dev
*.boreal-beaufort.ts.net
*.discord.com
clientGroupsBlock:
default:
- sus
@@ -230,7 +218,6 @@ blocky:
- priv
- mal
- pro
- oisd
blockType: zeroIp
blockTTL: 1m
loading:
@@ -302,19 +289,6 @@ blocky:
port: 4000
targetPort: 4000
protocol: TCP
serviceMonitor:
main:
selector:
matchLabels:
app.kubernetes.io/name: blocky
app.kubernetes.io/instance: blocky
serviceName: '{{ include "bjw-s.common.lib.chart.names.fullname" $ }}'
endpoints:
- port: metrics
scheme: http
path: /metrics
interval: 30s
scrapeTimeout: 10s
persistence:
config:
enabled: true
@@ -327,6 +301,3 @@ blocky:
readOnly: true
mountPropagation: None
subPath: config.yml
redis-replication:
redisReplication:
clusterSize: 1

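Most entries in the custom zone are CNAMEs onto one of the A records declared at the top, so each service needs only one line and an IP change only has to be made once. Assuming the zone origin is alexlebens.net (which matches the hostnames used in the HTTPRoutes), a lookup for sonarr resolves in two steps:

sonarr.alexlebens.net.           CNAME   traefik-cl01tl.alexlebens.net.
traefik-cl01tl.alexlebens.net.   A       10.232.1.21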
View File

@@ -1,15 +1,9 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.0
version: 4.4.0
- name: mariadb-cluster
repository: https://helm.mariadb.com/mariadb-operator
version: 25.10.4
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
digest: sha256:bce787056cbe11ac5291393a0e51f64e8e20bf665885580b8f6be79fd2be3e12
generated: "2026-01-08T21:50:26.303731301Z"
version: 25.10.2
digest: sha256:264725306c1d1f38140293c0820abdc7e8aa4f39764b4d91e20200705ce2ec91
generated: "2025-11-30T21:05:24.649316-06:00"

View File

@@ -16,18 +16,9 @@ dependencies:
- name: app-template
alias: booklore
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.0
version: 4.4.0
- name: mariadb-cluster
version: 25.10.4
version: 25.10.2
repository: https://helm.mariadb.com/mariadb-operator
- name: volsync-target
alias: volsync-target-config
version: 0.7.0
repository: oci://harbor.alexlebens.net/helm-charts
- name: volsync-target
alias: volsync-target-data
version: 0.7.0
repository: oci://harbor.alexlebens.net/helm-charts
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/booklore.png
# renovate: github=booklore-app/BookLore
appVersion: v1.13.2
appVersion: v1.10.0

View File

@@ -43,6 +43,234 @@ spec:
metadataPolicy: None
property: psk.txt
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-config-backup-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: booklore-config-backup-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/booklore/booklore-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_SECRET_ACCESS_KEY
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-data-backup-secret-local
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: booklore-data-backup-secret-local
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/booklore/booklore-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-local
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-local
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-data-backup-secret-remote
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: booklore-data-backup-secret-remote
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/booklore/booklore-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-remote
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-remote
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-data-backup-secret-external
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: booklore-data-backup-secret-external
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/booklore/booklore-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_SECRET_ACCESS_KEY
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret

View File

@@ -0,0 +1,28 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-booklore
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: http-route-booklore
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- booklore.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: booklore
port: 80
weight: 100

View File

@@ -8,6 +8,3 @@ metadata:
app.kubernetes.io/name: booklore
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged

View File

@@ -15,3 +15,115 @@ spec:
keySecret: booklore-data-replication-secret
address: volsync-rsync-tls-dst-booklore-data-replication-destination
copyMethod: Snapshot
---
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-config-backup-source
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: booklore-config-backup-source
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
sourcePVC: booklore-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: booklore-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
---
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-data-backup-source-local
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: booklore-data-backup-source-local
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
sourcePVC: booklore-data
trigger:
schedule: 0 2 * * *
restic:
pruneIntervalDays: 7
repository: booklore-data-backup-secret-local
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
---
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-data-backup-source-remote
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: booklore-data-backup-source-remote
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
sourcePVC: booklore-data
trigger:
schedule: 0 3 * * *
restic:
pruneIntervalDays: 7
repository: booklore-data-backup-secret-remote
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
---
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-data-backup-source-external
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: booklore-data-backup-source-external
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
sourcePVC: booklore-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: booklore-data-backup-secret-external
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: garage-ps10rp
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: garage-ps10rp
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
annotations:
tailscale.com/tailnet-fqdn: garage-ps10rp.boreal-beaufort.ts.net
spec:
externalName: placeholder
type: ExternalName

View File

@@ -9,7 +9,7 @@ booklore:
main:
image:
repository: ghcr.io/booklore-app/booklore
tag: v1.17.0
tag: v1.12.0
pullPolicy: IfNotPresent
env:
- name: TZ
@@ -39,30 +39,8 @@ booklore:
port: 80
targetPort: 6060
protocol: HTTP
route:
main:
kind: HTTPRoute
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- booklore.alexlebens.net
rules:
- backendRefs:
- group: ''
kind: Service
name: booklore
port: 80
weight: 100
matches:
- path:
type: PathPrefix
value: /
persistence:
config:
forceRename: booklore-config
storageClass: ceph-block
accessMode: ReadWriteOnce
size: 5Gi
@@ -73,7 +51,6 @@ booklore:
- path: /app/data
readOnly: false
data:
forceRename: booklore-data
storageClass: ceph-block
accessMode: ReadWriteOnce
size: 10Gi
@@ -108,21 +85,6 @@ mariadb-cluster:
replicas: 3
galera:
enabled: true
bootstrapFrom:
s3:
bucket: mariadb-backups-b230a2f5aecf080a4b372c08
prefix: cl01tl/booklore
endpoint: nyc3.digitaloceanspaces.com
region: us-east-1
accessKeyIdSecretKeyRef:
name: booklore-mariadb-cluster-backup-secret-external
key: access
secretAccessKeySecretKeyRef:
name: booklore-mariadb-cluster-backup-secret-external
key: secret
tls:
enabled: true
backupContentType: Physical
databases:
- name: booklore
characterSet: utf8
@@ -157,8 +119,7 @@ mariadb-cluster:
suspend: false
immediate: true
compression: gzip
maxRetention: 2160h
successfulJobsHistoryLimit: 1
maxRetention: 720h
storage:
s3:
bucket: mariadb-backups-b230a2f5aecf080a4b372c08
@@ -173,28 +134,6 @@ mariadb-cluster:
key: secret
tls:
enabled: true
- name: backup-remote
schedule:
cron: "0 0 * * 0"
suspend: false
immediate: true
compression: gzip
maxRetention: 2160h
successfulJobsHistoryLimit: 1
storage:
s3:
bucket: mariadb-backups
prefix: cl01tl/booklore
endpoint: garage-ps10rp.boreal-beaufort.ts.net:3900
region: us-east-1
accessKeyIdSecretKeyRef:
name: booklore-mariadb-cluster-backup-secret-garage
key: access
secretAccessKeySecretKeyRef:
name: booklore-mariadb-cluster-backup-secret-garage
key: secret
tls:
enabled: true
- name: backup-garage
schedule:
cron: "0 0 * * *"
@@ -202,7 +141,6 @@ mariadb-cluster:
immediate: true
compression: gzip
maxRetention: 360h
successfulJobsHistoryLimit: 1
storage:
s3:
bucket: mariadb-backups
@@ -215,30 +153,3 @@ mariadb-cluster:
secretAccessKeySecretKeyRef:
name: booklore-mariadb-cluster-backup-secret-garage
key: secret
volsync-target-config:
pvcTarget: booklore-config
local:
enabled: true
schedule: 12 8 * * *
remote:
enabled: false
external:
enabled: true
schedule: 12 9 * * *
volsync-target-data:
pvcTarget: booklore-data
local:
enabled: true
schedule: 14 8 * * *
restic:
cacheCapacity: 10Gi
remote:
enabled: true
schedule: 14 10 * * *
restic:
cacheCapacity: 10Gi
external:
enabled: true
schedule: 14 9 * * *
restic:
cacheCapacity: 10Gi

View File

@@ -1,6 +1,6 @@
dependencies:
- name: cert-manager
repository: https://charts.jetstack.io
version: v1.19.2
digest: sha256:b02bda9b9f2fc886af11d017a27a5761513defee603f9e3aa1d7add2749b925c
generated: "2025-12-10T15:01:57.196895547Z"
version: v1.19.1
digest: sha256:0b1238a5552bc6d457d4b1a2a1f387a3e7f2c19f820ecb64e14d20481a1ed1ce
generated: "2025-12-01T20:25:17.762628-06:00"

View File

@@ -14,8 +14,7 @@ maintainers:
- name: alexlebens
dependencies:
- name: cert-manager
version: v1.19.2
version: v1.19.1
repository: https://charts.jetstack.io
icon: https://raw.githubusercontent.com/walkxcode/dashboard-icons/main/png/cert-manager.png
# renovate: github=cert-manager/cert-manager
appVersion: v1.19.2
appVersion: v1.17.2

View File

@@ -1,6 +1,6 @@
dependencies:
- name: cilium
repository: https://helm.cilium.io/
version: 1.18.6
digest: sha256:8ea328ac238524b5b423e6289f5e25d05ef64e6aa19cfd5de238f1d5dd533e9b
generated: "2026-01-14T11:02:31.272963463Z"
version: 1.18.4
digest: sha256:e38eb92ee87c9a52b0f45a2451142ade02bac7d484b246d32379eacce3800bc8
generated: "2025-12-02T17:17:49.043599-06:00"

View File

@@ -15,8 +15,7 @@ maintainers:
- name: alexlebens
dependencies:
- name: cilium
version: 1.18.6
version: 1.18.4
repository: https://helm.cilium.io/
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/cilium.png
# renovate: github=cilium/cilium
appVersion: 1.18.4
appVersion: 1.17.3

View File

@@ -1,19 +0,0 @@
# apiVersion: "cilium.io/v2alpha1"
# kind: CiliumL2AnnouncementPolicy
# metadata:
# name: general-l2-policy
# namespace: {{ .Release.Namespace }}
# labels:
# app.kubernetes.io/name: general-l2-policy
# app.kubernetes.io/instance: {{ .Release.Name }}
# app.kubernetes.io/part-of: {{ .Release.Name }}
# spec:
# nodeSelector:
# matchExpressions:
# - key: kubernetes.io/hostname
# operator: Exists
# interfaces:
# - end0
# - enp6s0
# externalIPs: true
# loadBalancerIPs: true

View File

@@ -1,7 +1,7 @@
# apiVersion: gateway.networking.k8s.io/v1
# kind: Gateway
# metadata:
# name: cilium-tls-gateway
# name: tls-gateway
# namespace: {{ .Release.Namespace }}
# labels:
# app.kubernetes.io/name: tls-gateway

View File

@@ -1,10 +1,10 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: hubble
name: http-route-hubble
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: hubble
app.kubernetes.io/name: http-route-hubble
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:

View File

@@ -55,12 +55,9 @@ cilium:
metrics:
serviceMonitor:
enabled: true
tls:
auto:
method: cronJob
relay:
enabled: true
prometheus:
metrics:
serviceMonitor:
enabled: true
ui:

View File

@@ -1,9 +1,9 @@
dependencies:
- name: cloudnative-pg
repository: https://cloudnative-pg.io/charts/
version: 0.27.0
version: 0.26.1
- name: plugin-barman-cloud
repository: https://cloudnative-pg.io/charts/
version: 0.4.0
digest: sha256:5e2a32fa5ed8b180ae5e556d65c67eeb3dcf38e2974b0d668eff4ee3c83258ce
generated: "2025-12-30T21:01:48.755246408Z"
version: 0.3.1
digest: sha256:b38e5104d77ab1737a27a2542eda958e82038443940f07b7c2cbe3b0a477e1e6
generated: "2025-12-01T20:25:20.341325-06:00"

View File

@@ -16,11 +16,10 @@ maintainers:
- name: alexlebens
dependencies:
- name: cloudnative-pg
version: 0.27.0
version: 0.26.1
repository: https://cloudnative-pg.io/charts/
- name: plugin-barman-cloud
version: 0.4.0
version: 0.3.1
repository: https://cloudnative-pg.io/charts/
icon: https://avatars.githubusercontent.com/u/100373852?s=200&v=4
# renovate: github=cloudnative-pg/cloudnative-pg
appVersion: 1.28.0
appVersion: 1.26.0

View File

@@ -7,10 +7,10 @@ plugin-barman-cloud:
image:
registry: ghcr.io
repository: cloudnative-pg/plugin-barman-cloud
tag: v0.10.0
tag: v0.9.0
sidecarImage:
registry: ghcr.io
repository: cloudnative-pg/plugin-barman-cloud-sidecar
tag: v0.10.0
tag: v0.9.0
crds:
create: true

View File

@@ -1,12 +1,9 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.0
version: 4.4.0
- name: cloudflared
repository: oci://harbor.alexlebens.net/helm-charts
version: 2.1.6
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
digest: sha256:62df4e053f3b887bae03f79ef3cb158b7e7147f8b0fba0bca019d4ca16c12ade
generated: "2026-01-08T23:01:55.996749204Z"
version: 1.23.1
digest: sha256:dd687a71edc2f7f03cba6d5f3e3221e2bb5172ed4c00659e327c79da5c01e89f
generated: "2025-12-03T06:02:20.44367742Z"

View File

@@ -19,14 +19,10 @@ dependencies:
- name: app-template
alias: code-server
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.0
version: 4.4.0
- name: cloudflared
alias: cloudflared
repository: oci://harbor.alexlebens.net/helm-charts
version: 2.1.6
- name: volsync-target
alias: volsync-target-config
version: 0.7.0
repository: oci://harbor.alexlebens.net/helm-charts
version: 1.23.1
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/visual-studio-code.png
# renovate: github=coder/code-server
appVersion: 4.106.3
appVersion: 4.100.2

View File

@@ -26,3 +26,26 @@ spec:
key: /cl01tl/code-server/auth
metadataPolicy: None
property: SUDO_PASSWORD
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: code-server-cloudflared-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: code-server-cloudflared-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/codeserver
metadataPolicy: None
property: token

View File

@@ -0,0 +1,28 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-code-server
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: http-route-code-server
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- code-server.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: code-server
port: 8443
weight: 100

View File

@@ -0,0 +1,17 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: code-server-nfs-storage
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: code-server-nfs-storage
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
volumeMode: Filesystem
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi

View File

@@ -9,7 +9,7 @@ code-server:
main:
image:
repository: ghcr.io/linuxserver/code-server
tag: 4.108.0@sha256:982ef207e723fe04d001a9cfe58c545913c8271ae9fd4cb036fa004bfcc33631
tag: 4.106.3@sha256:aab9520fe923b2d93dccc2c806f3dc60649c2f4a2847fcd40c942227d0f1ae8f
pullPolicy: IfNotPresent
env:
- name: TZ
@@ -35,51 +35,13 @@ code-server:
port: 8443
targetPort: 8443
protocol: HTTP
route:
main:
kind: HTTPRoute
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- code-server.alexlebens.net
rules:
- backendRefs:
- group: ''
kind: Service
name: code-server
port: 8443
weight: 100
matches:
- path:
type: PathPrefix
value: /
persistence:
config:
forceRename: code-server-config
storageClass: ceph-block
accessMode: ReadWriteOnce
size: 2Gi
retain: true
existingClaim: code-server-nfs-storage
advancedMounts:
main:
main:
- path: /config
readOnly: false
volsync-target-config:
pvcTarget: code-server-config
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
local:
enabled: true
schedule: 16 8 * * *
remote:
enabled: false
external:
enabled: true
schedule: 16 9 * * *
cloudflared:
existingSecretName: code-server-cloudflared-secret

View File

@@ -18,5 +18,4 @@ dependencies:
version: 1.45.0
repository: https://coredns.github.io/helm
icon: https://raw.githubusercontent.com/walkxcode/dashboard-icons/main/png/coredns.png
# renovate: github=coredns/coredns
appVersion: v1.13.2
appVersion: v1.12.1

View File

@@ -1,7 +1,7 @@
coredns:
image:
repository: registry.k8s.io/coredns/coredns
tag: v1.13.2
tag: v1.13.1
replicaCount: 3
resources:
requests:

View File

@@ -1,6 +1,6 @@
dependencies:
- name: democratic-csi
repository: https://democratic-csi.github.io/charts/
version: 0.15.1
digest: sha256:e07d76a67023fb523e7d49730330995d0028faba9a4c7c3a6b87c5828921b3c3
generated: "2026-01-08T20:33:17.610556446Z"
version: 0.15.0
digest: sha256:6fe3d8ad7b990b07ed80a31c75a0a49db8da497c46a956c632615a2093d29d58
generated: "2025-12-01T20:25:24.972076-06:00"

View File

@@ -15,7 +15,6 @@ maintainers:
dependencies:
- name: democratic-csi
repository: https://democratic-csi.github.io/charts/
version: 0.15.1
version: 0.15.0
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/kubernetes.png
# renovate: github=democratic-csi/democratic-csi
appVersion: v1.9.4

View File

@@ -17,5 +17,4 @@ dependencies:
version: 0.34.0
repository: https://kubernetes-sigs.github.io/descheduler/
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/kubernetes.png
# renovate: github=kubernetes-sigs/descheduler
appVersion: 0.34.0
appVersion: 0.33.0

View File

@@ -39,27 +39,16 @@ descheduler:
- name: RemovePodsViolatingNodeTaints
- name: RemovePodsViolatingInterPodAntiAffinity
- name: RemovePodsViolatingTopologySpreadConstraint
- name: "HighNodeUtilization"
args:
thresholds:
              cpu: 80
              memory: 80
              pods: 90
evictableNamespaces:
exclude:
- "kube-system"
evictionModes:
- "OnlyThresholdingResources"
- name: LowNodeUtilization
args:
thresholds:
cpu: 30
memory: 30
pods: 50
cpu: 20
memory: 20
pods: 20
targetThresholds:
cpu: 60
memory: 40
pods: 80
memory: 60
pods: 60
plugins:
balance:
enabled:

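For the LowNodeUtilization plugin, `thresholds` defines what counts as an underutilized node (usage below every listed resource) and `targetThresholds` what counts as overutilized (usage above any of them); pods are only evicted when both kinds of node exist at the same time. A short worked example against the 30/30/50 and 60/40/80 pair shown above (node figures are hypothetical):

node-a: cpu 25%, memory 20%, pods 40%   -> underutilized (below all thresholds)
node-b: cpu 70%, memory 35%, pods 60%   -> overutilized (cpu exceeds the 60% target)
node-c: cpu 45%, memory 35%, pods 60%   -> neither, left alone
# the descheduler evicts pods from node-b until it drops back under the targetThresholds,
# relying on the scheduler to place the evicted pods on node-a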
View File

@@ -1,15 +1,12 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.0
version: 4.4.0
- name: cloudflared
repository: oci://harbor.alexlebens.net/helm-charts
version: 2.1.6
version: 1.23.1
- name: postgres-cluster
repository: oci://harbor.alexlebens.net/helm-charts
version: 7.4.5
- name: redis-replication
repository: oci://harbor.alexlebens.net/helm-charts
version: 1.0.1
digest: sha256:4a666a87c07396ff40393f5d78658ee543ac442270868b65f350e6ad30bbcb5f
generated: "2026-01-08T23:02:12.156604855Z"
version: 6.16.0
digest: sha256:2291640ab25754ded07da90a6cb617df92262a9b33635e8a9c80adda00034937
generated: "2025-12-03T06:02:33.2806347Z"

View File

@@ -20,17 +20,14 @@ dependencies:
- name: app-template
alias: directus
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.0
version: 4.4.0
- name: cloudflared
alias: cloudflared-directus
repository: oci://harbor.alexlebens.net/helm-charts
version: 2.1.6
version: 1.23.1
- name: postgres-cluster
alias: postgres-18-cluster
version: 7.4.5
repository: oci://harbor.alexlebens.net/helm-charts
- name: redis-replication
version: 1.0.1
alias: postgres-17-cluster
version: 6.16.0
repository: oci://harbor.alexlebens.net/helm-charts
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/directus.png
# renovate: github=directus/directus
appVersion: 11.14.0
appVersion: 11.7.2

View File

@@ -41,36 +41,6 @@ spec:
metadataPolicy: None
property: key
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: directus-oidc-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: directus-oidc-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: OIDC_CLIENT_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/directus
metadataPolicy: None
property: client
- secretKey: OIDC_CLIENT_SECRET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/directus
metadataPolicy: None
property: secret
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
@@ -123,3 +93,153 @@ spec:
key: /cl01tl/directus/redis
metadataPolicy: None
property: password
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: directus-oidc-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: directus-oidc-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: OIDC_CLIENT_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/directus
metadataPolicy: None
property: client
- secretKey: OIDC_CLIENT_SECRET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/directus
metadataPolicy: None
property: secret
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: directus-cloudflared-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: directus-cloudflared-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/directus
metadataPolicy: None
property: token
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: directus-postgresql-17-cluster-backup-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: directus-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: directus-postgresql-17-cluster-backup-secret-weekly
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: directus-postgresql-17-cluster-backup-secret-weekly
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: directus-postgresql-17-cluster-backup-secret-garage
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: directus-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
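
For orientation, each ExternalSecret above materializes a plain Kubernetes Secret named after the ExternalSecret (no target.name is set), with one key per secretKey populated from the referenced Vault path and property. Assuming the Vault entries exist, the garage credentials secret would come out roughly like this sketch (values are placeholders):

# Sketch of the Secret external-secrets would create from
# directus-postgresql-17-cluster-backup-secret-garage (placeholder values).
apiVersion: v1
kind: Secret
metadata:
  name: directus-postgresql-17-cluster-backup-secret-garage
type: Opaque
stringData:
  ACCESS_KEY_ID: "<value of /garage/home-infra/postgres-backups -> ACCESS_KEY_ID>"
  ACCESS_SECRET_KEY: "<value of /garage/home-infra/postgres-backups -> ACCESS_SECRET_KEY>"
  ACCESS_REGION: "<value of /garage/home-infra/postgres-backups -> ACCESS_REGION>"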

View File

@@ -0,0 +1,35 @@
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-directus
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: redis-replication-directus
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.2.1
imagePullPolicy: IfNotPresent
redisSecret:
name: directus-redis-config
key: password
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.76.0

View File

@@ -0,0 +1,30 @@
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisSentinel
metadata:
name: redis-sentinel-directus
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: redis-sentinel-directus
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
redisSentinelConfig:
redisReplicationName: redis-replication-directus
redisReplicationPassword:
secretKeyRef:
name: directus-redis-config
key: password
kubernetesConfig:
image: quay.io/opstree/redis-sentinel:v7.0.15
imagePullPolicy: IfNotPresent
redisSecret:
name: directus-redis-config
key: password
resources:
requests:
cpu: 10m
memory: 128Mi
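
Both the RedisReplication and RedisSentinel above authenticate through the same redisSecret reference, so directus-redis-config must exist with a password key before the operator can reconcile them; the value itself is pulled from /cl01tl/directus/redis by an ExternalSecret shown earlier in this diff. A minimal sketch of the expected shape (placeholder value):

# Shape of the secret both resources above reference (placeholder value;
# the real password is sourced from Vault via external-secrets).
apiVersion: v1
kind: Secret
metadata:
  name: directus-redis-config
type: Opaque
stringData:
  password: "<redis password>"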

View File

@@ -0,0 +1,43 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: directus
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: directus
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
selector:
matchLabels:
app.kubernetes.io/name: directus
app.kubernetes.io/instance: {{ .Release.Name }}
endpoints:
- port: http
interval: 30s
scrapeTimeout: 15s
path: /metrics
bearerTokenSecret:
name: directus-metric-token
key: metric-token
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-directus
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: redis-replication-directus
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s
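
The first ServiceMonitor above selects by the two app.kubernetes.io labels and scrapes a port named http, so the directus Service has to carry those labels and expose that named port; the second relies on the redis_setup_type: replication label and the redis-exporter port, which the redis operator is expected to add once the exporter is enabled. A minimal sketch of a Service the directus monitor would match, assuming the release is named directus (port numbers taken from the values file below):

# Sketch of a Service matched by the directus ServiceMonitor above.
apiVersion: v1
kind: Service
metadata:
  name: directus
  labels:
    app.kubernetes.io/name: directus
    app.kubernetes.io/instance: directus
spec:
  selector:
    app.kubernetes.io/name: directus
  ports:
    - name: http          # must match the ServiceMonitor endpoint port name
      port: 80
      targetPort: 8055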

View File

@@ -9,7 +9,7 @@ directus:
main:
image:
repository: directus/directus
tag: 11.14.0
tag: 11.13.4
pullPolicy: IfNotPresent
env:
- name: PUBLIC_URL
@@ -41,27 +41,27 @@ directus:
- name: DB_HOST
valueFrom:
secretKeyRef:
name: directus-postgresql-18-cluster-app
name: directus-postgresql-17-cluster-app
key: host
- name: DB_DATABASE
valueFrom:
secretKeyRef:
name: directus-postgresql-18-cluster-app
name: directus-postgresql-17-cluster-app
key: dbname
- name: DB_PORT
valueFrom:
secretKeyRef:
name: directus-postgresql-18-cluster-app
name: directus-postgresql-17-cluster-app
key: port
- name: DB_USER
valueFrom:
secretKeyRef:
name: directus-postgresql-18-cluster-app
name: directus-postgresql-17-cluster-app
key: user
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: directus-postgresql-18-cluster-app
name: directus-postgresql-17-cluster-app
key: password
- name: SYNCHRONIZATION_STORE
value: redis
@@ -153,69 +153,62 @@ directus:
port: 80
targetPort: 8055
protocol: TCP
serviceMonitor:
main:
selector:
matchLabels:
app.kubernetes.io/name: directus
app.kubernetes.io/instance: directus
serviceName: '{{ include "bjw-s.common.lib.chart.names.fullname" $ }}'
endpoints:
- port: http
interval: 30s
scrapeTimeout: 15s
path: /metrics
bearerTokenSecret:
name: directus-metric-token
key: metric-token
postgres-18-cluster:
cloudflared-directus:
name: cloudflared-directus
existingSecretName: directus-cloudflared-secret
postgres-17-cluster:
mode: recovery
cluster:
storage:
storageClass: local-path
walStorage:
storageClass: local-path
monitoring:
enabled: true
prometheusRule:
enabled: true
recovery:
method: objectStore
objectStore:
destinationPath: s3://postgres-backups/cl01tl/directus/directus-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
index: 1
endpointCredentials: directus-postgresql-17-cluster-backup-secret-garage
backup:
objectStore:
- name: garage-local
- name: external
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/directus/directus-postgresql-17-cluster
index: 1
destinationBucket: postgres-backups
externalSecretCredentialPath: /garage/home-infra/postgres-backups
retentionPolicy: "30d"
isWALArchiver: false
- name: garage-local
destinationPath: s3://postgres-backups/cl01tl/directus/directus-postgresql-17-cluster
index: 1
endpointURL: http://garage-main.garage:3900
endpointCredentials: directus-postgresql-17-cluster-backup-secret-garage
endpointCredentialsIncludeRegion: true
retentionPolicy: "3d"
isWALArchiver: true
# - name: garage-remote
# destinationPath: s3://postgres-backups/cl01tl/directus/directus-postgresql-17-cluster
# index: 1
# destinationBucket: postgres-backups
# externalSecretCredentialPath: /garage/home-infra/postgres-backups
# retentionPolicy: "90d"
# endpointURL: https://garage-ps10rp.boreal-beaufort.ts.net:3900
# endpointCredentials: directus-postgresql-17-cluster-backup-secret-garage
# retentionPolicy: "30d"
# data:
# compression: bzip2
# - name: external
# index: 1
# endpointURL: https://nyc3.digitaloceanspaces.com
# destinationBucket: postgres-backups-ce540ddf106d186bbddca68a
# externalSecretCredentialPath: /garage/home-infra/postgres-backups
# isWALArchiver: false
# jobs: 2
scheduledBackups:
- name: daily-backup
suspend: false
schedule: "0 0 0 * * *"
backupName: external
- name: live-backup
suspend: false
immediate: true
schedule: "0 0 0 * * *"
backupName: garage-local
# - name: weekly-backup
# suspend: true
# immediate: true
# suspend: false
# schedule: "0 0 4 * * SAT"
# backupName: garage-remote
# - name: daily-backup
# suspend: true
# immediate: true
# schedule: "0 0 0 * * *"
# backupName: external
redis-replication:
existingSecret:
enabled: true
name: directus-redis-config
key: password
redisReplication:
clusterSize: 3
sentinel:
enabled: true
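
One note on the scheduledBackups entries above: the six-field schedule strings are CloudNativePG-style cron expressions with a leading seconds field, so "0 0 0 * * *" fires daily at midnight and "0 0 4 * * SAT" weekly at 04:00 on Saturday. Assuming the postgres-cluster chart renders these into CNPG ScheduledBackup objects (the object and cluster names below are hypothetical), the result would look roughly like:

# Sketch of the ScheduledBackup assumed to be rendered for live-backup.
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
  name: directus-postgresql-17-cluster-live-backup   # hypothetical name
spec:
  schedule: "0 0 0 * * *"   # sec min hour day-of-month month day-of-week
  immediate: true
  suspend: false
  cluster:
    name: directus-postgresql-17-cluster              # hypothetical cluster name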

View File

@@ -18,5 +18,4 @@ dependencies:
version: 3.2.0
repository: https://helm.elastic.co
icon: https://helm.elastic.co/icons/eck.png
# renovate: github=elastic/cloud-on-k8s
appVersion: v3.2.0
appVersion: 1.26.0

View File

@@ -1,9 +1,9 @@
dependencies:
- name: element-web
repository: https://ananace.gitlab.io/charts
version: 1.4.27
version: 1.4.25
- name: cloudflared
repository: oci://harbor.alexlebens.net/helm-charts
version: 2.1.6
digest: sha256:b8b4e36fb88254e7575bd3aff60721fbf44a13af33eff949b6bc011facfcec62
generated: "2026-01-08T23:02:24.62763032Z"
version: 1.23.1
digest: sha256:06208f8ba47fc2e2c0e56ea2e08b3539cb4c5d3ac5eeaf22936e84925b7add90
generated: "2025-12-03T17:03:58.859116734Z"

Some files were not shown because too many files have changed in this diff.