1 Commits

Commit f4c31c2a25: Update docker.io/postgres Docker tag to v18 (2025-12-02 01:49:04 +00:00)
All checks were successful:
- renovate/stability-days: Updates have met minimum release age requirement
- lint-test-helm / helm-lint (pull_request): Successful in 12s
- lint-test-docker / docker-lint (pull_request): Successful in 20s
652 changed files with 13975 additions and 10929 deletions

View File

@@ -0,0 +1,86 @@
name: lint-test-docker
on:
pull_request:
branches:
- main
paths:
- 'hosts/**'
jobs:
docker-lint:
runs-on: ubuntu-js
steps:
- name: Checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Check Branch Exists
id: check-branch-exists
uses: GuillaumeFalourd/branch-exists@v1.1
with:
branch: "${{ github.base_ref }}"
- name: Branch Does Not Exist
if: steps.check-branch-exists.outputs.exists == 'false'
run: echo "Branch ${{ github.base_ref }} was not found, likely already merged"
- name: Set up Node.js
if: steps.check-branch-exists.outputs.exists == 'true'
uses: actions/setup-node@v6
with:
node-version: '24'
- name: Lint Docker Compose
if: steps.check-branch-exists.outputs.exists == 'true'
run: |
set -e # Exit immediately if a command exits with a non-zero status.
TARGET_BRANCH="origin/${{ github.base_ref }}"
echo ">> Target branch for diff is: $TARGET_BRANCH"
CHANGED_FILES=$(git diff --name-only "$TARGET_BRANCH" -- 'hosts/**')
echo ">> Found changed files:"
echo "$CHANGED_FILES"
# For each changed file, find its parent compose directory (the one containing compose.yaml).
# Then, create a unique list of those directories.
CHANGED_COMPOSE=$(echo "$CHANGED_FILES" | while read -r file; do
dir=$(dirname "$file")
while [[ "$dir" != "." && ! -f "$dir/compose.yaml" ]]; do
dir=$(dirname "$dir")
done
if [[ "$dir" != "." ]]; then
echo "$dir"
fi
done | sort -u)
if [[ -z "$CHANGED_COMPOSE" ]]; then
echo ">> Could not determine changed compose files. This will happen if only files outside a compose file were changed."
exit 0
fi
echo ">> Running dclint on changed compose files:"
echo "$CHANGED_COMPOSE"
echo "$CHANGED_COMPOSE" | while read -r compose; do
echo ">> Linting $compose ..."
npx dclint "$compose"
done
- name: ntfy Failed
uses: niniyas/ntfy-action@master
if: failure()
with:
url: '${{ secrets.NTFY_URL }}'
topic: '${{ secrets.NTFY_TOPIC }}'
title: 'Test Failure - Infrastructure'
priority: 3
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,failed
details: 'Docker linting on Pull Request for Infrastructure has failed!'
icon: 'https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png'
actions: '[{"action": "view", "label": "Open Gitea", "url": "https://gitea.alexlebens.dev/alexlebens/infrastructure/actions?workflow=lint-test-docker-pull.yaml", "clear": true}]'
image: true

View File

@@ -0,0 +1,74 @@
name: lint-test-docker
on:
push:
branches:
- main
paths:
- 'hosts/**'
jobs:
docker-lint:
runs-on: ubuntu-js
steps:
- name: Checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Set up Node.js
uses: actions/setup-node@v6
with:
node-version: '24'
- name: Lint Docker Compose
run: |
set -e # Exit immediately if a command exits with a non-zero status.
TARGET_BRANCH="origin/main"
echo ">> Target branch for diff is: $TARGET_BRANCH"
CHANGED_FILES=$(git diff --name-only "$TARGET_BRANCH" -- 'hosts/**')
echo ">> Found changed files:"
echo "$CHANGED_FILES"
# For each changed file, find its parent compose directory (the one containing compose.yaml).
# Then, create a unique list of those directories.
CHANGED_COMPOSE=$(echo "$CHANGED_FILES" | while read -r file; do
dir=$(dirname "$file")
while [[ "$dir" != "." && ! -f "$dir/compose.yaml" ]]; do
dir=$(dirname "$dir")
done
if [[ "$dir" != "." ]]; then
echo "$dir"
fi
done | sort -u)
if [[ -z "$CHANGED_COMPOSE" ]]; then
echo ">> Could not determine changed compose files. This will happen if only files outside a compose file were changed."
exit 0
fi
echo ">> Running dclint on changed compose files:"
echo "$CHANGED_COMPOSE"
echo "$CHANGED_COMPOSE" | while read -r compose; do
echo ">> Linting $compose ..."
npx dclint "$compose"
done
- name: ntfy Failed
uses: niniyas/ntfy-action@master
if: failure()
with:
url: '${{ secrets.NTFY_URL }}'
topic: '${{ secrets.NTFY_TOPIC }}'
title: 'Test Failure - Infrastructure'
priority: 4
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,failed
details: 'Docker linting on Push for Infrastructure has failed!'
icon: 'https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png'
actions: '[{"action": "view", "label": "Open Gitea", "url": "https://gitea.alexlebens.dev/alexlebens/infrastructure/actions?workflow=lint-test-docker-push.yaml", "clear": true}]'
image: true

View File

@@ -1,141 +0,0 @@
name: lint-test-docker
on:
pull_request:
branches:
- main
paths:
- 'hosts/**'
push:
branches:
- main
paths:
- 'hosts/**'
env:
BASE_BRANCH: "origin/${{ gitea.base_ref }}"
jobs:
lint-docker-compose:
runs-on: ubuntu-js
steps:
- name: Checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Check Branch Exists
id: check-branch-exists
if: github.event_name == 'pull_request'
uses: GuillaumeFalourd/branch-exists@v1.1
with:
branch: "${{ github.base_ref }}"
- name: Report Branch Exists
id: branch-exists
if: github.event_name == 'push' || (steps.check-branch-exists.outputs.exists == 'true' && github.event_name == 'pull_request')
run: |
if [ "${{ github.event_name == 'push' }}" = "true" ]; then
echo ">> Action is from a push event, will continue with linting"
else
echo ">> Branch ${{ gitea.base_ref }} exists, will continue with linting"
fi
echo "----"
echo "exists=true" >> $GITEA_OUTPUT
- name: Set up Node.js
if: steps.branch-exists.outputs.exists == 'true'
uses: actions/setup-node@v6
with:
node-version: '24'
- name: Check Directories for Changes
id: check-dir-changes
if: steps.branch-exists.outputs.exists == 'true'
run: |
CHANGED_COMPOSE=()
echo ">> Target branch for diff is: ${BASE_BRANCH}"
if [ "${{ github.event_name }}" == "pull_request" ]; then
echo ""
echo ">> Checking for changes in a pull request ..."
GIT_DIFF=$(git diff --name-only "${BASE_BRANCH}" | xargs -I {} dirname {} | sort -u)
else
echo ""
echo ">> Checking for changes from a push ..."
GIT_DIFF=$(git diff --name-only ${{ gitea.event.before }}..HEAD | xargs -I {} dirname {} | sort -u)
fi
if [ -n "${GIT_DIFF}" ]; then
echo ""
echo ">> Changes detected:"
echo "$GIT_DIFF"
for path in $GIT_DIFF; do
if echo "$path" | grep -q -E "hosts/[^/]+/[^/]+"; then
echo ""
echo ">> Adding path: $path"
CHANGED_COMPOSE+=$(echo "$path")
CHANGED_COMPOSE+=$(echo " ")
fi
done
else
echo ""
echo ">> No changes detected"
fi
if [ -n "${CHANGED_COMPOSE}" ]; then
echo ""
echo ">> Compose to Lint:"
echo "$(echo "${CHANGED_COMPOSE}" | sort -u)"
echo "----"
echo "changes-detected=true" >> $GITEA_OUTPUT
echo "compose-dir<<EOF" >> $GITEA_OUTPUT
echo "$(echo "${CHANGED_COMPOSE}" | sort -u)" >> $GITEA_OUTPUT
echo "EOF" >> $GITEA_OUTPUT
else
echo ""
echo ">> Did not find any docker compose files to lint"
echo "----"
echo "changes-detected=false" >> $GITEA_OUTPUT
fi
- name: Lint Docker Compose
if: steps.check-dir-changes.outputs.changes-detected == 'true'
env:
CHANGED_COMPOSE: ${{ steps.check-dir-changes.outputs.compose-dir }}
run: |
echo ">> Running dclint on changed compose files:"
echo "$CHANGED_COMPOSE"
for compose in $CHANGED_COMPOSE; do
echo ">> Linting $compose ..."
npx dclint $compose
done
- name: ntfy Failed
uses: niniyas/ntfy-action@master
if: failure()
with:
url: '${{ secrets.NTFY_URL }}'
topic: '${{ secrets.NTFY_TOPIC }}'
title: 'Test Failure - Infrastructure'
priority: 3
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,failed
details: 'Docker linting on Pull Request for Infrastructure has failed!'
icon: 'https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png'
actions: '[{"action": "view", "label": "Open Gitea", "url": "https://gitea.alexlebens.dev/alexlebens/infrastructure/actions?workflow=lint-test-docker-pull.yaml", "clear": true}]'
image: true

View File

@@ -0,0 +1,89 @@
name: lint-test-helm
on:
pull_request:
branches:
- main
paths:
- 'clusters/**'
jobs:
helm-lint:
runs-on: ubuntu-js
steps:
- name: Checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Check Branch Exists
id: check-branch-exists
uses: GuillaumeFalourd/branch-exists@v1.1
with:
branch: ${{ github.base_ref }}
- name: Branch Does Not Exist
if: steps.check-branch-exists.outputs.exists == 'false'
run: echo "Branch ${{ github.base_ref }} was not found, likely already merged"
- name: Set up Helm
if: steps.check-branch-exists.outputs.exists == 'true'
uses: azure/setup-helm@v4
with:
token: ${{ secrets.GITEA_TOKEN }}
version: v3.19.2
- name: Lint Helm Chart
if: steps.check-branch-exists.outputs.exists == 'true'
run: |
set -e # Exit immediately if a command exits with a non-zero status.
TARGET_BRANCH="origin/${{ github.base_ref }}"
echo ">> Target branch for diff is: $TARGET_BRANCH"
CHANGED_FILES=$(git diff --name-only "$TARGET_BRANCH" -- 'clusters/**')
echo ">> Found changed files:"
echo "$CHANGED_FILES"
# For each changed file, find its parent chart directory (the one with Chart.yaml).
# Then, create a unique list of those directories.
CHANGED_CHARTS=$(echo "$CHANGED_FILES" | while read -r file; do
dir=$(dirname "$file")
while [[ "$dir" != "." && ! -f "$dir/Chart.yaml" ]]; do
dir=$(dirname "$dir")
done
if [[ "$dir" != "." ]]; then
echo "$dir"
fi
done | sort -u)
if [[ -z "$CHANGED_CHARTS" ]]; then
echo ">> Could not determine changed charts. This could happen if only files outside a chart were changed."
exit 0
fi
echo ">> Running helm lint on changed charts:"
echo "$CHANGED_CHARTS"
echo "$CHANGED_CHARTS" | while read -r chart; do
echo ">> Building dependency for "$chart" ..."
helm dependency build "$chart"
echo ">> Linting $chart..."
helm lint "$chart"
done
- name: ntfy Failed
uses: niniyas/ntfy-action@master
if: failure()
with:
url: '${{ secrets.NTFY_URL }}'
topic: '${{ secrets.NTFY_TOPIC }}'
title: 'Test Failure - Infrastructure'
priority: 3
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,failed
details: 'Helm linting on Pull Request for Infrastructure has failed!'
icon: 'https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png'
actions: '[{"action": "view", "label": "Open Gitea", "url": "https://gitea.alexlebens.dev/alexlebens/infrastructure/actions?workflow=lint-test-helm-pull.yaml", "clear": true}]'
image: true

View File

@@ -0,0 +1,77 @@
name: lint-test-helm
on:
push:
branches:
- main
paths:
- 'clusters/**'
jobs:
helm-lint:
runs-on: ubuntu-js
steps:
- name: Checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@v4
with:
token: ${{ secrets.GITEA_TOKEN }}
version: v3.19.2
- name: Lint Helm Chart
run: |
set -e # Exit immediately if a command exits with a non-zero status.
TARGET_BRANCH="origin/main"
echo ">> Target branch for diff is: $TARGET_BRANCH"
CHANGED_FILES=$(git diff --name-only "$TARGET_BRANCH" -- 'clusters/**')
echo ">> Found changed files:"
echo "$CHANGED_FILES"
# For each changed file, find its parent chart directory (the one with Chart.yaml).
# Then, create a unique list of those directories.
CHANGED_CHARTS=$(echo "$CHANGED_FILES" | while read -r file; do
dir=$(dirname "$file")
while [[ "$dir" != "." && ! -f "$dir/Chart.yaml" ]]; do
dir=$(dirname "$dir")
done
if [[ "$dir" != "." ]]; then
echo "$dir"
fi
done | sort -u)
if [[ -z "$CHANGED_CHARTS" ]]; then
echo ">> Could not determine changed charts. This could happen if only files outside a chart were changed."
exit 0
fi
echo ">> Running helm lint on changed charts:"
echo "$CHANGED_CHARTS"
echo "$CHANGED_CHARTS" | while read -r chart; do
echo ">> Building dependency for "$chart" ..."
helm dependency build "$chart"
echo ">> Linting $chart..."
helm lint "$chart"
done
- name: ntfy Failed
uses: niniyas/ntfy-action@master
if: failure()
with:
url: '${{ secrets.NTFY_URL }}'
topic: '${{ secrets.NTFY_TOPIC }}'
title: 'Test Failure - Infrastructure'
priority: 4
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,failed
details: 'Helm linting on Push for Infrastructure has failed!'
icon: 'https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png'
actions: '[{"action": "view", "label": "Open Gitea", "url": "https://gitea.alexlebens.dev/alexlebens/infrastructure/actions?workflow=lint-test-helm-push.yaml", "clear": true}]'
image: true

View File

@@ -1,188 +0,0 @@
name: lint-test-helm
on:
pull_request:
branches:
- main
paths:
- 'clusters/cl01tl/helm/**'
push:
branches:
- main
paths:
- 'clusters/cl01tl/helm/**'
env:
CLUSTER: cl01tl
BASE_BRANCH: "origin/${{ gitea.base_ref }}"
jobs:
lint-helm:
runs-on: ubuntu-js
steps:
- name: Checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Check Branch Exists
id: check-branch-exists
if: github.event_name == 'pull_request'
uses: GuillaumeFalourd/branch-exists@v1.1
with:
branch: ${{ gitea.base_ref }}
- name: Report Branch Exists
id: branch-exists
if: github.event_name == 'push' || (steps.check-branch-exists.outputs.exists == 'true' && github.event_name == 'pull_request')
run: |
if [ "${{ github.event_name == 'push' }}" = "true" ]; then
echo ">> Action is from a push event, will continue with linting"
else
echo ">> Branch ${{ gitea.base_ref }} exists, will continue with linting"
fi
echo "----"
echo "exists=true" >> $GITEA_OUTPUT
- name: Set up Helm
if: steps.branch-exists.outputs.exists == 'true'
uses: azure/setup-helm@v4
with:
token: ${{ secrets.GITEA_TOKEN }}
version: v3.19.2
cache: true
- name: Check Directories for Changes
id: check-dir-changes
if: steps.branch-exists.outputs.exists == 'true'
run: |
CHANGED_CHARTS=()
echo ">> Target branch for diff is: ${BASE_BRANCH}"
if [ "${{ github.event_name }}" == "pull_request" ]; then
echo ""
echo ">> Checking for changes in a pull request ..."
GIT_DIFF=$(git diff --name-only "${BASE_BRANCH}" | xargs -I {} dirname {} | sort -u)
else
echo ""
echo ">> Checking for changes from a push ..."
GIT_DIFF=$(git diff --name-only ${{ gitea.event.before }}..HEAD | xargs -I {} dirname {} | sort -u)
fi
if [ -n "${GIT_DIFF}" ]; then
echo ""
echo ">> Changes detected:"
echo "$GIT_DIFF"
for path in $GIT_DIFF; do
if echo "$path" | grep -q -E "clusters/[^/]+/helm/[^/]+"; then
echo ""
echo ">> Adding path: $path"
CHANGED_CHARTS+=$(echo "$path" | awk -F '/' '{print $4}')
CHANGED_CHARTS+=$(echo "\n")
fi
done
else
echo ""
echo ">> No changes detected"
fi
if [ -n "${CHANGED_CHARTS}" ]; then
echo ""
echo ">> Chart to Lint:"
echo "$(echo "${CHANGED_CHARTS}" | sort -u)"
echo "----"
echo "changes-detected=true" >> $GITEA_OUTPUT
echo "chart-dir<<EOF" >> $GITEA_OUTPUT
echo "$(echo "${CHANGED_CHARTS}" | sort -u)" >> $GITEA_OUTPUT
echo "EOF" >> $GITEA_OUTPUT
else
echo ""
echo ">> Did not find any helm charts files to lint"
echo "----"
echo "changes-detected=false" >> $GITEA_OUTPUT
fi
- name: Add Repositories
if: steps.check-dir-changes.outputs.changes-detected == 'true'
env:
CHANGED_CHARTS: ${{ steps.check-dir-changes.outputs.chart-dir }}
run: |
echo ">> Adding repositories for chart dependencies ..."
for dir in ${CHANGED_CHARTS}; do
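# Turn the chart's dependency table into "helm repo add <name> <url>" commands: drop the header and trailing line, then build each command from the name and repository columns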
helm dependency list --max-col-width 120 clusters/${CLUSTER}/helm/$dir 2> /dev/null \
| tail +2 | head -n -1 \
| awk '{ print "helm repo add " $1 " " $3 }' \
| while read cmd; do
if [[ "$cmd" == "*oci://*" ]]; then
echo ">> Ignoring OCI repo"
else
echo ">> Command: $cmd"
echo "$cmd" | sh;
fi
done || true
done
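# "helm repo update" errors out when no repositories are configured, so only refresh when the list has entries beyond its header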
if helm repo list | tail +2 | read -r; then
echo ""
echo ">> Update repository cache ..."
helm repo update
fi
echo "----"
- name: Lint Helm Chart
if: steps.check-dir-changes.outputs.changes-detected == 'true'
env:
CHANGED_CHARTS: ${{ steps.check-dir-changes.outputs.chart-dir }}
run: |
echo ">> Running linting on changed charts ..."
for dir in ${CHANGED_CHARTS}; do
chart_path=clusters/${CLUSTER}/helm/$dir
chart_name=$(basename "$chart_path")
if [ -f "$chart_path/Chart.yaml" ]; then
cd $chart_path
echo ""
echo ">> Building helm dependency ..."
helm dependency build --skip-refresh
echo ""
echo ">> Linting helm ..."
helm lint --namespace "$chart_name"
else
echo ""
echo ">> Directory $chart_path does not contain a Chart.yaml. Skipping ..."
echo ""
fi
done
- name: ntfy Failed
uses: niniyas/ntfy-action@master
if: failure()
with:
url: '${{ secrets.NTFY_URL }}'
topic: '${{ secrets.NTFY_TOPIC }}'
title: 'Test Failure - Infrastructure'
priority: 3
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,failed
details: 'Helm linting on Pull Request for Infrastructure has failed!'
icon: 'https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png'
actions: '[{"action": "view", "label": "Open Gitea", "url": "https://gitea.alexlebens.dev/alexlebens/infrastructure/actions?workflow=lint-test-helm-pull.yaml", "clear": true}]'
image: true

View File

@@ -1,440 +0,0 @@
name: render-manifests-automerge
on:
pull_request:
branches:
- main
paths:
- 'clusters/cl01tl/helm/**'
types:
- closed
env:
CLUSTER: cl01tl
BASE_BRANCH: manifests
BRANCH_NAME_BASE: auto/update-manifests-automerge
MAIN_DIR: /workspace/alexlebens/infrastructure/infrastructure
MANIFEST_DIR: /workspace/alexlebens/infrastructure/infrastructure-manifests
jobs:
render-manifests-automerge:
runs-on: ubuntu-js
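# Only run for pull requests that were actually merged and carry the "automerge" label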
if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'automerge')) }}
steps:
- name: Checkout Main
uses: actions/checkout@v6
with:
path: infrastructure
fetch-depth: 0
- name: Checkout Manifests
uses: actions/checkout@v6
with:
ref: manifests
path: infrastructure-manifests
- name: Set up Helm
uses: azure/setup-helm@v4
with:
token: ${{ secrets.GITEA_TOKEN }}
version: v3.17.2 # Pending https://github.com/helm/helm/pull/30743
cache: true
- name: Configure Kubeconfig
uses: azure/k8s-set-context@v4
with:
method: kubeconfig
kubeconfig: ${{ secrets.KUBECONFIG }}
- name: Prepare Manifest Branch
id: prepare-manifest-branch
run: |
cd ${MANIFEST_DIR}
BRANCH_NAME="${BRANCH_NAME_BASE}-$(date +%Y%m%d%H%M%S)"
echo ">> Configure git to use gitea-bot as user ..."
git config user.name "gitea-bot"
git config user.email "gitea-bot@alexlebens.net"
echo ">> Creating branch ..."
git checkout -b $BRANCH_NAME
echo "----"
echo "BRANCH_NAME=${BRANCH_NAME}" >> $GITEA_OUTPUT
- name: Check which Directories have Changes
id: check-dir-changes
run: |
cd ${MAIN_DIR}
RENDER_DIR=()
echo ">> Checking for changes from HEAD^..HEAD ..."
GIT_DIFF=$(git diff --name-only HEAD^..HEAD | xargs -I {} dirname {} | sort -u | grep -E "clusters/[^/]+/helm/[^/]+")
if [ -n "${GIT_DIFF}" ]; then
echo ">> Changes detected:"
echo "$GIT_DIFF"
for path in $GIT_DIFF; do
RENDER_DIR+=$(echo "$path" | awk -F '/' '{print $4}')
RENDER_DIR+=$(echo " ")
done
else
echo ">> No changes detected"
fi
if [ -n "${RENDER_DIR}" ]; then
echo ">> Directories to Render:"
echo "$(echo "${RENDER_DIR}" | sort -u)"
echo "----"
echo "changes-detected=true" >> $GITEA_OUTPUT
echo "render-dir<<EOF" >> $GITEA_OUTPUT
echo "$(echo "${RENDER_DIR}" | sort -u)" >> $GITEA_OUTPUT
echo "EOF" >> $GITEA_OUTPUT
else
echo "changes-detected=false" >> $GITEA_OUTPUT
fi
- name: Add Repositories
if: steps.check-dir-changes.outputs.changes-detected == 'true'
env:
RENDER_DIR: ${{ steps.check-dir-changes.outputs.render-dir }}
run: |
cd ${MAIN_DIR}
echo ">> Adding repositories for chart dependencies ..."
for dir in ${RENDER_DIR}; do
helm dependency list --max-col-width 120 ${MAIN_DIR}/clusters/${CLUSTER}/helm/$dir 2> /dev/null \
| tail +2 | head -n -1 \
| awk '{ print "helm repo add " $1 " " $3 }' \
| while read cmd; do
if [[ "$cmd" == "*oci://*" ]]; then
echo ">> Ignoring OCI repo"
else
echo "$cmd" | sh;
fi
done || true
done
if helm repo list | tail +2 | read -r; then
echo ">> Update repository cache ..."
helm repo update
fi
echo "----"
- name: Remove Changed Manifest Files
if: steps.check-dir-changes.outputs.changes-detected == 'true'
env:
RENDER_DIR: ${{ steps.check-dir-changes.outputs.render-dir }}
run: |
cd ${MANIFEST_DIR}
echo ">> Remove manfiest files and rebuild from source ..."
for dir in ${RENDER_DIR}; do
chart_path=${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$dir
echo "$chart_path"
rm -rf $chart_path/*
done
echo "----"
- name: Render Helm Manifests
id: render-manifests
if: steps.check-dir-changes.outputs.changes-detected == 'true'
env:
RENDER_DIR: ${{ steps.check-dir-changes.outputs.render-dir }}
run: |
cd ${MAIN_DIR}
echo ">> Rendering Manifests ..."
for dir in ${RENDER_DIR}; do
chart_path=${MAIN_DIR}/clusters/${CLUSTER}/helm/$dir
chart_name=$(basename "$chart_path")
echo ""
echo ""
echo ">> Rendering chart: $chart_name"
echo ">> Chart path $chart_path"
if [ -f "$chart_path/Chart.yaml" ]; then
OUTPUT_FOLDER="${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name/"
TEMPLATE=""
mkdir -p ${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name
cd $chart_path
echo ""
echo ">> Updating helm dependency ..."
helm dependency update --skip-refresh
echo ""
echo ">> Building helm dependency ..."
helm dependency build --skip-refresh
echo ""
echo ">> Linting helm ..."
helm lint --namespace "$chart_name"
echo ""
echo ">> Rendering templates ..."
case "$chart_name" in
"stack")
echo ""
echo ">> Special Rendering for stack into argocd namespace ..."
TEMPLATE=$(helm template $chart_name ./ --namespace argocd --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
;;
"cilium" | "coredns" | "metrics-server" |"prometheus-operator-crds")
echo ""
echo ">> Special Rendering for $chart_name into kube-system namespace ..."
TEMPLATE=$(helm template $chart_name ./ --namespace kube-system --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
;;
*)
echo ""
echo ">> Standard Rendering for $chart_name ..."
TEMPLATE=$(helm template "$chart_name" ./ --namespace "$chart_name" --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
;;
esac
echo ""
echo ">> Formating rendered template ..."
echo "$TEMPLATE" | yq '... comments=""' | yq 'select(. != null)' | yq -s '"'"$OUTPUT_FOLDER"'" + .kind + "-" + .metadata.name + ".yaml"'
# Strip comments again to ensure formatting correctness
for file in "$OUTPUT_FOLDER"/*; do
yq -i '... comments=""' $file
done
echo ""
echo ">> Manifests for $chart_name rendered to $OUTPUT_FOLDER"
ls $OUTPUT_FOLDER
echo ""
else
echo ""
echo ">> Directory $chart_path does not contain a Chart.yaml. Skipping ..."
echo ""
fi
done
echo "----"
- name: Check for Changes
id: check-changes
if: steps.check-dir-changes.outputs.changes-detected == 'true'
run: |
cd ${MANIFEST_DIR}
GIT_CHANGES=$(git status --porcelain)
if [ -n "$GIT_CHANGES" ]; then
echo ">> Changes detected"
git status --porcelain
echo "changes-detected=true" >> $GITEA_OUTPUT
else
echo ">> No changes detected, skipping PR creation"
fi
echo "----"
- name: Commit and Push Changes
id: commit-push
if: steps.check-changes.outputs.changes-detected == 'true'
env:
BRANCH_NAME: ${{ steps.prepare-manifest-branch.outputs.BRANCH_NAME }}
run: |
cd ${MANIFEST_DIR}
echo ">> Commiting changes to ${BRANCH_NAME} ..."
git add .
git commit -m "chore: Update manifests after automerge"
REPO_URL="${{ secrets.REPO_URL }}/${{ gitea.repository }}"
echo ">> Pushing changes to $REPO_URL ..."
git push -u "https://oauth2:${{ secrets.BOT_TOKEN }}@$(echo $REPO_URL | sed -e 's|https://||')" ${BRANCH_NAME}
echo "----"
echo "push=true" >> $GITEA_OUTPUT
- name: Create Pull Request
id: create-pull-request
if: steps.commit-push.outputs.push == 'true'
env:
GITEA_TOKEN: ${{ secrets.BOT_TOKEN }}
GITEA_URL: ${{ secrets.REPO_URL }}
BRANCH_NAME: ${{ steps.prepare-manifest-branch.outputs.BRANCH_NAME }}
run: |
cd ${MANIFEST_DIR}
API_ENDPOINT="${GITEA_URL}/api/v1/repos/${{ gitea.repository }}/pulls"
PAYLOAD=$( jq -n \
--arg head "${BRANCH_NAME}" \
--arg base "${BASE_BRANCH}" \
--arg title "Automated Manifest Update - Automerge" \
--arg body "This PR contains newly rendered Kubernetes manifests automatically generated by the CI workflow. This is expected to be automerged." \
'{head: $head, base: $base, title: $title, body: $body}' )
echo ">> Creating PR from branch ${BRANCH_NAME} into ${BASE_BRANCH}"
echo ">> With Endpoint of:"
echo "$API_ENDPOINT"
echo ">> With Payload of:"
echo "$PAYLOAD"
HTTP_STATUS=$(
curl -X POST \
--silent \
--write-out '%{http_code}' \
--output response_body.json \
--dump-header response_headers.txt \
--data "$PAYLOAD" \
-H "Authorization: token ${GITEA_TOKEN}" \
-H "Content-Type: application/json" \
"$API_ENDPOINT" 2> response_errors.txt
)
echo ">> HTTP Status Code: $HTTP_STATUS"
echo ">> Response Output ..."
echo "----"
cat response_body.json
echo "----"
cat response_headers.txt
echo "----"
cat response_errors.txt
echo "----"
if [ "$HTTP_STATUS" == "201" ]; then
echo ">> Pull Request created successfully!"
PR_URL=$(cat response_body.json | jq -r .html_url)
echo ">> Pull Request URL: $PR_URL"
echo "pull-request-url=${PR_URL}" >> $GITEA_OUTPUT
PR_NUMBER=$(cat response_body.json | jq -r .number)
echo ">> Pull Request Number: $PR_NUMBER"
echo "pull-request-number=${PR_NUMBER}" >> $GITEA_OUTPUT
echo "pull-request-operation=created" >> $GITEA_OUTPUT
elif [ "$HTTP_STATUS" == "422" ]; then
echo ">> Failed to create PR (HTTP 422: Unprocessable Entity), PR may already exist"
elif [ "$HTTP_STATUS" == "409" ]; then
echo ">> Failed to create PR (HTTP 409: Conflict), PR already exists"
else
echo ">> Failed to create PR, HTTP status code: $HTTP_STATUS"
exit 1
fi
echo "----"
- name: Merge Changes
id: merge-changes
if: steps.commit-push.outputs.push == 'true'
env:
GITEA_TOKEN: ${{ secrets.BOT_TOKEN }}
GITEA_URL: ${{ secrets.REPO_URL }}
BRANCH_NAME: ${{ steps.prepare-manifest-branch.outputs.BRANCH_NAME }}
PR_NUMBER: ${{ steps.create-pull-request.outputs.pull-request-number }}
run: |
cd ${MANIFEST_DIR}
API_ENDPOINT="${GITEA_URL}/api/v1/repos/${{ gitea.repository }}/pulls/${PR_NUMBER}/merge"
PAYLOAD=$( jq -n \
--arg Do "merge" \
'{Do: $Do}' )
echo ">> Merging PR with ID: ${PR_NUMBER}"
echo ">> With Endpoint of:"
echo "$API_ENDPOINT"
echo ">> With Payload of:"
echo "$PAYLOAD"
HTTP_STATUS=$(
curl -X POST \
--silent \
--write-out '%{http_code}' \
--output response_body.json \
--dump-header response_headers.txt \
--data "$PAYLOAD" \
-H "Authorization: token ${GITEA_TOKEN}" \
-H "Content-Type: application/json" \
"$API_ENDPOINT" 2> response_errors.txt
)
echo ">> HTTP Status Code: $HTTP_STATUS"
echo ">> Response Output ..."
echo "----"
cat response_body.json
echo "----"
cat response_headers.txt
echo "----"
cat response_errors.txt
echo "----"
if [ "$HTTP_STATUS" == "200" ]; then
echo ">> Pull Request merged successfully!"
echo "pull-request-operation=merged" >> $GITEA_OUTPUT
else
echo ">> Failed to create PR, HTTP status code: $HTTP_STATUS"
echo "pull-request-operation=failed" >> $GITEA_OUTPUT
exit 1
fi
echo "----"
- name: Cleanup Branch
if: failure()
env:
BRANCH_NAME: ${{ steps.prepare-manifest-branch.outputs.BRANCH_NAME }}
run: |
cd ${MANIFEST_DIR}
echo ">> Removing branch: ${BRANCH_NAME}"
git push origin --delete ${BRANCH_NAME}
echo "----"
- name: ntfy Merged
uses: niniyas/ntfy-action@master
if: steps.merge-changes.outputs.pull-request-operation == 'merged'
with:
url: "${{ secrets.NTFY_URL }}"
topic: "${{ secrets.NTFY_TOPIC }}"
title: "Manifest Render PR Merged - Infrastructure"
priority: 3
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,successfully,completed
details: "Automerge Manifest rendering for Infrastructure!"
icon: "https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png"
actions: '[{"action": "view", "label": "Open Gitea", "url": "${{ steps.create-pull-request.outputs.pull-request-url }}", "clear": true}]'
- name: ntfy Failed
uses: niniyas/ntfy-action@master
if: failure()
with:
url: "${{ secrets.NTFY_URL }}"
topic: "${{ secrets.NTFY_TOPIC }}"
title: "Manifest Render Failure - Infrastructure"
priority: 4
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,failed
details: "Automerge Manifest rendering for Infrastructure has failed!"
icon: "https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png"
actions: '[{"action": "view", "label": "Open Gitea", "url": "https://gitea.alexlebens.dev/alexlebens/infrastructure/actions?workflow=render-manifests-automerge.yaml", "clear": true}]'
image: true

View File

@@ -1,425 +0,0 @@
name: render-manifests-merge
on:
pull_request:
branches:
- main
paths:
- 'clusters/cl01tl/helm/**'
types:
- closed
env:
CLUSTER: cl01tl
BASE_BRANCH: manifests
BRANCH_NAME: auto/update-manifests
ASSIGNEE: alexlebens
MAIN_DIR: /workspace/alexlebens/infrastructure/infrastructure
MANIFEST_DIR: /workspace/alexlebens/infrastructure/infrastructure-manifests
jobs:
render-manifests-merge:
runs-on: ubuntu-js
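# Only run for merged pull requests without the "automerge" label; labeled PRs are handled by render-manifests-automerge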
if: ${{ (github.event.pull_request.merged == true) && !(contains(github.event.pull_request.labels.*.name, 'automerge')) }}
steps:
- name: Checkout Main
uses: actions/checkout@v6
with:
path: infrastructure
fetch-depth: 0
- name: Checkout Manifests
uses: actions/checkout@v6
with:
ref: manifests
path: infrastructure-manifests
- name: Set up Helm
uses: azure/setup-helm@v4
with:
token: ${{ secrets.GITEA_TOKEN }}
version: v3.17.2 # Pending https://github.com/helm/helm/pull/30743
cache: true
- name: Configure Kubeconfig
uses: azure/k8s-set-context@v4
with:
method: kubeconfig
kubeconfig: ${{ secrets.KUBECONFIG }}
- name: Prepare Manifest Branch
run: |
cd ${MANIFEST_DIR}
echo ">> Configure git to use gitea-bot as user ..."
git config user.name "gitea-bot"
git config user.email "gitea-bot@alexlebens.net"
echo ">> Checking if PR branch exists ..."
if [[ $(git ls-remote --heads origin "${BRANCH_NAME}" | wc -l) -gt 0 ]]; then
echo ">> Branch '${BRANCH_NAME}' exists, pulling changes ..."
git fetch origin "${BRANCH_NAME}"
git checkout "${BRANCH_NAME}"
git pull --rebase
else
echo ">> Branch '${BRANCH_NAME}' does not exist, creating ..."
git checkout -b $BRANCH_NAME
fi
echo "----"
- name: Check which Directories have Changes
id: check-dir-changes
run: |
cd ${MAIN_DIR}
RENDER_DIR=()
echo ">> Checking for changes from HEAD^..HEAD ..."
GIT_DIFF=$(git diff --name-only HEAD^..HEAD | xargs -I {} dirname {} | sort -u | grep -E "clusters/[^/]+/helm/[^/]+")
if [ -n "${GIT_DIFF}" ]; then
echo ">> Changes detected:"
echo "$GIT_DIFF"
for path in $GIT_DIFF; do
RENDER_DIR+=$(echo "$path" | awk -F '/' '{print $4}')
RENDER_DIR+=$(echo " ")
done
else
echo ">> No changes detected"
fi
if [ -n "${RENDER_DIR}" ]; then
echo ">> Directories to Render:"
echo "$(echo "${RENDER_DIR}" | sort -u)"
echo "----"
echo "changes-detected=true" >> $GITEA_OUTPUT
echo "render-dir<<EOF" >> $GITEA_OUTPUT
echo "$(echo "${RENDER_DIR}" | sort -u)" >> $GITEA_OUTPUT
echo "EOF" >> $GITEA_OUTPUT
else
echo "changes-detected=false" >> $GITEA_OUTPUT
fi
- name: Add Repositories
if: steps.check-dir-changes.outputs.changes-detected == 'true'
env:
RENDER_DIR: ${{ steps.check-dir-changes.outputs.render-dir }}
run: |
cd ${MAIN_DIR}
echo ">> Adding repositories for chart dependencies ..."
for dir in ${RENDER_DIR}; do
helm dependency list --max-col-width 120 ${MAIN_DIR}/clusters/${CLUSTER}/helm/$dir 2> /dev/null \
| tail +2 | head -n -1 \
| awk '{ print "helm repo add " $1 " " $3 }' \
| while read cmd; do
if [[ "$cmd" == "*oci://*" ]]; then
echo ">> Ignoring OCI repo"
else
echo "$cmd" | sh;
fi
done || true
done
if helm repo list | tail +2 | read -r; then
echo ">> Update repository cache ..."
helm repo update
fi
echo "----"
- name: Remove Changed Manifest Files
if: steps.check-dir-changes.outputs.changes-detected == 'true'
env:
RENDER_DIR: ${{ steps.check-dir-changes.outputs.render-dir }}
run: |
cd ${MANIFEST_DIR}
echo ">> Remove manfiest files and rebuild from source ..."
for dir in ${RENDER_DIR}; do
chart_path=${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$dir
echo "$chart_path"
rm -rf $chart_path/*
done
echo "----"
- name: Render Helm Manifests
id: render-manifests
if: steps.check-dir-changes.outputs.changes-detected == 'true'
env:
RENDER_DIR: ${{ steps.check-dir-changes.outputs.render-dir }}
run: |
cd ${MAIN_DIR}
echo ">> Rendering Manifests ..."
for dir in ${RENDER_DIR}; do
chart_path=${MAIN_DIR}/clusters/${CLUSTER}/helm/$dir
chart_name=$(basename "$chart_path")
echo ""
echo ""
echo ">> Rendering chart: $chart_name"
echo ">> Chart path $chart_path"
if [ -f "$chart_path/Chart.yaml" ]; then
OUTPUT_FOLDER="${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name/"
TEMPLATE=""
mkdir -p ${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name
cd $chart_path
echo ""
echo ">> Updating helm dependency ..."
helm dependency update --skip-refresh
echo ""
echo ">> Building helm dependency ..."
helm dependency build --skip-refresh
echo ""
echo ">> Linting helm ..."
helm lint --namespace "$chart_name"
echo ""
echo ">> Rendering templates ..."
case "$chart_name" in
"stack")
echo ""
echo ">> Special Rendering for stack into argocd namespace ..."
TEMPLATE=$(helm template $chart_name ./ --namespace argocd --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
;;
"cilium" | "coredns" | "metrics-server" |"prometheus-operator-crds")
echo ""
echo ">> Special Rendering for $chart_name into kube-system namespace ..."
TEMPLATE=$(helm template $chart_name ./ --namespace kube-system --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
;;
*)
echo ""
echo ">> Standard Rendering for $chart_name ..."
TEMPLATE=$(helm template "$chart_name" ./ --namespace "$chart_name" --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
;;
esac
echo ""
echo ">> Formating rendered template ..."
echo "$TEMPLATE" | yq '... comments=""' | yq 'select(. != null)' | yq -s '"'"$OUTPUT_FOLDER"'" + .kind + "-" + .metadata.name + ".yaml"'
# Strip comments again to ensure formatting correctness
for file in "$OUTPUT_FOLDER"/*; do
yq -i '... comments=""' $file
done
echo ""
echo ">> Manifests for $chart_name rendered to $OUTPUT_FOLDER"
ls $OUTPUT_FOLDER
echo ""
else
echo ""
echo ">> Directory $chart_path does not contain a Chart.yaml. Skipping ..."
echo ""
fi
done
echo "----"
- name: Check for Changes
id: check-changes
if: steps.check-dir-changes.outputs.changes-detected == 'true'
run: |
cd ${MANIFEST_DIR}
GIT_CHANGES=$(git status --porcelain)
if [ -n "$GIT_CHANGES" ]; then
echo ">> Changes detected"
git status --porcelain
echo "changes-detected=true" >> $GITEA_OUTPUT
else
echo ">> No changes detected, skipping PR creation"
fi
echo "----"
- name: Commit and Push Changes
id: commit-push
if: steps.check-changes.outputs.changes-detected == 'true'
run: |
cd ${MANIFEST_DIR}
echo ">> Commiting changes to ${BRANCH_NAME} ..."
git add .
git commit -m "chore: Update manifests after change"
REPO_URL="${{ secrets.REPO_URL }}/${{ gitea.repository }}"
echo ">> Pushing changes to $REPO_URL ..."
git push -u "https://oauth2:${{ secrets.BOT_TOKEN }}@$(echo $REPO_URL | sed -e 's|https://||')" ${BRANCH_NAME}
echo "----"
echo "HEAD_BRANCH=${BRANCH_NAME}" >> $GITEA_OUTPUT
echo "push=true" >> $GITEA_OUTPUT
- name: Check for Pull Request
id: check-for-pull-request
if: steps.commit-push.outputs.push == 'true'
env:
GITEA_TOKEN: ${{ secrets.BOT_TOKEN }}
GITEA_URL: ${{ secrets.REPO_URL }}
HEAD_BRANCH: ${{ steps.commit-push.outputs.HEAD_BRANCH }}
run: |
cd ${MANIFEST_DIR}
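# Ask the Gitea list-pulls API for open PRs into ${BASE_BRANCH}; the first result is treated as the existing auto-update PR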
API_ENDPOINT="${GITEA_URL}/api/v1/repos/${{ gitea.repository }}/pulls?base_branch=${BASE_BRANCH}&state=open&page=1"
echo ">> Checking if PR from branch ${HEAD_BRANCH} into ${BASE_BRANCH}"
echo ">> With Endpoint of:"
echo "$API_ENDPOINT"
HTTP_STATUS=$(
curl -X GET \
--silent \
--write-out '%{http_code}' \
--output response_body.json \
--dump-header response_headers.txt \
-H "Authorization: token ${GITEA_TOKEN}" \
-H "Content-Type: application/json" \
"$API_ENDPOINT" 2> response_errors.txt
)
echo ">> HTTP Status Code: $HTTP_STATUS"
echo ">> Response Output ..."
echo "----"
cat response_body.json
echo "----"
cat response_headers.txt
echo "----"
cat response_errors.txt
echo "----"
if [ "$HTTP_STATUS" == "200" ] && [ "$(cat response_body.json | jq -r .[0].state)" == "open" ]; then
echo ">> Pull Request has been found open, will update"
PR_INDEX=$(cat response_body.json | jq -r .[0].number)
echo "pull-request-exists=${PR_INDEX}" >> $GITEA_OUTPUT
echo "pull-request-index=true" >> $GITEA_OUTPUT
elif [ "$HTTP_STATUS" == "200" ] && [ "$(cat response_body.json | jq -r .[0].state)" == "closed" ]; then
echo ">> Pull Request found, but was closed"
echo "pull-request-exists=false" >> $GITEA_OUTPUT
else
echo ">> Pull Request not found"
echo "pull-request-exists=false" >> $GITEA_OUTPUT
fi
echo "----"
- name: Create Pull Request
id: create-pull-request
if: steps.commit-push.outputs.push == 'true' && steps.check-for-pull-request.outputs.pull-request-exists == 'false'
env:
GITEA_TOKEN: ${{ secrets.BOT_TOKEN }}
GITEA_URL: ${{ secrets.REPO_URL }}
HEAD_BRANCH: ${{ steps.commit-push.outputs.HEAD_BRANCH }}
run: |
cd ${MANIFEST_DIR}
API_ENDPOINT="${GITEA_URL}/api/v1/repos/${{ gitea.repository }}/pulls"
PAYLOAD=$( jq -n \
--arg head "${HEAD_BRANCH}" \
--arg base "${BASE_BRANCH}" \
--arg assignee "${ASSIGNEE}" \
--arg title "Automated Manifest Update" \
--arg body "This PR contains newly rendered Kubernetes manifests automatically generated by the CI workflow." \
'{head: $head, base: $base, assignee: $assignee, title: $title, body: $body}' )
echo ">> Creating PR from branch ${HEAD_BRANCH} into ${BASE_BRANCH}"
echo ">> With Endpoint of:"
echo "$API_ENDPOINT"
echo ">> With Payload of:"
echo "$PAYLOAD"
HTTP_STATUS=$(
curl -X POST \
--silent \
--write-out '%{http_code}' \
--output response_body.json \
--dump-header response_headers.txt \
--data "$PAYLOAD" \
-H "Authorization: token ${GITEA_TOKEN}" \
-H "Content-Type: application/json" \
"$API_ENDPOINT" 2> response_errors.txt
)
echo ">> HTTP Status Code: $HTTP_STATUS"
echo ">> Response Output ..."
echo "----"
cat response_body.json
echo "----"
cat response_headers.txt
echo "----"
cat response_errors.txt
echo "----"
if [ "$HTTP_STATUS" == "201" ]; then
echo ">> Pull Request created successfully!"
PR_URL=$(cat response_body.json | jq -r .html_url)
echo "pull-request-url=${PR_URL}" >> $GITEA_OUTPUT
PR_ID=$(cat response_body.json | jq -r .id)
echo "pull-request-id=${PR_ID}" >> $GITEA_OUTPUT
echo "pull-request-operation=created" >> $GITEA_OUTPUT
elif [ "$HTTP_STATUS" == "422" ]; then
echo ">> Failed to create PR (HTTP 422: Unprocessable Entity), PR may already exist"
elif [ "$HTTP_STATUS" == "409" ]; then
echo ">> Failed to create PR (HTTP 409: Conflict), PR already exists"
else
echo ">> Failed to create PR, HTTP status code: $HTTP_STATUS"
exit 1
fi
echo "----"
- name: ntfy Created
uses: niniyas/ntfy-action@master
if: steps.create-pull-request.outputs.pull-request-operation == 'created'
with:
url: "${{ secrets.NTFY_URL }}"
topic: "${{ secrets.NTFY_TOPIC }}"
title: "Manifest Render PR Created - Infrastructure"
priority: 3
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,successfully,completed
details: "Manifest rendering for Infrastructure has created a new Pull Request with ID: ${{ steps.create-pull-request.outputs.pull-request-id }}!"
icon: "https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png"
actions: '[{"action": "view", "label": "Open Gitea", "url": "${{ steps.create-pull-request.outputs.pull-request-url }}", "clear": true}]'
- name: ntfy Failed
uses: niniyas/ntfy-action@master
if: failure()
with:
url: "${{ secrets.NTFY_URL }}"
topic: "${{ secrets.NTFY_TOPIC }}"
title: "Manifest Render Failure - Infrastructure"
priority: 4
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,failed
details: "Manifest rendering for Infrastructure has failed!"
icon: "https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png"
actions: '[{"action": "view", "label": "Open Gitea", "url": "https://gitea.alexlebens.dev/alexlebens/infrastructure/actions?workflow=render-manifests.yaml", "clear": true}]'
image: true

View File

@@ -1,423 +0,0 @@
name: render-manifests-push
on:
push:
branches:
- main
paths:
- 'clusters/cl01tl/helm/**'
env:
CLUSTER: cl01tl
BASE_BRANCH: manifests
BRANCH_NAME: auto/update-manifests
ASSIGNEE: alexlebens
MAIN_DIR: /workspace/alexlebens/infrastructure/infrastructure
MANIFEST_DIR: /workspace/alexlebens/infrastructure/infrastructure-manifests
jobs:
render-manifests-push:
runs-on: ubuntu-js
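# Skip pushes authored by renovate-bot; those changes are expected to be rendered via the merge-triggered workflows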
if: gitea.event.commits[0].author.username != 'renovate-bot'
steps:
- name: Checkout Main
uses: actions/checkout@v6
with:
path: infrastructure
fetch-depth: 0
- name: Checkout Manifests
uses: actions/checkout@v6
with:
ref: manifests
path: infrastructure-manifests
- name: Set up Helm
uses: azure/setup-helm@v4
with:
token: ${{ secrets.GITEA_TOKEN }}
version: v3.17.2 # Pending https://github.com/helm/helm/pull/30743
cache: true
- name: Configure Kubeconfig
uses: azure/k8s-set-context@v4
with:
method: kubeconfig
kubeconfig: ${{ secrets.KUBECONFIG }}
- name: Prepare Manifest Branch
run: |
cd ${MANIFEST_DIR}
echo ">> Configure git to use gitea-bot as user ..."
git config user.name "gitea-bot"
git config user.email "gitea-bot@alexlebens.net"
echo ">> Checking if PR branch exists ..."
if [[ $(git ls-remote --heads origin "${BRANCH_NAME}" | wc -l) -gt 0 ]]; then
echo ">> Branch '${BRANCH_NAME}' exists, pulling changes ..."
git fetch origin "${BRANCH_NAME}"
git checkout "${BRANCH_NAME}"
git pull --rebase
else
echo ">> Branch '${BRANCH_NAME}' does not exist, creating ..."
git checkout -b $BRANCH_NAME
fi
echo "----"
- name: Check which Directories have Changes
id: check-dir-changes
run: |
cd ${MAIN_DIR}
RENDER_DIR=()
echo ">> Checking for changes ..."
GIT_DIFF=$(git diff --name-only ${{ gitea.event.before }}..HEAD | xargs -I {} dirname {} | sort -u | grep -E "clusters/[^/]+/helm/[^/]+")
if [ -n "${GIT_DIFF}" ]; then
echo ">> Changes detected:"
echo "$GIT_DIFF"
for path in $GIT_DIFF; do
RENDER_DIR+=$(echo "$path" | awk -F '/' '{print $4}')
RENDER_DIR+=$(echo " ")
done
else
echo ">> No changes detected"
fi
if [ -n "${RENDER_DIR}" ]; then
echo ">> Directories to Render:"
echo "$(echo "${RENDER_DIR}" | sort -u)"
echo "----"
echo "changes-detected=true" >> $GITEA_OUTPUT
echo "render-dir<<EOF" >> $GITEA_OUTPUT
echo "$(echo "${RENDER_DIR}" | sort -u)" >> $GITEA_OUTPUT
echo "EOF" >> $GITEA_OUTPUT
else
echo "changes-detected=false" >> $GITEA_OUTPUT
fi
- name: Add Repositories
if: steps.check-dir-changes.outputs.changes-detected == 'true'
env:
RENDER_DIR: ${{ steps.check-dir-changes.outputs.render-dir }}
run: |
cd ${MAIN_DIR}
echo ">> Adding repositories for chart dependencies ..."
for dir in ${RENDER_DIR}; do
helm dependency list --max-col-width 120 ${MAIN_DIR}/clusters/${CLUSTER}/helm/$dir 2> /dev/null \
| tail +2 | head -n -1 \
| awk '{ print "helm repo add " $1 " " $3 }' \
| while read cmd; do
if [[ "$cmd" == "*oci://*" ]]; then
echo ">> Ignoring OCI repo"
else
echo "$cmd" | sh;
fi
done || true
done
if helm repo list | tail +2 | read -r; then
echo ">> Update repository cache ..."
helm repo update
fi
echo "----"
- name: Remove Changed Manifest Files
if: steps.check-dir-changes.outputs.changes-detected == 'true'
env:
RENDER_DIR: ${{ steps.check-dir-changes.outputs.render-dir }}
run: |
cd ${MANIFEST_DIR}
echo ">> Remove manfiest files and rebuild from source ..."
for dir in ${RENDER_DIR}; do
chart_path=${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$dir
echo "$chart_path"
rm -rf $chart_path/*
done
echo "----"
- name: Render Helm Manifests
id: render-manifests
if: steps.check-dir-changes.outputs.changes-detected == 'true'
env:
RENDER_DIR: ${{ steps.check-dir-changes.outputs.render-dir }}
run: |
cd ${MAIN_DIR}
echo ">> Rendering Manifests ..."
for dir in ${RENDER_DIR}; do
chart_path=${MAIN_DIR}/clusters/${CLUSTER}/helm/$dir
chart_name=$(basename "$chart_path")
echo ""
echo ""
echo ">> Rendering chart: $chart_name"
echo ">> Chart path $chart_path"
if [ -f "$chart_path/Chart.yaml" ]; then
OUTPUT_FOLDER="${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name/"
TEMPLATE=""
mkdir -p ${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name
cd $chart_path
echo ""
echo ">> Updating helm dependency ..."
helm dependency update --skip-refresh
echo ""
echo ">> Building helm dependency ..."
helm dependency build --skip-refresh
echo ""
echo ">> Linting helm ..."
helm lint --namespace "$chart_name"
echo ""
echo ">> Rendering templates ..."
case "$chart_name" in
"stack")
echo ""
echo ">> Special Rendering for stack into argocd namespace ..."
TEMPLATE=$(helm template $chart_name ./ --namespace argocd --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
;;
"cilium" | "coredns" | "metrics-server" |"prometheus-operator-crds")
echo ""
echo ">> Special Rendering for $chart_name into kube-system namespace ..."
TEMPLATE=$(helm template $chart_name ./ --namespace kube-system --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
;;
*)
echo ""
echo ">> Standard Rendering for $chart_name ..."
TEMPLATE=$(helm template "$chart_name" ./ --namespace "$chart_name" --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
;;
esac
echo ""
echo ">> Formating rendered template ..."
echo "$TEMPLATE" | yq '... comments=""' | yq 'select(. != null)' | yq -s '"'"$OUTPUT_FOLDER"'" + .kind + "-" + .metadata.name + ".yaml"'
# Strip comments again to ensure formatting correctness
for file in "$OUTPUT_FOLDER"/*; do
yq -i '... comments=""' $file
done
echo ""
echo ">> Manifests for $chart_name rendered to $OUTPUT_FOLDER"
ls $OUTPUT_FOLDER
echo ""
else
echo ""
echo ">> Directory $chart_path does not contain a Chart.yaml. Skipping ..."
echo ""
fi
done
echo "----"
- name: Check for Changes
id: check-changes
if: steps.check-dir-changes.outputs.changes-detected == 'true'
run: |
cd ${MANIFEST_DIR}
GIT_CHANGES=$(git status --porcelain)
if [ -n "$GIT_CHANGES" ]; then
echo ">> Changes detected"
git status --porcelain
echo "changes-detected=true" >> $GITEA_OUTPUT
else
echo ">> No changes detected, skipping PR creation"
fi
echo "----"
- name: Commit and Push Changes
id: commit-push
if: steps.check-changes.outputs.changes-detected == 'true'
run: |
cd ${MANIFEST_DIR}
echo ">> Commiting changes to ${BRANCH_NAME} ..."
git add .
git commit -m "chore: Update manifests after change"
REPO_URL="${{ secrets.REPO_URL }}/${{ gitea.repository }}"
echo ">> Pushing changes to $REPO_URL ..."
git push -u "https://oauth2:${{ secrets.BOT_TOKEN }}@$(echo $REPO_URL | sed -e 's|https://||')" ${BRANCH_NAME}
echo "----"
echo "HEAD_BRANCH=${BRANCH_NAME}" >> $GITEA_OUTPUT
echo "push=true" >> $GITEA_OUTPUT
- name: Check for Pull Request
id: check-for-pull-request
if: steps.commit-push.outputs.push == 'true'
env:
GITEA_TOKEN: ${{ secrets.BOT_TOKEN }}
GITEA_URL: ${{ secrets.REPO_URL }}
HEAD_BRANCH: ${{ steps.commit-push.outputs.HEAD_BRANCH }}
run: |
cd ${MANIFEST_DIR}
API_ENDPOINT="${GITEA_URL}/api/v1/repos/${{ gitea.repository }}/pulls?base_branch=${BASE_BRANCH}&state=open&page=1"
echo ">> Checking if PR from branch ${HEAD_BRANCH} into ${BASE_BRANCH}"
echo ">> With Endpoint of:"
echo "$API_ENDPOINT"
HTTP_STATUS=$(
curl -X GET \
--silent \
--write-out '%{http_code}' \
--output response_body.json \
--dump-header response_headers.txt \
-H "Authorization: token ${GITEA_TOKEN}" \
-H "Content-Type: application/json" \
"$API_ENDPOINT" 2> response_errors.txt
)
echo ">> HTTP Status Code: $HTTP_STATUS"
echo ">> Response Output ..."
echo "----"
cat response_body.json
echo "----"
cat response_headers.txt
echo "----"
cat response_errors.txt
echo "----"
if [ "$HTTP_STATUS" == "200" ] && [ "$(cat response_body.json | jq -r .[0].state)" == "open" ]; then
echo ">> Pull Request has been found open, will update"
PR_INDEX=$(cat response_body.json | jq -r .[0].number)
echo "pull-request-exists=${PR_INDEX}" >> $GITEA_OUTPUT
echo "pull-request-index=true" >> $GITEA_OUTPUT
elif [ "$HTTP_STATUS" == "200" ] && [ "$(cat response_body.json | jq -r .[0].state)" == "closed" ]; then
echo ">> Pull Request found, but was closed"
echo "pull-request-exists=false" >> $GITEA_OUTPUT
else
echo ">> Pull Request not found"
echo "pull-request-exists=false" >> $GITEA_OUTPUT
fi
echo "----"
- name: Create Pull Request
id: create-pull-request
if: steps.commit-push.outputs.push == 'true' && steps.check-for-pull-request.outputs.pull-request-exists == 'false'
env:
GITEA_TOKEN: ${{ secrets.BOT_TOKEN }}
GITEA_URL: ${{ secrets.REPO_URL }}
HEAD_BRANCH: ${{ steps.commit-push.outputs.HEAD_BRANCH }}
run: |
cd ${MANIFEST_DIR}
API_ENDPOINT="${GITEA_URL}/api/v1/repos/${{ gitea.repository }}/pulls"
PAYLOAD=$( jq -n \
--arg head "${HEAD_BRANCH}" \
--arg base "${BASE_BRANCH}" \
--arg assignee "${ASSIGNEE}" \
--arg title "Automated Manifest Update" \
--arg body "This PR contains newly rendered Kubernetes manifests automatically generated by the CI workflow." \
'{head: $head, base: $base, assignee: $assignee, title: $title, body: $body}' )
echo ">> Creating PR from branch ${HEAD_BRANCH} into ${BASE_BRANCH}"
echo ">> With Endpoint of:"
echo "$API_ENDPOINT"
echo ">> With Payload of:"
echo "$PAYLOAD"
HTTP_STATUS=$(
curl -X POST \
--silent \
--write-out '%{http_code}' \
--output response_body.json \
--dump-header response_headers.txt \
--data "$PAYLOAD" \
-H "Authorization: token ${GITEA_TOKEN}" \
-H "Content-Type: application/json" \
"$API_ENDPOINT" 2> response_errors.txt
)
echo ">> HTTP Status Code: $HTTP_STATUS"
echo ">> Response Output ..."
echo "----"
cat response_body.json
echo "----"
cat response_headers.txt
echo "----"
cat response_errors.txt
echo "----"
if [ "$HTTP_STATUS" == "201" ]; then
echo ">> Pull Request created successfully!"
PR_URL=$(cat response_body.json | jq -r .html_url)
echo "pull-request-url=${PR_URL}" >> $GITEA_OUTPUT
PR_ID=$(cat response_body.json | jq -r .id)
echo "pull-request-id=${PR_ID}" >> $GITEA_OUTPUT
echo "pull-request-operation=created" >> $GITEA_OUTPUT
elif [ "$HTTP_STATUS" == "422" ]; then
echo ">> Failed to create PR (HTTP 422: Unprocessable Entity), PR may already exist"
elif [ "$HTTP_STATUS" == "409" ]; then
echo ">> Failed to create PR (HTTP 409: Conflict), PR already exists"
else
echo ">> Failed to create PR, HTTP status code: $HTTP_STATUS"
exit 1
fi
echo "----"
- name: ntfy Created
uses: niniyas/ntfy-action@master
if: steps.create-pull-request.outputs.pull-request-operation == 'created'
with:
url: "${{ secrets.NTFY_URL }}"
topic: "${{ secrets.NTFY_TOPIC }}"
title: "Manifest Render PR Created - Infrastructure"
priority: 3
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,successfully,completed
details: "Manifest rendering for Infrastructure has created a new Pull Request with ID: ${{ steps.create-pull-request.outputs.pull-request-id }}!"
icon: "https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png"
actions: '[{"action": "view", "label": "Open Gitea", "url": "${{ steps.create-pull-request.outputs.pull-request-url }}", "clear": true}]'
- name: ntfy Failed
uses: niniyas/ntfy-action@master
if: failure()
with:
url: "${{ secrets.NTFY_URL }}"
topic: "${{ secrets.NTFY_TOPIC }}"
title: "Manifest Render Failure - Infrastructure"
priority: 4
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,failed
details: "Manifest rendering for Infrastructure has failed!"
icon: "https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png"
actions: '[{"action": "view", "label": "Open Gitea", "url": "https://gitea.alexlebens.dev/alexlebens/infrastructure/actions?workflow=render-manifests.yaml", "clear": true}]'
image: true

View File

@@ -1,13 +1,16 @@
name: render-manifests-dispatch
name: render-manifests
on:
schedule:
- cron: '0 3 * * *'
push:
branches:
- main
paths:
- 'clusters/cl01tl/helm/**'
workflow_dispatch:
env:
CLUSTER: cl01tl
CLUSTERS: cl01tl
BASE_BRANCH: manifests
BRANCH_NAME: auto/update-manifests
ASSIGNEE: alexlebens
@@ -15,14 +18,13 @@ env:
MANIFEST_DIR: /workspace/alexlebens/infrastructure/infrastructure-manifests
jobs:
render-manifests-dispatch:
render-manifests-helm:
runs-on: ubuntu-js
steps:
- name: Checkout Main
- name: Checkout
uses: actions/checkout@v6
with:
path: infrastructure
fetch-depth: 0
- name: Checkout Manifests
uses: actions/checkout@v6
@@ -35,13 +37,6 @@ jobs:
with:
token: ${{ secrets.GITEA_TOKEN }}
version: v3.17.2 # Pending https://github.com/helm/helm/pull/30743
cache: true
- name: Configure Kubeconfig
uses: azure/k8s-set-context@v4
with:
method: kubeconfig
kubeconfig: ${{ secrets.KUBECONFIG }}
- name: Prepare Manifest Branch
run: |
@@ -63,161 +58,76 @@ jobs:
git checkout -b $BRANCH_NAME
fi
echo "----"
- name: Check which Directories have Changes
id: check-dir-changes
run: |
cd ${MAIN_DIR}
RENDER_DIR=()
echo ">> Triggered on dispatch, will check all paths ..."
RENDER_DIR+=$(ls clusters/cl01tl/helm/)
if [ -n "${RENDER_DIR}" ]; then
echo ">> Directories to Render:"
echo "$(echo "${RENDER_DIR}" | sort -u)"
echo "----"
echo "changes-detected=true" >> $GITEA_OUTPUT
echo "render-dir<<EOF" >> $GITEA_OUTPUT
echo "$(echo "${RENDER_DIR}" | sort -u)" >> $GITEA_OUTPUT
echo "EOF" >> $GITEA_OUTPUT
else
echo "changes-detected=false" >> $GITEA_OUTPUT
fi
echo ">> Remove manfiest files and rebuild from source ..."
cd ${MANIFEST_DIR}/clusters
rm -rf ./*
- name: Add Repositories
if: steps.check-dir-changes.outputs.changes-detected == 'true'
env:
RENDER_DIR: ${{ steps.check-dir-changes.outputs.render-dir }}
run: |
cd ${MAIN_DIR}
echo ">> Adding repositories for chart dependencies ..."
for dir in ${RENDER_DIR}; do
helm dependency list --max-col-width 120 ${MAIN_DIR}/clusters/${CLUSTER}/helm/$dir 2> /dev/null \
| tail +2 | head -n -1 \
| awk '{ print "helm repo add " $1 " " $3 }' \
| while read cmd; do
if [[ "$cmd" == "*oci://*" ]]; then
echo ">> Ignoring OCI repo"
else
echo "$cmd" | sh;
fi
done || true
for cluster in ${CLUSTERS}; do
echo ">> Adding repositories for chart dependencies of cluster $cluster ..."
for chart_path in ${MAIN_DIR}/clusters/$cluster/helm/*; do
helm dependency list --max-col-width 120 $chart_path 2> /dev/null \
| tail +2 | head -n -1 \
| awk '{ print "helm repo add " $1 " " $3 }' \
| while read cmd; do echo "$cmd" | sh; done || true
done
done
if helm repo list | tail +2 | read -r; then
echo ">> Update repository cache ..."
helm repo update
fi
echo "----"
- name: Render Helm Manifests
id: render-manifests
if: steps.check-dir-changes.outputs.changes-detected == 'true'
env:
RENDER_DIR: ${{ steps.check-dir-changes.outputs.render-dir }}
run: |
cd ${MAIN_DIR}
echo ">> Rendering Manifests ..."
for cluster in ${CLUSTERS}; do
for chart_path in ${MAIN_DIR}/clusters/$cluster/helm/*; do
chart_name=$(basename "$chart_path")
echo ">> Rendering chart: $chart_name"
for dir in ${RENDER_DIR}; do
chart_path=${MAIN_DIR}/clusters/${CLUSTER}/helm/$dir
chart_name=$(basename "$chart_path")
if [ -f "$chart_path/Chart.yaml" ]; then
mkdir -p ${MANIFEST_DIR}/clusters/$cluster/manifests/$chart_name
OUTPUT_FILE="${MANIFEST_DIR}/clusters/$cluster/manifests/$chart_name/$chart_name.yaml"
echo ""
echo ""
echo ">> Rendering chart: $chart_name"
echo ">> Chart path $chart_path"
cd $chart_path
if [ -f "$chart_path/Chart.yaml" ]; then
OUTPUT_FOLDER="${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name/"
TEMPLATE=""
echo ""
echo ">> Building helm dependency ..."
helm dependency build
mkdir -p ${MANIFEST_DIR}/clusters/${CLUSTER}/manifests/$chart_name
echo ""
echo ">> Linting helm ..."
helm lint --namespace "$chart_name" --with-subcharts
cd $chart_path
echo ""
echo ">> Rendering templates ..."
helm template "$chart_name" ./ --namespace "$chart_name" --include-crds > "$OUTPUT_FILE"
echo ""
echo ">> Updating helm dependency ..."
helm dependency update --skip-refresh
echo ""
echo ">> Building helm dependency ..."
helm dependency build --skip-refresh
echo ""
echo ">> Linting helm ..."
helm lint --namespace "$chart_name"
echo ""
echo ">> Rendering templates ..."
case "$chart_name" in
"stack")
echo ""
echo ">> Special Rendering for stack into argocd namespace ..."
TEMPLATE=$(helm template $chart_name ./ --namespace argocd --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
;;
"cilium" | "coredns" | "metrics-server" |"prometheus-operator-crds")
echo ""
echo ">> Special Rendering for $chart_name into kube-system namespace ..."
TEMPLATE=$(helm template $chart_name ./ --namespace kube-system --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
;;
*)
echo ""
echo ">> Standard Rendering for $chart_name ..."
TEMPLATE=$(helm template "$chart_name" ./ --namespace "$chart_name" --include-crds --dry-run=server --api-versions "gateway.networking.k8s.io/v1/HTTPRoute")
;;
esac
echo ""
echo ">> Formating rendered template ..."
echo "$TEMPLATE" | yq '... comments=""' | yq 'select(. != null)' | yq -s '"'"$OUTPUT_FOLDER"'" + .kind + "-" + .metadata.name + ".yaml"'
# Strip comments again to ensure formatting correctness
for file in "$OUTPUT_FOLDER"/*; do
yq -i '... comments=""' $file
done
echo ""
echo ">> Manifests for $chart_name rendered to $OUTPUT_FOLDER"
ls $OUTPUT_FOLDER
echo ""
else
echo ""
echo ">> Directory $chart_path does not contain a Chart.yaml. Skipping ..."
echo ""
fi
echo ""
echo ">> Manifests for $chart_name rendered to $OUTPUT_FILE"
echo ""
else
echo ""
echo ">> Directory $chart_path does not contain a Chart.yaml. Skipping ..."
echo ""
fi
done
done
echo "----"
- name: Check for Changes
id: check-changes
if: steps.check-dir-changes.outputs.changes-detected == 'true'
run: |
cd ${MANIFEST_DIR}
GIT_CHANGES=$(git status --porcelain)
if [ -n "$GIT_CHANGES" ]; then
if git status --porcelain | grep -q .; then
echo ">> Changes detected"
git status --porcelain
echo "changes-detected=true" >> $GITEA_OUTPUT
else
echo ">> No changes detected, skipping PR creation"
exit 0
fi
echo "----"
- name: Commit and Push Changes
id: commit-push
if: steps.check-changes.outputs.changes-detected == 'true'
@@ -228,12 +138,10 @@ jobs:
git add .
git commit -m "chore: Update manifests after change"
REPO_URL="${{ secrets.REPO_URL }}/${{ gitea.repository }}"
echo ">> Pushing changes to $REPO_URL ..."
REPO_URL="${{ secrets.REPO_URL }}/${{ gitea.repository }}"
git push -u "https://oauth2:${{ secrets.BOT_TOKEN }}@$(echo $REPO_URL | sed -e 's|https://||')" ${BRANCH_NAME}
echo "----"
echo "HEAD_BRANCH=${BRANCH_NAME}" >> $GITEA_OUTPUT
echo "push=true" >> $GITEA_OUTPUT
@@ -247,7 +155,7 @@ jobs:
run: |
cd ${MANIFEST_DIR}
API_ENDPOINT="${GITEA_URL}/api/v1/repos/${{ gitea.repository }}/pulls?base_branch=${BASE_BRANCH}&state=open&page=1"
API_ENDPOINT="${GITEA_URL}/api/v1/repos/${{ gitea.repository }}/pulls/${BASE_BRANCH}/${HEAD_BRANCH}"
echo ">> Checking if PR from branch ${HEAD_BRANCH} into ${BASE_BRANCH}"
echo ">> With Endpoint of:"
@@ -274,23 +182,17 @@ jobs:
cat response_errors.txt
echo "----"
if [ "$HTTP_STATUS" == "200" ] && [ "$(cat response_body.json | jq -r .[0].state)" == "open" ]; then
if [ "$HTTP_STATUS" == "200" ] && [$(cat response_body.json | jq -r .state) == "open"]; then
echo ">> Pull Request has been found open, will update"
PR_INDEX=$(cat response_body.json | jq -r .[0].number)
PR_INDEX=$(cat response_body.json | jq -r .number)
echo "pull-request-exists=${PR_INDEX}" >> $GITEA_OUTPUT
echo "pull-request-index=true" >> $GITEA_OUTPUT
elif [ "$HTTP_STATUS" == "200" ] && [ "$(cat response_body.json | jq -r .[0].state)" == "closed" ]; then
echo ">> Pull Request found, but was closed"
echo "pull-request-exists=false" >> $GITEA_OUTPUT
else
echo ">> Pull Request not found"
echo "pull-request-exists=false" >> $GITEA_OUTPUT
fi
echo "----"
- name: Create Pull Request
id: create-pull-request
if: steps.commit-push.outputs.push == 'true' && steps.check-for-pull-requst.outputs.pull-request-exists == 'false'
@@ -309,7 +211,7 @@ jobs:
--arg assignee "${ASSIGNEE}" \
--arg title "Automated Manifest Update" \
--arg body "This PR contains newly rendered Kubernetes manifests automatically generated by the CI workflow." \
'{head: $head, base: $base, assignee: $assignee, title: $title, body: $body}' )
echo ">> Creating PR from branch ${HEAD_BRANCH} into ${BASE_BRANCH}"
echo ">> With Endpoint of:"
@@ -343,22 +245,23 @@ jobs:
echo ">> Pull Request created successfully!"
PR_URL=$(cat response_body.json | jq -r .html_url)
echo "pull-request-url=${PR_URL}" >> $GITEA_OUTPUT
PR_ID=$(cat response_body.json | jq -r .id)
echo "pull-request-id=${PR_ID}" >> $GITEA_OUTPUT
echo "pull-request-operation=created" >> $GITEA_OUTPUT
elif [ "$HTTP_STATUS" == "422" ]; then
echo ">> Failed to create PR (HTTP 422: Unprocessable Entity), PR may already exist"
elif [ "$HTTP_STATUS" == "409" ]; then
echo ">> Failed to create PR (HTTP 409: Conflict), PR already exists"
else
echo ">> Failed to create PR, HTTP status code: $HTTP_STATUS"
exit 1
fi
echo "----"
- name: Cleanup Branch
if: failure() && steps.create-pull-request.outcome == 'failure'
env:
HEAD_BRANCH: ${{ steps.commit-push.outputs.HEAD_BRANCH }}
run: |
echo ">> Removing branch: ${HEAD_BRANCH}"
git push origin --delete ${HEAD_BRANCH}
- name: ntfy Created
uses: niniyas/ntfy-action@master
@@ -370,7 +273,7 @@ jobs:
priority: 3
headers: '{"Authorization": "Bearer ${{ secrets.NTFY_CRED }}"}'
tags: action,successfully,completed
details: "Manifest rendering for Infrastructure has created a new Pull Request with ID: ${{ steps.create-pull-request.outputs.pull-request-id }}!"
details: "Manifest rendering for Infrastructure has created a new Pull Request!"
icon: "https://cdn.jsdelivr.net/gh/selfhst/icons/png/gitea.png"
actions: '[{"action": "view", "label": "Open Gitea", "url": "${{ steps.create-pull-request.outputs.pull-request-url }}", "clear": true}]'

1
.gitignore vendored
View File

@@ -1,4 +1,3 @@
/**/archive/
/**/charts/
/**/manifests/
/**/tmpcharts*/

View File

@@ -2,12 +2,6 @@
GitOps defined infrastructure for the alexlebens.net domain.
## Stack-cl01tl
https://argocd.alexlebens.net/api/badge?name=stack-cl01tl&revision=true&showAppName=true
App-of-Apps Application for cl01tl
## License
This project is licensed under the terms of the Apache 2.0 License.

View File

@@ -15,8 +15,7 @@ maintainers:
- name: alexlebens
dependencies:
- name: argo-cd
version: 9.3.4
version: 9.1.5
repository: https://argoproj.github.io/argo-helm
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/argo-cd.png
# renovate: github=argoproj/argo-cd
appVersion: v3.2.1
appVersion: 3.0.0

View File

@@ -0,0 +1,88 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: argocd-oidc-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: argocd-oidc-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: secret
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/argocd
metadataPolicy: None
property: secret
- secretKey: client
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/argocd
metadataPolicy: None
property: client
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: argocd-notifications-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: argocd-notifications-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ntfy-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /ntfy/user/cl01tl
metadataPolicy: None
property: token
# ---
# apiVersion: external-secrets.io/v1
# kind: ExternalSecret
# metadata:
# name: argocd-gitea-repo-infrastructure-secret
# namespace: {{ .Release.Namespace }}
# labels:
# app.kubernetes.io/name: argocd-gitea-repo-infrastructure-secret
# app.kubernetes.io/instance: {{ .Release.Name }}
# app.kubernetes.io/part-of: {{ .Release.Name }}
# spec:
# secretStoreRef:
# kind: ClusterSecretStore
# name: vault
# data:
# - secretKey: type
# remoteRef:
# conversionStrategy: Default
# decodingStrategy: None
# key: /cl01tl/argocd/credentials/repo/infrastructure
# metadataPolicy: None
# property: type
# - secretKey: url
# remoteRef:
# conversionStrategy: Default
# decodingStrategy: None
# key: /cl01tl/argocd/credentials/repo/infrastructure
# metadataPolicy: None
# property: url
# - secretKey: sshPrivateKey
# remoteRef:
# conversionStrategy: Default
# decodingStrategy: None
# key: /cl01tl/argocd/credentials/repo/infrastructure
# metadataPolicy: None
# property: sshPrivateKey

View File

@@ -0,0 +1,28 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-argocd
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: http-route-argocd
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- argocd.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: argocd-server
port: 80
weight: 100

View File

@@ -25,10 +25,21 @@ argo-cd:
id: authentik
params:
server.insecure: true
controller.diff.server.side: true
rbac:
policy.csv: |
g, ArgoCD Admins, role:admin
cmp:
create: true
plugins:
cdk8s:
init:
command: [cdk8s]
args: [import]
generate:
command: [cdk8s, synth]
args: [--stdout]
discover:
fileName: "*.go"
controller:
replicas: 1
metrics:
@@ -49,7 +60,7 @@ argo-cd:
enabled: true
auth: false
redisSecretInit:
enabled: false
enabled: true
server:
replicas: 2
extensions:
@@ -65,22 +76,34 @@ argo-cd:
enabled: true
serviceMonitor:
enabled: true
httproute:
enabled: true
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- argocd.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
ingress:
enabled: false
repoServer:
replicas: 2
extraContainers:
- name: cmp-cdk8s
command:
- /var/run/argocd/argocd-cmp-server
image: ghcr.io/akuity/cdk8s-cmp-typescript:1.0
securityContext:
runAsNonRoot: true
runAsUser: 999
volumeMounts:
- mountPath: /var/run/argocd
name: var-files
- mountPath: /home/argocd/cmp-server/plugins
name: plugins
- mountPath: /home/argocd/cmp-server/config/plugin.yaml
subPath: cdk8s.yaml
name: argocd-cmp-cm
- mountPath: /tmp
name: cmp-tmp
volumes:
- name: argocd-cmp-cm
configMap:
name: argocd-cmp-cm
- name: cmp-tmp
emptyDir: {}
metrics:
enabled: true
serviceMonitor:
@@ -256,7 +279,7 @@ argo-cd:
- description: Application has degraded
send:
- app-health-degraded
when: app.status.health.status == 'Degraded'
when: app.status.health.status == 'Degraded' and time.Now().Sub(time.Parse(app.status.health.lastTransitionTime)).Minutes() >= 15
trigger.on-sync-failed: |
- description: Application syncing has failed
send:

View File

@@ -0,0 +1,16 @@
apiVersion: v2
name: stack
version: 1.0.0
description: Stack
keywords:
- argo-cd
- stack
- deployment
home: https://wiki.alexlebens.dev/s/0c2d1896-710d-4972-9bc8-08d71987428a
sources:
- https://github.com/argoproj/argo-cd
- https://gitea.alexlebens.dev/alexlebens/infrastructure
maintainers:
- name: alexlebens
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/argo-cd.png
appVersion: 1.0.0

View File

@@ -0,0 +1,59 @@
{{- range $index, $stack := .Values.applicationSet }}
---
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
name: {{ $stack.name }}
namespace: {{ $.Release.Namespace }}
labels:
app.kubernetes.io/name: {{ $stack.name }}
app.kubernetes.io/instance: {{ $stack.name }}
app.kubernetes.io/part-of: {{ $.Release.Name }}
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
syncPolicy:
applicationsSync: create-update
preserveResourcesOnDeletion: true
generators:
- git:
repoURL: {{ $.Values.git.repo }}
revision: {{ $.Values.git.revision }}
directories:
- path: "clusters/{{ $.Values.cluster.name }}/{{ $stack.name }}/*"
template:
metadata:
name: '{{ `{{path.basename}}` }}'
spec:
destination:
name: in-cluster
namespace: '{{ $stack.namespace | default `{{path.basename}}` }}'
project: default
revisionHistoryLimit: 3
source:
repoURL: {{ $.Values.git.repo }}
targetRevision: {{ $.Values.git.revision }}
path: '{{ `{{path}}` }}'
helm:
releaseName: "{{ `{{path.basename}}` }}"
{{- if $stack.ignoreDifferences }}
ignoreDifferences:
{{- toYaml $stack.ignoreDifferences | nindent 8 }}
{{ end }}
syncPolicy:
automated:
prune: {{ $stack.syncPolicy.automated.prune | default false }}
selfHeal: {{ $stack.syncPolicy.automated.selfHeal | default false }}
retry:
limit: 3
backoff:
duration: 1m
factor: 2
maxDuration: 15m
syncOptions:
- CreateNamespace={{ $stack.syncPolicy.syncOptions.createNamespace | default true }}
- ApplyOutOfSyncOnly={{ $stack.syncPolicy.syncOptions.applyOutOfSyncOnly | default true }}
- ServerSideApply={{ $stack.syncPolicy.syncOptions.serverSideApply | default true }}
- PruneLast={{ $stack.syncPolicy.syncOptions.pruneLast | default true }}
- RespectIgnoreDifferences={{ $stack.syncPolicy.syncOptions.respectIgnoreDifferences | default true }}
{{- end }}

View File

@@ -0,0 +1,192 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cilium
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: cilium
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: {{ .Values.git.repo }}
targetRevision: {{ .Values.git.revision }}
path: clusters/{{ .Values.cluster.name }}/standalone/cilium
destination:
name: in-cluster
namespace: kube-system
revisionHistoryLimit: 3
ignoreDifferences:
- group: monitoring.coreos.com
kind: ServiceMonitor
jqPathExpressions:
- .spec.endpoints[]?.relabelings[]?.action
syncPolicy:
automated:
prune: true
retry:
limit: 10
backoff:
duration: 1m
factor: 2
maxDuration: 16m
syncOptions:
- CreateNamespace=false
- ApplyOutOfSyncOnly=true
- ServerSideApply=true
- PruneLast=true
- RespectIgnoreDifferences=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: coredns
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: coredns
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: {{ .Values.git.repo }}
targetRevision: {{ .Values.git.revision }}
path: clusters/{{ .Values.cluster.name }}/standalone/coredns
destination:
name: in-cluster
namespace: kube-system
revisionHistoryLimit: 10
syncPolicy:
automated:
prune: true
selfHeal: true
retry:
limit: 10
backoff:
duration: 1m
factor: 2
maxDuration: 16m
syncOptions:
- CreateNamespace=false
- ApplyOutOfSyncOnly=true
- ServerSideApply=true
- PruneLast=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: metrics-server
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: metrics-server
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: {{ .Values.git.repo }}
targetRevision: {{ .Values.git.revision }}
path: clusters/{{ .Values.cluster.name }}/standalone/metrics-server
destination:
name: in-cluster
namespace: kube-system
revisionHistoryLimit: 3
syncPolicy:
automated:
prune: true
selfHeal: true
retry:
limit: 10
backoff:
duration: 1m
factor: 2
maxDuration: 16m
syncOptions:
- CreateNamespace=false
- ApplyOutOfSyncOnly=true
- ServerSideApply=true
- PruneLast=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kubelet-serving-cert-approver
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: kubelet-serving-cert-approver
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: {{ .Values.git.repo }}
targetRevision: {{ .Values.git.revision }}
path: clusters/{{ .Values.cluster.name }}/standalone/kubelet-serving-cert-approver
destination:
name: in-cluster
namespace: kubelet-serving-cert-approver
revisionHistoryLimit: 3
syncPolicy:
automated:
prune: true
retry:
limit: 10
backoff:
duration: 1m
factor: 2
maxDuration: 16m
syncOptions:
- CreateNamespace=true
- ApplyOutOfSyncOnly=true
- ServerSideApply=true
- PruneLast=true
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: prometheus-operator-crds
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: prometheus-operator-crds
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: {{ .Values.git.repo }}
targetRevision: {{ .Values.git.revision }}
path: clusters/{{ .Values.cluster.name }}/standalone/prometheus-operator-crds
destination:
name: in-cluster
namespace: kube-system
revisionHistoryLimit: 3
syncPolicy:
automated:
prune: true
selfHeal: false
retry:
limit: 10
backoff:
duration: 1m
factor: 2
maxDuration: 16m
syncOptions:
- CreateNamespace=false
- ApplyOutOfSyncOnly=true
- ServerSideApply=true
- PruneLast=true

View File

@@ -0,0 +1,112 @@
cluster:
name: cl01tl
git:
# repo: git@github.com:alexlebens/infrastructure.git
# repo: https://github.com/alexlebens/infrastructure.git
repo: http://gitea-http.gitea:3000/alexlebens/infrastructure
# repo: ssh://git@gitea-ssh.gitea/alexlebens/infrastructure
revision: HEAD
applicationSet:
- name: applications
ignoreDifferences:
- group: ""
kind: Service
jqPathExpressions:
- .spec.externalName
syncPolicy:
automated:
prune: true
syncOptions:
serverSideApply: true
- name: deployment
namespace: argocd
syncPolicy:
automated:
prune: true
syncOptions:
serverSideApply: true
- name: management
ignoreDifferences:
- group: ""
kind: Service
jqPathExpressions:
- .spec.externalName
syncPolicy:
automated:
prune: true
syncOptions:
serverSideApply: true
- name: monitoring
ignoreDifferences:
- group: ""
kind: Service
jqPathExpressions:
- .spec.externalName
- group: "apps"
kind: StatefulSet
jqPathExpressions:
- .spec.volumeClaimTemplates[]?.apiVersion
- .spec.volumeClaimTemplates[]?.kind
- .spec.volumeClaimTemplates[]?.metadata.creationTimestamp
syncPolicy:
automated:
prune: true
syncOptions:
serverSideApply: true
- name: platform
ignoreDifferences:
- group: ""
kind: Service
jqPathExpressions:
- .spec.externalName
- group: "apps"
kind: StatefulSet
jqPathExpressions:
- .spec.volumeClaimTemplates[]?.apiVersion
- .spec.volumeClaimTemplates[]?.kind
- .spec.volumeClaimTemplates[]?.metadata.creationTimestamp
syncPolicy:
automated:
prune: true
syncOptions:
serverSideApply: true
- name: services
ignoreDifferences:
- group: ""
kind: GpuDevicePlugin
jqPathExpressions:
- .metadata.annotations[]
- group: "apps"
kind: "Deployment"
jsonPointers:
- /spec/template/metadata/annotations/checksum~1secret
- /spec/template/metadata/annotations/checksum~1secret-core
- /spec/template/metadata/annotations/checksum~1secret-jobservice
- /spec/template/metadata/annotations/checksum~1tls
- group: "apps"
kind: "StatefulSet"
jsonPointers:
- /spec/template/metadata/annotations/checksum~1secret
- /spec/template/metadata/annotations/checksum~1tls
- group: "apps"
kind: StatefulSet
jqPathExpressions:
- .spec.volumeClaimTemplates[]?.apiVersion
- .spec.volumeClaimTemplates[]?.kind
- .spec.volumeClaimTemplates[]?.metadata.creationTimestamp
syncPolicy:
automated:
prune: true
syncOptions:
serverSideApply: true
- name: storage
ignoreDifferences:
- group: ""
kind: Service
jqPathExpressions:
- .spec.externalName
syncPolicy:
automated:
prune: true
syncOptions:
serverSideApply: true

View File

@@ -1,9 +1,6 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
digest: sha256:ff3e2f03e93cdd4593e28b9c8bd5b9ddb25548a20a070b2e202057f216207d03
generated: "2026-01-16T18:44:37.399172263Z"
version: 4.4.0
digest: sha256:b5d823171e1b4dc1d3856f782f0c67cbb5d49e4fa170df2f21b06303c7aff7f5
generated: "2025-11-30T21:05:19.732832-06:00"

View File

@@ -16,11 +16,6 @@ dependencies:
- name: app-template
alias: actual
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
- name: volsync-target
alias: volsync-target-data
version: 0.7.0
repository: oci://harbor.alexlebens.net/helm-charts
version: 4.4.0
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/actual-budget.png
# renovate: github=actualbudget/actual
appVersion: 25.12.0
appVersion: 25.11.0

View File

@@ -1,49 +1,55 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: komodo-secret
name: actual-data-backup-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: komodo-secret
app.kubernetes.io/name: actual-data-backup-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/actual/actual-data"
data:
- secretKey: passkey
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/komodo/config
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: passkey
- secretKey: jwt
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/komodo/config
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: jwt
- secretKey: webhook
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/komodo/config
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: webhook
- secretKey: oidc-client-id
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/komodo
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: client
- secretKey: oidc-client-secret
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/komodo
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret
property: secret_key

View File

@@ -0,0 +1,28 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-actual
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: http-route-actual
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- actual.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: actual
port: 80
weight: 100

View File

@@ -0,0 +1,25 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: actual-data-backup-source
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: actual-data-backup-source
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
sourcePVC: actual-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: actual-data-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot

View File

@@ -9,7 +9,7 @@ actual:
main:
image:
repository: ghcr.io/actualbudget/actual
tag: 26.1.0
tag: 25.11.0
pullPolicy: IfNotPresent
env:
- name: TZ
@@ -42,27 +42,6 @@ actual:
port: 80
targetPort: 5006
protocol: HTTP
route:
main:
kind: HTTPRoute
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- actual.alexlebens.net
rules:
- backendRefs:
- group: ''
kind: Service
name: actual
port: 80
weight: 100
matches:
- path:
type: PathPrefix
value: /
persistence:
data:
forceRename: actual-data
@@ -75,13 +54,3 @@ actual:
main:
- path: /data
readOnly: false
volsync-target-data:
pvcTarget: actual-data
local:
enabled: true
schedule: 0 8 * * *
remote:
enabled: false
external:
enabled: true
schedule: 0 9 * * *

View File

@@ -1,12 +0,0 @@
dependencies:
- name: argo-workflows
repository: https://argoproj.github.io/argo-helm
version: 0.47.0
- name: argo-events
repository: https://argoproj.github.io/argo-helm
version: 2.4.19
- name: postgres-cluster
repository: oci://harbor.alexlebens.net/helm-charts
version: 7.4.5
digest: sha256:c36845d5688e28e6f6c6b6f2e17b40514f2adb937f2c6077deadfec9e6b294fe
generated: "2026-01-14T21:30:10.440164554Z"

View File

@@ -1,6 +0,0 @@
dependencies:
- name: argo-cd
repository: https://argoproj.github.io/argo-helm
version: 9.3.4
digest: sha256:006518c10fc1636a5b0398de90d4a7687ae6c0bf4626c41d11b2bc3ad48ff416
generated: "2026-01-14T23:02:04.617990687Z"

View File

@@ -1,12 +1,6 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
digest: sha256:c8a988258b26187972a8b69767bf5df502d7e2b12710eb357ac15240d872fd37
generated: "2026-01-16T18:44:48.982249243Z"
version: 4.4.0
digest: sha256:f3a9990542f24965fadad0b5493059b78cdc3fae91c8214577fa6f41ca5f7de3
generated: "2025-11-30T21:05:21.317114-06:00"

View File

@@ -18,15 +18,6 @@ dependencies:
- name: app-template
alias: audiobookshelf
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
- name: volsync-target
alias: volsync-target-config
version: 0.7.0
repository: oci://harbor.alexlebens.net/helm-charts
- name: volsync-target
alias: volsync-target-metadata
version: 0.7.0
repository: oci://harbor.alexlebens.net/helm-charts
version: 4.4.0
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/audiobookshelf.png
# renovate: github=advplyr/audiobookshelf
appVersion: 2.31.0
appVersion: 2.21.0

View File

@@ -19,3 +19,117 @@ spec:
key: /cl01tl/audiobookshelf/apprise
metadataPolicy: None
property: ntfy-url
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: audiobookshelf-config-backup-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: audiobookshelf-config-backup-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/audiobookshelf/audiobookshelf-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: audiobookshelf-metadata-backup-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: audiobookshelf-metadata-backup-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/audiobookshelf/audiobookshelf-metadata"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key

View File

@@ -0,0 +1,28 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-audiobookshelf
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: http-route-audiobookshelf
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- audiobookshelf.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: audiobookshelf
port: 80
weight: 100

View File

@@ -1,5 +1,24 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: audiobookshelf-nfs-storage-backup
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: audiobookshelf-nfs-storage-backup
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
volumeMode: Filesystem
storageClassName: nfs-client
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: audiobookshelf-nfs-storage
namespace: {{ .Release.Namespace }}

View File

@@ -0,0 +1,52 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: audiobookshelf-config-backup-source
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: audiobookshelf-config-backup-source
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
sourcePVC: audiobookshelf-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: audiobookshelf-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
---
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: audiobookshelf-metadata-backup-source
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: audiobookshelf-metadata-backup-source
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
sourcePVC: audiobookshelf-metadata
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: audiobookshelf-metadata-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot

View File

@@ -0,0 +1,19 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: audiobookshelf-apprise
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: audiobookshelf-apprise
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
endpoints:
- port: apprise
interval: 30s
scrapeTimeout: 15s
path: /metrics
selector:
matchLabels:
app.kubernetes.io/name: audiobookshelf
app.kubernetes.io/instance: {{ .Release.Name }}

View File

@@ -9,7 +9,7 @@ audiobookshelf:
main:
image:
repository: ghcr.io/advplyr/audiobookshelf
tag: 2.32.1
tag: 2.30.0
pullPolicy: IfNotPresent
env:
- name: TZ
@@ -21,7 +21,7 @@ audiobookshelf:
apprise-api:
image:
repository: caronc/apprise
tag: 1.3.0
tag: 1.2.6
pullPolicy: IfNotPresent
env:
- name: TZ
@@ -57,43 +57,8 @@ audiobookshelf:
port: 8000
targetPort: 8000
protocol: HTTP
serviceMonitor:
main:
selector:
matchLabels:
app.kubernetes.io/name: audiobookshelf
app.kubernetes.io/instance: audiobookshelf
serviceName: '{{ include "bjw-s.common.lib.chart.names.fullname" $ }}'
endpoints:
- port: apprise
scheme: http
path: /metrics
interval: 30s
scrapeTimeout: 15s
route:
main:
kind: HTTPRoute
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- audiobookshelf.alexlebens.net
rules:
- backendRefs:
- group: ''
kind: Service
name: audiobookshelf
port: 80
weight: 100
matches:
- path:
type: PathPrefix
value: /
persistence:
config:
forceRename: audiobookshelf-config
storageClass: ceph-block
accessMode: ReadWriteOnce
size: 2Gi
@@ -104,7 +69,6 @@ audiobookshelf:
- path: /config
readOnly: false
metadata:
forceRename: audiobookshelf-metadata
storageClass: ceph-block
accessMode: ReadWriteOnce
size: 10Gi
@@ -114,6 +78,13 @@ audiobookshelf:
main:
- path: /metadata
readOnly: false
backup:
existingClaim: audiobookshelf-nfs-storage-backup
advancedMounts:
main:
main:
- path: /metadata/backups
readOnly: false
audiobooks:
existingClaim: audiobookshelf-nfs-storage
advancedMounts:
@@ -121,23 +92,3 @@ audiobookshelf:
main:
- path: /mnt/store/
readOnly: false
volsync-target-config:
pvcTarget: audiobookshelf-config
local:
enabled: true
schedule: 2 8 * * *
remote:
enabled: false
external:
enabled: true
schedule: 2 9 * * *
volsync-target-metadata:
pvcTarget: audiobookshelf-metadata
local:
enabled: true
schedule: 4 8 * * *
remote:
enabled: false
external:
enabled: true
schedule: 4 9 * * *

View File

@@ -1,15 +0,0 @@
dependencies:
- name: authentik
repository: https://charts.goauthentik.io/
version: 2025.12.1
- name: cloudflared
repository: oci://harbor.alexlebens.net/helm-charts
version: 2.1.9
- name: postgres-cluster
repository: oci://harbor.alexlebens.net/helm-charts
version: 7.4.5
- name: redis-replication
repository: oci://harbor.alexlebens.net/helm-charts
version: 1.0.1
digest: sha256:458e9967330768b68649da4af69773adba3aed1a04316b9e67ef19612381d4b9
generated: "2026-01-17T17:30:18.453482605Z"

View File

@@ -1,114 +0,0 @@
authentik:
global:
env:
- name: AUTHENTIK_SECRET_KEY
valueFrom:
secretKeyRef:
name: authentik-key-secret
key: key
- name: AUTHENTIK_POSTGRESQL__HOST
valueFrom:
secretKeyRef:
name: authentik-postgresql-18-cluster-app
key: host
- name: AUTHENTIK_POSTGRESQL__NAME
valueFrom:
secretKeyRef:
name: authentik-postgresql-18-cluster-app
key: dbname
- name: AUTHENTIK_POSTGRESQL__USER
valueFrom:
secretKeyRef:
name: authentik-postgresql-18-cluster-app
key: user
- name: AUTHENTIK_POSTGRESQL__PASSWORD
valueFrom:
secretKeyRef:
name: authentik-postgresql-18-cluster-app
key: password
authentik:
redis:
host: redis-replication-authentik-master
server:
name: server
replicas: 1
metrics:
enabled: true
serviceMonitor:
enabled: true
route:
main:
enabled: true
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
hostnames:
- authentik.alexlebens.net
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
httpsRedirect: false
matches:
- path:
type: PathPrefix
value: /
worker:
name: worker
replicas: 1
prometheus:
rules:
enabled: true
postgresql:
enabled: false
redis:
enabled: false
postgres-18-cluster:
mode: recovery
recovery:
method: objectStore
objectStore:
index: 1
backup:
objectStore:
- name: garage-local
index: 1
destinationBucket: postgres-backups
externalSecretCredentialPath: /garage/home-infra/postgres-backups
isWALArchiver: true
# - name: garage-remote
# index: 1
# destinationBucket: postgres-backups
# externalSecretCredentialPath: /garage/home-infra/postgres-backups
# retentionPolicy: "90d"
# data:
# compression: bzip2
# - name: external
# index: 1
# endpointURL: https://nyc3.digitaloceanspaces.com
# destinationBucket: postgres-backups-ce540ddf106d186bbddca68a
# externalSecretCredentialPath: /garage/home-infra/postgres-backups
# isWALArchiver: false
scheduledBackups:
- name: live-backup
suspend: false
immediate: true
schedule: "0 0 0 * * *"
backupName: garage-local
# - name: weekly-backup
# suspend: true
# immediate: true
# schedule: "0 0 4 * * SAT"
# backupName: garage-remote
# - name: daily-backup
# suspend: true
# immediate: true
# schedule: "0 0 0 * * *"
# backupName: external
redis-replication:
existingSecret:
enabled: false
redisReplication:
clusterSize: 3
sentinel:
enabled: true

View File

@@ -1,12 +0,0 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
digest: sha256:18365b7dd3995703aad6928ce22dd1c3b8ffd5f1cccf54b8f1489ad111d13104
generated: "2026-01-16T18:45:00.087995513Z"

View File

@@ -1,9 +1,6 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
digest: sha256:77d0e82601292b4173d355d18c0de82bb37684a3dc29d7c8af4169308f14de48
generated: "2026-01-16T18:45:10.855338471Z"
version: 4.4.0
digest: sha256:c6f6d1f2fb9fedf54094920737a6f0bd1a2ab89f0a4122966ca98f6c9d3f11fa
generated: "2025-11-30T21:05:22.694344-06:00"

View File

@@ -18,11 +18,6 @@ dependencies:
- name: app-template
alias: bazarr
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
- name: volsync-target
alias: volsync-target-config
version: 0.7.0
repository: oci://harbor.alexlebens.net/helm-charts
version: 4.4.0
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/bazarr.png
# renovate: github=linuxserver/bazarr
appVersion: 1.5.3
appVersion: 1.5.2

View File

@@ -0,0 +1,55 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: bazarr-config-backup-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: bazarr-config-backup-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/bazarr/bazarr-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key

View File

@@ -0,0 +1,28 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-bazarr
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: http-route-bazarr
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- bazarr.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: bazarr
port: 80
weight: 100

View File

@@ -0,0 +1,30 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: bazarr-config-backup-source
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: bazarr-config-backup-source
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
sourcePVC: bazarr-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: bazarr-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot

View File

@@ -15,7 +15,7 @@ bazarr:
main:
image:
repository: ghcr.io/linuxserver/bazarr
tag: 1.5.4@sha256:7d0a091a63889ce1e4ac4c90595ebd2c50ba5a5df7039a4f4d2be6c2aed6d4ae
tag: 1.5.3@sha256:2be164c02c0bb311b6c32e57d3d0ddc2813d524e89ab51a3408c1bf6fafecda5
pullPolicy: IfNotPresent
env:
- name: TZ
@@ -36,27 +36,6 @@ bazarr:
port: 80
targetPort: 6767
protocol: HTTP
route:
main:
kind: HTTPRoute
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- bazarr.alexlebens.net
rules:
- backendRefs:
- group: ''
kind: Service
name: bazarr
port: 80
weight: 100
matches:
- path:
type: PathPrefix
value: /
persistence:
config:
forceRename: bazarr-config
@@ -76,18 +55,3 @@ bazarr:
main:
- path: /mnt/store
readOnly: false
volsync-target-config:
pvcTarget: bazarr-config
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
local:
enabled: true
schedule: 10 8 * * *
remote:
enabled: false
external:
enabled: true
schedule: 10 9 * * *

View File

@@ -1,9 +0,0 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
- name: redis-replication
repository: oci://harbor.alexlebens.net/helm-charts
version: 1.0.1
digest: sha256:f3cc9b85524eb1a0c8aec92a87ad4dd1f2f59c5bb3474b569ce188827e40b3d0
generated: "2026-01-16T18:45:21.852495393Z"

View File

@@ -1,15 +1,9 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
version: 4.4.0
- name: mariadb-cluster
repository: https://helm.mariadb.com/mariadb-operator
version: 25.10.4
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
digest: sha256:81601af110374e1571481873ace19f7bc694edb917ef35c1fbc623efe147a66d
generated: "2026-01-16T18:45:33.641059484Z"
version: 25.10.2
digest: sha256:264725306c1d1f38140293c0820abdc7e8aa4f39764b4d91e20200705ce2ec91
generated: "2025-11-30T21:05:24.649316-06:00"

View File

@@ -16,18 +16,9 @@ dependencies:
- name: app-template
alias: booklore
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
version: 4.4.0
- name: mariadb-cluster
version: 25.10.4
version: 25.10.2
repository: https://helm.mariadb.com/mariadb-operator
- name: volsync-target
alias: volsync-target-config
version: 0.7.0
repository: oci://harbor.alexlebens.net/helm-charts
- name: volsync-target
alias: volsync-target-data
version: 0.7.0
repository: oci://harbor.alexlebens.net/helm-charts
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/booklore.png
# renovate: github=booklore-app/BookLore
appVersion: v1.13.2
appVersion: v.1.10.0

View File

@@ -43,6 +43,234 @@ spec:
metadataPolicy: None
property: psk.txt
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-config-backup-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: booklore-config-backup-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/booklore/booklore-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_SECRET_ACCESS_KEY
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-data-backup-secret-local
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: booklore-data-backup-secret-local
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/booklore/booklore-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-local
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-local
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-data-backup-secret-remote
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: booklore-data-backup-secret-remote
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/booklore/booklore-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-remote
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-remote
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: booklore-data-backup-secret-external
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: booklore-data-backup-secret-external
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/booklore/booklore-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_SECRET_ACCESS_KEY
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret

View File

@@ -0,0 +1,28 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-booklore
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: http-route-booklore
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- booklore.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: booklore
port: 80
weight: 100

View File

@@ -8,6 +8,3 @@ metadata:
app.kubernetes.io/name: booklore
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged

View File

@@ -15,3 +15,115 @@ spec:
keySecret: booklore-data-replication-secret
address: volsync-rsync-tls-dst-booklore-data-replication-destination
copyMethod: Snapshot
---
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-config-backup-source
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: booklore-config-backup-source
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
sourcePVC: booklore-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: booklore-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
---
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-data-backup-source-local
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: booklore-data-backup-source-local
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
sourcePVC: booklore-data
trigger:
schedule: 0 2 * * *
restic:
pruneIntervalDays: 7
repository: booklore-data-backup-secret-local
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
---
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-data-backup-source-remote
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: booklore-data-backup-source-remote
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
sourcePVC: booklore-data
trigger:
schedule: 0 3 * * *
restic:
pruneIntervalDays: 7
repository: booklore-data-backup-secret-remote
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi
---
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: booklore-data-backup-source-external
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: booklore-data-backup-source-external
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
sourcePVC: booklore-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: booklore-data-backup-secret-external
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: garage-ps10rp
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: garage-ps10rp
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
annotations:
tailscale.com/tailnet-fqdn: garage-ps10rp.boreal-beaufort.ts.net
spec:
externalName: placeholder
type: ExternalName

View File

@@ -9,7 +9,7 @@ booklore:
main:
image:
repository: ghcr.io/booklore-app/booklore
tag: v1.17.0
tag: v1.12.0
pullPolicy: IfNotPresent
env:
- name: TZ
@@ -39,30 +39,8 @@ booklore:
port: 80
targetPort: 6060
protocol: HTTP
route:
main:
kind: HTTPRoute
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- booklore.alexlebens.net
rules:
- backendRefs:
- group: ''
kind: Service
name: booklore
port: 80
weight: 100
matches:
- path:
type: PathPrefix
value: /
persistence:
config:
forceRename: booklore-config
storageClass: ceph-block
accessMode: ReadWriteOnce
size: 5Gi
@@ -73,7 +51,6 @@ booklore:
- path: /app/data
readOnly: false
data:
forceRename: booklore-data
storageClass: ceph-block
accessMode: ReadWriteOnce
size: 10Gi
@@ -108,21 +85,6 @@ mariadb-cluster:
replicas: 3
galera:
enabled: true
bootstrapFrom:
s3:
bucket: mariadb-backups-b230a2f5aecf080a4b372c08
prefix: cl01tl/booklore
endpoint: nyc3.digitaloceanspaces.com
region: us-east-1
accessKeyIdSecretKeyRef:
name: booklore-mariadb-cluster-backup-secret-external
key: access
secretAccessKeySecretKeyRef:
name: booklore-mariadb-cluster-backup-secret-external
key: secret
tls:
enabled: true
backupContentType: Physical
databases:
- name: booklore
characterSet: utf8
@@ -157,8 +119,7 @@ mariadb-cluster:
suspend: false
immediate: true
compression: gzip
maxRetention: 2160h
successfulJobsHistoryLimit: 1
maxRetention: 720h
storage:
s3:
bucket: mariadb-backups-b230a2f5aecf080a4b372c08
@@ -173,28 +134,6 @@ mariadb-cluster:
key: secret
tls:
enabled: true
- name: backup-remote
schedule:
cron: "0 0 * * 0"
suspend: false
immediate: true
compression: gzip
maxRetention: 2160h
successfulJobsHistoryLimit: 1
storage:
s3:
bucket: mariadb-backups
prefix: cl01tl/booklore
endpoint: garage-ps10rp.boreal-beaufort.ts.net:3900
region: us-east-1
accessKeyIdSecretKeyRef:
name: booklore-mariadb-cluster-backup-secret-garage
key: access
secretAccessKeySecretKeyRef:
name: booklore-mariadb-cluster-backup-secret-garage
key: secret
tls:
enabled: true
- name: backup-garage
schedule:
cron: "0 0 * * *"
@@ -202,7 +141,6 @@ mariadb-cluster:
immediate: true
compression: gzip
maxRetention: 360h
successfulJobsHistoryLimit: 1
storage:
s3:
bucket: mariadb-backups
@@ -215,30 +153,3 @@ mariadb-cluster:
secretAccessKeySecretKeyRef:
name: booklore-mariadb-cluster-backup-secret-garage
key: secret
volsync-target-config:
pvcTarget: booklore-config
local:
enabled: true
schedule: 12 8 * * *
remote:
enabled: false
external:
enabled: true
schedule: 12 9 * * *
volsync-target-data:
pvcTarget: booklore-data
local:
enabled: true
schedule: 14 8 * * *
restic:
cacheCapacity: 10Gi
remote:
enabled: true
schedule: 14 10 * * *
restic:
cacheCapacity: 10Gi
external:
enabled: true
schedule: 14 9 * * *
restic:
cacheCapacity: 10Gi

View File

@@ -1,6 +0,0 @@
dependencies:
- name: cert-manager
repository: https://charts.jetstack.io
version: v1.19.2
digest: sha256:b02bda9b9f2fc886af11d017a27a5761513defee603f9e3aa1d7add2749b925c
generated: "2025-12-10T15:01:57.196895547Z"

View File

@@ -1,6 +0,0 @@
dependencies:
- name: cilium
repository: https://helm.cilium.io/
version: 1.18.6
digest: sha256:8ea328ac238524b5b423e6289f5e25d05ef64e6aa19cfd5de238f1d5dd533e9b
generated: "2026-01-14T11:02:31.272963463Z"

View File

@@ -1,19 +0,0 @@
# apiVersion: "cilium.io/v2alpha1"
# kind: CiliumL2AnnouncementPolicy
# metadata:
# name: general-l2-policy
# namespace: {{ .Release.Namespace }}
# labels:
# app.kubernetes.io/name: general-l2-policy
# app.kubernetes.io/instance: {{ .Release.Name }}
# app.kubernetes.io/part-of: {{ .Release.Name }}
# spec:
# nodeSelector:
# matchExpressions:
# - key: kubernetes.io/hostname
# operator: Exists
# interfaces:
# - end0
# - enp6s0
# externalIPs: true
# loadBalancerIPs: true

View File

@@ -1,9 +0,0 @@
dependencies:
- name: cloudnative-pg
repository: https://cloudnative-pg.io/charts/
version: 0.27.0
- name: plugin-barman-cloud
repository: https://cloudnative-pg.io/charts/
version: 0.4.0
digest: sha256:5e2a32fa5ed8b180ae5e556d65c67eeb3dcf38e2974b0d668eff4ee3c83258ce
generated: "2025-12-30T21:01:48.755246408Z"

View File

@@ -1,12 +1,9 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
version: 4.4.0
- name: cloudflared
repository: oci://harbor.alexlebens.net/helm-charts
version: 2.1.9
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
digest: sha256:d243a4062bd69c3e02536dbc8f0da06f8607a373a772bf521fa16ae10df331b7
generated: "2026-01-17T17:30:30.693066904Z"
version: 1.23.0
digest: sha256:99eb4f940077dc916f5425d196232fcd363223fa7b7b5d3889f5965aa59e26f5
generated: "2025-11-30T21:05:26.699161-06:00"

View File

@@ -19,14 +19,10 @@ dependencies:
- name: app-template
alias: code-server
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
version: 4.4.0
- name: cloudflared
alias: cloudflared
repository: oci://harbor.alexlebens.net/helm-charts
version: 2.1.9
- name: volsync-target
alias: volsync-target-config
version: 0.7.0
repository: oci://harbor.alexlebens.net/helm-charts
version: 1.23.0
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/visual-studio-code.png
# renovate: github=coder/code-server
appVersion: 4.106.3
appVersion: 4.100.2

View File

@@ -26,3 +26,26 @@ spec:
key: /cl01tl/code-server/auth
metadataPolicy: None
property: SUDO_PASSWORD
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: code-server-cloudflared-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: code-server-cloudflared-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/codeserver
metadataPolicy: None
property: token

View File

@@ -0,0 +1,28 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-code-server
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: http-route-code-server
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- code-server.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: code-server
port: 8443
weight: 100

View File

@@ -1,17 +1,17 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: vault-storage-backup
name: code-server-nfs-storage
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: vault-storage-backup
app.kubernetes.io/name: code-server-nfs-storage
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
volumeMode: Filesystem
storageClassName: ceph-filesystem
storageClassName: nfs-client
accessModes:
- ReadWriteMany
- ReadWriteOnce
resources:
requests:
storage: 1Gi

View File

@@ -9,7 +9,7 @@ code-server:
main:
image:
repository: ghcr.io/linuxserver/code-server
tag: 4.108.1@sha256:e9ed472ff25e12a5ee3562684d073b29e508e472ef54e3558e19bf4ff99ef858
tag: 4.106.2@sha256:a98afdbcb59559f11e5e8df284062e55da1076b2e470e13db4aae133ea82bad0
pullPolicy: IfNotPresent
env:
- name: TZ
@@ -35,51 +35,13 @@ code-server:
port: 8443
targetPort: 8443
protocol: HTTP
route:
main:
kind: HTTPRoute
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- code-server.alexlebens.net
rules:
- backendRefs:
- group: ''
kind: Service
name: code-server
port: 8443
weight: 100
matches:
- path:
type: PathPrefix
value: /
persistence:
config:
forceRename: code-server-config
storageClass: ceph-block
accessMode: ReadWriteOnce
size: 2Gi
retain: true
existingClaim: code-server-nfs-storage
advancedMounts:
main:
main:
- path: /config
readOnly: false
volsync-target-config:
pvcTarget: code-server-config
moverSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
local:
enabled: true
schedule: 16 8 * * *
remote:
enabled: false
external:
enabled: true
schedule: 16 9 * * *
cloudflared:
existingSecretName: code-server-cloudflared-secret

View File

@@ -1,6 +0,0 @@
dependencies:
- name: coredns
repository: https://coredns.github.io/helm
version: 1.45.0
digest: sha256:cfcb22a7d0bce4d6000800706597ae43faec74255f1deb5cc3279b2d0a81f6c6
generated: "2025-12-02T17:17:52.206039-06:00"

View File

@@ -1,6 +0,0 @@
dependencies:
- name: democratic-csi
repository: https://democratic-csi.github.io/charts/
version: 0.15.1
digest: sha256:e07d76a67023fb523e7d49730330995d0028faba9a4c7c3a6b87c5828921b3c3
generated: "2026-01-08T20:33:17.610556446Z"

View File

@@ -1,6 +0,0 @@
dependencies:
- name: descheduler
repository: https://kubernetes-sigs.github.io/descheduler/
version: 0.34.0
digest: sha256:1020c1fc8c179744f308e9b79f010dcaf59a05019f7d007157974be97063e12b
generated: "2025-12-01T20:25:26.970808-06:00"

View File

@@ -1,15 +1,12 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
version: 4.4.0
- name: cloudflared
repository: oci://harbor.alexlebens.net/helm-charts
version: 2.1.9
version: 1.23.0
- name: postgres-cluster
repository: oci://harbor.alexlebens.net/helm-charts
version: 7.4.5
- name: redis-replication
repository: oci://harbor.alexlebens.net/helm-charts
version: 1.0.1
digest: sha256:9ae9e0cb222e7ac2fca2bfe5367a4b011b0c83c1a0f98a01342f37fad62c2fe1
generated: "2026-01-17T17:30:43.212095428Z"
version: 6.16.0
digest: sha256:2f3d9f7a8d8d71b19ff3292993647d22a89aa6c444a6f0819b82cd0a577f1ebc
generated: "2025-11-30T21:05:28.43692-06:00"

View File

@@ -20,17 +20,14 @@ dependencies:
- name: app-template
alias: directus
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
version: 4.4.0
- name: cloudflared
alias: cloudflared-directus
repository: oci://harbor.alexlebens.net/helm-charts
version: 2.1.9
version: 1.23.0
- name: postgres-cluster
alias: postgres-18-cluster
version: 7.4.5
repository: oci://harbor.alexlebens.net/helm-charts
- name: redis-replication
version: 1.0.1
alias: postgres-17-cluster
version: 6.16.0
repository: oci://harbor.alexlebens.net/helm-charts
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/directus.png
# renovate: github=directus/directus
appVersion: 11.14.0
appVersion: 11.7.2

View File

@@ -41,36 +41,6 @@ spec:
metadataPolicy: None
property: key
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: directus-oidc-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: directus-oidc-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: OIDC_CLIENT_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/directus
metadataPolicy: None
property: client
- secretKey: OIDC_CLIENT_SECRET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/directus
metadataPolicy: None
property: secret
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
@@ -123,3 +93,153 @@ spec:
key: /cl01tl/directus/redis
metadataPolicy: None
property: password
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: directus-oidc-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: directus-oidc-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: OIDC_CLIENT_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/directus
metadataPolicy: None
property: client
- secretKey: OIDC_CLIENT_SECRET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/directus
metadataPolicy: None
property: secret
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: directus-cloudflared-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: directus-cloudflared-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/directus
metadataPolicy: None
property: token
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: directus-postgresql-17-cluster-backup-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: directus-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: directus-postgresql-17-cluster-backup-secret-weekly
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: directus-postgresql-17-cluster-backup-secret-weekly
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: directus-postgresql-17-cluster-backup-secret-garage
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: directus-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION

View File

@@ -0,0 +1,35 @@
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-directus
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: redis-replication-directus
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.2.1
imagePullPolicy: IfNotPresent
redisSecret:
name: directus-redis-config
key: password
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.76.0

View File

@@ -0,0 +1,30 @@
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisSentinel
metadata:
name: redis-sentinel-directus
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: redis-sentinel-directus
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
redisSentinelConfig:
redisReplicationName: redis-replication-directus
redisReplicationPassword:
secretKeyRef:
name: directus-redis-config
key: password
kubernetesConfig:
image: quay.io/opstree/redis-sentinel:v7.0.15
imagePullPolicy: IfNotPresent
redisSecret:
name: directus-redis-config
key: password
resources:
requests:
cpu: 10m
memory: 128Mi

View File

@@ -0,0 +1,43 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: directus
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: directus
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
selector:
matchLabels:
app.kubernetes.io/name: directus
app.kubernetes.io/instance: {{ .Release.Name }}
endpoints:
- port: http
interval: 30s
scrapeTimeout: 15s
path: /metrics
bearerTokenSecret:
name: directus-metric-token
key: metric-token
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-directus
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: redis-replication-directus
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s

View File

@@ -9,7 +9,7 @@ directus:
main:
image:
repository: directus/directus
tag: 11.14.1
tag: 11.13.4
pullPolicy: IfNotPresent
env:
- name: PUBLIC_URL
@@ -41,27 +41,27 @@ directus:
- name: DB_HOST
valueFrom:
secretKeyRef:
name: directus-postgresql-18-cluster-app
name: directus-postgresql-17-cluster-app
key: host
- name: DB_DATABASE
valueFrom:
secretKeyRef:
name: directus-postgresql-18-cluster-app
name: directus-postgresql-17-cluster-app
key: dbname
- name: DB_PORT
valueFrom:
secretKeyRef:
name: directus-postgresql-18-cluster-app
name: directus-postgresql-17-cluster-app
key: port
- name: DB_USER
valueFrom:
secretKeyRef:
name: directus-postgresql-18-cluster-app
name: directus-postgresql-17-cluster-app
key: user
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: directus-postgresql-18-cluster-app
name: directus-postgresql-17-cluster-app
key: password
- name: SYNCHRONIZATION_STORE
value: redis
@@ -153,69 +153,62 @@ directus:
port: 80
targetPort: 8055
protocol: TCP
serviceMonitor:
main:
selector:
matchLabels:
app.kubernetes.io/name: directus
app.kubernetes.io/instance: directus
serviceName: '{{ include "bjw-s.common.lib.chart.names.fullname" $ }}'
endpoints:
- port: http
interval: 30s
scrapeTimeout: 15s
path: /metrics
bearerTokenSecret:
name: directus-metric-token
key: metric-token
postgres-18-cluster:
cloudflared-directus:
name: cloudflared-directus
existingSecretName: directus-cloudflared-secret
postgres-17-cluster:
mode: recovery
cluster:
storage:
storageClass: local-path
walStorage:
storageClass: local-path
monitoring:
enabled: true
prometheusRule:
enabled: true
recovery:
method: objectStore
objectStore:
destinationPath: s3://postgres-backups/cl01tl/directus/directus-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
index: 1
endpointCredentials: directus-postgresql-17-cluster-backup-secret-garage
backup:
objectStore:
- name: garage-local
- name: external
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/directus/directus-postgresql-17-cluster
index: 1
destinationBucket: postgres-backups
externalSecretCredentialPath: /garage/home-infra/postgres-backups
retentionPolicy: "30d"
isWALArchiver: false
- name: garage-local
destinationPath: s3://postgres-backups/cl01tl/directus/directus-postgresql-17-cluster
index: 1
endpointURL: http://garage-main.garage:3900
endpointCredentials: directus-postgresql-17-cluster-backup-secret-garage
endpointCredentialsIncludeRegion: true
retentionPolicy: "3d"
isWALArchiver: true
# - name: garage-remote
# destinationPath: s3://postgres-backups/cl01tl/directus/directus-postgresql-17-cluster
# index: 1
# destinationBucket: postgres-backups
# externalSecretCredentialPath: /garage/home-infra/postgres-backups
# retentionPolicy: "90d"
# endpointURL: https://garage-ps10rp.boreal-beaufort.ts.net:3900
# endpointCredentials: directus-postgresql-17-cluster-backup-secret-garage
# retentionPolicy: "30d"
# data:
# compression: bzip2
# - name: external
# index: 1
# endpointURL: https://nyc3.digitaloceanspaces.com
# destinationBucket: postgres-backups-ce540ddf106d186bbddca68a
# externalSecretCredentialPath: /garage/home-infra/postgres-backups
# isWALArchiver: false
# jobs: 2
scheduledBackups:
- name: daily-backup
suspend: false
schedule: "0 0 0 * * *"
backupName: external
- name: live-backup
suspend: false
immediate: true
schedule: "0 0 0 * * *"
backupName: garage-local
# - name: weekly-backup
# suspend: true
# immediate: true
# suspend: false
# schedule: "0 0 4 * * SAT"
# backupName: garage-remote
# - name: daily-backup
# suspend: true
# immediate: true
# schedule: "0 0 0 * * *"
# backupName: external
redis-replication:
existingSecret:
enabled: true
name: directus-redis-config
key: password
redisReplication:
clusterSize: 3
sentinel:
enabled: true

View File

@@ -1,6 +0,0 @@
dependencies:
- name: eck-operator
repository: https://helm.elastic.co
version: 3.2.0
digest: sha256:b27ba092ddfa078f763e409dd5db1144a269eff0f45af04f180d844f13466a34
generated: "2025-12-01T20:25:30.722424-06:00"

View File

@@ -1,9 +1,9 @@
dependencies:
- name: element-web
repository: https://ananace.gitlab.io/charts
version: 1.4.27
version: 1.4.24
- name: cloudflared
repository: oci://harbor.alexlebens.net/helm-charts
version: 2.1.9
digest: sha256:ef4b5256d6b405f08f33ec1a2bbda9f8427fcac0c69accd04c19fb4fe1871047
generated: "2026-01-17T17:30:57.57289288Z"
version: 1.23.0
digest: sha256:05aa032adca6d808215d6dcd5d7e38b821a740a53868238f79adaa606444b3ae
generated: "2025-11-30T21:05:30.356497-06:00"

View File

@@ -17,11 +17,11 @@ maintainers:
- name: alexlebens
dependencies:
- name: element-web
version: 1.4.27
version: 1.4.24
repository: https://ananace.gitlab.io/charts
- name: cloudflared
alias: cloudflared
repository: oci://harbor.alexlebens.net/helm-charts
version: 2.1.9
version: 1.23.0
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/element.png
# renovate: github=element-hq/element-web
appVersion: v1.12.6
appVersion: v1.11.100

View File

@@ -1,10 +1,10 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: argo-workflows-oidc-secret
name: element-web-cloudflared-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: argo-workflows-oidc-secret
app.kubernetes.io/name: element-web-cloudflared-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
@@ -12,17 +12,10 @@ spec:
kind: ClusterSecretStore
name: vault
data:
- secretKey: secret
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/argo-workflows
key: /cloudflare/tunnels/element
metadataPolicy: None
property: secret
- secretKey: client
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/argo-workflows
metadataPolicy: None
property: client
property: token

View File

@@ -2,7 +2,7 @@ element-web:
replicaCount: 1
image:
repository: vectorim/element-web
tag: v1.12.8
tag: v1.12.4
pullPolicy: IfNotPresent
defaultServer:
url: https://matrix.alexlebens.dev
@@ -24,3 +24,5 @@ element-web:
requests:
cpu: 10m
memory: 128Mi
cloudflared:
existingSecretName: element-web-cloudflared-secret

View File

@@ -1,9 +1,6 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
digest: sha256:c270dc80232d53598370e68d969ddcb71f26eb6910b7bb51761b88127e065f5a
generated: "2026-01-16T18:46:10.06351851Z"
version: 4.4.0
digest: sha256:9900009eb6415344d8c5387371a0052259092d92f34c21774f6a6abe9f11f43e
generated: "2025-11-30T21:05:32.524168-06:00"

View File

@@ -18,11 +18,6 @@ dependencies:
- name: app-template
alias: ephemera
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
- name: volsync-target
alias: volsync-target-config
version: 0.7.0
repository: oci://harbor.alexlebens.net/helm-charts
version: 4.4.0
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/ephemera.png
# renovate: github=OrwellianEpilogue/ephemera
appVersion: 1.3.1

View File

@@ -42,3 +42,60 @@ spec:
key: /cl01tl/ephemera/config
metadataPolicy: None
property: ntfy-url
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: ephemera-config-backup-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: ephemera-config-backup-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/ephemera/ephemera-config"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key

View File

@@ -0,0 +1,28 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-ephemera
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: http-route-ephemera
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- ephemera.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: ephemera
port: 80
weight: 100

View File

@@ -0,0 +1,26 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: ephemera-config-backup-source
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: ephemera-config-backup-source
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
sourcePVC: ephemera-config
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: ephemera-config-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 10Gi

View File

@@ -9,13 +9,9 @@ ephemera:
main:
image:
repository: ghcr.io/orwellianepilogue/ephemera
tag: 2.0.0
tag: 1.3.1
pullPolicy: IfNotPresent
env:
- name: ALLOWED_ORIGINS
value: https://ephemera.alexlebens.net
- name: BASE_URL
value: https://ephemera.alexlebens.net
- name: AA_BASE_URL
value: https://annas-archive.org
# - name: AA_API_KEY
@@ -38,7 +34,7 @@ ephemera:
flaresolverr:
image:
repository: ghcr.io/flaresolverr/flaresolverr
tag: v3.4.6
tag: v3.4.5
pullPolicy: IfNotPresent
env:
- name: LOG_LEVEL
@@ -56,7 +52,7 @@ ephemera:
apprise-api:
image:
repository: caronc/apprise
tag: 1.3.0
tag: 1.2.6
pullPolicy: IfNotPresent
env:
- name: TZ
@@ -84,30 +80,8 @@ ephemera:
port: 80
targetPort: 8286
protocol: HTTP
route:
main:
kind: HTTPRoute
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- ephemera.alexlebens.net
rules:
- backendRefs:
- group: ''
kind: Service
name: ephemera
port: 80
weight: 100
matches:
- path:
type: PathPrefix
value: /
persistence:
config:
forceRename: ephemera
storageClass: ceph-block
accessMode: ReadWriteOnce
size: 5Gi
@@ -131,13 +105,3 @@ ephemera:
main:
- path: /app/ingest
readOnly: false
volsync-target-config:
pvcTarget: ephemera
local:
enabled: true
schedule: 16 8 * * *
remote:
enabled: false
external:
enabled: true
schedule: 16 9 * * *

View File

@@ -1,6 +0,0 @@
dependencies:
- name: eraser
repository: https://eraser-dev.github.io/eraser/charts
version: 1.4.1
digest: sha256:da828de684b0cd82e99994586f3db4f55c43c01607c4d8d0e70e204c7bbbbf5b
generated: "2025-12-03T22:53:20.200917773Z"

View File

@@ -1,6 +0,0 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
digest: sha256:e05d84dd266b8b456a8bc7f9a2bb3ab01f4ac926efd1a58cf405b0cdab343d3f
generated: "2026-01-17T18:27:08.062835-06:00"

View File

@@ -1,21 +0,0 @@
apiVersion: v2
name: excalidraw
version: 1.0.0
description: Excalidraw
keywords:
- excalidraw
home: https://wiki.alexlebens.dev/
sources:
- https://github.com/excalidraw/excalidraw
- https://hub.docker.com/r/excalidraw/excalidraw
- https://github.com/bjw-s-labs/helm-charts/tree/main/charts/other/app-template
maintainers:
- name: alexlebens
dependencies:
- name: app-template
alias: excalidraw
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/excalidraw.png
# renovate: github=excalidraw/excalidraw
appVersion: 0.6.0

View File

@@ -1,51 +0,0 @@
excalidraw:
controllers:
main:
type: deployment
replicas: 1
strategy: Recreate
revisionHistoryLimit: 3
containers:
main:
image:
repository: excalidraw/excalidraw
tag: latest@sha256:3c2513e830bb6e195147c05b34ecf8393d0ba2b1cc86e93b407a5777d6135c6c
pullPolicy: IfNotPresent
env:
- name: NODE_ENV
value: production
- name: TZ
value: America/Chicago
resources:
requests:
cpu: 10m
memory: 128Mi
service:
main:
controller: main
ports:
http:
port: 80
targetPort: 80
protocol: HTTP
route:
main:
kind: HTTPRoute
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- excalidraw.alexlebens.net
rules:
- backendRefs:
- group: ''
kind: Service
name: excalidraw
port: 80
weight: 100
matches:
- path:
type: PathPrefix
value: /

View File

@@ -1,6 +0,0 @@
dependencies:
- name: external-dns
repository: https://kubernetes-sigs.github.io/external-dns/
version: 1.20.0
digest: sha256:0da4dec408239ea48de1d95fa8ad7701c4fdc0efe67baa8743507c75e62e2a47
generated: "2026-01-03T23:04:25.142170083Z"

View File

@@ -1,6 +0,0 @@
dependencies:
- name: external-secrets
repository: https://charts.external-secrets.io
version: 1.2.1
digest: sha256:20d4fe97e96c6bd5ba958b23121d807d8154c39d58b01511b80025166713a141
generated: "2026-01-03T23:02:15.181743082Z"

View File

@@ -1,15 +1,12 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
version: 4.4.0
- name: cloudflared
repository: oci://harbor.alexlebens.net/helm-charts
version: 2.1.9
version: 1.23.0
- name: postgres-cluster
repository: oci://harbor.alexlebens.net/helm-charts
version: 7.4.5
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
digest: sha256:ded65efcf83f3c0cd3497839a1beddbc616081c286bdac6f584353813e7c8da8
generated: "2026-01-17T17:31:12.045290099Z"
version: 6.16.0
digest: sha256:1c1355c247383bb5aef029eaadaf0c6bbcc23c0e42868178c1ea9a9ab21cc704
generated: "2025-11-30T21:05:34.030606-06:00"

View File

@@ -20,18 +20,14 @@ dependencies:
- name: app-template
alias: freshrss
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
version: 4.4.0
- name: cloudflared
alias: cloudflared
repository: oci://harbor.alexlebens.net/helm-charts
version: 2.1.9
version: 1.23.0
- name: postgres-cluster
alias: postgres-18-cluster
version: 7.4.5
repository: oci://harbor.alexlebens.net/helm-charts
- name: volsync-target
alias: volsync-target-data
version: 0.7.0
alias: postgres-17-cluster
version: 6.16.0
repository: oci://harbor.alexlebens.net/helm-charts
icon: https://cdn.jsdelivr.net/gh/selfhst/icons/png/freshrss.png
# renovate: github=FreshRSS/FreshRSS
appVersion: 1.27.1
appVersion: 1.26.2

View File

@@ -70,3 +70,150 @@ spec:
key: /authentik/oidc/freshrss
metadataPolicy: None
property: crypto-key
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: freshrss-cloudflared-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: freshrss-cloudflared-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: cf-tunnel-token
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cloudflare/tunnels/freshrss
metadataPolicy: None
property: token
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: freshrss-data-backup-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: freshrss-data-backup-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ `{{ .BUCKET_ENDPOINT }}` }}/freshrss/freshrss-data"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: S3_BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/volsync/restic/config
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: access_key
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: secret_key
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: freshrss-postgresql-17-cluster-backup-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: freshrss-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: freshrss-postgresql-17-cluster-backup-secret-garage
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: freshrss-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION

View File

@@ -0,0 +1,35 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: freshrss-data-backup-source
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: freshrss-data-backup-source
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
sourcePVC: freshrss-data
trigger:
schedule: 0 4 * * *
restic:
pruneIntervalDays: 7
repository: freshrss-data-backup-secret
retain:
hourly: 1
daily: 3
weekly: 2
monthly: 2
yearly: 4
moverSecurityContext:
runAsUser: 568
runAsGroup: 568
fsGroup: 568
fsGroupChangePolicy: OnRootMismatch
supplementalGroups:
- 44
- 100
- 109
- 65539
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot

View File

@@ -11,7 +11,7 @@ freshrss:
runAsUser: 0
image:
repository: alpine
tag: 3.23.2
tag: 3.22.2
pullPolicy: IfNotPresent
command:
- /bin/sh
@@ -35,7 +35,7 @@ freshrss:
runAsUser: 0
image:
repository: alpine
tag: 3.23.2
tag: 3.22.2
pullPolicy: IfNotPresent
command:
- /bin/sh
@@ -59,7 +59,7 @@ freshrss:
runAsUser: 0
image:
repository: alpine
tag: 3.23.2
tag: 3.22.2
pullPolicy: IfNotPresent
command:
- /bin/sh
@@ -80,7 +80,7 @@ freshrss:
main:
image:
repository: freshrss/freshrss
tag: 1.28.0
tag: 1.27.1
pullPolicy: IfNotPresent
env:
- name: PGID
@@ -98,22 +98,22 @@ freshrss:
- name: DB_HOST
valueFrom:
secretKeyRef:
name: freshrss-postgresql-18-cluster-app
name: freshrss-postgresql-17-cluster-app
key: host
- name: DB_BASE
valueFrom:
secretKeyRef:
name: freshrss-postgresql-18-cluster-app
name: freshrss-postgresql-17-cluster-app
key: dbname
- name: DB_USER
valueFrom:
secretKeyRef:
name: freshrss-postgresql-18-cluster-app
name: freshrss-postgresql-17-cluster-app
key: user
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: freshrss-postgresql-18-cluster-app
name: freshrss-postgresql-17-cluster-app
key: password
- name: FRESHRSS_INSTALL
value: |
@@ -163,7 +163,6 @@ freshrss:
protocol: HTTP
persistence:
data:
forceRename: freshrss-data
storageClass: ceph-block
accessMode: ReadWriteOnce
size: 5Gi
@@ -192,65 +191,61 @@ freshrss:
main:
- path: /var/www/FreshRSS/extensions
readOnly: false
postgres-18-cluster:
cloudflared:
existingSecretName: freshrss-cloudflared-secret
postgres-17-cluster:
mode: recovery
cluster:
storage:
storageClass: local-path
walStorage:
storageClass: local-path
monitoring:
enabled: true
prometheusRule:
enabled: true
recovery:
method: objectStore
objectStore:
destinationPath: s3://postgres-backups/cl01tl/freshrss/freshrss-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
index: 1
endpointCredentials: freshrss-postgresql-17-cluster-backup-secret-garage
backup:
objectStore:
- name: garage-local
- name: external
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/freshrss/freshrss-postgresql-17-cluster
index: 1
destinationBucket: postgres-backups
externalSecretCredentialPath: /garage/home-infra/postgres-backups
retentionPolicy: "30d"
isWALArchiver: false
- name: garage-local
destinationPath: s3://postgres-backups/cl01tl/freshrss/freshrss-postgresql-17-cluster
index: 1
endpointURL: http://garage-main.garage:3900
endpointCredentials: freshrss-postgresql-17-cluster-backup-secret-garage
endpointCredentialsIncludeRegion: true
retentionPolicy: "3d"
isWALArchiver: true
# - name: garage-remote
# destinationPath: s3://postgres-backups/cl01tl/freshrss/freshrss-postgresql-17-cluster
# index: 1
# destinationBucket: postgres-backups
# externalSecretCredentialPath: /garage/home-infra/postgres-backups
# retentionPolicy: "90d"
# endpointURL: https://garage-ps10rp.boreal-beaufort.ts.net:3900
# endpointCredentials: freshrss-postgresql-17-cluster-backup-secret-garage
# retentionPolicy: "30d"
# data:
# compression: bzip2
# - name: external
# index: 1
# endpointURL: https://nyc3.digitaloceanspaces.com
# destinationBucket: postgres-backups-ce540ddf106d186bbddca68a
# externalSecretCredentialPath: /garage/home-infra/postgres-backups
# isWALArchiver: false
# jobs: 2
scheduledBackups:
- name: daily-backup
suspend: false
schedule: "0 0 0 * * *"
backupName: external
- name: live-backup
suspend: false
immediate: true
schedule: "0 0 0 * * *"
backupName: garage-local
# - name: weekly-backup
# suspend: true
# immediate: true
# schedule: "0 0 4 * * SAT"
# suspend: false
# schedule: "0 2 4 * * SAT"
# backupName: garage-remote
# - name: daily-backup
# suspend: true
# immediate: true
# schedule: "0 0 0 * * *"
# backupName: external
volsync-target-data:
pvcTarget: freshrss-data
moverSecurityContext:
runAsUser: 568
runAsGroup: 568
fsGroup: 568
fsGroupChangePolicy: OnRootMismatch
supplementalGroups:
- 44
- 100
- 109
- 65539
local:
enabled: true
schedule: 18 8 * * *
remote:
enabled: false
external:
enabled: true
schedule: 18 9 * * *

View File

@@ -1,6 +0,0 @@
dependencies:
- name: app-template
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.6.2
digest: sha256:86536c14fa61870a64540b77f65afae8f4308e41a66eefae7fe85b83bf0df30e
generated: "2026-01-16T18:46:33.321938614Z"

View File

@@ -1,488 +0,0 @@
garage:
controllers:
server-1:
type: deployment
replicas: 1
strategy: Recreate
revisionHistoryLimit: 3
pod:
labels:
garage-type: server
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: garage-type
operator: In
values:
- server
topologyKey: kubernetes.io/hostname
containers:
main:
image:
repository: dxflrs/garage
tag: v2.1.0
pullPolicy: IfNotPresent
envFrom:
- secretRef:
name: garage-token-secret
resources:
requests:
cpu: 10m
memory: 128Mi
debug:
image:
repository: ubuntu
tag: resolute-20260106.1
pullPolicy: IfNotPresent
command:
- "sleep"
- "infinity"
resources:
requests:
cpu: 10m
memory: 32Mi
server-2:
type: deployment
replicas: 1
strategy: Recreate
revisionHistoryLimit: 3
pod:
labels:
garage-type: server
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: garage-type
operator: In
values:
- server
topologyKey: kubernetes.io/hostname
containers:
main:
image:
repository: dxflrs/garage
tag: v2.1.0
pullPolicy: IfNotPresent
envFrom:
- secretRef:
name: garage-token-secret
resources:
requests:
cpu: 10m
memory: 128Mi
server-3:
type: deployment
replicas: 1
strategy: Recreate
revisionHistoryLimit: 3
pod:
labels:
garage-type: server
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: garage-type
operator: In
values:
- server
topologyKey: kubernetes.io/hostname
containers:
main:
image:
repository: dxflrs/garage
tag: v2.1.0
pullPolicy: IfNotPresent
envFrom:
- secretRef:
name: garage-token-secret
resources:
requests:
cpu: 10m
memory: 128Mi
webui:
type: deployment
replicas: 1
strategy: Recreate
revisionHistoryLimit: 3
containers:
main:
image:
repository: khairul169/garage-webui
tag: 1.1.0
pullPolicy: IfNotPresent
env:
- name: API_BASE_URL
value: http://garage-main.garage:3903
- name: S3_ENDPOINT_URL
value: http://garage-main.garage:3900
- name: API_ADMIN_KEY
valueFrom:
secretKeyRef:
name: garage-token-secret
key: GARAGE_ADMIN_TOKEN
resources:
requests:
cpu: 10m
memory: 128Mi
configMaps:
config:
enabled: true
data:
garage-1.toml: |
replication_factor = 3
metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data"
metadata_snapshots_dir = "/var/lib/garage/snapshots"
db_engine = "lmdb"
metadata_auto_snapshot_interval = "6h"
compression_level = 3
rpc_bind_addr = "[::]:3901"
rpc_public_addr = "garage-1:3901"
allow_world_readable_secrets = false
[s3_api]
s3_region = "us-east-1"
api_bind_addr = "[::]:3900"
root_domain = ".garage-s3.alexlebens.net"
[s3_web]
bind_addr = "[::]:3902"
root_domain = ".garage-s3.alexlebens.net"
[admin]
api_bind_addr = "[::]:3903"
metrics_require_token = true
garage-2.toml: |
replication_factor = 3
metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data"
metadata_snapshots_dir = "/var/lib/garage/snapshots"
db_engine = "lmdb"
metadata_auto_snapshot_interval = "6h"
compression_level = 3
rpc_bind_addr = "[::]:3901"
rpc_public_addr = "garage-2:3901"
allow_world_readable_secrets = false
[s3_api]
s3_region = "us-east-1"
api_bind_addr = "[::]:3900"
root_domain = ".garage-s3.alexlebens.net"
[s3_web]
bind_addr = "[::]:3902"
root_domain = ".garage-s3.alexlebens.net"
[admin]
api_bind_addr = "[::]:3903"
metrics_require_token = true
garage-3.toml: |
replication_factor = 3
metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data"
metadata_snapshots_dir = "/var/lib/garage/snapshots"
db_engine = "lmdb"
metadata_auto_snapshot_interval = "6h"
compression_level = 3
rpc_bind_addr = "[::]:3901"
rpc_public_addr = "garage-3:3901"
allow_world_readable_secrets = false
[s3_api]
s3_region = "us-east-1"
api_bind_addr = "[::]:3900"
root_domain = ".garage-s3.alexlebens.net"
[s3_web]
bind_addr = "[::]:3902"
root_domain = ".garage-s3.alexlebens.net"
[admin]
api_bind_addr = "[::]:3903"
metrics_require_token = true
service:
garage-main:
forceRename: garage-main
controller: server-2
ports:
s3:
port: 3900
targetPort: 3900
protocol: HTTP
rpc:
port: 3901
targetPort: 3901
protocol: HTTP
web:
port: 3902
targetPort: 3902
            protocol: HTTP
admin:
port: 3903
targetPort: 3903
protocol: HTTP
server-1:
forceRename: garage-1
controller: server-1
ports:
s3:
port: 3900
targetPort: 3900
protocol: HTTP
rpc:
port: 3901
targetPort: 3901
protocol: HTTP
web:
port: 3902
targetPort: 3902
protocol: HTTP
admin:
port: 3903
targetPort: 3903
protocol: HTTP
server-2:
forceRename: garage-2
controller: server-2
ports:
s3:
port: 3900
targetPort: 3900
protocol: HTTP
rpc:
port: 3901
targetPort: 3901
protocol: HTTP
web:
port: 3902
targetPort: 3902
protocol: HTTP
admin:
port: 3903
targetPort: 3903
protocol: HTTP
server-3:
forceRename: garage-3
controller: server-3
ports:
s3:
port: 3900
targetPort: 3900
protocol: HTTP
rpc:
port: 3901
targetPort: 3901
protocol: HTTP
web:
port: 3902
targetPort: 3902
protocol: HTTP
admin:
port: 3903
targetPort: 3903
protocol: HTTP
webui:
controller: webui
ports:
webui:
port: 3909
targetPort: 3909
protocol: HTTP
serviceMonitor:
main:
selector:
matchLabels:
app.kubernetes.io/name: garage
app.kubernetes.io/instance: garage
serviceName: '{{ include "bjw-s.common.lib.chart.names.fullname" $ }}'
endpoints:
- port: admin
interval: 1m
scrapeTimeout: 30s
path: /metrics
bearerTokenSecret:
name: garage-token-secret
key: GARAGE_METRICS_TOKEN
route:
webui:
kind: HTTPRoute
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- garage-webui.alexlebens.net
rules:
- backendRefs:
- group: ''
kind: Service
name: garage-webui
port: 3909
weight: 100
matches:
- path:
type: PathPrefix
value: /
s3:
kind: HTTPRoute
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- garage-s3.alexlebens.net
rules:
- backendRefs:
- group: ''
kind: Service
name: garage-main
port: 3900
weight: 100
matches:
- path:
type: PathPrefix
value: /
persistence:
config:
enabled: true
type: configMap
name: garage
advancedMounts:
server-1:
main:
- path: /etc/garage.toml
readOnly: true
mountPropagation: None
subPath: garage-1.toml
debug:
- path: /etc/garage.toml
readOnly: true
mountPropagation: None
subPath: garage-1.toml
server-2:
main:
- path: /etc/garage.toml
readOnly: true
mountPropagation: None
subPath: garage-2.toml
server-3:
main:
- path: /etc/garage.toml
readOnly: true
mountPropagation: None
subPath: garage-3.toml
webui:
main:
- path: /etc/garage.toml
readOnly: true
mountPropagation: None
subPath: garage-1.toml
db-1:
forceRename: garage-db-1
storageClass: ceph-block
accessMode: ReadWriteOnce
size: 50Gi
retain: true
advancedMounts:
server-1:
main:
- path: /var/lib/garage/meta
readOnly: false
debug:
- path: /var/lib/garage/meta
readOnly: false
db-2:
forceRename: garage-db-2
storageClass: ceph-block
accessMode: ReadWriteOnce
size: 50Gi
retain: true
advancedMounts:
server-2:
main:
- path: /var/lib/garage/meta
readOnly: false
db-3:
forceRename: garage-db-3
storageClass: ceph-block
accessMode: ReadWriteOnce
size: 50Gi
retain: true
advancedMounts:
server-3:
main:
- path: /var/lib/garage/meta
readOnly: false
data-1:
forceRename: garage-data
storageClass: synology-iscsi-delete
accessMode: ReadWriteOnce
size: 800Gi
retain: true
advancedMounts:
server-1:
main:
- path: /var/lib/garage/data
readOnly: false
debug:
- path: /var/lib/garage/data
readOnly: false
data-2:
forceRename: garage-data-2
storageClass: synology-iscsi-delete
accessMode: ReadWriteOnce
size: 800Gi
retain: true
advancedMounts:
server-2:
main:
- path: /var/lib/garage/data
readOnly: false
data-3:
forceRename: garage-data-3
storageClass: synology-iscsi-delete
accessMode: ReadWriteOnce
size: 800Gi
retain: true
advancedMounts:
server-3:
main:
- path: /var/lib/garage/data
readOnly: false
snapshots:
forceRename: garage-snapshots
storageClass: synology-iscsi-delete
accessMode: ReadWriteOnce
size: 50Gi
retain: true
advancedMounts:
server-1:
main:
- path: /var/lib/garage/snapshots
readOnly: false

View File

@@ -1,12 +0,0 @@
dependencies:
- name: gatus
repository: https://twin.github.io/helm-charts
version: 1.4.4
- name: postgres-cluster
repository: oci://harbor.alexlebens.net/helm-charts
version: 7.4.5
- name: volsync-target
repository: oci://harbor.alexlebens.net/helm-charts
version: 0.7.0
digest: sha256:ee32795b47519463ec6d1219bf4ec16784b1c42d98ae8a330e9650200d11c033
generated: "2025-12-27T19:45:37.106953505Z"

View File

@@ -1,51 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: gatus-config-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: gatus-config-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: NTFY_TOKEN
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /ntfy/user/cl01tl
metadataPolicy: None
property: token
---
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: gatus-oidc-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: gatus-oidc-secret
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: OIDC_CLIENT_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/gatus
metadataPolicy: None
property: client
- secretKey: OIDC_CLIENT_SECRET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /authentik/oidc/gatus
metadataPolicy: None
property: secret

Some files were not shown because too many files have changed in this diff.