Compare commits

...

95 Commits

Author SHA1 Message Date
47d7604aac change repo 2025-05-01 22:14:19 -05:00 (release-charts-cloudflared / release (push): successful in 15s)
ecf6e80a20 update image 2025-05-01 22:06:54 -05:00 (release-charts-cloudflared / release (push): failing after 7s)
f6bc5f42a5 change release 2025-04-11 15:52:31 -05:00 (release-charts-cloudflared / release (push): successful in 18s)
1b28dbf3db update image 2025-04-03 22:11:32 -05:00 (release-charts-cloudflared / release (push): successful in 1m19s)
0f2d18fc7a update repo config 2025-03-20 01:15:16 -05:00
0c093bd754 update workflows 2025-03-19 12:14:02 -05:00
0c8d26e3eb organize 2025-03-14 21:41:10 -05:00
82d93fc450 change config 2025-03-14 20:32:55 -05:00
2657f162c4 proper path 2025-03-14 16:07:39 -05:00
b7d53203da add github workflow 2025-03-14 16:05:53 -05:00
21a646dabd add name 2025-03-14 15:53:55 -05:00
0d15a1dadd change tag 2025-03-14 15:52:37 -05:00
a7fe403702 change env 2025-03-14 15:47:53 -05:00
34957e0c18 export env proper 2025-03-14 15:45:08 -05:00
a9286227f7 use workflow 2025-03-14 15:38:00 -05:00
3f6faacaa1 change dir 2025-03-14 15:34:09 -05:00
5817f674f4 remove github workflow 2025-03-14 15:33:08 -05:00
2786520504 extract metadata 2025-03-14 15:31:38 -05:00
c93f608874 change paths 2025-03-14 15:21:56 -05:00
4164f50bce update common chart 2025-03-14 15:21:04 -05:00
c060846f7b add plugin 2025-03-14 15:18:10 -05:00
673a8c686f use push 2025-03-14 15:15:39 -05:00
707cb159b9 change path 2025-03-14 15:12:39 -05:00
90a61573bc convert to use gitea docs 2025-03-14 15:06:19 -05:00
ad1fa6786a disable prov 2025-03-14 14:59:26 -05:00
28ed0e8735 fix path 2025-03-14 14:53:51 -05:00
0e3de3cca7 build helm depend 2025-03-14 14:37:31 -05:00
53f37bc75a update workflows 2025-03-14 14:34:07 -05:00
01d96d9a25 add path 2025-03-14 14:33:51 -05:00
76823dc414 update common 2025-03-14 13:30:26 -05:00 (Release Charts / release (push): failing after 20s)
f97b6ab657 change workflow 2025-03-14 13:23:53 -05:00
4bee2a675c update image 2025-03-14 13:10:01 -05:00 (Release Charts / release (push): failing after 25s)
0094b5611f add workflows 2025-03-14 12:26:23 -05:00
bb7fb1eadb disable workflows 2025-03-14 11:13:28 -05:00
99ed8cce53 change config 2025-03-13 23:02:05 -05:00
02bec682c2 update library chart 2025-03-05 17:56:08 -06:00
c549882df9 update image 2025-03-03 11:17:13 -06:00
e28f44b697 update image 2025-03-03 11:16:15 -06:00
78afcf24d3 update version 2025-02-26 13:57:44 -06:00
86e87dbbba add dep name 2025-02-26 13:55:58 -06:00
39134cbd95 use deb version 2025-02-26 13:54:41 -06:00
9f66bd588c remove days 2025-02-26 13:38:48 -06:00
81aac4790e update image 2025-02-17 20:19:32 -06:00
renovate[bot] 94b6b4b0fb Update helm/chart-releaser-action action to v1.7.0 (#76) 2025-02-17 20:17:54 -06:00
renovate[bot] 27edd0a1ef Update helm/chart-testing-action action to v2.7.0 (#77) 2025-02-17 20:17:48 -06:00
94184ea569 update chart 2025-02-17 20:17:36 -06:00
08473fc265 update image 2025-02-17 20:16:25 -06:00
81d3ecf237 adjust schedule 2025-01-30 21:08:38 -06:00
8392d67790 update chart 2025-01-17 17:23:14 -06:00
3f06bf148c update image 2025-01-17 17:22:00 -06:00
5259488c05 chagne resources 2025-01-08 17:39:10 -06:00
09c693d371 reduce resource request 2025-01-08 15:50:21 -06:00
ec6f44c6bc change resource 2025-01-08 15:33:59 -06:00
35f331e29a fix helm/prom bracket interaction 2025-01-08 15:20:28 -06:00
3b0481fcb1 add default rules 2025-01-07 14:22:25 -06:00
e2dfd70dc4 change default resources 2025-01-07 13:45:34 -06:00
ffc253ef7d add description of values 2024-12-30 17:10:54 -06:00
77dd85362e update dependency chart 2024-12-30 17:04:09 -06:00
d5bb83bf84 add description of values 2024-12-30 17:03:45 -06:00
11d3dd927b update dependency chart 2024-12-30 17:00:37 -06:00
1b67b5cbb6 add description of values 2024-12-30 16:59:49 -06:00
56fe199fb9 add precommit hooks 2024-12-30 16:55:01 -06:00
8ec7f590b2 upgrade base image to 17 2024-12-24 21:08:05 -06:00
d2444fb544 set switch for superuser 2024-12-22 17:29:30 -06:00
202a534e8e fix missing field 2024-12-21 23:48:11 -06:00
c36e4e371f reorganize values 2024-12-21 23:40:21 -06:00
1ac9444bb2 fix condition flow 2024-12-21 23:29:50 -06:00
275fcd8568 use cluster values 2024-12-21 23:26:40 -06:00
158d4ca676 change method 2024-12-21 23:22:34 -06:00
32e232d8e2 force hardcoded value for testing 2024-12-21 23:08:17 -06:00
93d2f916fb use value for name 2024-12-21 22:53:59 -06:00
b1a6a2fd39 remove condition 2024-12-21 22:46:17 -06:00
d3307d4f70 use different function 2024-12-21 22:39:52 -06:00
1b7018d3bd fix database naming 2024-12-21 22:31:00 -06:00
b75721ae1d add option to specifiy database name for replica 2024-12-21 22:20:09 -06:00
renovate[bot] e0e4f6ee8a Update renovate/renovate Docker tag to v39 (#71) 2024-12-21 19:55:23 -06:00
renovate[bot] 7dd80d4528 Migrate config .github/renovate.json (#72) 2024-12-21 19:55:16 -06:00
24af841f19 update workflows 2024-12-21 18:11:39 -06:00
16211d4c62 remove schedule 2024-12-21 18:11:29 -06:00
513c46c957 change to midnight daily 2024-12-20 19:33:25 -06:00
3fad4e4ff0 update image 2024-12-20 19:25:40 -06:00
1f867e0276 update image 2024-12-20 19:25:03 -06:00
601790ab7a change backup schedule 2024-12-19 14:50:00 -06:00
16ebdda6a4 update image 2024-12-19 13:59:37 -06:00
dbf8f14512 update image 2024-12-19 13:58:37 -06:00
22dcd7a14c update image 2024-12-16 10:31:56 -06:00
8862d97c27 change retention policy 2024-12-12 11:12:58 -06:00
1f4cd543c0 bump chart version 2024-11-23 22:40:06 -06:00
4aac272e98 update image 2024-11-23 22:39:06 -06:00
b8602fb919 update image to 16.6 2024-11-23 22:38:36 -06:00
fb34897269 update image 2024-10-19 00:58:50 -05:00
ec27eff4da add priority class name and tolerations 2024-10-13 12:39:03 -05:00
2b31df483e listen on all addresses 2024-10-12 23:35:08 -05:00
53191f1d68 add generic device plugin 2024-10-12 23:18:07 -05:00
172526fb79 update common chart 2024-10-11 19:03:23 -05:00
24 changed files with 735 additions and 167 deletions

View File

@@ -0,0 +1,38 @@
name: lint-and-test-charts
on: pull_request
jobs:
lint-test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@v4
with:
version: latest
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.13"
check-latest: true
- name: Set up Chart Testing
uses: helm/chart-testing-action@v2.7.0
- name: Run Chart Testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --target-branch ${{ gitea.event.repository.default_branch }})
if [[ -n "$changed" ]]; then
echo "changed=true" >> $GITHUB_OUTPUT
fi
- name: Run Chart Testing (lint)
if: steps.list-changed.outputs.changed == 'true'
run: ct lint --target-branch ${{ gitea.event.repository.default_branch }}
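The workflow above is the Gitea Actions port of the chart-testing setup; the same checks can be reproduced locally before opening a pull request. A minimal sketch, assuming the charts live under charts/, the default branch is main, and ct plus Helm are installed locally:

    # hypothetical local equivalent of the lint-test job
    helm repo add bjw-s-labs https://bjw-s-labs.github.io/helm-charts/   # dependency repository used by these charts
    ct list-changed --target-branch main --chart-dirs charts             # which charts differ from main
    ct lint --target-branch main --chart-dirs charts                     # lint only the changed charts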

View File

@@ -0,0 +1,57 @@
name: release-charts-cloudflared
on:
push:
branches:
- main
paths:
- "charts/cloudflared/**"
workflow_dispatch:
env:
WORKFLOW_DIR: "charts/cloudflared"
jobs:
release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Helm
uses: azure/setup-helm@v4
with:
token: ${{ secrets.GITEA_TOKEN }}
version: latest
- name: Package Helm Chart
run: |
cd $WORKFLOW_DIR
helm dependency build
echo "PACKAGE_PATH=$(helm package . | awk '{print $NF}')" >> $GITEA_ENV
- name: Publish Helm Chart to Harbor
run: |
helm registry login ${{ vars.REGISTRY_HOST }} -u ${{ vars.REGISTRY_USER }} -p ${{ secrets.REGISTRY_SECRET }}
helm push ${{ env.PACKAGE_PATH }} oci://${{ vars.REGISTRY_HOST }}/helm-charts
- name: Publish Helm Chart to Gitea
run: |
helm plugin install https://github.com/chartmuseum/helm-push
helm repo add --username ${{ gitea.actor }} --password ${{ secrets.REPOSITORY_TOKEN }} helm-charts https://${{ vars.REPOSITORY_HOST }}/api/packages/alexlebens/helm
helm cm-push ${{ env.PACKAGE_PATH }} helm-charts
- name: Extract Chart Metadata
run: |
cd $WORKFLOW_DIR
echo "CHART_VERSION=$(yq '.version' Chart.yaml)" >> $GITEA_ENV
echo "CHART_NAME=$(yq '.name' Chart.yaml)" >> $GITEA_ENV
- name: Release Helm Chart
uses: akkuman/gitea-release-action@v1
with:
name: ${{ env.CHART_NAME }}-${{ env.CHART_VERSION }}
tag_name: ${{ env.CHART_NAME }}-${{ env.CHART_VERSION }}
files: |-
${{ env.PACKAGE_PATH }}
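Once this workflow has pushed a package, the chart can be consumed from either target. A hedged sketch; harbor.example.com and gitea.example.com are placeholders standing in for the repository's REGISTRY_HOST and REPOSITORY_HOST variables:

    # install from the OCI registry (Harbor) the workflow pushes to
    helm registry login harbor.example.com
    helm install cloudflared oci://harbor.example.com/helm-charts/cloudflared --version 1.14.7

    # or install from the Gitea package registry the workflow also publishes to
    helm repo add alexlebens-charts https://gitea.example.com/api/packages/alexlebens/helm
    helm install cloudflared alexlebens-charts/cloudflared --version 1.14.7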

View File

@@ -0,0 +1,57 @@
name: release-charts-generic-device-plugin
on:
push:
branches:
- main
paths:
- "charts/generic-device-plugin/**"
workflow_dispatch:
env:
WORKFLOW_DIR: "charts/generic-device-plugin"
jobs:
release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Helm
uses: azure/setup-helm@v4
with:
token: ${{ secrets.GITEA_TOKEN }}
version: latest
- name: Package Helm Chart
run: |
cd $WORKFLOW_DIR
helm dependency build
echo "PACKAGE_PATH=$(helm package . | awk '{print $NF}')" >> $GITEA_ENV
- name: Publish Helm Chart to Harbor
run: |
helm registry login ${{ vars.REGISTRY_HOST }} -u ${{ vars.REGISTRY_USER }} -p ${{ secrets.REGISTRY_SECRET }}
helm push ${{ env.PACKAGE_PATH }} oci://${{ vars.REGISTRY_HOST }}/helm-charts
- name: Publish Helm Chart to Gitea
run: |
helm plugin install https://github.com/chartmuseum/helm-push
helm repo add --username ${{ gitea.actor }} --password ${{ secrets.REPOSITORY_TOKEN }} helm-charts https://${{ vars.REPOSITORY_HOST }}/api/packages/alexlebens/helm
helm cm-push ${{ env.PACKAGE_PATH }} helm-charts
- name: Extract Chart Metadata
run: |
cd $WORKFLOW_DIR
echo "CHART_VERSION=$(yq '.version' Chart.yaml)" >> $GITEA_ENV
echo "CHART_NAME=$(yq '.name' Chart.yaml)" >> $GITEA_ENV
- name: Release Helm Chart
uses: akkuman/gitea-release-action@v1
with:
name: ${{ env.CHART_NAME }}-${{ env.CHART_VERSION }}
tag_name: ${{ env.CHART_NAME }}-${{ env.CHART_VERSION }}
files: |-
${{ env.PACKAGE_PATH }}

View File

@@ -0,0 +1,57 @@
name: release-charts-postgres-cluster
on:
push:
branches:
- main
paths:
- "charts/postgres-cluster/**"
workflow_dispatch:
env:
WORKFLOW_DIR: "charts/postgres-cluster"
jobs:
release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Helm
uses: azure/setup-helm@v4
with:
token: ${{ secrets.GITEA_TOKEN }}
version: latest
- name: Package Helm Chart
run: |
cd $WORKFLOW_DIR
helm dependency build
echo "PACKAGE_PATH=$(helm package . | awk '{print $NF}')" >> $GITEA_ENV
- name: Publish Helm Chart to Harbor
run: |
helm registry login ${{ vars.REGISTRY_HOST }} -u ${{ vars.REGISTRY_USER }} -p ${{ secrets.REGISTRY_SECRET }}
helm push ${{ env.PACKAGE_PATH }} oci://${{ vars.REGISTRY_HOST }}/helm-charts
- name: Publish Helm Chart to Gitea
run: |
helm plugin install https://github.com/chartmuseum/helm-push
helm repo add --username ${{ gitea.actor }} --password ${{ secrets.REPOSITORY_TOKEN }} helm-charts https://${{ vars.REPOSITORY_HOST }}/api/packages/alexlebens/helm
helm cm-push ${{ env.PACKAGE_PATH }} helm-charts
- name: Extract Chart Metadata
run: |
cd $WORKFLOW_DIR
echo "CHART_VERSION=$(yq '.version' Chart.yaml)" >> $GITEA_ENV
echo "CHART_NAME=$(yq '.name' Chart.yaml)" >> $GITEA_ENV
- name: Release Helm Chart
uses: akkuman/gitea-release-action@v1
with:
name: ${{ env.CHART_NAME }}-${{ env.CHART_VERSION }}
tag_name: ${{ env.CHART_NAME }}-${{ env.CHART_VERSION }}
files: |-
${{ env.PACKAGE_PATH }}

View File

@@ -1,2 +0,0 @@
# This file is processed by Renovate bot so that it creates a PR on new major Renovate versions
FROM renovate/renovate:38

View File

@@ -1,37 +0,0 @@
name: lint-and-test-charts
on: pull_request
jobs:
lint-test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@v4
with:
version: v3.13.3
- uses: actions/setup-python@v5
with:
python-version: "3.10"
check-latest: true
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.1
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }})
if [[ -n "$changed" ]]; then
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
- name: Run chart-testing (lint)
if: steps.list-changed.outputs.changed == 'true'
run: ct lint --target-branch ${{ github.event.repository.default_branch }}

View File

@@ -4,6 +4,8 @@ on:
  push:
    branches:
      - main
+    paths:
+      - "charts/**"
jobs:
  release:
@@ -22,6 +24,6 @@ jobs:
        git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
      - name: Run chart-releaser
-        uses: helm/chart-releaser-action@v1.6.0
+        uses: helm/chart-releaser-action@v1.7.0
        env:
          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"

.pre-commit-config.yaml (new file, 19 lines)
View File

@@ -0,0 +1,19 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v2.3.0
hooks:
- id: end-of-file-fixer
- id: trailing-whitespace
- id: check-added-large-files
- id: check-yaml
exclude: 'charts/'
args:
- --multi
- repo: https://github.com/norwoodj/helm-docs
rev: v1.14.2
hooks:
- id: helm-docs
args:
- --chart-search-root=charts
- --template-files=./_templates.gotmpl
- --template-files=README.md.gotmpl
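The helm-docs hook is what regenerates the README tables that appear later in this diff; the standard pre-commit bootstrap is enough to run everything locally (a sketch, not repository-specific):

    pip install pre-commit            # or any other supported install method
    pre-commit install                # register the hooks in .git/hooks
    pre-commit run --all-files        # run end-of-file-fixer, check-yaml, helm-docs, etc. across the repo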

View File

@@ -1,18 +1,18 @@
apiVersion: v2
name: cloudflared
-version: 1.8.0
+version: 1.14.7
description: Cloudflared Tunnel
keywords:
  - cloudflare
  - tunnel
sources:
  - https://github.com/cloudflare/cloudflared
-  - https://github.com/bjw-s/helm-charts/tree/main/charts/library/common
+  - https://github.com/bjw-s-labs/helm-charts/tree/main/charts/library/common
maintainers:
  - name: alexlebens
dependencies:
  - name: common
-    repository: https://bjw-s.github.io/helm-charts/
+    repository: https://bjw-s-labs.github.io/helm-charts/
-    version: 3.4.0
+    version: 3.7.3
icon: https://avatars.githubusercontent.com/u/314135?s=48&v=4
-appVersion: "2024.9.1"
+appVersion: "2025.4.2"

View File

@@ -1,16 +1,35 @@
-## Introduction
-[Cloudflared](https://github.com/cloudflare/cloudflared)
-Contains the command-line client for Cloudflare Tunnel, a tunneling daemon that proxies traffic from the Cloudflare network to your origins.
-This chart bootstraps a [Cloudflared](https://github.com/cloudflare/cloudflared) tunnel on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
-## Prerequisites
-- Kubernetes
-- Helm
-## Parameters
-See the [values files](values.yaml).
+# cloudflared
+![Version: 1.14.7](https://img.shields.io/badge/Version-1.14.7-informational?style=flat-square) ![AppVersion: 2025.4.2](https://img.shields.io/badge/AppVersion-2025.4.2-informational?style=flat-square)
+Cloudflared Tunnel
+## Maintainers
+| Name | Email | Url |
+| ---- | ------ | --- |
+| alexlebens | | |
+## Source Code
+* <https://github.com/cloudflare/cloudflared>
+* <https://github.com/bjw-s-labs/helm-charts/tree/main/charts/library/common>
+## Requirements
| Repository | Name | Version |
|------------|------|---------|
| https://bjw-s-labs.github.io/helm-charts/ | common | 3.7.3 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| existingSecretKey | string | `"cf-tunnel-token"` | Name of key that contains the token in the existingSecret |
| existingSecretName | string | `"cloudflared-secret"` | Name of existing secret that contains Cloudflare token |
| image | object | `{"pullPolicy":"IfNotPresent","repository":"cloudflare/cloudflared","tag":"2025.4.2"}` | Default image |
| name | string | `"cloudflared"` | Name override of release |
| resources | object | `{"requests":{"cpu":"10m","memory":"128Mi"}}` | Default resources |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)

View File

@@ -1,11 +1,20 @@
+# -- Name override of release
name: cloudflared
+# -- Name of existing secret that contains Cloudflare token
existingSecretName: cloudflared-secret
+# -- Name of key that contains the token in the existingSecret
existingSecretKey: cf-tunnel-token
+# -- Default image
image:
  repository: cloudflare/cloudflared
-  tag: "2024.9.1"
+  tag: "2025.4.2"
  pullPolicy: IfNotPresent
+# -- Default resources
resources:
  requests:
-    cpu: 100m
+    cpu: 10m
    memory: 128Mi
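The chart expects the tunnel token to already exist in a secret named by existingSecretName/existingSecretKey, so installation is a two-step process. A hedged sketch, assuming a cloudflared namespace and the placeholder registry host from the release workflow example:

    kubectl create namespace cloudflared
    kubectl -n cloudflared create secret generic cloudflared-secret \
      --from-literal=cf-tunnel-token=<your-tunnel-token>             # default names from values.yaml
    helm install cloudflared oci://harbor.example.com/helm-charts/cloudflared -n cloudflared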

View File

@@ -0,0 +1,18 @@
apiVersion: v2
name: generic-device-plugin
version: 0.1.10
description: Generic Device Plugin
keywords:
- generic-device-plugin
- device
- plugin
sources:
- https://github.com/squat/generic-device-plugin
- https://github.com/bjw-s/helm-charts/tree/main/charts/library/common
maintainers:
- name: alexlebens
dependencies:
- name: common
repository: https://bjw-s.github.io/helm-charts/
version: 3.7.3
appVersion: 0.1.10

View File

@@ -0,0 +1,37 @@
# generic-device-plugin
![Version: 0.1.10](https://img.shields.io/badge/Version-0.1.10-informational?style=flat-square) ![AppVersion: 0.1.10](https://img.shields.io/badge/AppVersion-0.1.10-informational?style=flat-square)
Generic Device Plugin
## Maintainers
| Name | Email | Url |
| ---- | ------ | --- |
| alexlebens | | |
## Source Code
* <https://github.com/squat/generic-device-plugin>
* <https://github.com/bjw-s/helm-charts/tree/main/charts/library/common>
## Requirements
| Repository | Name | Version |
|------------|------|---------|
| https://bjw-s.github.io/helm-charts/ | common | 3.7.3 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| config | object | `{"data":"devices:\n - name: serial\n groups:\n - paths:\n - path: /dev/ttyUSB*\n - paths:\n - path: /dev/ttyACM*\n - paths:\n - path: /dev/tty.usb*\n - paths:\n - path: /dev/cu.*\n - paths:\n - path: /dev/cuaU*\n - paths:\n - path: /dev/rfcomm*\n - name: video\n groups:\n - paths:\n - path: /dev/video0\n - name: fuse\n groups:\n - count: 10\n paths:\n - path: /dev/fuse\n - name: audio\n groups:\n - count: 10\n paths:\n - path: /dev/snd\n - name: capture\n groups:\n - paths:\n - path: /dev/snd/controlC0\n - path: /dev/snd/pcmC0D0c\n - paths:\n - path: /dev/snd/controlC1\n mountPath: /dev/snd/controlC0\n - path: /dev/snd/pcmC1D0c\n mountPath: /dev/snd/pcmC0D0c\n - paths:\n - path: /dev/snd/controlC2\n mountPath: /dev/snd/controlC0\n - path: /dev/snd/pcmC2D0c\n mountPath: /dev/snd/pcmC0D0c\n - paths:\n - path: /dev/snd/controlC3\n mountPath: /dev/snd/controlC0\n - path: /dev/snd/pcmC3D0c\n mountPath: /dev/snd/pcmC0D0c\n","enabled":true}` | Config map |
| config.data | string | See [values.yaml](./values.yaml) | generic-device-plugin config file [[ref]](https://github.com/squat/generic-device-plugin#usage) |
| deviceDomain | string | `"squat.ai"` | Domain used by devices for identifcation |
| image | object | `{"pullPolicy":"Always","repository":"ghcr.io/squat/generic-device-plugin","tag":"latest@sha256:d7d0951df7f11479185fd9fba1c1cb4d9c8f3232d38a5468d6fe80074f2b45d5"}` | Default image |
| name | string | `"generic-device-plugin"` | Name override of release |
| resources | object | `{"limit":{"cpu":"100m","memory":"20Mi"},"requests":{"cpu":"50m","memory":"10Mi"}}` | Default resources |
| service | object | `{"listenPort":8080}` | Service port |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)

View File

@@ -0,0 +1,82 @@
{{ include "bjw-s.common.loader.init" . }}
{{ define "genericDevicePlugin.hardcodedValues" }}
{{ if not .Values.global.nameOverride }}
global:
nameOverride: {{ .Values.name }}
{{ end }}
controllers:
main:
type: daemonset
pod:
priorityClassName: system-node-critical
tolerations:
- operator: "Exists"
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
containers:
main:
image:
repository: {{ .Values.image.repository }}
tag: {{ .Values.image.tag }}
pullPolicy: {{ .Values.image.pullPolicy }}
args:
- --config=/config/config.yaml
env:
- name: LISTEN
value: :{{ .Values.service.listenPort }}
- name: PLUGIN_DIRECTORY
value: /var/lib/kubelet/device-plugins
- name: DOMAIN
value: {{ .Values.deviceDomain }}
probes:
liveness:
type: HTTP
path: /health
readiness:
type: HTTP
path: /health
startup:
type: HTTP
path: /health
securityContext:
privileged: True
configMaps:
config:
enabled: {{ .Values.config.enabled }}
data:
config.yaml: {{ toYaml .Values.config.data | nindent 8 }}
service:
main:
controller: main
ports:
http:
port: {{ .Values.service.listenPort }}
persistence:
config:
enabled: true
type: configMap
name: {{ .Values.name }}-config
device-plugins:
enabled: true
type: hostPath
hostPath: /var/lib/kubelet/device-plugins
dev:
enabled: true
type: hostPath
hostPath: /dev
serviceMonitor:
main:
serviceName: generic-device-plugin
endpoints:
- port: http
scheme: http
path: /metrics
interval: 30s
scrapeTimeout: 10s
{{ end }}
{{ $_ := mergeOverwrite .Values (include "genericDevicePlugin.hardcodedValues" . | fromYaml) }}
{{/* Render the templates */}}
{{ include "bjw-s.common.loader.generate" . }}

View File

@@ -0,0 +1,80 @@
# -- Name override of release
name: generic-device-plugin
# -- Default image
image:
repository: ghcr.io/squat/generic-device-plugin
tag: latest@sha256:d7d0951df7f11479185fd9fba1c1cb4d9c8f3232d38a5468d6fe80074f2b45d5
pullPolicy: Always
# -- Domain used by devices for identifcation
deviceDomain: squat.ai
# -- Service port
service:
listenPort: 8080
# -- Default resources
resources:
limit:
cpu: 100m
memory: 20Mi
requests:
cpu: 50m
memory: 10Mi
# -- Config map
config:
enabled: true
# -- generic-device-plugin config file [[ref]](https://github.com/squat/generic-device-plugin#usage)
# @default -- See [values.yaml](./values.yaml)
data: |
devices:
- name: serial
groups:
- paths:
- path: /dev/ttyUSB*
- paths:
- path: /dev/ttyACM*
- paths:
- path: /dev/tty.usb*
- paths:
- path: /dev/cu.*
- paths:
- path: /dev/cuaU*
- paths:
- path: /dev/rfcomm*
- name: video
groups:
- paths:
- path: /dev/video0
- name: fuse
groups:
- count: 10
paths:
- path: /dev/fuse
- name: audio
groups:
- count: 10
paths:
- path: /dev/snd
- name: capture
groups:
- paths:
- path: /dev/snd/controlC0
- path: /dev/snd/pcmC0D0c
- paths:
- path: /dev/snd/controlC1
mountPath: /dev/snd/controlC0
- path: /dev/snd/pcmC1D0c
mountPath: /dev/snd/pcmC0D0c
- paths:
- path: /dev/snd/controlC2
mountPath: /dev/snd/controlC0
- path: /dev/snd/pcmC2D0c
mountPath: /dev/snd/pcmC0D0c
- paths:
- path: /dev/snd/controlC3
mountPath: /dev/snd/controlC0
- path: /dev/snd/pcmC3D0c
mountPath: /dev/snd/pcmC0D0c
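Each device group above is advertised by the plugin as an extended resource under the deviceDomain, so pods request devices the same way they request CPU or memory. A hedged sketch of a consumer pod; the resource name squat.ai/video follows from deviceDomain plus the group name, and the pod name and image are placeholders:

    kubectl apply -f - <<'EOF'
    apiVersion: v1
    kind: Pod
    metadata:
      name: video-consumer            # placeholder name
    spec:
      containers:
        - name: app
          image: alpine:3.21          # placeholder image
          command: ["sleep", "infinity"]
          resources:
            limits:
              squat.ai/video: 1       # device exposed by the generic-device-plugin config above
    EOF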

View File

@@ -1,6 +1,6 @@
apiVersion: v2
name: postgres-cluster
-version: 3.12.1
+version: 4.2.1
description: Chart for cloudnative-pg cluster
keywords:
  - database
@@ -10,4 +10,4 @@ sources:
maintainers:
  - name: alexlebens
icon: https://avatars.githubusercontent.com/u/100373852?s=48&v=4
-appVersion: v1.24.0
+appVersion: v1.25.0

View File

@@ -1,17 +1,82 @@
-## Introduction
-[CloudNative PG](https://github.com/cloudnative-pg/cloudnative-pg)
-CloudNativePG is the Kubernetes operator that covers the full lifecycle of a highly available PostgreSQL database cluster with a primary/standby architecture, using native streaming replication.
-This chart bootstraps a [CNPG](https://github.com/cloudnative-pg/cloudnative-pg) cluster on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
-## Prerequisites
-- Kubernetes
-- Helm
-- CloudNative PG Operator
-## Parameters
-See the [values files](values.yaml).
+# postgres-cluster
+![Version: 4.2.1](https://img.shields.io/badge/Version-4.2.1-informational?style=flat-square) ![AppVersion: v1.25.0](https://img.shields.io/badge/AppVersion-v1.25.0-informational?style=flat-square)
+Chart for cloudnative-pg cluster
+## Maintainers
+| Name | Email | Url |
+| ---- | ------ | --- |
+| alexlebens | | |
+## Source Code
+* <https://github.com/cloudnative-pg/cloudnative-pg>
+## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| backup.backupIndex | int | `1` | Generate external cluster name, creates: postgresql-{{ .Release.Name }}-cluster-backup-index-{{ .Values.backups.backupIndex }}" |
| backup.backupName | string | `""` | Name of the backup cluster in the object store, defaults to "cluster.name" |
| backup.data.compression | string | `"snappy"` | Data compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`. |
| backup.data.encryption | string | `""` | Whether to instruct the storage provider to encrypt data files. One of `` (use the storage container default), `AES256` or `aws:kms`. |
| backup.data.jobs | int | `1` | Number of data files to be archived or restored in parallel. |
| backup.destinationPath | string | `""` | S3 path starting with "s3://" |
| backup.enabled | bool | `false` | |
| backup.endpointCA | string | `""` | Specifies secret that contains a CA bundle to validate a privately signed certificate, should contain the key ca-bundle.crt |
| backup.endpointCredentials | string | `""` | Specifies secret that contains S3 credentials, should contain the keys ACCESS_KEY_ID and ACCESS_SECRET_KEY |
| backup.endpointURL | string | `""` | S3 endpoint starting with "https://" |
| backup.historyTags.backupRetentionPolicy | string | `""` | |
| backup.retentionPolicy | string | `"7d"` | Retention policy for backups |
| backup.schedule | string | `"0 0 */3 * *"` | Scheduled backup in cron format |
| backup.tags | object | `{"backupRetentionPolicy":""}` | Tags to add to backups. Add in key value beneath the type. |
| backup.wal.compression | string | `"snappy"` | WAL compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`. |
| backup.wal.encryption | string | `""` | Whether to instruct the storage provider to encrypt WAL files. One of `` (use the storage container default), `AES256` or `aws:kms`. |
| backup.wal.maxParallel | int | `1` | Number of WAL files to be archived or restored in parallel. |
| bootstrap | object | `{"initdb":{}}` | Bootstrap is the configuration of the bootstrap process when initdb is used. See: https://cloudnative-pg.io/documentation/current/bootstrap/ See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-bootstrapinitdb |
| bootstrap.initdb | object | `{}` | Example values database: app owner: app secret: "" # Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch postInitApplicationSQL: - CREATE TABLE IF NOT EXISTS example; |
| cluster.additionalLabels | object | `{}` | |
| cluster.affinity | object | `{"enablePodAntiAffinity":true,"topologyKey":"kubernetes.io/hostname"}` | See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-AffinityConfiguration |
| cluster.annotations | object | `{}` | |
| cluster.enableSuperuserAccess | bool | `false` | Create secret containing credentials of superuser |
| cluster.image | object | `{"pullPolicy":"IfNotPresent","repository":"ghcr.io/cloudnative-pg/postgresql","tag":"17.4-3-bullseye"}` | Default image |
| cluster.instances | int | `3` | |
| cluster.logLevel | string | `"info"` | |
| cluster.monitoring | object | `{"enabled":false,"podMonitor":{"enabled":true},"prometheusRule":{"enableDefaultRules":true,"enabled":false,"excludeRules":[]}}` | Enable default monitoring and alert rules |
| cluster.postgresGID | int | `26` | |
| cluster.postgresUID | int | `26` | The UID and GID of the postgres user inside the image |
| cluster.postgresql | object | `{"parameters":{"hot_standby_feedback":"on","max_slot_wal_keep_size":"2000MB","shared_buffers":"128MB"},"shared_preload_libraries":[]}` | Parameters to be set for the database itself See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-PostgresConfiguration |
| cluster.primaryUpdateMethod | string | `"switchover"` | Method to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated. It can be switchover (default) or in-place (restart). |
| cluster.primaryUpdateStrategy | string | `"unsupervised"` | Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be automated (unsupervised - default) or manual (supervised) |
| cluster.priorityClassName | string | `""` | |
| cluster.resources | object | `{"limits":{"cpu":"1","hugepages-2Mi":"256Mi"},"requests":{"cpu":"100m","memory":"256Mi"}}` | Default resources |
| cluster.storage.size | string | `"10Gi"` | |
| cluster.storage.storageClass | string | `""` | |
| cluster.walStorage | object | `{"size":"2Gi","storageClass":""}` | Default storage size |
| mode | string | `"standalone"` | Cluster mode of operation. Available modes: * `standalone` - Default mode. Creates new or updates an existing CNPG cluster. * `recovery` - Same as standalone but creates a cluster from a backup, object store or via pg_basebackup * `replica` - Create database as a replica from another CNPG cluster |
| nameOverride | string | `""` | Override the name of the cluster |
| recovery | object | `{"data":{"compression":"snappy","encryption":"","jobs":1},"destinationPath":"","endpointCA":"","endpointCredentials":"","endpointURL":"","pitrTarget":{"time":""},"recoveryIndex":1,"recoveryInstanceName":"","recoveryServerName":"","wal":{"compression":"snappy","encryption":"","maxParallel":1}}` | Recovery settings when booting cluster from external cluster |
| recovery.data.compression | string | `"snappy"` | Data compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`. |
| recovery.data.encryption | string | `""` | Whether to instruct the storage provider to encrypt data files. One of `` (use the storage container default), `AES256` or `aws:kms`. |
| recovery.data.jobs | int | `1` | Number of data files to be archived or restored in parallel. |
| recovery.endpointCA | string | `""` | Specifies secret that contains a CA bundle to validate a privately signed certificate, should contain the key ca-bundle.crt |
| recovery.endpointCredentials | string | `""` | Specifies secret that contains S3 credentials, should contain the keys ACCESS_KEY_ID and ACCESS_SECRET_KEY |
| recovery.endpointURL | string | `""` | S3 https endpoint and the s3:// path |
| recovery.pitrTarget | object | `{"time":""}` | Point in time recovery target in RFC3339 format |
| recovery.recoveryIndex | int | `1` | Generate external cluster name, uses: {{ .Release.Name }}postgresql-<major version>-cluster-backup-index-{{ .Values.recovery.recoveryIndex }} |
| recovery.recoveryInstanceName | string | `""` | Name of the recovery cluster in the object store, defaults to ".Release.Name" |
| recovery.recoveryServerName | string | `""` | Name of the recovery cluster in the object store, defaults to "cluster.name" |
| recovery.wal.compression | string | `"snappy"` | WAL compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`. |
| recovery.wal.encryption | string | `""` | Whether to instruct the storage provider to encrypt WAL files. One of `` (use the storage container default), `AES256` or `aws:kms`. |
| recovery.wal.maxParallel | int | `1` | Number of WAL files to be archived or restored in parallel. |
| replica.externalCluster | object | `{"connectionParameters":{"dbname":"app","host":"postgresql","user":"app"},"password":{"key":"password","name":"postgresql"}}` | External cluster connection, password specifies a secret name and the key containing the password value |
| replica.importDatabases | list | `["app"]` | If type microservice only one database is allowed, default is app as standard in cnpg clusters |
| replica.importRoles | list | `[]` | If type microservice no roles are imported and ignored |
| replica.importType | string | `"microservice"` | See [here](https://cloudnative-pg.io/documentation/current/database_import/) for different import types * `microservice` - Single database import as expected from cnpg clusters * `monolith` - Import multiple databases and roles |
| replica.postImportApplicationSQL | list | `[]` | If import type is monolith postImportApplicationSQL is not supported and ignored |
| type | string | `"postgresql"` | Type of the CNPG database. Available types: * `postgresql` * `postgis` * `timescaledb` * `tensorchord` |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)

View File

@@ -2,7 +2,7 @@
bootstrap:
{{- if eq .Values.mode "standalone" }}
  initdb:
-    {{- with .Values.cluster.initdb }}
+    {{- with .Values.bootstrap.initdb }}
    {{- with (omit . "postInitApplicationSQL") }}
    {{- . | toYaml | nindent 4 }}
    {{- end }}
@@ -10,7 +10,7 @@ bootstrap:
    {{- if eq .Values.type "tensorchord" }}
    dataChecksums: true
    {{- end }}
-    {{- if or (eq .Values.type "postgis") (eq .Values.type "timescaledb") (eq .Values.type "tensorchord") (.Values.cluster.initdb.postInitApplicationSQL) }}
+    {{- if or (eq .Values.type "postgis") (eq .Values.type "timescaledb") (eq .Values.type "tensorchord") (.Values.bootstrap.initdb.postInitApplicationSQL) }}
    postInitApplicationSQL:
      {{- if eq .Values.type "postgis" }}
      - CREATE EXTENSION IF NOT EXISTS postgis;
@@ -29,7 +29,7 @@ bootstrap:
      - GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA vectors TO "app";
      - GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "app";
      {{- end }}
-    {{- with .Values.cluster.initdb }}
+    {{- with .Values.bootstrap.initdb }}
      {{- range .postInitApplicationSQL }}
      {{- printf "- %s" . | nindent 6 }}
      {{- end }}
@@ -61,6 +61,11 @@ bootstrap:
  {{- end }}
  source:
    externalCluster: "{{ include "cluster.name" . }}-cluster"
+  {{- with .Values.bootstrap.initdb }}
+  {{- with (omit . "postInitApplicationSQL") }}
+  {{- . | toYaml | nindent 4 }}
+  {{- end }}
+  {{- end }}
externalClusters:
  - name: "{{ include "cluster.name" . }}-cluster"
    {{- with .Values.replica.externalCluster }}
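The template change above moves initdb configuration from cluster.initdb to a top-level bootstrap.initdb key. A hedged sketch of the new values layout, reusing the example fields from the chart's own README:

    # values override after the rename (keys mirror the README example)
    cat > bootstrap-values.yaml <<'EOF'
    bootstrap:
      initdb:
        database: app
        owner: app
        postInitApplicationSQL:
          - CREATE TABLE IF NOT EXISTS example;
    EOF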

View File

@@ -18,6 +18,7 @@ spec:
  imagePullPolicy: {{ .Values.cluster.image.pullPolicy }}
  postgresUID: {{ .Values.cluster.postgresUID }}
  postgresGID: {{ .Values.cluster.postgresGID }}
+  enableSuperuserAccess: {{ .Values.cluster.enableSuperuserAccess }}
  walStorage:
    size: {{ .Values.cluster.walStorage.size }}
    storageClass: {{ .Values.cluster.walStorage.storageClass }}
@@ -60,4 +61,5 @@
    enablePodMonitor: {{ and .Values.cluster.monitoring.enabled .Values.cluster.monitoring.podMonitor.enabled }}
  {{ include "cluster.bootstrap" . | nindent 2 }}
  {{ include "cluster.backup" . | nindent 2 }}

View File

@@ -14,10 +14,10 @@ spec:
    - name: cloudnative-pg/{{ include "cluster.name" . }}
      rules:
        {{- $dict := dict "excludeRules" .Values.cluster.monitoring.prometheusRule.excludeRules -}}
-        {{- $_ := set $dict "value" "{{ $value }}" -}}
+        {{- $_ := set $dict "value" "{{`{{`}} $value {{`}}`}}" -}}
        {{- $_ := set $dict "namespace" .Release.Namespace -}}
        {{- $_ := set $dict "cluster" (printf "%s-cluster" (include "cluster.name" .) ) -}}
-        {{- $_ := set $dict "labels" (dict "job" "{{ $labels.job }}" "node" "{{ $labels.node }}" "pod" "{{ $labels.pod }}") -}}
+        {{- $_ := set $dict "labels" (dict "job" "{{`{{`}} $labels.job {{`}}`}}" "node" "{{`{{`}} $labels.node {{`}}`}}" "pod" "{{`{{`}} $labels.pod {{`}}`}}") -}}
        {{- $_ := set $dict "podSelector" (printf "%s-cluster-([1-9][0-9]*)$" (include "cluster.name" .) ) -}}
        {{- $_ := set $dict "Values" .Values -}}
        {{- $_ := set $dict "Template" .Template -}}
@@ -27,4 +27,71 @@ spec:
      - {{ $tpl }}
      {{- end -}}
    {{- end -}}
{{- if .Values.cluster.monitoring.prometheusRule.enableDefaultRules }}
- name: cloudnative-pg/default-rules
rules:
- alert: LongRunningTransaction
annotations:
description: Pod {{`{{`}} $labels.pod {{`}}`}} is taking more than 5 minutes (300 seconds) for a query.
summary: A query is taking longer than 5 minutes.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
- alert: BackendsWaiting
annotations:
description: Pod {{`{{`}} $labels.pod {{`}}`}} has been waiting for longer than 5 minutes
summary: If a backend is waiting for longer than 5 minutes
expr: |-
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
- alert: PGDatabaseXidAge
annotations:
description: Over 300,000,000 transactions from frozen xid on pod {{`{{`}} $labels.pod {{`}}`}}
summary: Number of transactions from the frozen XID to the current one
expr: |-
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
- alert: PGReplication
annotations:
description: Standby is lagging behind by over 300 seconds (5 minutes)
summary: The standby is lagging behind the primary
expr: |-
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
- alert: LastFailedArchiveTime
annotations:
description: Archiving failed for {{`{{`}} $labels.pod {{`}}`}}
summary: Checks the last time archiving failed. Will be < 0 when it has not failed.
expr: |-
(cnpg_pg_stat_archiver_last_failed_time - cnpg_pg_stat_archiver_last_archived_time) > 1
for: 1m
labels:
severity: warning
- alert: DatabaseDeadlockConflicts
annotations:
description: There are over 10 deadlock conflicts in {{`{{`}} $labels.pod {{`}}`}}
summary: Checks the number of database conflicts
expr: |-
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
- alert: ReplicaFailingReplication
annotations:
description: Replica {{`{{`}} $labels.pod {{`}}`}} is failing to replicate
summary: Checks if the replica is failing to replicate
expr: |-
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
{{- end }}
{{ end }}
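The default alert rules above only render when monitoring and the PrometheusRule are both enabled. A minimal sketch of the values that switch them on (keys taken from the chart's values.yaml; the release name and registry host are placeholders):

    cat > monitoring-values.yaml <<'EOF'
    cluster:
      monitoring:
        enabled: true
        podMonitor:
          enabled: true
        prometheusRule:
          enabled: true
          enableDefaultRules: true
          excludeRules: []
    EOF
    helm upgrade --install postgres oci://harbor.example.com/helm-charts/postgres-cluster -f monitoring-values.yaml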

View File

@@ -1,7 +1,6 @@
# -- Override the name of the cluster
nameOverride: ""
-###
# -- Type of the CNPG database. Available types:
# * `postgresql`
# * `postgis`
@@ -9,8 +8,7 @@ nameOverride: ""
# * `tensorchord`
type: postgresql
-###
-# Cluster mode of operation. Available modes:
+# -- Cluster mode of operation. Available modes:
# * `standalone` - Default mode. Creates new or updates an existing CNPG cluster.
# * `recovery` - Same as standalone but creates a cluster from a backup, object store or via pg_basebackup
# * `replica` - Create database as a replica from another CNPG cluster
@@ -19,15 +17,20 @@ mode: standalone
cluster:
  instances: 3
+  # -- Default image
  image:
    repository: ghcr.io/cloudnative-pg/postgresql
-    tag: "16.4-26"
+    tag: "17.4-3-bullseye"
    pullPolicy: IfNotPresent
-  # The UID and GID of the postgres user inside the image
+  # -- The UID and GID of the postgres user inside the image
  postgresUID: 26
  postgresGID: 26
+  # -- Create secret containing credentials of superuser
+  enableSuperuserAccess: false
+  # -- Default storage size
  walStorage:
    size: 2Gi
    storageClass: ""
@@ -35,43 +38,45 @@ cluster:
    size: 10Gi
    storageClass: ""
+  # -- Default resources
  resources:
    requests:
      memory: 256Mi
-      cpu: 10m
+      cpu: 100m
    limits:
-      memory: 1Gi
-      cpu: 800m
+      cpu: '1'
      hugepages-2Mi: 256Mi
-  # See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-AffinityConfiguration
+  # -- See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-AffinityConfiguration
  affinity:
    enablePodAntiAffinity: true
    topologyKey: kubernetes.io/hostname
  additionalLabels: {}
  annotations: {}
  priorityClassName: ""
-  # Method to follow to upgrade the primary server during a rolling update procedure, after all replicas have been
+  # -- Method to follow to upgrade the primary server during a rolling update procedure, after all replicas have been
  # successfully updated. It can be switchover (default) or in-place (restart).
  primaryUpdateMethod: switchover
-  # Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been
+  # -- Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been
  # successfully updated: it can be automated (unsupervised - default) or manual (supervised)
  primaryUpdateStrategy: unsupervised
  logLevel: "info"
+  # -- Enable default monitoring and alert rules
  monitoring:
    enabled: false
    podMonitor:
      enabled: true
    prometheusRule:
      enabled: false
+      enableDefaultRules: true
      excludeRules: []
+  # -- Parameters to be set for the database itself
  # See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-PostgresConfiguration
  postgresql:
    parameters:
@@ -80,72 +85,76 @@ cluster:
      hot_standby_feedback: "on"
    shared_preload_libraries: []
-# BootstrapInitDB is the configuration of the bootstrap process when initdb is used.
+# -- Bootstrap is the configuration of the bootstrap process when initdb is used.
# See: https://cloudnative-pg.io/documentation/current/bootstrap/
# See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-bootstrapinitdb
-  initdb: {}
+bootstrap:
+  # -- Example values
  # database: app
  # owner: app
  # secret: "" # Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch
  # postInitApplicationSQL:
  # - CREATE TABLE IF NOT EXISTS example;
+  initdb: {}
+# -- Recovery settings when booting cluster from external cluster
recovery:
-  # Point in time recovery target in RFC3339 format
+  # -- Point in time recovery target in RFC3339 format
  pitrTarget:
    time: ""
-  # S3 https endpoint and the s3:// path
+  # -- S3 https endpoint and the s3:// path
  endpointURL: ""
  destinationPath: ""
-  # Specifies secret that contains a CA bundle to validate a privately signed certificate, should contain the key ca-bundle.crt
+  # -- Specifies secret that contains a CA bundle to validate a privately signed certificate, should contain the key ca-bundle.crt
  endpointCA: ""
-  # Specifies secret that contains S3 credentials, should contain the keys ACCESS_KEY_ID and ACCESS_SECRET_KEY
+  # -- Specifies secret that contains S3 credentials, should contain the keys ACCESS_KEY_ID and ACCESS_SECRET_KEY
  endpointCredentials: ""
-  # Generate external cluster name, uses: {{ .Release.Name }}postgresql-<major version>-cluster-backup-index-{{ .Values.recovery.recoveryIndex }}
+  # -- Generate external cluster name, uses: {{ .Release.Name }}postgresql-<major version>-cluster-backup-index-{{ .Values.recovery.recoveryIndex }}
  recoveryIndex: 1
-  # Name of the recovery cluster in the object store, defaults to "cluster.name"
+  # -- Name of the recovery cluster in the object store, defaults to "cluster.name"
  recoveryServerName: ""
-  # Name of the recovery cluster in the object store, defaults to ".Release.Name"
+  # -- Name of the recovery cluster in the object store, defaults to ".Release.Name"
  recoveryInstanceName: ""
  wal:
-    # WAL compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
+    # -- WAL compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
    compression: snappy
-    # Whether to instruct the storage provider to encrypt WAL files. One of `` (use the storage container default), `AES256` or `aws:kms`.
+    # -- Whether to instruct the storage provider to encrypt WAL files. One of `` (use the storage container default), `AES256` or `aws:kms`.
    encryption: ""
-    # Number of WAL files to be archived or restored in parallel.
+    # -- Number of WAL files to be archived or restored in parallel.
-    maxParallel: 2
+    maxParallel: 1
  data:
-    # Data compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
+    # -- Data compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
    compression: snappy
-    # Whether to instruct the storage provider to encrypt data files. One of `` (use the storage container default), `AES256` or `aws:kms`.
+    # -- Whether to instruct the storage provider to encrypt data files. One of `` (use the storage container default), `AES256` or `aws:kms`.
    encryption: ""
-    # Number of data files to be archived or restored in parallel.
+    # -- Number of data files to be archived or restored in parallel.
-    jobs: 2
+    jobs: 1
replica:
-  # See https://cloudnative-pg.io/documentation/current/database_import/
+  # -- See [here](https://cloudnative-pg.io/documentation/current/database_import/) for different import types
  # * `microservice` - Single database import as expected from cnpg clusters
  # * `monolith` - Import multiple databases and roles
  importType: microservice
-  # If type microservice only one database is allowed, default is app as standard in cnpg clusters
+  # -- If type microservice only one database is allowed, default is app as standard in cnpg clusters
  importDatabases:
    - app
-  # If type microservice no roles are imported and ignored
+  # -- If type microservice no roles are imported and ignored
  importRoles: []
-  # If import type is monolith postImportApplicationSQL is not supported and ignored
+  # -- If import type is monolith postImportApplicationSQL is not supported and ignored
  postImportApplicationSQL: []
-  # External cluster connection, password specifies a secret name and the key containing the password value
+  # -- External cluster connection, password specifies a secret name and the key containing the password value
  externalCluster:
    connectionParameters:
      host: postgresql
@@ -158,48 +167,47 @@ replica:
backup:
  enabled: false
-  # S3 endpoint starting with "https://"
+  # -- S3 endpoint starting with "https://"
  endpointURL: ""
-  # S3 path starting with "s3://"
+  # -- S3 path starting with "s3://"
  destinationPath: ""
-  # Specifies secret that contains a CA bundle to validate a privately signed certificate, should contain the key ca-bundle.crt
+  # -- Specifies secret that contains a CA bundle to validate a privately signed certificate, should contain the key ca-bundle.crt
  endpointCA: ""
-  # Specifies secret that contains S3 credentials, should contain the keys ACCESS_KEY_ID and ACCESS_SECRET_KEY
+  # -- Specifies secret that contains S3 credentials, should contain the keys ACCESS_KEY_ID and ACCESS_SECRET_KEY
  endpointCredentials: ""
-  # Generate external cluster name, creates: postgresql-{{ .Release.Name }}-cluster-backup-index-{{ .Values.backups.backupIndex }}"
+  # -- Generate external cluster name, creates: postgresql-{{ .Release.Name }}-cluster-backup-index-{{ .Values.backups.backupIndex }}"
  backupIndex: 1
-  # Name of the backup cluster in the object store, defaults to "cluster.name"
+  # -- Name of the backup cluster in the object store, defaults to "cluster.name"
  backupName: ""
-  # Tags to add to backups. Add in key value beneath the type.
+  # -- Tags to add to backups. Add in key value beneath the type.
  tags:
-    backupRetentionPolicy: "expire"
+    backupRetentionPolicy: ""
  historyTags:
-    backupRetentionPolicy: "keep"
+    backupRetentionPolicy: ""
-  # Configuration for the WAL and data files.
  wal:
-    # WAL compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
+    # -- WAL compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
    compression: snappy
-    # Whether to instruct the storage provider to encrypt WAL files. One of `` (use the storage container default), `AES256` or `aws:kms`.
+    # -- Whether to instruct the storage provider to encrypt WAL files. One of `` (use the storage container default), `AES256` or `aws:kms`.
    encryption: ""
-    # Number of WAL files to be archived or restored in parallel.
+    # -- Number of WAL files to be archived or restored in parallel.
-    maxParallel: 2
+    maxParallel: 1
  data:
-    # Data compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
+    # -- Data compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
    compression: snappy
-    # Whether to instruct the storage provider to encrypt data files. One of `` (use the storage container default), `AES256` or `aws:kms`.
+    # -- Whether to instruct the storage provider to encrypt data files. One of `` (use the storage container default), `AES256` or `aws:kms`.
    encryption: ""
-    # Number of data files to be archived or restored in parallel.
+    # -- Number of data files to be archived or restored in parallel.
-    jobs: 2
+    jobs: 1
-  # Retention policy for backups
+  # -- Retention policy for backups
-  retentionPolicy: "60d"
+  retentionPolicy: "7d"
-  # Scheduled backup in cron format
+  # -- Scheduled backup in cron format
  schedule: "0 0 */3 * *"
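With the new defaults (one parallel WAL/data job, 7-day retention), enabling backups still only needs the S3 settings filled in. A hedged sketch using the keys above; the endpoint, bucket path, and secret name are placeholders:

    cat > backup-values.yaml <<'EOF'
    backup:
      enabled: true
      endpointURL: "https://s3.example.com"         # placeholder S3 endpoint
      destinationPath: "s3://my-bucket/postgres"    # placeholder bucket path
      endpointCredentials: "s3-credentials"         # secret with ACCESS_KEY_ID and ACCESS_SECRET_KEY
      retentionPolicy: "7d"
      schedule: "0 0 */3 * *"
    EOF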

View File

@@ -6,35 +6,18 @@
":rebaseStalePrs" ":rebaseStalePrs"
], ],
"timezone": "US/Central", "timezone": "US/Central",
"schedule": [ "schedule": [ "* */1 * * *" ],
"every weekday"
],
"labels": [], "labels": [],
"prHourlyLimit": 0,
"prConcurrentLimit": 0,
"packageRules": [ "packageRules": [
{ {
"description": "Disables for non major Renovate version", "description": "Label charts",
"matchPaths": [ "matchDatasources": [
".github/renovate-update-notification/Dockerfile" "helm"
],
"matchUpdateTypes": [
"minor",
"patch",
"pin",
"digest",
"rollback"
],
"enabled": false
},
{
"description": "Generate for major Renovate version",
"matchPaths": [
".github/renovate-update-notification/Dockerfile"
],
"matchUpdateTypes": [
"major"
], ],
"addLabels": [ "addLabels": [
"upgrade" "chart"
], ],
"automerge": false "automerge": false
}, },
@@ -46,19 +29,21 @@
"addLabels": [ "addLabels": [
"image" "image"
], ],
"automerge": false, "automerge": false
"minimumReleaseAge": "3 days"
}, },
{ {
"description": "Label charts", "description": "CNPG image",
"matchDepNames": [
"ghcr.io/cloudnative-pg/postgresql"
],
"matchDatasources": [ "matchDatasources": [
"helm" "docker"
], ],
"addLabels": [ "addLabels": [
"chart" "image"
], ],
"automerge": false, "automerge": false,
"minimumReleaseAge": "3 days" "versioning": "deb"
} }
] ]
} }
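The reworked Renovate config can be checked locally before committing; Renovate ships a validator that discovers config files in the repository (assumes Node.js is available):

    npx --yes --package renovate -- renovate-config-validator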