Compare commits

...

89 Commits

Author SHA1 Message Date
81d3ecf237 adjust schedule 2025-01-30 21:08:38 -06:00
8392d67790 update chart 2025-01-17 17:23:14 -06:00
3f06bf148c update image 2025-01-17 17:22:00 -06:00
5259488c05 chagne resources 2025-01-08 17:39:10 -06:00
09c693d371 reduce resource request 2025-01-08 15:50:21 -06:00
ec6f44c6bc change resource 2025-01-08 15:33:59 -06:00
35f331e29a fix helm/prom bracket interaction 2025-01-08 15:20:28 -06:00
3b0481fcb1 add default rules 2025-01-07 14:22:25 -06:00
e2dfd70dc4 change default resources 2025-01-07 13:45:34 -06:00
ffc253ef7d add description of values 2024-12-30 17:10:54 -06:00
77dd85362e update dependency chart 2024-12-30 17:04:09 -06:00
d5bb83bf84 add description of values 2024-12-30 17:03:45 -06:00
11d3dd927b update dependency chart 2024-12-30 17:00:37 -06:00
1b67b5cbb6 add description of values 2024-12-30 16:59:49 -06:00
56fe199fb9 add precommit hooks 2024-12-30 16:55:01 -06:00
8ec7f590b2 upgrade base image to 17 2024-12-24 21:08:05 -06:00
d2444fb544 set switch for superuser 2024-12-22 17:29:30 -06:00
202a534e8e fix missing field 2024-12-21 23:48:11 -06:00
c36e4e371f reorganize values 2024-12-21 23:40:21 -06:00
1ac9444bb2 fix condition flow 2024-12-21 23:29:50 -06:00
275fcd8568 use cluster values 2024-12-21 23:26:40 -06:00
158d4ca676 change method 2024-12-21 23:22:34 -06:00
32e232d8e2 force hardcoded value for testing 2024-12-21 23:08:17 -06:00
93d2f916fb use value for name 2024-12-21 22:53:59 -06:00
b1a6a2fd39 remove condition 2024-12-21 22:46:17 -06:00
d3307d4f70 use different function 2024-12-21 22:39:52 -06:00
1b7018d3bd fix database naming 2024-12-21 22:31:00 -06:00
b75721ae1d add option to specifiy database name for replica 2024-12-21 22:20:09 -06:00
renovate[bot]
e0e4f6ee8a Update renovate/renovate Docker tag to v39 (#71)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2024-12-21 19:55:23 -06:00
renovate[bot]
7dd80d4528 Migrate config .github/renovate.json (#72)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2024-12-21 19:55:16 -06:00
24af841f19 update workflows 2024-12-21 18:11:39 -06:00
16211d4c62 remove schedule 2024-12-21 18:11:29 -06:00
513c46c957 change to midnight daily 2024-12-20 19:33:25 -06:00
3fad4e4ff0 update image 2024-12-20 19:25:40 -06:00
1f867e0276 update image 2024-12-20 19:25:03 -06:00
601790ab7a change backup schedule 2024-12-19 14:50:00 -06:00
16ebdda6a4 update image 2024-12-19 13:59:37 -06:00
dbf8f14512 update image 2024-12-19 13:58:37 -06:00
22dcd7a14c update image 2024-12-16 10:31:56 -06:00
8862d97c27 change retention policy 2024-12-12 11:12:58 -06:00
1f4cd543c0 bump chart version 2024-11-23 22:40:06 -06:00
4aac272e98 update image 2024-11-23 22:39:06 -06:00
b8602fb919 update image to 16.6 2024-11-23 22:38:36 -06:00
fb34897269 update image 2024-10-19 00:58:50 -05:00
ec27eff4da add priority class name and tolerations 2024-10-13 12:39:03 -05:00
2b31df483e listen on all addresses 2024-10-12 23:35:08 -05:00
53191f1d68 add generic device plugin 2024-10-12 23:18:07 -05:00
172526fb79 update common chart 2024-10-11 19:03:23 -05:00
5d5aad265a fix settings for tensorchord type 2024-09-28 16:43:45 -05:00
84af71da49 add tag for postgres version 2024-09-28 02:07:28 -05:00
ab3ca49103 add tensorchord type 2024-09-28 02:05:34 -05:00
8b2342d1c2 bump chart version 2024-09-27 21:29:54 -05:00
9107020db2 update chart and image 2024-09-27 21:28:05 -05:00
3ecef5f8d1 add options for tagging 2024-09-27 21:27:01 -05:00
renovate[bot]
e5b1b733fe Update cloudflare/cloudflared Docker tag to v2024.8.3 (#63)
* Update cloudflare/cloudflared Docker tag to v2024.8.3

* update chart

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-08-24 01:30:19 -05:00
843e37e233 update postresql image 2024-08-19 16:42:54 -05:00
ee944a6b83 update image 2024-08-19 16:41:19 -05:00
renovate[bot]
5fe95ea7ad Update renovate/renovate Docker tag to v38 (#62)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2024-08-19 16:40:33 -05:00
6a33a670e1 update common chart 2024-08-19 16:40:16 -05:00
27cdfd742e remove mysql-cluster 2024-08-19 15:31:01 -05:00
9f68b30a31 change condition handling 2024-07-08 12:09:29 -05:00
668d50dfdb add conditional check for postinit 2024-07-04 22:52:02 -05:00
93a232947e increment chart 2024-07-04 22:45:41 -05:00
667236239d fix backup fields 2024-07-04 22:45:18 -05:00
875f0c143c fix backup fields 2024-07-04 22:41:31 -05:00
670b6e600c add conditional check for values 2024-07-01 18:08:23 -05:00
6f5b5ffcb4 change value inseration 2024-07-01 18:08:23 -05:00
renovate[bot]
295a7296bc Update cloudflare/cloudflared Docker tag to v2024.6.1 (#60)
* Update cloudflare/cloudflared Docker tag to v2024.6.1

* update chart

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-06-28 17:01:04 -05:00
f1b4020287 change flow control 2024-06-22 18:26:19 -05:00
969357a664 change null handling 2024-06-22 18:22:25 -05:00
5685190e43 remove field not declared in schema 2024-06-22 18:18:03 -05:00
5e88f116fc disable rules by default 2024-06-22 17:58:43 -05:00
f99ebfaa44 change initdb keys 2024-06-14 21:37:00 -05:00
64e3612762 fix init keys 2024-06-14 21:30:54 -05:00
a6821995ca fix post init location 2024-06-14 21:23:48 -05:00
4291c3d18c add options for postgresql init 2024-06-14 21:17:45 -05:00
renovate[bot]
3f1fc33123 Update cloudflare/cloudflared Docker tag to v2024.6.0 (#59)
* Update cloudflare/cloudflared Docker tag to v2024.6.0

* bump chart version

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-06-13 11:01:03 -05:00
fad13607e6 bump chart version 2024-06-13 10:58:31 -05:00
a1811097c0 add resources to values 2024-06-13 10:58:01 -05:00
6b850205ad move image to values 2024-06-13 10:56:49 -05:00
d075a47f03 remove hookshot 2024-06-13 10:53:52 -05:00
0a437d983d remove discord 2024-06-13 10:53:44 -05:00
7058201439 remove whatsapp 2024-06-13 10:53:29 -05:00
42cd8834b9 remove outline 2024-06-01 18:13:49 -05:00
2cda957b4c remove taiga 2024-06-01 18:13:42 -05:00
238d01c5e4 remove kublet cert 2024-05-30 11:59:29 -05:00
9f0fae9fdf remove qbittorrent 2024-05-30 11:59:13 -05:00
d2f062e3db remove unpackerr 2024-05-30 11:58:57 -05:00
a1c9367b6d remove penpot 2024-05-30 11:58:45 -05:00
113 changed files with 593 additions and 7845 deletions

View File

@@ -1,2 +1,2 @@
# This file is processed by Renovate bot so that it creates a PR on new major Renovate versions
FROM renovate/renovate:37
FROM renovate/renovate:39

View File

@@ -6,14 +6,11 @@
":rebaseStalePrs"
],
"timezone": "US/Central",
"schedule": [
"every weekday"
],
"labels": [],
"packageRules": [
{
"description": "Disables for non major Renovate version",
"matchPaths": [
"matchFileNames": [
".github/renovate-update-notification/Dockerfile"
],
"matchUpdateTypes": [
@@ -27,7 +24,7 @@
},
{
"description": "Generate for major Renovate version",
"matchPaths": [
"matchFileNames": [
".github/renovate-update-notification/Dockerfile"
],
"matchUpdateTypes": [

View File

@@ -14,11 +14,11 @@ jobs:
- name: Set up Helm
uses: azure/setup-helm@v4
with:
version: v3.13.3
version: latest
- uses: actions/setup-python@v5
with:
python-version: "3.10"
python-version: "3.13"
check-latest: true
- name: Set up chart-testing

.pre-commit-config.yaml Normal file
View File

@@ -0,0 +1,19 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v2.3.0
hooks:
- id: end-of-file-fixer
- id: trailing-whitespace
- id: check-added-large-files
- id: check-yaml
exclude: 'charts/'
args:
- --multi
- repo: https://github.com/norwoodj/helm-docs
rev: v1.14.2
hooks:
- id: helm-docs
args:
- --chart-search-root=charts
- --template-files=./_templates.gotmpl
- --template-files=README.md.gotmpl

View File

@@ -1,6 +1,6 @@
apiVersion: v2
name: cloudflared
version: 1.2.0
version: 1.13.0
description: Cloudflared Tunnel
keywords:
- cloudflare
@@ -13,6 +13,6 @@ maintainers:
dependencies:
- name: common
repository: https://bjw-s.github.io/helm-charts/
version: 3.2.1
version: 3.6.1
icon: https://avatars.githubusercontent.com/u/314135?s=48&v=4
appVersion: "2024.5.0"
appVersion: "2025.1.0"

View File

@@ -1,16 +1,35 @@
## Introduction
# cloudflared
[Cloudflared](https://github.com/cloudflare/cloudflared)
![Version: 1.13.0](https://img.shields.io/badge/Version-1.13.0-informational?style=flat-square) ![AppVersion: 2025.1.0](https://img.shields.io/badge/AppVersion-2025.1.0-informational?style=flat-square)
Contains the command-line client for Cloudflare Tunnel, a tunneling daemon that proxies traffic from the Cloudflare network to your origins.
Cloudflared Tunnel
This chart bootstraps a [Cloudflared](https://github.com/cloudflare/cloudflared) tunnel on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Maintainers
## Prerequisites
| Name | Email | Url |
| ---- | ------ | --- |
| alexlebens | | |
- Kubernetes
- Helm
## Source Code
## Parameters
* <https://github.com/cloudflare/cloudflared>
* <https://github.com/bjw-s/helm-charts/tree/main/charts/library/common>
See the [values files](values.yaml).
## Requirements
| Repository | Name | Version |
|------------|------|---------|
| https://bjw-s.github.io/helm-charts/ | common | 3.6.1 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| existingSecretKey | string | `"cf-tunnel-token"` | Name of key that contains the token in the existingSecret |
| existingSecretName | string | `"cloudflared-secret"` | Name of existing secret that contains Cloudflare token |
| image | object | `{"pullPolicy":"IfNotPresent","repository":"cloudflare/cloudflared","tag":"2025.1.0"}` | Default image |
| name | string | `"cloudflared"` | Name override of release |
| resources | object | `{"requests":{"cpu":"10m","memory":"128Mi"}}` | Default resources |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)
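For illustration only, a minimal user-supplied values override for this chart could look like the sketch below; the file name my-values.yaml is an assumption, the remaining values are the documented defaults, and the referenced Secret is expected to exist already with the tunnel token under the configured key. It would be passed to helm install/upgrade with -f my-values.yaml.

# my-values.yaml -- illustrative override for the cloudflared chart (file name is an assumption)
existingSecretName: cloudflared-secret   # pre-created Secret holding the Cloudflare tunnel token
existingSecretKey: cf-tunnel-token       # key inside that Secret
image:
  repository: cloudflare/cloudflared
  tag: "2025.1.0"
  pullPolicy: IfNotPresent
resources:
  requests:
    cpu: 10m
    memory: 128Mi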

View File

@@ -12,9 +12,9 @@ controllers:
containers:
main:
image:
repository: cloudflare/cloudflared
tag: "2024.5.0"
pullPolicy: IfNotPresent
repository: {{ .Values.image.repository }}
tag: {{ .Values.image.tag }}
pullPolicy: {{ .Values.image.pullPolicy }}
args:
- tunnel
- --protocol
@@ -30,9 +30,10 @@ controllers:
name: {{ .Values.existingSecretName }}
key: {{ .Values.existingSecretKey }}
resources:
requests:
cpu: 100m
memory: 128Mi
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 10 }}
{{ end }}
{{- end -}}
{{- $_ := mergeOverwrite .Values (include "cloudflared.hardcodedValues" . | fromYaml) -}}

View File

@@ -1,3 +1,20 @@
# -- Name override of release
name: cloudflared
# -- Name of existing secret that contains Cloudflare token
existingSecretName: cloudflared-secret
# -- Name of key that contains the token in the existingSecret
existingSecretKey: cf-tunnel-token
# -- Default image
image:
repository: cloudflare/cloudflared
tag: "2025.1.0"
pullPolicy: IfNotPresent
# -- Default resources
resources:
requests:
cpu: 10m
memory: 128Mi

View File

@@ -0,0 +1,18 @@
apiVersion: v2
name: generic-device-plugin
version: 0.1.6
description: Generic Device Plugin
keywords:
- generic-device-plugin
- device
- plugin
sources:
- https://github.com/squat/generic-device-plugin
- https://github.com/bjw-s/helm-charts/tree/main/charts/library/common
maintainers:
- name: alexlebens
dependencies:
- name: common
repository: https://bjw-s.github.io/helm-charts/
version: 3.6.1
appVersion: 0.1.6

View File

@@ -0,0 +1,37 @@
# generic-device-plugin
![Version: 0.1.6](https://img.shields.io/badge/Version-0.1.6-informational?style=flat-square) ![AppVersion: 0.1.6](https://img.shields.io/badge/AppVersion-0.1.6-informational?style=flat-square)
Generic Device Plugin
## Maintainers
| Name | Email | Url |
| ---- | ------ | --- |
| alexlebens | | |
## Source Code
* <https://github.com/squat/generic-device-plugin>
* <https://github.com/bjw-s/helm-charts/tree/main/charts/library/common>
## Requirements
| Repository | Name | Version |
|------------|------|---------|
| https://bjw-s.github.io/helm-charts/ | common | 3.6.1 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| config | object | `{"data":"devices:\n - name: serial\n groups:\n - paths:\n - path: /dev/ttyUSB*\n - paths:\n - path: /dev/ttyACM*\n - paths:\n - path: /dev/tty.usb*\n - paths:\n - path: /dev/cu.*\n - paths:\n - path: /dev/cuaU*\n - paths:\n - path: /dev/rfcomm*\n - name: video\n groups:\n - paths:\n - path: /dev/video0\n - name: fuse\n groups:\n - count: 10\n paths:\n - path: /dev/fuse\n - name: audio\n groups:\n - count: 10\n paths:\n - path: /dev/snd\n - name: capture\n groups:\n - paths:\n - path: /dev/snd/controlC0\n - path: /dev/snd/pcmC0D0c\n - paths:\n - path: /dev/snd/controlC1\n mountPath: /dev/snd/controlC0\n - path: /dev/snd/pcmC1D0c\n mountPath: /dev/snd/pcmC0D0c\n - paths:\n - path: /dev/snd/controlC2\n mountPath: /dev/snd/controlC0\n - path: /dev/snd/pcmC2D0c\n mountPath: /dev/snd/pcmC0D0c\n - paths:\n - path: /dev/snd/controlC3\n mountPath: /dev/snd/controlC0\n - path: /dev/snd/pcmC3D0c\n mountPath: /dev/snd/pcmC0D0c\n","enabled":true}` | Config map |
| config.data | string | See [values.yaml](./values.yaml) | generic-device-plugin config file [[ref]](https://github.com/squat/generic-device-plugin#usage) |
| deviceDomain | string | `"squat.ai"` | Domain used by devices for identification |
| image | object | `{"pullPolicy":"Always","repository":"ghcr.io/squat/generic-device-plugin","tag":"latest@sha256:ba6f0b4cf6c858d6ad29ba4d32e4da11638abbc7d96436bf04f582a97b2b8821"}` | Default image |
| name | string | `"generic-device-plugin"` | Name override of release |
| resources | object | `{"limit":{"cpu":"100m","memory":"20Mi"},"requests":{"cpu":"50m","memory":"10Mi"}}` | Default resources |
| service | object | `{"listenPort":8080}` | Service port |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)
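As a usage sketch: once the daemonset is running, the device groups configured in config.data are advertised as extended resources under deviceDomain (for example squat.ai/video, following the upstream generic-device-plugin convention), and a pod requests one through resources.limits. The pod and container names below are illustrative, not part of this chart.

apiVersion: v1
kind: Pod
metadata:
  name: video-consumer            # hypothetical pod name
spec:
  containers:
    - name: app                   # hypothetical container name
      image: alpine:3.20
      command: ["sleep", "infinity"]
      resources:
        limits:
          squat.ai/video: 1       # request one device from the "video" group defined in config.data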

View File

@@ -0,0 +1,82 @@
{{ include "bjw-s.common.loader.init" . }}
{{ define "genericDevicePlugin.hardcodedValues" }}
{{ if not .Values.global.nameOverride }}
global:
nameOverride: {{ .Values.name }}
{{ end }}
controllers:
main:
type: daemonset
pod:
priorityClassName: system-node-critical
tolerations:
- operator: "Exists"
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
containers:
main:
image:
repository: {{ .Values.image.repository }}
tag: {{ .Values.image.tag }}
pullPolicy: {{ .Values.image.pullPolicy }}
args:
- --config=/config/config.yaml
env:
- name: LISTEN
value: :{{ .Values.service.listenPort }}
- name: PLUGIN_DIRECTORY
value: /var/lib/kubelet/device-plugins
- name: DOMAIN
value: {{ .Values.deviceDomain }}
probes:
liveness:
type: HTTP
path: /health
readiness:
type: HTTP
path: /health
startup:
type: HTTP
path: /health
securityContext:
privileged: True
configMaps:
config:
enabled: {{ .Values.config.enabled }}
data:
config.yaml: {{ toYaml .Values.config.data | nindent 8 }}
service:
main:
controller: main
ports:
http:
port: {{ .Values.service.listenPort }}
persistence:
config:
enabled: true
type: configMap
name: {{ .Values.name }}-config
device-plugins:
enabled: true
type: hostPath
hostPath: /var/lib/kubelet/device-plugins
dev:
enabled: true
type: hostPath
hostPath: /dev
serviceMonitor:
main:
serviceName: generic-device-plugin
endpoints:
- port: http
scheme: http
path: /metrics
interval: 30s
scrapeTimeout: 10s
{{ end }}
{{ $_ := mergeOverwrite .Values (include "genericDevicePlugin.hardcodedValues" . | fromYaml) }}
{{/* Render the templates */}}
{{ include "bjw-s.common.loader.generate" . }}

View File

@@ -0,0 +1,80 @@
# -- Name override of release
name: generic-device-plugin
# -- Default image
image:
repository: ghcr.io/squat/generic-device-plugin
tag: latest@sha256:ba6f0b4cf6c858d6ad29ba4d32e4da11638abbc7d96436bf04f582a97b2b8821
pullPolicy: Always
# -- Domain used by devices for identification
deviceDomain: squat.ai
# -- Service port
service:
listenPort: 8080
# -- Default resources
resources:
limit:
cpu: 100m
memory: 20Mi
requests:
cpu: 50m
memory: 10Mi
# -- Config map
config:
enabled: true
# -- generic-device-plugin config file [[ref]](https://github.com/squat/generic-device-plugin#usage)
# @default -- See [values.yaml](./values.yaml)
data: |
devices:
- name: serial
groups:
- paths:
- path: /dev/ttyUSB*
- paths:
- path: /dev/ttyACM*
- paths:
- path: /dev/tty.usb*
- paths:
- path: /dev/cu.*
- paths:
- path: /dev/cuaU*
- paths:
- path: /dev/rfcomm*
- name: video
groups:
- paths:
- path: /dev/video0
- name: fuse
groups:
- count: 10
paths:
- path: /dev/fuse
- name: audio
groups:
- count: 10
paths:
- path: /dev/snd
- name: capture
groups:
- paths:
- path: /dev/snd/controlC0
- path: /dev/snd/pcmC0D0c
- paths:
- path: /dev/snd/controlC1
mountPath: /dev/snd/controlC0
- path: /dev/snd/pcmC1D0c
mountPath: /dev/snd/pcmC0D0c
- paths:
- path: /dev/snd/controlC2
mountPath: /dev/snd/controlC0
- path: /dev/snd/pcmC2D0c
mountPath: /dev/snd/pcmC0D0c
- paths:
- path: /dev/snd/controlC3
mountPath: /dev/snd/controlC0
- path: /dev/snd/pcmC3D0c
mountPath: /dev/snd/pcmC0D0c

View File

@@ -1,13 +0,0 @@
apiVersion: v2
name: kubelet-serving-cert-approver
version: 0.0.4
description: Kubelet Serving TLS Certificate Signing Request Approver
keywords:
- kubernetes
- certificate
sources:
- https://github.com/alex1989hu/kubelet-serving-cert-approver
- https://github.com/alexlebens/helm-charts/charts/homepage
maintainers:
- name: alexlebens
appVersion: 0.8.1

View File

@@ -1,16 +0,0 @@
## Introduction
[Kubelet Serving Certificate Approver](https://github.com/alex1989hu/kubelet-serving-cert-approver)
Kubelet Serving Certificate Approver is a custom approving controller which approves kubernetes.io/kubelet-serving Certificate Signing Request that kubelet use to serve TLS endpoints.
This chart bootstraps a [Kubelet Serving Certificate Approver](https://github.com/alex1989hu/kubelet-serving-cert-approver) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes
- Helm
## Parameters
See the [values files](values.yaml).

View File

@@ -1,19 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubelet-serving-cert-approver
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: kubelet-serving-cert-approver
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: server
app.kubernetes.io/part-of: kubelet-serving-cert-approver
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: "certificates:{{ .Release.Name }}"
subjects:
- kind: ServiceAccount
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}

View File

@@ -1,61 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "certificates:{{ .Release.Name }}"
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: kubelet-serving-cert-approver
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: server
app.kubernetes.io/part-of: kubelet-serving-cert-approver
rules:
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests
verbs:
- get
- list
- watch
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests/approval
verbs:
- update
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
- apiGroups:
- certificates.k8s.io
resourceNames:
- kubernetes.io/kubelet-serving
resources:
- signers
verbs:
- approve
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "events:{{ .Release.Name }}"
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: kubelet-serving-cert-approver
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: server
app.kubernetes.io/part-of: kubelet-serving-cert-approverv
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch

View File

@@ -1,88 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubelet-serving-cert-approver
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: kubelet-serving-cert-approver
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: server
app.kubernetes.io/part-of: kubelet-serving-cert-approver
spec:
revisionHistoryLimit: 3
replicas: {{ .Values.deployment.replicas }}
strategy:
type: {{ .Values.deployment.strategy }}
selector:
matchLabels:
app.kubernetes.io/name: kubelet-serving-cert-approver
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: kubelet-serving-cert-approver
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/master
operator: DoesNotExist
- key: node-role.kubernetes.io/control-plane
operator: DoesNotExist
weight: 100
containers:
- name: {{ .Release.Name }}
image: "{{ .Values.deployment.image.repository }}:{{ .Values.deployment.image.tag }}"
imagePullPolicy: {{ .Values.deployment.image.imagePullPolicy }}
ports:
- containerPort: 8080
name: health
- containerPort: 9090
name: metrics
args:
- serve
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
resources:
{{- toYaml .Values.deployment.resources | nindent 12 }}
livenessProbe:
httpGet:
path: /healthz
port: health
initialDelaySeconds: 6
readinessProbe:
httpGet:
path: /readyz
port: health
initialDelaySeconds: 3
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
priorityClassName: {{ .Values.deployment.priorityClassName }}
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsUser: 65534
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubelet-serving-cert-approver
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: kubelet-serving-cert-approver
labels:
app.kubernetes.io/name: kubelet-serving-cert-approver
app.kubernetes.io/instance: {{ .Release.Name }}
pod-security.kubernetes.io/audit: restricted
pod-security.kubernetes.io/enforce: restricted
pod-security.kubernetes.io/warn: restricted

View File

@@ -1,19 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: "events:{{ .Release.Name }}"
namespace: default
labels:
app.kubernetes.io/name: kubelet-serving-cert-approver
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: server
app.kubernetes.io/part-of: kubelet-serving-cert-approver
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: "events:{{ .Release.Name }}"
subjects:
- kind: ServiceAccount
name: kubelet-serving-cert-approver
namespace: {{ .Release.Name }}

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kubelet-serving-cert-approver
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: kubelet-serving-cert-approver
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: server
app.kubernetes.io/part-of: kubelet-serving-cert-approver

View File

@@ -1,20 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: kubelet-serving-cert-approver
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: kubelet-serving-cert-approver
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: server
app.kubernetes.io/part-of: kubelet-serving-cert-approver
spec:
ports:
- name: metrics
port: 9090
protocol: TCP
targetPort: metrics
selector:
app.kubernetes.io/name: kubelet-serving-cert-approver
app.kubernetes.io/instance: {{ .Release.Name }}

View File

@@ -1,15 +0,0 @@
deployment:
replicas: 1
strategy: Recreate
priorityClassName: system-cluster-critical
image:
repository: ghcr.io/alex1989hu/kubelet-serving-cert-approver
tag: main
imagePullPolicy: Always
resources:
limits:
cpu: 250m
memory: 32Mi
requests:
cpu: 10m
memory: 16Mi

View File

@@ -1,14 +0,0 @@
apiVersion: v2
name: matrix-hookshot
version: 0.1.1
description: Chart for Matrix Hookshot
keywords:
- matrix
- matrix-hookshot
- webhook
sources:
- https://github.com/matrix-org/matrix-hookshot
maintainers:
- name: alexlebens
icon: https://avatars.githubusercontent.com/u/8418310?s=48&v=4
appVersion: "5.3.0"

View File

@@ -1,43 +0,0 @@
{{/*
Helper for secret name
*/}}
{{- define "hookshot.secretName" -}}
{{- if .Values.hookshot.existingSecret }}
{{- printf "%s" .Values.hookshot.existingSecret -}}
{{- else }}
{{- printf "matrix-hookshot-config-secret" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{/*
Helper for registration secret name
*/}}
{{- define "hookshot.registrationSecretName" -}}
{{- if .Values.hookshot.existingRegistrationSecret }}
{{- printf "%s" .Values.hookshot.existingRegistrationSecret -}}
{{- else }}
{{- printf "matrix-hookshot-registration-secret" }}
{{- end }}
{{- end }}
{{/*
Helper for passkey secret name
*/}}
{{- define "hookshot.passkeySecretName" -}}
{{- if .Values.hookshot.existingPasskeySecret }}
{{- printf "%s" .Values.hookshot.existingPasskeySecret -}}
{{- else }}
{{- printf "matrix-hookshot-passkey-secret" }}
{{- end }}
{{- end }}
{{/*
Helper for passkey file name
*/}}
{{- define "hookshot.passFile" -}}
{{- if .Values.hookshot.config.passFile }}
{{- printf "%s" .Values.hookshot.config.passFile -}}
{{- else }}
{{- printf "passkey.pem" }}
{{- end }}
{{- end }}

View File

@@ -1,79 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: matrix-hookshot
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: matrix-hookshot
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
revisionHistoryLimit: 3
replicas: {{ .Values.deployment.replicas }}
strategy:
type: {{ .Values.deployment.strategy }}
selector:
matchLabels:
app.kubernetes.io/name: matrix-hookshot
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: matrix-hookshot
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
serviceAccountName: matrix-hookshot
automountServiceAccountToken: true
containers:
- name: matrix-hookshot
image: "{{ .Values.deployment.image.repository }}:{{ .Values.deployment.image.tag }}"
imagePullPolicy: {{ .Values.deployment.image.imagePullPolicy }}
ports:
- name: webhook
containerPort: {{ .Values.service.webhook.port }}
protocol: TCP
- name: metrics
containerPort: {{ .Values.service.metrics.port }}
protocol: TCP
- name: appservice
containerPort: {{ .Values.service.appservice.port }}
protocol: TCP
- name: widgets
containerPort: {{ .Values.service.widgets.port }}
protocol: TCP
env:
{{- range $k,$v := .Values.deployment.env }}
- name: {{ $k }}
value: {{ $v | quote }}
{{- end }}
{{- with .Values.deployment.envFrom }}
envFrom:
{{- toYaml . | nindent 12 }}
{{- end }}
resources:
{{- toYaml .Values.deployment.resources | nindent 12 }}
volumeMounts:
- name: config
mountPath: /data/config.yml
subPath: config.yml
readOnly: true
- name: registration
mountPath: /data/registration.yml
subPath: registration.yml
readOnly: true
- name: passkey
mountPath: "/data/{{ template "hookshot.passFile" . }}"
subPath: {{ template "hookshot.passFile" . }}
readOnly: true
volumes:
- name: config
secret:
secretName: {{ template "hookshot.secretName" . }}
- name: registration
secret:
secretName: {{ template "hookshot.registrationSecretName" . }}
- name: passkey
secret:
secretName: {{ template "hookshot.passkeySecretName" . }}

View File

@@ -1,100 +0,0 @@
{{- if .Values.ingress.webhook.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: matrix-hookshot-webhook
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: matrix-hookshot-webhook
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
annotations:
{{- toYaml .Values.ingress.webhook.annotations | nindent 4 }}
spec:
ingressClassName: {{ .Values.ingress.webhook.className }}
tls:
- hosts:
- {{ .Values.ingress.webhook.host }}
secretName: {{ .Release.Name }}-webhook-secret-tls
rules:
- host: {{ .Values.ingress.webhook.host }}
http:
paths:
- path: /webhook/
pathType: Prefix
backend:
service:
name: {{ .Release.Name }}
port:
name: webhook
{{- end }}
---
{{- if .Values.ingress.appservice.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: matrix-hookshot-appservice
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: matrix-hookshot-appservice
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
annotations:
{{- toYaml .Values.ingress.appservice.annotations | nindent 4 }}
spec:
ingressClassName: {{ .Values.ingress.appservice.className }}
tls:
- hosts:
- {{ .Values.ingress.appservice.host }}
secretName: {{ .Release.Name }}-appservice-secret-tls
rules:
- host: {{ .Values.ingress.appservice.host }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: {{ .Release.Name }}
port:
name: appservice
{{- end }}
---
{{- if .Values.ingress.widgets.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: matrix-hookshot-widgets
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: matrix-hookshot-widgets
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
annotations:
{{- toYaml .Values.ingress.widgets.annotations | nindent 4 }}
spec:
ingressClassName: {{ .Values.ingress.widgets.className }}
tls:
- hosts:
- {{ .Values.ingress.widgets.host }}
secretName: {{ .Release.Name }}-widgets-secret-tls
rules:
- host: {{ .Values.ingress.widgets.host }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: {{ .Release.Name }}
port:
name: widgets
{{- end }}

View File

@@ -1,26 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: matrix-hookshot-test-connection
labels:
app.kubernetes.io/name: matrix-hookshot-test-connection
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
annotations:
"helm.sh/hook": test-success
spec:
restartPolicy: Never
containers:
- name: wget
image: busybox
command: ['wget']
args: ['matrix-hookshot:{{ .Values.service.webhook.port }}']
resources:
limits:
cpu: 500m
memory: 1Gi
requests:
cpu: 50m
memory: 256Mi

View File

@@ -1,52 +0,0 @@
{{- if not .Values.hookshot.existingSecret }}
apiVersion: v1
kind: Secret
metadata:
name: matrix-hookshot-config-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: matrix-hookshot-config
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
data:
config.yml: |
{{ toYaml .Values.hookshot.config | indent 4 }}
{{- end }}
---
{{- if not .Values.hookshot.existingRegistrationSecret }}
apiVersion: v1
kind: Secret
metadata:
name: matrix-hookshot-registration-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: matrix-hookshot-registration
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
data:
registration.yml: |
{{ toYaml .Values.hookshot.registration | indent 4 }}
{{- end }}
---
{{- if not .Values.hookshot.existingPasskeySecret }}
apiVersion: v1
kind: Secret
metadata:
name: matrix-hookshot-passkey-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: matrix-hookshot-passkey
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
data:
{{ .Values.hookshot.config.passFile }}: |
{{ toYaml .Values.hookshot.passkey | indent 4 }}
{{- end }}

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: matrix-hookshot
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: matrix-hookshot
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}

View File

@@ -1,23 +0,0 @@
{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: matrix-hookshot
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: matrix-hookshot
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
endpoints:
- port: metrics
interval: {{ .Values.metrics.serviceMonitor.interval }}
scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
path: /metrics
selector:
matchLabels:
app.kubernetes.io/name: matrix-hookshot
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View File

@@ -1,33 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: matrix-hookshot
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: matrix-hookshot
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.webhook.port }}
targetPort: webhook
protocol: TCP
name: webhook
- port: {{ .Values.service.metrics.port }}
targetPort: metrics
protocol: TCP
name: metrics
- port: {{ .Values.service.appservice.port }}
targetPort: appservice
protocol: TCP
name: appservice
- port: {{ .Values.service.widgets.port }}
targetPort: widgets
protocol: TCP
name: widgets
selector:
app.kubernetes.io/name: matrix-hookshot
app.kubernetes.io/instance: {{ .Release.Name }}

View File

@@ -1,248 +0,0 @@
deployment:
replicas: 1
strategy: Recreate
image:
repository: halfshot/matrix-hookshot
tag: "5.3.0"
imagePullPolicy: IfNotPresent
env: {}
envFrom: []
resources:
limits:
memory: 512Mi
cpu: 100m
requests:
memory: 256Mi
cpu: 50m
service:
type: ClusterIP
webhook:
port: 9000
metrics:
port: 9001
appservice:
port: 9002
widgets:
port: 9003
ingress:
webhook:
enabled: false
className: ""
annotations: {}
host: ""
appservice:
enabled: false
className: ""
annotations: {}
host: ""
widgets:
enabled: false
className: ""
annotations: {}
host: ""
metrics:
enabled: false
serviceMonitor:
enabled: false
interval: 15s
scrapeTimeout: 5s
# Reference the following for examples
# https://matrix-org.github.io/matrix-hookshot/latest/setup/sample-configuration.html
hookshot:
# config.yml contents
existingSecret: ""
config:
bridge:
domain: example.com
url: http://localhost:8008
mediaUrl: https://example.com
port: 9993
bindAddress: 0.0.0.0
passFile: passkey.pem
logging:
level: info
colorize: true
json: false
timestampFormat: HH:mm:ss:SSS
listeners:
- port: 9000
bindAddress: 0.0.0.0
resources:
- webhooks
- port: 9001
bindAddress: 0.0.0.0
resources:
- metrics
- provisioning
- port: 9003
bindAddress: 0.0.0.0
resources:
- widgets
# github:
# # (Optional) Configure this to enable GitHub support
# auth:
# # Authentication for the GitHub App.
# id: 123
# privateKeyFile: github-key.pem
# webhook:
# # Webhook settings for the GitHub app.
# secret: secrettoken
# oauth:
# # (Optional) Settings for allowing users to sign in via OAuth.
# client_id: foo
# client_secret: bar
# redirect_uri: https://example.com/oauth/
# defaultOptions:
# # (Optional) Default options for GitHub connections.
# showIssueRoomLink: false
# hotlinkIssues:
# prefix: "#"
# userIdPrefix:
# # (Optional) Prefix used when creating ghost users for GitHub accounts.
# _github_
# gitlab:
# # (Optional) Configure this to enable GitLab support
# instances:
# gitlab.com:
# url: https://gitlab.com
# webhook:
# secret: secrettoken
# publicUrl: https://example.com/hookshot/
# userIdPrefix:
# # (Optional) Prefix used when creating ghost users for GitLab accounts.
# _gitlab_
# commentDebounceMs:
# # (Optional) Aggregate comments by waiting this many miliseconds before posting them to Matrix. Defaults to 5000 (5 seconds)
# 5000
# figma:
# # (Optional) Configure this to enable Figma support
# publicUrl: https://example.com/hookshot/
# instances:
# your-instance:
# teamId: your-team-id
# accessToken: your-personal-access-token
# passcode: your-webhook-passcode
# jira:
# # (Optional) Configure this to enable Jira support. Only specify `url` if you are using a On Premise install (i.e. not atlassian.com)
# webhook:
# # Webhook settings for JIRA
# secret: secrettoken
# oauth:
# # (Optional) OAuth settings for connecting users to JIRA. See documentation for more information
# client_id: foo
# client_secret: bar
# redirect_uri: https://example.com/oauth/
# generic:
# # (Optional) Support for generic webhook events.
# #'allowJsTransformationFunctions' will allow users to write short transformation snippets in code, and thus is unsafe in untrusted environments
# enabled: false
# enableHttpGet: false
# urlPrefix: https://example.com/webhook/
# userIdPrefix: _webhooks_
# allowJsTransformationFunctions: false
# waitForComplete: false
# feeds:
# # (Optional) Configure this to enable RSS/Atom feed support
# enabled: false
# pollConcurrency: 4
# pollIntervalSeconds: 600
# pollTimeoutSeconds: 30
# provisioning:
# # (Optional) Provisioning API for integration managers
# secret: "!secretToken"
# bot:
# # (Optional) Define profile information for the bot user
# displayname: Hookshot Bot
# avatar: mxc://half-shot.uk/2876e89ccade4cb615e210c458e2a7a6883fe17d
# serviceBots:
# # (Optional) Define additional bot users for specific services
# - localpart: feeds
# displayname: Feeds
# avatar: ./assets/feeds_avatar.png
# prefix: "!feeds"
# service: feeds
# metrics:
# # (Optional) Prometheus metrics support
# enabled: true
# cache:
# # (Optional) Cache options for large scale deployments.
# # For encryption to work, this must be configured.
# redisUri: redis://localhost:6379
# queue:
# # (Optional) Message queue configuration options for large scale deployments.
# # For encryption to work, this must not be configured.
# redisUri: redis://localhost:6379
# widgets:
# # (Optional) EXPERIMENTAL support for complimentary widgets
# addToAdminRooms: false
# disallowedIpRanges:
# - 127.0.0.0/8
# - 10.0.0.0/8
# - 172.16.0.0/12
# - 192.168.0.0/16
# - 100.64.0.0/10
# - 192.0.0.0/24
# - 169.254.0.0/16
# - 192.88.99.0/24
# - 198.18.0.0/15
# - 192.0.2.0/24
# - 198.51.100.0/24
# - 203.0.113.0/24
# - 224.0.0.0/4
# - ::1/128
# - fe80::/10
# - fc00::/7
# - 2001:db8::/32
# - ff00::/8
# - fec0::/10
# roomSetupWidget:
# addOnInvite: false
# publicUrl: https://example.com/widgetapi/v1/static/
# branding:
# widgetTitle: Hookshot Configuration
# sentry:
# # (Optional) Configure Sentry error reporting
# dsn: https://examplePublicKey@o0.ingest.sentry.io/0
# environment: production
# permissions:
# # (Optional) Permissions for using the bridge. See docs/setup.md#permissions for help
# - actor: example.com
# services:
# - service: "*"
# level: admin
# registration.yml contents
existingRegistrationSecret: ""
registration:
id: matrix-hookshot
as_token: ""
hs_token: ""
namespaces:
rooms: []
users: []
sender_localpart: hookshot
url: "http://example.com"
rate_limited: false
# A passkey used to encrypt tokens stored inside the bridge.
# Run openssl genpkey -out passkey.pem -outform PEM -algorithm RSA -pkeyopt rsa_keygen_bits:4096 to generate
existingPasskeySecret: ""
passkey: ""

View File

@@ -1,15 +0,0 @@
apiVersion: v2
name: mautrix-discord
version: 0.1.1
description: Chart for Matrix Discord Bridge
keywords:
- matrix
- mautrix-discord
- bridge
- discord
sources:
- https://github.com/mautrix/discord
maintainers:
- name: alexlebens
icon: https://avatars.githubusercontent.com/u/88519669?s=48&v=4
appVersion: v0.6.5

View File

@@ -1,10 +0,0 @@
{{/*
Helper for secret name
*/}}
{{- define "mautrix-discord.secretName" -}}
{{- if .Values.mautrixDiscord.existingSecret }}
{{- printf "%s" .Values.mautrixDiscord.existingSecret -}}
{{- else }}
{{- printf "mautrix-discord-config-secret" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}

View File

@@ -1,89 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mautrix-discord
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-discord
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
revisionHistoryLimit: 3
replicas: {{ .Values.deployment.replicas }}
strategy:
type: {{ .Values.deployment.strategy }}
selector:
matchLabels:
app.kubernetes.io/name: mautrix-discord
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: mautrix-discord
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
serviceAccountName: mautrix-discord
automountServiceAccountToken: true
containers:
- name: mautrix-discord
image: "{{ .Values.deployment.image.repository }}:{{ .Values.deployment.image.tag }}"
imagePullPolicy: {{ .Values.deployment.image.imagePullPolicy }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
env:
{{- range $k,$v := .Values.deployment.env }}
- name: {{ $k }}
value: {{ $v | quote }}
{{- end }}
{{- with .Values.deployment.envFrom }}
envFrom:
{{- toYaml . | nindent 12 }}
{{- end }}
livenessProbe:
httpGet:
path: /_matrix/mau/live
port: http
failureThreshold: {{ .Values.deployment.probes.liveness.failureThreshold }}
periodSeconds: {{ .Values.deployment.probes.liveness.periodSeconds }}
readinessProbe:
httpGet:
path: /_matrix/mau/ready
port: http
failureThreshold: {{ .Values.deployment.probes.readiness.failureThreshold }}
periodSeconds: {{ .Values.deployment.probes.readiness.periodSeconds }}
resources:
{{- toYaml .Values.deployment.resources | nindent 12 }}
volumeMounts:
- name: config
mountPath: /data/config.yaml
subPath: config.yaml
readOnly: true
- name: data
mountPath: /data
volumes:
- name: config
secret:
secretName: {{ template "mautrix-discord.secretName" . }}
- name: data
{{- if .Values.persistence.enabled }}
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default "mautrix-discord-data" }}
{{- else }}
emptyDir: {}
{{- end }}
{{- with .Values.deployment.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.deployment.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.deployment.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -1,32 +0,0 @@
{{- if .Values.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: mautrix-discord
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-discord
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
annotations:
{{- toYaml .Values.ingress.annotations | nindent 4 }}
spec:
ingressClassName: {{ .Values.ingress.className }}
tls:
- hosts:
- {{ .Values.ingress.host }}
secretName: {{ .Release.Name }}-secret-tls
rules:
- host: {{ .Values.ingress.host }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: mautrix-discord
port:
name: http
{{- end }}

View File

@@ -1,26 +0,0 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: mautrix-discord-data
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-discord
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
accessModes:
- {{ .Values.persistence.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClass }}
{{- if not .Values.persistence.storageClass }}
storageClassName: ""
{{- else }}
storageClassName: {{ .Values.persistence.storageClass | quote }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,16 +0,0 @@
{{- if not .Values.mautrixDiscord.existingSecret }}
apiVersion: v1
kind: Secret
metadata:
name: mautrix-discord-config-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-discord-config
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
data:
config.yaml: |
{{ toYaml .Values.mautrixDiscord.config | indent 4 }}
{{- end }}

View File

@@ -1,17 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: mautrix-discord
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-discord
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}

View File

@@ -1,24 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: mautrix-discord
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-discord
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
type: {{ .Values.service.type }}
clusterIP: {{ .Values.service.clusterIP | quote }}
externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
publishNotReadyAddresses: {{ .Values.service.publishNotReadyAddresses }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: mautrix-discord
app.kubernetes.io/instance: {{ .Release.Name }}

View File

@@ -1,421 +0,0 @@
deployment:
replicas: 1
strategy: Recreate
image:
repository: dock.mau.dev/mautrix/discord
tag: v0.6.5
imagePullPolicy: IfNotPresent
env: {}
envFrom: []
resources:
limits:
memory: 128Mi
cpu: 200m
requests:
memory: 64Mi
cpu: 50m
probes:
liveness:
failureThreshold: 5
periodSeconds: 10
readiness:
failureThreshold: 5
periodSeconds: 10
nodeSelector: {}
tolerations: []
affinity: {}
serviceAccount:
create: true
annotations: {}
service:
type: ClusterIP
clusterIP: None
port: 29334
externalTrafficPolicy: ""
publishNotReadyAddresses: true
ingress:
enabled: false
className: ""
annotations: {}
host: ""
persistence:
enabled: false
existingClaim: ""
storageClass: ""
accessMode: ReadWriteOnce
size: 500Mi
# Reference the following for examples
# https://github.com/mautrix/discord/blob/main/example-config.yaml
mautrixDiscord:
# config.yml contents
existingSecret: ""
config:
# Homeserver details.
homeserver:
# The address that this appservice can use to connect to the homeserver.
address: https://matrix.example.com
# Publicly accessible base URL for media, used for avatars in relay mode.
# If not set, the connection address above will be used.
public_address: null
# The domain of the homeserver (also known as server_name, used for MXIDs, etc).
domain: example.com
# What software is the homeserver running?
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
software: standard
# The URL to push real-time bridge status to.
# If set, the bridge will make POST requests to this URL whenever a user's discord connection state changes.
# The bridge will use the appservice as_token to authorize requests.
status_endpoint: null
# Endpoint for reporting per-message status.
message_send_checkpoint_endpoint: null
# Does the homeserver support https://github.com/matrix-org/matrix-spec-proposals/pull/2246?
async_media: false
# Should the bridge use a websocket for connecting to the homeserver?
# The server side is currently not documented anywhere and is only implemented by mautrix-wsproxy,
# mautrix-asmux (deprecated), and hungryserv (proprietary).
websocket: false
# How often should the websocket be pinged? Pinging will be disabled if this is zero.
ping_interval_seconds: 0
# Application service host/registration related details.
# Changing these values requires regeneration of the registration.
appservice:
# The address that the homeserver can use to connect to this appservice.
address: http://localhost:29334
# The hostname and port where this appservice should listen.
hostname: 0.0.0.0
port: 29334
# Database config.
database:
# The database type. "sqlite3-fk-wal" and "postgres" are supported.
type: postgres
# The database URI.
# SQLite: A raw file path is supported, but `file:<path>?_txlock=immediate` is recommended.
# https://github.com/mattn/go-sqlite3#connection-string
# Postgres: Connection string. For example, postgres://user:password@host/database?sslmode=disable
# To connect via Unix socket, use something like postgres:///dbname?host=/var/run/postgresql
uri: postgres://user:password@host/database?sslmode=disable
# Maximum number of connections. Mostly relevant for Postgres.
max_open_conns: 20
max_idle_conns: 2
# Maximum connection idle time and lifetime before they're closed. Disabled if null.
# Parsed with https://pkg.go.dev/time#ParseDuration
max_conn_idle_time: null
max_conn_lifetime: null
# The unique ID of this appservice.
id: discord
# Appservice bot details.
bot:
# Username of the appservice bot.
username: discordbot
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
# to leave display name/avatar as-is.
displayname: Discord bridge bot
avatar: mxc://maunium.net/nIdEykemnwdisvHbpxflpDlC
# Whether or not to receive ephemeral events via appservice transactions.
# Requires MSC2409 support (i.e. Synapse 1.22+).
ephemeral_events: true
# Should incoming events be handled asynchronously?
# This may be necessary for large public instances with lots of messages going through.
# However, messages will not be guaranteed to be bridged in the same order they were sent in.
async_transactions: false
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
as_token: "This value is generated when generating the registration"
hs_token: "This value is generated when generating the registration"
# Bridge config
bridge:
# Localpart template of MXIDs for Discord users.
# {{.}} is replaced with the internal ID of the Discord user.
username_template: discord_{{.}}
# Displayname template for Discord users. This is also used as the room name in DMs if private_chat_portal_meta is enabled.
# Available variables:
# .ID - Internal user ID
# .Username - Legacy display/username on Discord
# .GlobalName - New displayname on Discord
# .Discriminator - The 4 numbers after the name on Discord
# .Bot - Whether the user is a bot
# .System - Whether the user is an official system user
# .Webhook - Whether the user is a webhook and is not an application
# .Application - Whether the user is an application
displayname_template: '{{or .GlobalName .Username}}{{if .Bot}} (bot){{end}}'
# Displayname template for Discord channels (bridged as rooms, or spaces when type=4).
# Available variables:
# .Name - Channel name, or user displayname (pre-formatted with displayname_template) in DMs.
# .ParentName - Parent channel name (used for categories).
# .GuildName - Guild name.
# .NSFW - Whether the channel is marked as NSFW.
# .Type - Channel type (see values at https://github.com/bwmarrin/discordgo/blob/v0.25.0/structs.go#L251-L267)
channel_name_template: '{{if or (eq .Type 3) (eq .Type 4)}}{{.Name}}{{else}}#{{.Name}}{{end}}'
# Displayname template for Discord guilds (bridged as spaces).
# Available variables:
# .Name - Guild name
guild_name_template: '{{.Name}}'
# Whether to explicitly set the avatar and room name for private chat portal rooms.
# If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms.
# If set to `always`, all DM rooms will have explicit names and avatars set.
# If set to `never`, DM rooms will never have names and avatars set.
private_chat_portal_meta: default
portal_message_buffer: 128
# Number of private channel portals to create on bridge startup.
# Other portals will be created when receiving messages.
startup_private_channel_create_limit: 5
# Should the bridge send a read receipt from the bridge bot when a message has been sent to Discord?
delivery_receipts: false
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
message_status_events: false
# Whether the bridge should send error notices via m.notice events when a message fails to bridge.
message_error_notices: true
# Should the bridge use space-restricted join rules instead of invite-only for guild rooms?
# This can avoid unnecessary invite events in guild rooms when members are synced in.
restricted_rooms: true
# Should the bridge automatically join the user to threads on Discord when the thread is opened on Matrix?
# This only works with clients that support thread read receipts (MSC3771 added in Matrix v1.4).
autojoin_thread_on_open: true
# Should inline fields in Discord embeds be bridged as HTML tables to Matrix?
# Tables aren't supported in all clients, but are the only way to emulate the Discord inline field UI.
embed_fields_as_tables: true
# Should guild channels be muted when the portal is created? This only meant for single-user instances,
# it won't mute it for all users if there are multiple Matrix users in the same Discord guild.
mute_channels_on_create: false
# Should the bridge update the m.direct account data event when double puppeting is enabled.
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
# and is therefore prone to race conditions.
sync_direct_chat_list: false
# Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run.
# This field will automatically be changed back to false after it, except if the config file is not writable.
resend_bridge_info: false
# Should incoming custom emoji reactions be bridged as mxc:// URIs?
# If set to false, custom emoji reactions will be bridged as the shortcode instead, and the image won't be available.
custom_emoji_reactions: true
# Should the bridge attempt to completely delete portal rooms when a channel is deleted on Discord?
# If true, the bridge will try to kick Matrix users from the room. Otherwise, the bridge only makes ghosts leave.
delete_portal_on_channel_delete: false
# Should the bridge delete all portal rooms when you leave a guild on Discord?
# This only applies if the guild has no other Matrix users on this bridge instance.
delete_guild_on_leave: true
# Whether or not created rooms should have federation enabled.
# If false, created portal rooms will never be federated.
federate_rooms: true
# Prefix messages from webhooks with the profile info? This can be used along with a custom displayname_template
# to better handle webhooks that change their name all the time (like ones used by bridges).
prefix_webhook_messages: false
# Bridge webhook avatars?
enable_webhook_avatars: true
# Should the bridge upload media to the Discord CDN directly before sending the message when using a user token,
# like the official client does? The other option is sending the media in the message send request as a form part
# (which is always used by bots and webhooks).
use_discord_cdn_upload: true
# Should mxc uris copied from Discord be cached?
# This can be `never` to never cache, `unencrypted` to only cache unencrypted mxc uris, or `always` to cache everything.
# If you have a media repo that generates non-unique mxc uris, you should set this to never.
cache_media: unencrypted
# Settings for converting Discord media to custom mxc:// URIs instead of reuploading.
# More details can be found at https://docs.mau.fi/bridges/go/discord/direct-media.html
direct_media:
# Should custom mxc:// URIs be used instead of reuploading media?
enabled: false
# The server name to use for the custom mxc:// URIs.
# This server name will effectively be a real Matrix server, it just won't implement anything other than media.
# You must either set up .well-known delegation from this domain to the bridge, or proxy the domain directly to the bridge.
server_name: discord-media.example.com
# Optionally a custom .well-known response. This defaults to `server_name:443`
well_known_response:
# The bridge supports MSC3860 media download redirects and will use them if the requester supports it.
# Optionally, you can force redirects and not allow proxying at all by setting this to false.
allow_proxy: true
# Matrix server signing key to make the federation tester pass, same format as synapse's .signing.key file.
# This key is also used to sign the mxc:// URIs to ensure only the bridge can generate them.
server_key: generate
# Settings for converting animated stickers.
animated_sticker:
# Format to which animated stickers should be converted.
# disable - No conversion, send as-is (lottie JSON)
# png - converts to non-animated png (fastest)
# gif - converts to animated gif
# webm - converts to webm video, requires ffmpeg executable with vp9 codec and webm container support
# webp - converts to animated webp, requires ffmpeg executable with webp codec/container support
target: webp
# Arguments for converter. All converters take width and height.
args:
width: 320
height: 320
fps: 25 # only for webm, webp and gif (2, 5, 10, 20 or 25 recommended)
# Servers to always allow double puppeting from
double_puppet_server_map:
example.com: https://example.com
# Allow using double puppeting from any server with a valid client .well-known file.
double_puppet_allow_discovery: false
# Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth
#
# If set, double puppeting will be enabled automatically for local users
# instead of users having to find an access token and run `login-matrix`
# manually.
login_shared_secret_map:
example.com: foobar
# The prefix for commands. Only required in non-management rooms.
command_prefix: '!discord'
# Messages sent upon joining a management room.
# Markdown is supported. The defaults are listed below.
management_room_text:
# Sent when joining a room.
welcome: "Hello, I'm a Discord bridge bot."
# Sent when joining a management room and the user is already logged in.
welcome_connected: "Use `help` for help."
# Sent when joining a management room and the user is not logged in.
welcome_unconnected: "Use `help` for help or `login` to log in."
# Optional extra text sent when joining a management room.
additional_help: ""
# Settings for backfilling messages.
backfill:
# Limits for forward backfilling.
forward_limits:
# Initial backfill (when creating portal). 0 means backfill is disabled.
# A special unlimited value is not supported; you must set a limit. Initial backfill will
# fetch all messages first before backfilling anything, so high limits can take a lot of time.
initial:
dm: 0
channel: 0
thread: 0
# Missed message backfill (on startup).
# 0 means backfill is disabled, -1 means fetch all messages since last bridged message.
# When using unlimited backfill (-1), messages are backfilled as they are fetched.
# With limits, all messages up to the limit are fetched first and backfilled afterwards.
missed:
dm: 0
channel: 0
thread: 0
# Maximum members in a guild to enable backfilling. Set to -1 to disable limit.
# This can be used as a rough heuristic to disable backfilling in channels that are too active.
# Currently only applies to missed message backfill.
max_guild_members: -1
# End-to-bridge encryption support options.
#
# See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
encryption:
# Allow encryption, work in group chat rooms with e2ee enabled
allow: false
# Default to encryption, force-enable encryption in all portals the bridge creates
# This will cause the bridge bot to be in private chats for the encryption to work properly.
default: false
# Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
appservice: false
# Require encryption, drop any unencrypted messages.
require: false
# Enable key sharing? If enabled, key requests for rooms that users are in will be fulfilled.
# You must use a client that supports requesting keys from other users to use this feature.
allow_key_sharing: false
# Should user mentions be in the event wire content to enable the server to send push notifications?
plaintext_mentions: false
# Options for deleting megolm sessions from the bridge.
delete_keys:
# Beeper-specific: delete outbound sessions when hungryserv confirms
# that the user has uploaded the key to key backup.
delete_outbound_on_ack: false
# Don't store outbound sessions in the inbound table.
dont_store_outbound: false
# Ratchet megolm sessions forward after decrypting messages.
ratchet_on_decrypt: false
# Delete fully used keys (index >= max_messages) after decrypting messages.
delete_fully_used_on_decrypt: false
# Delete previous megolm sessions from same device when receiving a new one.
delete_prev_on_new_session: false
# Delete megolm sessions received from a device when the device is deleted.
delete_on_device_delete: false
# Periodically delete megolm sessions when 2x max_age has passed since receiving the session.
periodically_delete_expired: false
# Delete inbound megolm sessions that don't have the received_at field used for
# automatic ratcheting and expired session deletion. This is meant as a migration
# to delete old keys prior to the bridge update.
delete_outdated_inbound: false
# What level of device verification should be required from users?
#
# Valid levels:
# unverified - Send keys to all devices in the room.
# cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
# cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
# cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
# Note that creating user signatures from the bridge bot is not currently possible.
# verified - Require manual per-device verification
# (currently only possible by modifying the `trust` column in the `crypto_device` database table).
verification_levels:
# Minimum level the bridge should send keys to when bridging messages from Discord to Matrix.
receive: unverified
# Minimum level that the bridge should accept for incoming Matrix messages.
send: unverified
# Minimum level that the bridge should require for accepting key requests.
share: cross-signed-tofu
# Options for Megolm room key rotation. These options allow you to
# configure the m.room.encryption event content. See:
# https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
# more information about that event.
rotation:
# Enable custom Megolm room key rotation settings. Note that these
# settings will only apply to rooms created after this option is
# set.
enable_custom: false
# The maximum number of milliseconds a session should be used
# before changing it. The Matrix spec recommends 604800000 (a week)
# as the default.
milliseconds: 604800000
# The maximum number of messages that should be sent with a given
# session before changing it. The Matrix spec recommends 100 as the
# default.
messages: 100
# Disable rotating keys when a user's devices change?
# You should not enable this option unless you understand all the implications.
disable_device_change_key_rotation: false
# Settings for provisioning API
provisioning:
# Prefix for the provisioning API paths.
prefix: /_matrix/provision
# Shared secret for authentication. If set to "generate", a random secret will be generated,
# or if set to "disable", the provisioning API will be disabled.
shared_secret: generate
# Enable debug API at /debug with provisioning authentication.
debug_endpoints: false
# Permissions for using the bridge.
# Permitted values:
# relay - Talk through the relaybot (if enabled), no access otherwise
# user - Access to use the bridge to chat with a Discord account.
# admin - User level and some additional administration tools
# Permitted keys:
# * - All Matrix users
# domain - All users on that homeserver
# mxid - Specific user
permissions:
"*": relay
"example.com": user
"@admin:example.com": admin
# Logging config. See https://github.com/tulir/zeroconfig for details.
logging:
min_level: debug
writers:
- type: stdout
format: pretty-colored
- type: file
format: json
filename: ./logs/mautrix-discord.log
max_size: 100
max_backups: 10
compress: true

View File

@@ -1,15 +0,0 @@
apiVersion: v2
name: mautrix-whatsapp
version: 0.1.1
description: Chart for Matrix Whatsapp Bridge
keywords:
- matrix
- mautrix-whatsapp
- bridge
- whatsapp
sources:
- https://github.com/mautrix/whatsapp
maintainers:
- name: alexlebens
icon: https://avatars.githubusercontent.com/u/88519669?s=48&v=4
appVersion: v0.10.7

View File

@@ -1,10 +0,0 @@
{{/*
Helper for secret name
*/}}
{{- define "mautrix-whatsapp.secretName" -}}
{{- if .Values.mautrixWhatsapp.existingSecret }}
{{- printf "%s" .Values.mautrixWhatsapp.existingSecret -}}
{{- else }}
{{- printf "mautrix-whatsapp-config-secret" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}

View File

@@ -1,89 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mautrix-whatsapp
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-whatsapp
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
revisionHistoryLimit: 3
replicas: {{ .Values.deployment.replicas }}
strategy:
type: {{ .Values.deployment.strategy }}
selector:
matchLabels:
app.kubernetes.io/name: mautrix-whatsapp
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: mautrix-whatsapp
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
serviceAccountName: mautrix-whatsapp
automountServiceAccountToken: true
containers:
- name: mautrix-whatsapp
image: "{{ .Values.deployment.image.repository }}:{{ .Values.deployment.image.tag }}"
imagePullPolicy: {{ .Values.deployment.image.imagePullPolicy }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
env:
{{- range $k,$v := .Values.deployment.env }}
- name: {{ $k }}
value: {{ $v | quote }}
{{- end }}
{{- with .Values.deployment.envFrom }}
envFrom:
{{- toYaml . | nindent 12 }}
{{- end }}
livenessProbe:
httpGet:
path: /_matrix/mau/live
port: http
failureThreshold: {{ .Values.deployment.probes.liveness.failureThreshold }}
periodSeconds: {{ .Values.deployment.probes.liveness.periodSeconds }}
readinessProbe:
httpGet:
path: /_matrix/mau/ready
port: http
failureThreshold: {{ .Values.deployment.probes.readiness.failureThreshold }}
periodSeconds: {{ .Values.deployment.probes.readiness.periodSeconds }}
resources:
{{- toYaml .Values.deployment.resources | nindent 12 }}
volumeMounts:
- name: config
mountPath: /data/config.yaml
subPath: config.yaml
readOnly: true
- name: data
mountPath: /data
volumes:
- name: config
secret:
secretName: {{ template "mautrix-whatsapp.secretName" . }}
- name: data
{{- if .Values.persistence.enabled }}
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default "mautrix-whatsapp-data" }}
{{- else }}
emptyDir: {}
{{- end }}
{{- with .Values.deployment.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.deployment.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.deployment.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -1,32 +0,0 @@
{{- if .Values.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: mautrix-whatsapp
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-whatsapp
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
annotations:
{{- toYaml .Values.ingress.annotations | nindent 4 }}
spec:
ingressClassName: {{ .Values.ingress.className }}
tls:
- hosts:
- {{ .Values.ingress.host }}
secretName: {{ .Release.Name }}-secret-tls
rules:
- host: {{ .Values.ingress.host }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: mautrix-whatsapp
port:
name: http
{{- end }}

View File

@@ -1,26 +0,0 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: mautrix-whatsapp-data
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-whatsapp
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
accessModes:
- {{ .Values.persistence.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClass }}
storageClassName: {{ .Values.persistence.storageClass | quote }}
{{- end }}
{{- end }}

View File

@@ -1,16 +0,0 @@
{{- if not .Values.mautrixWhatsapp.existingSecret }}
apiVersion: v1
kind: Secret
metadata:
name: mautrix-whatsapp-config-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-whatsapp-config
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
data:
config.yaml: |
{{ toYaml .Values.mautrixWhatsapp.config | indent 4 }}
{{- end }}

View File

@@ -1,17 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: mautrix-whatsapp
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-whatsapp
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}

View File

@@ -1,24 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: mautrix-whatsapp
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-whatsapp
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
type: {{ .Values.service.type }}
clusterIP: {{ .Values.service.clusterIP | quote }}
externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
publishNotReadyAddresses: {{ .Values.service.publishNotReadyAddresses }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: mautrix-whatsapp
app.kubernetes.io/instance: {{ .Release.Name }}

View File

@@ -1,526 +0,0 @@
deployment:
replicas: 1
strategy: Recreate
image:
repository: dock.mau.dev/mautrix/whatsapp
tag: v0.10.7
imagePullPolicy: IfNotPresent
env: {}
envFrom: []
resources:
limits:
memory: 128Mi
cpu: 200m
requests:
memory: 64Mi
cpu: 50m
probes:
liveness:
failureThreshold: 5
periodSeconds: 10
readiness:
failureThreshold: 5
periodSeconds: 10
nodeSelector: {}
tolerations: []
affinity: {}
serviceAccount:
create: true
annotations: {}
service:
type: ClusterIP
clusterIP: None
port: 29334
externalTrafficPolicy: ""
publishNotReadyAddresses: true
ingress:
enabled: false
className: ""
annotations: {}
host: ""
persistence:
enabled: false
existingClaim: ""
storageClass: ""
accessMode: ReadWriteOnce
size: 500Mi
# Reference the following for examples
# https://github.com/mautrix/whatsapp/blob/main/example-config.yaml
mautrixWhatsapp:
# config.yaml contents
existingSecret: ""
config:
# Homeserver details.
homeserver:
# The address that this appservice can use to connect to the homeserver.
address: https://matrix.example.com
# The domain of the homeserver (also known as server_name, used for MXIDs, etc).
domain: example.com
# What software is the homeserver running?
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
software: standard
# The URL to push real-time bridge status to.
# If set, the bridge will make POST requests to this URL whenever a user's whatsapp connection state changes.
# The bridge will use the appservice as_token to authorize requests.
status_endpoint: null
# Endpoint for reporting per-message status.
message_send_checkpoint_endpoint: null
# Does the homeserver support https://github.com/matrix-org/matrix-spec-proposals/pull/2246?
async_media: false
# Should the bridge use a websocket for connecting to the homeserver?
# The server side is currently not documented anywhere and is only implemented by mautrix-wsproxy,
# mautrix-asmux (deprecated), and hungryserv (proprietary).
websocket: false
# How often should the websocket be pinged? Pinging will be disabled if this is zero.
ping_interval_seconds: 0
# Application service host/registration related details.
# Changing these values requires regeneration of the registration.
appservice:
# The address that the homeserver can use to connect to this appservice.
address: http://localhost:29318
# The hostname and port where this appservice should listen.
hostname: 0.0.0.0
port: 29318
# Database config.
database:
# The database type. "sqlite3-fk-wal" and "postgres" are supported.
type: postgres
# The database URI.
# SQLite: A raw file path is supported, but `file:<path>?_txlock=immediate` is recommended.
# https://github.com/mattn/go-sqlite3#connection-string
# Postgres: Connection string. For example, postgres://user:password@host/database?sslmode=disable
# To connect via Unix socket, use something like postgres:///dbname?host=/var/run/postgresql
uri: postgres://user:password@host/database?sslmode=disable
# Maximum number of connections. Mostly relevant for Postgres.
max_open_conns: 20
max_idle_conns: 2
# Maximum connection idle time and lifetime before they're closed. Disabled if null.
# Parsed with https://pkg.go.dev/time#ParseDuration
max_conn_idle_time: null
max_conn_lifetime: null
# The unique ID of this appservice.
id: whatsapp
# Appservice bot details.
bot:
# Username of the appservice bot.
username: whatsappbot
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
# to leave display name/avatar as-is.
displayname: WhatsApp bridge bot
avatar: mxc://maunium.net/NeXNQarUbrlYBiPCpprYsRqr
# Whether or not to receive ephemeral events via appservice transactions.
# Requires MSC2409 support (i.e. Synapse 1.22+).
ephemeral_events: true
# Should incoming events be handled asynchronously?
# This may be necessary for large public instances with lots of messages going through.
# However, messages will not be guaranteed to be bridged in the same order they were sent in.
async_transactions: false
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
as_token: "This value is generated when generating the registration"
hs_token: "This value is generated when generating the registration"
# Segment-compatible analytics endpoint for tracking some events, like provisioning API login and encryption errors.
analytics:
# Hostname of the tracking server. The path is hardcoded to /v1/track
host: api.segment.io
# API key to send with tracking requests. Tracking is disabled if this is null.
token: null
# Optional user ID for tracking events. If null, defaults to using Matrix user ID.
user_id: null
# Prometheus config.
metrics:
# Enable prometheus metrics?
enabled: false
# IP and port where the metrics listener should be. The path is always /metrics
listen: 127.0.0.1:8001
# Config for things that are directly sent to WhatsApp.
whatsapp:
# Device name that's shown in the "WhatsApp Web" section in the mobile app.
os_name: Mautrix-WhatsApp bridge
# Browser name that determines the logo shown in the mobile app.
# Must be "unknown" for a generic icon or a valid browser name if you want a specific icon.
# List of valid browser names: https://github.com/tulir/whatsmeow/blob/efc632c008604016ddde63bfcfca8de4e5304da9/binary/proto/def.proto#L43-L64
browser_name: unknown
# Bridge config
bridge:
# Localpart template of MXIDs for WhatsApp users.
# {{.}} is replaced with the phone number of the WhatsApp user.
username_template: whatsapp_{{.}}
# Displayname template for WhatsApp users.
# {{.PushName}} - nickname set by the WhatsApp user
# {{.BusinessName}} - validated WhatsApp business name
# {{.Phone}} - phone number (international format)
# The following variables are also available, but will cause problems on multi-user instances:
# {{.FullName}} - full name from contact list
# {{.FirstName}} - first name from contact list
displayname_template: "{{or .BusinessName .PushName .JID}} (WA)"
# Should the bridge create a space for each logged-in user and add bridged rooms to it?
# Users who logged in before turning this on should run `!wa sync space` to create and fill the space for the first time.
personal_filtering_spaces: false
# Should the bridge send a read receipt from the bridge bot when a message has been sent to WhatsApp?
delivery_receipts: false
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
message_status_events: false
# Whether the bridge should send error notices via m.notice events when a message fails to bridge.
message_error_notices: true
# Should incoming calls send a message to the Matrix room?
call_start_notices: true
# Should another user's cryptographic identity changing send a message to Matrix?
identity_change_notices: false
portal_message_buffer: 128
# Settings for handling history sync payloads.
history_sync:
# Enable backfilling history sync payloads from WhatsApp?
backfill: true
# The maximum number of initial conversations that should be synced.
# Other conversations will be backfilled on demand when receiving a message or when initiating a direct chat.
max_initial_conversations: -1
# Maximum number of messages to backfill in each conversation.
# Set to -1 to disable limit.
message_count: 50
# Should the bridge request a full sync from the phone when logging in?
# This bumps the size of history syncs from 3 months to 1 year.
request_full_sync: false
# Configuration parameters that are sent to the phone along with the request full sync flag.
# By default (when the values are null or 0), the config isn't sent at all.
full_sync_config:
# Number of days of history to request.
# The limit seems to be around 3 years, but using higher values doesn't break.
days_limit: null
# This is presumably the maximum size of the transferred history sync blob, which may affect what the phone includes in the blob.
size_mb_limit: null
# This is presumably the local storage quota, which may affect what the phone includes in the history sync blob.
storage_quota_mb: null
# If this value is greater than 0 and the conversation's last message is more than
# this number of hours old, the conversation will automatically be marked as read.
# Conversations that have a last message that is less than this number of hours ago will
# have their unread status synced from WhatsApp.
unread_hours_threshold: 0
###############################################################################
# The settings below are only applicable for backfilling using batch sending, #
# which is no longer supported in Synapse. #
###############################################################################
# Settings for media requests. If the media expired, then it will not be on the WA servers.
# Media can always be requested by reacting with the ♻️ (recycle) emoji.
# These settings determine if the media requests should be done automatically during or after backfill.
media_requests:
# Should expired media be automatically requested from the server as part of the backfill process?
auto_request_media: true
# Whether to request the media immediately after the media message is backfilled ("immediate")
# or at a specific time of the day ("local_time").
request_method: immediate
# If request_method is "local_time", what time should the requests be sent (in minutes after midnight)?
request_local_time: 120
# Settings for immediate backfills. These backfills should generally be small and their main purpose is
# to populate each of the initial chats (as configured by max_initial_conversations) with a few messages
# so that you can continue conversations without losing context.
immediate:
# The number of concurrent backfill workers to create for immediate backfills.
# Note that using more than one worker could cause the room list to jump around
# since there are no guarantees about the order in which the backfills will complete.
worker_count: 1
# The maximum number of events to backfill initially.
max_events: 10
# Settings for deferred backfills. The purpose of these backfills is to fill in the rest of
# the chat history that was not covered by the immediate backfills.
# These backfills generally should happen at a slower pace so as not to overload the homeserver.
# Each deferred backfill config should define a "stage" of backfill (i.e. the last week of messages).
# The fields are as follows:
# - start_days_ago: the number of days ago to start backfilling from.
# To indicate the start of time, use -1. For example, for a week ago, use 7.
# - max_batch_events: the number of events to send per batch.
# - batch_delay: the number of seconds to wait before backfilling each batch.
deferred:
# Last Week
- start_days_ago: 7
max_batch_events: 20
batch_delay: 5
# Last Month
- start_days_ago: 30
max_batch_events: 50
batch_delay: 10
# Last 3 months
- start_days_ago: 90
max_batch_events: 100
batch_delay: 10
# The start of time
- start_days_ago: -1
max_batch_events: 500
batch_delay: 10
# Should puppet avatars be fetched from the server even if an avatar is already set?
user_avatar_sync: true
# Should Matrix users leaving groups be bridged to WhatsApp?
bridge_matrix_leave: true
# Should the bridge update the m.direct account data event when double puppeting is enabled.
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
# and is therefore prone to race conditions.
sync_direct_chat_list: false
# Should the bridge use MSC2867 to bridge manual "mark as unread"s from
# WhatsApp and set the unread status on initial backfill?
# This will only work on clients that support the m.marked_unread or
# com.famedly.marked_unread room account data.
sync_manual_marked_unread: true
# When double puppeting is enabled, users can use `!wa toggle` to change whether
# presence is bridged. This setting sets the default value.
# Existing users won't be affected when these are changed.
default_bridge_presence: true
# Send the presence as "available" to whatsapp when users start typing on a portal.
# This works as a workaround for homeservers that do not support presence, and allows
# users to see when the whatsapp user on the other side is typing during a conversation.
send_presence_on_typing: false
# Should the bridge always send "active" delivery receipts (two gray ticks on WhatsApp)
# even if the user isn't marked as online (e.g. when presence bridging isn't enabled)?
#
# By default, the bridge acts like WhatsApp web, which only sends active delivery
# receipts when it's in the foreground.
force_active_delivery_receipts: false
# Servers to always allow double puppeting from
double_puppet_server_map:
example.com: https://example.com
# Allow using double puppeting from any server with a valid client .well-known file.
double_puppet_allow_discovery: false
# Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth
#
# If set, double puppeting will be enabled automatically for local users
# instead of users having to find an access token and run `login-matrix`
# manually.
login_shared_secret_map:
example.com: foobar
# Whether to explicitly set the avatar and room name for private chat portal rooms.
# If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms.
# If set to `always`, all DM rooms will have explicit names and avatars set.
# If set to `never`, DM rooms will never have names and avatars set.
private_chat_portal_meta: default
# Should group members be synced in parallel? This makes member sync faster
parallel_member_sync: false
# Should Matrix m.notice-type messages be bridged?
bridge_notices: true
# Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run.
# This field will automatically be changed back to false afterwards, unless the config file is not writable.
resend_bridge_info: false
# When using double puppeting, should muted chats be muted in Matrix?
mute_bridging: false
# When using double puppeting, should archived chats be moved to a specific tag in Matrix?
# Note that WhatsApp unarchives chats when a message is received, which will also be mirrored to Matrix.
# This can be set to a tag (e.g. m.lowpriority), or null to disable.
archive_tag: null
# Same as above, but for pinned chats. The favorite tag is called m.favourite
pinned_tag: null
# Should mute status and tags only be bridged when the portal room is created?
tag_only_on_create: true
# Should WhatsApp status messages be bridged into a Matrix room?
# Disabling this won't affect already created status broadcast rooms.
enable_status_broadcast: true
# Should sending WhatsApp status messages be allowed?
# This can cause issues if the user has lots of contacts, so it's disabled by default.
disable_status_broadcast_send: true
# Should the status broadcast room be muted and moved into low priority by default?
# This is only applied when creating the room, the user can unmute it later.
mute_status_broadcast: true
# Tag to apply to the status broadcast room.
status_broadcast_tag: m.lowpriority
# Should the bridge use thumbnails from WhatsApp?
# They're disabled by default due to very low resolution.
whatsapp_thumbnail: false
# Allow invite permission for users. Users can invite any bots to rooms with WhatsApp
# users (private chats and groups).
allow_user_invite: false
# Whether or not created rooms should have federation enabled.
# If false, created portal rooms will never be federated.
federate_rooms: true
# Should the bridge never send alerts to the bridge management room?
# These are mostly things like the user being logged out.
disable_bridge_alerts: false
# Should the bridge stop if the WhatsApp server says another user connected with the same session?
# This is only safe on single-user bridges.
crash_on_stream_replaced: false
# Should the bridge detect URLs in outgoing messages, ask the homeserver to generate a preview,
# and send it to WhatsApp? URL previews can always be sent using the `com.beeper.linkpreviews`
# key in the event content even if this is disabled.
url_previews: false
# Send captions in the same message as images. This will send data compatible with both MSC2530 and MSC3552.
# This is currently not supported in most clients.
caption_in_message: false
# Send galleries as a single event? This is not an MSC (yet).
beeper_galleries: false
# Should polls be sent using MSC3381 event types?
extev_polls: false
# Should cross-chat replies from WhatsApp be bridged? Most servers and clients don't support this.
cross_room_replies: false
# Disable generating reply fallbacks? Some extremely bad clients still rely on them,
# but they're being phased out and will be completely removed in the future.
disable_reply_fallbacks: false
# Maximum time for handling Matrix events. Duration strings formatted for https://pkg.go.dev/time#ParseDuration
# Null means there's no enforced timeout.
message_handling_timeout:
# Send an error message after this timeout, but keep waiting for the response until the deadline.
# This is counted from the origin_server_ts, so the warning time is consistent regardless of the source of delay.
# If the message is older than this when it reaches the bridge, the message won't be handled at all.
error_after: null
# Drop messages after this timeout. They may still go through if the message got sent to the servers.
# This is counted from the time the bridge starts handling the message.
deadline: 120s
# The prefix for commands. Only required in non-management rooms.
command_prefix: "!wa"
# Messages sent upon joining a management room.
# Markdown is supported. The defaults are listed below.
management_room_text:
# Sent when joining a room.
welcome: "Hello, I'm a WhatsApp bridge bot."
# Sent when joining a management room and the user is already logged in.
welcome_connected: "Use `help` for help."
# Sent when joining a management room and the user is not logged in.
welcome_unconnected: "Use `help` for help or `login` to log in."
# Optional extra text sent when joining a management room.
additional_help: ""
# End-to-bridge encryption support options.
#
# See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
encryption:
# Allow encryption, work in group chat rooms with e2ee enabled
allow: false
# Default to encryption, force-enable encryption in all portals the bridge creates
# This will cause the bridge bot to be in private chats for the encryption to work properly.
default: false
# Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
appservice: false
# Require encryption, drop any unencrypted messages.
require: false
# Enable key sharing? If enabled, key requests for rooms that users are in will be fulfilled.
# You must use a client that supports requesting keys from other users to use this feature.
allow_key_sharing: false
# Should user mentions be in the event wire content to enable the server to send push notifications?
plaintext_mentions: false
# Options for deleting megolm sessions from the bridge.
delete_keys:
# Beeper-specific: delete outbound sessions when hungryserv confirms
# that the user has uploaded the key to key backup.
delete_outbound_on_ack: false
# Don't store outbound sessions in the inbound table.
dont_store_outbound: false
# Ratchet megolm sessions forward after decrypting messages.
ratchet_on_decrypt: false
# Delete fully used keys (index >= max_messages) after decrypting messages.
delete_fully_used_on_decrypt: false
# Delete previous megolm sessions from same device when receiving a new one.
delete_prev_on_new_session: false
# Delete megolm sessions received from a device when the device is deleted.
delete_on_device_delete: false
# Periodically delete megolm sessions when 2x max_age has passed since receiving the session.
periodically_delete_expired: false
# Delete inbound megolm sessions that don't have the received_at field used for
# automatic ratcheting and expired session deletion. This is meant as a migration
# to delete old keys prior to the bridge update.
delete_outdated_inbound: false
# What level of device verification should be required from users?
#
# Valid levels:
# unverified - Send keys to all devices in the room.
# cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
# cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
# cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
# Note that creating user signatures from the bridge bot is not currently possible.
# verified - Require manual per-device verification
# (currently only possible by modifying the `trust` column in the `crypto_device` database table).
verification_levels:
# Minimum level the bridge should send keys to when bridging messages from WhatsApp to Matrix.
receive: unverified
# Minimum level that the bridge should accept for incoming Matrix messages.
send: unverified
# Minimum level that the bridge should require for accepting key requests.
share: cross-signed-tofu
# Options for Megolm room key rotation. These options allow you to
# configure the m.room.encryption event content. See:
# https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
# more information about that event.
rotation:
# Enable custom Megolm room key rotation settings. Note that these
# settings will only apply to rooms created after this option is
# set.
enable_custom: false
# The maximum number of milliseconds a session should be used
# before changing it. The Matrix spec recommends 604800000 (a week)
# as the default.
milliseconds: 604800000
# The maximum number of messages that should be sent with a given
# session before changing it. The Matrix spec recommends 100 as the
# default.
messages: 100
# Disable rotating keys when a user's devices change?
# You should not enable this option unless you understand all the implications.
disable_device_change_key_rotation: false
# Settings for provisioning API
provisioning:
# Prefix for the provisioning API paths.
prefix: /_matrix/provision
# Shared secret for authentication. If set to "generate", a random secret will be generated,
# or if set to "disable", the provisioning API will be disabled.
shared_secret: generate
# Enable debug API at /debug with provisioning authentication.
debug_endpoints: false
# Permissions for using the bridge.
# Permitted values:
# relay - Talk through the relaybot (if enabled), no access otherwise
# user - Access to use the bridge to chat with a WhatsApp account.
# admin - User level and some additional administration tools
# Permitted keys:
# * - All Matrix users
# domain - All users on that homeserver
# mxid - Specific user
permissions:
"*": relay
"example.com": user
"@admin:example.com": admin
# Settings for relay mode
relay:
# Whether relay mode should be allowed. If allowed, `!wa set-relay` can be used to turn any
# authenticated user into a relaybot for that chat.
enabled: false
# Should only admins be allowed to set themselves as relay users?
admin_only: true
# The formats to use when sending messages to WhatsApp via the relaybot.
message_formats:
m.text: "<b>{{ .Sender.Displayname }}</b>: {{ .Message }}"
m.notice: "<b>{{ .Sender.Displayname }}</b>: {{ .Message }}"
m.emote: "* <b>{{ .Sender.Displayname }}</b> {{ .Message }}"
m.file: "<b>{{ .Sender.Displayname }}</b> sent a file"
m.image: "<b>{{ .Sender.Displayname }}</b> sent an image"
m.audio: "<b>{{ .Sender.Displayname }}</b> sent an audio file"
m.video: "<b>{{ .Sender.Displayname }}</b> sent a video"
m.location: "<b>{{ .Sender.Displayname }}</b> sent a location"
# Logging config. See https://github.com/tulir/zeroconfig for details.
logging:
min_level: debug
writers:
- type: stdout
format: pretty-colored
- type: file
format: json
filename: ./logs/mautrix-whatsapp.log
max_size: 100
max_backups: 10
compress: true

View File

@@ -1,15 +0,0 @@
apiVersion: v2
name: mysql-cluster
version: 0.2.1
description: Chart for a mysql cluster
keywords:
- database
- mysql
sources:
- https://dev.mysql.com/
- https://github.com/mysql/mysql-operator
- https://github.com/mysql/mysql-operator/tree/trunk/helm/mysql-innodbcluster
maintainers:
- name: alexlebens
icon: https://avatars.githubusercontent.com/u/2452804?s=48&v=4
appVersion: 8.4.0

View File

@@ -1,17 +0,0 @@
## Introduction
[MySQL Operator](https://dev.mysql.com/doc/mysql-operator/en/)
MySQL Operator for Kubernetes manages MySQL InnoDB Cluster setups inside a Kubernetes cluster and handles the full lifecycle, including setup, maintenance, automated upgrades, and backups.
This chart bootstraps a [MySQL InnoDB](https://dev.mysql.com/doc/mysql-operator/en/mysql-operator-innodbcluster.html) cluster on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes
- Helm
- MySQL Operator
## Parameters
See the [values file](values.yaml).
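For orientation, below is a minimal values sketch for a standalone, single-instance cluster. It is not a complete or authoritative configuration: the secret name, storage class, and size are placeholders, and each key shown corresponds to an entry in the chart's values.yaml.

```yaml
mode: standalone
cluster:
  serverInstances: 1
  baseServerId: 1000
  # Placeholder: existing secret with the keys "rootUser", "rootHost", and "rootPassword"
  # (key name spelled as defined in values.yaml)
  exisitingCredentialsSecret: mysql-root-credentials
  image:
    version: 8.4.0
    pullPolicy: IfNotPresent
  router:
    instances: 1
  datadirVolumeClaimTemplate:
    storageClassName: standard   # placeholder storage class
    accessModes: ReadWriteOnce
    size: 10Gi
backup:
  enabled: false
```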

View File

@@ -1,72 +0,0 @@
{{- define "cluster.backup" -}}
{{- if and .Values.backup.enabled .Values.backup.profiles }}
backupProfiles:
{{- $isDumpInstance := false }}
{{- $isSnapshot := false }}
{{- range $_, $profile := .Values.backup.profiles }}
- name: {{ $profile.name | quote }}
{{- if hasKey $profile "podAnnotations" }}
podAnnotations:
{{ toYaml $profile.podAnnotations | nindent 6 }}
{{- end }}
{{- if hasKey $profile "podLabels" }}
podLabels:
{{ toYaml $profile.podLabels | nindent 6 }}
{{- end }}
{{- $isDumpInstance = hasKey $profile "dumpInstance" }}
{{- $isSnapshot = hasKey $profile "snapshot" }}
{{- if or $isDumpInstance $isSnapshot }}
{{- $backupProfile := ternary $profile.dumpInstance $profile.snapshot $isDumpInstance }}
{{- if $isDumpInstance }}
dumpInstance:
{{- else if $isSnapshot }}
snapshot:
{{- else }}
{{- fail "Unsupported or unspecified backup type, must be either snapshot or dumpInstance" }}
{{ end }}
{{- if not (hasKey $backupProfile "storage") }}
{{- fail "backup profile $profile.name has no storage section" }}
{{- else if hasKey $backupProfile.storage "s3" }}
storage:
s3:
{{- if $backupProfile.storage.s3.prefix }}
prefix: {{ $backupProfile.storage.s3.prefix }}
{{- end }}
bucketName: {{ required "bucketName is required" $backupProfile.storage.s3.bucketName }}
config: {{ required "config is required" $backupProfile.storage.s3.config }}
{{- if $backupProfile.storage.s3.profile }}
profile: {{ $backupProfile.storage.s3.profile }}
{{- end }}
{{- if $backupProfile.storage.s3.endpoint }}
endpoint: {{ $backupProfile.storage.s3.endpoint }}
{{- end }}
{{- else if hasKey $backupProfile.storage "persistentVolumeClaim" }}
storage:
persistentVolumeClaim: {{ toYaml $backupProfile.storage.persistentVolumeClaim | nindent 12}}
{{- else -}}
{{- fail "Backup profile $profile.name has empty storage section - neither s3 nor persistentVolumeClaim defined" }}
{{- end -}}
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.backup.schedules }}
backupSchedules:
{{- range $_, $schedule := .Values.backup.schedules }}
- name: {{ $schedule.name | quote }}
enabled: {{ $schedule.enabled }}
schedule: {{ quote $schedule.schedule }}
{{- if ($schedule).timeZone }}
timeZone: {{ quote $schedule.timeZone }}
{{- end }}
deleteBackupData: {{ $schedule.deleteBackupData }}
backupProfileName: {{ $schedule.backupProfileName }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,64 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "cluster.name" -}}
{{- if .Values.global.nameOverride }}
{{- .Values.global.nameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-mysql-%s" .Release.Name ((semver .Values.cluster.image.version).Major | toString) | trunc 63 | trimSuffix "-" -}}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "cluster.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Check for invalid versions
*/}}
{{- $minimalVersion := "8.0.27" }}
{{- $forbiddenVersions := list "8.0.29" }}
{{- $serverVersion := .Values.serverVersion | default .Chart.AppVersion }}
{{- if lt $serverVersion $minimalVersion }}
{{- $err := printf "It is not possible to use MySQL version %s. Please use %s or above" $serverVersion $minimalVersion }}
{{- fail $err }}
{{- end }}
{{- if has $serverVersion $forbiddenVersions }}
{{- $err := printf "It is not possible to use MySQL version %s. Please use %s or above, except %v" $serverVersion $minimalVersion $forbiddenVersions }}
{{- fail $err }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "cluster.labels" -}}
helm.sh/chart: {{ include "cluster.chart" . }}
{{ include "cluster.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "cluster.selectorLabels" -}}
app.kubernetes.io/name: {{ include "cluster.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: cloudnative-pg
{{- end }}
{{/*
Create the name of the service account to use.
*/}}
{{- define "mysql.serviceAccountName" -}}
{{- if .Values.serviceAccount.enabled -}}
{{ default (include "cluster.name" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

View File

@@ -1,47 +0,0 @@
{{- define "cluster.init" -}}
{{- if eq .Values.mode "clone" }}
{{- with .Values.clone }}
initDB:
clone:
donorUrl: {{ required "clone donorUrl is required" .donorUrl }}
rootUser: {{ .rootUser | default "root" }}
secretKeyRef:
name: {{ required "clone credentials is required" .exisitingCredentialsSecret }}
{{- end }}
{{- end }}
{{- if eq .Values.mode "recovery" }}
{{- with .Values.recovery }}
initDB:
dump:
{{- if .name }}
name: {{ .name | quote }}
{{- end }}
{{- if .path }}
path: {{ .path | quote }}
{{- end }}
{{- if .options }}
options: {{ toYaml .options | nindent 8 }}
{{- end }}
storage:
{{- if eq .type "s3" }}
s3:
prefix: {{ required "s3 prefix is required" .s3.prefix }}
bucketName: {{ required "s3 bucketName is required" .s3.bucketName }}
config: {{ required "s3 config is required" .s3.config }}
{{- if .s3.profile }}
profile: {{ .s3.profile }}
{{- end }}
{{- if .s3.endpoint }}
endpoint: {{ .s3.endpoint }}
{{- end }}
{{- end }}
{{- if eq .type "pvc" }}
persistentVolumeClaim:
{{ toYaml .persistentVolumeClaim | nindent 10}}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,75 +0,0 @@
apiVersion: mysql.oracle.com/v2
kind: InnoDBCluster
metadata:
name: {{ include "cluster.name" . }}-cluster
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "cluster.labels" . | nindent 4 }}
{{- include "cluster.selectorLabels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
spec:
instances: {{ required "serverInstances is required" .Values.cluster.serverInstances }}
baseServerId: {{ required "baseServerId is required" .Values.cluster.baseServerId }}
serviceAccountName: {{ include "mysql.serviceAccountName" . }}
imagePullPolicy : {{ .Values.cluster.image.pullPolicy }}
version: {{ .Values.cluster.image.version }}
tlsUseSelfSigned: true
secretName: {{ .Values.cluster.exisitingCredentialsSecret }}
podSpec:
{{- with .Values.cluster.podSpec }}
{{ toYaml . | nindent 4 }}
{{- end }}
podAnnotations:
{{- with .Values.cluster.podAnnotations }}
{{ toYaml . | nindent 4 }}
{{- end }}
podLabels:
{{- with .Values.cluster.podLabels }}
{{ toYaml . | nindent 4 }}
{{- end }}
router:
instances: {{ required "router.instances is required" .Values.cluster.router.instances }}
podSpec:
{{- with .Values.cluster.router.podSpec }}
{{- toYaml . | nindent 6 }}
{{- end }}
podAnnotations:
{{- with .Values.cluster.router.podAnnotations }}
{{- toYaml . | nindent 6 }}
{{- end }}
podLabels:
{{- with .Values.cluster.router.podLabels }}
{{- toYaml . | nindent 6 }}
{{- end }}
tlsSecretName: {{ include "cluster.name" . }}-router-tls
logs:
{{- with .Values.cluster.logs }}
{{ toYaml . | nindent 4 }}
{{- end }}
mycnf: |
{{ .Values.cluster.serverConfig.mycnf | indent 4 }}
{{- if .Values.cluster.datadirVolumeClaimTemplate }}
{{- with .Values.cluster.datadirVolumeClaimTemplate }}
datadirVolumeClaimTemplate:
{{- if .storageClassName }}
storageClassName: {{ .storageClassName | quote }}
{{- end}}
{{- if .accessModes }}
accessModes: [ "{{ .accessModes }}" ]
{{- end }}
{{- if .size }}
resources:
requests:
storage: "{{ .size }}"
{{- end }}
{{- end }}
{{- end }}
{{ include "cluster.init" . | nindent 2 }}
{{ include "cluster.backup" . | nindent 2 }}

View File

@@ -1,21 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "mysql.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "cluster.labels" . | nindent 4 }}
{{- include "cluster.selectorLabels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.serviceAccount.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.serviceAccount.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}

View File

@@ -1,148 +0,0 @@
global:
nameOverride:
labels: {}
annotations: {}
serviceAccount:
enabled: true
labels: {}
annotations: {}
name: ""
###
# Cluster mode of operation. Available modes:
# * `standalone` - Default mode. Creates new or updates an existing cluster.
# * `recovery` - Same as standalone but creates a cluster from a backup
# * `clone` - Create database as a replica from another cluster
mode: standalone
##
# Cluster spec
#
# Reference: https://dev.mysql.com/doc/mysql-operator/en/mysql-operator-properties.html#mysql-operator-spec-innodbclusterspecinitdbdumpstorages3
#
cluster:
serverInstances: 1
baseServerId: 1000
# Existing secret that contains the keys "rootUser", "rootHost", and "rootPassword"
exisitingCredentialsSecret: ""
image:
version: 8.4.0
pullPolicy: IfNotPresent
router:
instances: 1
podSpec: {}
podAnnotations: {}
podLabels: {}
logs:
error:
enabled: true
collect: false
general:
enabled: false
collect: false
slowQuery:
enabled: false
longQueryTime: 2.5
serverConfig:
mycnf: |
[mysqld]
core_file
local_infile=off
mysql_native_password=ON
datadirVolumeClaimTemplate:
storageClassName: ""
accessModes: ""
size: ""
podSpec:
containers:
- name: mysql
resources:
limits:
memory: 1024Mi
cpu: 1000m
requests:
memory: 512Mi
cpu: 100m
podAnnotations: {}
podLabels: {}
##
# Recovery database from storage
#
recovery:
# * `s3` - Restores from s3 object store
# * `pvc` - Restores from persistent volume claim
type:
# -- Name of the dump. Not used by the operator, but a descriptive hint for the cluster administrator
name: ""
# -- Path to the dump in the PVC. Use when specifying persistentVolumeClaim. Omit for ociObjectStorage, S3, or azure.
path: ""
# -- A dictionary of key-value pairs passed directly to MySQL Shell's loadDump()
options: {}
s3:
# -- Path in the bucket where the dump files are stored
prefix: ""
# -- Name of a Secret with S3 configuration and credentials as contained in ~/.aws/config
config: ""
# -- Name of the S3 bucket where the dump is stored
bucketName: ""
# -- Override endpoint URL
endpoint: ""
persistentVolumeClaim: {}
##
# Clone database from another instance
#
clone:
donorUrl: ""
rootUser: root
exisitingCredentialsSecret: ""
##
# Backup database to pvc or s3
#
backup:
enabled: false
profiles:
## -- Example profile that backs up to a local PVC
# - name: pvc-backup
# dumpInstance:
# storage:
# persistentVolumeClaim:
# claimName: backup-volume-claim
## -- Example profile that backs up to an S3 endpoint
# - name: s3-backup
# snapshot:
# storage:
# s3:
# prefix: ""
# config: ""
# bucketName: ""
# endpoint: ""
schedules:
## -- Example schedule that backs up daily
# - name: schedule-daily
# enabled: true
# schedule: "0 0 0 * * *"
# timeZone: "US/Central"
# deleteBackupData: false
# backupProfileName:

View File

@@ -1,18 +0,0 @@
apiVersion: v2
name: outline
version: 0.6.3
description: Chart for Outline wiki
keywords:
- wiki
- documentation
sources:
- https://github.com/outline/outline
- https://github.com/bitnami/charts/tree/main/bitnami/redis
maintainers:
- name: alexlebens
icon: https://avatars.githubusercontent.com/u/1765001?s=48&v=4
dependencies:
- name: redis
repository: https://charts.bitnami.com/bitnami
version: 19.3.4
appVersion: v0.76.1

View File

@@ -1,17 +0,0 @@
## Introduction
[Outline](https://github.com/outline/outline)
The fastest knowledge base for growing teams. Beautiful, realtime collaborative, feature packed, and markdown compatible.
This chart bootstraps an [Outline](https://github.com/outline/outline) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes
- Helm
- Bitnami Redis Chart
## Parameters
See the [values file](values.yaml).
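As a rough sketch only (hostnames and secret names below are placeholders, and just a subset of the values referenced by the chart's templates is shown; everything else falls back to the chart defaults), an install using existing Postgres secrets and local file storage might look like:

```yaml
outline:
  url: https://wiki.example.com              # placeholder URL
  secretKey:
    existingSecretName: outline-secrets      # placeholder secret
    existingSecretKey: secret-key
  utilsSecret:
    existingSecretName: outline-secrets
    existingSecretKey: utils-secret
  database:
    usernameSecret:
      existingSecretName: outline-postgres   # placeholder secret
      existingSecretKey: username
    passwordSecret:
      existingSecretName: outline-postgres
      existingSecretKey: password
    databaseName:
      existingSecretName: outline-postgres
      existingSecretKey: dbname
    databaseHost:
      existingSecretName: outline-postgres
      existingSecretKey: host
    databasePort:
      existingSecretName: outline-postgres
      existingSecretKey: port
persistence:
  type: local
  local:
    localRootDir: /var/lib/outline/data
    storageClassName: standard               # placeholder storage class
    storageSize: 10Gi
ingress:
  enabled: true
  className: nginx                           # placeholder ingress class
  host: wiki.example.com
```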

View File

@@ -1,183 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: outline
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: outline
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: outline
spec:
revisionHistoryLimit: 3
replicas: {{ .Values.deployment.replicas }}
strategy:
type: {{ .Values.deployment.strategy }}
selector:
matchLabels:
app.kubernetes.io/name: outline
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: outline
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
serviceAccountName: outline
automountServiceAccountToken: true
containers:
- name: {{ .Release.Name }}
image: "{{ .Values.deployment.image.repository }}:{{ .Values.deployment.image.tag }}"
imagePullPolicy: {{ .Values.deployment.image.imagePullPolicy }}
ports:
- name: web
containerPort: {{ .Values.service.web.port }}
protocol: TCP
env:
- name: NODE_ENV
value: "{{ .Values.outline.nodeEnv }}"
- name: URL
value: "{{ .Values.outline.url }}"
- name: PORT
value: "{{ .Values.service.web.port }}"
- name: SECRET_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.outline.secretKey.existingSecretName }}"
key: "{{ .Values.outline.secretKey.existingSecretKey }}"
- name: UTILS_SECRET
valueFrom:
secretKeyRef:
name: "{{ .Values.outline.utilsSecret.existingSecretName }}"
key: "{{ .Values.outline.secretKey.existingSecretKey }}"
- name: POSTGRES_USERNAME
valueFrom:
secretKeyRef:
name: "{{ .Values.outline.database.usernameSecret.existingSecretName }}"
key: "{{ .Values.outline.database.usernameSecret.existingSecretKey }}"
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: "{{ .Values.outline.database.passwordSecret.existingSecretName }}"
key: "{{ .Values.outline.database.passwordSecret.existingSecretKey }}"
- name: POSTGRES_DATABASE_NAME
valueFrom:
secretKeyRef:
name: "{{ .Values.outline.database.databaseName.existingSecretName }}"
key: "{{ .Values.outline.database.databaseName.existingSecretKey }}"
- name: POSTGRES_DATABASE_HOST
valueFrom:
secretKeyRef:
name: "{{ .Values.outline.database.databaseHost.existingSecretName }}"
key: "{{ .Values.outline.database.databaseHost.existingSecretKey }}"
- name: POSTGRES_DATABASE_PORT
valueFrom:
secretKeyRef:
name: "{{ .Values.outline.database.databasePort.existingSecretName }}"
key: "{{ .Values.outline.database.databasePort.existingSecretKey }}"
- name: DATABASE_URL
value: "postgres://$(POSTGRES_USERNAME):$(POSTGRES_PASSWORD)@$(POSTGRES_DATABASE_HOST):$(POSTGRES_DATABASE_PORT)/$(POSTGRES_DATABASE_NAME)"
- name: DATABASE_URL_TEST
value: "postgres://$(POSTGRES_USERNAME):$(POSTGRES_PASSWORD)@$(POSTGRES_DATABASE_HOST):$(POSTGRES_DATABASE_PORT)/$(POSTGRES_DATABASE_NAME)-test"
- name: DATABASE_CONNECTION_POOL_MIN
value: "{{ .Values.outline.database.connectionPoolMin }}"
- name: DATABASE_CONNECTION_POOL_MAX
value: "{{ .Values.outline.database.connectionPoolMax }}"
- name: PGSSLMODE
value: "{{ .Values.outline.database.sslMode }}"
- name: REDIS_URL
value: "redis://{{ .Release.Name }}-redis-master:6379"
- name: FILE_STORAGE
value: "{{ .Values.persistence.type }}"
{{- if eq .Values.persistence.type "s3" }}
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: "{{ .Values.persistence.s3.credentialsSecret }}"
key: AWS_ACCESS_KEY_ID
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.persistence.s3.credentialsSecret }}"
key: AWS_SECRET_ACCESS_KEY
- name: AWS_REGION
value: "{{ .Values.persistence.s3.region }}"
- name: AWS_S3_UPLOAD_BUCKET_NAME
value: "{{ .Values.persistence.s3.bucketName }}"
- name: AWS_S3_UPLOAD_BUCKET_URL
value: "{{ .Values.persistence.s3.bucketUrl }}"
- name: AWS_S3_ACCELERATE_URL
value: "{{ .Values.persistence.s3.bucketUrl }}"
- name: AWS_S3_FORCE_PATH_STYLE
value: "{{ .Values.persistence.s3.forcePathStyle }}"
- name: AWS_S3_ACL
value: "{{ .Values.persistence.s3.acl }}"
- name: FILE_STORAGE_UPLOAD_MAX_SIZE
value: "{{ .Values.persistence.s3.uploadMaxSize }}"
{{- else if eq .Values.persistence.type "local" }}
- name: FILE_STORAGE_LOCAL_ROOT_DIR
value: "{{ .Values.persistence.local.localRootDir }}"
- name: FILE_STORAGE_UPLOAD_MAX_SIZE
value: "{{ .Values.persistence.local.uploadMaxSize }}"
{{- end }}
- name: FORCE_HTTPS
value: "{{ .Values.outline.optional.forceHttps }}"
- name: ENABLE_UPDATES
value: "{{ .Values.outline.optional.enableUpdates }}"
- name: WEB_CONCURRENCY
value: "{{ .Values.outline.optional.webConcurrency }}"
- name: FILE_STORAGE_IMPORT_MAX_SIZE
value: "{{ .Values.outline.optional.maximumImportSize }}"
- name: LOG_LEVEL
value: "{{ .Values.outline.optional.logLevel }}"
- name: DEFAULT_LANGUAGE
value: "{{ .Values.outline.optional.defaultLanguage }}"
- name: RATE_LIMITER_ENABLED
value: "{{ .Values.outline.optional.rateLimiter.enabled }}"
- name: RATE_LIMITER_REQUESTS
value: "{{ .Values.outline.optional.rateLimiter.requests }}"
- name: RATE_LIMITER_DURATION_WINDOW
value: "{{ .Values.outline.optional.rateLimiter.durationWindow }}"
- name: DEVELOPMENT_UNSAFE_INLINE_CSP
value: "{{ .Values.outline.optional.developmentUnsafeInlineCsp }}"
{{- if .Values.outline.auth.oidc.enabled }}
- name: OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
name: "{{ .Values.outline.auth.oidc.clientId.existingSecretName }}"
key: "{{ .Values.outline.auth.oidc.clientId.existingSecretKey }}"
- name: OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: "{{ .Values.outline.auth.oidc.clientSecret.existingSecretName }}"
key: "{{ .Values.outline.auth.oidc.clientSecret.existingSecretKey }}"
- name: OIDC_AUTH_URI
value: "{{ .Values.outline.auth.oidc.authUri }}"
- name: OIDC_TOKEN_URI
value: "{{ .Values.outline.auth.oidc.tokenUri }}"
- name: OIDC_USERINFO_URI
value: "{{ .Values.outline.auth.oidc.userinfoUri }}"
- name: OIDC_USERNAME_CLAIM
value: "{{ .Values.outline.auth.oidc.usernameClaim }}"
- name: OIDC_DISPLAY_NAME
value: "{{ .Values.outline.auth.oidc.displayName }}"
- name: OIDC_SCOPES
value: "{{ .Values.outline.auth.oidc.scopes }}"
{{- end }}
resources:
{{- toYaml .Values.deployment.resources | nindent 12 }}
{{- if eq .Values.persistence.type "local" }}
volumeMounts:
- name: "{{ .Release.Name }}-volume-claim"
mountPath: {{ .Values.persistence.local.localRootDir }}
volumes:
- name: "{{ .Release.Name }}-volume-claim"
persistentVolumeClaim:
claimName: "{{ .Release.Name }}-volume-claim"
{{- end }}


@@ -1,32 +0,0 @@
{{- if .Values.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Release.Name }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
annotations:
{{- toYaml .Values.ingress.annotations | nindent 4 }}
spec:
ingressClassName: {{ .Values.ingress.className }}
tls:
- hosts:
- {{ .Values.ingress.host }}
secretName: {{ .Release.Name }}-secret-tls
rules:
- host: {{ .Values.ingress.host }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: {{ .Release.Name }}
port:
name: web
{{- end }}


@@ -1,20 +0,0 @@
{{- if eq .Values.persistence.type "local" }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Release.Name }}-volume-claim
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: outline
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: storage
app.kubernetes.io/part-of: outline
spec:
storageClassName: {{ .Values.persistence.local.storageClassName }}
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.persistence.local.storageSize }}
{{- end }}


@@ -1,11 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: outline
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: outline
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: outline


@@ -1,21 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ .Release.Name }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
type: ClusterIP
ports:
- port: {{ .Values.service.web.port }}
targetPort: web
protocol: TCP
name: web
selector:
app.kubernetes.io/name: {{ .Release.Name }}
app.kubernetes.io/instance: {{ .Release.Name }}


@@ -1,96 +0,0 @@
deployment:
replicas: 1
strategy: Recreate
image:
repository: outlinewiki/outline
tag: "0.76.1"
imagePullPolicy: IfNotPresent
resources:
requests:
memory: 256Mi
cpu: 50m
limits:
memory: 1Gi
cpu: 500m
service:
web:
port: 3000
ingress:
enabled: true
className:
annotations:
host:
persistence:
type: s3
s3:
credentialsSecret:
region:
bucketName:
bucketUrl:
uploadMaxSize: "26214400"
forcePathStyle: false
acl: private
local:
storageClassName: default
storageSize: 50Gi
localRootDir: /var/lib/outline/data
uploadMaxSize: 26214400
outline:
nodeEnv: production
url:
secretKey:
existingSecretName: outline-key-secret
existingSecretKey: secret-key
utilsSecret:
existingSecretName: outline-key-secret
existingSecretKey: utils-key
database:
passwordSecret:
existingSecretName:
existingSecretKey:
usernameSecret:
existingSecretName:
existingSecretKey:
databaseName:
existingSecretName:
existingSecretKey:
databaseHost:
existingSecretName:
existingSecretKey:
databasePort:
existingSecretName:
existingSecretKey:
connectionPoolMin: ""
connectionPoolMax: "20"
sslMode: disable
optional:
forceHttps: false
enableUpdates: false
webConcurrency: 1
maximumImportSize: 5120000
logLevel: info
defaultLanguage: en_US
rateLimiter:
enabled: false
requests: 1000
durationWindow: 60
developmentUnsafeInlineCsp: false
auth:
oidc:
enabled: true
clientId:
existingSecretName: outline-auth-secret
existingSecretKey: oidc-client-id
clientSecret:
existingSecretName: outline-auth-secret
existingSecretKey: oidc-client-secret
authUri:
tokenUri:
userinfoUri:
usernameClaim:
displayName:
scopes: openid profile email
redis:
architecture: standalone
auth:
enabled: false
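
The defaults above expect the Outline key material and, when OIDC is enabled, the client credentials to live in pre-existing Kubernetes secrets rather than in the chart values. As a hedged sketch only, a secret matching the default secretKey/utilsSecret references (secret and key names taken from the values above; the values themselves are placeholders, typically generated with something like openssl rand -hex 32) could look like:

apiVersion: v1
kind: Secret
metadata:
  name: outline-key-secret
stringData:
  secret-key: <generated 64-character hex value>
  utils-key: <generated 64-character hex value>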


@@ -1,13 +0,0 @@
apiVersion: v2
name: penpot
version: 0.1.0
description: Chart for Penpot
keywords:
- penpot
- design
sources:
- https://github.com/penpot/penpot
maintainers:
- name: alexlebens
icon: https://avatars.githubusercontent.com/u/30179644?s=200&v=4
appVersion: 2.0.1


@@ -1,16 +0,0 @@
## Introduction
[Penpot](https://github.com/penpot/penpot)
Penpot is the first Open Source design and prototyping platform meant for cross-domain teams. Not tied to any particular operating system, Penpot is web-based and works with open standards (SVG). Penpot invites designers all over the world to fall in love with open source while getting developers excited about the design process in return.
This chart bootstraps a [Penpot](https://github.com/penpot/penpot) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes
- Helm
## Parameters
See the [values file](values.yaml).
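
As an illustrative sketch only (keys taken from this chart's values and templates shown in this diff; the URI, hostnames, and credentials are placeholders), an override file for an install might look like:

config:
  publicURI: https://penpot.example.com
  postgresql:
    host: penpot-postgresql
    port: 5432
    database: penpot
    username: penpot
    password: penpot
  redis:
    host: penpot-redis-master
    port: 6379
    database: "0"

Such a file would then be passed to `helm install` with `-f`.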


@@ -1,72 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "penpot.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "penpot.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
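{{/* Example: a release named "penpot" (which contains the chart name) renders the fullname "penpot"; a release named "design" renders "design-penpot". */}}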
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "penpot.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels.
*/}}
{{- define "penpot.labels" -}}
helm.sh/chart: {{ include "penpot.chart" . }}
app.kubernetes.io/name: {{ include "penpot.name" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels.
*/}}
{{- define "penpot.frontendSelectorLabels" -}}
app.kubernetes.io/name: {{ include "penpot.name" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "penpot.backendSelectorLabels" -}}
app.kubernetes.io/name: {{ include "penpot.name" . }}-backend
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "penpot.exporterSelectorLabels" -}}
app.kubernetes.io/name: {{ include "penpot.name" . }}-exporter
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use.
*/}}
{{- define "penpot.serviceAccountName" -}}
{{- if .Values.serviceAccount.enabled -}}
{{ default (include "penpot.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}


@@ -1,129 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: "{{ include "penpot.fullname" . }}-frontend-nginx"
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
data:
nginx.conf: |
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
worker_connections 2048;
# multi_accept on;
}
http {
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_requests 30;
keepalive_timeout 65;
types_hash_max_size 2048;
server_tokens off;
reset_timedout_connection on;
client_body_timeout 30s;
client_header_timeout 30s;
include /etc/nginx/mime.types;
default_type application/octet-stream;
error_log /dev/stdout;
access_log /dev/stdout;
gzip on;
gzip_vary on;
gzip_proxied any;
gzip_static on;
gzip_comp_level 4;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_types text/plain text/css text/javascript application/javascript application/json application/transit+json;
resolver 127.0.0.11;
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen 80 default_server;
server_name _;
client_max_body_size 100M;
charset utf-8;
proxy_http_version 1.1;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
etag off;
root /var/www/app/;
location ~* \.(js|css).*$ {
add_header Cache-Control "max-age=86400" always; # 24 hours
}
location ~* \.(html).*$ {
add_header Cache-Control "no-cache, max-age=0" always;
}
location /api/export {
proxy_pass http://{{ include "penpot.fullname" . }}-exporter:6061;
}
location /api {
proxy_pass http://{{ include "penpot.fullname" . }}-backend:6060/api;
}
location /ws/notifications {
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_pass http://{{ include "penpot.fullname" . }}-backend:6060/ws/notifications;
}
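# @handle_redirect re-proxies storage redirects (301/302/307) returned by the
# backend for /assets, following $upstream_http_location directly and hiding
# the object-store (x-amz-*) headers from the client.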
location @handle_redirect {
set $redirect_uri "$upstream_http_location";
set $redirect_host "$upstream_http_x_host";
set $redirect_cache_control "$upstream_http_cache_control";
proxy_buffering off;
proxy_set_header Host "$redirect_host";
proxy_hide_header etag;
proxy_hide_header x-amz-id-2;
proxy_hide_header x-amz-request-id;
proxy_hide_header x-amz-meta-server-side-encryption;
proxy_hide_header x-amz-server-side-encryption;
proxy_pass $redirect_uri;
add_header x-internal-redirect "$redirect_uri";
add_header x-cache-control "$redirect_cache_control";
add_header cache-control "$redirect_cache_control";
}
location /assets {
proxy_pass http://{{ include "penpot.fullname" . }}-backend:6060/assets;
recursive_error_pages on;
proxy_intercept_errors on;
error_page 301 302 307 = @handle_redirect;
}
location /internal/assets {
internal;
alias /opt/data/assets;
add_header x-internal-redirect "$upstream_http_x_accel_redirect";
}
}
}


@@ -1,378 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "penpot.fullname" . }}-backend
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.backend.replicaCount }}
selector:
matchLabels:
{{- include "penpot.backendSelectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "penpot.backendSelectorLabels" . | nindent 8 }}
spec:
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{ if .Values.backend.podSecurityContext.enabled }}
securityContext:
{{- omit .Values.backend.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "penpot.serviceAccountName" . }}
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/instance
operator: In
values:
- {{ .Release.Name }}
topologyKey: "kubernetes.io/hostname"
containers:
- name: {{ .Chart.Name }}-backend
{{ if .Values.backend.containerSecurityContext.enabled }}
securityContext:
{{- omit .Values.backend.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
image: "{{ .Values.backend.image.repository }}:{{ .Values.backend.image.tag }}"
imagePullPolicy: {{ .Values.backend.image.imagePullPolicy }}
volumeMounts:
- mountPath: /opt/data
name: app-data
readOnly: false
env:
- name: PENPOT_PUBLIC_URI
value: {{ .Values.config.publicURI | quote }}
- name: PENPOT_FLAGS
value: "$PENPOT_FLAGS {{ .Values.config.flags }}"
- name: PENPOT_SECRET_KEY
valueFrom:
secretKeyRef:
name: {{ .Values.config.apiSecretKey.existingSecretName }}
key: {{ .Values.config.apiSecretKey.existingSecretKey }}
- name: PENPOT_DATABASE_URI
value: "postgresql://{{ .Values.config.postgresql.host }}:{{ .Values.config.postgresql.port }}/{{ .Values.config.postgresql.database }}"
- name: PENPOT_DATABASE_USERNAME
{{- if not .Values.config.postgresql.secretKeys.usernameKey }}
value: {{ .Values.config.postgresql.username | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.postgresql.existingSecret }}
key: {{ .Values.config.postgresql.secretKeys.usernameKey }}
{{- end }}
- name: PENPOT_DATABASE_PASSWORD
{{- if not .Values.config.postgresql.secretKeys.passwordKey }}
value: {{ .Values.config.postgresql.password | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.postgresql.existingSecret }}
key: {{ .Values.config.postgresql.secretKeys.passwordKey }}
{{- end }}
- name: PENPOT_REDIS_URI
value: "redis://{{ .Values.config.redis.host }}:{{ .Values.config.redis.port }}/{{ .Values.config.redis.database }}"
- name: PENPOT_ASSETS_STORAGE_BACKEND
value: {{ .Values.config.assets.storageBackend | quote }}
{{- if eq .Values.config.assets.storageBackend "assets-fs" }}
- name: PENPOT_STORAGE_ASSETS_FS_DIRECTORY
value: {{ .Values.config.assets.filesystem.directory | quote }}
{{- else if eq .Values.config.assets.storageBackend "assets-s3" }}
- name: PENPOT_STORAGE_ASSETS_S3_REGION
value: {{ .Values.config.assets.s3.region | quote }}
- name: PENPOT_STORAGE_ASSETS_S3_BUCKET
value: {{ .Values.config.assets.s3.bucket | quote }}
- name: AWS_ACCESS_KEY_ID
{{- if not .Values.config.assets.s3.secretKeys.accessKeyIDKey }}
value: {{ .Values.config.assets.s3.accessKeyID | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.assets.s3.existingSecret }}
key: {{ .Values.config.assets.s3.secretKeys.accessKeyIDKey }}
{{- end }}
- name: AWS_SECRET_ACCESS_KEY
{{- if not .Values.config.assets.s3.secretKeys.secretAccessKey }}
value: {{ .Values.config.assets.s3.secretAccessKey | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.assets.s3.existingSecret }}
key: {{ .Values.config.assets.s3.secretKeys.secretAccessKey }}
{{- end }}
- name: PENPOT_STORAGE_ASSETS_S3_ENDPOINT
{{- if not .Values.config.assets.s3.secretKeys.endpointURIKey }}
value: {{ .Values.config.assets.s3.endpointURI | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.assets.s3.existingSecret }}
key: {{ .Values.config.assets.s3.secretKeys.endpointURIKey }}
{{- end }}
{{- end }}
- name: PENPOT_TELEMETRY_ENABLED
value: {{ .Values.config.telemetryEnabled | quote }}
{{- if .Values.config.smtp.enabled }}
{{- if .Values.config.smtp.defaultFrom }}
- name: PENPOT_SMTP_DEFAULT_FROM
value: {{ .Values.config.smtp.defaultFrom | quote }}
{{- end }}
{{- if .Values.config.smtp.defaultReplyTo }}
- name: PENPOT_SMTP_DEFAULT_REPLY_TO
value: {{ .Values.config.smtp.defaultReplyTo | quote }}
{{- end }}
{{- if .Values.config.smtp.host }}
- name: PENPOT_SMTP_HOST
value: {{ .Values.config.smtp.host | quote }}
{{- end }}
{{- if .Values.config.smtp.port }}
- name: PENPOT_SMTP_PORT
value: {{ .Values.config.smtp.port | quote }}
{{- end }}
{{- if not .Values.config.smtp.secretKeys.usernameKey }}
- name: PENPOT_SMTP_USERNAME
value: {{ .Values.config.smtp.username | quote }}
{{- else }}
- name: PENPOT_SMTP_USERNAME
valueFrom:
secretKeyRef:
name: {{ .Values.config.smtp.existingSecret }}
key: {{ .Values.config.smtp.secretKeys.usernameKey }}
{{- end }}
{{- if not .Values.config.smtp.secretKeys.passwordKey }}
- name: PENPOT_SMTP_PASSWORD
value: {{ .Values.config.smtp.password | quote }}
{{- else }}
- name: PENPOT_SMTP_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.config.smtp.existingSecret }}
key: {{ .Values.config.smtp.secretKeys.passwordKey }}
{{- end }}
{{- if .Values.config.smtp.tls }}
- name: PENPOT_SMTP_TLS
value: {{ .Values.config.smtp.tls | quote }}
{{- end }}
{{- if .Values.config.smtp.ssl }}
- name: PENPOT_SMTP_SSL
value: {{ .Values.config.smtp.ssl | quote }}
{{- end }}
{{- end }}
{{- if .Values.config.registrationDomainWhitelist }}
- name: PENPOT_REGISTRATION_DOMAIN_WHITELIST
value: {{ .Values.config.registrationDomainWhitelist | quote }}
{{- end }}
{{- if .Values.config.providers.google.enabled }}
{{- if not .Values.config.providers.secretKeys.googleClientIDKey }}
- name: PENPOT_GOOGLE_CLIENT_ID
value: {{ .Values.config.providers.google.clientID | quote }}
{{- else }}
- name: PENPOT_GOOGLE_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.googleClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.googleClientSecretKey}}
- name: PENPOT_GOOGLE_CLIENT_SECRET
value: {{ .Values.config.providers.google.clientSecret | quote }}
{{- else }}
- name: PENPOT_GOOGLE_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.googleClientSecretKey }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.github.enabled }}
{{- if not .Values.config.providers.secretKeys.githubClientIDKey }}
- name: PENPOT_GITHUB_CLIENT_ID
value: {{ .Values.config.providers.github.clientID | quote }}
{{- else }}
- name: PENPOT_GITHUB_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.githubClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.githubClientSecretKey }}
- name: PENPOT_GITHUB_CLIENT_SECRET
value: {{ .Values.config.providers.github.clientSecret | quote }}
{{- else }}
- name: PENPOT_GITHUB_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.githubClientSecretKey }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.gitlab.enabled }}
{{- if .Values.config.providers.gitlab.baseURI }}
- name: PENPOT_GITLAB_BASE_URI
value: {{ .Values.config.providers.gitlab.baseURI | quote }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.gitlabClientIDKey }}
- name: PENPOT_GITLAB_CLIENT_ID
value: {{ .Values.config.providers.gitlab.clientID | quote }}
{{- else }}
- name: PENPOT_GITLAB_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.gitlabClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.gitlabClientSecretKey }}
- name: PENPOT_GITLAB_CLIENT_SECRET
value: {{ .Values.config.providers.gitlab.clientSecret | quote }}
{{- else }}
- name: PENPOT_GITLAB_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.gitlabClientSecretKey }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.oidc.enabled }}
{{- if .Values.config.providers.oidc.baseURI }}
- name: PENPOT_OIDC_BASE_URI
value: {{ .Values.config.providers.oidc.baseURI | quote }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.oidcClientIDKey }}
- name: PENPOT_OIDC_CLIENT_ID
value: {{ .Values.config.providers.oidc.clientID | quote}}
{{- else }}
- name: PENPOT_OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.oidcClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.oidcClientSecretKey}}
- name: PENPOT_OIDC_CLIENT_SECRET
value: {{ .Values.config.providers.oidc.clientSecret | quote }}
{{- else }}
- name: PENPOT_OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.oidcClientSecretKey }}
{{- end }}
{{- if .Values.config.providers.oidc.authURI }}
- name: PENPOT_OIDC_AUTH_URI
value: {{ .Values.config.providers.oidc.authURI | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.tokenURI }}
- name: PENPOT_OIDC_TOKEN_URI
value: {{ .Values.config.providers.oidc.tokenURI | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.userURI }}
- name: PENPOT_OIDC_USER_URI
value: {{ .Values.config.providers.oidc.userURI | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.roles }}
- name: PENPOT_OIDC_ROLES
value: {{ .Values.config.providers.oidc.roles | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.rolesAttribute }}
- name: PENPOT_OIDC_ROLES_ATTR
value: {{ .Values.config.providers.oidc.rolesAttribute | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.scopes }}
- name: PENPOT_OIDC_SCOPES
value: {{ .Values.config.providers.oidc.scopes | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.nameAttribute }}
- name: PENPOT_OIDC_NAME_ATTR
value: {{ .Values.config.providers.oidc.nameAttribute | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.emailAttribute }}
- name: PENPOT_OIDC_EMAIL_ATTR
value: {{ .Values.config.providers.oidc.emailAttribute | quote }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.ldap.enabled }}
{{- if .Values.config.providers.ldap.host }}
- name: PENPOT_LDAP_HOST
value: {{ .Values.config.providers.ldap.host | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.port }}
- name: PENPOT_LDAP_PORT
value: {{ .Values.config.providers.ldap.port | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.ssl }}
- name: PENPOT_LDAP_SSL
value: {{ .Values.config.providers.ldap.ssl | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.startTLS }}
- name: PENPOT_LDAP_STARTTLS
value: {{ .Values.config.providers.ldap.startTLS | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.baseDN }}
- name: PENPOT_LDAP_BASE_DN
value: {{ .Values.config.providers.ldap.baseDN | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.bindDN }}
- name: PENPOT_LDAP_BIND_DN
value: {{ .Values.config.providers.ldap.bindDN | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.bindPassword }}
- name: PENPOT_LDAP_BIND_PASSWORD
value: {{ .Values.config.providers.ldap.bindPassword | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesUsername }}
- name: PENPOT_LDAP_ATTRS_USERNAME
value: {{ .Values.config.providers.ldap.attributesUsername | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesEmail }}
- name: PENPOT_LDAP_ATTRS_EMAIL
value: {{ .Values.config.providers.ldap.attributesEmail | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesFullname }}
- name: PENPOT_LDAP_ATTRS_FULLNAME
value: {{ .Values.config.providers.ldap.attributesFullname | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesPhoto }}
- name: PENPOT_LDAP_ATTRS_PHOTO
value: {{ .Values.config.providers.ldap.attributesPhoto | quote }}
{{- end }}
{{- end }}
ports:
- name: http
containerPort: {{ .Values.backend.service.port }}
protocol: TCP
resources:
{{- toYaml .Values.backend.resources | nindent 12 }}
{{- with .Values.backend.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.backend.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.backend.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: app-data
{{- if .Values.persistence.enabled }}
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default ( include "penpot.fullname" . ) }}
{{- else }}
emptyDir: {}
{{- end }}


@@ -1,353 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "penpot.fullname" . }}-exporter
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.exporter.replicaCount }}
selector:
matchLabels:
{{- include "penpot.exporterSelectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "penpot.exporterSelectorLabels" . | nindent 8 }}
spec:
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "penpot.serviceAccountName" . }}
{{ if .Values.exporter.podSecurityContext.enabled }}
securityContext:
{{- omit .Values.exporter.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}-exporter
{{ if .Values.exporter.containerSecurityContext.enabled }}
securityContext:
{{- omit .Values.exporter.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
image: "{{ .Values.exporter.image.repository }}:{{ .Values.exporter.image.tag }}"
imagePullPolicy: {{ .Values.exporter.image.imagePullPolicy }}
env:
- name: PENPOT_PUBLIC_URI
value: {{ .Values.config.publicURI | quote }}
- name: PENPOT_FLAGS
value: "$PENPOT_FLAGS {{ .Values.config.flags }}"
- name: PENPOT_SECRET_KEY
value: {{ .Values.config.apiSecretKey | quote }}
- name: PENPOT_DATABASE_URI
value: "postgresql://{{ .Values.config.postgresql.host }}:{{ .Values.config.postgresql.port }}/{{ .Values.config.postgresql.database }}"
- name: PENPOT_DATABASE_USERNAME
{{- if not .Values.config.postgresql.secretKeys.usernameKey }}
value: {{ .Values.config.postgresql.username | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.postgresql.existingSecret }}
key: {{ .Values.config.postgresql.secretKeys.usernameKey }}
{{- end }}
- name: PENPOT_DATABASE_PASSWORD
{{- if not .Values.config.postgresql.secretKeys.passwordKey }}
value: {{ .Values.config.postgresql.password | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.postgresql.existingSecret }}
key: {{ .Values.config.postgresql.secretKeys.passwordKey }}
{{- end }}
- name: PENPOT_REDIS_URI
value: "redis://{{ .Values.config.redis.host }}:{{ .Values.config.redis.port }}/{{ .Values.config.redis.database }}"
- name: PENPOT_ASSETS_STORAGE_BACKEND
value: {{ .Values.config.assets.storageBackend | quote }}
{{- if eq .Values.config.assets.storageBackend "assets-fs" }}
- name: PENPOT_STORAGE_ASSETS_FS_DIRECTORY
value: {{ .Values.config.assets.filesystem.directory | quote }}
{{- else if eq .Values.config.assets.storageBackend "assets-s3" }}
- name: PENPOT_STORAGE_ASSETS_S3_REGION
value: {{ .Values.config.assets.s3.region | quote }}
- name: PENPOT_STORAGE_ASSETS_S3_BUCKET
value: {{ .Values.config.assets.s3.bucket | quote }}
- name: AWS_ACCESS_KEY_ID
{{- if not .Values.config.assets.s3.secretKeys.accessKeyIDKey }}
value: {{ .Values.config.assets.s3.accessKeyID | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.assets.s3.existingSecret }}
key: {{ .Values.config.assets.s3.secretKeys.accessKeyIDKey }}
{{- end }}
- name: AWS_SECRET_ACCESS_KEY
{{- if not .Values.config.assets.s3.secretKeys.secretAccessKey }}
value: {{ .Values.config.assets.s3.secretAccessKey | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.assets.s3.existingSecret }}
key: {{ .Values.config.assets.s3.secretKeys.secretAccessKey }}
{{- end }}
- name: PENPOT_STORAGE_ASSETS_S3_ENDPOINT
{{- if not .Values.config.assets.s3.secretKeys.endpointURIKey }}
value: {{ .Values.config.assets.s3.endpointURI | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.assets.s3.existingSecret }}
key: {{ .Values.config.assets.s3.secretKeys.endpointURIKey }}
{{- end }}
{{- end }}
- name: PENPOT_TELEMETRY_ENABLED
value: {{ .Values.config.telemetryEnabled | quote }}
{{- if .Values.config.smtp.enabled }}
{{- if .Values.config.smtp.defaultFrom }}
- name: PENPOT_SMTP_DEFAULT_FROM
value: {{ .Values.config.smtp.defaultFrom | quote }}
{{- end }}
{{- if .Values.config.smtp.defaultReplyTo }}
- name: PENPOT_SMTP_DEFAULT_REPLY_TO
value: {{ .Values.config.smtp.defaultReplyTo | quote }}
{{- end }}
{{- if .Values.config.smtp.host }}
- name: PENPOT_SMTP_HOST
value: {{ .Values.config.smtp.host | quote }}
{{- end }}
{{- if .Values.config.smtp.port }}
- name: PENPOT_SMTP_PORT
value: {{ .Values.config.smtp.port | quote }}
{{- end }}
{{- if not .Values.config.smtp.secretKeys.usernameKey }}
- name: PENPOT_SMTP_USERNAME
value: {{ .Values.config.smtp.username | quote }}
{{- else }}
- name: PENPOT_SMTP_USERNAME
valueFrom:
secretKeyRef:
name: {{ .Values.config.smtp.existingSecret }}
key: {{ .Values.config.smtp.secretKeys.usernameKey }}
{{- end }}
{{- if not .Values.config.smtp.secretKeys.passwordKey }}
- name: PENPOT_SMTP_PASSWORD
value: {{ .Values.config.smtp.password | quote }}
{{- else }}
- name: PENPOT_SMTP_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.config.smtp.existingSecret }}
key: {{ .Values.config.smtp.secretKeys.passwordKey }}
{{- end }}
{{- if .Values.config.smtp.tls }}
- name: PENPOT_SMTP_TLS
value: {{ .Values.config.smtp.tls | quote }}
{{- end }}
{{- if .Values.config.smtp.ssl }}
- name: PENPOT_SMTP_SSL
value: {{ .Values.config.smtp.ssl | quote }}
{{- end }}
{{- end }}
{{- if .Values.config.registrationDomainWhitelist }}
- name: PENPOT_REGISTRATION_DOMAIN_WHITELIST
value: {{ .Values.config.registrationDomainWhitelist | quote }}
{{- end }}
{{- if .Values.config.providers.google.enabled }}
{{- if not .Values.config.providers.secretKeys.googleClientIDKey }}
- name: PENPOT_GOOGLE_CLIENT_ID
value: {{ .Values.config.providers.google.clientID | quote }}
{{- else }}
- name: PENPOT_GOOGLE_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.googleClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.googleClientSecretKey}}
- name: PENPOT_GOOGLE_CLIENT_SECRET
value: {{ .Values.config.providers.google.clientSecret | quote }}
{{- else }}
- name: PENPOT_GOOGLE_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.googleClientSecretKey }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.github.enabled }}
{{- if not .Values.config.providers.secretKeys.githubClientIDKey }}
- name: PENPOT_GITHUB_CLIENT_ID
value: {{ .Values.config.providers.github.clientID | quote }}
{{- else }}
- name: PENPOT_GITHUB_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.githubClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.githubClientSecretKey }}
- name: PENPOT_GITHUB_CLIENT_SECRET
value: {{ .Values.config.providers.github.clientSecret | quote }}
{{- else }}
- name: PENPOT_GITHUB_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.githubClientSecretKey }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.gitlab.enabled }}
{{- if .Values.config.providers.gitlab.baseURI }}
- name: PENPOT_GITLAB_BASE_URI
value: {{ .Values.config.providers.gitlab.baseURI | quote }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.gitlabClientIDKey }}
- name: PENPOT_GITLAB_CLIENT_ID
value: {{ .Values.config.providers.gitlab.clientID | quote }}
{{- else }}
- name: PENPOT_GITLAB_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.gitlabClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.gitlabClientSecretKey }}
- name: PENPOT_GITLAB_CLIENT_SECRET
value: {{ .Values.config.providers.gitlab.clientSecret | quote }}
{{- else }}
- name: PENPOT_GITLAB_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.gitlabClientSecretKey }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.oidc.enabled }}
{{- if .Values.config.providers.oidc.baseURI }}
- name: PENPOT_OIDC_BASE_URI
value: {{ .Values.config.providers.oidc.baseURI | quote }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.oidcClientIDKey }}
- name: PENPOT_OIDC_CLIENT_ID
value: {{ .Values.config.providers.oidc.clientID | quote}}
{{- else }}
- name: PENPOT_OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.oidcClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.oidcClientSecretKey}}
- name: PENPOT_OIDC_CLIENT_SECRET
value: {{ .Values.config.providers.oidc.clientSecret | quote }}
{{- else }}
- name: PENPOT_OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.oidcClientSecretKey }}
{{- end }}
{{- if .Values.config.providers.oidc.authURI }}
- name: PENPOT_OIDC_AUTH_URI
value: {{ .Values.config.providers.oidc.authURI | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.tokenURI }}
- name: PENPOT_OIDC_TOKEN_URI
value: {{ .Values.config.providers.oidc.tokenURI | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.userURI }}
- name: PENPOT_OIDC_USER_URI
value: {{ .Values.config.providers.oidc.userURI | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.roles }}
- name: PENPOT_OIDC_ROLES
value: {{ .Values.config.providers.oidc.roles | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.rolesAttribute }}
- name: PENPOT_OIDC_ROLES_ATTR
value: {{ .Values.config.providers.oidc.rolesAttribute | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.scopes }}
- name: PENPOT_OIDC_SCOPES
value: {{ .Values.config.providers.oidc.scopes | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.nameAttribute }}
- name: PENPOT_OIDC_NAME_ATTR
value: {{ .Values.config.providers.oidc.nameAttribute | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.emailAttribute }}
- name: PENPOT_OIDC_EMAIL_ATTR
value: {{ .Values.config.providers.oidc.emailAttribute | quote }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.ldap.enabled }}
{{- if .Values.config.providers.ldap.host }}
- name: PENPOT_LDAP_HOST
value: {{ .Values.config.providers.ldap.host | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.port }}
- name: PENPOT_LDAP_PORT
value: {{ .Values.config.providers.ldap.port | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.ssl }}
- name: PENPOT_LDAP_SSL
value: {{ .Values.config.providers.ldap.ssl | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.startTLS }}
- name: PENPOT_LDAP_STARTTLS
value: {{ .Values.config.providers.ldap.startTLS | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.baseDN }}
- name: PENPOT_LDAP_BASE_DN
value: {{ .Values.config.providers.ldap.baseDN | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.bindDN }}
- name: PENPOT_LDAP_BIND_DN
value: {{ .Values.config.providers.ldap.bindDN | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.bindPassword }}
- name: PENPOT_LDAP_BIND_PASSWORD
value: {{ .Values.config.providers.ldap.bindPassword | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesUsername }}
- name: PENPOT_LDAP_ATTRS_USERNAME
value: {{ .Values.config.providers.ldap.attributesUsername | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesEmail }}
- name: PENPOT_LDAP_ATTRS_EMAIL
value: {{ .Values.config.providers.ldap.attributesEmail | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesFullname }}
- name: PENPOT_LDAP_ATTRS_FULLNAME
value: {{ .Values.config.providers.ldap.attributesFullname | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesPhoto }}
- name: PENPOT_LDAP_ATTRS_PHOTO
value: {{ .Values.config.providers.ldap.attributesPhoto | quote }}
{{- end }}
{{- end }}
ports:
- name: http
containerPort: {{ .Values.exporter.service.port }}
protocol: TCP
resources:
{{- toYaml .Values.exporter.resources | nindent 12 }}
{{- with .Values.exporter.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.exporter.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.exporter.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}


@@ -1,375 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "penpot.fullname" . }}-frontend
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.frontend.replicaCount }}
selector:
matchLabels:
{{- include "penpot.frontendSelectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "penpot.frontendSelectorLabels" . | nindent 8 }}
spec:
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "penpot.serviceAccountName" . }}
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/instance
operator: In
values:
- {{ .Release.Name }}
topologyKey: "kubernetes.io/hostname"
containers:
- name: {{ .Chart.Name }}-frontend
image: "{{ .Values.frontend.image.repository }}:{{ .Values.frontend.image.tag }}"
imagePullPolicy: {{ .Values.frontend.image.imagePullPolicy }}
env:
- name: PENPOT_PUBLIC_URI
value: {{ .Values.config.publicURI | quote }}
- name: PENPOT_FLAGS
value: "$PENPOT_FLAGS {{ .Values.config.flags }}"
- name: PENPOT_SECRET_KEY
value: {{ .Values.config.apiSecretKey | quote }}
- name: PENPOT_DATABASE_URI
value: "postgresql://{{ .Values.config.postgresql.host }}:{{ .Values.config.postgresql.port }}/{{ .Values.config.postgresql.database }}"
- name: PENPOT_DATABASE_USERNAME
{{- if not .Values.config.postgresql.secretKeys.usernameKey }}
value: {{ .Values.config.postgresql.username | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.postgresql.existingSecret }}
key: {{ .Values.config.postgresql.secretKeys.usernameKey }}
{{- end }}
- name: PENPOT_DATABASE_PASSWORD
{{- if not .Values.config.postgresql.secretKeys.passwordKey }}
value: {{ .Values.config.postgresql.password | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.postgresql.existingSecret }}
key: {{ .Values.config.postgresql.secretKeys.passwordKey }}
{{- end }}
- name: PENPOT_REDIS_URI
value: "redis://{{ .Values.config.redis.host }}:{{ .Values.config.redis.port }}/{{ .Values.config.redis.database }}"
- name: PENPOT_ASSETS_STORAGE_BACKEND
value: {{ .Values.config.assets.storageBackend | quote }}
{{- if eq .Values.config.assets.storageBackend "assets-fs" }}
- name: PENPOT_STORAGE_ASSETS_FS_DIRECTORY
value: {{ .Values.config.assets.filesystem.directory | quote }}
{{- else if eq .Values.config.assets.storageBackend "assets-s3" }}
- name: PENPOT_STORAGE_ASSETS_S3_REGION
value: {{ .Values.config.assets.s3.region | quote }}
- name: PENPOT_STORAGE_ASSETS_S3_BUCKET
value: {{ .Values.config.assets.s3.bucket | quote }}
- name: AWS_ACCESS_KEY_ID
{{- if not .Values.config.assets.s3.secretKeys.accessKeyIDKey }}
value: {{ .Values.config.assets.s3.accessKeyID | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.assets.s3.existingSecret }}
key: {{ .Values.config.assets.s3.secretKeys.accessKeyIDKey }}
{{- end }}
- name: AWS_SECRET_ACCESS_KEY
{{- if not .Values.config.assets.s3.secretKeys.secretAccessKey }}
value: {{ .Values.config.assets.s3.secretAccessKey | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.assets.s3.existingSecret }}
key: {{ .Values.config.assets.s3.secretKeys.secretAccessKey }}
{{- end }}
- name: PENPOT_STORAGE_ASSETS_S3_ENDPOINT
{{- if not .Values.config.assets.s3.secretKeys.endpointURIKey }}
value: {{ .Values.config.assets.s3.endpointURI | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.assets.s3.existingSecret }}
key: {{ .Values.config.assets.s3.secretKeys.endpointURIKey }}
{{- end }}
{{- end }}
- name: PENPOT_TELEMETRY_ENABLED
value: {{ .Values.config.telemetryEnabled | quote }}
{{- if .Values.config.smtp.enabled }}
{{- if .Values.config.smtp.defaultFrom }}
- name: PENPOT_SMTP_DEFAULT_FROM
value: {{ .Values.config.smtp.defaultFrom | quote }}
{{- end }}
{{- if .Values.config.smtp.defaultReplyTo }}
- name: PENPOT_SMTP_DEFAULT_REPLY_TO
value: {{ .Values.config.smtp.defaultReplyTo | quote }}
{{- end }}
{{- if .Values.config.smtp.host }}
- name: PENPOT_SMTP_HOST
value: {{ .Values.config.smtp.host | quote }}
{{- end }}
{{- if .Values.config.smtp.port }}
- name: PENPOT_SMTP_PORT
value: {{ .Values.config.smtp.port | quote }}
{{- end }}
{{- if not .Values.config.smtp.secretKeys.usernameKey }}
- name: PENPOT_SMTP_USERNAME
value: {{ .Values.config.smtp.username | quote }}
{{- else }}
- name: PENPOT_SMTP_USERNAME
valueFrom:
secretKeyRef:
name: {{ .Values.config.smtp.existingSecret }}
key: {{ .Values.config.smtp.secretKeys.usernameKey }}
{{- end }}
{{- if not .Values.config.smtp.secretKeys.passwordKey }}
- name: PENPOT_SMTP_PASSWORD
value: {{ .Values.config.smtp.password | quote }}
{{- else }}
- name: PENPOT_SMTP_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.config.smtp.existingSecret }}
key: {{ .Values.config.smtp.secretKeys.passwordKey }}
{{- end }}
{{- if .Values.config.smtp.tls }}
- name: PENPOT_SMTP_TLS
value: {{ .Values.config.smtp.tls | quote }}
{{- end }}
{{- if .Values.config.smtp.ssl }}
- name: PENPOT_SMTP_SSL
value: {{ .Values.config.smtp.ssl | quote }}
{{- end }}
{{- end }}
{{- if .Values.config.registrationDomainWhitelist }}
- name: PENPOT_REGISTRATION_DOMAIN_WHITELIST
value: {{ .Values.config.registrationDomainWhitelist | quote }}
{{- end }}
{{- if .Values.config.providers.google.enabled }}
{{- if not .Values.config.providers.secretKeys.googleClientIDKey }}
- name: PENPOT_GOOGLE_CLIENT_ID
value: {{ .Values.config.providers.google.clientID | quote }}
{{- else }}
- name: PENPOT_GOOGLE_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.googleClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.googleClientSecretKey}}
- name: PENPOT_GOOGLE_CLIENT_SECRET
value: {{ .Values.config.providers.google.clientSecret | quote }}
{{- else }}
- name: PENPOT_GOOGLE_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.googleClientSecretKey }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.github.enabled }}
{{- if not .Values.config.providers.secretKeys.githubClientIDKey }}
- name: PENPOT_GITHUB_CLIENT_ID
value: {{ .Values.config.providers.github.clientID | quote }}
{{- else }}
- name: PENPOT_GITHUB_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.githubClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.githubClientSecretKey }}
- name: PENPOT_GITHUB_CLIENT_SECRET
value: {{ .Values.config.providers.github.clientSecret | quote }}
{{- else }}
- name: PENPOT_GITHUB_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.githubClientSecretKey }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.gitlab.enabled }}
{{- if .Values.config.providers.gitlab.baseURI }}
- name: PENPOT_GITLAB_BASE_URI
value: {{ .Values.config.providers.gitlab.baseURI | quote }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.gitlabClientIDKey }}
- name: PENPOT_GITLAB_CLIENT_ID
value: {{ .Values.config.providers.gitlab.clientID | quote }}
{{- else }}
- name: PENPOT_GITLAB_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.gitlabClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.gitlabClientSecretKey }}
- name: PENPOT_GITLAB_CLIENT_SECRET
value: {{ .Values.config.providers.gitlab.clientSecret | quote }}
{{- else }}
- name: PENPOT_GITLAB_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.gitlabClientSecretKey }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.oidc.enabled }}
{{- if .Values.config.providers.oidc.baseURI }}
- name: PENPOT_OIDC_BASE_URI
value: {{ .Values.config.providers.oidc.baseURI | quote }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.oidcClientIDKey }}
- name: PENPOT_OIDC_CLIENT_ID
value: {{ .Values.config.providers.oidc.clientID | quote}}
{{- else }}
- name: PENPOT_OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.oidcClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.oidcClientSecretKey}}
- name: PENPOT_OIDC_CLIENT_SECRET
value: {{ .Values.config.providers.oidc.clientSecret | quote }}
{{- else }}
- name: PENPOT_OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.oidcClientSecretKey }}
{{- end }}
{{- if .Values.config.providers.oidc.authURI }}
- name: PENPOT_OIDC_AUTH_URI
value: {{ .Values.config.providers.oidc.authURI | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.tokenURI }}
- name: PENPOT_OIDC_TOKEN_URI
value: {{ .Values.config.providers.oidc.tokenURI | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.userURI }}
- name: PENPOT_OIDC_USER_URI
value: {{ .Values.config.providers.oidc.userURI | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.roles }}
- name: PENPOT_OIDC_ROLES
value: {{ .Values.config.providers.oidc.roles | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.rolesAttribute }}
- name: PENPOT_OIDC_ROLES_ATTR
value: {{ .Values.config.providers.oidc.rolesAttribute | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.scopes }}
- name: PENPOT_OIDC_SCOPES
value: {{ .Values.config.providers.oidc.scopes | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.nameAttribute }}
- name: PENPOT_OIDC_NAME_ATTR
value: {{ .Values.config.providers.oidc.nameAttribute | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.emailAttribute }}
- name: PENPOT_OIDC_EMAIL_ATTR
value: {{ .Values.config.providers.oidc.emailAttribute | quote }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.ldap.enabled }}
{{- if .Values.config.providers.ldap.host }}
- name: PENPOT_LDAP_HOST
value: {{ .Values.config.providers.ldap.host | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.port }}
- name: PENPOT_LDAP_PORT
value: {{ .Values.config.providers.ldap.port | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.ssl }}
- name: PENPOT_LDAP_SSL
value: {{ .Values.config.providers.ldap.ssl | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.startTLS }}
- name: PENPOT_LDAP_STARTTLS
value: {{ .Values.config.providers.ldap.startTLS | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.baseDN }}
- name: PENPOT_LDAP_BASE_DN
value: {{ .Values.config.providers.ldap.baseDN | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.bindDN }}
- name: PENPOT_LDAP_BIND_DN
value: {{ .Values.config.providers.ldap.bindDN | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.bindPassword }}
- name: PENPOT_LDAP_BIND_PASSWORD
value: {{ .Values.config.providers.ldap.bindPassword | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesUsername }}
- name: PENPOT_LDAP_ATTRS_USERNAME
value: {{ .Values.config.providers.ldap.attributesUsername | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesEmail }}
- name: PENPOT_LDAP_ATTRS_EMAIL
value: {{ .Values.config.providers.ldap.attributesEmail | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesFullname }}
- name: PENPOT_LDAP_ATTRS_FULLNAME
value: {{ .Values.config.providers.ldap.attributesFullname | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesPhoto }}
- name: PENPOT_LDAP_ATTRS_PHOTO
value: {{ .Values.config.providers.ldap.attributesPhoto | quote }}
{{- end }}
{{- end }}
volumeMounts:
- mountPath: /opt/data
name: app-data
readOnly: false
- mountPath: /etc/nginx/nginx.conf
name: "{{ include "penpot.fullname" . }}-frontend-nginx"
readOnly: true
subPath: nginx.conf
ports:
- name: http
containerPort: {{ .Values.frontend.service.port }}
protocol: TCP
resources:
{{- toYaml .Values.frontend.resources | nindent 12 }}
{{- with .Values.frontend.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.frontend.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.frontend.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: app-data
{{- if .Values.persistence.enabled }}
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default ( include "penpot.fullname" . ) }}
{{- else }}
emptyDir: {}
{{- end }}
- configMap:
defaultMode: 420
name: "{{ include "penpot.fullname" . }}-frontend-nginx"
name: "{{ include "penpot.fullname" . }}-frontend-nginx"


@@ -1,53 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $gitVersion := .Capabilities.KubeVersion.GitVersion -}}
{{- $fullName := include "penpot.fullname" . -}}
{{- $svcPort := .Values.frontend.service.port -}}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{ if semverCompare ">=1.19-0" $gitVersion }}
- path: /
pathType: Prefix
backend:
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{ else }}
- path: /
backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}


@@ -1,24 +0,0 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "penpot.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
{{- if .Values.persistence.annotations }}
annotations:
{{ toYaml .Values.persistence.annotations | indent 4 }}
{{- end }}
spec:
accessModes:
{{- range .Values.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClass }}
storageClassName: "{{ .Values.persistence.storageClass }}"
{{- end }}
{{- end -}}


@@ -1,13 +0,0 @@
{{- if .Values.serviceAccount.enabled -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "penpot.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}


@@ -1,52 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "penpot.fullname" . }}-backend
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
spec:
type: {{ .Values.backend.service.type }}
ports:
- port: {{ .Values.backend.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "penpot.backendSelectorLabels" . | nindent 4 }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "penpot.fullname" . }}-exporter
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
spec:
type: {{ .Values.exporter.service.type }}
ports:
- port: {{ .Values.exporter.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "penpot.exporterSelectorLabels" . | nindent 4 }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "penpot.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
spec:
type: {{ .Values.frontend.service.type }}
ports:
- port: {{ .Values.frontend.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "penpot.frontendSelectorLabels" . | nindent 4 }}


@@ -1,468 +0,0 @@
## Default values for Penpot
## @section Global parameters
## @param global.postgresqlEnabled Whether to deploy the Bitnami PostgreSQL chart as subchart. Check [the official chart](https://artifacthub.io/packages/helm/bitnami/postgresql) for configuration.
## @param global.redisEnabled Whether to deploy the Bitnami Redis chart as subchart. Check [the official chart](https://artifacthub.io/packages/helm/bitnami/redis) for configuration.
## @param global.imagePullSecrets Global Docker registry secret names as an array.
##
global:
## E.g.
## imagePullSecrets:
## - myRegistryKeySecretName
##
imagePullSecrets: []
## @section Common parameters
## @param nameOverride String to partially override common.names.fullname
##
nameOverride: ""
## @param fullnameOverride String to fully override common.names.fullname
##
fullnameOverride: ""
## @param serviceAccount.enabled Specifies whether a ServiceAccount should be created.
## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`.
## @param serviceAccount.name The name of the ServiceAccount to use. If not set and enabled is true, a name is generated using the fullname template.
##
serviceAccount:
enabled: true
annotations: {}
name: ""
## @section Backend parameters
## Penpot Backend
##
backend:
## @param backend.image.repository The Docker repository to pull the image from.
## @param backend.image.tag The image tag to use.
## @param backend.image.imagePullPolicy The image pull policy to use.
##
image:
repository: penpotapp/backend
tag: 2.0.1
imagePullPolicy: IfNotPresent
## @param backend.replicaCount The number of replicas to deploy.
##
replicaCount: 1
## @param backend.service.type The service type to create.
## @param backend.service.port The service port to use.
##
service:
type: ClusterIP
port: 6060
## Configure Pods Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param backend.podSecurityContext.enabled Enabled Penpot pods' security context
## @param backend.podSecurityContext.fsGroup Set Penpot pod's security context fsGroup
##
podSecurityContext:
enabled: true
fsGroup: 1001
## Configure Container Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param backend.containerSecurityContext.enabled Enabled Penpot containers' security context
## @param backend.containerSecurityContext.runAsUser Set Penpot containers' security context runAsUser
## @param backend.containerSecurityContext.allowPrivilegeEscalation Set Penpot containers' security context allowPrivilegeEscalation
## @param backend.containerSecurityContext.capabilities.drop Set Penpot containers' security context capabilities to be dropped
## @param backend.containerSecurityContext.readOnlyRootFilesystem Set Penpot containers' security context readOnlyRootFilesystem
## @param backend.containerSecurityContext.runAsNonRoot Set Penpot container's security context runAsNonRoot
##
containerSecurityContext:
enabled: true
runAsUser: 1001
allowPrivilegeEscalation: false
capabilities:
drop:
- all
readOnlyRootFilesystem: false
runAsNonRoot: true
## @param backend.affinity Affinity for Penpot pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## @param backend.nodeSelector Node labels for Penpot pods assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## @param backend.tolerations Tolerations for Penpot pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Penpot backend resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## @param backend.resources.limits The resources limits for the Penpot backend containers
## @param backend.resources.requests The requested resources for the Penpot backend containers
##
resources:
limits: {}
requests: {}
## @section Frontend parameters
## Penpot Frontend
##
frontend:
## @param frontend.image.repository The Docker repository to pull the image from.
## @param frontend.image.tag The image tag to use.
## @param frontend.image.imagePullPolicy The image pull policy to use.
##
image:
repository: penpotapp/frontend
tag: 2.0.1
imagePullPolicy: IfNotPresent
## @param frontend.replicaCount The number of replicas to deploy.
##
replicaCount: 1
## @param frontend.service.type The service type to create.
## @param frontend.service.port The service port to use.
##
service:
type: ClusterIP
port: 80
## @param frontend.affinity Affinity for Penpot pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## @param frontend.nodeSelector Node labels for Penpot pods assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## @param frontend.tolerations Tolerations for Penpot pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Penpot frontend resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## @param frontend.resources.limits The resources limits for the Penpot frontend containers
## @param frontend.resources.requests The requested resources for the Penpot frontend containers
##
resources:
limits: {}
requests: {}
## @section Exporter parameters
## Penpot Exporter
##
exporter:
## @param exporter.image.repository The Docker repository to pull the image from.
## @param exporter.image.tag The image tag to use.
## @param exporter.image.imagePullPolicy The image pull policy to use.
##
image:
repository: penpotapp/exporter
tag: 2.0.1
imagePullPolicy: IfNotPresent
## @param exporter.replicaCount The number of replicas to deploy.
##
replicaCount: 1
## @param exporter.service.type The service type to create.
## @param exporter.service.port The service port to use.
##
service:
type: ClusterIP
port: 6061
## Configure Pods Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param exporter.podSecurityContext.enabled Enabled Penpot pods' security context
## @param exporter.podSecurityContext.fsGroup Set Penpot pod's security context fsGroup
##
podSecurityContext:
enabled: true
fsGroup: 1001
## Configure Container Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param exporter.containerSecurityContext.enabled Enabled Penpot containers' security context
## @param exporter.containerSecurityContext.runAsUser Set Penpot containers' security context runAsUser
## @param exporter.containerSecurityContext.allowPrivilegeEscalation Set Penpot containers' security context allowPrivilegeEscalation
## @param exporter.containerSecurityContext.capabilities.drop Set Penpot containers' security context capabilities to be dropped
## @param exporter.containerSecurityContext.readOnlyRootFilesystem Set Penpot containers' security context readOnlyRootFilesystem
## @param exporter.containerSecurityContext.runAsNonRoot Set Penpot container's security context runAsNonRoot
##
containerSecurityContext:
enabled: true
runAsUser: 1001
allowPrivilegeEscalation: false
capabilities:
drop:
- all
readOnlyRootFilesystem: false
runAsNonRoot: true
## @param exporter.affinity Affinity for Penpot pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## @param exporter.nodeSelector Node labels for Penpot pods assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## @param exporter.tolerations Tolerations for Penpot pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Penpot exporter resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## @param exporter.resources.limits The resources limits for the Penpot exporter containers
## @param exporter.resources.requests The requested resources for the Penpot exporter containers
##
resources:
limits: {}
requests: {}
## @section Ingress parameters
## @param frontend.ingress.enabled Enable ingress record generation for Penpot frontend.
## @param frontend.ingress.annotations Mapped annotations for the frontend ingress.
## @param frontend.ingress.hosts Array style hosts for the frontend ingress.
## @param frontend.ingress.tls Array style TLS secrets for the frontend ingress.
##
ingress:
enabled: false
## E.g.
## annotations:
## kubernetes.io/ingress.class: nginx
## kubernetes.io/tls-acme: "true"
##
annotations:
{}
## E.g.
## hosts:
## - host: penpot-example.local
hosts: []
## E.g.
## - secretName: chart-example-tls
## hosts:
## - chart-example.local
tls: []
## @section Persistence parameters
## Penpot persistence
##
persistence:
## @param persistence.enabled Enable persistence using Persistent Volume Claims.
##
enabled: false
## @param persistence.storageClass Persistent Volume storage class.
## If defined, storageClassName: <storageClass>.
## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner.
##
storageClass: ""
## @param persistence.size Persistent Volume size.
##
size: 8Gi
## @param persistence.existingClaim The name of an existing PVC to use for persistence.
##
existingClaim: ""
## @param persistence.accessModes Persistent Volume access modes.
##
accessModes:
- ReadWriteOnce
## @param persistence.annotations Persistent Volume Claim annotations.
##
annotations: {}
## @section Configuration parameters
## Penpot configuration
##
config:
## @param config.publicURI The public domain to serve Penpot on. Set `disable-secure-session-cookies` in the flags if you plan on serving it on a non-HTTPS domain.
## @param config.flags The feature flags to enable. Check [the official docs](https://help.penpot.app/technical-guide/configuration/) for more info.
## @param config.apiSecretKey A random secret key needed for persistent user sessions. Generate with `openssl rand -hex 16` for example.
##
publicURI: "http://localhost:8080"
flags: "enable-registration enable-login disable-demo-users disable-demo-warning"
apiSecretKey:
existingSecretName: ""
existingSecretKey: ""
## @param config.postgresql.host The PostgreSQL host to connect to.
## @param config.postgresql.port The PostgreSQL host port to use.
## @param config.postgresql.database The PostgreSQL database to use.
## @param config.postgresql.username The database username to use.
## @param config.postgresql.password The database password to use.
## @param config.postgresql.existingSecret The name of an existing secret.
## @param config.postgresql.secretKeys.usernameKey The username key to use from an existing secret.
## @param config.postgresql.secretKeys.passwordKey The password key to use from an existing secret.
##
postgresql:
host: "postgresql.penpot.svc.cluster.local"
port: 5432
username: ""
password: ""
database: ""
existingSecret: ""
secretKeys:
usernameKey: ""
passwordKey: ""
## @param config.redis.host The Redis host to connect to.
## @param config.redis.port The Redis host port to use.
## @param config.redis.database The Redis database to connect to.
##
redis:
host: "redis-headless.penpot.svc.cluster.local"
port: 6379
database: "0"
## @param config.assets.storageBackend The storage backend for assets to use. Use `assets-fs` for filesystem, and `assets-s3` for S3.
## @param config.assets.filesystem.directory The storage directory to use if you chose the filesystem storage backend.
## @param config.assets.s3.accessKeyID The S3 access key ID to use if you chose the S3 storage backend.
## @param config.assets.s3.secretAccessKey The S3 secret access key to use if you chose the S3 storage backend.
## @param config.assets.s3.region The S3 region to use if you chose the S3 storage backend.
## @param config.assets.s3.bucket The name of the S3 bucket to use if you chose the S3 storage backend.
## @param config.assets.s3.endpointURI The S3 endpoint URI to use if you chose the S3 storage backend.
## @param config.assets.s3.existingSecret The name of an existing secret.
## @param config.assets.s3.secretKeys.accessKeyIDKey The S3 access key ID to use from an existing secret.
## @param config.assets.s3.secretKeys.secretAccessKey The S3 secret access key to use from an existing secret.
## @param config.assets.s3.secretKeys.endpointURIKey The S3 endpoint URI to use from an existing secret.
##
assets:
storageBackend: "assets-fs"
filesystem:
directory: "/opt/data/assets"
s3:
accessKeyID: ""
secretAccessKey: ""
region: ""
bucket: ""
endpointURI: ""
existingSecret: ""
secretKeys:
accessKeyIDKey: ""
secretAccessKey: ""
endpointURIKey: ""
## @param config.telemetryEnabled Whether to enable sending of anonymous telemetry data.
##
telemetryEnabled: true
## @param config.smtp.enabled Whether to enable SMTP configuration. You also need to add the 'enable-smtp' flag to the PENPOT_FLAGS variable.
## @param config.smtp.defaultFrom The SMTP default email to send from.
## @param config.smtp.defaultReplyTo The SMTP default email to reply to.
## @param config.smtp.host The SMTP host to use.
## @param config.smtp.port The SMTP host port to use.
## @param config.smtp.username The SMTP username to use.
## @param config.smtp.password The SMTP password to use.
## @param config.smtp.tls Whether to use TLS for the SMTP connection.
## @param config.smtp.ssl Whether to use SSL for the SMTP connection.
## @param config.smtp.existingSecret The name of an existing secret.
## @param config.smtp.secretKeys.usernameKey The SMTP username to use from an existing secret.
## @param config.smtp.secretKeys.passwordKey The SMTP password to use from an existing secret.
##
smtp:
enabled: false
defaultFrom: ""
defaultReplyTo: ""
host: ""
port: ""
username: ""
password: ""
tls: true
ssl: false
existingSecret: ""
secretKeys:
usernameKey: ""
passwordKey: ""
## @param config.registrationDomainWhitelist Comma separated list of allowed domains to register. Empty to allow all domains.
##
registrationDomainWhitelist: ""
## Penpot Authentication providers parameters
##
providers:
## @param config.providers.google.enabled Whether to enable Google configuration. To enable Google auth, add `enable-login-with-google` to the flags.
## @param config.providers.google.clientID The Google client ID to use. To enable Google auth, add `enable-login-with-google` to the flags.
## @param config.providers.google.clientSecret The Google client secret to use. To enable Google auth, add `enable-login-with-google` to the flags.
##
google:
enabled: false
clientID: ""
clientSecret: ""
## @param config.providers.github.enabled Whether to enable GitHub configuration. To enable GitHub auth, also add `enable-login-with-github` to the flags.
## @param config.providers.github.clientID The GitHub client ID to use.
## @param config.providers.github.clientSecret The GitHub client secret to use.
##
github:
enabled: false
clientID: ""
clientSecret: ""
## @param config.providers.gitlab.enabled Whether to enable GitLab configuration. To enable GitLab auth, also add `enable-login-with-gitlab` to the flags.
## @param config.providers.gitlab.baseURI The GitLab base URI to use.
## @param config.providers.gitlab.clientID The GitLab client ID to use.
## @param config.providers.gitlab.clientSecret The GitLab client secret to use.
##
gitlab:
enabled: false
baseURI: "https://gitlab.com"
clientID: ""
clientSecret: ""
## @param config.providers.oidc.enabled Whether to enable OIDC configuration. To enable OpenID Connect auth, also add `enable-login-with-oidc` to the flags.
## @param config.providers.oidc.baseURI The OpenID Connect base URI to use.
## @param config.providers.oidc.clientID The OpenID Connect client ID to use.
## @param config.providers.oidc.clientSecret The OpenID Connect client secret to use.
## @param config.providers.oidc.authURI Optional OpenID Connect auth URI to use. Auto discovered if not provided.
## @param config.providers.oidc.tokenURI Optional OpenID Connect token URI to use. Auto discovered if not provided.
## @param config.providers.oidc.userURI Optional OpenID Connect user URI to use. Auto discovered if not provided.
## @param config.providers.oidc.roles Optional OpenID Connect roles to use. If no role is provided, role checking is disabled.
## @param config.providers.oidc.rolesAttribute Optional OpenID Connect roles attribute to use. If not provided, role checking will be disabled.
## @param config.providers.oidc.scopes Optional OpenID Connect scopes to use. This setting allows overriding the required scopes; use with caution because Penpot requires at least the `name` and `email` attributes in the user info. Optional, defaults to `openid profile`.
## @param config.providers.oidc.nameAttribute Optional OpenID Connect name attribute to use. If not provided, the `name` prop will be used.
## @param config.providers.oidc.emailAttribute Optional OpenID Connect email attribute to use. If not provided, the `email` prop will be used.
##
oidc:
enabled: false
baseURI: ""
clientID: ""
clientSecret: ""
authURI: ""
tokenURI: ""
userURI: ""
roles: "role1 role2"
rolesAttribute: ""
scopes: "scope1 scope2"
nameAttribute: ""
emailAttribute: ""
## @param config.providers.ldap.enabled Whether to enable LDAP configuration. To enable LDAP, also add `enable-login-with-ldap` to the flags.
## @param config.providers.ldap.host The LDAP host to use.
## @param config.providers.ldap.port The LDAP port to use.
## @param config.providers.ldap.ssl Whether to use SSL for the LDAP connection.
## @param config.providers.ldap.startTLS Whether to utilize StartTLS for the LDAP connection.
## @param config.providers.ldap.baseDN The LDAP base DN to use.
## @param config.providers.ldap.bindDN The LDAP bind DN to use.
## @param config.providers.ldap.bindPassword The LDAP bind password to use.
## @param config.providers.ldap.attributesUsername The LDAP attributes username to use.
## @param config.providers.ldap.attributesEmail The LDAP attributes email to use.
## @param config.providers.ldap.attributesFullname The LDAP attributes fullname to use.
## @param config.providers.ldap.attributesPhoto The LDAP attributes photo format to use.
##
ldap:
enabled: false
host: "ldap"
port: 10389
ssl: false
startTLS: false
baseDN: "ou=people,dc=planetexpress,dc=com"
bindDN: "cn=admin,dc=planetexpress,dc=com"
bindPassword: "GoodNewsEveryone"
attributesUsername: "uid"
attributesEmail: "mail"
attributesFullname: "cn"
attributesPhoto: "jpegPhoto"
## @param config.providers.existingSecret The name of an existing secret to use.
## @param config.providers.secretKeys.googleClientIDKey The Google client ID key to use from an existing secret.
## @param config.providers.secretKeys.googleClientSecretKey The Google client secret key to use from an existing secret.
## @param config.providers.secretKeys.githubClientIDKey The GitHub client ID key to use from an existing secret.
## @param config.providers.secretKeys.githubClientSecretKey The GitHub client secret key to use from an existing secret.
## @param config.providers.secretKeys.gitlabClientIDKey The GitLab client ID key to use from an existing secret.
## @param config.providers.secretKeys.gitlabClientSecretKey The GitLab client secret key to use from an existing secret.
## @param config.providers.secretKeys.oidcClientIDKey The OpenID Connect client ID key to use from an existing secret.
## @param config.providers.secretKeys.oidcClientSecretKey The OpenID Connect client secret key to use from an existing secret.
##
existingSecret: ""
secretKeys:
googleClientIDKey: ""
googleClientSecretKey: ""
githubClientIDKey: ""
githubClientSecretKey: ""
gitlabClientIDKey: ""
gitlabClientSecretKey: ""
oidcClientIDKey: ""
oidcClientSecretKey: ""
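
The asset-storage parameters above support either a local filesystem or an S3 backend. As a hedged illustration only (bucket, region, endpoint, and secret names are placeholders, not chart defaults), switching to S3-backed storage with credentials read from an existing secret could look like this:

```yaml
config:
  assets:
    storageBackend: "assets-s3"
    s3:
      region: "us-east-1"                      # placeholder
      bucket: "penpot-assets"                  # placeholder
      endpointURI: "https://s3.example.com"    # placeholder
      existingSecret: "penpot-s3-credentials"  # placeholder secret name
      secretKeys:
        accessKeyIDKey: "access-key-id"        # keys inside the existing secret
        secretAccessKey: "secret-access-key"
        endpointURIKey: ""
```

The plain `accessKeyID`/`secretAccessKey` values stay empty in this sketch because the existing secret supplies them.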


@@ -1,6 +1,6 @@
apiVersion: v2
name: postgres-cluster
version: 3.1.0
version: 4.1.4
description: Chart for cloudnative-pg cluster
keywords:
- database
@@ -10,4 +10,4 @@ sources:
maintainers:
- name: alexlebens
icon: https://avatars.githubusercontent.com/u/100373852?s=48&v=4
appVersion: v1.22.2
appVersion: v1.25.0


@@ -1,17 +1,82 @@
## Introduction
# postgres-cluster
[CloudNative PG](https://github.com/cloudnative-pg/cloudnative-pg)
![Version: 4.1.4](https://img.shields.io/badge/Version-4.1.4-informational?style=flat-square) ![AppVersion: v1.25.0](https://img.shields.io/badge/AppVersion-v1.25.0-informational?style=flat-square)
CloudNativePG is the Kubernetes operator that covers the full lifecycle of a highly available PostgreSQL database cluster with a primary/standby architecture, using native streaming replication.
Chart for cloudnative-pg cluster
This chart bootstraps a [CNPG](https://github.com/cloudnative-pg/cloudnative-pg) cluster on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Maintainers
## Prerequisites
| Name | Email | Url |
| ---- | ------ | --- |
| alexlebens | | |
- Kubernetes
- Helm
- CloudNative PG Operator
## Source Code
## Parameters
* <https://github.com/cloudnative-pg/cloudnative-pg>
See the [values files](values.yaml).
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| backup.backupIndex | int | `1` | Generate external cluster name, creates: postgresql-{{ .Release.Name }}-cluster-backup-index-{{ .Values.backups.backupIndex }}" |
| backup.backupName | string | `""` | Name of the backup cluster in the object store, defaults to "cluster.name" |
| backup.data.compression | string | `"snappy"` | Data compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`. |
| backup.data.encryption | string | `""` | Whether to instruct the storage provider to encrypt data files. One of `` (use the storage container default), `AES256` or `aws:kms`. |
| backup.data.jobs | int | `1` | Number of data files to be archived or restored in parallel. |
| backup.destinationPath | string | `""` | S3 path starting with "s3://" |
| backup.enabled | bool | `false` | |
| backup.endpointCA | string | `""` | Specifies secret that contains a CA bundle to validate a privately signed certificate, should contain the key ca-bundle.crt |
| backup.endpointCredentials | string | `""` | Specifies secret that contains S3 credentials, should contain the keys ACCESS_KEY_ID and ACCESS_SECRET_KEY |
| backup.endpointURL | string | `""` | S3 endpoint starting with "https://" |
| backup.historyTags.backupRetentionPolicy | string | `""` | |
| backup.retentionPolicy | string | `"7d"` | Retention policy for backups |
| backup.schedule | string | `"0 0 */3 * *"` | Scheduled backup in cron format |
| backup.tags | object | `{"backupRetentionPolicy":""}` | Tags to add to backups. Add in key value beneath the type. |
| backup.wal.compression | string | `"snappy"` | WAL compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`. |
| backup.wal.encryption | string | `""` | Whether to instruct the storage provider to encrypt WAL files. One of `` (use the storage container default), `AES256` or `aws:kms`. |
| backup.wal.maxParallel | int | `1` | Number of WAL files to be archived or restored in parallel. |
| bootstrap | object | `{"initdb":{}}` | Bootstrap is the configuration of the bootstrap process when initdb is used. See: https://cloudnative-pg.io/documentation/current/bootstrap/ See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-bootstrapinitdb |
| bootstrap.initdb | object | `{}` | Example values database: app owner: app secret: "" # Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch postInitApplicationSQL: - CREATE TABLE IF NOT EXISTS example; |
| cluster.additionalLabels | object | `{}` | |
| cluster.affinity | object | `{"enablePodAntiAffinity":true,"topologyKey":"kubernetes.io/hostname"}` | See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-AffinityConfiguration |
| cluster.annotations | object | `{}` | |
| cluster.enableSuperuserAccess | bool | `false` | Create secret containing credentials of superuser |
| cluster.image | object | `{"pullPolicy":"IfNotPresent","repository":"ghcr.io/cloudnative-pg/postgresql","tag":"17.2-22"}` | Default image |
| cluster.instances | int | `3` | |
| cluster.logLevel | string | `"info"` | |
| cluster.monitoring | object | `{"enabled":false,"podMonitor":{"enabled":true},"prometheusRule":{"enableDefaultRules":true,"enabled":false,"excludeRules":[]}}` | Enable default monitoring and alert rules |
| cluster.postgresGID | int | `26` | |
| cluster.postgresUID | int | `26` | The UID and GID of the postgres user inside the image |
| cluster.postgresql | object | `{"parameters":{"hot_standby_feedback":"on","max_slot_wal_keep_size":"2000MB","shared_buffers":"128MB"},"shared_preload_libraries":[]}` | Parameters to be set for the database itself See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-PostgresConfiguration |
| cluster.primaryUpdateMethod | string | `"switchover"` | Method to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated. It can be switchover (default) or in-place (restart). |
| cluster.primaryUpdateStrategy | string | `"unsupervised"` | Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been successfully updated: it can be automated (unsupervised - default) or manual (supervised) |
| cluster.priorityClassName | string | `""` | |
| cluster.resources | object | `{"limits":{"cpu":"1","hugepages-2Mi":"256Mi"},"requests":{"cpu":"100m","memory":"256Mi"}}` | Default resources |
| cluster.storage.size | string | `"10Gi"` | |
| cluster.storage.storageClass | string | `""` | |
| cluster.walStorage | object | `{"size":"2Gi","storageClass":""}` | Default storage size |
| mode | string | `"standalone"` | Cluster mode of operation. Available modes: * `standalone` - Default mode. Creates new or updates an existing CNPG cluster. * `recovery` - Same as standalone but creates a cluster from a backup, object store or via pg_basebackup * `replica` - Create database as a replica from another CNPG cluster |
| nameOverride | string | `""` | Override the name of the cluster |
| recovery | object | `{"data":{"compression":"snappy","encryption":"","jobs":1},"destinationPath":"","endpointCA":"","endpointCredentials":"","endpointURL":"","pitrTarget":{"time":""},"recoveryIndex":1,"recoveryInstanceName":"","recoveryServerName":"","wal":{"compression":"snappy","encryption":"","maxParallel":1}}` | Recovery settings when booting cluster from external cluster |
| recovery.data.compression | string | `"snappy"` | Data compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`. |
| recovery.data.encryption | string | `""` | Whether to instruct the storage provider to encrypt data files. One of `` (use the storage container default), `AES256` or `aws:kms`. |
| recovery.data.jobs | int | `1` | Number of data files to be archived or restored in parallel. |
| recovery.endpointCA | string | `""` | Specifies secret that contains a CA bundle to validate a privately signed certificate, should contain the key ca-bundle.crt |
| recovery.endpointCredentials | string | `""` | Specifies secret that contains S3 credentials, should contain the keys ACCESS_KEY_ID and ACCESS_SECRET_KEY |
| recovery.endpointURL | string | `""` | S3 https endpoint and the s3:// path |
| recovery.pitrTarget | object | `{"time":""}` | Point in time recovery target in RFC3339 format |
| recovery.recoveryIndex | int | `1` | Generate external cluster name, uses: {{ .Release.Name }}postgresql-<major version>-cluster-backup-index-{{ .Values.recovery.recoveryIndex }} |
| recovery.recoveryInstanceName | string | `""` | Name of the recovery cluster in the object store, defaults to ".Release.Name" |
| recovery.recoveryServerName | string | `""` | Name of the recovery cluster in the object store, defaults to "cluster.name" |
| recovery.wal.compression | string | `"snappy"` | WAL compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`. |
| recovery.wal.encryption | string | `""` | Whether to instruct the storage provider to encrypt WAL files. One of `` (use the storage container default), `AES256` or `aws:kms`. |
| recovery.wal.maxParallel | int | `1` | Number of WAL files to be archived or restored in parallel. |
| replica.externalCluster | object | `{"connectionParameters":{"dbname":"app","host":"postgresql","user":"app"},"password":{"key":"password","name":"postgresql"}}` | External cluster connection, password specifies a secret name and the key containing the password value |
| replica.importDatabases | list | `["app"]` | If type microservice only one database is allowed, default is app as standard in cnpg clusters |
| replica.importRoles | list | `[]` | If type microservice no roles are imported and ignored |
| replica.importType | string | `"microservice"` | See [here](https://cloudnative-pg.io/documentation/current/database_import/) for different import types * `microservice` - Single database import as expected from cnpg clusters * `monolith` - Import multiple databases and roles |
| replica.postImportApplicationSQL | list | `[]` | If import type is monolith postImportApplicationSQL is not supported and ignored |
| type | string | `"postgresql"` | Type of the CNPG database. Available types: * `postgresql` * `postgis` * `timescaledb` * `tensorchord` |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)
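
As a minimal sketch of how these parameters fit together (the values below are illustrative choices, not additional defaults), a standalone PostgreSQL cluster with monitoring enabled might be configured as:

```yaml
# values.yaml (sketch)
type: postgresql
mode: standalone
cluster:
  instances: 3
  storage:
    size: 10Gi
    storageClass: ""     # empty selects the default provisioner
  monitoring:
    enabled: true
    podMonitor:
      enabled: true
backup:
  enabled: false
```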


@@ -11,6 +11,14 @@ backup:
key: ca-bundle.crt
{{- end }}
serverName: "{{ include "cluster.name" . }}-backup-{{ .Values.backup.backupIndex }}"
tags:
{{- with .Values.backup.tags }}
{{- . | toYaml | nindent 6 }}
{{- end }}
historyTags:
{{- with .Values.backup.historyTags }}
{{- . | toYaml | nindent 6 }}
{{- end }}
s3Credentials:
accessKeyId:
name: {{ include "cluster.backupCredentials" . }}
@@ -19,12 +27,24 @@ backup:
name: {{ include "cluster.backupCredentials" . }}
key: ACCESS_SECRET_KEY
wal:
{{- if .Values.backup.wal.compression }}
compression: {{ .Values.backup.wal.compression }}
{{- end }}
{{- if .Values.backup.wal.encryption }}
encryption: {{ .Values.backup.wal.encryption }}
{{- end }}
{{- if .Values.backup.wal.maxParallel }}
maxParallel: {{ .Values.backup.wal.maxParallel }}
{{- end }}
data:
{{- if .Values.backup.data.compression }}
compression: {{ .Values.backup.data.compression }}
{{- end }}
{{- if .Values.backup.data.encryption }}
encryption: {{ .Values.backup.data.encryption }}
{{- end }}
{{- if .Values.backup.data.jobs }}
jobs: {{ .Values.backup.data.jobs }}
{{- end }}
{{- end }}
{{- end }}
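
The hunk above adds the `tags`/`historyTags` passthrough and renders the WAL and data compression, encryption, and parallelism fields only when set. A hedged sketch of the values this block consumes (endpoint, path, and secret name are placeholders):

```yaml
backup:
  enabled: true
  endpointURL: "https://s3.example.com"            # placeholder endpoint
  destinationPath: "s3://example-bucket/postgres"  # placeholder path
  endpointCredentials: "postgres-backup-secret"    # secret holding ACCESS_KEY_ID / ACCESS_SECRET_KEY
  tags:
    backupRetentionPolicy: "7d"
  historyTags:
    backupRetentionPolicy: "7d"
  wal:
    compression: snappy
    encryption: ""      # empty values are omitted from the rendered spec
    maxParallel: 1
  data:
    compression: snappy
    encryption: ""
    jobs: 1
```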


@@ -2,11 +2,15 @@
bootstrap:
{{- if eq .Values.mode "standalone" }}
initdb:
{{- with .Values.cluster.initdb }}
{{- with .Values.bootstrap.initdb }}
{{- with (omit . "postInitApplicationSQL") }}
{{- . | toYaml | nindent 4 }}
{{- end }}
{{- end }}
{{- if eq .Values.type "tensorchord" }}
dataChecksums: true
{{- end }}
{{- if or (eq .Values.type "postgis") (eq .Values.type "timescaledb") (eq .Values.type "tensorchord") (.Values.bootstrap.initdb.postInitApplicationSQL) }}
postInitApplicationSQL:
{{- if eq .Values.type "postgis" }}
- CREATE EXTENSION IF NOT EXISTS postgis;
@@ -15,12 +19,22 @@ bootstrap:
- CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder;
{{- else if eq .Values.type "timescaledb" }}
- CREATE EXTENSION IF NOT EXISTS timescaledb;
{{- else if eq .Values.type "tensorchord" }}
- ALTER SYSTEM SET search_path TO "$user", public, vectors;
- SET search_path TO "$user", public, vectors;
- CREATE EXTENSION IF NOT EXISTS "vectors";
- CREATE EXTENSION IF NOT EXISTS "cube";
- CREATE EXTENSION IF NOT EXISTS "earthdistance";
- ALTER SCHEMA vectors OWNER TO "app";
- GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA vectors TO "app";
- GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "app";
{{- end }}
{{- with .Values.cluster.initdb }}
{{- with .Values.bootstrap.initdb }}
{{- range .postInitApplicationSQL }}
{{- printf "- %s" . | nindent 6 }}
{{- end }}
{{- end }}
{{- end }}
{{- else if eq .Values.mode "replica" }}
initdb:
import:
@@ -47,6 +61,11 @@ bootstrap:
{{- end }}
source:
externalCluster: "{{ include "cluster.name" . }}-cluster"
{{- with .Values.bootstrap.initdb }}
{{- with (omit . "postInitApplicationSQL") }}
{{- . | toYaml | nindent 4 }}
{{- end }}
{{- end }}
externalClusters:
- name: "{{ include "cluster.name" . }}-cluster"
{{- with .Values.replica.externalCluster }}
@@ -78,13 +97,25 @@ externalClusters:
name: {{ include "cluster.recoveryCredentials" . }}
key: ACCESS_SECRET_KEY
wal:
{{- if .Values.recovery.wal.compression }}
compression: {{ .Values.recovery.wal.compression }}
{{- end }}
{{- if .Values.recovery.wal.encryption }}
encryption: {{ .Values.recovery.wal.encryption }}
{{- end }}
{{- if .Values.recovery.wal.maxParallel }}
maxParallel: {{ .Values.recovery.wal.maxParallel }}
{{- end }}
data:
{{- if .Values.recovery.data.compression }}
compression: {{ .Values.recovery.data.compression }}
{{- end }}
{{- if .Values.recovery.data.encryption }}
encryption: {{ .Values.recovery.data.encryption }}
{{- end }}
{{- if .Values.recovery.data.jobs }}
jobs: {{ .Values.recovery.data.jobs }}
{{- end }}
{{- else }}
{{ fail "Invalid cluster mode!" }}
{{- end }}
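
With the move from `cluster.initdb` to `bootstrap.initdb`, everything except `postInitApplicationSQL` is rendered verbatim into `initdb`, and any `postInitApplicationSQL` entries are appended after the type-specific `CREATE EXTENSION` statements. A sketch based on the example given in the chart's values comments:

```yaml
type: postgis
mode: standalone
bootstrap:
  initdb:
    database: app
    owner: app
    postInitApplicationSQL:
      - CREATE TABLE IF NOT EXISTS example;   # appended after the postgis extension statements
```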


@@ -18,6 +18,7 @@ spec:
imagePullPolicy: {{ .Values.cluster.image.pullPolicy }}
postgresUID: {{ .Values.cluster.postgresUID }}
postgresGID: {{ .Values.cluster.postgresGID }}
enableSuperuserAccess: {{ .Values.cluster.enableSuperuserAccess }}
walStorage:
size: {{ .Values.cluster.walStorage.size }}
storageClass: {{ .Values.cluster.walStorage.storageClass }}
@@ -32,15 +33,26 @@ spec:
affinity:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.cluster.priorityClassName }}
priorityClassName: {{ .Values.cluster.priorityClassName }}
{{- end }}
primaryUpdateMethod: {{ .Values.cluster.primaryUpdateMethod }}
primaryUpdateStrategy: {{ .Values.cluster.primaryUpdateStrategy }}
logLevel: {{ .Values.cluster.logLevel }}
postgresql:
shared_preload_libraries:
{{- if eq .Values.type "timescaledb" }}
shared_preload_libraries:
- timescaledb
{{- end }}
{{- if eq .Values.type "tensorchord" }}
shared_preload_libraries:
- vectors.so
enableAlterSystem: true
{{- end }}
{{- with .Values.cluster.postgresql.shared_preload_libraries }}
shared_preload_libraries:
{{- toYaml . | nindent 6 }}
{{ end }}
{{- with .Values.cluster.postgresql.parameters }}
parameters:
{{- toYaml . | nindent 6 }}
@@ -49,4 +61,5 @@ spec:
enablePodMonitor: {{ and .Values.cluster.monitoring.enabled .Values.cluster.monitoring.podMonitor.enabled }}
{{ include "cluster.bootstrap" . | nindent 2 }}
{{ include "cluster.backup" . | nindent 2 }}


@@ -14,10 +14,10 @@ spec:
- name: cloudnative-pg/{{ include "cluster.name" . }}
rules:
{{- $dict := dict "excludeRules" .Values.cluster.monitoring.prometheusRule.excludeRules -}}
{{- $_ := set $dict "value" "{{ $value }}" -}}
{{- $_ := set $dict "value" "{{`{{`}} $value {{`}}`}}" -}}
{{- $_ := set $dict "namespace" .Release.Namespace -}}
{{- $_ := set $dict "cluster" (printf "%s-cluster" (include "cluster.name" .) ) -}}
{{- $_ := set $dict "labels" (dict "job" "{{ $labels.job }}" "node" "{{ $labels.node }}" "pod" "{{ $labels.pod }}") -}}
{{- $_ := set $dict "labels" (dict "job" "{{`{{`}} $labels.job {{`}}`}}" "node" "{{`{{`}} $labels.node {{`}}`}}" "pod" "{{`{{`}} $labels.pod {{`}}`}}") -}}
{{- $_ := set $dict "podSelector" (printf "%s-cluster-([1-9][0-9]*)$" (include "cluster.name" .) ) -}}
{{- $_ := set $dict "Values" .Values -}}
{{- $_ := set $dict "Template" .Template -}}
@@ -27,4 +27,71 @@ spec:
- {{ $tpl }}
{{- end -}}
{{- end -}}
{{- if .Values.cluster.monitoring.prometheusRule.enableDefaultRules }}
- name: cloudnative-pg/default-rules
rules:
- alert: LongRunningTransaction
annotations:
description: Pod {{`{{`}} $labels.pod {{`}}`}} is taking more than 5 minutes (300 seconds) for a query.
summary: A query is taking longer than 5 minutes.
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
- alert: BackendsWaiting
annotations:
description: Pod {{`{{`}} $labels.pod {{`}}`}} has been waiting for longer than 5 minutes
summary: If a backend is waiting for longer than 5 minutes
expr: |-
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
- alert: PGDatabaseXidAge
annotations:
description: Over 300,000,000 transactions from frozen xid on pod {{`{{`}} $labels.pod {{`}}`}}
summary: Number of transactions from the frozen XID to the current one
expr: |-
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
- alert: PGReplication
annotations:
description: Standby is lagging behind by over 300 seconds (5 minutes)
summary: The standby is lagging behind the primary
expr: |-
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
- alert: LastFailedArchiveTime
annotations:
description: Archiving failed for {{`{{`}} $labels.pod {{`}}`}}
summary: Checks the last time archiving failed. Will be < 0 when it has not failed.
expr: |-
(cnpg_pg_stat_archiver_last_failed_time - cnpg_pg_stat_archiver_last_archived_time) > 1
for: 1m
labels:
severity: warning
- alert: DatabaseDeadlockConflicts
annotations:
description: There are over 10 deadlock conflicts in {{`{{`}} $labels.pod {{`}}`}}
summary: Checks the number of database conflicts
expr: |-
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
- alert: ReplicaFailingReplication
annotations:
description: Replica {{`{{`}} $labels.pod {{`}}`}} is failing to replicate
summary: Checks if the replica is failing to replicate
expr: |-
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
{{- end }}
{{ end }}
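
The new default-rules block is gated only by `cluster.monitoring.prometheusRule.enableDefaultRules`; `excludeRules` is still passed to the bundled rule templates rendered above it. A hedged values sketch for enabling the rules:

```yaml
cluster:
  monitoring:
    enabled: true
    podMonitor:
      enabled: true
    prometheusRule:
      enabled: true
      enableDefaultRules: true
      excludeRules: []   # rules to drop; handed to the chart's bundled rule templates
```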


@@ -1,15 +1,14 @@
# -- Override the name of the cluster
nameOverride: ""
###
# -- Type of the CNPG database. Available types:
# * `postgresql`
# * `postgis`
# * `timescaledb`
# * `tensorchord`
type: postgresql
###
# Cluster mode of operation. Available modes:
# -- Cluster mode of operation. Available modes:
# * `standalone` - Default mode. Creates new or updates an existing CNPG cluster.
# * `recovery` - Same as standalone but creates a cluster from a backup, object store or via pg_basebackup
# * `replica` - Create database as a replica from another CNPG cluster
@@ -18,15 +17,20 @@ mode: standalone
cluster:
instances: 3
# -- Default image
image:
repository: ghcr.io/cloudnative-pg/postgresql
tag: "16.3"
tag: "17.2-22"
pullPolicy: IfNotPresent
# The UID and GID of the postgres user inside the image
# -- The UID and GID of the postgres user inside the image
postgresUID: 26
postgresGID: 26
# -- Create secret containing credentials of superuser
enableSuperuserAccess: false
# -- Default storage size
walStorage:
size: 2Gi
storageClass: ""
@@ -34,117 +38,123 @@ cluster:
size: 10Gi
storageClass: ""
# -- Default resources
resources:
requests:
memory: 256Mi
cpu: 10m
cpu: 100m
limits:
memory: 1Gi
cpu: 800m
cpu: '1'
hugepages-2Mi: 256Mi
# See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-AffinityConfiguration
# -- See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-AffinityConfiguration
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
additionalLabels: {}
annotations: {}
priorityClassName: ""
# Method to follow to upgrade the primary server during a rolling update procedure, after all replicas have been
# -- Method to follow to upgrade the primary server during a rolling update procedure, after all replicas have been
# successfully updated. It can be switchover (default) or in-place (restart).
primaryUpdateMethod: switchover
# Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been
# -- Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been
# successfully updated: it can be automated (unsupervised - default) or manual (supervised)
primaryUpdateStrategy: unsupervised
logLevel: "info"
# -- Enable default monitoring and alert rules
monitoring:
enabled: false
podMonitor:
enabled: true
prometheusRule:
enabled: true
enabled: false
enableDefaultRules: true
excludeRules: []
# -- Parameters to be set for the database itself
# See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-PostgresConfiguration
postgresql:
parameters:
shared_buffers: 128MB
max_slot_wal_keep_size: 2000MB
hot_standby_feedback: "on"
shared_preload_libraries: []
# BootstrapInitDB is the configuration of the bootstrap process when initdb is used.
# -- Bootstrap is the configuration of the bootstrap process when initdb is used.
# See: https://cloudnative-pg.io/documentation/current/bootstrap/
# See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-bootstrapinitdb
initdb:
{}
bootstrap:
# -- Example values
# database: app
# owner: app
# secret: "" # Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch
# postInitApplicationSQL:
# - CREATE TABLE IF NOT EXISTS example;
initdb: {}
# -- Recovery settings when booting cluster from external cluster
recovery:
# Point in time recovery target in RFC3339 format
# -- Point in time recovery target in RFC3339 format
pitrTarget:
time: ""
# S3 https endpoint and the s3:// path
# -- S3 https endpoint and the s3:// path
endpointURL: ""
destinationPath: ""
# Specifies secret that contains a CA bundle to validate a privately signed certificate, should contain the key ca-bundle.crt
# -- Specifies secret that contains a CA bundle to validate a privately signed certificate, should contain the key ca-bundle.crt
endpointCA: ""
# Specifies secret that contains S3 credentials, should contain the keys ACCESS_KEY_ID and ACCESS_SECRET_KEY
# -- Specifies secret that contains S3 credentials, should contain the keys ACCESS_KEY_ID and ACCESS_SECRET_KEY
endpointCredentials: ""
# Generate external cluster name, uses: {{ .Release.Name }}postgresql-<major version>-cluster-backup-index-{{ .Values.recovery.recoveryIndex }}
# -- Generate external cluster name, uses: {{ .Release.Name }}postgresql-<major version>-cluster-backup-index-{{ .Values.recovery.recoveryIndex }}
recoveryIndex: 1
# Name of the recovery cluster in the object store, defaults to "cluster.name"
# -- Name of the recovery cluster in the object store, defaults to "cluster.name"
recoveryServerName: ""
# Name of the recovery cluster in the object store, defaults to ".Release.Name"
# -- Name of the recovery cluster in the object store, defaults to ".Release.Name"
recoveryInstanceName: ""
wal:
# WAL compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
# -- WAL compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
compression: snappy
# Whether to instruct the storage provider to encrypt WAL files. One of `` (use the storage container default), `AES256` or `aws:kms`.
# -- Whether to instruct the storage provider to encrypt WAL files. One of `` (use the storage container default), `AES256` or `aws:kms`.
encryption: ""
# Number of WAL files to be archived or restored in parallel.
maxParallel: 2
# -- Number of WAL files to be archived or restored in parallel.
maxParallel: 1
data:
# Data compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
# -- Data compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
compression: snappy
# Whether to instruct the storage provider to encrypt data files. One of `` (use the storage container default), `AES256` or `aws:kms`.
# -- Whether to instruct the storage provider to encrypt data files. One of `` (use the storage container default), `AES256` or `aws:kms`.
encryption: ""
# Number of data files to be archived or restored in parallel.
jobs: 2
# -- Number of data files to be archived or restored in parallel.
jobs: 1
replica:
# See https://cloudnative-pg.io/documentation/current/database_import/
# -- See [here](https://cloudnative-pg.io/documentation/current/database_import/) for different import types
# * `microservice` - Single database import as expected from cnpg clusters
# * `monolith` - Import multiple databases and roles
importType: microservice
# If type microservice only one database is allowed, default is app as standard in cnpg clusters
# -- If type microservice only one database is allowed, default is app as standard in cnpg clusters
importDatabases:
- app
# If type microservice no roles are imported and ignored
# -- If type microservice no roles are imported and ignored
importRoles: []
# If import type is monolith postImportApplicationSQL is not supported and ignored
# -- If import type is monolith postImportApplicationSQL is not supported and ignored
postImportApplicationSQL: []
# External cluster connection, password specifies a secret name and the key containing the password value
# -- External cluster connection, password specifies a secret name and the key containing the password value
externalCluster:
connectionParameters:
host: postgresql
@@ -157,41 +167,47 @@ replica:
backup:
enabled: false
# S3 endpoint starting with "https://"
# -- S3 endpoint starting with "https://"
endpointURL: ""
# S3 path starting with "s3://"
# -- S3 path starting with "s3://"
destinationPath: ""
# Specifies secret that contains a CA bundle to validate a privately signed certificate, should contain the key ca-bundle.crt
# -- Specifies secret that contains a CA bundle to validate a privately signed certificate, should contain the key ca-bundle.crt
endpointCA: ""
# Specifies secret that contains S3 credentials, should contain the keys ACCESS_KEY_ID and ACCESS_SECRET_KEY
# -- Specifies secret that contains S3 credentials, should contain the keys ACCESS_KEY_ID and ACCESS_SECRET_KEY
endpointCredentials: ""
# Generate external cluster name, creates: postgresql-{{ .Release.Name }}-cluster-backup-index-{{ .Values.backups.backupIndex }}"
# -- Generate external cluster name, creates: postgresql-{{ .Release.Name }}-cluster-backup-index-{{ .Values.backups.backupIndex }}"
backupIndex: 1
# Name of the backup cluster in the object store, defaults to "cluster.name"
# -- Name of the backup cluster in the object store, defaults to "cluster.name"
backupName: ""
# -- Tags to add to backups. Add in key value beneath the type.
tags:
backupRetentionPolicy: ""
historyTags:
backupRetentionPolicy: ""
wal:
# WAL compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
# -- WAL compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
compression: snappy
# Whether to instruct the storage provider to encrypt WAL files. One of `` (use the storage container default), `AES256` or `aws:kms`.
# -- Whether to instruct the storage provider to encrypt WAL files. One of `` (use the storage container default), `AES256` or `aws:kms`.
encryption: ""
# Number of WAL files to be archived or restored in parallel.
maxParallel: 2
# -- Number of WAL files to be archived or restored in parallel.
maxParallel: 1
data:
# Data compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
# -- Data compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
compression: snappy
# Whether to instruct the storage provider to encrypt data files. One of `` (use the storage container default), `AES256` or `aws:kms`.
# -- Whether to instruct the storage provider to encrypt data files. One of `` (use the storage container default), `AES256` or `aws:kms`.
encryption: ""
# Number of data files to be archived or restored in parallel.
jobs: 2
# -- Number of data files to be archived or restored in parallel.
jobs: 1
# Retention policy for backups
retentionPolicy: "30d"
# -- Retention policy for backups
retentionPolicy: "7d"
# Scheduled backup in cron format
schedule: "0 0 0 * * *"
# -- Scheduled backup in cron format
schedule: "0 0 */3 * *"
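
For the `replica` mode documented above, the connection to the source cluster comes from `replica.externalCluster`, with the password read from a Kubernetes secret. A sketch built from the chart's own defaults (the host and secret name below are the documented placeholders, not values from a real cluster):

```yaml
mode: replica
replica:
  importType: microservice   # single-database import
  importDatabases:
    - app
  externalCluster:
    connectionParameters:
      host: postgresql
      user: app
      dbname: app
    password:
      name: postgresql       # secret containing the password
      key: password
```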


@@ -1,13 +0,0 @@
apiVersion: v2
name: qbittorrent
version: 0.0.8
description: Chart for qBittorrent
keywords:
- downloads
- torrent
sources:
- https://github.com/qbittorrent/qBittorrent
maintainers:
- name: alexlebens
icon: https://avatars.githubusercontent.com/u/2131270?s=48&v=4
appVersion: version-4.6.3-r0


@@ -1,17 +0,0 @@
## Introduction
[qBittorrent](https://github.com/qbittorrent/qBittorrent)
qBittorrent is a bittorrent client programmed in C++ / Qt that uses libtorrent
This chart bootstraps a [qBittorrent](https://github.com/qbittorrent/qBittorrent) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes
- Helm
## Parameters
See the [values files](values.yaml).


@@ -1,111 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: qbittorrent
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: qbittorrent
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: qbittorrent
spec:
revisionHistoryLimit: 3
replicas: {{ .Values.server.replicas }}
strategy:
type: {{ .Values.server.strategy }}
selector:
matchLabels:
app.kubernetes.io/name: qbittorrent
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: qbittorrent
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
serviceAccountName: qbittorrent
automountServiceAccountToken: true
containers:
- name: qbittorrent
image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}"
imagePullPolicy: {{ .Values.server.image.pullPolicy }}
env:
{{- with (concat .Values.global.env .Values.server.env) }}
{{- toYaml . | nindent 12 }}
{{- end }}
- name: WEBUI_PORT
value: "{{ .Values.server.service.http.port }}"
resources:
{{- toYaml .Values.server.resources | nindent 12 }}
volumeMounts:
- name: qbittorrent-config
mountPath: /config
- name: media-storage
mountPath: {{ .Values.global.persistence.media.mountPath }}
{{- if .Values.gluetun.enabled }}
- name: gluetun
image: "{{.Values.gluetun.image.repository}}:{{.Values.gluetun.image.tag}}"
imagePullPolicy: {{ .Values.gluetun.image.pullPolicy }}
env:
{{- with (concat .Values.global.env .Values.gluetun.env) }}
{{- toYaml . | nindent 12 }}
{{- end }}
ports:
- name: health
containerPort: {{ .Values.gluetun.service.health.port }}
protocol: TCP
- name: http
containerPort: {{ .Values.server.service.http.port }}
protocol: TCP
- name: metrics
containerPort: 9022
protocol: TCP
securityContext:
{{- toYaml .Values.gluetun.securityContext | nindent 12 }}
resources:
{{- toYaml .Values.gluetun.resources | nindent 12 }}
volumeMounts:
- name: tunnel-device
mountPath: /dev/net/tun
- name: wg0-wireguard-config
mountPath: /gluetun/wireguard/
{{- end }}
{{- if .Values.metrics.enabled }}
- name: exporter
image: "{{ .Values.metrics.exporter.image.repository }}:{{.Values.metrics.exporter.image.tag }}"
imagePullPolicy: {{ .Values.metrics.exporter.image.pullPolicy }}
env:
{{- with (concat .Values.global.env .Values.metrics.exporter.env) }}
{{- toYaml . | nindent 12 }}
{{- end }}
- name: QBITTORRENT_HOST
value: "http://localhost"
- name: QBITTORRENT_PORT
value: "{{ .Values.server.service.http.port }}"
- name: EXPORTER_PORT
value: "9022"
{{- end }}
volumes:
{{- if .Values.gluetun.enabled }}
- name: tunnel-device
hostPath:
path: /dev/net/tun
- name: wg0-wireguard-config
secret:
secretName: {{ .Values.gluetun.existingSecretName }}
items:
- key: wg0.conf
path: wg0.conf
{{- end }}
- name: qbittorrent-config
persistentVolumeClaim:
claimName: qbittorrent-config
- name: media-storage
persistentVolumeClaim:
claimName: {{ .Values.global.persistence.media.claimName }}


@@ -1,32 +0,0 @@
{{- if .Values.server.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: qbittorrent
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: qbittorrent
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: qbittorrent
annotations:
{{- toYaml .Values.server.ingress.annotations | nindent 4 }}
spec:
ingressClassName: {{ .Values.server.ingress.className }}
tls:
- hosts:
- {{ .Values.server.ingress.host }}
secretName: qbittorrent-secret-tls
rules:
- host: {{ .Values.server.ingress.host }}
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: qbittorrent-webui
port:
name: http
{{- end }}


@@ -1,19 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: qbittorrent-config
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: qbittorrent
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: storage
app.kubernetes.io/part-of: qbittorrent
spec:
storageClassName: {{ .Values.server.persistence.config.storageClassName }}
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: {{ .Values.server.persistence.config.storageSize }}


@@ -1,13 +0,0 @@
{{- if .Values.global.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: qbittorrent
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: qbittorrent
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: qbittorrent
{{- end }}


@@ -1,23 +0,0 @@
{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: qbittorrent
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: qbittorrent
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: qbittorrent
spec:
endpoints:
- port: metrics
interval: {{ .Values.metrics.serviceMonitor.interval }}
scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
path: /metrics
selector:
matchLabels:
app.kubernetes.io/name: qbittorrent
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}


@@ -1,21 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: qbittorrent-webui
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: qbittorrent
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: qbittorrent
spec:
type: ClusterIP
ports:
- port: {{ .Values.server.service.http.port }}
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: qbittorrent
app.kubernetes.io/instance: {{ .Release.Name }}


@@ -1,84 +0,0 @@
global:
serviceAccount:
create: true
env: []
persistence:
media:
claimName:
mountPath:
server:
replicas: 1
strategy: Recreate
image:
repository: linuxserver/qbittorrent
tag: "version-4.6.3-r0"
pullPolicy: IfNotPresent
env:
- name: TZ
value: UTC
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: UMASK_SET
value: "002"
resources:
requests:
cpu: 100m
memory: 2Gi
limits:
cpu: 2000m
memory: 2Gi
service:
http:
port: 8080
ingress:
enabled: false
className:
annotations:
host:
persistence:
config:
storageClassName:
storageSize:
gluetun:
enabled: false
image:
repository: ghcr.io/qdm12/gluetun
tag: v3.38.0
pullPolicy: IfNotPresent
securityContext:
privileged: True
capabilities:
add:
- NET_ADMIN
env: []
existingSecretName:
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 500m
memory: 256Mi
service:
health:
port: 9999
metrics:
enabled: false
serviceMonitor:
enabled: false
interval: 15s
scrapeTimeout: 5s
exporter:
image:
repository: esanchezm/prometheus-qbittorrent-exporter
tag: v1.5.1
imagePullPolicy: IfNotPresent
env:
- name: QBITTORRENT_USER
value: admin
- name: QBITTORRENT_PASS
value: ""
- name: EXPORTER_LOG_LEVEL
value: INFO


@@ -1,24 +0,0 @@
apiVersion: v2
name: taiga
version: 0.2.3
description: Chart for Taiga
keywords:
- kanban
- project management
sources:
- https://github.com/taigaio
- https://github.com/rabbitmq/rabbitmq-server
- https://github.com/bitnami/charts/tree/main/bitnami/rabbitmq
maintainers:
- name: alexlebens
icon: https://avatars.githubusercontent.com/u/6905422?s=200&v=4
dependencies:
- name: rabbitmq
version: 14.1.5
repository: https://charts.bitnami.com/bitnami
alias: async-rabbitmq
- name: rabbitmq
version: 14.1.5
repository: https://charts.bitnami.com/bitnami
alias: events-rabbitmq
appVersion: 6.7.7


@@ -1,17 +0,0 @@
## Introduction
[Taiga 6](https://github.com/taigaio)
Intuitive and simple, yet feature complete Kanban board
This chart bootstraps a [Taiga](https://github.com/taigaio) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes
- Helm
## Parameters
See the [values files](values.yaml).


@@ -1,135 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "taiga.name" -}}
{{- default .Chart.Name .Values.global.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "taiga.fullname" -}}
{{- if .Values.global.fullnameOverride -}}
{{- .Values.global.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.global.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label
*/}}
{{- define "taiga.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "taiga.labels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}
helm.sh/chart: {{ template "taiga.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Common labels for specific components
*/}}
{{- define "taiga.back.labels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-back
helm.sh/chart: {{ template "taiga.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{- define "taiga.async.labels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-async
helm.sh/chart: {{ template "taiga.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{- define "taiga.front.labels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-front
helm.sh/chart: {{ template "taiga.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{- define "taiga.events.labels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-events
helm.sh/chart: {{ template "taiga.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{- define "taiga.protected.labels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-protected
helm.sh/chart: {{ template "taiga.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
*/}}
{{- define "taiga.matchLabels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "taiga.back.matchLabels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-back
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "taiga.async.matchLabels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-async
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "taiga.front.matchLabels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-front
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "taiga.events.matchLabels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-events
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "taiga.protected.matchLabels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-protected
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "taiga.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "taiga.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the static persistent volume
*/}}
{{- define "taiga.staticVolumeName" -}}
{{- if .Values.persistence.static.existingClaim -}}
{{ .Values.persistence.static.existingClaim }}
{{- else -}}
{{ printf "%s-static" (include "taiga.fullname" .) | trunc 63 | trimSuffix "-" }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the media persistent volume
*/}}
{{- define "taiga.mediaVolumeName" -}}
{{- if .Values.persistence.media.existingClaim -}}
{{ .Values.persistence.media.existingClaim }}
{{- else -}}
{{ printf "%s-media" (include "taiga.fullname" .) | trunc 63 | trimSuffix "-" }}
{{- end -}}
{{- end -}}
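To make the helpers above concrete: for a hypothetical release named `prod` with no name or fullname overrides, `taiga.fullname` resolves to `prod-taiga` (the release name does not contain the chart name, so the two are concatenated), and `taiga.labels` renders roughly as in this sketch; the chart version shown is a placeholder.

```yaml
# Approximate output of `include "taiga.labels" .` for a release named "prod".
app.kubernetes.io/name: taiga
helm.sh/chart: taiga-1.0.0         # placeholder chart version
app.kubernetes.io/instance: prod
app.kubernetes.io/managed-by: Helm
```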


@@ -1,36 +0,0 @@
{{- if .Values.createInitialUser }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "taiga.fullname" . }}-create-initial-user
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "taiga.labels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
data:
createinitialuser.sh: |
#!/bin/sh
echo """
import time
import requests
import subprocess
print('Waiting for backend ...')
while requests.get('http://{{ template "taiga.fullname" . }}-back/api/v1/').status_code != 200:
print('...')
time.sleep(2)
if str(subprocess.check_output(['python', 'manage.py', 'dumpdata', 'users.user'], cwd='/taiga-back')).find('\"is_superuser\": true') == -1:
print(subprocess.check_output(['python', 'manage.py', 'loaddata', 'initial_user'], cwd='/taiga-back'))
else:
print('Admin user already created.')
""" > /tmp/create_superuser.py
python /tmp/create_superuser.py
{{- end }}
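As a hedged sketch of the values this template reads: `createInitialUser` gates rendering of the ConfigMap, while `global.labels` and `global.annotations` are merged into its metadata; the admin account itself comes from the `initial_user` fixture that the script loads only when no superuser exists yet.

```yaml
# Minimal sketch -- only keys referenced by the ConfigMap above.
createInitialUser: true
global:
  labels: {}        # optional extra labels for the rendered metadata
  annotations: {}   # optional extra annotations for the rendered metadata
```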


@@ -1,515 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "taiga.fullname" . }}-back
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "taiga.back.labels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
spec:
revisionHistoryLimit: 3
replicas: {{ .Values.back.replicas }}
strategy:
type: Recreate
selector:
matchLabels:
{{- include "taiga.back.matchLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "taiga.back.labels" . | nindent 8 }}
app.kubernetes.io/component: {{ template "taiga.name" . }}-back
annotations:
{{- with .Values.back.podAnnotations }}
{{ toYaml . | nindent 8 }}
{{- end }}
spec:
affinity:
{{- with .Values.back.affinity }}
{{ toYaml . | nindent 8 }}
{{- end }}
nodeSelector:
{{- with .Values.back.nodeSelector }}
{{ toYaml . | nindent 8 }}
{{- end }}
tolerations:
{{- with .Values.back.tolerations }}
{{ toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ template "taiga.serviceAccountName" . }}
securityContext:
{{- with .Values.back.securityContext }}
{{ toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ template "taiga.fullname" . }}-back
image: "{{ .Values.back.image.repository }}:{{ .Values.back.image.tag }}"
imagePullPolicy: {{ .Values.back.image.pullPolicy }}
resources:
{{ toYaml .Values.back.resources | nindent 12 }}
ports:
- name: taiga-back
containerPort: {{ .Values.back.service.port }}
protocol: TCP
volumeMounts:
- name: taiga-static
mountPath: /taiga-back/static
- name: taiga-media
mountPath: /taiga-back/media
env:
- name: TAIGA_SECRET_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.secretKey.existingSecretName }}"
key: "{{ .Values.secretKey.existingSecretKey }}"
- name: ENABLE_TELEMETRY
value: "{{ .Values.enableTelemetry }}"
- name: PUBLIC_REGISTER_ENABLED
value: "{{ .Values.publicRegisterEnabled }}"
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.usernameKey }}"
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.passwordKey }}"
- name: POSTGRES_DB
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.databaseNameKey }}"
- name: POSTGRES_HOST
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.hostKey }}"
{{ if .Values.oidc.enabled }}
- name: OIDC_ENABLED
value: "True"
- name: OIDC_SCOPES
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.scopesKey }}"
- name: OIDC_SIGN_ALGO
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.signatureAlgorithmKey }}"
- name: OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.clientIdKey }}"
- name: OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.clientSecretKey }}"
- name: OIDC_BASE_URL
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.baseUrlKey }}"
- name: OIDC_JWKS_ENDPOINT
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.jwksEndpointKey }}"
- name: OIDC_AUTHORIZATION_ENDPOINT
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.authorizationEndpointKey }}"
- name: OIDC_TOKEN_ENDPOINT
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.tokenEndpointKey }}"
- name: OIDC_USER_ENDPOINT
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.userEndpointKey }}"
{{ end }}
{{ if .Values.email.enabled }}
- name: EMAIL_BACKEND
value: "django.core.mail.backends.smtp.EmailBackend"
- name: DEFAULT_FROM_EMAIL
value: "{{ .Values.email.from }}"
- name: EMAIL_HOST
value: "{{ .Values.email.host }}"
- name: EMAIL_PORT
value: "{{ .Values.email.port }}"
- name: EMAIL_USE_TLS
value: "{{ .Values.email.tls }}"
- name: EMAIL_USE_SSL
value: "{{ .Values.email.ssl }}"
- name: EMAIL_HOST_USER
value: "{{ .Values.email.user }}"
- name: EMAIL_HOST_PASSWORD
valueFrom:
secretKeyRef:
name: "{{ .Values.email.existingPasswordSecret }}"
key: "{{ .Values.email.existingSecretPasswordKey }}"
{{ end }}
- name: ENABLE_GITHUB_AUTH
value: "false"
- name: ENABLE_GITLAB_AUTH
value: "false"
- name: ENABLE_SLACK
value: "{{ .Values.enableSlack }}"
{{ if .Values.githubImporter.enabled }}
- name: ENABLE_GITHUB_IMPORTER
value: "True"
- name: GITHUB_API_CLIENT_ID
valueFrom:
secretKeyRef:
name: "{{ .Values.githubImporter.existingSecretName }}"
key: "{{ .Values.githubImporter.existingSecretClientIdKey }}"
- name: GITHUB_API_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: "{{ .Values.githubImporter.existingSecretName }}"
key: "{{ .Values.githubImporter.existingSecretClientSecretKey }}"
{{ else }}
- name: ENABLE_GITHUB_IMPORTER
value: "False"
{{ end }}
{{ if .Values.jiraImporter.enabled }}
- name: ENABLE_JIRA_IMPORTER
value: "True"
- name: JIRA_IMPORTER_CONSUMER_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.jiraImporter.existingSecretName }}"
key: "{{ .Values.jiraImporter.existingSecretConsumerKeyKey }}"
- name: JIRA_IMPORTER_CERT
valueFrom:
secretKeyRef:
name: "{{ .Values.jiraImporter.existingSecretName }}"
key: "{{ .Values.jiraImporter.existingSecretCertKey }}"
- name: JIRA_IMPORTER_PUB_CERT
valueFrom:
secretKeyRef:
name: "{{ .Values.jiraImporter.existingSecretName }}"
key: "{{ .Values.jiraImporter.existingSecretPubCertKey }}"
{{ else }}
- name: ENABLE_JIRA_IMPORTER
value: "False"
{{ end }}
{{ if .Values.trelloImporter.enabled }}
- name: ENABLE_TRELLO_IMPORTER
value: "True"
- name: TRELLO_IMPORTER_API_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.trelloImporter.existingSecretName }}"
key: "{{ .Values.trelloImporter.existingSecretApiKeyKey }}"
- name: TRELLO_IMPORTER_SECRET_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.trelloImporter.existingSecretName }}"
key: "{{ .Values.trelloImporter.existingSecretSecretKeyKey }}"
{{ else }}
- name: ENABLE_TRELLO_IMPORTER
value: "False"
{{ end }}
- name: RABBITMQ_USER
value: "{{ index .Values "async-rabbitmq" "auth" "username" }}"
- name: RABBITMQ_PASS
valueFrom:
secretKeyRef:
name: {{ index .Values "async-rabbitmq" "auth" "existingPasswordSecret" }}
key: {{ index .Values "async-rabbitmq" "auth" "existingSecretPasswordKey" }}
{{ if .Values.ingress.enabled }}
- name: TAIGA_SITES_DOMAIN
value: "{{ .Values.ingress.host }}"
- name: TAIGA_SITES_SCHEME
value: "https"
- name: SESSION_COOKIE_SECURE
value: "True"
- name: CSRF_COOKIE_SECURE
value: "True"
{{- end }}
{{- if .Values.back.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: /admin/login/
port: {{ .Values.back.service.port }}
initialDelaySeconds: {{ .Values.back.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.back.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.back.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.back.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.back.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.back.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /admin/login/
port: {{ .Values.back.service.port }}
initialDelaySeconds: {{ .Values.back.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.back.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.back.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.back.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.back.readinessProbe.failureThreshold }}
{{- end }}
- name: {{ template "taiga.fullname" . }}-async
image: "{{ .Values.async.image.repository }}:{{ .Values.async.image.tag }}"
imagePullPolicy: {{ .Values.async.image.pullPolicy }}
resources:
{{ toYaml .Values.async.resources | nindent 12 }}
command:
- /taiga-back/docker/async_entrypoint.sh
volumeMounts:
- name: taiga-static
mountPath: /taiga-back/static
- name: taiga-media
mountPath: /taiga-back/media
env:
- name: TAIGA_SECRET_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.secretKey.existingSecretName }}"
key: "{{ .Values.secretKey.existingSecretKey }}"
- name: ENABLE_TELEMETRY
value: "{{ .Values.enableTelemetry }}"
- name: PUBLIC_REGISTER_ENABLED
value: "{{ .Values.publicRegisterEnabled }}"
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.usernameKey }}"
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.passwordKey }}"
- name: POSTGRES_DB
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.databaseNameKey }}"
- name: POSTGRES_HOST
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.hostKey }}"
{{ if .Values.oidc.enabled }}
- name: OIDC_ENABLED
value: "True"
- name: OIDC_SCOPES
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.scopesKey }}"
- name: OIDC_SIGN_ALGO
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.signatureAlgorithmKey }}"
- name: OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.clientIdKey }}"
- name: OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.clientSecretKey }}"
- name: OIDC_BASE_URL
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.baseUrlKey }}"
- name: OIDC_JWKS_ENDPOINT
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.jwksEndpointKey }}"
- name: OIDC_AUTHORIZATION_ENDPOINT
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.authorizationEndpointKey }}"
- name: OIDC_TOKEN_ENDPOINT
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.tokenEndpointKey }}"
- name: OIDC_USER_ENDPOINT
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.userEndpointKey }}"
{{ end }}
{{ if .Values.email.enabled }}
- name: EMAIL_BACKEND
value: "django.core.mail.backends.smtp.EmailBackend"
- name: DEFAULT_FROM_EMAIL
value: "{{ .Values.email.from }}"
- name: EMAIL_HOST
value: "{{ .Values.email.host }}"
- name: EMAIL_PORT
value: "{{ .Values.email.port }}"
- name: EMAIL_USE_TLS
value: "{{ .Values.email.tls }}"
- name: EMAIL_USE_SSL
value: "{{ .Values.email.ssl }}"
- name: EMAIL_HOST_USER
value: "{{ .Values.email.user }}"
- name: EMAIL_HOST_PASSWORD
valueFrom:
secretKeyRef:
name: "{{ .Values.email.existingPasswordSecret }}"
key: "{{ .Values.email.existingSecretPasswordKey }}"
{{ end }}
- name: ENABLE_GITHUB_AUTH
value: "false"
- name: ENABLE_GITLAB_AUTH
value: "false"
- name: ENABLE_SLACK
value: "{{ .Values.enableSlack }}"
{{ if .Values.githubImporter.enabled }}
- name: ENABLE_GITHUB_IMPORTER
value: "True"
- name: GITHUB_API_CLIENT_ID
valueFrom:
secretKeyRef:
name: "{{ .Values.githubImporter.existingSecretName }}"
key: "{{ .Values.githubImporter.existingSecretClientIdKey }}"
- name: GITHUB_API_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: "{{ .Values.githubImporter.existingSecretName }}"
key: "{{ .Values.githubImporter.existingSecretClientSecretKey }}"
{{ else }}
- name: ENABLE_GITHUB_IMPORTER
value: "False"
{{ end }}
{{ if .Values.jiraImporter.enabled }}
- name: ENABLE_JIRA_IMPORTER
value: "True"
- name: JIRA_IMPORTER_CONSUMER_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.jiraImporter.existingSecretName }}"
key: "{{ .Values.jiraImporter.existingSecretConsumerKeyKey }}"
- name: JIRA_IMPORTER_CERT
valueFrom:
secretKeyRef:
name: "{{ .Values.jiraImporter.existingSecretName }}"
key: "{{ .Values.jiraImporter.existingSecretCertKey }}"
- name: JIRA_IMPORTER_PUB_CERT
valueFrom:
secretKeyRef:
name: "{{ .Values.jiraImporter.existingSecretName }}"
key: "{{ .Values.jiraImporter.existingSecretPubCertKey }}"
{{ else }}
- name: ENABLE_JIRA_IMPORTER
value: "False"
{{ end }}
{{ if .Values.trelloImporter.enabled }}
- name: ENABLE_TRELLO_IMPORTER
value: "True"
- name: TRELLO_IMPORTER_API_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.trelloImporter.existingSecretName }}"
key: "{{ .Values.trelloImporter.existingSecretApiKeyKey }}"
- name: TRELLO_IMPORTER_SECRET_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.trelloImporter.existingSecretName }}"
key: "{{ .Values.trelloImporter.existingSecretSecretKeyKey }}"
{{ else }}
- name: ENABLE_TRELLO_IMPORTER
value: "False"
{{ end }}
- name: RABBITMQ_USER
value: "{{ index .Values "async-rabbitmq" "auth" "username" }}"
- name: RABBITMQ_PASS
valueFrom:
secretKeyRef:
name: {{ index .Values "async-rabbitmq" "auth" "existingPasswordSecret" }}
key: {{ index .Values "async-rabbitmq" "auth" "existingSecretPasswordKey" }}
{{ if .Values.ingress.enabled }}
- name: TAIGA_SITES_DOMAIN
value: "{{ .Values.ingress.host }}"
- name: TAIGA_SITES_SCHEME
value: "https"
- name: SESSION_COOKIE_SECURE
value: "True"
- name: CSRF_COOKIE_SECURE
value: "True"
{{- end }}
{{- if .Values.back.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: /admin/login/
port: {{ .Values.back.service.port }}
initialDelaySeconds: {{ .Values.back.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.back.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.back.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.back.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.back.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.back.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /admin/login/
port: {{ .Values.back.service.port }}
initialDelaySeconds: {{ .Values.back.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.back.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.back.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.back.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.back.readinessProbe.failureThreshold }}
{{- end }}
volumes:
- name: taiga-static
{{- if .Values.persistence.static.enabled }}
persistentVolumeClaim:
claimName: {{ include "taiga.staticVolumeName" . }}
{{- else }}
emptyDir: {}
{{- end }}
- name: taiga-media
{{- if .Values.persistence.media.enabled }}
persistentVolumeClaim:
claimName: {{ include "taiga.mediaVolumeName" . }}
{{- else }}
emptyDir: {}
{{- end }}
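The back and async containers above read every credential from Secrets that must already exist in the release namespace. A sketch of the values they expect follows; the Secret names, username, and key names are placeholders, not chart defaults.

```yaml
# Illustrative only: create these Secrets before installing the chart.
secretKey:
  existingSecretName: taiga-secret-key        # placeholder Secret name
  existingSecretKey: secret-key
postgresql:
  existingSecretName: taiga-postgres          # placeholder Secret name
  usernameKey: username
  passwordKey: password
  databaseNameKey: dbname
  hostKey: host
async-rabbitmq:
  auth:
    username: taiga
    existingPasswordSecret: taiga-rabbitmq    # placeholder Secret name
    existingSecretPasswordKey: rabbitmq-password
```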


@@ -1,101 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "taiga.fullname" . }}-events
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "taiga.events.labels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
spec:
revisionHistoryLimit: 3
replicas: {{ .Values.events.replicas }}
strategy:
type: Recreate
selector:
matchLabels:
{{- include "taiga.events.matchLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "taiga.events.labels" . | nindent 8 }}
app.kubernetes.io/component: {{ template "taiga.name" . }}-events
annotations:
{{- with .Values.events.podAnnotations }}
{{ toYaml . | nindent 8 }}
{{- end }}
spec:
affinity:
{{- with .Values.events.affinity }}
{{ toYaml . | nindent 8 }}
{{- end }}
nodeSelector:
{{- with .Values.events.nodeSelector }}
{{ toYaml . | nindent 8 }}
{{- end }}
tolerations:
{{- with .Values.events.tolerations }}
{{ toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ template "taiga.serviceAccountName" . }}
securityContext:
{{- with .Values.events.securityContext }}
{{ toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ template "taiga.fullname" . }}-events
image: "{{ .Values.events.image.repository }}:{{ .Values.events.image.tag }}"
imagePullPolicy: {{ .Values.events.image.pullPolicy }}
resources:
{{ toYaml .Values.events.resources | nindent 12 }}
ports:
- name: taiga-events
containerPort: {{ .Values.events.service.http.port }}
protocol: TCP
- name: taiga-app
containerPort: {{ .Values.events.service.app.port }}
protocol: TCP
env:
- name: TAIGA_SECRET_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.secretKey.existingSecretName }}"
key: "{{ .Values.secretKey.existingSecretKey }}"
- name: RABBITMQ_USER
value: "{{ index .Values "events-rabbitmq" "auth" "username" }}"
- name: RABBITMQ_PASS
valueFrom:
secretKeyRef:
name: {{ index .Values "events-rabbitmq" "auth" "existingPasswordSecret" }}
key: {{ index .Values "events-rabbitmq" "auth" "existingSecretPasswordKey" }}
- name: APP_PORT
value: "{{ .Values.events.service.app.port }}"
{{- if .Values.events.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: /healthz
port: {{ .Values.events.service.app.port }}
initialDelaySeconds: {{ .Values.events.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.events.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.events.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.events.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.events.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.events.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /healthz
port: {{ .Values.events.service.app.port }}
initialDelaySeconds: {{ .Values.events.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.events.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.events.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.events.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.events.readinessProbe.failureThreshold }}
{{- end }}
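Similarly, a hedged sketch of the values the events deployment consumes; the image tag, ports, and probe timings below are illustrative, not chart defaults.

```yaml
# Sketch only; adjust to your environment.
events:
  replicas: 1
  image:
    repository: taigaio/taiga-events          # assumed upstream image
    tag: latest                               # placeholder tag
    pullPolicy: IfNotPresent
  service:
    http:
      port: 8888                              # exposed as container port "taiga-events"
    app:
      port: 3023                              # exposed as "taiga-app"; used by the probes
  livenessProbe:
    enabled: true
    initialDelaySeconds: 30
    periodSeconds: 10
    timeoutSeconds: 5
    successThreshold: 1
    failureThreshold: 3
  readinessProbe:
    enabled: true
    initialDelaySeconds: 15
    periodSeconds: 10
    timeoutSeconds: 5
    successThreshold: 1
    failureThreshold: 3
events-rabbitmq:
  auth:
    username: taiga
    existingPasswordSecret: taiga-events-rabbitmq   # placeholder Secret name
    existingSecretPasswordKey: rabbitmq-password
```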

Some files were not shown because too many files have changed in this diff.