Compare commits

...

76 Commits

Author SHA1 Message Date
3b50ca2bfe fix comparision operator position 2024-04-18 01:51:06 -06:00
17796a1183 increment chart version 2024-04-18 01:48:03 -06:00
512b1d4243 set default value for comparision 2024-04-18 01:47:46 -06:00
a2b0cdd5b6 fix ordering of comparision operator 2024-04-18 01:39:33 -06:00
e79af169b9 calculate length of array separately 2024-04-18 01:35:19 -06:00
661f9342b9 fix length measurement of database 2024-04-18 01:17:16 -06:00
9d1244c7a1 remove patch from image tag 2024-04-18 01:07:36 -06:00
0dc50bf88f change default cluster name to start with release 2024-04-17 20:01:47 -06:00
75accbbf87 use semver function to pull major version into cluster name 2024-04-17 20:00:06 -06:00
19fbd95a79 change templating for cluster naming 2024-04-17 19:45:08 -06:00
d73c42fd42 change default values 2024-04-17 19:15:54 -06:00
renovate[bot]
6399a8ca97 Update Helm release rabbitmq to v14 (#34)
* Update Helm release rabbitmq to v14

* update chart

* align comments for readability

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-04-17 19:13:57 -06:00
renovate[bot]
580c7da73a Update Helm release redis to v19.1.1 (#18)
* Update Helm release redis to v19.1.1

* update charts

* fix indentation

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-04-17 19:09:36 -06:00
renovate[bot]
11d47799f1 Update dock.mau.dev/mautrix/whatsapp Docker tag to v0.10.7 (#36)
* Update dock.mau.dev/mautrix/whatsapp Docker tag to v0.10.7

* update helm chart

* fix indentation

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-04-17 19:05:30 -06:00
renovate[bot]
7d825da72d Update linuxserver/code-server Docker tag to v4.23.1 (#35)
* Update linuxserver/code-server Docker tag to v4.23.1

* update helm chart

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-04-17 19:05:16 -06:00
renovate[bot]
adf49292bd Update halfshot/matrix-hookshot Docker tag to v5.3.0 (#38)
* Update halfshot/matrix-hookshot Docker tag to v5.3.0

* update chart

* fix linting errors

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-04-17 19:03:21 -06:00
renovate[bot]
63e69df14a Update ghcr.io/gethomepage/homepage Docker tag to v0.8.12 (#37)
* Update ghcr.io/gethomepage/homepage Docker tag to v0.8.12

* update chart

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Alex Lebens <alexanderlebens@gmail.com>
2024-04-17 18:55:36 -06:00
7bd8a4525a if oidc is enabled add an ingress path to the backend 2024-04-17 04:42:51 -06:00
a860789056 add env to front deployment about oidc enablement 2024-04-15 03:31:49 -06:00
58f89640a8 fix naming of changed rabbitmq charts 2024-04-15 02:47:45 -06:00
132e086d6d change rabbitmq chart naming to generate proper dns and app names 2024-04-15 02:44:10 -06:00
617505ee99 fix length of app port 2024-04-13 23:37:58 -06:00
34a21702ab fix http service value 2024-04-13 23:32:48 -06:00
15d3253af9 fix events app port to service and port 2024-04-13 23:28:03 -06:00
90970ef172 fix events health endpoint 2024-04-13 23:19:59 -06:00
0d6f789ffd increment chart version 2024-04-13 23:14:21 -06:00
f968776cd0 fix trello importer switch for async container 2024-04-13 23:13:44 -06:00
0b2beb08b7 fix indentation of events deployment 2024-04-13 23:11:44 -06:00
8fae31a679 properly enable/disable trello importer 2024-04-13 23:07:20 -06:00
f67ac05610 fix indentation 2024-04-13 23:05:03 -06:00
7803519d04 add major version value 2024-04-13 22:58:01 -06:00
55e63c2c72 fix minor formatting and remove uneeded values 2024-04-13 22:31:07 -06:00
6e083293bb fix minor formatting and remove uneeded values 2024-04-13 22:29:33 -06:00
60e427826c add taiga 2024-04-13 22:17:45 -06:00
f905b4ccfe change s3 env keys 2024-04-13 14:58:20 -06:00
487786455c change default credential secret name 2024-04-13 03:28:27 -06:00
585d39657a change how the cluster name is generated and used 2024-04-13 03:24:58 -06:00
e5e2812ed5 remove chart as functionality is now included in postgres-cluster chart 2024-04-13 02:46:38 -06:00
506218210e add replica import type 2024-04-13 02:41:46 -06:00
a7a08ef9f3 change chart to align with cnpg's chart 2024-04-13 02:06:02 -06:00
0fe94afd2a revert to redis image 2024-04-12 20:11:10 -06:00
renovate[bot]
73262aa60a Update homeassistant/home-assistant Docker tag to v2024.4.3 (#31)
* Update homeassistant/home-assistant Docker tag to v2024.4.3

* update chart

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Alex Lebens <alexanderlebens@gmail.com>
2024-04-12 17:42:37 -06:00
a322553210 update readme 2024-04-11 19:37:45 -06:00
09aae9e79d change default cluster size to 3 2024-04-11 19:35:05 -06:00
c72c25a74d upgrade default postgres version to 16.2 2024-04-11 19:33:20 -06:00
9c93b1dc4a bump chart version 2024-04-11 19:31:56 -06:00
cfd426f657 bump chart version 2024-04-11 19:31:46 -06:00
93f4991a05 remove default as storage class 2024-04-11 19:30:58 -06:00
ce0f3c7b07 remove default as storage class 2024-04-11 19:30:50 -06:00
58c5443de1 add postgres-cluster-upgrade chart 2024-04-11 19:05:18 -06:00
b3acbf3cbc update redis chart to 19 2024-04-11 18:04:23 -06:00
3270a3102b change redis image to use valkey 2024-04-11 18:03:44 -06:00
acc9710c72 update redis chart to 19 2024-04-11 18:03:23 -06:00
756ef9b0c6 upgrade elasticsearch chart version 2024-04-11 17:44:53 -06:00
renovate[bot]
8baec6fd41 Update bbilly1/tubearchivist Docker tag to v0.4.7 (#30)
* Update bbilly1/tubearchivist Docker tag to v0.4.7

* update chart

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Alex Lebens <alexanderlebens@gmail.com>
2024-04-11 17:38:23 -06:00
c1ab4afc46 add s3 accelerate env 2024-04-09 22:35:16 -06:00
bdcd63284a add url protocol to s3 endpoint 2024-04-09 20:16:43 -06:00
renovate[bot]
e8a951405d Update linuxserver/code-server Docker tag to v4.23.0 (#29)
* Update linuxserver/code-server Docker tag to v4.23.0

* update chart

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Alex Lebens <alexanderlebens@gmail.com>
2024-04-08 18:19:18 -06:00
renovate[bot]
93caa67bad Update ghcr.io/gethomepage/homepage Docker tag to v0.8.11 (#28)
* Update ghcr.io/gethomepage/homepage Docker tag to v0.8.11

* update chart

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Alex Lebens <alexanderlebens@gmail.com>
2024-04-08 11:30:31 -06:00
renovate[bot]
0dfaebdb7f Update homeassistant/home-assistant Docker tag to v2024.4.2 (#27)
* Update homeassistant/home-assistant Docker tag to v2024.4.2

* update chart

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Alex Lebens <alexanderlebens@gmail.com>
2024-04-08 11:28:09 -06:00
renovate[bot]
2f721343aa Update homeassistant/home-assistant Docker tag to v2024.4.1 (#26)
* Update homeassistant/home-assistant Docker tag to v2024.4.1

* update chart

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Alex Lebens <alexanderlebens@gmail.com>
2024-04-06 15:41:04 -06:00
270b62be53 add tubearchivist to jellyfin 2024-04-04 23:22:14 -06:00
0984e40cc8 remove workflow path 2024-04-04 14:53:35 -06:00
4e26a7c727 remove commands from bridges 2024-04-04 14:47:35 -06:00
17d146a444 add archive folder 2024-04-04 13:47:48 -06:00
323955129b add mautrix-whatsapp 2024-04-04 13:43:13 -06:00
d4eaeb7c21 add mautrix-discord 2024-04-04 13:36:39 -06:00
725e83af07 add port and host to database env 2024-04-04 11:48:22 -06:00
renovate[bot]
d58fbbd819 Update homeassistant/home-assistant Docker tag to v2024.4.0 (#25)
* Update homeassistant/home-assistant Docker tag to v2024.4.0

* update chart

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Alex Lebens <alexanderlebens@gmail.com>
2024-04-03 20:15:46 -06:00
bab4c95580 update image version to latest 2024-04-01 09:28:19 -06:00
536b133850 add widgets 2024-03-30 16:47:42 -06:00
ead44d21f7 fix service account name 2024-03-30 14:48:51 -06:00
ff7fb92c19 create tempalte name for passFile 2024-03-30 14:43:13 -06:00
46effc5599 fix secret field schema 2024-03-30 14:32:22 -06:00
0f7a0d658f fix naming 2024-03-30 14:27:29 -06:00
08b0782645 add service monitor 2024-03-30 14:22:55 -06:00
62 changed files with 4404 additions and 231 deletions

View File

@@ -1,6 +1,6 @@
 apiVersion: v2
 name: home-assistant
-version: 0.1.4
+version: 0.1.10
 description: Chart for Home Assistant
 keywords:
 - home-automation
@@ -9,4 +9,4 @@ sources:
 maintainers:
 - name: alexlebens
 icon: https://avatars.githubusercontent.com/u/13844975?s=200&v=4
-appVersion: v2024.3.3
+appVersion: v2024.4.3

View File

@@ -3,7 +3,7 @@ deployment:
 strategy: Recreate
 image:
 repository: homeassistant/home-assistant
-tag: 2024.3.3
+tag: 2024.4.3
 imagePullPolicy: IfNotPresent
 env:
 TZ: UTC
@@ -56,7 +56,7 @@ codeserver:
 enabled: false
 image:
 repository: linuxserver/code-server
-tag: 4.22.1
+tag: 4.23.1
 imagePullPolicy: IfNotPresent
 env:
 TZ: UTC

View File

@@ -1,6 +1,6 @@
 apiVersion: v2
 name: homepage
-version: 0.0.9
+version: 0.0.11
 description: Chart for benphelps homepage
 keywords:
 - dashboard
@@ -9,4 +9,4 @@ sources:
 maintainers:
 - name: alexlebens
 icon: https://github.com/benphelps/homepage/blob/de584eae8f12a0d257e554e9511ef19bd2a1232c/public/mstile-150x150.png
-appVersion: v0.8.10
+appVersion: v0.8.12

View File

@@ -3,7 +3,7 @@ deployment:
 strategy: Recreate
 image:
 repository: ghcr.io/gethomepage/homepage
-tag: v0.8.10
+tag: v0.8.12
 imagePullPolicy: IfNotPresent
 env:
 envFrom:

View File

@@ -1,6 +1,6 @@
 apiVersion: v2
 name: matrix-hookshot
-version: 0.0.3
+version: 0.1.1
 description: Chart for Matrix Hookshot
 keywords:
 - matrix
@@ -11,4 +11,4 @@ sources:
 maintainers:
 - name: alexlebens
 icon: https://avatars.githubusercontent.com/u/8418310?s=48&v=4
-appVersion: "5.2.1"
+appVersion: "5.3.0"

View File

@@ -30,3 +30,14 @@ Helper for passkey secret name
 {{- printf "matrix-hookshot-passkey-secret" }}
 {{- end }}
 {{- end }}
+{{/*
+Helper for passkey file name
+*/}}
+{{- define "hookshot.passFile" -}}
+{{- if .Values.hookshot.config.passFile }}
+{{- printf "%s" .Values.hookshot.config.passFile -}}
+{{- else }}
+{{- printf "passkey.pem" }}
+{{- end }}
+{{- end }}

View File

@@ -4,7 +4,7 @@ metadata:
 name: matrix-hookshot
 namespace: {{ .Release.Namespace }}
 labels:
-app.kubernetes.io/name: {{ .Release.Name }}
+app.kubernetes.io/name: matrix-hookshot
 app.kubernetes.io/instance: {{ .Release.Name }}
 app.kubernetes.io/version: {{ .Chart.AppVersion }}
 app.kubernetes.io/component: web
@@ -24,10 +24,10 @@ spec:
 app.kubernetes.io/name: matrix-hookshot
 app.kubernetes.io/instance: {{ .Release.Name }}
 spec:
-serviceAccountName: {{ .Release.Name }}
+serviceAccountName: matrix-hookshot
 automountServiceAccountToken: true
 containers:
-- name: {{ .Release.Name }}
+- name: matrix-hookshot
 image: "{{ .Values.deployment.image.repository }}:{{ .Values.deployment.image.tag }}"
 imagePullPolicy: {{ .Values.deployment.image.imagePullPolicy }}
 ports:
@@ -40,6 +40,9 @@ spec:
 - name: appservice
 containerPort: {{ .Values.service.appservice.port }}
 protocol: TCP
+- name: widgets
+containerPort: {{ .Values.service.widgets.port }}
+protocol: TCP
 env:
 {{- range $k,$v := .Values.deployment.env }}
 - name: {{ $k }}
@@ -61,13 +64,13 @@ spec:
 subPath: registration.yml
 readOnly: true
 - name: passkey
-mountPath: "/data/{{ .Values.hookshot.config.passFile }}"
-subPath: "{{ .Values.hookshot.config.passFile }}"
+mountPath: "/data/{{ template "hookshot.passFile" . }}"
+subPath: {{ template "hookshot.passFile" . }}
 readOnly: true
 volumes:
 - name: config
 secret:
-name: {{ template "hookshot.secretName" . }}
+secretName: {{ template "hookshot.secretName" . }}
 - name: registration
 secret:
 secretName: {{ template "hookshot.registrationSecretName" . }}

View File

@@ -2,10 +2,10 @@
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
-name: "{{ .Release.Name }}-webhook"
+name: matrix-hookshot-webhook
 namespace: {{ .Release.Namespace }}
 labels:
-app.kubernetes.io/name: "{{ .Release.Name }}-webhook"
+app.kubernetes.io/name: matrix-hookshot-webhook
 app.kubernetes.io/instance: {{ .Release.Name }}
 app.kubernetes.io/version: {{ .Chart.AppVersion }}
 app.kubernetes.io/component: web
@@ -36,10 +36,10 @@ spec:
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
-name: "{{ .Release.Name }}-appservice"
+name: matrix-hookshot-appservice
 namespace: {{ .Release.Namespace }}
 labels:
-app.kubernetes.io/name: "{{ .Release.Name }}-appservice"
+app.kubernetes.io/name: matrix-hookshot-appservice
 app.kubernetes.io/instance: {{ .Release.Name }}
 app.kubernetes.io/version: {{ .Chart.AppVersion }}
 app.kubernetes.io/component: web
@@ -64,3 +64,37 @@ spec:
 port:
 name: appservice
 {{- end }}
+---
+{{- if .Values.ingress.widgets.enabled }}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+name: matrix-hookshot-widgets
+namespace: {{ .Release.Namespace }}
+labels:
+app.kubernetes.io/name: matrix-hookshot-widgets
+app.kubernetes.io/instance: {{ .Release.Name }}
+app.kubernetes.io/version: {{ .Chart.AppVersion }}
+app.kubernetes.io/component: web
+app.kubernetes.io/part-of: {{ .Release.Name }}
+annotations:
+{{- toYaml .Values.ingress.widgets.annotations | nindent 4 }}
+spec:
+ingressClassName: {{ .Values.ingress.widgets.className }}
+tls:
+- hosts:
+- {{ .Values.ingress.widgets.host }}
+secretName: {{ .Release.Name }}-widgets-secret-tls
+rules:
+- host: {{ .Values.ingress.widgets.host }}
+http:
+paths:
+- path: /
+pathType: Prefix
+backend:
+service:
+name: {{ .Release.Name }}
+port:
+name: widgets
+{{- end }}

View File

@@ -1,9 +1,9 @@
 apiVersion: v1
 kind: Pod
 metadata:
-name: "{{ .Release.Name }}-test-connection"
+name: matrix-hookshot-test-connection
 labels:
-app.kubernetes.io/name: {{ .Release.Name }}
+app.kubernetes.io/name: matrix-hookshot-test-connection
 app.kubernetes.io/instance: {{ .Release.Name }}
 app.kubernetes.io/version: {{ .Chart.AppVersion }}
 app.kubernetes.io/component: web
@@ -16,7 +16,7 @@ spec:
 - name: wget
 image: busybox
 command: ['wget']
-args: ['{{ .Release.Name }}:{{ .Values.service.webhook.port }}']
+args: ['matrix-hookshot:{{ .Values.service.webhook.port }}']
 resources:
 limits:
 cpu: 500m

View File

@@ -1,10 +1,10 @@
 apiVersion: v1
 kind: ServiceAccount
 metadata:
-name: {{ .Release.Name }}
+name: matrix-hookshot
 namespace: {{ .Release.Namespace }}
 labels:
-app.kubernetes.io/name: {{ .Release.Name }}
+app.kubernetes.io/name: matrix-hookshot
 app.kubernetes.io/instance: {{ .Release.Name }}
 app.kubernetes.io/version: {{ .Chart.AppVersion }}
 app.kubernetes.io/component: web

View File

@@ -0,0 +1,23 @@
{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: matrix-hookshot
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: matrix-hookshot
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
endpoints:
- port: metrics
interval: {{ .Values.metrics.serviceMonitor.interval }}
scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
path: /metrics
selector:
matchLabels:
app.kubernetes.io/name: matrix-hookshot
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View File

@@ -1,10 +1,10 @@
 apiVersion: v1
 kind: Service
 metadata:
-name: {{ .Release.Name }}
+name: matrix-hookshot
 namespace: {{ .Release.Namespace }}
 labels:
-app.kubernetes.io/name: {{ .Release.Name }}
+app.kubernetes.io/name: matrix-hookshot
 app.kubernetes.io/instance: {{ .Release.Name }}
 app.kubernetes.io/version: {{ .Chart.AppVersion }}
 app.kubernetes.io/component: web
@@ -24,6 +24,10 @@ spec:
 targetPort: appservice
 protocol: TCP
 name: appservice
+- port: {{ .Values.service.widgets.port }}
+targetPort: widgets
+protocol: TCP
+name: widgets
 selector:
-app.kubernetes.io/name: {{ .Release.Name }}
+app.kubernetes.io/name: matrix-hookshot
 app.kubernetes.io/instance: {{ .Release.Name }}

View File

@@ -3,7 +3,7 @@ deployment:
 strategy: Recreate
 image:
 repository: halfshot/matrix-hookshot
-tag: "4.5.1"
+tag: "5.3.0"
 imagePullPolicy: IfNotPresent
 env: {}
 envFrom: []
@@ -22,6 +22,8 @@ service:
 port: 9001
 appservice:
 port: 9002
+widgets:
+port: 9003
 ingress:
 webhook:
 enabled: false
@@ -33,6 +35,17 @@ ingress:
 className: ""
 annotations: {}
 host: ""
+widgets:
+enabled: false
+className: ""
+annotations: {}
+host: ""
+metrics:
+enabled: false
+serviceMonitor:
+enabled: false
+interval: 15s
+scrapeTimeout: 5s
 # Reference the following for examples
 # https://matrix-org.github.io/matrix-hookshot/latest/setup/sample-configuration.html
@@ -63,12 +76,12 @@ hookshot:
 resources:
 - metrics
 - provisioning
-- port: 9002
+- port: 9003
 bindAddress: 0.0.0.0
 resources:
 - widgets
-#github:
+# github:
 # # (Optional) Configure this to enable GitHub support
 # auth:
 # # Authentication for the GitHub App.
@@ -91,7 +104,7 @@ hookshot:
 # # (Optional) Prefix used when creating ghost users for GitHub accounts.
 # _github_
-#gitlab:
+# gitlab:
 # # (Optional) Configure this to enable GitLab support
 # instances:
 # gitlab.com:
@@ -106,7 +119,7 @@ hookshot:
 # # (Optional) Aggregate comments by waiting this many miliseconds before posting them to Matrix. Defaults to 5000 (5 seconds)
 # 5000
-#figma:
+# figma:
 # # (Optional) Configure this to enable Figma support
 # publicUrl: https://example.com/hookshot/
 # instances:
@@ -115,7 +128,7 @@ hookshot:
 # accessToken: your-personal-access-token
 # passcode: your-webhook-passcode
-#jira:
+# jira:
 # # (Optional) Configure this to enable Jira support. Only specify `url` if you are using a On Premise install (i.e. not atlassian.com)
 # webhook:
 # # Webhook settings for JIRA
@@ -126,7 +139,7 @@ hookshot:
 # client_secret: bar
 # redirect_uri: https://example.com/oauth/
-#generic:
+# generic:
 # # (Optional) Support for generic webhook events.
 # #'allowJsTransformationFunctions' will allow users to write short transformation snippets in code, and thus is unsafe in untrusted environments
@@ -137,23 +150,23 @@ hookshot:
 # allowJsTransformationFunctions: false
 # waitForComplete: false
-#feeds:
+# feeds:
 # # (Optional) Configure this to enable RSS/Atom feed support
 # enabled: false
 # pollConcurrency: 4
 # pollIntervalSeconds: 600
 # pollTimeoutSeconds: 30
-#provisioning:
+# provisioning:
 # # (Optional) Provisioning API for integration managers
 # secret: "!secretToken"
-#bot:
+# bot:
 # # (Optional) Define profile information for the bot user
 # displayname: Hookshot Bot
 # avatar: mxc://half-shot.uk/2876e89ccade4cb615e210c458e2a7a6883fe17d
-#serviceBots:
+# serviceBots:
 # # (Optional) Define additional bot users for specific services
 # - localpart: feeds
 # displayname: Feeds
@@ -161,21 +174,21 @@ hookshot:
 # prefix: "!feeds"
 # service: feeds
-#metrics:
+# metrics:
 # # (Optional) Prometheus metrics support
 # enabled: true
-#cache:
+# cache:
 # # (Optional) Cache options for large scale deployments.
 # # For encryption to work, this must be configured.
 # redisUri: redis://localhost:6379
-#queue:
+# queue:
 # # (Optional) Message queue configuration options for large scale deployments.
 # # For encryption to work, this must not be configured.
 # redisUri: redis://localhost:6379
-#widgets:
+# widgets:
 # # (Optional) EXPERIMENTAL support for complimentary widgets
 # addToAdminRooms: false
 # disallowedIpRanges:
@@ -204,12 +217,12 @@ hookshot:
 # branding:
 # widgetTitle: Hookshot Configuration
-#sentry:
+# sentry:
 # # (Optional) Configure Sentry error reporting
 # dsn: https://examplePublicKey@o0.ingest.sentry.io/0
 # environment: production
-#permissions:
+# permissions:
 # # (Optional) Permissions for using the bridge. See docs/setup.md#permissions for help
 # - actor: example.com
 # services:

View File

@@ -0,0 +1,15 @@
apiVersion: v2
name: mautrix-discord
version: 0.0.2
description: Chart for Matrix Discord Bridge
keywords:
- matrix
- mautrix-discord
- bridge
- discord
sources:
- https://github.com/mautrix/discord
maintainers:
- name: alexlebens
icon: https://avatars.githubusercontent.com/u/88519669?s=48&v=4
appVersion: v0.6.5

View File

@@ -0,0 +1,41 @@
{{/*
Helper for secret name
*/}}
{{- define "mautrix-discord.secretName" -}}
{{- if .Values.mautrixDiscord.existingSecret }}
{{- printf "%s" .Values.mautrixDiscord.existingSecret -}}
{{- else }}
{{- printf "mautrix-discord-config-secret" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{/*
Helper for registration secret name
*/}}
{{- define "mautrix-discord.registrationSecretName" -}}
{{- if .Values.mautrixDiscord.existingRegistrationSecret }}
{{- printf "%s" .Values.mautrixDiscord.existingRegistrationSecret -}}
{{- else }}
{{- printf "mautrix-discord-registration-secret" }}
{{- end }}
{{- end }}
{{/*
Generate registration.yaml if not from existing secret
*/}}
{{- define "mautrix-discord.registration-yaml" -}}
id: {{ .Values.mautrixDiscord.config.appservice.id | quote }}
as_token: {{ .Values.mautrixDiscord.config.appservice.as_token | quote }}
hs_token: {{ .Values.mautrixDiscord.config.appservice.hs_token | quote }}
namespaces:
users:
- regex: {{ printf "^@discordbot:%s$" (replace "." "\\." .Values.mautrixDiscord.config.homeserver.domain) }}
exclusive: true
- regex: {{ printf "^@%s:%s$" (replace "{{.}}" ".*" (tpl .Values.mautrixDiscord.config.bridge.username_template .)) (replace "." "\\." .Values.mautrixDiscord.config.homeserver.domain) }}
exclusive: true
url: {{ .Values.mautrixDiscord.config.appservice.address | quote }}
sender_localpart: {{ .Values.mautrixDiscord.registration.sender_localpart | quote }}
rate_limited: {{ .Values.mautrixDiscord.registration.rate_limited }}
de.sorunome.msc2409.push_ephemeral: true
push_ephemeral: true
{{- end -}}

View File

@@ -0,0 +1,96 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mautrix-discord
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-discord
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
revisionHistoryLimit: 3
replicas: {{ .Values.deployment.replicas }}
strategy:
type: {{ .Values.deployment.strategy }}
selector:
matchLabels:
app.kubernetes.io/name: mautrix-discord
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: mautrix-discord
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
serviceAccountName: mautrix-discord
automountServiceAccountToken: true
containers:
- name: mautrix-discord
image: "{{ .Values.deployment.image.repository }}:{{ .Values.deployment.image.tag }}"
imagePullPolicy: {{ .Values.deployment.image.imagePullPolicy }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
env:
{{- range $k,$v := .Values.deployment.env }}
- name: {{ $k }}
value: {{ $v | quote }}
{{- end }}
{{- with .Values.deployment.envFrom }}
envFrom:
{{- toYaml . | nindent 12 }}
{{- end }}
livenessProbe:
httpGet:
path: /_matrix/mau/live
port: http
failureThreshold: {{ .Values.deployment.probes.liveness.failureThreshold }}
periodSeconds: {{ .Values.deployment.probes.liveness.periodSeconds }}
readinessProbe:
httpGet:
path: /_matrix/mau/ready
port: http
failureThreshold: {{ .Values.deployment.probes.readiness.failureThreshold }}
periodSeconds: {{ .Values.deployment.probes.readiness.periodSeconds }}
resources:
{{- toYaml .Values.deployment.resources | nindent 12 }}
volumeMounts:
- name: config
mountPath: /data/config.yaml
subPath: config.yaml
readOnly: true
- name: registration
mountPath: /data/registration.yaml
subPath: registration.yaml
readOnly: true
- name: data
mountPath: /data
volumes:
- name: config
secret:
secretName: {{ template "mautrix-discord.secretName" . }}
- name: registration
secret:
secretName: {{ template "mautrix-discord.registrationSecretName" . }}
- name: data
{{- if .Values.persistence.enabled }}
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default "mautrix-discord-data" }}
{{- else }}
emptyDir: {}
{{- end }}
{{- with .Values.deployment.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.deployment.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.deployment.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -0,0 +1,32 @@
{{- if .Values.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: mautrix-discord
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-discord
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
annotations:
{{- toYaml .Values.ingress.annotations | nindent 4 }}
spec:
ingressClassName: {{ .Values.ingress.className }}
tls:
- hosts:
- {{ .Values.ingress.host }}
secretName: {{ .Release.Name }}-secret-tls
rules:
- host: {{ .Values.ingress.host }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: mautrix-discord
port:
name: http
{{- end }}

View File

@@ -0,0 +1,26 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: mautrix-discord-data
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-discord
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
accessModes:
- {{ .Values.persistence.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClass }}
{{- if not .Values.persistence.storageClass }}
storageClassName: ""
{{- else }}
storageClassName: {{ .Values.persistence.storageClass | quote }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,33 @@
{{- if not .Values.mautrixDiscord.existingSecret }}
apiVersion: v1
kind: Secret
metadata:
name: mautrix-discord-config-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-discord-config
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
data:
config.yaml: |
{{ toYaml .Values.mautrixDiscord.config | indent 4 }}
{{- end }}
---
{{- if not .Values.mautrixDiscord.existingRegistrationSecret }}
apiVersion: v1
kind: Secret
metadata:
name: mautrix-discord-registration-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-discord-registration
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
data:
registration.yaml: {{ include "mautrix-discord.registration-yaml" . | b64enc | quote }}
{{- end }}

View File

@@ -0,0 +1,17 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: mautrix-discord
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-discord
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,24 @@
apiVersion: v1
kind: Service
metadata:
name: mautrix-discord
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-discord
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
type: {{ .Values.service.type }}
clusterIP: {{ .Values.service.clusterIP | quote }}
externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
publishNotReadyAddresses: {{ .Values.service.publishNotReadyAddresses }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: mautrix-discord
app.kubernetes.io/instance: {{ .Release.Name }}

View File

@@ -0,0 +1,427 @@
deployment:
replicas: 1
strategy: Recreate
image:
repository: dock.mau.dev/mautrix/discord
tag: v0.6.5
imagePullPolicy: IfNotPresent
env: {}
envFrom: []
resources:
limits:
memory: 128Mi
cpu: 200m
requests:
memory: 64Mi
cpu: 50m
probes:
liveness:
failureThreshold: 5
periodSeconds: 10
readiness:
failureThreshold: 5
periodSeconds: 10
nodeSelector: {}
tolerations: []
affinity: {}
serviceAccount:
create: true
annotations: {}
service:
type: ClusterIP
clusterIP: None
port: 29334
externalTrafficPolicy: ""
publishNotReadyAddresses: true
ingress:
enabled: false
className: ""
annotations: {}
host: ""
persistence:
enabled: false
existingClaim: ""
storageClass: ""
accessMode: ReadWriteOnce
size: 500Mi
# Reference the following for examples
# https://github.com/mautrix/discord/blob/main/example-config.yaml
mautrixDiscord:
# config.yml contents
existingSecret: ""
config:
# Homeserver details.
homeserver:
# The address that this appservice can use to connect to the homeserver.
address: https://matrix.example.com
# Publicly accessible base URL for media, used for avatars in relay mode.
# If not set, the connection address above will be used.
public_address: null
# The domain of the homeserver (also known as server_name, used for MXIDs, etc).
domain: example.com
# What software is the homeserver running?
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
software: standard
# The URL to push real-time bridge status to.
# If set, the bridge will make POST requests to this URL whenever a user's discord connection state changes.
# The bridge will use the appservice as_token to authorize requests.
status_endpoint: null
# Endpoint for reporting per-message status.
message_send_checkpoint_endpoint: null
# Does the homeserver support https://github.com/matrix-org/matrix-spec-proposals/pull/2246?
async_media: false
# Should the bridge use a websocket for connecting to the homeserver?
# The server side is currently not documented anywhere and is only implemented by mautrix-wsproxy,
# mautrix-asmux (deprecated), and hungryserv (proprietary).
websocket: false
# How often should the websocket be pinged? Pinging will be disabled if this is zero.
ping_interval_seconds: 0
# Application service host/registration related details.
# Changing these values requires regeneration of the registration.
appservice:
# The address that the homeserver can use to connect to this appservice.
address: http://localhost:29334
# The hostname and port where this appservice should listen.
hostname: 0.0.0.0
port: 29334
# Database config.
database:
# The database type. "sqlite3-fk-wal" and "postgres" are supported.
type: postgres
# The database URI.
# SQLite: A raw file path is supported, but `file:<path>?_txlock=immediate` is recommended.
# https://github.com/mattn/go-sqlite3#connection-string
# Postgres: Connection string. For example, postgres://user:password@host/database?sslmode=disable
# To connect via Unix socket, use something like postgres:///dbname?host=/var/run/postgresql
uri: postgres://user:password@host/database?sslmode=disable
# Maximum number of connections. Mostly relevant for Postgres.
max_open_conns: 20
max_idle_conns: 2
# Maximum connection idle time and lifetime before they're closed. Disabled if null.
# Parsed with https://pkg.go.dev/time#ParseDuration
max_conn_idle_time: null
max_conn_lifetime: null
# The unique ID of this appservice.
id: discord
# Appservice bot details.
bot:
# Username of the appservice bot.
username: discordbot
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
# to leave display name/avatar as-is.
displayname: Discord bridge bot
avatar: mxc://maunium.net/nIdEykemnwdisvHbpxflpDlC
# Whether or not to receive ephemeral events via appservice transactions.
# Requires MSC2409 support (i.e. Synapse 1.22+).
ephemeral_events: true
# Should incoming events be handled asynchronously?
# This may be necessary for large public instances with lots of messages going through.
# However, messages will not be guaranteed to be bridged in the same order they were sent in.
async_transactions: false
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
as_token: "This value is generated when generating the registration"
hs_token: "This value is generated when generating the registration"
# Bridge config
bridge:
# Localpart template of MXIDs for Discord users.
# {{.}} is replaced with the internal ID of the Discord user.
username_template: discord_{{.}}
# Displayname template for Discord users. This is also used as the room name in DMs if private_chat_portal_meta is enabled.
# Available variables:
# .ID - Internal user ID
# .Username - Legacy display/username on Discord
# .GlobalName - New displayname on Discord
# .Discriminator - The 4 numbers after the name on Discord
# .Bot - Whether the user is a bot
# .System - Whether the user is an official system user
# .Webhook - Whether the user is a webhook and is not an application
# .Application - Whether the user is an application
displayname_template: '{{or .GlobalName .Username}}{{if .Bot}} (bot){{end}}'
# Displayname template for Discord channels (bridged as rooms, or spaces when type=4).
# Available variables:
# .Name - Channel name, or user displayname (pre-formatted with displayname_template) in DMs.
# .ParentName - Parent channel name (used for categories).
# .GuildName - Guild name.
# .NSFW - Whether the channel is marked as NSFW.
# .Type - Channel type (see values at https://github.com/bwmarrin/discordgo/blob/v0.25.0/structs.go#L251-L267)
channel_name_template: '{{if or (eq .Type 3) (eq .Type 4)}}{{.Name}}{{else}}#{{.Name}}{{end}}'
# Displayname template for Discord guilds (bridged as spaces).
# Available variables:
# .Name - Guild name
guild_name_template: '{{.Name}}'
# Whether to explicitly set the avatar and room name for private chat portal rooms.
# If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms.
# If set to `always`, all DM rooms will have explicit names and avatars set.
# If set to `never`, DM rooms will never have names and avatars set.
private_chat_portal_meta: default
portal_message_buffer: 128
# Number of private channel portals to create on bridge startup.
# Other portals will be created when receiving messages.
startup_private_channel_create_limit: 5
# Should the bridge send a read receipt from the bridge bot when a message has been sent to Discord?
delivery_receipts: false
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
message_status_events: false
# Whether the bridge should send error notices via m.notice events when a message fails to bridge.
message_error_notices: true
# Should the bridge use space-restricted join rules instead of invite-only for guild rooms?
# This can avoid unnecessary invite events in guild rooms when members are synced in.
restricted_rooms: true
# Should the bridge automatically join the user to threads on Discord when the thread is opened on Matrix?
# This only works with clients that support thread read receipts (MSC3771 added in Matrix v1.4).
autojoin_thread_on_open: true
# Should inline fields in Discord embeds be bridged as HTML tables to Matrix?
# Tables aren't supported in all clients, but are the only way to emulate the Discord inline field UI.
embed_fields_as_tables: true
# Should guild channels be muted when the portal is created? This only meant for single-user instances,
# it won't mute it for all users if there are multiple Matrix users in the same Discord guild.
mute_channels_on_create: false
# Should the bridge update the m.direct account data event when double puppeting is enabled.
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
# and is therefore prone to race conditions.
sync_direct_chat_list: false
# Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run.
# This field will automatically be changed back to false after it, except if the config file is not writable.
resend_bridge_info: false
# Should incoming custom emoji reactions be bridged as mxc:// URIs?
# If set to false, custom emoji reactions will be bridged as the shortcode instead, and the image won't be available.
custom_emoji_reactions: true
# Should the bridge attempt to completely delete portal rooms when a channel is deleted on Discord?
# If true, the bridge will try to kick Matrix users from the room. Otherwise, the bridge only makes ghosts leave.
delete_portal_on_channel_delete: false
# Should the bridge delete all portal rooms when you leave a guild on Discord?
# This only applies if the guild has no other Matrix users on this bridge instance.
delete_guild_on_leave: true
# Whether or not created rooms should have federation enabled.
# If false, created portal rooms will never be federated.
federate_rooms: true
# Prefix messages from webhooks with the profile info? This can be used along with a custom displayname_template
# to better handle webhooks that change their name all the time (like ones used by bridges).
prefix_webhook_messages: false
# Bridge webhook avatars?
enable_webhook_avatars: true
# Should the bridge upload media to the Discord CDN directly before sending the message when using a user token,
# like the official client does? The other option is sending the media in the message send request as a form part
# (which is always used by bots and webhooks).
use_discord_cdn_upload: true
# Should mxc uris copied from Discord be cached?
# This can be `never` to never cache, `unencrypted` to only cache unencrypted mxc uris, or `always` to cache everything.
# If you have a media repo that generates non-unique mxc uris, you should set this to never.
cache_media: unencrypted
# Settings for converting Discord media to custom mxc:// URIs instead of reuploading.
# More details can be found at https://docs.mau.fi/bridges/go/discord/direct-media.html
direct_media:
# Should custom mxc:// URIs be used instead of reuploading media?
enabled: false
# The server name to use for the custom mxc:// URIs.
# This server name will effectively be a real Matrix server, it just won't implement anything other than media.
# You must either set up .well-known delegation from this domain to the bridge, or proxy the domain directly to the bridge.
server_name: discord-media.example.com
# Optionally a custom .well-known response. This defaults to `server_name:443`
well_known_response:
# The bridge supports MSC3860 media download redirects and will use them if the requester supports it.
# Optionally, you can force redirects and not allow proxying at all by setting this to false.
allow_proxy: true
# Matrix server signing key to make the federation tester pass, same format as synapse's .signing.key file.
# This key is also used to sign the mxc:// URIs to ensure only the bridge can generate them.
server_key: generate
# Settings for converting animated stickers.
animated_sticker:
# Format to which animated stickers should be converted.
# disable - No conversion, send as-is (lottie JSON)
# png - converts to non-animated png (fastest)
# gif - converts to animated gif
# webm - converts to webm video, requires ffmpeg executable with vp9 codec and webm container support
# webp - converts to animated webp, requires ffmpeg executable with webp codec/container support
target: webp
# Arguments for converter. All converters take width and height.
args:
width: 320
height: 320
fps: 25 # only for webm, webp and gif (2, 5, 10, 20 or 25 recommended)
# Servers to always allow double puppeting from
double_puppet_server_map:
example.com: https://example.com
# Allow using double puppeting from any server with a valid client .well-known file.
double_puppet_allow_discovery: false
# Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth
#
# If set, double puppeting will be enabled automatically for local users
# instead of users having to find an access token and run `login-matrix`
# manually.
login_shared_secret_map:
example.com: foobar
# The prefix for commands. Only required in non-management rooms.
command_prefix: '!discord'
# Messages sent upon joining a management room.
# Markdown is supported. The defaults are listed below.
management_room_text:
# Sent when joining a room.
welcome: "Hello, I'm a Discord bridge bot."
# Sent when joining a management room and the user is already logged in.
welcome_connected: "Use `help` for help."
# Sent when joining a management room and the user is not logged in.
welcome_unconnected: "Use `help` for help or `login` to log in."
# Optional extra text sent when joining a management room.
additional_help: ""
# Settings for backfilling messages.
backfill:
# Limits for forward backfilling.
forward_limits:
# Initial backfill (when creating portal). 0 means backfill is disabled.
# A special unlimited value is not supported, you must set a limit. Initial backfill will
# fetch all messages first before backfilling anything, so high limits can take a lot of time.
initial:
dm: 0
channel: 0
thread: 0
# Missed message backfill (on startup).
# 0 means backfill is disabled, -1 means fetch all messages since last bridged message.
# When using unlimited backfill (-1), messages are backfilled as they are fetched.
# With limits, all messages up to the limit are fetched first and backfilled afterwards.
missed:
dm: 0
channel: 0
thread: 0
# Maximum members in a guild to enable backfilling. Set to -1 to disable limit.
# This can be used as a rough heuristic to disable backfilling in channels that are too active.
# Currently only applies to missed message backfill.
max_guild_members: -1
# End-to-bridge encryption support options.
#
# See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
encryption:
# Allow encryption, work in group chat rooms with e2ee enabled
allow: false
# Default to encryption, force-enable encryption in all portals the bridge creates
# This will cause the bridge bot to be in private chats for the encryption to work properly.
default: false
# Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
appservice: false
# Require encryption, drop any unencrypted messages.
require: false
# Enable key sharing? If enabled, key requests for rooms where users are in will be fulfilled.
# You must use a client that supports requesting keys from other users to use this feature.
allow_key_sharing: false
# Should users mentions be in the event wire content to enable the server to send push notifications?
plaintext_mentions: false
# Options for deleting megolm sessions from the bridge.
delete_keys:
# Beeper-specific: delete outbound sessions when hungryserv confirms
# that the user has uploaded the key to key backup.
delete_outbound_on_ack: false
# Don't store outbound sessions in the inbound table.
dont_store_outbound: false
# Ratchet megolm sessions forward after decrypting messages.
ratchet_on_decrypt: false
# Delete fully used keys (index >= max_messages) after decrypting messages.
delete_fully_used_on_decrypt: false
# Delete previous megolm sessions from same device when receiving a new one.
delete_prev_on_new_session: false
# Delete megolm sessions received from a device when the device is deleted.
delete_on_device_delete: false
# Periodically delete megolm sessions when 2x max_age has passed since receiving the session.
periodically_delete_expired: false
# Delete inbound megolm sessions that don't have the received_at field used for
# automatic ratcheting and expired session deletion. This is meant as a migration
# to delete old keys prior to the bridge update.
delete_outdated_inbound: false
# What level of device verification should be required from users?
#
# Valid levels:
# unverified - Send keys to all device in the room.
# cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
# cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
# cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
# Note that creating user signatures from the bridge bot is not currently possible.
# verified - Require manual per-device verification
# (currently only possible by modifying the `trust` column in the `crypto_device` database table).
verification_levels:
# Minimum level for which the bridge should send keys to when bridging messages from WhatsApp to Matrix.
receive: unverified
# Minimum level that the bridge should accept for incoming Matrix messages.
send: unverified
# Minimum level that the bridge should require for accepting key requests.
share: cross-signed-tofu
# Options for Megolm room key rotation. These options allow you to
# configure the m.room.encryption event content. See:
# https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
# more information about that event.
rotation:
# Enable custom Megolm room key rotation settings. Note that these
# settings will only apply to rooms created after this option is
# set.
enable_custom: false
# The maximum number of milliseconds a session should be used
# before changing it. The Matrix spec recommends 604800000 (a week)
# as the default.
milliseconds: 604800000
# The maximum number of messages that should be sent with a given a
# session before changing it. The Matrix spec recommends 100 as the
# default.
messages: 100
# Disable rotating keys when a user's devices change?
# You should not enable this option unless you understand all the implications.
disable_device_change_key_rotation: false
# Settings for provisioning API
provisioning:
# Prefix for the provisioning API paths.
prefix: /_matrix/provision
# Shared secret for authentication. If set to "generate", a random secret will be generated,
# or if set to "disable", the provisioning API will be disabled.
shared_secret: generate
# Enable debug API at /debug with provisioning authentication.
debug_endpoints: false
# Permissions for using the bridge.
# Permitted values:
# relay - Talk through the relaybot (if enabled), no access otherwise
# user - Access to use the bridge to chat with a Discord account.
# admin - User level and some additional administration tools
# Permitted keys:
# * - All Matrix users
# domain - All users on that homeserver
# mxid - Specific user
permissions:
"*": relay
"example.com": user
"@admin:example.com": admin
# Logging config. See https://github.com/tulir/zeroconfig for details.
logging:
min_level: debug
writers:
- type: stdout
format: pretty-colored
- type: file
format: json
filename: ./logs/mautrix-discord.log
max_size: 100
max_backups: 10
compress: true
# registration.yml contents
existingRegistrationSecret: ""
registration:
rate_limited: false
sender_localpart: discordbridgebot

View File

@@ -0,0 +1,15 @@
apiVersion: v2
name: mautrix-whatsapp
version: 0.0.3
description: Chart for Matrix Whatsapp Bridge
keywords:
- matrix
- mautrix-whatsapp
- bridge
- whatsapp
sources:
- https://github.com/mautrix/whatsapp
maintainers:
- name: alexlebens
icon: https://avatars.githubusercontent.com/u/88519669?s=48&v=4
appVersion: v0.10.7

View File

@@ -0,0 +1,41 @@
{{/*
Helper for secret name
*/}}
{{- define "mautrix-whatsapp.secretName" -}}
{{- if .Values.mautrixWhatsapp.existingSecret }}
{{- printf "%s" .Values.mautrixWhatsapp.existingSecret -}}
{{- else }}
{{- printf "mautrix-whatsapp-config-secret" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{/*
Helper for registration secret name
*/}}
{{- define "mautrix-whatsapp.registrationSecretName" -}}
{{- if .Values.mautrixWhatsapp.existingRegistrationSecret }}
{{- printf "%s" .Values.mautrixWhatsapp.existingRegistrationSecret -}}
{{- else }}
{{- printf "mautrix-whatsapp-registration-secret" }}
{{- end }}
{{- end }}
{{/*
Generate registration.yaml if not from existing secret
*/}}
{{- define "mautrix-whatsapp.registration-yaml" -}}
id: {{ .Values.mautrixWhatsapp.config.appservice.id | quote }}
as_token: {{ .Values.mautrixWhatsapp.config.appservice.as_token | quote }}
hs_token: {{ .Values.mautrixWhatsapp.config.appservice.hs_token | quote }}
namespaces:
users:
- regex: {{ printf "^@whatsappbot:%s$" (replace "." "\\." .Values.mautrixWhatsapp.config.homeserver.domain) }}
exclusive: true
- regex: {{ printf "^@%s:%s$" (replace "{{.}}" ".*" (tpl .Values.mautrixWhatsapp.config.bridge.username_template .)) (replace "." "\\." .Values.mautrixWhatsapp.config.homeserver.domain) }}
exclusive: true
url: {{ .Values.mautrixWhatsapp.config.appservice.address | quote }}
sender_localpart: {{ .Values.mautrixWhatsapp.registration.sender_localpart | quote }}
rate_limited: {{ .Values.mautrixWhatsapp.registration.rate_limited }}
de.sorunome.msc2409.push_ephemeral: true
push_ephemeral: true
{{- end -}}

View File

@@ -0,0 +1,96 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mautrix-whatsapp
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-whatsapp
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
revisionHistoryLimit: 3
replicas: {{ .Values.deployment.replicas }}
strategy:
type: {{ .Values.deployment.strategy }}
selector:
matchLabels:
app.kubernetes.io/name: mautrix-whatsapp
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: mautrix-whatsapp
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
serviceAccountName: mautrix-whatsapp
automountServiceAccountToken: true
containers:
- name: mautrix-whatsapp
image: "{{ .Values.deployment.image.repository }}:{{ .Values.deployment.image.tag }}"
imagePullPolicy: {{ .Values.deployment.image.imagePullPolicy }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
env:
{{- range $k,$v := .Values.deployment.env }}
- name: {{ $k }}
value: {{ $v | quote }}
{{- end }}
{{- with .Values.deployment.envFrom }}
envFrom:
{{- toYaml . | nindent 12 }}
{{- end }}
livenessProbe:
httpGet:
path: /_matrix/mau/live
port: http
failureThreshold: {{ .Values.deployment.probes.liveness.failureThreshold }}
periodSeconds: {{ .Values.deployment.probes.liveness.periodSeconds }}
readinessProbe:
httpGet:
path: /_matrix/mau/ready
port: http
failureThreshold: {{ .Values.deployment.probes.readiness.failureThreshold }}
periodSeconds: {{ .Values.deployment.probes.readiness.periodSeconds }}
resources:
{{- toYaml .Values.deployment.resources | nindent 12 }}
volumeMounts:
- name: config
mountPath: /data/config.yaml
subPath: config.yaml
readOnly: true
- name: registration
mountPath: /data/registration.yaml
subPath: registration.yaml
readOnly: true
- name: data
mountPath: /data
volumes:
- name: config
secret:
secretName: {{ template "mautrix-whatsapp.secretName" . }}
- name: registration
secret:
secretName: {{ template "mautrix-whatsapp.registrationSecretName" . }}
- name: data
{{- if .Values.persistence.enabled }}
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default "mautrix-whatsapp-data" }}
{{- else }}
emptyDir: {}
{{- end }}
{{- with .Values.deployment.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.deployment.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.deployment.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
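
The env map is rendered into name/value pairs and envFrom is passed through verbatim, so extra settings can be injected without editing the template. An illustrative sketch (the secret name is hypothetical):

deployment:
  env:
    TZ: Etc/UTC
  envFrom:
    - secretRef:
        name: mautrix-whatsapp-extra-env  # hypothetical secret with additional variables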

View File

@@ -0,0 +1,32 @@
{{- if .Values.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: mautrix-whatsapp
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-whatsapp
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
annotations:
{{- toYaml .Values.ingress.annotations | nindent 4 }}
spec:
ingressClassName: {{ .Values.ingress.className }}
tls:
- hosts:
- {{ .Values.ingress.host }}
secretName: {{ .Release.Name }}-secret-tls
rules:
- host: {{ .Values.ingress.host }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: mautrix-whatsapp
port:
name: http
{{- end }}
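
Enabling the ingress only requires a host and class; the template derives the TLS secret name from the release. A hedged example, where the class, host and issuer annotation are illustrative:

ingress:
  enabled: true
  className: nginx                                # illustrative ingress class
  host: whatsapp-bridge.example.com               # illustrative host
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt   # illustrative; annotations are passed through as-is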

View File

@@ -0,0 +1,26 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: mautrix-whatsapp-data
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-whatsapp
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
accessModes:
- {{ .Values.persistence.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClass }}
storageClassName: {{ .Values.persistence.storageClass | quote }}
{{- end }}
{{- end }}
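
Persistence is off by default; when enabled the chart either creates the mautrix-whatsapp-data claim above or reuses an existing one. A sketch of the relevant values (the storage class is a placeholder):

persistence:
  enabled: true
  size: 1Gi
  storageClass: ceph-block    # placeholder; leave empty to use the cluster default
  # existingClaim: my-claim   # set this to skip PVC creation and mount an existing claim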

View File

@@ -0,0 +1,33 @@
{{- if not .Values.mautrixWhatsapp.existingSecret }}
apiVersion: v1
kind: Secret
metadata:
name: mautrix-whatsapp-config-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-whatsapp-config
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
stringData:
config.yaml: |
{{ toYaml .Values.mautrixWhatsapp.config | indent 4 }}
{{- end }}
---
{{- if not .Values.mautrixWhatsapp.existingRegistrationSecret }}
apiVersion: v1
kind: Secret
metadata:
name: mautrix-whatsapp-registration-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-whatsapp-registration
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
data:
registration.yaml: {{ include "mautrix-whatsapp.registration-yaml" . | b64enc | quote }}
{{- end }}

View File

@@ -0,0 +1,17 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: mautrix-whatsapp
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-whatsapp
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,24 @@
apiVersion: v1
kind: Service
metadata:
name: mautrix-whatsapp
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: mautrix-whatsapp
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
type: {{ .Values.service.type }}
clusterIP: {{ .Values.service.clusterIP | quote }}
externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
publishNotReadyAddresses: {{ .Values.service.publishNotReadyAddresses }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: mautrix-whatsapp
app.kubernetes.io/instance: {{ .Release.Name }}
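
With the default values this renders as a headless service (clusterIP: None). If a regular ClusterIP is preferred, the value can be overridden, for example:

service:
  type: ClusterIP
  clusterIP: ""        # empty string lets Kubernetes allocate a normal cluster IP
  port: 29334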

View File

@@ -0,0 +1,532 @@
deployment:
replicas: 1
strategy: Recreate
image:
repository: dock.mau.dev/mautrix/whatsapp
tag: v0.10.7
imagePullPolicy: IfNotPresent
env: {}
envFrom: []
resources:
limits:
memory: 128Mi
cpu: 200m
requests:
memory: 64Mi
cpu: 50m
probes:
liveness:
failureThreshold: 5
periodSeconds: 10
readiness:
failureThreshold: 5
periodSeconds: 10
nodeSelector: {}
tolerations: []
affinity: {}
serviceAccount:
create: true
annotations: {}
service:
type: ClusterIP
clusterIP: None
port: 29334
externalTrafficPolicy: ""
publishNotReadyAddresses: true
ingress:
enabled: false
className: ""
annotations: {}
host: ""
persistence:
enabled: false
existingClaim: ""
storageClass: ""
accessMode: ReadWriteOnce
size: 500Mi
# Reference the following for examples
# https://github.com/mautrix/whatsapp/blob/main/example-config.yaml
mautrixWhatsapp:
# config.yml contents
existingSecret: ""
config:
# Homeserver details.
homeserver:
# The address that this appservice can use to connect to the homeserver.
address: https://matrix.example.com
# The domain of the homeserver (also known as server_name, used for MXIDs, etc).
domain: example.com
# What software is the homeserver running?
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
software: standard
# The URL to push real-time bridge status to.
# If set, the bridge will make POST requests to this URL whenever a user's whatsapp connection state changes.
# The bridge will use the appservice as_token to authorize requests.
status_endpoint: null
# Endpoint for reporting per-message status.
message_send_checkpoint_endpoint: null
# Does the homeserver support https://github.com/matrix-org/matrix-spec-proposals/pull/2246?
async_media: false
# Should the bridge use a websocket for connecting to the homeserver?
# The server side is currently not documented anywhere and is only implemented by mautrix-wsproxy,
# mautrix-asmux (deprecated), and hungryserv (proprietary).
websocket: false
# How often should the websocket be pinged? Pinging will be disabled if this is zero.
ping_interval_seconds: 0
# Application service host/registration related details.
# Changing these values requires regeneration of the registration.
appservice:
# The address that the homeserver can use to connect to this appservice.
address: http://localhost:29318
# The hostname and port where this appservice should listen.
hostname: 0.0.0.0
port: 29318
# Database config.
database:
# The database type. "sqlite3-fk-wal" and "postgres" are supported.
type: postgres
# The database URI.
# SQLite: A raw file path is supported, but `file:<path>?_txlock=immediate` is recommended.
# https://github.com/mattn/go-sqlite3#connection-string
# Postgres: Connection string. For example, postgres://user:password@host/database?sslmode=disable
# To connect via Unix socket, use something like postgres:///dbname?host=/var/run/postgresql
uri: postgres://user:password@host/database?sslmode=disable
# Maximum number of connections. Mostly relevant for Postgres.
max_open_conns: 20
max_idle_conns: 2
# Maximum connection idle time and lifetime before they're closed. Disabled if null.
# Parsed with https://pkg.go.dev/time#ParseDuration
max_conn_idle_time: null
max_conn_lifetime: null
# The unique ID of this appservice.
id: whatsapp
# Appservice bot details.
bot:
# Username of the appservice bot.
username: whatsappbot
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
# to leave display name/avatar as-is.
displayname: WhatsApp bridge bot
avatar: mxc://maunium.net/NeXNQarUbrlYBiPCpprYsRqr
# Whether or not to receive ephemeral events via appservice transactions.
# Requires MSC2409 support (i.e. Synapse 1.22+).
ephemeral_events: true
# Should incoming events be handled asynchronously?
# This may be necessary for large public instances with lots of messages going through.
# However, messages will not be guaranteed to be bridged in the same order they were sent in.
async_transactions: false
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
as_token: "This value is generated when generating the registration"
hs_token: "This value is generated when generating the registration"
# Segment-compatible analytics endpoint for tracking some events, like provisioning API login and encryption errors.
analytics:
# Hostname of the tracking server. The path is hardcoded to /v1/track
host: api.segment.io
# API key to send with tracking requests. Tracking is disabled if this is null.
token: null
# Optional user ID for tracking events. If null, defaults to using Matrix user ID.
user_id: null
# Prometheus config.
metrics:
# Enable prometheus metrics?
enabled: false
# IP and port where the metrics listener should be. The path is always /metrics
listen: 127.0.0.1:8001
# Config for things that are directly sent to WhatsApp.
whatsapp:
# Device name that's shown in the "WhatsApp Web" section in the mobile app.
os_name: Mautrix-WhatsApp bridge
# Browser name that determines the logo shown in the mobile app.
# Must be "unknown" for a generic icon or a valid browser name if you want a specific icon.
# List of valid browser names: https://github.com/tulir/whatsmeow/blob/efc632c008604016ddde63bfcfca8de4e5304da9/binary/proto/def.proto#L43-L64
browser_name: unknown
# Bridge config
bridge:
# Localpart template of MXIDs for WhatsApp users.
# {{.}} is replaced with the phone number of the WhatsApp user.
username_template: whatsapp_{{.}}
# Displayname template for WhatsApp users.
# {{.PushName}} - nickname set by the WhatsApp user
# {{.BusinessName}} - validated WhatsApp business name
# {{.Phone}} - phone number (international format)
# The following variables are also available, but will cause problems on multi-user instances:
# {{.FullName}} - full name from contact list
# {{.FirstName}} - first name from contact list
displayname_template: "{{or .BusinessName .PushName .JID}} (WA)"
# Should the bridge create a space for each logged-in user and add bridged rooms to it?
# Users who logged in before turning this on should run `!wa sync space` to create and fill the space for the first time.
personal_filtering_spaces: false
# Should the bridge send a read receipt from the bridge bot when a message has been sent to WhatsApp?
delivery_receipts: false
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
message_status_events: false
# Whether the bridge should send error notices via m.notice events when a message fails to bridge.
message_error_notices: true
# Should incoming calls send a message to the Matrix room?
call_start_notices: true
# Should another user's cryptographic identity changing send a message to Matrix?
identity_change_notices: false
portal_message_buffer: 128
# Settings for handling history sync payloads.
history_sync:
# Enable backfilling history sync payloads from WhatsApp?
backfill: true
# The maximum number of initial conversations that should be synced.
# Other conversations will be backfilled on demand when receiving a message or when initiating a direct chat.
max_initial_conversations: -1
# Maximum number of messages to backfill in each conversation.
# Set to -1 to disable limit.
message_count: 50
# Should the bridge request a full sync from the phone when logging in?
# This bumps the size of history syncs from 3 months to 1 year.
request_full_sync: false
# Configuration parameters that are sent to the phone along with the request full sync flag.
# By default (when the values are null or 0), the config isn't sent at all.
full_sync_config:
# Number of days of history to request.
# The limit seems to be around 3 years, but using higher values doesn't break.
days_limit: null
# This is presumably the maximum size of the transferred history sync blob, which may affect what the phone includes in the blob.
size_mb_limit: null
# This is presumably the local storage quota, which may affect what the phone includes in the history sync blob.
storage_quota_mb: null
# If this value is greater than 0, then if the conversation's last message was more than
# this number of hours ago, then the conversation will automatically be marked as read.
# Conversations that have a last message that is less than this number of hours ago will
# have their unread status synced from WhatsApp.
unread_hours_threshold: 0
###############################################################################
# The settings below are only applicable for backfilling using batch sending, #
# which is no longer supported in Synapse. #
###############################################################################
# Settings for media requests. If the media expired, then it will not be on the WA servers.
# Media can always be requested by reacting with the ♻️ (recycle) emoji.
# These settings determine if the media requests should be done automatically during or after backfill.
media_requests:
# Should expired media be automatically requested from the server as part of the backfill process?
auto_request_media: true
# Whether to request the media immediately after the media message is backfilled ("immediate")
# or at a specific time of the day ("local_time").
request_method: immediate
# If request_method is "local_time", what time should the requests be sent (in minutes after midnight)?
request_local_time: 120
# Settings for immediate backfills. These backfills should generally be small and their main purpose is
# to populate each of the initial chats (as configured by max_initial_conversations) with a few messages
# so that you can continue conversations without losing context.
immediate:
# The number of concurrent backfill workers to create for immediate backfills.
# Note that using more than one worker could cause the room list to jump around
# since there are no guarantees about the order in which the backfills will complete.
worker_count: 1
# The maximum number of events to backfill initially.
max_events: 10
# Settings for deferred backfills. The purpose of these backfills is to fill in the rest of
# the chat history that was not covered by the immediate backfills.
# These backfills generally should happen at a slower pace so as not to overload the homeserver.
# Each deferred backfill config should define a "stage" of backfill (i.e. the last week of messages).
# The fields are as follows:
# - start_days_ago: the number of days ago to start backfilling from.
# To indicate the start of time, use -1. For example, for a week ago, use 7.
# - max_batch_events: the number of events to send per batch.
# - batch_delay: the number of seconds to wait before backfilling each batch.
deferred:
# Last Week
- start_days_ago: 7
max_batch_events: 20
batch_delay: 5
# Last Month
- start_days_ago: 30
max_batch_events: 50
batch_delay: 10
# Last 3 months
- start_days_ago: 90
max_batch_events: 100
batch_delay: 10
# The start of time
- start_days_ago: -1
max_batch_events: 500
batch_delay: 10
# Should puppet avatars be fetched from the server even if an avatar is already set?
user_avatar_sync: true
# Should Matrix users leaving groups be bridged to WhatsApp?
bridge_matrix_leave: true
# Should the bridge update the m.direct account data event when double puppeting is enabled.
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
# and is therefore prone to race conditions.
sync_direct_chat_list: false
# Should the bridge use MSC2867 to bridge manual "mark as unread"s from
# WhatsApp and set the unread status on initial backfill?
# This will only work on clients that support the m.marked_unread or
# com.famedly.marked_unread room account data.
sync_manual_marked_unread: true
# When double puppeting is enabled, users can use `!wa toggle` to change whether
# presence is bridged. This setting sets the default value.
# Existing users won't be affected when these are changed.
default_bridge_presence: true
# Send the presence as "available" to whatsapp when users start typing on a portal.
# This works as a workaround for homeservers that do not support presence, and allows
# users to see when the whatsapp user on the other side is typing during a conversation.
send_presence_on_typing: false
# Should the bridge always send "active" delivery receipts (two gray ticks on WhatsApp)
# even if the user isn't marked as online (e.g. when presence bridging isn't enabled)?
#
# By default, the bridge acts like WhatsApp web, which only sends active delivery
# receipts when it's in the foreground.
force_active_delivery_receipts: false
# Servers to always allow double puppeting from
double_puppet_server_map:
example.com: https://example.com
# Allow using double puppeting from any server with a valid client .well-known file.
double_puppet_allow_discovery: false
# Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth
#
# If set, double puppeting will be enabled automatically for local users
# instead of users having to find an access token and run `login-matrix`
# manually.
login_shared_secret_map:
example.com: foobar
# Whether to explicitly set the avatar and room name for private chat portal rooms.
# If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms.
# If set to `always`, all DM rooms will have explicit names and avatars set.
# If set to `never`, DM rooms will never have names and avatars set.
private_chat_portal_meta: default
# Should group members be synced in parallel? This makes member sync faster
parallel_member_sync: false
# Should Matrix m.notice-type messages be bridged?
bridge_notices: true
# Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run.
# This field will automatically be changed back to false after it, except if the config file is not writable.
resend_bridge_info: false
# When using double puppeting, should muted chats be muted in Matrix?
mute_bridging: false
# When using double puppeting, should archived chats be moved to a specific tag in Matrix?
# Note that WhatsApp unarchives chats when a message is received, which will also be mirrored to Matrix.
# This can be set to a tag (e.g. m.lowpriority), or null to disable.
archive_tag: null
# Same as above, but for pinned chats. The favorite tag is called m.favourite
pinned_tag: null
# Should mute status and tags only be bridged when the portal room is created?
tag_only_on_create: true
# Should WhatsApp status messages be bridged into a Matrix room?
# Disabling this won't affect already created status broadcast rooms.
enable_status_broadcast: true
# Should sending WhatsApp status messages be allowed?
# This can cause issues if the user has lots of contacts, so it's disabled by default.
disable_status_broadcast_send: true
# Should the status broadcast room be muted and moved into low priority by default?
# This is only applied when creating the room, the user can unmute it later.
mute_status_broadcast: true
# Tag to apply to the status broadcast room.
status_broadcast_tag: m.lowpriority
# Should the bridge use thumbnails from WhatsApp?
# They're disabled by default due to very low resolution.
whatsapp_thumbnail: false
# Allow the user to invite other users and bots into rooms with WhatsApp users
# (private chats and groups).
allow_user_invite: false
# Whether or not created rooms should have federation enabled.
# If false, created portal rooms will never be federated.
federate_rooms: true
# Should the bridge never send alerts to the bridge management room?
# These are mostly things like the user being logged out.
disable_bridge_alerts: false
# Should the bridge stop if the WhatsApp server says another user connected with the same session?
# This is only safe on single-user bridges.
crash_on_stream_replaced: false
# Should the bridge detect URLs in outgoing messages, ask the homeserver to generate a preview,
# and send it to WhatsApp? URL previews can always be sent using the `com.beeper.linkpreviews`
# key in the event content even if this is disabled.
url_previews: false
# Send captions in the same message as images. This will send data compatible with both MSC2530 and MSC3552.
# This is currently not supported in most clients.
caption_in_message: false
# Send galleries as a single event? This is not an MSC (yet).
beeper_galleries: false
# Should polls be sent using MSC3381 event types?
extev_polls: false
# Should cross-chat replies from WhatsApp be bridged? Most servers and clients don't support this.
cross_room_replies: false
# Disable generating reply fallbacks? Some extremely bad clients still rely on them,
# but they're being phased out and will be completely removed in the future.
disable_reply_fallbacks: false
# Maximum time for handling Matrix events. Duration strings formatted for https://pkg.go.dev/time#ParseDuration
# Null means there's no enforced timeout.
message_handling_timeout:
# Send an error message after this timeout, but keep waiting for the response until the deadline.
# This is counted from the origin_server_ts, so the warning time is consistent regardless of the source of delay.
# If the message is older than this when it reaches the bridge, the message won't be handled at all.
error_after: null
# Drop messages after this timeout. They may still go through if the message got sent to the servers.
# This is counted from the time the bridge starts handling the message.
deadline: 120s
# The prefix for commands. Only required in non-management rooms.
command_prefix: "!wa"
# Messages sent upon joining a management room.
# Markdown is supported. The defaults are listed below.
management_room_text:
# Sent when joining a room.
welcome: "Hello, I'm a WhatsApp bridge bot."
# Sent when joining a management room and the user is already logged in.
welcome_connected: "Use `help` for help."
# Sent when joining a management room and the user is not logged in.
welcome_unconnected: "Use `help` for help or `login` to log in."
# Optional extra text sent when joining a management room.
additional_help: ""
# End-to-bridge encryption support options.
#
# See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
encryption:
# Allow encryption, work in group chat rooms with e2ee enabled
allow: false
# Default to encryption, force-enable encryption in all portals the bridge creates
# This will cause the bridge bot to be in private chats for the encryption to work properly.
default: false
# Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
appservice: false
# Require encryption, drop any unencrypted messages.
require: false
# Enable key sharing? If enabled, key requests for rooms where users are in will be fulfilled.
# You must use a client that supports requesting keys from other users to use this feature.
allow_key_sharing: false
# Should user mentions be in the event wire content to enable the server to send push notifications?
plaintext_mentions: false
# Options for deleting megolm sessions from the bridge.
delete_keys:
# Beeper-specific: delete outbound sessions when hungryserv confirms
# that the user has uploaded the key to key backup.
delete_outbound_on_ack: false
# Don't store outbound sessions in the inbound table.
dont_store_outbound: false
# Ratchet megolm sessions forward after decrypting messages.
ratchet_on_decrypt: false
# Delete fully used keys (index >= max_messages) after decrypting messages.
delete_fully_used_on_decrypt: false
# Delete previous megolm sessions from same device when receiving a new one.
delete_prev_on_new_session: false
# Delete megolm sessions received from a device when the device is deleted.
delete_on_device_delete: false
# Periodically delete megolm sessions when 2x max_age has passed since receiving the session.
periodically_delete_expired: false
# Delete inbound megolm sessions that don't have the received_at field used for
# automatic ratcheting and expired session deletion. This is meant as a migration
# to delete old keys prior to the bridge update.
delete_outdated_inbound: false
# What level of device verification should be required from users?
#
# Valid levels:
# unverified - Send keys to all devices in the room.
# cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
# cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
# cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
# Note that creating user signatures from the bridge bot is not currently possible.
# verified - Require manual per-device verification
# (currently only possible by modifying the `trust` column in the `crypto_device` database table).
verification_levels:
# Minimum level for which the bridge should send keys to when bridging messages from WhatsApp to Matrix.
receive: unverified
# Minimum level that the bridge should accept for incoming Matrix messages.
send: unverified
# Minimum level that the bridge should require for accepting key requests.
share: cross-signed-tofu
# Options for Megolm room key rotation. These options allow you to
# configure the m.room.encryption event content. See:
# https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
# more information about that event.
rotation:
# Enable custom Megolm room key rotation settings. Note that these
# settings will only apply to rooms created after this option is
# set.
enable_custom: false
# The maximum number of milliseconds a session should be used
# before changing it. The Matrix spec recommends 604800000 (a week)
# as the default.
milliseconds: 604800000
# The maximum number of messages that should be sent with a given
# session before changing it. The Matrix spec recommends 100 as the
# default.
messages: 100
# Disable rotating keys when a user's devices change?
# You should not enable this option unless you understand all the implications.
disable_device_change_key_rotation: false
# Settings for provisioning API
provisioning:
# Prefix for the provisioning API paths.
prefix: /_matrix/provision
# Shared secret for authentication. If set to "generate", a random secret will be generated,
# or if set to "disable", the provisioning API will be disabled.
shared_secret: generate
# Enable debug API at /debug with provisioning authentication.
debug_endpoints: false
# Permissions for using the bridge.
# Permitted values:
# relay - Talk through the relaybot (if enabled), no access otherwise
# user - Access to use the bridge to chat with a WhatsApp account.
# admin - User level and some additional administration tools
# Permitted keys:
# * - All Matrix users
# domain - All users on that homeserver
# mxid - Specific user
permissions:
"*": relay
"example.com": user
"@admin:example.com": admin
# Settings for relay mode
relay:
# Whether relay mode should be allowed. If allowed, `!wa set-relay` can be used to turn any
# authenticated user into a relaybot for that chat.
enabled: false
# Should only admins be allowed to set themselves as relay users?
admin_only: true
# The formats to use when sending messages to WhatsApp via the relaybot.
message_formats:
m.text: "<b>{{ .Sender.Displayname }}</b>: {{ .Message }}"
m.notice: "<b>{{ .Sender.Displayname }}</b>: {{ .Message }}"
m.emote: "* <b>{{ .Sender.Displayname }}</b> {{ .Message }}"
m.file: "<b>{{ .Sender.Displayname }}</b> sent a file"
m.image: "<b>{{ .Sender.Displayname }}</b> sent an image"
m.audio: "<b>{{ .Sender.Displayname }}</b> sent an audio file"
m.video: "<b>{{ .Sender.Displayname }}</b> sent a video"
m.location: "<b>{{ .Sender.Displayname }}</b> sent a location"
# Logging config. See https://github.com/tulir/zeroconfig for details.
logging:
min_level: debug
writers:
- type: stdout
format: pretty-colored
- type: file
format: json
filename: ./logs/mautrix-whatsapp.log
max_size: 100
max_backups: 10
compress: true
# registration.yml contents
existingRegistrationSecret: ""
registration:
rate_limited: false
sender_localpart: whatsappbridgebot
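
As a usage sketch, a minimal override for a real deployment would at least set the homeserver, tokens, database and permissions. Every host, token and connection string below is a placeholder, not a chart default:

mautrixWhatsapp:
  config:
    homeserver:
      address: https://matrix.example.org
      domain: example.org
    appservice:
      as_token: "<token from the generated registration>"
      hs_token: "<token from the generated registration>"
      database:
        uri: postgres://bridge:password@postgres.example.svc:5432/mautrix-whatsapp?sslmode=disable
    bridge:
      permissions:
        "example.org": user
        "@admin:example.org": admin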

View File

@@ -1,6 +1,6 @@
 apiVersion: v2
 name: outline
-version: 0.0.9
+version: 0.5.1
 description: Chart for Outline wiki
 keywords:
 - wiki
@@ -14,5 +14,5 @@ icon: https://avatars.githubusercontent.com/u/1765001?s=48&v=4
 dependencies:
 - name: redis
 repository: https://charts.bitnami.com/bitnami
-version: 18.19.4
+version: 19.1.1
 appVersion: v0.75.2

View File

@@ -51,16 +51,16 @@ spec:
 secretKeyRef:
 name: "{{ .Values.outline.utilsSecret.existingSecretName }}"
 key: "{{ .Values.outline.secretKey.existingSecretKey }}"
-- name: POSTGRES_PASSWORD
-valueFrom:
-secretKeyRef:
-name: "{{ .Values.outline.database.passwordSecret.existingSecretName }}"
-key: "{{ .Values.outline.database.passwordSecret.existingSecretKey }}"
 - name: POSTGRES_USERNAME
 valueFrom:
 secretKeyRef:
 name: "{{ .Values.outline.database.usernameSecret.existingSecretName }}"
 key: "{{ .Values.outline.database.usernameSecret.existingSecretKey }}"
+- name: POSTGRES_PASSWORD
+valueFrom:
+secretKeyRef:
+name: "{{ .Values.outline.database.passwordSecret.existingSecretName }}"
+key: "{{ .Values.outline.database.passwordSecret.existingSecretKey }}"
 - name: POSTGRES_DATABASE_NAME
 valueFrom:
 secretKeyRef:
@@ -71,10 +71,15 @@ spec:
 secretKeyRef:
 name: "{{ .Values.outline.database.databaseHost.existingSecretName }}"
 key: "{{ .Values.outline.database.databaseHost.existingSecretKey }}"
+- name: POSTGRES_DATABASE_PORT
+valueFrom:
+secretKeyRef:
+name: "{{ .Values.outline.database.databasePort.existingSecretName }}"
+key: "{{ .Values.outline.database.databasePort.existingSecretKey }}"
 - name: DATABASE_URL
-value: "postgres://$(POSTGRES_USERNAME):$(POSTGRES_PASSWORD)@postgresql-{{ .Release.Name }}-cluster-rw:5432/$(POSTGRES_DATABASE_NAME)"
+value: "postgres://$(POSTGRES_USERNAME):$(POSTGRES_PASSWORD)@$(POSTGRES_DATABASE_HOST):$(POSTGRES_DATABASE_PORT)/$(POSTGRES_DATABASE_NAME)"
 - name: DATABASE_URL_TEST
-value: "postgres://$(POSTGRES_USERNAME):$(POSTGRES_PASSWORD)@postgresql-{{ .Release.Name }}-cluster-rw:5432/$(POSTGRES_DATABASE_NAME)-test"
+value: "postgres://$(POSTGRES_USERNAME):$(POSTGRES_PASSWORD)@$(POSTGRES_DATABASE_HOST):$(POSTGRES_DATABASE_PORT)/$(POSTGRES_DATABASE_NAME)-test"
 - name: DATABASE_CONNECTION_POOL_MIN
 value: "{{ .Values.outline.database.connectionPoolMin }}"
 - name: DATABASE_CONNECTION_POOL_MAX
@@ -97,37 +102,14 @@ spec:
 secretKeyRef:
 name: "{{ .Values.persistence.s3.credentialsSecret }}"
 key: AWS_SECRET_ACCESS_KEY
-{{- if .Values.persistence.s3.endpointConfigMap.enabled }}
-- name: AWS_REGION
-valueFrom:
-configMapKeyRef:
-name: "{{ .Values.persistence.s3.endpointConfigMap.name }}"
-key: BUCKET_REGION
-- name: AWS_S3_UPLOAD_BUCKET_NAME
-valueFrom:
-configMapKeyRef:
-name: "{{ .Values.persistence.s3.endpointConfigMap.name }}"
-key: BUCKET_NAME
-- name: AWS_S3_UPLOAD_BUCKET_HOST
-valueFrom:
-configMapKeyRef:
-name: "{{ .Values.persistence.s3.endpointConfigMap.name }}"
-key: BUCKET_HOST
-- name: AWS_S3_UPLOAD_BUCKET_PORT
-valueFrom:
-configMapKeyRef:
-name: "{{ .Values.persistence.s3.endpointConfigMap.name }}"
-key: BUCKET_PORT
-- name: AWS_S3_UPLOAD_BUCKET_URL
-value: "$(AWS_S3_UPLOAD_BUCKET_HOST):$(AWS_S3_UPLOAD_BUCKET_PORT)|"
-{{- else }}
 - name: AWS_REGION
 value: "{{ .Values.persistence.s3.region }}"
 - name: AWS_S3_UPLOAD_BUCKET_NAME
 value: "{{ .Values.persistence.s3.bucketName }}"
 - name: AWS_S3_UPLOAD_BUCKET_URL
-value: "{{ .Values.persistence.s3.endpoint }}"
-{{- end }}
+value: "{{ .Values.persistence.s3.bucketUrl }}"
+- name: AWS_S3_ACCELERATE_URL
+value: "{{ .Values.persistence.s3.bucketUrl }}"
 - name: AWS_S3_FORCE_PATH_STYLE
 value: "{{ .Values.persistence.s3.forcePathStyle }}"
 - name: AWS_S3_ACL

View File

@@ -24,12 +24,9 @@ persistence:
 type: s3
 s3:
 credentialsSecret:
-endpointConfigMap:
-enabled: false
-name:
 region:
 bucketName:
-endpoint:
+bucketUrl:
 uploadMaxSize: "26214400"
 forcePathStyle: false
 acl: private
@@ -38,10 +35,6 @@ persistence:
 storageSize: 50Gi
 localRootDir: /var/lib/outline/data
 uploadMaxSize: 26214400
-redis:
-architecture: standalone
-auth:
-enabled: false
 outline:
 nodeEnv: production
 url:
@@ -64,6 +57,9 @@ outline:
 databaseHost:
 existingSecretName:
 existingSecretKey:
+databasePort:
+existingSecretName:
+existingSecretKey:
 connectionPoolMin: ""
 connectionPoolMax: "20"
 sslMode: disable
@@ -94,3 +90,7 @@ outline:
 usernameClaim:
 displayName:
 scopes: openid profile email
+redis:
+architecture: standalone
+auth:
+enabled: false
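
With this change the S3 endpoint moves from persistence.s3.endpoint to persistence.s3.bucketUrl and the database port is read from a secret. A hedged override sketch (all names are placeholders):

persistence:
  s3:
    bucketUrl: https://s3.example.com    # was `endpoint` before this change
    bucketName: outline
    region: us-east-1
outline:
  database:
    databasePort:
      existingSecretName: outline-db-app   # placeholder secret
      existingSecretKey: port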

View File

@@ -1,6 +1,6 @@
 apiVersion: v2
 name: postgres-cluster
-version: 0.2.3
+version: 2.3.6
 description: Chart for cloudnative-pg cluster
 keywords:
 - database
@@ -10,4 +10,4 @@ sources:
 maintainers:
 - name: alexlebens
 icon: https://avatars.githubusercontent.com/u/100373852?s=48&v=4
-appVersion: v1.22.1
+appVersion: v1.22.2

View File

@@ -0,0 +1,30 @@
{{- define "cluster.backup" -}}
{{- if .Values.backup.enabled }}
backup:
retentionPolicy: {{ .Values.backup.retentionPolicy }}
barmanObjectStore:
destinationPath: "s3://{{ .Values.backup.endpointBucket }}/{{ .Values.kubernetesClusterName }}/postgresql/{{ include "cluster.backupName" . }}"
endpointURL: {{ .Values.backup.endpointURL }}
{{- if .Values.backup.endpointCA }}
endpointCA:
name: {{ .Values.backup.endpointCA }}
key: ca-bundle.crt
{{- end }}
serverName: "{{ include "cluster.name" . }}-backup-{{ .Values.backup.backupIndex }}"
s3Credentials:
accessKeyId:
name: {{ include "cluster.backupCredentials" . }}
key: ACCESS_KEY_ID
secretAccessKey:
name: {{ include "cluster.backupCredentials" . }}
key: ACCESS_SECRET_KEY
wal:
compression: {{ .Values.backup.wal.compression }}
encryption: {{ .Values.backup.wal.encryption }}
maxParallel: {{ .Values.backup.wal.maxParallel }}
data:
compression: {{ .Values.backup.data.compression }}
encryption: {{ .Values.backup.data.encryption }}
jobs: {{ .Values.backup.data.jobs }}
{{- end }}
{{- end }}
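
The helper above only renders when backup.enabled is true and expects the credentials secret to carry ACCESS_KEY_ID and ACCESS_SECRET_KEY. A sketch of the values that drive it, where the endpoint, bucket and secret name are placeholders:

kubernetesClusterName: homelab            # becomes part of the destinationPath
backup:
  enabled: true
  endpointURL: https://s3.example.com     # placeholder object store endpoint
  endpointBucket: postgres-backups        # placeholder bucket
  endpointCredentials: my-s3-credentials  # secret with ACCESS_KEY_ID / ACCESS_SECRET_KEY
  retentionPolicy: 30d
  schedule: "0 0 0 * * *"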

View File

@@ -0,0 +1,92 @@
{{- define "cluster.bootstrap" -}}
bootstrap:
{{- if eq .Values.mode "standalone" }}
initdb:
{{- with .Values.cluster.initdb }}
{{- with (omit . "postInitApplicationSQL") }}
{{- . | toYaml | nindent 4 }}
{{- end }}
{{- end }}
postInitApplicationSQL:
{{- if eq .Values.type "postgis" }}
- CREATE EXTENSION IF NOT EXISTS postgis;
- CREATE EXTENSION IF NOT EXISTS postgis_topology;
- CREATE EXTENSION IF NOT EXISTS fuzzystrmatch;
- CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder;
{{- else if eq .Values.type "timescaledb" }}
- CREATE EXTENSION IF NOT EXISTS timescaledb;
{{- end }}
{{- with .Values.cluster.initdb }}
{{- range .postInitApplicationSQL }}
{{- printf "- %s" . | nindent 6 }}
{{- end }}
{{- end }}
{{- else if eq .Values.mode "replica" }}
initdb:
import:
type: {{ .Values.replica.importType }}
databases:
{{- if and (gt (len .Values.replica.importDatabases) 1) (eq (default "microservice" .Values.replica.importType) "microservice") }}
{{ fail "Too many databases in import type of microservice!" }}
{{- else}}
{{- with .Values.replica.importDatabases }}
{{- . | toYaml | nindent 8 }}
{{- end }}
{{- end }}
{{- if eq .Values.replica.importType "monolith" }}
roles:
{{- with .Values.replica.importRoles }}
{{- . | toYaml | nindent 8 }}
{{- end }}
{{- end }}
{{- if and (.Values.replica.postImportApplicationSQL) (eq .Values.replica.importType "microservice") }}
postImportApplicationSQL:
{{- with .Values.replica.postImportApplicationSQL }}
{{- . | toYaml | nindent 8 }}
{{- end }}
{{- end }}
source:
externalCluster: "{{ include "cluster.name" . }}-cluster"
externalClusters:
- name: "{{ include "cluster.name" . }}-cluster"
{{- with .Values.replica.externalCluster }}
{{- . | toYaml | nindent 4 }}
{{- end }}
{{- else if eq .Values.mode "recovery" }}
recovery:
{{- with .Values.recovery.pitrTarget.time }}
recoveryTarget:
targetTime: {{ . }}
{{- end }}
source: "{{ include "cluster.name" . }}-backup-{{ .Values.recovery.recoveryIndex }}"
externalClusters:
- name: "{{ include "cluster.name" . }}-backup-{{ .Values.recovery.recoveryIndex }}"
barmanObjectStore:
serverName: "{{ include "cluster.name" . }}-backup-{{ .Values.recovery.recoveryIndex }}"
destinationPath: "s3://{{ .Values.recovery.endpointBucket }}/{{ .Values.kubernetesClusterName }}/postgresql/{{ include "cluster.recoveryName" . }}"
endpointURL: {{ .Values.recovery.endpointURL }}
{{- with .Values.recovery.endpointCA }}
endpointCA:
name: {{ . }}
key: ca-bundle.crt
{{- end }}
serverName: "{{ include "cluster.name" . }}-backup-{{ .Values.recovery.recoveryIndex }}"
s3Credentials:
accessKeyId:
name: {{ include "cluster.recoveryCredentials" . }}
key: ACCESS_KEY_ID
secretAccessKey:
name: {{ include "cluster.recoveryCredentials" . }}
key: ACCESS_SECRET_KEY
wal:
compression: {{ .Values.recovery.wal.compression }}
encryption: {{ .Values.recovery.wal.encryption }}
maxParallel: {{ .Values.recovery.wal.maxParallel }}
data:
compression: {{ .Values.recovery.data.compression }}
encryption: {{ .Values.recovery.data.encryption }}
jobs: {{ .Values.recovery.data.jobs }}
{{- else }}
{{ fail "Invalid cluster mode!" }}
{{- end }}
{{- end }}
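
For the recovery branch above, a hedged example of the values it consumes when restoring from an object store backup; the endpoint, bucket and secret are placeholders and the point-in-time target is optional:

mode: recovery
recovery:
  endpointURL: https://s3.example.com     # placeholder
  endpointBucket: postgres-backups        # placeholder
  endpointCredentials: my-s3-credentials  # placeholder secret with S3 keys
  recoveryIndex: 1
  pitrTarget:
    time: "2024-04-01T00:00:00Z"          # optional RFC3339 point-in-time target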

View File

@@ -0,0 +1,80 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "cluster.name" -}}
{{- if .Values.nameOverride }}
{{- .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-postgresql-%s" .Release.Name ((semver .Values.cluster.image.tag).Major | toString) | trunc 63 | trimSuffix "-" -}}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "cluster.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "cluster.labels" -}}
helm.sh/chart: {{ include "cluster.chart" . }}
{{ include "cluster.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "cluster.selectorLabels" -}}
app.kubernetes.io/name: {{ include "cluster.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: cloudnative-pg
{{- end }}
{{/*
Generate name for object store credentials
*/}}
{{- define "cluster.recoveryCredentials" -}}
{{- if .Values.recovery.endpointCredentials -}}
{{- .Values.recovery.endpointCredentials -}}
{{- else -}}
{{- printf "%s-backup-secret" (include "cluster.name" .) | trunc 63 | trimSuffix "-" -}}
{{- end }}
{{- end }}
{{- define "cluster.backupCredentials" -}}
{{- if .Values.backup.endpointCredentials -}}
{{- .Values.backup.endpointCredentials -}}
{{- else -}}
{{- printf "%s-backup-secret" (include "cluster.name" .) | trunc 63 | trimSuffix "-" -}}
{{- end }}
{{- end }}
{{/*
Generate backup server name
*/}}
{{- define "cluster.backupName" -}}
{{- if .Values.backup.backupName -}}
{{- .Values.backup.backupName -}}
{{- else -}}
{{ include "cluster.name" . }}
{{- end }}
{{- end }}
{{/*
Generate recovery server name
*/}}
{{- define "cluster.recoveryName" -}}
{{- if .Values.recovery.recoveryName -}}
{{- .Values.recovery.recoveryName -}}
{{- else -}}
{{ include "cluster.name" . }}
{{- end }}
{{- end }}
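
To illustrate how cluster.name behaves, assuming a hypothetical release named db and the default image tag 16.2, the semver call pulls out the major version and the helpers render roughly as follows:

# include "cluster.name" .         => db-postgresql-16
# include "cluster.backupName" .   => db-postgresql-16   (unless backup.backupName is set)
# include "cluster.recoveryName" . => db-postgresql-16   (unless recovery.recoveryName is set)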

View File

@@ -0,0 +1,52 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: {{ include "cluster.name" . }}-cluster
namespace: {{ .Release.Namespace }}
{{- with .Values.cluster.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "cluster.labels" . | nindent 4 }}
{{- with .Values.cluster.additionalLabels }}
{{ toYaml . | nindent 4 }}
{{- end }}
spec:
instances: {{ .Values.cluster.instances }}
imageName: "{{ .Values.cluster.image.repository }}:{{ .Values.cluster.image.tag }}"
imagePullPolicy: {{ .Values.cluster.image.pullPolicy }}
postgresUID: {{ .Values.cluster.postgresUID }}
postgresGID: {{ .Values.cluster.postgresGID }}
walStorage:
size: {{ .Values.cluster.walStorage.size }}
storageClass: {{ .Values.cluster.walStorage.storageClass }}
storage:
size: {{ .Values.cluster.storage.size }}
storageClass: {{ .Values.cluster.storage.storageClass }}
{{- with .Values.cluster.resources }}
resources:
{{- toYaml . | nindent 4 }}
{{ end }}
{{- with .Values.cluster.affinity }}
affinity:
{{- toYaml . | nindent 4 }}
{{- end }}
priorityClassName: {{ .Values.cluster.priorityClassName }}
primaryUpdateMethod: {{ .Values.cluster.primaryUpdateMethod }}
primaryUpdateStrategy: {{ .Values.cluster.primaryUpdateStrategy }}
logLevel: {{ .Values.cluster.logLevel }}
postgresql:
shared_preload_libraries:
{{- if eq .Values.type "timescaledb" }}
- timescaledb
{{- end }}
{{- with .Values.cluster.postgresql.parameters }}
parameters:
{{- toYaml . | nindent 6 }}
{{ end }}
monitoring:
enablePodMonitor: {{ and .Values.cluster.monitoring.enabled .Values.cluster.monitoring.podMonitor.enabled }}
{{ include "cluster.bootstrap" . | nindent 2 }}
{{ include "cluster.backup" . | nindent 2 }}

View File

@@ -1,81 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: "postgresql-{{ .Release.Name }}-cluster"
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: postgresql
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: database
app.kubernetes.io/part-of: {{ .Release.Name }}
spec:
imageName: "{{ .Values.cluster.image.repository }}:{{ .Values.cluster.image.tag }}"
instances: {{ .Values.cluster.instances }}
replicationSlots:
highAvailability:
enabled: true
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
postgresql:
parameters:
{{- toYaml .Values.cluster.parameters | nindent 6 }}
resources:
{{- toYaml .Values.cluster.resources | nindent 4 }}
storage:
storageClass: {{ .Values.cluster.storage.data.storageClass }}
size: {{ .Values.cluster.storage.data.size }}
walStorage:
storageClass: {{ .Values.cluster.storage.wal.storageClass }}
size: {{ .Values.cluster.storage.wal.size }}
monitoring:
enablePodMonitor: true
{{- if .Values.bootstrap.initdbEnabled }}
bootstrap:
initdb:
{{- toYaml .Values.bootstrap.initdb | nindent 6 }}
{{- end }}
{{- if .Values.bootstrap.recoveryEnabled }}
bootstrap:
recovery:
source: "postgresql-{{ .Release.Name }}-cluster-backup-index-{{ .Values.bootstrap.recoveryIndex }}"
externalClusters:
- name: "postgresql-{{ .Release.Name }}-cluster-backup-index-{{ .Values.bootstrap.recoveryIndex }}"
barmanObjectStore:
endpointURL: {{ .Values.bootstrap.endpointURL }}
destinationPath: "s3://{{ .Values.bootstrap.bucket }}/{{ .Values.cluster.name }}/postgresql/{{ .Release.Name }}-cluster"
s3Credentials:
accessKeyId:
name: "postgresql-{{ .Release.Name }}-cluster-backup-secret"
key: ACCESS_KEY_ID
secretAccessKey:
name: "postgresql-{{ .Release.Name }}-cluster-backup-secret"
key: ACCESS_SECRET_KEY
data:
compression: {{ .Values.cluster.compression }}
wal:
compression: {{ .Values.cluster.compression }}
{{- end }}
{{- if .Values.backup.backupEnabled }}
backup:
retentionPolicy: "{{ .Values.backup.retentionPolicy }}"
barmanObjectStore:
destinationPath: "s3://{{ .Values.backup.bucket }}/{{ .Values.cluster.name }}/postgresql/{{ .Release.Name }}-cluster"
endpointURL: {{ .Values.backup.endpointURL }}
serverName: "postgresql-{{ .Release.Name }}-cluster-backup-index-{{ .Values.backup.backupIndex }}"
s3Credentials:
accessKeyId:
name: "postgresql-{{ .Release.Name }}-cluster-backup-secret"
key: ACCESS_KEY_ID
secretAccessKey:
name: "postgresql-{{ .Release.Name }}-cluster-backup-secret"
key: ACCESS_SECRET_KEY
data:
compression: {{ .Values.cluster.compression }}
wal:
compression: {{ .Values.cluster.compression }}
{{- end }}

View File

@@ -0,0 +1,30 @@
{{- if and .Values.cluster.monitoring.enabled .Values.cluster.monitoring.prometheusRule.enabled -}}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ include "cluster.name" . }}-alert-rules
namespace: {{ .Release.Namespace }}
labels:
{{- include "cluster.labels" . | nindent 4 }}
{{- with .Values.cluster.additionalLabels }}
{{ toYaml . | nindent 4 }}
{{- end }}
spec:
groups:
- name: cloudnative-pg/{{ include "cluster.name" . }}
rules:
{{- $dict := dict "excludeRules" .Values.cluster.monitoring.prometheusRule.excludeRules -}}
{{- $_ := set $dict "value" "{{ $value }}" -}}
{{- $_ := set $dict "namespace" .Release.Namespace -}}
{{- $_ := set $dict "cluster" (printf "%s-cluster" (include "cluster.name" .) ) -}}
{{- $_ := set $dict "labels" (dict "job" "{{ $labels.job }}" "node" "{{ $labels.node }}" "pod" "{{ $labels.pod }}") -}}
{{- $_ := set $dict "podSelector" (printf "%s-cluster-([1-9][0-9]*)$" (include "cluster.name" .) ) -}}
{{- $_ := set $dict "Values" .Values -}}
{{- $_ := set $dict "Template" .Template -}}
{{- range $path, $_ := .Files.Glob "prometheus_rules/**.yaml" }}
{{- $tpl := tpl ($.Files.Get $path) $dict | nindent 10 | trim -}}
{{- with $tpl }}
- {{ $tpl }}
{{- end -}}
{{- end -}}
{{ end }}
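
The rule files under prometheus_rules/ receive the dict built above, including excludeRules. A sketch of the values side; the rule name is hypothetical and depends on which rule files ship with the chart:

cluster:
  monitoring:
    enabled: true
    prometheusRule:
      enabled: true
      excludeRules:
        - CNPGClusterZoneSpreadWarning   # hypothetical rule name to skip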

View File

@@ -1,16 +1,18 @@
+{{ if .Values.backup.enabled }}
 apiVersion: postgresql.cnpg.io/v1
 kind: ScheduledBackup
 metadata:
-name: "postgresql-{{ .Release.Name }}-cluster-backup"
+name: {{ include "cluster.name" . }}-scheduled-backup
 namespace: {{ .Release.Namespace }}
 labels:
-app.kubernetes.io/name: postgresql
-app.kubernetes.io/instance: {{ .Release.Name }}
-app.kubernetes.io/version: {{ .Chart.AppVersion }}
-app.kubernetes.io/component: database
-app.kubernetes.io/part-of: {{ .Release.Name }}
+{{- include "cluster.labels" . | nindent 4 }}
+{{- with .Values.cluster.additionalLabels }}
+{{ toYaml . | nindent 4 }}
+{{- end }}
 spec:
+immediate: true
 schedule: {{ .Values.backup.schedule }}
 backupOwnerReference: self
 cluster:
-name: "postgresql-{{ .Release.Name }}-cluster"
+name: {{ include "cluster.name" . }}-cluster
+{{ end }}

View File

@@ -1,42 +1,195 @@
+# -- Override the name of the cluster
+nameOverride: ""
+###
+# -- Type of the CNPG database. Available types:
+# * `postgresql`
+# * `postgis`
+# * `timescaledb`
+type: postgresql
+###
+# Cluster mode of operation. Available modes:
+# * `standalone` - Default mode. Creates new or updates an existing CNPG cluster.
+# * `recovery` - Same as standalone but creates a cluster from a backup, object store or via pg_basebackup
+# * `replica` - Create database as a replica from another CNPG cluster
+mode: standalone
+# Generates bucket name and path for recovery and backup, creates: <endpointBucket>/<clusterName>/postgresql/{{ .Release.Name }}
+kubernetesClusterName: ""
 cluster:
-name: cl01tl
+instances: 3
 image:
 repository: ghcr.io/cloudnative-pg/postgresql
-tag: 16.0
-instances: 2
-parameters:
-shared_buffers: 128MB
-max_slot_wal_keep_size: 2000MB
-hot_standby_feedback: "on"
-compression: snappy
+tag: "16.2"
+pullPolicy: IfNotPresent
+# The UID and GID of the postgres user inside the image
+postgresUID: 26
+postgresGID: 26
+walStorage:
+size: 2Gi
+storageClass: ""
+storage:
+size: 10Gi
+storageClass: ""
 resources:
 requests:
-memory: 512Mi
-cpu: 100m
+memory: 256Mi
+cpu: 10m
 limits:
-memory: 2Gi
-cpu: 1500m
-hugepages-2Mi: 512Mi
+memory: 1Gi
+cpu: 100m
+hugepages-2Mi: 256Mi
-storage:
-data:
-storageClass: default
-size: 10Gi
-wal:
-storageClass: default
-size: 2Gi
-bootstrap:
-recoveryEnabled: false
+# See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-AffinityConfiguration
+affinity:
+enablePodAntiAffinity: true
+topologyKey: kubernetes.io/hostname
+additionalLabels: {}
+annotations: {}
+priorityClassName: ""
+# Method to follow to upgrade the primary server during a rolling update procedure, after all replicas have been
+# successfully updated. It can be switchover (default) or in-place (restart).
+primaryUpdateMethod: switchover
+# Strategy to follow to upgrade the primary server during a rolling update procedure, after all replicas have been
+# successfully updated: it can be automated (unsupervised - default) or manual (supervised)
+primaryUpdateStrategy: unsupervised
+logLevel: "info"
+monitoring:
+enabled: false
+podMonitor:
+enabled: true
+prometheusRule:
+enabled: true
+excludeRules: []
+# See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-PostgresConfiguration
+postgresql:
+parameters:
+shared_buffers: 128MB
+max_slot_wal_keep_size: 2000MB
+hot_standby_feedback: "on"
+# BootstrapInitDB is the configuration of the bootstrap process when initdb is used.
+# See: https://cloudnative-pg.io/documentation/current/bootstrap/
+# See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-bootstrapinitdb
+initdb: {}
+# database: app
+# owner: app
+# secret: "" # Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch
+# postInitApplicationSQL:
+# - CREATE TABLE IF NOT EXISTS example;
+recovery:
+# Point in time recovery target in RFC3339 format
+pitrTarget:
+time: ""
+# Overrides the provider specific default endpoint. Defaults to:
+# S3: https://s3.<region>.amazonaws.com"
+endpointURL: ""
+endpointBucket: ""
+# Specifies secret that contains a CA bundle to validate a privately signed certificate, should contain the key ca-bundle.crt
+endpointCA: ""
+# Specifies secret that contains S3 credentials, should contain the keys ACCESS_KEY_ID and ACCESS_SECRET_KEY
+endpointCredentials: ""
+# Generate external cluster name, uses: postgresql-{{ .Release.Name }}-cluster-backup-index-{{ .Values.recovery.recoveryIndex }}"
 recoveryIndex: 1
-endpointURL:
-bucket:
-initdbEnabled: false
-initdb:
-database: app
-owner: app
+# Name of the recovery cluster in the object store, defaults to "cluster.name"
+recoveryName: ""
+wal:
+# WAL compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
+compression: snappy
+# Whether to instruct the storage provider to encrypt WAL files. One of `` (use the storage container default), `AES256` or `aws:kms`.
+encryption: ""
+# Number of WAL files to be archived or restored in parallel.
+maxParallel: 2
+data:
+# Data compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
+compression: snappy
+# Whether to instruct the storage provider to encrypt data files. One of `` (use the storage container default), `AES256` or `aws:kms`.
+encryption: ""
+# Number of data files to be archived or restored in parallel.
+jobs: 2
+replica:
+# See https://cloudnative-pg.io/documentation/current/database_import/
+# * `microservice` - Single database import as expected from cnpg clusters
+# * `monolith` - Import multiple databases and roles
+importType: microservice
+# If type microservice only one database is allowed, default is app as standard in cnpg clusters
+importDatabases:
+- app
+# If type microservice no roles are imported and ignored
+importRoles: []
+# If import type is monolith postImportApplicationSQL is not supported and ignored
+postImportApplicationSQL: []
+# External cluster connection, password specifies a secret name and the key containing the password value
+externalCluster:
+connectionParameters:
+host: postgresql
+user: app
+dbname: app
+password:
+name: postgresql
+key: password
 backup:
-backupEnabled: true
-schedule: "0 0 0 * * *"
-retentionPolicy: 14d
+enabled: false
+# Overrides the provider specific default endpoint
+endpointURL: ""
+endpointBucket: ""
+# Specifies secret that contains a CA bundle to validate a privately signed certificate, should contain the key ca-bundle.crt
+endpointCA: ""
+# Specifies secret that contains S3 credentials, should contain the keys ACCESS_KEY_ID and ACCESS_SECRET_KEY
+endpointCredentials: ""
+# Generate external cluster name, creates: postgresql-{{ .Release.Name }}-cluster-backup-index-{{ .Values.backups.backupIndex }}"
 backupIndex: 1
-endpointURL:
-bucket:
+# Name of the backup cluster in the object store, defaults to "cluster.name"
+backupName: ""
+wal:
+# WAL compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
+compression: snappy
+# Whether to instruct the storage provider to encrypt WAL files. One of `` (use the storage container default), `AES256` or `aws:kms`.
+encryption: ""
+# Number of WAL files to be archived or restored in parallel.
+maxParallel: 2
+data:
+# Data compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
+compression: snappy
+# Whether to instruct the storage provider to encrypt data files. One of `` (use the storage container default), `AES256` or `aws:kms`.
+encryption: ""
+# Number of data files to be archived or restored in parallel.
+jobs: 2
+# Retention policy for backups
+retentionPolicy: "30d"
+# Scheduled backup in cron format
+schedule: "0 0 0 * * *"

charts/taiga/Chart.yaml Normal file
View File

@@ -0,0 +1,24 @@
apiVersion: v2
name: taiga
version: 0.2.0
description: Chart for Taiga
keywords:
- kanban
- project management
sources:
- https://github.com/taigaio
- https://github.com/rabbitmq/rabbitmq-server
- https://github.com/bitnami/charts/tree/main/bitnami/rabbitmq
maintainers:
- name: alexlebens
icon: https://avatars.githubusercontent.com/u/6905422?s=200&v=4
dependencies:
- name: rabbitmq
version: 14.0.1
repository: https://charts.bitnami.com/bitnami
alias: async-rabbitmq
- name: rabbitmq
version: 14.0.1
repository: https://charts.bitnami.com/bitnami
alias: events-rabbitmq
appVersion: 6.7.7

charts/taiga/README.md Normal file
View File

@@ -0,0 +1,17 @@
## Introduction
[Taiga 6](https://github.com/taigaio)
An intuitive and simple, yet feature-complete, Kanban board.
This chart bootstraps a [Taiga](https://github.com/taigaio) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes
- Helm
## Parameters
See the [values file](values.yaml).
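
Because the RabbitMQ dependency is pulled in twice under aliases, its settings are scoped under those alias keys in the values file. A hedged sketch using the upstream Bitnami auth values (credentials are placeholders):

async-rabbitmq:
  auth:
    username: taiga        # placeholder
    password: change-me    # placeholder
events-rabbitmq:
  auth:
    username: taiga        # placeholder
    password: change-me    # placeholder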

View File

@@ -0,0 +1,135 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "taiga.name" -}}
{{- default .Chart.Name .Values.global.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "taiga.fullname" -}}
{{- if .Values.global.fullnameOverride -}}
{{- .Values.global.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.global.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label
*/}}
{{- define "taiga.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "taiga.labels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}
helm.sh/chart: {{ template "taiga.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Common labels for specific components
*/}}
{{- define "taiga.back.labels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-back
helm.sh/chart: {{ template "taiga.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{- define "taiga.async.labels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-async
helm.sh/chart: {{ template "taiga.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{- define "taiga.front.labels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-front
helm.sh/chart: {{ template "taiga.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{- define "taiga.events.labels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-events
helm.sh/chart: {{ template "taiga.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{- define "taiga.protected.labels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-protected
helm.sh/chart: {{ template "taiga.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
*/}}
{{- define "taiga.matchLabels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "taiga.back.matchLabels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-back
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "taiga.async.matchLabels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-async
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "taiga.front.matchLabels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-front
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "taiga.events.matchLabels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-events
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "taiga.protected.matchLabels" -}}
app.kubernetes.io/name: {{ template "taiga.name" . }}-protected
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "taiga.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "taiga.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the static persistent volume
*/}}
{{- define "taiga.staticVolumeName" -}}
{{- if .Values.persistence.static.existingClaim -}}
{{ .Values.persistence.static.existingClaim }}
{{- else -}}
{{ printf "%s-static" (include "taiga.fullname" .) | trunc 63 | trimSuffix "-" }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the media persistent volume
*/}}
{{- define "taiga.mediaVolumeName" -}}
{{- if .Values.persistence.media.existingClaim -}}
{{ .Values.persistence.media.existingClaim }}
{{- else -}}
{{ printf "%s-media" (include "taiga.fullname" .) | trunc 63 | trimSuffix "-" }}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,36 @@
{{- if .Values.createInitialUser }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "taiga.fullname" . }}-create-initial-user
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "taiga.labels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
data:
createinitialuser.sh: |
#!/bin/sh
echo """
import time
import requests
import subprocess
print('Waiting for backend ...')
while requests.get('http://{{ template "taiga.fullname" . }}-back:{{ .Values.back.service.port }}/api/v1/').status_code != 200:
print('...')
time.sleep(2)
if str(subprocess.check_output(['python', 'manage.py', 'dumpdata', 'users.user'], cwd='/taiga-back')).find('\"is_superuser\": true') == -1:
print(subprocess.check_output(['python', 'manage.py', 'loaddata', 'initial_user'], cwd='/taiga-back'))
else:
print('Admin user already created.')
""" > /tmp/create_superuser.py
python /tmp/create_superuser.py
{{- end }}

View File

@@ -0,0 +1,515 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "taiga.fullname" . }}-back
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "taiga.back.labels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
spec:
revisionHistoryLimit: 3
replicas: {{ .Values.back.replicas }}
strategy:
type: Recreate
selector:
matchLabels:
{{- include "taiga.back.matchLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "taiga.back.labels" . | nindent 8 }}
app.kubernetes.io/component: {{ template "taiga.name" . }}-back
annotations:
{{- with .Values.back.podAnnotations }}
{{ toYaml . | nindent 8 }}
{{- end }}
spec:
affinity:
{{- with .Values.back.affinity }}
{{ toYaml . | nindent 8 }}
{{- end }}
nodeSelector:
{{- with .Values.back.nodeSelector }}
{{ toYaml . | nindent 8 }}
{{- end }}
tolerations:
{{- with .Values.back.tolerations }}
{{ toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ template "taiga.serviceAccountName" . }}
securityContext:
{{- with .Values.back.securityContext }}
{{ toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ template "taiga.fullname" . }}-back
image: "{{ .Values.back.image.repository }}:{{ .Values.back.image.tag }}"
imagePullPolicy: {{ .Values.back.image.pullPolicy }}
resources:
{{ toYaml .Values.back.resources | nindent 12 }}
ports:
- name: taiga-back
containerPort: {{ .Values.back.service.port }}
protocol: TCP
volumeMounts:
- name: taiga-static
mountPath: /taiga-back/static
- name: taiga-media
mountPath: /taiga-back/media
env:
- name: TAIGA_SECRET_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.secretKey.existingSecretName }}"
key: "{{ .Values.secretKey.existingSecretKey }}"
- name: ENABLE_TELEMETRY
value: "{{ .Values.enableTelemetry }}"
- name: PUBLIC_REGISTER_ENABLED
value: "{{ .Values.publicRegisterEnabled }}"
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.usernameKey }}"
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.passwordKey }}"
- name: POSTGRES_DB
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.databaseNameKey }}"
- name: POSTGRES_HOST
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.hostKey }}"
{{ if .Values.oidc.enabled }}
- name: OIDC_ENABLED
value: "True"
- name: OIDC_SCOPES
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.scopesKey }}"
- name: OIDC_SIGN_ALGO
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.signatureAlgorithmKey }}"
- name: OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.clientIdKey }}"
- name: OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.clientSecretKey }}"
- name: OIDC_BASE_URL
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.baseUrlKey }}"
- name: OIDC_JWKS_ENDPOINT
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.jwksEndpointKey }}"
- name: OIDC_AUTHORIZATION_ENDPOINT
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.authorizationEndpointKey }}"
- name: OIDC_TOKEN_ENDPOINT
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.tokenEndpointKey }}"
- name: OIDC_USER_ENDPOINT
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.userEndpointKey }}"
{{ end }}
{{ if .Values.email.enabled }}
- name: EMAIL_BACKEND
value: "django.core.mail.backends.smtp.EmailBackend"
- name: DEFAULT_FROM_EMAIL
value: "{{ .Values.email.from }}"
- name: EMAIL_HOST
value: "{{ .Values.email.host }}"
- name: EMAIL_PORT
value: "{{ .Values.email.port }}"
- name: EMAIL_USE_TLS
value: "{{ .Values.email.tls }}"
- name: EMAIL_USE_SSL
value: "{{ .Values.email.ssl }}"
- name: EMAIL_HOST_USER
value: "{{ .Values.email.user }}"
- name: EMAIL_HOST_PASSWORD
valueFrom:
secretKeyRef:
name: "{{ .Values.email.existingPasswordSecret }}"
key: "{{ .Values.email.existingSecretPasswordKey }}"
{{ end }}
- name: ENABLE_GITHUB_AUTH
value: "false"
- name: ENABLE_GITLAB_AUTH
value: "false"
- name: ENABLE_SLACK
value: "{{ .Values.enableSlack }}"
{{ if .Values.githubImporter.enabled }}
- name: ENABLE_GITHUB_IMPORTER
value: "True"
- name: GITHUB_API_CLIENT_ID
valueFrom:
secretKeyRef:
name: "{{ .Values.githubImporter.existingSecretName }}"
key: "{{ .Values.githubImporter.existingSecretClientIdKey }}"
- name: GITHUB_API_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: "{{ .Values.githubImporter.existingSecretName }}"
key: "{{ .Values.githubImporter.existingSecretClientSecretKey }}"
{{ else }}
- name: ENABLE_GITHUB_IMPORTER
value: "False"
{{ end }}
{{ if .Values.jiraImporter.enabled }}
- name: ENABLE_JIRA_IMPORTER
value: "True"
- name: JIRA_IMPORTER_CONSUMER_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.jiraImporter.existingSecretName }}"
key: "{{ .Values.jiraImporter.existingSecretConsumerKeyKey }}"
- name: JIRA_IMPORTER_CERT
valueFrom:
secretKeyRef:
name: "{{ .Values.jiraImporter.existingSecretName }}"
key: "{{ .Values.jiraImporter.existingSecretCertKey }}"
- name: JIRA_IMPORTER_PUB_CERT
valueFrom:
secretKeyRef:
name: "{{ .Values.jiraImporter.existingSecretName }}"
key: "{{ .Values.jiraImporter.existingSecretPubCertKey }}"
{{ else }}
- name: ENABLE_JIRA_IMPORTER
value: "False"
{{ end }}
{{ if .Values.trelloImporter.enabled }}
- name: ENABLE_TRELLO_IMPORTER
value: "True"
- name: TRELLO_IMPORTER_API_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.trelloImporter.existingSecretName }}"
key: "{{ .Values.trelloImporter.existingSecretApiKeyKey }}"
- name: TRELLO_IMPORTER_SECRET_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.trelloImporter.existingSecretName }}"
key: "{{ .Values.trelloImporter.existingSecretSecretKeyKey }}"
{{ else }}
- name: ENABLE_TRELLO_IMPORTER
value: "False"
{{ end }}
- name: RABBITMQ_USER
value: "{{ index .Values "async-rabbitmq" "auth" "username" }}"
- name: RABBITMQ_PASS
valueFrom:
secretKeyRef:
name: {{ index .Values "async-rabbitmq" "auth" "existingPasswordSecret" }}
key: {{ index .Values "async-rabbitmq" "auth" "existingSecretPasswordKey" }}
{{ if .Values.ingress.enabled }}
- name: TAIGA_SITES_DOMAIN
value: "{{ .Values.ingress.host }}"
- name: TAIGA_SITES_SCHEME
value: "https"
- name: SESSION_COOKIE_SECURE
value: "True"
- name: CSRF_COOKIE_SECURE
value: "True"
{{- end }}
{{- if .Values.back.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: /admin/login/
port: {{ .Values.back.service.port }}
initialDelaySeconds: {{ .Values.back.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.back.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.back.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.back.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.back.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.back.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /admin/login/
port: {{ .Values.back.service.port }}
initialDelaySeconds: {{ .Values.back.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.back.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.back.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.back.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.back.readinessProbe.failureThreshold }}
{{- end }}
- name: {{ template "taiga.fullname" . }}-async
image: "{{ .Values.async.image.repository }}:{{ .Values.async.image.tag }}"
imagePullPolicy: {{ .Values.async.image.pullPolicy }}
resources:
{{ toYaml .Values.async.resources | nindent 12 }}
command:
- /taiga-back/docker/async_entrypoint.sh
volumeMounts:
- name: taiga-static
mountPath: /taiga-back/static
- name: taiga-media
mountPath: /taiga-back/media
env:
- name: TAIGA_SECRET_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.secretKey.existingSecretName }}"
key: "{{ .Values.secretKey.existingSecretKey }}"
- name: ENABLE_TELEMETRY
value: "{{ .Values.enableTelemetry }}"
- name: PUBLIC_REGISTER_ENABLED
value: "{{ .Values.publicRegisterEnabled }}"
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.usernameKey }}"
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.passwordKey }}"
- name: POSTGRES_DB
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.databaseNameKey }}"
- name: POSTGRES_HOST
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.hostKey }}"
{{ if .Values.oidc.enabled }}
- name: OIDC_ENABLED
value: "True"
- name: OIDC_SCOPES
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.scopesKey }}"
- name: OIDC_SIGN_ALGO
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.signatureAlgorithmKey }}"
- name: OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.clientIdKey }}"
- name: OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.clientSecretKey }}"
- name: OIDC_BASE_URL
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.baseUrlKey }}"
- name: OIDC_JWKS_ENDPOINT
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.jwksEndpointKey }}"
- name: OIDC_AUTHORIZATION_ENDPOINT
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.authorizationEndpointKey }}"
- name: OIDC_TOKEN_ENDPOINT
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.tokenEndpointKey }}"
- name: OIDC_USER_ENDPOINT
valueFrom:
secretKeyRef:
name: "{{ .Values.oidc.existingSecretName }}"
key: "{{ .Values.oidc.userEndpointKey }}"
{{ end }}
{{ if .Values.email.enabled }}
- name: EMAIL_BACKEND
value: "django.core.mail.backends.smtp.EmailBackend"
- name: DEFAULT_FROM_EMAIL
value: "{{ .Values.email.from }}"
- name: EMAIL_HOST
value: "{{ .Values.email.host }}"
- name: EMAIL_PORT
value: "{{ .Values.email.port }}"
- name: EMAIL_USE_TLS
value: "{{ .Values.email.tls }}"
- name: EMAIL_USE_SSL
value: "{{ .Values.email.ssl }}"
- name: EMAIL_HOST_USER
value: "{{ .Values.email.user }}"
- name: EMAIL_HOST_PASSWORD
valueFrom:
secretKeyRef:
name: "{{ .Values.email.existingPasswordSecret }}"
key: "{{ .Values.email.existingSecretPasswordKey }}"
{{ end }}
- name: ENABLE_GITHUB_AUTH
value: "false"
- name: ENABLE_GITLAB_AUTH
value: "false"
- name: ENABLE_SLACK
value: "{{ .Values.enableSlack }}"
{{ if .Values.githubImporter.enabled }}
- name: ENABLE_GITHUB_IMPORTER
value: "True"
- name: GITHUB_API_CLIENT_ID
valueFrom:
secretKeyRef:
name: "{{ .Values.githubImporter.existingSecretName }}"
key: "{{ .Values.githubImporter.existingSecretClientIdKey }}"
- name: GITHUB_API_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: "{{ .Values.githubImporter.existingSecretName }}"
key: "{{ .Values.githubImporter.existingSecretClientSecretKey }}"
{{ else }}
- name: ENABLE_GITHUB_IMPORTER
value: "False"
{{ end }}
{{ if .Values.jiraImporter.enabled }}
- name: ENABLE_JIRA_IMPORTER
value: "True"
- name: JIRA_IMPORTER_CONSUMER_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.jiraImporter.existingSecretName }}"
key: "{{ .Values.jiraImporter.existingSecretConsumerKeyKey }}"
- name: JIRA_IMPORTER_CERT
valueFrom:
secretKeyRef:
name: "{{ .Values.jiraImporter.existingSecretName }}"
key: "{{ .Values.jiraImporter.existingSecretCertKey }}"
- name: JIRA_IMPORTER_PUB_CERT
valueFrom:
secretKeyRef:
name: "{{ .Values.jiraImporter.existingSecretName }}"
key: "{{ .Values.jiraImporter.existingSecretPubCertKey }}"
{{ else }}
- name: ENABLE_JIRA_IMPORTER
value: "False"
{{ end }}
{{ if .Values.trelloImporter.enabled }}
- name: ENABLE_TRELLO_IMPORTER
value: "True"
- name: TRELLO_IMPORTER_API_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.trelloImporter.existingSecretName }}"
key: "{{ .Values.trelloImporter.existingSecretApiKeyKey }}"
- name: TRELLO_IMPORTER_SECRET_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.trelloImporter.existingSecretName }}"
key: "{{ .Values.trelloImporter.existingSecretSecretKeyKey }}"
{{ else }}
- name: ENABLE_TRELLO_IMPORTER
value: "False"
{{ end }}
- name: RABBITMQ_USER
value: "{{ index .Values "async-rabbitmq" "auth" "username" }}"
- name: RABBITMQ_PASS
valueFrom:
secretKeyRef:
name: {{ index .Values "async-rabbitmq" "auth" "existingPasswordSecret" }}
key: {{ index .Values "async-rabbitmq" "auth" "existingSecretPasswordKey" }}
{{ if .Values.ingress.enabled }}
- name: TAIGA_SITES_DOMAIN
value: "{{ .Values.ingress.host }}"
- name: TAIGA_SITES_SCHEME
value: "https"
- name: SESSION_COOKIE_SECURE
value: "True"
- name: CSRF_COOKIE_SECURE
value: "True"
{{- end }}
{{- if .Values.back.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: /admin/login/
port: {{ .Values.back.service.port }}
initialDelaySeconds: {{ .Values.back.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.back.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.back.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.back.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.back.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.back.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /admin/login/
port: {{ .Values.back.service.port }}
initialDelaySeconds: {{ .Values.back.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.back.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.back.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.back.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.back.readinessProbe.failureThreshold }}
{{- end }}
volumes:
- name: taiga-static
{{- if .Values.persistence.static.enabled }}
persistentVolumeClaim:
claimName: {{ include "taiga.staticVolumeName" . }}
{{- else }}
emptyDir: {}
{{- end }}
- name: taiga-media
{{- if .Values.persistence.media.enabled }}
persistentVolumeClaim:
claimName: {{ include "taiga.mediaVolumeName" . }}
{{- else }}
emptyDir: {}
{{- end }}

View File

@@ -0,0 +1,101 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "taiga.fullname" . }}-events
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "taiga.events.labels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
spec:
revisionHistoryLimit: 3
replicas: {{ .Values.events.replicas }}
strategy:
type: Recreate
selector:
matchLabels:
{{- include "taiga.events.matchLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "taiga.events.labels" . | nindent 8 }}
app.kubernetes.io/component: {{ template "taiga.name" . }}-events
annotations:
{{- with .Values.events.podAnnotations }}
{{ toYaml . | nindent 8 }}
{{- end }}
spec:
affinity:
{{- with .Values.events.affinity }}
{{ toYaml . | nindent 8 }}
{{- end }}
nodeSelector:
{{- with .Values.events.nodeSelector }}
{{ toYaml . | nindent 8 }}
{{- end }}
tolerations:
{{- with .Values.events.tolerations }}
{{ toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ template "taiga.serviceAccountName" . }}
securityContext:
{{- with .Values.events.securityContext }}
{{ toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ template "taiga.fullname" . }}-events
image: "{{ .Values.events.image.repository }}:{{ .Values.events.image.tag }}"
imagePullPolicy: {{ .Values.events.image.pullPolicy }}
resources:
{{ toYaml .Values.events.resources | nindent 12 }}
ports:
- name: taiga-events
containerPort: {{ .Values.events.service.http.port }}
protocol: TCP
- name: taiga-app
containerPort: {{ .Values.events.service.app.port }}
protocol: TCP
env:
- name: TAIGA_SECRET_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.secretKey.existingSecretName }}"
key: "{{ .Values.secretKey.existingSecretKey }}"
- name: RABBITMQ_USER
value: "{{ index .Values "events-rabbitmq" "auth" "username" }}"
- name: RABBITMQ_PASS
valueFrom:
secretKeyRef:
name: {{ index .Values "events-rabbitmq" "auth" "existingPasswordSecret" }}
key: {{ index .Values "events-rabbitmq" "auth" "existingSecretPasswordKey" }}
- name: APP_PORT
value: "{{ .Values.events.service.app.port }}"
{{- if .Values.events.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: /healthz
port: {{ .Values.events.service.app.port }}
initialDelaySeconds: {{ .Values.events.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.events.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.events.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.events.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.events.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.events.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /healthz
port: {{ .Values.events.service.app.port }}
initialDelaySeconds: {{ .Values.events.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.events.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.events.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.events.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.events.readinessProbe.failureThreshold }}
{{- end }}

View File

@@ -0,0 +1,108 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "taiga.fullname" . }}-front
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "taiga.front.labels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
spec:
revisionHistoryLimit: 3
replicas: {{ .Values.front.replicas }}
strategy:
type: Recreate
selector:
matchLabels:
{{- include "taiga.front.matchLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "taiga.front.labels" . | nindent 8 }}
app.kubernetes.io/component: {{ template "taiga.name" . }}-front
annotations:
{{- with .Values.front.podAnnotations }}
{{ toYaml . | nindent 8 }}
{{- end }}
spec:
affinity:
{{- with .Values.front.affinity }}
{{ toYaml . | nindent 8 }}
{{- end }}
nodeSelector:
{{- with .Values.front.nodeSelector }}
{{ toYaml . | nindent 8 }}
{{- end }}
tolerations:
{{- with .Values.front.tolerations }}
{{ toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ template "taiga.serviceAccountName" . }}
securityContext:
{{- with .Values.front.securityContext }}
{{ toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ template "taiga.fullname" . }}-front
image: "{{ .Values.front.image.repository }}:{{ .Values.front.image.tag }}"
imagePullPolicy: {{ .Values.front.image.pullPolicy }}
resources:
{{ toYaml .Values.front.resources | nindent 12 }}
ports:
- name: taiga-front
containerPort: {{ .Values.front.service.port }}
protocol: TCP
env:
{{ if .Values.ingress.enabled }}
- name: TAIGA_URL
value: "https://{{ .Values.ingress.host }}"
{{ else }}
- name: TAIGA_URL
value: "http://localhost:{{ .Values.front.service.port }}"
{{ end }}
- name: PUBLIC_REGISTER_ENABLED
value: "{{ .Values.publicRegisterEnabled }}"
- name: ENABLE_GITHUB_AUTH
value: "false"
- name: ENABLE_GITLAB_AUTH
value: "false"
- name: ENABLE_OIDC
value: "{{ .Values.oidc.enabled }}"
- name: ENABLE_SLACK
value: "{{ .Values.enableSlack }}"
- name: ENABLE_GITHUB_IMPORTER
value: "{{ .Values.githubImporter.enabled }}"
- name: ENABLE_JIRA_IMPORTER
value: "{{ .Values.jiraImporter.enabled }}"
- name: ENABLE_TRELLO_IMPORTER
value: "{{ .Values.trelloImporter.enabled }}"
{{- if .Values.front.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: /admin/login/
port: {{ .Values.front.service.port }}
initialDelaySeconds: {{ .Values.front.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.front.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.front.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.front.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.front.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.front.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /admin/login/
port: {{ .Values.front.service.port }}
initialDelaySeconds: {{ .Values.front.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.front.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.front.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.front.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.front.readinessProbe.failureThreshold }}
{{- end }}

View File

@@ -0,0 +1,91 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "taiga.fullname" . }}-protected
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "taiga.protected.labels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
spec:
revisionHistoryLimit: 3
replicas: {{ .Values.protected.replicas }}
strategy:
type: Recreate
selector:
matchLabels:
{{- include "taiga.protected.matchLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "taiga.protected.labels" . | nindent 8 }}
app.kubernetes.io/component: {{ template "taiga.name" . }}-protected
annotations:
{{- with .Values.protected.podAnnotations }}
{{ toYaml . | nindent 8 }}
{{- end }}
spec:
affinity:
{{- with .Values.protected.affinity }}
{{ toYaml . | nindent 8 }}
{{- end }}
nodeSelector:
{{- with .Values.protected.nodeSelector }}
{{ toYaml . | nindent 8 }}
{{- end }}
tolerations:
{{- with .Values.protected.tolerations }}
{{ toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ template "taiga.serviceAccountName" . }}
securityContext:
{{- with .Values.protected.securityContext }}
{{ toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ template "taiga.fullname" . }}-protected
image: "{{ .Values.protected.image.repository }}:{{ .Values.protected.image.tag }}"
imagePullPolicy: {{ .Values.protected.image.pullPolicy }}
resources:
{{ toYaml .Values.protected.resources | nindent 12 }}
ports:
- name: taiga-protected
containerPort: {{ .Values.protected.service.port }}
protocol: TCP
env:
- name: SECRET_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.secretKey.existingSecretName }}"
key: "{{ .Values.secretKey.existingSecretKey }}"
- name: MAX_AGE
value: "{{ .Values.maxAge }}"
{{- if .Values.protected.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: /admin/login/
port: {{ .Values.protected.service.port }}
initialDelaySeconds: {{ .Values.protected.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.protected.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.protected.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.protected.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.protected.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.protected.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /admin/login/
port: {{ .Values.protected.service.port }}
initialDelaySeconds: {{ .Values.protected.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.protected.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.protected.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.protected.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.protected.readinessProbe.failureThreshold }}
{{- end }}

View File

@@ -0,0 +1,74 @@
{{- if .Values.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ template "taiga.fullname" . }}
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- toYaml .Values.ingress.annotations | nindent 4 }}
labels:
{{- include "taiga.labels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.ingress.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
spec:
ingressClassName: {{ .Values.ingress.className }}
tls:
- hosts:
- {{ .Values.ingress.host }}
secretName: {{ template "taiga.fullname" . }}-secret-tls
rules:
- host: {{ .Values.ingress.host }}
http:
paths:
- path: /
backend:
service:
name: "{{ template "taiga.fullname" . }}-front"
port:
name: taiga-front
pathType: ImplementationSpecific
- path: /api
backend:
service:
name: "{{ template "taiga.fullname" . }}-back"
port:
name: taiga-back
pathType: ImplementationSpecific
- path: /admin
backend:
service:
name: "{{ template "taiga.fullname" . }}-back"
port:
name: taiga-back
pathType: ImplementationSpecific
{{ if .Values.oidc.enabled }}
- path: /oidc
backend:
service:
name: "{{ template "taiga.fullname" . }}-back"
port:
name: taiga-back
pathType: ImplementationSpecific
{{- end }}
- path: /events
backend:
service:
name: "{{ template "taiga.fullname" . }}-events"
port:
name: taiga-events
pathType: ImplementationSpecific
- path: /media
backend:
service:
name: "{{ template "taiga.fullname" . }}-protected"
port:
name: taiga-protected
pathType: ImplementationSpecific
{{- end }}

View File

@@ -0,0 +1,66 @@
{{- if .Values.createInitialUser }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ template "taiga.fullname" . }}-create-initial-user
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "taiga.labels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
spec:
backoffLimit: 4
template:
spec:
{{- if .Values.back.nodeSelector }}
nodeSelector:
{{ toYaml .Values.back.nodeSelector | nindent 8 }}
{{- end }}
restartPolicy: Never
containers:
- name: {{ template "taiga.fullname" . }}-create-initial-user
image: "{{ .Values.back.image.repository }}:{{ .Values.back.image.tag }}"
imagePullPolicy: {{ .Values.back.image.pullPolicy }}
command:
- sh
- /scripts/createinitialuser.sh
volumeMounts:
- name: create-initial-user
mountPath: /scripts
env:
- name: TAIGA_SECRET_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.secretKey.existingSecretName }}"
key: "{{ .Values.secretKey.existingSecretKey }}"
- name: POSTGRES_USERNAME
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.usernameKey }}"
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.passwordKey }}"
- name: POSTGRES_DATABASE_NAME
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.databaseNameKey }}"
- name: POSTGRES_DATABASE_HOST
valueFrom:
secretKeyRef:
name: "{{ .Values.postgresql.existingSecretName }}"
key: "{{ .Values.postgresql.hostKey }}"
volumes:
- name: create-initial-user
configMap:
name: {{ template "taiga.fullname" . }}-create-initial-user
defaultMode: 0744
{{- end }}

View File

@@ -0,0 +1,54 @@
{{- if and .Values.persistence.static.enabled (not .Values.persistence.static.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ template "taiga.staticVolumeName" . }}
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.persistence.static.retain }}
helm.sh/resource-policy: keep
{{- end }}
labels:
{{- include "taiga.labels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
spec:
storageClassName: {{ .Values.persistence.static.storageClass }}
accessModes:
- {{ .Values.persistence.static.accessMode }}
resources:
requests:
storage: {{ .Values.persistence.static.size }}
{{- end }}
---
{{- if and .Values.persistence.media.enabled (not .Values.persistence.media.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ template "taiga.mediaVolumeName" . }}
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.persistence.media.retain }}
"helm.sh/resource-policy": keep
{{- end }}
labels:
{{- include "taiga.labels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
spec:
storageClassName: {{ .Values.persistence.media.storageClass }}
accessModes:
- {{ .Values.persistence.media.accessMode }}
resources:
requests:
storage: {{ .Values.persistence.media.size }}
{{- end }}

View File

@@ -0,0 +1,20 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "taiga.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.serviceAccount.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "taiga.labels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.serviceAccount.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}

View File

@@ -0,0 +1,138 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "taiga.fullname" . }}-back
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.back.service.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "taiga.back.labels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.back.service.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.back.service.type }}
ports:
- port: {{ .Values.back.service.port }}
targetPort: taiga-back
protocol: TCP
name: taiga-back
selector:
{{- include "taiga.back.matchLabels" . | nindent 4 }}
{{- with .Values.back.service.extraSelectorLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ template "taiga.fullname" . }}-events
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.events.service.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "taiga.events.labels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.events.service.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.events.service.type }}
ports:
- port: {{ .Values.events.service.http.port }}
targetPort: taiga-events
protocol: TCP
name: taiga-events
- port: {{ .Values.events.service.app.port }}
targetPort: taiga-app
protocol: TCP
name: taiga-app
selector:
{{- include "taiga.events.matchLabels" . | nindent 4 }}
{{- with .Values.events.service.extraSelectorLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ template "taiga.fullname" . }}-front
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.front.service.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "taiga.front.labels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.front.service.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.front.service.type }}
ports:
- port: {{ .Values.front.service.port }}
targetPort: taiga-front
protocol: TCP
name: taiga-front
selector:
{{- include "taiga.front.matchLabels" . | nindent 4 }}
{{- with .Values.front.service.extraSelectorLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ template "taiga.fullname" . }}-protected
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.protected.service.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "taiga.protected.labels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.protected.service.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.protected.service.type }}
ports:
- port: {{ .Values.protected.service.port }}
targetPort: taiga-protected
protocol: TCP
name: taiga-protected
selector:
{{- include "taiga.protected.matchLabels" . | nindent 4 }}
{{- with .Values.protected.service.extraSelectorLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}

817
charts/taiga/values.yaml Normal file
View File

@@ -0,0 +1,817 @@
## Global
##
global:
# -- Set an override for the prefix of the fullname
nameOverride:
# -- Set the entire name definition
fullnameOverride:
# -- Set additional global labels. Helm templates can be used.
labels: {}
# -- Set additional global annotations. Helm templates can be used.
annotations: {}
## Service Account
##
serviceAccount:
# -- Specifies whether a service account should be created
create: false
# -- Annotations to add to the service account
annotations: {}
# -- Labels to add to the service account
labels: {}
# -- The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
## Secret key
## Specify the secret name and the key containing a strong secret key
##
secretKey:
existingSecretName: ""
existingSecretKey: ""
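# e.g., referencing a pre-created secret (the secret name and key below are placeholders):
# existingSecretName: taiga-secret-key
# existingSecretKey: secret-key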
## Create initial user with credentials admin/123123
## ref: https://taigaio.github.io/taiga-doc/dist/setup-production.html
##
# TODO: set to false by default or create with a random password which is stored in a secret
# or allow to pass in the data for username and secret
createInitialUser: true
## Max age
##
maxAge: 360
## Create initial templates
## ref: https://taigaio.github.io/taiga-doc/dist/setup-production.html
##
# TODO: This value seems to be unused
createInitialTemplates: false
## Telemetry settings
##
enableTelemetry: true
## Public registration
##
publicRegisterEnabled: true
## Enable debug
## ref: https://taigaio.github.io/taiga-doc/dist/setup-production.html
debug: false
## Postgresql
## Configuration is expected to be stored in a secret, reference the secret name and each key for the value
##
postgresql:
existingSecretName: ""
usernameKey: ""
passwordKey: ""
databaseNameKey: ""
hostKey: ""
portKey: ""
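# e.g., a single secret (name is a placeholder) holding the connection details under the listed keys:
# existingSecretName: taiga-postgresql
# usernameKey: username
# passwordKey: password
# databaseNameKey: dbname
# hostKey: host
# portKey: port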
## OIDC authentication
## Configuration is expected to be stored in a secret, reference the secret name and each key for the value
##
oidc:
enabled: false
existingSecretName: ""
scopesKey: "" # "openid profile email"
signatureAlgorithmKey: "" # "RS256"
clientIdKey: "" # <generate from auth provider>
clientSecretKey: "" # <generate from auth provider>
baseUrlKey: "" # "https://id.fedoraproject.org/openidc"
jwksEndpointKey: "" # "https://id.fedoraproject.org/openidc/Jwks"
authorizationEndpointKey: "" # "https://id.fedoraproject.org/openidc/Authorization"
tokenEndpointKey: "" # "https://id.fedoraproject.org/openidc/Token"
userEndpointKey: "" # "https://id.fedoraproject.org/openidc/UserInfo"
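# e.g., enabling OIDC against a provider whose settings are all stored in one secret (secret name and key names below are placeholders):
# enabled: true
# existingSecretName: taiga-oidc
# scopesKey: scopes
# signatureAlgorithmKey: sign-algo
# clientIdKey: client-id
# clientSecretKey: client-secret
# baseUrlKey: base-url
# jwksEndpointKey: jwks-endpoint
# authorizationEndpointKey: authorization-endpoint
# tokenEndpointKey: token-endpoint
# userEndpointKey: user-endpoint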
## SMTP mail delivery configuration
## ref: https://taigaio.github.io/taiga-doc/dist/setup-production.html
##
email:
enabled: false
from: no-reply@example.com
host: localhost
port: 587
tls: false
ssl: false
user: ""
## Specify an existing secret containing the password for the SMTP user
##
existingPasswordSecret: ""
existingSecretPasswordKey: ""
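# e.g., SMTP over STARTTLS with the password read from an existing secret (host and secret names are placeholders):
# enabled: true
# from: taiga@example.com
# host: smtp.example.com
# port: 587
# tls: true
# ssl: false
# user: taiga@example.com
# existingPasswordSecret: taiga-smtp
# existingSecretPasswordKey: password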
## Slack
##
enableSlack: false
## Importers
##
# Github importer
githubImporter:
enabled: false
existingSecretName: ""
existingSecretClientIdKey: ""
existingSecretClientSecretKey: ""
# Jira importer
jiraImporter:
enabled: false
existingSecretName: ""
existingSecretConsumerKeyKey: ""
existingSecretCertKey: ""
existingSecretPubCertKey: ""
# Trello importer
trelloImporter:
enabled: false
existingSecretName: ""
existingSecretApiKeyKey: ""
existingSecretSecretKeyKey: ""
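# e.g., enabling the Trello importer with credentials from an existing secret (secret and key names are placeholders):
# trelloImporter:
#   enabled: true
#   existingSecretName: taiga-trello
#   existingSecretApiKeyKey: api-key
#   existingSecretSecretKeyKey: secret-key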
## taiga-back
##
back:
## Taiga image version
## ref: https://hub.docker.com/r/taigaio/taiga5/tags
##
image:
repository: taigaio/taiga-back
tag: "6.7.3"
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Define the number of pods the deployment will create
## Do not change unless your persistent volume allows more than one writer, ie NFS
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/
##
replicas: 1
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Node labels for pod assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext: {}
## taiga containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits: {}
# cpu: 2
# memory: 1Gi
requests: {}
# cpu: 1
# memory: 1Gi
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
##
livenessProbe:
enabled: false
initialDelaySeconds: 20
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
enabled: false
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
## Environment variables, to pass to the entry point
##
# extraVars:
# - name: NAMI_DEBUG
# value: --log-level trace
## Service
##
service:
# -- Set the service type
type: ClusterIP
# -- Provide additional annotations which may be required.
annotations: {}
# -- Provide additional labels which may be required.
labels: {}
# -- Allow adding additional match labels
extraSelectorLabels: {}
# -- HTTP port number
port: 8000
## Async
##
async:
## Taiga image version
## ref: https://hub.docker.com/r/taigaio/taiga5/tags
##
image:
repository: taigaio/taiga-back
tag: "6.7.3"
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Define the number of pods the deployment will create
## Do not change unless your persistent volume allows more than one writer, ie NFS
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/
##
replicas: 1
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext: {}
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Node labels for pod assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## taiga containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits: {}
# cpu: 2
# memory: 1Gi
requests: {}
# cpu: 1
# memory: 1Gi
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
##
livenessProbe:
enabled: false
initialDelaySeconds: 20
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
enabled: false
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
## Environment variables, to pass to the entry point
##
# extraVars:
# - name: NAMI_DEBUG
# value: --log-level trace
## Service
##
service:
# -- Set the service type
type: ClusterIP
# -- Provide additional annotations which may be required.
annotations: {}
# -- Provide additional labels which may be required.
labels: {}
# -- Allow adding additional match labels
extraSelectorLabels: {}
# -- HTTP port number
port: 8000
## Async Rabbitmq
## https://artifacthub.io/packages/helm/bitnami/rabbitmq?modal=values-schema
##
async-rabbitmq:
auth:
## @param auth.username RabbitMQ application username
## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables
##
username: taiga
## @param auth.existingPasswordSecret Existing secret with RabbitMQ credentials (existing secret must contain a value for `rabbitmq-password` key or override with setting auth.existingSecretPasswordKey)
## e.g:
## existingPasswordSecret: name-of-existing-secret
##
existingPasswordSecret: ""
existingSecretPasswordKey: ""
## @param auth.existingErlangSecret Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key or override with auth.existingSecretErlangKey)
## e.g:
## existingErlangSecret: name-of-existing-secret
##
existingErlangSecret: ""
## @param auth.existingSecretErlangKey [default: rabbitmq-erlang-cookie] Erlang cookie key to be retrieved from existing secret
## NOTE: ignored unless `auth.existingErlangSecret` parameter is set
##
existingSecretErlangKey: ""
## @param configurationExistingSecret Existing secret with the configuration to use as rabbitmq.conf.
## Must contain the key "rabbitmq.conf"
## Takes precedence over `configuration`, so do not use both simultaneously
## With providing an existingSecret, extraConfiguration and extraConfigurationExistingSecret do not take any effect
##
configurationExistingSecret: ""
## @param extraConfiguration [string] Configuration file content: extra configuration to be appended to RabbitMQ configuration
## Use this instead of `configuration` to add more configuration
## Do not use simultaneously with `extraConfigurationExistingSecret`
##
extraConfiguration: |-
default_vhost = taiga
default_permissions.configure = .*
default_permissions.read = .*
default_permissions.write = .*
## Events
##
events:
## Taiga image version
## ref: https://hub.docker.com/r/taigaio/taiga5/tags
##
image:
repository: taigaio/taiga-events
tag: "6.7.0"
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext: {}
## Define the number of pods the deployment will create
## Do not change unless your persistent volume allows more than one writer, ie NFS
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/
##
replicas: 1
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Node labels for pod assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## taiga containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits: {}
# cpu: 2
# memory: 1Gi
requests: {}
# cpu: 1
# memory: 1Gi
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
##
livenessProbe:
enabled: false
initialDelaySeconds: 20
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
enabled: false
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
## Environment variables, to pass to the entry point
##
# extraVars:
# - name: NAMI_DEBUG
# value: --log-level trace
## Service
##
service:
# -- Set the service type
type: ClusterIP
# -- Provide additional annotations which may be required.
annotations: {}
# -- Provide additional labels which may be required.
labels: {}
# -- Allow adding additional match labels
extraSelectorLabels: {}
http:
# -- HTTP port number
port: 8888
app:
# -- HTTP port number
port: 3023
## Events Rabbitmq
## https://artifacthub.io/packages/helm/bitnami/rabbitmq?modal=values-schema
##
events-rabbitmq:
auth:
## @param auth.username RabbitMQ application username
## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables
##
username: taiga
## @param auth.existingPasswordSecret Existing secret with RabbitMQ credentials (existing secret must contain a value for `rabbitmq-password` key or override with setting auth.existingSecretPasswordKey)
## e.g:
## existingPasswordSecret: name-of-existing-secret
##
existingPasswordSecret: ""
existingSecretPasswordKey: ""
## @param auth.existingErlangSecret Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key or override with auth.existingSecretErlangKey)
## e.g:
## existingErlangSecret: name-of-existing-secret
##
existingErlangSecret: ""
## @param auth.existingSecretErlangKey [default: rabbitmq-erlang-cookie] Erlang cookie key to be retrieved from existing secret
## NOTE: ignored unless `auth.existingErlangSecret` parameter is set
##
existingSecretErlangKey: ""
## @param configurationExistingSecret Existing secret with the configuration to use as rabbitmq.conf.
## Must contain the key "rabbitmq.conf"
## Takes precedence over `configuration`, so do not use both simultaneously
## With providing an existingSecret, extraConfiguration and extraConfigurationExistingSecret do not take any effect
##
configurationExistingSecret: ""
## @param extraConfiguration [string] Configuration file content: extra configuration to be appended to RabbitMQ configuration
## Use this instead of `configuration` to add more configuration
## Do not use simultaneously with `extraConfigurationExistingSecret`
##
extraConfiguration: |-
default_vhost = taiga
default_permissions.configure = .*
default_permissions.read = .*
default_permissions.write = .*
## Protected
##
protected:
## Taiga image version
## ref: https://hub.docker.com/r/taigaio/taiga5/tags
##
image:
repository: taigaio/taiga-protected
tag: "6.7.0"
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext: {}
## Define the number of pods the deployment will create
## Do not change unless your persistent volume allows more than one writer, ie NFS
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/
##
replicas: 1
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Node labels for pod assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## taiga containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits: {}
# cpu: 2
# memory: 1Gi
requests: {}
# cpu: 1
# memory: 1Gi
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
##
livenessProbe:
enabled: false
initialDelaySeconds: 20
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
enabled: false
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
## Environment variables, to pass to the entry point
##
# extraVars:
# - name: NAMI_DEBUG
# value: --log-level trace
## Service
##
service:
# -- Set the service type
type: ClusterIP
# -- Provide additional annotations which may be required.
annotations: {}
# -- Provide additional labels which may be required.
labels: {}
# -- Allow adding additional match labels
extraSelectorLabels: {}
# -- HTTP port number
port: 8003
## Front
##
front:
## Taiga image version
## ref: https://hub.docker.com/r/taigaio/taiga5/tags
##
image:
repository: taigaio/taiga-front
tag: "6.7.7"
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Define the number of pods the deployment will create
## Do not change unless your persistent volume allows more than one writer, ie NFS
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/
##
replicas: 1
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext: {}
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Node labels for pod assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## taiga containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases the chances the chart runs on environments with
# limited resources, such as Minikube. If you do want to specify resources, uncomment the
# following lines, adjust them as necessary, and remove the curly braces after 'limits:' and 'requests:'.
limits: {}
# cpu: 2
# memory: 1Gi
requests: {}
# cpu: 1
# memory: 1Gi
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
##
livenessProbe:
enabled: false
initialDelaySeconds: 20
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
enabled: false
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
## Environment variables to pass to the entry point
##
# extraVars:
# - name: NAMI_DEBUG
# value: --log-level trace
## Service
##
service:
# -- Set the service type
type: ClusterIP
# -- Provide additional annotations which may be required.
annotations: {}
# -- Provide additional labels which may be required.
labels: {}
# -- Allow adding additional match labels
extraSelectorLabels: {}
# -- HTTP port number
port: 80
## Configure the ingress resource that allows you to access the
## taiga installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
# -- Enables or disables the ingress
enabled: false
# -- Provide additional annotations which may be required.
annotations: {}
# -- Provide additional labels which may be required.
labels: {}
# -- Set the ingressClass that is used for this ingress.
className: ""
## Configure the host for the ingress
host: chart-example.local
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
static:
# -- Enables or disables the persistence item. Defaults to true
enabled: true
# -- Storage Class for the config volume.
# If set to `-`, dynamic provisioning is disabled.
# If set to something else, the given storageClass is used.
# If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner.
storageClass: ""
# -- If you want to reuse an existing claim, the name of the existing PVC can be passed here.
existingClaim: ""
# -- AccessMode for the persistent volume.
# Make sure to select an access mode that is supported by your storage provider!
# [[ref]](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes)
accessMode: ReadWriteOnce
# -- The amount of storage that is requested for the persistent volume.
size: 5Gi
# -- Set to true to retain the PVC upon `helm uninstall`
retain: false
media:
# -- Enables or disables the persistence item. Defaults to true
enabled: true
# -- Storage Class for the config volume.
# If set to `-`, dynamic provisioning is disabled.
# If set to something else, the given storageClass is used.
# If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner.
storageClass: ""
# -- If you want to reuse an existing claim, the name of the existing PVC can be passed here.
existingClaim: ""
# -- AccessMode for the persistent volume.
# Make sure to select an access mode that is supported by your storage provider!
# [[ref]](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes)
accessMode: ReadWriteOnce
# -- The amount of storage that is requested for the persistent volume.
size: 5Gi
# -- Set to true to retain the PVC upon `helm uninstall`
retain: false
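
As a usage sketch for the values above (not part of the chart), the override below enables the front liveness probe and pins modest resource requests; the nesting assumes these keys sit under the front block as laid out above, and every value shown is a placeholder.

# values-example.yaml -- illustrative override only; all values below are placeholders
front:
  replicas: 1
  image:
    repository: taigaio/taiga-front
    tag: "6.7.7"
  livenessProbe:
    enabled: true
    initialDelaySeconds: 30
  resources:
    requests:
      cpu: 250m
      memory: 256Mi
# Applied with something like:
#   helm upgrade --install taiga . -f values-example.yaml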

View File

@@ -1,6 +1,6 @@
 apiVersion: v2
 name: tubearchivist-to-jellyfin
-version: 0.0.3
+version: 0.0.4
 description: Import library from tubearchivist to jellyfin
 keywords:
   - tubearchivist

View File

@@ -24,10 +24,23 @@ spec:
         imagePullPolicy: {{ .Values.image.pullPolicy }}
         command: ["python"]
         args: ["main.py"]
-        {{- with .Values.envFrom }}
-        envFrom:
-          {{- toYaml . | nindent 16 }}
-        {{- end }}
+        env:
+          - name: TA_URL
+            value: "{{ .Values.config.tubearchivistUrl }}"
+          - name: TA_TOKEN
+            valueFrom:
+              secretKeyRef:
+                name: "{{ .Values.secrets.tubearchivistToken.existingSecretName }}"
+                key: "{{ .Values.secrets.tubearchivistToken.existingSecretKey }}"
+          - name: JF_URL
+            value: "{{ .Values.config.jellyfinUrl }}"
+          - name: JF_TOKEN
+            valueFrom:
+              secretKeyRef:
+                name: "{{ .Values.secrets.jellyfinToken.existingSecretName }}"
+                key: "{{ .Values.secrets.jellyfinToken.existingSecretKey }}"
+          - name: LISTEN_PORT
+            value: "8001"
         volumeMounts:
           - name: tubearchivist-youtube
             mountPath: /youtube

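The env entries added above pull TA_TOKEN and JF_TOKEN out of pre-existing Secrets via secretKeyRef. A minimal sketch of such Secrets follows; the metadata names and token values are placeholders, and only the key name token lines up with the default existingSecretKey introduced in the values.yaml diff below.

# Illustrative only -- Secrets the chart could point at; names and values are placeholders.
apiVersion: v1
kind: Secret
metadata:
  name: tubearchivist-api-token
type: Opaque
stringData:
  token: "<tubearchivist API token>"
---
apiVersion: v1
kind: Secret
metadata:
  name: jellyfin-api-token
type: Opaque
stringData:
  token: "<jellyfin API token>"
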
View File

@@ -4,7 +4,16 @@ image:
   repository: bbilly1/tubearchivist-jf
   tag: v0.1.2
   pullPolicy: IfNotPresent
-envFrom:
 persistence:
   youtube:
-    claimName:
+    claimName: ""
+config:
+  tubearchivistUrl: ""
+  jellyfinUrl: ""
+secrets:
+  tubearchivistToken:
+    existingSecretName: ""
+    existingSecretKey: token
+  jellyfinToken:
+    existingSecretName: ""
+    existingSecretKey: token

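Putting the template and values changes together, a hedged example of a user-supplied override could look like the following; the URLs and Secret names are placeholders, and the referenced Secrets must already exist in the release namespace.

# Illustrative values override -- URLs and Secret names are placeholders.
config:
  tubearchivistUrl: "http://tubearchivist:8000"
  jellyfinUrl: "http://jellyfin:8096"
secrets:
  tubearchivistToken:
    existingSecretName: tubearchivist-api-token
    existingSecretKey: token
  jellyfinToken:
    existingSecretName: jellyfin-api-token
    existingSecretKey: token
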
View File

@@ -1,6 +1,6 @@
 apiVersion: v2
 name: tubearchivist
-version: 0.0.7
+version: 0.2.2
 description: Chart for Tube Archivist
 keywords:
   - download
@@ -14,9 +14,9 @@ maintainers:
 icon: https://avatars.githubusercontent.com/u/102734415?s=48&v=4
 dependencies:
   - name: redis
-    version: 18.19.4
+    version: 19.1.1
     repository: https://charts.bitnami.com/bitnami
   - name: elasticsearch
-    version: 19.21.2
+    version: 20.0.4
     repository: https://charts.bitnami.com/bitnami
-appVersion: v0.4.6
+appVersion: v0.4.7

View File

@@ -3,7 +3,7 @@ deployment:
   strategy: Recreate
 image:
   repository: bbilly1/tubearchivist
-  tag: v0.4.6
+  tag: v0.4.7
   imagePullPolicy: IfNotPresent
 env:
   TZ: UTC
@@ -20,22 +20,22 @@
   port: 8000
 ingress:
   enabled: false
-  className:
-  annotations:
-  host:
+  className: ""
+  annotations: ""
+  host: ""
 persistence:
   cache:
     enabled: false
-    storageClassName: default
+    storageClassName: ""
     storageSize: 5Gi
     accessMode: ReadWriteOnce
     volumeMode: Filesystem
   youtube:
-    claimName:
+    claimName: ""
 redis:
   image:
     repository: redis/redis-stack-server
-    tag: 7.2.0-v9
+    tag: 7.2.0-v10
   architecture: standalone
   auth:
     enabled: false
@@ -48,17 +48,17 @@ redis:
     loadmodule /opt/redis-stack/lib/rejson.so
 elasticsearch:
   global:
-    storageClass: default
+    storageClass: ""
   extraEnvVars:
     - name: "discovery.type"
       value: "single-node"
     - name: xpack.security.enabled
       value: "true"
-  extraEnvVarsSecret:
+  extraEnvVarsSecret: []
   extraConfig:
     path:
       repo: /usr/share/elasticsearch/data/snapshot
-  extraVolumes:
+  extraVolumes: []
   extraVolumeMounts:
     - name: snapshot
       mountPath: /usr/share/elasticsearch/data/snapshot
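
One note on the last hunk: extraVolumeMounts still mounts a volume named snapshot while extraVolumes is now emptied to []. If the snapshot repository is actually used, a matching volume definition has to be supplied again, since a mount without a corresponding volume is rejected by Kubernetes; a hypothetical sketch (the claim name is a placeholder) might be:

# Illustrative only -- a volume definition to back the "snapshot" mount above.
elasticsearch:
  extraVolumes:
    - name: snapshot
      persistentVolumeClaim:
        claimName: es-snapshot  # placeholder PVC name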