Compare commits

...

88 Commits

Author SHA1 Message Date
5d2cdc9648 update dependencies 2024-05-20 12:13:43 -05:00
99c106bd63 update dependencies 2024-05-20 12:13:33 -05:00
e6938fe645 bump version 2024-05-20 12:12:00 -05:00
7f5d870579 update dependencies 2024-05-18 14:40:42 -05:00
6cf2db87f4 update dependencies 2024-05-18 14:40:13 -05:00
537d9bd125 update dependencies 2024-05-18 14:39:55 -05:00
9627287f30 update base image 2024-05-17 12:08:15 -05:00
dd724b5b32 update base image 2024-05-17 12:06:04 -05:00
cd91a16c75 pass destinationPath through to values 2024-05-16 17:19:41 -05:00
69900d3931 update image version 2024-05-16 13:57:33 -05:00
f80cec8c82 change renovate config 2024-05-16 13:51:53 -05:00
f3d629fe00 add namespace to authentik proxy 2024-05-16 13:44:28 -05:00
4d3574ffa8 add namespace to authentik proxy 2024-05-16 13:44:20 -05:00
f98268fd25 add namespace to authentik proxy 2024-05-16 13:44:09 -05:00
7514ea022e bump chart version 2024-05-16 13:15:13 -05:00
a65a0dbcec change timezone 2024-05-16 13:01:35 -05:00
6bc5aea01f update dependencies 2024-05-16 12:48:42 -05:00
80940910a9 update dependencies 2024-05-16 12:48:19 -05:00
6895b078b5 update image version 2024-05-16 12:47:41 -05:00
27e70a1786 update image version 2024-05-16 12:46:44 -05:00
de21d07a5d update image version 2024-05-16 12:45:49 -05:00
58cc48724b update image version 2024-05-16 12:45:15 -05:00
8a357574e9 update dependencies 2024-05-16 12:44:35 -05:00
220e9e011b update image version 2024-05-16 12:42:57 -05:00
9483523eb8 update dependencies 2024-05-16 12:42:04 -05:00
ca205a8802 update dependencies 2024-05-16 12:41:41 -05:00
36267ada6f update middleware api 2024-05-16 12:35:39 -05:00
153b7a1ad2 update middleware api 2024-05-16 12:35:27 -05:00
9b30408661 update middleware api 2024-05-16 12:35:04 -05:00
947120d73c fix backup schedule 2024-04-26 14:35:54 -06:00
a62e24142c add mysql cluster 2024-04-26 14:05:21 -06:00
03c825e816 change s3 path 2024-04-26 10:00:10 -06:00
38c2be01f9 remove kyoo 2024-04-25 12:45:25 -06:00
renovate[bot]
5ac88f9aa8 Update homeassistant/home-assistant Docker tag to v2024.4.4 (#44)
* Update homeassistant/home-assistant Docker tag to v2024.4.4

* update chart

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-04-23 16:43:29 -06:00
renovate[bot]
3c3f1bdb76 Update Helm release redis to v19.1.3 (#43)
* Update Helm release redis to v19.1.3

* update chart versions

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-04-23 15:57:32 -06:00
renovate[bot]
718acdc607 Update Helm release rabbitmq to v14.0.2 (#42)
* Update Helm release rabbitmq to v14.0.2

* update chart

* remove trailing whitespace

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-04-23 03:34:32 -06:00
renovate[bot]
71a5d81c09 Update bbilly1/tubearchivist-jf Docker tag to v0.2.0 (#41)
* Update bbilly1/tubearchivist-jf Docker tag to v0.2.0

* update chart

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-04-23 03:32:57 -06:00
renovate[bot]
e2d4c395e5 Update Helm release elasticsearch to v21 (#40)
* Update Helm release elasticsearch to v21

* update elastic search chart

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-04-22 06:43:28 -06:00
fd611813b7 add annotations to deployment 2024-04-21 06:40:46 -06:00
ab5da15b10 remove lazy-librarian 2024-04-21 04:59:41 -06:00
e584566dde fix app version 2024-04-21 04:03:32 -06:00
f06aa3a175 add lazy-librarian 2024-04-21 03:59:25 -06:00
9abeba8f9d add penpot 2024-04-19 21:34:56 -06:00
1f498323a4 change postgres settings to import only password from secret 2024-04-19 05:41:38 -06:00
646e3a2c36 grant read to unlogged users 2024-04-19 05:32:28 -06:00
197ca6ef81 add quotes around default vhost of / 2024-04-19 05:22:14 -06:00
b8780a7339 add default vhost 2024-04-19 05:05:10 -06:00
b90968ea85 fix scanner image name 2024-04-19 05:01:23 -06:00
d3275f8067 create switch if various api keys are provided 2024-04-19 04:58:46 -06:00
649f362824 remove extra configuration from rabbitmq 2024-04-19 04:54:03 -06:00
732761d73b fix default vhost 2024-04-19 04:50:49 -06:00
0e7627cb7d fix oidc values path 2024-04-19 04:41:22 -06:00
d81c246b35 add kyoo 2024-04-19 04:08:55 -06:00
renovate[bot]
b97dd1f892 Update Helm release redis to v19.1.2 (#39)
* Update Helm release redis to v19.1.2

* update chart

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-04-18 22:02:37 -06:00
0b8374753d change default cpu limit 2024-04-18 05:49:15 -06:00
cb29afdcb2 fix source server naming 2024-04-18 05:35:06 -06:00
4f366535c3 change default cpu limit 2024-04-18 04:35:42 -06:00
f32ef77551 add additional options for recovery 2024-04-18 03:43:52 -06:00
d02f649164 remove default option in bootstrap helper 2024-04-18 03:27:00 -06:00
3b50ca2bfe fix comparison operator position 2024-04-18 01:51:06 -06:00
17796a1183 increment chart version 2024-04-18 01:48:03 -06:00
512b1d4243 set default value for comparison 2024-04-18 01:47:46 -06:00
a2b0cdd5b6 fix ordering of comparison operator 2024-04-18 01:39:33 -06:00
e79af169b9 calculate length of array separately 2024-04-18 01:35:19 -06:00
661f9342b9 fix length measurement of database 2024-04-18 01:17:16 -06:00
9d1244c7a1 remove patch from image tag 2024-04-18 01:07:36 -06:00
0dc50bf88f change default cluster name to start with release 2024-04-17 20:01:47 -06:00
75accbbf87 use semver function to pull major version into cluster name 2024-04-17 20:00:06 -06:00
19fbd95a79 change templating for cluster naming 2024-04-17 19:45:08 -06:00
d73c42fd42 change default values 2024-04-17 19:15:54 -06:00
renovate[bot]
6399a8ca97 Update Helm release rabbitmq to v14 (#34)
* Update Helm release rabbitmq to v14

* update chart

* align comments for readability

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-04-17 19:13:57 -06:00
renovate[bot]
580c7da73a Update Helm release redis to v19.1.1 (#18)
* Update Helm release redis to v19.1.1

* update charts

* fix indentation

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-04-17 19:09:36 -06:00
renovate[bot]
11d47799f1 Update dock.mau.dev/mautrix/whatsapp Docker tag to v0.10.7 (#36)
* Update dock.mau.dev/mautrix/whatsapp Docker tag to v0.10.7

* update helm chart

* fix indentation

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-04-17 19:05:30 -06:00
renovate[bot]
7d825da72d Update linuxserver/code-server Docker tag to v4.23.1 (#35)
* Update linuxserver/code-server Docker tag to v4.23.1

* update helm chart

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-04-17 19:05:16 -06:00
renovate[bot]
adf49292bd Update halfshot/matrix-hookshot Docker tag to v5.3.0 (#38)
* Update halfshot/matrix-hookshot Docker tag to v5.3.0

* update chart

* fix linting errors

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: alexlebens <alexanderlebens@gmail.com>
2024-04-17 19:03:21 -06:00
renovate[bot]
63e69df14a Update ghcr.io/gethomepage/homepage Docker tag to v0.8.12 (#37)
* Update ghcr.io/gethomepage/homepage Docker tag to v0.8.12

* update chart

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Alex Lebens <alexanderlebens@gmail.com>
2024-04-17 18:55:36 -06:00
7bd8a4525a if oidc is enabled add an ingress path to the backend 2024-04-17 04:42:51 -06:00
a860789056 add env to front deployment about oidc enablement 2024-04-15 03:31:49 -06:00
58f89640a8 fix naming of changed rabbitmq charts 2024-04-15 02:47:45 -06:00
132e086d6d change rabbitmq chart naming to generate proper dns and app names 2024-04-15 02:44:10 -06:00
617505ee99 fix length of app port 2024-04-13 23:37:58 -06:00
34a21702ab fix http service value 2024-04-13 23:32:48 -06:00
15d3253af9 fix events app port to service and port 2024-04-13 23:28:03 -06:00
90970ef172 fix events health endpoint 2024-04-13 23:19:59 -06:00
0d6f789ffd increment chart version 2024-04-13 23:14:21 -06:00
f968776cd0 fix trello importer switch for async container 2024-04-13 23:13:44 -06:00
0b2beb08b7 fix indentation of events deployment 2024-04-13 23:11:44 -06:00
8fae31a679 properly enable/disable trello importer 2024-04-13 23:07:20 -06:00
60 changed files with 3071 additions and 606 deletions

.github/renovate.json vendored

@@ -5,7 +5,7 @@
"mergeConfidence:all-badges",
":rebaseStalePrs"
],
"timezone": "US/Mountain",
"timezone": "US/Central",
"schedule": [
"every weekday"
],
@@ -40,7 +40,7 @@
},
{
"description": "Label service images",
"matchPackageNames": [
"matchDepNames": [
"ghcr.io/alex1989hu/kubelet-serving-cert-approver",
"ghcr.io/cloudnative-pg/postgresql",
"redis/redis-stack-server"
@@ -57,7 +57,7 @@
},
{
"description": "Label service charts",
"matchPackageNames": [
"matchDepNames": [
"elasticsearch",
"redis"
],
@@ -73,7 +73,7 @@
},
{
"description": "Label application images",
"matchPackageNames": [
"matchDepNames": [
"bbilly1/tubearchivist-jf",
"bbilly1/tubearchivist",
"freshrss/freshrss",
@@ -97,7 +97,7 @@
},
{
"description": "Label application charts",
"matchPackageNames": [],
"matchDepNames": [],
"matchDatasources": [
"helm"
],
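
For reference, after the rename a rule of this shape reads roughly as follows (a sketch assembled from the hunks above, not the complete file; pairing the service-chart names with the helm datasource is an assumption):

{
  "description": "Label service charts",
  "matchDepNames": [
    "elasticsearch",
    "redis"
  ],
  "matchDatasources": [
    "helm"
  ]
}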


@@ -1,6 +1,6 @@
apiVersion: v2
name: calibre-server
version: 0.0.6
version: 0.0.8
description: Chart for Calibre content database
keywords:
- media


@@ -31,4 +31,5 @@ spec:
- kind: Service
name: {{ .Values.ingressRoute.authentik.outpost }}
port: {{ .Values.ingressRoute.authentik.port }}
namespace: {{ .Values.ingressRoute.authentik.namespace }}
{{- end }}


@@ -1,5 +1,5 @@
{{- if .Values.ingressRoute.enabled }}
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: "authentik-{{ .Release.Name }}"


@@ -31,8 +31,9 @@ ingressRoute:
http:
host:
authentik:
outpost:
outpost: ""
port: 9000
namespace: ""
persistence:
config:
storageClassName: default
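
With the new namespace field, a release's values can point the authentik outpost reference at a Service in another namespace, roughly like this sketch (names are illustrative; only the keys come from the chart):

ingressRoute:
  authentik:
    outpost: authentik-outpost
    port: 9000
    namespace: authentik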


@@ -1,6 +1,6 @@
apiVersion: v2
name: home-assistant
version: 0.1.9
version: 0.1.16
description: Chart for Home Assistant
keywords:
- home-automation
@@ -9,4 +9,4 @@ sources:
maintainers:
- name: alexlebens
icon: https://avatars.githubusercontent.com/u/13844975?s=200&v=4
appVersion: v2024.4.3
appVersion: v2024.5.4


@@ -18,7 +18,7 @@ spec:
match: "Host(`{{ .Values.ingressRoute.host }}`)"
middlewares:
- name: "authentik-{{ .Release.Name }}"
namespace: {{ .Release.Namespace }}
namespace: {{ .Release.Namespace }}
priority: 10
services:
- kind: Service
@@ -30,7 +30,8 @@ spec:
services:
- kind: Service
name: {{ .Values.ingressRoute.authentik.outpost }}
port: {{ .Values.ingressRoute.authentik.port }}
port: {{ .Values.ingressRoute.authentik.port }}
namespace: {{ .Values.ingressRoute.authentik.namespace }}
{{- end }}
---
@@ -54,7 +55,7 @@ spec:
match: "Host(`{{ .Values.codeserver.ingressRoute.host }}`)"
middlewares:
- name: "authentik-{{ .Release.Name }}"
namespace: {{ .Release.Namespace }}
namespace: {{ .Release.Namespace }}
priority: 10
services:
- kind: Service
@@ -66,5 +67,6 @@ spec:
services:
- kind: Service
name: {{ .Values.ingressRoute.authentik.outpost }}
port: {{ .Values.ingressRoute.authentik.port }}
port: {{ .Values.ingressRoute.authentik.port }}
namespace: {{ .Values.ingressRoute.authentik.namespace }}
{{- end }}


@@ -1,5 +1,5 @@
{{- if .Values.ingressRoute.enabled }}
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: "authentik-{{ .Release.Name }}"


@@ -3,7 +3,7 @@ deployment:
strategy: Recreate
image:
repository: homeassistant/home-assistant
tag: 2024.4.3
tag: 2024.5.4
imagePullPolicy: IfNotPresent
env:
TZ: UTC
@@ -22,8 +22,9 @@ ingressRoute:
enabled: true
host:
authentik:
outpost:
outpost: ""
port: 9000
namespace: ""
metrics:
enabled: false
serviceMonitor:
@@ -56,7 +57,7 @@ codeserver:
enabled: false
image:
repository: linuxserver/code-server
tag: 4.23.0
tag: 4.89.1
imagePullPolicy: IfNotPresent
env:
TZ: UTC


@@ -1,6 +1,6 @@
apiVersion: v2
name: homepage
version: 0.0.10
version: 0.0.15
description: Chart for benphelps homepage
keywords:
- dashboard
@@ -9,4 +9,4 @@ sources:
maintainers:
- name: alexlebens
icon: https://github.com/benphelps/homepage/blob/de584eae8f12a0d257e554e9511ef19bd2a1232c/public/mstile-150x150.png
appVersion: v0.8.11
appVersion: v0.8.13


@@ -9,6 +9,10 @@ metadata:
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/component: web
app.kubernetes.io/part-of: homepage
annotations:
{{- with .Values.deployment.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
revisionHistoryLimit: 3
replicas: {{ .Values.deployment.replicas }}
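
Because the annotations block is rendered from values, a release can now attach arbitrary metadata to the homepage Deployment, for example (the annotation shown is illustrative, not part of the chart):

deployment:
  annotations:
    reloader.stakater.com/auto: "true"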


@@ -30,3 +30,4 @@ spec:
- kind: Service
name: {{ .Values.ingressRoute.authentik.outpost }}
port: {{ .Values.ingressRoute.authentik.port }}
namespace: {{ .Values.ingressRoute.authentik.namespace }}


@@ -1,4 +1,4 @@
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: "authentik-{{ .Release.Name }}"


@@ -1,9 +1,10 @@
deployment:
annotations: {}
replicas: 1
strategy: Recreate
image:
repository: ghcr.io/gethomepage/homepage
tag: v0.8.11
tag: v0.8.13
imagePullPolicy: IfNotPresent
env:
envFrom:
@@ -18,10 +19,11 @@ service:
http:
port: 3000
ingressRoute:
host:
host: ""
authentik:
outpost:
outpost: ""
port: 9000
namespace: ""
config:
bookmarks:
services:


@@ -1,6 +1,6 @@
apiVersion: v2
name: libation
version: 0.0.6
version: 0.0.7
description: Import library from audible
keywords:
- audiobooks
@@ -10,4 +10,4 @@ sources:
maintainers:
- name: alexlebens
icon: https://getlibation.com/images/libation-logo.png
appVersion: "11.1.0"
appVersion: "11.3.13"


@@ -2,7 +2,7 @@ job:
schedule: "0 * * * *"
image:
repository: rmcrackan/libation
tag: "11.1.0"
tag: "11.3.13"
pullPolicy: IfNotPresent
persistence:
config:


@@ -1,6 +1,6 @@
apiVersion: v2
name: matrix-hookshot
version: 0.1.0
version: 0.1.1
description: Chart for Matrix Hookshot
keywords:
- matrix
@@ -11,4 +11,4 @@ sources:
maintainers:
- name: alexlebens
icon: https://avatars.githubusercontent.com/u/8418310?s=48&v=4
appVersion: "5.2.1"
appVersion: "5.3.0"


@@ -3,7 +3,7 @@ deployment:
strategy: Recreate
image:
repository: halfshot/matrix-hookshot
tag: "5.2.1"
tag: "5.3.0"
imagePullPolicy: IfNotPresent
env: {}
envFrom: []
@@ -39,7 +39,7 @@ ingress:
enabled: false
className: ""
annotations: {}
host: ""
host: ""
metrics:
enabled: false
serviceMonitor:
@@ -81,7 +81,7 @@ hookshot:
resources:
- widgets
#github:
# github:
# # (Optional) Configure this to enable GitHub support
# auth:
# # Authentication for the GitHub App.
@@ -104,7 +104,7 @@ hookshot:
# # (Optional) Prefix used when creating ghost users for GitHub accounts.
# _github_
#gitlab:
# gitlab:
# # (Optional) Configure this to enable GitLab support
# instances:
# gitlab.com:
@@ -119,7 +119,7 @@ hookshot:
# # (Optional) Aggregate comments by waiting this many miliseconds before posting them to Matrix. Defaults to 5000 (5 seconds)
# 5000
#figma:
# figma:
# # (Optional) Configure this to enable Figma support
# publicUrl: https://example.com/hookshot/
# instances:
@@ -128,7 +128,7 @@ hookshot:
# accessToken: your-personal-access-token
# passcode: your-webhook-passcode
#jira:
# jira:
# # (Optional) Configure this to enable Jira support. Only specify `url` if you are using a On Premise install (i.e. not atlassian.com)
# webhook:
# # Webhook settings for JIRA
@@ -139,7 +139,7 @@ hookshot:
# client_secret: bar
# redirect_uri: https://example.com/oauth/
#generic:
# generic:
# # (Optional) Support for generic webhook events.
# #'allowJsTransformationFunctions' will allow users to write short transformation snippets in code, and thus is unsafe in untrusted environments
@@ -150,23 +150,23 @@ hookshot:
# allowJsTransformationFunctions: false
# waitForComplete: false
#feeds:
# feeds:
# # (Optional) Configure this to enable RSS/Atom feed support
# enabled: false
# pollConcurrency: 4
# pollIntervalSeconds: 600
# pollTimeoutSeconds: 30
#provisioning:
# provisioning:
# # (Optional) Provisioning API for integration managers
# secret: "!secretToken"
#bot:
# bot:
# # (Optional) Define profile information for the bot user
# displayname: Hookshot Bot
# avatar: mxc://half-shot.uk/2876e89ccade4cb615e210c458e2a7a6883fe17d
#serviceBots:
# serviceBots:
# # (Optional) Define additional bot users for specific services
# - localpart: feeds
# displayname: Feeds
@@ -174,21 +174,21 @@ hookshot:
# prefix: "!feeds"
# service: feeds
#metrics:
# metrics:
# # (Optional) Prometheus metrics support
# enabled: true
#cache:
# cache:
# # (Optional) Cache options for large scale deployments.
# # For encryption to work, this must be configured.
# redisUri: redis://localhost:6379
#queue:
# queue:
# # (Optional) Message queue configuration options for large scale deployments.
# # For encryption to work, this must not be configured.
# redisUri: redis://localhost:6379
#widgets:
# widgets:
# # (Optional) EXPERIMENTAL support for complimentary widgets
# addToAdminRooms: false
# disallowedIpRanges:
@@ -217,12 +217,12 @@ hookshot:
# branding:
# widgetTitle: Hookshot Configuration
#sentry:
# sentry:
# # (Optional) Configure Sentry error reporting
# dsn: https://examplePublicKey@o0.ingest.sentry.io/0
# environment: production
#permissions:
# permissions:
# # (Optional) Permissions for using the bridge. See docs/setup.md#permissions for help
# - actor: example.com
# services:


@@ -1,6 +1,6 @@
apiVersion: v2
name: mautrix-whatsapp
version: 0.0.2
version: 0.0.3
description: Chart for Matrix Whatsapp Bridge
keywords:
- matrix
@@ -12,4 +12,4 @@ sources:
maintainers:
- name: alexlebens
icon: https://avatars.githubusercontent.com/u/88519669?s=48&v=4
appVersion: v0.10.6
appVersion: v0.10.7


@@ -3,7 +3,7 @@ deployment:
strategy: Recreate
image:
repository: dock.mau.dev/mautrix/whatsapp
tag: v0.10.6
tag: v0.10.7
imagePullPolicy: IfNotPresent
env: {}
envFrom: []
@@ -45,479 +45,477 @@ persistence:
accessMode: ReadWriteOnce
size: 500Mi
# Reference the following for examples
# https://github.com/mautrix/whatsapp/blob/main/example-config.yaml
mautrixWhatsapp:
# config.yml contents
existingSecret: ""
config:
# Homeserver details.
homeserver:
# The address that this appservice can use to connect to the homeserver.
address: https://matrix.example.com
# The domain of the homeserver (also known as server_name, used for MXIDs, etc).
domain: example.com
# The address that this appservice can use to connect to the homeserver.
address: https://matrix.example.com
# The domain of the homeserver (also known as server_name, used for MXIDs, etc).
domain: example.com
# What software is the homeserver running?
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
software: standard
# The URL to push real-time bridge status to.
# If set, the bridge will make POST requests to this URL whenever a user's whatsapp connection state changes.
# The bridge will use the appservice as_token to authorize requests.
status_endpoint: null
# Endpoint for reporting per-message status.
message_send_checkpoint_endpoint: null
# Does the homeserver support https://github.com/matrix-org/matrix-spec-proposals/pull/2246?
async_media: false
# What software is the homeserver running?
# Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here.
software: standard
# The URL to push real-time bridge status to.
# If set, the bridge will make POST requests to this URL whenever a user's whatsapp connection state changes.
# The bridge will use the appservice as_token to authorize requests.
status_endpoint: null
# Endpoint for reporting per-message status.
message_send_checkpoint_endpoint: null
# Does the homeserver support https://github.com/matrix-org/matrix-spec-proposals/pull/2246?
async_media: false
# Should the bridge use a websocket for connecting to the homeserver?
# The server side is currently not documented anywhere and is only implemented by mautrix-wsproxy,
# mautrix-asmux (deprecated), and hungryserv (proprietary).
websocket: false
# How often should the websocket be pinged? Pinging will be disabled if this is zero.
ping_interval_seconds: 0
# Should the bridge use a websocket for connecting to the homeserver?
# The server side is currently not documented anywhere and is only implemented by mautrix-wsproxy,
# mautrix-asmux (deprecated), and hungryserv (proprietary).
websocket: false
# How often should the websocket be pinged? Pinging will be disabled if this is zero.
ping_interval_seconds: 0
# Application service host/registration related details.
# Changing these values requires regeneration of the registration.
appservice:
# The address that the homeserver can use to connect to this appservice.
address: http://localhost:29318
# The address that the homeserver can use to connect to this appservice.
address: http://localhost:29318
# The hostname and port where this appservice should listen.
hostname: 0.0.0.0
port: 29318
# The hostname and port where this appservice should listen.
hostname: 0.0.0.0
port: 29318
# Database config.
database:
# The database type. "sqlite3-fk-wal" and "postgres" are supported.
type: postgres
# The database URI.
# SQLite: A raw file path is supported, but `file:<path>?_txlock=immediate` is recommended.
# https://github.com/mattn/go-sqlite3#connection-string
# Postgres: Connection string. For example, postgres://user:password@host/database?sslmode=disable
# To connect via Unix socket, use something like postgres:///dbname?host=/var/run/postgresql
uri: postgres://user:password@host/database?sslmode=disable
# Maximum number of connections. Mostly relevant for Postgres.
max_open_conns: 20
max_idle_conns: 2
# Maximum connection idle time and lifetime before they're closed. Disabled if null.
# Parsed with https://pkg.go.dev/time#ParseDuration
max_conn_idle_time: null
max_conn_lifetime: null
# Database config.
database:
# The database type. "sqlite3-fk-wal" and "postgres" are supported.
type: postgres
# The database URI.
# SQLite: A raw file path is supported, but `file:<path>?_txlock=immediate` is recommended.
# https://github.com/mattn/go-sqlite3#connection-string
# Postgres: Connection string. For example, postgres://user:password@host/database?sslmode=disable
# To connect via Unix socket, use something like postgres:///dbname?host=/var/run/postgresql
uri: postgres://user:password@host/database?sslmode=disable
# Maximum number of connections. Mostly relevant for Postgres.
max_open_conns: 20
max_idle_conns: 2
# Maximum connection idle time and lifetime before they're closed. Disabled if null.
# Parsed with https://pkg.go.dev/time#ParseDuration
max_conn_idle_time: null
max_conn_lifetime: null
# The unique ID of this appservice.
id: whatsapp
# Appservice bot details.
bot:
# Username of the appservice bot.
username: whatsappbot
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
# to leave display name/avatar as-is.
displayname: WhatsApp bridge bot
avatar: mxc://maunium.net/NeXNQarUbrlYBiPCpprYsRqr
# The unique ID of this appservice.
id: whatsapp
# Appservice bot details.
bot:
# Username of the appservice bot.
username: whatsappbot
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
# to leave display name/avatar as-is.
displayname: WhatsApp bridge bot
avatar: mxc://maunium.net/NeXNQarUbrlYBiPCpprYsRqr
# Whether or not to receive ephemeral events via appservice transactions.
# Requires MSC2409 support (i.e. Synapse 1.22+).
ephemeral_events: true
# Whether or not to receive ephemeral events via appservice transactions.
# Requires MSC2409 support (i.e. Synapse 1.22+).
ephemeral_events: true
# Should incoming events be handled asynchronously?
# This may be necessary for large public instances with lots of messages going through.
# However, messages will not be guaranteed to be bridged in the same order they were sent in.
async_transactions: false
# Should incoming events be handled asynchronously?
# This may be necessary for large public instances with lots of messages going through.
# However, messages will not be guaranteed to be bridged in the same order they were sent in.
async_transactions: false
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
as_token: "This value is generated when generating the registration"
hs_token: "This value is generated when generating the registration"
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
as_token: "This value is generated when generating the registration"
hs_token: "This value is generated when generating the registration"
# Segment-compatible analytics endpoint for tracking some events, like provisioning API login and encryption errors.
analytics:
# Hostname of the tracking server. The path is hardcoded to /v1/track
host: api.segment.io
# API key to send with tracking requests. Tracking is disabled if this is null.
token: null
# Optional user ID for tracking events. If null, defaults to using Matrix user ID.
user_id: null
# Hostname of the tracking server. The path is hardcoded to /v1/track
host: api.segment.io
# API key to send with tracking requests. Tracking is disabled if this is null.
token: null
# Optional user ID for tracking events. If null, defaults to using Matrix user ID.
user_id: null
# Prometheus config.
metrics:
# Enable prometheus metrics?
enabled: false
# IP and port where the metrics listener should be. The path is always /metrics
listen: 127.0.0.1:8001
# Enable prometheus metrics?
enabled: false
# IP and port where the metrics listener should be. The path is always /metrics
listen: 127.0.0.1:8001
# Config for things that are directly sent to WhatsApp.
whatsapp:
# Device name that's shown in the "WhatsApp Web" section in the mobile app.
os_name: Mautrix-WhatsApp bridge
# Browser name that determines the logo shown in the mobile app.
# Must be "unknown" for a generic icon or a valid browser name if you want a specific icon.
# List of valid browser names: https://github.com/tulir/whatsmeow/blob/efc632c008604016ddde63bfcfca8de4e5304da9/binary/proto/def.proto#L43-L64
browser_name: unknown
# Device name that's shown in the "WhatsApp Web" section in the mobile app.
os_name: Mautrix-WhatsApp bridge
# Browser name that determines the logo shown in the mobile app.
# Must be "unknown" for a generic icon or a valid browser name if you want a specific icon.
# List of valid browser names: https://github.com/tulir/whatsmeow/blob/efc632c008604016ddde63bfcfca8de4e5304da9/binary/proto/def.proto#L43-L64
browser_name: unknown
# Bridge config
bridge:
# Localpart template of MXIDs for WhatsApp users.
# {{.}} is replaced with the phone number of the WhatsApp user.
username_template: whatsapp_{{.}}
# Displayname template for WhatsApp users.
# {{.PushName}} - nickname set by the WhatsApp user
# {{.BusinessName}} - validated WhatsApp business name
# {{.Phone}} - phone number (international format)
# The following variables are also available, but will cause problems on multi-user instances:
# {{.FullName}} - full name from contact list
# {{.FirstName}} - first name from contact list
displayname_template: "{{or .BusinessName .PushName .JID}} (WA)"
# Should the bridge create a space for each logged-in user and add bridged rooms to it?
# Users who logged in before turning this on should run `!wa sync space` to create and fill the space for the first time.
personal_filtering_spaces: false
# Should the bridge send a read receipt from the bridge bot when a message has been sent to WhatsApp?
delivery_receipts: false
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
message_status_events: false
# Whether the bridge should send error notices via m.notice events when a message fails to bridge.
message_error_notices: true
# Should incoming calls send a message to the Matrix room?
call_start_notices: true
# Should another user's cryptographic identity changing send a message to Matrix?
identity_change_notices: false
portal_message_buffer: 128
# Settings for handling history sync payloads.
history_sync:
# Enable backfilling history sync payloads from WhatsApp?
backfill: true
# The maximum number of initial conversations that should be synced.
# Other conversations will be backfilled on demand when receiving a message or when initiating a direct chat.
max_initial_conversations: -1
# Maximum number of messages to backfill in each conversation.
# Set to -1 to disable limit.
message_count: 50
# Should the bridge request a full sync from the phone when logging in?
# This bumps the size of history syncs from 3 months to 1 year.
request_full_sync: false
# Configuration parameters that are sent to the phone along with the request full sync flag.
# By default (when the values are null or 0), the config isn't sent at all.
full_sync_config:
# Number of days of history to request.
# The limit seems to be around 3 years, but using higher values doesn't break.
days_limit: null
# This is presumably the maximum size of the transferred history sync blob, which may affect what the phone includes in the blob.
size_mb_limit: null
# This is presumably the local storage quota, which may affect what the phone includes in the history sync blob.
storage_quota_mb: null
# If this value is greater than 0, then if the conversation's last message was more than
# this number of hours ago, then the conversation will automatically be marked it as read.
# Conversations that have a last message that is less than this number of hours ago will
# have their unread status synced from WhatsApp.
unread_hours_threshold: 0
# Localpart template of MXIDs for WhatsApp users.
# {{.}} is replaced with the phone number of the WhatsApp user.
username_template: whatsapp_{{.}}
# Displayname template for WhatsApp users.
# {{.PushName}} - nickname set by the WhatsApp user
# {{.BusinessName}} - validated WhatsApp business name
# {{.Phone}} - phone number (international format)
# The following variables are also available, but will cause problems on multi-user instances:
# {{.FullName}} - full name from contact list
# {{.FirstName}} - first name from contact list
displayname_template: "{{or .BusinessName .PushName .JID}} (WA)"
# Should the bridge create a space for each logged-in user and add bridged rooms to it?
# Users who logged in before turning this on should run `!wa sync space` to create and fill the space for the first time.
personal_filtering_spaces: false
# Should the bridge send a read receipt from the bridge bot when a message has been sent to WhatsApp?
delivery_receipts: false
# Whether the bridge should send the message status as a custom com.beeper.message_send_status event.
message_status_events: false
# Whether the bridge should send error notices via m.notice events when a message fails to bridge.
message_error_notices: true
# Should incoming calls send a message to the Matrix room?
call_start_notices: true
# Should another user's cryptographic identity changing send a message to Matrix?
identity_change_notices: false
portal_message_buffer: 128
# Settings for handling history sync payloads.
history_sync:
# Enable backfilling history sync payloads from WhatsApp?
backfill: true
# The maximum number of initial conversations that should be synced.
# Other conversations will be backfilled on demand when receiving a message or when initiating a direct chat.
max_initial_conversations: -1
# Maximum number of messages to backfill in each conversation.
# Set to -1 to disable limit.
message_count: 50
# Should the bridge request a full sync from the phone when logging in?
# This bumps the size of history syncs from 3 months to 1 year.
request_full_sync: false
# Configuration parameters that are sent to the phone along with the request full sync flag.
# By default (when the values are null or 0), the config isn't sent at all.
full_sync_config:
# Number of days of history to request.
# The limit seems to be around 3 years, but using higher values doesn't break.
days_limit: null
# This is presumably the maximum size of the transferred history sync blob, which may affect what the phone includes in the blob.
size_mb_limit: null
# This is presumably the local storage quota, which may affect what the phone includes in the history sync blob.
storage_quota_mb: null
# If this value is greater than 0, then if the conversation's last message was more than
# this number of hours ago, then the conversation will automatically be marked it as read.
# Conversations that have a last message that is less than this number of hours ago will
# have their unread status synced from WhatsApp.
unread_hours_threshold: 0
###############################################################################
# The settings below are only applicable for backfilling using batch sending, #
# which is no longer supported in Synapse. #
###############################################################################
###############################################################################
# The settings below are only applicable for backfilling using batch sending, #
# which is no longer supported in Synapse. #
###############################################################################
# Settings for media requests. If the media expired, then it will not be on the WA servers.
# Media can always be requested by reacting with the ♻️ (recycle) emoji.
# These settings determine if the media requests should be done automatically during or after backfill.
media_requests:
# Should expired media be automatically requested from the server as part of the backfill process?
auto_request_media: true
# Whether to request the media immediately after the media message is backfilled ("immediate")
# or at a specific time of the day ("local_time").
request_method: immediate
# If request_method is "local_time", what time should the requests be sent (in minutes after midnight)?
request_local_time: 120
# Settings for immediate backfills. These backfills should generally be small and their main purpose is
# to populate each of the initial chats (as configured by max_initial_conversations) with a few messages
# so that you can continue conversations without losing context.
immediate:
# The number of concurrent backfill workers to create for immediate backfills.
# Note that using more than one worker could cause the room list to jump around
# since there are no guarantees about the order in which the backfills will complete.
worker_count: 1
# The maximum number of events to backfill initially.
max_events: 10
# Settings for deferred backfills. The purpose of these backfills are to fill in the rest of
# the chat history that was not covered by the immediate backfills.
# These backfills generally should happen at a slower pace so as not to overload the homeserver.
# Each deferred backfill config should define a "stage" of backfill (i.e. the last week of messages).
# The fields are as follows:
# - start_days_ago: the number of days ago to start backfilling from.
# To indicate the start of time, use -1. For example, for a week ago, use 7.
# - max_batch_events: the number of events to send per batch.
# - batch_delay: the number of seconds to wait before backfilling each batch.
deferred:
# Last Week
- start_days_ago: 7
max_batch_events: 20
batch_delay: 5
# Last Month
- start_days_ago: 30
max_batch_events: 50
batch_delay: 10
# Last 3 months
- start_days_ago: 90
max_batch_events: 100
batch_delay: 10
# The start of time
- start_days_ago: -1
max_batch_events: 500
batch_delay: 10
# Settings for media requests. If the media expired, then it will not be on the WA servers.
# Media can always be requested by reacting with the ♻️ (recycle) emoji.
# These settings determine if the media requests should be done automatically during or after backfill.
media_requests:
# Should expired media be automatically requested from the server as part of the backfill process?
auto_request_media: true
# Whether to request the media immediately after the media message is backfilled ("immediate")
# or at a specific time of the day ("local_time").
request_method: immediate
# If request_method is "local_time", what time should the requests be sent (in minutes after midnight)?
request_local_time: 120
# Settings for immediate backfills. These backfills should generally be small and their main purpose is
# to populate each of the initial chats (as configured by max_initial_conversations) with a few messages
# so that you can continue conversations without losing context.
immediate:
# The number of concurrent backfill workers to create for immediate backfills.
# Note that using more than one worker could cause the room list to jump around
# since there are no guarantees about the order in which the backfills will complete.
worker_count: 1
# The maximum number of events to backfill initially.
max_events: 10
# Settings for deferred backfills. The purpose of these backfills are to fill in the rest of
# the chat history that was not covered by the immediate backfills.
# These backfills generally should happen at a slower pace so as not to overload the homeserver.
# Each deferred backfill config should define a "stage" of backfill (i.e. the last week of messages).
# The fields are as follows:
# - start_days_ago: the number of days ago to start backfilling from.
# To indicate the start of time, use -1. For example, for a week ago, use 7.
# - max_batch_events: the number of events to send per batch.
# - batch_delay: the number of seconds to wait before backfilling each batch.
deferred:
# Last Week
- start_days_ago: 7
max_batch_events: 20
batch_delay: 5
# Last Month
- start_days_ago: 30
max_batch_events: 50
batch_delay: 10
# Last 3 months
- start_days_ago: 90
max_batch_events: 100
batch_delay: 10
# The start of time
- start_days_ago: -1
max_batch_events: 500
batch_delay: 10
# Should puppet avatars be fetched from the server even if an avatar is already set?
user_avatar_sync: true
# Should Matrix users leaving groups be bridged to WhatsApp?
bridge_matrix_leave: true
# Should the bridge update the m.direct account data event when double puppeting is enabled.
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
# and is therefore prone to race conditions.
sync_direct_chat_list: false
# Should the bridge use MSC2867 to bridge manual "mark as unread"s from
# WhatsApp and set the unread status on initial backfill?
# This will only work on clients that support the m.marked_unread or
# com.famedly.marked_unread room account data.
sync_manual_marked_unread: true
# When double puppeting is enabled, users can use `!wa toggle` to change whether
# presence is bridged. This setting sets the default value.
# Existing users won't be affected when these are changed.
default_bridge_presence: true
# Send the presence as "available" to whatsapp when users start typing on a portal.
# This works as a workaround for homeservers that do not support presence, and allows
# users to see when the whatsapp user on the other side is typing during a conversation.
send_presence_on_typing: false
# Should the bridge always send "active" delivery receipts (two gray ticks on WhatsApp)
# even if the user isn't marked as online (e.g. when presence bridging isn't enabled)?
# Should puppet avatars be fetched from the server even if an avatar is already set?
user_avatar_sync: true
# Should Matrix users leaving groups be bridged to WhatsApp?
bridge_matrix_leave: true
# Should the bridge update the m.direct account data event when double puppeting is enabled.
# Note that updating the m.direct event is not atomic (except with mautrix-asmux)
# and is therefore prone to race conditions.
sync_direct_chat_list: false
# Should the bridge use MSC2867 to bridge manual "mark as unread"s from
# WhatsApp and set the unread status on initial backfill?
# This will only work on clients that support the m.marked_unread or
# com.famedly.marked_unread room account data.
sync_manual_marked_unread: true
# When double puppeting is enabled, users can use `!wa toggle` to change whether
# presence is bridged. This setting sets the default value.
# Existing users won't be affected when these are changed.
default_bridge_presence: true
# Send the presence as "available" to whatsapp when users start typing on a portal.
# This works as a workaround for homeservers that do not support presence, and allows
# users to see when the whatsapp user on the other side is typing during a conversation.
send_presence_on_typing: false
# Should the bridge always send "active" delivery receipts (two gray ticks on WhatsApp)
# even if the user isn't marked as online (e.g. when presence bridging isn't enabled)?
#
# By default, the bridge acts like WhatsApp web, which only sends active delivery
# receipts when it's in the foreground.
force_active_delivery_receipts: false
# Servers to always allow double puppeting from
double_puppet_server_map:
example.com: https://example.com
# Allow using double puppeting from any server with a valid client .well-known file.
double_puppet_allow_discovery: false
# Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth
#
# If set, double puppeting will be enabled automatically for local users
# instead of users having to find an access token and run `login-matrix`
# manually.
login_shared_secret_map:
example.com: foobar
# Whether to explicitly set the avatar and room name for private chat portal rooms.
# If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms.
# If set to `always`, all DM rooms will have explicit names and avatars set.
# If set to `never`, DM rooms will never have names and avatars set.
private_chat_portal_meta: default
# Should group members be synced in parallel? This makes member sync faster
parallel_member_sync: false
# Should Matrix m.notice-type messages be bridged?
bridge_notices: true
# Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run.
# This field will automatically be changed back to false after it, except if the config file is not writable.
resend_bridge_info: false
# When using double puppeting, should muted chats be muted in Matrix?
mute_bridging: false
# When using double puppeting, should archived chats be moved to a specific tag in Matrix?
# Note that WhatsApp unarchives chats when a message is received, which will also be mirrored to Matrix.
# This can be set to a tag (e.g. m.lowpriority), or null to disable.
archive_tag: null
# Same as above, but for pinned chats. The favorite tag is called m.favourite
pinned_tag: null
# Should mute status and tags only be bridged when the portal room is created?
tag_only_on_create: true
# Should WhatsApp status messages be bridged into a Matrix room?
# Disabling this won't affect already created status broadcast rooms.
enable_status_broadcast: true
# Should sending WhatsApp status messages be allowed?
# This can cause issues if the user has lots of contacts, so it's disabled by default.
disable_status_broadcast_send: true
# Should the status broadcast room be muted and moved into low priority by default?
# This is only applied when creating the room, the user can unmute it later.
mute_status_broadcast: true
# Tag to apply to the status broadcast room.
status_broadcast_tag: m.lowpriority
# Should the bridge use thumbnails from WhatsApp?
# They're disabled by default due to very low resolution.
whatsapp_thumbnail: false
# Allow invite permission for user. User can invite any bots to room with whatsapp
# users (private chat and groups)
allow_user_invite: false
# Whether or not created rooms should have federation enabled.
# If false, created portal rooms will never be federated.
federate_rooms: true
# Should the bridge never send alerts to the bridge management room?
# These are mostly things like the user being logged out.
disable_bridge_alerts: false
# Should the bridge stop if the WhatsApp server says another user connected with the same session?
# This is only safe on single-user bridges.
crash_on_stream_replaced: false
# Should the bridge detect URLs in outgoing messages, ask the homeserver to generate a preview,
# and send it to WhatsApp? URL previews can always be sent using the `com.beeper.linkpreviews`
# key in the event content even if this is disabled.
url_previews: false
# Send captions in the same message as images. This will send data compatible with both MSC2530 and MSC3552.
# This is currently not supported in most clients.
caption_in_message: false
# Send galleries as a single event? This is not an MSC (yet).
beeper_galleries: false
# Should polls be sent using MSC3381 event types?
extev_polls: false
# Should cross-chat replies from WhatsApp be bridged? Most servers and clients don't support this.
cross_room_replies: false
# Disable generating reply fallbacks? Some extremely bad clients still rely on them,
# but they're being phased out and will be completely removed in the future.
disable_reply_fallbacks: false
# Maximum time for handling Matrix events. Duration strings formatted for https://pkg.go.dev/time#ParseDuration
# Null means there's no enforced timeout.
message_handling_timeout:
# Send an error message after this timeout, but keep waiting for the response until the deadline.
# This is counted from the origin_server_ts, so the warning time is consistent regardless of the source of delay.
# If the message is older than this when it reaches the bridge, the message won't be handled at all.
error_after: null
# Drop messages after this timeout. They may still go through if the message got sent to the servers.
# This is counted from the time the bridge starts handling the message.
deadline: 120s
# The prefix for commands. Only required in non-management rooms.
command_prefix: "!wa"
# Messages sent upon joining a management room.
# Markdown is supported. The defaults are listed below.
management_room_text:
# Sent when joining a room.
welcome: "Hello, I'm a WhatsApp bridge bot."
# Sent when joining a management room and the user is already logged in.
welcome_connected: "Use `help` for help."
# Sent when joining a management room and the user is not logged in.
welcome_unconnected: "Use `help` for help or `login` to log in."
# Optional extra text sent when joining a management room.
additional_help: ""
# End-to-bridge encryption support options.
#
# See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
encryption:
# Allow encryption, work in group chat rooms with e2ee enabled
allow: false
# Default to encryption, force-enable encryption in all portals the bridge creates
# This will cause the bridge bot to be in private chats for the encryption to work properly.
default: false
# Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
appservice: false
# Require encryption, drop any unencrypted messages.
require: false
# Enable key sharing? If enabled, key requests for rooms where users are in will be fulfilled.
# You must use a client that supports requesting keys from other users to use this feature.
allow_key_sharing: false
# Should users mentions be in the event wire content to enable the server to send push notifications?
plaintext_mentions: false
# Options for deleting megolm sessions from the bridge.
delete_keys:
# Beeper-specific: delete outbound sessions when hungryserv confirms
# that the user has uploaded the key to key backup.
delete_outbound_on_ack: false
# Don't store outbound sessions in the inbound table.
dont_store_outbound: false
# Ratchet megolm sessions forward after decrypting messages.
ratchet_on_decrypt: false
# Delete fully used keys (index >= max_messages) after decrypting messages.
delete_fully_used_on_decrypt: false
# Delete previous megolm sessions from same device when receiving a new one.
delete_prev_on_new_session: false
# Delete megolm sessions received from a device when the device is deleted.
delete_on_device_delete: false
# Periodically delete megolm sessions when 2x max_age has passed since receiving the session.
periodically_delete_expired: false
# Delete inbound megolm sessions that don't have the received_at field used for
# automatic ratcheting and expired session deletion. This is meant as a migration
# to delete old keys prior to the bridge update.
delete_outdated_inbound: false
# What level of device verification should be required from users?
#
# By default, the bridge acts like WhatsApp web, which only sends active delivery
# receipts when it's in the foreground.
force_active_delivery_receipts: false
# Servers to always allow double puppeting from
double_puppet_server_map:
example.com: https://example.com
# Allow using double puppeting from any server with a valid client .well-known file.
double_puppet_allow_discovery: false
# Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth
#
# If set, double puppeting will be enabled automatically for local users
# instead of users having to find an access token and run `login-matrix`
# manually.
login_shared_secret_map:
example.com: foobar
# Whether to explicitly set the avatar and room name for private chat portal rooms.
# If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms.
# If set to `always`, all DM rooms will have explicit names and avatars set.
# If set to `never`, DM rooms will never have names and avatars set.
private_chat_portal_meta: default
# Should group members be synced in parallel? This makes member sync faster
parallel_member_sync: false
# Should Matrix m.notice-type messages be bridged?
bridge_notices: true
# Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run.
# This field will automatically be changed back to false after it, except if the config file is not writable.
resend_bridge_info: false
# When using double puppeting, should muted chats be muted in Matrix?
mute_bridging: false
# When using double puppeting, should archived chats be moved to a specific tag in Matrix?
# Note that WhatsApp unarchives chats when a message is received, which will also be mirrored to Matrix.
# This can be set to a tag (e.g. m.lowpriority), or null to disable.
archive_tag: null
# Same as above, but for pinned chats. The favorite tag is called m.favourite
pinned_tag: null
# Should mute status and tags only be bridged when the portal room is created?
tag_only_on_create: true
# Should WhatsApp status messages be bridged into a Matrix room?
# Disabling this won't affect already created status broadcast rooms.
enable_status_broadcast: true
# Should sending WhatsApp status messages be allowed?
# This can cause issues if the user has lots of contacts, so it's disabled by default.
disable_status_broadcast_send: true
# Should the status broadcast room be muted and moved into low priority by default?
# This is only applied when creating the room, the user can unmute it later.
mute_status_broadcast: true
# Tag to apply to the status broadcast room.
status_broadcast_tag: m.lowpriority
# Should the bridge use thumbnails from WhatsApp?
# They're disabled by default due to very low resolution.
whatsapp_thumbnail: false
# Allow invite permission for user. User can invite any bots to room with whatsapp
# users (private chat and groups)
allow_user_invite: false
# Whether or not created rooms should have federation enabled.
# If false, created portal rooms will never be federated.
federate_rooms: true
# Should the bridge never send alerts to the bridge management room?
# These are mostly things like the user being logged out.
disable_bridge_alerts: false
# Should the bridge stop if the WhatsApp server says another user connected with the same session?
# This is only safe on single-user bridges.
crash_on_stream_replaced: false
# Should the bridge detect URLs in outgoing messages, ask the homeserver to generate a preview,
# and send it to WhatsApp? URL previews can always be sent using the `com.beeper.linkpreviews`
# key in the event content even if this is disabled.
url_previews: false
# Send captions in the same message as images. This will send data compatible with both MSC2530 and MSC3552.
# This is currently not supported in most clients.
caption_in_message: false
# Send galleries as a single event? This is not an MSC (yet).
beeper_galleries: false
# Should polls be sent using MSC3381 event types?
extev_polls: false
# Should cross-chat replies from WhatsApp be bridged? Most servers and clients don't support this.
cross_room_replies: false
# Disable generating reply fallbacks? Some extremely bad clients still rely on them,
# but they're being phased out and will be completely removed in the future.
disable_reply_fallbacks: false
# Maximum time for handling Matrix events. Duration strings formatted for https://pkg.go.dev/time#ParseDuration
# Null means there's no enforced timeout.
message_handling_timeout:
# Send an error message after this timeout, but keep waiting for the response until the deadline.
# This is counted from the origin_server_ts, so the warning time is consistent regardless of the source of delay.
# If the message is older than this when it reaches the bridge, the message won't be handled at all.
error_after: null
# Drop messages after this timeout. They may still go through if the message got sent to the servers.
# This is counted from the time the bridge starts handling the message.
deadline: 120s
# Valid levels:
# unverified - Send keys to all device in the room.
# cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
# cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
# cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
# Note that creating user signatures from the bridge bot is not currently possible.
# verified - Require manual per-device verification
# (currently only possible by modifying the `trust` column in the `crypto_device` database table).
verification_levels:
# Minimum level for which the bridge should send keys to when bridging messages from WhatsApp to Matrix.
receive: unverified
# Minimum level that the bridge should accept for incoming Matrix messages.
send: unverified
# Minimum level that the bridge should require for accepting key requests.
share: cross-signed-tofu
# Options for Megolm room key rotation. These options allow you to
# configure the m.room.encryption event content. See:
# https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
# more information about that event.
rotation:
# Enable custom Megolm room key rotation settings. Note that these
# settings will only apply to rooms created after this option is
# set.
enable_custom: false
# The maximum number of milliseconds a session should be used
# before changing it. The Matrix spec recommends 604800000 (a week)
# as the default.
milliseconds: 604800000
# The maximum number of messages that should be sent with a given a
# session before changing it. The Matrix spec recommends 100 as the
# default.
messages: 100
# The prefix for commands. Only required in non-management rooms.
command_prefix: "!wa"
# Disable rotating keys when a user's devices change?
# You should not enable this option unless you understand all the implications.
disable_device_change_key_rotation: false
# Messages sent upon joining a management room.
# Markdown is supported. The defaults are listed below.
management_room_text:
# Sent when joining a room.
welcome: "Hello, I'm a WhatsApp bridge bot."
# Sent when joining a management room and the user is already logged in.
welcome_connected: "Use `help` for help."
# Sent when joining a management room and the user is not logged in.
welcome_unconnected: "Use `help` for help or `login` to log in."
# Optional extra text sent when joining a management room.
additional_help: ""
# End-to-bridge encryption support options.
#
# See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info.
encryption:
# Allow encryption, work in group chat rooms with e2ee enabled
allow: false
# Default to encryption, force-enable encryption in all portals the bridge creates
# This will cause the bridge bot to be in private chats for the encryption to work properly.
default: false
# Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data.
appservice: false
# Require encryption, drop any unencrypted messages.
require: false
# Enable key sharing? If enabled, key requests for rooms that users are in will be fulfilled.
# You must use a client that supports requesting keys from other users to use this feature.
allow_key_sharing: false
# Should user mentions be in the event wire content to enable the server to send push notifications?
plaintext_mentions: false
# Options for deleting megolm sessions from the bridge.
delete_keys:
# Beeper-specific: delete outbound sessions when hungryserv confirms
# that the user has uploaded the key to key backup.
delete_outbound_on_ack: false
# Don't store outbound sessions in the inbound table.
dont_store_outbound: false
# Ratchet megolm sessions forward after decrypting messages.
ratchet_on_decrypt: false
# Delete fully used keys (index >= max_messages) after decrypting messages.
delete_fully_used_on_decrypt: false
# Delete previous megolm sessions from same device when receiving a new one.
delete_prev_on_new_session: false
# Delete megolm sessions received from a device when the device is deleted.
delete_on_device_delete: false
# Periodically delete megolm sessions when 2x max_age has passed since receiving the session.
periodically_delete_expired: false
# Delete inbound megolm sessions that don't have the received_at field used for
# automatic ratcheting and expired session deletion. This is meant as a migration
# to delete old keys prior to the bridge update.
delete_outdated_inbound: false
# What level of device verification should be required from users?
#
# Valid levels:
# unverified - Send keys to all devices in the room.
# cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys.
# cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes).
# cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot.
# Note that creating user signatures from the bridge bot is not currently possible.
# verified - Require manual per-device verification
# (currently only possible by modifying the `trust` column in the `crypto_device` database table).
verification_levels:
# Minimum level for which the bridge should send keys to when bridging messages from WhatsApp to Matrix.
receive: unverified
# Minimum level that the bridge should accept for incoming Matrix messages.
send: unverified
# Minimum level that the bridge should require for accepting key requests.
share: cross-signed-tofu
# Options for Megolm room key rotation. These options allow you to
# configure the m.room.encryption event content. See:
# https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for
# more information about that event.
rotation:
# Enable custom Megolm room key rotation settings. Note that these
# settings will only apply to rooms created after this option is
# set.
enable_custom: false
# The maximum number of milliseconds a session should be used
# before changing it. The Matrix spec recommends 604800000 (a week)
# as the default.
milliseconds: 604800000
# The maximum number of messages that should be sent with a given
# session before changing it. The Matrix spec recommends 100 as the
# default.
messages: 100
# Disable rotating keys when a user's devices change?
# You should not enable this option unless you understand all the implications.
disable_device_change_key_rotation: false
# Settings for provisioning API
provisioning:
# Prefix for the provisioning API paths.
prefix: /_matrix/provision
# Shared secret for authentication. If set to "generate", a random secret will be generated,
# or if set to "disable", the provisioning API will be disabled.
shared_secret: generate
# Enable debug API at /debug with provisioning authentication.
debug_endpoints: false
# Permissions for using the bridge.
# Permitted values:
# relay - Talk through the relaybot (if enabled), no access otherwise
# user - Access to use the bridge to chat with a WhatsApp account.
# admin - User level and some additional administration tools
# Permitted keys:
# * - All Matrix users
# domain - All users on that homeserver
# mxid - Specific user
permissions:
"*": relay
"example.com": user
"@admin:example.com": admin
# Settings for relay mode
relay:
# Whether relay mode should be allowed. If allowed, `!wa set-relay` can be used to turn any
# authenticated user into a relaybot for that chat.
enabled: false
# Should only admins be allowed to set themselves as relay users?
admin_only: true
# The formats to use when sending messages to WhatsApp via the relaybot.
message_formats:
m.text: "<b>{{ .Sender.Displayname }}</b>: {{ .Message }}"
m.notice: "<b>{{ .Sender.Displayname }}</b>: {{ .Message }}"
m.emote: "* <b>{{ .Sender.Displayname }}</b> {{ .Message }}"
m.file: "<b>{{ .Sender.Displayname }}</b> sent a file"
m.image: "<b>{{ .Sender.Displayname }}</b> sent an image"
m.audio: "<b>{{ .Sender.Displayname }}</b> sent an audio file"
m.video: "<b>{{ .Sender.Displayname }}</b> sent a video"
m.location: "<b>{{ .Sender.Displayname }}</b> sent a location"
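# Illustration (assumption, not part of the upstream defaults): with the m.text format
# above, a message "hello" from a Matrix user displaying as "Alice" is rendered as
#   <b>Alice</b>: hello
# before the bridge converts it to WhatsApp formatting.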
# Logging config. See https://github.com/tulir/zeroconfig for details.
logging:
min_level: debug
writers:
- type: stdout
format: pretty-colored
- type: file

View File

@@ -0,0 +1,15 @@
apiVersion: v2
name: mysql-cluster
version: 0.1.2
description: Chart for a mysql cluster
keywords:
- database
- mysql
sources:
- https://dev.mysql.com/
- https://github.com/mysql/mysql-operator
- https://github.com/mysql/mysql-operator/tree/trunk/helm/mysql-innodbcluster
maintainers:
- name: alexlebens
icon: https://avatars.githubusercontent.com/u/2452804?s=48&v=4
appVersion: 8.3.0-2.1.2

View File

@@ -0,0 +1,17 @@
## Introduction
[MySQL Operator](https://dev.mysql.com/doc/mysql-operator/en/)
MySQL Operator for Kubernetes manages MySQL InnoDB Cluster setups inside a Kubernetes cluster and handles the full lifecycle, including setup, maintenance, automated upgrades, and backups.
This chart bootstraps a [MySQL InnoDB](https://dev.mysql.com/doc/mysql-operator/en/mysql-operator-innodbcluster.html) cluster on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes
- Helm
- MySQL Operator
## Parameters
See the [values file](values.yaml).
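A minimal, hypothetical override of those values (the secret name and storage class are placeholders) for a three-node cluster could look like:

cluster:
  serverInstances: 3
  exisitingCredentialsSecret: mysql-root-credentials
  datadirVolumeClaimTemplate:
    storageClassName: standard
    accessModes: ReadWriteOnce
    size: 10Gi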

View File

@@ -0,0 +1,72 @@
{{- define "cluster.backup" -}}
{{- if and .Values.backup.enabled .Values.backup.profiles }}
backupProfiles:
{{- $isDumpInstance := false }}
{{- $isSnapshot := false }}
{{- range $_, $profile := .Values.backup.profiles }}
- name: {{ $profile.name | quote }}
{{- if hasKey $profile "podAnnotations" }}
podAnnotations:
{{ toYaml $profile.podAnnotations | nindent 6 }}
{{- end }}
{{- if hasKey $profile "podLabels" }}
podLabels:
{{ toYaml $profile.podLabels | nindent 6 }}
{{- end }}
{{- $isDumpInstance = hasKey $profile "dumpInstance" }}
{{- $isSnapshot = hasKey $profile "snapshot" }}
{{- if or $isDumpInstance $isSnapshot }}
{{- $backupProfile := ternary $profile.dumpInstance $profile.snapshot $isDumpInstance }}
{{- if $isDumpInstance }}
dumpInstance:
{{- else if $isSnapshot }}
snapshot:
{{- else }}
{{- fail "Unsupported or unspecified backup type, must be either snapshot or dumpInstance" }}
{{ end }}
{{- if not (hasKey $backupProfile "storage") }}
{{- fail "backup profile $profile.name has no storage section" }}
{{- else if hasKey $backupProfile.storage "s3" }}
storage:
s3:
{{- if $backupProfile.storage.s3.prefix }}
prefix: {{ $backupProfile.storage.s3.prefix }}
{{- end }}
bucketName: {{ required "bucketName is required" $backupProfile.storage.s3.bucketName }}
config: {{ required "config is required" $backupProfile.storage.s3.config }}
{{- if $backupProfile.storage.s3.profile }}
profile: {{ $backupProfile.storage.s3.profile }}
{{- end }}
{{- if $backupProfile.storage.s3.endpoint }}
endpoint: {{ $backupProfile.storage.s3.endpoint }}
{{- end }}
{{- else if hasKey $backupProfile.storage "persistentVolumeClaim" }}
storage:
persistentVolumeClaim: {{ toYaml $backupProfile.storage.persistentVolumeClaim | nindent 12}}
{{- else -}}
{{- fail "Backup profile $profile.name has empty storage section - neither s3 nor persistentVolumeClaim defined" }}
{{- end -}}
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.backup.schedules }}
backupSchedules:
{{- range $_, $schedule := .Values.backup.schedules }}
- name: {{ $schedule.name | quote }}
enabled: {{ $schedule.enabled }}
schedule: {{ quote $schedule.schedule }}
{{- if ($schedule).timeZone }}
timeZone: {{ quote $schedule.timeZone }}
{{- end }}
deleteBackupData: {{ $schedule.deleteBackupData }}
backupProfileName: {{ $schedule.backupProfileName }}
{{- end }}
{{- end }}
{{- end }}
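A hypothetical values fragment exercising the template above (the profile, claim, and schedule names are placeholders):

backup:
  enabled: true
  profiles:
    - name: nightly-dump
      dumpInstance:
        storage:
          persistentVolumeClaim:
            claimName: mysql-backup-claim
  schedules:
    - name: nightly
      enabled: true
      schedule: "0 0 3 * * *"
      deleteBackupData: false
      backupProfileName: nightly-dump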

View File

@@ -0,0 +1,64 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "cluster.name" -}}
{{- if .Values.global.nameOverride }}
{{- .Values.global.nameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-mysql-%s" .Release.Name ((semver .Values.cluster.image.version).Major | toString) | trunc 63 | trimSuffix "-" -}}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "cluster.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Check for invalid versions
*/}}
{{- $minimalVersion := "8.0.27" }}
{{- $forbiddenVersions := list "8.0.29" }}
{{- $serverVersion := .Values.serverVersion | default .Chart.AppVersion }}
{{- if lt $serverVersion $minimalVersion }}
{{- $err := printf "It is not possible to use MySQL version %s . Please, use %s or above" $serverVersion $minimalVersion }}
{{- fail $err }}
{{- end }}
{{- if has $serverVersion $forbiddenVersions }}
{{- $err := printf "It is not possible to use MySQL version %s . Please, use %s or above except %v" $serverVersion $minimalVersion $forbiddenVersions }}
{{- fail $err }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "cluster.labels" -}}
helm.sh/chart: {{ include "cluster.chart" . }}
{{ include "cluster.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "cluster.selectorLabels" -}}
app.kubernetes.io/name: {{ include "cluster.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: cloudnative-pg
{{- end }}
{{/*
Create the name of the service account to use.
*/}}
{{- define "mysql.serviceAccountName" -}}
{{- if .Values.serviceAccount.enabled -}}
{{ default (include "cluster.name" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
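{{/*
Illustration (assumption, not part of the chart): with release name "prod" and
cluster.image.version "8.3.0-2.1.2", "cluster.name" above resolves to "prod-mysql-8",
because (semver "8.3.0-2.1.2").Major is 8.
*/}}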

View File

@@ -0,0 +1,47 @@
{{- define "cluster.init" -}}
{{- if eq .Values.mode "clone" }}
{{- with .Values.clone }}
initDB:
clone:
donorUrl: {{ required "clone donorUrl is required" .donorUrl }}
rootUser: {{ .rootUser | default "root" }}
secretKeyRef:
name: {{ required "clone credentials is required" .exisitingCredentialsSecret }}
{{- end }}
{{- end }}
{{- if eq .Values.mode "recovery" }}
{{- with .Values.recovery }}
initDB:
dump:
{{- if .name }}
name: {{ .name | quote }}
{{- end }}
{{- if .path }}
path: {{ .path | quote }}
{{- end }}
{{- if .options }}
options: {{ toYaml .options | nindent 8 }}
{{- end }}
storage:
{{- if eq .type "s3" }}
s3:
prefix: {{ required "s3 prefix is required" .s3.prefix }}
bucketName: {{ required "s3 bucketName is required" .s3.bucketName }}
config: {{ required "s3 config is required" .s3.config }}
{{- if .s3.profile }}
profile: {{ .s3.profile }}
{{- end }}
{{- if .s3.endpoint }}
endpoint: {{ .s3.endpoint }}
{{- end }}
{{- end }}
{{- if eq .type "pvc" }}
persistentVolumeClaim:
{{ toYaml .persistentVolumeClaim | nindent 10}}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
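A hypothetical values fragment for the recovery branch above (the bucket, prefix, and secret names are placeholders):

mode: recovery
recovery:
  type: s3
  name: nightly-dump
  s3:
    prefix: backups/mysql
    bucketName: my-backup-bucket
    config: s3-credentials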

View File

@@ -0,0 +1,75 @@
apiVersion: mysql.oracle.com/v2
kind: InnoDBCluster
metadata:
name: {{ include "cluster.name" . }}-cluster
namespace: {{ .Release.Namespace }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "cluster.labels" . | nindent 4 }}
{{- include "cluster.selectorLabels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
spec:
instances: {{ required "serverInstances is required" .Values.cluster.serverInstances }}
baseServerId: {{ required "baseServerId is required" .Values.cluster.baseServerId }}
serviceAccountName: {{ include "mysql.serviceAccountName" . }}
imagePullPolicy : {{ .Values.cluster.image.pullPolicy }}
version: {{ .Values.cluster.image.version }}
tlsUseSelfSigned: true
secretName: {{ .Values.cluster.exisitingCredentialsSecret }}
podSpec:
{{- with .Values.cluster.podSpec }}
{{ toYaml . | nindent 4 }}
{{- end }}
podAnnotations:
{{- with .Values.cluster.podAnnotations }}
{{ toYaml . | nindent 4 }}
{{- end }}
podLabels:
{{- with .Values.cluster.podLabels }}
{{ toYaml . | nindent 4 }}
{{- end }}
router:
instances: {{ required "router.instances is required" .Values.cluster.router.instances }}
podSpec:
{{- with .Values.cluster.router.podSpec }}
{{- toYaml . | nindent 6 }}
{{- end }}
podAnnotations:
{{- with .Values.cluster.router.podAnnotations }}
{{- toYaml . | nindent 6 }}
{{- end }}
podLabels:
{{- with .Values.cluster.router.podLabels }}
{{- toYaml . | nindent 6 }}
{{- end }}
tlsSecretName: {{ include "cluster.name" . }}-router-tls
logs:
{{- with .Values.cluster.logs }}
{{ toYaml . | nindent 4 }}
{{- end }}
mycnf: |
{{ .Values.cluster.serverConfig.mycnf | indent 4 }}
{{- if .Values.cluster.datadirVolumeClaimTemplate }}
{{- with .Values.cluster.datadirVolumeClaimTemplate }}
datadirVolumeClaimTemplate:
{{- if .storageClassName }}
storageClassName: {{ .storageClassName | quote }}
{{- end}}
{{- if .accessModes }}
accessModes: [ "{{ .accessModes }}" ]
{{- end }}
{{- if .size }}
resources:
requests:
storage: "{{ .size }}"
{{- end }}
{{- end }}
{{- end }}
{{ include "cluster.init" . | nindent 2 }}
{{ include "cluster.backup" . | nindent 2 }}

View File

@@ -0,0 +1,21 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "mysql.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "cluster.labels" . | nindent 4 }}
{{- include "cluster.selectorLabels" . | nindent 4 }}
{{- with .Values.global.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.serviceAccount.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
annotations:
{{- with .Values.global.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.serviceAccount.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}

View File

@@ -0,0 +1,147 @@
global:
nameOverride:
labels: {}
annotations: {}
serviceAccount:
enabled: true
labels: {}
annotations: {}
name: ""
###
# Cluster mode of operation. Available modes:
# * `standalone` - Default mode. Creates new or updates an existing cluster.
# * `recovery` - Same as standalone but creates a cluster from a backup
# * `clone` - Create database as a replica from another cluster
mode: standalone
##
# Cluster spec
#
# Reference: https://dev.mysql.com/doc/mysql-operator/en/mysql-operator-properties.html#mysql-operator-spec-innodbclusterspecinitdbdumpstorages3
#
cluster:
serverInstances: 1
baseServerId: 1000
# Existing secret that contains the keys "rootUser", "rootHost", and "rootPassword"
exisitingCredentialsSecret: ""
image:
version: 8.3.0-2.1.2
pullPolicy: IfNotPresent
router:
instances: 1
podSpec: {}
podAnnotations: {}
podLabels: {}
logs:
error:
enabled: true
collect: false
general:
enabled: false
collect: false
slowQuery:
enabled: false
longQueryTime: 2.5
serverConfig:
mycnf: |
[mysqld]
core_file
local_infile=off
datadirVolumeClaimTemplate:
storageClassName: ""
accessModes: ""
size: ""
podSpec:
containers:
- name: mysql
resources:
limits:
memory: 1024Mi
cpu: 1000m
requests:
memory: 512Mi
cpu: 100m
podAnnotations: {}
podLabels: {}
##
# Recovery database from storage
#
recovery:
# * `s3` - Restores from s3 object store
# * `pvc` - Restores from persistent volume claim
type:
# -- Name of the dump. Not used by the operator, but a descriptive hint for the cluster administrator
name: ""
# -- Path to the dump in the PVC. Use when specifying persistentVolumeClaim. Omit for ociObjectStorage, S3, or azure.
path: ""
# -- A dictionary of key-value pairs passed directly to MySQL Shell's loadDump()
options: {}
s3:
# -- Path in the bucket where the dump files are stored
prefix: ""
# -- Name of a Secret with S3 configuration and credentials as contained in ~/.aws/config
config: ""
# -- Name of the S3 bucket where the dump is stored
bucketName: ""
# -- Override endpoint URL
endpoint: ""
persistentVolumeClaim: {}
##
# Clone database from another instance
#
clone:
donorUrl: ""
rootUser: root
exisitingCredentialsSecret: ""
##
# Backup database to pvc or s3
#
backup:
enabled: false
profiles:
## -- Example profile that backs up to a local PVC
# - name: pvc-backup
# dumpInstance:
# storage:
# persistentVolumeClaim:
# claimName: backup-volume-claim
## -- Example profile that backs up to an S3 endpoint
# - name: s3-backup
# snapshot:
# storage:
# s3:
# prefix: ""
# config: ""
# bucketName: ""
# endpoint: ""
schedules:
## -- Example schedule that backs up daily
# - name: schedule-daily
# enabled: true
# schedule: "0 0 0 * * *"
# timeZone: "US/Central"
# deleteBackupData: false
# backupProfileName:
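For reference, a hypothetical Secret matching the exisitingCredentialsSecret contract noted above (the name, host, and password are placeholder example values; the keys are the ones the chart expects):

apiVersion: v1
kind: Secret
metadata:
  name: mysql-root-credentials
stringData:
  rootUser: root
  rootHost: "%"
  rootPassword: change-me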

View File

@@ -1,6 +1,6 @@
apiVersion: v2
name: outline
version: 0.5.0
version: 0.6.3
description: Chart for Outline wiki
keywords:
- wiki
@@ -14,5 +14,5 @@ icon: https://avatars.githubusercontent.com/u/1765001?s=48&v=4
dependencies:
- name: redis
repository: https://charts.bitnami.com/bitnami
version: 19.1.0
appVersion: v0.75.2
version: 19.3.4
appVersion: v0.76.1

View File

@@ -3,7 +3,7 @@ deployment:
strategy: Recreate
image:
repository: outlinewiki/outline
tag: "0.75.2"
tag: "0.76.1"
imagePullPolicy: IfNotPresent
resources:
requests:
@@ -59,7 +59,7 @@ outline:
existingSecretKey:
databasePort:
existingSecretName:
existingSecretKey:
existingSecretKey:
connectionPoolMin: ""
connectionPoolMax: "20"
sslMode: disable

charts/penpot/Chart.yaml
View File

@@ -0,0 +1,13 @@
apiVersion: v2
name: penpot
version: 0.1.0
description: Chart for Penpot
keywords:
- penpot
- design
sources:
- https://github.com/penpot/penpot
maintainers:
- name: alexlebens
icon: https://avatars.githubusercontent.com/u/30179644?s=200&v=4
appVersion: 2.0.1

charts/penpot/README.md
View File

@@ -0,0 +1,16 @@
## Introduction
[Penpot](https://github.com/penpot/penpot)
Penpot is the first Open Source design and prototyping platform meant for cross-domain teams. Not dependent on any operating system, Penpot is web-based and works with open standards (SVG). Penpot invites designers all over the world to fall in love with open source while getting developers excited about the design process in return.
This chart bootstraps a [Penpot](https://github.com/penpot/penpot) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes
- Helm
## Parameters
See the [values file](values.yaml).
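A minimal, hypothetical override (the public URI, host names, and secret names are placeholders) for the database and cache settings used by the deployments could look like:

config:
  publicURI: https://penpot.example.com
  postgresql:
    host: penpot-postgresql
    port: 5432
    database: penpot
    existingSecret: penpot-postgresql-credentials
    secretKeys:
      usernameKey: username
      passwordKey: password
  redis:
    host: penpot-redis
    port: 6379
    database: "0"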

View File

@@ -0,0 +1,72 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "penpot.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "penpot.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "penpot.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels.
*/}}
{{- define "penpot.labels" -}}
helm.sh/chart: {{ include "penpot.chart" . }}
app.kubernetes.io/name: {{ include "penpot.name" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels.
*/}}
{{- define "penpot.frontendSelectorLabels" -}}
app.kubernetes.io/name: {{ include "penpot.name" . }}-frontend
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "penpot.backendSelectorLabels" -}}
app.kubernetes.io/name: {{ include "penpot.name" . }}-backend
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "penpot.exporterSelectorLabels" -}}
app.kubernetes.io/name: {{ include "penpot.name" . }}-exporter
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use.
*/}}
{{- define "penpot.serviceAccountName" -}}
{{- if .Values.serviceAccount.enabled -}}
{{ default (include "penpot.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,129 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: "{{ include "penpot.fullname" . }}-frontend-nginx"
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
data:
nginx.conf: |
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
worker_connections 2048;
# multi_accept on;
}
http {
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_requests 30;
keepalive_timeout 65;
types_hash_max_size 2048;
server_tokens off;
reset_timedout_connection on;
client_body_timeout 30s;
client_header_timeout 30s;
include /etc/nginx/mime.types;
default_type application/octet-stream;
error_log /dev/stdout;
access_log /dev/stdout;
gzip on;
gzip_vary on;
gzip_proxied any;
gzip_static on;
gzip_comp_level 4;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_types text/plain text/css text/javascript application/javascript application/json application/transit+json;
resolver 127.0.0.11;
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen 80 default_server;
server_name _;
client_max_body_size 100M;
charset utf-8;
proxy_http_version 1.1;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
etag off;
root /var/www/app/;
location ~* \.(js|css).*$ {
add_header Cache-Control "max-age=86400" always; # 24 hours
}
location ~* \.(html).*$ {
add_header Cache-Control "no-cache, max-age=0" always;
}
location /api/export {
proxy_pass http://{{ include "penpot.fullname" . }}-exporter:6061;
}
location /api {
proxy_pass http://{{ include "penpot.fullname" . }}-backend:6060/api;
}
location /ws/notifications {
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_pass http://{{ include "penpot.fullname" . }}-backend:6060/ws/notifications;
}
location @handle_redirect {
set $redirect_uri "$upstream_http_location";
set $redirect_host "$upstream_http_x_host";
set $redirect_cache_control "$upstream_http_cache_control";
proxy_buffering off;
proxy_set_header Host "$redirect_host";
proxy_hide_header etag;
proxy_hide_header x-amz-id-2;
proxy_hide_header x-amz-request-id;
proxy_hide_header x-amz-meta-server-side-encryption;
proxy_hide_header x-amz-server-side-encryption;
proxy_pass $redirect_uri;
add_header x-internal-redirect "$redirect_uri";
add_header x-cache-control "$redirect_cache_control";
add_header cache-control "$redirect_cache_control";
}
location /assets {
proxy_pass http://{{ include "penpot.fullname" . }}-backend:6060/assets;
recursive_error_pages on;
proxy_intercept_errors on;
error_page 301 302 307 = @handle_redirect;
}
location /internal/assets {
internal;
alias /opt/data/assets;
add_header x-internal-redirect "$upstream_http_x_accel_redirect";
}
}
}

View File

@@ -0,0 +1,378 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "penpot.fullname" . }}-backend
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.backend.replicaCount }}
selector:
matchLabels:
{{- include "penpot.backendSelectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "penpot.backendSelectorLabels" . | nindent 8 }}
spec:
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{ if .Values.backend.podSecurityContext.enabled }}
securityContext:
{{- omit .Values.backend.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "penpot.serviceAccountName" . }}
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/instance
operator: In
values:
- {{ .Release.Name }}
topologyKey: "kubernetes.io/hostname"
containers:
- name: {{ .Chart.Name }}-backend
{{ if .Values.backend.containerSecurityContext.enabled }}
securityContext:
{{- omit .Values.backend.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
image: "{{ .Values.backend.image.repository }}:{{ .Values.backend.image.tag }}"
imagePullPolicy: {{ .Values.backend.image.imagePullPolicy }}
volumeMounts:
- mountPath: /opt/data
name: app-data
readOnly: false
env:
- name: PENPOT_PUBLIC_URI
value: {{ .Values.config.publicURI | quote }}
- name: PENPOT_FLAGS
value: "$PENPOT_FLAGS {{ .Values.config.flags }}"
- name: PENPOT_SECRET_KEY
valueFrom:
secretKeyRef:
name: {{ .Values.config.apiSecretKey.existingSecretName }}
key: {{ .Values.config.apiSecretKey.existingSecretKey }}
- name: PENPOT_DATABASE_URI
value: "postgresql://{{ .Values.config.postgresql.host }}:{{ .Values.config.postgresql.port }}/{{ .Values.config.postgresql.database }}"
- name: PENPOT_DATABASE_USERNAME
{{- if not .Values.config.postgresql.secretKeys.usernameKey }}
value: {{ .Values.config.postgresql.username | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.postgresql.existingSecret }}
key: {{ .Values.config.postgresql.secretKeys.usernameKey }}
{{- end }}
- name: PENPOT_DATABASE_PASSWORD
{{- if not .Values.config.postgresql.secretKeys.passwordKey }}
value: {{ .Values.config.postgresql.password | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.postgresql.existingSecret }}
key: {{ .Values.config.postgresql.secretKeys.passwordKey }}
{{- end }}
- name: PENPOT_REDIS_URI
value: "redis://{{ .Values.config.redis.host }}:{{ .Values.config.redis.port }}/{{ .Values.config.redis.database }}"
- name: PENPOT_ASSETS_STORAGE_BACKEND
value: {{ .Values.config.assets.storageBackend | quote }}
{{- if eq .Values.config.assets.storageBackend "assets-fs" }}
- name: PENPOT_STORAGE_ASSETS_FS_DIRECTORY
value: {{ .Values.config.assets.filesystem.directory | quote }}
{{- else if eq .Values.config.assets.storageBackend "assets-s3" }}
- name: PENPOT_STORAGE_ASSETS_S3_REGION
value: {{ .Values.config.assets.s3.region | quote }}
- name: PENPOT_STORAGE_ASSETS_S3_BUCKET
value: {{ .Values.config.assets.s3.bucket | quote }}
- name: AWS_ACCESS_KEY_ID
{{- if not .Values.config.assets.s3.secretKeys.accessKeyIDKey }}
value: {{ .Values.config.assets.s3.accessKeyID | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.assets.s3.existingSecret }}
key: {{ .Values.config.assets.s3.secretKeys.accessKeyIDKey }}
{{- end }}
- name: AWS_SECRET_ACCESS_KEY
{{- if not .Values.config.assets.s3.secretKeys.secretAccessKey }}
value: {{ .Values.config.assets.s3.secretAccessKey | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.assets.s3.existingSecret }}
key: {{ .Values.config.assets.s3.secretKeys.secretAccessKey }}
{{- end }}
- name: PENPOT_STORAGE_ASSETS_S3_ENDPOINT
{{- if not .Values.config.assets.s3.secretKeys.endpointURIKey }}
value: {{ .Values.config.assets.s3.endpointURI | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.assets.s3.existingSecret }}
key: {{ .Values.config.assets.s3.secretKeys.endpointURIKey }}
{{- end }}
{{- end }}
- name: PENPOT_TELEMETRY_ENABLED
value: {{ .Values.config.telemetryEnabled | quote }}
{{- if .Values.config.smtp.enabled }}
{{- if .Values.config.smtp.defaultFrom }}
- name: PENPOT_SMTP_DEFAULT_FROM
value: {{ .Values.config.smtp.defaultFrom | quote }}
{{- end }}
{{- if .Values.config.smtp.defaultReplyTo }}
- name: PENPOT_SMTP_DEFAULT_REPLY_TO
value: {{ .Values.config.smtp.defaultReplyTo | quote }}
{{- end }}
{{- if .Values.config.smtp.host }}
- name: PENPOT_SMTP_HOST
value: {{ .Values.config.smtp.host | quote }}
{{- end }}
{{- if .Values.config.smtp.port }}
- name: PENPOT_SMTP_PORT
value: {{ .Values.config.smtp.port | quote }}
{{- end }}
{{- if not .Values.config.smtp.secretKeys.usernameKey }}
- name: PENPOT_SMTP_USERNAME
value: {{ .Values.config.smtp.username | quote }}
{{- else }}
- name: PENPOT_SMTP_USERNAME
valueFrom:
secretKeyRef:
name: {{ .Values.config.smtp.existingSecret }}
key: {{ .Values.config.smtp.secretKeys.usernameKey }}
{{- end }}
{{- if not .Values.config.smtp.secretKeys.passwordKey }}
- name: PENPOT_SMTP_PASSWORD
value: {{ .Values.config.smtp.password | quote }}
{{- else }}
- name: PENPOT_SMTP_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.config.smtp.existingSecret }}
key: {{ .Values.config.smtp.secretKeys.passwordKey }}
{{- end }}
{{- if .Values.config.smtp.tls }}
- name: PENPOT_SMTP_TLS
value: {{ .Values.config.smtp.tls | quote }}
{{- end }}
{{- if .Values.config.smtp.ssl }}
- name: PENPOT_SMTP_SSL
value: {{ .Values.config.smtp.ssl | quote }}
{{- end }}
{{- end }}
{{- if .Values.config.registrationDomainWhitelist }}
- name: PENPOT_REGISTRATION_DOMAIN_WHITELIST
value: {{ .Values.config.registrationDomainWhitelist | quote }}
{{- end }}
{{- if .Values.config.providers.google.enabled }}
{{- if not .Values.config.providers.secretKeys.googleClientIDKey }}
- name: PENPOT_GOOGLE_CLIENT_ID
value: {{ .Values.config.providers.google.clientID | quote }}
{{- else }}
- name: PENPOT_GOOGLE_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.googleClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.googleClientSecretKey}}
- name: PENPOT_GOOGLE_CLIENT_SECRET
value: {{ .Values.config.providers.google.clientSecret | quote }}
{{- else }}
- name: PENPOT_GOOGLE_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.googleClientSecretKey }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.github.enabled }}
{{- if not .Values.config.providers.secretKeys.githubClientIDKey }}
- name: PENPOT_GITHUB_CLIENT_ID
value: {{ .Values.config.providers.github.clientID | quote }}
{{- else }}
- name: PENPOT_GITHUB_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.githubClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.githubClientSecretKey }}
- name: PENPOT_GITHUB_CLIENT_SECRET
value: {{ .Values.config.providers.github.clientSecret | quote }}
{{- else }}
- name: PENPOT_GITHUB_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.githubClientSecretKey }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.gitlab.enabled }}
{{- if .Values.config.providers.gitlab.baseURI }}
- name: PENPOT_GITLAB_BASE_URI
value: {{ .Values.config.providers.gitlab.baseURI | quote }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.gitlabClientIDKey }}
- name: PENPOT_GITLAB_CLIENT_ID
value: {{ .Values.config.providers.gitlab.clientID | quote }}
{{- else }}
- name: PENPOT_GITLAB_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.gitlabClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.gitlabClientSecretKey }}
- name: PENPOT_GITLAB_CLIENT_SECRET
value: {{ .Values.config.providers.gitlab.clientSecret | quote }}
{{- else }}
- name: PENPOT_GITLAB_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.gitlabClientSecretKey }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.oidc.enabled }}
{{- if .Values.config.providers.oidc.baseURI }}
- name: PENPOT_OIDC_BASE_URI
value: {{ .Values.config.providers.oidc.baseURI | quote }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.oidcClientIDKey }}
- name: PENPOT_OIDC_CLIENT_ID
value: {{ .Values.config.providers.oidc.clientID | quote}}
{{- else }}
- name: PENPOT_OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.oidcClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.oidcClientSecretKey}}
- name: PENPOT_OIDC_CLIENT_SECRET
value: {{ .Values.config.providers.oidc.clientSecret | quote }}
{{- else }}
- name: PENPOT_OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.oidcClientSecretKey }}
{{- end }}
{{- if .Values.config.providers.oidc.authURI }}
- name: PENPOT_OIDC_AUTH_URI
value: {{ .Values.config.providers.oidc.authURI | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.tokenURI }}
- name: PENPOT_OIDC_TOKEN_URI
value: {{ .Values.config.providers.oidc.tokenURI | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.userURI }}
- name: PENPOT_OIDC_USER_URI
value: {{ .Values.config.providers.oidc.userURI | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.roles }}
- name: PENPOT_OIDC_ROLES
value: {{ .Values.config.providers.oidc.roles | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.rolesAttribute }}
- name: PENPOT_OIDC_ROLES_ATTR
value: {{ .Values.config.providers.oidc.rolesAttribute | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.scopes }}
- name: PENPOT_OIDC_SCOPES
value: {{ .Values.config.providers.oidc.scopes | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.nameAttribute }}
- name: PENPOT_OIDC_NAME_ATTR
value: {{ .Values.config.providers.oidc.nameAttribute | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.emailAttribute }}
- name: PENPOT_OIDC_EMAIL_ATTR
value: {{ .Values.config.providers.oidc.emailAttribute | quote }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.ldap.enabled }}
{{- if .Values.config.providers.ldap.host }}
- name: PENPOT_LDAP_HOST
value: {{ .Values.config.providers.ldap.host | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.port }}
- name: PENPOT_LDAP_PORT
value: {{ .Values.config.providers.ldap.port | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.ssl }}
- name: PENPOT_LDAP_SSL
value: {{ .Values.config.providers.ldap.ssl | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.startTLS }}
- name: PENPOT_LDAP_STARTTLS
value: {{ .Values.config.providers.ldap.startTLS | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.baseDN }}
- name: PENPOT_LDAP_BASE_DN
value: {{ .Values.config.providers.ldap.baseDN | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.bindDN }}
- name: PENPOT_LDAP_BIND_DN
value: {{ .Values.config.providers.ldap.bindDN | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.bindPassword }}
- name: PENPOT_LDAP_BIND_PASSWORD
value: {{ .Values.config.providers.ldap.bindPassword | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesUsername }}
- name: PENPOT_LDAP_ATTRS_USERNAME
value: {{ .Values.config.providers.ldap.attributesUsername | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesEmail }}
- name: PENPOT_LDAP_ATTRS_EMAIL
value: {{ .Values.config.providers.ldap.attributesEmail | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesFullname }}
- name: PENPOT_LDAP_ATTRS_FULLNAME
value: {{ .Values.config.providers.ldap.attributesFullname | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesPhoto }}
- name: PENPOT_LDAP_ATTRS_PHOTO
value: {{ .Values.config.providers.ldap.attributesPhoto | quote }}
{{- end }}
{{- end }}
ports:
- name: http
containerPort: {{ .Values.backend.service.port }}
protocol: TCP
resources:
{{- toYaml .Values.backend.resources | nindent 12 }}
{{- with .Values.backend.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.backend.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.backend.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: app-data
{{- if .Values.persistence.enabled }}
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default ( include "penpot.fullname" . ) }}
{{- else }}
emptyDir: {}
{{- end }}
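A hypothetical values fragment for the assets-s3 branch of the deployment above (the bucket, region, and secret names are placeholders):

config:
  assets:
    storageBackend: assets-s3
    s3:
      region: us-east-1
      bucket: penpot-assets
      existingSecret: penpot-s3-credentials
      secretKeys:
        accessKeyIDKey: access-key-id
        secretAccessKey: secret-access-key
        endpointURIKey: endpoint-uri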

View File

@@ -0,0 +1,353 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "penpot.fullname" . }}-exporter
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.exporter.replicaCount }}
selector:
matchLabels:
{{- include "penpot.exporterSelectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "penpot.exporterSelectorLabels" . | nindent 8 }}
spec:
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "penpot.serviceAccountName" . }}
{{ if .Values.exporter.podSecurityContext.enabled }}
securityContext:
{{- omit .Values.exporter.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}-exporter
{{ if .Values.exporter.containerSecurityContext.enabled }}
securityContext:
{{- omit .Values.exporter.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
image: "{{ .Values.exporter.image.repository }}:{{ .Values.exporter.image.tag }}"
imagePullPolicy: {{ .Values.exporter.image.imagePullPolicy }}
env:
- name: PENPOT_PUBLIC_URI
value: {{ .Values.config.publicURI | quote }}
- name: PENPOT_FLAGS
value: "$PENPOT_FLAGS {{ .Values.config.flags }}"
- name: PENPOT_SECRET_KEY
value: {{ .Values.config.apiSecretKey | quote }}
- name: PENPOT_DATABASE_URI
value: "postgresql://{{ .Values.config.postgresql.host }}:{{ .Values.config.postgresql.port }}/{{ .Values.config.postgresql.database }}"
- name: PENPOT_DATABASE_USERNAME
{{- if not .Values.config.postgresql.secretKeys.usernameKey }}
value: {{ .Values.config.postgresql.username | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.postgresql.existingSecret }}
key: {{ .Values.config.postgresql.secretKeys.usernameKey }}
{{- end }}
- name: PENPOT_DATABASE_PASSWORD
{{- if not .Values.config.postgresql.secretKeys.passwordKey }}
value: {{ .Values.config.postgresql.password | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.postgresql.existingSecret }}
key: {{ .Values.config.postgresql.secretKeys.passwordKey }}
{{- end }}
- name: PENPOT_REDIS_URI
value: "redis://{{ .Values.config.redis.host }}:{{ .Values.config.redis.port }}/{{ .Values.config.redis.database }}"
- name: PENPOT_ASSETS_STORAGE_BACKEND
value: {{ .Values.config.assets.storageBackend | quote }}
{{- if eq .Values.config.assets.storageBackend "assets-fs" }}
- name: PENPOT_STORAGE_ASSETS_FS_DIRECTORY
value: {{ .Values.config.assets.filesystem.directory | quote }}
{{- else if eq .Values.config.assets.storageBackend "assets-s3" }}
- name: PENPOT_STORAGE_ASSETS_S3_REGION
value: {{ .Values.config.assets.s3.region | quote }}
- name: PENPOT_STORAGE_ASSETS_S3_BUCKET
value: {{ .Values.config.assets.s3.bucket | quote }}
- name: AWS_ACCESS_KEY_ID
{{- if not .Values.config.assets.s3.secretKeys.accessKeyIDKey }}
value: {{ .Values.config.assets.s3.accessKeyID | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.assets.s3.existingSecret }}
key: {{ .Values.config.assets.s3.secretKeys.accessKeyIDKey }}
{{- end }}
- name: AWS_SECRET_ACCESS_KEY
{{- if not .Values.config.assets.s3.secretKeys.secretAccessKey }}
value: {{ .Values.config.assets.s3.secretAccessKey | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.assets.s3.existingSecret }}
key: {{ .Values.config.assets.s3.secretKeys.secretAccessKey }}
{{- end }}
- name: PENPOT_STORAGE_ASSETS_S3_ENDPOINT
{{- if not .Values.config.assets.s3.secretKeys.endpointURIKey }}
value: {{ .Values.config.assets.s3.endpointURI | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.assets.s3.existingSecret }}
key: {{ .Values.config.assets.s3.secretKeys.endpointURIKey }}
{{- end }}
{{- end }}
- name: PENPOT_TELEMETRY_ENABLED
value: {{ .Values.config.telemetryEnabled | quote }}
{{- if .Values.config.smtp.enabled }}
{{- if .Values.config.smtp.defaultFrom }}
- name: PENPOT_SMTP_DEFAULT_FROM
value: {{ .Values.config.smtp.defaultFrom | quote }}
{{- end }}
{{- if .Values.config.smtp.defaultReplyTo }}
- name: PENPOT_SMTP_DEFAULT_REPLY_TO
value: {{ .Values.config.smtp.defaultReplyTo | quote }}
{{- end }}
{{- if .Values.config.smtp.host }}
- name: PENPOT_SMTP_HOST
value: {{ .Values.config.smtp.host | quote }}
{{- end }}
{{- if .Values.config.smtp.port }}
- name: PENPOT_SMTP_PORT
value: {{ .Values.config.smtp.port | quote }}
{{- end }}
{{- if not .Values.config.smtp.secretKeys.usernameKey }}
- name: PENPOT_SMTP_USERNAME
value: {{ .Values.config.smtp.username | quote }}
{{- else }}
- name: PENPOT_SMTP_USERNAME
valueFrom:
secretKeyRef:
name: {{ .Values.config.smtp.existingSecret }}
key: {{ .Values.config.smtp.secretKeys.usernameKey }}
{{- end }}
{{- if not .Values.config.smtp.secretKeys.passwordKey }}
- name: PENPOT_SMTP_PASSWORD
value: {{ .Values.config.smtp.password | quote }}
{{- else }}
- name: PENPOT_SMTP_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.config.smtp.existingSecret }}
key: {{ .Values.config.smtp.secretKeys.passwordKey }}
{{- end }}
{{- if .Values.config.smtp.tls }}
- name: PENPOT_SMTP_TLS
value: {{ .Values.config.smtp.tls | quote }}
{{- end }}
{{- if .Values.config.smtp.ssl }}
- name: PENPOT_SMTP_SSL
value: {{ .Values.config.smtp.ssl | quote }}
{{- end }}
{{- end }}
{{- if .Values.config.registrationDomainWhitelist }}
- name: PENPOT_REGISTRATION_DOMAIN_WHITELIST
value: {{ .Values.config.registrationDomainWhitelist | quote }}
{{- end }}
{{- if .Values.config.providers.google.enabled }}
{{- if not .Values.config.providers.secretKeys.googleClientIDKey }}
- name: PENPOT_GOOGLE_CLIENT_ID
value: {{ .Values.config.providers.google.clientID | quote }}
{{- else }}
- name: PENPOT_GOOGLE_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.googleClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.googleClientSecretKey}}
- name: PENPOT_GOOGLE_CLIENT_SECRET
value: {{ .Values.config.providers.google.clientSecret | quote }}
{{- else }}
- name: PENPOT_GOOGLE_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.googleClientSecretKey }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.github.enabled }}
{{- if not .Values.config.providers.secretKeys.githubClientIDKey }}
- name: PENPOT_GITHUB_CLIENT_ID
value: {{ .Values.config.providers.github.clientID | quote }}
{{- else }}
- name: PENPOT_GITHUB_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.githubClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.githubClientSecretKey }}
- name: PENPOT_GITHUB_CLIENT_SECRET
value: {{ .Values.config.providers.github.clientSecret | quote }}
{{- else }}
- name: PENPOT_GITHUB_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.githubClientSecretKey }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.gitlab.enabled }}
{{- if .Values.config.providers.gitlab.baseURI }}
- name: PENPOT_GITLAB_BASE_URI
value: {{ .Values.config.providers.gitlab.baseURI | quote }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.gitlabClientIDKey }}
- name: PENPOT_GITLAB_CLIENT_ID
value: {{ .Values.config.providers.gitlab.clientID | quote }}
{{- else }}
- name: PENPOT_GITLAB_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.gitlabClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.gitlabClientSecretKey }}
- name: PENPOT_GITLAB_CLIENT_SECRET
value: {{ .Values.config.providers.gitlab.clientSecret | quote }}
{{- else }}
- name: PENPOT_GITLAB_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.gitlabClientSecretKey }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.oidc.enabled }}
{{- if .Values.config.providers.oidc.baseURI }}
- name: PENPOT_OIDC_BASE_URI
value: {{ .Values.config.providers.oidc.baseURI | quote }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.oidcClientIDKey }}
- name: PENPOT_OIDC_CLIENT_ID
value: {{ .Values.config.providers.oidc.clientID | quote}}
{{- else }}
- name: PENPOT_OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.oidcClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.oidcClientSecretKey}}
- name: PENPOT_OIDC_CLIENT_SECRET
value: {{ .Values.config.providers.oidc.clientSecret | quote }}
{{- else }}
- name: PENPOT_OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.oidcClientSecretKey }}
{{- end }}
{{- if .Values.config.providers.oidc.authURI }}
- name: PENPOT_OIDC_AUTH_URI
value: {{ .Values.config.providers.oidc.authURI | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.tokenURI }}
- name: PENPOT_OIDC_TOKEN_URI
value: {{ .Values.config.providers.oidc.tokenURI | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.userURI }}
- name: PENPOT_OIDC_USER_URI
value: {{ .Values.config.providers.oidc.userURI | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.roles }}
- name: PENPOT_OIDC_ROLES
value: {{ .Values.config.providers.oidc.roles | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.rolesAttribute }}
- name: PENPOT_OIDC_ROLES_ATTR
value: {{ .Values.config.providers.oidc.rolesAttribute | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.scopes }}
- name: PENPOT_OIDC_SCOPES
value: {{ .Values.config.providers.oidc.scopes | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.nameAttribute }}
- name: PENPOT_OIDC_NAME_ATTR
value: {{ .Values.config.providers.oidc.nameAttribute | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.emailAttribute }}
- name: PENPOT_OIDC_EMAIL_ATTR
value: {{ .Values.config.providers.oidc.emailAttribute | quote }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.ldap.enabled }}
{{- if .Values.config.providers.ldap.host }}
- name: PENPOT_LDAP_HOST
value: {{ .Values.config.providers.ldap.host | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.port }}
- name: PENPOT_LDAP_PORT
value: {{ .Values.config.providers.ldap.port | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.ssl }}
- name: PENPOT_LDAP_SSL
value: {{ .Values.config.providers.ldap.ssl | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.startTLS }}
- name: PENPOT_LDAP_STARTTLS
value: {{ .Values.config.providers.ldap.startTLS | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.baseDN }}
- name: PENPOT_LDAP_BASE_DN
value: {{ .Values.config.providers.ldap.baseDN | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.bindDN }}
- name: PENPOT_LDAP_BIND_DN
value: {{ .Values.config.providers.ldap.bindDN | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.bindPassword }}
- name: PENPOT_LDAP_BIND_PASSWORD
value: {{ .Values.config.providers.ldap.bindPassword | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesUsername }}
- name: PENPOT_LDAP_ATTRS_USERNAME
value: {{ .Values.config.providers.ldap.attributesUsername | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesEmail }}
- name: PENPOT_LDAP_ATTRS_EMAIL
value: {{ .Values.config.providers.ldap.attributesEmail | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesFullname }}
- name: PENPOT_LDAP_ATTRS_FULLNAME
value: {{ .Values.config.providers.ldap.attributesFullname | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesPhoto }}
- name: PENPOT_LDAP_ATTRS_PHOTO
value: {{ .Values.config.providers.ldap.attributesPhoto | quote }}
{{- end }}
{{- end }}
ports:
- name: http
containerPort: {{ .Values.exporter.service.port }}
protocol: TCP
resources:
{{- toYaml .Values.exporter.resources | nindent 12 }}
{{- with .Values.exporter.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.exporter.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.exporter.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -0,0 +1,375 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "penpot.fullname" . }}-frontend
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.frontend.replicaCount }}
selector:
matchLabels:
{{- include "penpot.frontendSelectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "penpot.frontendSelectorLabels" . | nindent 8 }}
spec:
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "penpot.serviceAccountName" . }}
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/instance
operator: In
values:
- {{ .Release.Name }}
topologyKey: "kubernetes.io/hostname"
containers:
- name: {{ .Chart.Name }}-frontend
image: "{{ .Values.frontend.image.repository }}:{{ .Values.frontend.image.tag }}"
imagePullPolicy: {{ .Values.frontend.image.imagePullPolicy }}
env:
- name: PENPOT_PUBLIC_URI
value: {{ .Values.config.publicURI | quote }}
- name: PENPOT_FLAGS
value: "$PENPOT_FLAGS {{ .Values.config.flags }}"
- name: PENPOT_SECRET_KEY
value: {{ .Values.config.apiSecretKey | quote }}
- name: PENPOT_DATABASE_URI
value: "postgresql://{{ .Values.config.postgresql.host }}:{{ .Values.config.postgresql.port }}/{{ .Values.config.postgresql.database }}"
- name: PENPOT_DATABASE_USERNAME
{{- if not .Values.config.postgresql.secretKeys.usernameKey }}
value: {{ .Values.config.postgresql.username | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.postgresql.existingSecret }}
key: {{ .Values.config.postgresql.secretKeys.usernameKey }}
{{- end }}
- name: PENPOT_DATABASE_PASSWORD
{{- if not .Values.config.postgresql.secretKeys.passwordKey }}
value: {{ .Values.config.postgresql.password | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.postgresql.existingSecret }}
key: {{ .Values.config.postgresql.secretKeys.passwordKey }}
{{- end }}
- name: PENPOT_REDIS_URI
value: "redis://{{ .Values.config.redis.host }}:{{ .Values.config.redis.port }}/{{ .Values.config.redis.database }}"
- name: PENPOT_ASSETS_STORAGE_BACKEND
value: {{ .Values.config.assets.storageBackend | quote }}
{{- if eq .Values.config.assets.storageBackend "assets-fs" }}
- name: PENPOT_STORAGE_ASSETS_FS_DIRECTORY
value: {{ .Values.config.assets.filesystem.directory | quote }}
{{- else if eq .Values.config.assets.storageBackend "assets-s3" }}
- name: PENPOT_STORAGE_ASSETS_S3_REGION
value: {{ .Values.config.assets.s3.region | quote }}
- name: PENPOT_STORAGE_ASSETS_S3_BUCKET
value: {{ .Values.config.assets.s3.bucket | quote }}
- name: AWS_ACCESS_KEY_ID
{{- if not .Values.config.assets.s3.secretKeys.accessKeyIDKey }}
value: {{ .Values.config.assets.s3.accessKeyID | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.assets.s3.existingSecret }}
key: {{ .Values.config.assets.s3.secretKeys.accessKeyIDKey }}
{{- end }}
- name: AWS_SECRET_ACCESS_KEY
{{- if not .Values.config.assets.s3.secretKeys.secretAccessKey }}
value: {{ .Values.config.assets.s3.secretAccessKey | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.assets.s3.existingSecret }}
key: {{ .Values.config.assets.s3.secretKeys.secretAccessKey }}
{{- end }}
- name: PENPOT_STORAGE_ASSETS_S3_ENDPOINT
{{- if not .Values.config.assets.s3.secretKeys.endpointURIKey }}
value: {{ .Values.config.assets.s3.endpointURI | quote }}
{{- else }}
valueFrom:
secretKeyRef:
name: {{ .Values.config.assets.s3.existingSecret }}
key: {{ .Values.config.assets.s3.secretKeys.endpointURIKey }}
{{- end }}
{{- end }}
- name: PENPOT_TELEMETRY_ENABLED
value: {{ .Values.config.telemetryEnabled | quote }}
{{- if .Values.config.smtp.enabled }}
{{- if .Values.config.smtp.defaultFrom }}
- name: PENPOT_SMTP_DEFAULT_FROM
value: {{ .Values.config.smtp.defaultFrom | quote }}
{{- end }}
{{- if .Values.config.smtp.defaultReplyTo }}
- name: PENPOT_SMTP_DEFAULT_REPLY_TO
value: {{ .Values.config.smtp.defaultReplyTo | quote }}
{{- end }}
{{- if .Values.config.smtp.host }}
- name: PENPOT_SMTP_HOST
value: {{ .Values.config.smtp.host | quote }}
{{- end }}
{{- if .Values.config.smtp.port }}
- name: PENPOT_SMTP_PORT
value: {{ .Values.config.smtp.port | quote }}
{{- end }}
{{- if not .Values.config.smtp.secretKeys.usernameKey }}
- name: PENPOT_SMTP_USERNAME
value: {{ .Values.config.smtp.username | quote }}
{{- else }}
- name: PENPOT_SMTP_USERNAME
valueFrom:
secretKeyRef:
name: {{ .Values.config.smtp.existingSecret }}
key: {{ .Values.config.smtp.secretKeys.usernameKey }}
{{- end }}
{{- if not .Values.config.smtp.secretKeys.passwordKey }}
- name: PENPOT_SMTP_PASSWORD
value: {{ .Values.config.smtp.password | quote }}
{{- else }}
- name: PENPOT_SMTP_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.config.smtp.existingSecret }}
key: {{ .Values.config.smtp.secretKeys.passwordKey }}
{{- end }}
{{- if .Values.config.smtp.tls }}
- name: PENPOT_SMTP_TLS
value: {{ .Values.config.smtp.tls | quote }}
{{- end }}
{{- if .Values.config.smtp.ssl }}
- name: PENPOT_SMTP_SSL
value: {{ .Values.config.smtp.ssl | quote }}
{{- end }}
{{- end }}
{{- if .Values.config.registrationDomainWhitelist }}
- name: PENPOT_REGISTRATION_DOMAIN_WHITELIST
value: {{ .Values.config.registrationDomainWhitelist | quote }}
{{- end }}
{{- if .Values.config.providers.google.enabled }}
{{- if not .Values.config.providers.secretKeys.googleClientIDKey }}
- name: PENPOT_GOOGLE_CLIENT_ID
value: {{ .Values.config.providers.google.clientID | quote }}
{{- else }}
- name: PENPOT_GOOGLE_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.googleClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.googleClientSecretKey }}
- name: PENPOT_GOOGLE_CLIENT_SECRET
value: {{ .Values.config.providers.google.clientSecret | quote }}
{{- else }}
- name: PENPOT_GOOGLE_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.googleClientSecretKey }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.github.enabled }}
{{- if not .Values.config.providers.secretKeys.githubClientIDKey }}
- name: PENPOT_GITHUB_CLIENT_ID
value: {{ .Values.config.providers.github.clientID | quote }}
{{- else }}
- name: PENPOT_GITHUB_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.githubClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.githubClientSecretKey }}
- name: PENPOT_GITHUB_CLIENT_SECRET
value: {{ .Values.config.providers.github.clientSecret | quote }}
{{- else }}
- name: PENPOT_GITHUB_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.githubClientSecretKey }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.gitlab.enabled }}
{{- if .Values.config.providers.gitlab.baseURI }}
- name: PENPOT_GITLAB_BASE_URI
value: {{ .Values.config.providers.gitlab.baseURI | quote }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.gitlabClientIDKey }}
- name: PENPOT_GITLAB_CLIENT_ID
value: {{ .Values.config.providers.gitlab.clientID | quote }}
{{- else }}
- name: PENPOT_GITLAB_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.gitlabClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.gitlabClientSecretKey }}
- name: PENPOT_GITLAB_CLIENT_SECRET
value: {{ .Values.config.providers.gitlab.clientSecret | quote }}
{{- else }}
- name: PENPOT_GITLAB_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.gitlabClientSecretKey }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.oidc.enabled }}
{{- if .Values.config.providers.oidc.baseURI }}
- name: PENPOT_OIDC_BASE_URI
value: {{ .Values.config.providers.oidc.baseURI | quote }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.oidcClientIDKey }}
- name: PENPOT_OIDC_CLIENT_ID
value: {{ .Values.config.providers.oidc.clientID | quote }}
{{- else }}
- name: PENPOT_OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.oidcClientIDKey }}
{{- end }}
{{- if not .Values.config.providers.secretKeys.oidcClientSecretKey }}
- name: PENPOT_OIDC_CLIENT_SECRET
value: {{ .Values.config.providers.oidc.clientSecret | quote }}
{{- else }}
- name: PENPOT_OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: {{ .Values.config.providers.existingSecret }}
key: {{ .Values.config.providers.secretKeys.oidcClientSecretKey }}
{{- end }}
{{- if .Values.config.providers.oidc.authURI }}
- name: PENPOT_OIDC_AUTH_URI
value: {{ .Values.config.providers.oidc.authURI | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.tokenURI }}
- name: PENPOT_OIDC_TOKEN_URI
value: {{ .Values.config.providers.oidc.tokenURI | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.userURI }}
- name: PENPOT_OIDC_USER_URI
value: {{ .Values.config.providers.oidc.userURI | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.roles }}
- name: PENPOT_OIDC_ROLES
value: {{ .Values.config.providers.oidc.roles | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.rolesAttribute }}
- name: PENPOT_OIDC_ROLES_ATTR
value: {{ .Values.config.providers.oidc.rolesAttribute | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.scopes }}
- name: PENPOT_OIDC_SCOPES
value: {{ .Values.config.providers.oidc.scopes | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.nameAttribute }}
- name: PENPOT_OIDC_NAME_ATTR
value: {{ .Values.config.providers.oidc.nameAttribute | quote }}
{{- end }}
{{- if .Values.config.providers.oidc.emailAttribute }}
- name: PENPOT_OIDC_EMAIL_ATTR
value: {{ .Values.config.providers.oidc.emailAttribute | quote }}
{{- end }}
{{- end }}
{{- if .Values.config.providers.ldap.enabled }}
{{- if .Values.config.providers.ldap.host }}
- name: PENPOT_LDAP_HOST
value: {{ .Values.config.providers.ldap.host | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.port }}
- name: PENPOT_LDAP_PORT
value: {{ .Values.config.providers.ldap.port | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.ssl }}
- name: PENPOT_LDAP_SSL
value: {{ .Values.config.providers.ldap.ssl | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.startTLS }}
- name: PENPOT_LDAP_STARTTLS
value: {{ .Values.config.providers.ldap.startTLS | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.baseDN }}
- name: PENPOT_LDAP_BASE_DN
value: {{ .Values.config.providers.ldap.baseDN | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.bindDN }}
- name: PENPOT_LDAP_BIND_DN
value: {{ .Values.config.providers.ldap.bindDN | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.bindPassword }}
- name: PENPOT_LDAP_BIND_PASSWORD
value: {{ .Values.config.providers.ldap.bindPassword | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesUsername }}
- name: PENPOT_LDAP_ATTRS_USERNAME
value: {{ .Values.config.providers.ldap.attributesUsername | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesEmail }}
- name: PENPOT_LDAP_ATTRS_EMAIL
value: {{ .Values.config.providers.ldap.attributesEmail | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesFullname }}
- name: PENPOT_LDAP_ATTRS_FULLNAME
value: {{ .Values.config.providers.ldap.attributesFullname | quote }}
{{- end }}
{{- if .Values.config.providers.ldap.attributesPhoto }}
- name: PENPOT_LDAP_ATTRS_PHOTO
value: {{ .Values.config.providers.ldap.attributesPhoto | quote }}
{{- end }}
{{- end }}
volumeMounts:
- mountPath: /opt/data
name: app-data
readOnly: false
- mountPath: /etc/nginx/nginx.conf
name: "{{ include "penpot.fullname" . }}-frontend-nginx"
readOnly: true
subPath: nginx.conf
ports:
- name: http
containerPort: {{ .Values.frontend.service.port }}
protocol: TCP
resources:
{{- toYaml .Values.frontend.resources | nindent 12 }}
{{- with .Values.frontend.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.frontend.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.frontend.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: app-data
{{- if .Values.persistence.enabled }}
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default ( include "penpot.fullname" . ) }}
{{- else }}
emptyDir: {}
{{- end }}
- configMap:
defaultMode: 420
name: "{{ include "penpot.fullname" . }}-frontend-nginx"
name: "{{ include "penpot.fullname" . }}-frontend-nginx"

View File

@@ -0,0 +1,53 @@
{{- if .Values.ingress.enabled -}}
{{- $gitVersion := .Capabilities.KubeVersion.GitVersion -}}
{{- $fullName := include "penpot.fullname" . -}}
{{- $svcPort := .Values.frontend.service.port -}}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{ if semverCompare ">=1.19-0" $gitVersion }}
- path: /
pathType: Prefix
backend:
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{ else }}
- path: /
backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,24 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "penpot.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
{{- if .Values.persistence.annotations }}
annotations:
{{ toYaml .Values.persistence.annotations | indent 4 }}
{{- end }}
spec:
accessModes:
{{- range .Values.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClass }}
storageClassName: "{{ .Values.persistence.storageClass }}"
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.enabled -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "penpot.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,52 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "penpot.fullname" . }}-backend
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
spec:
type: {{ .Values.backend.service.type }}
ports:
- port: {{ .Values.backend.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "penpot.backendSelectorLabels" . | nindent 4 }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "penpot.fullname" . }}-exporter
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
spec:
type: {{ .Values.exporter.service.type }}
ports:
- port: {{ .Values.exporter.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "penpot.exporterSelectorLabels" . | nindent 4 }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "penpot.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "penpot.labels" . | nindent 4 }}
spec:
type: {{ .Values.frontend.service.type }}
ports:
- port: {{ .Values.frontend.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "penpot.frontendSelectorLabels" . | nindent 4 }}

468
charts/penpot/values.yaml Normal file
View File

@@ -0,0 +1,468 @@
## Default values for Penpot
## @section Global parameters
## @param global.postgresqlEnabled Whether to deploy the Bitnami PostgreSQL chart as a subchart. Check [the official chart](https://artifacthub.io/packages/helm/bitnami/postgresql) for configuration.
## @param global.redisEnabled Whether to deploy the Bitnami Redis chart as a subchart. Check [the official chart](https://artifacthub.io/packages/helm/bitnami/redis) for configuration.
## @param global.imagePullSecrets Global Docker registry secret names as an array.
##
global:
## E.g.
## imagePullSecrets:
## - myRegistryKeySecretName
##
imagePullSecrets: []
## @section Common parameters
## @param nameOverride String to partially override common.names.fullname
##
nameOverride: ""
## @param fullnameOverride String to fully override common.names.fullname
##
fullnameOverride: ""
## @param serviceAccount.enabled Specifies whether a ServiceAccount should be created.
## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `enabled` is `true`.
## @param serviceAccount.name The name of the ServiceAccount to use. If not set and enabled is true, a name is generated using the fullname template.
##
serviceAccount:
enabled: true
annotations: {}
name: ""
## @section Backend parameters
## Penpot Backend
##
backend:
## @param backend.image.repository The Docker repository to pull the image from.
## @param backend.image.tag The image tag to use.
## @param backend.image.imagePullPolicy The image pull policy to use.
##
image:
repository: penpotapp/backend
tag: 2.0.1
imagePullPolicy: IfNotPresent
## @param backend.replicaCount The number of replicas to deploy.
##
replicaCount: 1
## @param backend.service.type The service type to create.
## @param backend.service.port The service port to use.
##
service:
type: ClusterIP
port: 6060
## Configure Pods Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param backend.podSecurityContext.enabled Enabled Penpot pods' security context
## @param backend.podSecurityContext.fsGroup Set Penpot pod's security context fsGroup
##
podSecurityContext:
enabled: true
fsGroup: 1001
## Configure Container Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param backend.containerSecurityContext.enabled Enabled Penpot containers' security context
## @param backend.containerSecurityContext.runAsUser Set Penpot containers' security context runAsUser
## @param backend.containerSecurityContext.allowPrivilegeEscalation Set Penpot containers' security context allowPrivilegeEscalation
## @param backend.containerSecurityContext.capabilities.drop Set Penpot containers' security context capabilities to be dropped
## @param backend.containerSecurityContext.readOnlyRootFilesystem Set Penpot containers' security context readOnlyRootFilesystem
## @param backend.containerSecurityContext.runAsNonRoot Set Penpot container's security context runAsNonRoot
##
containerSecurityContext:
enabled: true
runAsUser: 1001
allowPrivilegeEscalation: false
capabilities:
drop:
- all
readOnlyRootFilesystem: false
runAsNonRoot: true
## @param backend.affinity Affinity for Penpot pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## @param backend.nodeSelector Node labels for Penpot pods assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## @param backend.tolerations Tolerations for Penpot pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Penpot backend resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## @param backend.resources.limits The resources limits for the Penpot backend containers
## @param backend.resources.requests The requested resources for the Penpot backend containers
##
resources:
limits: {}
requests: {}
## @section Frontend parameters
## Penpot Frontend
##
frontend:
## @param frontend.image.repository The Docker repository to pull the image from.
## @param frontend.image.tag The image tag to use.
## @param frontend.image.imagePullPolicy The image pull policy to use.
##
image:
repository: penpotapp/frontend
tag: 2.0.1
imagePullPolicy: IfNotPresent
## @param frontend.replicaCount The number of replicas to deploy.
##
replicaCount: 1
## @param frontend.service.type The service type to create.
## @param frontend.service.port The service port to use.
##
service:
type: ClusterIP
port: 80
## @param frontend.affinity Affinity for Penpot pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## @param frontend.nodeSelector Node labels for Penpot pods assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## @param frontend.tolerations Tolerations for Penpot pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Penpot frontend resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## @param frontend.resources.limits The resources limits for the Penpot frontend containers
## @param frontend.resources.requests The requested resources for the Penpot frontend containers
##
resources:
limits: {}
requests: {}
## @section Exporter parameters
## Penpot Exporter
##
exporter:
## @param exporter.image.repository The Docker repository to pull the image from.
## @param exporter.image.tag The image tag to use.
## @param exporter.image.imagePullPolicy The image pull policy to use.
##
image:
repository: penpotapp/exporter
tag: 2.0.1
imagePullPolicy: IfNotPresent
## @param exporter.replicaCount The number of replicas to deploy.
##
replicaCount: 1
## @param exporter.service.type The service type to create.
## @param exporter.service.port The service port to use.
##
service:
type: ClusterIP
port: 6061
## Configure Pods Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param exporter.podSecurityContext.enabled Enabled Penpot pods' security context
## @param exporter.podSecurityContext.fsGroup Set Penpot pod's security context fsGroup
##
podSecurityContext:
enabled: true
fsGroup: 1001
## Configure Container Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param exporter.containerSecurityContext.enabled Enabled Penpot containers' security context
## @param exporter.containerSecurityContext.runAsUser Set Penpot containers' security context runAsUser
## @param exporter.containerSecurityContext.allowPrivilegeEscalation Set Penpot containers' security context allowPrivilegeEscalation
## @param exporter.containerSecurityContext.capabilities.drop Set Penpot containers' security context capabilities to be dropped
## @param exporter.containerSecurityContext.readOnlyRootFilesystem Set Penpot containers' security context readOnlyRootFilesystem
## @param exporter.containerSecurityContext.runAsNonRoot Set Penpot container's security context runAsNonRoot
##
containerSecurityContext:
enabled: true
runAsUser: 1001
allowPrivilegeEscalation: false
capabilities:
drop:
- all
readOnlyRootFilesystem: false
runAsNonRoot: true
## @param exporter.affinity Affinity for Penpot pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## @param exporter.nodeSelector Node labels for Penpot pods assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## @param exporter.tolerations Tolerations for Penpot pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Penpot exporter resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## @param exporter.resources.limits The resources limits for the Penpot exporter containers
## @param exporter.resources.requests The requested resources for the Penpot exporter containers
##
resources:
limits: {}
requests: {}
## @section Ingress parameters
## @param frontend.ingress.enabled Enable ingress record generation for Penpot frontend.
## @param frontend.ingress.annotations Mapped annotations for the frontend ingress.
## @param frontend.ingress.hosts Array style hosts for the frontend ingress.
## @param frontend.ingress.tls Array style TLS secrets for the frontend ingress.
##
ingress:
enabled: false
## E.g.
## annotations:
## kubernetes.io/ingress.class: nginx
## kubernetes.io/tls-acme: "true"
##
annotations:
{}
## E.g.
## hosts:
## - host: penpot-example.local
hosts: []
## E.g.
## - secretName: chart-example-tls
## hosts:
## - chart-example.local
tls: []
## @section Persistence parameters
## Penpot persistence
##
persistence:
## @param persistence.enabled Enable persistence using Persistent Volume Claims.
##
enabled: false
## @param persistence.storageClass Persistent Volume storage class.
## If defined, storageClassName: <storageClass>.
## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner.
##
storageClass: ""
## @param persistence.size Persistent Volume size.
##
size: 8Gi
## @param persistence.existingClaim The name of an existing PVC to use for persistence.
##
existingClaim: ""
## @param persistence.accessModes Persistent Volume access modes.
##
accessModes:
- ReadWriteOnce
## @param persistence.annotations Persistent Volume Claim annotations.
##
annotations: {}
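## E.g. a minimal sketch enabling persistence with an existing claim (the claim name is illustrative):
## persistence:
##   enabled: true
##   existingClaim: "penpot-data"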
## @section Configuration parameters
## Penpot configuration
##
config:
## @param config.publicURI The public domain to serve Penpot on. Set `disable-secure-session-cookies` in the flags if you plan on serving it on a non-HTTPS domain.
## @param config.flags The feature flags to enable. Check [the official docs](https://help.penpot.app/technical-guide/configuration/) for more info.
## @param config.apiSecretKey A random secret key needed for persistent user sessions. Generate with `openssl rand -hex 16` for example.
##
publicURI: "http://localhost:8080"
flags: "enable-registration enable-login disable-demo-users disable-demo-warning"
apiSecretKey:
existingSecretName: ""
existingSecretKey: ""
## @param config.postgresql.host The PostgreSQL host to connect to.
## @param config.postgresql.port The PostgreSQL host port to use.
## @param config.postgresql.database The PostgreSQL database to use.
## @param config.postgresql.username The database username to use.
## @param config.postgresql.password The database password to use.
## @param config.postgresql.existingSecret The name of an existing secret.
## @param config.postgresql.secretKeys.usernameKey The username key to use from an existing secret.
## @param config.postgresql.secretKeys.passwordKey The password key to use from an existing secret.
##
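## E.g. a sketch pointing Penpot at an existing database credentials secret (names are illustrative):
## postgresql:
##   host: "postgresql.penpot.svc.cluster.local"
##   port: 5432
##   database: "penpot"
##   existingSecret: "penpot-postgresql"
##   secretKeys:
##     usernameKey: "username"
##     passwordKey: "password"
##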
postgresql:
host: "postgresql.penpot.svc.cluster.local"
port: 5432
username: ""
password: ""
database: ""
existingSecret: ""
secretKeys:
usernameKey: ""
passwordKey: ""
## @param config.redis.host The Redis host to connect to.
## @param config.redis.port The Redis host port to use.
## @param config.redis.database The Redis database to connect to.
##
redis:
host: "redis-headless.penpot.svc.cluster.local"
port: 6379
database: "0"
## @param config.assets.storageBackend The storage backend for assets to use. Use `assets-fs` for filesystem, and `assets-s3` for S3.
## @param config.assets.filesystem.directory The storage directory to use if you chose the filesystem storage backend.
## @param config.assets.s3.accessKeyID The S3 access key ID to use if you chose the S3 storage backend.
## @param config.assets.s3.secretAccessKey The S3 secret access key to use if you chose the S3 storage backend.
## @param config.assets.s3.region The S3 region to use if you chose the S3 storage backend.
## @param config.assets.s3.bucket The name of the S3 bucket to use if you chose the S3 storage backend.
## @param config.assets.s3.endpointURI The S3 endpoint URI to use if you chose the S3 storage backend.
## @param config.assets.s3.existingSecret The name of an existing secret.
## @param config.assets.s3.secretKeys.accessKeyIDKey The S3 access key ID to use from an existing secret.
## @param config.assets.s3.secretKeys.secretAccessKey The S3 secret access key to use from an existing secret.
## @param config.assets.s3.secretKeys.endpointURIKey The S3 endpoint URI to use from an existing secret.
##
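## E.g. a sketch for the S3 backend with credentials taken from an existing secret (names are illustrative):
## assets:
##   storageBackend: "assets-s3"
##   s3:
##     region: "us-east-1"
##     bucket: "penpot-assets"
##     existingSecret: "penpot-s3"
##     secretKeys:
##       accessKeyIDKey: "access-key-id"
##       secretAccessKey: "secret-access-key"
##       endpointURIKey: "endpoint-uri"
##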
assets:
storageBackend: "assets-fs"
filesystem:
directory: "/opt/data/assets"
s3:
accessKeyID: ""
secretAccessKey: ""
region: ""
bucket: ""
endpointURI: ""
existingSecret: ""
secretKeys:
accessKeyIDKey: ""
secretAccessKey: ""
endpointURIKey: ""
## @param config.telemetryEnabled Whether to enable sending of anonymous telemetry data.
##
telemetryEnabled: true
## @param config.smtp.enabled Whether to enable SMTP configuration. You also need to add the 'enable-smtp' flag to the PENPOT_FLAGS variable.
## @param config.smtp.defaultFrom The SMTP default email to send from.
## @param config.smtp.defaultReplyTo The SMTP default email to reply to.
## @param config.smtp.host The SMTP host to use.
## @param config.smtp.port The SMTP host port to use.
## @param config.smtp.username The SMTP username to use.
## @param config.smtp.password The SMTP password to use.
## @param config.smtp.tls Whether to use TLS for the SMTP connection.
## @param config.smtp.ssl Whether to use SSL for the SMTP connection.
## @param config.smtp.existingSecret The name of an existing secret.
## @param config.smtp.secretKeys.usernameKey The SMTP username to use from an existing secret.
## @param config.smtp.secretKeys.passwordKey The SMTP password to use from an existing secret.
##
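## E.g. a sketch of an SMTP setup with credentials from an existing secret (values are illustrative; remember to add `enable-smtp` to the flags):
## smtp:
##   enabled: true
##   defaultFrom: "no-reply@example.com"
##   defaultReplyTo: "no-reply@example.com"
##   host: "smtp.example.com"
##   port: "587"
##   tls: true
##   existingSecret: "penpot-smtp"
##   secretKeys:
##     usernameKey: "username"
##     passwordKey: "password"
##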
smtp:
enabled: false
defaultFrom: ""
defaultReplyTo: ""
host: ""
port: ""
username: ""
password: ""
tls: true
ssl: false
existingSecret: ""
secretKeys:
usernameKey: ""
passwordKey: ""
## @param config.registrationDomainWhitelist Comma-separated list of domains allowed to register. Leave empty to allow all domains.
##
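## E.g. registrationDomainWhitelist: "example.com,example.org" (illustrative domains)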
registrationDomainWhitelist: ""
## Penpot Authentication providers parameters
##
providers:
## @param config.providers.google.enabled Whether to enable Google configuration. To enable Google auth, also add `enable-login-with-google` to the flags.
## @param config.providers.google.clientID The Google client ID to use.
## @param config.providers.google.clientSecret The Google client secret to use.
##
google:
enabled: false
clientID: ""
clientSecret: ""
## @param config.providers.github.enabled Whether to enable GitHub configuration. To enable GitHub auth, also add `enable-login-with-github` to the flags.
## @param config.providers.github.clientID The GitHub client ID to use.
## @param config.providers.github.clientSecret The GitHub client secret to use.
##
github:
enabled: false
clientID: ""
clientSecret: ""
## @param config.providers.gitlab.enabled Whether to enable GitLab configuration. To enable GitLab auth, also add `enable-login-with-gitlab` to the flags.
## @param config.providers.gitlab.baseURI The GitLab base URI to use.
## @param config.providers.gitlab.clientID The GitLab client ID to use.
## @param config.providers.gitlab.clientSecret The GitLab client secret to use.
##
gitlab:
enabled: false
baseURI: "https://gitlab.com"
clientID: ""
clientSecret: ""
## @param config.providers.oidc.enabled Whether to enable OIDC configuration. To enable OpenID Connect auth, also add `enable-login-with-oidc` to the flags.
## @param config.providers.oidc.baseURI The OpenID Connect base URI to use.
## @param config.providers.oidc.clientID The OpenID Connect client ID to use.
## @param config.providers.oidc.clientSecret The OpenID Connect client secret to use.
## @param config.providers.oidc.authURI Optional OpenID Connect auth URI to use. Auto discovered if not provided.
## @param config.providers.oidc.tokenURI Optional OpenID Connect token URI to use. Auto discovered if not provided.
## @param config.providers.oidc.userURI Optional OpenID Connect user URI to use. Auto discovered if not provided.
## @param config.providers.oidc.roles Optional OpenID Connect roles to use. If no role is provided, role checking is disabled.
## @param config.providers.oidc.rolesAttribute Optional OpenID Connect roles attribute to use. If not provided, role checking will be disabled.
## @param config.providers.oidc.scopes Optional OpenID Connect scopes to use. This setting allows overriding the required scopes; use with caution because Penpot requires at least the `name` and `email` attributes from the user info. Optional, defaults to `openid profile`.
## @param config.providers.oidc.nameAttribute Optional OpenID Connect name attribute to use. If not provided, the `name` prop will be used.
## @param config.providers.oidc.emailAttribute Optional OpenID Connect email attribute to use. If not provided, the `email` prop will be used.
##
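## E.g. a minimal OIDC sketch against a hypothetical issuer (values are illustrative; also add `enable-login-with-oidc` to the flags):
## oidc:
##   enabled: true
##   baseURI: "https://sso.example.com"
##   clientID: "penpot"
##   clientSecret: "change-me"
##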
oidc:
enabled: false
baseURI: ""
clientID: ""
clientSecret: ""
authURI: ""
tokenURI: ""
userURI: ""
roles: "role1 role2"
rolesAttribute: ""
scopes: "scope1 scope2"
nameAttribute: ""
emailAttribute: ""
## @param config.providers.ldap.enabled Whether to enable LDAP configuration. To enable LDAP, also add `enable-login-with-ldap` to the flags.
## @param config.providers.ldap.host The LDAP host to use.
## @param config.providers.ldap.port The LDAP port to use.
## @param config.providers.ldap.ssl Whether to use SSL for the LDAP connection.
## @param config.providers.ldap.startTLS Whether to utilize StartTLS for the LDAP connection.
## @param config.providers.ldap.baseDN The LDAP base DN to use.
## @param config.providers.ldap.bindDN The LDAP bind DN to use.
## @param config.providers.ldap.bindPassword The LDAP bind password to use.
## @param config.providers.ldap.attributesUsername The LDAP username attribute to use.
## @param config.providers.ldap.attributesEmail The LDAP email attribute to use.
## @param config.providers.ldap.attributesFullname The LDAP full name attribute to use.
## @param config.providers.ldap.attributesPhoto The LDAP photo attribute to use.
##
ldap:
enabled: false
host: "ldap"
port: 10389
ssl: false
startTLS: false
baseDN: "ou=people,dc=planetexpress,dc=com"
bindDN: "cn=admin,dc=planetexpress,dc=com"
bindPassword: "GoodNewsEveryone"
attributesUsername: "uid"
attributesEmail: "mail"
attributesFullname: "cn"
attributesPhoto: "jpegPhoto"
## @param config.providers.existingSecret The name of an existing secret to use.
## @param config.providers.secretKeys.googleClientIDKey The Google client ID key to use from an existing secret.
## @param config.providers.secretKeys.googleClientSecretKey The Google client secret key to use from an existing secret.
## @param config.providers.secretKeys.githubClientIDKey The GitHub client ID key to use from an existing secret.
## @param config.providers.secretKeys.githubClientSecretKey The GitHub client secret key to use from an existing secret.
## @param config.providers.secretKeys.gitlabClientIDKey The GitLab client ID key to use from an existing secret.
## @param config.providers.secretKeys.gitlabClientSecretKey The GitLab client secret key to use from an existing secret.
## @param config.providers.secretKeys.oidcClientIDKey The OpenID Connect client ID key to use from an existing secret.
## @param config.providers.secretKeys.oidcClientSecretKey The OpenID Connect client secret key to use from an existing secret.
##
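## E.g. a sketch reading the OIDC client credentials from an existing secret (secret and key names are illustrative):
## existingSecret: "penpot-oauth"
## secretKeys:
##   oidcClientIDKey: "client-id"
##   oidcClientSecretKey: "client-secret"
##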
existingSecret: ""
secretKeys:
googleClientIDKey: ""
googleClientSecretKey: ""
githubClientIDKey: ""
githubClientSecretKey: ""
gitlabClientIDKey: ""
gitlabClientSecretKey: ""
oidcClientIDKey: ""
oidcClientSecretKey: ""

View File

@@ -1,6 +1,6 @@
apiVersion: v2
name: postgres-cluster
version: 2.2.0
version: 3.1.0
description: Chart for cloudnative-pg cluster
keywords:
- database

View File

@@ -3,7 +3,7 @@
backup:
retentionPolicy: {{ .Values.backup.retentionPolicy }}
barmanObjectStore:
destinationPath: "s3://{{ .Values.backup.endpointBucket }}/{{ .Values.kubernetesClusterName }}/postgresql/{{ include "cluster.name" . }}"
destinationPath: {{ .Values.backup.destinationPath }}
endpointURL: {{ .Values.backup.endpointURL }}
{{- if .Values.backup.endpointCA }}
endpointCA:

View File

@@ -26,20 +26,20 @@ bootstrap:
import:
type: {{ .Values.replica.importType }}
databases:
{{- if and (len .Values.replica.importDatabases gt 1) (.Values.replica.importType eq "microservice") }}
{{- if and (gt (len .Values.replica.importDatabases) 1) (eq .Values.replica.importType "microservice") }}
{{ fail "Too many databases in import type of microservice!" }}
{{- else}}
{{- with .Values.replica.importDatabases }}
{{- . | toYaml | nindent 8 }}
{{- end }}
{{- end }}
{{- if .Values.replica.importType eq "monolith" }}
{{- if eq .Values.replica.importType "monolith" }}
roles:
{{- with .Values.replica.importRoles }}
{{- . | toYaml | nindent 8 }}
{{- end }}
{{- end }}
{{- if and (.Values.replica.postImportApplicationSQL) (.Values.replica.importType eq "microservice") }}
{{- if and (.Values.replica.postImportApplicationSQL) (eq .Values.replica.importType "microservice") }}
postImportApplicationSQL:
{{- with .Values.replica.postImportApplicationSQL }}
{{- . | toYaml | nindent 8 }}
@@ -58,19 +58,18 @@ externalClusters:
recoveryTarget:
targetTime: {{ . }}
{{- end }}
source: "{{ include "cluster.name" . }}-backup-{{ .Values.recovery.recoveryIndex }}"
source: {{ include "cluster.recoveryServerName" . }}
externalClusters:
- name: "{{ include "cluster.name" . }}-backup-{{ .Values.recovery.recoveryIndex }}"
- name: {{ include "cluster.recoveryServerName" . }}
barmanObjectStore:
serverName: "{{ include "cluster.name" . }}-backup-{{ .Values.recovery.recoveryIndex }}"
destinationPath: "s3://{{ .Values.recovery.endpointBucket }}/{{ .Values.kubernetesClusterName }}/postgresql/{{ .Values.recovery.recoveryName }}"
serverName: {{ include "cluster.recoveryServerName" . }}
destinationPath: {{ .Values.recovery.destinationPath }}
endpointURL: {{ .Values.recovery.endpointURL }}
{{- with .Values.recovery.endpointCA }}
endpointCA:
name: {{ . }}
key: ca-bundle.crt
{{- end }}
serverName: "{{ include "cluster.name" . }}-backup-{{ .Values.recovery.recoveryIndex }}"
s3Credentials:
accessKeyId:
name: {{ include "cluster.recoveryCredentials" . }}

View File

@@ -5,7 +5,7 @@ Expand the name of the chart.
{{- if .Values.nameOverride }}
{{- .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "postgresql-%s-%s" .Values.cluster.image.majorVersion .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- printf "%s-postgresql-%s" .Release.Name ((semver .Values.cluster.image.tag).Major | toString) | trunc 63 | trimSuffix "-" -}}
{{- end }}
{{- end }}
@@ -57,11 +57,34 @@ Generate name for object store credentials
{{- end }}
{{/*
Generate recovery server name
Generate backup server name
*/}}
{{- define "cluster.recoveryName" -}}
{{- if .Values.recovery.recoveryName -}}
{{- .Values.recovery.recoveryName -}}
{{- define "cluster.backupName" -}}
{{- if .Values.backup.backupName -}}
{{- .Values.backup.backupName -}}
{{- else -}}
{{ include "cluster.name" . }}
{{- end }}
{{- end }}
{{/*
Generate recovery server name
*/}}
{{- define "cluster.recoveryServerName" -}}
{{- if .Values.recovery.recoveryServerName -}}
{{- .Values.recovery.recoveryServerName -}}
{{- else -}}
{{- printf "%s-backup-%s" (include "cluster.name" .) (toString .Values.recovery.recoveryIndex) | trunc 63 | trimSuffix "-" -}}
{{- end }}
{{- end }}
{{/*
Generate recovery instance name
*/}}
{{- define "cluster.recoveryInstanceName" -}}
{{- if .Values.recovery.recoveryInstanceName -}}
{{- .Values.recovery.recoveryInstanceName -}}
{{- else -}}
{{ include "cluster.name" . }}
{{- end }}

View File

@@ -15,16 +15,12 @@ type: postgresql
# * `replica` - Create database as a replica from another CNPG cluster
mode: standalone
# Generates bucket name and path for recovery and backup, creates: <endpointBucket>/<clusterName>/postgresql/{{ .Release.Name }}
kubernetesClusterName: ""
cluster:
instances: 3
image:
repository: ghcr.io/cloudnative-pg/postgresql
tag: "16.2"
majorVersion: "16"
tag: "16.3"
pullPolicy: IfNotPresent
# The UID and GID of the postgres user inside the image
@@ -44,7 +40,7 @@ cluster:
cpu: 10m
limits:
memory: 1Gi
cpu: 100m
cpu: 800m
hugepages-2Mi: 256Mi
# See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-AffinityConfiguration
@@ -85,7 +81,8 @@ cluster:
# BootstrapInitDB is the configuration of the bootstrap process when initdb is used.
# See: https://cloudnative-pg.io/documentation/current/bootstrap/
# See: https://cloudnative-pg.io/documentation/current/cloudnative-pg.v1/#postgresql-cnpg-io-v1-bootstrapinitdb
initdb: {}
initdb:
{}
# database: app
# owner: app
# secret: "" # Name of the secret containing the initial credentials for the owner of the user database. If empty a new secret will be created from scratch
@@ -97,10 +94,9 @@ recovery:
pitrTarget:
time: ""
# Overrides the provider specific default endpoint. Defaults to:
# S3: https://s3.<region>.amazonaws.com"
# S3 https endpoint and the s3:// path
endpointURL: ""
endpointBucket: ""
destinationPath: ""
# Specifies secret that contains a CA bundle to validate a privately signed certificate, should contain the key ca-bundle.crt
endpointCA: ""
@@ -108,11 +104,14 @@ recovery:
# Specifies secret that contains S3 credentials, should contain the keys ACCESS_KEY_ID and ACCESS_SECRET_KEY
endpointCredentials: ""
# Generate external cluster name, uses: postgresql-{{ .Release.Name }}-cluster-backup-index-{{ .Values.recovery.recoveryIndex }}"
# Generate external cluster name, uses: {{ .Release.Name }}-postgresql-<major version>-cluster-backup-index-{{ .Values.recovery.recoveryIndex }}
recoveryIndex: 1
# Name of the recovery cluster in the object store, defaults to "cluster.name"
recoveryName: ""
recoveryServerName: ""
# Name of the recovery instance in the object store, defaults to "cluster.name"
recoveryInstanceName: ""
wal:
# WAL compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
@@ -138,7 +137,7 @@ replica:
# If type microservice only one database is allowed, default is app as standard in cnpg clusters
importDatabases:
- app
# If type microservice no roles are imported and ignored
importRoles: []
@@ -158,9 +157,11 @@ replica:
backup:
enabled: false
# Overrides the provider specific default endpoint
# S3 endpoint starting with "https://"
endpointURL: ""
endpointBucket: ""
# S3 path starting with "s3://"
destinationPath: ""
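# e.g. destinationPath: "s3://backups/my-cluster/postgresql" (illustrative path)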
# Specifies secret that contains a CA bundle to validate a privately signed certificate, should contain the key ca-bundle.crt
endpointCA: ""
@@ -171,6 +172,9 @@ backup:
# Generate external cluster name, creates: postgresql-{{ .Release.Name }}-cluster-backup-index-{{ .Values.backup.backupIndex }}
backupIndex: 1
# Name of the backup cluster in the object store, defaults to "cluster.name"
backupName: ""
wal:
# WAL compression method. One of `` (for no compression), `gzip`, `bzip2` or `snappy`.
compression: snappy

View File

@@ -1,6 +1,6 @@
apiVersion: v2
name: taiga
version: 0.1.3
version: 0.2.3
description: Chart for Taiga
keywords:
- kanban
@@ -14,11 +14,11 @@ maintainers:
icon: https://avatars.githubusercontent.com/u/6905422?s=200&v=4
dependencies:
- name: rabbitmq
version: 13.0.3
version: 14.1.5
repository: https://charts.bitnami.com/bitnami
alias: taiga-async-rabbitmq
alias: async-rabbitmq
- name: rabbitmq
version: 13.0.3
version: 14.1.5
repository: https://charts.bitnami.com/bitnami
alias: taiga-events-rabbitmq
alias: events-rabbitmq
appVersion: 6.7.7

View File

@@ -213,7 +213,7 @@ spec:
value: "False"
{{ end }}
{{ if .Values.trelloImporter }}
{{ if .Values.trelloImporter.enabled }}
- name: ENABLE_TRELLO_IMPORTER
value: "True"
- name: TRELLO_IMPORTER_API_KEY
@@ -232,12 +232,12 @@ spec:
{{ end }}
- name: RABBITMQ_USER
value: "{{ index .Values "taiga-async-rabbitmq" "auth" "username" }}"
value: "{{ index .Values "async-rabbitmq" "auth" "username" }}"
- name: RABBITMQ_PASS
valueFrom:
secretKeyRef:
name: {{ index .Values "taiga-async-rabbitmq" "auth" "existingPasswordSecret" }}
key: {{ index .Values "taiga-async-rabbitmq" "auth" "existingSecretPasswordKey" }}
name: {{ index .Values "async-rabbitmq" "auth" "existingPasswordSecret" }}
key: {{ index .Values "async-rabbitmq" "auth" "existingSecretPasswordKey" }}
{{ if .Values.ingress.enabled }}
- name: TAIGA_SITES_DOMAIN
@@ -437,7 +437,7 @@ spec:
value: "False"
{{ end }}
{{ if .Values.trelloImporter }}
{{ if .Values.trelloImporter.enabled }}
- name: ENABLE_TRELLO_IMPORTER
value: "True"
- name: TRELLO_IMPORTER_API_KEY
@@ -456,12 +456,12 @@ spec:
{{ end }}
- name: RABBITMQ_USER
value: "{{ index .Values "taiga-async-rabbitmq" "auth" "username" }}"
value: "{{ index .Values "async-rabbitmq" "auth" "username" }}"
- name: RABBITMQ_PASS
valueFrom:
secretKeyRef:
name: {{ index .Values "taiga-async-rabbitmq" "auth" "existingPasswordSecret" }}
key: {{ index .Values "taiga-async-rabbitmq" "auth" "existingSecretPasswordKey" }}
name: {{ index .Values "async-rabbitmq" "auth" "existingPasswordSecret" }}
key: {{ index .Values "async-rabbitmq" "auth" "existingSecretPasswordKey" }}
{{ if .Values.ingress.enabled }}
- name: TAIGA_SITES_DOMAIN

View File

@@ -46,51 +46,56 @@ spec:
securityContext:
{{- with .Values.events.securityContext }}
{{ toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ template "taiga.fullname" . }}-events
image: "{{ .Values.events.image.repository }}:{{ .Values.events.image.tag }}"
imagePullPolicy: {{ .Values.events.image.pullPolicy }}
resources:
{{ toYaml .Values.events.resources | nindent 12 }}
ports:
- name: taiga-events
containerPort: {{ .Values.events.service.port }}
protocol: TCP
env:
- name: TAIGA_SECRET_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.secretKey.existingSecretName }}"
key: "{{ .Values.secretKey.existingSecretKey }}"
- name: RABBITMQ_USER
value: "{{ index .Values "taiga-events-rabbitmq" "auth" "username" }}"
- name: RABBITMQ_PASS
valueFrom:
secretKeyRef:
name: {{ index .Values "taiga-events-rabbitmq" "auth" "existingPasswordSecret" }}
key: {{ index .Values "taiga-events-rabbitmq" "auth" "existingSecretPasswordKey" }}
{{- end }}
containers:
- name: {{ template "taiga.fullname" . }}-events
image: "{{ .Values.events.image.repository }}:{{ .Values.events.image.tag }}"
imagePullPolicy: {{ .Values.events.image.pullPolicy }}
resources:
{{ toYaml .Values.events.resources | nindent 12 }}
ports:
- name: taiga-events
containerPort: {{ .Values.events.service.http.port }}
protocol: TCP
- name: taiga-app
containerPort: {{ .Values.events.service.app.port }}
protocol: TCP
env:
- name: TAIGA_SECRET_KEY
valueFrom:
secretKeyRef:
name: "{{ .Values.secretKey.existingSecretName }}"
key: "{{ .Values.secretKey.existingSecretKey }}"
- name: RABBITMQ_USER
value: "{{ index .Values "events-rabbitmq" "auth" "username" }}"
- name: RABBITMQ_PASS
valueFrom:
secretKeyRef:
name: {{ index .Values "events-rabbitmq" "auth" "existingPasswordSecret" }}
key: {{ index .Values "events-rabbitmq" "auth" "existingSecretPasswordKey" }}
- name: APP_PORT
value: "{{ .Values.events.service.app.port }}"
{{- if .Values.events.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: /admin/login/
port: {{ .Values.events.service.port }}
initialDelaySeconds: {{ .Values.events.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.events.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.events.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.events.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.events.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.events.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: /healthz
port: {{ .Values.events.service.app.port }}
initialDelaySeconds: {{ .Values.events.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.events.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.events.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.events.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.events.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.events.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /admin/login/
port: {{ .Values.events.service.port }}
initialDelaySeconds: {{ .Values.events.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.events.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.events.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.events.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.events.readinessProbe.failureThreshold }}
{{- end }}
{{- if .Values.events.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /healthz
port: {{ .Values.events.service.app.port }}
initialDelaySeconds: {{ .Values.events.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.events.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.events.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.events.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.events.readinessProbe.failureThreshold }}
{{- end }}

View File

@@ -72,6 +72,8 @@ spec:
value: "false"
- name: ENABLE_GITLAB_AUTH
value: "false"
- name: ENABLE_OIDC
value: "{{ .Values.oidc.enabled }}"
- name: ENABLE_SLACK
value: "{{ .Values.enableSlack }}"
- name: ENABLE_GITHUB_IMPORTER

View File

@@ -40,7 +40,7 @@ spec:
name: "{{ template "taiga.fullname" . }}-back"
port:
name: taiga-back
pathType: ImplementationSpecific
pathType: ImplementationSpecific
- path: /admin
backend:
service:
@@ -48,6 +48,15 @@ spec:
port:
name: taiga-back
pathType: ImplementationSpecific
{{ if .Values.oidc.enabled }}
- path: /oidc
backend:
service:
name: "{{ template "taiga.fullname" . }}-back"
port:
name: taiga-back
pathType: ImplementationSpecific
{{- end }}
- path: /events
backend:
service:

View File

@@ -55,10 +55,14 @@ metadata:
spec:
type: {{ .Values.events.service.type }}
ports:
- port: {{ .Values.events.service.port }}
- port: {{ .Values.events.service.http.port }}
targetPort: taiga-events
protocol: TCP
name: taiga-events
- port: {{ .Values.events.service.app.port }}
targetPort: taiga-app
protocol: TCP
name: taiga-app
selector:
{{- include "taiga.events.matchLabels" . | nindent 4 }}
{{- with .Values.events.service.extraSelectorLabels }}

View File

@@ -82,15 +82,15 @@ postgresql:
oidc:
enabled: false
existingSecretName: ""
scopesKey: "" # "openid profile email"
signatureAlgorithmKey: "" # "RS256"
clientIdKey: "" # <generate from auth provider>
clientSecretKey: "" # <generate from auth provider>
baseUrlKey: "" # "https://id.fedoraproject.org/openidc"
jwksEndpointKey: "" # "https://id.fedoraproject.org/openidc/Jwks"
authorizationEndpointKey: "" # "https://id.fedoraproject.org/openidc/Authorization"
tokenEndpointKey: "" # "https://id.fedoraproject.org/openidc/Token"
userEndpointKey: "" # "https://id.fedoraproject.org/openidc/UserInfo"
scopesKey: "" # "openid profile email"
signatureAlgorithmKey: "" # "RS256"
clientIdKey: "" # <generate from auth provider>
clientSecretKey: "" # <generate from auth provider>
baseUrlKey: "" # "https://id.fedoraproject.org/openidc"
jwksEndpointKey: "" # "https://id.fedoraproject.org/openidc/Jwks"
authorizationEndpointKey: "" # "https://id.fedoraproject.org/openidc/Authorization"
tokenEndpointKey: "" # "https://id.fedoraproject.org/openidc/Token"
userEndpointKey: "" # "https://id.fedoraproject.org/openidc/UserInfo"
## SMTP mail delivery configuration
## ref: https://taigaio.github.io/taiga-doc/dist/setup-production.html
@@ -347,7 +347,7 @@ async:
## Async Rabbitmq
## https://artifacthub.io/packages/helm/bitnami/rabbitmq?modal=values-schema
##
taiga-async-rabbitmq:
async-rabbitmq:
auth:
## @param auth.username RabbitMQ application username
## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables
@@ -487,13 +487,18 @@ events:
# -- Allow adding additional match labels
extraSelectorLabels: {}
# -- HTTP port number
port: 8888
http:
# -- HTTP port number
port: 8888
app:
# -- HTTP port number
port: 3023
## Events Rabbitmq
## https://artifacthub.io/packages/helm/bitnami/rabbitmq?modal=values-schema
##
taiga-events-rabbitmq:
events-rabbitmq:
auth:
## @param auth.username RabbitMQ application username
## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables

View File

@@ -1,6 +1,6 @@
apiVersion: v2
name: tdarr
version: 0.0.5
version: 0.0.6
description: Chart for Tdarr V2
keywords:
- video
@@ -15,4 +15,4 @@ dependencies:
- name: tdarr-exporter
version: 1.1.1
repository: https://homeylab.github.io/helm-charts/
appVersion: "2.17.01"
appVersion: "2.18.02"

View File

@@ -21,7 +21,7 @@ server:
strategy: Recreate
image:
repository: ghcr.io/haveagitgat/tdarr
tag: "2.17.01"
tag: "2.18.02"
pullPolicy: IfNotPresent
env: []
envFrom: []
@@ -58,7 +58,7 @@ node:
strategy: Recreate
image:
repository: ghcr.io/haveagitgat/tdarr_node
tag: "2.17.01"
tag: "2.18.02"
pullPolicy: IfNotPresent
env: []
envFrom: []

View File

@@ -1,6 +1,6 @@
apiVersion: v2
name: tubearchivist-to-jellyfin
version: 0.0.4
version: 0.1.0
description: Import library from tubearchivist to jellyfin
keywords:
- tubearchivist
@@ -11,4 +11,4 @@ sources:
maintainers:
- name: alexlebens
icon: https://avatars.githubusercontent.com/u/102734415?s=48&v=4
appVersion: "v0.1.2"
appVersion: "v0.2.0"

View File

@@ -2,7 +2,7 @@ job:
schedule: "0 * * * *"
image:
repository: bbilly1/tubearchivist-jf
tag: v0.1.2
tag: v0.2.0
pullPolicy: IfNotPresent
persistence:
youtube:

View File

@@ -1,6 +1,6 @@
apiVersion: v2
name: tubearchivist
version: 0.2.0
version: 0.2.9
description: Chart for Tube Archivist
keywords:
- download
@@ -14,9 +14,9 @@ maintainers:
icon: https://avatars.githubusercontent.com/u/102734415?s=48&v=4
dependencies:
- name: redis
version: 19.1.0
version: 19.3.4
repository: https://charts.bitnami.com/bitnami
- name: elasticsearch
version: 20.0.4
version: 21.0.6
repository: https://charts.bitnami.com/bitnami
appVersion: v0.4.7

View File

@@ -20,18 +20,18 @@ service:
port: 8000
ingress:
enabled: false
className:
annotations:
host:
className: ""
annotations: ""
host: ""
persistence:
cache:
enabled: false
storageClassName: default
storageClassName: ""
storageSize: 5Gi
accessMode: ReadWriteOnce
volumeMode: Filesystem
youtube:
claimName:
claimName: ""
redis:
image:
repository: redis/redis-stack-server
@@ -48,17 +48,17 @@ redis:
loadmodule /opt/redis-stack/lib/rejson.so
elasticsearch:
global:
storageClass: default
storageClass: ""
extraEnvVars:
- name: "discovery.type"
value: "single-node"
- name: xpack.security.enabled
value: "true"
extraEnvVarsSecret:
extraEnvVarsSecret: []
extraConfig:
path:
repo: /usr/share/elasticsearch/data/snapshot
extraVolumes:
extraVolumes: []
extraVolumeMounts:
- name: snapshot
mountPath: /usr/share/elasticsearch/data/snapshot