chore: Update manifests after change

commit 23158129ff
parent c819ea152f
2026-03-24 22:03:37 +00:00
63 changed files with 2 additions and 3019 deletions


@@ -0,0 +1 @@


@@ -1,66 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: harbor-postgresql-18-cluster
namespace: harbor
labels:
app.kubernetes.io/name: harbor-postgresql-18-cluster
helm.sh/chart: postgres-18-cluster-7.10.0
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "7.10.0"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:18.3-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "harbor-postgresql-18-backup-garage-local"
serverName: "harbor-postgresql-18-backup-2"
bootstrap:
recovery:
database: app
source: harbor-postgresql-18-backup-2
externalClusters:
- name: harbor-postgresql-18-backup-2
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "harbor-postgresql-18-recovery"
serverName: harbor-postgresql-18-backup-2


@@ -1,62 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: harbor-core
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
data:
app.conf: |
appname = Harbor
runmode = prod
enablegzip = true
[prod]
httpport = 8080
PORT: "8080"
DATABASE_TYPE: "postgresql"
POSTGRESQL_HOST: "harbor-postgresql-18-cluster-rw"
POSTGRESQL_PORT: "5432"
POSTGRESQL_USERNAME: "app"
POSTGRESQL_DATABASE: "app"
POSTGRESQL_SSLMODE: "disable"
POSTGRESQL_MAX_IDLE_CONNS: "100"
POSTGRESQL_MAX_OPEN_CONNS: "900"
EXT_ENDPOINT: "https://harbor.alexlebens.net"
CORE_URL: "http://harbor-core:80"
JOBSERVICE_URL: "http://harbor-jobservice"
REGISTRY_URL: "http://harbor-registry:5000"
TOKEN_SERVICE_URL: "http://harbor-core:80/service/token"
CORE_LOCAL_URL: "http://127.0.0.1:8080"
WITH_TRIVY: "true"
TRIVY_ADAPTER_URL: "http://harbor-trivy:8080"
REGISTRY_STORAGE_PROVIDER_NAME: "filesystem"
LOG_LEVEL: "info"
CONFIG_PATH: "/etc/core/app.conf"
CHART_CACHE_DRIVER: "redis"
_REDIS_URL_CORE: "redis://harbor-valkey.harbor:6379/0?idle_timeout_seconds=30"
_REDIS_URL_REG: "redis://harbor-valkey.harbor:6379/2?idle_timeout_seconds=30"
PORTAL_URL: "http://harbor-portal"
REGISTRY_CONTROLLER_URL: "http://harbor-registry:8080"
REGISTRY_CREDENTIAL_USERNAME: "harbor_registry_user"
HTTP_PROXY: ""
HTTPS_PROXY: ""
NO_PROXY: "harbor-core,harbor-jobservice,harbor-database,harbor-registry,harbor-portal,harbor-trivy,harbor-exporter,127.0.0.1,localhost,.local,.internal"
PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE: "docker-hub,harbor,azure-acr,ali-acr,aws-ecr,google-gcr,docker-registry,github-ghcr,jfrog-artifactory"
REPLICATION_ADAPTER_WHITELIST: "ali-acr,aws-ecr,azure-acr,docker-hub,docker-registry,github-ghcr,google-gcr,harbor,huawei-SWR,jfrog-artifactory,tencent-tcr,volcengine-cr"
METRIC_ENABLE: "true"
METRIC_PATH: "/metrics"
METRIC_PORT: "8001"
METRIC_NAMESPACE: harbor
METRIC_SUBSYSTEM: core
CACHE_ENABLED: "true"
CACHE_EXPIRE_HOURS: "24"
QUOTA_UPDATE_PROVIDER: "db"


@@ -1,40 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: "harbor-exporter-env"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
data:
HTTP_PROXY: ""
HTTPS_PROXY: ""
NO_PROXY: "harbor-core,harbor-jobservice,harbor-database,harbor-registry,harbor-portal,harbor-trivy,harbor-exporter,127.0.0.1,localhost,.local,.internal"
LOG_LEVEL: "info"
HARBOR_EXPORTER_PORT: "8001"
HARBOR_EXPORTER_METRICS_PATH: "/metrics"
HARBOR_EXPORTER_METRICS_ENABLED: "true"
HARBOR_EXPORTER_CACHE_TIME: "23"
HARBOR_EXPORTER_CACHE_CLEAN_INTERVAL: "14400"
HARBOR_METRIC_NAMESPACE: harbor
HARBOR_METRIC_SUBSYSTEM: exporter
HARBOR_REDIS_URL: "redis://harbor-valkey.harbor:6379/1"
HARBOR_REDIS_NAMESPACE: harbor_job_service_namespace
HARBOR_REDIS_TIMEOUT: "3600"
HARBOR_SERVICE_SCHEME: "http"
HARBOR_SERVICE_HOST: "harbor-core"
HARBOR_SERVICE_PORT: "80"
HARBOR_DATABASE_HOST: "harbor-postgresql-18-cluster-rw"
HARBOR_DATABASE_PORT: "5432"
HARBOR_DATABASE_USERNAME: "app"
HARBOR_DATABASE_DBNAME: "app"
HARBOR_DATABASE_SSLMODE: "disable"
HARBOR_DATABASE_MAX_IDLE_CONNS: "100"
HARBOR_DATABASE_MAX_OPEN_CONNS: "900"


@@ -1,32 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: "harbor-jobservice-env"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
data:
CORE_URL: "http://harbor-core:80"
TOKEN_SERVICE_URL: "http://harbor-core:80/service/token"
REGISTRY_URL: "http://harbor-registry:5000"
REGISTRY_CONTROLLER_URL: "http://harbor-registry:8080"
REGISTRY_CREDENTIAL_USERNAME: "harbor_registry_user"
JOBSERVICE_WEBHOOK_JOB_MAX_RETRY: "3"
JOBSERVICE_WEBHOOK_JOB_HTTP_CLIENT_TIMEOUT: "3"
LOG_LEVEL: "info"
HTTP_PROXY: ""
HTTPS_PROXY: ""
NO_PROXY: "harbor-core,harbor-jobservice,harbor-database,harbor-registry,harbor-portal,harbor-trivy,harbor-exporter,127.0.0.1,localhost,.local,.internal"
METRIC_NAMESPACE: harbor
METRIC_SUBSYSTEM: jobservice
_REDIS_URL_CORE: "redis://harbor-valkey.harbor:6379/0?idle_timeout_seconds=30"
CACHE_ENABLED: "true"
CACHE_EXPIRE_HOURS: "24"


@@ -1,43 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: "harbor-jobservice"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
data:
config.yml: |
# Server listening port
protocol: "http"
port: 8080
worker_pool:
workers: 10
backend: "redis"
redis_pool:
redis_url: "redis://harbor-valkey.harbor:6379/1"
namespace: "harbor_job_service_namespace"
idle_timeout_second: 3600
job_loggers:
- name: "STD_OUTPUT"
level: INFO
metric:
enabled: true
path: /metrics
port: 8001
# Loggers for the job service
loggers:
- name: "STD_OUTPUT"
level: INFO
reaper:
# the max time to wait for a task to finish; if unfinished after max_update_hours, the task will be marked as error, but will continue to run (default: 24)
max_update_hours: 24
# the max time for an execution in running state without new tasks created
max_dangling_hours: 168


@@ -1,49 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: "harbor-portal"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
data:
nginx.conf: |
worker_processes auto;
pid /tmp/nginx.pid;
events {
worker_connections 1024;
}
http {
client_body_temp_path /tmp/client_body_temp;
proxy_temp_path /tmp/proxy_temp;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
server {
listen 8080;
server_name localhost;
root /usr/share/nginx/html;
index index.html index.htm;
include /etc/nginx/mime.types;
gzip on;
gzip_min_length 1000;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript;
location /devcenter-api-2.0 {
try_files $uri $uri/ /swagger-ui-index.html;
}
location / {
try_files $uri $uri/ /index.html;
}
location = /index.html {
add_header Cache-Control "no-store, no-cache, must-revalidate";
}
}
}


@@ -1,73 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: "harbor-registry"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
data:
config.yml: |
version: 0.1
log:
level: info
fields:
service: registry
storage:
filesystem:
rootdirectory: /storage
cache:
layerinfo: redis
maintenance:
uploadpurging:
enabled: true
age: 72h
interval: 24h
dryrun: false
delete:
enabled: true
redirect:
disable: false
redis:
addr: harbor-valkey.harbor:6379
db: 2
readtimeout: 10s
writetimeout: 10s
dialtimeout: 10s
enableTLS: false
pool:
maxidle: 100
maxactive: 500
idletimeout: 60s
http:
addr: :5000
relativeurls: true
# set via environment variable
# secret: placeholder
debug:
addr: :8001
prometheus:
enabled: true
path: /metrics
auth:
htpasswd:
realm: harbor-registry-basic-realm
path: /etc/registry/passwd
validation:
disabled: true
compatibility:
schema1:
enabled: true
ctl-config.yml: |
---
protocol: "http"
port: 8080
log_level: info
registry_config: "/etc/registry/config.yml"


@@ -1,16 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: "harbor-registryctl"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
data:


@@ -1,87 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: harbor-valkey-init-scripts
labels:
helm.sh/chart: valkey-0.9.3
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
data:
init.sh: |-
#!/bin/sh
set -eu
# Default config paths
VALKEY_CONFIG=${VALKEY_CONFIG_PATH:-/data/conf/valkey.conf}
LOGFILE="/data/init.log"
DATA_DIR="/data/conf"
# Logging function (outputs to stderr and file)
log() {
echo "$(date) $1" | tee -a "$LOGFILE" >&2
}
# Clean old log if requested
if [ "${KEEP_OLD_LOGS:-false}" != "true" ]; then
rm -f "$LOGFILE"
fi
if [ -f "$LOGFILE" ]; then
log "Detected restart of this instance ($HOSTNAME)"
fi
log "Creating configuration in $DATA_DIR..."
mkdir -p "$DATA_DIR"
rm -f "$VALKEY_CONFIG"
# Base valkey.conf
log "Generating base valkey.conf"
{
echo "port 6379"
echo "protected-mode no"
echo "bind * -::*"
echo "dir /data"
} >>"$VALKEY_CONFIG"
# Replica mode configuration
log "Configuring replication mode"
# Use POD_INDEX from Kubernetes metadata
POD_INDEX=${POD_INDEX:-0}
IS_MASTER=false
# Check if this is pod-0 (master)
if [ "$POD_INDEX" = "0" ]; then
IS_MASTER=true
log "This pod (index $POD_INDEX) is configured as MASTER"
else
log "This pod (index $POD_INDEX) is configured as REPLICA"
fi
# Configure replica settings
if [ "$IS_MASTER" = "false" ]; then
MASTER_HOST="harbor-valkey-0.harbor-valkey-headless.harbor.svc.cluster.local"
MASTER_PORT="6379"
log "Configuring replica to follow master at $MASTER_HOST:$MASTER_PORT"
{
echo ""
echo "# Replica Configuration"
echo "replicaof $MASTER_HOST $MASTER_PORT"
echo "replica-announce-ip harbor-valkey-$POD_INDEX.harbor-valkey-headless.harbor.svc.cluster.local"
} >>"$VALKEY_CONFIG"
fi
# Append extra configs if present
if [ -f /usr/local/etc/valkey/valkey.conf ]; then
log "Appending /usr/local/etc/valkey/valkey.conf"
cat /usr/local/etc/valkey/valkey.conf >>"$VALKEY_CONFIG"
fi
if [ -d /extravalkeyconfigs ]; then
log "Appending files in /extravalkeyconfigs/"
cat /extravalkeyconfigs/* >>"$VALKEY_CONFIG"
fi


@@ -1,152 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: harbor-core
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
component: core
app.kubernetes.io/component: core
spec:
replicas: 2
revisionHistoryLimit: 10
selector:
matchLabels:
release: harbor
app: "harbor"
component: core
template:
metadata:
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
component: core
app.kubernetes.io/component: core
annotations:
checksum/configmap: 641bbb72f9900d6197857c2f9fb6f0bdc95af2a2e3883dfec940c519b299da5d
checksum/secret: 59669814fb7baa809e9428f8ded55a9bf9281f6bfedaa638b53b49cff7b66e22
checksum/secret-jobservice: f3a0135630d8fa98235c6c6341ee8e42262bad005727f86ce3f0a0679271f1ed
spec:
securityContext:
runAsUser: 10000
fsGroup: 10000
automountServiceAccountToken: false
terminationGracePeriodSeconds: 120
containers:
- name: core
image: goharbor/harbor-core:v2.15.0
imagePullPolicy: IfNotPresent
startupProbe:
httpGet:
path: /api/v2.0/ping
scheme: HTTP
port: 8080
failureThreshold: 360
initialDelaySeconds: 10
periodSeconds: 10
livenessProbe:
httpGet:
path: /api/v2.0/ping
scheme: HTTP
port: 8080
failureThreshold: 2
periodSeconds: 10
readinessProbe:
httpGet:
path: /api/v2.0/ping
scheme: HTTP
port: 8080
failureThreshold: 2
periodSeconds: 10
envFrom:
- configMapRef:
name: "harbor-core"
- secretRef:
name: "harbor-core"
env:
- name: CORE_SECRET
valueFrom:
secretKeyRef:
name: harbor-secret
key: secret
- name: JOBSERVICE_SECRET
valueFrom:
secretKeyRef:
name: harbor-secret
key: JOBSERVICE_SECRET
- name: HARBOR_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-secret
key: HARBOR_ADMIN_PASSWORD
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-postgresql-18-cluster-app
key: password
- name: REGISTRY_CREDENTIAL_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-secret
key: REGISTRY_PASSWD
- name: CSRF_KEY
valueFrom:
secretKeyRef:
name: harbor-secret
key: CSRF_KEY
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
ports:
- containerPort: 8080
volumeMounts:
- name: config
mountPath: /etc/core/app.conf
subPath: app.conf
- name: secret-key
mountPath: /etc/core/key
subPath: key
- name: token-service-private-key
mountPath: /etc/core/private_key.pem
subPath: tls.key
- name: psc
mountPath: /etc/core/token
volumes:
- name: config
configMap:
name: harbor-core
items:
- key: app.conf
path: app.conf
- name: secret-key
secret:
secretName: harbor-secret
items:
- key: secretKey
path: key
- name: token-service-private-key
secret:
secretName: harbor-secret
- name: psc
emptyDir: {}


@@ -1,96 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: harbor-exporter
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
component: exporter
app.kubernetes.io/component: exporter
spec:
replicas: 2
revisionHistoryLimit: 10
selector:
matchLabels:
release: harbor
app: "harbor"
component: exporter
template:
metadata:
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
component: exporter
app.kubernetes.io/component: exporter
annotations:
checksum/configmap: 5293f455659091cb2f6ed1113095a6dbb04f8364748670cb5d4630ca689d73d8
checksum/secret: bb03df1fde79526e9aaa86ec987c17ba77e4b5cbeb4ff140100971d53b061347
spec:
securityContext:
runAsUser: 10000
fsGroup: 10000
automountServiceAccountToken: false
containers:
- name: exporter
image: goharbor/harbor-exporter:v2.15.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8001
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
httpGet:
path: /
port: 8001
initialDelaySeconds: 30
periodSeconds: 10
args: ["-log-level", "info"]
envFrom:
- configMapRef:
name: "harbor-exporter-env"
- secretRef:
name: "harbor-exporter"
env:
- name: HARBOR_DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-postgresql-18-cluster-app
key: password
- name: HARBOR_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-secret
key: HARBOR_ADMIN_PASSWORD
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
ports:
- containerPort: 8001
volumeMounts:
volumes:
- name: config
secret:
secretName: "harbor-exporter"


@@ -1,116 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: "harbor-jobservice"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
component: jobservice
app.kubernetes.io/component: jobservice
spec:
replicas: 2
revisionHistoryLimit: 10
strategy:
type: Recreate
rollingUpdate: null
selector:
matchLabels:
release: harbor
app: "harbor"
component: jobservice
template:
metadata:
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
component: jobservice
app.kubernetes.io/component: jobservice
annotations:
checksum/configmap: fd35a180d4111bc06151e7a8a1b824387e09f2aae58e69ecea24f8540abaebbd
checksum/configmap-env: e01b3437c4423c0fbcfcb609680c3186f3a09d00297883f6b19f117a44d2a88c
checksum/secret: f3a0135630d8fa98235c6c6341ee8e42262bad005727f86ce3f0a0679271f1ed
checksum/secret-core: 59669814fb7baa809e9428f8ded55a9bf9281f6bfedaa638b53b49cff7b66e22
spec:
securityContext:
runAsUser: 10000
fsGroup: 10000
automountServiceAccountToken: false
terminationGracePeriodSeconds: 120
containers:
- name: jobservice
image: goharbor/harbor-jobservice:v2.15.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /api/v1/stats
scheme: HTTP
port: 8080
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
httpGet:
path: /api/v1/stats
scheme: HTTP
port: 8080
initialDelaySeconds: 20
periodSeconds: 10
env:
- name: CORE_SECRET
valueFrom:
secretKeyRef:
name: harbor-secret
key: secret
- name: JOBSERVICE_SECRET
valueFrom:
secretKeyRef:
name: harbor-secret
key: JOBSERVICE_SECRET
- name: REGISTRY_CREDENTIAL_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-secret
key: REGISTRY_PASSWD
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
envFrom:
- configMapRef:
name: "harbor-jobservice-env"
- secretRef:
name: "harbor-jobservice"
ports:
- containerPort: 8080
volumeMounts:
- name: jobservice-config
mountPath: /etc/jobservice/config.yml
subPath: config.yml
- name: job-logs
mountPath: /var/log/jobs
subPath:
volumes:
- name: jobservice-config
configMap:
name: "harbor-jobservice"
- name: job-logs
emptyDir: {}


@@ -1,83 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: "harbor-portal"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
component: portal
app.kubernetes.io/component: portal
spec:
replicas: 2
revisionHistoryLimit: 10
selector:
matchLabels:
release: harbor
app: "harbor"
component: portal
template:
metadata:
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
component: portal
app.kubernetes.io/component: portal
annotations:
checksum/configmap: 67a5d24a4be2482eaeeeb0b460a525257bcc917634227bef22888ba007496c12
spec:
securityContext:
runAsUser: 10000
fsGroup: 10000
automountServiceAccountToken: false
containers:
- name: portal
image: goharbor/harbor-portal:v2.15.0
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
livenessProbe:
httpGet:
path: /
scheme: HTTP
port: 8080
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
httpGet:
path: /
scheme: HTTP
port: 8080
initialDelaySeconds: 1
periodSeconds: 10
ports:
- containerPort: 8080
volumeMounts:
- name: portal-config
mountPath: /etc/nginx/nginx.conf
subPath: nginx.conf
volumes:
- name: portal-config
configMap:
name: "harbor-portal"


@@ -1,177 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: "harbor-registry"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
component: registry
app.kubernetes.io/component: registry
spec:
replicas: 1
revisionHistoryLimit: 10
strategy:
type: Recreate
rollingUpdate: null
selector:
matchLabels:
release: harbor
app: "harbor"
component: registry
template:
metadata:
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
component: registry
app.kubernetes.io/component: registry
annotations:
checksum/configmap: 77823f5f18ace686e7928407a3f045ee24ae0a3bd616a88a110e4a504b03f7ca
checksum/secret: 47a7c4b7d3c8e57c96d426d6085e3d3c9dfed0b5590c1c5a46f3ea642e876775
checksum/secret-jobservice: f3a0135630d8fa98235c6c6341ee8e42262bad005727f86ce3f0a0679271f1ed
checksum/secret-core: 59669814fb7baa809e9428f8ded55a9bf9281f6bfedaa638b53b49cff7b66e22
spec:
securityContext:
runAsUser: 10000
fsGroup: 10000
fsGroupChangePolicy: OnRootMismatch
automountServiceAccountToken: false
terminationGracePeriodSeconds: 120
containers:
- name: registry
image: goharbor/registry-photon:v2.15.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
scheme: HTTP
port: 5000
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
httpGet:
path: /
scheme: HTTP
port: 5000
initialDelaySeconds: 1
periodSeconds: 10
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
envFrom:
- secretRef:
name: "harbor-registry"
env:
- name: REGISTRY_HTTP_SECRET
valueFrom:
secretKeyRef:
name: harbor-secret
key: REGISTRY_HTTP_SECRET
ports:
- containerPort: 5000
- containerPort: 8001
volumeMounts:
- name: registry-data
mountPath: /storage
subPath:
- name: registry-htpasswd
mountPath: /etc/registry/passwd
subPath: passwd
- name: registry-config
mountPath: /etc/registry/config.yml
subPath: config.yml
- name: registryctl
image: goharbor/harbor-registryctl:v2.15.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /api/health
scheme: HTTP
port: 8080
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
httpGet:
path: /api/health
scheme: HTTP
port: 8080
initialDelaySeconds: 1
periodSeconds: 10
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
envFrom:
- configMapRef:
name: "harbor-registryctl"
- secretRef:
name: "harbor-registry"
- secretRef:
name: "harbor-registryctl"
env:
- name: REGISTRY_HTTP_SECRET
valueFrom:
secretKeyRef:
name: harbor-secret
key: REGISTRY_HTTP_SECRET
- name: CORE_SECRET
valueFrom:
secretKeyRef:
name: harbor-secret
key: secret
- name: JOBSERVICE_SECRET
valueFrom:
secretKeyRef:
name: harbor-secret
key: JOBSERVICE_SECRET
ports:
- containerPort: 8080
volumeMounts:
- name: registry-data
mountPath: /storage
subPath:
- name: registry-config
mountPath: /etc/registry/config.yml
subPath: config.yml
- name: registry-config
mountPath: /etc/registryctl/config.yml
subPath: ctl-config.yml
volumes:
- name: registry-htpasswd
secret:
secretName: harbor-secret
items:
- key: REGISTRY_HTPASSWD
path: passwd
- name: registry-config
configMap:
name: "harbor-registry"
- name: registry-data
persistentVolumeClaim:
claimName: harbor-registry


@@ -1,38 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: harbor-postgresql-18-backup-garage-local-secret
namespace: harbor
labels:
app.kubernetes.io/name: harbor-postgresql-18-backup-garage-local-secret
helm.sh/chart: postgres-18-cluster-7.10.0
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "7.10.0"
app.kubernetes.io/managed-by: Helm
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY


@@ -1,38 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: harbor-postgresql-18-recovery-secret
namespace: harbor
labels:
helm.sh/chart: postgres-18-cluster-7.10.0
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "7.10.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: harbor-postgresql-18-recovery-secret
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY


@@ -1,98 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: harbor-secret
namespace: harbor
labels:
app.kubernetes.io/name: harbor-secret
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: HARBOR_ADMIN_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/config
metadataPolicy: None
property: admin-password
- secretKey: secretKey
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/config
metadataPolicy: None
property: secretKey
- secretKey: CSRF_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/core
metadataPolicy: None
property: CSRF_KEY
- secretKey: secret
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/core
metadataPolicy: None
property: secret
- secretKey: tls.crt
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/core
metadataPolicy: None
property: tls.crt
- secretKey: tls.key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/core
metadataPolicy: None
property: tls.key
- secretKey: JOBSERVICE_SECRET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/jobservice
metadataPolicy: None
property: JOBSERVICE_SECRET
- secretKey: REGISTRY_HTTP_SECRET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/registry
metadataPolicy: None
property: REGISTRY_HTTP_SECRET
- secretKey: REGISTRY_REDIS_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/registry
metadataPolicy: None
property: REGISTRY_REDIS_PASSWORD
- secretKey: REGISTRY_HTPASSWD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/registry
metadataPolicy: None
property: REGISTRY_HTPASSWD
- secretKey: REGISTRY_CREDENTIAL_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/registry
metadataPolicy: None
property: REGISTRY_CREDENTIAL_PASSWORD
- secretKey: REGISTRY_PASSWD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/registry
metadataPolicy: None
property: REGISTRY_CREDENTIAL_PASSWORD


@@ -1,39 +0,0 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: "harbor-route"
namespace: "harbor"
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- harbor.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /api/
- path:
type: PathPrefix
value: /service/
- path:
type: PathPrefix
value: /v2/
- path:
type: PathPrefix
value: /c/
backendRefs:
- name: harbor-core
namespace: "harbor"
port: 80
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- name: harbor-portal
namespace: "harbor"
port: 80


@@ -1,68 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
name: migration-job
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
component: migrator
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "-5"
spec:
template:
metadata:
labels:
release: harbor
app: "harbor"
component: migrator
spec:
restartPolicy: Never
securityContext:
runAsUser: 10000
fsGroup: 10000
terminationGracePeriodSeconds: 120
containers:
- name: core-job
image: goharbor/harbor-core:v2.15.0
imagePullPolicy: IfNotPresent
command: ["/harbor/harbor_core", "-mode=migrate"]
envFrom:
- configMapRef:
name: "harbor-core"
- secretRef:
name: "harbor-core"
env:
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-postgresql-18-cluster-app
key: password
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
volumeMounts:
- name: config
mountPath: /etc/core/app.conf
subPath: app.conf
volumes:
- name: config
configMap:
name: harbor-core
items:
- key: app.conf
path: app.conf


@@ -1,33 +0,0 @@
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: harbor-postgresql-18-backup-garage-local
namespace: harbor
labels:
app.kubernetes.io/name: harbor-postgresql-18-backup-garage-local
helm.sh/chart: postgres-18-cluster-7.10.0
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "7.10.0"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 7d
instanceSidecarConfiguration:
env:
- name: AWS_REQUEST_CHECKSUM_CALCULATION
value: when_required
- name: AWS_RESPONSE_CHECKSUM_VALIDATION
value: when_required
configuration:
destinationPath: s3://postgres-backups/cl01tl/harbor/harbor-postgresql-18-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: harbor-postgresql-18-backup-garage-local-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: harbor-postgresql-18-backup-garage-local-secret
key: ACCESS_SECRET_KEY
region:
name: harbor-postgresql-18-backup-garage-local-secret
key: ACCESS_REGION


@@ -1,32 +0,0 @@
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "harbor-postgresql-18-recovery"
namespace: harbor
labels:
helm.sh/chart: postgres-18-cluster-7.10.0
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "7.10.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: "harbor-postgresql-18-recovery"
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/harbor/harbor-postgresql-18-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: harbor-postgresql-18-recovery-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: harbor-postgresql-18-recovery-secret
key: ACCESS_SECRET_KEY
region:
name: harbor-postgresql-18-recovery-secret
key: ACCESS_REGION


@@ -1,26 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: harbor-registry
namespace: "harbor"
annotations:
helm.sh/resource-policy: keep
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
component: registry
app.kubernetes.io/component: registry
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
storageClassName: ceph-block


@@ -1,23 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: harbor-valkey
labels:
helm.sh/chart: valkey-0.9.3
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: valkey
app.kubernetes.io/component: podmonitor
spec:
podMetricsEndpoints:
- port: metrics
interval: 30s
namespaceSelector:
matchNames:
- harbor
selector:
matchLabels:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor


@@ -1,270 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: harbor-postgresql-18-alert-rules
namespace: harbor
labels:
app.kubernetes.io/name: harbor-postgresql-18-alert-rules
helm.sh/chart: postgres-18-cluster-7.10.0
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "7.10.0"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/harbor-postgresql-18
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total{namespace="harbor"} > 300
for: 1m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-18-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks{namespace="harbor"} > 10
for: 1m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-18-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster at a severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint os operating at reduced capacity and all traffic is being served by the main.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
instances. The replaced instance may need some time to catch-up with the cluster primary instance.
This alarm will be always trigger if your cluster is configured to run with only 1 instance. In this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="harbor"} - cnpg_pg_replication_is_wal_receiver_up{namespace="harbor"}) < 1
for: 5m
labels:
severity: critical
namespace: harbor
cnpg_cluster: harbor-postgresql-18-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch-up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with less than 3 instances.
In this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="harbor"} - cnpg_pg_replication_is_wal_receiver_up{namespace="harbor"}) < 2
for: 5m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-18-cluster
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "harbor/harbor-postgresql-18-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="harbor", pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="harbor", pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: harbor
cnpg_cluster: harbor-postgresql-18-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "harbor/harbor-postgresql-18-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="harbor", pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="harbor", pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-18-cluster
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster high replication lag
description: |-
CloudNativePG Cluster "harbor/harbor-postgresql-18-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="harbor",pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-18-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "harbor/harbor-postgresql-18-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="harbor", pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-18-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster has a query taking longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
is taking more than 5 minutes (300 seconds) for a query.
expr: |-
cnpg_backends_max_tx_duration_seconds{namespace="harbor"} > 300
for: 1m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-18-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "harbor/harbor-postgresql-18-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: harbor
cnpg_cluster: harbor-postgresql-18-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "harbor/harbor-postgresql-18-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-18-cluster
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "harbor/harbor-postgresql-18-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="harbor",pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: harbor
cnpg_cluster: harbor-postgresql-18-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions from the frozen XID to the current one.
description: |-
Over 300,000,000 transactions from frozen xid
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age{namespace="harbor"} > 300000000
for: 1m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-18-cluster
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag{namespace="harbor"} > 300
for: 1m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-18-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery{namespace="harbor"} > cnpg_pg_replication_is_wal_receiver_up{namespace="harbor"}
for: 1m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-18-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster instances are located in the same zone.
description: |-
CloudNativePG Cluster "harbor/harbor-postgresql-18-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="harbor", pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-18-cluster


@@ -1,47 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: harbor-valkey
labels:
helm.sh/chart: valkey-0.9.3
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: valkey
spec:
groups:
- name: harbor-valkey
rules:
- alert: ValkeyDown
annotations:
description: Valkey instance {{ $labels.instance }} is down.
summary: Valkey instance {{ $labels.instance }} down
expr: |
redis_up{service="harbor-valkey-metrics"} == 0
for: 2m
labels:
severity: error
- alert: ValkeyMemoryHigh
annotations:
description: |
Valkey instance {{ $labels.instance }} is using {{ $value }}% of its available memory.
summary: Valkey instance {{ $labels.instance }} is using too much memory
expr: |
redis_memory_used_bytes{service="harbor-valkey-metrics"} * 100
/
redis_memory_max_bytes{service="harbor-valkey-metrics"}
> 90 <= 100
for: 2m
labels:
severity: error
- alert: ValkeyKeyEviction
annotations:
description: |
Valkey instance {{ $labels.instance }} has evicted {{ $value }} keys in the last 5 minutes.
summary: Valkey instance {{ $labels.instance }} has evicted keys
expr: |
increase(redis_evicted_keys_total{service="harbor-valkey-metrics"}[5m]) > 0
for: 1s
labels:
severity: error


@@ -1,24 +0,0 @@
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "harbor-postgresql-18-scheduled-backup-live-backup"
namespace: harbor
labels:
app.kubernetes.io/name: "harbor-postgresql-18-scheduled-backup-live-backup"
helm.sh/chart: postgres-18-cluster-7.10.0
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "7.10.0"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 35 14 * * *"
backupOwnerReference: self
cluster:
name: harbor-postgresql-18-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "harbor-postgresql-18-backup-garage-local"


@@ -1,17 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: harbor-core
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
type: Opaque
data:


@@ -1,17 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: harbor-exporter
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
type: Opaque
data:


@@ -1,17 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: "harbor-jobservice"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
type: Opaque
data:


@@ -1,18 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: "harbor-registry"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
type: Opaque
data:
REGISTRY_REDIS_PASSWORD: ""


@@ -1,17 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: "harbor-registryctl"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
type: Opaque
data:


@@ -1,19 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: harbor-trivy
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
type: Opaque
data:
redisURL: cmVkaXM6Ly9oYXJib3ItdmFsa2V5LmhhcmJvcjo2Mzc5LzU/aWRsZV90aW1lb3V0X3NlY29uZHM9MzA=
gitHubToken: ""


@@ -1,26 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: harbor-core
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
spec:
ports:
- name: http-web
port: 80
targetPort: 8080
- name: http-metrics
port: 8001
selector:
release: harbor
app: "harbor"
component: core


@@ -1,23 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: "harbor-exporter"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
spec:
ports:
- name: http-metrics
port: 8001
selector:
release: harbor
app: "harbor"
component: exporter


@@ -1,26 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: "harbor-jobservice"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
spec:
ports:
- name: http-jobservice
port: 80
targetPort: 8080
- name: http-metrics
port: 8001
selector:
release: harbor
app: "harbor"
component: jobservice


@@ -1,23 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: "harbor-portal"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
spec:
ports:
- port: 80
targetPort: 8080
selector:
release: harbor
app: "harbor"
component: portal


@@ -1,27 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: "harbor-registry"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
spec:
ports:
- name: http-registry
port: 5000
- name: http-controller
port: 8080
- name: http-metrics
port: 8001
selector:
release: harbor
app: "harbor"
component: registry


@@ -1,24 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: "harbor-trivy"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
spec:
ports:
- name: http-trivy
protocol: TCP
port: 8080
selector:
release: harbor
app: "harbor"
component: trivy


@@ -1,23 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: harbor-valkey-headless
labels:
helm.sh/chart: valkey-0.9.3
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: headless
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: tcp
port: 6379
targetPort: tcp
protocol: TCP
selector:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor


@@ -1,23 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: harbor-valkey-metrics
labels:
helm.sh/chart: valkey-0.9.3
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: metrics
app.kubernetes.io/part-of: valkey
annotations:
spec:
type: ClusterIP
ports:
- name: metrics
port: 9121
protocol: TCP
targetPort: metrics
selector:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor


@@ -1,21 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: harbor-valkey-read
labels:
helm.sh/chart: valkey-0.9.3
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: read
spec:
type: ClusterIP
ports:
- name: tcp
port: 6379
targetPort: tcp
protocol: TCP
selector:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor


@@ -1,22 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: harbor-valkey
labels:
helm.sh/chart: valkey-0.9.3
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: primary
spec:
type: ClusterIP
ports:
- port: 6379
targetPort: tcp
protocol: TCP
name: tcp
selector:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor
statefulset.kubernetes.io/pod-name: harbor-valkey-0


@@ -1,11 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: harbor-valkey
labels:
helm.sh/chart: valkey-0.9.3
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
automountServiceAccountToken: false


@@ -1,24 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: harbor-valkey
labels:
helm.sh/chart: valkey-0.9.3
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: valkey
app.kubernetes.io/component: service-monitor
spec:
endpoints:
- port: metrics
interval: 30s
namespaceSelector:
matchNames:
- harbor
selector:
matchLabels:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor
app.kubernetes.io/component: metrics

@@ -1,24 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: harbor
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
spec:
jobLabel: app.kubernetes.io/name
endpoints:
- port: http-metrics
honorLabels: true
selector:
matchLabels:
release: harbor
app: "harbor"

@@ -1,167 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: harbor-trivy
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
component: trivy
app.kubernetes.io/component: trivy
spec:
replicas: 1
serviceName: harbor-trivy
selector:
matchLabels:
release: harbor
app: "harbor"
component: trivy
template:
metadata:
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.3"
component: trivy
app.kubernetes.io/component: trivy
annotations:
checksum/secret: 83fe4ce46bcdf24dffaccbf9ece506a58ee9eda2fe07e0aa3658386702fd3d26
spec:
securityContext:
runAsUser: 10000
fsGroup: 10000
automountServiceAccountToken: false
containers:
- name: trivy
image: goharbor/trivy-adapter-photon:v2.14.3
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
env:
- name: HTTP_PROXY
value: ""
- name: HTTPS_PROXY
value: ""
- name: NO_PROXY
value: "harbor-core,harbor-jobservice,harbor-database,harbor-registry,harbor-portal,harbor-trivy,harbor-exporter,127.0.0.1,localhost,.local,.internal"
- name: "SCANNER_LOG_LEVEL"
value: "info"
- name: "SCANNER_TRIVY_CACHE_DIR"
value: "/home/scanner/.cache/trivy"
- name: "SCANNER_TRIVY_REPORTS_DIR"
value: "/home/scanner/.cache/reports"
- name: "SCANNER_TRIVY_DEBUG_MODE"
value: "false"
- name: "SCANNER_TRIVY_VULN_TYPE"
value: "os,library"
- name: "SCANNER_TRIVY_TIMEOUT"
value: "5m0s"
- name: "SCANNER_TRIVY_GITHUB_TOKEN"
valueFrom:
secretKeyRef:
name: harbor-trivy
key: gitHubToken
- name: "SCANNER_TRIVY_SEVERITY"
value: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL"
- name: "SCANNER_TRIVY_IGNORE_UNFIXED"
value: "false"
- name: "SCANNER_TRIVY_SKIP_UPDATE"
value: "false"
- name: "SCANNER_TRIVY_SKIP_JAVA_DB_UPDATE"
value: "false"
- name: "SCANNER_TRIVY_DB_REPOSITORY"
value: "mirror.gcr.io/aquasec/trivy-db,ghcr.io/aquasecurity/trivy-db"
- name: "SCANNER_TRIVY_JAVA_DB_REPOSITORY"
value: "mirror.gcr.io/aquasec/trivy-java-db,ghcr.io/aquasecurity/trivy-java-db"
- name: "SCANNER_TRIVY_OFFLINE_SCAN"
value: "false"
- name: "SCANNER_TRIVY_SECURITY_CHECKS"
value: "vuln"
- name: "SCANNER_TRIVY_INSECURE"
value: "false"
- name: SCANNER_API_SERVER_ADDR
value: ":8080"
- name: "SCANNER_REDIS_URL"
valueFrom:
secretKeyRef:
name: harbor-trivy
key: redisURL
- name: "SCANNER_STORE_REDIS_URL"
valueFrom:
secretKeyRef:
name: harbor-trivy
key: redisURL
- name: "SCANNER_JOB_QUEUE_REDIS_URL"
valueFrom:
secretKeyRef:
name: harbor-trivy
key: redisURL
ports:
- name: api-server
containerPort: 8080
volumeMounts:
- name: data
mountPath: /home/scanner/.cache
readOnly: false
livenessProbe:
httpGet:
scheme: HTTP
path: /probe/healthy
port: api-server
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
failureThreshold: 10
readinessProbe:
httpGet:
scheme: HTTP
path: /probe/ready
port: api-server
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
failureThreshold: 3
resources:
limits:
cpu: 1
memory: 1Gi
requests:
cpu: 200m
memory: 512Mi
volumeClaimTemplates:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: "5Gi"

@@ -1,129 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: harbor-valkey
labels:
helm.sh/chart: valkey-0.9.3
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
spec:
serviceName: harbor-valkey-headless
replicas: 3
podManagementPolicy: OrderedReady
selector:
matchLabels:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor
volumeClaimTemplates:
- metadata:
name: valkey-data
spec:
accessModes:
- ReadWriteOnce
storageClassName: "ceph-block"
resources:
requests:
storage: "1Gi"
template:
metadata:
labels:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor
annotations:
checksum/initconfig: "0cad4b394241164de6b4d658a977be16"
spec:
automountServiceAccountToken: false
serviceAccountName: harbor-valkey
securityContext:
fsGroup: 1000
runAsGroup: 1000
runAsUser: 1000
initContainers:
- name: harbor-valkey-init
image: docker.io/valkey/valkey:9.0.3
imagePullPolicy: IfNotPresent
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
command: ["/scripts/init.sh"]
env:
- name: POD_INDEX
valueFrom:
fieldRef:
fieldPath: metadata.labels['apps.kubernetes.io/pod-index']
volumeMounts:
- name: valkey-data
mountPath: /data
- name: scripts
mountPath: /scripts
containers:
- name: harbor-valkey
image: docker.io/valkey/valkey:9.0.3
imagePullPolicy: IfNotPresent
command: ["valkey-server"]
args: ["/data/conf/valkey.conf"]
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
env:
- name: POD_INDEX
valueFrom:
fieldRef:
fieldPath: metadata.labels['apps.kubernetes.io/pod-index']
- name: VALKEY_LOGLEVEL
value: "notice"
ports:
- name: tcp
containerPort: 6379
protocol: TCP
startupProbe:
exec:
command: ["sh", "-c", "valkey-cli ping"]
livenessProbe:
exec:
command: ["sh", "-c", "valkey-cli ping"]
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- name: valkey-data
mountPath: /data
- name: metrics
image: ghcr.io/oliver006/redis_exporter:v1.82.0
imagePullPolicy: "IfNotPresent"
ports:
- name: metrics
containerPort: 9121
startupProbe:
tcpSocket:
port: metrics
livenessProbe:
tcpSocket:
port: metrics
readinessProbe:
httpGet:
path: /
port: metrics
resources:
requests:
cpu: 10m
memory: 64M
env:
- name: REDIS_ALIAS
value: harbor-valkey
volumes:
- name: scripts
configMap:
name: harbor-valkey-init-scripts
defaultMode: 0555
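
The init container resolves its role from the apps.kubernetes.io/pod-index label (stamped by the StatefulSet controller, enabled by default since Kubernetes 1.28) and writes /data/conf/valkey.conf, which the main container then loads. The actual init.sh lives in the harbor-valkey-init-scripts ConfigMap outside this diff; a purely hypothetical sketch of the config a replica at index > 0 might end up with:

# Hypothetical rendered /data/conf/valkey.conf for a non-zero pod index:
#   loglevel notice
#   dir /data
#   replicaof harbor-valkey-0.harbor-valkey-headless 6379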

@@ -0,0 +1 @@

@@ -1,68 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: yubal
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: yubal
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: yubal
helm.sh/chart: yubal-4.6.2
namespace: yubal
spec:
revisionHistoryLimit: 3
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/controller: main
app.kubernetes.io/name: yubal
app.kubernetes.io/instance: yubal
template:
metadata:
labels:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: yubal
app.kubernetes.io/name: yubal
spec:
enableServiceLinks: false
serviceAccountName: default
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
runAsGroup: 1000
runAsUser: 1000
hostIPC: false
hostNetwork: false
hostPID: false
dnsPolicy: ClusterFirst
containers:
- env:
- name: YUBAL_TZ
value: America/Chicago
- name: YUBAL_HOST
value: 0.0.0.0
- name: YUBAL_PORT
value: "8000"
- name: YUBAL_LOG_LEVEL
value: INFO
image: ghcr.io/guillevc/yubal:4.0.0
imagePullPolicy: IfNotPresent
name: main
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- mountPath: /app/config
name: config
- mountPath: /app/data
name: music
volumes:
- name: config
persistentVolumeClaim:
claimName: yubal
- name: music
persistentVolumeClaim:
claimName: yubal-nfs-storage

@@ -1,58 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: yubal-backup-secret-external
namespace: yubal
labels:
helm.sh/chart: volsync-target-config-0.8.0
app.kubernetes.io/instance: yubal
app.kubernetes.io/part-of: yubal
app.kubernetes.io/version: "0.8.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: yubal-backup-secret-external
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/yubal/yubal"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/digital-ocean
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_DEFAULT_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/volsync-backups
metadataPolicy: None
property: AWS_SECRET_ACCESS_KEY
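
This secret and its two siblings below (local and remote) all render the same shape: the vault-sourced keys plus a RESTIC_REPOSITORY assembled from BUCKET_ENDPOINT by the template. Since no target name is set, the resulting Secret takes the ExternalSecret's own name. A sketch of the rendered result, with placeholder values:

apiVersion: v1
kind: Secret
metadata:
  name: yubal-backup-secret-external
  namespace: yubal
type: Opaque
stringData:
  RESTIC_REPOSITORY: <BUCKET_ENDPOINT>/yubal/yubal   # template output
  RESTIC_PASSWORD: <from vault>
  AWS_DEFAULT_REGION: <from vault>
  AWS_ACCESS_KEY_ID: <from vault>
  AWS_SECRET_ACCESS_KEY: <from vault>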

@@ -1,58 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: yubal-backup-secret-local
namespace: yubal
labels:
helm.sh/chart: volsync-target-config-0.8.0
app.kubernetes.io/instance: yubal
app.kubernetes.io/part-of: yubal
app.kubernetes.io/version: "0.8.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: yubal-backup-secret-local
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/yubal/yubal"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-local
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-local
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY

@@ -1,58 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: yubal-backup-secret-remote
namespace: yubal
labels:
helm.sh/chart: volsync-target-config-0.8.0
app.kubernetes.io/instance: yubal
app.kubernetes.io/part-of: yubal
app.kubernetes.io/version: "0.8.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: yubal-backup-secret-remote
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
target:
template:
mergePolicy: Merge
engineVersion: v2
data:
RESTIC_REPOSITORY: "{{ .BUCKET_ENDPOINT }}/yubal/yubal"
data:
- secretKey: BUCKET_ENDPOINT
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-remote
metadataPolicy: None
property: BUCKET_ENDPOINT
- secretKey: RESTIC_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /volsync/restic/garage-remote
metadataPolicy: None
property: RESTIC_PASSWORD
- secretKey: AWS_DEFAULT_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_REGION
- secretKey: AWS_ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: AWS_SECRET_ACCESS_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/volsync-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY

@@ -1,42 +0,0 @@
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: yubal-wireguard-conf
namespace: yubal
labels:
app.kubernetes.io/name: yubal-wireguard-conf
app.kubernetes.io/instance: yubal
app.kubernetes.io/part-of: yubal
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: private-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /airvpn/conf/cl01tl
metadataPolicy: None
property: private-key
- secretKey: preshared-key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /airvpn/conf/cl01tl
metadataPolicy: None
property: preshared-key
- secretKey: addresses
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /airvpn/conf/cl01tl
metadataPolicy: None
property: addresses
- secretKey: input-ports
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /airvpn/conf/cl01tl
metadataPolicy: None
property: input-ports

@@ -1,30 +0,0 @@
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: yubal
labels:
app.kubernetes.io/instance: yubal
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: yubal
helm.sh/chart: yubal-4.6.2
namespace: yubal
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- "yubal.alexlebens.net"
rules:
- backendRefs:
- group: ""
kind: Service
name: yubal
namespace: yubal
port: 80
weight: 100
matches:
- path:
type: PathPrefix
value: /
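
The route attaches to the shared traefik-gateway in the traefik namespace and forwards all paths on yubal.alexlebens.net to the yubal Service; port 80 there maps to container port 8000 (YUBAL_PORT in the Deployment above). For a cross-namespace attachment like this to be accepted, the Gateway's listener has to allow routes from other namespaces, roughly like the hypothetical listener below (the actual Gateway is not in this diff):

# Hypothetical listener on traefik-gateway admitting routes from any namespace:
listeners:
  - name: websecure
    protocol: HTTPS
    port: 443
    allowedRoutes:
      namespaces:
        from: All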

@@ -1,11 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: yubal
labels:
app.kubernetes.io/name: yubal
app.kubernetes.io/instance: yubal
app.kubernetes.io/part-of: yubal
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/warn: privileged

@@ -1,23 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: yubal-nfs-storage
namespace: yubal
labels:
app.kubernetes.io/name: yubal-nfs-storage
app.kubernetes.io/instance: yubal
app.kubernetes.io/part-of: yubal
spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs-client
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
nfs:
path: /volume2/Storage/Music Youtube/
server: synologybond.alexlebens.net
mountOptions:
- vers=4
- minorversion=1
- noac

@@ -1,17 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: yubal-nfs-storage
namespace: yubal
labels:
app.kubernetes.io/name: yubal-nfs-storage
app.kubernetes.io/instance: yubal
app.kubernetes.io/part-of: yubal
spec:
volumeName: yubal-nfs-storage
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi

@@ -1,19 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: yubal
labels:
app.kubernetes.io/instance: yubal
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: yubal
helm.sh/chart: yubal-4.6.2
annotations:
helm.sh/resource-policy: keep
namespace: yubal
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "1Gi"
storageClassName: "ceph-block"

@@ -1,29 +0,0 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: yubal-backup-source-external
namespace: yubal
labels:
helm.sh/chart: volsync-target-config-0.8.0
app.kubernetes.io/instance: yubal
app.kubernetes.io/part-of: yubal
app.kubernetes.io/version: "0.8.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: yubal-backup
spec:
sourcePVC: yubal
trigger:
schedule: 34 14 * * *
restic:
pruneIntervalDays: 7
repository: yubal-backup-secret-external
retain:
daily: 7
hourly: 0
monthly: 3
weekly: 4
yearly: 1
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 1Gi
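
This source and its two siblings below differ only in repository and schedule; the runs are staggered (11:34 local, 12:34 remote, 14:34 external) so snapshots of the yubal PVC never overlap. Restoring is the mirror operation through a ReplicationDestination; a hedged sketch assuming the same repository secret (this object is not part of the commit):

apiVersion: volsync.backube/v1alpha1
kind: ReplicationDestination
metadata:
  name: yubal-restore
  namespace: yubal
spec:
  trigger:
    manual: restore-once   # run a single restore, then stop
  restic:
    repository: yubal-backup-secret-external
    destinationPVC: yubal   # restore in place over the existing claim
    copyMethod: Direct
    cacheCapacity: 1Gi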

@@ -1,29 +0,0 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: yubal-backup-source-local
namespace: yubal
labels:
helm.sh/chart: volsync-target-config-0.8.0
app.kubernetes.io/instance: yubal
app.kubernetes.io/part-of: yubal
app.kubernetes.io/version: "0.8.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: yubal-backup
spec:
sourcePVC: yubal
trigger:
schedule: 34 11 * * *
restic:
pruneIntervalDays: 7
repository: yubal-backup-secret-local
retain:
daily: 7
hourly: 0
monthly: 3
weekly: 4
yearly: 1
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 1Gi

@@ -1,29 +0,0 @@
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: yubal-backup-source-remote
namespace: yubal
labels:
helm.sh/chart: volsync-target-config-0.8.0
app.kubernetes.io/instance: yubal
app.kubernetes.io/part-of: yubal
app.kubernetes.io/version: "0.8.0"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: yubal-backup
spec:
sourcePVC: yubal
trigger:
schedule: 34 12 * * *
restic:
pruneIntervalDays: 7
repository: yubal-backup-secret-remote
retain:
daily: 7
hourly: 0
monthly: 3
weekly: 4
yearly: 1
copyMethod: Snapshot
storageClassName: ceph-block
volumeSnapshotClassName: ceph-blockpool-snapshot
cacheCapacity: 1Gi

@@ -1,22 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: yubal
labels:
app.kubernetes.io/instance: yubal
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: yubal
app.kubernetes.io/service: yubal
helm.sh/chart: yubal-4.6.2
namespace: yubal
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 8000
protocol: TCP
name: http
selector:
app.kubernetes.io/controller: main
app.kubernetes.io/instance: yubal
app.kubernetes.io/name: yubal