# Source: infrastructure/clusters/cl01tl/manifests/harbor/Cluster-harbor-postgresql-18-cluster.yaml
# (rendered manifest bundle: 2578 lines, 78 KiB, YAML)
# CloudNativePG cluster backing Harbor, bootstrapped by recovering from an
# existing barman-cloud backup (serverName harbor-postgresql-18-backup-2).
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: harbor-postgresql-18-cluster
  namespace: harbor
  labels:
    helm.sh/chart: postgres-18-cluster-7.9.1
    app.kubernetes.io/name: harbor-postgresql-18
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "7.9.1"
    app.kubernetes.io/managed-by: Helm
spec:
  instances: 3
  imageName: "ghcr.io/cloudnative-pg/postgresql:18.3-standard-trixie"
  imagePullPolicy: IfNotPresent
  postgresUID: 26
  postgresGID: 26
  storage:
    size: 10Gi
    storageClass: local-path
  walStorage:
    size: 2Gi
    storageClass: local-path
  resources:
    limits:
      hugepages-2Mi: 256Mi
    requests:
      cpu: 100m
      memory: 256Mi
  affinity:
    enablePodAntiAffinity: true
    topologyKey: kubernetes.io/hostname
  primaryUpdateMethod: switchover
  primaryUpdateStrategy: unsupervised
  logLevel: info
  enableSuperuserAccess: false
  enablePDB: true
  postgresql:
    parameters:
      # PostgreSQL GUCs are strings; quoted to avoid any implicit typing.
      hot_standby_feedback: "on"
      max_slot_wal_keep_size: "2000MB"
      shared_buffers: "128MB"
  monitoring:
    enablePodMonitor: true
    disableDefaultQueries: false
  plugins:
    # WAL archiving to the local Garage object store.
    - name: barman-cloud.cloudnative-pg.io
      enabled: true
      isWALArchiver: true
      parameters:
        barmanObjectName: "harbor-postgresql-18-backup-garage-local"
        serverName: "harbor-postgresql-18-backup-2"
  bootstrap:
    recovery:
      database: app
      source: harbor-postgresql-18-backup-2
  externalClusters:
    # Recovery source; reads from the recovery ObjectStore, does not archive.
    - name: harbor-postgresql-18-backup-2
      plugin:
        name: barman-cloud.cloudnative-pg.io
        enabled: true
        isWALArchiver: false
        parameters:
          barmanObjectName: "harbor-postgresql-18-recovery"
          serverName: harbor-postgresql-18-backup-2
---
# Non-secret environment for the Harbor core service, plus its beego app.conf.
apiVersion: v1
kind: ConfigMap
metadata:
  name: harbor-core
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
data:
  app.conf: |
    appname = Harbor
    runmode = prod
    enablegzip = true
    [prod]
    httpport = 8080
  PORT: "8080"
  DATABASE_TYPE: "postgresql"
  POSTGRESQL_HOST: "harbor-postgresql-18-cluster-rw"
  POSTGRESQL_PORT: "5432"
  POSTGRESQL_USERNAME: "app"
  POSTGRESQL_DATABASE: "app"
  POSTGRESQL_SSLMODE: "disable"
  POSTGRESQL_MAX_IDLE_CONNS: "100"
  POSTGRESQL_MAX_OPEN_CONNS: "900"
  EXT_ENDPOINT: "https://harbor.alexlebens.net"
  CORE_URL: "http://harbor-core:80"
  JOBSERVICE_URL: "http://harbor-jobservice"
  REGISTRY_URL: "http://harbor-registry:5000"
  TOKEN_SERVICE_URL: "http://harbor-core:80/service/token"
  CORE_LOCAL_URL: "http://127.0.0.1:8080"
  WITH_TRIVY: "true"
  TRIVY_ADAPTER_URL: "http://harbor-trivy:8080"
  REGISTRY_STORAGE_PROVIDER_NAME: "filesystem"
  LOG_LEVEL: "info"
  CONFIG_PATH: "/etc/core/app.conf"
  CHART_CACHE_DRIVER: "redis"
  _REDIS_URL_CORE: "redis://harbor-valkey.harbor:6379/0?idle_timeout_seconds=30"
  _REDIS_URL_REG: "redis://harbor-valkey.harbor:6379/2?idle_timeout_seconds=30"
  PORTAL_URL: "http://harbor-portal"
  REGISTRY_CONTROLLER_URL: "http://harbor-registry:8080"
  REGISTRY_CREDENTIAL_USERNAME: "harbor_registry_user"
  HTTP_PROXY: ""
  HTTPS_PROXY: ""
  NO_PROXY: "harbor-core,harbor-jobservice,harbor-database,harbor-registry,harbor-portal,harbor-trivy,harbor-exporter,127.0.0.1,localhost,.local,.internal"
  PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE: "docker-hub,harbor,azure-acr,ali-acr,aws-ecr,google-gcr,docker-registry,github-ghcr,jfrog-artifactory"
  REPLICATION_ADAPTER_WHITELIST: "ali-acr,aws-ecr,azure-acr,docker-hub,docker-registry,github-ghcr,google-gcr,harbor,huawei-SWR,jfrog-artifactory,tencent-tcr,volcengine-cr"
  METRIC_ENABLE: "true"
  METRIC_PATH: "/metrics"
  METRIC_PORT: "8001"
  METRIC_NAMESPACE: harbor
  METRIC_SUBSYSTEM: core
  CACHE_ENABLED: "true"
  CACHE_EXPIRE_HOURS: "24"
  QUOTA_UPDATE_PROVIDER: "db"
---
# Non-secret environment for the Harbor metrics exporter.
apiVersion: v1
kind: ConfigMap
metadata:
  name: "harbor-exporter-env"
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
data:
  HTTP_PROXY: ""
  HTTPS_PROXY: ""
  NO_PROXY: "harbor-core,harbor-jobservice,harbor-database,harbor-registry,harbor-portal,harbor-trivy,harbor-exporter,127.0.0.1,localhost,.local,.internal"
  LOG_LEVEL: "info"
  HARBOR_EXPORTER_PORT: "8001"
  HARBOR_EXPORTER_METRICS_PATH: "/metrics"
  HARBOR_EXPORTER_METRICS_ENABLED: "true"
  HARBOR_EXPORTER_CACHE_TIME: "23"
  HARBOR_EXPORTER_CACHE_CLEAN_INTERVAL: "14400"
  HARBOR_METRIC_NAMESPACE: harbor
  HARBOR_METRIC_SUBSYSTEM: exporter
  HARBOR_REDIS_URL: "redis://harbor-valkey.harbor:6379/1"
  HARBOR_REDIS_NAMESPACE: harbor_job_service_namespace
  HARBOR_REDIS_TIMEOUT: "3600"
  HARBOR_SERVICE_SCHEME: "http"
  HARBOR_SERVICE_HOST: "harbor-core"
  HARBOR_SERVICE_PORT: "80"
  HARBOR_DATABASE_HOST: "harbor-postgresql-18-cluster-rw"
  HARBOR_DATABASE_PORT: "5432"
  HARBOR_DATABASE_USERNAME: "app"
  HARBOR_DATABASE_DBNAME: "app"
  HARBOR_DATABASE_SSLMODE: "disable"
  HARBOR_DATABASE_MAX_IDLE_CONNS: "100"
  HARBOR_DATABASE_MAX_OPEN_CONNS: "900"
---
# Non-secret environment for the Harbor jobservice.
apiVersion: v1
kind: ConfigMap
metadata:
  name: "harbor-jobservice-env"
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
data:
  CORE_URL: "http://harbor-core:80"
  TOKEN_SERVICE_URL: "http://harbor-core:80/service/token"
  REGISTRY_URL: "http://harbor-registry:5000"
  REGISTRY_CONTROLLER_URL: "http://harbor-registry:8080"
  REGISTRY_CREDENTIAL_USERNAME: "harbor_registry_user"
  JOBSERVICE_WEBHOOK_JOB_MAX_RETRY: "3"
  JOBSERVICE_WEBHOOK_JOB_HTTP_CLIENT_TIMEOUT: "3"
  LOG_LEVEL: "info"
  HTTP_PROXY: ""
  HTTPS_PROXY: ""
  NO_PROXY: "harbor-core,harbor-jobservice,harbor-database,harbor-registry,harbor-portal,harbor-trivy,harbor-exporter,127.0.0.1,localhost,.local,.internal"
  METRIC_NAMESPACE: harbor
  METRIC_SUBSYSTEM: jobservice
  _REDIS_URL_CORE: "redis://harbor-valkey.harbor:6379/0?idle_timeout_seconds=30"
  CACHE_ENABLED: "true"
  CACHE_EXPIRE_HOURS: "24"
---
# Jobservice config file; nesting of the embedded config.yml follows the
# upstream Harbor chart layout (worker_pool/redis_pool, metric, reaper).
apiVersion: v1
kind: ConfigMap
metadata:
  name: "harbor-jobservice"
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
data:
  config.yml: |
    #Server listening port
    protocol: "http"
    port: 8080
    worker_pool:
      workers: 10
      backend: "redis"
      redis_pool:
        redis_url: "redis://harbor-valkey.harbor:6379/1"
        namespace: "harbor_job_service_namespace"
        idle_timeout_second: 3600
    job_loggers:
      - name: "STD_OUTPUT"
        level: INFO
    metric:
      enabled: true
      path: /metrics
      port: 8001
    #Loggers for the job service
    loggers:
      - name: "STD_OUTPUT"
        level: INFO
    reaper:
      # the max time to wait for a task to finish, if unfinished after max_update_hours, the task will be mark as error, but the task will continue to run, default value is 24
      max_update_hours: 24
      # the max time for execution in running state without new task created
      max_dangling_hours: 168
---
# Nginx configuration for the Harbor portal (static SPA served on 8080).
apiVersion: v1
kind: ConfigMap
metadata:
  name: "harbor-portal"
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
data:
  nginx.conf: |
    worker_processes auto;
    pid /tmp/nginx.pid;
    events {
      worker_connections 1024;
    }
    http {
      client_body_temp_path /tmp/client_body_temp;
      proxy_temp_path /tmp/proxy_temp;
      fastcgi_temp_path /tmp/fastcgi_temp;
      uwsgi_temp_path /tmp/uwsgi_temp;
      scgi_temp_path /tmp/scgi_temp;
      server {
        listen 8080;
        server_name localhost;
        root /usr/share/nginx/html;
        index index.html index.htm;
        include /etc/nginx/mime.types;
        gzip on;
        gzip_min_length 1000;
        gzip_proxied expired no-cache no-store private auth;
        gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript;
        location /devcenter-api-2.0 {
          try_files $uri $uri/ /swagger-ui-index.html;
        }
        location / {
          try_files $uri $uri/ /index.html;
        }
        location = /index.html {
          add_header Cache-Control "no-store, no-cache, must-revalidate";
        }
      }
    }
---
# Docker distribution (registry) config plus the registryctl config; the
# embedded config.yml nesting follows the upstream Harbor chart layout.
apiVersion: v1
kind: ConfigMap
metadata:
  name: "harbor-registry"
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
data:
  config.yml: |
    version: 0.1
    log:
      level: info
      fields:
        service: registry
    storage:
      filesystem:
        rootdirectory: /storage
      cache:
        layerinfo: redis
      maintenance:
        uploadpurging:
          enabled: true
          age: 72h
          interval: 24h
          dryrun: false
      delete:
        enabled: true
      redirect:
        disable: false
    redis:
      addr: harbor-valkey.harbor:6379
      db: 2
      readtimeout: 10s
      writetimeout: 10s
      dialtimeout: 10s
      enableTLS: false
      pool:
        maxidle: 100
        maxactive: 500
        idletimeout: 60s
    http:
      addr: :5000
      relativeurls: true
      # set via environment variable
      # secret: placeholder
      debug:
        addr: :8001
        prometheus:
          enabled: true
          path: /metrics
    auth:
      htpasswd:
        realm: harbor-registry-basic-realm
        path: /etc/registry/passwd
    validation:
      disabled: true
    compatibility:
      schema1:
        enabled: true
  ctl-config.yml: |
    ---
    protocol: "http"
    port: 8080
    log_level: info
    registry_config: "/etc/registry/config.yml"
---
# Environment for registryctl. Intentionally empty: the Deployment still
# references it via envFrom/configMapRef, so the object must exist.
apiVersion: v1
kind: ConfigMap
metadata:
  name: "harbor-registryctl"
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
# A bare `data:` key parses as null; use an explicit empty map instead.
data: {}
---
# Init script for the Valkey StatefulSet: generates valkey.conf, electing
# pod index 0 as master and pointing all other indices at it as replicas.
apiVersion: v1
kind: ConfigMap
metadata:
  name: harbor-valkey-init-scripts
  # NOTE(review): namespace was absent on this resource while every sibling
  # pins "harbor", and consumers address harbor-valkey.harbor — added to match.
  namespace: harbor
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
data:
  init.sh: |-
    #!/bin/sh
    set -eu
    # Default config paths
    VALKEY_CONFIG=${VALKEY_CONFIG_PATH:-/data/conf/valkey.conf}
    LOGFILE="/data/init.log"
    DATA_DIR="/data/conf"
    # Logging function (outputs to stderr and file)
    log() {
      echo "$(date) $1" | tee -a "$LOGFILE" >&2
    }
    # Clean old log if requested
    if [ "${KEEP_OLD_LOGS:-false}" != "true" ]; then
      rm -f "$LOGFILE"
    fi
    if [ -f "$LOGFILE" ]; then
      log "Detected restart of this instance ($HOSTNAME)"
    fi
    log "Creating configuration in $DATA_DIR..."
    mkdir -p "$DATA_DIR"
    rm -f "$VALKEY_CONFIG"
    # Base valkey.conf
    log "Generating base valkey.conf"
    {
      echo "port 6379"
      echo "protected-mode no"
      echo "bind * -::*"
      echo "dir /data"
    } >>"$VALKEY_CONFIG"
    # Replica mode configuration
    log "Configuring replication mode"
    # Use POD_INDEX from Kubernetes metadata
    POD_INDEX=${POD_INDEX:-0}
    IS_MASTER=false
    # Check if this is pod-0 (master)
    if [ "$POD_INDEX" = "0" ]; then
      IS_MASTER=true
      log "This pod (index $POD_INDEX) is configured as MASTER"
    else
      log "This pod (index $POD_INDEX) is configured as REPLICA"
    fi
    # Configure replica settings
    if [ "$IS_MASTER" = "false" ]; then
      MASTER_HOST="harbor-valkey-0.harbor-valkey-headless.harbor.svc.cluster.local"
      MASTER_PORT="6379"
      log "Configuring replica to follow master at $MASTER_HOST:$MASTER_PORT"
      {
        echo ""
        echo "# Replica Configuration"
        echo "replicaof $MASTER_HOST $MASTER_PORT"
        echo "replica-announce-ip harbor-valkey-$POD_INDEX.harbor-valkey-headless.harbor.svc.cluster.local"
      } >>"$VALKEY_CONFIG"
    fi
    # Append extra configs if present
    if [ -f /usr/local/etc/valkey/valkey.conf ]; then
      log "Appending /usr/local/etc/valkey/valkey.conf"
      cat /usr/local/etc/valkey/valkey.conf >>"$VALKEY_CONFIG"
    fi
    if [ -d /extravalkeyconfigs ]; then
      log "Appending files in /extravalkeyconfigs/"
      cat /extravalkeyconfigs/* >>"$VALKEY_CONFIG"
    fi
---
# Harbor core API server: 2 replicas, config via ConfigMap/Secret envFrom,
# plus mounted app.conf, encryption key, and token-signing private key.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: harbor-core
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
    component: core
    app.kubernetes.io/component: core
spec:
  replicas: 2
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      release: harbor
      app: "harbor"
      component: core
  template:
    metadata:
      labels:
        heritage: Helm
        release: harbor
        chart: harbor
        app: "harbor"
        app.kubernetes.io/instance: harbor
        app.kubernetes.io/name: harbor
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/part-of: harbor
        app.kubernetes.io/version: "2.14.2"
        component: core
        app.kubernetes.io/component: core
      annotations:
        # Checksums force a rollout when the referenced config/secrets change.
        checksum/configmap: 1cd92d6ad655a3c1d33e18b68b16811a6876c079f89d8e606ae522d6c43f1278
        checksum/secret: ea96175d9d13685272268c622197e7f602ef7f436c55dafd8e625f468586290d
        checksum/secret-jobservice: d997909ea151f04cd3c6ce0da2b1e4b09ab6eecdc3d048c0d3aedf21b23285ba
    spec:
      securityContext:
        runAsUser: 10000
        fsGroup: 10000
      automountServiceAccountToken: false
      terminationGracePeriodSeconds: 120
      containers:
        - name: core
          image: goharbor/harbor-core:v2.14.3
          imagePullPolicy: IfNotPresent
          startupProbe:
            httpGet:
              path: /api/v2.0/ping
              scheme: HTTP
              port: 8080
            failureThreshold: 360
            initialDelaySeconds: 10
            periodSeconds: 10
          livenessProbe:
            httpGet:
              path: /api/v2.0/ping
              scheme: HTTP
              port: 8080
            failureThreshold: 2
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /api/v2.0/ping
              scheme: HTTP
              port: 8080
            failureThreshold: 2
            periodSeconds: 10
          envFrom:
            - configMapRef:
                name: "harbor-core"
            - secretRef:
                name: "harbor-core"
          env:
            - name: CORE_SECRET
              valueFrom:
                secretKeyRef:
                  name: harbor-secret
                  key: secret
            - name: JOBSERVICE_SECRET
              valueFrom:
                secretKeyRef:
                  name: harbor-secret
                  key: JOBSERVICE_SECRET
            - name: HARBOR_ADMIN_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: harbor-secret
                  key: HARBOR_ADMIN_PASSWORD
            - name: POSTGRESQL_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: harbor-postgresql-18-cluster-app
                  key: password
            - name: REGISTRY_CREDENTIAL_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: harbor-secret
                  key: REGISTRY_PASSWD
            - name: CSRF_KEY
              valueFrom:
                secretKeyRef:
                  name: harbor-secret
                  key: CSRF_KEY
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            privileged: false
            runAsNonRoot: true
            seccompProfile:
              type: RuntimeDefault
          ports:
            - containerPort: 8080
          volumeMounts:
            - name: config
              mountPath: /etc/core/app.conf
              subPath: app.conf
            - name: secret-key
              mountPath: /etc/core/key
              subPath: key
            - name: token-service-private-key
              mountPath: /etc/core/private_key.pem
              subPath: tls.key
            - name: psc
              mountPath: /etc/core/token
      volumes:
        - name: config
          configMap:
            name: harbor-core
            items:
              - key: app.conf
                path: app.conf
        - name: secret-key
          secret:
            secretName: harbor-secret
            items:
              - key: secretKey
                path: key
        - name: token-service-private-key
          secret:
            secretName: harbor-secret
        - name: psc
          emptyDir: {}
---
# Harbor metrics exporter: scrapes core/DB and exposes Prometheus metrics on 8001.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: harbor-exporter
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
    component: exporter
    app.kubernetes.io/component: exporter
spec:
  replicas: 2
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      release: harbor
      app: "harbor"
      component: exporter
  template:
    metadata:
      labels:
        heritage: Helm
        release: harbor
        chart: harbor
        app: "harbor"
        app.kubernetes.io/instance: harbor
        app.kubernetes.io/name: harbor
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/part-of: harbor
        app.kubernetes.io/version: "2.14.2"
        component: exporter
        app.kubernetes.io/component: exporter
      annotations:
        checksum/configmap: c04bce2b69f581921ad9549dc4f41bf335ff506bfe8ce654eb45400623e6f462
        checksum/secret: 8905c1701a3ab4bcddb0c66b5428b18255fa8d3f4d1ff790897c401abd04485f
    spec:
      securityContext:
        runAsUser: 10000
        fsGroup: 10000
      automountServiceAccountToken: false
      containers:
        - name: exporter
          image: goharbor/harbor-exporter:v2.14.3
          imagePullPolicy: IfNotPresent
          livenessProbe:
            httpGet:
              path: /
              port: 8001
            initialDelaySeconds: 300
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /
              port: 8001
            initialDelaySeconds: 30
            periodSeconds: 10
          args: ["-log-level", "info"]
          envFrom:
            - configMapRef:
                name: "harbor-exporter-env"
            - secretRef:
                name: "harbor-exporter"
          env:
            - name: HARBOR_DATABASE_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: harbor-postgresql-18-cluster-app
                  key: password
            - name: HARBOR_ADMIN_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: harbor-secret
                  key: HARBOR_ADMIN_PASSWORD
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            privileged: false
            runAsNonRoot: true
            seccompProfile:
              type: RuntimeDefault
          ports:
            - containerPort: 8001
          # Originally a bare `volumeMounts:` key (parses as null); made explicit.
          volumeMounts: []
      volumes:
        # NOTE(review): this volume is declared but never mounted by any
        # container — presumably chart residue; confirm before removing.
        - name: config
          secret:
            secretName: "harbor-exporter"
---
# Harbor jobservice: Recreate strategy (shared job-log semantics), 2 replicas.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: "harbor-jobservice"
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
    component: jobservice
    app.kubernetes.io/component: jobservice
spec:
  replicas: 2
  revisionHistoryLimit: 10
  strategy:
    type: Recreate
    rollingUpdate: null
  selector:
    matchLabels:
      release: harbor
      app: "harbor"
      component: jobservice
  template:
    metadata:
      labels:
        heritage: Helm
        release: harbor
        chart: harbor
        app: "harbor"
        app.kubernetes.io/instance: harbor
        app.kubernetes.io/name: harbor
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/part-of: harbor
        app.kubernetes.io/version: "2.14.2"
        component: jobservice
        app.kubernetes.io/component: jobservice
      annotations:
        checksum/configmap: c6704bb870a06299b2ffcf517be5d3ce2b8584f784ee45924ffd5c1e7c3d7d88
        checksum/configmap-env: b3e100f4d68eea17ef740a80b17978219f5b92eb30014551a4eadfebadfe70f1
        checksum/secret: d997909ea151f04cd3c6ce0da2b1e4b09ab6eecdc3d048c0d3aedf21b23285ba
        checksum/secret-core: ea96175d9d13685272268c622197e7f602ef7f436c55dafd8e625f468586290d
    spec:
      securityContext:
        runAsUser: 10000
        fsGroup: 10000
      automountServiceAccountToken: false
      terminationGracePeriodSeconds: 120
      containers:
        - name: jobservice
          image: goharbor/harbor-jobservice:v2.14.3
          imagePullPolicy: IfNotPresent
          livenessProbe:
            httpGet:
              path: /api/v1/stats
              scheme: HTTP
              port: 8080
            initialDelaySeconds: 300
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /api/v1/stats
              scheme: HTTP
              port: 8080
            initialDelaySeconds: 20
            periodSeconds: 10
          env:
            - name: CORE_SECRET
              valueFrom:
                secretKeyRef:
                  name: harbor-secret
                  key: secret
            - name: JOBSERVICE_SECRET
              valueFrom:
                secretKeyRef:
                  name: harbor-secret
                  key: JOBSERVICE_SECRET
            - name: REGISTRY_CREDENTIAL_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: harbor-secret
                  key: REGISTRY_PASSWD
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            privileged: false
            runAsNonRoot: true
            seccompProfile:
              type: RuntimeDefault
          envFrom:
            - configMapRef:
                name: "harbor-jobservice-env"
            - secretRef:
                name: "harbor-jobservice"
          ports:
            - containerPort: 8080
          volumeMounts:
            - name: jobservice-config
              mountPath: /etc/jobservice/config.yml
              subPath: config.yml
            - name: job-logs
              mountPath: /var/log/jobs
              # Dropped the original bare `subPath:` (null) — subPath must be a
              # string when present; omitting it mounts the volume root.
      volumes:
        - name: jobservice-config
          configMap:
            name: "harbor-jobservice"
        - name: job-logs
          emptyDir: {}
---
# Harbor portal (static UI behind nginx), 2 replicas.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: "harbor-portal"
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
    component: portal
    app.kubernetes.io/component: portal
spec:
  replicas: 2
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      release: harbor
      app: "harbor"
      component: portal
  template:
    metadata:
      labels:
        heritage: Helm
        release: harbor
        chart: harbor
        app: "harbor"
        app.kubernetes.io/instance: harbor
        app.kubernetes.io/name: harbor
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/part-of: harbor
        app.kubernetes.io/version: "2.14.2"
        component: portal
        app.kubernetes.io/component: portal
      annotations:
        checksum/configmap: c5a352b51b1e4ca91935bfbccfbd62b94d18bc6ac132bcec6ae03d78cdb3a6f2
    spec:
      securityContext:
        runAsUser: 10000
        fsGroup: 10000
      automountServiceAccountToken: false
      containers:
        - name: portal
          image: goharbor/harbor-portal:v2.14.3
          imagePullPolicy: IfNotPresent
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            privileged: false
            runAsNonRoot: true
            seccompProfile:
              type: RuntimeDefault
          livenessProbe:
            httpGet:
              path: /
              scheme: HTTP
              port: 8080
            initialDelaySeconds: 300
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /
              scheme: HTTP
              port: 8080
            initialDelaySeconds: 1
            periodSeconds: 10
          ports:
            - containerPort: 8080
          volumeMounts:
            - name: portal-config
              mountPath: /etc/nginx/nginx.conf
              subPath: nginx.conf
      volumes:
        - name: portal-config
          configMap:
            name: "harbor-portal"
---
# Registry + registryctl sharing one PVC-backed /storage; Recreate strategy
# because the filesystem storage backend cannot be shared across rollouts.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: "harbor-registry"
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
    component: registry
    app.kubernetes.io/component: registry
spec:
  replicas: 1
  revisionHistoryLimit: 10
  strategy:
    type: Recreate
    rollingUpdate: null
  selector:
    matchLabels:
      release: harbor
      app: "harbor"
      component: registry
  template:
    metadata:
      labels:
        heritage: Helm
        release: harbor
        chart: harbor
        app: "harbor"
        app.kubernetes.io/instance: harbor
        app.kubernetes.io/name: harbor
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/part-of: harbor
        app.kubernetes.io/version: "2.14.2"
        component: registry
        app.kubernetes.io/component: registry
      annotations:
        checksum/configmap: a765dd6fb9d5e9f003853b3f23a72a47461637085500e3c3c24759916a6f5f34
        checksum/secret: 532881bd1d1d6c57ab80f3b04a516655345de38da400ba7e22511f263226bd48
        checksum/secret-jobservice: d997909ea151f04cd3c6ce0da2b1e4b09ab6eecdc3d048c0d3aedf21b23285ba
        checksum/secret-core: ea96175d9d13685272268c622197e7f602ef7f436c55dafd8e625f468586290d
    spec:
      securityContext:
        runAsUser: 10000
        fsGroup: 10000
        fsGroupChangePolicy: OnRootMismatch
      automountServiceAccountToken: false
      terminationGracePeriodSeconds: 120
      containers:
        - name: registry
          image: goharbor/registry-photon:v2.14.3
          imagePullPolicy: IfNotPresent
          livenessProbe:
            httpGet:
              path: /
              scheme: HTTP
              port: 5000
            initialDelaySeconds: 300
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /
              scheme: HTTP
              port: 5000
            initialDelaySeconds: 1
            periodSeconds: 10
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            privileged: false
            runAsNonRoot: true
            seccompProfile:
              type: RuntimeDefault
          envFrom:
            - secretRef:
                name: "harbor-registry"
          env:
            - name: REGISTRY_HTTP_SECRET
              valueFrom:
                secretKeyRef:
                  name: harbor-secret
                  key: REGISTRY_HTTP_SECRET
          ports:
            - containerPort: 5000
            - containerPort: 8001
          volumeMounts:
            # Dropped the original bare `subPath:` (null) on this mount —
            # subPath must be a string when present; omit to mount the root.
            - name: registry-data
              mountPath: /storage
            - name: registry-htpasswd
              mountPath: /etc/registry/passwd
              subPath: passwd
            - name: registry-config
              mountPath: /etc/registry/config.yml
              subPath: config.yml
        - name: registryctl
          image: goharbor/harbor-registryctl:v2.14.3
          imagePullPolicy: IfNotPresent
          livenessProbe:
            httpGet:
              path: /api/health
              scheme: HTTP
              port: 8080
            initialDelaySeconds: 300
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /api/health
              scheme: HTTP
              port: 8080
            initialDelaySeconds: 1
            periodSeconds: 10
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            privileged: false
            runAsNonRoot: true
            seccompProfile:
              type: RuntimeDefault
          envFrom:
            - configMapRef:
                name: "harbor-registryctl"
            - secretRef:
                name: "harbor-registry"
            - secretRef:
                name: "harbor-registryctl"
          env:
            - name: REGISTRY_HTTP_SECRET
              valueFrom:
                secretKeyRef:
                  name: harbor-secret
                  key: REGISTRY_HTTP_SECRET
            - name: CORE_SECRET
              valueFrom:
                secretKeyRef:
                  name: harbor-secret
                  key: secret
            - name: JOBSERVICE_SECRET
              valueFrom:
                secretKeyRef:
                  name: harbor-secret
                  key: JOBSERVICE_SECRET
          ports:
            - containerPort: 8080
          volumeMounts:
            # Dropped the original bare `subPath:` (null) here as well.
            - name: registry-data
              mountPath: /storage
            - name: registry-config
              mountPath: /etc/registry/config.yml
              subPath: config.yml
            - name: registry-config
              mountPath: /etc/registryctl/config.yml
              subPath: ctl-config.yml
      volumes:
        - name: registry-htpasswd
          secret:
            secretName: harbor-secret
            items:
              - key: REGISTRY_HTPASSWD
                path: passwd
        - name: registry-config
          configMap:
            name: "harbor-registry"
        - name: registry-data
          persistentVolumeClaim:
            claimName: harbor-registry
---
# S3 credentials for the backup ObjectStore, synced from Vault.
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: harbor-postgresql-18-backup-garage-local-secret
  namespace: harbor
  labels:
    helm.sh/chart: postgres-18-cluster-7.9.1
    # The original emitted app.kubernetes.io/name twice (invalid duplicate
    # mapping key; parsers silently keep the last). Kept the effective value.
    app.kubernetes.io/name: harbor-postgresql-18-backup-garage-local-secret
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "7.9.1"
    app.kubernetes.io/managed-by: Helm
spec:
  secretStoreRef:
    kind: ClusterSecretStore
    name: vault
  data:
    - secretKey: ACCESS_REGION
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /garage/home-infra/postgres-backups
        metadataPolicy: None
        property: ACCESS_REGION
    - secretKey: ACCESS_KEY_ID
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /garage/home-infra/postgres-backups
        metadataPolicy: None
        property: ACCESS_KEY_ID
    - secretKey: ACCESS_SECRET_KEY
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /garage/home-infra/postgres-backups
        metadataPolicy: None
        property: ACCESS_SECRET_KEY
---
# S3 credentials for the recovery ObjectStore, synced from Vault.
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: harbor-postgresql-18-recovery-secret
  namespace: harbor
  labels:
    helm.sh/chart: postgres-18-cluster-7.9.1
    # The original emitted app.kubernetes.io/name twice (invalid duplicate
    # mapping key; parsers silently keep the last). Kept the effective value.
    app.kubernetes.io/name: harbor-postgresql-18-recovery-secret
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "7.9.1"
    app.kubernetes.io/managed-by: Helm
spec:
  secretStoreRef:
    kind: ClusterSecretStore
    name: vault
  data:
    - secretKey: ACCESS_REGION
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /garage/home-infra/postgres-backups
        metadataPolicy: None
        property: ACCESS_REGION
    - secretKey: ACCESS_KEY_ID
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /garage/home-infra/postgres-backups
        metadataPolicy: None
        property: ACCESS_KEY_ID
    - secretKey: ACCESS_SECRET_KEY
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /garage/home-infra/postgres-backups
        metadataPolicy: None
        property: ACCESS_SECRET_KEY
---
# Aggregated Harbor secrets synced from Vault into one Secret (harbor-secret).
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: harbor-secret
  namespace: harbor
  labels:
    app.kubernetes.io/name: harbor-secret
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/part-of: harbor
spec:
  secretStoreRef:
    kind: ClusterSecretStore
    name: vault
  data:
    - secretKey: HARBOR_ADMIN_PASSWORD
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/harbor/config
        metadataPolicy: None
        property: admin-password
    - secretKey: secretKey
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/harbor/config
        metadataPolicy: None
        property: secretKey
    - secretKey: CSRF_KEY
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/harbor/core
        metadataPolicy: None
        property: CSRF_KEY
    - secretKey: secret
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/harbor/core
        metadataPolicy: None
        property: secret
    - secretKey: tls.crt
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/harbor/core
        metadataPolicy: None
        property: tls.crt
    - secretKey: tls.key
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/harbor/core
        metadataPolicy: None
        property: tls.key
    - secretKey: JOBSERVICE_SECRET
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/harbor/jobservice
        metadataPolicy: None
        property: JOBSERVICE_SECRET
    - secretKey: REGISTRY_HTTP_SECRET
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/harbor/registry
        metadataPolicy: None
        property: REGISTRY_HTTP_SECRET
    - secretKey: REGISTRY_REDIS_PASSWORD
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/harbor/registry
        metadataPolicy: None
        property: REGISTRY_REDIS_PASSWORD
    - secretKey: REGISTRY_HTPASSWD
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/harbor/registry
        metadataPolicy: None
        property: REGISTRY_HTPASSWD
    - secretKey: REGISTRY_CREDENTIAL_PASSWORD
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/harbor/registry
        metadataPolicy: None
        property: REGISTRY_CREDENTIAL_PASSWORD
    # NOTE(review): REGISTRY_PASSWD deliberately aliases the same Vault
    # property as REGISTRY_CREDENTIAL_PASSWORD (deployments reference the
    # REGISTRY_PASSWD key) — presumably intentional; confirm before changing.
    - secretKey: REGISTRY_PASSWD
      remoteRef:
        conversionStrategy: Default
        decodingStrategy: None
        key: /cl01tl/harbor/registry
        metadataPolicy: None
        property: REGISTRY_CREDENTIAL_PASSWORD
---
# Gateway API route: API/token/v2/controller paths to harbor-core,
# everything else to the portal.
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: "harbor-route"
  namespace: "harbor"
spec:
  parentRefs:
    - group: gateway.networking.k8s.io
      kind: Gateway
      name: traefik-gateway
      namespace: traefik
  hostnames:
    - harbor.alexlebens.net
  rules:
    - matches:
        - path:
            type: PathPrefix
            value: /api/
        - path:
            type: PathPrefix
            value: /service/
        - path:
            type: PathPrefix
            value: /v2/
        - path:
            type: PathPrefix
            value: /c/
      backendRefs:
        - name: harbor-core
          namespace: "harbor"
          port: 80
    - matches:
        - path:
            type: PathPrefix
            value: /
      backendRefs:
        - name: harbor-portal
          namespace: "harbor"
          port: 80
---
# Pre-upgrade Helm hook: runs harbor_core in migrate mode against the DB.
apiVersion: batch/v1
kind: Job
metadata:
  name: migration-job
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
    component: migrator
  annotations:
    "helm.sh/hook": pre-upgrade
    "helm.sh/hook-weight": "-5"
spec:
  template:
    metadata:
      labels:
        release: harbor
        app: "harbor"
        component: migrator
    spec:
      restartPolicy: Never
      securityContext:
        runAsUser: 10000
        fsGroup: 10000
      terminationGracePeriodSeconds: 120
      containers:
        - name: core-job
          image: goharbor/harbor-core:v2.14.3
          imagePullPolicy: IfNotPresent
          command: ["/harbor/harbor_core", "-mode=migrate"]
          envFrom:
            - configMapRef:
                name: "harbor-core"
            - secretRef:
                name: "harbor-core"
          env:
            - name: POSTGRESQL_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: harbor-postgresql-18-cluster-app
                  key: password
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            privileged: false
            runAsNonRoot: true
            seccompProfile:
              type: RuntimeDefault
          volumeMounts:
            - name: config
              mountPath: /etc/core/app.conf
              subPath: app.conf
      volumes:
        - name: config
          configMap:
            name: harbor-core
            items:
              - key: app.conf
                path: app.conf
---
# ObjectStore: Barman Cloud backup target for harbor-postgresql-18-cluster,
# pointing at the local Garage S3 endpoint. Referenced by the Cluster's
# barman-cloud plugin and by the ScheduledBackup (barmanObjectName).
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: harbor-postgresql-18-backup-garage-local
  namespace: harbor
  labels:
    helm.sh/chart: postgres-18-cluster-7.9.1
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "7.9.1"
    app.kubernetes.io/managed-by: Helm
    # Fix: app.kubernetes.io/name appeared twice (duplicate mapping key, invalid
    # YAML); kept the last value, which is what last-wins parsers used anyway.
    app.kubernetes.io/name: harbor-postgresql-18-backup-garage-local
spec:
  retentionPolicy: 7d
  instanceSidecarConfiguration:
    env:
      # Garage does not support the newer AWS checksum negotiation; only
      # compute/validate checksums when the operation requires it.
      - name: AWS_REQUEST_CHECKSUM_CALCULATION
        value: when_required
      - name: AWS_RESPONSE_CHECKSUM_VALIDATION
        value: when_required
  configuration:
    destinationPath: s3://postgres-backups/cl01tl/harbor/harbor-postgresql-18-cluster
    endpointURL: http://garage-main.garage:3900
    s3Credentials:
      accessKeyId:
        name: harbor-postgresql-18-backup-garage-local-secret
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: harbor-postgresql-18-backup-garage-local-secret
        key: ACCESS_SECRET_KEY
      region:
        name: harbor-postgresql-18-backup-garage-local-secret
        key: ACCESS_REGION
---
# ObjectStore: Barman Cloud source used by the Cluster's bootstrap.recovery
# externalCluster (harbor-postgresql-18-backup-2). Same bucket/endpoint as the
# live backup store; read-only from the cluster's perspective.
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
  name: "harbor-postgresql-18-recovery"
  namespace: harbor
  labels:
    helm.sh/chart: postgres-18-cluster-7.9.1
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "7.9.1"
    app.kubernetes.io/managed-by: Helm
    # Fix: app.kubernetes.io/name appeared twice (duplicate mapping key, invalid
    # YAML); kept the last value, which is what last-wins parsers used anyway.
    app.kubernetes.io/name: "harbor-postgresql-18-recovery"
spec:
  configuration:
    destinationPath: s3://postgres-backups/cl01tl/harbor/harbor-postgresql-18-cluster
    endpointURL: http://garage-main.garage:3900
    wal:
      compression: snappy
      maxParallel: 1
    data:
      compression: snappy
      jobs: 1
    s3Credentials:
      accessKeyId:
        name: harbor-postgresql-18-recovery-secret
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: harbor-postgresql-18-recovery-secret
        key: ACCESS_SECRET_KEY
      region:
        name: harbor-postgresql-18-recovery-secret
        key: ACCESS_REGION
---
# PVC: 100Gi ceph-block volume backing the Harbor registry image storage.
# helm.sh/resource-policy: keep prevents Helm from deleting it on uninstall,
# so image blobs survive a release teardown.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-registry
  namespace: "harbor"
  annotations:
    helm.sh/resource-policy: keep
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
    component: registry
    app.kubernetes.io/component: registry
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
  storageClassName: ceph-block
---
# PodMonitor: scrapes the Valkey pods' "metrics" port every 30s.
# NOTE(review): metadata.namespace is omitted (unlike the PrometheusRule below);
# presumably it lands in the release namespace at apply time — confirm, since
# namespaceSelector already pins scraping to "harbor".
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: harbor-valkey
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: valkey
    app.kubernetes.io/component: podmonitor
spec:
  podMetricsEndpoints:
    - port: metrics
      interval: 30s
  namespaceSelector:
    matchNames:
      - harbor
  selector:
    matchLabels:
      app.kubernetes.io/name: valkey
      app.kubernetes.io/instance: harbor
---
# PrometheusRule: CloudNativePG alerting rules for harbor-postgresql-18-cluster.
# Only annotation wording was fixed in this revision (typos in alert
# descriptions); every expr, for, and label is unchanged.
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: harbor-postgresql-18-alert-rules
  namespace: harbor
  labels:
    helm.sh/chart: postgres-18-cluster-7.9.1
    app.kubernetes.io/name: harbor-postgresql-18
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "7.9.1"
    app.kubernetes.io/managed-by: Helm
spec:
  groups:
    - name: cloudnative-pg/harbor-postgresql-18
      rules:
      - alert: CNPGClusterBackendsWaitingWarning
        annotations:
          summary: CNPG Cluster a backend is waiting for longer than 5 minutes.
          description: |-
            Pod {{ $labels.pod }}
            has been waiting for longer than 5 minutes
        expr: |
          cnpg_backends_waiting_total{namespace="harbor"} > 300
        for: 1m
        labels:
          severity: warning
          namespace: harbor
          cnpg_cluster: harbor-postgresql-18-cluster
      - alert: CNPGClusterDatabaseDeadlockConflictsWarning
        annotations:
          summary: CNPG Cluster has over 10 deadlock conflicts.
          description: |-
            There are over 10 deadlock conflicts in
            {{ $labels.pod }}
        expr: |
          cnpg_pg_stat_database_deadlocks{namespace="harbor"} > 10
        for: 1m
        labels:
          severity: warning
          namespace: harbor
          cnpg_cluster: harbor-postgresql-18-cluster
      - alert: CNPGClusterHACritical
        annotations:
          summary: CNPG Cluster has no standby replicas!
          description: |-
            CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at a severe
            risk of data loss and downtime if the primary instance fails.
            The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
            will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
            This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or less
            instances. The replaced instance may need some time to catch-up with the cluster primary instance.
            This alarm will always be triggered if your cluster is configured to run with only 1 instance. In this
            case you may want to silence it.
          runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
        expr: |
          max by (job) (cnpg_pg_replication_streaming_replicas{namespace="harbor"} - cnpg_pg_replication_is_wal_receiver_up{namespace="harbor"}) < 1
        for: 5m
        labels:
          severity: critical
          namespace: harbor
          cnpg_cluster: harbor-postgresql-18-cluster
      - alert: CNPGClusterHAWarning
        annotations:
          summary: CNPG Cluster less than 2 standby replicas.
          description: |-
            CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
            your cluster at risk if another instance fails. The cluster is still able to operate normally, although
            the `-ro` and `-r` endpoints operate at reduced capacity.
            This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
            need some time to catch-up with the cluster primary instance.
            This alarm will be constantly triggered if your cluster is configured to run with less than 3 instances.
            In this case you may want to silence it.
          runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
        expr: |
          max by (job) (cnpg_pg_replication_streaming_replicas{namespace="harbor"} - cnpg_pg_replication_is_wal_receiver_up{namespace="harbor"}) < 2
        for: 5m
        labels:
          severity: warning
          namespace: harbor
          cnpg_cluster: harbor-postgresql-18-cluster
      - alert: CNPGClusterHighConnectionsCritical
        annotations:
          summary: CNPG Instance maximum number of connections critical!
          description: |-
            CloudNativePG Cluster "harbor/harbor-postgresql-18-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
            the maximum number of connections.
          runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
        expr: |
          sum by (pod) (cnpg_backends_total{namespace="harbor", pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="harbor", pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"}) * 100 > 95
        for: 5m
        labels:
          severity: critical
          namespace: harbor
          cnpg_cluster: harbor-postgresql-18-cluster
      - alert: CNPGClusterHighConnectionsWarning
        annotations:
          summary: CNPG Instance is approaching the maximum number of connections.
          description: |-
            CloudNativePG Cluster "harbor/harbor-postgresql-18-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
            the maximum number of connections.
          runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
        expr: |
          sum by (pod) (cnpg_backends_total{namespace="harbor", pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="harbor", pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"}) * 100 > 80
        for: 5m
        labels:
          severity: warning
          namespace: harbor
          cnpg_cluster: harbor-postgresql-18-cluster
      - alert: CNPGClusterHighReplicationLag
        annotations:
          summary: CNPG Cluster high replication lag
          description: |-
            CloudNativePG Cluster "harbor/harbor-postgresql-18-cluster" is experiencing a high replication lag of
            {{`{{`}} $value {{`}}`}}ms.
            High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
          runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
        expr: |
          max(cnpg_pg_replication_lag{namespace="harbor",pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
        for: 5m
        labels:
          severity: warning
          namespace: harbor
          cnpg_cluster: harbor-postgresql-18-cluster
      - alert: CNPGClusterInstancesOnSameNode
        annotations:
          summary: CNPG Cluster instances are located on the same node.
          description: |-
            CloudNativePG Cluster "harbor/harbor-postgresql-18-cluster" has {{`{{`}} $value {{`}}`}}
            instances on the same node {{`{{`}} $labels.node {{`}}`}}.
            A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
          runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
        expr: |
          count by (node) (kube_pod_info{namespace="harbor", pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"}) > 1
        for: 5m
        labels:
          severity: warning
          namespace: harbor
          cnpg_cluster: harbor-postgresql-18-cluster
      - alert: CNPGClusterLongRunningTransactionWarning
        annotations:
          summary: CNPG Cluster query is taking longer than 5 minutes.
          description: |-
            CloudNativePG Cluster Pod {{ $labels.pod }}
            is taking more than 5 minutes (300 seconds) for a query.
        expr: |-
          cnpg_backends_max_tx_duration_seconds{namespace="harbor"} > 300
        for: 1m
        labels:
          severity: warning
          namespace: harbor
          cnpg_cluster: harbor-postgresql-18-cluster
      - alert: CNPGClusterLowDiskSpaceCritical
        annotations:
          summary: CNPG Instance is running out of disk space!
          description: |-
            CloudNativePG Cluster "harbor/harbor-postgresql-18-cluster" is running extremely low on disk space. Check attached PVCs!
          runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
        expr: |
          max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"})) > 0.9 OR
          max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
          max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
          /
          sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
          *
          on(namespace, persistentvolumeclaim) group_left(volume)
          kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"}
          ) > 0.9
        for: 5m
        labels:
          severity: critical
          namespace: harbor
          cnpg_cluster: harbor-postgresql-18-cluster
      - alert: CNPGClusterLowDiskSpaceWarning
        annotations:
          summary: CNPG Instance is running out of disk space.
          description: |-
            CloudNativePG Cluster "harbor/harbor-postgresql-18-cluster" is running low on disk space. Check attached PVCs.
          runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
        expr: |
          max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"})) > 0.7 OR
          max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
          max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
          /
          sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$-tbs.*"})
          *
          on(namespace, persistentvolumeclaim) group_left(volume)
          kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"}
          ) > 0.7
        for: 5m
        labels:
          severity: warning
          namespace: harbor
          cnpg_cluster: harbor-postgresql-18-cluster
      - alert: CNPGClusterOffline
        annotations:
          summary: CNPG Cluster has no running instances!
          description: |-
            CloudNativePG Cluster "harbor/harbor-postgresql-18-cluster" has no ready instances.
            Having an offline cluster means your applications will not be able to access the database, leading to
            potential service disruption and/or data loss.
          runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
        expr: |
          (count(cnpg_collector_up{namespace="harbor",pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
        for: 5m
        labels:
          severity: critical
          namespace: harbor
          cnpg_cluster: harbor-postgresql-18-cluster
      - alert: CNPGClusterPGDatabaseXidAgeWarning
        annotations:
          summary: CNPG Cluster has a number of transactions from the frozen XID to the current one.
          description: |-
            Over 300,000,000 transactions from frozen xid
            on pod {{ $labels.pod }}
        expr: |
          cnpg_pg_database_xid_age{namespace="harbor"} > 300000000
        for: 1m
        labels:
          severity: warning
          namespace: harbor
          cnpg_cluster: harbor-postgresql-18-cluster
      - alert: CNPGClusterPGReplicationWarning
        annotations:
          summary: CNPG Cluster standby is lagging behind the primary.
          description: |-
            Standby is lagging behind by over 300 seconds (5 minutes)
        expr: |
          cnpg_pg_replication_lag{namespace="harbor"} > 300
        for: 1m
        labels:
          severity: warning
          namespace: harbor
          cnpg_cluster: harbor-postgresql-18-cluster
      - alert: CNPGClusterReplicaFailingReplicationWarning
        annotations:
          summary: CNPG Cluster has a replica is failing to replicate.
          description: |-
            Replica {{ $labels.pod }}
            is failing to replicate
        expr: |
          cnpg_pg_replication_in_recovery{namespace="harbor"} > cnpg_pg_replication_is_wal_receiver_up{namespace="harbor"}
        for: 1m
        labels:
          severity: warning
          namespace: harbor
          cnpg_cluster: harbor-postgresql-18-cluster
      - alert: CNPGClusterZoneSpreadWarning
        annotations:
          summary: CNPG Cluster instances in the same zone.
          description: |-
            CloudNativePG Cluster "harbor/harbor-postgresql-18-cluster" has instances in the same availability zone.
            A disaster in one availability zone will lead to a potential service disruption and/or data loss.
          runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
        expr: |
          3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="harbor", pod=~"harbor-postgresql-18-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
        for: 5m
        labels:
          severity: warning
          namespace: harbor
          cnpg_cluster: harbor-postgresql-18-cluster
---
# PrometheusRule: availability/memory/eviction alerts for the Valkey instance,
# scraped via the redis_exporter sidecar (hence the redis_* metric names).
# NOTE(review): severity "error" is nonstandard (warning/critical are the usual
# values) — confirm Alertmanager routes match it. "for: 1s" makes the eviction
# alert effectively immediate; presumably intentional.
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: harbor-valkey
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: valkey
spec:
  groups:
    - name: harbor-valkey
      rules:
        - alert: ValkeyDown
          annotations:
            description: Valkey instance {{ $labels.instance }} is down.
            summary: Valkey instance {{ $labels.instance }} down
          expr: |
            redis_up{service="harbor-valkey-metrics"} == 0
          for: 2m
          labels:
            severity: error
        - alert: ValkeyMemoryHigh
          annotations:
            description: |
              Valkey instance {{ $labels.instance }} is using {{ $value }}% of its available memory.
            summary: Valkey instance {{ $labels.instance }} is using too much memory
          expr: |
            redis_memory_used_bytes{service="harbor-valkey-metrics"} * 100
            /
            redis_memory_max_bytes{service="harbor-valkey-metrics"}
            > 90 <= 100
          for: 2m
          labels:
            severity: error
        - alert: ValkeyKeyEviction
          annotations:
            description: |
              Valkey instance {{ $labels.instance }} has evicted {{ $value }} keys in the last 5 minutes.
            summary: Valkey instance {{ $labels.instance }} has evicted keys
          expr: |
            increase(redis_evicted_keys_total{service="harbor-valkey-metrics"}[5m]) > 0
          for: 1s
          labels:
            severity: error
---
# ScheduledBackup: daily backup of harbor-postgresql-18-cluster via the
# barman-cloud plugin into the garage-local ObjectStore.
# Schedule is CNPG's 6-field cron (seconds first): daily at 14:35:00.
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
  name: "harbor-postgresql-18-scheduled-backup-live-backup"
  namespace: harbor
  labels:
    helm.sh/chart: postgres-18-cluster-7.9.1
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "7.9.1"
    app.kubernetes.io/managed-by: Helm
    # Fix: app.kubernetes.io/name appeared twice (duplicate mapping key, invalid
    # YAML); kept the last value, which is what last-wins parsers used anyway.
    app.kubernetes.io/name: "harbor-postgresql-18-scheduled-backup-live-backup"
spec:
  # Take a first backup as soon as the resource is created.
  immediate: true
  suspend: false
  schedule: "0 35 14 * * *"
  backupOwnerReference: self
  cluster:
    name: harbor-postgresql-18-cluster
  method: plugin
  pluginConfiguration:
    name: barman-cloud.cloudnative-pg.io
    parameters:
      barmanObjectName: "harbor-postgresql-18-backup-garage-local"
---
# Secret shell for harbor-core. data is intentionally empty here: the actual
# keys are populated out-of-band (presumably by the ExternalSecret earlier in
# this file — confirm), so the chart renders only the envelope.
apiVersion: v1
kind: Secret
metadata:
  name: harbor-core
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
type: Opaque
data:
---
# Secret shell for harbor-exporter; data left empty (populated out-of-band,
# presumably by an ExternalSecret — confirm).
apiVersion: v1
kind: Secret
metadata:
  name: harbor-exporter
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
type: Opaque
data:
---
# Secret shell for harbor-jobservice; data left empty (populated out-of-band,
# presumably by an ExternalSecret — confirm).
apiVersion: v1
kind: Secret
metadata:
  name: "harbor-jobservice"
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
type: Opaque
data:
---
# Secret for harbor-registry. Only REGISTRY_REDIS_PASSWORD is rendered here,
# as an empty (base64) value — i.e. the Valkey instance is unauthenticated.
# Remaining registry keys are populated out-of-band (presumably by the
# ExternalSecret earlier in this file — confirm).
apiVersion: v1
kind: Secret
metadata:
  name: "harbor-registry"
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
type: Opaque
data:
  REGISTRY_REDIS_PASSWORD: ""
---
# Secret shell for harbor-registryctl; data left empty (no registryctl-specific
# secrets are rendered by this configuration).
apiVersion: v1
kind: Secret
metadata:
  name: "harbor-registryctl"
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
type: Opaque
data:
---
# Secret for the Trivy adapter.
apiVersion: v1
kind: Secret
metadata:
  name: harbor-trivy
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
type: Opaque
data:
  # base64 of "redis://harbor-valkey.harbor:6379/5?idle_timeout_seconds=30"
  # (Valkey DB 5, no auth).
  redisURL: cmVkaXM6Ly9oYXJib3ItdmFsa2V5LmhhcmJvcjo2Mzc5LzU/aWRsZV90aW1lb3V0X3NlY29uZHM9MzA=
  # Empty: no GitHub token configured for Trivy DB downloads (rate limits apply).
  gitHubToken: ""
---
# Service: harbor-core — web/API on 80 -> container 8080, metrics on 8001.
apiVersion: v1
kind: Service
metadata:
  name: harbor-core
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
spec:
  ports:
    - name: http-web
      port: 80
      targetPort: 8080
    - name: http-metrics
      port: 8001
  selector:
    release: harbor
    app: "harbor"
    component: core
---
# Service: harbor-exporter — metrics only (port 8001), scraped by the
# "harbor" ServiceMonitor via the shared http-metrics port name.
apiVersion: v1
kind: Service
metadata:
  name: "harbor-exporter"
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
spec:
  ports:
    - name: http-metrics
      port: 8001
  selector:
    release: harbor
    app: "harbor"
    component: exporter
---
# Service: harbor-jobservice — job API on 80 -> container 8080, metrics on 8001.
apiVersion: v1
kind: Service
metadata:
  name: "harbor-jobservice"
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
spec:
  ports:
    - name: http-jobservice
      port: 80
      targetPort: 8080
    - name: http-metrics
      port: 8001
  selector:
    release: harbor
    app: "harbor"
    component: jobservice
---
# Service: harbor-portal — web UI on 80 -> container 8080; catch-all backend
# of the HTTPRoute above.
apiVersion: v1
kind: Service
metadata:
  name: "harbor-portal"
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
spec:
  ports:
    - port: 80
      targetPort: 8080
  selector:
    release: harbor
    app: "harbor"
    component: portal
---
# Service: harbor-registry — distribution registry (5000), registryctl
# controller (8080), and metrics (8001).
apiVersion: v1
kind: Service
metadata:
  name: "harbor-registry"
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
spec:
  ports:
    - name: http-registry
      port: 5000
    - name: http-controller
      port: 8080
    - name: http-metrics
      port: 8001
  selector:
    release: harbor
    app: "harbor"
    component: registry
---
# Service: harbor-trivy — scanner adapter API on 8080.
apiVersion: v1
kind: Service
metadata:
  name: "harbor-trivy"
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
spec:
  ports:
    - name: http-trivy
      protocol: TCP
      port: 8080
  selector:
    release: harbor
    app: "harbor"
    component: trivy
---
# Headless Service for the Valkey StatefulSet (stable per-pod DNS).
# publishNotReadyAddresses lets pods resolve peers before passing readiness.
apiVersion: v1
kind: Service
metadata:
  name: harbor-valkey-headless
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: headless
spec:
  type: ClusterIP
  clusterIP: None
  publishNotReadyAddresses: true
  ports:
    - name: tcp
      port: 6379
      targetPort: tcp
      protocol: TCP
  selector:
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: harbor
---
# Service: exposes the redis_exporter sidecar (port 9121) for the
# harbor-valkey ServiceMonitor (matches the "metrics" component label).
# Fix: removed a bare "annotations:" key — an empty value parses as null
# (yamllint empty-values); omitting the key is semantically identical.
apiVersion: v1
kind: Service
metadata:
  name: harbor-valkey-metrics
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: metrics
    app.kubernetes.io/part-of: valkey
spec:
  type: ClusterIP
  ports:
    - name: metrics
      port: 9121
      protocol: TCP
      targetPort: metrics
  selector:
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: harbor
---
# Service: load-balances reads across ALL Valkey pods (no pod-name pin,
# unlike the primary service below).
apiVersion: v1
kind: Service
metadata:
  name: harbor-valkey-read
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: read
spec:
  type: ClusterIP
  ports:
    - name: tcp
      port: 6379
      targetPort: tcp
      protocol: TCP
  selector:
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: harbor
---
# Service: write endpoint, pinned to pod harbor-valkey-0 via the
# statefulset.kubernetes.io/pod-name selector — assumes pod 0 is always the
# primary (static topology, no failover re-pointing); confirm with init script.
apiVersion: v1
kind: Service
metadata:
  name: harbor-valkey
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: primary
spec:
  type: ClusterIP
  ports:
    - port: 6379
      targetPort: tcp
      protocol: TCP
      name: tcp
  selector:
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: harbor
    statefulset.kubernetes.io/pod-name: harbor-valkey-0
---
# ServiceAccount for the Valkey pods; token automount disabled (pods do not
# talk to the Kubernetes API).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: harbor-valkey
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
automountServiceAccountToken: false
---
# ServiceMonitor: scrapes the harbor-valkey-metrics Service every 30s.
# NOTE(review): overlaps with the PodMonitor above, which scrapes the same
# exporter port directly — presumably both are intentional; confirm to avoid
# duplicate series.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: harbor-valkey
  labels:
    helm.sh/chart: valkey-0.9.3
    app.kubernetes.io/name: valkey
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/version: "9.0.3"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: valkey
    app.kubernetes.io/component: service-monitor
spec:
  endpoints:
    - port: metrics
      interval: 30s
  namespaceSelector:
    matchNames:
      - harbor
  selector:
    matchLabels:
      app.kubernetes.io/name: valkey
      app.kubernetes.io/instance: harbor
      app.kubernetes.io/component: metrics
---
# ServiceMonitor: scrapes every Harbor Service exposing a port named
# "http-metrics" (core, jobservice, registry, exporter).
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: harbor
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
spec:
  jobLabel: app.kubernetes.io/name
  endpoints:
    - port: http-metrics
      honorLabels: true
  selector:
    matchLabels:
      release: harbor
      app: "harbor"
---
# StatefulSet: single-replica Trivy scanner adapter with a 5Gi cache PVC.
# Fixes in this revision (no behavior change):
#  - removed a bare "subPath:" (null) under the data volumeMount — an unset
#    subPath and a null one mount the whole volume either way;
#  - removed a bare "annotations:" (null) from the volumeClaimTemplate metadata.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: harbor-trivy
  namespace: "harbor"
  labels:
    heritage: Helm
    release: harbor
    chart: harbor
    app: "harbor"
    app.kubernetes.io/instance: harbor
    app.kubernetes.io/name: harbor
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: harbor
    app.kubernetes.io/version: "2.14.2"
    component: trivy
    app.kubernetes.io/component: trivy
spec:
  replicas: 1
  serviceName: harbor-trivy
  selector:
    matchLabels:
      release: harbor
      app: "harbor"
      component: trivy
  template:
    metadata:
      labels:
        heritage: Helm
        release: harbor
        chart: harbor
        app: "harbor"
        app.kubernetes.io/instance: harbor
        app.kubernetes.io/name: harbor
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/part-of: harbor
        app.kubernetes.io/version: "2.14.2"
        component: trivy
        app.kubernetes.io/component: trivy
      annotations:
        # Forces a rollout when the harbor-trivy Secret content changes.
        checksum/secret: d401a2a9815cc5377e43ae9b0746c5c8bed3bbd03489e30594af98d56079f11f
    spec:
      securityContext:
        runAsUser: 10000
        fsGroup: 10000
      automountServiceAccountToken: false
      containers:
        - name: trivy
          image: goharbor/trivy-adapter-photon:v2.14.2
          imagePullPolicy: IfNotPresent
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            privileged: false
            runAsNonRoot: true
            seccompProfile:
              type: RuntimeDefault
          env:
            - name: HTTP_PROXY
              value: ""
            - name: HTTPS_PROXY
              value: ""
            - name: NO_PROXY
              value: "harbor-core,harbor-jobservice,harbor-database,harbor-registry,harbor-portal,harbor-trivy,harbor-exporter,127.0.0.1,localhost,.local,.internal"
            - name: "SCANNER_LOG_LEVEL"
              value: "info"
            - name: "SCANNER_TRIVY_CACHE_DIR"
              value: "/home/scanner/.cache/trivy"
            - name: "SCANNER_TRIVY_REPORTS_DIR"
              value: "/home/scanner/.cache/reports"
            - name: "SCANNER_TRIVY_DEBUG_MODE"
              value: "false"
            - name: "SCANNER_TRIVY_VULN_TYPE"
              value: "os,library"
            - name: "SCANNER_TRIVY_TIMEOUT"
              value: "5m0s"
            - name: "SCANNER_TRIVY_GITHUB_TOKEN"
              valueFrom:
                secretKeyRef:
                  name: harbor-trivy
                  key: gitHubToken
            - name: "SCANNER_TRIVY_SEVERITY"
              value: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL"
            - name: "SCANNER_TRIVY_IGNORE_UNFIXED"
              value: "false"
            - name: "SCANNER_TRIVY_SKIP_UPDATE"
              value: "false"
            - name: "SCANNER_TRIVY_SKIP_JAVA_DB_UPDATE"
              value: "false"
            - name: "SCANNER_TRIVY_DB_REPOSITORY"
              value: "mirror.gcr.io/aquasec/trivy-db,ghcr.io/aquasecurity/trivy-db"
            - name: "SCANNER_TRIVY_JAVA_DB_REPOSITORY"
              value: "mirror.gcr.io/aquasec/trivy-java-db,ghcr.io/aquasecurity/trivy-java-db"
            - name: "SCANNER_TRIVY_OFFLINE_SCAN"
              value: "false"
            - name: "SCANNER_TRIVY_SECURITY_CHECKS"
              value: "vuln"
            - name: "SCANNER_TRIVY_INSECURE"
              value: "false"
            - name: SCANNER_API_SERVER_ADDR
              value: ":8080"
            - name: "SCANNER_REDIS_URL"
              valueFrom:
                secretKeyRef:
                  name: harbor-trivy
                  key: redisURL
            - name: "SCANNER_STORE_REDIS_URL"
              valueFrom:
                secretKeyRef:
                  name: harbor-trivy
                  key: redisURL
            - name: "SCANNER_JOB_QUEUE_REDIS_URL"
              valueFrom:
                secretKeyRef:
                  name: harbor-trivy
                  key: redisURL
          ports:
            - name: api-server
              containerPort: 8080
          volumeMounts:
          - name: data
            mountPath: /home/scanner/.cache
            readOnly: false
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /probe/healthy
              port: api-server
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 10
          readinessProbe:
            httpGet:
              scheme: HTTP
              path: /probe/ready
              port: api-server
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 3
          resources:
            limits:
              cpu: 1
              memory: 1Gi
            requests:
              cpu: 200m
              memory: 512Mi
  volumeClaimTemplates:
  - apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: data
      labels:
        heritage: Helm
        release: harbor
        chart: harbor
        app: "harbor"
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: "5Gi"
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: harbor-valkey
labels:
helm.sh/chart: valkey-0.9.3
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor
app.kubernetes.io/version: "9.0.3"
app.kubernetes.io/managed-by: Helm
spec:
serviceName: harbor-valkey-headless
replicas: 3
podManagementPolicy: OrderedReady
selector:
matchLabels:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor
volumeClaimTemplates:
- metadata:
name: valkey-data
spec:
accessModes:
- ReadWriteOnce
storageClassName: "ceph-block"
resources:
requests:
storage: "1Gi"
template:
metadata:
labels:
app.kubernetes.io/name: valkey
app.kubernetes.io/instance: harbor
annotations:
checksum/initconfig: "0cad4b394241164de6b4d658a977be16"
spec:
automountServiceAccountToken: false
serviceAccountName: harbor-valkey
securityContext:
fsGroup: 1000
runAsGroup: 1000
runAsUser: 1000
initContainers:
- name: harbor-valkey-init
image: docker.io/valkey/valkey:9.0.3
imagePullPolicy: IfNotPresent
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
command: ["/scripts/init.sh"]
env:
- name: POD_INDEX
valueFrom:
fieldRef:
fieldPath: metadata.labels['apps.kubernetes.io/pod-index']
volumeMounts:
- name: valkey-data
mountPath: /data
- name: scripts
mountPath: /scripts
containers:
- name: harbor-valkey
image: docker.io/valkey/valkey:9.0.3
imagePullPolicy: IfNotPresent
command: ["valkey-server"]
args: ["/data/conf/valkey.conf"]
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
env:
- name: POD_INDEX
valueFrom:
fieldRef:
fieldPath: metadata.labels['apps.kubernetes.io/pod-index']
- name: VALKEY_LOGLEVEL
value: "notice"
ports:
- name: tcp
containerPort: 6379
protocol: TCP
startupProbe:
exec:
command: ["sh", "-c", "valkey-cli ping"]
livenessProbe:
exec:
command: ["sh", "-c", "valkey-cli ping"]
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- name: valkey-data
mountPath: /data
- name: metrics
image: ghcr.io/oliver006/redis_exporter:v1.82.0
imagePullPolicy: "IfNotPresent"
ports:
- name: metrics
containerPort: 9121
startupProbe:
tcpSocket:
port: metrics
livenessProbe:
tcpSocket:
port: metrics
readinessProbe:
httpGet:
path: /
port: metrics
resources:
requests:
cpu: 10m
memory: 64M
env:
- name: REDIS_ALIAS
value: harbor-valkey
volumes:
- name: scripts
configMap:
name: harbor-valkey-init-scripts
defaultMode: 0555