# infrastructure/clusters/cl01tl/manifests/harbor/harbor.yaml
---
# Source: harbor/charts/harbor/templates/core/core-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: harbor-core
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
type: Opaque
data:
---
# Source: harbor/charts/harbor/templates/exporter/exporter-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: harbor-exporter
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
type: Opaque
data:
---
# Source: harbor/charts/harbor/templates/jobservice/jobservice-secrets.yaml
apiVersion: v1
kind: Secret
metadata:
name: "harbor-jobservice"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
type: Opaque
data:
---
# Source: harbor/charts/harbor/templates/nginx/secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: harbor-nginx
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
type: Opaque
data:
tls.crt: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURRekNDQWl1Z0F3SUJBZ0lSQVA5MjQ4Ry9hYlZhYVFnZ0RHZjgvRkF3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSmFHRnlZbTl5TFdOaE1CNFhEVEkxTVRJd016QXlNVE15TWxvWERUSTJNVEl3TXpBeQpNVE15TWxvd0lERWVNQndHQTFVRUF4TVZhR0Z5WW05eUxtRnNaWGhzWldKbGJuTXVibVYwTUlJQklqQU5CZ2txCmhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBeHpaNEdHSEExZkxLWVR5amFmNkt5MUN0ZHlKNXk4aEEKVmgzRlhVTWR5cExMWWhpTERGZDhuNlJNdHphenAyOWNKSDREODNoWERnR0ppNEcxWW5zdzRkcTMwS1JxY202WQp3MG5aZEtJbmJhSEhuZ2NwY1ZycDJGVTdLQVBsM1lONUowQ1lqeTNWZmFaaHMwc0RKb3l1L3R1V2FndVJFMFR6ClhaOG1rL1I0cVB6bE1qS05tNFc5eUpLTzEydlNEWHlhTEI4SmRkLzlNWDE4Z0U5OXNMSnhPOTZuRnJoaXBHbGEKQWxZNXZ5WitHc3R5KytaUTd3VnczeTBnZFl3Y2xZaVNaNElNL3JPdkl1dVBnczVxSWFJTnRrMGVNK3lYL2VINgpRVW8vdEVCWkNOR2lTdTVDc3U4SmZuS1RYK05GLzYreG1yOWczVjBQQSs2ZXJxYlZSK296OHdJREFRQUJvNEdECk1JR0FNQTRHQTFVZER3RUIvd1FFQXdJRm9EQWRCZ05WSFNVRUZqQVVCZ2dyQmdFRkJRY0RBUVlJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JSV0ZkWm8rU0hiV0txaDJMd0NFdjRya3BCZQpkREFnQmdOVkhSRUVHVEFYZ2hWb1lYSmliM0l1WVd4bGVHeGxZbVZ1Y3k1dVpYUXdEUVlKS29aSWh2Y05BUUVMCkJRQURnZ0VCQUlyaEJkTXJUcDB2ZENxUW5RdSs4MTZZWlo5SndaaG52ZGZFa013by9Qd0Q0Q3AzLzEyZjVYY00KRmJ1QmxGWWpvYlJmMzcxRTZ5RWkzeXhiL3QwbFVCaUNLYldUM3RSOXBUTVY3dDVXYVhzY1ZJcTg2eWhFcDI5WgpMTWFUUXhBRTN2a0UvL2dXTXp3RVo2aFhUNHVpTU94ZHhKNllwcXdCUXBXMm14cFRSN2JUSnVnNFpXUGhoQTFpCkpMZDlIY0JXeS9Bc2RsYjl6N0dzelFPRnQ0SkEvdERZazdRNFgxaExCV2lvakovcnZSOElkVzR3elZXQkVDejUKVGo0UHQvTnNNZkhhT2VtR1dyWGZFV0FkWWM0RlQ3TnVNSzBwelIvRlJQM3lZRzI4OTFoNG4rckovdUVITmZ3agp2c1FnUzFzTC9McmhlL1M2V1l3WjdGVDQzWVZRSXA0PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="
tls.key: "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBeHpaNEdHSEExZkxLWVR5amFmNkt5MUN0ZHlKNXk4aEFWaDNGWFVNZHlwTExZaGlMCkRGZDhuNlJNdHphenAyOWNKSDREODNoWERnR0ppNEcxWW5zdzRkcTMwS1JxY202WXcwblpkS0luYmFISG5nY3AKY1ZycDJGVTdLQVBsM1lONUowQ1lqeTNWZmFaaHMwc0RKb3l1L3R1V2FndVJFMFR6WFo4bWsvUjRxUHpsTWpLTgptNFc5eUpLTzEydlNEWHlhTEI4SmRkLzlNWDE4Z0U5OXNMSnhPOTZuRnJoaXBHbGFBbFk1dnlaK0dzdHkrK1pRCjd3VnczeTBnZFl3Y2xZaVNaNElNL3JPdkl1dVBnczVxSWFJTnRrMGVNK3lYL2VINlFVby90RUJaQ05HaVN1NUMKc3U4SmZuS1RYK05GLzYreG1yOWczVjBQQSs2ZXJxYlZSK296OHdJREFRQUJBb0lCQVFDTENmUkdLQ3Y3NTJuaQo5ZnlrSlBsZ01CM3g4WWtLZHVpWWNrem02OG9tTWRCcjFlOXVuTDBwZXZYa1JmU2Izdmh6cTlROElQaTVtTThVCkpHS2hxcE9TZnlJZUtETDNUVU9ud2FwWllqZWNwL1ozMEUzWGhQbURlUEZCMjM2YTNZRTRxVGdSYm83OEZKMUsKRXdmTzV3L1laeENUVVNGZlRwZUE5MnhvaFkrVmVUZ25YRk9lQWtrL2U5YW1FUSt2QTd6ZklOV1Z0TlVsOFQ2UAozTXVuSmF6WDJTTXpFZ0NRc1JVR1hjRE1ZWFhZVVc4cjEweUJkeVZkamtMRzYyL29mdW14d1JMWWRpUEhvUXViClQ5ejdWY1ZDZnZReHpaaGw1N3cySXdVMUVyYmJ1YW8rU1I4Rnp0dW1JUU1HN2Npcm8vQXd0dVZmUTNIbkwyTWMKRlZ0SWZJWnhBb0dCQU0yQmtZMEpTclU0ZDhSTlFwYzlaMVRkYmNaeGZleWRKREVSNVdEUjVEb3dIczlGTllmbwo0Y1lQdG5rckdBQ2Q2NlprQk5Kb1pMc0ltcVlxK2lDNnVTV1lvei8yN3o0OGkyeUY0ZE9US0hWZ0xRdGIxTnBQCmYyMU9MWCtDMFZPR3NYdnVRZzFIR0piZFFxN0hHK255ekFCd1hPWWhKRzE1a21xSFBzbCtpMkdMQW9HQkFQZ3AKQzZNSC9IL3YxS1FhRlNPTmFaam9ENjRpcitCODhPc2QyUHNwVzVQSDRNSFQ0MzFXQmhES25xNnZVb29nWHJKYQpzZHZKRjBSTEcvK3BEQ1VEY1ludzRKbUZTUWdpMGptRXR3bDJQaFpaMUVHbW9yZkJ6Z3ZiTU1wc1k5cyt0RmNaClBjTFcvTngxVVBPejQrN2d6OUE4SmZIWmUyQnVJTzdHTkpadzIvUTVBb0dBQnFITzA3RmdFdGNlcmx3YjhRdVAKUVNMZ1B3NDhsUjFCSk1CQ0djek1ROEIzemhLYXRxY2lXaVQ1cTVGamRlbWl5RHprRnZJOWhmZXlIZmkzRjhWTwo0bEs1dWYrN0d0UVRYODQyU29DdVJwTnpBTEhXL2JDaEdTTHp2dkMyMXE0N0hoZkRVL0JlYjhmMk5mUEJCOXpKCkRwNlJzTytTNEdHVEF4TVlTS0puMXg4Q2dZQlMvWDhMeFpiMnZpRHh4RnMvYVlKZ0JYT0ErODZ2bDNrR1dCUUIKWXNKaWVKeThsWHpscElnZ3czSXYzeWdPVTJiMzVJYkloc2FHSzB3d3dycnlBZVhOaU1YRDhhUUwyWUZPaFFpbApYR3JGRk1OOE5Sc0Rzd2VTUlhwYXA4YjVRazRUTnJqRjJSbjFMREtuWFlpamtZRk9GVjluTXlFa2pZL3I2bE1aCjRLSmVXUUtCZ0RmT0xMeTdjME1KalhRZmdKQWFUMjYrdTVQajNybVdSU2FYT3hMdnN5NzdNTENsVFFhVGk5cFkKYTlQWEF2Q0QxUW1BWnl0bTlmbUdYUllWVU1PdVZ0d0tPbzd6NmpGdGVabWlvRzVDcmJxaUkxdmVla3VnNEp5UwpibnFhYktvdnYzdngzUUVYamc5Rm0ybm9lUWh5TjU3NHNGendjbTUyZWR0OWphbERMUVp4Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg=="
ca.crt: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGRENDQWZ5Z0F3SUJBZ0lSQUtWd2p1eVA1amFGMjdFSDNYYlM4NFF3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSmFHRnlZbTl5TFdOaE1CNFhEVEkxTVRJd016QXlNVE15TWxvWERUSTJNVEl3TXpBeQpNVE15TWxvd0ZERVNNQkFHQTFVRUF4TUphR0Z5WW05eUxXTmhNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUFxMnMzSEQ2WnQ4ZkQ3K0xHbDhhalJhbEZnb2doQXFKb3VjWlMxMGFhemczZVFhbHkKWWlaQjAvVC9qamoyZGtnRlNmMkxLTC9VRXlrRDVXOExWWEpwcDB6dWRIMlZTMGVncENIK3FLbTFXcUs4VCtDawovSDFkd3d2QmNIWGNZckJqakJpZXQzUmNkVUpaaWZSVHhjZVRCMnR5THhvVTJPbllZOFhmK241VmdvZEw4M0VCCkZoRmUzNkluMDFFbG8rSUcvSFNBclNJU1MzcGJLUDg1WmpkVGZ4NyttbFlUUWxvUWU3dFN5OGlCYWNTK0VhMGUKYVVUUVZwR3BzZURuT2g5djlhcVF1TCt4NmhuWlRuL3RLTSsxRnkrOUFneG9aSURJOUprZGRKaVlqMU1Lc2M3Mgo3TkpWeUF4V3VobVd5dG1Va25reXhPWUtsdXRrMThNRVlodnVwUUlEQVFBQm8yRXdYekFPQmdOVkhROEJBZjhFCkJBTUNBcVF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUYKTUFNQkFmOHdIUVlEVlIwT0JCWUVGRllWMW1qNUlkdFlxcUhZdkFJUy9pdVNrRjUwTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQnRmdnp6K3VxRFo4Z3FDYWhBY3NvNDEwSHlRZmd1NjUvUkU5dmd1UjJsc0libU5MNnBsKysvCjRnMHBEOGRvLzdNYWNGRVRZTTFVT0ZlaEp3Qkx5R1BaRXZmVnYyYlBZYVllSDRyT3RlNnpVWjJkNmMwSlRpbGEKVWpaODI2TWk1L1pCRkhjWmllVUZlK2Z0elpKMWJMOFRFYTh6dldZL0dmSU14Z1haUGJlc0lPNE9XaUxiMlpEbQpOYTZGZlpZTVFhczduaXFXT0ZHV2RoRHFoelB4Y2JYUENOMkN1ZjN0aDZsbkhMZHFUMi9FNmx6Qy9hWmszWWJDCkhLbnRTSXNwc1Bvbm5Ba2lFTUFlbkowaEwvVFI2NEI4U0h6SlF4c1QycGQ4SVJ2SGZLTUtmTnBOQTRWODdoZ0oKOStBK2ZaNnBJZTljaHhGYnF3ZkhFbi9aR0djUEd2dFQKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo="
---
# Source: harbor/charts/harbor/templates/registry/registry-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: "harbor-registry"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
type: Opaque
data:
REGISTRY_REDIS_PASSWORD: ""
---
# Source: harbor/charts/harbor/templates/registry/registryctl-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: "harbor-registryctl"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
type: Opaque
data:
---
# Source: harbor/charts/harbor/templates/trivy/trivy-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: harbor-trivy
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
type: Opaque
data:
redisURL: cmVkaXM6Ly9yZWRpcy1yZXBsaWNhdGlvbi1oYXJib3ItbWFzdGVyLmhhcmJvcjo2Mzc5LzU/aWRsZV90aW1lb3V0X3NlY29uZHM9MzA=
gitHubToken: ""
---
# Source: harbor/charts/harbor/templates/core/core-cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: harbor-core
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
data:
app.conf: |+
appname = Harbor
runmode = prod
enablegzip = true
[prod]
httpport = 8080
PORT: "8080"
DATABASE_TYPE: "postgresql"
POSTGRESQL_HOST: "harbor-postgresql-17-cluster-rw"
POSTGRESQL_PORT: "5432"
POSTGRESQL_USERNAME: "app"
POSTGRESQL_DATABASE: "app"
POSTGRESQL_SSLMODE: "disable"
POSTGRESQL_MAX_IDLE_CONNS: "100"
POSTGRESQL_MAX_OPEN_CONNS: "900"
EXT_ENDPOINT: "https://harbor.alexlebens.net"
CORE_URL: "http://harbor-core:80"
JOBSERVICE_URL: "http://harbor-jobservice"
REGISTRY_URL: "http://harbor-registry:5000"
TOKEN_SERVICE_URL: "http://harbor-core:80/service/token"
CORE_LOCAL_URL: "http://127.0.0.1:8080"
WITH_TRIVY: "true"
TRIVY_ADAPTER_URL: "http://harbor-trivy:8080"
REGISTRY_STORAGE_PROVIDER_NAME: "filesystem"
LOG_LEVEL: "info"
CONFIG_PATH: "/etc/core/app.conf"
CHART_CACHE_DRIVER: "redis"
_REDIS_URL_CORE: "redis://redis-replication-harbor-master.harbor:6379/0?idle_timeout_seconds=30"
_REDIS_URL_REG: "redis://redis-replication-harbor-master.harbor:6379/2?idle_timeout_seconds=30"
PORTAL_URL: "http://harbor-portal"
REGISTRY_CONTROLLER_URL: "http://harbor-registry:8080"
REGISTRY_CREDENTIAL_USERNAME: "harbor_registry_user"
HTTP_PROXY: ""
HTTPS_PROXY: ""
NO_PROXY: "harbor-core,harbor-jobservice,harbor-database,harbor-registry,harbor-portal,harbor-trivy,harbor-exporter,127.0.0.1,localhost,.local,.internal"
PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE: "docker-hub,harbor,azure-acr,ali-acr,aws-ecr,google-gcr,docker-registry,github-ghcr,jfrog-artifactory"
REPLICATION_ADAPTER_WHITELIST: "ali-acr,aws-ecr,azure-acr,docker-hub,docker-registry,github-ghcr,google-gcr,harbor,huawei-SWR,jfrog-artifactory,tencent-tcr,volcengine-cr"
METRIC_ENABLE: "true"
METRIC_PATH: "/metrics"
METRIC_PORT: "8001"
METRIC_NAMESPACE: harbor
METRIC_SUBSYSTEM: core
CACHE_ENABLED: "true"
CACHE_EXPIRE_HOURS: "24"
QUOTA_UPDATE_PROVIDER: "db"
---
# Source: harbor/charts/harbor/templates/exporter/exporter-cm-env.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: "harbor-exporter-env"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
data:
HTTP_PROXY: ""
HTTPS_PROXY: ""
NO_PROXY: "harbor-core,harbor-jobservice,harbor-database,harbor-registry,harbor-portal,harbor-trivy,harbor-exporter,127.0.0.1,localhost,.local,.internal"
LOG_LEVEL: "info"
HARBOR_EXPORTER_PORT: "8001"
HARBOR_EXPORTER_METRICS_PATH: "/metrics"
HARBOR_EXPORTER_METRICS_ENABLED: "true"
HARBOR_EXPORTER_CACHE_TIME: "23"
HARBOR_EXPORTER_CACHE_CLEAN_INTERVAL: "14400"
HARBOR_METRIC_NAMESPACE: harbor
HARBOR_METRIC_SUBSYSTEM: exporter
HARBOR_REDIS_URL: "redis://redis-replication-harbor-master.harbor:6379/1"
HARBOR_REDIS_NAMESPACE: harbor_job_service_namespace
HARBOR_REDIS_TIMEOUT: "3600"
HARBOR_SERVICE_SCHEME: "http"
HARBOR_SERVICE_HOST: "harbor-core"
HARBOR_SERVICE_PORT: "80"
HARBOR_DATABASE_HOST: "harbor-postgresql-17-cluster-rw"
HARBOR_DATABASE_PORT: "5432"
HARBOR_DATABASE_USERNAME: "app"
HARBOR_DATABASE_DBNAME: "app"
HARBOR_DATABASE_SSLMODE: "disable"
HARBOR_DATABASE_MAX_IDLE_CONNS: "100"
HARBOR_DATABASE_MAX_OPEN_CONNS: "900"
---
# Source: harbor/charts/harbor/templates/jobservice/jobservice-cm-env.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: "harbor-jobservice-env"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
data:
CORE_URL: "http://harbor-core:80"
TOKEN_SERVICE_URL: "http://harbor-core:80/service/token"
REGISTRY_URL: "http://harbor-registry:5000"
REGISTRY_CONTROLLER_URL: "http://harbor-registry:8080"
REGISTRY_CREDENTIAL_USERNAME: "harbor_registry_user"
JOBSERVICE_WEBHOOK_JOB_MAX_RETRY: "3"
JOBSERVICE_WEBHOOK_JOB_HTTP_CLIENT_TIMEOUT: "3"
LOG_LEVEL: "info"
HTTP_PROXY: ""
HTTPS_PROXY: ""
NO_PROXY: "harbor-core,harbor-jobservice,harbor-database,harbor-registry,harbor-portal,harbor-trivy,harbor-exporter,127.0.0.1,localhost,.local,.internal"
METRIC_NAMESPACE: harbor
METRIC_SUBSYSTEM: jobservice
_REDIS_URL_CORE: "redis://redis-replication-harbor-master.harbor:6379/0?idle_timeout_seconds=30"
CACHE_ENABLED: "true"
CACHE_EXPIRE_HOURS: "24"
---
# Source: harbor/charts/harbor/templates/jobservice/jobservice-cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: "harbor-jobservice"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
data:
config.yml: |+
# Server listening port
protocol: "http"
port: 8080
worker_pool:
workers: 10
backend: "redis"
redis_pool:
redis_url: "redis://redis-replication-harbor-master.harbor:6379/1"
namespace: "harbor_job_service_namespace"
idle_timeout_second: 3600
job_loggers:
- name: "STD_OUTPUT"
level: INFO
metric:
enabled: true
path: /metrics
port: 8001
# Loggers for the job service
loggers:
- name: "STD_OUTPUT"
level: INFO
reaper:
# the max time to wait for a task to finish; if still unfinished after max_update_hours the task is marked as error, though it continues to run (default: 24)
max_update_hours: 24
# the max time an execution may remain in running state without any new task being created
max_dangling_hours: 168
---
# Source: harbor/charts/harbor/templates/nginx/configmap-https.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: harbor-nginx
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
data:
nginx.conf: |+
worker_processes auto;
pid /tmp/nginx.pid;
events {
worker_connections 3096;
use epoll;
multi_accept on;
}
http {
client_body_temp_path /tmp/client_body_temp;
proxy_temp_path /tmp/proxy_temp;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
tcp_nodelay on;
# this is necessary for us to be able to disable request buffering in all cases
proxy_http_version 1.1;
upstream core {
server "harbor-core:80";
}
upstream portal {
server "harbor-portal:80";
}
log_format timed_combined '[$time_local]:$remote_addr - '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent" '
'$request_time $upstream_response_time $pipe';
access_log /dev/stdout timed_combined;
map $http_x_forwarded_proto $x_forwarded_proto {
default $http_x_forwarded_proto;
"" $scheme;
}
server {
listen 8443 ssl;
# server_name harbordomain.com;
server_tokens off;
# SSL
ssl_certificate /etc/nginx/cert/tls.crt;
ssl_certificate_key /etc/nginx/cert/tls.key;
# Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:';
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
# required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
chunked_transfer_encoding on;
# Add extra headers
add_header Strict-Transport-Security "max-age=31536000; includeSubdomains; preload";
add_header X-Frame-Options DENY;
add_header Content-Security-Policy "frame-ancestors 'none'";
location / {
proxy_pass http://portal/;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_cookie_path / "/; HttpOnly; Secure";
proxy_buffering off;
proxy_request_buffering off;
}
location /api/ {
proxy_pass http://core/api/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_cookie_path / "/; Secure";
proxy_buffering off;
proxy_request_buffering off;
}
location /c/ {
proxy_pass http://core/c/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_cookie_path / "/; Secure";
proxy_buffering off;
proxy_request_buffering off;
}
location /v1/ {
return 404;
}
location /v2/ {
proxy_pass http://core/v2/;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_buffering off;
proxy_request_buffering off;
proxy_send_timeout 900;
proxy_read_timeout 900;
}
location /service/ {
proxy_pass http://core/service/;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $x_forwarded_proto;
proxy_cookie_path / "/; Secure";
proxy_buffering off;
proxy_request_buffering off;
}
location /service/notifications {
return 404;
}
}
server {
listen 8080;
# server_name harbordomain.com;
return 301 https://$host$request_uri;
}
}
---
# Source: harbor/charts/harbor/templates/portal/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: "harbor-portal"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
data:
nginx.conf: |+
worker_processes auto;
pid /tmp/nginx.pid;
events {
worker_connections 1024;
}
http {
client_body_temp_path /tmp/client_body_temp;
proxy_temp_path /tmp/proxy_temp;
fastcgi_temp_path /tmp/fastcgi_temp;
uwsgi_temp_path /tmp/uwsgi_temp;
scgi_temp_path /tmp/scgi_temp;
server {
listen 8080;
server_name localhost;
root /usr/share/nginx/html;
index index.html index.htm;
include /etc/nginx/mime.types;
gzip on;
gzip_min_length 1000;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain text/css application/json application/javascript application/x-javascript text/xml application/xml application/xml+rss text/javascript;
location /devcenter-api-2.0 {
try_files $uri $uri/ /swagger-ui-index.html;
}
location / {
try_files $uri $uri/ /index.html;
}
location = /index.html {
add_header Cache-Control "no-store, no-cache, must-revalidate";
}
}
}
---
# Source: harbor/charts/harbor/templates/registry/registry-cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: "harbor-registry"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
data:
config.yml: |+
version: 0.1
log:
level: info
fields:
service: registry
storage:
filesystem:
rootdirectory: /storage
cache:
layerinfo: redis
maintenance:
uploadpurging:
enabled: true
age: 72h
interval: 24h
dryrun: false
delete:
enabled: true
redirect:
disable: false
redis:
addr: redis-replication-harbor-master.harbor:6379
db: 2
readtimeout: 10s
writetimeout: 10s
dialtimeout: 10s
enableTLS: false
pool:
maxidle: 100
maxactive: 500
idletimeout: 60s
http:
addr: :5000
relativeurls: true
# set via environment variable
# secret: placeholder
debug:
addr: :8001
prometheus:
enabled: true
path: /metrics
auth:
htpasswd:
realm: harbor-registry-basic-realm
path: /etc/registry/passwd
validation:
disabled: true
compatibility:
schema1:
enabled: true
ctl-config.yml: |+
---
protocol: "http"
port: 8080
log_level: info
registry_config: "/etc/registry/config.yml"
---
# Source: harbor/charts/harbor/templates/registry/registryctl-cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: "harbor-registryctl"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
data:
---
# Source: harbor/charts/harbor/templates/registry/registry-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: harbor-registry
namespace: "harbor"
annotations:
helm.sh/resource-policy: keep
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
component: registry
app.kubernetes.io/component: registry
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
storageClassName: ceph-block
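# helm.sh/resource-policy: keep means Helm leaves this PVC (and the registry image
# data on it) behind on `helm uninstall`; removing it requires an explicit
#   kubectl -n harbor delete pvc harbor-registry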
---
# Source: harbor/charts/harbor/templates/core/core-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: harbor-core
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
spec:
ports:
- name: http-web
port: 80
targetPort: 8080
- name: http-metrics
port: 8001
selector:
release: harbor
app: "harbor"
component: core
---
# Source: harbor/charts/harbor/templates/exporter/exporter-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: "harbor-exporter"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
spec:
ports:
- name: http-metrics
port: 8001
selector:
release: harbor
app: "harbor"
component: exporter
---
# Source: harbor/charts/harbor/templates/jobservice/jobservice-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: "harbor-jobservice"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
spec:
ports:
- name: http-jobservice
port: 80
targetPort: 8080
- name: http-metrics
port: 8001
selector:
release: harbor
app: "harbor"
component: jobservice
---
# Source: harbor/charts/harbor/templates/nginx/service.yaml
apiVersion: v1
kind: Service
metadata:
name: harbor
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
spec:
type: ClusterIP
ports:
- name: http
port: 80
targetPort: 8080
- name: https
port: 443
targetPort: 8443
selector:
release: harbor
app: "harbor"
component: nginx
---
# Source: harbor/charts/harbor/templates/portal/service.yaml
apiVersion: v1
kind: Service
metadata:
name: "harbor-portal"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
spec:
ports:
- port: 80
targetPort: 8080
selector:
release: harbor
app: "harbor"
component: portal
---
# Source: harbor/charts/harbor/templates/registry/registry-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: "harbor-registry"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
spec:
ports:
- name: http-registry
port: 5000
- name: http-controller
port: 8080
- name: http-metrics
port: 8001
selector:
release: harbor
app: "harbor"
component: registry
---
# Source: harbor/charts/harbor/templates/trivy/trivy-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: "harbor-trivy"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
spec:
ports:
- name: http-trivy
protocol: TCP
port: 8080
selector:
release: harbor
app: "harbor"
component: trivy
---
# Source: harbor/charts/harbor/templates/core/core-dpl.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: harbor-core
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
component: core
app.kubernetes.io/component: core
spec:
replicas: 2
revisionHistoryLimit: 10
selector:
matchLabels:
release: harbor
app: "harbor"
component: core
template:
metadata:
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
component: core
app.kubernetes.io/component: core
annotations:
checksum/configmap: 2ab1f59e0aae82905a44ed3392436bca450fadb6f9db40cba116688647e9673e
checksum/secret: 5e00a5c189bdc7836b8fa02b61f7369eef2d6813f8d4b425beac262244f541a0
checksum/secret-jobservice: 38121c345921c661c7e6f9de4bb5f6446c41c255babf1b8416a36627193ed5a7
spec:
securityContext:
runAsUser: 10000
fsGroup: 10000
automountServiceAccountToken: false
terminationGracePeriodSeconds: 120
containers:
- name: core
image: goharbor/harbor-core:v2.14.1
imagePullPolicy: IfNotPresent
startupProbe:
httpGet:
path: /api/v2.0/ping
scheme: HTTP
port: 8080
failureThreshold: 360
initialDelaySeconds: 10
periodSeconds: 10
livenessProbe:
httpGet:
path: /api/v2.0/ping
scheme: HTTP
port: 8080
failureThreshold: 2
periodSeconds: 10
readinessProbe:
httpGet:
path: /api/v2.0/ping
scheme: HTTP
port: 8080
failureThreshold: 2
periodSeconds: 10
envFrom:
- configMapRef:
name: "harbor-core"
- secretRef:
name: "harbor-core"
env:
- name: CORE_SECRET
valueFrom:
secretKeyRef:
name: harbor-secret
key: secret
- name: JOBSERVICE_SECRET
valueFrom:
secretKeyRef:
name: harbor-secret
key: JOBSERVICE_SECRET
- name: HARBOR_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-secret
key: HARBOR_ADMIN_PASSWORD
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-postgresql-17-cluster-app
key: password
- name: REGISTRY_CREDENTIAL_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-secret
key: REGISTRY_PASSWD
- name: CSRF_KEY
valueFrom:
secretKeyRef:
name: harbor-secret
key: CSRF_KEY
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
ports:
- containerPort: 8080
volumeMounts:
- name: config
mountPath: /etc/core/app.conf
subPath: app.conf
- name: secret-key
mountPath: /etc/core/key
subPath: key
- name: token-service-private-key
mountPath: /etc/core/private_key.pem
subPath: tls.key
- name: ca-download
mountPath: /etc/core/ca
- name: psc
mountPath: /etc/core/token
volumes:
- name: config
configMap:
name: harbor-core
items:
- key: app.conf
path: app.conf
- name: secret-key
secret:
secretName: harbor-secret
items:
- key: secretKey
path: key
- name: token-service-private-key
secret:
secretName: harbor-secret
- name: ca-download
secret:
secretName: harbor-nginx
- name: psc
emptyDir: {}
---
# Source: harbor/charts/harbor/templates/exporter/exporter-dpl.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: harbor-exporter
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
component: exporter
app.kubernetes.io/component: exporter
spec:
replicas: 2
revisionHistoryLimit: 10
selector:
matchLabels:
release: harbor
app: "harbor"
component: exporter
template:
metadata:
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
component: exporter
app.kubernetes.io/component: exporter
annotations:
checksum/configmap: a3f8e56f85d2e991fa50b6a694cd385741220bafa53790826d86cc3301097bdd
checksum/secret: ee0f7cc018ee30f536120b83238e2679ba83ac6fb0a47f5319ec09ddd21e7a46
spec:
securityContext:
runAsUser: 10000
fsGroup: 10000
automountServiceAccountToken: false
containers:
- name: exporter
image: goharbor/harbor-exporter:v2.14.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
port: 8001
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
httpGet:
path: /
port: 8001
initialDelaySeconds: 30
periodSeconds: 10
args: ["-log-level", "info"]
envFrom:
- configMapRef:
name: "harbor-exporter-env"
- secretRef:
name: "harbor-exporter"
env:
- name: HARBOR_DATABASE_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-postgresql-17-cluster-app
key: password
- name: HARBOR_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-secret
key: HARBOR_ADMIN_PASSWORD
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
ports:
- containerPort: 8001
volumeMounts:
volumes:
- name: config
secret:
secretName: "harbor-exporter"
---
# Source: harbor/charts/harbor/templates/jobservice/jobservice-dpl.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: "harbor-jobservice"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
component: jobservice
app.kubernetes.io/component: jobservice
spec:
replicas: 2
revisionHistoryLimit: 10
strategy:
type: Recreate
rollingUpdate: null
selector:
matchLabels:
release: harbor
app: "harbor"
component: jobservice
template:
metadata:
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
component: jobservice
app.kubernetes.io/component: jobservice
annotations:
checksum/configmap: 02840b388e1331297a14b21ecc01301f71b3dd10b0c3e8ba7f33e106740caa1e
checksum/configmap-env: 70eaac0af19c1296afdd9b67e01e3732a4bde89f91e5fbb376e531a67bdf1986
checksum/secret: 38121c345921c661c7e6f9de4bb5f6446c41c255babf1b8416a36627193ed5a7
checksum/secret-core: 5e00a5c189bdc7836b8fa02b61f7369eef2d6813f8d4b425beac262244f541a0
spec:
securityContext:
runAsUser: 10000
fsGroup: 10000
automountServiceAccountToken: false
terminationGracePeriodSeconds: 120
containers:
- name: jobservice
image: goharbor/harbor-jobservice:v2.14.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /api/v1/stats
scheme: HTTP
port: 8080
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
httpGet:
path: /api/v1/stats
scheme: HTTP
port: 8080
initialDelaySeconds: 20
periodSeconds: 10
env:
- name: CORE_SECRET
valueFrom:
secretKeyRef:
name: harbor-secret
key: secret
- name: JOBSERVICE_SECRET
valueFrom:
secretKeyRef:
name: harbor-secret
key: JOBSERVICE_SECRET
- name: REGISTRY_CREDENTIAL_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-secret
key: REGISTRY_PASSWD
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
envFrom:
- configMapRef:
name: "harbor-jobservice-env"
- secretRef:
name: "harbor-jobservice"
ports:
- containerPort: 8080
volumeMounts:
- name: jobservice-config
mountPath: /etc/jobservice/config.yml
subPath: config.yml
- name: job-logs
mountPath: /var/log/jobs
subPath:
volumes:
- name: jobservice-config
configMap:
name: "harbor-jobservice"
- name: job-logs
emptyDir: {}
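# Note the Recreate strategy above: an upgrade stops both jobservice replicas before
# starting new ones, so in-flight jobs are interrupted during rollouts and the two
# replicas only add redundancy in steady state. Recreate is the usual choice when job
# logs sit on an RWO volume; here logs go to STD_OUTPUT and an emptyDir, so this is
# presumably just the configured update strategy.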
---
# Source: harbor/charts/harbor/templates/nginx/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: harbor-nginx
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
component: nginx
app.kubernetes.io/component: nginx
spec:
replicas: 0
revisionHistoryLimit: 10
selector:
matchLabels:
release: harbor
app: "harbor"
component: nginx
template:
metadata:
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
component: nginx
app.kubernetes.io/component: nginx
annotations:
checksum/configmap: 55921b41f4478ded4d60da7edb83b828382ba722214816271ce3ffd2a77aed35
checksum/secret: 1917ed1f7dd89e9c1996cbb3dbe20a46cfc18579b5f7f3f547e430e875e3f7ef
spec:
securityContext:
runAsUser: 10000
fsGroup: 10000
automountServiceAccountToken: false
containers:
- name: nginx
image: "goharbor/nginx-photon:v2.14.1"
imagePullPolicy: "IfNotPresent"
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 1
periodSeconds: 10
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
ports:
- containerPort: 8080
- containerPort: 8443
volumeMounts:
- name: config
mountPath: /etc/nginx/nginx.conf
subPath: nginx.conf
- name: certificate
mountPath: /etc/nginx/cert
volumes:
- name: config
configMap:
name: harbor-nginx
- name: certificate
secret:
secretName: harbor-nginx
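# replicas: 0 scales this bundled nginx proxy to nothing, so it serves no traffic;
# ingress is handled instead by the HTTPRoute near the end of this file, which sends
# harbor.alexlebens.net through the traefik-gateway directly to harbor-core and
# harbor-portal. The Deployment and its TLS Secret stay defined in case the
# in-cluster proxy ever needs to be re-enabled.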
---
# Source: harbor/charts/harbor/templates/portal/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: "harbor-portal"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
component: portal
app.kubernetes.io/component: portal
spec:
replicas: 2
revisionHistoryLimit: 10
selector:
matchLabels:
release: harbor
app: "harbor"
component: portal
template:
metadata:
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
component: portal
app.kubernetes.io/component: portal
annotations:
checksum/configmap: 88f60f7e1b77f88d1d57ceccace7127f138ce8e15397579095fdf08c6decbe0d
spec:
securityContext:
runAsUser: 10000
fsGroup: 10000
automountServiceAccountToken: false
containers:
- name: portal
image: goharbor/harbor-portal:v2.14.1
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
livenessProbe:
httpGet:
path: /
scheme: HTTP
port: 8080
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
httpGet:
path: /
scheme: HTTP
port: 8080
initialDelaySeconds: 1
periodSeconds: 10
ports:
- containerPort: 8080
volumeMounts:
- name: portal-config
mountPath: /etc/nginx/nginx.conf
subPath: nginx.conf
volumes:
- name: portal-config
configMap:
name: "harbor-portal"
---
# Source: harbor/charts/harbor/templates/registry/registry-dpl.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: "harbor-registry"
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
component: registry
app.kubernetes.io/component: registry
spec:
replicas: 1
revisionHistoryLimit: 10
strategy:
type: Recreate
rollingUpdate: null
selector:
matchLabels:
release: harbor
app: "harbor"
component: registry
template:
metadata:
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
component: registry
app.kubernetes.io/component: registry
annotations:
checksum/configmap: 87c58b25a3419837f72faa3a2690b9f1523e038d974222105459836926be3d03
checksum/secret: 54660cd152eb2befec78a32a5c59307dc1c09a9f1bd3b78150fe5035c5c33a90
checksum/secret-jobservice: 38121c345921c661c7e6f9de4bb5f6446c41c255babf1b8416a36627193ed5a7
checksum/secret-core: 5e00a5c189bdc7836b8fa02b61f7369eef2d6813f8d4b425beac262244f541a0
spec:
securityContext:
runAsUser: 10000
fsGroup: 10000
fsGroupChangePolicy: OnRootMismatch
automountServiceAccountToken: false
terminationGracePeriodSeconds: 120
containers:
- name: registry
image: goharbor/registry-photon:v2.14.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /
scheme: HTTP
port: 5000
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
httpGet:
path: /
scheme: HTTP
port: 5000
initialDelaySeconds: 1
periodSeconds: 10
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
envFrom:
- secretRef:
name: "harbor-registry"
env:
- name: REGISTRY_HTTP_SECRET
valueFrom:
secretKeyRef:
name: harbor-secret
key: REGISTRY_HTTP_SECRET
ports:
- containerPort: 5000
- containerPort: 8001
volumeMounts:
- name: registry-data
mountPath: /storage
subPath:
- name: registry-htpasswd
mountPath: /etc/registry/passwd
subPath: passwd
- name: registry-config
mountPath: /etc/registry/config.yml
subPath: config.yml
- name: registryctl
image: goharbor/harbor-registryctl:v2.14.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /api/health
scheme: HTTP
port: 8080
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
httpGet:
path: /api/health
scheme: HTTP
port: 8080
initialDelaySeconds: 1
periodSeconds: 10
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
envFrom:
- configMapRef:
name: "harbor-registryctl"
- secretRef:
name: "harbor-registry"
- secretRef:
name: "harbor-registryctl"
env:
- name: REGISTRY_HTTP_SECRET
valueFrom:
secretKeyRef:
name: harbor-secret
key: REGISTRY_HTTP_SECRET
- name: CORE_SECRET
valueFrom:
secretKeyRef:
name: harbor-secret
key: secret
- name: JOBSERVICE_SECRET
valueFrom:
secretKeyRef:
name: harbor-secret
key: JOBSERVICE_SECRET
ports:
- containerPort: 8080
volumeMounts:
- name: registry-data
mountPath: /storage
subPath:
- name: registry-config
mountPath: /etc/registry/config.yml
subPath: config.yml
- name: registry-config
mountPath: /etc/registryctl/config.yml
subPath: ctl-config.yml
volumes:
- name: registry-htpasswd
secret:
secretName: harbor-secret
items:
- key: REGISTRY_HTPASSWD
path: passwd
- name: registry-config
configMap:
name: "harbor-registry"
- name: registry-data
persistentVolumeClaim:
claimName: harbor-registry
---
# Source: harbor/charts/harbor/templates/trivy/trivy-sts.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: harbor-trivy
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
component: trivy
app.kubernetes.io/component: trivy
spec:
replicas: 1
serviceName: harbor-trivy
selector:
matchLabels:
release: harbor
app: "harbor"
component: trivy
template:
metadata:
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
component: trivy
app.kubernetes.io/component: trivy
annotations:
checksum/secret: 3e2dedee1ec33c5ef3e227c0b8122b7d124687f85691d5fdac5791a081fb3d2c
spec:
securityContext:
runAsUser: 10000
fsGroup: 10000
automountServiceAccountToken: false
containers:
- name: trivy
image: goharbor/trivy-adapter-photon:v2.14.0
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
env:
- name: HTTP_PROXY
value: ""
- name: HTTPS_PROXY
value: ""
- name: NO_PROXY
value: "harbor-core,harbor-jobservice,harbor-database,harbor-registry,harbor-portal,harbor-trivy,harbor-exporter,127.0.0.1,localhost,.local,.internal"
- name: "SCANNER_LOG_LEVEL"
value: "info"
- name: "SCANNER_TRIVY_CACHE_DIR"
value: "/home/scanner/.cache/trivy"
- name: "SCANNER_TRIVY_REPORTS_DIR"
value: "/home/scanner/.cache/reports"
- name: "SCANNER_TRIVY_DEBUG_MODE"
value: "false"
- name: "SCANNER_TRIVY_VULN_TYPE"
value: "os,library"
- name: "SCANNER_TRIVY_TIMEOUT"
value: "5m0s"
- name: "SCANNER_TRIVY_GITHUB_TOKEN"
valueFrom:
secretKeyRef:
name: harbor-trivy
key: gitHubToken
- name: "SCANNER_TRIVY_SEVERITY"
value: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL"
- name: "SCANNER_TRIVY_IGNORE_UNFIXED"
value: "false"
- name: "SCANNER_TRIVY_SKIP_UPDATE"
value: "false"
- name: "SCANNER_TRIVY_SKIP_JAVA_DB_UPDATE"
value: "false"
- name: "SCANNER_TRIVY_DB_REPOSITORY"
value: "mirror.gcr.io/aquasec/trivy-db,ghcr.io/aquasecurity/trivy-db"
- name: "SCANNER_TRIVY_JAVA_DB_REPOSITORY"
value: "mirror.gcr.io/aquasec/trivy-java-db,ghcr.io/aquasecurity/trivy-java-db"
- name: "SCANNER_TRIVY_OFFLINE_SCAN"
value: "false"
- name: "SCANNER_TRIVY_SECURITY_CHECKS"
value: "vuln"
- name: "SCANNER_TRIVY_INSECURE"
value: "false"
- name: SCANNER_API_SERVER_ADDR
value: ":8080"
- name: "SCANNER_REDIS_URL"
valueFrom:
secretKeyRef:
name: harbor-trivy
key: redisURL
- name: "SCANNER_STORE_REDIS_URL"
valueFrom:
secretKeyRef:
name: harbor-trivy
key: redisURL
- name: "SCANNER_JOB_QUEUE_REDIS_URL"
valueFrom:
secretKeyRef:
name: harbor-trivy
key: redisURL
ports:
- name: api-server
containerPort: 8080
volumeMounts:
- name: data
mountPath: /home/scanner/.cache
subPath:
readOnly: false
livenessProbe:
httpGet:
scheme: HTTP
path: /probe/healthy
port: api-server
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
failureThreshold: 10
readinessProbe:
httpGet:
scheme: HTTP
path: /probe/ready
port: api-server
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
failureThreshold: 3
resources:
limits:
cpu: 1
memory: 1Gi
requests:
cpu: 200m
memory: 512Mi
volumeClaimTemplates:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: data
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
annotations:
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: "5Gi"
---
# Source: harbor/charts/postgres-17-cluster/templates/cluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: harbor-postgresql-17-cluster
namespace: harbor
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: harbor-postgresql-17
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
instances: 3
imageName: "ghcr.io/cloudnative-pg/postgresql:17.7-standard-trixie"
imagePullPolicy: IfNotPresent
postgresUID: 26
postgresGID: 26
plugins:
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "harbor-postgresql-17-external-backup"
serverName: "harbor-postgresql-17-backup-2"
- name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: true
parameters:
barmanObjectName: "harbor-postgresql-17-garage-local-backup"
serverName: "harbor-postgresql-17-backup-1"
externalClusters:
- name: recovery
plugin:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "harbor-postgresql-17-recovery"
serverName: harbor-postgresql-17-backup-1
storage:
size: 10Gi
storageClass: local-path
walStorage:
size: 2Gi
storageClass: local-path
resources:
limits:
hugepages-2Mi: 256Mi
requests:
cpu: 100m
memory: 256Mi
affinity:
enablePodAntiAffinity: true
topologyKey: kubernetes.io/hostname
primaryUpdateMethod: switchover
primaryUpdateStrategy: unsupervised
logLevel: info
enableSuperuserAccess: false
enablePDB: true
postgresql:
parameters:
hot_standby_feedback: "on"
max_slot_wal_keep_size: 2000MB
shared_buffers: 128MB
monitoring:
enablePodMonitor: true
disableDefaultQueries: false
bootstrap:
recovery:
database: app
source: harbor-postgresql-17-backup-1
externalClusters:
- name: harbor-postgresql-17-backup-1
plugin:
name: barman-cloud.cloudnative-pg.io
enabled: true
isWALArchiver: false
parameters:
barmanObjectName: "harbor-postgresql-17-recovery"
serverName: harbor-postgresql-17-backup-1
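# Backup/restore wiring, as rendered: the cluster bootstraps by restoring database
# "app" from the harbor-postgresql-17-recovery ObjectStore (serverName
# harbor-postgresql-17-backup-1), then writes to two barman-cloud targets: the
# garage-local store is the WAL archiver (isWALArchiver: true) and the external
# DigitalOcean store keeps a second, non-archiving copy. The recovery store points at
# the same garage bucket and path as the local backup store (see the ObjectStore
# definitions below), so the cluster is effectively recovering from its own prior
# backups.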
---
# Source: harbor/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: harbor-secret
namespace: harbor
labels:
app.kubernetes.io/name: harbor-secret
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: HARBOR_ADMIN_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/config
metadataPolicy: None
property: admin-password
- secretKey: secretKey
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/config
metadataPolicy: None
property: secretKey
- secretKey: CSRF_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/core
metadataPolicy: None
property: CSRF_KEY
- secretKey: secret
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/core
metadataPolicy: None
property: secret
- secretKey: tls.crt
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/core
metadataPolicy: None
property: tls.crt
- secretKey: tls.key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/core
metadataPolicy: None
property: tls.key
- secretKey: JOBSERVICE_SECRET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/jobservice
metadataPolicy: None
property: JOBSERVICE_SECRET
- secretKey: REGISTRY_HTTP_SECRET
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/registry
metadataPolicy: None
property: REGISTRY_HTTP_SECRET
- secretKey: REGISTRY_REDIS_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/registry
metadataPolicy: None
property: REGISTRY_REDIS_PASSWORD
- secretKey: REGISTRY_HTPASSWD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/registry
metadataPolicy: None
property: REGISTRY_HTPASSWD
- secretKey: REGISTRY_CREDENTIAL_PASSWORD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/registry
metadataPolicy: None
property: REGISTRY_CREDENTIAL_PASSWORD
- secretKey: REGISTRY_PASSWD
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/registry
metadataPolicy: None
property: REGISTRY_CREDENTIAL_PASSWORD
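# REGISTRY_CREDENTIAL_PASSWORD and REGISTRY_PASSWD both resolve to the same Vault
# property (REGISTRY_CREDENTIAL_PASSWORD): the chart's Deployments read the key
# REGISTRY_PASSWD (see the core and jobservice env blocks above), while the first
# entry keeps the value available under its original name.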
---
# Source: harbor/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: harbor-nginx-secret
namespace: harbor
labels:
app.kubernetes.io/name: harbor-nginx-secret
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ca.crt
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/nginx
metadataPolicy: None
property: ca.crt
- secretKey: tls.crt
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/nginx
metadataPolicy: None
property: tls.crt
- secretKey: tls.key
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /cl01tl/harbor/nginx
metadataPolicy: None
property: tls.key
---
# Source: harbor/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: harbor-postgresql-17-cluster-backup-secret
namespace: harbor
labels:
app.kubernetes.io/name: harbor-postgresql-17-cluster-backup-secret
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: access
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /digital-ocean/home-infra/postgres-backups
metadataPolicy: None
property: secret
---
# Source: harbor/templates/external-secret.yaml
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: harbor-postgresql-17-cluster-backup-secret-garage
namespace: harbor
labels:
app.kubernetes.io/name: harbor-postgresql-17-cluster-backup-secret-garage
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
spec:
secretStoreRef:
kind: ClusterSecretStore
name: vault
data:
- secretKey: ACCESS_KEY_ID
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_KEY_ID
- secretKey: ACCESS_SECRET_KEY
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_SECRET_KEY
- secretKey: ACCESS_REGION
remoteRef:
conversionStrategy: Default
decodingStrategy: None
key: /garage/home-infra/postgres-backups
metadataPolicy: None
property: ACCESS_REGION
---
# Source: harbor/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-harbor
namespace: harbor
labels:
app.kubernetes.io/name: http-route-harbor
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- harbor.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /api/
- path:
type: PathPrefix
value: /service/
- path:
type: PathPrefix
value: /v2/
- path:
type: PathPrefix
value: /c/
backendRefs:
- group: ''
kind: Service
name: harbor-core
port: 80
weight: 100
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: harbor-portal
port: 80
weight: 100
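# Rule order is not what keeps /api/, /service/, /v2/ and /c/ away from the portal:
# Gateway API gives precedence to the longest matching path prefix, so these specific
# prefixes beat the "/" catch-all no matter how the rules are listed. A quick probe,
# assuming harbor.alexlebens.net resolves to the gateway:
#   curl -sI https://harbor.alexlebens.net/v2/   # served by harbor-core
#   curl -sI https://harbor.alexlebens.net/      # served by harbor-portal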
---
# Source: harbor/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "harbor-postgresql-17-external-backup"
namespace: harbor
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: harbor-postgresql-17
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 30d
configuration:
destinationPath: s3://postgres-backups-ce540ddf106d186bbddca68a/cl01tl/harbor/harbor-postgresql-17-cluster
endpointURL: https://nyc3.digitaloceanspaces.com
s3Credentials:
accessKeyId:
name: harbor-postgresql-17-cluster-backup-secret
key: ACCESS_KEY_ID
secretAccessKey:
name: harbor-postgresql-17-cluster-backup-secret
key: ACCESS_SECRET_KEY
---
# Source: harbor/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "harbor-postgresql-17-garage-local-backup"
namespace: harbor
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: harbor-postgresql-17
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
retentionPolicy: 3d
configuration:
destinationPath: s3://postgres-backups/cl01tl/harbor/harbor-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
s3Credentials:
accessKeyId:
name: harbor-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: harbor-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
region:
name: harbor-postgresql-17-cluster-backup-secret-garage
key: ACCESS_REGION
---
# Source: harbor/charts/postgres-17-cluster/templates/object-store.yaml
apiVersion: barmancloud.cnpg.io/v1
kind: ObjectStore
metadata:
name: "harbor-postgresql-17-recovery"
namespace: harbor
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: harbor-postgresql-17
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
configuration:
destinationPath: s3://postgres-backups/cl01tl/harbor/harbor-postgresql-17-cluster
endpointURL: http://garage-main.garage:3900
wal:
compression: snappy
maxParallel: 1
data:
compression: snappy
jobs: 1
s3Credentials:
accessKeyId:
name: harbor-postgresql-17-cluster-backup-secret-garage
key: ACCESS_KEY_ID
secretAccessKey:
name: harbor-postgresql-17-cluster-backup-secret-garage
key: ACCESS_SECRET_KEY
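# This recovery store is only read during the Cluster's bootstrap.recovery phase; it
# shares destinationPath and credentials with
# harbor-postgresql-17-garage-local-backup above, differing only in that it also
# pins snappy compression settings for WAL and data.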
---
# Source: harbor/charts/postgres-17-cluster/templates/prometheus-rule.yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: harbor-postgresql-17-alert-rules
namespace: harbor
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: harbor-postgresql-17
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
groups:
- name: cloudnative-pg/harbor-postgresql-17
rules:
- alert: CNPGClusterBackendsWaitingWarning
annotations:
summary: CNPG Cluster has a backend waiting for longer than 5 minutes.
description: |-
Pod {{ $labels.pod }}
has been waiting for longer than 5 minutes
expr: |
cnpg_backends_waiting_total > 300
for: 1m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-17-cluster
- alert: CNPGClusterDatabaseDeadlockConflictsWarning
annotations:
summary: CNPG Cluster has over 10 deadlock conflicts.
description: |-
There are over 10 deadlock conflicts in
{{ $labels.pod }}
expr: |
cnpg_pg_stat_database_deadlocks > 10
for: 1m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-17-cluster
- alert: CNPGClusterHACritical
annotations:
summary: CNPG Cluster has no standby replicas!
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has no ready standby replicas. Your cluster is at severe
risk of data loss and downtime if the primary instance fails.
The primary instance is still online and able to serve queries, although connections to the `-ro` endpoint
will fail. The `-r` endpoint is operating at reduced capacity and all traffic is being served by the primary.
This can happen during a normal fail-over or automated minor version upgrades in a cluster with 2 or fewer
instances. The replaced instance may need some time to catch up with the cluster primary instance.
This alarm will always trigger if your cluster is configured to run with only 1 instance; in this
case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHACritical.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="harbor"} - cnpg_pg_replication_is_wal_receiver_up{namespace="harbor"}) < 1
for: 5m
labels:
severity: critical
namespace: harbor
cnpg_cluster: harbor-postgresql-17-cluster
- alert: CNPGClusterHAWarning
annotations:
summary: CNPG Cluster less than 2 standby replicas.
description: |-
CloudNativePG Cluster "{{`{{`}} $labels.job {{`}}`}}" has only {{`{{`}} $value {{`}}`}} standby replicas, putting
your cluster at risk if another instance fails. The cluster is still able to operate normally, although
the `-ro` and `-r` endpoints operate at reduced capacity.
This can happen during a normal fail-over or automated minor version upgrades. The replaced instance may
need some time to catch up with the cluster primary instance.
This alarm will be constantly triggered if your cluster is configured to run with fewer than 3 instances;
in this case you may want to silence it.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHAWarning.md
expr: |
max by (job) (cnpg_pg_replication_streaming_replicas{namespace="harbor"} - cnpg_pg_replication_is_wal_receiver_up{namespace="harbor"}) < 2
for: 5m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-17-cluster
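# NOTE: Both HA alerts above compare ready streaming replicas against fixed
# thresholds (1 and 2), so a cluster intentionally running with fewer than
# 3 instances will trigger them permanently; silence them in that case, as
# the descriptions suggest.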
- alert: CNPGClusterHighConnectionsCritical
annotations:
summary: CNPG Instance maximum number of connections critical!
description: |-
CloudNativePG Cluster "harbor/harbor-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsCritical.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="harbor", pod=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="harbor", pod=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 95
for: 5m
labels:
severity: critical
namespace: harbor
cnpg_cluster: harbor-postgresql-17-cluster
- alert: CNPGClusterHighConnectionsWarning
annotations:
summary: CNPG Instance is approaching the maximum number of connections.
description: |-
CloudNativePG Cluster "harbor/harbor-postgresql-17-cluster" instance {{`{{`}} $labels.pod {{`}}`}} is using {{`{{`}} $value {{`}}`}}% of
the maximum number of connections.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighConnectionsWarning.md
expr: |
sum by (pod) (cnpg_backends_total{namespace="harbor", pod=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$"}) / max by (pod) (cnpg_pg_settings_setting{name="max_connections", namespace="harbor", pod=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$"}) * 100 > 80
for: 5m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-17-cluster
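# NOTE: The two connection alerts compute per-pod usage as
# cnpg_backends_total / max_connections * 100, warning at 80% and going
# critical at 95% when sustained for 5 minutes.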
- alert: CNPGClusterHighReplicationLag
annotations:
summary: CNPG Cluster has high replication lag.
description: |-
CloudNativePG Cluster "harbor/harbor-postgresql-17-cluster" is experiencing a high replication lag of
{{`{{`}} $value {{`}}`}}ms.
High replication lag indicates network issues, busy instances, slow queries or suboptimal configuration.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterHighReplicationLag.md
expr: |
max(cnpg_pg_replication_lag{namespace="harbor",pod=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$"}) * 1000 > 1000
for: 5m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-17-cluster
- alert: CNPGClusterInstancesOnSameNode
annotations:
summary: CNPG Cluster instances are located on the same node.
description: |-
CloudNativePG Cluster "harbor/harbor-postgresql-17-cluster" has {{`{{`}} $value {{`}}`}}
instances on the same node {{`{{`}} $labels.node {{`}}`}}.
A failure or scheduled downtime of a single node will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterInstancesOnSameNode.md
expr: |
count by (node) (kube_pod_info{namespace="harbor", pod=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$"}) > 1
for: 5m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-17-cluster
- alert: CNPGClusterLongRunningTransactionWarning
annotations:
summary: CNPG Cluster has a query running for longer than 5 minutes.
description: |-
CloudNativePG Cluster Pod {{ $labels.pod }}
has been running a query for more than 5 minutes (300 seconds).
expr: |-
cnpg_backends_max_tx_duration_seconds > 300
for: 1m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceCritical
annotations:
summary: CNPG Instance is running out of disk space!
description: |-
CloudNativePG Cluster "harbor/harbor-postgresql-17-cluster" is running extremely low on disk space. Check attached PVCs!
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceCritical.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.9 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.9 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.9
for: 5m
labels:
severity: critical
namespace: harbor
cnpg_cluster: harbor-postgresql-17-cluster
- alert: CNPGClusterLowDiskSpaceWarning
annotations:
summary: CNPG Instance is running out of disk space.
description: |-
CloudNativePG Cluster "harbor/harbor-postgresql-17-cluster" is running low on disk space. Check attached PVCs.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterLowDiskSpaceWarning.md
expr: |
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$"} / kubelet_volume_stats_capacity_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$"})) > 0.7 OR
max(max by(persistentvolumeclaim) (1 - kubelet_volume_stats_available_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$-wal"} / kubelet_volume_stats_capacity_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$-wal"})) > 0.7 OR
max(sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_used_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
/
sum by (namespace,persistentvolumeclaim) (kubelet_volume_stats_capacity_bytes{namespace="harbor", persistentvolumeclaim=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$-tbs.*"})
*
on(namespace, persistentvolumeclaim) group_left(volume)
kube_pod_spec_volumes_persistentvolumeclaims_info{pod=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$"}
) > 0.7
for: 5m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-17-cluster
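# NOTE: The two disk-space alerts OR together three checks: the data PVCs,
# the "-wal" PVCs, and any "-tbs*" tablespace PVCs, firing at 70% (warning)
# and 90% (critical) utilization of kubelet-reported volume capacity.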
- alert: CNPGClusterOffline
annotations:
summary: CNPG Cluster has no running instances!
description: |-
CloudNativePG Cluster "harbor/harbor-postgresql-17-cluster" has no ready instances.
Having an offline cluster means your applications will not be able to access the database, leading to
potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterOffline.md
expr: |
(count(cnpg_collector_up{namespace="harbor",pod=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$"}) OR on() vector(0)) == 0
for: 5m
labels:
severity: critical
namespace: harbor
cnpg_cluster: harbor-postgresql-17-cluster
- alert: CNPGClusterPGDatabaseXidAgeWarning
annotations:
summary: CNPG Cluster has a high number of transactions since the frozen XID.
description: |-
Over 300,000,000 transactions since the frozen XID
on pod {{ $labels.pod }}
expr: |
cnpg_pg_database_xid_age > 300000000
for: 1m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-17-cluster
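# NOTE: PostgreSQL transaction IDs wrap around at roughly 2^31 (~2.1
# billion). Warning at 300,000,000 transactions past the frozen XID flags
# clusters where autovacuum freezing is not keeping up (the stock
# autovacuum_freeze_max_age is 200,000,000) well before wraparound becomes
# an emergency.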
- alert: CNPGClusterPGReplicationWarning
annotations:
summary: CNPG Cluster standby is lagging behind the primary.
description: |-
Standby is lagging behind by over 300 seconds (5 minutes)
expr: |
cnpg_pg_replication_lag > 300
for: 1m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-17-cluster
- alert: CNPGClusterReplicaFailingReplicationWarning
annotations:
summary: CNPG Cluster has a replica that is failing to replicate.
description: |-
Replica {{ $labels.pod }}
is failing to replicate
expr: |
cnpg_pg_replication_in_recovery > cnpg_pg_replication_is_wal_receiver_up
for: 1m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-17-cluster
- alert: CNPGClusterZoneSpreadWarning
annotations:
summary: CNPG Cluster has instances in the same availability zone.
description: |-
CloudNativePG Cluster "harbor/harbor-postgresql-17-cluster" has instances in the same availability zone.
A disaster in one availability zone will lead to a potential service disruption and/or data loss.
runbook_url: https://github.com/cloudnative-pg/charts/blob/main/charts/cluster/docs/runbooks/CNPGClusterZoneSpreadWarning.md
expr: |
3 > count(count by (label_topology_kubernetes_io_zone) (kube_pod_info{namespace="harbor", pod=~"harbor-postgresql-17-cluster-([1-9][0-9]*)$"} * on(node,instance) group_left(label_topology_kubernetes_io_zone) kube_node_labels)) < 3
for: 5m
labels:
severity: warning
namespace: harbor
cnpg_cluster: harbor-postgresql-17-cluster
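# NOTE: CNPGClusterZoneSpreadWarning fires whenever the instances span
# fewer than 3 availability zones; on a cluster whose nodes all sit in one
# zone it will fire continuously and is a candidate for silencing.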
---
# Source: harbor/templates/redis-replication.yaml
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: redis-replication-harbor
namespace: harbor
labels:
app.kubernetes.io/name: redis-replication-harbor
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
spec:
clusterSize: 3
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
kubernetesConfig:
image: quay.io/opstree/redis:v8.0.3
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 50m
memory: 128Mi
storage:
volumeClaimTemplate:
spec:
storageClassName: ceph-block
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:v1.48.0
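# NOTE: The OT-Container-Kit operator reconciles this into a 3-pod
# replication topology (one master, two replicas) backed by 1Gi ceph-block
# PVCs, with a redis-exporter sidecar publishing metrics for the
# ServiceMonitor defined further below.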
---
# Source: harbor/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "harbor-postgresql-17-daily-backup-scheduled-backup"
namespace: harbor
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: harbor-postgresql-17
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: false
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: harbor-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "harbor-postgresql-17-external-backup"
---
# Source: harbor/charts/postgres-17-cluster/templates/scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
name: "harbor-postgresql-17-live-backup-scheduled-backup"
namespace: harbor
labels:
helm.sh/chart: postgres-17-cluster-6.16.1
app.kubernetes.io/name: harbor-postgresql-17
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "6.16.1"
app.kubernetes.io/managed-by: Helm
spec:
immediate: true
suspend: false
schedule: "0 0 0 * * *"
backupOwnerReference: self
cluster:
name: harbor-postgresql-17-cluster
method: plugin
pluginConfiguration:
name: barman-cloud.cloudnative-pg.io
parameters:
barmanObjectName: "harbor-postgresql-17-garage-local-backup"
---
# Source: harbor/charts/harbor/templates/metrics/metrics-svcmon.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: harbor
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
spec:
jobLabel: app.kubernetes.io/name
endpoints:
- port: http-metrics
honorLabels: true
selector:
matchLabels:
release: harbor
app: "harbor"
---
# Source: harbor/templates/service-monitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: redis-replication-harbor
namespace: harbor
labels:
app.kubernetes.io/name: redis-replication-harbor
app.kubernetes.io/instance: harbor
app.kubernetes.io/part-of: harbor
redis-operator: "true"
env: production
spec:
selector:
matchLabels:
redis_setup_type: replication
endpoints:
- port: redis-exporter
interval: 30s
scrapeTimeout: 10s
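# NOTE: The redis_setup_type=replication selector is expected to match the
# Service labels the redis operator applies to RedisReplication objects,
# so the exporter sidecar above is scraped every 30s with a 10s timeout.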
---
# Source: harbor/charts/harbor/templates/core/core-pre-upgrade-job.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: migration-job
namespace: "harbor"
labels:
heritage: Helm
release: harbor
chart: harbor
app: "harbor"
app.kubernetes.io/instance: harbor
app.kubernetes.io/name: harbor
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/part-of: harbor
app.kubernetes.io/version: "2.14.0"
component: migrator
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "-5"
spec:
template:
metadata:
labels:
release: harbor
app: "harbor"
component: migrator
spec:
restartPolicy: Never
securityContext:
runAsUser: 10000
fsGroup: 10000
terminationGracePeriodSeconds: 120
containers:
- name: core-job
image: goharbor/harbor-core:v2.14.1
imagePullPolicy: IfNotPresent
command: ["/harbor/harbor_core", "-mode=migrate"]
envFrom:
- configMapRef:
name: "harbor-core"
- secretRef:
name: "harbor-core"
env:
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
name: harbor-postgresql-17-cluster-app
key: password
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
volumeMounts:
- name: config
mountPath: /etc/core/app.conf
subPath: app.conf
volumes:
- name: config
configMap:
name: harbor-core
items:
- key: app.conf
path: app.conf
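# NOTE: With "helm.sh/hook": pre-upgrade and weight -5, this Job runs (and
# must succeed) before the upgraded release manifests are applied;
# harbor_core in -mode=migrate applies pending database schema migrations
# using the CNPG credentials injected via POSTGRESQL_PASSWORD.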