# Source: infrastructure/clusters/cl01tl/helm/rook-ceph/values.yaml
# Commit: 2eb730c87c — Alex Lebens — "feat: change resource limits" — 2026-04-06 17:11:53 -05:00
# CI: lint-test-helm / lint-helm OK (39s); lint-test-helm / validate-kubeconform OK (26s);
#     render-manifests / render-manifests OK (1m25s)
# (225 lines, 6.1 KiB, YAML)
# Values for the rook-ceph operator subchart.
rook-ceph:
  crds:
    enabled: true
  # Operator pod resources: memory capped, no CPU limit (avoids throttling).
  resources:
    limits:
      memory: 1Gi
    requests:
      cpu: 100m
      memory: 100Mi
  csi:
    # Require in-transit encryption (msgr2 secure mode) for kernel CephFS mounts.
    cephFSKernelMountOptions: "ms_mode=secure"
    enableMetadata: true
    serviceMonitor:
      enabled: true
  enableDiscoveryDaemon: true
  monitoring:
    enabled: true
# Values for the rook-ceph-cluster subchart.
rook-ceph-cluster:
  toolbox:
    enabled: true
    # Image pinned by tag AND digest for reproducibility.
    image: "quay.io/ceph/ceph:v20.2.1@sha256:0bae386bc859cd9a05b804d1ca16cca8853a64f90809044e2bf43095419dc337"
    resources:
      limits:
        memory: 1Gi
      requests:
        cpu: 1m
        memory: 10Mi
  monitoring:
    enabled: true
    createPrometheusRules: true
    prometheusRuleOverrides:
      # Disk-space alerting handled elsewhere; silence the chart-provided rule.
      CephNodeDiskspaceWarning:
        disabled: true
  cephImage:
    repository: quay.io/ceph/ceph
    # Must match the toolbox image pin above.
    tag: "v20.2.1@sha256:0bae386bc859cd9a05b804d1ca16cca8853a64f90809044e2bf43095419dc337"
  cephClusterSpec:
    cephConfig:
      osd:
        # Ceph config values must be strings.
        bluestore_slow_ops_warn_lifetime: "60"
        bluestore_slow_ops_warn_threshold: "10"
    csi:
      # Prefer reads from OSDs topologically close to the client node.
      readAffinity:
        enabled: true
    mgr:
      modules:
        - name: pg_autoscaler
          enabled: true
        - name: rook
          enabled: true
        - name: volumes
          enabled: true
    dashboard:
      # TLS terminated upstream (Gateway); serve dashboard over plain HTTP.
      ssl: false
    network:
      connections:
        encryption:
          enabled: true
        compression:
          enabled: true
        requireMsgr2: true
    placement:
      # All daemons restricted to nodes labeled for rook OSD duty.
      all:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: node-role.kubernetes.io/rook-osd-node
                    operator: Exists
      mon:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              # NOTE(review): one term with two expressions = node must carry BOTH
              # labels (AND). If mons should run on mon-nodes OR control-plane
              # nodes, split into two nodeSelectorTerms — confirm intent.
              - matchExpressions:
                  - key: node-role.kubernetes.io/rook-mon-node
                    operator: Exists
                  - key: node-role.kubernetes.io/control-plane
                    operator: Exists
        tolerations:
          - key: node-role.kubernetes.io/rook-mon-node
            operator: Exists
          - key: node-role.kubernetes.io/control-plane
            operator: Exists
    # Per-daemon resources: memory limits only (no CPU limits → no throttling).
    resources:
      mgr:
        limits:
          memory: 2Gi
        requests:
          cpu: 100m
          memory: 500Mi
      mon:
        limits:
          memory: 4Gi
        requests:
          cpu: 100m
          memory: 750Mi
      osd:
        limits:
          memory: 8Gi
        requests:
          cpu: 100m
          memory: 2Gi
      prepareosd:
        requests:
          cpu: 100m
          memory: 200Mi
      mgr-sidecar:
        limits:
          memory: 2Gi
        requests:
          cpu: 100m
          memory: 40Mi
      crashcollector:
        limits:
          memory: 2Gi
        requests:
          cpu: 10m
          memory: 20Mi
      logcollector:
        limits:
          memory: 2Gi
        requests:
          cpu: 10m
          memory: 100Mi
      cleanup:
        limits:
          memory: 2Gi
        requests:
          cpu: 10m
          memory: 100Mi
      exporter:
        limits:
          memory: 2Gi
        requests:
          cpu: 10m
          memory: 20Mi
    storage:
      # Only consume partitions explicitly labeled for CSI use.
      useAllDevices: false
      devicePathFilter: "/dev/disk/by-partlabel/r-csi-disk"
      config:
        osdsPerDevice: "1"
  # Expose the dashboard via a Gateway API HTTPRoute on the shared Traefik gateway.
  route:
    dashboard:
      host:
        name: ceph.alexlebens.net
        path: "/"
        pathType: PathPrefix
      parentRefs:
        - group: gateway.networking.k8s.io
          kind: Gateway
          name: traefik-gateway
          namespace: traefik
  cephBlockPools:
    - name: ceph-blockpool
      spec:
        failureDomain: host
        replicated:
          size: 3
        enableRBDStats: false
      storageClass:
        enabled: true
        name: ceph-block
        # Cluster-wide default StorageClass.
        isDefault: true
        reclaimPolicy: Delete
        allowVolumeExpansion: true
        volumeBindingMode: "Immediate"
        parameters:
          imageFormat: "2"
          imageFeatures: layering,exclusive-lock,object-map,fast-diff
          csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
          csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
          csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
          csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
          csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
          csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
          csi.storage.k8s.io/fstype: ext4
  cephFileSystems:
    - name: ceph-filesystem
      spec:
        metadataPool:
          replicated:
            size: 3
        dataPools:
          - failureDomain: host
            replicated:
              size: 3
            name: data0
        metadataServer:
          activeCount: 1
          activeStandby: true
          resources:
            limits:
              memory: 4Gi
            requests:
              cpu: 100m
              memory: 400Mi
          priorityClassName: system-cluster-critical
      storageClass:
        enabled: true
        isDefault: false
        name: ceph-filesystem
        pool: data0
        reclaimPolicy: Delete
        allowVolumeExpansion: true
        volumeBindingMode: "Immediate"
        parameters:
          csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
          csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
          csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
          csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
          csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
          csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
          # NOTE(review): fstype is unusual for CephFS (it has no block device to
          # format); the CephFS CSI driver ignores it — confirm and consider removing.
          csi.storage.k8s.io/fstype: ext4
  cephFileSystemVolumeSnapshotClass:
    enabled: true
    name: ceph-filesystem-snapshot
    isDefault: false
    deletionPolicy: Delete
  cephBlockPoolsVolumeSnapshotClass:
    enabled: true
    name: ceph-blockpool-snapshot
    # Default VolumeSnapshotClass, matching the default (block) StorageClass.
    isDefault: true
    deletionPolicy: Delete
  # No RGW object stores deployed.
  cephObjectStores: []