Automated Manifest Update - Automerge #6912

Merged
gitea-bot merged 1 commit from auto/update-manifests-automerge-6907 into manifests 2026-05-13 00:12:41 +00:00
14 changed files with 89 additions and 91 deletions

View File

@@ -3,10 +3,10 @@ kind: ClusterRole
metadata: metadata:
name: cloudnative-pg-edit name: cloudnative-pg-edit
labels: labels:
helm.sh/chart: cloudnative-pg-0.28.0 helm.sh/chart: cloudnative-pg-0.28.2
app.kubernetes.io/name: cloudnative-pg app.kubernetes.io/name: cloudnative-pg
app.kubernetes.io/instance: cloudnative-pg app.kubernetes.io/instance: cloudnative-pg
app.kubernetes.io/version: "1.29.0" app.kubernetes.io/version: "1.29.1"
app.kubernetes.io/managed-by: Helm app.kubernetes.io/managed-by: Helm
rules: rules:
- apiGroups: - apiGroups:

View File

@@ -3,10 +3,10 @@ kind: ClusterRole
metadata: metadata:
name: cloudnative-pg-view name: cloudnative-pg-view
labels: labels:
helm.sh/chart: cloudnative-pg-0.28.0 helm.sh/chart: cloudnative-pg-0.28.2
app.kubernetes.io/name: cloudnative-pg app.kubernetes.io/name: cloudnative-pg
app.kubernetes.io/instance: cloudnative-pg app.kubernetes.io/instance: cloudnative-pg
app.kubernetes.io/version: "1.29.0" app.kubernetes.io/version: "1.29.1"
app.kubernetes.io/managed-by: Helm app.kubernetes.io/managed-by: Helm
rules: rules:
- apiGroups: - apiGroups:

View File

@@ -3,10 +3,10 @@ kind: ClusterRole
metadata: metadata:
name: cloudnative-pg name: cloudnative-pg
labels: labels:
helm.sh/chart: cloudnative-pg-0.28.0 helm.sh/chart: cloudnative-pg-0.28.2
app.kubernetes.io/name: cloudnative-pg app.kubernetes.io/name: cloudnative-pg
app.kubernetes.io/instance: cloudnative-pg app.kubernetes.io/instance: cloudnative-pg
app.kubernetes.io/version: "1.29.0" app.kubernetes.io/version: "1.29.1"
app.kubernetes.io/managed-by: Helm app.kubernetes.io/managed-by: Helm
rules: rules:
- apiGroups: - apiGroups:

View File

@@ -3,10 +3,10 @@ kind: ClusterRoleBinding
metadata: metadata:
name: cloudnative-pg name: cloudnative-pg
labels: labels:
helm.sh/chart: cloudnative-pg-0.28.0 helm.sh/chart: cloudnative-pg-0.28.2
app.kubernetes.io/name: cloudnative-pg app.kubernetes.io/name: cloudnative-pg
app.kubernetes.io/instance: cloudnative-pg app.kubernetes.io/instance: cloudnative-pg
app.kubernetes.io/version: "1.29.0" app.kubernetes.io/version: "1.29.1"
app.kubernetes.io/managed-by: Helm app.kubernetes.io/managed-by: Helm
roleRef: roleRef:
apiGroup: rbac.authorization.k8s.io apiGroup: rbac.authorization.k8s.io

View File

@@ -4,9 +4,9 @@ metadata:
name: cnpg-controller-manager-config name: cnpg-controller-manager-config
namespace: cloudnative-pg namespace: cloudnative-pg
labels: labels:
helm.sh/chart: cloudnative-pg-0.28.0 helm.sh/chart: cloudnative-pg-0.28.2
app.kubernetes.io/name: cloudnative-pg app.kubernetes.io/name: cloudnative-pg
app.kubernetes.io/instance: cloudnative-pg app.kubernetes.io/instance: cloudnative-pg
app.kubernetes.io/version: "1.29.0" app.kubernetes.io/version: "1.29.1"
app.kubernetes.io/managed-by: Helm app.kubernetes.io/managed-by: Helm
data: {} data: {}

View File

@@ -4,10 +4,10 @@ metadata:
name: cnpg-default-monitoring name: cnpg-default-monitoring
namespace: cloudnative-pg namespace: cloudnative-pg
labels: labels:
helm.sh/chart: cloudnative-pg-0.28.0 helm.sh/chart: cloudnative-pg-0.28.2
app.kubernetes.io/name: cloudnative-pg app.kubernetes.io/name: cloudnative-pg
app.kubernetes.io/instance: cloudnative-pg app.kubernetes.io/instance: cloudnative-pg
app.kubernetes.io/version: "1.29.0" app.kubernetes.io/version: "1.29.1"
app.kubernetes.io/managed-by: Helm app.kubernetes.io/managed-by: Helm
cnpg.io/reload: "" cnpg.io/reload: ""
data: data:
@@ -32,11 +32,11 @@ data:
, state , state
, usename , usename
, COALESCE(application_name, '') AS application_name , COALESCE(application_name, '') AS application_name
, COUNT(*) , pg_catalog.count(*)
, COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs , COALESCE(EXTRACT (EPOCH FROM (pg_catalog.max(pg_catalog.now() OPERATOR(pg_catalog.-) xact_start))), 0) AS max_tx_secs
FROM pg_catalog.pg_stat_activity FROM pg_catalog.pg_stat_activity
GROUP BY datname, state, usename, application_name GROUP BY datname, state, usename, application_name
) sa ON states.state = sa.state ) sa ON states.state OPERATOR(pg_catalog.=) sa.state
WHERE sa.usename IS NOT NULL WHERE sa.usename IS NOT NULL
metrics: metrics:
- datname: - datname:
@@ -60,10 +60,10 @@ data:
backends_waiting: backends_waiting:
query: | query: |
SELECT count(*) AS total SELECT pg_catalog.count(*) AS total
FROM pg_catalog.pg_locks blocked_locks FROM pg_catalog.pg_locks blocked_locks
JOIN pg_catalog.pg_locks blocking_locks JOIN pg_catalog.pg_locks blocking_locks
ON blocking_locks.locktype = blocked_locks.locktype ON blocking_locks.locktype OPERATOR(pg_catalog.=) blocked_locks.locktype
AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database
AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
@@ -73,8 +73,8 @@ data:
AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
AND blocking_locks.pid != blocked_locks.pid AND blocking_locks.pid OPERATOR(pg_catalog.<>) blocked_locks.pid
JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid OPERATOR(pg_catalog.=) blocking_locks.pid
WHERE NOT blocked_locks.granted WHERE NOT blocked_locks.granted
metrics: metrics:
- total: - total:
@@ -116,14 +116,14 @@ data:
query: | query: |
SELECT CASE WHEN ( SELECT CASE WHEN (
NOT pg_catalog.pg_is_in_recovery() NOT pg_catalog.pg_is_in_recovery()
OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn()) OR pg_catalog.pg_last_wal_receive_lsn() OPERATOR(pg_catalog.=) pg_catalog.pg_last_wal_replay_lsn())
THEN 0 THEN 0
ELSE GREATEST (0, ELSE GREATEST (0,
EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) EXTRACT(EPOCH FROM (pg_catalog.now() OPERATOR(pg_catalog.-) pg_catalog.pg_last_xact_replay_timestamp())))
END AS lag, END AS lag,
pg_catalog.pg_is_in_recovery() AS in_recovery, pg_catalog.pg_is_in_recovery() AS in_recovery,
EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, EXISTS (TABLE pg_catalog.pg_stat_wal_receiver) AS is_wal_receiver_up,
(SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas (SELECT pg_catalog.count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas
metrics: metrics:
- lag: - lag:
usage: "GAUGE" usage: "GAUGE"
@@ -171,17 +171,17 @@ data:
query: | query: |
SELECT archived_count SELECT archived_count
, failed_count , failed_count
, COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival , COALESCE(EXTRACT(EPOCH FROM (pg_catalog.now() OPERATOR(pg_catalog.-) last_archived_time)), -1) AS seconds_since_last_archival
, COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure , COALESCE(EXTRACT(EPOCH FROM (pg_catalog.now() OPERATOR(pg_catalog.-) last_failed_time)), -1) AS seconds_since_last_failure
, COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time
, COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time
, COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn , COALESCE(CAST(CAST('x' OPERATOR(pg_catalog.||) pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn
, COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn , COALESCE(CAST(CAST('x' OPERATOR(pg_catalog.||) pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn
, EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time
FROM pg_catalog.pg_stat_archiver FROM pg_catalog.pg_stat_archiver
predicate_query: | predicate_query: |
SELECT NOT pg_catalog.pg_is_in_recovery() SELECT NOT pg_catalog.pg_is_in_recovery()
OR pg_catalog.current_setting('archive_mode') = 'always' OR pg_catalog.current_setting('archive_mode') OPERATOR(pg_catalog.=) 'always'
metrics: metrics:
- archived_count: - archived_count:
usage: "COUNTER" usage: "COUNTER"
@@ -467,12 +467,12 @@ data:
pg_extensions: pg_extensions:
query: | query: |
SELECT SELECT
current_database() as datname, pg_catalog.current_database() as datname,
name as extname, name as extname,
default_version, default_version,
installed_version, installed_version,
CASE CASE
WHEN default_version = installed_version THEN 0 WHEN default_version OPERATOR(pg_catalog.=) installed_version THEN 0
ELSE 1 ELSE 1
END AS update_available END AS update_available
FROM pg_catalog.pg_available_extensions FROM pg_catalog.pg_available_extensions

View File

@@ -5532,7 +5532,6 @@ spec:
procMount denotes the type of proc mount to use for the containers. procMount denotes the type of proc mount to use for the containers.
The default value is Default which uses the container runtime defaults for The default value is Default which uses the container runtime defaults for
readonly paths and masked paths. readonly paths and masked paths.
This requires the ProcMountType feature flag to be enabled.
Note that this field cannot be set when spec.os.name is windows. Note that this field cannot be set when spec.os.name is windows.
type: string type: string
readOnlyRootFilesystem: readOnlyRootFilesystem:

View File

@@ -2758,7 +2758,6 @@ spec:
procMount denotes the type of proc mount to use for the containers. procMount denotes the type of proc mount to use for the containers.
The default value is Default which uses the container runtime defaults for The default value is Default which uses the container runtime defaults for
readonly paths and masked paths. readonly paths and masked paths.
This requires the ProcMountType feature flag to be enabled.
Note that this field cannot be set when spec.os.name is windows. Note that this field cannot be set when spec.os.name is windows.
type: string type: string
readOnlyRootFilesystem: readOnlyRootFilesystem:
@@ -4253,7 +4252,6 @@ spec:
procMount denotes the type of proc mount to use for the containers. procMount denotes the type of proc mount to use for the containers.
The default value is Default which uses the container runtime defaults for The default value is Default which uses the container runtime defaults for
readonly paths and masked paths. readonly paths and masked paths.
This requires the ProcMountType feature flag to be enabled.
Note that this field cannot be set when spec.os.name is windows. Note that this field cannot be set when spec.os.name is windows.
type: string type: string
readOnlyRootFilesystem: readOnlyRootFilesystem:
@@ -4719,7 +4717,6 @@ spec:
When set to false, a new userns is created for the pod. Setting false is useful for When set to false, a new userns is created for the pod. Setting false is useful for
mitigating container breakout vulnerabilities even allowing users to run their mitigating container breakout vulnerabilities even allowing users to run their
containers as root without actually having root privileges on the host. containers as root without actually having root privileges on the host.
This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
type: boolean type: boolean
hostname: hostname:
description: |- description: |-
@@ -5833,7 +5830,6 @@ spec:
procMount denotes the type of proc mount to use for the containers. procMount denotes the type of proc mount to use for the containers.
The default value is Default which uses the container runtime defaults for The default value is Default which uses the container runtime defaults for
readonly paths and masked paths. readonly paths and masked paths.
This requires the ProcMountType feature flag to be enabled.
Note that this field cannot be set when spec.os.name is windows. Note that this field cannot be set when spec.os.name is windows.
type: string type: string
readOnlyRootFilesystem: readOnlyRootFilesystem:
@@ -6382,6 +6378,14 @@ spec:
It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
Containers that need access to the ResourceClaim reference it with this name. Containers that need access to the ResourceClaim reference it with this name.
When the DRAWorkloadResourceClaims feature gate is enabled and this Pod
belongs to a PodGroup, a PodResourceClaim is matched to a
PodGroupResourceClaim if all of their fields are equal (Name,
ResourceClaimName, and ResourceClaimTemplateName). A matched claim references
a single ResourceClaim shared across all Pods in the PodGroup, reserved for
the PodGroup in ResourceClaimStatus.ReservedFor rather than for individual
Pods.
properties: properties:
name: name:
description: |- description: |-
@@ -6407,6 +6411,16 @@ spec:
generated component, will be used to form a unique name for the generated component, will be used to form a unique name for the
ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.
When the DRAWorkloadResourceClaims feature gate is enabled and the pod
belongs to a PodGroup that defines a PodGroupResourceClaim with the same
Name and ResourceClaimTemplateName, this PodResourceClaim resolves to the
ResourceClaim generated for the PodGroup. All pods in the group that
define an equivalent PodResourceClaim matching the
PodGroupResourceClaim's Name and ResourceClaimTemplateName share the same
generated ResourceClaim. ResourceClaims generated for a PodGroup are
owned by the PodGroup and their lifecycles are tied to the PodGroup
instead of any individual pod.
This field is immutable and no changes will be made to the This field is immutable and no changes will be made to the
corresponding ResourceClaim by the control plane after creating the corresponding ResourceClaim by the control plane after creating the
ResourceClaim. ResourceClaim.
@@ -6531,6 +6545,28 @@ spec:
x-kubernetes-list-map-keys: x-kubernetes-list-map-keys:
- name - name
x-kubernetes-list-type: map x-kubernetes-list-type: map
schedulingGroup:
description: |-
SchedulingGroup provides a reference to the immediate scheduling runtime
grouping object that this Pod belongs to.
This field is used by the scheduler to identify the group and apply the
correct group scheduling policies. The association with a group also
impacts other lifecycle aspects of a Pod that are relevant in a wider context
of scheduling like preemption, resource attachment, etc. If not specified,
the Pod is treated as a single unit in all of these aspects.
The group object referenced by this field may not exist at the time the
Pod is created.
This field is immutable, but a group object with the same name may be
recreated with different policies. Doing this during pod scheduling
may result in the placement not conforming to the expected policies.
properties:
podGroupName:
description: |-
PodGroupName specifies the name of the standalone PodGroup object
that represents the runtime instance of this group.
Must be a DNS subdomain.
type: string
type: object
securityContext: securityContext:
description: |- description: |-
SecurityContext holds pod-level security attributes and common container settings. SecurityContext holds pod-level security attributes and common container settings.
@@ -7900,7 +7936,7 @@ spec:
A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec). The volume will be mounted read-only (ro).
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties: properties:
@@ -8068,8 +8104,7 @@ spec:
description: |- description: |-
portworxVolume represents a portworx volume attached and mounted on kubelets host machine. portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate are redirected to the pxd.portworx.com CSI driver.
is on.
properties: properties:
fsType: fsType:
description: |- description: |-
@@ -8843,42 +8878,6 @@ spec:
x-kubernetes-list-map-keys: x-kubernetes-list-map-keys:
- name - name
x-kubernetes-list-type: map x-kubernetes-list-type: map
workloadRef:
description: |-
WorkloadRef provides a reference to the Workload object that this Pod belongs to.
This field is used by the scheduler to identify the PodGroup and apply the
correct group scheduling policies. The Workload object referenced
by this field may not exist at the time the Pod is created.
This field is immutable, but a Workload object with the same name
may be recreated with different policies. Doing this during pod scheduling
may result in the placement not conforming to the expected policies.
properties:
name:
description: |-
Name defines the name of the Workload object this Pod belongs to.
Workload must be in the same namespace as the Pod.
If it doesn't match any existing Workload, the Pod will remain unschedulable
until a Workload object is created and observed by the kube-scheduler.
It must be a DNS subdomain.
type: string
podGroup:
description: |-
PodGroup is the name of the PodGroup within the Workload that this Pod
belongs to. If it doesn't match any existing PodGroup within the Workload,
the Pod will remain unschedulable until the Workload object is recreated
and observed by the kube-scheduler. It must be a DNS label.
type: string
podGroupReplicaKey:
description: |-
PodGroupReplicaKey specifies the replica key of the PodGroup to which this
Pod belongs. It is used to distinguish pods belonging to different replicas
of the same pod group. The pod group policy is applied separately to each replica.
When set, it must be a DNS label.
type: string
required:
- name
- podGroup
type: object
required: required:
- containers - containers
type: object type: object

View File

@@ -4,10 +4,10 @@ metadata:
name: cloudnative-pg name: cloudnative-pg
namespace: cloudnative-pg namespace: cloudnative-pg
labels: labels:
helm.sh/chart: cloudnative-pg-0.28.0 helm.sh/chart: cloudnative-pg-0.28.2
app.kubernetes.io/name: cloudnative-pg app.kubernetes.io/name: cloudnative-pg
app.kubernetes.io/instance: cloudnative-pg app.kubernetes.io/instance: cloudnative-pg
app.kubernetes.io/version: "1.29.0" app.kubernetes.io/version: "1.29.1"
app.kubernetes.io/managed-by: Helm app.kubernetes.io/managed-by: Helm
spec: spec:
replicas: 2 replicas: 2
@@ -18,9 +18,9 @@ spec:
template: template:
metadata: metadata:
annotations: annotations:
checksum/rbac: de6f8c0801db717a56d299947c958973df088ff20a44e5058af807c196824bd6 checksum/rbac: de1893fe73cb4a732a34dfc9010586666d3cbb25ccdce87bfe27a05faebd1051
checksum/config: 1b0703c95b4ee680d176bff9c527b0aa792c9296c2c7ce6d0d9feb8a4a719a9c checksum/config: 230f2a9e55a5dbc43268219ac9eea0ba30ad92b72a29234d99665ccba35e2287
checksum/monitoring-config: e386a85eeb1c33f87053f6424690f9a77e6ef7247a617162dbaee9b370fb70d1 checksum/monitoring-config: e029fcb861f94835e364ba9142e66dc421d16d528da505386c7ec1df19152215
labels: labels:
app.kubernetes.io/name: cloudnative-pg app.kubernetes.io/name: cloudnative-pg
app.kubernetes.io/instance: cloudnative-pg app.kubernetes.io/instance: cloudnative-pg
@@ -36,14 +36,14 @@ spec:
- /manager - /manager
env: env:
- name: OPERATOR_IMAGE_NAME - name: OPERATOR_IMAGE_NAME
value: "ghcr.io/cloudnative-pg/cloudnative-pg:1.29.0" value: "ghcr.io/cloudnative-pg/cloudnative-pg:1.29.1"
- name: OPERATOR_NAMESPACE - name: OPERATOR_NAMESPACE
valueFrom: valueFrom:
fieldRef: fieldRef:
fieldPath: metadata.namespace fieldPath: metadata.namespace
- name: MONITORING_QUERIES_CONFIGMAP - name: MONITORING_QUERIES_CONFIGMAP
value: "cnpg-default-monitoring" value: "cnpg-default-monitoring"
image: "ghcr.io/cloudnative-pg/cloudnative-pg:1.29.0" image: "ghcr.io/cloudnative-pg/cloudnative-pg:1.29.1"
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
livenessProbe: livenessProbe:
httpGet: httpGet:

View File

@@ -3,10 +3,10 @@ kind: MutatingWebhookConfiguration
metadata: metadata:
name: cnpg-mutating-webhook-configuration name: cnpg-mutating-webhook-configuration
labels: labels:
helm.sh/chart: cloudnative-pg-0.28.0 helm.sh/chart: cloudnative-pg-0.28.2
app.kubernetes.io/name: cloudnative-pg app.kubernetes.io/name: cloudnative-pg
app.kubernetes.io/instance: cloudnative-pg app.kubernetes.io/instance: cloudnative-pg
app.kubernetes.io/version: "1.29.0" app.kubernetes.io/version: "1.29.1"
app.kubernetes.io/managed-by: Helm app.kubernetes.io/managed-by: Helm
webhooks: webhooks:
- admissionReviewVersions: - admissionReviewVersions:

View File

@@ -4,10 +4,10 @@ metadata:
name: cloudnative-pg name: cloudnative-pg
namespace: cloudnative-pg namespace: cloudnative-pg
labels: labels:
helm.sh/chart: cloudnative-pg-0.28.0 helm.sh/chart: cloudnative-pg-0.28.2
app.kubernetes.io/name: cloudnative-pg app.kubernetes.io/name: cloudnative-pg
app.kubernetes.io/instance: cloudnative-pg app.kubernetes.io/instance: cloudnative-pg
app.kubernetes.io/version: "1.29.0" app.kubernetes.io/version: "1.29.1"
app.kubernetes.io/managed-by: Helm app.kubernetes.io/managed-by: Helm
spec: spec:
selector: selector:

View File

@@ -4,10 +4,10 @@ metadata:
name: cnpg-webhook-service name: cnpg-webhook-service
namespace: cloudnative-pg namespace: cloudnative-pg
labels: labels:
helm.sh/chart: cloudnative-pg-0.28.0 helm.sh/chart: cloudnative-pg-0.28.2
app.kubernetes.io/name: cloudnative-pg app.kubernetes.io/name: cloudnative-pg
app.kubernetes.io/instance: cloudnative-pg app.kubernetes.io/instance: cloudnative-pg
app.kubernetes.io/version: "1.29.0" app.kubernetes.io/version: "1.29.1"
app.kubernetes.io/managed-by: Helm app.kubernetes.io/managed-by: Helm
spec: spec:
type: ClusterIP type: ClusterIP

View File

@@ -4,8 +4,8 @@ metadata:
name: cloudnative-pg name: cloudnative-pg
namespace: cloudnative-pg namespace: cloudnative-pg
labels: labels:
helm.sh/chart: cloudnative-pg-0.28.0 helm.sh/chart: cloudnative-pg-0.28.2
app.kubernetes.io/name: cloudnative-pg app.kubernetes.io/name: cloudnative-pg
app.kubernetes.io/instance: cloudnative-pg app.kubernetes.io/instance: cloudnative-pg
app.kubernetes.io/version: "1.29.0" app.kubernetes.io/version: "1.29.1"
app.kubernetes.io/managed-by: Helm app.kubernetes.io/managed-by: Helm

View File

@@ -3,10 +3,10 @@ kind: ValidatingWebhookConfiguration
metadata: metadata:
name: cnpg-validating-webhook-configuration name: cnpg-validating-webhook-configuration
labels: labels:
helm.sh/chart: cloudnative-pg-0.28.0 helm.sh/chart: cloudnative-pg-0.28.2
app.kubernetes.io/name: cloudnative-pg app.kubernetes.io/name: cloudnative-pg
app.kubernetes.io/instance: cloudnative-pg app.kubernetes.io/instance: cloudnative-pg
app.kubernetes.io/version: "1.29.0" app.kubernetes.io/version: "1.29.1"
app.kubernetes.io/managed-by: Helm app.kubernetes.io/managed-by: Helm
webhooks: webhooks:
- admissionReviewVersions: - admissionReviewVersions: