From 7f21ec570432c6ef1f4ab1b1e0602b200c17928f Mon Sep 17 00:00:00 2001
From: gitea-bot
Date: Tue, 3 Feb 2026 20:26:56 +0000
Subject: [PATCH] Automated Manifest Update (#3688)

This PR contains newly rendered Kubernetes manifests automatically generated
by the CI workflow.

Reviewed-on: https://gitea.alexlebens.dev/alexlebens/infrastructure/pulls/3688
Co-authored-by: gitea-bot
Co-committed-by: gitea-bot
---
 .../ClusterRole-elastic-operator-edit.yaml    |  10 +-
 .../ClusterRole-elastic-operator-view.yaml    |  10 +-
 .../ClusterRole-elastic-operator.yaml         |  32 +-
 .../ClusterRoleBinding-elastic-operator.yaml  |   4 +-
 .../ConfigMap-elastic-operator.yaml           |   4 +-
 ...efinition-agents.agent.k8s.elastic.co.yaml |  10 +-
 ...inition-apmservers.apm.k8s.elastic.co.yaml |   8 +-
 ...sagentpolicies.autoops.k8s.elastic.co.yaml | 173 ++++
 ...eDefinition-beats.beat.k8s.elastic.co.yaml |   6 +-
 ...lasticmapsservers.maps.k8s.elastic.co.yaml |   8 +-
 ...utoscalers.autoscaling.k8s.elastic.co.yaml |   6 +-
 ...searches.elasticsearch.k8s.elastic.co.yaml | 379 +++++++++++-
 ...rches.enterprisesearch.k8s.elastic.co.yaml |  10 +-
 ...inition-kibanas.kibana.k8s.elastic.co.yaml |  39 +-
 ...on-logstashes.logstash.k8s.elastic.co.yaml |  18 +-
 ...stries.packageregistry.k8s.elastic.co.yaml | 546 ++++++++++++++++++
 ...cies.stackconfigpolicy.k8s.elastic.co.yaml |  17 +-
 .../PodMonitor-elastic-operator.yaml          |   4 +-
 .../Secret-elastic-operator-webhook-cert.yaml |   4 +-
 .../Service-elastic-operator-webhook.yaml     |   4 +-
 .../ServiceAccount-elastic-operator.yaml      |   4 +-
 .../StatefulSet-elastic-operator.yaml         |  11 +-
 ...rator.elastic-operator.k8s.elastic.co.yaml |  44 +-
 ...nfigMap-gitea-meilisearch-environment.yaml |   4 +-
 ...rsistentVolumeClaim-gitea-meilisearch.yaml |   4 +-
 ...Pod-gitea-meilisearch-test-connection.yaml |   4 +-
 .../gitea/Service-gitea-meilisearch.yaml      |   4 +-
 .../ServiceAccount-gitea-meilisearch.yaml     |   4 +-
 .../ServiceMonitor-gitea-meilisearch.yaml     |   4 +-
 .../gitea/StatefulSet-gitea-meilisearch.yaml  |  12 +-
 ...gMap-jellyfin-meilisearch-environment.yaml |   4 +-
 ...stentVolumeClaim-jellyfin-meilisearch.yaml |   4 +-
 ...-jellyfin-meilisearch-test-connection.yaml |   4 +-
 .../Service-jellyfin-meilisearch.yaml         |   4 +-
 .../ServiceAccount-jellyfin-meilisearch.yaml  |   4 +-
 .../ServiceMonitor-jellyfin-meilisearch.yaml  |   4 +-
 .../StatefulSet-jellyfin-meilisearch.yaml     |  12 +-
 ...gMap-karakeep-meilisearch-environment.yaml |   4 +-
 ...stentVolumeClaim-karakeep-meilisearch.yaml |   4 +-
 ...-karakeep-meilisearch-test-connection.yaml |   4 +-
 .../Service-karakeep-meilisearch.yaml         |   4 +-
 .../ServiceAccount-karakeep-meilisearch.yaml  |   4 +-
 .../ServiceMonitor-karakeep-meilisearch.yaml  |   4 +-
 .../StatefulSet-karakeep-meilisearch.yaml     |  12 +-
 ...er-kube-prometheus-stack-alertmanager.yaml |   8 +-
 ...rRole-kube-prometheus-stack-admission.yaml |   4 +-
 ...erRole-kube-prometheus-stack-operator.yaml |   4 +-
 ...-prometheus-stack-prometheus-crd-edit.yaml |   4 +-
 ...-prometheus-stack-prometheus-crd-view.yaml |   4 +-
 ...Role-kube-prometheus-stack-prometheus.yaml |   4 +-
 ...nding-kube-prometheus-stack-admission.yaml |   4 +-
 ...inding-kube-prometheus-stack-operator.yaml |   4 +-
 ...ding-kube-prometheus-stack-prometheus.yaml |   4 +-
 ...etheus-stack-prometheus-node-exporter.yaml |   4 +-
 ...oyment-kube-prometheus-stack-operator.yaml |   8 +-
 ...te-kube-prometheus-stack-alertmanager.yaml |   4 +-
 ...oute-kube-prometheus-stack-prometheus.yaml |   4 +-
 ...ube-prometheus-stack-admission-create.yaml |   8 +-
 ...kube-prometheus-stack-admission-patch.yaml |  10 +-
 ...ation-kube-prometheus-stack-admission.yaml |   4 +-
 ...heus-kube-prometheus-stack-prometheus.yaml |   4 +-
 ...e-prometheus-stack-alertmanager.rules.yaml |   8 +-
 ...ube-prometheus-stack-config-reloaders.yaml |   4 +-
 ...etheusRule-kube-prometheus-stack-etcd.yaml |   4 +-
 ...e-kube-prometheus-stack-general.rules.yaml |   4 +-
 ...rules.container-cpu-usage-seconds-tot.yaml |   4 +-
 ...tack-k8s.rules.container-memory-cache.yaml |   4 +-
 ...-stack-k8s.rules.container-memory-rss.yaml |   4 +-
 ...stack-k8s.rules.container-memory-swap.yaml |   4 +-
 ...rules.container-memory-working-set-by.yaml |   4 +-
 ...us-stack-k8s.rules.container-resource.yaml |   4 +-
 ...-prometheus-stack-k8s.rules.pod-owner.yaml |   4 +-
 ...ack-kube-apiserver-availability.rules.yaml |   4 +-
 ...s-stack-kube-apiserver-burnrate.rules.yaml |   4 +-
 ...-stack-kube-apiserver-histogram.rules.yaml |   4 +-
 ...-prometheus-stack-kube-apiserver-slos.yaml |   4 +-
 ...s-stack-kube-prometheus-general.rules.yaml |   4 +-
 ...-kube-prometheus-node-recording.rules.yaml |   4 +-
 ...e-prometheus-stack-kube-state-metrics.yaml |   4 +-
 ...e-kube-prometheus-stack-kubelet.rules.yaml |   4 +-
 ...kube-prometheus-stack-kubernetes-apps.yaml |   4 +-
 ...prometheus-stack-kubernetes-resources.yaml |   4 +-
 ...e-prometheus-stack-kubernetes-storage.yaml |   4 +-
 ...eus-stack-kubernetes-system-apiserver.yaml |   4 +-
 ...theus-stack-kubernetes-system-kubelet.yaml |   4 +-
 ...be-prometheus-stack-kubernetes-system.yaml |   4 +-
 ...-prometheus-stack-node-exporter.rules.yaml |   4 +-
 ...e-kube-prometheus-stack-node-exporter.yaml |   4 +-
 ...le-kube-prometheus-stack-node-network.yaml |   4 +-
 ...Rule-kube-prometheus-stack-node.rules.yaml |   4 +-
 ...-prometheus-stack-prometheus-operator.yaml |   4 +-
 ...Rule-kube-prometheus-stack-prometheus.yaml |   4 +-
 .../Role-kube-prometheus-stack-admission.yaml |   4 +-
 ...nding-kube-prometheus-stack-admission.yaml |   4 +-
 ...er-kube-prometheus-stack-alertmanager.yaml |   4 +-
 ...ce-kube-prometheus-stack-alertmanager.yaml |   4 +-
 ...Service-kube-prometheus-stack-coredns.yaml |   4 +-
 ...rvice-kube-prometheus-stack-kube-etcd.yaml |   4 +-
 ...ervice-kube-prometheus-stack-operator.yaml |   4 +-
 ...etheus-stack-prometheus-node-exporter.yaml |   2 +-
 ...vice-kube-prometheus-stack-prometheus.yaml |   4 +-
 ...count-kube-prometheus-stack-admission.yaml |   4 +-
 ...nt-kube-prometheus-stack-alertmanager.yaml |   4 +-
 ...ccount-kube-prometheus-stack-operator.yaml |   4 +-
 ...etheus-stack-prometheus-node-exporter.yaml |   2 +-
 ...ount-kube-prometheus-stack-prometheus.yaml |   4 +-
 ...or-kube-prometheus-stack-alertmanager.yaml |   4 +-
 ...nitor-kube-prometheus-stack-apiserver.yaml |   4 +-
 ...Monitor-kube-prometheus-stack-coredns.yaml |   4 +-
 ...nitor-kube-prometheus-stack-kube-etcd.yaml |   4 +-
 ...Monitor-kube-prometheus-stack-kubelet.yaml |   4 +-
 ...onitor-kube-prometheus-stack-operator.yaml |   4 +-
 ...etheus-stack-prometheus-node-exporter.yaml |   2 +-
 ...itor-kube-prometheus-stack-prometheus.yaml |   4 +-
 ...ation-kube-prometheus-stack-admission.yaml |   4 +-
 .../navidrome/Deployment-navidrome-main.yaml  |   2 +-
 116 files changed, 1485 insertions(+), 276 deletions(-)
 create mode 100644 clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-autoopsagentpolicies.autoops.k8s.elastic.co.yaml
 create mode 100644 clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-packageregistries.packageregistry.k8s.elastic.co.yaml

diff --git a/clusters/cl01tl/manifests/elastic-operator/ClusterRole-elastic-operator-edit.yaml b/clusters/cl01tl/manifests/elastic-operator/ClusterRole-elastic-operator-edit.yaml
index 172dd604e..4bf15de73 100644
--- a/clusters/cl01tl/manifests/elastic-operator/ClusterRole-elastic-operator-edit.yaml
+++ b/clusters/cl01tl/manifests/elastic-operator/ClusterRole-elastic-operator-edit.yaml
@@ -7,8 +7,8 @@ metadata:
     rbac.authorization.k8s.io/aggregate-to-admin: "true"
     app.kubernetes.io/name: elastic-operator
     app.kubernetes.io/instance: elastic-operator
-    app.kubernetes.io/version: "3.2.0"
-    helm.sh/chart: eck-operator-3.2.0
+    app.kubernetes.io/version: "3.3.0"
+    helm.sh/chart: eck-operator-3.3.0
     app.kubernetes.io/managed-by: Helm
 rules:
   - apiGroups: ["elasticsearch.k8s.elastic.co"]
@@ -41,3 +41,9 @@ rules:
   - apiGroups: ["logstash.k8s.elastic.co"]
     resources: ["logstashes"]
     verbs: ["create", "delete", "deletecollection", "patch", "update"]
+  - apiGroups: ["autoops.k8s.elastic.co"]
+    resources: ["autoopsagentpolicies"]
+    verbs: ["create", "delete", "deletecollection", "patch", "update"]
+  - apiGroups: ["packageregistry.k8s.elastic.co"]
+    resources: ["packageregistries"]
+    verbs: ["create", "delete", "deletecollection", "patch", "update"]
diff --git a/clusters/cl01tl/manifests/elastic-operator/ClusterRole-elastic-operator-view.yaml b/clusters/cl01tl/manifests/elastic-operator/ClusterRole-elastic-operator-view.yaml
index b91ae4cec..2c34d421a 100644
--- a/clusters/cl01tl/manifests/elastic-operator/ClusterRole-elastic-operator-view.yaml
+++ b/clusters/cl01tl/manifests/elastic-operator/ClusterRole-elastic-operator-view.yaml
@@ -8,8 +8,8 @@ metadata:
     rbac.authorization.k8s.io/aggregate-to-admin: "true"
     app.kubernetes.io/name: elastic-operator
     app.kubernetes.io/instance: elastic-operator
-    app.kubernetes.io/version: "3.2.0"
-    helm.sh/chart: eck-operator-3.2.0
+    app.kubernetes.io/version: "3.3.0"
+    helm.sh/chart: eck-operator-3.3.0
     app.kubernetes.io/managed-by: Helm
 rules:
   - apiGroups: ["elasticsearch.k8s.elastic.co"]
@@ -42,3 +42,9 @@ rules:
   - apiGroups: ["logstash.k8s.elastic.co"]
     resources: ["logstashes"]
     verbs: ["get", "list", "watch"]
+  - apiGroups: ["autoops.k8s.elastic.co"]
+    resources: ["autoopsagentpolicies"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["packageregistry.k8s.elastic.co"]
+    resources: ["packageregistries"]
+    verbs: ["get", "list", "watch"]
diff --git a/clusters/cl01tl/manifests/elastic-operator/ClusterRole-elastic-operator.yaml b/clusters/cl01tl/manifests/elastic-operator/ClusterRole-elastic-operator.yaml
index e7d7d692b..d584444e9 100644
--- a/clusters/cl01tl/manifests/elastic-operator/ClusterRole-elastic-operator.yaml
+++ b/clusters/cl01tl/manifests/elastic-operator/ClusterRole-elastic-operator.yaml
@@ -5,8 +5,8 @@ metadata:
   labels:
     app.kubernetes.io/name: elastic-operator
     app.kubernetes.io/instance: elastic-operator
-    app.kubernetes.io/version: "3.2.0"
-    helm.sh/chart: eck-operator-3.2.0
+    app.kubernetes.io/version: "3.3.0"
+    helm.sh/chart: eck-operator-3.3.0
     app.kubernetes.io/managed-by: Helm
 rules:
   - apiGroups:
@@ -212,6 +212,34 @@ rules:
       - create
       - update
       - patch
+  - apiGroups:
+      - autoops.k8s.elastic.co
+    resources:
+      - autoopsagentpolicies
+      - autoopsagentpolicies/status
+      - autoopsagentpolicies/finalizers
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - update
+      - patch
+  - apiGroups:
+      - packageregistry.k8s.elastic.co
+    resources:
+      - packageregistries
+      - packageregistries/status
+      - packageregistries/finalizers
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - update
+      - patch
+      - delete
+      - deletecollection
   - apiGroups:
       - storage.k8s.io
     resources:
diff --git a/clusters/cl01tl/manifests/elastic-operator/ClusterRoleBinding-elastic-operator.yaml b/clusters/cl01tl/manifests/elastic-operator/ClusterRoleBinding-elastic-operator.yaml
index 5717162b2..4ec07aa54 100644
--- a/clusters/cl01tl/manifests/elastic-operator/ClusterRoleBinding-elastic-operator.yaml
+++ b/clusters/cl01tl/manifests/elastic-operator/ClusterRoleBinding-elastic-operator.yaml
@@ -5,8 +5,8 @@ metadata:
   labels:
     app.kubernetes.io/name: elastic-operator
     app.kubernetes.io/instance: elastic-operator
-    app.kubernetes.io/version: "3.2.0"
-    helm.sh/chart: eck-operator-3.2.0
+    app.kubernetes.io/version: "3.3.0"
+    helm.sh/chart: eck-operator-3.3.0
     app.kubernetes.io/managed-by: Helm
 roleRef:
   apiGroup: rbac.authorization.k8s.io
diff --git a/clusters/cl01tl/manifests/elastic-operator/ConfigMap-elastic-operator.yaml b/clusters/cl01tl/manifests/elastic-operator/ConfigMap-elastic-operator.yaml
index 5f039fdbd..8ad11cf2a 100644
--- a/clusters/cl01tl/manifests/elastic-operator/ConfigMap-elastic-operator.yaml
+++ b/clusters/cl01tl/manifests/elastic-operator/ConfigMap-elastic-operator.yaml
@@ -6,8 +6,8 @@ metadata:
   labels:
     app.kubernetes.io/name: elastic-operator
     app.kubernetes.io/instance: elastic-operator
-    app.kubernetes.io/version: "3.2.0"
-    helm.sh/chart: eck-operator-3.2.0
+    app.kubernetes.io/version: "3.3.0"
+    helm.sh/chart: eck-operator-3.3.0
     app.kubernetes.io/managed-by: Helm
 data:
   eck.yaml: |-
diff --git a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-agents.agent.k8s.elastic.co.yaml b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-agents.agent.k8s.elastic.co.yaml
index f3219a241..70ce99839 100644
--- a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-agents.agent.k8s.elastic.co.yaml
+++ b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-agents.agent.k8s.elastic.co.yaml
@@ -2,14 +2,14 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     helm.sh/resource-policy: keep
   labels:
     app.kubernetes.io/instance: 'elastic-operator'
     app.kubernetes.io/managed-by: 'Helm'
     app.kubernetes.io/name: 'eck-operator-crds'
-    app.kubernetes.io/version: '3.2.0'
-    helm.sh/chart: 'eck-operator-crds-3.2.0'
+    app.kubernetes.io/version: '3.3.0'
+    helm.sh/chart: 'eck-operator-crds-3.3.0'
   name: agents.agent.k8s.elastic.co
 spec:
   group: agent.k8s.elastic.co
@@ -662,7 +662,7 @@ spec:
           description: SelfSignedCertificate allows configuring the self-signed certificate generated by the operator.
           properties:
             disabled:
-              description: Disabled indicates that the provisioning of the self-signed certifcate should be disabled.
+              description: Disabled indicates that the provisioning of the self-signed certificate should be disabled.
               type: boolean
             subjectAltNames:
               description: SubjectAlternativeNames is a list of SANs to include in the generated HTTP TLS certificate.
@@ -939,7 +939,7 @@ spec:
               resources:
                 description: |-
                   resources represents the minimum resources the volume should have.
-                  If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+                  Users are allowed to specify resource requirements
                   that are lower than previous value but must still be higher than capacity recorded in the
                   status field of the claim.
                   More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
diff --git a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-apmservers.apm.k8s.elastic.co.yaml b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-apmservers.apm.k8s.elastic.co.yaml
index 6f59d3378..a73709f59 100644
--- a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-apmservers.apm.k8s.elastic.co.yaml
+++ b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-apmservers.apm.k8s.elastic.co.yaml
@@ -2,14 +2,14 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     helm.sh/resource-policy: keep
   labels:
     app.kubernetes.io/instance: 'elastic-operator'
     app.kubernetes.io/managed-by: 'Helm'
     app.kubernetes.io/name: 'eck-operator-crds'
-    app.kubernetes.io/version: '3.2.0'
-    helm.sh/chart: 'eck-operator-crds-3.2.0'
+    app.kubernetes.io/version: '3.3.0'
+    helm.sh/chart: 'eck-operator-crds-3.3.0'
   name: apmservers.apm.k8s.elastic.co
 spec:
   group: apm.k8s.elastic.co
@@ -488,7 +488,7 @@ spec:
           description: SelfSignedCertificate allows configuring the self-signed certificate generated by the operator.
           properties:
             disabled:
-              description: Disabled indicates that the provisioning of the self-signed certifcate should be disabled.
+              description: Disabled indicates that the provisioning of the self-signed certificate should be disabled.
               type: boolean
             subjectAltNames:
               description: SubjectAlternativeNames is a list of SANs to include in the generated HTTP TLS certificate.
diff --git a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-autoopsagentpolicies.autoops.k8s.elastic.co.yaml b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-autoopsagentpolicies.autoops.k8s.elastic.co.yaml
new file mode 100644
index 000000000..8def29b61
--- /dev/null
+++ b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-autoopsagentpolicies.autoops.k8s.elastic.co.yaml
@@ -0,0 +1,173 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.20.0
+    helm.sh/resource-policy: keep
+  labels:
+    app.kubernetes.io/instance: 'elastic-operator'
+    app.kubernetes.io/managed-by: 'Helm'
+    app.kubernetes.io/name: 'eck-operator-crds'
+    app.kubernetes.io/version: '3.3.0'
+    helm.sh/chart: 'eck-operator-crds-3.3.0'
+  name: autoopsagentpolicies.autoops.k8s.elastic.co
+spec:
+  group: autoops.k8s.elastic.co
+  names:
+    categories:
+    - elastic
+    kind: AutoOpsAgentPolicy
+    listKind: AutoOpsAgentPolicyList
+    plural: autoopsagentpolicies
+    shortNames:
+    - aop
+    singular: autoopsagentpolicy
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - description: Ready resources
+      jsonPath: .status.ready
+      name: Ready
+      type: string
+    - jsonPath: .status.phase
+      name: Phase
+      type: string
+    - jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: AutoOpsAgentPolicy represents an Elastic AutoOps Policy resource in a Kubernetes cluster.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            properties:
+              autoOpsRef:
+                description: AutoOpsRef defines a reference to a secret containing connection details for AutoOps via Cloud Connect.
+                properties:
+                  secretName:
+                    description: |-
+                      SecretName references a Secret containing connection details for external AutoOps.
+                      Required when connecting via Cloud Connect. The secret must contain:
+                      - `cloud-connected-mode-api-key`: Cloud Connected Mode API key
+                      - `autoops-otel-url`: AutoOps OpenTelemetry endpoint URL
+                      - `autoops-token`: AutoOps authentication token
+                      - `cloud-connected-mode-api-url`: (optional) Cloud Connected Mode API URL
+                      This field cannot be used in combination with `name`.
+                    type: string
+                type: object
+              image:
+                description: Image is the AutoOps Agent Docker image to deploy.
+                type: string
+              podTemplate:
+                description: PodTemplate provides customisation options (labels, annotations, affinity rules, resource requests, and so on) for the Agent pods
+                type: object
+                x-kubernetes-preserve-unknown-fields: true
+              resourceSelector:
+                description: |-
+                  ResourceSelector is a label selector for the resources to be configured.
+                  Any Elasticsearch instances that match the selector will be configured to send data to AutoOps.
+                properties:
+                  matchExpressions:
+                    description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+                    items:
+                      description: |-
+                        A label selector requirement is a selector that contains values, a key, and an operator that
+                        relates the key and values.
+                      properties:
+                        key:
+                          description: key is the label key that the selector applies to.
+                          type: string
+                        operator:
+                          description: |-
+                            operator represents a key's relationship to a set of values.
+                            Valid operators are In, NotIn, Exists and DoesNotExist.
+                          type: string
+                        values:
+                          description: |-
+                            values is an array of string values. If the operator is In or NotIn,
+                            the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                            the values array must be empty. This array is replaced during a strategic
+                            merge patch.
+                          items:
+                            type: string
+                          type: array
+                          x-kubernetes-list-type: atomic
+                      required:
+                      - key
+                      - operator
+                      type: object
+                    type: array
+                    x-kubernetes-list-type: atomic
+                  matchLabels:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                      map is equivalent to an element of matchExpressions, whose key field is "key", the
+                      operator is "In", and the values array contains only "value". The requirements are ANDed.
+                    type: object
+                type: object
+                x-kubernetes-map-type: atomic
+              revisionHistoryLimit:
+                description: RevisionHistoryLimit is the number of revisions to retain to allow rollback in the underlying Deployment.
+                format: int32
+                type: integer
+              serviceAccountName:
+                description: |-
+                  ServiceAccountName is used to check access to Elasticsearch resources in different namespaces.
+                  Can only be used if ECK is enforcing RBAC on references (--enforce-rbac-on-refs flag).
+                  The service account must have "get" permission on elasticsearch.k8s.elastic.co/elasticsearches
+                  in the target namespaces.
+                type: string
+              version:
+                description: Version of the AutoOpsAgentPolicy.
+                type: string
+            required:
+            - version
+            type: object
+          status:
+            properties:
+              errors:
+                description: Errors is the number of resources that are in an error state.
+                type: integer
+              observedGeneration:
+                description: ObservedGeneration is the most recent generation observed for this AutoOpsAgentPolicy.
+                format: int64
+                type: integer
+              phase:
+                description: Phase is the phase of the AutoOpsAgentPolicy.
+                type: string
+              ready:
+                description: Ready is the number of resources that are in a ready state.
+                type: integer
+              resources:
+                description: Resources is the number of resources that match the ResourceSelector.
+                type: integer
+            required:
+            - errors
+            - ready
+            - resources
+            type: object
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-beats.beat.k8s.elastic.co.yaml b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-beats.beat.k8s.elastic.co.yaml
index 536cea0fe..d8c82040c 100644
--- a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-beats.beat.k8s.elastic.co.yaml
+++ b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-beats.beat.k8s.elastic.co.yaml
@@ -2,14 +2,14 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     helm.sh/resource-policy: keep
   labels:
     app.kubernetes.io/instance: 'elastic-operator'
     app.kubernetes.io/managed-by: 'Helm'
     app.kubernetes.io/name: 'eck-operator-crds'
-    app.kubernetes.io/version: '3.2.0'
-    helm.sh/chart: 'eck-operator-crds-3.2.0'
+    app.kubernetes.io/version: '3.3.0'
+    helm.sh/chart: 'eck-operator-crds-3.3.0'
   name: beats.beat.k8s.elastic.co
 spec:
   group: beat.k8s.elastic.co
diff --git a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-elasticmapsservers.maps.k8s.elastic.co.yaml b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-elasticmapsservers.maps.k8s.elastic.co.yaml
index db9c67143..37d66355f 100644
--- a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-elasticmapsservers.maps.k8s.elastic.co.yaml
+++ b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-elasticmapsservers.maps.k8s.elastic.co.yaml
@@ -2,14 +2,14 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     helm.sh/resource-policy: keep
   labels:
     app.kubernetes.io/instance: 'elastic-operator'
     app.kubernetes.io/managed-by: 'Helm'
     app.kubernetes.io/name: 'eck-operator-crds'
-    app.kubernetes.io/version: '3.2.0'
-    helm.sh/chart: 'eck-operator-crds-3.2.0'
+    app.kubernetes.io/version: '3.3.0'
+    helm.sh/chart: 'eck-operator-crds-3.3.0'
   name: elasticmapsservers.maps.k8s.elastic.co
 spec:
   group: maps.k8s.elastic.co
@@ -497,7 +497,7 @@ spec:
           description: SelfSignedCertificate allows configuring the self-signed certificate generated by the operator.
           properties:
             disabled:
-              description: Disabled indicates that the provisioning of the self-signed certifcate should be disabled.
+              description: Disabled indicates that the provisioning of the self-signed certificate should be disabled.
               type: boolean
             subjectAltNames:
               description: SubjectAlternativeNames is a list of SANs to include in the generated HTTP TLS certificate.
diff --git a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-elasticsearchautoscalers.autoscaling.k8s.elastic.co.yaml b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-elasticsearchautoscalers.autoscaling.k8s.elastic.co.yaml
index ea849f995..216b7b5d8 100644
--- a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-elasticsearchautoscalers.autoscaling.k8s.elastic.co.yaml
+++ b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-elasticsearchautoscalers.autoscaling.k8s.elastic.co.yaml
@@ -2,14 +2,14 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     helm.sh/resource-policy: keep
   labels:
     app.kubernetes.io/instance: 'elastic-operator'
     app.kubernetes.io/managed-by: 'Helm'
     app.kubernetes.io/name: 'eck-operator-crds'
-    app.kubernetes.io/version: '3.2.0'
-    helm.sh/chart: 'eck-operator-crds-3.2.0'
+    app.kubernetes.io/version: '3.3.0'
+    helm.sh/chart: 'eck-operator-crds-3.3.0'
   name: elasticsearchautoscalers.autoscaling.k8s.elastic.co
 spec:
   group: autoscaling.k8s.elastic.co
diff --git a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-elasticsearches.elasticsearch.k8s.elastic.co.yaml b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-elasticsearches.elasticsearch.k8s.elastic.co.yaml
index 910aada43..3c5f27762 100644
--- a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-elasticsearches.elasticsearch.k8s.elastic.co.yaml
+++ b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-elasticsearches.elasticsearch.k8s.elastic.co.yaml
@@ -2,14 +2,14 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     helm.sh/resource-policy: keep
   labels:
     app.kubernetes.io/instance: 'elastic-operator'
     app.kubernetes.io/managed-by: 'Helm'
     app.kubernetes.io/name: 'eck-operator-crds'
-    app.kubernetes.io/version: '3.2.0'
-    helm.sh/chart: 'eck-operator-crds-3.2.0'
+    app.kubernetes.io/version: '3.3.0'
+    helm.sh/chart: 'eck-operator-crds-3.3.0'
   name: elasticsearches.elasticsearch.k8s.elastic.co
 spec:
   group: elasticsearch.k8s.elastic.co
@@ -482,7 +482,7 @@ spec:
           description: SelfSignedCertificate allows configuring the self-signed certificate generated by the operator.
           properties:
             disabled:
-              description: Disabled indicates that the provisioning of the self-signed certifcate should be disabled.
+              description: Disabled indicates that the provisioning of the self-signed certificate should be disabled.
               type: boolean
             subjectAltNames:
               description: SubjectAlternativeNames is a list of SANs to include in the generated HTTP TLS certificate.
@@ -750,7 +750,7 @@ spec:
               resources:
                 description: |-
                   resources represents the minimum resources the volume should have.
-                  If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+                  Users are allowed to specify resource requirements
                   that are lower than previous value but must still be higher than capacity recorded in the
                   status field of the claim.
                   More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
@@ -864,7 +864,7 @@ spec:
               The behavior depends on the license level.
               With a Basic license or if podDisruptionBudget.spec is not empty:
               The default budget doesn't allow any Pod to be removed in case the cluster is not green or if there is only one node of type `data` or `master`.
-              In all other cases the default podDisruptionBudget sets `minUnavailable` equal to the total number of nodes minus 1.
+              In all other cases the default podDisruptionBudget sets `minAvailable` equal to the total number of nodes minus 1.
               With an Enterprise license and if podDisruptionBudget.spec is empty:
               The default budget is split into multiple budgets, each targeting a specific node role type
               allowing additional disruptions for certain roles according to the health status of the cluster.
@@ -1000,6 +1000,371 @@ spec:
             properties:
               enabled:
                 type: boolean
+              service:
+                description: Service defines the template for the remote cluster server Service object.
+                properties:
+                  metadata:
+                    description: |-
+                      ObjectMeta is the metadata of the service.
+                      The name and namespace provided here are managed by ECK and will be ignored.
+                    properties:
+                      annotations:
+                        additionalProperties:
+                          type: string
+                        type: object
+                      finalizers:
+                        items:
+                          type: string
+                        type: array
+                      labels:
+                        additionalProperties:
+                          type: string
+                        type: object
+                      name:
+                        type: string
+                      namespace:
+                        type: string
+                    type: object
+                  spec:
+                    description: Spec is the specification of the service.
+                    properties:
+                      allocateLoadBalancerNodePorts:
+                        description: |-
+                          allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+                          allocated for services with type LoadBalancer. Default is "true". It
+                          may be set to "false" if the cluster load-balancer does not rely on
+                          NodePorts. If the caller requests specific NodePorts (by specifying a
+                          value), those requests will be respected, regardless of this field.
+                          This field may only be set for services with type LoadBalancer and will
+                          be cleared if the type is changed to any other type.
+                        type: boolean
+                      clusterIP:
+                        description: |-
+                          clusterIP is the IP address of the service and is usually assigned
+                          randomly. If an address is specified manually, is in-range (as per
+                          system configuration), and is not in use, it will be allocated to the
+                          service; otherwise creation of the service will fail. This field may not
+                          be changed through updates unless the type field is also being changed
+                          to ExternalName (which requires this field to be blank) or the type
+                          field is being changed from ExternalName (in which case this field may
+                          optionally be specified, as describe above). Valid values are "None",
+                          empty string (""), or a valid IP address. Setting this to "None" makes a
+                          "headless service" (no virtual IP), which is useful when direct endpoint
+                          connections are preferred and proxying is not required. Only applies to
+                          types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+                          when creating a Service of type ExternalName, creation will fail. This
+                          field will be wiped when updating a Service to type ExternalName.
+                          More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                        type: string
+                      clusterIPs:
+                        description: |-
+                          ClusterIPs is a list of IP addresses assigned to this service, and are
+                          usually assigned randomly. If an address is specified manually, is
+                          in-range (as per system configuration), and is not in use, it will be
+                          allocated to the service; otherwise creation of the service will fail.
+                          This field may not be changed through updates unless the type field is
+                          also being changed to ExternalName (which requires this field to be
+                          empty) or the type field is being changed from ExternalName (in which
+                          case this field may optionally be specified, as describe above). Valid
+                          values are "None", empty string (""), or a valid IP address. Setting
+                          this to "None" makes a "headless service" (no virtual IP), which is
+                          useful when direct endpoint connections are preferred and proxying is
+                          not required. Only applies to types ClusterIP, NodePort, and
+                          LoadBalancer. If this field is specified when creating a Service of type
+                          ExternalName, creation will fail. This field will be wiped when updating
+                          a Service to type ExternalName. If this field is not specified, it will
+                          be initialized from the clusterIP field. If this field is specified,
+                          clients must ensure that clusterIPs[0] and clusterIP have the same
+                          value.
+
+                          This field may hold a maximum of two entries (dual-stack IPs, in either order).
+                          These IPs must correspond to the values of the ipFamilies field. Both
+                          clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+                          More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                        items:
+                          type: string
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      externalIPs:
+                        description: |-
+                          externalIPs is a list of IP addresses for which nodes in the cluster
+                          will also accept traffic for this service. These IPs are not managed by
+                          Kubernetes. The user is responsible for ensuring that traffic arrives
+                          at a node with this IP. A common example is external load-balancers
+                          that are not part of the Kubernetes system.
+                        items:
+                          type: string
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      externalName:
+                        description: |-
+                          externalName is the external reference that discovery mechanisms will
+                          return as an alias for this service (e.g. a DNS CNAME record). No
+                          proxying will be involved. Must be a lowercase RFC-1123 hostname
+                          (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+                        type: string
+                      externalTrafficPolicy:
+                        description: |-
+                          externalTrafficPolicy describes how nodes distribute service traffic they
+                          receive on one of the Service's "externally-facing" addresses (NodePorts,
+                          ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+                          the service in a way that assumes that external load balancers will take care
+                          of balancing the service traffic between nodes, and so each node will deliver
+                          traffic only to the node-local endpoints of the service, without masquerading
+                          the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+                          be dropped.) The default value, "Cluster", uses the standard behavior of
+                          routing to all endpoints evenly (possibly modified by topology and other
+                          features). Note that traffic sent to an External IP or LoadBalancer IP from
+                          within the cluster will always get "Cluster" semantics, but clients sending to
+                          a NodePort from within the cluster may need to take traffic policy into account
+                          when picking a node.
+                        type: string
+                      healthCheckNodePort:
+                        description: |-
+                          healthCheckNodePort specifies the healthcheck nodePort for the service.
+                          This only applies when type is set to LoadBalancer and
+                          externalTrafficPolicy is set to Local. If a value is specified, is
+                          in-range, and is not in use, it will be used. If not specified, a value
+                          will be automatically allocated. External systems (e.g. load-balancers)
+                          can use this port to determine if a given node holds endpoints for this
+                          service or not. If this field is specified when creating a Service
+                          which does not need it, creation will fail. This field will be wiped
+                          when updating a Service to no longer need it (e.g. changing type).
+                          This field cannot be updated once set.
+                        format: int32
+                        type: integer
+                      internalTrafficPolicy:
+                        description: |-
+                          InternalTrafficPolicy describes how nodes distribute service traffic they
+                          receive on the ClusterIP. If set to "Local", the proxy will assume that pods
+                          only want to talk to endpoints of the service on the same node as the pod,
+                          dropping the traffic if there are no local endpoints. The default value,
+                          "Cluster", uses the standard behavior of routing to all endpoints evenly
+                          (possibly modified by topology and other features).
+                        type: string
+                      ipFamilies:
+                        description: |-
+                          IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this
+                          service. This field is usually assigned automatically based on cluster
+                          configuration and the ipFamilyPolicy field. If this field is specified
+                          manually, the requested family is available in the cluster,
+                          and ipFamilyPolicy allows it, it will be used; otherwise creation of
+                          the service will fail. This field is conditionally mutable: it allows
+                          for adding or removing a secondary IP family, but it does not allow
+                          changing the primary IP family of the Service. Valid values are "IPv4"
+                          and "IPv6". This field only applies to Services of types ClusterIP,
+                          NodePort, and LoadBalancer, and does apply to "headless" services.
+                          This field will be wiped when updating a Service to type ExternalName.
+
+                          This field may hold a maximum of two entries (dual-stack families, in
+                          either order). These families must correspond to the values of the
+                          clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+                          governed by the ipFamilyPolicy field.
+                        items:
+                          description: |-
+                            IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+                            to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+                          type: string
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      ipFamilyPolicy:
+                        description: |-
+                          IPFamilyPolicy represents the dual-stack-ness requested or required by
+                          this Service. If there is no value provided, then this field will be set
+                          to SingleStack. Services can be "SingleStack" (a single IP family),
+                          "PreferDualStack" (two IP families on dual-stack configured clusters or
+                          a single IP family on single-stack clusters), or "RequireDualStack"
+                          (two IP families on dual-stack configured clusters, otherwise fail). The
+                          ipFamilies and clusterIPs fields depend on the value of this field. This
+                          field will be wiped when updating a service to type ExternalName.
+                        type: string
+                      loadBalancerClass:
+                        description: |-
+                          loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+                          If specified, the value of this field must be a label-style identifier, with an optional prefix,
+                          e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+                          This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+                          balancer implementation is used, today this is typically done through the cloud provider integration,
+                          but should apply for any default implementation. If set, it is assumed that a load balancer
+                          implementation is watching for Services with a matching class. Any default load balancer
+                          implementation (e.g. cloud providers) should ignore Services that set this field.
+                          This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+                          Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+                        type: string
+                      loadBalancerIP:
+                        description: |-
+                          Only applies to Service Type: LoadBalancer.
+                          This feature depends on whether the underlying cloud-provider supports specifying
+                          the loadBalancerIP when a load balancer is created.
+                          This field will be ignored if the cloud-provider does not support the feature.
+                          Deprecated: This field was under-specified and its meaning varies across implementations.
+                          Using it is non-portable and it may not support dual-stack.
+                          Users are encouraged to use implementation-specific annotations when available.
+                        type: string
+                      loadBalancerSourceRanges:
+                        description: |-
+                          If specified and supported by the platform, this will restrict traffic through the cloud-provider
+                          load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+                          cloud-provider does not support the feature."
+                          More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+                        items:
+                          type: string
+                        type: array
+                        x-kubernetes-list-type: atomic
+                      ports:
+                        description: |-
+                          The list of ports that are exposed by this service.
+                          More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                        items:
+                          description: ServicePort contains information on service's port.
+                          properties:
+                            appProtocol:
+                              description: |-
+                                The application protocol for this port.
+                                This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+                                This field follows standard Kubernetes label syntax.
+                                Valid values are either:
+
+                                * Un-prefixed protocol names - reserved for IANA standard service names (as per
+                                RFC-6335 and https://www.iana.org/assignments/service-names).
+
+                                * Kubernetes-defined prefixed names:
+                                  * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+                                  * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+                                  * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+
+                                * Other protocols should use implementation-defined prefixed names such as
+                                mycompany.com/my-custom-protocol.
+                              type: string
+                            name:
+                              description: |-
+                                The name of this port within the service. This must be a DNS_LABEL.
+                                All ports within a ServiceSpec must have unique names. When considering
+                                the endpoints for a Service, this must match the 'name' field in the
+                                EndpointPort.
+                                Optional if only one ServicePort is defined on this service.
+                              type: string
+                            nodePort:
+                              description: |-
+                                The port on each node on which this service is exposed when type is
+                                NodePort or LoadBalancer. Usually assigned by the system. If a value is
+                                specified, in-range, and not in use it will be used, otherwise the
+                                operation will fail. If not specified, a port will be allocated if this
+                                Service requires one. If this field is specified when creating a
+                                Service which does not need it, creation will fail. This field will be
+                                wiped when updating a Service to no longer need it (e.g. changing type
+                                from NodePort to ClusterIP).
+                                More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+                              format: int32
+                              type: integer
+                            port:
+                              description: The port that will be exposed by this service.
+                              format: int32
+                              type: integer
+                            protocol:
+                              default: TCP
+                              description: |-
+                                The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
+                                Default is TCP.
+                              type: string
+                            targetPort:
+                              anyOf:
+                              - type: integer
+                              - type: string
+                              description: |-
+                                Number or name of the port to access on the pods targeted by the service.
+                                Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+                                If this is a string, it will be looked up as a named port in the
+                                target Pod's container ports. If this is not specified, the value
+                                of the 'port' field is used (an identity map).
+                                This field is ignored for services with clusterIP=None, and should be
+                                omitted or set equal to the 'port' field.
+                                More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
+                              x-kubernetes-int-or-string: true
+                          required:
+                          - port
+                          type: object
+                        type: array
+                        x-kubernetes-list-map-keys:
+                        - port
+                        - protocol
+                        x-kubernetes-list-type: map
+                      publishNotReadyAddresses:
+                        description: |-
+                          publishNotReadyAddresses indicates that any agent which deals with endpoints for this
+                          Service should disregard any indications of ready/not-ready.
+                          The primary use case for setting this field is for a StatefulSet's Headless Service to
+                          propagate SRV DNS records for its Pods for the purpose of peer discovery.
+                          The Kubernetes controllers that generate Endpoints and EndpointSlice resources for
+                          Services interpret this to mean that all endpoints are considered "ready" even if the
+                          Pods themselves are not. Agents which consume only Kubernetes generated endpoints
+                          through the Endpoints or EndpointSlice resources can safely assume this behavior.
+                        type: boolean
+                      selector:
+                        additionalProperties:
+                          type: string
+                        description: |-
+                          Route service traffic to pods with label keys and values matching this
+                          selector. If empty or not present, the service is assumed to have an
+                          external process managing its endpoints, which Kubernetes will not
+                          modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
+                          Ignored if type is ExternalName.
+                          More info: https://kubernetes.io/docs/concepts/services-networking/service/
+                        type: object
+                        x-kubernetes-map-type: atomic
+                      sessionAffinity:
+                        description: |-
+                          Supports "ClientIP" and "None". Used to maintain session affinity.
+                          Enable client IP based session affinity.
+                          Must be ClientIP or None.
+                          Defaults to None.
+                          More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+                        type: string
+                      sessionAffinityConfig:
+                        description: sessionAffinityConfig contains the configurations of session affinity.
+                        properties:
+                          clientIP:
+                            description: clientIP contains the configurations of Client IP based session affinity.
+                            properties:
+                              timeoutSeconds:
+                                description: |-
+                                  timeoutSeconds specifies the seconds of ClientIP type session sticky time.
+                                  The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP".
+                                  Default value is 10800(for 3 hours).
+                                format: int32
+                                type: integer
+                            type: object
+                        type: object
+                      trafficDistribution:
+                        description: |-
+                          TrafficDistribution offers a way to express preferences for how traffic
+                          is distributed to Service endpoints. Implementations can use this field
+                          as a hint, but are not required to guarantee strict adherence. If the
+                          field is not set, the implementation will apply its default routing
+                          strategy. If set to "PreferClose", implementations should prioritize
+                          endpoints that are in the same zone.
+                        type: string
+                      type:
+                        description: |-
+                          type determines how the Service is exposed. Defaults to ClusterIP. Valid
+                          options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
+                          "ClusterIP" allocates a cluster-internal IP address for load-balancing
+                          to endpoints. Endpoints are determined by the selector or if that is not
+                          specified, by manual construction of an Endpoints object or
+                          EndpointSlice objects. If clusterIP is "None", no virtual IP is
+                          allocated and the endpoints are published as a set of endpoints rather
+                          than a virtual IP.
+                          "NodePort" builds on ClusterIP and allocates a port on every node which
+                          routes to the same endpoints as the clusterIP.
+                          "LoadBalancer" builds on NodePort and creates an external load-balancer
+                          (if supported in the current cloud) which routes to the same endpoints
+                          as the clusterIP.
+                          "ExternalName" aliases this service to the specified externalName.
+                          Several other fields do not apply to ExternalName services.
+                          More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+                        type: string
+                    type: object
+                type: object
             type: object
           remoteClusters:
             description: RemoteClusters enables you to establish uni-directional connections to a remote Elasticsearch cluster.
@@ -2364,7 +2729,7 @@ spec:
               resources:
                 description: |-
                   resources represents the minimum resources the volume should have.
-                  If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+                  Users are allowed to specify resource requirements
                   that are lower than previous value but must still be higher than capacity recorded in the
                   status field of the claim.
                   More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
diff --git a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-enterprisesearches.enterprisesearch.k8s.elastic.co.yaml b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-enterprisesearches.enterprisesearch.k8s.elastic.co.yaml
index 429a94bb1..aba6cdfd5 100644
--- a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-enterprisesearches.enterprisesearch.k8s.elastic.co.yaml
+++ b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-enterprisesearches.enterprisesearch.k8s.elastic.co.yaml
@@ -2,14 +2,14 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     helm.sh/resource-policy: keep
   labels:
     app.kubernetes.io/instance: 'elastic-operator'
     app.kubernetes.io/managed-by: 'Helm'
     app.kubernetes.io/name: 'eck-operator-crds'
-    app.kubernetes.io/version: '3.2.0'
-    helm.sh/chart: 'eck-operator-crds-3.2.0'
+    app.kubernetes.io/version: '3.3.0'
+    helm.sh/chart: 'eck-operator-crds-3.3.0'
   name: enterprisesearches.enterprisesearch.k8s.elastic.co
 spec:
   group: enterprisesearch.k8s.elastic.co
@@ -497,7 +497,7 @@ spec:
           description: SelfSignedCertificate allows configuring the self-signed certificate generated by the operator.
           properties:
             disabled:
-              description: Disabled indicates that the provisioning of the self-signed certifcate should be disabled.
+              description: Disabled indicates that the provisioning of the self-signed certificate should be disabled.
               type: boolean
             subjectAltNames:
               description: SubjectAlternativeNames is a list of SANs to include in the generated HTTP TLS certificate.
@@ -1056,7 +1056,7 @@ spec:
           description: SelfSignedCertificate allows configuring the self-signed certificate generated by the operator.
           properties:
             disabled:
-              description: Disabled indicates that the provisioning of the self-signed certifcate should be disabled.
+              description: Disabled indicates that the provisioning of the self-signed certificate should be disabled.
               type: boolean
             subjectAltNames:
               description: SubjectAlternativeNames is a list of SANs to include in the generated HTTP TLS certificate.
diff --git a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-kibanas.kibana.k8s.elastic.co.yaml b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-kibanas.kibana.k8s.elastic.co.yaml
index b28560033..79a4ade95 100644
--- a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-kibanas.kibana.k8s.elastic.co.yaml
+++ b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-kibanas.kibana.k8s.elastic.co.yaml
@@ -2,14 +2,14 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     helm.sh/resource-policy: keep
   labels:
     app.kubernetes.io/instance: 'elastic-operator'
     app.kubernetes.io/managed-by: 'Helm'
     app.kubernetes.io/name: 'eck-operator-crds'
-    app.kubernetes.io/version: '3.2.0'
-    helm.sh/chart: 'eck-operator-crds-3.2.0'
+    app.kubernetes.io/version: '3.3.0'
+    helm.sh/chart: 'eck-operator-crds-3.3.0'
   name: kibanas.kibana.k8s.elastic.co
 spec:
   group: kibana.k8s.elastic.co
@@ -518,7 +518,7 @@ spec:
           description: SelfSignedCertificate allows configuring the self-signed certificate generated by the operator.
           properties:
             disabled:
-              description: Disabled indicates that the provisioning of the self-signed certifcate should be disabled.
+              description: Disabled indicates that the provisioning of the self-signed certificate should be disabled.
               type: boolean
             subjectAltNames:
               description: SubjectAlternativeNames is a list of SANs to include in the generated HTTP TLS certificate.
@@ -625,6 +625,34 @@ spec:
                 type: array
             type: object
         type: object
+      packageRegistryRef:
+        description: PackageRegistryRef is a reference to an Elastic Package Registry running in the same Kubernetes cluster.
+        properties:
+          name:
+            description: Name of an existing Kubernetes object corresponding to an Elastic resource managed by ECK.
+            type: string
+          namespace:
+            description: Namespace of the Kubernetes object. If empty, defaults to the current namespace.
+            type: string
+          secretName:
+            description: |-
+              SecretName is the name of an existing Kubernetes secret that contains connection information for associating an
+              Elastic resource not managed by the operator.
+              The referenced secret must contain the following:
+              - `url`: the URL to reach the Elastic resource
+              - `username`: the username of the user to be authenticated to the Elastic resource
+              - `password`: the password of the user to be authenticated to the Elastic resource
+              - `ca.crt`: the CA certificate in PEM format (optional)
+              - `api-key`: the key to authenticate against the Elastic resource instead of a username and password (supported only for `elasticsearchRefs` in AgentSpec and in BeatSpec)
+              This field cannot be used in combination with the other fields name, namespace or serviceName.
+            type: string
+          serviceName:
+            description: |-
+              ServiceName is the name of an existing Kubernetes service which is used to make requests to the referenced
+              object. It has to be in the same namespace as the referenced resource. If left empty, the default HTTP service of
+              the referenced resource is used.
+            type: string
+        type: object
       podTemplate:
         description: PodTemplate provides customisation options (labels, annotations, affinity rules, resource requests, and so on) for the Kibana pods
         type: object
@@ -715,6 +743,9 @@ spec:
               controller has not yet processed the changes contained in the Kibana specification.
             format: int64
             type: integer
+          packageRegistryAssociationStatus:
+            description: PackageRegistryAssociationStatus is the status of any auto-linking to Elastic Package Registry.
+            type: string
           selector:
             description: Selector is the label selector used to find all pods.
             type: string
diff --git a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-logstashes.logstash.k8s.elastic.co.yaml b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-logstashes.logstash.k8s.elastic.co.yaml
index 953235598..38487a080 100644
--- a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-logstashes.logstash.k8s.elastic.co.yaml
+++ b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-logstashes.logstash.k8s.elastic.co.yaml
@@ -2,14 +2,14 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     helm.sh/resource-policy: keep
   labels:
     app.kubernetes.io/instance: 'elastic-operator'
     app.kubernetes.io/managed-by: 'Helm'
     app.kubernetes.io/name: 'eck-operator-crds'
-    app.kubernetes.io/version: '3.2.0'
-    helm.sh/chart: 'eck-operator-crds-3.2.0'
+    app.kubernetes.io/version: '3.3.0'
+    helm.sh/chart: 'eck-operator-crds-3.3.0'
   name: logstashes.logstash.k8s.elastic.co
 spec:
   group: logstash.k8s.elastic.co
@@ -671,7 +671,7 @@ spec:
           description: SelfSignedCertificate allows configuring the self-signed certificate generated by the operator.
           properties:
             disabled:
-              description: Disabled indicates that the provisioning of the self-signed certifcate should be disabled.
+              description: Disabled indicates that the provisioning of the self-signed certificate should be disabled.
               type: boolean
             subjectAltNames:
               description: SubjectAlternativeNames is a list of SANs to include in the generated HTTP TLS certificate.
@@ -704,10 +704,10 @@ spec:
                   The maximum number of pods that can be unavailable during the update.
                   Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
                   Absolute number is calculated from percentage by rounding up. This can not be 0.
-                  Defaults to 1. This field is alpha-level and is only honored by servers that enable the
-                  MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to
+                  Defaults to 1. This field is beta-level and is enabled by default. The field applies to all pods in the range 0 to
                   Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be
                   counted towards MaxUnavailable.
+                  This setting might not be effective for the OrderedReady podManagementPolicy. That policy ensures pods are created and become ready one at a time.
                 x-kubernetes-int-or-string: true
               partition:
                 description: |-
@@ -864,7 +864,7 @@ spec:
               resources:
                 description: |-
                   resources represents the minimum resources the volume should have.
-                  If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+                  Users are allowed to specify resource requirements
                   that are lower than previous value but must still be higher than capacity recorded in the
                   status field of the claim.
                   More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
@@ -986,7 +986,7 @@ spec:
                       that it does not recognizes, then it should ignore that update and let
                       other controllers handle it.
                     type: string
-                  description: "allocatedResourceStatuses stores status of resource being resized for the given PVC.\nKey names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered\nreserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus\nshould ignore the update for the purpose it was designed. For example - a controller that\nonly is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid\nresources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature."
+                  description: "allocatedResourceStatuses stores status of resource being resized for the given PVC.\nKey names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered\nreserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus\nshould ignore the update for the purpose it was designed. For example - a controller that\nonly is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid\nresources associated with PVC."
                   type: object
                   x-kubernetes-map-type: granular
                   allocatedResources:
@@ -996,7 +996,7 @@ spec:
                     - type: string
                       pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                     x-kubernetes-int-or-string: true
-                  description: "allocatedResources tracks the resources allocated to a PVC including its capacity.\nKey names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered\nreserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation\nis requested.\nFor storage quota, the larger value from allocatedResources and PVC.spec.resources is used.\nIf allocatedResources is not set, PVC.spec.resources alone is used for quota calculation.\nIf a volume expansion capacity request is lowered, allocatedResources is only\nlowered if there are no expansion operations in progress and if the actual volume capacity\nis equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName\nshould ignore the update for the purpose it was designed. For example - a controller that\nonly is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid\nresources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature."
+                  description: "allocatedResources tracks the resources allocated to a PVC including its capacity.\nKey names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered\nreserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation\nis requested.\nFor storage quota, the larger value from allocatedResources and PVC.spec.resources is used.\nIf allocatedResources is not set, PVC.spec.resources alone is used for quota calculation.\nIf a volume expansion capacity request is lowered, allocatedResources is only\nlowered if there are no expansion operations in progress and if the actual volume capacity\nis equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName\nshould ignore the update for the purpose it was designed. For example - a controller that\nonly is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid\nresources associated with PVC."
                   type: object
                   capacity:
                     additionalProperties:
diff --git a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-packageregistries.packageregistry.k8s.elastic.co.yaml b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-packageregistries.packageregistry.k8s.elastic.co.yaml
new file mode 100644
index 000000000..177cd9b86
--- /dev/null
+++ b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-packageregistries.packageregistry.k8s.elastic.co.yaml
@@ -0,0 +1,546 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.20.0
+    helm.sh/resource-policy: keep
+  labels:
+    app.kubernetes.io/instance: 'elastic-operator'
+    app.kubernetes.io/managed-by: 'Helm'
+    app.kubernetes.io/name: 'eck-operator-crds'
+    app.kubernetes.io/version: '3.3.0'
+    helm.sh/chart: 'eck-operator-crds-3.3.0'
+  name: packageregistries.packageregistry.k8s.elastic.co
+spec:
+  group: packageregistry.k8s.elastic.co
+  names:
+    categories:
+    - elastic
+    kind: PackageRegistry
+    listKind: PackageRegistryList
+    plural: packageregistries
+    shortNames:
+    - epr
+    singular: packageregistry
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .status.health
+      name: health
+      type: string
+    - description: Available nodes
+      jsonPath: .status.availableNodes
+      name: nodes
+      type: integer
+    - description: PackageRegistry version
+      jsonPath: .status.version
+      name: version
+      type: string
+    - jsonPath: .metadata.creationTimestamp
+      name: age
+      type: date
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: PackageRegistry represents an Elastic Package Registry resource in a Kubernetes cluster.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PackageRegistrySpec holds the specification of an Elastic Package Registry instance. + properties: + config: + description: 'Config holds the PackageRegistry configuration. See: https://github.com/elastic/package-registry/blob/main/config.reference.yml' + type: object + x-kubernetes-preserve-unknown-fields: true + configRef: + description: |- + ConfigRef contains a reference to an existing Kubernetes Secret holding the Elastic Package Registry configuration. + Configuration settings are merged and have precedence over settings specified in `config`. + properties: + secretName: + description: SecretName is the name of the secret. + type: string + type: object + count: + description: Count of Elastic Package Registry instances to deploy. + format: int32 + type: integer + http: + description: HTTP holds the HTTP layer configuration for Elastic Package Registry. + properties: + service: + description: Service defines the template for the associated Kubernetes Service object. + properties: + metadata: + description: |- + ObjectMeta is the metadata of the service. + The name and namespace provided here are managed by ECK and will be ignored. + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: Spec is the specification of the service. + properties: + allocateLoadBalancerNodePorts: + description: |- + allocateLoadBalancerNodePorts defines if NodePorts will be automatically + allocated for services with type LoadBalancer. Default is "true". It + may be set to "false" if the cluster load-balancer does not rely on + NodePorts. If the caller requests specific NodePorts (by specifying a + value), those requests will be respected, regardless of this field. + This field may only be set for services with type LoadBalancer and will + be cleared if the type is changed to any other type. + type: boolean + clusterIP: + description: |- + clusterIP is the IP address of the service and is usually assigned + randomly. If an address is specified manually, is in-range (as per + system configuration), and is not in use, it will be allocated to the + service; otherwise creation of the service will fail. This field may not + be changed through updates unless the type field is also being changed + to ExternalName (which requires this field to be blank) or the type + field is being changed from ExternalName (in which case this field may + optionally be specified, as described above). Valid values are "None", + empty string (""), or a valid IP address. Setting this to "None" makes a + "headless service" (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation will fail.
This + field will be wiped when updating a Service to type ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + clusterIPs: + description: |- + ClusterIPs is a list of IP addresses assigned to this service, and are + usually assigned randomly. If an address is specified manually, is + in-range (as per system configuration), and is not in use, it will be + allocated to the service; otherwise creation of the service will fail. + This field may not be changed through updates unless the type field is + also being changed to ExternalName (which requires this field to be + empty) or the type field is being changed from ExternalName (in which + case this field may optionally be specified, as described above). Valid + values are "None", empty string (""), or a valid IP address. Setting + this to "None" makes a "headless service" (no virtual IP), which is + useful when direct endpoint connections are preferred and proxying is + not required. Only applies to types ClusterIP, NodePort, and + LoadBalancer. If this field is specified when creating a Service of type + ExternalName, creation will fail. This field will be wiped when updating + a Service to type ExternalName. If this field is not specified, it will + be initialized from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP have the same + value. + + This field may hold a maximum of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies field. Both + clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: |- + externalIPs is a list of IP addresses for which nodes in the cluster + will also accept traffic for this service. These IPs are not managed by + Kubernetes. The user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external load-balancers + that are not part of the Kubernetes system. + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features).
Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + healthCheckNodePort: + description: |- + healthCheckNodePort specifies the healthcheck nodePort for the service. + This only applies when type is set to LoadBalancer and + externalTrafficPolicy is set to Local. If a value is specified, is + in-range, and is not in use, it will be used. If not specified, a value + will be automatically allocated. External systems (e.g. load-balancers) + can use this port to determine if a given node holds endpoints for this + service or not. If this field is specified when creating a Service + which does not need it, creation will fail. This field will be wiped + when updating a Service to no longer need it (e.g. changing type). + This field cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilies: + description: |- + IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this + service. This field is usually assigned automatically based on cluster + configuration and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the cluster, + and ipFamilyPolicy allows it, it will be used; otherwise creation of + the service will fail. This field is conditionally mutable: it allows + for adding or removing a secondary IP family, but it does not allow + changing the primary IP family of the Service. Valid values are "IPv4" + and "IPv6". This field only applies to Services of types ClusterIP, + NodePort, and LoadBalancer, and does apply to "headless" services. + This field will be wiped when updating a Service to type ExternalName. + + This field may hold a maximum of two entries (dual-stack families, in + either order). These families must correspond to the values of the + clusterIPs field, if specified. Both clusterIPs and ipFamilies are + governed by the ipFamilyPolicy field. + items: + description: |- + IPFamily represents the IP Family (IPv4 or IPv6). This type is used + to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. 
+ type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: |- + Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider supports specifying + the loadBalancerIP when a load balancer is created. + This field will be ignored if the cloud-provider does not support the feature. + Deprecated: This field was under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations when available. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature. + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + ports: + description: |- + The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + items: + description: ServicePort contains information on service's port. + properties: + appProtocol: + description: |- + The application protocol for this port. + This is used as a hint for implementations to offer richer behavior for protocols that they understand. + This field follows standard Kubernetes label syntax. + Valid values are either: + + * Un-prefixed protocol names - reserved for IANA standard service names (as per + RFC-6335 and https://www.iana.org/assignments/service-names). + + * Kubernetes-defined prefixed names: + * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + + * Other protocols should use implementation-defined prefixed names such as + mycompany.com/my-custom-protocol. + type: string + name: + description: |- + The name of this port within the service. This must be a DNS_LABEL. + All ports within a ServiceSpec must have unique names. When considering + the endpoints for a Service, this must match the 'name' field in the + EndpointPort.
+ Optional if only one ServicePort is defined on this service. + type: string + nodePort: + description: |- + The port on each node on which this service is exposed when type is + NodePort or LoadBalancer. Usually assigned by the system. If a value is + specified, in-range, and not in use it will be used, otherwise the + operation will fail. If not specified, a port will be allocated if this + Service requires one. If this field is specified when creating a + Service which does not need it, creation will fail. This field will be + wiped when updating a Service to no longer need it (e.g. changing type + from NodePort to ClusterIP). + More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + default: TCP + description: |- + The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the pods targeted by the service. + Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named port in the + target Pod's container ports. If this is not specified, the value + of the 'port' field is used (an identity map). + This field is ignored for services with clusterIP=None, and should be + omitted or set equal to the 'port' field. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: |- + publishNotReadyAddresses indicates that any agent which deals with endpoints for this + Service should disregard any indications of ready/not-ready. + The primary use case for setting this field is for a StatefulSet's Headless Service to + propagate SRV DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints and EndpointSlice resources for + Services interpret this to mean that all endpoints are considered "ready" even if the + Pods themselves are not. Agents which consume only Kubernetes generated endpoints + through the Endpoints or EndpointSlice resources can safely assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: |- + Route service traffic to pods with label keys and values matching this + selector. If empty or not present, the service is assumed to have an + external process managing its endpoints, which Kubernetes will not + modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/ + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations of session affinity. 
+ properties: + clientIP: + description: clientIP contains the configurations of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + trafficDistribution: + description: |- + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. + type: string + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + tls: + description: TLS defines options for configuring TLS for HTTP. + properties: + certificate: + description: |- + Certificate is a reference to a Kubernetes secret that contains the certificate and private key for enabling TLS. + The referenced secret should contain the following: + + - `ca.crt`: The certificate authority (optional). + - `tls.crt`: The certificate (or a chain). + - `tls.key`: The private key to the first certificate in the certificate chain. + properties: + secretName: + description: SecretName is the name of the secret. + type: string + type: object + selfSignedCertificate: + description: SelfSignedCertificate allows configuring the self-signed certificate generated by the operator. + properties: + disabled: + description: Disabled indicates that the provisioning of the self-signed certificate should be disabled. + type: boolean + subjectAltNames: + description: SubjectAlternativeNames is a list of SANs to include in the generated HTTP TLS certificate. + items: + description: SubjectAlternativeName represents a SAN entry in a x509 certificate. + properties: + dns: + description: DNS is the DNS name of the subject. + type: string + ip: + description: IP is the IP address of the subject. + type: string + type: object + type: array + type: object + type: object + type: object + image: + description: Image is the Elastic Package Registry Docker image to deploy. 
+ type: string + podTemplate: + description: PodTemplate provides customisation options (labels, annotations, affinity rules, resource requests, and so on) for the Elastic Package Registry pods + type: object + x-kubernetes-preserve-unknown-fields: true + revisionHistoryLimit: + description: RevisionHistoryLimit is the number of revisions to retain to allow rollback in the underlying Deployment. + format: int32 + type: integer + version: + description: Version of Elastic Package Registry. + type: string + required: + - version + type: object + status: + description: PackageRegistryStatus defines the observed state of Elastic Package Registry + properties: + availableNodes: + description: AvailableNodes is the number of available replicas in the deployment. + format: int32 + type: integer + count: + description: Count corresponds to Scale.Status.Replicas, which is the actual number of observed instances of the scaled object. + format: int32 + type: integer + health: + description: Health of the deployment. + type: string + observedGeneration: + description: |- + ObservedGeneration is the most recent generation observed for this Elastic Package Registry. + It corresponds to the metadata generation, which is updated on mutation by the API Server. + If the generation observed in status diverges from the generation in metadata, the Elastic Package Registry + controller has not yet processed the changes contained in the Elastic Package Registry specification. + format: int64 + type: integer + selector: + description: Selector is the label selector used to find all pods. + type: string + version: + description: |- + Version of the stack resource currently running. During version upgrades, multiple versions may run + in parallel: this value specifies the lowest version currently running. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.count + statusReplicasPath: .status.count + status: {} diff --git a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-stackconfigpolicies.stackconfigpolicy.k8s.elastic.co.yaml b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-stackconfigpolicies.stackconfigpolicy.k8s.elastic.co.yaml index 95c6d3303..242bcbec9 100644 --- a/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-stackconfigpolicies.stackconfigpolicy.k8s.elastic.co.yaml +++ b/clusters/cl01tl/manifests/elastic-operator/CustomResourceDefinition-stackconfigpolicies.stackconfigpolicy.k8s.elastic.co.yaml @@ -2,14 +2,14 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 helm.sh/resource-policy: keep labels: app.kubernetes.io/instance: 'elastic-operator' app.kubernetes.io/managed-by: 'Helm' app.kubernetes.io/name: 'eck-operator-crds' - app.kubernetes.io/version: '3.2.0' - helm.sh/chart: 'eck-operator-crds-3.2.0' + app.kubernetes.io/version: '3.3.0' + helm.sh/chart: 'eck-operator-crds-3.3.0' name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co spec: group: stackconfigpolicy.k8s.elastic.co @@ -35,6 +35,9 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date + - jsonPath: .spec.weight + name: Weight + type: integer name: v1alpha1 schema: openAPIV3Schema: @@ -269,6 +272,13 @@ spec: - secretName type: object type: array + weight: + default: 0 + description: |- + Weight determines the priority of this policy when multiple policies target the same resource. + Higher weight values take precedence. Defaults to 0. + format: int32 + type: integer type: object status: properties: @@ -351,6 +361,7 @@ spec: type: object description: |- ResourcesStatuses holds the status for each resource to be configured. 
+ Deprecated: Details is used to store the status of resources from ECK 2.11 type: object type: object diff --git a/clusters/cl01tl/manifests/elastic-operator/PodMonitor-elastic-operator.yaml b/clusters/cl01tl/manifests/elastic-operator/PodMonitor-elastic-operator.yaml index 20327134f..bf0088a38 100644 --- a/clusters/cl01tl/manifests/elastic-operator/PodMonitor-elastic-operator.yaml +++ b/clusters/cl01tl/manifests/elastic-operator/PodMonitor-elastic-operator.yaml @@ -6,8 +6,8 @@ metadata: labels: app.kubernetes.io/name: elastic-operator app.kubernetes.io/instance: elastic-operator - app.kubernetes.io/version: "3.2.0" - helm.sh/chart: eck-operator-3.2.0 + app.kubernetes.io/version: "3.3.0" + helm.sh/chart: eck-operator-3.3.0 app.kubernetes.io/managed-by: Helm spec: podMetricsEndpoints: diff --git a/clusters/cl01tl/manifests/elastic-operator/Secret-elastic-operator-webhook-cert.yaml b/clusters/cl01tl/manifests/elastic-operator/Secret-elastic-operator-webhook-cert.yaml index 90f2988a0..0856ca7f3 100644 --- a/clusters/cl01tl/manifests/elastic-operator/Secret-elastic-operator-webhook-cert.yaml +++ b/clusters/cl01tl/manifests/elastic-operator/Secret-elastic-operator-webhook-cert.yaml @@ -6,6 +6,6 @@ metadata: labels: app.kubernetes.io/name: elastic-operator app.kubernetes.io/instance: elastic-operator - app.kubernetes.io/version: "3.2.0" - helm.sh/chart: eck-operator-3.2.0 + app.kubernetes.io/version: "3.3.0" + helm.sh/chart: eck-operator-3.3.0 app.kubernetes.io/managed-by: Helm diff --git a/clusters/cl01tl/manifests/elastic-operator/Service-elastic-operator-webhook.yaml b/clusters/cl01tl/manifests/elastic-operator/Service-elastic-operator-webhook.yaml index 19eb9902e..c21f2c779 100644 --- a/clusters/cl01tl/manifests/elastic-operator/Service-elastic-operator-webhook.yaml +++ b/clusters/cl01tl/manifests/elastic-operator/Service-elastic-operator-webhook.yaml @@ -6,8 +6,8 @@ metadata: labels: app.kubernetes.io/name: elastic-operator app.kubernetes.io/instance: elastic-operator - app.kubernetes.io/version: "3.2.0" - helm.sh/chart: eck-operator-3.2.0 + app.kubernetes.io/version: "3.3.0" + helm.sh/chart: eck-operator-3.3.0 app.kubernetes.io/managed-by: Helm spec: ports: diff --git a/clusters/cl01tl/manifests/elastic-operator/ServiceAccount-elastic-operator.yaml b/clusters/cl01tl/manifests/elastic-operator/ServiceAccount-elastic-operator.yaml index 2eaad7c5b..816152f43 100644 --- a/clusters/cl01tl/manifests/elastic-operator/ServiceAccount-elastic-operator.yaml +++ b/clusters/cl01tl/manifests/elastic-operator/ServiceAccount-elastic-operator.yaml @@ -7,6 +7,6 @@ metadata: labels: app.kubernetes.io/name: elastic-operator app.kubernetes.io/instance: elastic-operator - app.kubernetes.io/version: "3.2.0" - helm.sh/chart: eck-operator-3.2.0 + app.kubernetes.io/version: "3.3.0" + helm.sh/chart: eck-operator-3.3.0 app.kubernetes.io/managed-by: Helm diff --git a/clusters/cl01tl/manifests/elastic-operator/StatefulSet-elastic-operator.yaml b/clusters/cl01tl/manifests/elastic-operator/StatefulSet-elastic-operator.yaml index 628822eb5..6c106c1e6 100644 --- a/clusters/cl01tl/manifests/elastic-operator/StatefulSet-elastic-operator.yaml +++ b/clusters/cl01tl/manifests/elastic-operator/StatefulSet-elastic-operator.yaml @@ -6,8 +6,8 @@ metadata: labels: app.kubernetes.io/name: elastic-operator app.kubernetes.io/instance: elastic-operator - app.kubernetes.io/version: "3.2.0" - helm.sh/chart: eck-operator-3.2.0 + app.kubernetes.io/version: "3.3.0" + helm.sh/chart: eck-operator-3.3.0 app.kubernetes.io/managed-by: Helm spec: 
selector: @@ -20,10 +20,13 @@ spec: metadata: annotations: "co.elastic.logs/raw": "[{\"type\":\"filestream\",\"enabled\":true,\"id\":\"eck-container-logs-${data.kubernetes.container.id}\",\"paths\":[\"/var/log/containers/*${data.kubernetes.container.id}.log\"],\"parsers\":[{\"container\":{}},{\"ndjson\":{\"keys_under_root\":true}}],\"prospector.scanner.symlinks\":true,\"processors\":[{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"error\",\"to\":\"_error\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"_error\",\"to\":\"error.message\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"source\",\"to\":\"_source\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"_source\",\"to\":\"event.source\"}]}}]}]" - "checksum/config": b73feaddf363fb2b6fe00148962a59c33dac58db78014f4b50b9c3de06865131 + "checksum/config": bd4458862d6cac7cd84ccb5584d2bfa24645bc0bd649adbe4d9207847d1de11f labels: app.kubernetes.io/name: elastic-operator app.kubernetes.io/instance: elastic-operator + app.kubernetes.io/version: "3.3.0" + helm.sh/chart: eck-operator-3.3.0 + app.kubernetes.io/managed-by: Helm spec: terminationGracePeriodSeconds: 10 serviceAccountName: elastic-operator @@ -31,7 +34,7 @@ spec: securityContext: runAsNonRoot: true containers: - - image: "docker.elastic.co/eck/eck-operator:3.2.0" + - image: "docker.elastic.co/eck/eck-operator:3.3.0" imagePullPolicy: IfNotPresent name: manager args: diff --git a/clusters/cl01tl/manifests/elastic-operator/ValidatingWebhookConfiguration-elastic-operator.elastic-operator.k8s.elastic.co.yaml b/clusters/cl01tl/manifests/elastic-operator/ValidatingWebhookConfiguration-elastic-operator.elastic-operator.k8s.elastic.co.yaml index d0916db7a..fe145e8a4 100644 --- a/clusters/cl01tl/manifests/elastic-operator/ValidatingWebhookConfiguration-elastic-operator.elastic-operator.k8s.elastic.co.yaml +++ b/clusters/cl01tl/manifests/elastic-operator/ValidatingWebhookConfiguration-elastic-operator.elastic-operator.k8s.elastic.co.yaml @@ -5,8 +5,8 @@ metadata: labels: app.kubernetes.io/name: elastic-operator app.kubernetes.io/instance: elastic-operator - app.kubernetes.io/version: "3.2.0" - helm.sh/chart: eck-operator-3.2.0 + app.kubernetes.io/version: "3.3.0" + helm.sh/chart: eck-operator-3.3.0 app.kubernetes.io/managed-by: Helm webhooks: - clientConfig: @@ -289,3 +289,43 @@ webhooks: - UPDATE resources: - logstashes + - clientConfig: + service: + name: elastic-operator-webhook + namespace: elastic-operator + path: /validate-autoops-k8s-elastic-co-v1alpha1-autoopsagentpolicies + failurePolicy: Ignore + name: elastic-autoops-validation-v1alpha1.k8s.elastic.co + matchPolicy: Exact + admissionReviewVersions: [v1] + sideEffects: None + rules: + - apiGroups: + - autoops.k8s.elastic.co + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - autoopsagentpolicies + - clientConfig: + service: + name: elastic-operator-webhook + namespace: elastic-operator + path: /validate-epr-k8s-elastic-co-v1alpha1-elasticpackageregistry + failurePolicy: Ignore + name: elastic-epr-validation-v1alpha1.k8s.elastic.co + matchPolicy: Exact + admissionReviewVersions: [v1, v1beta1] + sideEffects: None + rules: + - apiGroups: + - packageregistry.k8s.elastic.co + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - packageregistries diff --git a/clusters/cl01tl/manifests/gitea/ConfigMap-gitea-meilisearch-environment.yaml 
b/clusters/cl01tl/manifests/gitea/ConfigMap-gitea-meilisearch-environment.yaml index 2f32c6ae4..76fcba898 100644 --- a/clusters/cl01tl/manifests/gitea/ConfigMap-gitea-meilisearch-environment.yaml +++ b/clusters/cl01tl/manifests/gitea/ConfigMap-gitea-meilisearch-environment.yaml @@ -3,10 +3,10 @@ kind: ConfigMap metadata: name: gitea-meilisearch-environment labels: - helm.sh/chart: meilisearch-0.24.2 + helm.sh/chart: meilisearch-0.25.1 app.kubernetes.io/name: meilisearch app.kubernetes.io/instance: gitea - app.kubernetes.io/version: "v1.34.2" + app.kubernetes.io/version: "v1.35.0" app.kubernetes.io/component: search-engine app.kubernetes.io/part-of: meilisearch app.kubernetes.io/managed-by: Helm diff --git a/clusters/cl01tl/manifests/gitea/PersistentVolumeClaim-gitea-meilisearch.yaml b/clusters/cl01tl/manifests/gitea/PersistentVolumeClaim-gitea-meilisearch.yaml index c6fe814e0..18d00b89b 100644 --- a/clusters/cl01tl/manifests/gitea/PersistentVolumeClaim-gitea-meilisearch.yaml +++ b/clusters/cl01tl/manifests/gitea/PersistentVolumeClaim-gitea-meilisearch.yaml @@ -3,10 +3,10 @@ apiVersion: v1 metadata: name: gitea-meilisearch labels: - helm.sh/chart: meilisearch-0.24.2 + helm.sh/chart: meilisearch-0.25.1 app.kubernetes.io/name: meilisearch app.kubernetes.io/instance: gitea - app.kubernetes.io/version: "v1.34.2" + app.kubernetes.io/version: "v1.35.0" app.kubernetes.io/component: search-engine app.kubernetes.io/part-of: meilisearch app.kubernetes.io/managed-by: Helm diff --git a/clusters/cl01tl/manifests/gitea/Pod-gitea-meilisearch-test-connection.yaml b/clusters/cl01tl/manifests/gitea/Pod-gitea-meilisearch-test-connection.yaml index ee84d3db5..46d10c6d7 100644 --- a/clusters/cl01tl/manifests/gitea/Pod-gitea-meilisearch-test-connection.yaml +++ b/clusters/cl01tl/manifests/gitea/Pod-gitea-meilisearch-test-connection.yaml @@ -4,7 +4,7 @@ metadata: name: gitea-meilisearch-test-connection labels: app.kubernetes.io/name: meilisearch - helm.sh/chart: meilisearch-0.24.2 + helm.sh/chart: meilisearch-0.25.1 app.kubernetes.io/instance: gitea app.kubernetes.io/managed-by: Helm annotations: @@ -14,5 +14,5 @@ spec: - name: wget image: busybox command: ['wget'] - args: ['gitea-meilisearch:7700'] + args: ['--spider', '--timeout=5', 'gitea-meilisearch:7700'] restartPolicy: Never diff --git a/clusters/cl01tl/manifests/gitea/Service-gitea-meilisearch.yaml b/clusters/cl01tl/manifests/gitea/Service-gitea-meilisearch.yaml index 6c973e04f..5749ec44c 100644 --- a/clusters/cl01tl/manifests/gitea/Service-gitea-meilisearch.yaml +++ b/clusters/cl01tl/manifests/gitea/Service-gitea-meilisearch.yaml @@ -3,10 +3,10 @@ kind: Service metadata: name: gitea-meilisearch labels: - helm.sh/chart: meilisearch-0.24.2 + helm.sh/chart: meilisearch-0.25.1 app.kubernetes.io/name: meilisearch app.kubernetes.io/instance: gitea - app.kubernetes.io/version: "v1.34.2" + app.kubernetes.io/version: "v1.35.0" app.kubernetes.io/component: search-engine app.kubernetes.io/part-of: meilisearch app.kubernetes.io/managed-by: Helm diff --git a/clusters/cl01tl/manifests/gitea/ServiceAccount-gitea-meilisearch.yaml b/clusters/cl01tl/manifests/gitea/ServiceAccount-gitea-meilisearch.yaml index 5eeb7b0d4..3a2d5cef3 100644 --- a/clusters/cl01tl/manifests/gitea/ServiceAccount-gitea-meilisearch.yaml +++ b/clusters/cl01tl/manifests/gitea/ServiceAccount-gitea-meilisearch.yaml @@ -3,10 +3,10 @@ kind: ServiceAccount metadata: name: gitea-meilisearch labels: - helm.sh/chart: meilisearch-0.24.2 + helm.sh/chart: meilisearch-0.25.1 app.kubernetes.io/name: meilisearch 
app.kubernetes.io/instance: gitea - app.kubernetes.io/version: "v1.34.2" + app.kubernetes.io/version: "v1.35.0" app.kubernetes.io/component: search-engine app.kubernetes.io/part-of: meilisearch app.kubernetes.io/managed-by: Helm diff --git a/clusters/cl01tl/manifests/gitea/ServiceMonitor-gitea-meilisearch.yaml b/clusters/cl01tl/manifests/gitea/ServiceMonitor-gitea-meilisearch.yaml index 734f41c57..48039cd63 100644 --- a/clusters/cl01tl/manifests/gitea/ServiceMonitor-gitea-meilisearch.yaml +++ b/clusters/cl01tl/manifests/gitea/ServiceMonitor-gitea-meilisearch.yaml @@ -4,10 +4,10 @@ metadata: name: gitea-meilisearch namespace: gitea labels: - helm.sh/chart: meilisearch-0.24.2 + helm.sh/chart: meilisearch-0.25.1 app.kubernetes.io/name: meilisearch app.kubernetes.io/instance: gitea - app.kubernetes.io/version: "v1.34.2" + app.kubernetes.io/version: "v1.35.0" app.kubernetes.io/component: search-engine app.kubernetes.io/part-of: meilisearch app.kubernetes.io/managed-by: Helm diff --git a/clusters/cl01tl/manifests/gitea/StatefulSet-gitea-meilisearch.yaml b/clusters/cl01tl/manifests/gitea/StatefulSet-gitea-meilisearch.yaml index b29bf7d28..918729f94 100644 --- a/clusters/cl01tl/manifests/gitea/StatefulSet-gitea-meilisearch.yaml +++ b/clusters/cl01tl/manifests/gitea/StatefulSet-gitea-meilisearch.yaml @@ -3,10 +3,10 @@ kind: StatefulSet metadata: name: gitea-meilisearch labels: - helm.sh/chart: meilisearch-0.24.2 + helm.sh/chart: meilisearch-0.25.1 app.kubernetes.io/name: meilisearch app.kubernetes.io/instance: gitea - app.kubernetes.io/version: "v1.34.2" + app.kubernetes.io/version: "v1.35.0" app.kubernetes.io/component: search-engine app.kubernetes.io/part-of: meilisearch app.kubernetes.io/managed-by: Helm @@ -20,15 +20,15 @@ spec: template: metadata: labels: - helm.sh/chart: meilisearch-0.24.2 + helm.sh/chart: meilisearch-0.25.1 app.kubernetes.io/name: meilisearch app.kubernetes.io/instance: gitea - app.kubernetes.io/version: "v1.34.2" + app.kubernetes.io/version: "v1.35.0" app.kubernetes.io/component: search-engine app.kubernetes.io/part-of: meilisearch app.kubernetes.io/managed-by: Helm annotations: - checksum/config: 33e9823eb31803827e2677974002d1042a106ad3723acd6696c8c0f81004b448 + checksum/config: c4f3efa51b257e50195f1b1fe31fb13c9b5431aa57cbfc8e6fbb6fe910bbb4ef spec: serviceAccountName: gitea-meilisearch securityContext: @@ -45,7 +45,7 @@ spec: claimName: gitea-meilisearch containers: - name: meilisearch - image: "getmeili/meilisearch:v1.34.2" + image: "getmeili/meilisearch:v1.35.0" imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false diff --git a/clusters/cl01tl/manifests/jellyfin/ConfigMap-jellyfin-meilisearch-environment.yaml b/clusters/cl01tl/manifests/jellyfin/ConfigMap-jellyfin-meilisearch-environment.yaml index 81191fb79..3aa44899a 100644 --- a/clusters/cl01tl/manifests/jellyfin/ConfigMap-jellyfin-meilisearch-environment.yaml +++ b/clusters/cl01tl/manifests/jellyfin/ConfigMap-jellyfin-meilisearch-environment.yaml @@ -3,10 +3,10 @@ kind: ConfigMap metadata: name: jellyfin-meilisearch-environment labels: - helm.sh/chart: meilisearch-0.24.2 + helm.sh/chart: meilisearch-0.25.1 app.kubernetes.io/name: meilisearch app.kubernetes.io/instance: jellyfin - app.kubernetes.io/version: "v1.34.2" + app.kubernetes.io/version: "v1.35.0" app.kubernetes.io/component: search-engine app.kubernetes.io/part-of: meilisearch app.kubernetes.io/managed-by: Helm diff --git a/clusters/cl01tl/manifests/jellyfin/PersistentVolumeClaim-jellyfin-meilisearch.yaml 
b/clusters/cl01tl/manifests/jellyfin/PersistentVolumeClaim-jellyfin-meilisearch.yaml index e039f393a..1ea3f872b 100644 --- a/clusters/cl01tl/manifests/jellyfin/PersistentVolumeClaim-jellyfin-meilisearch.yaml +++ b/clusters/cl01tl/manifests/jellyfin/PersistentVolumeClaim-jellyfin-meilisearch.yaml @@ -3,10 +3,10 @@ apiVersion: v1 metadata: name: jellyfin-meilisearch labels: - helm.sh/chart: meilisearch-0.24.2 + helm.sh/chart: meilisearch-0.25.1 app.kubernetes.io/name: meilisearch app.kubernetes.io/instance: jellyfin - app.kubernetes.io/version: "v1.34.2" + app.kubernetes.io/version: "v1.35.0" app.kubernetes.io/component: search-engine app.kubernetes.io/part-of: meilisearch app.kubernetes.io/managed-by: Helm diff --git a/clusters/cl01tl/manifests/jellyfin/Pod-jellyfin-meilisearch-test-connection.yaml b/clusters/cl01tl/manifests/jellyfin/Pod-jellyfin-meilisearch-test-connection.yaml index 091f2d978..943b76d4c 100644 --- a/clusters/cl01tl/manifests/jellyfin/Pod-jellyfin-meilisearch-test-connection.yaml +++ b/clusters/cl01tl/manifests/jellyfin/Pod-jellyfin-meilisearch-test-connection.yaml @@ -4,7 +4,7 @@ metadata: name: jellyfin-meilisearch-test-connection labels: app.kubernetes.io/name: meilisearch - helm.sh/chart: meilisearch-0.24.2 + helm.sh/chart: meilisearch-0.25.1 app.kubernetes.io/instance: jellyfin app.kubernetes.io/managed-by: Helm annotations: @@ -14,5 +14,5 @@ spec: - name: wget image: busybox command: ['wget'] - args: ['jellyfin-meilisearch:7700'] + args: ['--spider', '--timeout=5', 'jellyfin-meilisearch:7700'] restartPolicy: Never diff --git a/clusters/cl01tl/manifests/jellyfin/Service-jellyfin-meilisearch.yaml b/clusters/cl01tl/manifests/jellyfin/Service-jellyfin-meilisearch.yaml index 89b0fc74f..1a6fb8150 100644 --- a/clusters/cl01tl/manifests/jellyfin/Service-jellyfin-meilisearch.yaml +++ b/clusters/cl01tl/manifests/jellyfin/Service-jellyfin-meilisearch.yaml @@ -3,10 +3,10 @@ kind: Service metadata: name: jellyfin-meilisearch labels: - helm.sh/chart: meilisearch-0.24.2 + helm.sh/chart: meilisearch-0.25.1 app.kubernetes.io/name: meilisearch app.kubernetes.io/instance: jellyfin - app.kubernetes.io/version: "v1.34.2" + app.kubernetes.io/version: "v1.35.0" app.kubernetes.io/component: search-engine app.kubernetes.io/part-of: meilisearch app.kubernetes.io/managed-by: Helm diff --git a/clusters/cl01tl/manifests/jellyfin/ServiceAccount-jellyfin-meilisearch.yaml b/clusters/cl01tl/manifests/jellyfin/ServiceAccount-jellyfin-meilisearch.yaml index 2634f5f91..f065842b7 100644 --- a/clusters/cl01tl/manifests/jellyfin/ServiceAccount-jellyfin-meilisearch.yaml +++ b/clusters/cl01tl/manifests/jellyfin/ServiceAccount-jellyfin-meilisearch.yaml @@ -3,10 +3,10 @@ kind: ServiceAccount metadata: name: jellyfin-meilisearch labels: - helm.sh/chart: meilisearch-0.24.2 + helm.sh/chart: meilisearch-0.25.1 app.kubernetes.io/name: meilisearch app.kubernetes.io/instance: jellyfin - app.kubernetes.io/version: "v1.34.2" + app.kubernetes.io/version: "v1.35.0" app.kubernetes.io/component: search-engine app.kubernetes.io/part-of: meilisearch app.kubernetes.io/managed-by: Helm diff --git a/clusters/cl01tl/manifests/jellyfin/ServiceMonitor-jellyfin-meilisearch.yaml b/clusters/cl01tl/manifests/jellyfin/ServiceMonitor-jellyfin-meilisearch.yaml index 5a44af762..e59bd6fe6 100644 --- a/clusters/cl01tl/manifests/jellyfin/ServiceMonitor-jellyfin-meilisearch.yaml +++ b/clusters/cl01tl/manifests/jellyfin/ServiceMonitor-jellyfin-meilisearch.yaml @@ -4,10 +4,10 @@ metadata: name: jellyfin-meilisearch namespace: jellyfin labels: 
- helm.sh/chart: meilisearch-0.24.2 + helm.sh/chart: meilisearch-0.25.1 app.kubernetes.io/name: meilisearch app.kubernetes.io/instance: jellyfin - app.kubernetes.io/version: "v1.34.2" + app.kubernetes.io/version: "v1.35.0" app.kubernetes.io/component: search-engine app.kubernetes.io/part-of: meilisearch app.kubernetes.io/managed-by: Helm diff --git a/clusters/cl01tl/manifests/jellyfin/StatefulSet-jellyfin-meilisearch.yaml b/clusters/cl01tl/manifests/jellyfin/StatefulSet-jellyfin-meilisearch.yaml index fcc3f3d83..6a4ed6fbe 100644 --- a/clusters/cl01tl/manifests/jellyfin/StatefulSet-jellyfin-meilisearch.yaml +++ b/clusters/cl01tl/manifests/jellyfin/StatefulSet-jellyfin-meilisearch.yaml @@ -3,10 +3,10 @@ kind: StatefulSet metadata: name: jellyfin-meilisearch labels: - helm.sh/chart: meilisearch-0.24.2 + helm.sh/chart: meilisearch-0.25.1 app.kubernetes.io/name: meilisearch app.kubernetes.io/instance: jellyfin - app.kubernetes.io/version: "v1.34.2" + app.kubernetes.io/version: "v1.35.0" app.kubernetes.io/component: search-engine app.kubernetes.io/part-of: meilisearch app.kubernetes.io/managed-by: Helm @@ -20,15 +20,15 @@ spec: template: metadata: labels: - helm.sh/chart: meilisearch-0.24.2 + helm.sh/chart: meilisearch-0.25.1 app.kubernetes.io/name: meilisearch app.kubernetes.io/instance: jellyfin - app.kubernetes.io/version: "v1.34.2" + app.kubernetes.io/version: "v1.35.0" app.kubernetes.io/component: search-engine app.kubernetes.io/part-of: meilisearch app.kubernetes.io/managed-by: Helm annotations: - checksum/config: 504b57cecb5c76f6a3697b032604ee23f848154baf45e3b9be749452e461b218 + checksum/config: 070febc6802ea45933e1919c94bbe704d9541a211afcf940f760cff33877c979 spec: serviceAccountName: jellyfin-meilisearch securityContext: @@ -45,7 +45,7 @@ spec: claimName: jellyfin-meilisearch containers: - name: meilisearch - image: "getmeili/meilisearch:v1.34.2" + image: "getmeili/meilisearch:v1.35.0" imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false diff --git a/clusters/cl01tl/manifests/karakeep/ConfigMap-karakeep-meilisearch-environment.yaml b/clusters/cl01tl/manifests/karakeep/ConfigMap-karakeep-meilisearch-environment.yaml index 89746a8b4..618513032 100644 --- a/clusters/cl01tl/manifests/karakeep/ConfigMap-karakeep-meilisearch-environment.yaml +++ b/clusters/cl01tl/manifests/karakeep/ConfigMap-karakeep-meilisearch-environment.yaml @@ -3,10 +3,10 @@ kind: ConfigMap metadata: name: karakeep-meilisearch-environment labels: - helm.sh/chart: meilisearch-0.24.2 + helm.sh/chart: meilisearch-0.25.1 app.kubernetes.io/name: meilisearch app.kubernetes.io/instance: karakeep - app.kubernetes.io/version: "v1.34.2" + app.kubernetes.io/version: "v1.35.0" app.kubernetes.io/component: search-engine app.kubernetes.io/part-of: meilisearch app.kubernetes.io/managed-by: Helm diff --git a/clusters/cl01tl/manifests/karakeep/PersistentVolumeClaim-karakeep-meilisearch.yaml b/clusters/cl01tl/manifests/karakeep/PersistentVolumeClaim-karakeep-meilisearch.yaml index ffa4c3dd1..cfccfc012 100644 --- a/clusters/cl01tl/manifests/karakeep/PersistentVolumeClaim-karakeep-meilisearch.yaml +++ b/clusters/cl01tl/manifests/karakeep/PersistentVolumeClaim-karakeep-meilisearch.yaml @@ -3,10 +3,10 @@ apiVersion: v1 metadata: name: karakeep-meilisearch labels: - helm.sh/chart: meilisearch-0.24.2 + helm.sh/chart: meilisearch-0.25.1 app.kubernetes.io/name: meilisearch app.kubernetes.io/instance: karakeep - app.kubernetes.io/version: "v1.34.2" + app.kubernetes.io/version: "v1.35.0" app.kubernetes.io/component: 
      search-engine
     app.kubernetes.io/part-of: meilisearch
     app.kubernetes.io/managed-by: Helm
diff --git a/clusters/cl01tl/manifests/karakeep/Pod-karakeep-meilisearch-test-connection.yaml b/clusters/cl01tl/manifests/karakeep/Pod-karakeep-meilisearch-test-connection.yaml
index 42d56ba6b..35489b5f4 100644
--- a/clusters/cl01tl/manifests/karakeep/Pod-karakeep-meilisearch-test-connection.yaml
+++ b/clusters/cl01tl/manifests/karakeep/Pod-karakeep-meilisearch-test-connection.yaml
@@ -4,7 +4,7 @@ metadata:
   name: karakeep-meilisearch-test-connection
   labels:
     app.kubernetes.io/name: meilisearch
-    helm.sh/chart: meilisearch-0.24.2
+    helm.sh/chart: meilisearch-0.25.1
     app.kubernetes.io/instance: karakeep
     app.kubernetes.io/managed-by: Helm
   annotations:
@@ -14,5 +14,5 @@ spec:
     - name: wget
       image: busybox
       command: ['wget']
-      args: ['karakeep-meilisearch:7700']
+      args: ['--spider', '--timeout=5', 'karakeep-meilisearch:7700']
   restartPolicy: Never
diff --git a/clusters/cl01tl/manifests/karakeep/Service-karakeep-meilisearch.yaml b/clusters/cl01tl/manifests/karakeep/Service-karakeep-meilisearch.yaml
index cae6743ab..382938b61 100644
--- a/clusters/cl01tl/manifests/karakeep/Service-karakeep-meilisearch.yaml
+++ b/clusters/cl01tl/manifests/karakeep/Service-karakeep-meilisearch.yaml
@@ -3,10 +3,10 @@ kind: Service
 metadata:
   name: karakeep-meilisearch
   labels:
-    helm.sh/chart: meilisearch-0.24.2
+    helm.sh/chart: meilisearch-0.25.1
     app.kubernetes.io/name: meilisearch
     app.kubernetes.io/instance: karakeep
-    app.kubernetes.io/version: "v1.34.2"
+    app.kubernetes.io/version: "v1.35.0"
     app.kubernetes.io/component: search-engine
     app.kubernetes.io/part-of: meilisearch
     app.kubernetes.io/managed-by: Helm
diff --git a/clusters/cl01tl/manifests/karakeep/ServiceAccount-karakeep-meilisearch.yaml b/clusters/cl01tl/manifests/karakeep/ServiceAccount-karakeep-meilisearch.yaml
index 99d3a1258..20a77122c 100644
--- a/clusters/cl01tl/manifests/karakeep/ServiceAccount-karakeep-meilisearch.yaml
+++ b/clusters/cl01tl/manifests/karakeep/ServiceAccount-karakeep-meilisearch.yaml
@@ -3,10 +3,10 @@ kind: ServiceAccount
 metadata:
   name: karakeep-meilisearch
   labels:
-    helm.sh/chart: meilisearch-0.24.2
+    helm.sh/chart: meilisearch-0.25.1
     app.kubernetes.io/name: meilisearch
     app.kubernetes.io/instance: karakeep
-    app.kubernetes.io/version: "v1.34.2"
+    app.kubernetes.io/version: "v1.35.0"
     app.kubernetes.io/component: search-engine
     app.kubernetes.io/part-of: meilisearch
     app.kubernetes.io/managed-by: Helm
diff --git a/clusters/cl01tl/manifests/karakeep/ServiceMonitor-karakeep-meilisearch.yaml b/clusters/cl01tl/manifests/karakeep/ServiceMonitor-karakeep-meilisearch.yaml
index ee31a9c71..304413463 100644
--- a/clusters/cl01tl/manifests/karakeep/ServiceMonitor-karakeep-meilisearch.yaml
+++ b/clusters/cl01tl/manifests/karakeep/ServiceMonitor-karakeep-meilisearch.yaml
@@ -4,10 +4,10 @@ metadata:
   name: karakeep-meilisearch
   namespace: karakeep
   labels:
-    helm.sh/chart: meilisearch-0.24.2
+    helm.sh/chart: meilisearch-0.25.1
     app.kubernetes.io/name: meilisearch
     app.kubernetes.io/instance: karakeep
-    app.kubernetes.io/version: "v1.34.2"
+    app.kubernetes.io/version: "v1.35.0"
     app.kubernetes.io/component: search-engine
     app.kubernetes.io/part-of: meilisearch
     app.kubernetes.io/managed-by: Helm
diff --git a/clusters/cl01tl/manifests/karakeep/StatefulSet-karakeep-meilisearch.yaml b/clusters/cl01tl/manifests/karakeep/StatefulSet-karakeep-meilisearch.yaml
index 9c36a0dc3..913607001 100644
--- a/clusters/cl01tl/manifests/karakeep/StatefulSet-karakeep-meilisearch.yaml
+++ b/clusters/cl01tl/manifests/karakeep/StatefulSet-karakeep-meilisearch.yaml
@@ -3,10 +3,10 @@ kind: StatefulSet
 metadata:
   name: karakeep-meilisearch
   labels:
-    helm.sh/chart: meilisearch-0.24.2
+    helm.sh/chart: meilisearch-0.25.1
     app.kubernetes.io/name: meilisearch
     app.kubernetes.io/instance: karakeep
-    app.kubernetes.io/version: "v1.34.2"
+    app.kubernetes.io/version: "v1.35.0"
     app.kubernetes.io/component: search-engine
     app.kubernetes.io/part-of: meilisearch
     app.kubernetes.io/managed-by: Helm
@@ -20,15 +20,15 @@ spec:
   template:
     metadata:
       labels:
-        helm.sh/chart: meilisearch-0.24.2
+        helm.sh/chart: meilisearch-0.25.1
         app.kubernetes.io/name: meilisearch
         app.kubernetes.io/instance: karakeep
-        app.kubernetes.io/version: "v1.34.2"
+        app.kubernetes.io/version: "v1.35.0"
         app.kubernetes.io/component: search-engine
         app.kubernetes.io/part-of: meilisearch
         app.kubernetes.io/managed-by: Helm
       annotations:
-        checksum/config: 3be8f2136d2b0e2bd2a7c4c935e722af789eb6f6219bf5aad34ed16f70beac5a
+        checksum/config: a1b915ccb2d6acacf42c3cf0c16a86ac6026f198087e1d1b6f33997bc78911c1
     spec:
       serviceAccountName: karakeep-meilisearch
       securityContext:
@@ -45,7 +45,7 @@ spec:
             claimName: karakeep-meilisearch
       containers:
         - name: meilisearch
-          image: "getmeili/meilisearch:v1.34.2"
+          image: "getmeili/meilisearch:v1.35.0"
           imagePullPolicy: IfNotPresent
           securityContext:
             allowPrivilegeEscalation: false
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/Alertmanager-kube-prometheus-stack-alertmanager.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/Alertmanager-kube-prometheus-stack-alertmanager.yaml
index 863195138..00cd90e6f 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/Alertmanager-kube-prometheus-stack-alertmanager.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/Alertmanager-kube-prometheus-stack-alertmanager.yaml
@@ -7,15 +7,15 @@ metadata:
     app: kube-prometheus-stack-alertmanager
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
-  image: "quay.io/prometheus/alertmanager:v0.30.1"
+  image: "quay.io/prometheus/alertmanager:v0.31.0"
   imagePullPolicy: "IfNotPresent"
-  version: v0.30.1
+  version: v0.31.0
   replicas: 1
   listenLocal: false
   serviceAccountName: kube-prometheus-stack-alertmanager
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-admission.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-admission.yaml
index 574248d7e..6afe4a232 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-admission.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-admission.yaml
@@ -9,9 +9,9 @@ metadata:
     app: kube-prometheus-stack-admission
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
     app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-operator.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-operator.yaml
index 26222d0bf..fca130171 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-operator.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-operator.yaml
@@ -5,9 +5,9 @@ metadata:
   labels:
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
     app: kube-prometheus-stack-operator
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-prometheus-crd-edit.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-prometheus-crd-edit.yaml
index 2f53252aa..02d50bdc9 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-prometheus-crd-edit.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-prometheus-crd-edit.yaml
@@ -7,9 +7,9 @@ metadata:
     rbac.authorization.k8s.io/aggregate-to-admin: "true"
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
     app: kube-prometheus-stack-operator
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-prometheus-crd-view.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-prometheus-crd-view.yaml
index d80425dda..5ef412e24 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-prometheus-crd-view.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-prometheus-crd-view.yaml
@@ -8,9 +8,9 @@ metadata:
     rbac.authorization.k8s.io/aggregate-to-view: "true"
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
     app: kube-prometheus-stack-operator
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-prometheus.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-prometheus.yaml
index a158be542..528011c3f 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-prometheus.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRole-kube-prometheus-stack-prometheus.yaml
@@ -6,9 +6,9 @@ metadata:
     app: kube-prometheus-stack-prometheus
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 rules:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRoleBinding-kube-prometheus-stack-admission.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRoleBinding-kube-prometheus-stack-admission.yaml
index 369bd988f..41f142015 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRoleBinding-kube-prometheus-stack-admission.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRoleBinding-kube-prometheus-stack-admission.yaml
@@ -9,9 +9,9 @@ metadata:
     app: kube-prometheus-stack-admission
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
     app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRoleBinding-kube-prometheus-stack-operator.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRoleBinding-kube-prometheus-stack-operator.yaml
index d3b7609d7..541d0ec41 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRoleBinding-kube-prometheus-stack-operator.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRoleBinding-kube-prometheus-stack-operator.yaml
@@ -5,9 +5,9 @@ metadata:
   labels:
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
     app: kube-prometheus-stack-operator
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRoleBinding-kube-prometheus-stack-prometheus.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRoleBinding-kube-prometheus-stack-prometheus.yaml
index 526f47087..dfd2dddad 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRoleBinding-kube-prometheus-stack-prometheus.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ClusterRoleBinding-kube-prometheus-stack-prometheus.yaml
@@ -6,9 +6,9 @@ metadata:
     app: kube-prometheus-stack-prometheus
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 roleRef:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/DaemonSet-kube-prometheus-stack-prometheus-node-exporter.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/DaemonSet-kube-prometheus-stack-prometheus-node-exporter.yaml
index 4de4b72c0..793a35c52 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/DaemonSet-kube-prometheus-stack-prometheus-node-exporter.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/DaemonSet-kube-prometheus-stack-prometheus-node-exporter.yaml
@@ -4,7 +4,7 @@ metadata:
   name: kube-prometheus-stack-prometheus-node-exporter
   namespace: kube-prometheus-stack
   labels:
-    helm.sh/chart: prometheus-node-exporter-4.51.0
+    helm.sh/chart: prometheus-node-exporter-4.51.1
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/component: metrics
     app.kubernetes.io/part-of: prometheus-node-exporter
@@ -27,7 +27,7 @@ spec:
       annotations:
         cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
       labels:
-        helm.sh/chart: prometheus-node-exporter-4.51.0
+        helm.sh/chart: prometheus-node-exporter-4.51.1
         app.kubernetes.io/managed-by: Helm
         app.kubernetes.io/component: metrics
         app.kubernetes.io/part-of: prometheus-node-exporter
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/Deployment-kube-prometheus-stack-operator.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/Deployment-kube-prometheus-stack-operator.yaml
index 307c8773a..645b19872 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/Deployment-kube-prometheus-stack-operator.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/Deployment-kube-prometheus-stack-operator.yaml
@@ -6,9 +6,9 @@ metadata:
   labels:
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
     app: kube-prometheus-stack-operator
@@ -26,9 +26,9 @@ spec:
       labels:
         app.kubernetes.io/managed-by: Helm
         app.kubernetes.io/instance: kube-prometheus-stack
-        app.kubernetes.io/version: "81.4.2"
+        app.kubernetes.io/version: "81.5.0"
         app.kubernetes.io/part-of: kube-prometheus-stack
-        chart: kube-prometheus-stack-81.4.2
+        chart: kube-prometheus-stack-81.5.0
         release: "kube-prometheus-stack"
         heritage: "Helm"
         app: kube-prometheus-stack-operator
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/HTTPRoute-kube-prometheus-stack-alertmanager.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/HTTPRoute-kube-prometheus-stack-alertmanager.yaml
index ee26bdb1b..e30a6594c 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/HTTPRoute-kube-prometheus-stack-alertmanager.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/HTTPRoute-kube-prometheus-stack-alertmanager.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack-alertmanager
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/HTTPRoute-kube-prometheus-stack-prometheus.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/HTTPRoute-kube-prometheus-stack-prometheus.yaml
index 13beaf9bf..513c53206 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/HTTPRoute-kube-prometheus-stack-prometheus.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/HTTPRoute-kube-prometheus-stack-prometheus.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack-prometheus
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/Job-kube-prometheus-stack-admission-create.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/Job-kube-prometheus-stack-admission-create.yaml
index 9d735c399..451f5c031 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/Job-kube-prometheus-stack-admission-create.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/Job-kube-prometheus-stack-admission-create.yaml
@@ -12,9 +12,9 @@ metadata:
     app: kube-prometheus-stack-admission-create
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
     app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
@@ -28,9 +28,9 @@ spec:
         app: kube-prometheus-stack-admission-create
         app.kubernetes.io/managed-by: Helm
         app.kubernetes.io/instance: kube-prometheus-stack
-        app.kubernetes.io/version: "81.4.2"
+        app.kubernetes.io/version: "81.5.0"
         app.kubernetes.io/part-of: kube-prometheus-stack
-        chart: kube-prometheus-stack-81.4.2
+        chart: kube-prometheus-stack-81.5.0
         release: "kube-prometheus-stack"
         heritage: "Helm"
         app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/Job-kube-prometheus-stack-admission-patch.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/Job-kube-prometheus-stack-admission-patch.yaml
index 1f3bf8c79..6a6f5e0de 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/Job-kube-prometheus-stack-admission-patch.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/Job-kube-prometheus-stack-admission-patch.yaml
@@ -12,9 +12,9 @@ metadata:
     app: kube-prometheus-stack-admission-patch
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
     app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
@@ -28,9 +28,9 @@ spec:
         app: kube-prometheus-stack-admission-patch
         app.kubernetes.io/managed-by: Helm
         app.kubernetes.io/instance: kube-prometheus-stack
-        app.kubernetes.io/version: "81.4.2"
+        app.kubernetes.io/version: "81.5.0"
         app.kubernetes.io/part-of: kube-prometheus-stack
-        chart: kube-prometheus-stack-81.4.2
+        chart: kube-prometheus-stack-81.5.0
         release: "kube-prometheus-stack"
         heritage: "Helm"
         app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
@@ -45,7 +45,7 @@ spec:
           - --webhook-name=kube-prometheus-stack-admission
           - --namespace=kube-prometheus-stack
          - --secret-name=kube-prometheus-stack-admission
-          - --patch-failure-policy=Fail
+          - --patch-failure-policy=
         securityContext:
           allowPrivilegeEscalation: false
           capabilities:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/MutatingWebhookConfiguration-kube-prometheus-stack-admission.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/MutatingWebhookConfiguration-kube-prometheus-stack-admission.yaml
index 9f4ab3bdf..8984a31f8 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/MutatingWebhookConfiguration-kube-prometheus-stack-admission.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/MutatingWebhookConfiguration-kube-prometheus-stack-admission.yaml
@@ -8,9 +8,9 @@ metadata:
     app: kube-prometheus-stack-admission
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
     app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/Prometheus-kube-prometheus-stack-prometheus.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/Prometheus-kube-prometheus-stack-prometheus.yaml
index d4b17481c..1e6587581 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/Prometheus-kube-prometheus-stack-prometheus.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/Prometheus-kube-prometheus-stack-prometheus.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack-prometheus
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-alertmanager.rules.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-alertmanager.rules.yaml
index da6332a6c..6a9f48f3f 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-alertmanager.rules.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-alertmanager.rules.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
@@ -66,7 +66,7 @@ spec:
         min by (namespace,service, integration) (
           rate(alertmanager_notifications_failed_total{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack", integration=~`.*`}[15m])
         /
-          ignoring (reason) group_left rate(alertmanager_notifications_total{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack", integration=~`.*`}[15m])
+          ignoring (reason) group_left rate(alertmanager_notifications_total{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack", integration=~`.*`}[15m]) > 0
         )
         > 0.01
       for: 5m
@@ -81,7 +81,7 @@ spec:
         min by (namespace,service, integration) (
           rate(alertmanager_notifications_failed_total{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack", integration!~`.*`}[15m])
         /
-          ignoring (reason) group_left rate(alertmanager_notifications_total{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack", integration!~`.*`}[15m])
+          ignoring (reason) group_left rate(alertmanager_notifications_total{job="kube-prometheus-stack-alertmanager",container="alertmanager",namespace="kube-prometheus-stack", integration!~`.*`}[15m]) > 0
         )
         > 0.01
       for: 5m
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-config-reloaders.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-config-reloaders.yaml
index f1444e652..429170dd4 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-config-reloaders.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-config-reloaders.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-etcd.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-etcd.yaml
index 7df302cf3..f5300e3d8 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-etcd.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-etcd.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-general.rules.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-general.rules.yaml
index 0041655a7..b52588be2 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-general.rules.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-general.rules.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-cpu-usage-seconds-tot.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-cpu-usage-seconds-tot.yaml
index 6598b618c..05e869029 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-cpu-usage-seconds-tot.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-cpu-usage-seconds-tot.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-memory-cache.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-memory-cache.yaml
index fdbadce15..fdd000de1 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-memory-cache.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-memory-cache.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-memory-rss.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-memory-rss.yaml
index 6b7ee97b0..0df41cce2 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-memory-rss.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-memory-rss.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-memory-swap.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-memory-swap.yaml
index 0f4934870..33d05e22d 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-memory-swap.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-memory-swap.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-memory-working-set-by.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-memory-working-set-by.yaml
index 0e6389526..fdfafa448 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-memory-working-set-by.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-memory-working-set-by.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-resource.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-resource.yaml
index e50052008..e457bf6c4 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-resource.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.container-resource.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.pod-owner.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.pod-owner.yaml
index 073a298bb..da6b8c824 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.pod-owner.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-k8s.rules.pod-owner.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-apiserver-availability.rules.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-apiserver-availability.rules.yaml
index 62d9b1543..d89746fba 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-apiserver-availability.rules.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-apiserver-availability.rules.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-apiserver-burnrate.rules.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-apiserver-burnrate.rules.yaml
index 6af77fa24..799983d50 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-apiserver-burnrate.rules.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-apiserver-burnrate.rules.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-apiserver-histogram.rules.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-apiserver-histogram.rules.yaml
index e7907d1a8..4617e5777 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-apiserver-histogram.rules.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-apiserver-histogram.rules.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-apiserver-slos.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-apiserver-slos.yaml
index 39490e550..11b271c19 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-apiserver-slos.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-apiserver-slos.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-prometheus-general.rules.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-prometheus-general.rules.yaml
index 82501096b..214d6779f 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-prometheus-general.rules.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-prometheus-general.rules.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-prometheus-node-recording.rules.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-prometheus-node-recording.rules.yaml
index ad75b70e1..c18bbfa84 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-prometheus-node-recording.rules.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-prometheus-node-recording.rules.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-state-metrics.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-state-metrics.yaml
index 363f54ae2..811fee8f3 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-state-metrics.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kube-state-metrics.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubelet.rules.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubelet.rules.yaml
index fcf641c1d..b58e8e9a4 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubelet.rules.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubelet.rules.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-apps.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-apps.yaml
index 1666c8c4d..27e1295d2 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-apps.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-apps.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-resources.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-resources.yaml
index 63c249cbc..5b16015f6 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-resources.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-resources.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-storage.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-storage.yaml
index 47b8d9692..835f3507b 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-storage.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-storage.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-system-apiserver.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-system-apiserver.yaml
index beb5acc0a..d2b7ed397 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-system-apiserver.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-system-apiserver.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-system-kubelet.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-system-kubelet.yaml
index 783c83eb3..5ab93b861 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-system-kubelet.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-system-kubelet.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-system.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-system.yaml
index d738bbbc3..c4ef09492 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-system.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-kubernetes-system.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-node-exporter.rules.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-node-exporter.rules.yaml
index c8a1f727c..985147198 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-node-exporter.rules.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-node-exporter.rules.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-node-exporter.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-node-exporter.yaml
index cf2e498b6..7ce18c4df 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-node-exporter.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-node-exporter.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-node-network.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-node-network.yaml
index 63e6691b9..7e860fa6c 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-node-network.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-node-network.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-node.rules.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-node.rules.yaml
index 4054e22c6..e6f67a09d 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-node.rules.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-node.rules.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-prometheus-operator.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-prometheus-operator.yaml
index 81d143b35..af628a556 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-prometheus-operator.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-prometheus-operator.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-prometheus.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-prometheus.yaml
index 47af160b7..2b68b54f2 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-prometheus.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/PrometheusRule-kube-prometheus-stack-prometheus.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/Role-kube-prometheus-stack-admission.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/Role-kube-prometheus-stack-admission.yaml
index 135cfbfad..fd203d33f 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/Role-kube-prometheus-stack-admission.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/Role-kube-prometheus-stack-admission.yaml
@@ -10,9 +10,9 @@ metadata:
     app: kube-prometheus-stack-admission
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
     app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/RoleBinding-kube-prometheus-stack-admission.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/RoleBinding-kube-prometheus-stack-admission.yaml
index 0cc2a2d1e..ffdf2d7b5 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/RoleBinding-kube-prometheus-stack-admission.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/RoleBinding-kube-prometheus-stack-admission.yaml
@@ -10,9 +10,9 @@ metadata:
     app: kube-prometheus-stack-admission
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
     app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/Secret-alertmanager-kube-prometheus-stack-alertmanager.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/Secret-alertmanager-kube-prometheus-stack-alertmanager.yaml
index bfa4561ad..631acf96f 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/Secret-alertmanager-kube-prometheus-stack-alertmanager.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/Secret-alertmanager-kube-prometheus-stack-alertmanager.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack-alertmanager
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 data:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-alertmanager.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-alertmanager.yaml
index 275fae81e..b2e55ef96 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-alertmanager.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-alertmanager.yaml
@@ -8,9 +8,9 @@ metadata:
     self-monitor: "true"
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-coredns.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-coredns.yaml
index 2598c491b..61a8a6388 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-coredns.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-coredns.yaml
@@ -7,9 +7,9 @@ metadata:
     jobLabel: coredns
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
   namespace: kube-system
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-kube-etcd.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-kube-etcd.yaml
index a099836fd..5e34ec776 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-kube-etcd.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-kube-etcd.yaml
@@ -7,9 +7,9 @@ metadata:
     jobLabel: kube-etcd
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
   namespace: kube-system
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-operator.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-operator.yaml
index 2b66e7e1f..aaaa9a814 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-operator.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-operator.yaml
@@ -6,9 +6,9 @@ metadata:
   labels:
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
     app: kube-prometheus-stack-operator
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-prometheus-node-exporter.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-prometheus-node-exporter.yaml
index 586ce11d4..55b737a15 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-prometheus-node-exporter.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-prometheus-node-exporter.yaml
@@ -4,7 +4,7 @@ metadata:
   name: kube-prometheus-stack-prometheus-node-exporter
   namespace: kube-prometheus-stack
   labels:
-    helm.sh/chart: prometheus-node-exporter-4.51.0
+    helm.sh/chart: prometheus-node-exporter-4.51.1
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/component: metrics
     app.kubernetes.io/part-of: prometheus-node-exporter
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-prometheus.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-prometheus.yaml
index 22c728f95..720810db4 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-prometheus.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/Service-kube-prometheus-stack-prometheus.yaml
@@ -8,9 +8,9 @@ metadata:
     self-monitor: "true"
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-admission.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-admission.yaml
index e3ade546a..b1aa9a05a 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-admission.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-admission.yaml
@@ -10,9 +10,9 @@ metadata:
     app: kube-prometheus-stack-admission
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
     app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-alertmanager.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-alertmanager.yaml
index e14381ed1..0abf8a157 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-alertmanager.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-alertmanager.yaml
@@ -9,9 +9,9 @@ metadata:
     app.kubernetes.io/component: alertmanager
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 automountServiceAccountToken: true
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-operator.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-operator.yaml
index e28cc9b6f..ebd5a5c0c 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-operator.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-operator.yaml
@@ -6,9 +6,9 @@ metadata:
   labels:
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
     app: kube-prometheus-stack-operator
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-prometheus-node-exporter.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-prometheus-node-exporter.yaml
index 0196654e6..60edafe02 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-prometheus-node-exporter.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-prometheus-node-exporter.yaml
@@ -4,7 +4,7 @@ metadata:
   name: kube-prometheus-stack-prometheus-node-exporter
   namespace: kube-prometheus-stack
   labels:
-    helm.sh/chart: prometheus-node-exporter-4.51.0
+    helm.sh/chart: prometheus-node-exporter-4.51.1
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/component: metrics
     app.kubernetes.io/part-of: prometheus-node-exporter
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-prometheus.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-prometheus.yaml
index 79e0d5d45..05da42382 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-prometheus.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceAccount-kube-prometheus-stack-prometheus.yaml
@@ -9,9 +9,9 @@ metadata:
     app.kubernetes.io/component: prometheus
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 automountServiceAccountToken: true
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-alertmanager.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-alertmanager.yaml
index 280c504c4..255d7525d 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-alertmanager.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-alertmanager.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack-alertmanager
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-apiserver.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-apiserver.yaml
index 1b410aeef..84a72e20d 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-apiserver.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-apiserver.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack-apiserver
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-coredns.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-coredns.yaml
index 8d1abaf4a..04c406abf 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-coredns.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-coredns.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack-coredns
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-kube-etcd.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-kube-etcd.yaml
index aeaa31e51..a896db1f0 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-kube-etcd.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-kube-etcd.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack-kube-etcd
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-kubelet.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-kubelet.yaml
index 88df218be..5096c811f 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-kubelet.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-kubelet.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack-kubelet
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-operator.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-operator.yaml
index 1edc7459c..04dbe6f85 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-operator.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-operator.yaml
@@ -6,9 +6,9 @@ metadata:
   labels:
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
     app: kube-prometheus-stack-operator
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-prometheus-node-exporter.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-prometheus-node-exporter.yaml
index 1ef7e17da..75e0712e5 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-prometheus-node-exporter.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-prometheus-node-exporter.yaml
@@ -4,7 +4,7 @@ metadata:
   name: kube-prometheus-stack-prometheus-node-exporter
   namespace: kube-prometheus-stack
   labels:
-    helm.sh/chart: prometheus-node-exporter-4.51.0
+    helm.sh/chart: prometheus-node-exporter-4.51.1
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/component: metrics
     app.kubernetes.io/part-of: prometheus-node-exporter
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-prometheus.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-prometheus.yaml
index 9f15231c4..9f6838894 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-prometheus.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ServiceMonitor-kube-prometheus-stack-prometheus.yaml
@@ -7,9 +7,9 @@ metadata:
     app: kube-prometheus-stack-prometheus
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
 spec:
diff --git a/clusters/cl01tl/manifests/kube-prometheus-stack/ValidatingWebhookConfiguration-kube-prometheus-stack-admission.yaml b/clusters/cl01tl/manifests/kube-prometheus-stack/ValidatingWebhookConfiguration-kube-prometheus-stack-admission.yaml
index 37855b750..c3250dd5c 100644
--- a/clusters/cl01tl/manifests/kube-prometheus-stack/ValidatingWebhookConfiguration-kube-prometheus-stack-admission.yaml
+++ b/clusters/cl01tl/manifests/kube-prometheus-stack/ValidatingWebhookConfiguration-kube-prometheus-stack-admission.yaml
@@ -8,9 +8,9 @@ metadata:
     app: kube-prometheus-stack-admission
     app.kubernetes.io/managed-by: Helm
     app.kubernetes.io/instance: kube-prometheus-stack
-    app.kubernetes.io/version: "81.4.2"
+    app.kubernetes.io/version: "81.5.0"
     app.kubernetes.io/part-of: kube-prometheus-stack
-    chart: kube-prometheus-stack-81.4.2
+    chart: kube-prometheus-stack-81.5.0
     release: "kube-prometheus-stack"
     heritage: "Helm"
     app.kubernetes.io/name: kube-prometheus-stack-prometheus-operator
diff --git a/clusters/cl01tl/manifests/navidrome/Deployment-navidrome-main.yaml b/clusters/cl01tl/manifests/navidrome/Deployment-navidrome-main.yaml
index 456c301e5..1356dac27 100644
--- a/clusters/cl01tl/manifests/navidrome/Deployment-navidrome-main.yaml
+++ b/clusters/cl01tl/manifests/navidrome/Deployment-navidrome-main.yaml
@@ -47,7 +47,7 @@ spec:
             value: "false"
           - name: ND_PROMETHEUS_ENABLED
             value: "true"
-        image: deluan/navidrome:0.59.0@sha256:4edc8a1de3e042f30b78a478325839f4395177eb8201c27543dccc0eba674f23
+        image: deluan/navidrome:0.60.0@sha256:5d0f6ab343397c043c7063db14ae10e4e3980e54ae7388031cbce47e84af6657
         imagePullPolicy: IfNotPresent
         name: main
         resources: