Automated Manifest Update (#2259)

This PR contains newly rendered Kubernetes manifests automatically generated by the CI workflow.

Reviewed-on: #2259
Co-authored-by: gitea-bot <gitea-bot@alexlebens.net>
Co-committed-by: gitea-bot <gitea-bot@alexlebens.net>
This commit was merged in pull request #2259.
Commit 7a96d06727 (parent d008c08479), committed by Alex Lebens on 2025-12-04 21:47:46 +00:00.
2100 changed files with 365994 additions and 380674 deletions.


@@ -0,0 +1,16 @@
---
# Source: cilium/templates/cilium-load-balancer-ip-pool.yaml
apiVersion: "cilium.io/v2alpha1"
kind: CiliumLoadBalancerIPPool
metadata:
name: bgp-ip-pool
namespace: kube-system
labels:
app.kubernetes.io/name: bgp-ip-pool
app.kubernetes.io/instance: cilium
app.kubernetes.io/part-of: cilium
spec:
blocks:
- start: "10.232.2.100"
stop: "10.232.2.200"
disabled: true


@@ -0,0 +1,17 @@
---
# Source: cilium/templates/cilium-load-balancer-ip-pool.yaml
apiVersion: "cilium.io/v2alpha1"
kind: CiliumLoadBalancerIPPool
metadata:
name: default-ip-pool
namespace: kube-system
labels:
app.kubernetes.io/name: default-ip-pool
app.kubernetes.io/instance: cilium
app.kubernetes.io/part-of: cilium
spec:
blocks:
- start: "10.232.1.21"
stop: "10.232.1.23"
- start: "10.232.2.21"
stop: "10.232.2.23"
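
With enable-lb-ipam set to "true" in the cilium-config ConfigMap further down, Cilium's LB-IPAM allocates addresses from these pools to Services of type LoadBalancer; bgp-ip-pool above is disabled, so allocations come from default-ip-pool. A minimal, hypothetical sketch of such a Service (the name and the lbipam.cilium.io/ips annotation for pinning a specific address are assumptions, not part of this PR):

---
apiVersion: v1
kind: Service
metadata:
  name: example-lb                    # hypothetical Service, not in this PR
  namespace: default
  annotations:
    # optional: request a specific address from default-ip-pool's ranges (assumed annotation)
    lbipam.cilium.io/ips: "10.232.1.21"
spec:
  type: LoadBalancer                  # LB-IPAM only assigns IPs to LoadBalancer Services
  selector:
    app: example
  ports:
    - port: 80
      targetPort: 8080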


@@ -0,0 +1,298 @@
---
# Source: cilium/charts/cilium/templates/cilium-operator/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium-operator
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
# to automatically delete [core|kube]dns pods so that they start being
# managed by Cilium
- delete
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
- cilium-config
verbs:
# allow patching of the configmap to set annotations
- patch
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
# To remove node taints
- nodes
# To set NetworkUnavailable false on startup
- nodes/status
verbs:
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
# to perform LB IP allocation for BGP
- services/status
verbs:
- update
- patch
- apiGroups:
- ""
resources:
# to check apiserver connectivity
- namespaces
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
# to perform the translation of a CNP that contains `ToGroup` to its endpoints
- services
- endpoints
verbs:
- get
- list
- watch
- create
- update
- delete
- patch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumclusterwidenetworkpolicies
verbs:
# Create auto-generated CNPs and CCNPs from Policies that have 'toGroups'
- create
- update
- deletecollection
# To update the status of the CNPs and CCNPs
- patch
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies/status
verbs:
# Update the auto-generated CNPs and CCNPs status.
- patch
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpoints
- ciliumidentities
verbs:
# To perform garbage collection of such resources
- delete
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumidentities
verbs:
# To synchronize garbage collection of such resources
- update
- apiGroups:
- cilium.io
resources:
- ciliumnodes
verbs:
- create
- update
- get
- list
- watch
# To perform CiliumNode garbage collection
- delete
- apiGroups:
- cilium.io
resources:
- ciliumnodes/status
verbs:
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpointslices
- ciliumenvoyconfigs
- ciliumbgppeerconfigs
- ciliumbgpadvertisements
- ciliumbgpnodeconfigs
verbs:
- create
- update
- get
- list
- watch
- delete
- patch
- apiGroups:
- cilium.io
resources:
- ciliumbgpclusterconfigs/status
- ciliumbgppeerconfigs/status
verbs:
- update
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- update
resourceNames:
- ciliumloadbalancerippools.cilium.io
- ciliumbgppeeringpolicies.cilium.io
- ciliumbgpclusterconfigs.cilium.io
- ciliumbgppeerconfigs.cilium.io
- ciliumbgpadvertisements.cilium.io
- ciliumbgpnodeconfigs.cilium.io
- ciliumbgpnodeconfigoverrides.cilium.io
- ciliumclusterwideenvoyconfigs.cilium.io
- ciliumclusterwidenetworkpolicies.cilium.io
- ciliumegressgatewaypolicies.cilium.io
- ciliumendpoints.cilium.io
- ciliumendpointslices.cilium.io
- ciliumenvoyconfigs.cilium.io
- ciliumidentities.cilium.io
- ciliumlocalredirectpolicies.cilium.io
- ciliumnetworkpolicies.cilium.io
- ciliumnodes.cilium.io
- ciliumnodeconfigs.cilium.io
- ciliumcidrgroups.cilium.io
- ciliuml2announcementpolicies.cilium.io
- ciliumpodippools.cilium.io
- ciliumgatewayclassconfigs.cilium.io
- apiGroups:
- cilium.io
resources:
- ciliumloadbalancerippools
- ciliumpodippools
- ciliumbgppeeringpolicies
- ciliumbgpclusterconfigs
- ciliumbgpnodeconfigoverrides
- ciliumbgppeerconfigs
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumpodippools
verbs:
- create
- apiGroups:
- cilium.io
resources:
- ciliumloadbalancerippools/status
verbs:
- patch
# For cilium-operator running in HA mode.
#
# Cilium operator running in HA mode requires the use of a ResourceLock for Leader Election
# between multiple running instances.
# The preferred way of doing this is to use LeasesResourceLock, as edits to Leases are less
# common and fewer objects in the cluster watch "all Leases".
# (A hypothetical Lease sketch follows this ClusterRole.)
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- update
- apiGroups:
- gateway.networking.k8s.io
resources:
- gatewayclasses
- gateways
- tlsroutes
- httproutes
- grpcroutes
- referencegrants
- referencepolicies
verbs:
- get
- list
- watch
- apiGroups:
- gateway.networking.k8s.io
resources:
- gatewayclasses
verbs:
- patch
- apiGroups:
- gateway.networking.k8s.io
resources:
- gatewayclasses/status
- gateways/status
- httproutes/status
- grpcroutes/status
- tlsroutes/status
verbs:
- update
- patch
- apiGroups:
- cilium.io
resources:
- ciliumgatewayclassconfigs
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumgatewayclassconfigs/status
verbs:
- update
- patch
- apiGroups:
- multicluster.x-k8s.io
resources:
- serviceimports
verbs:
- get
- list
- watch
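
The coordination.k8s.io "leases" rule above exists because cilium-operator runs two replicas (see the Deployment later in this diff) and uses Lease-based leader election, as the preceding comment explains. A hypothetical view of the resulting Lease object, for orientation only (the lease name and field values are assumptions, not part of this PR):

---
apiVersion: coordination.k8s.io/v1
kind: Lease
metadata:
  name: cilium-operator-resource-lock               # assumed name; check kube-system in your cluster
  namespace: kube-system
spec:
  holderIdentity: cilium-operator-6d4f9b7c8d-abcde   # example: the pod currently holding leadership
  leaseDurationSeconds: 15                           # example value
  leaseTransitions: 1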


@@ -0,0 +1,112 @@
---
# Source: cilium/charts/cilium/templates/cilium-agent/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cilium
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
- services
- pods
- endpoints
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- list
- watch
# This is used when validating policies in preflight. This will need to stay
# until we figure out how to avoid "get" inside the preflight, and then
# it should ideally be removed.
- get
- apiGroups:
- cilium.io
resources:
- ciliumloadbalancerippools
- ciliumbgppeeringpolicies
- ciliumbgpnodeconfigs
- ciliumbgpadvertisements
- ciliumbgppeerconfigs
- ciliumclusterwideenvoyconfigs
- ciliumclusterwidenetworkpolicies
- ciliumegressgatewaypolicies
- ciliumendpoints
- ciliumendpointslices
- ciliumenvoyconfigs
- ciliumidentities
- ciliumlocalredirectpolicies
- ciliumnetworkpolicies
- ciliumnodes
- ciliumnodeconfigs
- ciliumcidrgroups
- ciliuml2announcementpolicies
- ciliumpodippools
verbs:
- list
- watch
- apiGroups:
- cilium.io
resources:
- ciliumidentities
- ciliumendpoints
- ciliumnodes
verbs:
- create
- apiGroups:
- cilium.io
# To synchronize garbage collection of such resources
resources:
- ciliumidentities
verbs:
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpoints
verbs:
- delete
- get
- apiGroups:
- cilium.io
resources:
- ciliumnodes
- ciliumnodes/status
verbs:
- get
- update
- apiGroups:
- cilium.io
resources:
- ciliumendpoints/status
- ciliumendpoints
- ciliuml2announcementpolicies/status
- ciliumbgpnodeconfigs/status
verbs:
- patch


@@ -0,0 +1,46 @@
---
# Source: cilium/charts/cilium/templates/hubble-ui/clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hubble-ui
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- componentstatuses
- endpoints
- namespaces
- nodes
- pods
- services
verbs:
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- watch
- apiGroups:
- cilium.io
resources:
- "*"
verbs:
- get
- list
- watch


@@ -0,0 +1,16 @@
---
# Source: cilium/charts/cilium/templates/cilium-operator/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium-operator
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium-operator
subjects:
- kind: ServiceAccount
name: "cilium-operator"
namespace: kube-system


@@ -0,0 +1,16 @@
---
# Source: cilium/charts/cilium/templates/cilium-agent/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cilium
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cilium
subjects:
- kind: ServiceAccount
name: "cilium"
namespace: kube-system


@@ -0,0 +1,16 @@
---
# Source: cilium/charts/cilium/templates/hubble-ui/clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: hubble-ui
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: hubble-ui
subjects:
- kind: ServiceAccount
name: "hubble-ui"
namespace: kube-system


@@ -0,0 +1,256 @@
---
# Source: cilium/charts/cilium/templates/cilium-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: cilium-config
namespace: kube-system
data:
# Identity allocation mode selects how identities are shared between cilium
# nodes by setting how they are stored. The options are "crd", "kvstore" or
# "doublewrite-readkvstore" / "doublewrite-readcrd".
# - "crd" stores identities in kubernetes as CRDs (custom resource definition).
# These can be queried with:
# kubectl get ciliumid
# - "kvstore" stores identities in an etcd kvstore, that is
# configured below. Cilium versions before 1.6 supported only the kvstore
# backend. Upgrades from these older cilium versions should continue using
# the kvstore by commenting out the identity-allocation-mode below, or
# setting it to "kvstore".
# - "doublewrite" modes store identities in both the kvstore and CRDs. This is useful
# for seamless migrations from the kvstore mode to the crd mode. Consult the
# documentation for more information on how to perform the migration.
identity-allocation-mode: crd
identity-heartbeat-timeout: "30m0s"
identity-gc-interval: "15m0s"
cilium-endpoint-gc-interval: "5m0s"
nodes-gc-interval: "5m0s"
# If you want to run cilium in debug mode change this value to true
debug: "false"
metrics-sampling-interval: "5m"
# The agent can be put into the following three policy enforcement modes
# default, always and never.
# https://docs.cilium.io/en/latest/security/policy/intro/#policy-enforcement-modes
enable-policy: "default"
# If you want metrics enabled in all of your Cilium agents, set the port
# on which the Cilium agents will expose their metrics.
# This option deprecates the "prometheus-serve-addr" option in the
# "cilium-metrics-config" ConfigMap.
# NOTE that this will open the port on ALL nodes where Cilium pods are
# scheduled. (A hypothetical Prometheus scrape snippet follows this ConfigMap.)
prometheus-serve-addr: ":9962"
# A space-separated list of controller groups for which to enable metrics.
# The special values of "all" and "none" are supported.
controller-group-metrics: write-cni-file sync-host-ips sync-lb-maps-with-k8s-services
# If you want metrics enabled in cilium-operator, set the port
# on which the Cilium Operator will expose its metrics.
# NOTE that this will open the port on the nodes where the Cilium operator pod
# is scheduled.
operator-prometheus-serve-addr: ":9963"
enable-metrics: "true"
enable-envoy-config: "true"
envoy-config-retry-interval: "15s"
enable-gateway-api: "true"
enable-gateway-api-secrets-sync: "true"
enable-gateway-api-proxy-protocol: "false"
enable-gateway-api-app-protocol: "true"
enable-gateway-api-alpn: "true"
gateway-api-xff-num-trusted-hops: "0"
gateway-api-service-externaltrafficpolicy: "Cluster"
gateway-api-secrets-namespace: "cilium-secrets"
gateway-api-hostnetwork-enabled: "false"
gateway-api-hostnetwork-nodelabelselector: ""
enable-policy-secrets-sync: "true"
policy-secrets-only-from-secrets-namespace: "true"
policy-secrets-namespace: "cilium-secrets"
# Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
# address.
enable-ipv4: "true"
# Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
# address.
enable-ipv6: "false"
# Users who wish to specify their own custom CNI configuration file must set
# custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
custom-cni-conf: "false"
enable-bpf-clock-probe: "false"
# If you want cilium monitor to aggregate tracing for packets, set this level
# to "low", "medium", or "maximum". The higher the level, the fewer packets
# will be seen in monitor output.
monitor-aggregation: medium
# The monitor aggregation interval governs the typical time between monitor
# notification events for each allowed connection.
#
# Only effective when monitor aggregation is set to "medium" or higher.
monitor-aggregation-interval: "5s"
# The monitor aggregation flags determine which TCP flags, upon the
# first observation, cause monitor notifications to be generated.
#
# Only effective when monitor aggregation is set to "medium" or higher.
monitor-aggregation-flags: all
# Specifies the ratio (0.0-1.0] of total system memory to use for dynamic
# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
bpf-map-dynamic-size-ratio: "0.0025"
# bpf-policy-map-max specifies the maximum number of entries in endpoint
# policy map (per endpoint)
bpf-policy-map-max: "16384"
# bpf-policy-stats-map-max specifies the maximum number of entries in global
# policy stats map
bpf-policy-stats-map-max: "65536"
# bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
# backend and affinity maps.
bpf-lb-map-max: "65536"
bpf-lb-external-clusterip: "false"
bpf-lb-source-range-all-types: "false"
bpf-lb-algorithm-annotation: "false"
bpf-lb-mode-annotation: "false"
bpf-distributed-lru: "false"
bpf-events-drop-enabled: "true"
bpf-events-policy-verdict-enabled: "true"
bpf-events-trace-enabled: "true"
# Pre-allocation of map entries allows per-packet latency to be reduced, at
# the expense of up-front memory allocation for the entries in the maps. The
# default value below will minimize memory usage in the default installation;
# users who are sensitive to latency may consider setting this to "true".
#
# This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
# this option and behave as though it is set to "true".
#
# If this value is modified, then during the next Cilium startup the restore
# of existing endpoints and tracking of ongoing connections may be disrupted.
# As a result, reply packets may be dropped and the load-balancing decisions
# for established connections may change.
#
# If this option is set to "false" during an upgrade from 1.3 or earlier to
# 1.4 or later, then it may cause one-time disruptions during the upgrade.
preallocate-bpf-maps: "false"
# Name of the cluster. Only relevant when building a mesh of clusters.
cluster-name: "default"
# Unique ID of the cluster. Must be unique across all connected clusters and
# in the range of 1 to 255. Only relevant when building a mesh of clusters.
cluster-id: "0"
# Encapsulation mode for communication between nodes
# Possible values:
# - disabled
# - vxlan (default)
# - geneve
routing-mode: "tunnel"
tunnel-protocol: "vxlan"
tunnel-source-port-range: "0-0"
service-no-backend-response: "reject"
# Enables L7 proxy for L7 policy enforcement and visibility
enable-l7-proxy: "true"
enable-ipv4-masquerade: "true"
enable-ipv4-big-tcp: "false"
enable-ipv6-big-tcp: "false"
enable-ipv6-masquerade: "true"
enable-tcx: "true"
datapath-mode: "veth"
enable-masquerade-to-route-source: "false"
enable-xt-socket-fallback: "true"
install-no-conntrack-iptables-rules: "false"
iptables-random-fully: "false"
auto-direct-node-routes: "false"
direct-routing-skip-unreachable: "false"
# List of devices used to attach bpf_host.o (implements BPF NodePort,
# host-firewall and BPF masquerading)
devices: "end0 enp6s0"
kube-proxy-replacement: "true"
kube-proxy-replacement-healthz-bind-address: ""
bpf-lb-sock: "true"
bpf-lb-sock-hostns-only: "true"
enable-health-check-nodeport: "true"
enable-health-check-loadbalancer-ip: "false"
node-port-bind-protection: "true"
enable-auto-protect-node-port-range: "true"
bpf-lb-acceleration: "disabled"
enable-svc-source-range-check: "true"
enable-l2-neigh-discovery: "false"
k8s-require-ipv4-pod-cidr: "false"
k8s-require-ipv6-pod-cidr: "false"
enable-k8s-networkpolicy: "true"
enable-endpoint-lockdown-on-policy-overflow: "false"
# Tell the agent to generate and write a CNI configuration file
write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
cni-exclusive: "true"
cni-log-file: "/var/run/cilium/cilium-cni.log"
enable-endpoint-health-checking: "true"
enable-health-checking: "true"
health-check-icmp-failure-threshold: "3"
enable-well-known-identities: "false"
enable-node-selector-labels: "false"
synchronize-k8s-nodes: "true"
operator-api-serve-addr: "127.0.0.1:9234"
enable-hubble: "true"
# UNIX domain socket for Hubble server to listen to.
hubble-socket-path: "/var/run/cilium/hubble.sock"
hubble-network-policy-correlation-enabled: "true"
# An additional address for Hubble server to listen to (e.g. ":4244").
hubble-listen-address: ":4244"
hubble-disable-tls: "false"
hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt
hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key
hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt
ipam: "kubernetes"
ipam-cilium-node-update-rate: "15s"
default-lb-service-ipam: "lbipam"
egress-gateway-reconciliation-trigger-interval: "1s"
enable-vtep: "false"
vtep-endpoint: ""
vtep-cidr: ""
vtep-mask: ""
vtep-mac: ""
enable-k8s-endpoint-slice: "true"
procfs: "/host/proc"
bpf-root: "/sys/fs/bpf"
cgroup-root: "/sys/fs/cgroup"
enable-cilium-endpoint-slice: "true"
ces-rate-limits: "[{\"burst\":20,\"limit\":10,\"nodes\":0},{\"burst\":100,\"limit\":50,\"nodes\":100}]"
identity-management-mode: "agent"
enable-sctp: "false"
k8s-client-qps: "50"
k8s-client-burst: "100"
remove-cilium-node-taints: "true"
set-cilium-node-taints: "true"
set-cilium-is-up-condition: "true"
unmanaged-pod-watcher-interval: "15"
# default DNS proxy to transparent mode in non-chaining modes
dnsproxy-enable-transparent-mode: "true"
dnsproxy-socket-linger-timeout: "10"
tofqdns-dns-reject-response-code: "refused"
tofqdns-enable-dns-compression: "true"
tofqdns-endpoint-max-ip-per-hostname: "1000"
tofqdns-idle-connection-grace-period: "0s"
tofqdns-max-deferred-connection-deletes: "10000"
tofqdns-proxy-response-max-delay: "100ms"
tofqdns-preallocate-identities: "true"
agent-not-ready-taint-key: "node.cilium.io/agent-not-ready"
mesh-auth-enabled: "true"
mesh-auth-queue-size: "1024"
mesh-auth-rotated-identities-queue-size: "1024"
mesh-auth-gc-interval: "5m0s"
proxy-xff-num-trusted-hops-ingress: "0"
proxy-xff-num-trusted-hops-egress: "0"
proxy-connect-timeout: "2"
proxy-initial-fetch-timeout: "30"
proxy-max-requests-per-connection: "0"
proxy-max-connection-duration-seconds: "0"
proxy-idle-timeout-seconds: "60"
proxy-max-concurrent-retries: "128"
http-retry-count: "3"
http-stream-idle-timeout: "300"
external-envoy-proxy: "true"
envoy-base-id: "0"
envoy-access-log-buffer-size: "4096"
envoy-keep-cap-netbindservice: "true"
max-connected-clusters: "255"
clustermesh-enable-endpoint-sync: "false"
clustermesh-enable-mcs-api: "false"
policy-default-local-cluster: "false"
nat-map-stats-entries: "32"
nat-map-stats-interval: "30s"
enable-internal-traffic-policy: "true"
enable-lb-ipam: "true"
enable-non-default-deny-policies: "true"
enable-source-ip-verification: "true"
# Extra config allows adding arbitrary properties to the cilium config.
# By putting it at the end of the ConfigMap, it's also possible to override existing properties.
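
As a hypothetical illustration of the metrics settings above (prometheus-serve-addr ":9962" on every node running a Cilium agent, operator-prometheus-serve-addr ":9963" on the node running the operator), a minimal Prometheus scrape configuration could look roughly like this; the targets are placeholders and nothing below is part of the rendered manifests:

scrape_configs:
  - job_name: cilium-agent
    static_configs:
      - targets: ["<node-ip>:9962"]   # placeholder; substitute node IPs or use service discovery
  - job_name: cilium-operator
    static_configs:
      - targets: ["<node-ip>:9963"]   # placeholder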

File diff suppressed because one or more lines are too long


@@ -0,0 +1,9 @@
---
# Source: cilium/charts/cilium/templates/hubble-relay/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: hubble-relay-config
namespace: kube-system
data:
config.yaml: "cluster-name: default\npeer-service: \"hubble-peer.kube-system.svc.cluster.local.:443\"\nlisten-address: :4245\ngops: true\ngops-port: \"9893\"\nretry-timeout: \nsort-buffer-len-max: \nsort-buffer-drain-timeout: \ntls-hubble-client-cert-file: /var/lib/hubble-relay/tls/client.crt\ntls-hubble-client-key-file: /var/lib/hubble-relay/tls/client.key\ntls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt\n\ndisable-server-tls: true\n"
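
For readability, the escaped config.yaml string above unfolds to the following (same content, line breaks restored):

cluster-name: default
peer-service: "hubble-peer.kube-system.svc.cluster.local.:443"
listen-address: :4245
gops: true
gops-port: "9893"
retry-timeout:
sort-buffer-len-max:
sort-buffer-drain-timeout:
tls-hubble-client-cert-file: /var/lib/hubble-relay/tls/client.crt
tls-hubble-client-key-file: /var/lib/hubble-relay/tls/client.key
tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt
disable-server-tls: true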


@@ -0,0 +1,9 @@
---
# Source: cilium/charts/cilium/templates/hubble-ui/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: hubble-ui-nginx
namespace: kube-system
data:
nginx.conf: "server {\n listen 8081;\n listen [::]:8081;\n server_name localhost;\n root /app;\n index index.html;\n client_max_body_size 1G;\n\n location / {\n proxy_set_header Host $host;\n proxy_set_header X-Real-IP $remote_addr;\n\n location /api {\n proxy_http_version 1.1;\n proxy_pass_request_headers on;\n proxy_pass http://127.0.0.1:8090;\n }\n location / {\n if ($http_user_agent ~* \"kube-probe\") { access_log off; }\n # double `/index.html` is required here\n try_files $uri $uri/ /index.html /index.html;\n }\n\n # Liveness probe\n location /healthz {\n access_log off;\n add_header Content-Type text/plain;\n return 200 'ok';\n }\n }\n}"


@@ -0,0 +1,174 @@
---
# Source: cilium/charts/cilium/templates/cilium-envoy/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cilium-envoy
namespace: kube-system
labels:
k8s-app: cilium-envoy
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-envoy
name: cilium-envoy
spec:
selector:
matchLabels:
k8s-app: cilium-envoy
updateStrategy:
rollingUpdate:
maxUnavailable: 2
type: RollingUpdate
template:
metadata:
annotations:
labels:
k8s-app: cilium-envoy
name: cilium-envoy
app.kubernetes.io/name: cilium-envoy
app.kubernetes.io/part-of: cilium
spec:
securityContext:
appArmorProfile:
type: Unconfined
containers:
- name: cilium-envoy
image: "quay.io/cilium/cilium-envoy:v1.34.10-1762597008-ff7ae7d623be00078865cff1b0672cc5d9bfc6d5@sha256:1deb6709afcb5523579bf1abbc3255adf9e354565a88c4a9162c8d9cb1d77ab5"
imagePullPolicy: IfNotPresent
command:
- /usr/bin/cilium-envoy-starter
args:
- '--keep-cap-net-bind-service'
- '--'
- '-c /var/run/cilium/envoy/bootstrap-config.json'
- '--base-id 0'
- '--log-level info'
startupProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9878
scheme: HTTP
failureThreshold: 105
periodSeconds: 2
successThreshold: 1
initialDelaySeconds: 5
livenessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9878
scheme: HTTP
periodSeconds: 30
successThreshold: 1
failureThreshold: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9878
scheme: HTTP
periodSeconds: 30
successThreshold: 1
failureThreshold: 3
timeoutSeconds: 5
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: "localhost"
- name: KUBERNETES_SERVICE_PORT
value: "7445"
ports:
- name: envoy-metrics
containerPort: 9964
hostPort: 9964
protocol: TCP
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
add:
- NET_ADMIN
- NET_BIND_SERVICE
- PERFMON
- BPF
drop:
- ALL
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: envoy-sockets
mountPath: /var/run/cilium/envoy/sockets
readOnly: false
- name: envoy-artifacts
mountPath: /var/run/cilium/envoy/artifacts
readOnly: true
- name: envoy-config
mountPath: /var/run/cilium/envoy/
readOnly: true
- name: bpf-maps
mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccountName: "cilium-envoy"
automountServiceAccountToken: true
terminationGracePeriodSeconds: 1
hostNetwork: true
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: cilium.io/no-schedule
operator: NotIn
values:
- "true"
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium-envoy
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: Exists
volumes:
- name: envoy-sockets
hostPath:
path: "/var/run/cilium/envoy/sockets"
type: DirectoryOrCreate
- name: envoy-artifacts
hostPath:
path: "/var/run/cilium/envoy/artifacts"
type: DirectoryOrCreate
- name: envoy-config
configMap:
name: "cilium-envoy-config"
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
items:
- key: bootstrap-config.json
path: bootstrap-config.json
# To keep state between restarts / upgrades for bpf maps
- name: bpf-maps
hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate


@@ -0,0 +1,494 @@
---
# Source: cilium/charts/cilium/templates/cilium-agent/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cilium
namespace: kube-system
labels:
k8s-app: cilium
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-agent
spec:
selector:
matchLabels:
k8s-app: cilium
updateStrategy:
rollingUpdate:
maxUnavailable: 2
type: RollingUpdate
template:
metadata:
annotations:
# ensure pods roll when configmap updates
cilium.io/cilium-configmap-checksum: "31ad7748e0aefe75b6436d96c8c85754e0b44e68e6012fa188bc5bcd66085828"
kubectl.kubernetes.io/default-container: cilium-agent
labels:
k8s-app: cilium
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
spec:
securityContext:
appArmorProfile:
type: Unconfined
seccompProfile:
type: Unconfined
containers:
- name: cilium-agent
image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
imagePullPolicy: IfNotPresent
command:
- cilium-agent
args:
- --config-dir=/tmp/cilium/config-map
startupProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
failureThreshold: 300
periodSeconds: 2
successThreshold: 1
initialDelaySeconds: 5
livenessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
- name: "require-k8s-connectivity"
value: "false"
periodSeconds: 30
successThreshold: 1
failureThreshold: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9879
scheme: HTTP
httpHeaders:
- name: "brief"
value: "true"
periodSeconds: 30
successThreshold: 1
failureThreshold: 3
timeoutSeconds: 5
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_CLUSTERMESH_CONFIG
value: /var/lib/cilium/clustermesh/
- name: GOMEMLIMIT
valueFrom:
resourceFieldRef:
resource: limits.memory
divisor: '1'
- name: KUBERNETES_SERVICE_HOST
value: "localhost"
- name: KUBERNETES_SERVICE_PORT
value: "7445"
- name: KUBE_CLIENT_BACKOFF_BASE
value: "1"
- name: KUBE_CLIENT_BACKOFF_DURATION
value: "120"
lifecycle:
postStart:
exec:
command:
- "bash"
- "-c"
- |
set -o errexit
set -o pipefail
set -o nounset
# When running in AWS ENI mode, it's likely that 'aws-node' has
# had a chance to install SNAT iptables rules. These can result
# in dropped traffic, so we should attempt to remove them.
# We do it using a 'postStart' hook since this may need to run
# for nodes which might have already been init'ed but may still
# have dangling rules. This is safe because there are no
# dependencies on anything that is part of the startup script
# itself, and can be safely run multiple times per node (e.g. in
# case of a restart).
if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
then
echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
fi
echo 'Done!'
preStop:
exec:
command:
- /cni-uninstall.sh
ports:
- name: peer-service
containerPort: 4244
hostPort: 4244
protocol: TCP
- name: prometheus
containerPort: 9962
hostPort: 9962
protocol: TCP
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
add:
- CHOWN
- KILL
- NET_ADMIN
- NET_RAW
- IPC_LOCK
- SYS_ADMIN
- SYS_RESOURCE
- DAC_OVERRIDE
- FOWNER
- SETGID
- SETUID
- PERFMON
- BPF
drop:
- ALL
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: envoy-sockets
mountPath: /var/run/cilium/envoy/sockets
readOnly: false
# Unprivileged containers need to mount /proc/sys/net from the host
# to have write access
- mountPath: /host/proc/sys/net
name: host-proc-sys-net
# Unprivileged containers need to mount /proc/sys/kernel from the host
# to have write access
- mountPath: /host/proc/sys/kernel
name: host-proc-sys-kernel
- name: bpf-maps
mountPath: /sys/fs/bpf
# Unprivileged containers can't set mount propagation to bidirectional
# in this case we will mount the bpf fs from an init container that
# is privileged and set the mount propagation from host to container
# in Cilium.
mountPropagation: HostToContainer
# Check for duplicate mounts before mounting
- name: cilium-cgroup
mountPath: /sys/fs/cgroup
- name: cilium-run
mountPath: /var/run/cilium
- name: cilium-netns
mountPath: /var/run/cilium/netns
mountPropagation: HostToContainer
- name: etc-cni-netd
mountPath: /host/etc/cni/net.d
- name: clustermesh-secrets
mountPath: /var/lib/cilium/clustermesh
readOnly: true
# Needed to be able to load kernel modules
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
- name: hubble-tls
mountPath: /var/lib/cilium/tls/hubble
readOnly: true
- name: tmp
mountPath: /tmp
initContainers:
- name: config
image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
imagePullPolicy: IfNotPresent
command:
- cilium-dbg
- build-config
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: KUBERNETES_SERVICE_HOST
value: "localhost"
- name: KUBERNETES_SERVICE_PORT
value: "7445"
volumeMounts:
- name: tmp
mountPath: /tmp
terminationMessagePolicy: FallbackToLogsOnError
- name: apply-sysctl-overwrites
image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
imagePullPolicy: IfNotPresent
env:
- name: BIN_PATH
value: /opt/cni/bin
command:
- sh
- -ec
# The statically linked Go program binary is invoked to avoid any
# dependency on utilities like sh that can be missing on certain
# distros installed on the underlying host. Copy the binary to the
# same directory where we install cilium cni plugin so that exec permissions
# are available.
- |
cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
rm /hostbin/cilium-sysctlfix
volumeMounts:
- name: hostproc
mountPath: /hostproc
- name: cni-path
mountPath: /hostbin
terminationMessagePolicy: FallbackToLogsOnError
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
add:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
drop:
- ALL
# Mount the bpf fs if it is not mounted. We will perform this task
# from a privileged container because the mount propagation bidirectional
# only works from privileged containers.
- name: mount-bpf-fs
image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
imagePullPolicy: IfNotPresent
args:
- 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf'
command:
- /bin/bash
- -c
- --
terminationMessagePolicy: FallbackToLogsOnError
securityContext:
privileged: true
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
- name: clean-cilium-state
image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
imagePullPolicy: IfNotPresent
command:
- /init-container.sh
env:
- name: CILIUM_ALL_STATE
valueFrom:
configMapKeyRef:
name: cilium-config
key: clean-cilium-state
optional: true
- name: CILIUM_BPF_STATE
valueFrom:
configMapKeyRef:
name: cilium-config
key: clean-cilium-bpf-state
optional: true
- name: WRITE_CNI_CONF_WHEN_READY
valueFrom:
configMapKeyRef:
name: cilium-config
key: write-cni-conf-when-ready
optional: true
- name: KUBERNETES_SERVICE_HOST
value: "localhost"
- name: KUBERNETES_SERVICE_PORT
value: "7445"
terminationMessagePolicy: FallbackToLogsOnError
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
add:
- NET_ADMIN
- SYS_ADMIN
- SYS_RESOURCE
drop:
- ALL
volumeMounts:
- name: bpf-maps
mountPath: /sys/fs/bpf
# Required to mount cgroup filesystem from the host to cilium agent pod
- name: cilium-cgroup
mountPath: /sys/fs/cgroup
mountPropagation: HostToContainer
- name: cilium-run
mountPath: /var/run/cilium # wait-for-kube-proxy
# Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent
- name: install-cni-binaries
image: "quay.io/cilium/cilium:v1.18.4@sha256:49d87af187eeeb9e9e3ec2bc6bd372261a0b5cb2d845659463ba7cc10fe9e45f"
imagePullPolicy: IfNotPresent
command:
- "/install-plugin.sh"
resources:
requests:
cpu: 100m
memory: 10Mi
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
drop:
- ALL
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: cni-path
mountPath: /host/opt/cni/bin # .Values.cni.install
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccountName: "cilium"
automountServiceAccountToken: true
terminationGracePeriodSeconds: 1
hostNetwork: true
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: Exists
volumes:
# For sharing configuration between the "config" initContainer and the agent
- name: tmp
emptyDir: {}
# To keep state between restarts / upgrades
- name: cilium-run
hostPath:
path: /var/run/cilium
type: DirectoryOrCreate
# To exec into pod network namespaces
- name: cilium-netns
hostPath:
path: /var/run/netns
type: DirectoryOrCreate
# To keep state between restarts / upgrades for bpf maps
- name: bpf-maps
hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
# To mount cgroup2 filesystem on the host or apply sysctlfix
- name: hostproc
hostPath:
path: /proc
type: Directory
# To keep state between restarts / upgrades for cgroup2 filesystem
- name: cilium-cgroup
hostPath:
path: /sys/fs/cgroup
type: DirectoryOrCreate
# To install cilium cni plugin in the host
- name: cni-path
hostPath:
path: /opt/cni/bin
type: DirectoryOrCreate
# To install cilium cni configuration in the host
- name: etc-cni-netd
hostPath:
path: /etc/cni/net.d
type: DirectoryOrCreate
# To be able to load kernel modules
- name: lib-modules
hostPath:
path: /lib/modules
# To access iptables concurrently with other processes (e.g. kube-proxy)
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Sharing socket with Cilium Envoy on the same node by using a host path
- name: envoy-sockets
hostPath:
path: "/var/run/cilium/envoy/sockets"
type: DirectoryOrCreate
# To read the clustermesh configuration
- name: clustermesh-secrets
projected:
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
sources:
- secret:
name: cilium-clustermesh
optional: true
# note: items are not explicitly listed here, since the entries of this secret
# depend on the peers configured, and that would cause a restart of all agents
# at every addition/removal. Leaving the field empty causes each secret entry
# to be automatically projected into the volume as a file whose name is the key.
- secret:
name: clustermesh-apiserver-remote-cert
optional: true
items:
- key: tls.key
path: common-etcd-client.key
- key: tls.crt
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
# note: we configure the volume for the kvstoremesh-specific certificate
# regardless of whether KVStoreMesh is enabled or not, so that it can be
# automatically mounted in case KVStoreMesh gets subsequently enabled,
# without requiring an agent restart.
- secret:
name: clustermesh-apiserver-local-cert
optional: true
items:
- key: tls.key
path: local-etcd-client.key
- key: tls.crt
path: local-etcd-client.crt
- key: ca.crt
path: local-etcd-client-ca.crt
- name: host-proc-sys-net
hostPath:
path: /proc/sys/net
type: Directory
- name: host-proc-sys-kernel
hostPath:
path: /proc/sys/kernel
type: Directory
- name: hubble-tls
projected:
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
sources:
- secret:
name: hubble-server-certs
optional: true
items:
- key: tls.crt
path: server.crt
- key: tls.key
path: server.key
- key: ca.crt
path: client-ca.crt


@@ -0,0 +1,139 @@
---
# Source: cilium/charts/cilium/templates/cilium-operator/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: cilium-operator
namespace: kube-system
labels:
io.cilium/app: operator
name: cilium-operator
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-operator
spec:
# See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go
# for more details.
replicas: 2
selector:
matchLabels:
io.cilium/app: operator
name: cilium-operator
# Ensure the operator can update on single-node k8s clusters by using a rolling update with maxUnavailable=100% in case
# of one replica and no user-configured Recreate strategy.
# Otherwise an update might get stuck due to the default maxUnavailable=50% in combination with the
# podAntiAffinity, which prevents deployment of multiple operator replicas on the same node.
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 50%
type: RollingUpdate
template:
metadata:
annotations:
# ensure pods roll when configmap updates
cilium.io/cilium-configmap-checksum: "31ad7748e0aefe75b6436d96c8c85754e0b44e68e6012fa188bc5bcd66085828"
labels:
io.cilium/app: operator
name: cilium-operator
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-operator
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: cilium-operator
image: "quay.io/cilium/operator-generic:v1.18.4@sha256:1b22b9ff28affdf574378a70dade4ef835b00b080c2ee2418530809dd62c3012"
imagePullPolicy: IfNotPresent
command:
- cilium-operator-generic
args:
- --config-dir=/tmp/cilium/config-map
- --debug=$(CILIUM_DEBUG)
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_DEBUG
valueFrom:
configMapKeyRef:
key: debug
name: cilium-config
optional: true
- name: KUBERNETES_SERVICE_HOST
value: "localhost"
- name: KUBERNETES_SERVICE_PORT
value: "7445"
ports:
- name: prometheus
containerPort: 9963
hostPort: 9963
protocol: TCP
livenessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 3
readinessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9234
scheme: HTTP
initialDelaySeconds: 0
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 5
volumeMounts:
- name: cilium-config-path
mountPath: /tmp/cilium/config-map
readOnly: true
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
terminationMessagePolicy: FallbackToLogsOnError
hostNetwork: true
restartPolicy: Always
priorityClassName: system-cluster-critical
serviceAccountName: "cilium-operator"
automountServiceAccountToken: true
# In HA mode, cilium-operator pods must not be scheduled on the same
# node as they will clash with each other.
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
io.cilium/app: operator
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
tolerations:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.cilium.io/agent-not-ready
operator: Exists
volumes:
# To read the configuration from the config map
- name: cilium-config-path
configMap:
name: cilium-config


@@ -0,0 +1,128 @@
---
# Source: cilium/charts/cilium/templates/hubble-relay/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: hubble-relay
namespace: kube-system
labels:
k8s-app: hubble-relay
app.kubernetes.io/name: hubble-relay
app.kubernetes.io/part-of: cilium
spec:
replicas: 1
selector:
matchLabels:
k8s-app: hubble-relay
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations:
labels:
k8s-app: hubble-relay
app.kubernetes.io/name: hubble-relay
app.kubernetes.io/part-of: cilium
spec:
securityContext:
fsGroup: 65532
seccompProfile:
type: RuntimeDefault
containers:
- name: hubble-relay
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
image: "quay.io/cilium/hubble-relay:v1.18.4@sha256:6d350cb1c84b847adb152173debef1f774126c69de21a5921a1e6a23b8779723"
imagePullPolicy: IfNotPresent
command:
- hubble-relay
args:
- serve
ports:
- name: grpc
containerPort: 4245
readinessProbe:
grpc:
port: 4222
timeoutSeconds: 3
# livenessProbe will kill the pod, so we should be very conservative
# here on failures: killing the pod should be a last resort, and
# we should provide enough time for relay to retry before killing it.
livenessProbe:
grpc:
port: 4222
timeoutSeconds: 10
# Give relay time to establish connections and make a few retries
# before starting livenessProbes.
initialDelaySeconds: 10
# 10 seconds * 12 failures = 2 minutes of failure.
# If relay cannot become healthy after 2 minutes, then killing it
# might resolve whatever issue is occurring.
#
# 10 seconds is a reasonable retry period so we can see if it's
# failing regularly or only sporadically.
periodSeconds: 10
failureThreshold: 12
startupProbe:
grpc:
port: 4222
# Give relay time to get its certs, establish connections, and
# make a few retries before starting startupProbes.
initialDelaySeconds: 10
# 20 * 3 seconds = 1 minute of failure before we consider startup as failed.
failureThreshold: 20
# Retry more frequently at startup so that it can be considered started more quickly.
periodSeconds: 3
volumeMounts:
- name: config
mountPath: /etc/hubble-relay
readOnly: true
- name: tls
mountPath: /var/lib/hubble-relay/tls
readOnly: true
terminationMessagePolicy: FallbackToLogsOnError
restartPolicy: Always
priorityClassName:
serviceAccountName: "hubble-relay"
automountServiceAccountToken: false
terminationGracePeriodSeconds: 1
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
volumes:
- name: config
configMap:
name: hubble-relay-config
items:
- key: config.yaml
path: config.yaml
- name: tls
projected:
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
sources:
- secret:
name: hubble-relay-client-certs
items:
- key: tls.crt
path: client.crt
- key: tls.key
path: client.key
- key: ca.crt
path: hubble-server-ca.crt


@@ -0,0 +1,83 @@
---
# Source: cilium/charts/cilium/templates/hubble-ui/deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
name: hubble-ui
namespace: kube-system
labels:
k8s-app: hubble-ui
app.kubernetes.io/name: hubble-ui
app.kubernetes.io/part-of: cilium
spec:
replicas: 1
selector:
matchLabels:
k8s-app: hubble-ui
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations:
labels:
k8s-app: hubble-ui
app.kubernetes.io/name: hubble-ui
app.kubernetes.io/part-of: cilium
spec:
securityContext:
fsGroup: 1001
runAsGroup: 1001
runAsUser: 1001
priorityClassName:
serviceAccountName: "hubble-ui"
automountServiceAccountToken: true
containers:
- name: frontend
image: "quay.io/cilium/hubble-ui:v0.13.3@sha256:661d5de7050182d495c6497ff0b007a7a1e379648e60830dd68c4d78ae21761d"
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 8081
livenessProbe:
httpGet:
path: /healthz
port: 8081
readinessProbe:
httpGet:
path: /
port: 8081
volumeMounts:
- name: hubble-ui-nginx-conf
mountPath: /etc/nginx/conf.d/default.conf
subPath: nginx.conf
- name: tmp-dir
mountPath: /tmp
terminationMessagePolicy: FallbackToLogsOnError
securityContext:
allowPrivilegeEscalation: false
- name: backend
image: "quay.io/cilium/hubble-ui-backend:v0.13.3@sha256:db1454e45dc39ca41fbf7cad31eec95d99e5b9949c39daaad0fa81ef29d56953"
imagePullPolicy: IfNotPresent
env:
- name: EVENTS_SERVER_PORT
value: "8090"
- name: FLOWS_API_ADDR
value: "hubble-relay:80"
ports:
- name: grpc
containerPort: 8090
volumeMounts:
terminationMessagePolicy: FallbackToLogsOnError
securityContext:
allowPrivilegeEscalation: false
nodeSelector:
kubernetes.io/os: linux
volumes:
- configMap:
defaultMode: 420
name: hubble-ui-nginx
name: hubble-ui-nginx-conf
- emptyDir: {}
name: tmp-dir


@@ -0,0 +1,30 @@
---
# Source: cilium/templates/http-route.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-route-hubble
namespace: kube-system
labels:
app.kubernetes.io/name: http-route-hubble
app.kubernetes.io/instance: cilium
app.kubernetes.io/part-of: cilium
spec:
parentRefs:
- group: gateway.networking.k8s.io
kind: Gateway
name: traefik-gateway
namespace: traefik
hostnames:
- hubble.alexlebens.net
rules:
- matches:
- path:
type: PathPrefix
value: /
backendRefs:
- group: ''
kind: Service
name: hubble-ui
port: 80
weight: 100


@@ -0,0 +1,9 @@
---
# Source: cilium/charts/cilium/templates/cilium-secrets-namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
annotations:


@@ -0,0 +1,18 @@
---
# Source: cilium/charts/cilium/templates/cilium-agent/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cilium-config-agent
namespace: kube-system
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch


@@ -0,0 +1,18 @@
---
# Source: cilium/charts/cilium/templates/cilium-agent/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cilium-gateway-secrets
namespace: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch


@@ -0,0 +1,19 @@
---
# Source: cilium/charts/cilium/templates/cilium-operator/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cilium-operator-gateway-secrets
namespace: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- update
- patch
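
This write access pairs with enable-gateway-api-secrets-sync: "true" and gateway-api-secrets-namespace: "cilium-secrets" in the cilium-config ConfigMap above: the operator copies TLS Secrets referenced by Gateway listeners into the cilium-secrets namespace, where the read-only Roles for the agent grant access. A hypothetical Gateway whose certificate would be picked up by that sync (all names and the namespace are assumptions, not part of this PR):

---
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: example-gateway            # hypothetical, not in this PR
  namespace: default
spec:
  gatewayClassName: cilium         # assumes the Cilium GatewayClass is in use
  listeners:
    - name: https
      port: 443
      protocol: HTTPS
      tls:
        mode: Terminate
        certificateRefs:
          - kind: Secret
            name: example-tls      # Secret in the Gateway's namespace; synced into cilium-secrets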


@@ -0,0 +1,19 @@
---
# Source: cilium/charts/cilium/templates/cilium-operator/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cilium-operator-tlsinterception-secrets
namespace: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- update
- patch


@@ -0,0 +1,18 @@
---
# Source: cilium/charts/cilium/templates/cilium-agent/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cilium-tlsinterception-secrets
namespace: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch


@@ -0,0 +1,17 @@
---
# Source: cilium/charts/cilium/templates/cilium-agent/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cilium-config-agent
namespace: kube-system
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-config-agent
subjects:
- kind: ServiceAccount
name: "cilium"
namespace: kube-system


@@ -0,0 +1,17 @@
---
# Source: cilium/charts/cilium/templates/cilium-agent/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cilium-gateway-secrets
namespace: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-gateway-secrets
subjects:
- kind: ServiceAccount
name: "cilium"
namespace: kube-system


@@ -0,0 +1,17 @@
---
# Source: cilium/charts/cilium/templates/cilium-operator/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cilium-operator-gateway-secrets
namespace: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-operator-gateway-secrets
subjects:
- kind: ServiceAccount
name: "cilium-operator"
namespace: kube-system


@@ -0,0 +1,17 @@
---
# Source: cilium/charts/cilium/templates/cilium-operator/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cilium-operator-tlsinterception-secrets
namespace: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-operator-tlsinterception-secrets
subjects:
- kind: ServiceAccount
name: "cilium-operator"
namespace: kube-system


@@ -0,0 +1,17 @@
---
# Source: cilium/charts/cilium/templates/cilium-agent/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cilium-tlsinterception-secrets
namespace: "cilium-secrets"
labels:
app.kubernetes.io/part-of: cilium
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cilium-tlsinterception-secrets
subjects:
- kind: ServiceAccount
name: "cilium"
namespace: kube-system


@@ -0,0 +1,10 @@
---
# Source: cilium/charts/cilium/templates/cilium-ca-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: cilium-ca
namespace: kube-system
data:
ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGRENDQWZ5Z0F3SUJBZ0lSQU1IM3pmdG5RbXNDcW9WQ2Nab0c2b1V3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSlEybHNhWFZ0SUVOQk1CNFhEVEkxTVRJd05ESXhNak0xTkZvWERUSTRNVEl3TXpJeApNak0xTkZvd0ZERVNNQkFHQTFVRUF4TUpRMmxzYVhWdElFTkJNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUE2TWF5ellrTHhRVHVraHNKTUV2NHo1TDIwRGl5bmZ4d2d5b0NWTVBXSG1FNDlwc3YKWVdJbW1uTzM5R09Md2pRbmM0dVlra0NHQlVDOUNNeVhNSElFTllWMXRLQkI2MWtsU2tmL3dVaFJsRHZlWWJ2dwpyREt4NExiQ0hhRmRRb09Zeml1N0F6ZXZiREpFS1BvNkt4RVFwUzlBRnpjM1Rld0ppR2tYaDFteWRTbzh4d1JHCmdkU1luWG1zWHVmRk5id0grZy9GZ1o0eTB3a0pvMzBmd0lvbm83dW9qODJ5cUVMSkxocDY3anFUQTkrWkRCQXAKcVp4eDVmK3EyQTNLTFhwTzNBZ2kvcEdaZ1daOHZpTXRQeTBJSDU2ZVN1NUdpeTJkczVjVGlqTDE4VTRMcVQ3Vgp4alFkUm5GS3pQSUdhMXZZV0JFQTJCcFlDbFprcVRZU3BZZUN4UUlEQVFBQm8yRXdYekFPQmdOVkhROEJBZjhFCkJBTUNBcVF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUYKTUFNQkFmOHdIUVlEVlIwT0JCWUVGQnVQSndRVEdQd0tqamJxSk9WYk42QWIyMzhWTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQmI1d2U2UXFWbUs5czBIdm1tL253N3JVMWp6WG9xbjFRSi8vUmhWOHE0WW8vdHozWGFIR1hFCkhDb3d5eU5SRkRrajliRVY1aUFtTS9CeUZGYzRmNEk3bzBmSnFoendGa3lHbTUybXoxV1FMb1d2dmREYVR5UDMKTlF2UEVoTkk0VmhSMWFWQUxyMXJpeDYwc05yVGN6Qk4rNWRqNUZXK0xBeTZna2dDQzlnYXB4VURxaS9rZmhtcAovRTZWejB6TnF0bHROZjJiUS8zb3BWVXN1M3FXQlNqUFZUQXM0cVNYRFJTamxoYnVraGQvR28zMkxFdm5nY0tBCktidEdoK1BpLzBKUXJML0tHUGhjUTRBRGhxRm8zMzJnWDd5ZUVuNlEyVzh2SWlpekVsK2dXR3FrZUR4eVBJYmkKSWJUdmtKVkdZazFCeFVsU3JuaStPWE9pVFBlZFZ4dUoKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
ca.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBNk1heXpZa0x4UVR1a2hzSk1FdjR6NUwyMERpeW5meHdneW9DVk1QV0htRTQ5cHN2CllXSW1tbk8zOUdPTHdqUW5jNHVZa2tDR0JVQzlDTXlYTUhJRU5ZVjF0S0JCNjFrbFNrZi93VWhSbER2ZVlidncKckRLeDRMYkNIYUZkUW9PWXppdTdBemV2YkRKRUtQbzZLeEVRcFM5QUZ6YzNUZXdKaUdrWGgxbXlkU284eHdSRwpnZFNZblhtc1h1ZkZOYndIK2cvRmdaNHkwd2tKbzMwZndJb25vN3VvajgyeXFFTEpMaHA2N2pxVEE5K1pEQkFwCnFaeHg1ZitxMkEzS0xYcE8zQWdpL3BHWmdXWjh2aU10UHkwSUg1NmVTdTVHaXkyZHM1Y1RpakwxOFU0THFUN1YKeGpRZFJuRkt6UElHYTF2WVdCRUEyQnBZQ2xaa3FUWVNwWWVDeFFJREFRQUJBb0lCQUdaSU1QY0pXZ2VYVkRFVQoraU9qcmsrZlZ4NUZXNDc2Qm9KTDZqeE8xUDRDWUdKVDdDelJVUDhiQytDeUFRNHpsb1k1OTYrRDlKL0lRNGZqClJpZStZY1VXd0pLK3ZGaHFrbTBpVitvWkdlZVZxdkN0QVdMWTM3cEs4OTdMTWN4eFFLcVBJcHlXVWxtcmVMRzgKeWVSMG1pakxLMUZJTE4vcmNJRGxGTGFmbmovR21lSmlYSHZWV0pGQkRxN0NReCsxVldyQU0vNU5udXVXNlhKVgpGRC9BbS81WVZQeU9yVmZUMXFNaTZrV3QvclZrK1VOZm0xbDRGSExiL3lBbm1HSTM1SExBMTJkNjRhQXhmY2FrCk1Xa2NDcGxZWm9qUjlzVGlibENwNTA5Wk1JdUJ6S01TQ0t5TThNYmZmQ2gvTEt0bXdZcHVCelY5bzZnVHA1cmsKcjRBc1dYMENnWUVBOTlRTU1EWkJlRElzTGlMN3hJeFRkTGNlT05VbWVzdm4vOGkzbGRZVW82LzNnTTFpeHZKKwo4NkVncnp5NGM2YlMvQll3dzZtcFlPdlVZT2VSbjJmaWcvK05xYzI2Ym1GcDRNaS9vNGY3ejVUMUtrb1Fhd1pxCjBTaVJzZ3ZMSElYUk5NZDFSc0tkcEY3WUo3YWNFbE9sa2JWbDBlcy81bjFibG8xTmd4VWFic01DZ1lFQThIT1gKK0xuWWNpTk1iVWhuVjBhRjVJcFN3TVc4WmtzRFgzUFNxZkEvWVBRVEVUZENiYk9JTW5ua3NBZ3A4T1NZV0tYawpZMW1YTHBKcnhwTEEyRklLYVdvTHo4UU5wUEVBeDJBTzJCNytuMlN6MGRub016TS9tZkQrZ0JjY1J5WkpTVHVhCm8vWk5DaHpSdGlrWEhtbHpPSGFoNDBTdFdCNlZUZFUvNWxSN3Y5Y0NnWUFTZUFjQWdNcVlQb1QwV3o4UUFyVWwKZVpBVEJDOTk3encyMzZwdXpDbDFtV09FZ2NuZXNhdXNWRkd2UFlNeGV6azErQ0ZpTTM2ZjduTkJWWUNRdEprNAo1UTZWblBOTHpvNnBaY3lDVDF3QXVFVWRCYS83aWRya0EzWDRJdERjSzB6UDloMVRUS1dhM2thVmp4SktlMWxYClpvRlVCNGdYa1RnR0hEbXVqeUFOS1FLQmdHT3Nsei9NMGxPbUhFVms4U20xbTBVOHJjY3BjTXE2cDhCR3cvb2MKRkRtOWs3WGNRVlkyU093YnFWZnJXbTk0WTByRUllT3BQTWxhZXdES3p0UWRWaUdTaXlqcUxYbkZTYnhjTG1zYQpsaFErNjJlL084a1ZvblRPK01EaVU1K08yR3hhSCtWOThUdVM3TC9sVXFjUnNXaWpqTUlvTXU5SFpqN29lbCtoCmY5YnJBb0dCQU44RmdGejJ6ZWdpOWM2cGdjSWN2TVRsTDRmQXdCMjhXbWp5ejlpeGQ3MjlLSkpia3MzRTFENTMKbnpIeEpNaEdncGkyNXhvM21TaHJQZERsRjJBUjluV2hiUENiOVdZTWlacVBVY25kclNWZmVBeVNZVVlJWnhTbQorNkFkVzVIWkhEemZ1cmptNysrSFkvNWQ1aFZGN3VkOEhNQm5HNStDNjBtL2hwbFRoVkJzCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==


@@ -0,0 +1,12 @@
---
# Source: cilium/charts/cilium/templates/hubble/tls-helm/relay-client-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: hubble-relay-client-certs
namespace: kube-system
type: kubernetes.io/tls
data:
ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGRENDQWZ5Z0F3SUJBZ0lSQU1IM3pmdG5RbXNDcW9WQ2Nab0c2b1V3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSlEybHNhWFZ0SUVOQk1CNFhEVEkxTVRJd05ESXhNak0xTkZvWERUSTRNVEl3TXpJeApNak0xTkZvd0ZERVNNQkFHQTFVRUF4TUpRMmxzYVhWdElFTkJNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUE2TWF5ellrTHhRVHVraHNKTUV2NHo1TDIwRGl5bmZ4d2d5b0NWTVBXSG1FNDlwc3YKWVdJbW1uTzM5R09Md2pRbmM0dVlra0NHQlVDOUNNeVhNSElFTllWMXRLQkI2MWtsU2tmL3dVaFJsRHZlWWJ2dwpyREt4NExiQ0hhRmRRb09Zeml1N0F6ZXZiREpFS1BvNkt4RVFwUzlBRnpjM1Rld0ppR2tYaDFteWRTbzh4d1JHCmdkU1luWG1zWHVmRk5id0grZy9GZ1o0eTB3a0pvMzBmd0lvbm83dW9qODJ5cUVMSkxocDY3anFUQTkrWkRCQXAKcVp4eDVmK3EyQTNLTFhwTzNBZ2kvcEdaZ1daOHZpTXRQeTBJSDU2ZVN1NUdpeTJkczVjVGlqTDE4VTRMcVQ3Vgp4alFkUm5GS3pQSUdhMXZZV0JFQTJCcFlDbFprcVRZU3BZZUN4UUlEQVFBQm8yRXdYekFPQmdOVkhROEJBZjhFCkJBTUNBcVF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUYKTUFNQkFmOHdIUVlEVlIwT0JCWUVGQnVQSndRVEdQd0tqamJxSk9WYk42QWIyMzhWTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQmI1d2U2UXFWbUs5czBIdm1tL253N3JVMWp6WG9xbjFRSi8vUmhWOHE0WW8vdHozWGFIR1hFCkhDb3d5eU5SRkRrajliRVY1aUFtTS9CeUZGYzRmNEk3bzBmSnFoendGa3lHbTUybXoxV1FMb1d2dmREYVR5UDMKTlF2UEVoTkk0VmhSMWFWQUxyMXJpeDYwc05yVGN6Qk4rNWRqNUZXK0xBeTZna2dDQzlnYXB4VURxaS9rZmhtcAovRTZWejB6TnF0bHROZjJiUS8zb3BWVXN1M3FXQlNqUFZUQXM0cVNYRFJTamxoYnVraGQvR28zMkxFdm5nY0tBCktidEdoK1BpLzBKUXJML0tHUGhjUTRBRGhxRm8zMzJnWDd5ZUVuNlEyVzh2SWlpekVsK2dXR3FrZUR4eVBJYmkKSWJUdmtKVkdZazFCeFVsU3JuaStPWE9pVFBlZFZ4dUoKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURTVENDQWpHZ0F3SUJBZ0lSQUkrUCtiR0VmdVlaOEcxZm1BRUFmNkl3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSlEybHNhWFZ0SUVOQk1CNFhEVEkxTVRJd05ESXhNak0xTkZvWERUSTJNVEl3TkRJeApNak0xTkZvd0l6RWhNQjhHQTFVRUF3d1lLaTVvZFdKaWJHVXRjbVZzWVhrdVkybHNhWFZ0TG1sdk1JSUJJakFOCkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQW5ZMVZLdHVoR1AzcUFuV0lFcHQwRkk0d2syMFcKNDNBY3F4WkpNVU12QVd3cktVOVpnVWxmYUpCd3dHQUpjeTZNUG9ocWFTbWcyVmFsR1ZrWTc1L0xKcFdKaFNnUgpmQTlaUm9MTzQ0bDc2cGhxV1F2Y1R1MTV3STM2aFJZOUpwbzQzaUVnSGd5c2plTXdQd0FiUVhYNDNPZHo5djJDCitNcmtmSWdKK2dVelArZHg0NVU5KzZibExOek1xZkwxUDhPZ2RycktEcHRCcFVxek5mY093VnFjeit3YUdybnIKOWFCUXo3Zk4vSnJXU2NLaGNlZE85OVJpM2dlVit2aGwwOEhkODd4NTAzQ25YRFRJRWc3cEtzMTE1UFZQRTJpdgpUZ0h1MER0WGVCS0JzdTZlVHFFaEl0U3FtbHJkck93OW5LQ0ZRRmJ0RWwxTjFiMnZFSGFBRVcrYnZRSURBUUFCCm80R0dNSUdETUE0R0ExVWREd0VCL3dRRUF3SUZvREFkQmdOVkhTVUVGakFVQmdnckJnRUZCUWNEQVFZSUt3WUIKQlFVSEF3SXdEQVlEVlIwVEFRSC9CQUl3QURBZkJnTlZIU01FR0RBV2dCUWJqeWNFRXhqOENvNDI2aVRsV3plZwpHOXQvRlRBakJnTlZIUkVFSERBYWdoZ3FMbWgxWW1Kc1pTMXlaV3hoZVM1amFXeHBkVzB1YVc4d0RRWUpLb1pJCmh2Y05BUUVMQlFBRGdnRUJBRU5mYXY3MkZCQmdNd3VudjBySVU3UXVoTUU1NnBKTW5DTHZkcjVhVnpQTUozalgKSjVSKzhqYW94ZUw0ODBnZWZYaFB4cmw4VUN5cXN2MEJ2bUpURldDcTg1S3lYZExUdW1FL2l0UW5nTlJDY1pIZQpEdGI2SGduYTVoZHBCd0lYa0NXWVo1L252TkhXVG1hWE1RbkRIRHpnaXMvK3F5MWVsM05PMkFyY2h0eDFQT3NsCktxN1IrQXp3bXlqQVVHR1VjMjhhakw1SHB5N3BTUWFBNHV6VTJQOVU3WkRxa1l3R3pYTVhCcjlwSENteWpDUk8Kalg2UDUrNmtoZmlLSVJVZUN4b1g0VDVVZUxscjBQWi8yeWkvdExNTUFoTTBTRUJHcWRuUXNVOUpuZzl4VTJxWAo2RGk0U0NlN0U4OVFSaHZrSDE5bDR6SUpvc3BUaW8wcFV2OXdxdWs9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBblkxVkt0dWhHUDNxQW5XSUVwdDBGSTR3azIwVzQzQWNxeFpKTVVNdkFXd3JLVTlaCmdVbGZhSkJ3d0dBSmN5Nk1Qb2hxYVNtZzJWYWxHVmtZNzUvTEpwV0poU2dSZkE5WlJvTE80NGw3NnBocVdRdmMKVHUxNXdJMzZoUlk5SnBvNDNpRWdIZ3lzamVNd1B3QWJRWFg0M09kejl2MkMrTXJrZklnSitnVXpQK2R4NDVVOQorNmJsTE56TXFmTDFQOE9nZHJyS0RwdEJwVXF6TmZjT3dWcWN6K3dhR3JucjlhQlF6N2ZOL0pyV1NjS2hjZWRPCjk5UmkzZ2VWK3ZobDA4SGQ4N3g1MDNDblhEVElFZzdwS3MxMTVQVlBFMml2VGdIdTBEdFhlQktCc3U2ZVRxRWgKSXRTcW1scmRyT3c5bktDRlFGYnRFbDFOMWIydkVIYUFFVytidlFJREFRQUJBb0lCQUFOMjFSV1JLTFNrK0owLwpPbE8yYVZIK1ltRUxlQUplTjdNblZVcXBBSUtXZjBoMFB6S1JpM0NBSklxR1VnN2ZTaVFoMmIwZ05qSXhMb1lsCktHQTBja0lSN2FrbzNXR044TmVWRHJtQUQySjBJWHA5d1VWWEFXTlRlOXVyUm9Mdnh5cFRMaUtiQXJ4SDc0cWoKRlBaV2dyZFF1NE1GeGZoMGRadXJYaFRLUW5zNE1ibzBMYUR3cjEvVE0wbGQ4SWhaQVhlSW9ickxaNG03UnFXOQpMZW1NQTNGbDJoSXMxSjY4NnZQS3lDVGlYZVEwWFlpVWVmWFZuWHNObzlMTHFEVWhISEhSTTlKMWdkQ0Q5N0c0CklVVjk0U0lQY1lVSWF0djJ1aHBISlM2VEdsemNZdjdlS0ZFWFh2MGlLZHVxK3dURVdmdUVybDNsay8yVkVaOGgKSGF6MldnRUNnWUVBemhCbXU2cTBPWVR5VFFMQmNmb1k3MWwwdllueWlJMi82STE1Wld6TTR5b3JPNDFYSmlTSQpxNkZFWEoyeVExdEFGRHVhRUV2eElmZzZPL3lDQjVDWGg5LzhKaTc5T2h2VTJUMU5mOHhkKzltdGd2YnlPZFNKCnZKbThNZUZiR0lPZ3R2WVVueWVKa3MwRWppZ0JLSk9WZlV3NkxPY1k0cGg2cHg5UlRrN04wWUVDZ1lFQXc3dGsKaTdYNlFQa0FnZ29iVXVZcnM2N3V4aGRndHNJZm1VU0FrTnpUNW9WZU1CZERYVUZHUHVkdnVUUFRTNWM4YkxZSQprd0NHMFRaQ0Y2YkxHOXVKUjRXd3dka240QU1mYURtRVROVnY1aEdTMTVMNEttL0ZwRVFoeSt4TGxmWEo3cFdpCmdReVV6aW5lTjc1VVR5eFVuOThIL0VxTFVySEs1b0g2OG9CY3NEMENnWUJYS0lqTGlBQ0M4OENvNGltQVZCQ0gKN0laOFJRVFl6VndHdVJCSDNpVXJVSUxJeFplL0FtVHNjMkcrNnRTanVNejV3ZHdBV0o2OWg0amFKb3U1TjJkZwppVlRWc2JwYUFidWR4MWdvWGdEQ3JlZStKVVYvUUc2aDMwVEN0RmNibjFFekVoTFczcHBLQlgyUHcwdjZKYkp0Ck5QakdWVmtwUHVWaUd0VUJFa1BzQVFLQmdRQ2h5aGhHVTluVU12NzBmcDR2WEFycnI4RzNCRVB4VFcwSko5S04KNjd0TVpSbUI3dmpEOE55N3lscFRZRDliMEZzbUVTSHRRUll2Z1FZSXZ0cE00SjhNKzdrcnd2QnUvcVhMWlIreQowd3AzeGpiMjhva25xdFJYLytsZndxYUlBbTgwUTRQYjRDWndjN00zcUovUUFmakJYWDRnVVEzeEFQMlNwQVptCnhTdllrUUtCZ0J5WmVlK0ZhczNwazZVZFdYNCtQZHQ3UW96YURGSEhlUWpSc2I2Y1NpT1liNnhUWTZNNkhxSEgKMDNrbXZPVVI2dkptaDFuOURFSXRjUkdVb3I4QXh5aHNBeEtBNU1GZWMxK29xZHVkb2M4VkVYQUxuL2NBdmJWeQpaV05UWlN1d0I5YitlY1grR3B2T1hPMGdBci9sN1JReTNHZ2R1OHB0TTUwdWlwZXJHZExrCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
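This Secret is the client half of the Hubble mTLS pair: hubble-relay presents tls.crt/tls.key when it dials each node's Hubble server and verifies that server against ca.crt. As a hedged illustration, the relay Deployment would consume it roughly like the fragment below; the volume name and mount path are assumptions, not values taken from this render.

# Sketch only (not rendered by the chart): pod-spec fragment showing how a
# relay pod might mount hubble-relay-client-certs; names and path are assumed.
volumes:
  - name: relay-client-tls
    secret:
      secretName: hubble-relay-client-certs
containers:
  - name: hubble-relay
    volumeMounts:
      - name: relay-client-tls
        mountPath: /var/lib/hubble-relay/tls
        readOnly: true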

View File

@@ -0,0 +1,12 @@
---
# Source: cilium/charts/cilium/templates/hubble/tls-helm/server-secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: hubble-server-certs
namespace: kube-system
type: kubernetes.io/tls
data:
ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGRENDQWZ5Z0F3SUJBZ0lSQU1IM3pmdG5RbXNDcW9WQ2Nab0c2b1V3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSlEybHNhWFZ0SUVOQk1CNFhEVEkxTVRJd05ESXhNak0xTkZvWERUSTRNVEl3TXpJeApNak0xTkZvd0ZERVNNQkFHQTFVRUF4TUpRMmxzYVhWdElFTkJNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUE2TWF5ellrTHhRVHVraHNKTUV2NHo1TDIwRGl5bmZ4d2d5b0NWTVBXSG1FNDlwc3YKWVdJbW1uTzM5R09Md2pRbmM0dVlra0NHQlVDOUNNeVhNSElFTllWMXRLQkI2MWtsU2tmL3dVaFJsRHZlWWJ2dwpyREt4NExiQ0hhRmRRb09Zeml1N0F6ZXZiREpFS1BvNkt4RVFwUzlBRnpjM1Rld0ppR2tYaDFteWRTbzh4d1JHCmdkU1luWG1zWHVmRk5id0grZy9GZ1o0eTB3a0pvMzBmd0lvbm83dW9qODJ5cUVMSkxocDY3anFUQTkrWkRCQXAKcVp4eDVmK3EyQTNLTFhwTzNBZ2kvcEdaZ1daOHZpTXRQeTBJSDU2ZVN1NUdpeTJkczVjVGlqTDE4VTRMcVQ3Vgp4alFkUm5GS3pQSUdhMXZZV0JFQTJCcFlDbFprcVRZU3BZZUN4UUlEQVFBQm8yRXdYekFPQmdOVkhROEJBZjhFCkJBTUNBcVF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUYKTUFNQkFmOHdIUVlEVlIwT0JCWUVGQnVQSndRVEdQd0tqamJxSk9WYk42QWIyMzhWTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQmI1d2U2UXFWbUs5czBIdm1tL253N3JVMWp6WG9xbjFRSi8vUmhWOHE0WW8vdHozWGFIR1hFCkhDb3d5eU5SRkRrajliRVY1aUFtTS9CeUZGYzRmNEk3bzBmSnFoendGa3lHbTUybXoxV1FMb1d2dmREYVR5UDMKTlF2UEVoTkk0VmhSMWFWQUxyMXJpeDYwc05yVGN6Qk4rNWRqNUZXK0xBeTZna2dDQzlnYXB4VURxaS9rZmhtcAovRTZWejB6TnF0bHROZjJiUS8zb3BWVXN1M3FXQlNqUFZUQXM0cVNYRFJTamxoYnVraGQvR28zMkxFdm5nY0tBCktidEdoK1BpLzBKUXJML0tHUGhjUTRBRGhxRm8zMzJnWDd5ZUVuNlEyVzh2SWlpekVsK2dXR3FrZUR4eVBJYmkKSWJUdmtKVkdZazFCeFVsU3JuaStPWE9pVFBlZFZ4dUoKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURWekNDQWorZ0F3SUJBZ0lSQUpYK0NBbmJ2SkZ1eVRYQ1ludkdLWmd3RFFZSktvWklodmNOQVFFTEJRQXcKRkRFU01CQUdBMVVFQXhNSlEybHNhWFZ0SUVOQk1CNFhEVEkxTVRJd05ESXhNak0xTkZvWERUSTJNVEl3TkRJeApNak0xTkZvd0tqRW9NQ1lHQTFVRUF3d2ZLaTVrWldaaGRXeDBMbWgxWW1Kc1pTMW5jbkJqTG1OcGJHbDFiUzVwCmJ6Q0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUxETmppbDl3MUZnTHMrSmgvN0UKM0RKT2RhNlZ4ZmRxVWMwYitxZ3g4TlI5YUx2cWErZjJTN0ZFMitleUFSUDFraFZCVHZjUkNkUlkzOWZLR3NXNAoyMXlnZmsvbkFKR1lUMkMyRGdwb0xlUjBHaTZrYkFHKy9pblE5ekxFSzJrU1BEMTNtRjdITEdWTXpKcGI2ZkU5CjllVS9wc1ZNVzQvU2NBUVM0eHQ3RzkxcE45QWcybVpYQjQ4eUN0TG1XMDJHS1VEbWs0U1NNNzJqeGxYa212M2UKSTBoWlpPTkxzOTR6UGZ4eHZCcDlEQXltYm5CZDA2VmpzVWllS3F2UGYzcEl0OCs0WHJQYmdiS21JL25XOFc1NwpTNFZKQWpFMGNwZWpCSHVwNGtCMll2OEtvSkI0c08xNGxlS0t5S2ZjLzc3cEdRajRxRTkzT25sMXpEWHJaaWRMCjRIa0NBd0VBQWFPQmpUQ0JpakFPQmdOVkhROEJBZjhFQkFNQ0JhQXdIUVlEVlIwbEJCWXdGQVlJS3dZQkJRVUgKQXdFR0NDc0dBUVVGQndNQ01Bd0dBMVVkRXdFQi93UUNNQUF3SHdZRFZSMGpCQmd3Rm9BVUc0OG5CQk1ZL0FxTwpOdW9rNVZzM29CdmJmeFV3S2dZRFZSMFJCQ013SVlJZktpNWtaV1poZFd4MExtaDFZbUpzWlMxbmNuQmpMbU5wCmJHbDFiUzVwYnpBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQVI1L3k5RHF5TURjdFBPS1h3T0lCMnJBVHFpT3cKUnN4ditHM0Jyb3RWZG02YVVGYi84aUh0aCt0dGVBTGIzMkNpekUyZ1hpZmxSczZ4bHM4SjFqNHc5aXUwYWNqOApEMHNyK2grV05BY3dwQkNmanZCVlEyV3krTktPSy83UUF3R1pNRUhwZ3ZhS1N3YUVWNEE3L2F0a3FTZzh2UDlmCjJsZVhsT0NCanpPenJhdG5aWnFsYXFESDRPb1d1WkJsbFZTQzFtQkhjaGdzWTQvVXVxcDNreWM2SjNoZkdFTjYKUFEveEMrSk45Mzk4eUJkVjNFdHVqVFRkQTNYNTlHZ3hZQW84U1pxN0h4Uk03Tlg4Q1FhRU4vNEc4eDZPeEs0cwpmM09ITW9uTlBZblNyd1FQNjFmWTIwaUFra2hWN2pKMzNzMUtYK3RiNm1sOUlxNXI2QVlVS0ZRYTBBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBc00yT0tYM0RVV0F1ejRtSC9zVGNNazUxcnBYRjkycFJ6UnY2cURIdzFIMW91K3ByCjUvWkxzVVRiNTdJQkUvV1NGVUZPOXhFSjFGamYxOG9heGJqYlhLQitUK2NBa1poUFlMWU9DbWd0NUhRYUxxUnMKQWI3K0tkRDNNc1FyYVJJOFBYZVlYc2NzWlV6TW1sdnA4VDMxNVQrbXhVeGJqOUp3QkJMakczc2IzV2szMENEYQpabGNIanpJSzB1WmJUWVlwUU9hVGhKSXp2YVBHVmVTYS9kNGpTRmxrNDB1ejNqTTkvSEc4R24wTURLWnVjRjNUCnBXT3hTSjRxcTg5L2VraTN6N2hlczl1QnNxWWorZGJ4Ym50TGhVa0NNVFJ5bDZNRWU2bmlRSFppL3dxZ2tIaXcKN1hpVjRvcklwOXovdnVrWkNQaW9UM2M2ZVhYTU5ldG1KMHZnZVFJREFRQUJBb0lCQUR3RkRBVzUwOXlFTWwveAo2R0RzbzNQUWZ0TE50VzdRZ1FTSVF3ZG1MTU4xNzJBYnJKWUtYcGZITUgvdlJiNW8xemF2L1FhU21pdWtJQms1Cld1MVVXK09OK0ZOWHNERzg2dldCMkRKZ08zUWFsZWpQbUhyZlE1a2VmMHlVbThnSGtMdXRKZVRHNktOYTl2cFAKeVpORDNYMzlwYmZFdkRDTzE4Z0tRVjdLNUNLOVRGRnNLQUxIUHZydGN0ajZlZ2szWlFzdzNHQVJvM0l5aWw2WgpJRGh0b1pEOVBIQkpaME1Oazc4THNLelRya3Y3Q0JIZDJScEFTWTJOQzBBZ0NqcEx0aVNJR2l5QWdjaEF4UmJBClVaUEFIMmFHK25NSGZ0NFF2QUFGdTZkMVhMYTFUZ1lwekhxYjZvN0xTcEFWU3NMOXFJUTA0WVNFaTBOQUVUaGUKRHdVRTJvRUNnWUVBd2ppa3FnTDR2dHhRNXpaRlp1bGowL0dJYVNGRElzRExmVG5wbkt3VTRzYlBWcHRiNWRycApCMVk1WXVWRUZjZUJsZHBwTmtuQkpBN3pnTE55bmNFTWQ1N1NrTFlPb1IrdEpLZGpXMndCSHR4alBBSjc3YTVFCnFCUzg4TjBWallSQjNhTVkxM2VUSEdpOUp3dlNvQ3FGaHR0emMybWpRbHRoa3hlU1poWUVLaVVDZ1lFQTZRcUwKMGxKd1pmSllYNllNaDduUnR2TGc0dG9Va25aZzF3clJmYlFJR2ZPYlpESDErajA2VFZWUDAwTzNmODB1N04rSApybnFNZ2RYbHFTeWNkcU9JMVgrZmxUdlFOZjJpQ2FNMlJsWTU0cEtNR21OYVU3RHh6bktCR3paeDBOU0RUSUhFCktEVVdISGlLREhIS0xmV3ZNc29LYnc4dXBPZGNidmIxQ1pzS0NzVUNnWUVBbHVtQ2NTUHIrZ21neDNkeEI5enoKbDV6R25NR2VzK05RZWxaRWdsdG53eW90c3lMMmppekNBdFJwdE1GbVhQSHZhK3dROG9EdWxVRVV6ZjBoWXhpOAp3am5qYlZRNUc5Tk1LdjN4Z3ZqeUNmTy9HOExtWHJHWllXenp2c2hhMWVKZGQ0VWlhVkhkT2liY1N2bXh5SWdoCnhyNi8vMzZtdjRuWDBINXhYK2RkaHcwQ2dZRUFuRCtpOGZpdVBkeFlYb0V4NDRxMlRxallHYUhJcTA3TnJVRW0KcHRhZm0rN1c0RDRUUTUzcnlsQUovb2Ruc0R1OVc3cWYrVjlYZC9JWFV1K0RWSlJmT1c2aGEraTN1SXQvS21FUgpFZVhjVjhxVVAxa2dCZjhja2FQNlptbUJ0RmZZVk9Lb3g2Q1lXNnlDT3NmTW5EWWZLcm1HZDJKQmp2VkU1MjJUCmE4WlUzRlVDZ1lBZDRaNjBDUi9vZS8ydUszVU54SjliVkZXZFdIdDFxWVFCSFg1L1J5OWV2RGVxNVdveWtobUsKNzBBVzhzYzBlTlJhMklYeTl3aHY2VG1saDF2VW04RTRvdUYzbktyTmJONjVKSWhGSXB3b0NkaTVzY1BMMEI3dQprUlIzTmhkb1Nwd2JYTi9Ob29raTB1SFBBbHhwVlI1VUF2SFBZUjFmeWtYWW1NWk1tR3VOSHc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=

View File

@@ -0,0 +1,21 @@
---
# Source: cilium/charts/cilium/templates/cilium-agent/service.yaml
apiVersion: v1
kind: Service
metadata:
name: cilium-agent
namespace: kube-system
labels:
k8s-app: cilium
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
spec:
clusterIP: None
type: ClusterIP
selector:
k8s-app: cilium
ports:
- name: metrics
port: 9962
protocol: TCP
targetPort: prometheus
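This is a headless Service (clusterIP: None); it exists so the cilium-agent ServiceMonitor further down in this render can discover per-pod metrics endpoints, and targetPort: prometheus resolves against a named container port on the agent pods. A minimal sketch of the port declaration it expects (the container name is an assumption):

# Sketch only: the named container port that targetPort "prometheus" above
# resolves to on each cilium-agent pod; the container name is assumed.
containers:
  - name: cilium-agent
    ports:
      - name: prometheus
        containerPort: 9962
        protocol: TCP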

View File

@@ -0,0 +1,22 @@
---
# Source: cilium/charts/cilium/templates/cilium-envoy/service.yaml
apiVersion: v1
kind: Service
metadata:
name: cilium-envoy
namespace: kube-system
labels:
k8s-app: cilium-envoy
app.kubernetes.io/name: cilium-envoy
app.kubernetes.io/part-of: cilium
io.cilium/app: proxy
spec:
clusterIP: None
type: ClusterIP
selector:
k8s-app: cilium-envoy
ports:
- name: envoy-metrics
port: 9964
protocol: TCP
targetPort: envoy-metrics

View File

@@ -0,0 +1,23 @@
---
# Source: cilium/charts/cilium/templates/cilium-operator/service.yaml
kind: Service
apiVersion: v1
metadata:
name: cilium-operator
namespace: kube-system
labels:
io.cilium/app: operator
name: cilium-operator
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-operator
spec:
clusterIP: None
type: ClusterIP
ports:
- name: metrics
port: 9963
protocol: TCP
targetPort: prometheus
selector:
io.cilium/app: operator
name: cilium-operator

View File

@@ -0,0 +1,20 @@
---
# Source: cilium/charts/cilium/templates/hubble/peer-service.yaml
apiVersion: v1
kind: Service
metadata:
name: hubble-peer
namespace: kube-system
labels:
k8s-app: cilium
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: hubble-peer
spec:
selector:
k8s-app: cilium
ports:
- name: peer-service
port: 443
protocol: TCP
targetPort: 4244
internalTrafficPolicy: Local
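hubble-peer fronts the per-node Hubble gRPC listener: port 443 maps to 4244 on the agent pods, and internalTrafficPolicy: Local keeps lookups on the node that issued them. hubble-relay discovers individual Hubble servers through this Service; a hedged sketch of the relevant relay configuration fragment follows (key names are assumed from upstream defaults and are not part of this render):

# Sketch only: hubble-relay config fragment pointing at the hubble-peer
# Service; the key names are assumed from upstream defaults.
peer-service: "hubble-peer.kube-system.svc.cluster.local:443"
listen-address: ":4245"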

View File

@@ -0,0 +1,20 @@
---
# Source: cilium/charts/cilium/templates/hubble-relay/service.yaml
kind: Service
apiVersion: v1
metadata:
name: hubble-relay
namespace: kube-system
annotations:
labels:
k8s-app: hubble-relay
app.kubernetes.io/name: hubble-relay
app.kubernetes.io/part-of: cilium
spec:
type: "ClusterIP"
selector:
k8s-app: hubble-relay
ports:
- protocol: TCP
port: 80
targetPort: grpc

View File

@@ -0,0 +1,19 @@
---
# Source: cilium/charts/cilium/templates/hubble-ui/service.yaml
kind: Service
apiVersion: v1
metadata:
name: hubble-ui
namespace: kube-system
labels:
k8s-app: hubble-ui
app.kubernetes.io/name: hubble-ui
app.kubernetes.io/part-of: cilium
spec:
type: "ClusterIP"
selector:
k8s-app: hubble-ui
ports:
- name: http
port: 80
targetPort: 8081
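The UI is only exposed as a ClusterIP on port 80, so nothing in this render makes it reachable from outside the cluster. If external access were wanted, an Ingress along these lines could sit in front of it; the hostname and ingress class below are placeholders, not values from this chart.

# Sketch only: optional external exposure for hubble-ui; host and
# ingressClassName are placeholders rather than rendered values.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: hubble-ui
  namespace: kube-system
spec:
  ingressClassName: nginx
  rules:
    - host: hubble.example.internal
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: hubble-ui
                port:
                  number: 80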

View File

@@ -0,0 +1,7 @@
---
# Source: cilium/charts/cilium/templates/cilium-envoy/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: "cilium-envoy"
namespace: kube-system

View File

@@ -0,0 +1,7 @@
---
# Source: cilium/charts/cilium/templates/cilium-operator/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: "cilium-operator"
namespace: kube-system

View File

@@ -0,0 +1,7 @@
---
# Source: cilium/charts/cilium/templates/cilium-agent/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: "cilium"
namespace: kube-system

View File

@@ -0,0 +1,8 @@
---
# Source: cilium/charts/cilium/templates/hubble-relay/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: "hubble-relay"
namespace: kube-system
automountServiceAccountToken: false
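Note automountServiceAccountToken: false: pods using this ServiceAccount receive no API token by default. Should the relay ever need to reach the API server, the token would have to be mounted explicitly, roughly as sketched below; the volume name and expiry are illustrative, not part of this render.

# Sketch only: an explicit projected token, needed only if API access were
# required despite automount being disabled; name and expiry are illustrative.
volumes:
  - name: serviceaccount-token
    projected:
      sources:
        - serviceAccountToken:
            path: token
            expirationSeconds: 3600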

View File

@@ -0,0 +1,7 @@
---
# Source: cilium/charts/cilium/templates/hubble-ui/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: "hubble-ui"
namespace: kube-system

View File

@@ -0,0 +1,32 @@
---
# Source: cilium/charts/cilium/templates/cilium-agent/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: cilium-agent
namespace: kube-system
labels:
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-agent
spec:
selector:
matchLabels:
app.kubernetes.io/name: cilium-agent
namespaceSelector:
matchNames:
- kube-system
endpoints:
- port: metrics
interval: "10s"
honorLabels: true
path: /metrics
relabelings:
- action: replace
replacement: ${1}
sourceLabels:
- __meta_kubernetes_pod_node_name
targetLabel: node
# If the envoy DaemonSet is enabled, a separate service is created for it.
# If it is not enabled, envoy runs inside cilium-agent and is monitored through this same service.
targetLabels:
- k8s-app
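For readers who do not run the Prometheus Operator: the monitor above is compiled into an ordinary scrape job, with the relabeling copying each pod's node name into a node label. A rough hand-written equivalent is sketched below; it is simplified, and the operator's generated configuration is more elaborate.

# Sketch only: approximate plain-Prometheus equivalent of the ServiceMonitor
# above; selector and label details are simplified.
scrape_configs:
  - job_name: cilium-agent
    honor_labels: true
    scrape_interval: 10s
    metrics_path: /metrics
    kubernetes_sd_configs:
      - role: endpoints
        namespaces:
          names: [kube-system]
    relabel_configs:
      # keep only endpoints backing the cilium-agent Service
      - action: keep
        source_labels: [__meta_kubernetes_service_label_app_kubernetes_io_name]
        regex: cilium-agent
      # copy the pod's node name into a "node" label, as the relabeling above does
      - action: replace
        source_labels: [__meta_kubernetes_pod_node_name]
        target_label: node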

View File

@@ -0,0 +1,30 @@
---
# Source: cilium/charts/cilium/templates/cilium-envoy/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: cilium-envoy
namespace: kube-system
labels:
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-envoy
spec:
selector:
matchLabels:
k8s-app: cilium-envoy
namespaceSelector:
matchNames:
- kube-system
endpoints:
- port: envoy-metrics
interval: "10s"
honorLabels: true
path: /metrics
relabelings:
- action: replace
replacement: ${1}
sourceLabels:
- __meta_kubernetes_pod_node_name
targetLabel: node
targetLabels:
- k8s-app

View File

@@ -0,0 +1,25 @@
---
# Source: cilium/charts/cilium/templates/cilium-operator/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: cilium-operator
namespace: kube-system
labels:
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-operator
spec:
selector:
matchLabels:
io.cilium/app: operator
name: cilium-operator
namespaceSelector:
matchNames:
- kube-system
endpoints:
- port: metrics
interval: "10s"
honorLabels: true
path: /metrics
targetLabels:
- io.cilium/app

File diff suppressed because one or more lines are too long