Add generated files
This PR adds generated files under pkg/client and the vendor/ folder.
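For context, here is a minimal sketch of how a generated clientset under pkg/client is typically consumed. This is not taken from the PR itself: the import path is a hypothetical placeholder, and it assumes the standard client-gen layout (a `clientset/versioned` package with a `NewForConfig` constructor).

```go
package main

import (
	"fmt"
	"log"

	"k8s.io/client-go/tools/clientcmd"

	// Hypothetical import path: the actual location of the generated
	// clientset under pkg/client depends on how client-gen was configured
	// for this repository.
	clientset "example.com/project/pkg/client/clientset/versioned"
)

func main() {
	// Build a *rest.Config from a kubeconfig file on disk.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		log.Fatal(err)
	}

	// NewForConfig is the constructor that client-gen emits for a
	// generated clientset.
	cs, err := clientset.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("created generated clientset: %T\n", cs)
}
```

The vendored kubemark manifests added under vendor/ are listed below.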
vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/README.md (generated, vendored, new normal file, 1 line)
@@ -0,0 +1 @@
These resources are used to add extra (non-default) bindings to kubemark to match users and groups that are particular to the kubemark environment. These are not standard bootstrap bindings and not standard users they are bound to, and have been adapted from cluster/addons/e2e-rbac-bindings. Tighten/loosen these access rights as required in future.
vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/cluster-autoscaler-binding.yaml (generated, vendored, new normal file, 16 lines)
@@ -0,0 +1,16 @@
# This is the role binding for the kubemark cluster autoscaler.
# TODO: Use proper Cluster Autoscaler role (github.com/kubernetes/autoscaler/issues/383)
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cluster-autoscaler-view-binding
  labels:
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:cluster-autoscaler
vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/heapster-binding.yaml (generated, vendored, new normal file, 15 lines)
@@ -0,0 +1,15 @@
# This is the role binding for the kubemark heapster.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: heapster-view-binding
  labels:
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:heapster
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:heapster
vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/kube-dns-binding.yaml (generated, vendored, new normal file, 15 lines)
@@ -0,0 +1,15 @@
# This is the role binding for the kubemark kube-dns.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kube-dns-view-binding
  labels:
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-dns
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:kube-dns
vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/kubecfg-binding.yaml (generated, vendored, new normal file, 18 lines)
@@ -0,0 +1,18 @@
# This is the role binding for the local kubectl, which is
# used for listing hollow-nodes in start-kubemark.sh and
# send resource creation requests, etc in run-e2e-tests.sh.
# Also useful if you manually want to use local kubectl.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubecfg-cluster-admin
  labels:
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: kubecfg
vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/kubelet-binding.yaml (generated, vendored, new normal file, 18 lines)
@@ -0,0 +1,18 @@
# The Kubemark environment currently gives all kubelets a single shared credential.
#
# TODO: give each kubelet a credential in the system:nodes group with username system:node:<nodeName>,
# to exercise the Node authorizer and admission, then remove this binding
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-node
  labels:
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: kubelet
vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/addons/kubemark-rbac-bindings/npd-binding.yaml (generated, vendored, new normal file, 15 lines)
@@ -0,0 +1,15 @@
# This is the role binding for the node-problem-detector.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-problem-detector-binding
  labels:
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-problem-detector
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:node-problem-detector
vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/etcd-events.yaml (generated, vendored, new normal file, 51 lines)
@@ -0,0 +1,51 @@
apiVersion: v1
kind: Pod
metadata:
  name: etcd-server-events
  namespace: kube-system
spec:
  hostNetwork: true
  nodeName: {{instance_prefix}}-master
  containers:
  - name: etcd-container
    image: {{kube_docker_registry}}/etcd:{{etcd_image}}
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 100m
    command:
    - /bin/sh
    - -c
    - /usr/local/bin/etcd
      {{params}}
      1>>/var/log/etcd-events.log 2>&1
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /health
        port: 4002
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    ports:
    - name: serverport
      containerPort: 2381
      hostPort: 2381
      protocol: TCP
    - name: clientport
      containerPort: 4002
      hostPort: 4002
      protocol: TCP
    volumeMounts:
    - name: varetcd
      mountPath: /var/etcd
    - name: varlogetcd
      mountPath: /var/log/etcd-events.log
  volumes:
  - name: varetcd
    hostPath:
      path: /var/etcd/events
  - name: varlogetcd
    hostPath:
      path: /var/log/etcd-events.log
      type: FileOrCreate
vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/etcd.yaml (generated, vendored, new normal file, 50 lines)
@@ -0,0 +1,50 @@
apiVersion: v1
kind: Pod
metadata:
  name: etcd-server
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: etcd-container
    image: {{kube_docker_registry}}/etcd:{{etcd_image}}
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 200m
    command:
    - /bin/sh
    - -c
    - /usr/local/bin/etcd
      {{params}}
      1>>/var/log/etcd.log 2>&1
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /health
        port: 2379
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    ports:
    - name: serverport
      containerPort: 2380
      hostPort: 2380
      protocol: TCP
    - name: clientport
      containerPort: 2379
      hostPort: 2379
      protocol: TCP
    volumeMounts:
    - name: varetcd
      mountPath: /var/etcd
    - name: varlogetcd
      mountPath: /var/log/etcd.log
  volumes:
  - name: varetcd
    hostPath:
      path: /var/etcd
  - name: varlogetcd
    hostPath:
      path: /var/log/etcd.log
      type: FileOrCreate
vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-addon-manager.yaml (generated, vendored, new normal file, 34 lines)
@@ -0,0 +1,34 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-addon-manager
  namespace: kube-system
  labels:
    component: kube-addon-manager
spec:
  hostNetwork: true
  containers:
  - name: kube-addon-manager
    image: {{kube_docker_registry}}/kube-addon-manager:v8.6
    command:
    - /bin/bash
    - -c
    - /opt/kube-addons.sh 1>>/var/log/kube-addon-manager.log 2>&1
    resources:
      requests:
        cpu: 5m
        memory: 50Mi
    volumeMounts:
    - name: addons
      mountPath: /etc/kubernetes/
      readOnly: true
    - name: varlog
      mountPath: /var/log/kube-addon-manager.log
  volumes:
  - name: addons
    hostPath:
      path: /etc/kubernetes/
  - name: varlog
    hostPath:
      path: /var/log/kube-addon-manager.log
      type: FileOrCreate
vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-apiserver.yaml (generated, vendored, new normal file, 70 lines)
@@ -0,0 +1,70 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-apiserver
    image: {{kube_docker_registry}}/kube-apiserver:{{kube-apiserver_docker_tag}}
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 250m
    command:
    - /bin/sh
    - -c
    - /usr/local/bin/kube-apiserver
      {{params}}
      1>>/var/log/kube-apiserver.log 2>&1
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 8080
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    ports:
    - name: https
      containerPort: 443
      hostPort: 443
      protocol: TCP
    - name: local
      containerPort: 8080
      hostPort: 8080
      protocol: TCP
    volumeMounts:
    {{audit_policy_config_mount}}
    - name: srvkube
      mountPath: /etc/srv/kubernetes
      readOnly: true
    - name: logfile
      mountPath: /var/log/kube-apiserver.log
    - name: etcssl
      mountPath: /etc/ssl
      readOnly: true
    - name: usrsharecacerts
      mountPath: /usr/share/ca-certificates
      readOnly: true
    - name: srvsshproxy
      mountPath: /etc/srv/sshproxy
  volumes:
  {{audit_policy_config_volume}}
  - name: srvkube
    hostPath:
      path: /etc/srv/kubernetes
  - name: logfile
    hostPath:
      path: /var/log/kube-apiserver.log
      type: FileOrCreate
  - name: etcssl
    hostPath:
      path: /etc/ssl
  - name: usrsharecacerts
    hostPath:
      path: /usr/share/ca-certificates
  - name: srvsshproxy
    hostPath:
      path: /etc/srv/sshproxy
vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-controller-manager.yaml (generated, vendored, new normal file, 54 lines)
@@ -0,0 +1,54 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-controller-manager
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-controller-manager
    image: {{kube_docker_registry}}/kube-controller-manager:{{kube-controller-manager_docker_tag}}
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 200m
    command:
    - /bin/sh
    - -c
    - /usr/local/bin/kube-controller-manager
      {{params}}
      1>>/var/log/kube-controller-manager.log 2>&1
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10252
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    volumeMounts:
    - name: srvkube
      mountPath: /etc/srv/kubernetes
      readOnly: true
    - name: logfile
      mountPath: /var/log/kube-controller-manager.log
    - name: etcssl
      mountPath: /etc/ssl
      readOnly: true
    - name: usrsharecacerts
      mountPath: /usr/share/ca-certificates
      readOnly: true
  volumes:
  - name: srvkube
    hostPath:
      path: /etc/srv/kubernetes
  - name: logfile
    hostPath:
      path: /var/log/kube-controller-manager.log
      type: FileOrCreate
  - name: etcssl
    hostPath:
      path: /etc/ssl
  - name: usrsharecacerts
    hostPath:
      path: /usr/share/ca-certificates
vendor/k8s.io/kubernetes/test/kubemark/resources/manifests/kube-scheduler.yaml (generated, vendored, new normal file, 43 lines)
@@ -0,0 +1,43 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-scheduler
  namespace: kube-system
spec:
  hostNetwork: true
  nodeName: {{instance_prefix}}-master
  containers:
  - name: kube-scheduler
    image: {{kube_docker_registry}}/kube-scheduler:{{kube-scheduler_docker_tag}}
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 100m
    command:
    - /bin/sh
    - -c
    - /usr/local/bin/kube-scheduler
      {{params}}
      1>>/var/log/kube-scheduler.log 2>&1
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10251
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    volumeMounts:
    - name: srvkube
      mountPath: /etc/srv/kubernetes
      readOnly: true
    - name: logfile
      mountPath: /var/log/kube-scheduler.log
  volumes:
  - name: srvkube
    hostPath:
      path: /etc/srv/kubernetes
  - name: logfile
    hostPath:
      path: /var/log/kube-scheduler.log
      type: FileOrCreate