Add generated file

This PR adds generated files under the pkg/client and vendor folders.
xing-yang
2018-07-12 10:55:15 -07:00
parent 36b1de0341
commit e213d1890d
17729 changed files with 5090889 additions and 0 deletions

738
vendor/k8s.io/kubernetes/hack/.golint_failures generated vendored Normal file

@@ -0,0 +1,738 @@
cluster/images/etcd-version-monitor
cmd/hyperkube
cmd/kube-apiserver/app
cmd/kube-controller-manager/app
cmd/kube-proxy/app
cmd/kube-scheduler/app
cmd/kubeadm/app
cmd/kubeadm/app/apis/kubeadm/v1alpha2
cmd/kubeadm/app/util/config
cmd/kubelet/app
cmd/kubelet/app/options
cmd/kubemark
pkg/api/endpoints
pkg/api/ref
pkg/api/testapi
pkg/api/testing
pkg/api/testing/compat
pkg/api/v1/endpoints
pkg/api/v1/pod
pkg/api/v1/resource
pkg/apis/abac
pkg/apis/abac/latest
pkg/apis/admission
pkg/apis/admission/v1beta1
pkg/apis/admissionregistration
pkg/apis/admissionregistration/v1alpha1
pkg/apis/admissionregistration/v1beta1
pkg/apis/admissionregistration/validation
pkg/apis/apps
pkg/apis/apps/validation
pkg/apis/authentication
pkg/apis/authentication/v1
pkg/apis/authentication/v1beta1
pkg/apis/authorization
pkg/apis/authorization/v1
pkg/apis/authorization/v1beta1
pkg/apis/authorization/validation
pkg/apis/autoscaling
pkg/apis/autoscaling/validation
pkg/apis/batch
pkg/apis/batch/validation
pkg/apis/certificates
pkg/apis/certificates/v1beta1
pkg/apis/certificates/validation
pkg/apis/componentconfig
pkg/apis/componentconfig/v1alpha1
pkg/apis/core
pkg/apis/core/helper
pkg/apis/core/helper/qos
pkg/apis/core/v1/helper
pkg/apis/core/v1/helper/qos
pkg/apis/core/v1/validation
pkg/apis/core/validation
pkg/apis/events
pkg/apis/events/v1beta1
pkg/apis/extensions
pkg/apis/extensions/validation
pkg/apis/imagepolicy
pkg/apis/imagepolicy/v1alpha1
pkg/apis/networking
pkg/apis/policy
pkg/apis/policy/v1beta1
pkg/apis/policy/validation
pkg/apis/rbac/v1
pkg/apis/rbac/v1beta1
pkg/apis/rbac/validation
pkg/apis/scheduling
pkg/apis/scheduling/v1alpha1
pkg/apis/scheduling/v1beta1
pkg/apis/settings
pkg/apis/settings/v1alpha1
pkg/apis/storage
pkg/apis/storage/util
pkg/apis/storage/v1/util
pkg/apis/storage/v1alpha1
pkg/apis/storage/v1beta1/util
pkg/auth/authorizer/abac
pkg/capabilities
pkg/client/chaosclient
pkg/client/leaderelectionconfig
pkg/client/tests
pkg/cloudprovider
pkg/cloudprovider/providers/aws
pkg/cloudprovider/providers/fake
pkg/cloudprovider/providers/gce
pkg/cloudprovider/providers/gce/cloud
pkg/cloudprovider/providers/ovirt
pkg/cloudprovider/providers/photon
pkg/cloudprovider/providers/vsphere
pkg/controller
pkg/controller/bootstrap
pkg/controller/certificates
pkg/controller/certificates/approver
pkg/controller/certificates/signer
pkg/controller/cloud
pkg/controller/clusterroleaggregation
pkg/controller/cronjob
pkg/controller/daemon
pkg/controller/deployment
pkg/controller/deployment/util
pkg/controller/disruption
pkg/controller/endpoint
pkg/controller/garbagecollector
pkg/controller/job
pkg/controller/namespace
pkg/controller/namespace/deletion
pkg/controller/podautoscaler
pkg/controller/podautoscaler/metrics
pkg/controller/podgc
pkg/controller/replicaset
pkg/controller/replicaset/options
pkg/controller/replication
pkg/controller/resourcequota
pkg/controller/route
pkg/controller/service
pkg/controller/serviceaccount
pkg/controller/statefulset
pkg/controller/ttl
pkg/controller/volume/attachdetach
pkg/controller/volume/attachdetach/statusupdater
pkg/controller/volume/attachdetach/testing
pkg/controller/volume/events
pkg/controller/volume/expand
pkg/controller/volume/persistentvolume
pkg/controller/volume/persistentvolume/options
pkg/credentialprovider
pkg/credentialprovider/gcp
pkg/credentialprovider/rancher
pkg/features
pkg/kubeapiserver
pkg/kubeapiserver/admission
pkg/kubeapiserver/authenticator
pkg/kubeapiserver/authorizer
pkg/kubeapiserver/authorizer/modes
pkg/kubeapiserver/options
pkg/kubeapiserver/server
pkg/kubectl
pkg/kubectl/cmd
pkg/kubectl/cmd/auth
pkg/kubectl/cmd/config
pkg/kubectl/cmd/create
pkg/kubectl/cmd/get
pkg/kubectl/cmd/rollout
pkg/kubectl/cmd/set
pkg/kubectl/cmd/templates
pkg/kubectl/cmd/testing
pkg/kubectl/cmd/util
pkg/kubectl/cmd/util/editor
pkg/kubectl/cmd/util/sanity
pkg/kubectl/cmd/wait
pkg/kubectl/genericclioptions
pkg/kubectl/genericclioptions/printers
pkg/kubectl/genericclioptions/resource
pkg/kubectl/metricsutil
pkg/kubectl/util
pkg/kubectl/util/slice
pkg/kubelet
pkg/kubelet/apis
pkg/kubelet/apis/cri/runtime/v1alpha2
pkg/kubelet/apis/cri/testing
pkg/kubelet/apis/deviceplugin/v1alpha
pkg/kubelet/apis/deviceplugin/v1beta1
pkg/kubelet/apis/kubeletconfig
pkg/kubelet/apis/kubeletconfig/v1beta1
pkg/kubelet/apis/pluginregistration/v1alpha1
pkg/kubelet/cadvisor
pkg/kubelet/cadvisor/testing
pkg/kubelet/checkpoint
pkg/kubelet/checkpointmanager/checksum
pkg/kubelet/checkpointmanager/testing/example_checkpoint_formats/v1
pkg/kubelet/client
pkg/kubelet/cm
pkg/kubelet/cm/devicemanager/checkpoint
pkg/kubelet/cm/util
pkg/kubelet/config
pkg/kubelet/configmap
pkg/kubelet/container/testing
pkg/kubelet/custommetrics
pkg/kubelet/dockershim
pkg/kubelet/dockershim/cm
pkg/kubelet/dockershim/libdocker
pkg/kubelet/dockershim/network
pkg/kubelet/dockershim/network/cni
pkg/kubelet/dockershim/network/cni/testing
pkg/kubelet/dockershim/network/hostport
pkg/kubelet/dockershim/network/hostport/testing
pkg/kubelet/dockershim/network/kubenet
pkg/kubelet/dockershim/network/testing
pkg/kubelet/events
pkg/kubelet/images
pkg/kubelet/kuberuntime
pkg/kubelet/leaky
pkg/kubelet/lifecycle
pkg/kubelet/metrics
pkg/kubelet/pleg
pkg/kubelet/pod
pkg/kubelet/pod/testing
pkg/kubelet/preemption
pkg/kubelet/prober
pkg/kubelet/prober/results
pkg/kubelet/prober/testing
pkg/kubelet/qos
pkg/kubelet/remote
pkg/kubelet/secret
pkg/kubelet/server
pkg/kubelet/server/portforward
pkg/kubelet/server/remotecommand
pkg/kubelet/server/stats
pkg/kubelet/server/streaming
pkg/kubelet/stats
pkg/kubelet/status
pkg/kubelet/status/testing
pkg/kubelet/sysctl
pkg/kubelet/types
pkg/kubelet/util
pkg/kubelet/util/cache
pkg/kubelet/util/pluginwatcher
pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1
pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2
pkg/kubelet/util/queue
pkg/kubelet/util/sliceutils
pkg/kubemark
pkg/master
pkg/master/controller/crdregistration
pkg/master/tunneler
pkg/printers
pkg/printers/internalversion
pkg/printers/storage
pkg/probe
pkg/probe/exec
pkg/probe/http
pkg/probe/tcp
pkg/proxy
pkg/proxy/apis/kubeproxyconfig
pkg/proxy/apis/kubeproxyconfig/v1alpha1
pkg/proxy/iptables
pkg/proxy/userspace
pkg/proxy/util
pkg/proxy/winkernel
pkg/proxy/winuserspace
pkg/quota/evaluator/core
pkg/registry/admissionregistration/initializerconfiguration/storage
pkg/registry/admissionregistration/mutatingwebhookconfiguration/storage
pkg/registry/admissionregistration/rest
pkg/registry/admissionregistration/validatingwebhookconfiguration/storage
pkg/registry/apps/daemonset
pkg/registry/apps/daemonset/storage
pkg/registry/apps/deployment
pkg/registry/apps/deployment/storage
pkg/registry/apps/replicaset
pkg/registry/apps/replicaset/storage
pkg/registry/apps/rest
pkg/registry/apps/statefulset
pkg/registry/apps/statefulset/storage
pkg/registry/authentication/rest
pkg/registry/authentication/tokenreview
pkg/registry/authorization/localsubjectaccessreview
pkg/registry/authorization/rest
pkg/registry/authorization/selfsubjectaccessreview
pkg/registry/authorization/subjectaccessreview
pkg/registry/autoscaling/horizontalpodautoscaler
pkg/registry/autoscaling/horizontalpodautoscaler/storage
pkg/registry/autoscaling/rest
pkg/registry/batch/cronjob
pkg/registry/batch/cronjob/storage
pkg/registry/batch/job
pkg/registry/batch/job/storage
pkg/registry/batch/rest
pkg/registry/certificates/certificates
pkg/registry/certificates/certificates/storage
pkg/registry/certificates/rest
pkg/registry/core/componentstatus
pkg/registry/core/endpoint/storage
pkg/registry/core/event
pkg/registry/core/event/storage
pkg/registry/core/limitrange/storage
pkg/registry/core/namespace
pkg/registry/core/namespace/storage
pkg/registry/core/node
pkg/registry/core/node/storage
pkg/registry/core/persistentvolume
pkg/registry/core/persistentvolume/storage
pkg/registry/core/persistentvolumeclaim
pkg/registry/core/persistentvolumeclaim/storage
pkg/registry/core/pod
pkg/registry/core/pod/rest
pkg/registry/core/podtemplate/storage
pkg/registry/core/replicationcontroller
pkg/registry/core/replicationcontroller/storage
pkg/registry/core/resourcequota
pkg/registry/core/resourcequota/storage
pkg/registry/core/rest
pkg/registry/core/secret
pkg/registry/core/secret/storage
pkg/registry/core/service
pkg/registry/core/service/allocator
pkg/registry/core/service/allocator/storage
pkg/registry/core/service/ipallocator
pkg/registry/core/service/portallocator
pkg/registry/core/service/portallocator/controller
pkg/registry/core/service/storage
pkg/registry/core/serviceaccount/storage
pkg/registry/events/rest
pkg/registry/extensions/controller/storage
pkg/registry/extensions/ingress
pkg/registry/extensions/ingress/storage
pkg/registry/extensions/rest
pkg/registry/networking/networkpolicy/storage
pkg/registry/networking/rest
pkg/registry/policy/poddisruptionbudget
pkg/registry/policy/poddisruptionbudget/storage
pkg/registry/policy/rest
pkg/registry/rbac
pkg/registry/rbac/clusterrole
pkg/registry/rbac/clusterrole/policybased
pkg/registry/rbac/clusterrolebinding
pkg/registry/rbac/clusterrolebinding/policybased
pkg/registry/rbac/reconciliation
pkg/registry/rbac/rest
pkg/registry/rbac/role
pkg/registry/rbac/role/policybased
pkg/registry/rbac/rolebinding
pkg/registry/rbac/rolebinding/policybased
pkg/registry/rbac/validation
pkg/registry/registrytest
pkg/registry/scheduling/priorityclass/storage
pkg/registry/scheduling/rest
pkg/registry/settings/podpreset/storage
pkg/registry/settings/rest
pkg/registry/storage/rest
pkg/registry/storage/storageclass
pkg/registry/storage/storageclass/storage
pkg/routes
pkg/security/apparmor
pkg/security/podsecuritypolicy
pkg/security/podsecuritypolicy/group
pkg/security/podsecuritypolicy/seccomp
pkg/security/podsecuritypolicy/selinux
pkg/security/podsecuritypolicy/user
pkg/security/podsecuritypolicy/util
pkg/securitycontext
pkg/ssh
pkg/util/bandwidth
pkg/util/config
pkg/util/configz
pkg/util/dbus
pkg/util/ebtables
pkg/util/env
pkg/util/file
pkg/util/goroutinemap/exponentialbackoff
pkg/util/initsystem
pkg/util/ipconfig
pkg/util/iptables
pkg/util/iptables/testing
pkg/util/keymutex
pkg/util/labels
pkg/util/mount
pkg/util/netsh/testing
pkg/util/node
pkg/util/normalizer
pkg/util/oom
pkg/util/parsers
pkg/util/procfs
pkg/util/removeall
pkg/util/resourcecontainer
pkg/util/rlimit
pkg/util/selinux
pkg/util/strings
pkg/util/sysctl
pkg/util/sysctl/testing
pkg/util/system
pkg/util/taints
pkg/util/term
pkg/util/threading
pkg/util/tolerations
pkg/util/workqueue/prometheus
pkg/version/verflag
pkg/volume/aws_ebs
pkg/volume/azure_dd
pkg/volume/azure_file
pkg/volume/cephfs
pkg/volume/configmap
pkg/volume/csi
pkg/volume/csi/fake
pkg/volume/csi/labelmanager
pkg/volume/empty_dir
pkg/volume/fc
pkg/volume/flexvolume
pkg/volume/flocker
pkg/volume/gce_pd
pkg/volume/git_repo
pkg/volume/host_path
pkg/volume/iscsi
pkg/volume/local
pkg/volume/nfs
pkg/volume/photon_pd
pkg/volume/portworx
pkg/volume/rbd
pkg/volume/scaleio
pkg/volume/secret
pkg/volume/storageos
pkg/volume/testing
pkg/volume/util
pkg/volume/util/fs
pkg/volume/util/recyclerclient
pkg/volume/util/volumepathhandler
pkg/volume/vsphere_volume
plugin/pkg/admission/antiaffinity
plugin/pkg/admission/eventratelimit/apis/eventratelimit
plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1
plugin/pkg/admission/limitranger
plugin/pkg/admission/noderestriction
plugin/pkg/admission/podnodeselector
plugin/pkg/admission/podpreset
plugin/pkg/admission/podtolerationrestriction
plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction
plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1
plugin/pkg/admission/resourcequota
plugin/pkg/admission/resourcequota/apis/resourcequota
plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1
plugin/pkg/admission/security
plugin/pkg/admission/security/podsecuritypolicy
plugin/pkg/admission/serviceaccount
plugin/pkg/auth/authorizer/node
plugin/pkg/auth/authorizer/rbac
staging/src/k8s.io/api/admission/v1beta1
staging/src/k8s.io/api/admissionregistration/v1alpha1
staging/src/k8s.io/api/admissionregistration/v1beta1
staging/src/k8s.io/api/apps/v1
staging/src/k8s.io/api/apps/v1beta1
staging/src/k8s.io/api/apps/v1beta2
staging/src/k8s.io/api/authentication/v1
staging/src/k8s.io/api/authentication/v1beta1
staging/src/k8s.io/api/authorization/v1
staging/src/k8s.io/api/authorization/v1beta1
staging/src/k8s.io/api/autoscaling/v1
staging/src/k8s.io/api/autoscaling/v2beta1
staging/src/k8s.io/api/batch/v1
staging/src/k8s.io/api/batch/v1beta1
staging/src/k8s.io/api/batch/v2alpha1
staging/src/k8s.io/api/certificates/v1beta1
staging/src/k8s.io/api/core/v1
staging/src/k8s.io/api/events/v1beta1
staging/src/k8s.io/api/extensions/v1beta1
staging/src/k8s.io/api/imagepolicy/v1alpha1
staging/src/k8s.io/api/networking/v1
staging/src/k8s.io/api/policy/v1beta1
staging/src/k8s.io/api/rbac/v1
staging/src/k8s.io/api/rbac/v1alpha1
staging/src/k8s.io/api/rbac/v1beta1
staging/src/k8s.io/api/scheduling/v1alpha1
staging/src/k8s.io/api/scheduling/v1beta1
staging/src/k8s.io/api/settings/v1alpha1
staging/src/k8s.io/api/storage/v1
staging/src/k8s.io/api/storage/v1alpha1
staging/src/k8s.io/api/storage/v1beta1
staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr
staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1
staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1
staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver
staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server
staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer
staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status
staging/src/k8s.io/apiextensions-apiserver/pkg/features
staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver
staging/src/k8s.io/apimachinery/pkg/api/meta
staging/src/k8s.io/apimachinery/pkg/api/testing/fuzzer
staging/src/k8s.io/apimachinery/pkg/api/testing/roundtrip
staging/src/k8s.io/apimachinery/pkg/api/validation
staging/src/k8s.io/apimachinery/pkg/api/validation/path
staging/src/k8s.io/apimachinery/pkg/apis/meta/fuzzer
staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion
staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation
staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1
staging/src/k8s.io/apimachinery/pkg/apis/testapigroup
staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1
staging/src/k8s.io/apimachinery/pkg/conversion
staging/src/k8s.io/apimachinery/pkg/labels
staging/src/k8s.io/apimachinery/pkg/runtime/schema
staging/src/k8s.io/apimachinery/pkg/runtime/serializer
staging/src/k8s.io/apimachinery/pkg/runtime/serializer/protobuf
staging/src/k8s.io/apimachinery/pkg/runtime/serializer/recognizer
staging/src/k8s.io/apimachinery/pkg/runtime/serializer/streaming
staging/src/k8s.io/apimachinery/pkg/runtime/serializer/testing
staging/src/k8s.io/apimachinery/pkg/runtime/testing
staging/src/k8s.io/apimachinery/pkg/selection
staging/src/k8s.io/apimachinery/pkg/test
staging/src/k8s.io/apimachinery/pkg/types
staging/src/k8s.io/apimachinery/pkg/util/cache
staging/src/k8s.io/apimachinery/pkg/util/clock
staging/src/k8s.io/apimachinery/pkg/util/diff
staging/src/k8s.io/apimachinery/pkg/util/errors
staging/src/k8s.io/apimachinery/pkg/util/framer
staging/src/k8s.io/apimachinery/pkg/util/httpstream
staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy
staging/src/k8s.io/apimachinery/pkg/util/intstr
staging/src/k8s.io/apimachinery/pkg/util/jsonmergepatch
staging/src/k8s.io/apimachinery/pkg/util/mergepatch
staging/src/k8s.io/apimachinery/pkg/util/net
staging/src/k8s.io/apimachinery/pkg/util/proxy
staging/src/k8s.io/apimachinery/pkg/util/rand
staging/src/k8s.io/apimachinery/pkg/util/remotecommand
staging/src/k8s.io/apimachinery/pkg/util/runtime
staging/src/k8s.io/apimachinery/pkg/util/sets/types
staging/src/k8s.io/apimachinery/pkg/util/strategicpatch
staging/src/k8s.io/apimachinery/pkg/util/uuid
staging/src/k8s.io/apimachinery/pkg/util/validation
staging/src/k8s.io/apimachinery/pkg/util/wait
staging/src/k8s.io/apimachinery/pkg/util/yaml
staging/src/k8s.io/apiserver/pkg/admission
staging/src/k8s.io/apiserver/pkg/admission/configuration
staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization
staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle
staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config
staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission
staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1
staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating
staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/testcerts
staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating
staging/src/k8s.io/apiserver/pkg/apis/apiserver
staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1
staging/src/k8s.io/apiserver/pkg/apis/audit
staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1
staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1
staging/src/k8s.io/apiserver/pkg/apis/audit/validation
staging/src/k8s.io/apiserver/pkg/apis/example
staging/src/k8s.io/apiserver/pkg/apis/example/v1
staging/src/k8s.io/apiserver/pkg/apis/example2
staging/src/k8s.io/apiserver/pkg/apis/example2/v1
staging/src/k8s.io/apiserver/pkg/audit
staging/src/k8s.io/apiserver/pkg/audit/policy
staging/src/k8s.io/apiserver/pkg/authentication/authenticatorfactory
staging/src/k8s.io/apiserver/pkg/authentication/group
staging/src/k8s.io/apiserver/pkg/authentication/request/anonymous
staging/src/k8s.io/apiserver/pkg/authentication/request/bearertoken
staging/src/k8s.io/apiserver/pkg/authentication/request/headerrequest
staging/src/k8s.io/apiserver/pkg/authentication/request/websocket
staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount
staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile
staging/src/k8s.io/apiserver/pkg/authentication/user
staging/src/k8s.io/apiserver/pkg/authorization/authorizer
staging/src/k8s.io/apiserver/pkg/authorization/authorizerfactory
staging/src/k8s.io/apiserver/pkg/endpoints
staging/src/k8s.io/apiserver/pkg/endpoints/discovery
staging/src/k8s.io/apiserver/pkg/endpoints/filters
staging/src/k8s.io/apiserver/pkg/endpoints/handlers
staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation
staging/src/k8s.io/apiserver/pkg/endpoints/metrics
staging/src/k8s.io/apiserver/pkg/endpoints/openapi/testing
staging/src/k8s.io/apiserver/pkg/endpoints/request
staging/src/k8s.io/apiserver/pkg/endpoints/testing
staging/src/k8s.io/apiserver/pkg/features
staging/src/k8s.io/apiserver/pkg/registry/generic
staging/src/k8s.io/apiserver/pkg/registry/generic/registry
staging/src/k8s.io/apiserver/pkg/registry/generic/rest
staging/src/k8s.io/apiserver/pkg/registry/generic/testing
staging/src/k8s.io/apiserver/pkg/registry/rest
staging/src/k8s.io/apiserver/pkg/registry/rest/resttest
staging/src/k8s.io/apiserver/pkg/server
staging/src/k8s.io/apiserver/pkg/server/healthz
staging/src/k8s.io/apiserver/pkg/server/httplog
staging/src/k8s.io/apiserver/pkg/server/options
staging/src/k8s.io/apiserver/pkg/server/routes/data/swagger
staging/src/k8s.io/apiserver/pkg/server/storage
staging/src/k8s.io/apiserver/pkg/storage
staging/src/k8s.io/apiserver/pkg/storage/errors
staging/src/k8s.io/apiserver/pkg/storage/etcd
staging/src/k8s.io/apiserver/pkg/storage/etcd/etcdtest
staging/src/k8s.io/apiserver/pkg/storage/etcd/metrics
staging/src/k8s.io/apiserver/pkg/storage/etcd/testing
staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/testingcert
staging/src/k8s.io/apiserver/pkg/storage/etcd/util
staging/src/k8s.io/apiserver/pkg/storage/storagebackend
staging/src/k8s.io/apiserver/pkg/storage/testing
staging/src/k8s.io/apiserver/pkg/storage/tests
staging/src/k8s.io/apiserver/pkg/storage/value
staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1
staging/src/k8s.io/apiserver/pkg/util/feature
staging/src/k8s.io/apiserver/pkg/util/flag
staging/src/k8s.io/apiserver/pkg/util/proxy
staging/src/k8s.io/apiserver/pkg/util/trace
staging/src/k8s.io/apiserver/pkg/util/webhook
staging/src/k8s.io/apiserver/pkg/util/wsstream
staging/src/k8s.io/apiserver/plugin/pkg/audit/log
staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/passwordfile
staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/tokentest
staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook
staging/src/k8s.io/client-go/deprecated-dynamic
staging/src/k8s.io/client-go/discovery/cached
staging/src/k8s.io/client-go/dynamic
staging/src/k8s.io/client-go/dynamic/fake
staging/src/k8s.io/client-go/examples/workqueue
staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1
staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake
staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1
staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake
staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1
staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake
staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1
staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake
staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1
staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake
staging/src/k8s.io/client-go/kubernetes/typed/core/v1
staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake
staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake
staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake
staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc
staging/src/k8s.io/client-go/rest
staging/src/k8s.io/client-go/rest/fake
staging/src/k8s.io/client-go/scale
staging/src/k8s.io/client-go/scale/fake
staging/src/k8s.io/client-go/scale/scheme
staging/src/k8s.io/client-go/scale/scheme/appsint
staging/src/k8s.io/client-go/scale/scheme/appsv1beta1
staging/src/k8s.io/client-go/scale/scheme/appsv1beta2
staging/src/k8s.io/client-go/scale/scheme/autoscalingv1
staging/src/k8s.io/client-go/scale/scheme/extensionsint
staging/src/k8s.io/client-go/scale/scheme/extensionsv1beta1
staging/src/k8s.io/client-go/testing
staging/src/k8s.io/client-go/tools/cache
staging/src/k8s.io/client-go/tools/cache/testing
staging/src/k8s.io/client-go/tools/clientcmd
staging/src/k8s.io/client-go/tools/clientcmd/api
staging/src/k8s.io/client-go/tools/clientcmd/api/latest
staging/src/k8s.io/client-go/tools/clientcmd/api/v1
staging/src/k8s.io/client-go/tools/leaderelection
staging/src/k8s.io/client-go/tools/leaderelection/resourcelock
staging/src/k8s.io/client-go/tools/portforward
staging/src/k8s.io/client-go/tools/record
staging/src/k8s.io/client-go/tools/reference
staging/src/k8s.io/client-go/transport
staging/src/k8s.io/client-go/util/cert/triple
staging/src/k8s.io/client-go/util/exec
staging/src/k8s.io/client-go/util/flowcontrol
staging/src/k8s.io/client-go/util/integer
staging/src/k8s.io/client-go/util/jsonpath
staging/src/k8s.io/client-go/util/retry
staging/src/k8s.io/client-go/util/testing
staging/src/k8s.io/code-generator/cmd/client-gen/args
staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake
staging/src/k8s.io/code-generator/cmd/client-gen/generators/scheme
staging/src/k8s.io/code-generator/cmd/client-gen/types
staging/src/k8s.io/code-generator/cmd/conversion-gen/generators
staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf
staging/src/k8s.io/code-generator/cmd/lister-gen/generators
staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration
staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1
staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/validation
staging/src/k8s.io/kube-aggregator/pkg/apiserver
staging/src/k8s.io/kube-aggregator/pkg/cmd/server
staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister
staging/src/k8s.io/kube-aggregator/pkg/controllers/status
staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice
staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
staging/src/k8s.io/metrics/pkg/apis/custom_metrics
staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
staging/src/k8s.io/metrics/pkg/apis/external_metrics
staging/src/k8s.io/metrics/pkg/apis/external_metrics/v1beta1
staging/src/k8s.io/metrics/pkg/apis/metrics
staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1
staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1
staging/src/k8s.io/metrics/pkg/client/custom_metrics
staging/src/k8s.io/metrics/pkg/client/custom_metrics/fake
staging/src/k8s.io/metrics/pkg/client/external_metrics
staging/src/k8s.io/metrics/pkg/client/external_metrics/fake
staging/src/k8s.io/sample-apiserver/pkg/apis/wardle
staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1
staging/src/k8s.io/sample-apiserver/pkg/apiserver
staging/src/k8s.io/sample-apiserver/pkg/cmd/server
staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/fischer
staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/flunder
staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller
staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1
test/e2e
test/e2e/apimachinery
test/e2e/apps
test/e2e/auth
test/e2e/autoscaling
test/e2e/chaosmonkey
test/e2e/common
test/e2e/framework
test/e2e/framework/metrics
test/e2e/framework/timer
test/e2e/instrumentation
test/e2e/instrumentation/logging
test/e2e/instrumentation/monitoring
test/e2e/kubectl
test/e2e/lifecycle
test/e2e/lifecycle/bootstrap
test/e2e/network
test/e2e/node
test/e2e/scalability
test/e2e/scheduling
test/e2e/servicecatalog
test/e2e/storage
test/e2e/storage/utils
test/e2e/storage/vsphere
test/e2e/ui
test/e2e/upgrades
test/e2e/upgrades/apps
test/e2e/upgrades/storage
test/e2e_kubeadm
test/e2e_node
test/e2e_node/builder
test/e2e_node/environment
test/e2e_node/remote
test/e2e_node/runner/remote
test/e2e_node/services
test/e2e_node/system
test/images/net/nat
test/images/netexec
test/images/nettest
test/images/no-snat-test
test/images/no-snat-test-proxy
test/images/resource-consumer
test/images/resource-consumer/common
test/images/resource-consumer/controller
test/integration
test/integration/auth
test/integration/evictions
test/integration/framework
test/integration/master
test/integration/replicaset
test/integration/replicationcontroller
test/integration/scheduler
test/integration/scheduler_perf
test/integration/volume
test/list
test/utils
test/utils/image
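The list above is the golint exception whitelist: packages listed here are known to fail golint and are tolerated, while any new failure outside the list is meant to break verification. A minimal Bash sketch of how such a whitelist is typically consumed (the exact hack/verify-golint.sh wiring is an assumption, not shown in this diff):

    # Sketch only: diff the current set of failing packages against the checked-in whitelist.
    failing=$(golint ./... 2>/dev/null | cut -d: -f1 | xargs -n1 dirname | sort -u)
    if ! diff <(echo "${failing}") hack/.golint_failures; then
      echo "golint failures changed; fix the new packages or update hack/.golint_failures"
      exit 1
    fi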

19
vendor/k8s.io/kubernetes/hack/.spelling_failures generated vendored Normal file

@@ -0,0 +1,19 @@
BUILD
CHANGELOG
OWNERS
api.pb.go
api.proto
docs/api-reference
docs/man
docs/user-guide
generated.pb.go
generated.proto
pkg/client/clientset_generated
pkg/client/informers/informers_generated/
pkg/generated
swagger
test/e2e/generated
third_party/
translations/
vendor/
zz_generated
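These entries form a skip list for spelling verification: paths and filename patterns matching them are excluded from the check. A rough Bash sketch of how a skip list like this can be applied (the misspell tool and the verify-spelling.sh wiring are assumptions):

    # Sketch only: spell-check tracked files, skipping anything matching the patterns above.
    git ls-files | grep -v -F -f hack/.spelling_failures | xargs misspell -error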

80
vendor/k8s.io/kubernetes/hack/BUILD generated vendored Normal file

@@ -0,0 +1,80 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//hack/boilerplate:all-srcs",
"//hack/e2e-internal:all-srcs",
"//hack/lib:all-srcs",
"//hack/make-rules:all-srcs",
],
tags = ["automanaged"],
)
sh_test(
name = "verify-boilerplate",
srcs = ["verify-boilerplate.sh"],
data = ["//:all-srcs"],
tags = ["manual"],
)
# Disable gofmt test until we can figure out how to access the gofmt binary.
# https://github.com/bazelbuild/rules_go/issues/511
#sh_test(
# name = "verify-gofmt",
# srcs = ["verify-gofmt.sh"],
# data = [
# "//:all-srcs",
# "@io_bazel_rules_go_toolchain//:toolchain",
# ],
# tags = ["manual"],
#)
test_suite(
name = "verify-all",
tags = ["manual"],
tests = [
"verify-boilerplate",
# "verify-gofmt",
],
)
go_binary(
name = "hack",
embed = [":go_default_library"],
)
go_test(
name = "go_default_test",
srcs = ["e2e_test.go"],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
)
go_library(
name = "go_default_library",
srcs = ["e2e.go"],
importpath = "k8s.io/kubernetes/hack",
)
sh_binary(
name = "update-mirror",
srcs = ["update-workspace-mirror.sh"],
args = ["$(location @workspace_urls//:urls.txt)"],
data = ["@workspace_urls//:urls.txt"],
)
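This BUILD file mainly exposes the hack/ sources to Bazel and defines a verify-boilerplate sh_test plus a verify-all test_suite; both are tagged "manual", so they are excluded from wildcard builds and have to be named explicitly. A usage sketch:

    # Sketch only: run the targets defined above directly with Bazel.
    bazel test //hack:verify-boilerplate
    bazel test //hack:verify-all        # currently just verify-boilerplate; verify-gofmt is commented out
    bazel run //hack:update-mirror      # sh_binary wrapping update-workspace-mirror.sh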

30
vendor/k8s.io/kubernetes/hack/OWNERS generated vendored Normal file

@@ -0,0 +1,30 @@
reviewers:
- cblecker
- eparis
- fejta
- ixdy
- jbeda
- juanvallejo
- lavalamp
- zmerlynn
- sttts
- gmarek
- vishh
approvers:
- cblecker
- deads2k
- eparis
- fejta
- ixdy
- jbeda
- lavalamp
- madhusudancs
- pwittrock
- shashidharatd
- zmerlynn
- sttts
- gmarek
- vishh
- liggitt
- soltysh # for sig-cli related stuff
- dims # for local-up-cluster related files


@@ -0,0 +1,3 @@
This file is autogenerated, but we've stopped checking such files into the
repository to reduce the need for rebases. Please run hack/generate-docs.sh to
populate this file.

27
vendor/k8s.io/kubernetes/hack/benchmark-go.sh generated vendored Executable file

@@ -0,0 +1,27 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
make test \
    WHAT="$*" \
    KUBE_COVER="" \
    KUBE_RACE=" " \
    KUBE_TEST_ARGS="-- -test.run='^X' -benchtime=1s -bench=. -benchmem"
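benchmark-go.sh is a thin wrapper around make test that disables coverage and the race detector and passes benchmark flags; its positional arguments become the WHAT package list. A usage sketch (the package path is only an example):

    # Sketch only: run the Go benchmarks of a single package through this wrapper.
    hack/benchmark-go.sh ./pkg/api/endpoints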

31
vendor/k8s.io/kubernetes/hack/boilerplate/BUILD generated vendored Normal file

@@ -0,0 +1,31 @@
package(default_visibility = ["//visibility:public"])
exports_files(glob(["*.txt"]))
py_test(
name = "boilerplate_test",
srcs = [
"boilerplate.py",
"boilerplate_test.py",
],
data = glob([
"*.txt",
"test/*",
]),
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//hack/boilerplate/test:all-srcs",
],
tags = ["automanaged"],
)


@@ -0,0 +1,14 @@
# Copyright YEAR The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@@ -0,0 +1,14 @@
# Copyright YEAR The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@@ -0,0 +1,14 @@
# Copyright YEAR The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@@ -0,0 +1,16 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/


@@ -0,0 +1,16 @@
/*
Copyright YEAR The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

227
vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate.py generated vendored Executable file

@@ -0,0 +1,227 @@
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import datetime
import difflib
import glob
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
    "filenames",
    help="list of files to check, all files if unspecified",
    nargs='*')

rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
    "--rootdir", default=rootdir, help="root directory to examine")

default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument(
    "--boilerplate-dir", default=default_boilerplate_dir)

parser.add_argument(
    "-v", "--verbose",
    help="give verbose output regarding why a file does not pass",
    action="store_true")

args = parser.parse_args()

verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")

def get_refs():
    refs = {}

    for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
        extension = os.path.basename(path).split(".")[1]

        ref_file = open(path, 'r')
        ref = ref_file.read().splitlines()
        ref_file.close()
        refs[extension] = ref

    return refs

def is_generated_file(filename, data, regexs):
    for d in skipped_ungenerated_files:
        if d in filename:
            return False

    p = regexs["generated"]
    return p.search(data)

def file_passes(filename, refs, regexs):
    try:
        f = open(filename, 'r')
    except Exception as exc:
        print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
        return False

    data = f.read()
    f.close()

    # determine if the file is automatically generated
    generated = is_generated_file(filename, data, regexs)

    basename = os.path.basename(filename)
    if generated:
        extension = "generatego"
    else:
        extension = file_extension(filename)

    if extension != "":
        ref = refs[extension]
    else:
        ref = refs[basename]

    # remove build tags from the top of Go files
    if extension == "go" or extension == "generatego":
        p = regexs["go_build_constraints"]
        (data, found) = p.subn("", data, 1)

    # remove shebang from the top of shell files
    if extension == "sh":
        p = regexs["shebang"]
        (data, found) = p.subn("", data, 1)

    data = data.splitlines()

    # if our test file is smaller than the reference it surely fails!
    if len(ref) > len(data):
        print('File %s smaller than reference (%d < %d)' %
              (filename, len(data), len(ref)),
              file=verbose_out)
        return False

    # trim our file to the same number of lines as the reference file
    data = data[:len(ref)]

    p = regexs["year"]
    for d in data:
        if p.search(d):
            if generated:
                print('File %s has the YEAR field, but it should not be in generated file' % filename, file=verbose_out)
            else:
                print('File %s has the YEAR field, but missing the year of date' % filename, file=verbose_out)
            return False

    if not generated:
        # Replace all occurrences of the regex "2014|2015|2016|2017|2018" with "YEAR"
        p = regexs["date"]
        for i, d in enumerate(data):
            (data[i], found) = p.subn('YEAR', d)
            if found != 0:
                break

    # if we don't match the reference at this point, fail
    if ref != data:
        print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
        if args.verbose:
            print(file=verbose_out)
            for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
                print(line, file=verbose_out)
            print(file=verbose_out)
        return False

    return True

def file_extension(filename):
    return os.path.splitext(filename)[1].split(".")[-1].lower()

skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh',
                "vendor", "test/e2e/generated/bindata.go", "hack/boilerplate/test",
                "pkg/generated/bindata.go"]

# list all the files contain 'DO NOT EDIT', but are not generated
skipped_ungenerated_files = ['hack/build-ui.sh', 'hack/lib/swagger.sh',
                             'hack/boilerplate/boilerplate.py']

def normalize_files(files):
    newfiles = []
    for pathname in files:
        if any(x in pathname for x in skipped_dirs):
            continue
        newfiles.append(pathname)
    for i, pathname in enumerate(newfiles):
        if not os.path.isabs(pathname):
            newfiles[i] = os.path.join(args.rootdir, pathname)
    return newfiles

def get_files(extensions):
    files = []
    if len(args.filenames) > 0:
        files = args.filenames
    else:
        for root, dirs, walkfiles in os.walk(args.rootdir):
            # don't visit certain dirs. This is just a performance improvement
            # as we would prune these later in normalize_files(). But doing it
            # cuts down the amount of filesystem walking we do and cuts down
            # the size of the file list
            for d in skipped_dirs:
                if d in dirs:
                    dirs.remove(d)

            for name in walkfiles:
                pathname = os.path.join(root, name)
                files.append(pathname)

    files = normalize_files(files)

    outfiles = []
    for pathname in files:
        basename = os.path.basename(pathname)
        extension = file_extension(pathname)
        if extension in extensions or basename in extensions:
            outfiles.append(pathname)
    return outfiles

def get_dates():
    years = datetime.datetime.now().year
    return '(%s)' % '|'.join((str(year) for year in range(2014, years+1)))

def get_regexs():
    regexs = {}
    # Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
    regexs["year"] = re.compile( 'YEAR' )
    # get_dates return 2014, 2015, 2016, 2017, or 2018 until the current year as a regex like: "(2014|2015|2016|2017|2018)";
    # company holder names can be anything
    regexs["date"] = re.compile(get_dates())
    # strip // +build \n\n build constraints
    regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
    # strip #!.* from shell scripts
    regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
    # Search for generated files
    regexs["generated"] = re.compile( 'DO NOT EDIT' )
    return regexs

def main():
    regexs = get_regexs()
    refs = get_refs()
    filenames = get_files(refs.keys())

    for filename in filenames:
        if not file_passes(filename, refs, regexs):
            print(filename, file=sys.stdout)

    return 0

if __name__ == "__main__":
    sys.exit(main())
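Per its argparse setup, boilerplate.py takes an optional list of filenames plus --rootdir, --boilerplate-dir, and -v/--verbose, and prints every file whose license header does not match the reference templates. A usage sketch (the file path is only an example):

    # Sketch only: check one file verbosely, then scan the whole tree and list offenders.
    ./hack/boilerplate/boilerplate.py -v pkg/kubelet/kubelet.go
    ./hack/boilerplate/boilerplate.py --rootdir . | sort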


@@ -0,0 +1,16 @@
#!/usr/bin/env python
# Copyright YEAR The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@@ -0,0 +1,14 @@
# Copyright YEAR The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@@ -0,0 +1,52 @@
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boilerplate
import unittest
import StringIO
import os
import sys
class TestBoilerplate(unittest.TestCase):
    """
    Note: run this test from the hack/boilerplate directory.

    $ python -m unittest boilerplate_test
    """

    def test_boilerplate(self):
        os.chdir("test/")

        class Args(object):
            def __init__(self):
                self.filenames = []
                self.rootdir = "."
                self.boilerplate_dir = "../"
                self.verbose = True

        # capture stdout
        old_stdout = sys.stdout
        sys.stdout = StringIO.StringIO()

        boilerplate.args = Args()
        ret = boilerplate.main()

        output = sorted(sys.stdout.getvalue().split())

        sys.stdout = old_stdout

        self.assertEquals(
            output, ['././fail.go', '././fail.py'])

28
vendor/k8s.io/kubernetes/hack/boilerplate/test/BUILD generated vendored Normal file

@@ -0,0 +1,28 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"fail.go",
"pass.go",
],
importpath = "k8s.io/kubernetes/hack/boilerplate/test",
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

19
vendor/k8s.io/kubernetes/hack/boilerplate/test/fail.go generated vendored Normal file

@@ -0,0 +1,19 @@
/*
Copyright 2014 The Kubernetes Authors.
fail
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test

17
vendor/k8s.io/kubernetes/hack/boilerplate/test/fail.py generated vendored Normal file

@@ -0,0 +1,17 @@
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# failed
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

17
vendor/k8s.io/kubernetes/hack/boilerplate/test/pass.go generated vendored Normal file

@@ -0,0 +1,17 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test

17
vendor/k8s.io/kubernetes/hack/boilerplate/test/pass.py generated vendored Normal file

@@ -0,0 +1,17 @@
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
True

31
vendor/k8s.io/kubernetes/hack/build-cross.sh generated vendored Executable file

@@ -0,0 +1,31 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is a vestigial redirection. Please do not add "real" logic.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
echo "NOTE: $0 has been replaced by 'make cross'"
echo
echo "The equivalent of this invocation is: "
echo " make cross"
echo
echo
make --no-print-directory -C "${KUBE_ROOT}" cross

37
vendor/k8s.io/kubernetes/hack/build-go.sh generated vendored Executable file

@@ -0,0 +1,37 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is a vestigial redirection. Please do not add "real" logic.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
# For help output
ARGHELP=""
if [[ "$#" -gt 0 ]]; then
ARGHELP="WHAT='$@'"
fi
echo "NOTE: $0 has been replaced by 'make' or 'make all'"
echo
echo "The equivalent of this invocation is: "
echo " make ${ARGHELP}"
echo
echo
make --no-print-directory -C "${KUBE_ROOT}" all WHAT="$*"

55
vendor/k8s.io/kubernetes/hack/build-ui.sh generated vendored Executable file

@@ -0,0 +1,55 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script builds ui assets into a single go datafile
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
cd "${KUBE_ROOT}"
if ! which go-bindata > /dev/null 2>&1 ; then
echo "Cannot find go-bindata. Install with \"go get github.com/jteeuwen/go-bindata/...\""
exit 1
fi
kube::util::ensure-temp-dir
readonly TMP_DATAFILE="${KUBE_TEMP}/datafile.go"
readonly SWAGGER_SRC="third_party/swagger-ui/..."
readonly SWAGGER_PKG="swagger"
function kube::hack::build_ui() {
local pkg="$1"
local src="$2"
local output_file="staging/src/k8s.io/apiserver/pkg/server/routes/data/${pkg}/datafile.go"
go-bindata -nocompress -o "${output_file}" -prefix ${PWD} -pkg "${pkg}" "${src}"
local year=$(date +%Y)
cat hack/boilerplate/boilerplate.generatego.txt > "${TMP_DATAFILE}"
echo -e "// generated by hack/build-ui.sh; DO NOT EDIT\n" >> "${TMP_DATAFILE}"
cat "${output_file}" >> "${TMP_DATAFILE}"
gofmt -s -w "${TMP_DATAFILE}"
mv "${TMP_DATAFILE}" "${output_file}"
}
kube::hack::build_ui "${SWAGGER_PKG}" "${SWAGGER_SRC}"
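build-ui.sh takes no arguments; it regenerates the embedded swagger datafile under staging/src/k8s.io/apiserver/pkg/server/routes/data/ and requires go-bindata on the PATH, as the script's own error message notes. A usage sketch:

    # Sketch only: install go-bindata (per the message above), then regenerate the datafile.
    go get github.com/jteeuwen/go-bindata/...
    hack/build-ui.sh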

242
vendor/k8s.io/kubernetes/hack/cherry_pick_pull.sh generated vendored Executable file

@@ -0,0 +1,242 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Checkout a PR from GitHub. (Yes, this is sitting in a Git tree. How
# meta.) Assumes you care about pulls from remote "upstream" and
# checks them out to a branch named:
# automated-cherry-pick-of-<pr>-<target branch>-<timestamp>
set -o errexit
set -o nounset
set -o pipefail
declare -r KUBE_ROOT="$(dirname "${BASH_SOURCE}")/.."
cd "${KUBE_ROOT}"
declare -r STARTINGBRANCH=$(git symbolic-ref --short HEAD)
declare -r REBASEMAGIC="${KUBE_ROOT}/.git/rebase-apply"
DRY_RUN=${DRY_RUN:-""}
REGENERATE_DOCS=${REGENERATE_DOCS:-""}
UPSTREAM_REMOTE=${UPSTREAM_REMOTE:-upstream}
FORK_REMOTE=${FORK_REMOTE:-origin}
if [[ -z ${GITHUB_USER:-} ]]; then
echo "Please export GITHUB_USER=<your-user> (or GH organization, if that's where your fork lives)"
exit 1
fi
if ! which hub > /dev/null; then
echo "Can't find 'hub' tool in PATH, please install from https://github.com/github/hub"
exit 1
fi
if [[ "$#" -lt 2 ]]; then
echo "${0} <remote branch> <pr-number>...: cherry pick one or more <pr> onto <remote branch> and leave instructions for proposing pull request"
echo
echo " Checks out <remote branch> and handles the cherry-pick of <pr> (possibly multiple) for you."
echo " Examples:"
echo " $0 upstream/release-3.14 12345 # Cherry-picks PR 12345 onto upstream/release-3.14 and proposes that as a PR."
echo " $0 upstream/release-3.14 12345 56789 # Cherry-picks PR 12345, then 56789 and proposes the combination as a single PR."
echo
echo " Set the DRY_RUN environment var to skip git push and creating PR."
echo " This is useful for creating patches to a release branch without making a PR."
echo " When DRY_RUN is set the script will leave you in a branch containing the commits you cherry-picked."
echo
echo " Set the REGENERATE_DOCS environment var to regenerate documentation for the target branch after picking the specified commits."
echo " This is useful when picking commits containing changes to API documentation."
echo
echo " Set UPSTREAM_REMOTE (default: upstream) and FORK_REMOTE (default: origin)"
echo " To override the default remote names to what you have locally."
exit 2
fi
if git_status=$(git status --porcelain --untracked=no 2>/dev/null) && [[ -n "${git_status}" ]]; then
echo "!!! Dirty tree. Clean up and try again."
exit 1
fi
if [[ -e "${REBASEMAGIC}" ]]; then
echo "!!! 'git rebase' or 'git am' in progress. Clean up and try again."
exit 1
fi
declare -r BRANCH="$1"
shift 1
declare -r PULLS=( "$@" )
function join { local IFS="$1"; shift; echo "$*"; }
declare -r PULLDASH=$(join - "${PULLS[@]/#/#}") # Generates something like "#12345-#56789"
declare -r PULLSUBJ=$(join " " "${PULLS[@]/#/#}") # Generates something like "#12345 #56789"
echo "+++ Updating remotes..."
git remote update "${UPSTREAM_REMOTE}" "${FORK_REMOTE}"
if ! git log -n1 --format=%H "${BRANCH}" >/dev/null 2>&1; then
echo "!!! '${BRANCH}' not found. The second argument should be something like ${UPSTREAM_REMOTE}/release-0.21."
echo " (In particular, it needs to be a valid, existing remote branch that I can 'git checkout'.)"
exit 1
fi
declare -r NEWBRANCHREQ="automated-cherry-pick-of-${PULLDASH}" # "Required" portion for tools.
declare -r NEWBRANCH="$(echo "${NEWBRANCHREQ}-${BRANCH}" | sed 's/\//-/g')"
declare -r NEWBRANCHUNIQ="${NEWBRANCH}-$(date +%s)"
echo "+++ Creating local branch ${NEWBRANCHUNIQ}"
cleanbranch=""
prtext=""
gitamcleanup=false
function return_to_kansas {
if [[ "${gitamcleanup}" == "true" ]]; then
echo
echo "+++ Aborting in-progress git am."
git am --abort >/dev/null 2>&1 || true
fi
# return to the starting branch and delete the PR text file
if [[ -z "${DRY_RUN}" ]]; then
echo
echo "+++ Returning you to the ${STARTINGBRANCH} branch and cleaning up."
git checkout -f "${STARTINGBRANCH}" >/dev/null 2>&1 || true
if [[ -n "${cleanbranch}" ]]; then
git branch -D "${cleanbranch}" >/dev/null 2>&1 || true
fi
if [[ -n "${prtext}" ]]; then
rm "${prtext}"
fi
fi
}
trap return_to_kansas EXIT
SUBJECTS=()
function make-a-pr() {
local rel="$(basename "${BRANCH}")"
echo
echo "+++ Creating a pull request on GitHub at ${GITHUB_USER}:${NEWBRANCH}"
# This looks like an unnecessary use of a tmpfile, but it avoids
# https://github.com/github/hub/issues/976 Otherwise stdin is stolen
# when we shove the heredoc at hub directly, tickling the ioctl
# crash.
prtext="$(mktemp -t prtext.XXXX)" # cleaned in return_to_kansas
local numandtitle=$(printf '%s\n' "${SUBJECTS[@]}")
cat >"${prtext}" <<EOF
Automated cherry pick of ${numandtitle}
Cherry pick of ${PULLSUBJ} on ${rel}.
${numandtitle}
EOF
hub pull-request -F "${prtext}" -h "${GITHUB_USER}:${NEWBRANCH}" -b "kubernetes:${rel}"
}
git checkout -b "${NEWBRANCHUNIQ}" "${BRANCH}"
cleanbranch="${NEWBRANCHUNIQ}"
gitamcleanup=true
for pull in "${PULLS[@]}"; do
echo "+++ Downloading patch to /tmp/${pull}.patch (in case you need to do this again)"
curl -o "/tmp/${pull}.patch" -sSL "http://pr.k8s.io/${pull}.patch"
echo
echo "+++ About to attempt cherry pick of PR. To reattempt:"
echo " $ git am -3 /tmp/${pull}.patch"
echo
git am -3 "/tmp/${pull}.patch" || {
conflicts=false
while unmerged=$(git status --porcelain | grep ^U) && [[ -n ${unmerged} ]] \
|| [[ -e "${REBASEMAGIC}" ]]; do
conflicts=true # <-- We should have detected conflicts once
echo
echo "+++ Conflicts detected:"
echo
(git status --porcelain | grep ^U) || echo "!!! None. Did you git am --continue?"
echo
echo "+++ Please resolve the conflicts in another window (and remember to 'git add / git am --continue')"
read -p "+++ Proceed (anything but 'y' aborts the cherry-pick)? [y/n] " -r
echo
if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then
echo "Aborting." >&2
exit 1
fi
done
if [[ "${conflicts}" != "true" ]]; then
echo "!!! git am failed, likely because of an in-progress 'git am' or 'git rebase'"
exit 1
fi
}
# set the subject
subject=$(grep -m 1 "^Subject" "/tmp/${pull}.patch" | sed -e 's/Subject: \[PATCH//g' | sed 's/.*] //')
SUBJECTS+=("#${pull}: ${subject}")
# remove the patch file from /tmp
rm -f "/tmp/${pull}.patch"
done
gitamcleanup=false
# Re-generate docs (if needed)
if [[ -n "${REGENERATE_DOCS}" ]]; then
echo
echo "Regenerating docs..."
if ! hack/generate-docs.sh; then
echo
echo "hack/generate-docs.sh FAILED to complete."
exit 1
fi
fi
if [[ -n "${DRY_RUN}" ]]; then
echo "!!! Skipping git push and PR creation because you set DRY_RUN."
echo "To return to the branch you were in when you invoked this script:"
echo
echo " git checkout ${STARTINGBRANCH}"
echo
echo "To delete this branch:"
echo
echo " git branch -D ${NEWBRANCHUNIQ}"
exit 0
fi
if git remote -v | grep ^${FORK_REMOTE} | grep kubernetes/kubernetes.git; then
echo "!!! You have ${FORK_REMOTE} configured as your kubernetes/kubernetes.git"
echo "This isn't normal. Leaving you with push instructions:"
echo
echo "+++ First manually push the branch this script created:"
echo
echo " git push REMOTE ${NEWBRANCHUNIQ}:${NEWBRANCH}"
echo
echo "where REMOTE is your personal fork (maybe ${UPSTREAM_REMOTE}? Consider swapping those.)."
echo "OR consider setting UPSTREAM_REMOTE and FORK_REMOTE to different values."
echo
make-a-pr
cleanbranch=""
exit 0
fi
echo
echo "+++ I'm about to do the following to push to GitHub (and I'm assuming ${FORK_REMOTE} is your personal fork):"
echo
echo " git push ${FORK_REMOTE} ${NEWBRANCHUNIQ}:${NEWBRANCH}"
echo
read -p "+++ Proceed (anything but 'y' aborts the cherry-pick)? [y/n] " -r
if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then
echo "Aborting." >&2
exit 1
fi
git push "${FORK_REMOTE}" -f "${NEWBRANCHUNIQ}:${NEWBRANCH}"
make-a-pr
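For orientation, a minimal usage sketch of this cherry-pick helper (the script path, release branch, and PR numbers are illustrative; GITHUB_USER, DRY_RUN and REGENERATE_DOCS are the variables read above):
# Cherry-pick two hypothetical PRs onto an upstream release branch and open a PR via hub:
GITHUB_USER=someone hack/cherry_pick_pull.sh upstream/release-1.11 12345 56789
# Same pick, but regenerate docs and stop before pushing or opening the PR:
DRY_RUN=1 REGENERATE_DOCS=1 GITHUB_USER=someone hack/cherry_pick_pull.sh upstream/release-1.11 12345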

34
vendor/k8s.io/kubernetes/hack/dev-build-and-push.sh generated vendored Executable file
View File

@@ -0,0 +1,34 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script will build a dev release and push it to an existing cluster.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
# Build a dev release
if ! make -f "${KUBE_ROOT}/Makefile" quick-release; then
  echo "Building a release failed!"
  exit 1
fi
# Now push this out to the cluster
"${KUBE_ROOT}/cluster/kube-push.sh"

35
vendor/k8s.io/kubernetes/hack/dev-build-and-up.sh generated vendored Executable file
View File

@@ -0,0 +1,35 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script will build a dev release and bring up a new cluster with that
# release.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
# Build a dev release
if ! make -f "${KUBE_ROOT}/Makefile" quick-release; then
  echo "Building the release failed!"
  exit 1
fi
# Now bring a new cluster up with that release.
"${KUBE_ROOT}/cluster/kube-up.sh"

49
vendor/k8s.io/kubernetes/hack/dev-push-hyperkube.sh generated vendored Executable file
View File

@@ -0,0 +1,49 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script builds hyperkube and then the hyperkube image.
# REGISTRY and VERSION must be set.
# Example usage:
# $ export REGISTRY=gcr.io/someone
# $ export VERSION=v1.4.0-testfix
# ./hack/dev-push-hyperkube.sh
# That will build and push gcr.io/someone/hyperkube-amd64:v1.4.0-testfix
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT="$(dirname "${BASH_SOURCE}")/.."
source "${KUBE_ROOT}/build/common.sh"
if [[ -z "${REGISTRY:-}" ]]; then
echo "REGISTRY must be set"
exit 1
fi
if [[ -z "${VERSION:-}" ]]; then
echo "VERSION must be set"
exit 1
fi
IMAGE="${REGISTRY}/hyperkube-amd64:${VERSION}"
kube::build::verify_prereqs
kube::build::build_image
kube::build::run_build_command make WHAT=cmd/hyperkube
kube::build::copy_output
make -C "${KUBE_ROOT}/cluster/images/hyperkube" build
docker push "${IMAGE}"

14
vendor/k8s.io/kubernetes/hack/e2e-internal/BUILD generated vendored Normal file
View File

@@ -0,0 +1,14 @@
package(default_visibility = ["//visibility:public"])
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -0,0 +1,33 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
: ${KUBECTL:=${KUBE_ROOT}/cluster/kubectl.sh}
: ${KUBE_CONFIG_FILE:="config-test.sh"}
export KUBECTL KUBE_CONFIG_FILE
source "${KUBE_ROOT}/cluster/kube-util.sh"
prepare-e2e
#TODO(colhom): spec and implement federated version of this
${KUBECTL} get nodes --no-headers | wc -l

32
vendor/k8s.io/kubernetes/hack/e2e-internal/e2e-down.sh generated vendored Executable file
View File

@@ -0,0 +1,32 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
: ${KUBECTL:=${KUBE_ROOT}/cluster/kubectl.sh}
: ${KUBE_CONFIG_FILE:="config-test.sh"}
export KUBECTL KUBE_CONFIG_FILE
source "${KUBE_ROOT}/cluster/kube-util.sh"
prepare-e2e
test-teardown

View File

@@ -0,0 +1,35 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
if [[ ! -z "${1:-}" ]]; then
export KUBE_GCE_ZONE="${1}"
fi
if [[ ! -z "${2:-}" ]]; then
export MULTIZONE="${2}"
fi
if [[ ! -z "${3:-}" ]]; then
export KUBE_REPLICATE_EXISTING_MASTER="${3}"
fi
if [[ ! -z "${4:-}" ]]; then
export KUBE_USE_EXISTING_MASTER="${4}"
fi
if [[ -z "${NUM_NODES:-}" ]]; then
export NUM_NODES=3
fi
source "${KUBE_ROOT}/hack/e2e-internal/e2e-up.sh"

View File

@@ -0,0 +1,33 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
if [[ ! -z "${1:-}" ]]; then
export KUBE_GCE_ZONE="${1}"
fi
if [[ ! -z "${2:-}" ]]; then
export MULTIZONE="${2}"
fi
if [[ ! -z "${3:-}" ]]; then
export KUBE_DELETE_NODES="${3}"
fi
if [[ ! -z "${4:-}" ]]; then
export KUBE_USE_EXISTING_MASTER="${4}"
fi
source "${KUBE_ROOT}/hack/e2e-internal/e2e-down.sh"

32
vendor/k8s.io/kubernetes/hack/e2e-internal/e2e-status.sh generated vendored Executable file
View File

@@ -0,0 +1,32 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
: ${KUBECTL:=${KUBE_ROOT}/cluster/kubectl.sh}
: ${KUBE_CONFIG_FILE:="config-test.sh"}
export KUBECTL KUBE_CONFIG_FILE
source "${KUBE_ROOT}/cluster/kube-util.sh"
prepare-e2e
${KUBECTL} version

32
vendor/k8s.io/kubernetes/hack/e2e-internal/e2e-up.sh generated vendored Executable file
View File

@@ -0,0 +1,32 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
: ${KUBECTL:=${KUBE_ROOT}/cluster/kubectl.sh}
: ${KUBE_CONFIG_FILE:="config-test.sh"}
export KUBECTL KUBE_CONFIG_FILE
source "${KUBE_ROOT}/cluster/kube-util.sh"
prepare-e2e
test-setup

43
vendor/k8s.io/kubernetes/hack/e2e-node-test.sh generated vendored Executable file
View File

@@ -0,0 +1,43 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is a vestigial redirection. Please do not add "real" logic.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
# For help output
ARGHELP=""
if [[ -n "${FOCUS:-}" ]]; then
ARGHELP="FOCUS='${FOCUS}' "
fi
if [[ -n "${SKIP:-}" ]]; then
ARGHELP="${ARGHELP}SKIP='${SKIP}'"
fi
echo "NOTE: $0 has been replaced by 'make test-e2e-node'"
echo
echo "This script supports a number of parameters passed as environment variables."
echo "Please see the Makefile for more details."
echo
echo "The equivalent of this invocation is: "
echo " make test-e2e-node ${ARGHELP}"
echo
echo
make --no-print-directory -C "${KUBE_ROOT}" test-e2e-node FOCUS="${FOCUS:-}" SKIP="${SKIP:-}"
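As a hedged illustration of the replacement invocation this shim points at (the focus/skip regexes are made up):
# Run only the node e2e specs matching FOCUS, skipping those matching SKIP:
make test-e2e-node FOCUS='MirrorPod' SKIP='Slow'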

170
vendor/k8s.io/kubernetes/hack/e2e.go generated vendored Normal file
View File

@@ -0,0 +1,170 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// User-interface for test-infra/kubetest/e2e.go
// Equivalent to go get -u k8s.io/test-infra/kubetest && kubetest "${@}"
package main
import (
"flag"
"fmt"
"go/build"
"log"
"os"
"os/exec"
"os/signal"
"path/filepath"
"strings"
"time"
)
type flags struct {
get bool
old time.Duration
args []string
}
const (
getDefault = true
oldDefault = 24 * time.Hour
)
func parse(args []string) (flags, error) {
fs := flag.NewFlagSet(args[0], flag.ContinueOnError)
get := fs.Bool("get", getDefault, "go get -u kubetest if old or not installed")
old := fs.Duration("old", oldDefault, "Consider kubetest old if it exceeds this")
verbose := fs.Bool("v", true, "this flag is translated to kubetest's --verbose-commands")
var a []string
if err := fs.Parse(args[1:]); err == flag.ErrHelp {
os.Stderr.WriteString(" -- kubetestArgs\n")
os.Stderr.WriteString(" All flags after -- are passed to the kubetest program\n")
return flags{}, err
} else if err != nil {
log.Print("NOTICE: go run hack/e2e.go is now a shim for test-infra/kubetest")
log.Printf(" Usage: go run hack/e2e.go [--get=%v] [--old=%v] -- [KUBETEST_ARGS]", getDefault, oldDefault)
log.Print(" The separator is required to use --get or --old flags")
log.Print(" The -- flag separator also suppresses this message")
a = args[len(args)-fs.NArg()-1:]
} else {
a = append(a, fmt.Sprintf("--verbose-commands=%t", *verbose))
a = append(a, fs.Args()...)
}
return flags{*get, *old, a}, nil
}
func main() {
log.SetFlags(log.LstdFlags | log.Lshortfile)
f, err := parse(os.Args)
if err != nil {
os.Exit(2)
}
t := newTester()
k, err := t.getKubetest(f.get, f.old)
if err != nil {
log.Fatalf("err: %v", err)
}
log.Printf("Calling kubetest %v...", strings.Join(f.args, " "))
if err = t.wait(k, f.args...); err != nil {
log.Fatalf("err: %v", err)
}
log.Print("Done")
}
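// wait runs cmd with the given args, forwarding an interrupt signal (e.g. Ctrl-C)
// to the child process, and blocks until the command exits.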
func wait(cmd string, args ...string) error {
sigChannel := make(chan os.Signal, 1)
signal.Notify(sigChannel, os.Interrupt)
c := exec.Command(cmd, args...)
c.Stdout = os.Stdout
c.Stderr = os.Stderr
if err := c.Start(); err != nil {
return err
}
go func() {
sig := <-sigChannel
if err := c.Process.Signal(sig); err != nil {
log.Fatalf("could not send %s signal %s: %v", cmd, sig, err)
}
}()
return c.Wait()
}
// Struct that allows unit tests to override functionality.
type tester struct {
// os.Stat
stat func(string) (os.FileInfo, error)
// exec.LookPath
lookPath func(string) (string, error)
// build.Default.GOPATH
goPath string
wait func(string, ...string) error
}
func newTester() tester {
return tester{os.Stat, exec.LookPath, build.Default.GOPATH, wait}
}
// Try to find kubetest, either GOPATH/bin/kubetest or PATH
func (t tester) lookKubetest() (string, error) {
// Check for kubetest in GOPATH/bin
if t.goPath != "" {
p := filepath.Join(t.goPath, "bin", "kubetest")
_, err := t.stat(p)
if err == nil {
return p, nil
}
}
// Check for kubetest in PATH
p, err := t.lookPath("kubetest")
return p, err
}
// Upgrade if kubetest does not exist or has not been updated today
func (t tester) getKubetest(get bool, old time.Duration) (string, error) {
// Find kubetest installation
p, err := t.lookKubetest()
if err == nil && !get {
return p, nil // Installed, Skip update
}
if err == nil {
// Installed recently?
if s, err := t.stat(p); err != nil {
return p, err // Cannot stat
} else if time.Since(s.ModTime()) <= old {
return p, nil // Recently updated
} else if t.goPath == "" {
log.Print("Skipping kubetest upgrade because $GOPATH is empty")
return p, nil
}
log.Printf("The kubetest binary is older than %s.", old)
}
if t.goPath == "" {
return "", fmt.Errorf("Cannot install kubetest until $GOPATH is set")
}
log.Print("Updating kubetest binary...")
cmd := []string{"go", "get", "-u", "k8s.io/test-infra/kubetest"}
if err = t.wait(cmd[0], cmd[1:]...); err != nil {
return "", fmt.Errorf("%s: %v", strings.Join(cmd, " "), err) // Could not upgrade
}
if p, err = t.lookKubetest(); err != nil {
return "", err // Cannot find kubetest
} else if err = t.wait("touch", p); err != nil {
return "", err // Could not touch
} else {
return p, nil // Updated modtime
}
}
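A sketch of how this shim is typically driven; the flag values are illustrative, and everything after "--" is forwarded to kubetest as described in parse above:
# Refresh kubetest if the installed binary is older than 2h, then hand it the remaining args:
go run hack/e2e.go --get=true --old=2h -- [KUBETEST_ARGS]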

373
vendor/k8s.io/kubernetes/hack/e2e_test.go generated vendored Normal file
View File

@@ -0,0 +1,373 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Unit tests for hack/e2e.go shim
package main
import (
"errors"
"flag"
"fmt"
"os"
"path/filepath"
"reflect"
"testing"
"time"
)
type FileInfo struct {
when time.Time
}
func (f FileInfo) Name() string {
return "fake-file"
}
func (f FileInfo) Size() int64 {
return 0
}
func (f FileInfo) Mode() os.FileMode {
return 0
}
func (f FileInfo) ModTime() time.Time {
return f.when
}
func (f FileInfo) IsDir() bool {
return false
}
func (f FileInfo) Sys() interface{} {
return f
}
func TestParse(t *testing.T) {
cases := []struct {
args []string
expected flags
err error
}{
{
[]string{"foo", "-v=false"},
flags{getDefault, oldDefault, []string{"--verbose-commands=false"}},
nil,
},
{
[]string{"foo", "-v"},
flags{getDefault, oldDefault, []string{"--verbose-commands=true"}},
nil,
},
{
[]string{"hello", "world"},
flags{getDefault, oldDefault, []string{"--verbose-commands=true", "world"}},
nil,
},
{
[]string{"hello", "--", "--venus", "--karaoke"},
flags{getDefault, oldDefault, []string{"--verbose-commands=true", "--venus", "--karaoke"}},
nil,
},
{
[]string{"hello", "--alpha", "--beta"},
flags{getDefault, oldDefault, []string{"--alpha", "--beta"}},
nil,
},
{
[]string{"so", "--get", "--boo"},
flags{true, oldDefault, []string{"--boo"}},
nil,
},
{
[]string{"omg", "--get=false", "--", "ugh"},
flags{false, oldDefault, []string{"--verbose-commands=true", "ugh"}},
nil,
},
{
[]string{"wee", "--old=5m", "--get"},
flags{true, 5 * time.Minute, []string{"--verbose-commands=true"}},
nil,
},
{
[]string{"fun", "--times", "--old=666s"},
flags{getDefault, oldDefault, []string{"--times", "--old=666s"}},
nil,
},
{
[]string{"wut", "-h"},
flags{},
flag.ErrHelp,
},
{
[]string{"wut", "--", "-h"},
flags{getDefault, oldDefault, []string{"--verbose-commands=true", "-h"}},
nil,
},
}
for i, c := range cases {
a, err := parse(c.args)
if err != c.err {
t.Errorf("%d: a=%v != e%v", i, err, c.err)
}
e := c.expected
if a.get != e.get {
t.Errorf("%d: a=%v != e=%v", i, a.get, e.get)
}
if a.old != e.old {
t.Errorf("%d: a=%v != e=%v", i, a.old, e.old)
}
if !reflect.DeepEqual(a.args, e.args) {
t.Errorf("%d: a=%v != e=%v", i, a.args, e.args)
}
}
}
func TestLook(t *testing.T) {
lpf := errors.New("LookPath failed")
sf := errors.New("Stat failed")
lpnc := errors.New("LookPath should not be called")
snc := errors.New("Stat should not be called")
cases := []struct {
stat error
lookPath error
goPath string
expected error
}{
{ // GOPATH set, stat succeeds returns gopath
stat: nil,
lookPath: lpnc,
goPath: "fake-gopath/",
expected: nil,
},
{ // GOPATH set, stat fails, terms on lookpath
stat: sf,
lookPath: lpf,
goPath: "fake-gopath/",
expected: lpf,
},
{ // GOPATH unset, stat not called, terms on lookpath
stat: snc,
lookPath: lpf,
goPath: "",
expected: lpf,
},
{ // GOPATH unset, stat not called, lookpath matches
stat: snc,
lookPath: nil,
goPath: "",
expected: nil,
},
}
for _, c := range cases {
l := tester{
func(string) (os.FileInfo, error) {
return FileInfo{}, c.stat
},
func(string) (string, error) {
if c.lookPath != nil {
return "FAILED", c.lookPath
}
return "$PATH-FOUND", nil
},
c.goPath,
nil, // wait
}
if _, err := l.lookKubetest(); err != c.expected {
t.Errorf("err: %s != %s", err, c.expected)
}
}
}
func TestGetKubetest(t *testing.T) {
gp := "fake-gopath"
gpk := filepath.Join(gp, "bin", "kubetest")
p := "PATH"
pk := filepath.Join(p, "kubetest")
eu := errors.New("upgrade failed")
euVerbose := fmt.Errorf("go get -u k8s.io/test-infra/kubetest: %v", eu)
et := errors.New("touch failed")
cases := []struct {
name string
get bool
old time.Duration
stat string // stat succeeds on this file
path bool // file exists on path
age time.Duration // age of mod time on file
upgraded bool // go get -u succeeds
touched bool // touch succeeds
goPath string // GOPATH var
returnPath string
returnError error
}{
{name: "0: Pass when on GOPATH/bin",
get: false,
old: 0,
stat: gpk,
path: false,
age: 100,
upgraded: false,
touched: false,
goPath: gp,
returnPath: gpk,
returnError: nil,
},
{name: "1: Pass when on PATH",
get: false,
old: 0,
stat: pk,
path: true,
age: 100,
upgraded: false,
touched: false,
goPath: gp,
returnPath: pk,
returnError: nil,
},
{name: "2: Don't upgrade if on PATH and GOPATH is ''",
get: true,
old: 0,
stat: pk,
path: true,
age: 100,
upgraded: false,
touched: false,
goPath: "",
returnPath: pk,
returnError: nil,
},
{name: "3: Don't upgrade on PATH when young.",
get: true,
old: time.Hour,
stat: pk,
path: true,
age: time.Second,
upgraded: false,
touched: false,
goPath: gp,
returnPath: pk,
returnError: nil,
},
{name: "4: Upgrade if old but GOPATH is set.",
get: true,
old: 0,
stat: pk,
path: true,
age: time.Second,
upgraded: true,
touched: true,
goPath: gp,
returnPath: pk,
returnError: nil,
},
{name: "5: Fail if upgrade fails",
get: true,
old: 0,
stat: pk,
path: true,
age: time.Second,
upgraded: false,
touched: false,
goPath: gpk,
returnPath: "",
returnError: euVerbose,
},
{name: "6: Fail if touch fails",
get: true,
old: 0,
stat: pk,
path: true,
age: time.Second,
upgraded: true,
touched: false,
goPath: gpk,
returnPath: "",
returnError: et,
},
}
for i, c := range cases {
didUp := false
didTouch := false
l := tester{
stat: func(p string) (os.FileInfo, error) {
// stat
if p != c.stat {
return nil, fmt.Errorf("Failed to find %s", p)
}
return FileInfo{time.Now().Add(c.age * -1)}, nil
},
lookPath: func(name string) (string, error) {
if c.path {
return filepath.Join(p, name), nil
}
return "", fmt.Errorf("Not on path: %s", name)
},
goPath: c.goPath,
wait: func(cmd string, args ...string) error {
if cmd == "go" {
if c.upgraded {
didUp = true
return nil
}
return eu
}
if c.touched {
didTouch = true
return nil
}
return et
},
}
p, e := l.getKubetest(c.get, c.old)
if p != c.returnPath {
t.Errorf("%d: test=%q returnPath %q != %q", i, c.name, p, c.returnPath)
}
if e == nil || c.returnError == nil {
if e != c.returnError {
t.Errorf("%d: test=%q returnError %q != %q", i, c.name, e, c.returnError)
}
} else {
if e.Error() != c.returnError.Error() {
t.Errorf("%d: test=%q returnError %q != %q", i, c.name, e, c.returnError)
}
}
if didUp != c.upgraded {
t.Errorf("%d: test=%q bad upgrade state of %v", i, c.name, didUp)
}
if didTouch != c.touched {
t.Errorf("%d: test=%q bad touch state of %v", i, c.name, didTouch)
}
}
}

View File

@@ -0,0 +1,45 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM java:7-jre
RUN apt-get update && apt-get install -y \
asciidoctor \
unzip \
--no-install-recommends \
&& rm -rf /var/lib/apt/lists/*
# Install gradle
RUN wget -O /tmp/gradle.zip https://services.gradle.org/distributions/gradle-2.5-bin.zip \
&& mkdir -p build/ \
&& unzip /tmp/gradle.zip -d build/ \
&& rm /tmp/gradle.zip \
&& mkdir -p gradle-cache/
ENV GRADLE_USER_HOME=/gradle-cache
COPY build.gradle build/
COPY gen-swagger-docs.sh build/
# Run the script once to download the dependent java libraries into the image
RUN mkdir -p /output /swagger-source \
&& wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/api/swagger-spec/v1.json -O /swagger-source/v1.json \
&& wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/pkg/api/v1/register.go -O /register.go \
&& build/gen-swagger-docs.sh v1 \
&& rm -rf /output/* /swagger-source/* /register.go
RUN chmod -R 777 build/ \
&& chmod -R 777 gradle-cache/
ENTRYPOINT ["build/gen-swagger-docs.sh"]

View File

@@ -0,0 +1,18 @@
This folder contains the sources needed to build the gen-swagger-doc container.
To build the container image,
```
$ sudo docker build -t k8s.gcr.io/gen-swagger-docs:v1 .
```
To generate the html docs,
```
$ ./gen-swagger-docs.sh <API version> <absolute output path, default to PWD>
```
The generated definitions.html and operations.html will be stored in the output path.
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/hack/gen-swagger-doc/README.md?pixel)]()
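Putting the Dockerfile and script together, one plausible way to run the built image (paths and the image tag are illustrative; per the script, the container expects the swagger spec under /swagger-source, a register.go at /register.go, and writes the HTML into /output):
docker run --rm \
  -v "$(pwd)/swagger-source:/swagger-source" \
  -v "$(pwd)/register.go:/register.go" \
  -v "$(pwd)/output:/output" \
  k8s.gcr.io/gen-swagger-docs:v1 v1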

View File

@@ -0,0 +1,18 @@
buildscript {
repositories {
mavenLocal()
jcenter()
}
dependencies {
classpath 'io.github.robwin:swagger2markup:0.6.0'
}
}
task gendocs << {
io.github.robwin.swagger2markup.Swagger2MarkupConverter
.from("./input.json")
.build()
.intoFolder("./");
println '*** generating docs to ./'
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,69 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script to generate docs from the latest swagger spec.
set -o errexit
set -o nounset
set -o pipefail
cd /build
# gendocs takes "input.json" as the input swagger spec.
# $1 is expected to be <group>_<version>
cp /swagger-source/"$1".json input.json
./gradle-2.5/bin/gradle gendocs --info
# insert a TOC for top level API objects
buf="== Top Level API Objects\n\n"
top_level_models=$(grep '&[A-Za-z]*{},' /register.go | sed 's/.*&//;s/{},//')
# check if the top level models exist in the definitions.adoc. If they exist,
# their name will be <version>.<model_name>
VERSION="${1#*_}"
for m in $top_level_models
do
if grep -xq "=== ${VERSION}.$m" ./definitions.adoc
then
buf+="* <<${VERSION}.$m>>\n"
fi
done
sed -i "1i $buf" ./definitions.adoc
# fix the links in .adoc, replace <<x.y>> with link:definitions.html#_x_y[x.y], and lowercase the _x_y part
sed -i -e 's|<<\(.*\)\.\(.*\)>>|link:#_\L\1_\2\E[\1.\2]|g' ./definitions.adoc
sed -i -e 's|<<\(.*\)\.\(.*\)>>|link:../definitions#_\L\1_\2\E[\1.\2]|g' ./paths.adoc
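# For example (illustrative): a cross-reference like <<v1.Pod>> becomes
# link:#_v1_pod[v1.Pod] in definitions.adoc and
# link:../definitions#_v1_pod[v1.Pod] in paths.adoc.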
# fix the link to <<any>>
sed -i -e 's|<<any>>|link:#_any[any]|g' ./definitions.adoc
sed -i -e 's|<<any>>|link:../definitions#_any[any]|g' ./paths.adoc
# change the title of paths.adoc from "paths" to "operations"
sed -i 's|== Paths|== Operations|g' ./paths.adoc
# $$ has special meaning in asciidoc, so we need to escape it
sed -i 's|\$\$|+++$$+++|g' ./definitions.adoc
echo -e "=== any\nRepresents an untyped JSON map - see the description of the field for more info about the structure of this object." >> ./definitions.adoc
asciidoctor definitions.adoc
asciidoctor paths.adoc
cp definitions.html /output/
cp paths.html /output/operations.html
echo "SUCCESS"

88
vendor/k8s.io/kubernetes/hack/generate-bindata.sh generated vendored Executable file
View File

@@ -0,0 +1,88 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o pipefail
set -o nounset
export KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
source "${KUBE_ROOT}/hack/lib/logging.sh"
if [[ ! -d "${KUBE_ROOT}/pkg" ]]; then
echo "${KUBE_ROOT}/pkg not detected. This script should be run from a location where the source dirs are available."
exit 1
fi
# kube::golang::build_kube_toolchain installs the vendored go-bindata in
# $GOPATH/bin, so make sure that's explicitly part of our $PATH.
export PATH="${KUBE_OUTPUT_BINPATH}:${PATH}"
if ! which go-bindata &>/dev/null ; then
echo "Cannot find go-bindata."
exit 5
fi
# run the generation from the root directory for stable output
pushd "${KUBE_ROOT}" >/dev/null
# These are files for e2e tests.
BINDATA_OUTPUT="test/e2e/generated/bindata.go"
# IMPORTANT: if you make any changes to these arguments, you must also update
# test/e2e/generated/BUILD and/or build/bindata.bzl.
go-bindata -nometadata -o "${BINDATA_OUTPUT}.tmp" -pkg generated \
-ignore .jpg -ignore .png -ignore .md -ignore 'BUILD(\.bazel)?' \
"test/e2e/testing-manifests/..." \
"test/images/..." \
"test/fixtures/..."
gofmt -s -w "${BINDATA_OUTPUT}.tmp"
# Here we compare and overwrite only if different, to avoid updating the
# timestamp and triggering a rebuild. The 'cat' redirect trick preserves the
# file permissions of the target file.
if ! cmp -s "${BINDATA_OUTPUT}.tmp" "${BINDATA_OUTPUT}" ; then
  cat "${BINDATA_OUTPUT}.tmp" > "${BINDATA_OUTPUT}"
  V=2 kube::log::info "Generated bindata file: ${BINDATA_OUTPUT} has $(wc -l < "${BINDATA_OUTPUT}") lines of lovely automated artifacts"
else
V=2 kube::log::info "No changes in generated bindata file: ${BINDATA_OUTPUT}"
fi
rm -f "${BINDATA_OUTPUT}.tmp"
# These are files for runtime code
BINDATA_OUTPUT="pkg/generated/bindata.go"
# IMPORTANT: if you make any changes to these arguments, you must also update
# pkg/generated/BUILD and/or build/bindata.bzl.
go-bindata -nometadata -nocompress -o "${BINDATA_OUTPUT}.tmp" -pkg generated \
-ignore .jpg -ignore .png -ignore .md -ignore 'BUILD(\.bazel)?' \
"translations/..."
gofmt -s -w "${BINDATA_OUTPUT}.tmp"
# Here we compare and overwrite only if different, to avoid updating the
# timestamp and triggering a rebuild. The 'cat' redirect trick preserves the
# file permissions of the target file.
if ! cmp -s "${BINDATA_OUTPUT}.tmp" "${BINDATA_OUTPUT}" ; then
  cat "${BINDATA_OUTPUT}.tmp" > "${BINDATA_OUTPUT}"
  V=2 kube::log::info "Generated bindata file: ${BINDATA_OUTPUT} has $(wc -l < "${BINDATA_OUTPUT}") lines of lovely automated artifacts"
else
V=2 kube::log::info "No changes in generated bindata file: ${BINDATA_OUTPUT}"
fi
rm -f "${BINDATA_OUTPUT}.tmp"
popd >/dev/null

49
vendor/k8s.io/kubernetes/hack/generate-docs.sh generated vendored Executable file
View File

@@ -0,0 +1,49 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is not intended to be run automatically. It is meant to be run
# immediately before exporting docs. We do not want to check these documents in
# by default.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
BINS=(
cmd/gendocs
cmd/genkubedocs
cmd/genman
cmd/genyaml
)
make -C "${KUBE_ROOT}" WHAT="${BINS[*]}"
kube::util::ensure-temp-dir
kube::util::gen-docs "${KUBE_TEMP}"
# remove all of the old docs
kube::util::remove-gen-docs
# copy fresh docs into the repo.
# the shopt is so that we get docs/.generated_docs from the glob.
shopt -s dotglob
cp -af "${KUBE_TEMP}"/* "${KUBE_ROOT}"
shopt -u dotglob

83
vendor/k8s.io/kubernetes/hack/get-build.sh generated vendored Executable file
View File

@@ -0,0 +1,83 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/cluster/common.sh"
declare -r KUBE_RELEASE_BUCKET_URL="https://storage.googleapis.com/kubernetes-release"
declare -r KUBE_DEV_RELEASE_BUCKET_URL="https://storage.googleapis.com/kubernetes-release-dev"
declare -r KUBE_TAR_NAME="kubernetes.tar.gz"
usage() {
echo "${0} [-v] <version number or publication>"
echo " -v: Don't get tars, just print the version number"
echo ""
echo ' Version number or publication is either a proper version number'
echo ' (e.g. "v1.0.6", "v1.2.0-alpha.1.881+376438b69c7612") or a version'
echo ' publication of the form <bucket>/<version> (e.g. "release/stable",'
echo ' "ci/latest-1"). Some common ones are:'
echo ' - "release/stable"'
echo ' - "release/latest"'
echo ' - "ci/latest"'
echo ' See the docs on getting builds for more information about version'
echo ' publication.'
}
print_version=false
while getopts ":vh" opt; do
case ${opt} in
v)
print_version="true"
;;
h)
usage
exit 0
;;
\?)
echo "Invalid option: -$OPTARG" >&2
usage
exit 1
;;
esac
done
shift $((OPTIND-1))
if [[ $# -ne 1 ]]; then
usage
exit 1
fi
set_binary_version "${1}"
if [[ "${print_version}" == "true" ]]; then
echo "${KUBE_VERSION}"
else
echo "Using version at ${1}: ${KUBE_VERSION}" >&2
if [[ ${KUBE_VERSION} =~ ${KUBE_RELEASE_VERSION_REGEX} ]]; then
curl --fail -o "kubernetes-${KUBE_VERSION}.tar.gz" "${KUBE_RELEASE_BUCKET_URL}/release/${KUBE_VERSION}/${KUBE_TAR_NAME}"
elif [[ ${KUBE_VERSION} =~ ${KUBE_CI_VERSION_REGEX} ]]; then
curl --fail -o "kubernetes-${KUBE_VERSION}.tar.gz" "${KUBE_DEV_RELEASE_BUCKET_URL}/ci/${KUBE_VERSION}/${KUBE_TAR_NAME}"
else
echo "Version doesn't match regexp" >&2
exit 1
fi
fi
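A couple of illustrative invocation examples for the script above (the version publications shown are the ones mentioned in the usage text):
# Print the resolved version of the latest CI build without downloading anything:
hack/get-build.sh -v ci/latest
# Download kubernetes-<version>.tar.gz for the current stable release:
hack/get-build.sh release/stable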

170
vendor/k8s.io/kubernetes/hack/ginkgo-e2e.sh generated vendored Executable file
View File

@@ -0,0 +1,170 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/cluster/common.sh"
source "${KUBE_ROOT}/hack/lib/init.sh"
# Find the ginkgo binary build as part of the release.
ginkgo=$(kube::util::find-binary "ginkgo")
e2e_test=$(kube::util::find-binary "e2e.test")
# --- Setup some env vars.
GINKGO_PARALLEL=${GINKGO_PARALLEL:-n} # set to 'y' to run tests in parallel
CLOUD_CONFIG=${CLOUD_CONFIG:-""}
# If 'y', Ginkgo's reporter will not print out in color when tests are run
# in parallel
GINKGO_NO_COLOR=${GINKGO_NO_COLOR:-n}
# If 'y', will rerun failed tests once to give them a second chance.
GINKGO_TOLERATE_FLAKES=${GINKGO_TOLERATE_FLAKES:-n}
: ${KUBECTL:="${KUBE_ROOT}/cluster/kubectl.sh"}
: ${KUBE_CONFIG_FILE:="config-test.sh"}
export KUBECTL KUBE_CONFIG_FILE
source "${KUBE_ROOT}/cluster/kube-util.sh"
function detect-master-from-kubeconfig() {
export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
local cc=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.current-context}")
if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then
cc="${KUBE_CONTEXT}"
fi
local cluster=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.contexts[?(@.name == \"${cc}\")].context.cluster}")
KUBE_MASTER_URL=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.clusters[?(@.name == \"${cluster}\")].cluster.server}")
}
# ---- Do cloud-provider-specific setup
if [[ -n "${KUBERNETES_CONFORMANCE_TEST:-}" ]]; then
echo "Conformance test: not doing test setup."
KUBERNETES_PROVIDER=${KUBERNETES_CONFORMANCE_PROVIDER:-"skeleton"}
detect-master-from-kubeconfig
auth_config=(
"--kubeconfig=${KUBECONFIG}"
)
else
echo "Setting up for KUBERNETES_PROVIDER=\"${KUBERNETES_PROVIDER}\"."
prepare-e2e
detect-master >/dev/null
KUBE_MASTER_URL="${KUBE_MASTER_URL:-https://${KUBE_MASTER_IP:-}}"
auth_config=(
"--kubeconfig=${KUBECONFIG:-$DEFAULT_KUBECONFIG}"
)
fi
if [[ -n "${NODE_INSTANCE_PREFIX:-}" ]]; then
NODE_INSTANCE_GROUP="${NODE_INSTANCE_PREFIX}-group"
fi
if [[ "${KUBERNETES_PROVIDER}" == "gce" ]]; then
set_num_migs
NODE_INSTANCE_GROUP=""
for ((i=1; i<=${NUM_MIGS}; i++)); do
if [[ $i == ${NUM_MIGS} ]]; then
# We are assigning the same mig names as the create-nodes function from cluster/gce/util.sh.
NODE_INSTANCE_GROUP="${NODE_INSTANCE_GROUP}${NODE_INSTANCE_PREFIX}-group"
else
NODE_INSTANCE_GROUP="${NODE_INSTANCE_GROUP}${NODE_INSTANCE_PREFIX}-group-${i},"
fi
done
fi
# TODO(kubernetes/test-infra#3330): Allow NODE_INSTANCE_GROUP to be
# set before we get here, which eliminates any cluster/gke use if
# KUBERNETES_CONFORMANCE_PROVIDER is set to "gke".
if [[ -z "${NODE_INSTANCE_GROUP:-}" ]] && [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then
detect-node-instance-groups
NODE_INSTANCE_GROUP=$(kube::util::join , "${NODE_INSTANCE_GROUPS[@]}")
fi
if [[ "${KUBERNETES_PROVIDER}" == "azure" ]]; then
if [[ ${CLOUD_CONFIG} == "" ]]; then
echo "Missing azure cloud config"
exit 1
fi
fi
ginkgo_args=()
if [[ -n "${CONFORMANCE_TEST_SKIP_REGEX:-}" ]]; then
ginkgo_args+=("--skip=${CONFORMANCE_TEST_SKIP_REGEX}")
ginkgo_args+=("--seed=1436380640")
fi
if [[ -n "${GINKGO_PARALLEL_NODES:-}" ]]; then
ginkgo_args+=("--nodes=${GINKGO_PARALLEL_NODES}")
elif [[ ${GINKGO_PARALLEL} =~ ^[yY]$ ]]; then
ginkgo_args+=("--nodes=25")
fi
if [[ "${GINKGO_UNTIL_IT_FAILS:-}" == true ]]; then
ginkgo_args+=("--untilItFails=true")
fi
FLAKE_ATTEMPTS=1
if [[ "${GINKGO_TOLERATE_FLAKES}" == "y" ]]; then
FLAKE_ATTEMPTS=2
fi
if [[ "${GINKGO_NO_COLOR}" == "y" ]]; then
ginkgo_args+=("--noColor")
fi
# The --host setting is used only when providing --auth_config
# If --kubeconfig is used, the host to use is retrieved from the .kubeconfig
# file and the one provided with --host is ignored.
# Add path for things like running kubectl binary.
export PATH=$(dirname "${e2e_test}"):"${PATH}"
"${ginkgo}" "${ginkgo_args[@]:+${ginkgo_args[@]}}" "${e2e_test}" -- \
"${auth_config[@]:+${auth_config[@]}}" \
--ginkgo.flakeAttempts="${FLAKE_ATTEMPTS}" \
--host="${KUBE_MASTER_URL}" \
--provider="${KUBERNETES_PROVIDER}" \
--gce-project="${PROJECT:-}" \
--gce-zone="${ZONE:-}" \
--gce-region="${REGION:-}" \
--gce-multizone="${MULTIZONE:-false}" \
--gke-cluster="${CLUSTER_NAME:-}" \
--kube-master="${KUBE_MASTER:-}" \
--cluster-tag="${CLUSTER_ID:-}" \
--cloud-config-file="${CLOUD_CONFIG:-}" \
--repo-root="${KUBE_ROOT}" \
--node-instance-group="${NODE_INSTANCE_GROUP:-}" \
--prefix="${KUBE_GCE_INSTANCE_PREFIX:-e2e}" \
--network="${KUBE_GCE_NETWORK:-${KUBE_GKE_NETWORK:-e2e}}" \
--node-tag="${NODE_TAG:-}" \
--master-tag="${MASTER_TAG:-}" \
--cluster-monitoring-mode="${KUBE_ENABLE_CLUSTER_MONITORING:-standalone}" \
--prometheus-monitoring="${KUBE_ENABLE_PROMETHEUS_MONITORING:-false}" \
${KUBE_CONTAINER_RUNTIME:+"--container-runtime=${KUBE_CONTAINER_RUNTIME}"} \
${MASTER_OS_DISTRIBUTION:+"--master-os-distro=${MASTER_OS_DISTRIBUTION}"} \
${NODE_OS_DISTRIBUTION:+"--node-os-distro=${NODE_OS_DISTRIBUTION}"} \
${NUM_NODES:+"--num-nodes=${NUM_NODES}"} \
${E2E_REPORT_DIR:+"--report-dir=${E2E_REPORT_DIR}"} \
${E2E_REPORT_PREFIX:+"--report-prefix=${E2E_REPORT_PREFIX}"} \
"${@:-}"

35
vendor/k8s.io/kubernetes/hack/godep-restore.sh generated vendored Executable file
View File

@@ -0,0 +1,35 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::log::status "Restoring kubernetes godeps"
if kube::util::godep_restored >/dev/null 2>&1; then
kube::log::status "Dependencies appear to be current - skipping download"
exit 0
fi
kube::util::ensure_godep_version
kube::log::status "Downloading dependencies - this might take a while"
GOPATH="${GOPATH}:${KUBE_ROOT}/staging" godep restore "$@"
kube::log::status "Done"

105
vendor/k8s.io/kubernetes/hack/godep-save.sh generated vendored Executable file
View File

@@ -0,0 +1,105 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::log::status "Ensuring prereqs"
kube::util::ensure_single_dir_gopath
kube::util::ensure_no_staging_repos_in_gopath
kube::util::ensure_godep_version
BACKUP=_tmp/godep-save.$RANDOM
mkdir -p "${BACKUP}"
function kube::godep_save::cleanup() {
if [[ -d "${BACKUP}/vendor" ]]; then
kube::log::error "${BACKUP}/vendor exists, restoring it"
rm -rf vendor
mv "${BACKUP}/vendor" vendor
fi
if [[ -d "${BACKUP}/Godeps" ]]; then
kube::log::error "${BACKUP}/Godeps exists, restoring it"
rm -rf Godeps
mv "${BACKUP}/Godeps" Godeps
fi
}
kube::util::trap_add kube::godep_save::cleanup EXIT
# Clear old state, but save it in case of error
if [[ -d vendor ]]; then
mv vendor "${BACKUP}/vendor"
fi
if [[ -d Godeps ]]; then
mv Godeps "${BACKUP}/Godeps"
fi
# Some things we want in godeps aren't code dependencies, so ./...
# won't pick them up.
REQUIRED_BINS=(
"github.com/onsi/ginkgo/ginkgo"
"github.com/jteeuwen/go-bindata/go-bindata"
"github.com/tools/godep"
"github.com/client9/misspell/cmd/misspell"
"github.com/bazelbuild/bazel-gazelle/cmd/gazelle"
"github.com/kubernetes/repo-infra/kazel"
"./..."
)
kube::log::status "Running godep save - this might take a while"
# This uses $(pwd) rather than ${KUBE_ROOT} because KUBE_ROOT will be
# realpath'ed, and godep barfs ("... is not using a known version control
# system") on our staging dirs.
GOPATH="${GOPATH}:$(pwd)/staging" godep save "${REQUIRED_BINS[@]}"
# Create a symlink in the vendor directory pointing to each staging repo. This
# lets other packages use the staging repos as if they were vendored.
for repo in $(ls staging/src/k8s.io); do
if [ ! -e "vendor/k8s.io/${repo}" ]; then
ln -s "../../staging/src/k8s.io/${repo}" "vendor/k8s.io/${repo}"
fi
done
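# For example (illustrative): after this loop, vendor/k8s.io/apimachinery is a
# symlink to ../../staging/src/k8s.io/apimachinery.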
# Workaround broken symlink in docker repo because godep copies the link, but
# not the target
rm -rf vendor/github.com/docker/docker/project/
kube::log::status "Updating BUILD files"
# Assume that anything imported through godep doesn't need Bazel to build.
# Prune out any Bazel build files, since these can break the build due to
# missing dependencies that aren't included by godep.
find vendor/ -type f \( -name BUILD -o -name BUILD.bazel -o -name WORKSPACE \) \
-exec rm -f {} \;
hack/update-bazel.sh >/dev/null
kube::log::status "Updating LICENSES file"
hack/update-godep-licenses.sh >/dev/null
kube::log::status "Creating OWNERS file"
rm -f "Godeps/OWNERS" "vendor/OWNERS"
cat <<__EOF__ > "Godeps/OWNERS"
approvers:
- dep-approvers
__EOF__
cp "Godeps/OWNERS" "vendor/OWNERS"
# Clean up
rm -rf "${BACKUP}"

300
vendor/k8s.io/kubernetes/hack/grab-profiles.sh generated vendored Executable file
View File

@@ -0,0 +1,300 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
function grab_profiles_from_component {
local requested_profiles=$1
local mem_pprof_flags=$2
local binary=$3
local tunnel_port=$4
local path=$5
local output_prefix=$6
local timestamp=$7
echo "binary: $binary"
for profile in ${requested_profiles}; do
case ${profile} in
cpu)
go tool pprof "-pdf" "${binary}" "http://localhost:${tunnel_port}${path}/debug/pprof/profile" > "${output_prefix}-${profile}-profile-${timestamp}.pdf"
;;
mem)
# There are different kinds of memory profiles available that have to be
# grabbed separately: --inuse-space, --inuse-objects,
# --alloc-space, --alloc-objects. We need to iterate over all requested
# kinds.
for flag in ${mem_pprof_flags}; do
go tool pprof "-${flag}" "-pdf" "${binary}" "http://localhost:${tunnel_port}${path}/debug/pprof/heap" > "${output_prefix}-${profile}-${flag}-profile-${timestamp}.pdf"
done
;;
esac
done
}
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
server_addr=""
kubelet_addresses=""
kubelet_binary=""
master_binary=""
scheduler_binary=""
scheduler_port="10251"
controller_manager_port="10252"
controller_manager_binary=""
requested_profiles=""
mem_pprof_flags=""
profile_components=""
output_dir="."
tunnel_port="${tunnel_port:-1234}"
if ! args=$(getopt -o s:mho:k:c -l server:,master,heapster,output:,kubelet:,scheduler,controller-manager,help,inuse-space,inuse-objects,alloc-space,alloc-objects,cpu,kubelet-binary:,master-binary:,scheduler-binary:,controller-manager-binary:,scheduler-port:,controller-manager-port: -- "$@"); then
  >&2 echo "Error in getopt"
  exit 1
fi
HEAPSTER_VERSION="v0.18.2"
MASTER_PPROF_PATH=""
HEAPSTER_PPROF_PATH="/api/v1/namespaces/kube-system/services/monitoring-heapster/proxy"
KUBELET_PPROF_PATH_PREFIX="/api/v1/proxy/nodes"
SCHEDULER_PPROF_PATH_PREFIX="/api/v1/namespaces/kube-system/pods/kube-scheduler/proxy"
CONTROLLER_MANAGER_PPROF_PATH_PREFIX="/api/v1/namespaces/kube-system/pods/kube-controller-manager/proxy"
eval set -- "${args}"
while true; do
case $1 in
-s|--server)
shift
if [ -z "$1" ]; then
>&2 echo "empty argument to --server flag"
exit 1
fi
server_addr=$1
shift
;;
-m|--master)
shift
profile_components="master ${profile_components}"
;;
--master-binary)
shift
if [ -z "$1" ]; then
>&2 echo "empty argument to --master-binary flag"
exit 1
fi
master_binary=$1
shift
;;
-h|--heapster)
shift
profile_components="heapster ${profile_components}"
;;
-k|--kubelet)
shift
profile_components="kubelet ${profile_components}"
if [ -z "$1" ]; then
>&2 echo "empty argument to --kubelet flag"
exit 1
fi
kubelet_addresses="$1 $kubelet_addresses"
shift
;;
--kubelet-binary)
shift
if [ -z "$1" ]; then
>&2 echo "empty argument to --kubelet-binary flag"
exit 1
fi
kubelet_binary=$1
shift
;;
--scheduler)
shift
profile_components="scheduler ${profile_components}"
;;
--scheduler-binary)
shift
if [ -z "$1" ]; then
>&2 echo "empty argument to --scheduler-binary flag"
exit 1
fi
scheduler_binary=$1
shift
;;
--scheduler-port)
shift
if [ -z "$1" ]; then
>&2 echo "empty argument to --scheduler-port flag"
exit 1
fi
scheduler_port=$1
shift
;;
-c|--controller-manager)
shift
profile_components="controller-manager ${profile_components}"
;;
--controller-manager-binary)
shift
if [ -z "$1" ]; then
>&2 echo "empty argument to --controller-manager-binary flag"
exit 1
fi
controller_manager_binary=$1
shift
;;
--controller-manager-port)
shift
if [ -z "$1" ]; then
>&2 echo "empty argument to --controller-manager-port flag"
exit 1
fi
controller_manager_port=$1
shift
;;
-o|--output)
shift
if [ -z "$1" ]; then
>&2 echo "empty argument to --output flag"
exit 1
fi
output_dir=$1
shift
;;
--inuse-space)
shift
requested_profiles="mem ${requested_profiles}"
mem_pprof_flags="inuse_space ${mem_pprof_flags}"
;;
--inuse-objects)
shift
requested_profiles="mem ${requested_profiles}"
mem_pprof_flags="inuse_objects ${mem_pprof_flags}"
;;
--alloc-space)
shift
requested_profiles="mem ${requested_profiles}"
mem_pprof_flags="alloc_space ${mem_pprof_flags}"
;;
--alloc-objects)
shift
requested_profiles="mem ${requested_profiles}"
mem_pprof_flags="alloc_objects ${mem_pprof_flags}"
;;
--cpu)
shift
requested_profiles="cpu ${requested_profiles}"
;;
--help)
shift
echo "Recognized options:
-o/--output,
-s/--server,
-m/--master,
-h/--heapster,
--inuse-space,
--inuse-objects,
--alloc-space,
--alloc-objects,
--cpu,
--help"
exit 0
;;
--)
shift
break;
;;
esac
done
if [[ -z "${server_addr}" ]]; then
>&2 echo "Server flag is required"
exit 1
fi
if [[ -z "${profile_components}" ]]; then
>&2 echo "Choose at least one component to profile"
exit 1
fi
if [[ -z "${requested_profiles}" ]]; then
>&2 echo "Choose at least one profiling option"
exit 1
fi
gcloud compute ssh "${server_addr}" --ssh-flag=-nN --ssh-flag=-L"${tunnel_port}":localhost:8080 &
echo "Waiting for tunnel to be created..."
kube::util::wait_for_url http://localhost:"${tunnel_port}"/healthz
SSH_PID=$(pgrep -f "/usr/bin/ssh.*${tunnel_port}:localhost:8080")
kube::util::trap_add "kill $SSH_PID" EXIT
kube::util::trap_add "kill $SSH_PID" SIGTERM
requested_profiles=$(echo "${requested_profiles}" | xargs -n1 | LC_ALL=C sort -u | xargs)
profile_components=$(echo "${profile_components}" | xargs -n1 | LC_ALL=C sort -u | xargs)
kubelet_addresses=$(echo "${kubelet_addresses}" | xargs -n1 | LC_ALL=C sort -u | xargs)
echo "requested profiles: ${requested_profiles}"
echo "flags for heap profile: ${mem_pprof_flags}"
timestamp=$(date +%Y%m%d%H%M%S)
binary=""
for component in ${profile_components}; do
case ${component} in
master)
path=${MASTER_PPROF_PATH}
binary=${master_binary}
;;
controller-manager)
path="${CONTROLLER_MANAGER_PPROF_PATH_PREFIX}-${server_addr}:${controller_manager_port}"
binary=${controller_manager_binary}
;;
scheduler)
path="${SCHEDULER_PPROF_PATH_PREFIX}-${server_addr}:${scheduler_port}"
binary=${scheduler_binary}
;;
heapster)
rm -f heapster
wget https://github.com/kubernetes/heapster/releases/download/${HEAPSTER_VERSION}/heapster
kube::util::trap_add 'rm -f heapster' EXIT
kube::util::trap_add 'rm -f heapster' SIGTERM
binary=heapster
path=${HEAPSTER_PPROF_PATH}
;;
kubelet)
path="${KUBELET_PPROF_PATH_PREFIX}"
if [[ -z "${kubelet_binary}" ]]; then
binary="${KUBE_ROOT}/_output/local/bin/linux/amd64/kubelet"
else
binary=${kubelet_binary}
fi
;;
esac
if [[ "${component}" == "kubelet" ]]; then
for node in ${kubelet_addresses//[,;]/' '}; do
grab_profiles_from_component "${requested_profiles}" "${mem_pprof_flags}" "${binary}" "${tunnel_port}" "${path}/${node}/proxy" "${output_dir}/${component}" "${timestamp}"
done
else
grab_profiles_from_component "${requested_profiles}" "${mem_pprof_flags}" "${binary}" "${tunnel_port}" "${path}" "${output_dir}/${component}" "${timestamp}"
fi
done
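An example invocation sketch for the profiling helper above (the server name and output directory are illustrative; the flags are the ones handled by the getopt loop):
# Grab CPU and in-use-space heap profiles from the master and the controller manager,
# tunnelling through SSH to the given server:
hack/grab-profiles.sh --server kubernetes-master --master --controller-manager \
  --cpu --inuse-space --output /tmp/profiles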

118
vendor/k8s.io/kubernetes/hack/import-restrictions.yaml generated vendored Normal file
View File

@@ -0,0 +1,118 @@
- baseImportPath: "./pkg/apis/core/"
allowedImports:
- k8s.io/apimachinery
- k8s.io/apiserver/pkg/util/feature
- k8s.io/kubernetes/pkg/apis/core
- k8s.io/kubernetes/pkg/features
- k8s.io/kubernetes/pkg/fieldpath
- k8s.io/kubernetes/pkg/util
- k8s.io/api/core/v1
# the following are temporary and should go away. Think twice (or more) before adding anything here.
# Main goal: pkg/apis should be as self-contained as possible.
- k8s.io/kubernetes/pkg/apis/extensions
- k8s.io/kubernetes/pkg/api/legacyscheme
- k8s.io/kubernetes/pkg/api/testapi
- k8s.io/api/extensions/v1beta1
ignoredSubTrees:
- "./pkg/apis/core/validation"
- baseImportPath: "./pkg/kubectl/genericclioptions/"
allowedImports:
- k8s.io/apimachinery
- k8s.io/client-go
# TODO this one should be tightened. We depend on it for testing, but we should instead create our own scheme
- k8s.io/api/core/v1
- k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers
- k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource
- baseImportPath: "./vendor/k8s.io/apimachinery/"
allowedImports:
- k8s.io/apimachinery
- k8s.io/kube-openapi
- baseImportPath: "./vendor/k8s.io/api/"
allowedImports:
- k8s.io/api
- k8s.io/apimachinery
- baseImportPath: "./vendor/k8s.io/code-generator/"
ignoredSubTrees:
- "./vendor/k8s.io/code-generator/_test"
allowedImports:
- k8s.io/gengo
- k8s.io/code-generator
- k8s.io/kube-openapi
- baseImportPath: "./vendor/k8s.io/client-go/"
allowedImports:
- k8s.io/api
- k8s.io/apimachinery
- k8s.io/client-go
# prevent core machinery from taking explicit v1 references unless
# necessary
- baseImportPath: "./vendor/k8s.io/client-go/rest/"
excludeTests: true
allowedImports:
- k8s.io/apimachinery
- k8s.io/client-go
- baseImportPath: "./vendor/k8s.io/client-go/tools/"
excludeTests: true
ignoredSubTrees:
- "./vendor/k8s.io/client-go/tools/bootstrap/token/api"
- "./vendor/k8s.io/client-go/tools/cache/testing"
- "./vendor/k8s.io/client-go/tools/leaderelection/resourcelock"
- "./vendor/k8s.io/client-go/tools/portforward"
- "./vendor/k8s.io/client-go/tools/record"
- "./vendor/k8s.io/client-go/tools/reference"
- "./vendor/k8s.io/client-go/tools/remotecommand"
allowedImports:
- k8s.io/apimachinery
- k8s.io/client-go
- baseImportPath: "./vendor/k8s.io/apiserver/"
allowedImports:
- k8s.io/api
- k8s.io/apimachinery
- k8s.io/apiserver
- k8s.io/client-go
- k8s.io/kube-openapi
- baseImportPath: "./vendor/k8s.io/metrics/"
allowedImports:
- k8s.io/api
- k8s.io/apimachinery
- k8s.io/client-go
- k8s.io/metrics
- baseImportPath: "./vendor/k8s.io/kube-aggregator/"
allowedImports:
- k8s.io/api
- k8s.io/apimachinery
- k8s.io/apiserver
- k8s.io/client-go
- k8s.io/kube-aggregator
- k8s.io/kube-openapi
- baseImportPath: "./vendor/k8s.io/sample-apiserver/"
allowedImports:
- k8s.io/api
- k8s.io/apimachinery
- k8s.io/apiserver
- k8s.io/client-go
- k8s.io/sample-apiserver
- baseImportPath: "./vendor/k8s.io/apiextensions-apiserver/"
allowedImports:
- k8s.io/api
- k8s.io/apiextensions-apiserver
- k8s.io/apimachinery
- k8s.io/apiserver
- k8s.io/client-go
- baseImportPath: "./vendor/k8s.io/kube-openapi/"
allowedImports:
- k8s.io/kube-openapi
- k8s.io/gengo
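
The YAML above only declares which import prefixes each tree may use; the enforcement itself happens elsewhere in the build. As a rough, hypothetical sketch of the idea (not the actual verifier), one could list a package's `k8s.io/*` imports with `go list` and compare them against the allowed prefixes:

```
# Hypothetical illustration only: flag k8s.io imports of a package that are not
# covered by an allowed prefix. The package path and prefixes are example values.
allowed=(k8s.io/apimachinery k8s.io/client-go)
go list -f '{{ join .Imports "\n" }}' ./vendor/k8s.io/client-go/rest/ \
  | grep '^k8s.io/' | sort -u \
  | while read -r imp; do
      ok=false
      for prefix in "${allowed[@]}"; do
        [[ "${imp}" == "${prefix}"* ]] && ok=true
      done
      [[ "${ok}" == true ]] || echo "disallowed import: ${imp}"
    done
```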

27
vendor/k8s.io/kubernetes/hack/install-etcd.sh generated vendored Executable file
View File

@@ -0,0 +1,27 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Convenience script to download and install etcd in third_party.
# Mostly just used by CI.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::etcd::install
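
For local use, a typical sequence after running this script is to put the freshly installed etcd on PATH (the install helper prints the exact export line when it finishes), for example:

```
./hack/install-etcd.sh                            # installs etcd under third_party/etcd
export PATH="${PWD}/third_party/etcd:${PATH}"     # use the installed binary for tests
etcd --version                                    # sanity check
```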

4
vendor/k8s.io/kubernetes/hack/jenkins/OWNERS generated vendored Normal file
View File

@@ -0,0 +1,4 @@
approvers:
- sig-testing-approvers
reviewers:
- sig-testing-reviewers

100
vendor/k8s.io/kubernetes/hack/jenkins/README.md generated vendored Normal file
View File

@@ -0,0 +1,100 @@
# Jenkins
[Jenkins](http://jenkins-ci.org/) is a pluggable continuous
integration system. The Google team is running two Jenkins servers in GCE for
the Kubernetes project. The post-commit instance runs continuous builds, unit
tests, integration tests, code verification tests, and end-to-end tests on
multiple providers using the latest commits to the Kubernetes repo from the
master and release branches. The PR Jenkins instance runs these tests on each
PR by a trusted contributor, but it only runs a subset of the end-to-end tests
and only on GCE.
## General flow
The flow of the post-commit Jenkins instance:
* Under the `kubernetes-build` job: Every 2 minutes, Jenkins polls for a batch
of new commits, after which it runs the `build.sh` script (in this directory)
on the latest tip. This results in build assets getting pushed to GCS and the
`latest.txt` file in the `ci` bucket being updated.
* On trigger, and every half hour (which effectively means all the time, unless
we're failing cluster creation), e2e variants run, on the latest build assets
in GCS:
* `kubernetes-e2e-gce`: Standard GCE e2e.
* `kubernetes-e2e-gke`: GKE provider e2e, with head k8s client and GKE
creating clusters at its default version.
* `kubernetes-e2e-aws`: AWS provider e2e. This only runs once a day.
* Each job will not run concurrently with itself, so, for instance,
the Jenkins executor will only ever run one `kubernetes-build`
job. However, it may run the jobs in parallel,
i.e. `kubernetes-build` may be run at the same time as
`kubernetes-e2e-gce`. For this reason, you may see your changes
pushed to our GCS bucket rapidly, but they may take some time to
fully work through Jenkins. Or you may get lucky and catch the
train in 5 minutes.
* There are many jobs not listed here, including upgrade tests, soak tests, and
tests for previous releases.
## Scripts
The scripts in this directory are directly used by Jenkins, either by
curl from githubusercontent (if we don't have a git checkout handy) or
by executing them from the git checkout. Since Jenkins is an entity
outside this repository, it's tricky to keep documentation for it up
to date quickly. However, the scripts themselves attempt to provide
color for the configuration(s) that each script runs in.
## GCS Log Format
Our `upload-to-gcs.sh` script runs at the start and end of every job. Logs on
post-commit Jenkins go under `gs://kubernetes-jenkins/logs/`. Logs on PR
Jenkins go under `gs://kubernetes-jenkins-pull/pr-logs/pull/PULL_NUMBER/`.
Individual run logs go into the `JOB_NAME/BUILD_NUMBER` folder.
At the start of the job, it uploads `started.json` containing the version of
Kubernetes under test and the timestamp.
At the end, it uploads `finished.json` containing the result and timestamp, as
well as the build log into `build-log.txt`. Under `artifacts/` we put our
test results in `junit_XY.xml`, along with gcp resource lists and cluster logs.
It also updates `latest-build.txt` at the end to point to this build number.
In the end, the directory structure looks like this:
```
gs://kubernetes-jenkins/logs/kubernetes-e2e-gce/
latest-build.txt
12345/
build-log.txt
started.json
finished.json
artifacts/
gcp-resources-{before, after}.txt
junit_{00, 01, ...}.xml
jenkins-e2e-master/{kube-apiserver.log, ...}
jenkins-e2e-node-abcd/{kubelet.log, ...}
12344/
...
```
The munger uses `latest-build.txt` and the JUnit reports to figure out whether
or not the job is healthy.
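For example, pulling the most recent completed run of a job out of this layout looks roughly like the following (the job name is illustrative; `gsutil` is assumed to be installed and authenticated):
```
job=kubernetes-e2e-gce
base=gs://kubernetes-jenkins/logs/${job}
latest=$(gsutil cat "${base}/latest-build.txt")
gsutil cat "${base}/${latest}/finished.json"   # result and timestamp of the run
gsutil ls "${base}/${latest}/artifacts/"       # junit XML, gcp resource lists, cluster logs
```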
## Job Builder
New jobs should be specified as YAML files to be processed by [Jenkins Job
Builder](http://docs.openstack.org/infra/jenkins-job-builder/). The YAML files
live in `jenkins/job-configs` and its subfolders **in the
[kubernetes/test-infra repository](https://github.com/kubernetes/test-infra)**.
Jenkins runs Jenkins Job Builder in a Docker container defined in
`job-builder-image`, and triggers it using `update-jobs.sh`. Jenkins Job Builder
uses a config file called
[jenkins_jobs.ini](http://docs.openstack.org/infra/jenkins-job-builder/execution.html)
which contains the location and credentials of the Jenkins server.
E2E Job definitions are templated to avoid code duplication. To add a new job,
add a new entry to the appropriate `project`.
[This](https://github.com/kubernetes/kubernetes/commit/eb273e5a4bdd3905f881563ada4e6543c7eb96b5)
is an example of a commit which does this. If necessary, create a new project, as in
[this](https://github.com/kubernetes/kubernetes/commit/09c27cdabc300e0420a2914100bedb565c23ed73)
commit.
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/hack/jenkins/README.md?pixel)]()

View File

@@ -0,0 +1,53 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
set -o xtrace
retry() {
for i in {1..5}; do
"$@" && return 0 || sleep $i
done
"$@"
}
# Runs benchmark integration tests, producing pretty-printed results
# in ${WORKSPACE}/artifacts. This script can also be run within a
# kubekins-test container with a kubernetes repo mounted (at the path
# /go/src/k8s.io/kubernetes).
export PATH=${GOPATH}/bin:${PWD}/third_party/etcd:/usr/local/go/bin:${PATH}
retry go get github.com/cespare/prettybench
# Disable the Go race detector.
export KUBE_RACE=" "
# Disable coverage report
export KUBE_COVER="n"
export ARTIFACTS_DIR=${WORKSPACE}/_artifacts
mkdir -p "${ARTIFACTS_DIR}"
cd /go/src/k8s.io/kubernetes
./hack/install-etcd.sh
# Run the benchmark tests and pretty-print the results into a separate file.
make test-integration WHAT="$*" KUBE_TEST_ARGS="-run='XXX' -bench=. -benchmem" \
| tee \
>(prettybench -no-passthrough > ${ARTIFACTS_DIR}/BenchmarkResults.txt) \
>(go run test/integration/benchmark/jsonify/main.go ${ARTIFACTS_DIR}/BenchmarkResults_benchmark_$(date -u +%Y-%m-%dT%H:%M:%SZ).json || cat > /dev/null)

74
vendor/k8s.io/kubernetes/hack/jenkins/build.sh generated vendored Executable file
View File

@@ -0,0 +1,74 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# kubernetes-build job: Triggered by github checkins on a 5 minute
# poll. We abort this job if it takes longer than 10m. (Typically this
# job takes about ~5m as of 0.8.0, but it's actually not completely
# hermetic right now due to things like the golang image. It can take
# ~8m if you force it to be totally hermetic.)
set -o errexit
set -o nounset
set -o pipefail
set -o xtrace
# !!! ALERT !!! Jenkins default $HOME is /var/lib/jenkins, which is
# global across jobs. We change $HOME instead to ${WORKSPACE}, which
# is an incoming variable Jenkins provides us for this job's scratch
# space.
export HOME=${WORKSPACE} # Nothing should want Jenkins $HOME
export PATH=$PATH:/usr/local/go/bin
# Skip gcloud update checking
export CLOUDSDK_COMPONENT_MANAGER_DISABLE_UPDATE_CHECK=true
: ${KUBE_RELEASE_RUN_TESTS:="n"}
export KUBE_RELEASE_RUN_TESTS
# Clean stuff out. Assume the last build left the tree in an odd
# state.
rm -rf ~/.kube*
make clean
# Uncomment if you want to purge the Docker cache completely each
# build. It costs about 150s each build to pull the golang image and
# rebuild the kube-build:cross image, but these rarely change.
# docker ps -aq | xargs -r docker rm
# docker images -q | xargs -r docker rmi
# Build
# Jobs explicitly set KUBE_FASTBUILD to desired settings.
make release
# Push to GCS?
if [[ ${KUBE_SKIP_PUSH_GCS:-} =~ ^[yY]$ ]]; then
echo "Not pushed to GCS..."
else
readonly release_infra_clone="${WORKSPACE}/_tmp/release.git"
mkdir -p ${WORKSPACE}/_tmp
git clone https://github.com/kubernetes/release ${release_infra_clone}
push_build=${release_infra_clone}/push-build.sh
[[ -n "${KUBE_GCS_RELEASE_BUCKET-}" ]] \
&& bucket_flag="--bucket=${KUBE_GCS_RELEASE_BUCKET-}"
[[ -n "${KUBE_GCS_RELEASE_SUFFIX-}" ]] \
&& gcs_suffix_flag="--gcs-suffix=${KUBE_GCS_RELEASE_SUFFIX-}"
${push_build} ${bucket_flag-} ${gcs_suffix_flag-} \
--nomock --verbose --ci
fi
sha256sum _output/release-tars/kubernetes*.tar.gz
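
A hedged sketch of how the push step above is usually parameterized; the variable names are the ones this script reads, while the values here are only examples:

```
export KUBE_GCS_RELEASE_BUCKET=my-dev-bucket     # becomes --bucket= for push-build.sh
export KUBE_GCS_RELEASE_SUFFIX=/ci-test          # becomes --gcs-suffix=
export KUBE_SKIP_PUSH_GCS=n                      # anything other than y/Y keeps the push enabled
WORKSPACE=$(mktemp -d) ./hack/jenkins/build.sh   # WORKSPACE is normally injected by Jenkins
```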

45
vendor/k8s.io/kubernetes/hack/jenkins/gotest.sh generated vendored Executable file
View File

@@ -0,0 +1,45 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Runs the unit and integration tests, producing JUnit-style XML test reports
# in ${WORKSPACE}/_artifacts.
set -o errexit
set -o nounset
set -o pipefail
set -o xtrace
# !!! ALERT !!! Jenkins default $HOME is /var/lib/jenkins, which is
# global across jobs. We change $HOME instead to ${WORKSPACE}, which
# is an incoming variable Jenkins provides us for this job's scratch
# space.
export HOME=${WORKSPACE} # Nothing should want Jenkins $HOME
export GOPATH=${HOME}/_gopath
export PATH=${GOPATH}/bin:${HOME}/third_party/etcd:/usr/local/go/bin:$PATH
# Install a few things needed by unit and /integration tests.
command -v etcd &>/dev/null || ./hack/install-etcd.sh
go get -u github.com/jstemmer/go-junit-report
# Enable the Go race detector.
export KUBE_RACE=-race
# Produce a JUnit-style XML test report for Jenkins.
export KUBE_JUNIT_REPORT_DIR=${WORKSPACE}/_artifacts
# Save the verbose stdout as well.
export KUBE_KEEP_VERBOSE_TEST_OUTPUT=y
make test
make test-integration
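
To run this job script outside of Jenkins, the only hard requirement is a writable WORKSPACE (Jenkins normally provides it); a minimal sketch:

```
export WORKSPACE=$(mktemp -d)        # becomes $HOME and the parent of _gopath/_artifacts
./hack/jenkins/gotest.sh
ls "${WORKSPACE}/_artifacts"         # JUnit-style XML reports written via KUBE_JUNIT_REPORT_DIR
```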

58
vendor/k8s.io/kubernetes/hack/jenkins/test-dockerized.sh generated vendored Executable file
View File

@@ -0,0 +1,58 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
set -o xtrace
retry() {
for i in {1..5}; do
"$@" && return 0 || sleep $i
done
"$@"
}
# Runs the unit and integration tests, producing JUnit-style XML test
# reports in ${WORKSPACE}/artifacts. This script is intended to be run from a
# kubekins-test container with a kubernetes repo mapped in. See
# k8s.io/test-infra/scenarios/kubernetes_verify.py
export PATH=${GOPATH}/bin:${PWD}/third_party/etcd:/usr/local/go/bin:${PATH}
retry go get github.com/jstemmer/go-junit-report
# Enable the Go race detector.
export KUBE_RACE=-race
# Disable coverage report
export KUBE_COVER="n"
# Produce a JUnit-style XML test report for Jenkins.
export KUBE_JUNIT_REPORT_DIR=${WORKSPACE}/artifacts
export ARTIFACTS_DIR=${WORKSPACE}/artifacts
# Save the verbose stdout as well.
export KUBE_KEEP_VERBOSE_TEST_OUTPUT=y
export KUBE_INTEGRATION_TEST_MAX_CONCURRENCY=4
export LOG_LEVEL=4
cd /go/src/k8s.io/kubernetes
make generated_files
go install ./cmd/...
./hack/install-etcd.sh
make test-cmd
make test-integration
./hack/test-update-storage-objects.sh

295
vendor/k8s.io/kubernetes/hack/jenkins/upload-to-gcs.sh generated vendored Executable file
View File

@@ -0,0 +1,295 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script uploads metadata and test results to Google Cloud Storage, in the
# location indicated by JENKINS_GCS_LOGS_PATH. By default, we use the Google
# kubernetes-jenkins bucket.
#
# The script looks for one of two environment variables to be set:
# JENKINS_BUILD_STARTED: set to a nonempty string to upload version
# information to 'started.json'. The value of the variable is not
# currently used.
# JENKINS_BUILD_FINISHED: set to the Jenkins build result to upload the build
# result to 'finished.json', any test artifacts, and update the
# 'latest-build.txt' file pointer. Since this script uses gsutil directly,
# it's a bit faster at uploading large numbers of files than the GCS Jenkins
# plugin. It also makes use of gsutil's gzip functionality.
#
# Note: for magicfile support to work correctly, the "file" utility must be
# installed.
# TODO(rmmh): rewrite this script in Python so we can actually test it!
set -o errexit
set -o nounset
set -o pipefail
if [[ -n "${JENKINS_BUILD_STARTED:-}" && -n "${JENKINS_BUILD_FINISHED:-}" ]]; then
echo "Error: JENKINS_BUILD_STARTED and JENKINS_BUILD_FINISHED should not both be set!"
exit 1
fi
if [[ ! ${JENKINS_UPLOAD_TO_GCS:-y} =~ ^[yY]$ ]]; then
exit 0
fi
# Attempt to determine if we're running against a repo other than
# kubernetes/kubernetes to determine whether to place PR logs in a different
# location.
#
# In the current CI system, the tracked repo is named remote. This is not true
# in general for most devs, where origin and upstream are more common.
GCS_SUBDIR=""
readonly remote_git_repo=$(git config --get remote.remote.url | sed 's:.*github.com/::' || true)
if [[ -n "${remote_git_repo}" ]]; then
case "${remote_git_repo}" in
# main repo: nothing extra
kubernetes/kubernetes) GCS_SUBDIR="" ;;
# a different repo on the k8s org: just the repo name (strip kubernetes/)
kubernetes/*) GCS_SUBDIR="${remote_git_repo#kubernetes/}/" ;;
# any other repo: ${org}_${repo} (replace / with _)
*) GCS_SUBDIR="${remote_git_repo/\//_}/" ;;
esac
if [[ "${remote_git_repo}" != "kubernetes/kubernetes" ]]; then
# also store the repo in started.json, so Gubernator can link it properly.
export BUILD_METADATA_REPO="${remote_git_repo}"
fi
fi
if [[ ${JOB_NAME} =~ -pull- ]]; then
: ${JENKINS_GCS_LOGS_PATH:="gs://kubernetes-jenkins/pr-logs/pull/${GCS_SUBDIR}${ghprbPullId:-unknown}"}
: ${JENKINS_GCS_LATEST_PATH:="gs://kubernetes-jenkins/pr-logs/directory"}
: ${JENKINS_GCS_LOGS_INDIRECT:="gs://kubernetes-jenkins/pr-logs/directory/${JOB_NAME}"}
else
: ${JENKINS_GCS_LOGS_PATH:="gs://kubernetes-jenkins/logs"}
: ${JENKINS_GCS_LATEST_PATH:="gs://kubernetes-jenkins/logs"}
: ${JENKINS_GCS_LOGS_INDIRECT:=""}
fi
readonly artifacts_path="${WORKSPACE}/_artifacts"
readonly gcs_job_path="${JENKINS_GCS_LOGS_PATH}/${JOB_NAME}"
readonly gcs_build_path="${gcs_job_path}/${BUILD_NUMBER}"
readonly gcs_latest_path="${JENKINS_GCS_LATEST_PATH}/${JOB_NAME}"
readonly gcs_indirect_path="${JENKINS_GCS_LOGS_INDIRECT}"
readonly gcs_acl="public-read"
readonly results_url=${gcs_build_path//"gs:/"/"https://console.cloud.google.com/storage/browser"}
readonly timestamp=$(date +%s)
#########################################################################
# $0 is called from different contexts so figure out where kubernetes is.
# Sets non-exported global kubernetes_base_path and defaults to "."
function set_kubernetes_base_path () {
for kubernetes_base_path in kubernetes go/src/k8s.io/kubernetes .; do
# Pick a canonical item to find in a kubernetes tree which could be a
# raw source tree or an expanded tarball.
[[ -f ${kubernetes_base_path}/cluster/common.sh ]] && break
done
}
#########################################################################
# Try to discover the kubernetes version.
# prints version
function find_version() {
(
# Where are we?
# This could be set in the global scope at some point if we need to
# discover the kubernetes path elsewhere.
set_kubernetes_base_path
cd ${kubernetes_base_path}
if [[ -e "version" ]]; then
cat version
elif [[ -e "hack/lib/version.sh" ]]; then
export KUBE_ROOT="."
source "hack/lib/version.sh"
kube::version::get_version_vars
echo "${KUBE_GIT_VERSION-}"
else
# Last resort from the started.json
gsutil cat ${gcs_build_path}/started.json 2>/dev/null |\
sed -n 's/ *"version": *"\([^"]*\)",*/\1/p'
fi
)
}
# Output started.json. Use test function below!
function print_started() {
local metadata_keys=$(compgen -e | grep ^BUILD_METADATA_)
echo "{"
echo " \"version\": \"${version}\"," # TODO(fejta): retire
echo " \"job-version\": \"${version}\","
echo " \"timestamp\": ${timestamp},"
if [[ -n "${metadata_keys}" ]]; then
# Any exported variables of the form BUILD_METADATA_KEY=VALUE
# will be available as started["metadata"][KEY.lower()].
echo " \"metadata\": {"
local sep="" # leading commas are easy to track
for env_var in $metadata_keys; do
local var_upper="${env_var#BUILD_METADATA_}"
echo " $sep\"${var_upper,,}\": \"${!env_var}\""
sep=","
done
echo " },"
fi
echo " \"jenkins-node\": \"${NODE_NAME:-}\""
echo "}"
}
# Use this to test changes to print_started.
if [[ -n "${TEST_STARTED_JSON:-}" ]]; then
version=$(find_version)
cat <(print_started) | jq .
exit
fi
function upload_version() {
local -r version=$(find_version)
local upload_attempt
echo -n 'Run starting at '; date -d "@${timestamp}"
if [[ -n "${version}" ]]; then
echo "Found Kubernetes version: ${version}"
else
echo "Could not find Kubernetes version"
fi
local -r json_file="${gcs_build_path}/started.json"
for upload_attempt in {1..3}; do
echo "Uploading version to: ${json_file} (attempt ${upload_attempt})"
gsutil -q -h "Content-Type:application/json" cp -a "${gcs_acl}" <(print_started) "${json_file}" || continue
break
done
}
#########################################################################
# Maintain a single file storing the full build version, Jenkins' job number,
# and build state. Limit its size so it does not grow unbounded.
# This is primarily used for and by the
# github.com/kubernetes/release/find_green_build tool.
# @param build_result - the state of the build
#
function update_job_result_cache() {
local -r build_result=$1
local -r version=$(find_version)
local -r job_results=${gcs_job_path}/jobResultsCache.json
local -r tmp_results="${WORKSPACE}/_tmp/jobResultsCache.tmp"
# TODO: This constraint is insufficient. The boundary for secondary
# job cache should be date based on the last primary build.
# The issue is we are trying to find a matched green set of results
# at a given hash, but all of the jobs run at wildly different lengths.
local -r cache_size=300
local upload_attempt
if [[ -n "${version}" ]]; then
echo "Found Kubernetes version: ${version}"
else
echo "Could not find Kubernetes version"
fi
mkdir -p ${tmp_results%/*}
# Construct a valid json file
echo "[" > ${tmp_results}
for upload_attempt in $(seq 3); do
echo "Copying ${job_results} to ${tmp_results} (attempt ${upload_attempt})"
# The sed construct below is stripping out only the "version" lines
# and then ensuring there's a single comma at the end of the line.
gsutil -q cat ${job_results} 2>&- |\
sed -n 's/^\({"version".*}\),*/\1,/p' |\
tail -${cache_size} >> ${tmp_results} || continue
break
done
echo "{\"version\": \"${version}\", \"buildnumber\": \"${BUILD_NUMBER}\"," \
"\"result\": \"${build_result}\"}" >> ${tmp_results}
echo "]" >> ${tmp_results}
for upload_attempt in $(seq 3); do
echo "Copying ${tmp_results} to ${job_results} (attempt ${upload_attempt})"
gsutil -q -h "Content-Type:application/json" cp -a "${gcs_acl}" \
${tmp_results} ${job_results} || continue
break
done
rm -f ${tmp_results}
}
function upload_artifacts_and_build_result() {
local -r build_result=$1
local upload_attempt
echo -n 'Run finished at '; date -d "@${timestamp}"
for upload_attempt in {1..3}; do
echo "Uploading to ${gcs_build_path} (attempt ${upload_attempt})"
echo "Uploading build result: ${build_result}"
gsutil -q -h "Content-Type:application/json" cp -a "${gcs_acl}" <(
echo "{"
echo " \"result\": \"${build_result}\","
echo " \"timestamp\": ${timestamp}"
echo "}"
) "${gcs_build_path}/finished.json" || continue
if [[ -d "${artifacts_path}" && -n $(ls -A "${artifacts_path}") ]]; then
echo "Uploading artifacts"
gsutil -m -q -o "GSUtil:use_magicfile=True" cp -a "${gcs_acl}" -r -c \
-z log,txt,xml "${artifacts_path}" "${gcs_build_path}/artifacts" || continue
fi
if [[ -e "${WORKSPACE}/build-log.txt" ]]; then
echo "Uploading build log"
gsutil -q cp -Z -a "${gcs_acl}" "${WORKSPACE}/build-log.txt" "${gcs_build_path}"
fi
# For pull jobs, keep a canonical ordering for tools that want to examine
# the output.
if [[ "${gcs_indirect_path}" != "" ]]; then
echo "Writing ${gcs_build_path} to ${gcs_indirect_path}/${BUILD_NUMBER}.txt"
echo "${gcs_build_path}" | \
gsutil -q -h "Content-Type:text/plain" \
cp -a "${gcs_acl}" - "${gcs_indirect_path}/${BUILD_NUMBER}.txt" || continue
echo "Marking build ${BUILD_NUMBER} as the latest completed build for this PR"
echo "${BUILD_NUMBER}" | \
gsutil -q -h "Content-Type:text/plain" -h "Cache-Control:private, max-age=0, no-transform" \
cp -a "${gcs_acl}" - "${gcs_job_path}/latest-build.txt" || continue
fi
# Mark this build as the latest completed.
echo "Marking build ${BUILD_NUMBER} as the latest completed build"
echo "${BUILD_NUMBER}" | \
gsutil -q -h "Content-Type:text/plain" -h "Cache-Control:private, max-age=0, no-transform" \
cp -a "${gcs_acl}" - "${gcs_latest_path}/latest-build.txt" || continue
break # all uploads succeeded if we hit this point
done
echo -e "\n\n\n*** View logs and artifacts at ${results_url} ***\n\n"
}
if [[ -z "${BOOTSTRAP_MIGRATION:-}" ]]; then
if [[ -n "${JENKINS_BUILD_STARTED:-}" ]]; then
upload_version
elif [[ -n "${JENKINS_BUILD_FINISHED:-}" ]]; then
upload_artifacts_and_build_result ${JENKINS_BUILD_FINISHED}
update_job_result_cache ${JENKINS_BUILD_FINISHED}
else
echo "ERROR: Called without JENKINS_BUILD_STARTED or JENKINS_BUILD_FINISHED set."
echo "ERROR: this should not happen"
exit 1
fi
fi
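
Putting it together, a job wrapper might drive this script twice per run, once at start and once at finish; a sketch with illustrative values:

```
export JOB_NAME=kubernetes-e2e-gce BUILD_NUMBER=12345 WORKSPACE=${PWD}
export JENKINS_BUILD_STARTED=true            # any nonempty value works
./hack/jenkins/upload-to-gcs.sh              # uploads started.json
# ... run the tests, drop artifacts into ${WORKSPACE}/_artifacts ...
unset JENKINS_BUILD_STARTED
export JENKINS_BUILD_FINISHED=SUCCESS        # the Jenkins build result
./hack/jenkins/upload-to-gcs.sh              # uploads finished.json, artifacts, latest-build.txt
```

To preview the generated started.json without uploading anything, set TEST_STARTED_JSON to any nonempty value before invoking the script; it prints the JSON through jq and exits.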

43
vendor/k8s.io/kubernetes/hack/jenkins/verify-dockerized.sh generated vendored Executable file
View File

@@ -0,0 +1,43 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
set -o xtrace
retry() {
for i in {1..5}; do
"$@" && return 0 || sleep $i
done
"$@"
}
# This script is intended to be run from a kubekins-test container with a
# kubernetes repo mapped in. See k8s.io/test-infra/scenarios/kubernetes_verify.py
export PATH=${GOPATH}/bin:${PWD}/third_party/etcd:/usr/local/go/bin:${PATH}
# Produce a JUnit-style XML test report
export KUBE_JUNIT_REPORT_DIR=${WORKSPACE}/artifacts
# Set artifacts directory
export ARTIFACTS_DIR=${WORKSPACE}/artifacts
export LOG_LEVEL=4
cd /go/src/k8s.io/kubernetes
make verify

36
vendor/k8s.io/kubernetes/hack/jenkins/verify.sh generated vendored Executable file
View File

@@ -0,0 +1,36 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Sets up the environment (e.g. installing godep and etcd if necessary)
# and then runs all of the verification checks.
set -o errexit
set -o nounset
set -o pipefail
set -o xtrace
# !!! ALERT !!! Jenkins default $HOME is /var/lib/jenkins, which is
# global across jobs. We change $HOME instead to ${WORKSPACE}, which
# is an incoming variable Jenkins provides us for this job's scratch
# space.
export HOME=${WORKSPACE} # Nothing should want Jenkins $HOME
export GOPATH=${HOME}/_gopath
export PATH=${GOPATH}/bin:${HOME}/third_party/etcd:/usr/local/go/bin:$PATH
# Install a few things needed by the verification tests.
command -v etcd &>/dev/null || ./hack/install-etcd.sh
make verify

1
vendor/k8s.io/kubernetes/hack/lib/.gitattributes generated vendored Normal file
View File

@@ -0,0 +1 @@
version.sh export-subst

28
vendor/k8s.io/kubernetes/hack/lib/BUILD generated vendored Normal file
View File

@@ -0,0 +1,28 @@
package(default_visibility = ["//visibility:public"])
sh_library(
name = "lib",
srcs = [
"etcd.sh",
"golang.sh",
"init.sh",
"logging.sh",
"swagger.sh",
"test.sh",
"util.sh",
"version.sh",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

126
vendor/k8s.io/kubernetes/hack/lib/etcd.sh generated vendored Executable file
View File

@@ -0,0 +1,126 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A set of helpers for starting/running etcd for tests
ETCD_VERSION=${ETCD_VERSION:-3.2.18}
ETCD_HOST=${ETCD_HOST:-127.0.0.1}
ETCD_PORT=${ETCD_PORT:-2379}
kube::etcd::validate() {
# validate if in path
command -v etcd >/dev/null || {
kube::log::usage "etcd must be in your PATH"
exit 1
}
# validate etcd port is free
local port_check_command
if command -v ss &> /dev/null && ss -Version | grep 'iproute2' &> /dev/null; then
port_check_command="ss"
elif command -v netstat &>/dev/null; then
port_check_command="netstat"
else
kube::log::usage "unable to identify if etcd is bound to port ${ETCD_PORT}. unable to find ss or netstat utilities."
exit 1
fi
if ${port_check_command} -nat | grep "LISTEN" | grep "[\.:]${ETCD_PORT:?}" >/dev/null 2>&1; then
kube::log::usage "unable to start etcd as port ${ETCD_PORT} is in use. please stop the process listening on this port and retry."
kube::log::usage "`netstat -nat | grep "[\.:]${ETCD_PORT:?} .*LISTEN"`"
exit 1
fi
# validate installed version is at least equal to minimum
version=$(etcd --version | tail -n +1 | head -n 1 | cut -d " " -f 3)
if [[ $(kube::etcd::version $ETCD_VERSION) -gt $(kube::etcd::version $version) ]]; then
export PATH=$KUBE_ROOT/third_party/etcd:$PATH
hash etcd
echo $PATH
version=$(etcd --version | head -n 1 | cut -d " " -f 3)
if [[ $(kube::etcd::version $ETCD_VERSION) -gt $(kube::etcd::version $version) ]]; then
kube::log::usage "etcd version ${ETCD_VERSION} or greater required."
kube::log::info "You can use 'hack/install-etcd.sh' to install a copy in third_party/."
exit 1
fi
fi
}
kube::etcd::version() {
printf '%s\n' "${@}" | awk -F . '{ printf("%d%03d%03d\n", $1, $2, $3) }'
}
kube::etcd::start() {
# validate before running
kube::etcd::validate
# Start etcd
ETCD_DIR=${ETCD_DIR:-$(mktemp -d 2>/dev/null || mktemp -d -t test-etcd.XXXXXX)}
if [[ -d "${ARTIFACTS_DIR:-}" ]]; then
ETCD_LOGFILE="${ARTIFACTS_DIR}/etcd.$(uname -n).$(id -un).log.DEBUG.$(date +%Y%m%d-%H%M%S).$$"
else
ETCD_LOGFILE=/dev/null
fi
kube::log::info "etcd --advertise-client-urls http://${ETCD_HOST}:${ETCD_PORT} --data-dir ${ETCD_DIR} --listen-client-urls http://${ETCD_HOST}:${ETCD_PORT} --debug > \"${ETCD_LOGFILE}\" 2>/dev/null"
etcd --advertise-client-urls http://${ETCD_HOST}:${ETCD_PORT} --data-dir ${ETCD_DIR} --listen-client-urls http://${ETCD_HOST}:${ETCD_PORT} --debug 2> "${ETCD_LOGFILE}" >/dev/null &
ETCD_PID=$!
echo "Waiting for etcd to come up."
kube::util::wait_for_url "http://${ETCD_HOST}:${ETCD_PORT}/v2/machines" "etcd: " 0.25 80
curl -fs -X PUT "http://${ETCD_HOST}:${ETCD_PORT}/v2/keys/_test"
}
kube::etcd::stop() {
if [[ -n "${ETCD_PID-}" ]]; then
kill "${ETCD_PID}" &>/dev/null || :
wait "${ETCD_PID}" &>/dev/null || :
fi
}
kube::etcd::clean_etcd_dir() {
if [[ -n "${ETCD_DIR-}" ]]; then
rm -rf "${ETCD_DIR}"
fi
}
kube::etcd::cleanup() {
kube::etcd::stop
kube::etcd::clean_etcd_dir
}
kube::etcd::install() {
(
cd "${KUBE_ROOT}/third_party"
if [[ $(readlink etcd) == etcd-v${ETCD_VERSION}-* ]]; then
return # already installed
fi
if [[ $(uname) == "Darwin" ]]; then
download_file="etcd-v${ETCD_VERSION}-darwin-amd64.zip"
url="https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/${download_file}"
kube::util::download_file "${url}" "${download_file}"
unzip -o "${download_file}"
ln -fns "etcd-v${ETCD_VERSION}-darwin-amd64" etcd
rm "${download_file}"
else
url="https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz"
download_file="etcd-v${ETCD_VERSION}-linux-amd64.tar.gz"
kube::util::download_file "${url}" "${download_file}"
tar xzf "${download_file}"
ln -fns "etcd-v${ETCD_VERSION}-linux-amd64" etcd
fi
kube::log::info "etcd v${ETCD_VERSION} installed. To use:"
kube::log::info "export PATH=$(pwd)/etcd:\${PATH}"
)
}
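
A minimal sketch of how these helpers are consumed by the integration test scripts (assuming hack/lib/init.sh has already been sourced, which pulls this file in):

```
kube::etcd::start                  # validates the binary and port, then starts etcd in the background
trap kube::etcd::cleanup EXIT      # stop etcd and remove ${ETCD_DIR} when the script exits
# run tests against http://${ETCD_HOST}:${ETCD_PORT} here
# Note: kube::etcd::version "3.2.18" prints 3002018, which is what makes the
# numeric minimum-version comparison above work.
```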

615
vendor/k8s.io/kubernetes/hack/lib/golang.sh generated vendored Executable file
View File

@@ -0,0 +1,615 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The golang package that we are building.
readonly KUBE_GO_PACKAGE=k8s.io/kubernetes
readonly KUBE_GOPATH="${KUBE_OUTPUT}/go"
# The set of server targets that we are only building for Linux
# If you update this list, please also update build/BUILD.
kube::golang::server_targets() {
local targets=(
cmd/kube-proxy
cmd/kube-apiserver
cmd/kube-controller-manager
cmd/cloud-controller-manager
cmd/kubelet
cmd/kubeadm
cmd/hyperkube
cmd/kube-scheduler
vendor/k8s.io/kube-aggregator
vendor/k8s.io/apiextensions-apiserver
cluster/gce/gci/mounter
)
echo "${targets[@]}"
}
IFS=" " read -ra KUBE_SERVER_TARGETS <<< "$(kube::golang::server_targets)"
readonly KUBE_SERVER_TARGETS
readonly KUBE_SERVER_BINARIES=("${KUBE_SERVER_TARGETS[@]##*/}")
# The set of server targets that we are only building for Kubernetes nodes
# If you update this list, please also update build/BUILD.
kube::golang::node_targets() {
local targets=(
cmd/kube-proxy
cmd/kubeadm
cmd/kubelet
)
echo "${targets[@]}"
}
IFS=" " read -ra KUBE_NODE_TARGETS <<< "$(kube::golang::node_targets)"
readonly KUBE_NODE_TARGETS
readonly KUBE_NODE_BINARIES=("${KUBE_NODE_TARGETS[@]##*/}")
readonly KUBE_NODE_BINARIES_WIN=("${KUBE_NODE_BINARIES[@]/%/.exe}")
if [[ -n "${KUBE_BUILD_PLATFORMS:-}" ]]; then
IFS=" " read -ra KUBE_SERVER_PLATFORMS <<< "$KUBE_BUILD_PLATFORMS"
IFS=" " read -ra KUBE_NODE_PLATFORMS <<< "$KUBE_BUILD_PLATFORMS"
IFS=" " read -ra KUBE_TEST_PLATFORMS <<< "$KUBE_BUILD_PLATFORMS"
IFS=" " read -ra KUBE_CLIENT_PLATFORMS <<< "$KUBE_BUILD_PLATFORMS"
readonly KUBE_SERVER_PLATFORMS
readonly KUBE_NODE_PLATFORMS
readonly KUBE_TEST_PLATFORMS
readonly KUBE_CLIENT_PLATFORMS
elif [[ "${KUBE_FASTBUILD:-}" == "true" ]]; then
readonly KUBE_SERVER_PLATFORMS=(linux/amd64)
readonly KUBE_NODE_PLATFORMS=(linux/amd64)
if [[ "${KUBE_BUILDER_OS:-}" == "darwin"* ]]; then
readonly KUBE_TEST_PLATFORMS=(
darwin/amd64
linux/amd64
)
readonly KUBE_CLIENT_PLATFORMS=(
darwin/amd64
linux/amd64
)
else
readonly KUBE_TEST_PLATFORMS=(linux/amd64)
readonly KUBE_CLIENT_PLATFORMS=(linux/amd64)
fi
else
# The server platform we are building on.
readonly KUBE_SERVER_PLATFORMS=(
linux/amd64
linux/arm
linux/arm64
linux/s390x
linux/ppc64le
)
# The node platforms we build for
readonly KUBE_NODE_PLATFORMS=(
linux/amd64
linux/arm
linux/arm64
linux/s390x
linux/ppc64le
windows/amd64
)
# If we update this we should also update the set of platforms whose standard library is precompiled for in build/build-image/cross/Dockerfile
readonly KUBE_CLIENT_PLATFORMS=(
linux/amd64
linux/386
linux/arm
linux/arm64
linux/s390x
linux/ppc64le
darwin/amd64
darwin/386
windows/amd64
windows/386
)
# Which platforms we should compile test targets for. Not all client platforms need these tests
readonly KUBE_TEST_PLATFORMS=(
linux/amd64
linux/arm
linux/arm64
linux/s390x
linux/ppc64le
darwin/amd64
windows/amd64
)
fi
# The set of client targets that we are building for all platforms
# If you update this list, please also update build/BUILD.
readonly KUBE_CLIENT_TARGETS=(
cmd/kubectl
)
readonly KUBE_CLIENT_BINARIES=("${KUBE_CLIENT_TARGETS[@]##*/}")
readonly KUBE_CLIENT_BINARIES_WIN=("${KUBE_CLIENT_BINARIES[@]/%/.exe}")
# The set of test targets that we are building for all platforms
# If you update this list, please also update build/BUILD.
kube::golang::test_targets() {
local targets=(
cmd/gendocs
cmd/genkubedocs
cmd/genman
cmd/genyaml
cmd/genswaggertypedocs
cmd/linkcheck
vendor/github.com/onsi/ginkgo/ginkgo
test/e2e/e2e.test
)
echo "${targets[@]}"
}
IFS=" " read -ra KUBE_TEST_TARGETS <<< "$(kube::golang::test_targets)"
readonly KUBE_TEST_TARGETS
readonly KUBE_TEST_BINARIES=("${KUBE_TEST_TARGETS[@]##*/}")
readonly KUBE_TEST_BINARIES_WIN=("${KUBE_TEST_BINARIES[@]/%/.exe}")
# If you update this list, please also update build/BUILD.
readonly KUBE_TEST_PORTABLE=(
test/e2e/testing-manifests
test/kubemark
hack/e2e.go
hack/e2e-internal
hack/get-build.sh
hack/ginkgo-e2e.sh
hack/lib
)
# Test targets which run on the Kubernetes clusters directly, so we only
# need to target server platforms.
# These binaries will be distributed in the kubernetes-test tarball.
# If you update this list, please also update build/BUILD.
kube::golang::server_test_targets() {
local targets=(
cmd/kubemark
vendor/github.com/onsi/ginkgo/ginkgo
)
if [[ "${OSTYPE:-}" == "linux"* ]]; then
targets+=( test/e2e_node/e2e_node.test )
fi
echo "${targets[@]}"
}
IFS=" " read -ra KUBE_TEST_SERVER_TARGETS <<< "$(kube::golang::server_test_targets)"
readonly KUBE_TEST_SERVER_TARGETS
readonly KUBE_TEST_SERVER_BINARIES=("${KUBE_TEST_SERVER_TARGETS[@]##*/}")
readonly KUBE_TEST_SERVER_PLATFORMS=("${KUBE_SERVER_PLATFORMS[@]}")
# Gigabytes necessary for parallel platform builds.
# As of January 2018, RAM usage is exceeding 30G
# Setting to 40 to provide some headroom
readonly KUBE_PARALLEL_BUILD_MEMORY=40
readonly KUBE_ALL_TARGETS=(
"${KUBE_SERVER_TARGETS[@]}"
"${KUBE_CLIENT_TARGETS[@]}"
"${KUBE_TEST_TARGETS[@]}"
"${KUBE_TEST_SERVER_TARGETS[@]}"
)
readonly KUBE_ALL_BINARIES=("${KUBE_ALL_TARGETS[@]##*/}")
readonly KUBE_STATIC_LIBRARIES=(
cloud-controller-manager
kube-apiserver
kube-controller-manager
kube-scheduler
kube-proxy
kube-aggregator
kubeadm
kubectl
)
# KUBE_CGO_OVERRIDES is a space-separated list of binaries which should be built
# with CGO enabled, assuming CGO is supported on the target platform.
# This overrides any entry in KUBE_STATIC_LIBRARIES.
IFS=" " read -ra KUBE_CGO_OVERRIDES <<< "${KUBE_CGO_OVERRIDES:-}"
readonly KUBE_CGO_OVERRIDES
# KUBE_STATIC_OVERRIDES is a space-separated list of binaries which should be
# built with CGO disabled. This is in addition to the list in
# KUBE_STATIC_LIBRARIES.
IFS=" " read -ra KUBE_STATIC_OVERRIDES <<< "${KUBE_STATIC_OVERRIDES:-}"
readonly KUBE_STATIC_OVERRIDES
kube::golang::is_statically_linked_library() {
local e
# Explicitly enable cgo when building kubectl for darwin from darwin.
[[ "$(go env GOHOSTOS)" == "darwin" && "$(go env GOOS)" == "darwin" &&
"$1" == *"/kubectl" ]] && return 1
if [[ -n "${KUBE_CGO_OVERRIDES:+x}" ]]; then
for e in "${KUBE_CGO_OVERRIDES[@]}"; do [[ "$1" == *"/$e" ]] && return 1; done;
fi
for e in "${KUBE_STATIC_LIBRARIES[@]}"; do [[ "$1" == *"/$e" ]] && return 0; done;
if [[ -n "${KUBE_STATIC_OVERRIDES:+x}" ]]; then
for e in "${KUBE_STATIC_OVERRIDES[@]}"; do [[ "$1" == *"/$e" ]] && return 0; done;
fi
return 1;
}
# kube::binaries_from_targets take a list of build targets and return the
# full go package to be built
kube::golang::binaries_from_targets() {
local target
for target; do
# If the target starts with what looks like a domain name, assume it has a
# fully-qualified package name rather than one that needs the Kubernetes
# package prepended.
if [[ "${target}" =~ ^([[:alnum:]]+".")+[[:alnum:]]+"/" ]]; then
echo "${target}"
else
echo "${KUBE_GO_PACKAGE}/${target}"
fi
done
}
# Asks golang what it thinks the host platform is. The go tool chain does some
# slightly different things when the target platform matches the host platform.
kube::golang::host_platform() {
echo "$(go env GOHOSTOS)/$(go env GOHOSTARCH)"
}
# Takes the platform name ($1) and sets the appropriate golang env variables
# for that platform.
kube::golang::set_platform_envs() {
[[ -n ${1-} ]] || {
kube::log::error_exit "!!! Internal error. No platform set in kube::golang::set_platform_envs"
}
export GOOS=${platform%/*}
export GOARCH=${platform##*/}
# Do not set CC when building natively on a platform, only if cross-compiling from linux/amd64
if [[ $(kube::golang::host_platform) == "linux/amd64" ]]; then
# Dynamic CGO linking for other server architectures than linux/amd64 goes here
# If you want to include support for more server platforms than these, add arch-specific gcc names here
case "${platform}" in
"linux/arm")
export CGO_ENABLED=1
export CC=arm-linux-gnueabihf-gcc
;;
"linux/arm64")
export CGO_ENABLED=1
export CC=aarch64-linux-gnu-gcc
;;
"linux/ppc64le")
export CGO_ENABLED=1
export CC=powerpc64le-linux-gnu-gcc
;;
"linux/s390x")
export CGO_ENABLED=1
export CC=s390x-linux-gnu-gcc
;;
esac
fi
}
kube::golang::unset_platform_envs() {
unset GOOS
unset GOARCH
unset GOROOT
unset CGO_ENABLED
unset CC
}
# Create the GOPATH tree under $KUBE_OUTPUT
kube::golang::create_gopath_tree() {
local go_pkg_dir="${KUBE_GOPATH}/src/${KUBE_GO_PACKAGE}"
local go_pkg_basedir=$(dirname "${go_pkg_dir}")
mkdir -p "${go_pkg_basedir}"
# TODO: This symlink should be relative.
if [[ ! -e "${go_pkg_dir}" || "$(readlink ${go_pkg_dir})" != "${KUBE_ROOT}" ]]; then
ln -snf "${KUBE_ROOT}" "${go_pkg_dir}"
fi
cat >"${KUBE_GOPATH}/BUILD" <<EOF
# This dummy BUILD file prevents Bazel from trying to descend through the
# infinite loop created by the symlink at
# ${go_pkg_dir}
EOF
}
# Ensure the go tool exists and is a viable version.
kube::golang::verify_go_version() {
if [[ -z "$(which go)" ]]; then
kube::log::usage_from_stdin <<EOF
Can't find 'go' in PATH, please fix and retry.
See http://golang.org/doc/install for installation instructions.
EOF
return 2
fi
local go_version
IFS=" " read -ra go_version <<< "$(go version)"
local minimum_go_version
minimum_go_version=go1.10.2
if [[ "${minimum_go_version}" != $(echo -e "${minimum_go_version}\n${go_version[2]}" | sort -s -t. -k 1,1 -k 2,2n -k 3,3n | head -n1) && "${go_version[2]}" != "devel" ]]; then
kube::log::usage_from_stdin <<EOF
Detected go version: ${go_version[*]}.
Kubernetes requires ${minimum_go_version} or greater.
Please install ${minimum_go_version} or later.
EOF
return 2
fi
}
# kube::golang::setup_env will check that the `go` commands is available in
# ${PATH}. It will also check that the Go version is good enough for the
# Kubernetes build.
#
# Inputs:
# KUBE_EXTRA_GOPATH - If set, this is included in created GOPATH
#
# Outputs:
# env-var GOPATH points to our local output dir
# env-var GOBIN is unset (we want binaries in a predictable place)
# env-var GO15VENDOREXPERIMENT=1
# current directory is within GOPATH
kube::golang::setup_env() {
kube::golang::verify_go_version
kube::golang::create_gopath_tree
export GOPATH="${KUBE_GOPATH}"
export GOCACHE="${KUBE_GOPATH}/cache"
# Append KUBE_EXTRA_GOPATH to the GOPATH if it is defined.
if [[ -n ${KUBE_EXTRA_GOPATH:-} ]]; then
GOPATH="${GOPATH}:${KUBE_EXTRA_GOPATH}"
fi
# Make sure our own Go binaries are in PATH.
export PATH="${KUBE_GOPATH}/bin:${PATH}"
# Change directories so that we are within the GOPATH. Some tools get really
# upset if this is not true. We use a whole fake GOPATH here to collect the
# resultant binaries. Go will not let us use GOBIN with `go install` and
# cross-compiling, and `go install -o <file>` only works for a single pkg.
local subdir
subdir=$(kube::realpath . | sed "s|$KUBE_ROOT||")
cd "${KUBE_GOPATH}/src/${KUBE_GO_PACKAGE}/${subdir}"
# Set GOROOT so binaries that parse code can work properly.
export GOROOT=$(go env GOROOT)
# Unset GOBIN in case it already exists in the current session.
unset GOBIN
# This seems to matter to some tools (godep, ginkgo...)
export GO15VENDOREXPERIMENT=1
}
# This will take binaries from $GOPATH/bin and copy them to the appropriate
# place in ${KUBE_OUTPUT_BINDIR}
#
# Ideally this wouldn't be necessary and we could just set GOBIN to
# KUBE_OUTPUT_BINDIR but that won't work in the face of cross compilation. 'go
# install' will place binaries that match the host platform directly in $GOBIN
# while placing cross compiled binaries into `platform_arch` subdirs. This
# complicates pretty much everything else we do around packaging and such.
kube::golang::place_bins() {
local host_platform
host_platform=$(kube::golang::host_platform)
V=2 kube::log::status "Placing binaries"
local platform
for platform in "${KUBE_CLIENT_PLATFORMS[@]}"; do
# The substitution on platform_src below will replace all slashes with
# underscores. It'll transform darwin/amd64 -> darwin_amd64.
local platform_src="/${platform//\//_}"
if [[ "$platform" == "$host_platform" ]]; then
platform_src=""
rm -f "${THIS_PLATFORM_BIN}"
ln -s "${KUBE_OUTPUT_BINPATH}/${platform}" "${THIS_PLATFORM_BIN}"
fi
local full_binpath_src="${KUBE_GOPATH}/bin${platform_src}"
if [[ -d "${full_binpath_src}" ]]; then
mkdir -p "${KUBE_OUTPUT_BINPATH}/${platform}"
find "${full_binpath_src}" -maxdepth 1 -type f -exec \
rsync -pc {} "${KUBE_OUTPUT_BINPATH}/${platform}" \;
fi
done
}
# Try and replicate the native binary placement of go install without
# calling go install.
kube::golang::outfile_for_binary() {
local binary=$1
local platform=$2
local output_path="${KUBE_GOPATH}/bin"
if [[ "$platform" != "$host_platform" ]]; then
output_path="${output_path}/${platform//\//_}"
fi
local bin=$(basename "${binary}")
if [[ ${GOOS} == "windows" ]]; then
bin="${bin}.exe"
fi
echo "${output_path}/${bin}"
}
kube::golang::build_binaries_for_platform() {
local platform=$1
local -a statics=()
local -a nonstatics=()
local -a tests=()
V=2 kube::log::info "Env for ${platform}: GOOS=${GOOS-} GOARCH=${GOARCH-} GOROOT=${GOROOT-} CGO_ENABLED=${CGO_ENABLED-} CC=${CC-}"
for binary in "${binaries[@]}"; do
if [[ "${binary}" =~ ".test"$ ]]; then
tests+=($binary)
elif kube::golang::is_statically_linked_library "${binary}"; then
statics+=($binary)
else
nonstatics+=($binary)
fi
done
if [[ "${#statics[@]}" != 0 ]]; then
CGO_ENABLED=0 go install -installsuffix static "${goflags[@]:+${goflags[@]}}" \
-gcflags "${gogcflags}" \
-ldflags "${goldflags}" \
"${statics[@]:+${statics[@]}}"
fi
if [[ "${#nonstatics[@]}" != 0 ]]; then
go install "${goflags[@]:+${goflags[@]}}" \
-gcflags "${gogcflags}" \
-ldflags "${goldflags}" \
"${nonstatics[@]:+${nonstatics[@]}}"
fi
for test in "${tests[@]:+${tests[@]}}"; do
local outfile=$(kube::golang::outfile_for_binary "${test}" "${platform}")
local testpkg="$(dirname ${test})"
mkdir -p "$(dirname ${outfile})"
go test -c \
"${goflags[@]:+${goflags[@]}}" \
-gcflags "${gogcflags}" \
-ldflags "${goldflags}" \
-o "${outfile}" \
"${testpkg}"
done
}
# Return approximate physical memory available in gigabytes.
kube::golang::get_physmem() {
local mem
# Linux kernel version >=3.14, in kb
if mem=$(grep MemAvailable /proc/meminfo | awk '{ print $2 }'); then
echo $(( ${mem} / 1048576 ))
return
fi
# Linux, in kb
if mem=$(grep MemTotal /proc/meminfo | awk '{ print $2 }'); then
echo $(( ${mem} / 1048576 ))
return
fi
# OS X, in bytes. Note that get_physmem, as used, should only ever
# run in a Linux container (because it's only used in the multiple
# platform case, which is a Dockerized build), but this is provided
# for completeness.
if mem=$(sysctl -n hw.memsize 2>/dev/null); then
echo $(( ${mem} / 1073741824 ))
return
fi
# If we can't infer it, just give up and assume a low memory system
echo 1
}
# Build binaries targets specified
#
# Input:
# $@ - targets and go flags. If no targets are set then all binaries targets
# are built.
# KUBE_BUILD_PLATFORMS - Incoming variable of targets to build for. If unset
# then just the host architecture is built.
kube::golang::build_binaries() {
# Create a sub-shell so that we don't pollute the outer environment
(
# Check for `go` binary and set ${GOPATH}.
kube::golang::setup_env
V=2 kube::log::info "Go version: $(go version)"
local host_platform
host_platform=$(kube::golang::host_platform)
# Use eval to preserve embedded quoted strings.
local goflags goldflags gogcflags
eval "goflags=(${GOFLAGS:-})"
goldflags="${GOLDFLAGS:-} $(kube::version::ldflags)"
gogcflags="${GOGCFLAGS:-}"
local -a targets=()
local arg
for arg; do
if [[ "${arg}" == -* ]]; then
# Assume arguments starting with a dash are flags to pass to go.
goflags+=("${arg}")
else
targets+=("${arg}")
fi
done
if [[ ${#targets[@]} -eq 0 ]]; then
targets=("${KUBE_ALL_TARGETS[@]}")
fi
local -a platforms
IFS=" " read -ra platforms <<< "${KUBE_BUILD_PLATFORMS:-}"
if [[ ${#platforms[@]} -eq 0 ]]; then
platforms=("${host_platform}")
fi
local binaries
binaries=($(kube::golang::binaries_from_targets "${targets[@]}"))
local parallel=false
if [[ ${#platforms[@]} -gt 1 ]]; then
local gigs
gigs=$(kube::golang::get_physmem)
if [[ ${gigs} -ge ${KUBE_PARALLEL_BUILD_MEMORY} ]]; then
kube::log::status "Multiple platforms requested and available ${gigs}G >= threshold ${KUBE_PARALLEL_BUILD_MEMORY}G, building platforms in parallel"
parallel=true
else
kube::log::status "Multiple platforms requested, but available ${gigs}G < threshold ${KUBE_PARALLEL_BUILD_MEMORY}G, building platforms in serial"
parallel=false
fi
fi
if [[ "${parallel}" == "true" ]]; then
kube::log::status "Building go targets for {${platforms[*]}} in parallel (output will appear in a burst when complete):" "${targets[@]}"
local platform
for platform in "${platforms[@]}"; do (
kube::golang::set_platform_envs "${platform}"
kube::log::status "${platform}: build started"
kube::golang::build_binaries_for_platform ${platform}
kube::log::status "${platform}: build finished"
) &> "/tmp//${platform//\//_}.build" &
done
local fails=0
for job in $(jobs -p); do
wait ${job} || let "fails+=1"
done
for platform in "${platforms[@]}"; do
cat "/tmp//${platform//\//_}.build"
done
exit ${fails}
else
for platform in "${platforms[@]}"; do
kube::log::status "Building go targets for ${platform}:" "${targets[@]}"
(
kube::golang::set_platform_envs "${platform}"
kube::golang::build_binaries_for_platform ${platform}
)
done
fi
)
}
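
As an illustrative sketch only (real builds normally go through the make targets, which call these functions for you), cross-building a couple of targets with the helpers above might look like:

```
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..                 # same pattern the hack/ scripts use
source "${KUBE_ROOT}/hack/lib/init.sh"
export KUBE_BUILD_PLATFORMS="linux/amd64 linux/arm64"    # otherwise only the host platform is built
export KUBE_CGO_OVERRIDES="kubectl"                      # opt kubectl back into cgo despite the static list
kube::golang::build_binaries cmd/kubectl cmd/kubelet
kube::golang::place_bins                                 # copy results under ${KUBE_OUTPUT_BINPATH}/<platform>
```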

181
vendor/k8s.io/kubernetes/hack/lib/init.sh generated vendored Executable file
View File

@@ -0,0 +1,181 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# Unset CDPATH so that path interpolation can work correctly
# https://github.com/kubernetes/kubernetes/issues/52255
unset CDPATH
# The root of the build/dist directory
KUBE_ROOT="$(cd "$(dirname "${BASH_SOURCE}")/../.." && pwd -P)"
KUBE_OUTPUT_SUBPATH="${KUBE_OUTPUT_SUBPATH:-_output/local}"
KUBE_OUTPUT="${KUBE_ROOT}/${KUBE_OUTPUT_SUBPATH}"
KUBE_OUTPUT_BINPATH="${KUBE_OUTPUT}/bin"
# This controls rsync compression. Set to a value > 0 to enable rsync
# compression for build container
KUBE_RSYNC_COMPRESS="${KUBE_RSYNC_COMPRESS:-0}"
# Set no_proxy for localhost if behind a proxy, otherwise,
# the connections to localhost in scripts will time out
export no_proxy=127.0.0.1,localhost
# This is a symlink to binaries for "this platform", e.g. build tools.
THIS_PLATFORM_BIN="${KUBE_ROOT}/_output/bin"
source "${KUBE_ROOT}/hack/lib/util.sh"
source "${KUBE_ROOT}/hack/lib/logging.sh"
kube::log::install_errexit
source "${KUBE_ROOT}/hack/lib/version.sh"
source "${KUBE_ROOT}/hack/lib/golang.sh"
source "${KUBE_ROOT}/hack/lib/etcd.sh"
KUBE_OUTPUT_HOSTBIN="${KUBE_OUTPUT_BINPATH}/$(kube::util::host_platform)"
# List of all available group versions. This should be used when generating code
# or when starting an API server that you want to have everything enabled.
# The most preferred version for a group should appear first.
KUBE_AVAILABLE_GROUP_VERSIONS="${KUBE_AVAILABLE_GROUP_VERSIONS:-\
v1 \
admissionregistration.k8s.io/v1alpha1 \
admissionregistration.k8s.io/v1beta1 \
admission.k8s.io/v1beta1 \
apps/v1beta1 \
apps/v1beta2 \
apps/v1 \
authentication.k8s.io/v1 \
authentication.k8s.io/v1beta1 \
authorization.k8s.io/v1 \
authorization.k8s.io/v1beta1 \
autoscaling/v1 \
autoscaling/v2beta1 \
batch/v1 \
batch/v1beta1 \
batch/v2alpha1 \
certificates.k8s.io/v1beta1 \
extensions/v1beta1 \
events.k8s.io/v1beta1 \
imagepolicy.k8s.io/v1alpha1 \
networking.k8s.io/v1 \
policy/v1beta1 \
rbac.authorization.k8s.io/v1 \
rbac.authorization.k8s.io/v1beta1 \
rbac.authorization.k8s.io/v1alpha1 \
scheduling.k8s.io/v1alpha1 \
scheduling.k8s.io/v1beta1 \
settings.k8s.io/v1alpha1 \
storage.k8s.io/v1beta1 \
storage.k8s.io/v1 \
storage.k8s.io/v1alpha1 \
}"
# not all group versions are exposed by the server. This list contains those
# which are not available so we don't generate clients or swagger for them
KUBE_NONSERVER_GROUP_VERSIONS="
abac.authorization.kubernetes.io/v0 \
abac.authorization.kubernetes.io/v1beta1 \
componentconfig/v1alpha1 \
imagepolicy.k8s.io/v1alpha1\
admission.k8s.io/v1beta1\
"
# This emulates "readlink -f" which is not available on MacOS X.
# Test:
# T=/tmp/$$.$RANDOM
# mkdir $T
# touch $T/file
# mkdir $T/dir
# ln -s $T/file $T/linkfile
# ln -s $T/dir $T/linkdir
# function testone() {
# X=$(readlink -f $1 2>&1)
# Y=$(kube::readlinkdashf $1 2>&1)
# if [ "$X" != "$Y" ]; then
# echo readlinkdashf $1: expected "$X", got "$Y"
# fi
# }
# testone /
# testone /tmp
# testone $T
# testone $T/file
# testone $T/dir
# testone $T/linkfile
# testone $T/linkdir
# testone $T/nonexistant
# testone $T/linkdir/file
# testone $T/linkdir/dir
# testone $T/linkdir/linkfile
# testone $T/linkdir/linkdir
function kube::readlinkdashf {
# run in a subshell for simpler 'cd'
(
if [[ -d "$1" ]]; then # This also catch symlinks to dirs.
cd "$1"
pwd -P
else
cd "$(dirname "$1")"
local f
f=$(basename "$1")
if [[ -L "$f" ]]; then
readlink "$f"
else
echo "$(pwd -P)/${f}"
fi
fi
)
}
# This emulates "realpath" which is not available on MacOS X
# Test:
# T=/tmp/$$.$RANDOM
# mkdir $T
# touch $T/file
# mkdir $T/dir
# ln -s $T/file $T/linkfile
# ln -s $T/dir $T/linkdir
# function testone() {
# X=$(realpath $1 2>&1)
# Y=$(kube::realpath $1 2>&1)
# if [ "$X" != "$Y" ]; then
# echo realpath $1: expected "$X", got "$Y"
# fi
# }
# testone /
# testone /tmp
# testone $T
# testone $T/file
# testone $T/dir
# testone $T/linkfile
# testone $T/linkdir
# testone $T/nonexistant
# testone $T/linkdir/file
# testone $T/linkdir/dir
# testone $T/linkdir/linkfile
# testone $T/linkdir/linkdir
kube::realpath() {
if [[ ! -e "$1" ]]; then
echo "$1: No such file or directory" >&2
return 1
fi
kube::readlinkdashf "$1"
}

171
vendor/k8s.io/kubernetes/hack/lib/logging.sh generated vendored Normal file
View File

@@ -0,0 +1,171 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Controls verbosity of the script output and logging.
KUBE_VERBOSE="${KUBE_VERBOSE:-5}"
# Handler for when we exit automatically on an error.
# Borrowed from https://gist.github.com/ahendrix/7030300
kube::log::errexit() {
local err="${PIPESTATUS[@]}"
# If the shell we are in doesn't have errexit set (common in subshells) then
# don't dump stacks.
set +o | grep -qe "-o errexit" || return
set +o xtrace
local code="${1:-1}"
# Print out the stack trace described by $function_stack
if [ ${#FUNCNAME[@]} -gt 2 ]
then
kube::log::error "Call tree:"
for ((i=1;i<${#FUNCNAME[@]}-1;i++))
do
kube::log::error " $i: ${BASH_SOURCE[$i+1]}:${BASH_LINENO[$i]} ${FUNCNAME[$i]}(...)"
done
fi
kube::log::error_exit "Error in ${BASH_SOURCE[1]}:${BASH_LINENO[0]}. '${BASH_COMMAND}' exited with status $err" "${1:-1}" 1
}
kube::log::install_errexit() {
  # trap ERR to provide an error handler whenever a command exits nonzero; this
  # is a more verbose version of set -o errexit
trap 'kube::log::errexit' ERR
# setting errtrace allows our ERR trap handler to be propagated to functions,
# expansions and subshells
set -o errtrace
}
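# Illustrative usage sketch (the failing command below is hypothetical): a
# script that sources this library can opt in to stack traces on failure.
#   source "${KUBE_ROOT}/hack/lib/logging.sh"
#   set -o errexit
#   kube::log::install_errexit
#   some_failing_command   # the ERR trap prints a call tree before exiting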
# Print out the stack trace
#
# Args:
# $1 The number of stack frames to skip when printing.
kube::log::stack() {
local stack_skip=${1:-0}
stack_skip=$((stack_skip + 1))
if [[ ${#FUNCNAME[@]} -gt $stack_skip ]]; then
echo "Call stack:" >&2
local i
for ((i=1 ; i <= ${#FUNCNAME[@]} - $stack_skip ; i++))
do
local frame_no=$((i - 1 + stack_skip))
local source_file=${BASH_SOURCE[$frame_no]}
local source_lineno=${BASH_LINENO[$((frame_no - 1))]}
local funcname=${FUNCNAME[$frame_no]}
echo " $i: ${source_file}:${source_lineno} ${funcname}(...)" >&2
done
fi
}
# Log an error and exit.
# Args:
# $1 Message to log with the error
# $2 The error code to return
# $3 The number of stack frames to skip when printing.
kube::log::error_exit() {
local message="${1:-}"
local code="${2:-1}"
local stack_skip="${3:-0}"
stack_skip=$((stack_skip + 1))
if [[ ${KUBE_VERBOSE} -ge 4 ]]; then
local source_file=${BASH_SOURCE[$stack_skip]}
local source_line=${BASH_LINENO[$((stack_skip - 1))]}
echo "!!! Error in ${source_file}:${source_line}" >&2
[[ -z ${1-} ]] || {
echo " ${1}" >&2
}
kube::log::stack $stack_skip
echo "Exiting with status ${code}" >&2
fi
exit "${code}"
}
# Log an error but keep going. Don't dump the stack or exit.
kube::log::error() {
timestamp=$(date +"[%m%d %H:%M:%S]")
echo "!!! $timestamp ${1-}" >&2
shift
for message; do
echo " $message" >&2
done
}
# Print a usage message to stderr. The arguments are printed directly.
kube::log::usage() {
echo >&2
local message
for message; do
echo "$message" >&2
done
echo >&2
}
kube::log::usage_from_stdin() {
local messages=()
while read -r line; do
messages+=("$line")
done
kube::log::usage "${messages[@]}"
}
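# Illustrative usage sketch (the script name and flags are hypothetical):
#   kube::log::usage_from_stdin <<'USAGE'
#   Usage: example.sh [-h] <target>
#     -h  print this help and exit
#   USAGE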
# Print out some info that isn't a top level status line
kube::log::info() {
local V="${V:-0}"
if [[ $KUBE_VERBOSE < $V ]]; then
return
fi
for message; do
echo "$message"
done
}
# Just like kube::log::info, but no \n, so you can make a progress bar
kube::log::progress() {
for message; do
echo -e -n "$message"
done
}
kube::log::info_from_stdin() {
local messages=()
while read -r line; do
messages+=("$line")
done
kube::log::info "${messages[@]}"
}
# Print a status line. Formatted to show up in a stream of output.
kube::log::status() {
local V="${V:-0}"
if [[ $KUBE_VERBOSE < $V ]]; then
return
fi
timestamp=$(date +"[%m%d %H:%M:%S]")
echo "+++ $timestamp $1"
shift
for message; do
echo " $message"
done
}

90
vendor/k8s.io/kubernetes/hack/lib/protoc.sh generated vendored Normal file
View File

@@ -0,0 +1,90 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# The root of the build/dist directory
KUBE_ROOT="$(cd "$(dirname "${BASH_SOURCE}")/../.." && pwd -P)"
source "${KUBE_ROOT}/hack/lib/init.sh"
# Generates $1/api.pb.go from the protobuf file $1/api.proto
# and formats it correctly
# $1: Full path to the directory where the api.proto file is
function kube::protoc::generate_proto() {
kube::golang::setup_env
local bins=(
vendor/k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo
)
make -C "${KUBE_ROOT}" WHAT="${bins[*]}"
kube::protoc::check_protoc
local package=${1}
kube::protoc::protoc ${package}
kube::protoc::format ${package}
}
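# Illustrative usage sketch (the package path is hypothetical); a caller would
# typically source hack/lib/init.sh and then run:
#   kube::protoc::generate_proto "${KUBE_ROOT}/pkg/kubelet/apis/exampleapi/v1alpha1"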
# Checks that the current protoc version is at least version 3.0.0-beta1.
# Exits 1 if it is not.
function kube::protoc::check_protoc() {
if [[ -z "$(which protoc)" || "$(protoc --version)" != "libprotoc 3."* ]]; then
echo "Generating protobuf requires protoc 3.0.0-beta1 or newer. Please download and"
echo "install the platform appropriate Protobuf package for your OS: "
echo
echo " https://github.com/google/protobuf/releases"
echo
echo "WARNING: Protobuf changes are not being validated"
exit 1
fi
}
# Generates $1/api.pb.go from the protobuf file $1/api.proto
# $1: Full path to the directory where the api.proto file is
function kube::protoc::protoc() {
local package=${1}
gogopath=$(dirname "$(kube::util::find-binary "protoc-gen-gogo")")
PATH="${gogopath}:${PATH}" protoc \
--proto_path="${package}" \
--proto_path="${KUBE_ROOT}/vendor" \
--gogo_out=plugins=grpc:${package} ${package}/api.proto
}
# Formats $1/api.pb.go, adds the boilerplate comments and runs gofmt on it
# $1: Full path to the directory where the api.proto file is
function kube::protoc::format() {
local package=${1}
# Update boilerplate for the generated file.
echo "$(cat hack/boilerplate/boilerplate.generatego.txt ${package}/api.pb.go)" > ${package}/api.pb.go
# Run gofmt to clean up the generated code.
kube::golang::verify_go_version
gofmt -l -s -w ${package}/api.pb.go
}
# Compares the contents of $1 and $2
# Echoes $3 in case of error and exits 1
function kube::protoc::diff() {
local ret=0
diff -I "gzipped FileDescriptorProto" -I "0x" -Naupr ${1} ${2} || ret=$?
if [[ $ret -ne 0 ]]; then
echo ${3}
exit 1
fi
}

155
vendor/k8s.io/kubernetes/hack/lib/swagger.sh generated vendored Normal file
View File

@@ -0,0 +1,155 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Contains swagger related util functions.
#
set -o errexit
set -o nounset
set -o pipefail
# The root of the build/dist directory
KUBE_ROOT="$(cd "$(dirname "${BASH_SOURCE}")/../.." && pwd -P)"
# Generates types_swagger_doc_generated file for the given group version.
# $1: Name of the group version
# $2: Path to the directory where types.go for that group version exists. This
# is the directory where the file will be generated.
kube::swagger::gen_types_swagger_doc() {
local group_version=$1
local gv_dir=$2
local TMPFILE="${TMPDIR:-/tmp}/types_swagger_doc_generated.$(date +%s).go"
echo "Generating swagger type docs for ${group_version} at ${gv_dir}"
echo -e "$(cat hack/boilerplate/boilerplate.generatego.txt)\n" > "$TMPFILE"
echo "package ${group_version##*/}" >> "$TMPFILE"
cat >> "$TMPFILE" <<EOF
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
EOF
go run cmd/genswaggertypedocs/swagger_type_docs.go -s \
"${gv_dir}/types.go" \
-f - \
>> "$TMPFILE"
echo "// AUTO-GENERATED FUNCTIONS END HERE" >> "$TMPFILE"
gofmt -w -s "$TMPFILE"
mv "$TMPFILE" ""${gv_dir}"/types_swagger_doc_generated.go"
}
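# Illustrative usage sketch (the group version and directory shown are examples):
#   kube::swagger::gen_types_swagger_doc "batch/v1" "${KUBE_ROOT}/pkg/apis/batch/v1"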
# Generates API reference docs for the given API group versions.
# Required env vars:
# GROUP_VERSIONS: Array of group versions to be included in the reference
# docs.
# GV_DIRS: Array of root directories for those group versions.
# Input vars:
# $1: Root directory path for swagger spec
# $2: Root directory path where the reference docs should be generated.
kube::swagger::gen_api_ref_docs() {
: "${GROUP_VERSIONS?Must set GROUP_VERSIONS env var}"
: "${GV_DIRS?Must set GV_DIRS env var}"
echo "Generating API reference docs for group versions: ${GROUP_VERSIONS[@]}, at dirs: ${GV_DIRS[@]}"
GROUP_VERSIONS=(${GROUP_VERSIONS[@]})
GV_DIRS=(${GV_DIRS[@]})
local swagger_spec_path=${1}
local output_dir=${2}
echo "Reading swagger spec from: ${swagger_spec_path}"
echo "Generating the docs at: ${output_dir}"
# Use REPO_DIR if provided so we can set it to the host-resolvable path
# to the repo root if we are running this script from a container with
# docker mounted in as a volume.
# We pass the host output dir as the source dir to `docker run -v`, but use
# the regular one to compute diff (they will be the same if running this
# test on the host, potentially different if running in a container).
local repo_dir=${REPO_DIR:-"${KUBE_ROOT}"}
local tmp_subpath="_output/generated_html"
local output_tmp_in_host="${repo_dir}/${tmp_subpath}"
local output_tmp="${KUBE_ROOT}/${tmp_subpath}"
echo "Generating api reference docs at ${output_tmp}"
for ver in "${GROUP_VERSIONS[@]}"; do
mkdir -p "${output_tmp}/${ver}"
done
user_flags="-u $(id -u)"
if [[ $(uname) == "Darwin" ]]; then
# mapping in a uid from OS X doesn't make any sense
user_flags=""
fi
for i in "${!GROUP_VERSIONS[@]}"; do
local ver=${GROUP_VERSIONS[i]}
local dir=${GV_DIRS[i]}
local tmp_in_host="${output_tmp_in_host}/${ver}"
local register_file="${dir}/register.go"
local swagger_json_name="$(kube::util::gv-to-swagger-name "${ver}")"
docker run ${user_flags} \
--rm -v "${tmp_in_host}":/output:z \
-v "${swagger_spec_path}":/swagger-source:z \
-v "${register_file}":/register.go:z \
--net=host -e "https_proxy=${KUBERNETES_HTTPS_PROXY:-}" \
k8s.gcr.io/gen-swagger-docs:v8 \
"${swagger_json_name}"
done
# Check if we actually changed anything
pushd "${output_tmp}" > /dev/null
touch .generated_html
find . -type f | cut -sd / -f 2- | LC_ALL=C sort > .generated_html
popd > /dev/null
kube::util::ensure-gnu-sed
while read file; do
if [[ -e "${output_dir}/${file}" && -e "${output_tmp}/${file}" ]]; then
echo "comparing ${output_dir}/${file} with ${output_tmp}/${file}"
# Remove the timestamp to reduce conflicts in PR(s)
${SED} -i 's/^Last updated.*$//' "${output_tmp}/${file}"
# By now, the contents should be normalized and stripped of any
# auto-managed content.
if diff -NauprB "${output_dir}/${file}" "${output_tmp}/${file}" >/dev/null; then
# actual contents same, overwrite generated with original.
cp "${output_dir}/${file}" "${output_tmp}/${file}"
fi
fi
done <"${output_tmp}/.generated_html"
echo "Moving api reference docs from ${output_tmp} to ${output_dir}"
# Create output_dir if doesn't exist. Prevents error on copy.
mkdir -p "${output_dir}"
cp -af "${output_tmp}"/* "${output_dir}"
rm -r "${output_tmp}"
}

425
vendor/k8s.io/kubernetes/hack/lib/test.sh generated vendored Normal file
View File

@@ -0,0 +1,425 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A set of helpers for tests
readonly reset=$(tput sgr0)
readonly bold=$(tput bold)
readonly black=$(tput setaf 0)
readonly red=$(tput setaf 1)
readonly green=$(tput setaf 2)
kube::test::clear_all() {
if kube::test::if_supports_resource "rc" ; then
kubectl delete "${kube_flags[@]}" rc --all --grace-period=0 --force
fi
if kube::test::if_supports_resource "pods" ; then
kubectl delete "${kube_flags[@]}" pods --all --grace-period=0 --force
fi
}
# Prints the calling file and line number $1 levels deep
# Defaults to 2 levels so you can call this to find your own caller
kube::test::get_caller() {
local levels=${1:-2}
local caller_file="${BASH_SOURCE[$levels]}"
local caller_line="${BASH_LINENO[$levels-1]}"
echo "$(basename "${caller_file}"):${caller_line}"
}
# Force exact match of a returned result for an object query. Wrap this with || to support multiple
# valid return types.
# This runs `kubectl get` once and asserts that the result is as expected.
# $1: Object on which get should be run
# $2: The go-template to run on the result
# $3: The expected output
# $4: Additional args to be passed to kubectl
kube::test::get_object_assert() {
kube::test::object_assert 1 "$@"
}
# Asserts that the output of a given get query is as expected.
# Runs the query multiple times before failing it.
# $1: Object on which get should be run
# $2: The go-template to run on the result
# $3: The expected output
# $4: Additional args to be passed to kubectl
kube::test::wait_object_assert() {
kube::test::object_assert 10 "$@"
}
# Asserts that the output of a given get query is as expected.
# Can run the query multiple times before failing it.
# $1: Number of times the query should be run before failing it.
# $2: Object on which get should be run
# $3: The go-template to run on the result
# $4: The expected output
# $5: Additional args to be passed to kubectl
kube::test::object_assert() {
local tries=$1
local object=$2
local request=$3
local expected=$4
local args=${5:-}
for j in $(seq 1 ${tries}); do
res=$(eval kubectl get "${kube_flags[@]}" ${args} $object -o go-template=\"$request\")
if [[ "$res" =~ ^$expected$ ]]; then
echo -n ${green}
echo "$(kube::test::get_caller 3): Successful get $object $request: $res"
echo -n ${reset}
return 0
fi
echo "Waiting for Get $object $request $args: expected: $expected, got: $res"
sleep $((${j}-1))
done
echo ${bold}${red}
echo "$(kube::test::get_caller 3): FAIL!"
echo "Get $object $request"
echo " Expected: $expected"
echo " Got: $res"
echo ${reset}${red}
caller
echo ${reset}
return 1
}
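# Illustrative usage sketch (the pod named "nginx" is hypothetical):
#   kube::test::get_object_assert "pod/nginx" "{{.metadata.name}}" "nginx"
#   kube::test::wait_object_assert pods "{{range.items}}{{.metadata.name}}:{{end}}" "nginx:"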
kube::test::get_object_jsonpath_assert() {
local object=$1
local request=$2
local expected=$3
res=$(eval kubectl get "${kube_flags[@]}" $object -o jsonpath=\"$request\")
if [[ "$res" =~ ^$expected$ ]]; then
echo -n ${green}
echo "$(kube::test::get_caller): Successful get $object $request: $res"
echo -n ${reset}
return 0
else
echo ${bold}${red}
echo "$(kube::test::get_caller): FAIL!"
echo "Get $object $request"
echo " Expected: $expected"
echo " Got: $res"
echo ${reset}${red}
caller
echo ${reset}
return 1
fi
}
kube::test::describe_object_assert() {
local resource=$1
local object=$2
local matches=${@:3}
result=$(eval kubectl describe "${kube_flags[@]}" $resource $object)
for match in ${matches}; do
if [[ ! $(echo "$result" | grep ${match}) ]]; then
echo ${bold}${red}
echo "$(kube::test::get_caller): FAIL!"
echo "Describe $resource $object"
echo " Expected Match: $match"
echo " Not found in:"
echo "$result"
echo ${reset}${red}
caller
echo ${reset}
return 1
fi
done
echo -n ${green}
echo "$(kube::test::get_caller): Successful describe $resource $object:"
echo "$result"
echo -n ${reset}
return 0
}
kube::test::describe_object_events_assert() {
local resource=$1
local object=$2
local showevents=${3:-"true"}
if [[ -z "${3:-}" ]]; then
result=$(eval kubectl describe "${kube_flags[@]}" $resource $object)
else
result=$(eval kubectl describe "${kube_flags[@]}" "--show-events=$showevents" $resource $object)
fi
if [[ -n $(echo "$result" | grep "No events.\|Events:") ]]; then
local has_events="true"
else
local has_events="false"
fi
if [[ $showevents == $has_events ]]; then
echo -n ${green}
echo "$(kube::test::get_caller): Successful describe"
echo "$result"
echo ${reset}
return 0
else
echo ${bold}${red}
echo "$(kube::test::get_caller): FAIL"
if [[ $showevents == "false" ]]; then
echo " Events information should not be described in:"
else
echo " Events information not found in:"
fi
echo $result
echo ${reset}${red}
caller
echo ${reset}
return 1
fi
}
kube::test::describe_resource_assert() {
local resource=$1
local matches=${@:2}
result=$(eval kubectl describe "${kube_flags[@]}" $resource)
for match in ${matches}; do
if [[ ! $(echo "$result" | grep ${match}) ]]; then
echo ${bold}${red}
echo "FAIL!"
echo "Describe $resource"
echo " Expected Match: $match"
echo " Not found in:"
echo "$result"
echo ${reset}${red}
caller
echo ${reset}
return 1
fi
done
echo -n ${green}
echo "Successful describe $resource:"
echo "$result"
echo -n ${reset}
return 0
}
kube::test::describe_resource_events_assert() {
local resource=$1
local showevents=${2:-"true"}
result=$(eval kubectl describe "${kube_flags[@]}" "--show-events=$showevents" $resource)
if [[ $(echo "$result" | grep "No events.\|Events:") ]]; then
local has_events="true"
else
local has_events="false"
fi
if [[ $showevents == $has_events ]]; then
echo -n ${green}
echo "Successful describe"
echo "$result"
echo -n ${reset}
return 0
else
echo ${bold}${red}
echo "FAIL"
if [[ $showevents == "false" ]]; then
echo " Events information should not be described in:"
else
echo " Events information not found in:"
fi
echo $result
caller
echo ${reset}
return 1
fi
}
# Compare sort-by resource name output with the expected order specified in the last parameter
kube::test::if_sort_by_has_correct_order() {
local array=($(echo "$1" |awk '{if(NR!=1) print $1}'))
local var
for i in "${array[@]}"; do
var+="$i:"
done
kube::test::if_has_string "$var" "${@:$#}"
}
kube::test::if_has_string() {
local message=$1
local match=$2
if grep -q "${match}" <<< "${message}"; then
echo "Successful"
echo "message:$message"
echo "has:$match"
return 0
else
echo "FAIL!"
echo "message:$message"
echo "has not:$match"
caller
return 1
fi
}
kube::test::if_has_not_string() {
local message=$1
local match=$2
if grep -q "${match}" <<< "${message}"; then
echo "FAIL!"
echo "message:$message"
echo "has:$match"
caller
return 1
else
echo "Successful"
echo "message:$message"
echo "has not:$match"
return 0
fi
}
kube::test::if_empty_string() {
local match=$1
if [ -n "$match" ]; then
echo "$match is not empty"
caller
return 1
else
echo "Successful"
return 0
fi
}
# Returns true if the required resource is part of supported resources.
# Expects env vars:
# SUPPORTED_RESOURCES: Array of all resources supported by the apiserver. "*"
# means it supports all resources. For ex: ("*") or ("rc" "*") both mean that
# all resources are supported.
# $1: Name of the resource to be tested.
kube::test::if_supports_resource() {
SUPPORTED_RESOURCES=${SUPPORTED_RESOURCES:-""}
REQUIRED_RESOURCE=${1:-""}
for r in "${SUPPORTED_RESOURCES[@]}"; do
if [[ "${r}" == "*" || "${r}" == "${REQUIRED_RESOURCE}" ]]; then
return 0
fi
done
return 1
}
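# Illustrative usage sketch (assumes the caller exports SUPPORTED_RESOURCES):
#   SUPPORTED_RESOURCES=("pods" "services")
#   if kube::test::if_supports_resource "pods"; then
#     kube::test::get_object_assert pods "{{range.items}}{{.metadata.name}}:{{end}}" ''
#   fi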
kube::test::version::object_to_file() {
name=$1
flags=${2:-""}
file=$3
kubectl version $flags | grep "$name Version:" | sed -e s/"$name Version: version.Info{"/'/' -e s/'}'/'/' -e s/', '/','/g -e s/':'/'=/g' -e s/'"'/""/g | tr , '\n' > "${file}"
}
kube::test::version::json_object_to_file() {
flags=$1
file=$2
kubectl version $flags --output json | sed -e s/' '/''/g -e s/'\"'/''/g -e s/'}'/''/g -e s/'{'/''/g -e s/'clientVersion:'/'clientVersion:,'/ -e s/'serverVersion:'/'serverVersion:,'/ | tr , '\n' > "${file}"
}
kube::test::version::json_client_server_object_to_file() {
flags=$1
name=$2
file=$3
kubectl version $flags --output json | jq -r ".${name}" | sed -e s/'\"'/''/g -e s/'}'/''/g -e s/'{'/''/g -e /^$/d -e s/','/''/g -e s/':'/'='/g > "${file}"
}
kube::test::version::yaml_object_to_file() {
flags=$1
file=$2
kubectl version $flags --output yaml | sed -e s/' '/''/g -e s/'\"'/''/g -e /^$/d > "${file}"
}
kube::test::version::diff_assert() {
local original=$1
local comparator=${2:-"eq"}
local latest=$3
local diff_msg=${4:-""}
local res=""
if [ ! -f $original ]; then
echo ${bold}${red}
echo "FAIL! ${diff_msg}"
echo "the file '${original}' does not exit"
echo ${reset}${red}
caller
echo ${reset}
return 1
fi
if [ ! -f $latest ]; then
echo ${bold}${red}
echo "FAIL! ${diff_msg}"
echo "the file '${latest}' does not exit"
echo ${reset}${red}
caller
echo ${reset}
return 1
fi
sort ${original} > "${original}.sorted"
sort ${latest} > "${latest}.sorted"
if [ "$comparator" == "eq" ]; then
if [ "$(diff -iwB ${original}.sorted ${latest}.sorted)" == "" ] ; then
echo -n ${green}
echo "Successful: ${diff_msg}"
echo -n ${reset}
return 0
else
echo ${bold}${red}
echo "FAIL! ${diff_msg}"
echo " Expected: "
echo "$(cat ${original})"
echo " Got: "
echo "$(cat ${latest})"
echo ${reset}${red}
caller
echo ${reset}
return 1
fi
else
if [ ! -z "$(diff -iwB ${original}.sorted ${latest}.sorted)" ] ; then
echo -n ${green}
echo "Successful: ${diff_msg}"
echo -n ${reset}
return 0
else
echo ${bold}${red}
echo "FAIL! ${diff_msg}"
echo " Expected: "
echo "$(cat ${original})"
echo " Got: "
echo "$(cat ${latest})"
echo ${reset}${red}
caller
echo ${reset}
return 1
fi
fi
}

793
vendor/k8s.io/kubernetes/hack/lib/util.sh generated vendored Executable file
View File

@@ -0,0 +1,793 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kube::util::sortable_date() {
date "+%Y%m%d-%H%M%S"
}
kube::util::wait_for_url() {
local url=$1
local prefix=${2:-}
local wait=${3:-1}
local times=${4:-30}
local maxtime=${5:-1}
which curl >/dev/null || {
kube::log::usage "curl must be installed"
exit 1
}
local i
for i in $(seq 1 "$times"); do
local out
if out=$(curl --max-time "$maxtime" -gkfs "$url" 2>/dev/null); then
kube::log::status "On try ${i}, ${prefix}: ${out}"
return 0
fi
sleep "${wait}"
done
kube::log::error "Timed out waiting for ${prefix} to answer at ${url}; tried ${times} waiting ${wait} between each"
return 1
}
# Example: kube::util::trap_add 'echo "in trap DEBUG"' DEBUG
# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal
kube::util::trap_add() {
local trap_add_cmd
trap_add_cmd=$1
shift
for trap_add_name in "$@"; do
local existing_cmd
local new_cmd
# Grab the currently defined trap commands for this trap
existing_cmd=`trap -p "${trap_add_name}" | awk -F"'" '{print $2}'`
if [[ -z "${existing_cmd}" ]]; then
new_cmd="${trap_add_cmd}"
else
new_cmd="${trap_add_cmd};${existing_cmd}"
fi
    # Assign the trap
trap "${new_cmd}" "${trap_add_name}"
done
}
# Opposite of kube::util::ensure-temp-dir()
kube::util::cleanup-temp-dir() {
rm -rf "${KUBE_TEMP}"
}
# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
# KUBE_TEMP
kube::util::ensure-temp-dir() {
if [[ -z ${KUBE_TEMP-} ]]; then
KUBE_TEMP=$(mktemp -d 2>/dev/null || mktemp -d -t kubernetes.XXXXXX)
kube::util::trap_add kube::util::cleanup-temp-dir EXIT
fi
}
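# Illustrative usage sketch: callers can rely on KUBE_TEMP being removed on EXIT.
#   kube::util::ensure-temp-dir
#   echo "scratch data" > "${KUBE_TEMP}/scratch.txt"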
# This figures out the host platform without relying on golang. We need this as
# we don't want a golang install to be a prerequisite to building, yet we need
# this info to figure out where the final binaries are placed.
kube::util::host_platform() {
local host_os
local host_arch
case "$(uname -s)" in
Darwin)
host_os=darwin
;;
Linux)
host_os=linux
;;
*)
kube::log::error "Unsupported host OS. Must be Linux or Mac OS X."
exit 1
;;
esac
case "$(uname -m)" in
x86_64*)
host_arch=amd64
;;
i?86_64*)
host_arch=amd64
;;
amd64*)
host_arch=amd64
;;
aarch64*)
host_arch=arm64
;;
arm64*)
host_arch=arm64
;;
arm*)
host_arch=arm
;;
i?86*)
host_arch=x86
;;
s390x*)
host_arch=s390x
;;
ppc64le*)
host_arch=ppc64le
;;
*)
kube::log::error "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le."
exit 1
;;
esac
echo "${host_os}/${host_arch}"
}
kube::util::find-binary-for-platform() {
local -r lookfor="$1"
local -r platform="$2"
local locations=(
"${KUBE_ROOT}/_output/bin/${lookfor}"
"${KUBE_ROOT}/_output/dockerized/bin/${platform}/${lookfor}"
"${KUBE_ROOT}/_output/local/bin/${platform}/${lookfor}"
"${KUBE_ROOT}/platforms/${platform}/${lookfor}"
)
# Also search for binary in bazel build tree.
# The bazel go rules place binaries in subtrees like
# "bazel-bin/source/path/linux_amd64_pure_stripped/binaryname", so make sure
# the platform name is matched in the path.
locations+=($(find "${KUBE_ROOT}/bazel-bin/" -type f -executable \
-path "*/${platform/\//_}*/${lookfor}" 2>/dev/null || true) )
# List most recently-updated location.
local -r bin=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 )
echo -n "${bin}"
}
kube::util::find-binary() {
kube::util::find-binary-for-platform "$1" "$(kube::util::host_platform)"
}
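# Illustrative usage sketch (the binary names are examples):
#   kubectl_bin="$(kube::util::find-binary "kubectl")"
#   genman_bin="$(kube::util::find-binary-for-platform "genman" "linux/amd64")"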
# Run all known doc generators (today gendocs and genman for kubectl)
# $1 is the directory to put those generated documents
kube::util::gen-docs() {
local dest="$1"
# Find binary
gendocs=$(kube::util::find-binary "gendocs")
genkubedocs=$(kube::util::find-binary "genkubedocs")
genman=$(kube::util::find-binary "genman")
genyaml=$(kube::util::find-binary "genyaml")
genfeddocs=$(kube::util::find-binary "genfeddocs")
mkdir -p "${dest}/docs/user-guide/kubectl/"
"${gendocs}" "${dest}/docs/user-guide/kubectl/"
mkdir -p "${dest}/docs/admin/"
"${genkubedocs}" "${dest}/docs/admin/" "kube-apiserver"
"${genkubedocs}" "${dest}/docs/admin/" "kube-controller-manager"
"${genkubedocs}" "${dest}/docs/admin/" "cloud-controller-manager"
"${genkubedocs}" "${dest}/docs/admin/" "kube-proxy"
"${genkubedocs}" "${dest}/docs/admin/" "kube-scheduler"
"${genkubedocs}" "${dest}/docs/admin/" "kubelet"
"${genkubedocs}" "${dest}/docs/admin/" "kubeadm"
mkdir -p "${dest}/docs/man/man1/"
"${genman}" "${dest}/docs/man/man1/" "kube-apiserver"
"${genman}" "${dest}/docs/man/man1/" "kube-controller-manager"
"${genman}" "${dest}/docs/man/man1/" "cloud-controller-manager"
"${genman}" "${dest}/docs/man/man1/" "kube-proxy"
"${genman}" "${dest}/docs/man/man1/" "kube-scheduler"
"${genman}" "${dest}/docs/man/man1/" "kubelet"
"${genman}" "${dest}/docs/man/man1/" "kubectl"
"${genman}" "${dest}/docs/man/man1/" "kubeadm"
mkdir -p "${dest}/docs/yaml/kubectl/"
"${genyaml}" "${dest}/docs/yaml/kubectl/"
# create the list of generated files
pushd "${dest}" > /dev/null
touch docs/.generated_docs
find . -type f | cut -sd / -f 2- | LC_ALL=C sort > docs/.generated_docs
popd > /dev/null
}
# Puts a placeholder for every generated doc. This makes the link checker work.
kube::util::set-placeholder-gen-docs() {
local list_file="${KUBE_ROOT}/docs/.generated_docs"
if [[ -e "${list_file}" ]]; then
# remove all of the old docs; we don't want to check them in.
while read file; do
if [[ "${list_file}" != "${KUBE_ROOT}/${file}" ]]; then
cp "${KUBE_ROOT}/hack/autogenerated_placeholder.txt" "${KUBE_ROOT}/${file}"
fi
done <"${list_file}"
# The docs/.generated_docs file lists itself, so we don't need to explicitly
# delete it.
fi
}
# Removes previously generated docs; we don't want to check them in. $KUBE_ROOT
# must be set.
kube::util::remove-gen-docs() {
if [ -e "${KUBE_ROOT}/docs/.generated_docs" ]; then
# remove all of the old docs; we don't want to check them in.
while read file; do
rm "${KUBE_ROOT}/${file}" 2>/dev/null || true
done <"${KUBE_ROOT}/docs/.generated_docs"
# The docs/.generated_docs file lists itself, so we don't need to explicitly
# delete it.
fi
}
# Takes a group/version and returns the path to its location on disk, sans
# "pkg". E.g.:
# * default behavior: extensions/v1beta1 -> apis/extensions/v1beta1
# * default behavior for only a group: experimental -> apis/experimental
# * Special handling for empty group: v1 -> api/v1, unversioned -> api/unversioned
# * Special handling for groups suffixed with ".k8s.io": foo.k8s.io/v1 -> apis/foo/v1
# * Very special handling for when both group and version are "": / -> api
kube::util::group-version-to-pkg-path() {
staging_apis=(
$(
cd "${KUBE_ROOT}/staging/src/k8s.io/api" &&
find . -name types.go -exec dirname {} \; | sed "s|\./||g" | sort
))
local group_version="$1"
if [[ " ${staging_apis[@]} " =~ " ${group_version/.*k8s.io/} " ]]; then
echo "vendor/k8s.io/api/${group_version/.*k8s.io/}"
return
fi
# "v1" is the API GroupVersion
if [[ "${group_version}" == "v1" ]]; then
echo "vendor/k8s.io/api/core/v1"
return
fi
# Special cases first.
# TODO(lavalamp): Simplify this by moving pkg/api/v1 and splitting pkg/api,
# moving the results to pkg/apis/api.
case "${group_version}" in
# both group and version are "", this occurs when we generate deep copies for internal objects of the legacy v1 API.
__internal)
echo "pkg/apis/core"
;;
meta/v1)
echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1"
;;
meta/v1beta1)
echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1"
;;
*.k8s.io)
echo "pkg/apis/${group_version%.*k8s.io}"
;;
*.k8s.io/*)
echo "pkg/apis/${group_version/.*k8s.io/}"
;;
*)
echo "pkg/apis/${group_version%__internal}"
;;
esac
}
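# Illustrative expected mappings (the exact result depends on whether the group is staged):
#   kube::util::group-version-to-pkg-path "v1"        # -> vendor/k8s.io/api/core/v1
#   kube::util::group-version-to-pkg-path "meta/v1"   # -> vendor/k8s.io/apimachinery/pkg/apis/meta/v1
#   kube::util::group-version-to-pkg-path "batch/v1"  # -> vendor/k8s.io/api/batch/v1 when staged, else pkg/apis/batch/v1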
# Takes a group/version and returns the swagger-spec file name.
# default behavior: extensions/v1beta1 -> extensions_v1beta1
# special case for v1: v1 -> v1
kube::util::gv-to-swagger-name() {
local group_version="$1"
case "${group_version}" in
v1)
echo "v1"
;;
*)
echo "${group_version%/*}_${group_version#*/}"
;;
esac
}
# Fetches swagger spec from apiserver.
# Assumed vars:
# SWAGGER_API_PATH: Base path for swaggerapi on apiserver. Ex:
# http://localhost:8080/swaggerapi.
# SWAGGER_ROOT_DIR: Root dir where we want to save the fetched spec.
# VERSIONS: Array of group versions to include in swagger spec.
kube::util::fetch-swagger-spec() {
for ver in ${VERSIONS}; do
if [[ " ${KUBE_NONSERVER_GROUP_VERSIONS} " == *" ${ver} "* ]]; then
continue
fi
# fetch the swagger spec for each group version.
if [[ ${ver} == "v1" ]]; then
SUBPATH="api"
else
SUBPATH="apis"
fi
SUBPATH="${SUBPATH}/${ver}"
SWAGGER_JSON_NAME="$(kube::util::gv-to-swagger-name ${ver}).json"
curl -w "\n" -fs "${SWAGGER_API_PATH}${SUBPATH}" > "${SWAGGER_ROOT_DIR}/${SWAGGER_JSON_NAME}"
# fetch the swagger spec for the discovery mechanism at group level.
if [[ ${ver} == "v1" ]]; then
continue
fi
SUBPATH="apis/"${ver%/*}
SWAGGER_JSON_NAME="${ver%/*}.json"
curl -w "\n" -fs "${SWAGGER_API_PATH}${SUBPATH}" > "${SWAGGER_ROOT_DIR}/${SWAGGER_JSON_NAME}"
done
# fetch swagger specs for other discovery mechanism.
curl -w "\n" -fs "${SWAGGER_API_PATH}" > "${SWAGGER_ROOT_DIR}/resourceListing.json"
curl -w "\n" -fs "${SWAGGER_API_PATH}version" > "${SWAGGER_ROOT_DIR}/version.json"
curl -w "\n" -fs "${SWAGGER_API_PATH}api" > "${SWAGGER_ROOT_DIR}/api.json"
curl -w "\n" -fs "${SWAGGER_API_PATH}apis" > "${SWAGGER_ROOT_DIR}/apis.json"
curl -w "\n" -fs "${SWAGGER_API_PATH}logs" > "${SWAGGER_ROOT_DIR}/logs.json"
}
# Returns the name of the upstream remote repository name for the local git
# repo, e.g. "upstream" or "origin".
kube::util::git_upstream_remote_name() {
git remote -v | grep fetch |\
grep -E 'github.com[/:]kubernetes/kubernetes|k8s.io/kubernetes' |\
head -n 1 | awk '{print $1}'
}
# Creates a fake git tree in the given directory (default: the current
# directory) for doing things like restoring or validating godeps
kube::util::create-fake-git-tree() {
local -r target_dir=${1:-$(pwd)}
pushd "${target_dir}" >/dev/null
git init >/dev/null
git config --local user.email "nobody@k8s.io"
git config --local user.name "$0"
git add . >/dev/null
git commit -q -m "Snapshot" >/dev/null
if (( ${KUBE_VERBOSE:-5} >= 6 )); then
kube::log::status "${target_dir} is now a git tree."
fi
popd >/dev/null
}
# Checks whether godep restore was run in the current GOPATH, i.e. that all referenced repos exist
# and are checked out to the referenced rev.
kube::util::godep_restored() {
local -r godeps_json=${1:-Godeps/Godeps.json}
local -r gopath=${2:-${GOPATH%:*}}
if ! which jq &>/dev/null; then
echo "jq not found. Please install." 1>&2
return 1
fi
local root
local old_rev=""
while read path rev; do
rev=$(echo "${rev}" | sed "s/['\"]//g") # remove quotes which are around revs sometimes
if [[ "${rev}" == "${old_rev}" ]] && [[ "${path}" == "${root}"* ]]; then
# avoid checking the same git/hg root again
continue
fi
root="${path}"
while [ "${root}" != "." -a ! -d "${gopath}/src/${root}/.git" -a ! -d "${gopath}/src/${root}/.hg" ]; do
root=$(dirname "${root}")
done
if [ "${root}" == "." ]; then
echo "No checkout of ${path} found in GOPATH \"${gopath}\"." 1>&2
return 1
fi
local head
if [ -d "${gopath}/src/${root}/.git" ]; then
head="$(cd "${gopath}/src/${root}" && git rev-parse HEAD)"
else
head="$(cd "${gopath}/src/${root}" && hg parent --template '{node}')"
fi
if [ "${head}" != "${rev}" ]; then
echo "Unexpected HEAD '${head}' at ${gopath}/src/${root}, expected '${rev}'." 1>&2
return 1
fi
old_rev="${rev}"
done < <(jq '.Deps|.[]|.ImportPath + " " + .Rev' -r < "${godeps_json}")
return 0
}
# Exits script if working directory is dirty. If it's run interactively in the terminal
# the user can commit changes in a second terminal. This script will wait.
kube::util::ensure_clean_working_dir() {
while ! git diff HEAD --exit-code &>/dev/null; do
echo -e "\nUnexpected dirty working directory:\n"
if tty -s; then
git status -s
else
git diff -a # be more verbose in log files without tty
exit 1
fi | sed 's/^/ /'
echo -e "\nCommit your changes in another terminal and then continue here by pressing enter."
read
done 1>&2
}
# Ensure that the given godep version is installed and in the path. Almost
# nobody should use any version but the default.
kube::util::ensure_godep_version() {
GODEP_VERSION=${1:-"v80"} # this version is known to work
if [[ "$(godep version 2>/dev/null)" == *"godep ${GODEP_VERSION}"* ]]; then
return
fi
kube::log::status "Installing godep version ${GODEP_VERSION}"
go install ./vendor/github.com/tools/godep/
if ! which godep >/dev/null 2>&1; then
kube::log::error "Can't find godep - is your GOPATH 'bin' in your PATH?"
kube::log::error " GOPATH: ${GOPATH}"
kube::log::error " PATH: ${PATH}"
return 1
fi
if [[ "$(godep version 2>/dev/null)" != *"godep ${GODEP_VERSION}"* ]]; then
kube::log::error "Wrong godep version - is your GOPATH 'bin' in your PATH?"
kube::log::error " expected: godep ${GODEP_VERSION}"
kube::log::error " got: $(godep version)"
kube::log::error " GOPATH: ${GOPATH}"
kube::log::error " PATH: ${PATH}"
return 1
fi
}
# Ensure that none of the staging repos is checked out in the GOPATH because this
# easily confuses godep.
kube::util::ensure_no_staging_repos_in_gopath() {
kube::util::ensure_single_dir_gopath
local error=0
for repo_file in "${KUBE_ROOT}"/staging/src/k8s.io/*; do
if [[ ! -d "$repo_file" ]]; then
# not a directory or there were no files
continue;
fi
repo="$(basename "$repo_file")"
if [ -e "${GOPATH}/src/k8s.io/${repo}" ]; then
echo "k8s.io/${repo} exists in GOPATH. Remove before running godep-save.sh." 1>&2
error=1
fi
done
if [ "${error}" = "1" ]; then
exit 1
fi
}
# Checks that the GOPATH is simple, i.e. consists only of one directory, not multiple.
kube::util::ensure_single_dir_gopath() {
if [[ "${GOPATH}" == *:* ]]; then
echo "GOPATH must consist of a single directory." 1>&2
exit 1
fi
}
# Checks whether there are any files matching pattern $2 changed between the
# current branch and upstream branch named by $1.
# Returns 1 (false) if there are no changes, 0 (true) if there are changes
# detected.
kube::util::has_changes_against_upstream_branch() {
local -r git_branch=$1
local -r pattern=$2
local -r not_pattern=${3:-totallyimpossiblepattern}
local full_branch
full_branch="$(kube::util::git_upstream_remote_name)/${git_branch}"
echo "Checking for '${pattern}' changes against '${full_branch}'"
# make sure the branch is valid, otherwise the check will pass erroneously.
if ! git describe "${full_branch}" >/dev/null; then
# abort!
exit 1
fi
# notice this uses ... to find the first shared ancestor
if git diff --name-only "${full_branch}...HEAD" | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
return 0
fi
# also check for pending changes
if git status --porcelain | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
echo "Detected '${pattern}' uncommitted changes."
return 0
fi
echo "No '${pattern}' changes detected."
return 1
}
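# Illustrative usage sketch (the branch name and pattern are examples):
#   if kube::util::has_changes_against_upstream_branch "master" "\.go$"; then
#     echo "Go sources changed; running verification"
#   fi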
kube::util::download_file() {
local -r url=$1
local -r destination_file=$2
  rm "${destination_file}" &> /dev/null || true
for i in $(seq 5)
do
if ! curl -fsSL --retry 3 --keepalive-time 2 ${url} -o ${destination_file}; then
echo "Downloading ${url} failed. $((5-i)) retries left."
sleep 1
else
echo "Downloading ${url} succeed"
return 0
fi
done
return 1
}
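# Illustrative usage sketch (the URL and destination below are hypothetical):
#   kube::util::ensure-temp-dir
#   kube::util::download_file "https://example.com/etcd.tar.gz" "${KUBE_TEMP}/etcd.tar.gz"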
# Test whether openssl is installed.
# Sets:
# OPENSSL_BIN: The path to the openssl binary to use
function kube::util::test_openssl_installed {
openssl version >& /dev/null
if [ "$?" != "0" ]; then
echo "Failed to run openssl. Please ensure openssl is installed"
exit 1
fi
OPENSSL_BIN=$(command -v openssl)
}
# creates a client CA, args are sudo, dest-dir, ca-id, purpose
# purpose is dropped in after "key encipherment", you usually want
# '"client auth"'
# '"server auth"'
# '"client auth","server auth"'
function kube::util::create_signing_certkey {
local sudo=$1
local dest_dir=$2
local id=$3
local purpose=$4
# Create client ca
${sudo} /usr/bin/env bash -e <<EOF
rm -f "${dest_dir}/${id}-ca.crt" "${dest_dir}/${id}-ca.key"
${OPENSSL_BIN} req -x509 -sha256 -new -nodes -days 365 -newkey rsa:2048 -keyout "${dest_dir}/${id}-ca.key" -out "${dest_dir}/${id}-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment",${purpose}]}}}' > "${dest_dir}/${id}-ca-config.json"
EOF
}
# signs a client certificate: args are sudo, dest-dir, CA, filename (roughly), username, groups...
function kube::util::create_client_certkey {
local sudo=$1
local dest_dir=$2
local ca=$3
local id=$4
local cn=${5:-$4}
local groups=""
local SEP=""
shift 5
while [ -n "${1:-}" ]; do
groups+="${SEP}{\"O\":\"$1\"}"
SEP=","
shift 1
done
${sudo} /usr/bin/env bash -e <<EOF
cd ${dest_dir}
echo '{"CN":"${cn}","names":[${groups}],"hosts":[""],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare client-${id}
mv "client-${id}-key.pem" "client-${id}.key"
mv "client-${id}.pem" "client-${id}.crt"
rm -f "client-${id}.csr"
EOF
}
# signs a serving certificate: args are sudo, dest-dir, ca, filename (roughly), subject, hosts...
function kube::util::create_serving_certkey {
local sudo=$1
local dest_dir=$2
local ca=$3
local id=$4
local cn=${5:-$4}
local hosts=""
local SEP=""
shift 5
while [ -n "${1:-}" ]; do
hosts+="${SEP}\"$1\""
SEP=","
shift 1
done
${sudo} /usr/bin/env bash -e <<EOF
cd ${dest_dir}
echo '{"CN":"${cn}","hosts":[${hosts}],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare serving-${id}
mv "serving-${id}-key.pem" "serving-${id}.key"
mv "serving-${id}.pem" "serving-${id}.crt"
rm -f "serving-${id}.csr"
EOF
}
# creates a self-contained kubeconfig: args are sudo, dest-dir, ca file, host, port, client id, token(optional)
function kube::util::write_client_kubeconfig {
local sudo=$1
local dest_dir=$2
local ca_file=$3
local api_host=$4
local api_port=$5
local client_id=$6
local token=${7:-}
cat <<EOF | ${sudo} tee "${dest_dir}"/${client_id}.kubeconfig > /dev/null
apiVersion: v1
kind: Config
clusters:
- cluster:
certificate-authority: ${ca_file}
server: https://${api_host}:${api_port}/
name: local-up-cluster
users:
- user:
token: ${token}
client-certificate: ${dest_dir}/client-${client_id}.crt
client-key: ${dest_dir}/client-${client_id}.key
name: local-up-cluster
contexts:
- context:
cluster: local-up-cluster
user: local-up-cluster
name: local-up-cluster
current-context: local-up-cluster
EOF
# flatten the kubeconfig files to make them self contained
username=$(whoami)
${sudo} /usr/bin/env bash -e <<EOF
$(kube::util::find-binary kubectl) --kubeconfig="${dest_dir}/${client_id}.kubeconfig" config view --minify --flatten > "/tmp/${client_id}.kubeconfig"
mv -f "/tmp/${client_id}.kubeconfig" "${dest_dir}/${client_id}.kubeconfig"
chown ${username} "${dest_dir}/${client_id}.kubeconfig"
EOF
}
# Determines if docker can be run, failures may simply require that the user be added to the docker group.
function kube::util::ensure_docker_daemon_connectivity {
DOCKER=(docker ${DOCKER_OPTS})
if ! "${DOCKER[@]}" info > /dev/null 2>&1 ; then
cat <<'EOF' >&2
Can't connect to 'docker' daemon. Please fix and retry.
Possible causes:
- Docker Daemon not started
- Linux: confirm via your init system
- macOS w/ docker-machine: run `docker-machine ls` and `docker-machine start <name>`
- macOS w/ Docker for Mac: Check the menu bar and start the Docker application
- DOCKER_HOST hasn't been set or is set incorrectly
- Linux: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
- macOS w/ docker-machine: run `eval "$(docker-machine env <name>)"`
- macOS w/ Docker for Mac: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
- Other things to check:
- Linux: User isn't in 'docker' group. Add and relogin.
- Something like 'sudo usermod -a -G docker ${USER}'
- RHEL7 bug and workaround: https://bugzilla.redhat.com/show_bug.cgi?id=1119282#c8
EOF
return 1
fi
}
# Wait for background jobs to finish. Return with
# an error status if any of the jobs failed.
kube::util::wait-for-jobs() {
local fail=0
local job
for job in $(jobs -p); do
wait "${job}" || fail=$((fail + 1))
done
return ${fail}
}
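# Illustrative usage sketch (build_one and build_two are hypothetical functions):
#   build_one &
#   build_two &
#   kube::util::wait-for-jobs || kube::log::error "one or more background jobs failed"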
# kube::util::join <delim> <list...>
# Concatenates the list elements with the delimiter passed as first parameter
#
# Ex: kube::util::join , a b c
# -> a,b,c
function kube::util::join {
local IFS="$1"
shift
echo "$*"
}
# Downloads cfssl/cfssljson into $1 directory if they do not already exist in PATH
#
# Assumed vars:
# $1 (cfssl directory) (optional)
#
# Sets:
# CFSSL_BIN: The path of the installed cfssl binary
# CFSSLJSON_BIN: The path of the installed cfssljson binary
#
function kube::util::ensure-cfssl {
if command -v cfssl &>/dev/null && command -v cfssljson &>/dev/null; then
CFSSL_BIN=$(command -v cfssl)
CFSSLJSON_BIN=$(command -v cfssljson)
return 0
fi
# Create a temp dir for cfssl if no directory was given
local cfssldir=${1:-}
if [[ -z "${cfssldir}" ]]; then
kube::util::ensure-temp-dir
cfssldir="${KUBE_TEMP}/cfssl"
fi
mkdir -p "${cfssldir}"
pushd "${cfssldir}" > /dev/null
echo "Unable to successfully run 'cfssl' from $PATH; downloading instead..."
kernel=$(uname -s)
case "${kernel}" in
Linux)
curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
;;
Darwin)
curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_darwin-amd64
curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_darwin-amd64
;;
*)
echo "Unknown, unsupported platform: ${kernel}." >&2
echo "Supported platforms: Linux, Darwin." >&2
exit 2
esac
chmod +x cfssl || true
chmod +x cfssljson || true
CFSSL_BIN="${cfssldir}/cfssl"
CFSSLJSON_BIN="${cfssldir}/cfssljson"
if [[ ! -x ${CFSSL_BIN} || ! -x ${CFSSLJSON_BIN} ]]; then
echo "Failed to download 'cfssl'. Please install cfssl and cfssljson and verify they are in \$PATH."
echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
exit 1
fi
popd > /dev/null
}
# kube::util::ensure_dockerized
# Confirms that the script is being run inside a kube-build image
#
function kube::util::ensure_dockerized {
if [[ -f /kube-build-image ]]; then
return 0
else
echo "ERROR: This script is designed to be run inside a kube-build container"
exit 1
fi
}
# kube::util::ensure-gnu-sed
# Determines which sed binary is gnu-sed on linux/darwin
#
# Sets:
# SED: The name of the gnu-sed binary
#
function kube::util::ensure-gnu-sed {
if LANG=C sed --help 2>&1 | grep -q GNU; then
SED="sed"
elif which gsed &>/dev/null; then
SED="gsed"
else
kube::log::error "Failed to find GNU sed as sed or gsed. If you are on Mac: brew install gnu-sed." >&2
return 1
fi
}
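# Illustrative usage sketch: call the helper before using ${SED}.
#   kube::util::ensure-gnu-sed
#   ${SED} -i 's/foo/bar/g' "${KUBE_TEMP}/example.txt"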
# Some useful colors.
if [[ -z "${color_start-}" ]]; then
declare -r color_start="\033["
declare -r color_red="${color_start}0;31m"
declare -r color_yellow="${color_start}0;33m"
declare -r color_green="${color_start}0;32m"
declare -r color_norm="${color_start}0m"
fi
# ex: ts=2 sw=2 et filetype=sh

175
vendor/k8s.io/kubernetes/hack/lib/version.sh generated vendored Normal file
View File

@@ -0,0 +1,175 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# Version management helpers. These functions help to set, save and load the
# following variables:
#
# KUBE_GIT_COMMIT - The git commit id corresponding to this
# source code.
# KUBE_GIT_TREE_STATE - "clean" indicates no changes since the git commit id
# "dirty" indicates source code changes after the git commit id
# "archive" indicates the tree was produced by 'git archive'
# KUBE_GIT_VERSION - "vX.Y" used to indicate the last release version.
# KUBE_GIT_MAJOR - The major part of the version
# KUBE_GIT_MINOR - The minor component of the version
# Grovels through git to set a set of env variables.
#
# If KUBE_GIT_VERSION_FILE is set, this function will load from that file instead of
# querying git.
kube::version::get_version_vars() {
if [[ -n ${KUBE_GIT_VERSION_FILE-} ]]; then
kube::version::load_version_vars "${KUBE_GIT_VERSION_FILE}"
return
fi
# If the kubernetes source was exported through git archive, then
# we likely don't have a git tree, but these magic values may be filled in.
if [[ '$Format:%%$' == "%" ]]; then
KUBE_GIT_COMMIT='$Format:%H$'
KUBE_GIT_TREE_STATE="archive"
# When a 'git archive' is exported, the '$Format:%D$' below will look
# something like 'HEAD -> release-1.8, tag: v1.8.3' where then 'tag: '
# can be extracted from it.
if [[ '$Format:%D$' =~ tag:\ (v[^ ,]+) ]]; then
KUBE_GIT_VERSION="${BASH_REMATCH[1]}"
fi
fi
local git=(git --work-tree "${KUBE_ROOT}")
if [[ -n ${KUBE_GIT_COMMIT-} ]] || KUBE_GIT_COMMIT=$("${git[@]}" rev-parse "HEAD^{commit}" 2>/dev/null); then
if [[ -z ${KUBE_GIT_TREE_STATE-} ]]; then
# Check if the tree is dirty. default to dirty
if git_status=$("${git[@]}" status --porcelain 2>/dev/null) && [[ -z ${git_status} ]]; then
KUBE_GIT_TREE_STATE="clean"
else
KUBE_GIT_TREE_STATE="dirty"
fi
fi
# Use git describe to find the version based on tags.
if [[ -n ${KUBE_GIT_VERSION-} ]] || KUBE_GIT_VERSION=$("${git[@]}" describe --tags --abbrev=14 "${KUBE_GIT_COMMIT}^{commit}" 2>/dev/null); then
# This translates the "git describe" to an actual semver.org
# compatible semantic version that looks something like this:
# v1.1.0-alpha.0.6+84c76d1142ea4d
#
# TODO: We continue calling this "git version" because so many
# downstream consumers are expecting it there.
DASHES_IN_VERSION=$(echo "${KUBE_GIT_VERSION}" | sed "s/[^-]//g")
if [[ "${DASHES_IN_VERSION}" == "---" ]] ; then
# We have distance to subversion (v1.1.0-subversion-1-gCommitHash)
KUBE_GIT_VERSION=$(echo "${KUBE_GIT_VERSION}" | sed "s/-\([0-9]\{1,\}\)-g\([0-9a-f]\{14\}\)$/.\1\+\2/")
elif [[ "${DASHES_IN_VERSION}" == "--" ]] ; then
# We have distance to base tag (v1.1.0-1-gCommitHash)
KUBE_GIT_VERSION=$(echo "${KUBE_GIT_VERSION}" | sed "s/-g\([0-9a-f]\{14\}\)$/+\1/")
fi
if [[ "${KUBE_GIT_TREE_STATE}" == "dirty" ]]; then
# git describe --dirty only considers changes to existing files, but
# that is problematic since new untracked .go files affect the build,
# so use our idea of "dirty" from git status instead.
KUBE_GIT_VERSION+="-dirty"
fi
# Try to match the "git describe" output to a regex to try to extract
# the "major" and "minor" versions and whether this is the exact tagged
# version or whether the tree is between two tagged versions.
if [[ "${KUBE_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?([-].*)?([+].*)?$ ]]; then
KUBE_GIT_MAJOR=${BASH_REMATCH[1]}
KUBE_GIT_MINOR=${BASH_REMATCH[2]}
if [[ -n "${BASH_REMATCH[4]}" ]]; then
KUBE_GIT_MINOR+="+"
fi
fi
# If KUBE_GIT_VERSION is not a valid Semantic Version, then refuse to build.
if ! [[ "${KUBE_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?(-[0-9A-Za-z.-]+)?(\+[0-9A-Za-z.-]+)?$ ]]; then
echo "KUBE_GIT_VERSION should be a valid Semantic Version. Current value: ${KUBE_GIT_VERSION}"
echo "Please see more details here: https://semver.org"
exit 1
fi
fi
fi
}
# Saves the environment flags to $1
kube::version::save_version_vars() {
local version_file=${1-}
[[ -n ${version_file} ]] || {
echo "!!! Internal error. No file specified in kube::version::save_version_vars"
return 1
}
cat <<EOF >"${version_file}"
KUBE_GIT_COMMIT='${KUBE_GIT_COMMIT-}'
KUBE_GIT_TREE_STATE='${KUBE_GIT_TREE_STATE-}'
KUBE_GIT_VERSION='${KUBE_GIT_VERSION-}'
KUBE_GIT_MAJOR='${KUBE_GIT_MAJOR-}'
KUBE_GIT_MINOR='${KUBE_GIT_MINOR-}'
EOF
}
# Loads up the version variables from file $1
kube::version::load_version_vars() {
local version_file=${1-}
[[ -n ${version_file} ]] || {
echo "!!! Internal error. No file specified in kube::version::load_version_vars"
return 1
}
source "${version_file}"
}
kube::version::ldflag() {
local key=${1}
local val=${2}
# If you update these, also update the list pkg/version/def.bzl.
echo "-X '${KUBE_GO_PACKAGE}/pkg/version.${key}=${val}'"
echo "-X '${KUBE_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version.${key}=${val}'"
}
# Prints the value that needs to be passed to the -ldflags parameter of go build
# in order to set the Kubernetes version based on the git tree status.
# IMPORTANT: if you update any of these, also update the lists in
# pkg/version/def.bzl and hack/print-workspace-status.sh.
kube::version::ldflags() {
kube::version::get_version_vars
local buildDate=
[[ -z ${SOURCE_DATE_EPOCH-} ]] || buildDate="--date=@${SOURCE_DATE_EPOCH}"
local -a ldflags=($(kube::version::ldflag "buildDate" "$(date ${buildDate} -u +'%Y-%m-%dT%H:%M:%SZ')"))
if [[ -n ${KUBE_GIT_COMMIT-} ]]; then
ldflags+=($(kube::version::ldflag "gitCommit" "${KUBE_GIT_COMMIT}"))
ldflags+=($(kube::version::ldflag "gitTreeState" "${KUBE_GIT_TREE_STATE}"))
fi
if [[ -n ${KUBE_GIT_VERSION-} ]]; then
ldflags+=($(kube::version::ldflag "gitVersion" "${KUBE_GIT_VERSION}"))
fi
if [[ -n ${KUBE_GIT_MAJOR-} && -n ${KUBE_GIT_MINOR-} ]]; then
ldflags+=(
$(kube::version::ldflag "gitMajor" "${KUBE_GIT_MAJOR}")
$(kube::version::ldflag "gitMinor" "${KUBE_GIT_MINOR}")
)
fi
# The -ldflags parameter takes a single string, so join the output.
echo "${ldflags[*]-}"
}
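# Illustrative usage sketch (the output path is hypothetical); a build script
# would pass the result straight to go build:
#   go build -ldflags "$(kube::version::ldflags)" -o _output/bin/kube-apiserver ./cmd/kube-apiserver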

23
vendor/k8s.io/kubernetes/hack/list-feature-tests.sh generated vendored Executable file
View File

@@ -0,0 +1,23 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A single script that lists all of the [Feature:.+] tests in our e2e suite.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
grep "\[Feature:\w+\]" "${KUBE_ROOT}"/test/e2e/**/*.go -Eoh | LC_ALL=C sort -u

1109
vendor/k8s.io/kubernetes/hack/local-up-cluster.sh generated vendored Executable file

File diff suppressed because it is too large Load Diff

112
vendor/k8s.io/kubernetes/hack/make-rules/BUILD generated vendored Normal file
View File

@@ -0,0 +1,112 @@
# Scripts runnable from make, e.g.
#
# cd $GOPATH/src/k8s.io/kubernetes
# make test-e2e-node
#
# The sh_binary rules below exist only to validate
# dependencies; if a shell dependency is accidentally
# deleted, a presubmit BUILD will fail.
#
# If the scripts sourced their dependencies from
# $RUNFILES (rather than $BASH_SOURCE/../.. or
# whatever), then bazel build hack/... would install
# runnable, hermetically sealed shell "binaries".
# E.g. the following command would work:
#
# ./bazel-bin/hack/make-rules/test-e2e-node
#
# TODO(#47064): Should be a sh_test instead of sh_binary
sh_binary(
name = "test-cmd",
srcs = ["test-cmd.sh"],
deps = [
":test-cmd-util",
"//hack/lib",
],
)
sh_binary(
name = "test-e2e-node",
srcs = ["test-e2e-node.sh"],
deps = [
"//hack/lib",
],
)
sh_binary(
name = "test-integration",
srcs = ["test-cmd.sh"],
deps = [
"//hack/lib",
],
)
sh_binary(
name = "test-kubeadm-cmd",
srcs = ["test-kubeadm-cmd.sh"],
deps = [
"//hack/lib",
],
)
sh_binary(
name = "build",
srcs = ["build.sh"],
deps = [
"//hack/lib",
],
)
sh_binary(
name = "cross",
srcs = ["cross.sh"],
deps = [
"//hack/lib",
],
)
sh_binary(
name = "test",
srcs = ["test.sh"],
deps = [
"//hack/lib",
],
)
sh_binary(
name = "vet",
srcs = ["vet.sh"],
deps = [
"//hack/lib",
],
)
sh_binary(
name = "verify",
srcs = ["verify.sh"],
deps = [
"//hack/lib",
],
)
sh_library(
name = "test-cmd-util",
srcs = [
"test-cmd-util.sh",
],
data = ["//pkg/kubectl/validation:testdata/v1/validPod.yaml"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,30 @@
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.PHONY: manifest-tool
MANIFEST_TOOL_DIR := $(shell mktemp -d)
export PATH := $(MANIFEST_TOOL_DIR):$(PATH)
MANIFEST_TOOL_VERSION := v0.7.0
space :=
space +=
comma := ,
prefix_linux = $(addprefix linux/,$(strip $1))
join_platforms = $(subst $(space),$(comma),$(call prefix_linux,$(strip $1)))
manifest-tool:
curl -sSL https://github.com/estesp/manifest-tool/releases/download/$(MANIFEST_TOOL_VERSION)/manifest-tool-linux-amd64 > $(MANIFEST_TOOL_DIR)/manifest-tool
chmod +x $(MANIFEST_TOOL_DIR)/manifest-tool

28
vendor/k8s.io/kubernetes/hack/make-rules/build.sh generated vendored Executable file
View File

@@ -0,0 +1,28 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script sets up a go workspace locally and builds all go components.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
KUBE_VERBOSE="${KUBE_VERBOSE:-1}"
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::build_binaries "$@"
kube::golang::place_bins

39
vendor/k8s.io/kubernetes/hack/make-rules/clean.sh generated vendored Executable file
View File

@@ -0,0 +1,39 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/util.sh"
CLEAN_PATTERNS=(
"_tmp"
"doc_tmp"
".*/zz_generated.openapi.go"
"pkg/generated/bindata.go"
"test/e2e/generated/bindata.go"
)
for pattern in ${CLEAN_PATTERNS[@]}; do
for match in $(find "${KUBE_ROOT}" -iregex "^${KUBE_ROOT}/${pattern}$"); do
echo "Removing ${match#${KUBE_ROOT}\/} .."
rm -rf "${match#${KUBE_ROOT}\/}"
done
done
# ex: ts=2 sw=2 et filetype=sh

38
vendor/k8s.io/kubernetes/hack/make-rules/cross.sh generated vendored Executable file
View File

@@ -0,0 +1,38 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script sets up a go workspace locally and builds all targets for all
# appropriate platforms.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"
# NOTE: Using "${array[*]}" here is correct. [@] becomes distinct words (in
# bash parlance).
make all WHAT="${KUBE_SERVER_TARGETS[*]}" KUBE_BUILD_PLATFORMS="${KUBE_SERVER_PLATFORMS[*]}"
make all WHAT="${KUBE_NODE_TARGETS[*]}" KUBE_BUILD_PLATFORMS="${KUBE_NODE_PLATFORMS[*]}"
make all WHAT="${KUBE_CLIENT_TARGETS[*]}" KUBE_BUILD_PLATFORMS="${KUBE_CLIENT_PLATFORMS[*]}"
make all WHAT="${KUBE_TEST_TARGETS[*]}" KUBE_BUILD_PLATFORMS="${KUBE_TEST_PLATFORMS[*]}"
make all WHAT="${KUBE_TEST_SERVER_TARGETS[*]}" KUBE_BUILD_PLATFORMS="${KUBE_TEST_SERVER_PLATFORMS[*]}"

View File

@@ -0,0 +1,72 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script finds, caches, and prints a list of all directories that hold
# *.go files. If any directory is newer than the cache, re-find everything and
# update the cache. Otherwise use the cached file.
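# Illustrative usage (the cache path is an arbitrary example):
#   $0 /tmp/kube-go-dirs.cache
# The first run walks the tree and writes the cache; subsequent runs print the
# cached list unless some directory is newer than the cache file.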
set -o errexit
set -o nounset
set -o pipefail
if [[ -z "${1:-}" ]]; then
echo "usage: $0 <cache-file>"
exit 1
fi
CACHE="$1"; shift
trap "rm -f '${CACHE}'" HUP INT TERM ERR
# This is a partial 'find' command. The caller is expected to pass the
# remaining arguments.
#
# Example:
# kfind -type f -name foobar.go
function kfind() {
# include the "special" vendor directories which are actually part
# of the Kubernetes source tree - generators will use these for
# including certain core API concepts.
find -H . ./vendor/k8s.io/apimachinery ./vendor/k8s.io/apiserver ./vendor/k8s.io/kube-aggregator ./vendor/k8s.io/apiextensions-apiserver ./vendor/k8s.io/metrics ./vendor/k8s.io/sample-apiserver ./vendor/k8s.io/api ./vendor/k8s.io/client-go ./vendor/k8s.io/code-generator ./vendor/k8s.io/sample-controller \
\( \
-not \( \
\( \
-path ./vendor -o \
-path ./staging -o \
-path ./_\* -o \
-path ./.\* -o \
-path ./docs \
\) -prune \
\) \
\) \
"$@"
}
NEED_FIND=true
# It's *significantly* faster to check whether any directories are newer than
# the cache than to blindly rebuild it.
if [[ -f "${CACHE}" ]]; then
N=$(kfind -type d -newer "${CACHE}" -print -quit | wc -l)
[[ "${N}" == 0 ]] && NEED_FIND=false
fi
mkdir -p $(dirname "${CACHE}")
if $("${NEED_FIND}"); then
kfind -type f -name \*.go \
| sed 's|/[^/]*$||' \
| sed 's|^./||' \
| LC_ALL=C sort -u \
> "${CACHE}"
fi
cat "${CACHE}"

50
vendor/k8s.io/kubernetes/hack/make-rules/make-help.sh generated vendored Executable file
View File

@@ -0,0 +1,50 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
readonly red=$(tput setaf 1)
readonly reset=$(tput sgr0)
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
ALL_TARGETS=$(make -C "${KUBE_ROOT}" PRINT_HELP=y -rpn | sed -n -e '/^$/ { n ; /^[^ .#][^ ]*:/ { s/:.*$// ; p ; } ; }' | sort)
CMD_TARGETS=$(ls -l "${KUBE_ROOT}/cmd" |awk '/^d/ {print $NF}')
CMD_FLAG=false
PLUGIN_CMD_FLAG=false
echo "--------------------------------------------------------------------------------"
for tar in $ALL_TARGETS; do
for cmdtar in $CMD_TARGETS; do
if [ $tar = $cmdtar ]; then
if [ $CMD_FLAG = true ]; then
continue 2;
fi
echo -e "${red}${CMD_TARGETS}${reset}"
make -C "${KUBE_ROOT}" $tar PRINT_HELP=y
echo "---------------------------------------------------------------------------------"
CMD_FLAG=true
continue 2
fi
done
echo -e "${red}${tar}${reset}"
make -C "${KUBE_ROOT}" $tar PRINT_HELP=y
echo "---------------------------------------------------------------------------------"
done

5693
vendor/k8s.io/kubernetes/hack/make-rules/test-cmd-util.sh generated vendored Executable file

File diff suppressed because it is too large

107
vendor/k8s.io/kubernetes/hack/make-rules/test-cmd.sh generated vendored Executable file
View File

@@ -0,0 +1,107 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This command checks that the built commands can function together for
# simple scenarios. It does not require Docker.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"
source "${KUBE_ROOT}/hack/lib/test.sh"
source "${KUBE_ROOT}/hack/make-rules/test-cmd-util.sh"
function run_kube_apiserver() {
kube::log::status "Building kube-apiserver"
make -C "${KUBE_ROOT}" WHAT="cmd/kube-apiserver"
# Start kube-apiserver
kube::log::status "Starting kube-apiserver"
# Admission Controllers to invoke prior to persisting objects in cluster
ENABLE_ADMISSION_PLUGINS="Initializers,LimitRanger,ResourceQuota"
DISABLE_ADMISSION_PLUGINS="ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook"
# Include RBAC (to exercise bootstrapping), and AlwaysAllow to allow all actions
AUTHORIZATION_MODE="RBAC,AlwaysAllow"
"${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \
--insecure-bind-address="127.0.0.1" \
--bind-address="127.0.0.1" \
--insecure-port="${API_PORT}" \
--authorization-mode="${AUTHORIZATION_MODE}" \
--secure-port="${SECURE_API_PORT}" \
--enable-admission-plugins="${ENABLE_ADMISSION_PLUGINS}" \
--disable-admission-plugins="${DISABLE_ADMISSION_PLUGINS}" \
--etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
--runtime-config=api/v1 \
--storage-media-type="${KUBE_TEST_API_STORAGE_TYPE-}" \
--cert-dir="${TMPDIR:-/tmp/}" \
--service-cluster-ip-range="10.0.0.0/24" \
--token-auth-file=hack/testdata/auth-tokens.csv 1>&2 &
APISERVER_PID=$!
kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/healthz" "apiserver"
}
function run_kube_controller_manager() {
kube::log::status "Building kube-controller-manager"
make -C "${KUBE_ROOT}" WHAT="cmd/kube-controller-manager"
# Start controller manager
kube::log::status "Starting controller-manager"
"${KUBE_OUTPUT_HOSTBIN}/kube-controller-manager" \
--port="${CTLRMGR_PORT}" \
--kube-api-content-type="${KUBE_TEST_API_TYPE-}" \
--master="127.0.0.1:${API_PORT}" 1>&2 &
CTLRMGR_PID=$!
kube::util::wait_for_url "http://127.0.0.1:${CTLRMGR_PORT}/healthz" "controller-manager"
}
# Creates a node object with name 127.0.0.1. This is required because we do not
# run kubelet.
function create_node() {
kubectl create -f - -s "http://127.0.0.1:${API_PORT}" << __EOF__
{
"kind": "Node",
"apiVersion": "v1",
"metadata": {
"name": "127.0.0.1"
},
"status": {
"capacity": {
"memory": "1Gi"
}
}
}
__EOF__
}
kube::log::status "Running kubectl tests for kube-apiserver"
setup
run_kube_apiserver
run_kube_controller_manager
create_node
SUPPORTED_RESOURCES=("*")
# WARNING: Do not wrap this call in a subshell to capture output, e.g. output=$(runTests)
# Doing so will suppress errexit behavior inside runTests
runTests
kube::log::status "TESTS PASSED"

177
vendor/k8s.io/kubernetes/hack/make-rules/test-e2e-node.sh generated vendored Executable file
View File

@@ -0,0 +1,177 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"
focus=${FOCUS:-""}
skip=${SKIP-"\[Flaky\]|\[Slow\]|\[Serial\]"}
# The number of tests that can run in parallel depends on what tests
# are running and on the size of the node. Too many, and tests will
# fail due to resource contention. 8 is a reasonable default for an
# n1-standard-1 node.
# Currently, parallelism only takes effect when REMOTE=true. For local
# runs, ginkgo's default parallelism (cores - 1) is used.
parallelism=${PARALLELISM:-8}
artifacts=${ARTIFACTS:-"/tmp/_artifacts/`date +%y%m%dT%H%M%S`"}
remote=${REMOTE:-"false"}
runtime=${RUNTIME:-"docker"}
container_runtime_endpoint=${CONTAINER_RUNTIME_ENDPOINT:-""}
image_service_endpoint=${IMAGE_SERVICE_ENDPOINT:-""}
run_until_failure=${RUN_UNTIL_FAILURE:-"false"}
test_args=${TEST_ARGS:-""}
system_spec_name=${SYSTEM_SPEC_NAME:-}
# Parse the flags to pass to ginkgo
ginkgoflags=""
if [[ $parallelism -gt 1 ]]; then
ginkgoflags="$ginkgoflags -nodes=$parallelism "
fi
if [[ $focus != "" ]]; then
ginkgoflags="$ginkgoflags -focus=\"$focus\" "
fi
if [[ $skip != "" ]]; then
ginkgoflags="$ginkgoflags -skip=\"$skip\" "
fi
if [[ $run_until_failure != "" ]]; then
ginkgoflags="$ginkgoflags -untilItFails=$run_until_failure "
fi
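# Illustrative result: with PARALLELISM=8, FOCUS='\[NodeConformance\]' and the
# defaults above, ginkgoflags ends up roughly as:
#   -nodes=8 -focus="\[NodeConformance\]" -skip="\[Flaky\]|\[Slow\]|\[Serial\]" -untilItFails=false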
# Set up the directory to copy test artifacts (logs, junit.xml, etc.) from the remote host to the local host
if [ ! -d "${artifacts}" ]; then
echo "Creating artifacts directory at ${artifacts}"
mkdir -p ${artifacts}
fi
echo "Test artifacts will be written to ${artifacts}"
if [[ $runtime == "remote" ]] ; then
if [[ ! -z $container_runtime_endpoint ]] ; then
test_args="--container-runtime-endpoint=${container_runtime_endpoint} $test_args"
fi
if [[ ! -z $image_service_endpoint ]] ; then
test_args="--image-service-endpoint=$image_service_endpoint $test_args"
fi
fi
if [ $remote = true ] ; then
# The following options are only valid in remote run.
images=${IMAGES:-""}
hosts=${HOSTS:-""}
image_project=${IMAGE_PROJECT:-"kubernetes-node-e2e-images"}
metadata=${INSTANCE_METADATA:-""}
list_images=${LIST_IMAGES:-false}
if [[ $list_images == "true" ]]; then
gcloud compute images list --project="${image_project}" | grep "e2e-node"
exit 0
fi
gubernator=${GUBERNATOR:-"false"}
image_config_file=${IMAGE_CONFIG_FILE:-""}
if [[ $hosts == "" && $images == "" && $image_config_file == "" ]]; then
image_project=${IMAGE_PROJECT:-"cos-cloud"}
gci_image=$(gcloud compute images list --project $image_project \
--no-standard-images --filter="name ~ 'cos-beta.*'" --format="table[no-heading](name)")
images=$gci_image
metadata="user-data<${KUBE_ROOT}/test/e2e_node/jenkins/gci-init.yaml,gci-update-strategy=update_disabled"
fi
instance_prefix=${INSTANCE_PREFIX:-"test"}
cleanup=${CLEANUP:-"true"}
delete_instances=${DELETE_INSTANCES:-"false"}
test_suite=${TEST_SUITE:-"default"}
# Get the compute zone
zone=$(gcloud info --format='value(config.properties.compute.zone)')
if [[ $zone == "" ]]; then
echo "Could not find gcloud compute/zone when running: \`gcloud info --format='value(config.properties.compute.zone)'\`"
exit 1
fi
# Get the compute project
project=$(gcloud info --format='value(config.project)')
if [[ $project == "" ]]; then
echo "Could not find gcloud project when running: \`gcloud info --format='value(config.project)'\`"
exit 1
fi
# Check if any of the images specified already have running instances. If so, reuse those
# instances by moving the IMAGE to a HOST.
if [[ $images != "" ]]; then
IFS=',' read -ra IM <<< "$images"
images=""
for i in "${IM[@]}"; do
if [[ $(gcloud compute instances list "${instance_prefix}-$i" | grep $i) ]]; then
if [[ $hosts != "" ]]; then
hosts="$hosts,"
fi
echo "Reusing host ${instance_prefix}-$i"
hosts="${hosts}${instance_prefix}-${i}"
else
if [[ $images != "" ]]; then
images="$images,"
fi
images="$images$i"
fi
done
fi
# Output the configuration we will try to run
echo "Running tests remotely using"
echo "Project: $project"
echo "Image Project: $image_project"
echo "Compute/Zone: $zone"
echo "Images: $images"
echo "Hosts: $hosts"
echo "Ginkgo Flags: $ginkgoflags"
echo "Instance Metadata: $metadata"
echo "Image Config File: $image_config_file"
# Invoke the runner
go run test/e2e_node/runner/remote/run_remote.go --logtostderr --vmodule=*=4 --ssh-env="gce" \
--zone="$zone" --project="$project" --gubernator="$gubernator" \
--hosts="$hosts" --images="$images" --cleanup="$cleanup" \
--results-dir="$artifacts" --ginkgo-flags="$ginkgoflags" \
--image-project="$image_project" --instance-name-prefix="$instance_prefix" \
--delete-instances="$delete_instances" --test_args="$test_args" --instance-metadata="$metadata" \
--image-config-file="$image_config_file" --system-spec-name="$system_spec_name" \
--test-suite="$test_suite" \
2>&1 | tee -i "${artifacts}/build-log.txt"
exit $?
else
# Refresh sudo credentials for local run
if ! ping -c 1 -q metadata.google.internal &> /dev/null; then
echo "Updating sudo credentials"
sudo -v || exit 1
fi
# Do not use any network plugin by default. User could override the flags with
# test_args.
test_args='--kubelet-flags="--network-plugin= --cni-bin-dir=" '$test_args
# Runtime flags
test_args='--kubelet-flags="--container-runtime='$runtime'" '$test_args
# Test using the host the script was run on
# Provided for backwards compatibility
go run test/e2e_node/runner/local/run_local.go \
--system-spec-name="$system_spec_name" --ginkgo-flags="$ginkgoflags" \
--test-flags="--container-runtime=${runtime} \
--alsologtostderr --v 4 --report-dir=${artifacts} --node-name $(hostname) \
$test_args" --build-dependencies=true 2>&1 | tee -i "${artifacts}/build-log.txt"
exit $?
fi

106
vendor/k8s.io/kubernetes/hack/make-rules/test-integration.sh generated vendored Executable file
View File

@@ -0,0 +1,106 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"
# Lists of API Versions of each groups that should be tested, groups are
# separated by comma, lists are separated by semicolon. e.g.,
# "v1,compute/v1alpha1,experimental/v1alpha2;v1,compute/v2,experimental/v1alpha3"
# TODO: It's going to be:
# KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,extensions/v1beta1"}
# FIXME: due to the current implementation of a test client (see: pkg/api/testapi/testapi.go)
# ONLY the last version is tested in each group.
ALL_VERSIONS_CSV=$(IFS=',';echo "${KUBE_AVAILABLE_GROUP_VERSIONS[*]// /,}";IFS=$)
KUBE_TEST_API_VERSIONS="${KUBE_TEST_API_VERSIONS:-${ALL_VERSIONS_CSV}}"
# Give integration tests longer to run by default.
KUBE_TIMEOUT=${KUBE_TIMEOUT:--timeout 600s}
KUBE_INTEGRATION_TEST_MAX_CONCURRENCY=${KUBE_INTEGRATION_TEST_MAX_CONCURRENCY:-"-1"}
LOG_LEVEL=${LOG_LEVEL:-2}
KUBE_TEST_ARGS=${KUBE_TEST_ARGS:-}
# Default glog module settings.
KUBE_TEST_VMODULE=${KUBE_TEST_VMODULE:-"garbagecollector*=6,graph_builder*=6"}
kube::test::find_integration_test_dirs() {
(
cd ${KUBE_ROOT}
find test/integration/ -name '*_test.go' -print0 \
| xargs -0n1 dirname | sed "s|^|${KUBE_GO_PACKAGE}/|" \
| LC_ALL=C sort -u
find vendor/k8s.io/apiextensions-apiserver/test/integration/ -name '*_test.go' -print0 \
| xargs -0n1 dirname | sed "s|^|${KUBE_GO_PACKAGE}/|" \
| LC_ALL=C sort -u
)
}
CLEANUP_REQUIRED=
cleanup() {
if [[ -z "${CLEANUP_REQUIRED}" ]]; then
return
fi
kube::log::status "Cleaning up etcd"
kube::etcd::cleanup
CLEANUP_REQUIRED=
kube::log::status "Integration test cleanup complete"
}
runTests() {
kube::log::status "Starting etcd instance"
CLEANUP_REQUIRED=1
kube::etcd::start
kube::log::status "Running integration test cases"
KUBE_RACE="-race"
make -C "${KUBE_ROOT}" test \
WHAT="${WHAT:-$(kube::test::find_integration_test_dirs | paste -sd' ' -)}" \
GOFLAGS="${GOFLAGS:-}" \
KUBE_TEST_ARGS="${KUBE_TEST_ARGS:-} ${SHORT:--short=true} --vmodule=${KUBE_TEST_VMODULE} --alsologtostderr=true" \
KUBE_RACE="" \
KUBE_TIMEOUT="${KUBE_TIMEOUT}" \
KUBE_TEST_API_VERSIONS="$1"
cleanup
}
checkEtcdOnPath() {
kube::log::status "Checking etcd is on PATH"
which etcd && return
kube::log::status "Cannot find etcd, cannot run integration tests."
kube::log::status "Please see https://github.com/kubernetes/community/blob/master/contributors/devel/testing.md#install-etcd-dependency for instructions."
kube::log::usage "You can use 'hack/install-etcd.sh' to install a copy in third_party/."
return 1
}
checkEtcdOnPath
# Run cleanup to stop etcd on interrupt or other kill signal.
trap cleanup EXIT
# If a test case is specified, just run once with v1 API version and exit
if [[ -n "${KUBE_TEST_ARGS}" ]]; then
runTests v1
exit 0
fi
# Convert the CSV to an array of API versions to test
IFS=';' read -a apiVersions <<< "${KUBE_TEST_API_VERSIONS}"
for apiVersion in "${apiVersions[@]}"; do
runTests "${apiVersion}"
done

32
vendor/k8s.io/kubernetes/hack/make-rules/test-kubeadm-cmd.sh generated vendored Executable file
View File

@@ -0,0 +1,32 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"
KUBEADM_PATH="${KUBEADM_PATH:=$(kube::realpath "${KUBE_ROOT}")/cluster/kubeadm.sh}"
# If testing a different version of kubeadm than the current build, you can
# comment this out to save yourself from needlessly building here.
make -C "${KUBE_ROOT}" WHAT=cmd/kubeadm
make -C "${KUBE_ROOT}" test \
WHAT=k8s.io/kubernetes/cmd/kubeadm/test/cmd \
KUBE_TEST_ARGS="--kubeadm-path '${KUBEADM_PATH}'"

392
vendor/k8s.io/kubernetes/hack/make-rules/test.sh generated vendored Executable file
View File

@@ -0,0 +1,392 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
# start the cache mutation detector by default so that cache mutators will be found
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
export KUBE_CACHE_MUTATION_DETECTOR
# panic the server on watch decode errors since they are considered coder mistakes
KUBE_PANIC_WATCH_DECODE_ERROR="${KUBE_PANIC_WATCH_DECODE_ERROR:-true}"
export KUBE_PANIC_WATCH_DECODE_ERROR
# Handle case where OS has sha#sum commands, instead of shasum.
if which shasum >/dev/null 2>&1; then
SHA1SUM="shasum -a1"
elif which sha1sum >/dev/null 2>&1; then
SHA1SUM="sha1sum"
else
echo "Failed to find shasum or sha1sum utility." >&2
exit 1
fi
kube::test::find_dirs() {
(
cd ${KUBE_ROOT}
find -L . -not \( \
\( \
-path './_artifacts/*' \
-o -path './bazel-*/*' \
-o -path './_output/*' \
-o -path './_gopath/*' \
-o -path './cmd/kubeadm/test/*' \
-o -path './contrib/podex/*' \
-o -path './output/*' \
-o -path './release/*' \
-o -path './target/*' \
-o -path './test/e2e/*' \
-o -path './test/e2e_node/*' \
-o -path './test/e2e_kubeadm/*' \
-o -path './test/integration/*' \
-o -path './third_party/*' \
-o -path './staging/*' \
-o -path './vendor/*' \
\) -prune \
\) -name '*_test.go' -print0 | xargs -0n1 dirname | sed "s|^\./|${KUBE_GO_PACKAGE}/|" | LC_ALL=C sort -u
find -L . \
-path './_output' -prune \
-o -path './vendor/k8s.io/client-go/*' \
-o -path './vendor/k8s.io/apiserver/*' \
-o -path './test/e2e_node/system/*' \
-name '*_test.go' -print0 | xargs -0n1 dirname | sed "s|^\./|${KUBE_GO_PACKAGE}/|" | LC_ALL=C sort -u
# run tests for client-go
find ./staging/src/k8s.io/client-go -name '*_test.go' \
-name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
# run tests for apiserver
find ./staging/src/k8s.io/apiserver -name '*_test.go' \
-name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
# run tests for apimachinery
find ./staging/src/k8s.io/apimachinery -name '*_test.go' \
-name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
find ./staging/src/k8s.io/kube-aggregator -name '*_test.go' \
-name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
find ./staging/src/k8s.io/apiextensions-apiserver -not \( \
\( \
-path '*/test/integration/*' \
\) -prune \
\) -name '*_test.go' \
-name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
find ./staging/src/k8s.io/sample-apiserver -name '*_test.go' \
-name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
)
}
KUBE_TIMEOUT=${KUBE_TIMEOUT:--timeout 120s}
KUBE_COVER=${KUBE_COVER:-n} # set to 'y' to enable coverage collection
KUBE_COVERMODE=${KUBE_COVERMODE:-atomic}
# How many 'go test' instances to run simultaneously when running tests in
# coverage mode.
KUBE_COVERPROCS=${KUBE_COVERPROCS:-4}
KUBE_RACE=${KUBE_RACE:-} # use KUBE_RACE="-race" to enable race testing
# Set to the goveralls binary path to report coverage results to Coveralls.io.
KUBE_GOVERALLS_BIN=${KUBE_GOVERALLS_BIN:-}
# Lists of API Versions of each groups that should be tested, groups are
# separated by comma, lists are separated by semicolon. e.g.,
# "v1,compute/v1alpha1,experimental/v1alpha2;v1,compute/v2,experimental/v1alpha3"
# FIXME: due to the current implementation of a test client (see: pkg/api/testapi/testapi.go)
# ONLY the last version is tested in each group.
ALL_VERSIONS_CSV=$(IFS=',';echo "${KUBE_AVAILABLE_GROUP_VERSIONS[*]// /,}";IFS=$)
KUBE_TEST_API_VERSIONS="${KUBE_TEST_API_VERSIONS:-${ALL_VERSIONS_CSV}}"
# once we have multiple group support
# Create a junit-style XML test report in this directory if set.
KUBE_JUNIT_REPORT_DIR=${KUBE_JUNIT_REPORT_DIR:-}
# Set to 'y' to keep the verbose stdout from tests when KUBE_JUNIT_REPORT_DIR is
# set.
KUBE_KEEP_VERBOSE_TEST_OUTPUT=${KUBE_KEEP_VERBOSE_TEST_OUTPUT:-n}
kube::test::usage() {
kube::log::usage_from_stdin <<EOF
usage: $0 [OPTIONS] [TARGETS]
OPTIONS:
-p <number> : number of parallel workers, must be >= 1
EOF
}
isnum() {
[[ "$1" =~ ^[0-9]+$ ]]
}
PARALLEL="${PARALLEL:-1}"
while getopts "hp:i:" opt ; do
case $opt in
h)
kube::test::usage
exit 0
;;
p)
PARALLEL="$OPTARG"
if ! isnum "${PARALLEL}" || [[ "${PARALLEL}" -le 0 ]]; then
kube::log::usage "'$0': argument to -p must be numeric and greater than 0"
kube::test::usage
exit 1
fi
;;
i)
kube::log::usage "'$0': use GOFLAGS='-count <num-iterations>'"
kube::test::usage
exit 1
;;
?)
kube::test::usage
exit 1
;;
:)
kube::log::usage "Option -$OPTARG <value>"
kube::test::usage
exit 1
;;
esac
done
shift $((OPTIND - 1))
# Use eval to preserve embedded quoted strings.
eval "goflags=(${GOFLAGS:-})"
eval "testargs=(${KUBE_TEST_ARGS:-})"
# Used to filter verbose test output.
go_test_grep_pattern=".*"
# The go-junit-report tool needs full test case information to produce a
# meaningful report.
if [[ -n "${KUBE_JUNIT_REPORT_DIR}" ]] ; then
goflags+=(-v)
# Show only summary lines by matching lines like "status package/test"
go_test_grep_pattern="^[^[:space:]]\+[[:space:]]\+[^[:space:]]\+/[^[[:space:]]\+"
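# Illustrative example: a summary line such as
#   ok      k8s.io/kubernetes/pkg/api/endpoints     0.011s
# matches this pattern, while verbose lines like "=== RUN TestFoo" do not.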
fi
# Filter out arguments that start with "-" and move them to goflags.
testcases=()
for arg; do
if [[ "${arg}" == -* ]]; then
goflags+=("${arg}")
else
testcases+=("${arg}")
fi
done
if [[ ${#testcases[@]} -eq 0 ]]; then
testcases=($(kube::test::find_dirs))
fi
set -- "${testcases[@]+${testcases[@]}}"
junitFilenamePrefix() {
if [[ -z "${KUBE_JUNIT_REPORT_DIR}" ]]; then
echo ""
return
fi
mkdir -p "${KUBE_JUNIT_REPORT_DIR}"
# This filename isn't parsed by anything, and we must avoid
# exceeding the 255-character filename limit. KUBE_TEST_API
# barely fits there, and in coverage mode test names are
# appended to generated file names, easily exceeding
# 255 chars in length. So let's just use a sha1 hash of it.
local KUBE_TEST_API_HASH="$(echo -n "${KUBE_TEST_API//\//-}"| ${SHA1SUM} |awk '{print $1}')"
echo "${KUBE_JUNIT_REPORT_DIR}/junit_${KUBE_TEST_API_HASH}_$(kube::util::sortable_date)"
}
verifyAndSuggestPackagePath() {
local specified_package_path="$1"
local alternative_package_path="$2"
local original_package_path="$3"
local suggestion_package_path="$4"
if ! [ -d "$specified_package_path" ]; then
# Because k8s sets a localized $GOPATH for testing, seeing the actual
# directory can be confusing. Instead, just show $GOPATH if it exists in the
# $specified_package_path.
local printable_package_path=$(echo "$specified_package_path" | sed "s|$GOPATH|\$GOPATH|")
kube::log::error "specified test path '$printable_package_path' does not exist"
if [ -d "$alternative_package_path" ]; then
kube::log::info "try changing \"$original_package_path\" to \"$suggestion_package_path\""
fi
exit 1
fi
}
verifyPathsToPackagesUnderTest() {
local packages_under_test=($@)
for package_path in "${packages_under_test[@]}"; do
local local_package_path="$package_path"
local go_package_path="$GOPATH/src/$package_path"
if [[ "${package_path:0:2}" == "./" ]] ; then
verifyAndSuggestPackagePath "$local_package_path" "$go_package_path" "$package_path" "${package_path:2}"
else
verifyAndSuggestPackagePath "$go_package_path" "$local_package_path" "$package_path" "./$package_path"
fi
done
}
produceJUnitXMLReport() {
local -r junit_filename_prefix=$1
if [[ -z "${junit_filename_prefix}" ]]; then
return
fi
local test_stdout_filenames
local junit_xml_filename
test_stdout_filenames=$(ls ${junit_filename_prefix}*.stdout)
junit_xml_filename="${junit_filename_prefix}.xml"
if ! command -v go-junit-report >/dev/null 2>&1; then
kube::log::error "go-junit-report not found; please install with " \
"go get -u github.com/jstemmer/go-junit-report"
return
fi
cat ${test_stdout_filenames} | go-junit-report > "${junit_xml_filename}"
if [[ ! ${KUBE_KEEP_VERBOSE_TEST_OUTPUT} =~ ^[yY]$ ]]; then
rm ${test_stdout_filenames}
fi
kube::log::status "Saved JUnit XML test report to ${junit_xml_filename}"
}
runTests() {
local junit_filename_prefix
junit_filename_prefix=$(junitFilenamePrefix)
verifyPathsToPackagesUnderTest "$@"
# If we're not collecting coverage, run all requested tests with one 'go test'
# command, which is much faster.
if [[ ! ${KUBE_COVER} =~ ^[yY]$ ]]; then
kube::log::status "Running tests without code coverage"
go test "${goflags[@]:+${goflags[@]}}" \
${KUBE_RACE} ${KUBE_TIMEOUT} "${@}" \
"${testargs[@]:+${testargs[@]}}" \
| tee ${junit_filename_prefix:+"${junit_filename_prefix}.stdout"} \
| grep --binary-files=text "${go_test_grep_pattern}" && rc=$? || rc=$?
produceJUnitXMLReport "${junit_filename_prefix}"
return ${rc}
fi
# Create coverage report directories.
KUBE_TEST_API_HASH="$(echo -n "${KUBE_TEST_API//\//-}"| ${SHA1SUM} |awk '{print $1}')"
cover_report_dir="/tmp/k8s_coverage/${KUBE_TEST_API_HASH}/$(kube::util::sortable_date)"
cover_profile="coverage.out" # Name for each individual coverage profile
kube::log::status "Saving coverage output in '${cover_report_dir}'"
mkdir -p "${@+${@/#/${cover_report_dir}/}}"
# Run all specified tests, collecting coverage results. Go currently doesn't
# support collecting coverage across multiple packages at once, so we must issue
# separate 'go test' commands for each package and then combine at the end.
# To speed things up considerably, we can at least use xargs -P to run multiple
# 'go test' commands at once.
# To properly parse the test results if generating a JUnit test report, we
# must make sure the output from PARALLEL runs is not mixed. To achieve this,
# we spawn a subshell for each PARALLEL process, redirecting the output to
# separate files.
# ignore paths:
# vendor/k8s.io/code-generator/cmd/generator: is fragile when run under coverage, so ignore it for now.
# https://github.com/kubernetes/kubernetes/issues/24967
# vendor/k8s.io/client-go/1.4/rest: causes cover internal errors
# https://github.com/golang/go/issues/16540
cover_ignore_dirs="vendor/k8s.io/code-generator/cmd/generator|vendor/k8s.io/client-go/1.4/rest"
for path in $(echo $cover_ignore_dirs | sed 's/|/ /g'); do
echo -e "skipped\tk8s.io/kubernetes/$path"
done
printf "%s\n" "${@}" \
| grep -Ev $cover_ignore_dirs \
| xargs -I{} -n 1 -P ${KUBE_COVERPROCS} \
bash -c "set -o pipefail; _pkg=\"\$0\"; _pkg_out=\${_pkg//\//_}; \
go test ${goflags[@]:+${goflags[@]}} \
${KUBE_RACE} \
${KUBE_TIMEOUT} \
-cover -covermode=\"${KUBE_COVERMODE}\" \
-coverprofile=\"${cover_report_dir}/\${_pkg}/${cover_profile}\" \
\"\${_pkg}\" \
${testargs[@]:+${testargs[@]}} \
| tee ${junit_filename_prefix:+\"${junit_filename_prefix}-\$_pkg_out.stdout\"} \
| grep \"${go_test_grep_pattern}\"" \
{} \
&& test_result=$? || test_result=$?
produceJUnitXMLReport "${junit_filename_prefix}"
COMBINED_COVER_PROFILE="${cover_report_dir}/combined-coverage.out"
{
# The combined coverage profile needs to start with a line indicating which
# coverage mode was used (set, count, or atomic). This line is included in
# each of the coverage profiles generated when running 'go test -cover', but
# we strip these lines out when combining so that there's only one.
echo "mode: ${KUBE_COVERMODE}"
# Include all coverage reach data in the combined profile, but exclude the
# 'mode' lines, as there should be only one.
for x in `find "${cover_report_dir}" -name "${cover_profile}"`; do
cat $x | grep -h -v "^mode:" || true
done
} >"${COMBINED_COVER_PROFILE}"
coverage_html_file="${cover_report_dir}/combined-coverage.html"
go tool cover -html="${COMBINED_COVER_PROFILE}" -o="${coverage_html_file}"
kube::log::status "Combined coverage report: ${coverage_html_file}"
return ${test_result}
}
reportCoverageToCoveralls() {
if [[ ${KUBE_COVER} =~ ^[yY]$ ]] && [[ -x "${KUBE_GOVERALLS_BIN}" ]]; then
kube::log::status "Reporting coverage results to Coveralls for service ${CI_NAME:-}"
${KUBE_GOVERALLS_BIN} -coverprofile="${COMBINED_COVER_PROFILE}" \
${CI_NAME:+"-service=${CI_NAME}"} \
${COVERALLS_REPO_TOKEN:+"-repotoken=${COVERALLS_REPO_TOKEN}"} \
|| true
fi
}
checkFDs() {
# several unittests panic when httptest cannot open more sockets
# due to the low default files limit on OS X. Warn about low limit.
local fileslimit="$(ulimit -n)"
if [[ $fileslimit -lt 1000 ]]; then
echo "WARNING: ulimit -n (files) should be at least 1000, is $fileslimit, may cause test failure";
fi
}
checkFDs
# Convert the CSVs to arrays.
IFS=';' read -a apiVersions <<< "${KUBE_TEST_API_VERSIONS}"
apiVersionsCount=${#apiVersions[@]}
for (( i=0; i<${apiVersionsCount}; i++ )); do
apiVersion=${apiVersions[i]}
echo "Running tests for APIVersion: $apiVersion"
# KUBE_TEST_API sets the version of each group to be tested.
KUBE_TEST_API="${apiVersion}" runTests "$@"
done
# We might run the tests for multiple versions, but we want to report only
# one of them to coveralls. Here we report coverage from the last run.
reportCoverageToCoveralls

84
vendor/k8s.io/kubernetes/hack/make-rules/update.sh generated vendored Executable file
View File

@@ -0,0 +1,84 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A single script that runs a predefined set of update-* scripts, as they often go together.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"
# If called directly, exit.
if [[ "${CALLED_FROM_MAIN_MAKEFILE:-""}" == "" ]]; then
echo "ERROR: $0 should not be run directly." >&2
echo >&2
echo "Please run this command using \"make update\""
exit 1
fi
SILENT=${SILENT:-true}
ALL=${FORCE_ALL:-false}
V=""
if [[ "${SILENT}" != "true" ]]; then
V="-v"
fi
trap 'exit 1' SIGINT
if ${SILENT} ; then
echo "Running in silent mode, run with SILENT=false if you want to see script logs."
fi
if ! ${ALL} ; then
echo "Running in short-circuit mode; run with FORCE_ALL=true to force all scripts to run."
fi
"${KUBE_ROOT}/hack/godep-restore.sh" ${V}
BASH_TARGETS="
update-generated-protobuf
update-codegen
update-generated-runtime
update-generated-device-plugin
update-generated-docs
update-generated-swagger-docs
update-swagger-spec
update-openapi-spec
update-api-reference-docs
update-staging-godeps
update-bazel"
for t in ${BASH_TARGETS}; do
echo -e "${color_yellow}Running $t${color_norm}"
if ${SILENT} ; then
if ! bash "${KUBE_ROOT}/hack/$t.sh" 1> /dev/null; then
echo -e "${color_red}Running $t FAILED${color_norm}"
if ! ${ALL}; then
exit 1
fi
fi
else
if ! bash "${KUBE_ROOT}/hack/$t.sh"; then
echo -e "${color_red}Running $t FAILED${color_norm}"
if ! ${ALL}; then
exit 1
fi
fi
fi
done
echo -e "${color_green}Update scripts completed successfully${color_norm}"

171
vendor/k8s.io/kubernetes/hack/make-rules/verify.sh generated vendored Executable file
View File

@@ -0,0 +1,171 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/util.sh"
# include shell2junit library
source "${KUBE_ROOT}/third_party/forked/shell2junit/sh2ju.sh"
# Excluded check patterns are always skipped.
EXCLUDED_PATTERNS=(
"verify-all.sh" # this script calls the make rule and would cause a loop
"verify-linkcheck.sh" # runs in separate Jenkins job once per day due to high network usage
"verify-test-owners.sh" # TODO(rmmh): figure out how to avoid endless conflicts
"verify-*-dockerized.sh" # Don't run any scripts that intended to be run dockerized
"verify-typecheck.sh" # runs in separate typecheck job
)
# Only run whitelisted fast checks in quick mode.
# These run in <10s each on enisoc's workstation, assuming that
# `make` and `hack/godep-restore.sh` had already been run.
QUICK_PATTERNS+=(
"verify-api-groups.sh"
"verify-bazel.sh"
"verify-boilerplate.sh"
"verify-generated-files-remake"
"verify-godep-licenses.sh"
"verify-gofmt.sh"
"verify-imports.sh"
"verify-pkg-names.sh"
"verify-readonly-packages.sh"
"verify-spelling.sh"
"verify-staging-client-go.sh"
"verify-test-images.sh"
"verify-test-owners.sh"
)
EXCLUDED_CHECKS=$(ls ${EXCLUDED_PATTERNS[@]/#/${KUBE_ROOT}\/hack\/} 2>/dev/null || true)
QUICK_CHECKS=$(ls ${QUICK_PATTERNS[@]/#/${KUBE_ROOT}\/hack\/} 2>/dev/null || true)
function is-excluded {
for e in ${EXCLUDED_CHECKS[@]}; do
if [[ $1 -ef "$e" ]]; then
return
fi
done
return 1
}
function is-quick {
for e in ${QUICK_CHECKS[@]}; do
if [[ $1 -ef "$e" ]]; then
return
fi
done
return 1
}
function is-explicitly-chosen {
local name="${1#verify-}"
name="${name%.*}"
for e in ${WHAT}; do
if [[ $e == "$name" ]]; then
return
fi
done
return 1
}
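# Illustrative example: WHAT="gofmt bazel" would select verify-gofmt.sh and
# verify-bazel.sh and skip every other check.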
function run-cmd {
local filename="${2##*/verify-}"
local testname="${filename%%.*}"
local output="${KUBE_JUNIT_REPORT_DIR:-/tmp/junit-results}"
local tr
if ${SILENT}; then
juLog -output="${output}" -class="verify" -name="${testname}" "$@" &> /dev/null
tr=$?
else
juLog -output="${output}" -class="verify" -name="${testname}" "$@"
tr=$?
fi
return ${tr}
}
# Collect failed tests in this array; initialize it to empty.
FAILED_TESTS=()
function print-failed-tests {
echo -e "========================"
echo -e "${color_red}FAILED TESTS${color_norm}"
echo -e "========================"
for t in ${FAILED_TESTS[@]}; do
echo -e "${color_red}${t}${color_norm}"
done
}
function run-checks {
local -r pattern=$1
local -r runner=$2
local t
for t in $(ls ${pattern})
do
local check_name="$(basename "${t}")"
if [[ ! -z ${WHAT:-} ]]; then
if ! is-explicitly-chosen "${check_name}"; then
continue
fi
else
if is-excluded "${t}" ; then
echo "Skipping ${check_name}"
continue
fi
if ${QUICK} && ! is-quick "${t}" ; then
echo "Skipping ${check_name} in quick mode"
continue
fi
fi
echo -e "Verifying ${check_name}"
local start=$(date +%s)
run-cmd "${runner}" "${t}" && tr=$? || tr=$?
local elapsed=$(($(date +%s) - ${start}))
if [[ ${tr} -eq 0 ]]; then
echo -e "${color_green}SUCCESS${color_norm} ${check_name}\t${elapsed}s"
else
echo -e "${color_red}FAILED${color_norm} ${check_name}\t${elapsed}s"
ret=1
FAILED_TESTS+=(${t})
fi
done
}
SILENT=${SILENT:-false}
QUICK=${QUICK:-false}
if ${SILENT} ; then
echo "Running in silent mode, run with SILENT=false if you want to see script logs."
fi
if ${QUICK} ; then
echo "Running in quick mode (QUICK=true). Only fast checks will run."
fi
ret=0
run-checks "${KUBE_ROOT}/hack/verify-*.sh" bash
run-checks "${KUBE_ROOT}/hack/verify-*.py" python
if [[ ${ret} -eq 1 ]]; then
print-failed-tests
fi
exit ${ret}
# ex: ts=2 sw=2 et filetype=sh

56
vendor/k8s.io/kubernetes/hack/make-rules/vet.sh generated vendored Executable file
View File

@@ -0,0 +1,56 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"
cd "${KUBE_ROOT}"
# If called directly, exit.
if [[ "${CALLED_FROM_MAIN_MAKEFILE:-""}" == "" ]]; then
echo "ERROR: $0 should not be run directly." >&2
echo >&2
echo "Please run this command using \"make vet\""
exit 1
fi
# This is required before we run govet for the results to be correct.
# See https://github.com/golang/go/issues/16086 for details.
go install ./cmd/...
# Use eval to preserve embedded quoted strings.
eval "goflags=(${GOFLAGS:-})"
# Filter out arguments that start with "-" and move them to goflags.
targets=()
for arg; do
if [[ "${arg}" == -* ]]; then
goflags+=("${arg}")
else
targets+=("${arg}")
fi
done
if [[ ${#targets[@]} -eq 0 ]]; then
# Do not run on third_party directories or generated client code.
targets=$(go list -e ./... | egrep -v "/(third_party|vendor|staging|clientset_generated)/")
fi
go vet "${goflags[@]:+${goflags[@]}}" ${targets[@]}

49
vendor/k8s.io/kubernetes/hack/print-workspace-status.sh generated vendored Executable file
View File

@@ -0,0 +1,49 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This command is used by bazel as the workspace_status_command
# to implement build stamping with git information.
set -o errexit
set -o nounset
set -o pipefail
export KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/version.sh"
kube::version::get_version_vars
# Prefix with STABLE_ so that these values are saved to stable-status.txt
# instead of volatile-status.txt.
# Stamped rules will be retriggered by changes to stable-status.txt, but not by
# changes to volatile-status.txt.
# IMPORTANT: the camelCase vars should match the lists in hack/lib/version.sh
# and pkg/version/def.bzl.
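# Illustrative wiring (the target below is a placeholder): a build can point
# Bazel at this script with, e.g.,
#   bazel build --workspace_status_command=hack/print-workspace-status.sh //some/target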
cat <<EOF
STABLE_BUILD_GIT_COMMIT ${KUBE_GIT_COMMIT-}
STABLE_BUILD_SCM_STATUS ${KUBE_GIT_TREE_STATE-}
STABLE_BUILD_SCM_REVISION ${KUBE_GIT_VERSION-}
STABLE_BUILD_MAJOR_VERSION ${KUBE_GIT_MAJOR-}
STABLE_BUILD_MINOR_VERSION ${KUBE_GIT_MINOR-}
STABLE_DOCKER_TAG ${KUBE_GIT_VERSION/+/_}
gitCommit ${KUBE_GIT_COMMIT-}
gitTreeState ${KUBE_GIT_TREE_STATE-}
gitVersion ${KUBE_GIT_VERSION-}
gitMajor ${KUBE_GIT_MAJOR-}
gitMinor ${KUBE_GIT_MINOR-}
buildDate $(date \
${SOURCE_DATE_EPOCH:+"--date=@${SOURCE_DATE_EPOCH}"} \
-u +'%Y-%m-%dT%H:%M:%SZ')
EOF

33
vendor/k8s.io/kubernetes/hack/run-in-gopath.sh generated vendored Executable file
View File

@@ -0,0 +1,33 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script sets up a temporary Kubernetes GOPATH and runs an arbitrary
# command under it. Go tooling requires that the current directory be under
# GOPATH or else it fails to find some things, such as the vendor directory for
# the project.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
# This sets up a clean GOPATH and makes sure we are currently in it.
kube::golang::setup_env
# Run the user-provided command.
"${@}"

37
vendor/k8s.io/kubernetes/hack/test-go.sh generated vendored Executable file
View File

@@ -0,0 +1,37 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is a vestigial redirection. Please do not add "real" logic.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
# For help output
ARGHELP=""
if [[ "$#" -gt 0 ]]; then
ARGHELP="WHAT='$@'"
fi
echo "NOTE: $0 has been replaced by 'make test'"
echo
echo "The equivalent of this invocation is: "
echo " make test ${ARGHELP}"
echo
echo
make --no-print-directory -C "${KUBE_ROOT}" test WHAT="$*"

33
vendor/k8s.io/kubernetes/hack/test-integration.sh generated vendored Executable file
View File

@@ -0,0 +1,33 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is a vestigial redirection. Please do not add "real" logic.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
echo "$0 has been replaced by 'make test-integration'"
echo
echo "The following invocation will run all integration tests: "
echo ' make test-integration'
echo
echo "The following invocation will run a specific test with the verbose flag set: "
echo ' make test-integration WHAT=./test/integration/pods GOFLAGS="-v" KUBE_TEST_ARGS="-run ^TestPodUpdateActiveDeadlineSeconds$"'
echo
exit 1

210
vendor/k8s.io/kubernetes/hack/test-update-storage-objects.sh generated vendored Executable file
View File

@@ -0,0 +1,210 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script to test that cluster/update-storage-objects.sh works as expected.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
# The api version in which objects are currently stored in etcd.
KUBE_OLD_API_VERSION=${KUBE_OLD_API_VERSION:-"v1"}
# The api version to which the objects in etcd should be converted
# (i.e. the new api version).
KUBE_NEW_API_VERSION=${KUBE_NEW_API_VERSION:-"v1"}
KUBE_OLD_STORAGE_VERSIONS=${KUBE_OLD_STORAGE_VERSIONS:-""}
KUBE_NEW_STORAGE_VERSIONS=${KUBE_NEW_STORAGE_VERSIONS:-""}
KUBE_STORAGE_MEDIA_TYPE_JSON="application/json"
KUBE_STORAGE_MEDIA_TYPE_PROTOBUF="application/vnd.kubernetes.protobuf"
ETCD_HOST=${ETCD_HOST:-127.0.0.1}
ETCD_PORT=${ETCD_PORT:-2379}
ETCD_PREFIX=${ETCD_PREFIX:-randomPrefix}
API_PORT=${API_PORT:-8080}
API_HOST=${API_HOST:-127.0.0.1}
RUNTIME_CONFIG=""
ETCDCTL=$(which etcdctl)
KUBECTL="${KUBE_OUTPUT_HOSTBIN}/kubectl"
UPDATE_ETCD_OBJECTS_SCRIPT="${KUBE_ROOT}/cluster/update-storage-objects.sh"
DISABLE_ADMISSION_PLUGINS="ServiceAccount,NamespaceLifecycle,LimitRanger,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,PersistentVolumeLabel,DefaultStorageClass"
function startApiServer() {
local storage_versions=${1:-""}
local storage_media_type=${2:-""}
kube::log::status "Starting kube-apiserver with..."
kube::log::status " storage-media-type: ${storage_media_type}"
kube::log::status " runtime-config: ${RUNTIME_CONFIG}"
kube::log::status " storage-version overrides: ${storage_versions}"
"${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \
--insecure-bind-address="${API_HOST}" \
--bind-address="${API_HOST}" \
--insecure-port="${API_PORT}" \
--storage-backend="etcd3" \
--etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
--etcd-prefix="/${ETCD_PREFIX}" \
--runtime-config="${RUNTIME_CONFIG}" \
--disable-admission-plugins="${DISABLE_ADMISSION_PLUGINS}" \
--cert-dir="${TMPDIR:-/tmp/}" \
--service-cluster-ip-range="10.0.0.0/24" \
--storage-versions="${storage_versions}" \
--storage-media-type=${storage_media_type} 1>&2 &
APISERVER_PID=$!
# url, prefix, wait, times
kube::util::wait_for_url "http://${API_HOST}:${API_PORT}/healthz" "apiserver: " 1 120
}
function killApiServer() {
kube::log::status "Killing api server"
if [[ -n ${APISERVER_PID-} ]]; then
kill ${APISERVER_PID} 1>&2 2>/dev/null
wait ${APISERVER_PID} || true
kube::log::status "api server exited"
fi
unset APISERVER_PID
}
function cleanup() {
killApiServer
kube::etcd::cleanup
kube::log::status "Clean up complete"
}
trap cleanup EXIT SIGINT
make -C "${KUBE_ROOT}" WHAT=cmd/kube-apiserver
kube::etcd::start
echo "${ETCD_VERSION}" > "${ETCD_DIR}/version.txt"
### BEGIN TEST DEFINITION CUSTOMIZATION ###
# source_file,resource,namespace,name,old_version,new_version
tests=(
test/e2e/testing-manifests/rbd-storage-class.yaml,storageclasses,,slow,v1beta1,v1
)
KUBE_OLD_API_VERSION="networking.k8s.io/v1,storage.k8s.io/v1beta1,extensions/v1beta1"
KUBE_NEW_API_VERSION="networking.k8s.io/v1,storage.k8s.io/v1,extensions/v1beta1,policy/v1beta1"
KUBE_OLD_STORAGE_VERSIONS="storage.k8s.io/v1beta1"
KUBE_NEW_STORAGE_VERSIONS="storage.k8s.io/v1"
### END TEST DEFINITION CUSTOMIZATION ###
#######################################################
# Step 1: Start a server which supports both the old and new api versions,
# but KUBE_OLD_API_VERSION is the latest (storage) version.
# Additionally use KUBE_STORAGE_MEDIA_TYPE_JSON for storage encoding.
#######################################################
RUNTIME_CONFIG="api/all=false,api/v1=true,apiregistration.k8s.io/v1=true,${KUBE_OLD_API_VERSION}=true,${KUBE_NEW_API_VERSION}=true"
startApiServer ${KUBE_OLD_STORAGE_VERSIONS} ${KUBE_STORAGE_MEDIA_TYPE_JSON}
# Create object(s)
for test in ${tests[@]}; do
IFS=',' read -ra test_data <<<"$test"
source_file=${test_data[0]}
kube::log::status "Creating ${source_file}"
${KUBECTL} create -f "${KUBE_ROOT}/${source_file}"
# Verify that the storage version is the old version
resource=${test_data[1]}
namespace=${test_data[2]}
name=${test_data[3]}
old_storage_version=${test_data[4]}
if [ -n "${namespace}" ]; then
namespace="${namespace}/"
fi
kube::log::status "Verifying ${resource}/${namespace}${name} has storage version ${old_storage_version} in etcd"
ETCDCTL_API=3 ${ETCDCTL} --endpoints="http://${ETCD_HOST}:${ETCD_PORT}" get "/${ETCD_PREFIX}/${resource}/${namespace}${name}" | grep ${old_storage_version}
done
killApiServer
#######################################################
# Step 2: Start a server which supports both the old and new api versions,
# but KUBE_NEW_API_VERSION is the latest (storage) version.
# Still use KUBE_STORAGE_MEDIA_TYPE_JSON for storage encoding.
#######################################################
RUNTIME_CONFIG="api/all=false,api/v1=true,apiregistration.k8s.io/v1=true,${KUBE_OLD_API_VERSION}=true,${KUBE_NEW_API_VERSION}=true"
startApiServer ${KUBE_NEW_STORAGE_VERSIONS} ${KUBE_STORAGE_MEDIA_TYPE_JSON}
# Update etcd objects, so that they will now be stored in the new api version.
kube::log::status "Updating storage versions in etcd"
${UPDATE_ETCD_OBJECTS_SCRIPT}
# Verify that the storage version was changed in etcd
for test in ${tests[@]}; do
IFS=',' read -ra test_data <<<"$test"
resource=${test_data[1]}
namespace=${test_data[2]}
name=${test_data[3]}
new_storage_version=${test_data[5]}
if [ -n "${namespace}" ]; then
namespace="${namespace}/"
fi
kube::log::status "Verifying ${resource}/${namespace}${name} has updated storage version ${new_storage_version} in etcd"
ETCDCTL_API=3 ${ETCDCTL} --endpoints="http://${ETCD_HOST}:${ETCD_PORT}" get "/${ETCD_PREFIX}/${resource}/${namespace}${name}" | grep ${new_storage_version}
done
killApiServer
#######################################################
# Step 3 : Start a server which supports only the new api version.
# However, change storage encoding to KUBE_STORAGE_MEDIA_TYPE_PROTOBUF.
#######################################################
RUNTIME_CONFIG="api/all=false,api/v1=true,apiregistration.k8s.io/v1=true,${KUBE_NEW_API_VERSION}=true"
# This seems to reduce flakiness.
sleep 1
startApiServer ${KUBE_NEW_STORAGE_VERSIONS} ${KUBE_STORAGE_MEDIA_TYPE_PROTOBUF}
for test in ${tests[@]}; do
IFS=',' read -ra test_data <<<"$test"
resource=${test_data[1]}
namespace=${test_data[2]}
name=${test_data[3]}
namespace_flag=""
# Verify that the server is able to read the object.
if [ -n "${namespace}" ]; then
namespace_flag="--namespace=${namespace}"
namespace="${namespace}/"
fi
kube::log::status "Verifying we can retrieve ${resource}/${namespace}${name} via kubectl"
# We have to remove the cached discovery information about the old version; otherwise,
# the 'kubectl get' will use that and fail to find the resource.
rm -rf ${HOME}/.kube/cache/discovery/localhost_8080/${KUBE_OLD_STORAGE_VERSIONS}
${KUBECTL} get ${namespace_flag} ${resource}/${name}
done
killApiServer

8
vendor/k8s.io/kubernetes/hack/testdata/CRD/bar.yaml generated vendored Normal file
View File

@@ -0,0 +1,8 @@
kind: Bar
apiVersion: company.com/v1
metadata:
name: test
labels:
pruneGroup: "true"
someField: field1
otherField: field2

View File

@@ -0,0 +1,11 @@
kind: Foo
apiVersion: company.com/v1
metadata:
name: test
labels:
pruneGroup: "true"
someField: field1
otherField: field2
nestedField:
someSubfield: modifiedSubfield
newSubfield: subfield3

View File

@@ -0,0 +1,10 @@
kind: Foo
apiVersion: company.com/v1
metadata:
name: test
labels:
pruneGroup: "true"
someField: field1
otherField: field2
nestedField:
someSubfield: modifiedSubfield

View File

@@ -0,0 +1,11 @@
kind: Foo
apiVersion: company.com/v1
metadata:
name: test
labels:
pruneGroup: "true"
someField: field1
otherField: field2
nestedField:
someSubfield: modifiedSubfield
otherSubfield: subfield2

Some files were not shown because too many files have changed in this diff