Add generated file

This PR adds generated files under pkg/client and the vendor folder.

Author: xing-yang
Date: 2018-07-12 10:55:15 -07:00
Parent: 36b1de0341
Commit: e213d1890d
17,729 changed files with 5,090,889 additions and 0 deletions

vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD generated vendored Normal file

@@ -0,0 +1,47 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"register.go",
"types.go",
"zz_generated.deepcopy.go",
],
importpath = "k8s.io/kubernetes/pkg/apis/extensions",
deps = [
"//pkg/apis/autoscaling:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/networking:go_default_library",
"//pkg/apis/policy:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/apis/extensions/fuzzer:all-srcs",
"//pkg/apis/extensions/install:all-srcs",
"//pkg/apis/extensions/v1beta1:all-srcs",
"//pkg/apis/extensions/validation:all-srcs",
],
tags = ["automanaged"],
)
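For orientation, the importpath declared in this BUILD rule is what Go code elsewhere in the tree uses to depend on the package. A minimal, hypothetical consumer (illustrative only, not part of this diff; GroupName and SchemeGroupVersion are defined in register.go later in this PR):

package example // hypothetical consumer, for illustration only

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/extensions"
)

func describeGroup() {
	// Prints "extensions" and the internal group/version string.
	fmt.Println(extensions.GroupName, extensions.SchemeGroupVersion.String())
}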

vendor/k8s.io/kubernetes/pkg/apis/extensions/OWNERS generated vendored Executable file

@@ -0,0 +1,38 @@
reviewers:
- thockin
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
- brendandburns
- derekwaynecarr
- caesarxuchao
- mikedanese
- liggitt
- nikhiljindal
- erictune
- pmorie
- sttts
- saad-ali
- janetkuo
- justinsb
- ncdc
- tallclair
- mwielgus
- soltysh
- piosz
- dims
- errordeveloper
- madhusudancs
- rootfs
- jszczepkowski
- mml
- resouer
- mbohlool
- david-mcmahon
- therc
- pweil-
- tmrts
- mqliang
- lukaszo
- jianhuiz

vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go generated vendored Normal file

@@ -0,0 +1,19 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
package extensions // import "k8s.io/kubernetes/pkg/apis/extensions"
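The +k8s:deepcopy-gen=package tag above is what drives the zz_generated.deepcopy.go file listed in the BUILD rule earlier. Assuming the standard deepcopy-gen output, every exported type in the package gets a DeepCopy method, used roughly like this (hypothetical sketch, not part of the vendored file):

package example // hypothetical, for illustration only

import "k8s.io/kubernetes/pkg/apis/extensions"

func cloneDeployment(in *extensions.Deployment) *extensions.Deployment {
	// DeepCopy comes from the generated zz_generated.deepcopy.go and returns a
	// fully independent copy, so mutating the clone cannot affect `in`.
	return in.DeepCopy()
}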

vendor/k8s.io/kubernetes/pkg/apis/extensions/fuzzer/BUILD generated vendored Normal file

@@ -0,0 +1,32 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["fuzzer.go"],
importpath = "k8s.io/kubernetes/pkg/apis/extensions/fuzzer",
deps = [
"//pkg/apis/extensions:go_default_library",
"//vendor/github.com/google/gofuzz:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/apis/extensions/fuzzer/fuzzer.go generated vendored Normal file

@@ -0,0 +1,114 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzzer
import (
"fmt"
fuzz "github.com/google/gofuzz"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/pkg/apis/extensions"
)
// Funcs returns the fuzzer functions for the extensions api group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
return []interface{}{
func(j *extensions.Deployment, c fuzz.Continue) {
c.FuzzNoCustom(j)
// match defaulting
if j.Spec.Selector == nil {
j.Spec.Selector = &metav1.LabelSelector{MatchLabels: j.Spec.Template.Labels}
}
if len(j.Labels) == 0 {
j.Labels = j.Spec.Template.Labels
}
},
func(j *extensions.DeploymentSpec, c fuzz.Continue) {
c.FuzzNoCustom(j) // fuzz self without calling this function again
rhl := int32(c.Rand.Int31())
pds := int32(c.Rand.Int31())
j.RevisionHistoryLimit = &rhl
j.ProgressDeadlineSeconds = &pds
},
func(j *extensions.DeploymentStrategy, c fuzz.Continue) {
c.FuzzNoCustom(j) // fuzz self without calling this function again
// Ensure that strategyType is one of valid values.
strategyTypes := []extensions.DeploymentStrategyType{extensions.RecreateDeploymentStrategyType, extensions.RollingUpdateDeploymentStrategyType}
j.Type = strategyTypes[c.Rand.Intn(len(strategyTypes))]
if j.Type != extensions.RollingUpdateDeploymentStrategyType {
j.RollingUpdate = nil
} else {
rollingUpdate := extensions.RollingUpdateDeployment{}
if c.RandBool() {
rollingUpdate.MaxUnavailable = intstr.FromInt(int(c.Rand.Int31()))
rollingUpdate.MaxSurge = intstr.FromInt(int(c.Rand.Int31()))
} else {
rollingUpdate.MaxSurge = intstr.FromString(fmt.Sprintf("%d%%", c.Rand.Int31()))
}
j.RollingUpdate = &rollingUpdate
}
},
func(j *extensions.DaemonSet, c fuzz.Continue) {
c.FuzzNoCustom(j)
// match defaulter
j.Spec.Template.Generation = 0
if len(j.ObjectMeta.Labels) == 0 {
j.ObjectMeta.Labels = j.Spec.Template.ObjectMeta.Labels
}
},
func(j *extensions.DaemonSetSpec, c fuzz.Continue) {
c.FuzzNoCustom(j) // fuzz self without calling this function again
rhl := int32(c.Rand.Int31())
j.RevisionHistoryLimit = &rhl
},
func(j *extensions.DaemonSetUpdateStrategy, c fuzz.Continue) {
c.FuzzNoCustom(j) // fuzz self without calling this function again
// Ensure that strategyType is one of valid values.
strategyTypes := []extensions.DaemonSetUpdateStrategyType{extensions.RollingUpdateDaemonSetStrategyType, extensions.OnDeleteDaemonSetStrategyType}
j.Type = strategyTypes[c.Rand.Intn(len(strategyTypes))]
if j.Type != extensions.RollingUpdateDaemonSetStrategyType {
j.RollingUpdate = nil
} else {
rollingUpdate := extensions.RollingUpdateDaemonSet{}
if c.RandBool() {
if c.RandBool() {
rollingUpdate.MaxUnavailable = intstr.FromInt(1 + int(c.Rand.Int31()))
} else {
rollingUpdate.MaxUnavailable = intstr.FromString(fmt.Sprintf("%d%%", 1+c.Rand.Int31()))
}
}
j.RollingUpdate = &rollingUpdate
}
},
func(j *extensions.ReplicaSet, c fuzz.Continue) {
c.FuzzNoCustom(j)
// match defaulter
if j.Spec.Selector == nil {
j.Spec.Selector = &metav1.LabelSelector{MatchLabels: j.Spec.Template.Labels}
}
if len(j.Labels) == 0 {
j.Labels = j.Spec.Template.Labels
}
},
}
}
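These functions are meant to be handed to gofuzz so that fuzzed objects still satisfy the defaulting invariants enforced above. A minimal sketch of wiring them up in a test (hypothetical code, not part of the vendored file):

package fuzzer_test // hypothetical test, for illustration only

import (
	"testing"

	fuzz "github.com/google/gofuzz"
	"k8s.io/apimachinery/pkg/runtime"
	runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/apis/extensions/fuzzer"
)

func TestFuzzDeploymentStrategy(t *testing.T) {
	codecs := runtimeserializer.NewCodecFactory(runtime.NewScheme())
	// Register the custom funcs so fuzzed objects match the defaulting rules.
	f := fuzz.New().Funcs(fuzzer.Funcs(codecs)...)

	s := extensions.DeploymentStrategy{}
	f.Fuzz(&s)
	if s.Type != extensions.RecreateDeploymentStrategyType &&
		s.Type != extensions.RollingUpdateDeploymentStrategyType {
		t.Fatalf("custom fuzzer should only produce valid strategy types, got %q", s.Type)
	}
}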

vendor/k8s.io/kubernetes/pkg/apis/extensions/install/BUILD generated vendored Normal file

@@ -0,0 +1,32 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["install.go"],
importpath = "k8s.io/kubernetes/pkg/apis/extensions/install",
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/apis/extensions/install/install.go generated vendored Normal file

@@ -0,0 +1,38 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package install installs the experimental API group, making it available as
// an option to all of the API encoding/decoding machinery.
package install
import (
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)
func init() {
Install(legacyscheme.Scheme)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
utilruntime.Must(extensions.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1beta1.SchemeGroupVersion))
}
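In practice a blank import of this package is enough to register the group with legacyscheme.Scheme through init; Install can also be called explicitly on a private scheme, e.g. (hypothetical sketch, not part of the vendored file):

package example // hypothetical, for illustration only

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/apis/extensions/install"
)

func newExtensionsScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()
	// Registers both the internal version and v1beta1, and marks v1beta1 as
	// the preferred external version, exactly as Install above does.
	install.Install(scheme)
	return scheme
}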

vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go generated vendored Normal file

@@ -0,0 +1,69 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package extensions
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/networking"
"k8s.io/kubernetes/pkg/apis/policy"
)
// GroupName is the group name used in this package
const GroupName = "extensions"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
// TODO this gets cleaned up when the types are fixed
scheme.AddKnownTypes(SchemeGroupVersion,
&Deployment{},
&DeploymentList{},
&DeploymentRollback{},
&ReplicationControllerDummy{},
&DaemonSetList{},
&DaemonSet{},
&Ingress{},
&IngressList{},
&ReplicaSet{},
&ReplicaSetList{},
&policy.PodSecurityPolicy{},
&policy.PodSecurityPolicyList{},
&autoscaling.Scale{},
&networking.NetworkPolicy{},
&networking.NetworkPolicyList{},
)
return nil
}
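The Kind and Resource helpers above are mostly used when building group-qualified API errors or mappings; for example (hypothetical sketch, not part of the vendored file):

package example // hypothetical, for illustration only

import (
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/kubernetes/pkg/apis/extensions"
)

func deploymentNotFound(name string) error {
	// Resource("deployments") yields the GroupResource {extensions deployments},
	// so the returned error carries the fully qualified resource name.
	return errors.NewNotFound(extensions.Resource("deployments"), name)
}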

vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go generated vendored Normal file

@@ -0,0 +1,775 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This file (together with pkg/apis/extensions/v1beta1/types.go) contain the experimental
types in kubernetes. These API objects are experimental, meaning that the
APIs may be broken at any time by the kubernetes team.
DISCLAIMER: The implementation of the experimental API group itself is
a temporary one meant as a stopgap solution until kubernetes has proper
support for multiple API groups. The transition may require changes
beyond registration differences. In other words, experimental API group
support is experimental.
*/
package extensions
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
api "k8s.io/kubernetes/pkg/apis/core"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Dummy definition
type ReplicationControllerDummy struct {
metav1.TypeMeta
}
// Alpha-level support for Custom Metrics in HPA (as annotations).
type CustomMetricTarget struct {
// Custom Metric name.
Name string
// Custom Metric value (average).
TargetValue resource.Quantity
}
type CustomMetricTargetList struct {
Items []CustomMetricTarget
}
type CustomMetricCurrentStatus struct {
// Custom Metric name.
Name string
// Custom Metric value (average).
CurrentValue resource.Quantity
}
type CustomMetricCurrentStatusList struct {
Items []CustomMetricCurrentStatus
}
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Deployment struct {
metav1.TypeMeta
// +optional
metav1.ObjectMeta
// Specification of the desired behavior of the Deployment.
// +optional
Spec DeploymentSpec
// Most recently observed status of the Deployment.
// +optional
Status DeploymentStatus
}
type DeploymentSpec struct {
// Number of desired pods. This is a pointer to distinguish between explicit
// zero and not specified. Defaults to 1.
// +optional
Replicas int32
// Label selector for pods. Existing ReplicaSets whose pods are
// selected by this will be the ones affected by this deployment.
// +optional
Selector *metav1.LabelSelector
// Template describes the pods that will be created.
Template api.PodTemplateSpec
// The deployment strategy to use to replace existing pods with new ones.
// +optional
Strategy DeploymentStrategy
// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
// +optional
MinReadySeconds int32
// The number of old ReplicaSets to retain to allow rollback.
// This is a pointer to distinguish between explicit zero and not specified.
// +optional
RevisionHistoryLimit *int32
// Indicates that the deployment is paused and will not be processed by the
// deployment controller.
// +optional
Paused bool
// DEPRECATED.
// The config this deployment is rolling back to. Will be cleared after rollback is done.
// +optional
RollbackTo *RollbackConfig
// The maximum time in seconds for a deployment to make progress before it
// is considered to be failed. The deployment controller will continue to
// process failed deployments and a condition with a ProgressDeadlineExceeded
// reason will be surfaced in the deployment status. Note that progress will
// not be estimated during the time a deployment is paused. This is not set
// by default.
// +optional
ProgressDeadlineSeconds *int32
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DEPRECATED.
// DeploymentRollback stores the information required to rollback a deployment.
type DeploymentRollback struct {
metav1.TypeMeta
// Required: This must match the Name of a deployment.
Name string
// The annotations to be updated to a deployment
// +optional
UpdatedAnnotations map[string]string
// The config of this deployment rollback.
RollbackTo RollbackConfig
}
// DEPRECATED.
type RollbackConfig struct {
// The revision to rollback to. If set to 0, rollback to the last revision.
// +optional
Revision int64
}
const (
// DefaultDeploymentUniqueLabelKey is the default key of the selector that is added
// to existing RCs (and label key that is added to its pods) to prevent the existing RCs
// from selecting new pods (and old pods from being selected by the new RC).
DefaultDeploymentUniqueLabelKey string = "pod-template-hash"
)
type DeploymentStrategy struct {
// Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
// +optional
Type DeploymentStrategyType
// Rolling update config params. Present only if DeploymentStrategyType =
// RollingUpdate.
//---
// TODO: Update this to follow our convention for oneOf, whatever we decide it
// to be.
// +optional
RollingUpdate *RollingUpdateDeployment
}
type DeploymentStrategyType string
const (
// Kill all existing pods before creating new ones.
RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate"
// Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one.
RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate"
)
// Spec to control the desired behavior of rolling update.
type RollingUpdateDeployment struct {
// The maximum number of pods that can be unavailable during the update.
// Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%).
// Absolute number is calculated from percentage by rounding down.
// This can not be 0 if MaxSurge is 0.
// By default, a fixed value of 1 is used.
// Example: when this is set to 30%, the old RC can be scaled down by 30%
// immediately when the rolling update starts. Once new pods are ready, old RC
// can be scaled down further, followed by scaling up the new RC, ensuring
// that at least 70% of original number of pods are available at all times
// during the update.
// +optional
MaxUnavailable intstr.IntOrString
// The maximum number of pods that can be scheduled above the original number of
// pods.
// Value can be an absolute number (ex: 5) or a percentage of total pods at
// the start of the update (ex: 10%). This can not be 0 if MaxUnavailable is 0.
// Absolute number is calculated from percentage by rounding up.
// By default, a value of 1 is used.
// Example: when this is set to 30%, the new RC can be scaled up by 30%
// immediately when the rolling update starts. Once old pods have been killed,
// new RC can be scaled up further, ensuring that total number of pods running
// at any time during the update is at most 130% of original pods.
// +optional
MaxSurge intstr.IntOrString
}
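As a concrete illustration of how MaxUnavailable and MaxSurge accept either an absolute count or a percentage, a rolling-update strategy might be built like this (hypothetical sketch, not part of the vendored file):

package example // hypothetical, for illustration only

import (
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/kubernetes/pkg/apis/extensions"
)

func rollingStrategy() extensions.DeploymentStrategy {
	return extensions.DeploymentStrategy{
		Type: extensions.RollingUpdateDeploymentStrategyType,
		RollingUpdate: &extensions.RollingUpdateDeployment{
			MaxUnavailable: intstr.FromString("25%"), // percentage of desired pods
			MaxSurge:       intstr.FromInt(2),        // absolute pod count
		},
	}
}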
type DeploymentStatus struct {
// The generation observed by the deployment controller.
// +optional
ObservedGeneration int64
// Total number of non-terminated pods targeted by this deployment (their labels match the selector).
// +optional
Replicas int32
// Total number of non-terminated pods targeted by this deployment that have the desired template spec.
// +optional
UpdatedReplicas int32
// Total number of ready pods targeted by this deployment.
// +optional
ReadyReplicas int32
// Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
// +optional
AvailableReplicas int32
// Total number of unavailable pods targeted by this deployment. This is the total number of
// pods that are still required for the deployment to have 100% available capacity. They may
// either be pods that are running but not yet available or pods that still have not been created.
// +optional
UnavailableReplicas int32
// Represents the latest available observations of a deployment's current state.
Conditions []DeploymentCondition
// Count of hash collisions for the Deployment. The Deployment controller uses this
// field as a collision avoidance mechanism when it needs to create the name for the
// newest ReplicaSet.
// +optional
CollisionCount *int32
}
type DeploymentConditionType string
// These are valid conditions of a deployment.
const (
// Available means the deployment is available, ie. at least the minimum available
// replicas required are up and running for at least minReadySeconds.
DeploymentAvailable DeploymentConditionType = "Available"
// Progressing means the deployment is progressing. Progress for a deployment is
// considered when a new replica set is created or adopted, and when new pods scale
// up or old pods scale down. Progress is not estimated for paused deployments or
// when progressDeadlineSeconds is not specified.
DeploymentProgressing DeploymentConditionType = "Progressing"
// ReplicaFailure is added in a deployment when one of its pods fails to be created
// or deleted.
DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure"
)
// DeploymentCondition describes the state of a deployment at a certain point.
type DeploymentCondition struct {
// Type of deployment condition.
Type DeploymentConditionType
// Status of the condition, one of True, False, Unknown.
Status api.ConditionStatus
// The last time this condition was updated.
LastUpdateTime metav1.Time
// Last time the condition transitioned from one status to another.
LastTransitionTime metav1.Time
// The reason for the condition's last transition.
Reason string
// A human readable message indicating details about the transition.
Message string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type DeploymentList struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
// Items is the list of deployments.
Items []Deployment
}
type DaemonSetUpdateStrategy struct {
// Type of daemon set update. Can be "RollingUpdate" or "OnDelete".
// Default is OnDelete.
// +optional
Type DaemonSetUpdateStrategyType
// Rolling update config params. Present only if type = "RollingUpdate".
//---
// TODO: Update this to follow our convention for oneOf, whatever we decide it
// to be. Same as Deployment `strategy.rollingUpdate`.
// See https://github.com/kubernetes/kubernetes/issues/35345
// +optional
RollingUpdate *RollingUpdateDaemonSet
}
type DaemonSetUpdateStrategyType string
const (
// Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other.
RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
// Replace the old daemons only when it's killed
OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete"
)
// Spec to control the desired behavior of daemon set rolling update.
type RollingUpdateDaemonSet struct {
// The maximum number of DaemonSet pods that can be unavailable during the
// update. Value can be an absolute number (ex: 5) or a percentage of total
// number of DaemonSet pods at the start of the update (ex: 10%). Absolute
// number is calculated from percentage by rounding up.
// This cannot be 0.
// Default value is 1.
// Example: when this is set to 30%, at most 30% of the total number of nodes
// that should be running the daemon pod (i.e. status.desiredNumberScheduled)
// can have their pods stopped for an update at any given
// time. The update starts by stopping at most 30% of those DaemonSet pods
// and then brings up new DaemonSet pods in their place. Once the new pods
// are available, it then proceeds onto other DaemonSet pods, thus ensuring
// that at least 70% of original number of DaemonSet pods are available at
// all times during the update.
// +optional
MaxUnavailable intstr.IntOrString
}
// DaemonSetSpec is the specification of a daemon set.
type DaemonSetSpec struct {
// A label query over pods that are managed by the daemon set.
// Must match in order to be controlled.
// If empty, defaulted to labels on Pod template.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
Selector *metav1.LabelSelector
// An object that describes the pod that will be created.
// The DaemonSet will create exactly one copy of this pod on every node
// that matches the template's node selector (or on every node if no node
// selector is specified).
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
Template api.PodTemplateSpec
// An update strategy to replace existing DaemonSet pods with new pods.
// +optional
UpdateStrategy DaemonSetUpdateStrategy
// The minimum number of seconds for which a newly created DaemonSet pod should
// be ready without any of its container crashing, for it to be considered
// available. Defaults to 0 (pod will be considered available as soon as it
// is ready).
// +optional
MinReadySeconds int32
// DEPRECATED.
// A sequence number representing a specific generation of the template.
// Populated by the system. It can be set only during the creation.
// +optional
TemplateGeneration int64
// The number of old history to retain to allow rollback.
// This is a pointer to distinguish between explicit zero and not specified.
// Defaults to 10.
// +optional
RevisionHistoryLimit *int32
}
// DaemonSetStatus represents the current status of a daemon set.
type DaemonSetStatus struct {
// The number of nodes that are running at least 1
// daemon pod and are supposed to run the daemon pod.
CurrentNumberScheduled int32
// The number of nodes that are running the daemon pod, but are
// not supposed to run the daemon pod.
NumberMisscheduled int32
// The total number of nodes that should be running the daemon
// pod (including nodes correctly running the daemon pod).
DesiredNumberScheduled int32
// The number of nodes that should be running the daemon pod and have one
// or more of the daemon pod running and ready.
NumberReady int32
// The most recent generation observed by the daemon set controller.
// +optional
ObservedGeneration int64
// The total number of nodes that are running updated daemon pod
// +optional
UpdatedNumberScheduled int32
// The number of nodes that should be running the
// daemon pod and have one or more of the daemon pod running and
// available (ready for at least spec.minReadySeconds)
// +optional
NumberAvailable int32
// The number of nodes that should be running the
// daemon pod and have none of the daemon pod running and available
// (ready for at least spec.minReadySeconds)
// +optional
NumberUnavailable int32
// Count of hash collisions for the DaemonSet. The DaemonSet controller
// uses this field as a collision avoidance mechanism when it needs to
// create the name for the newest ControllerRevision.
// +optional
CollisionCount *int32
// Represents the latest available observations of a DaemonSet's current state.
Conditions []DaemonSetCondition
}
type DaemonSetConditionType string
// TODO: Add valid condition types of a DaemonSet.
// DaemonSetCondition describes the state of a DaemonSet at a certain point.
type DaemonSetCondition struct {
// Type of DaemonSet condition.
Type DaemonSetConditionType
// Status of the condition, one of True, False, Unknown.
Status api.ConditionStatus
// Last time the condition transitioned from one status to another.
LastTransitionTime metav1.Time
// The reason for the condition's last transition.
Reason string
// A human readable message indicating details about the transition.
Message string
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DaemonSet represents the configuration of a daemon set.
type DaemonSet struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta
// The desired behavior of this daemon set.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec DaemonSetSpec
// The current status of this daemon set. This data may be
// out of date by some window of time.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status DaemonSetStatus
}
const (
// DEPRECATED: DefaultDaemonSetUniqueLabelKey is used instead.
// DaemonSetTemplateGenerationKey is the key of the labels that is added
// to daemon set pods to distinguish between old and new pod templates
// during DaemonSet template update.
DaemonSetTemplateGenerationKey string = "pod-template-generation"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DaemonSetList is a collection of daemon sets.
type DaemonSetList struct {
metav1.TypeMeta
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ListMeta
// A list of daemon sets.
Items []DaemonSet
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Ingress is a collection of rules that allow inbound connections to reach the
// endpoints defined by a backend. An Ingress can be configured to give services
// externally-reachable urls, load balance traffic, terminate SSL, offer name
// based virtual hosting etc.
type Ingress struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta
// Spec is the desired state of the Ingress.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec IngressSpec
// Status is the current state of the Ingress.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status IngressStatus
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// IngressList is a collection of Ingress.
type IngressList struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ListMeta
// Items is the list of Ingress.
Items []Ingress
}
// IngressSpec describes the Ingress the user wishes to exist.
type IngressSpec struct {
// A default backend capable of servicing requests that don't match any
// rule. At least one of 'backend' or 'rules' must be specified. This field
// is optional to allow the loadbalancer controller or defaulting logic to
// specify a global default.
// +optional
Backend *IngressBackend
// TLS configuration. Currently the Ingress only supports a single TLS
// port, 443. If multiple members of this list specify different hosts, they
// will be multiplexed on the same port according to the hostname specified
// through the SNI TLS extension, if the ingress controller fulfilling the
// ingress supports SNI.
// +optional
TLS []IngressTLS
// A list of host rules used to configure the Ingress. If unspecified, or
// no rule matches, all traffic is sent to the default backend.
// +optional
Rules []IngressRule
// TODO: Add the ability to specify load-balancer IP through claims
}
// IngressTLS describes the transport layer security associated with an Ingress.
type IngressTLS struct {
// Hosts are a list of hosts included in the TLS certificate. The values in
// this list must match the name/s used in the tlsSecret. Defaults to the
// wildcard host setting for the loadbalancer controller fulfilling this
// Ingress, if left unspecified.
// +optional
Hosts []string
// SecretName is the name of the secret used to terminate SSL traffic on 443.
// Field is left optional to allow SSL routing based on SNI hostname alone.
// If the SNI host in a listener conflicts with the "Host" header field used
// by an IngressRule, the SNI host is used for termination and value of the
// Host header is used for routing.
// +optional
SecretName string
// TODO: Consider specifying different modes of termination, protocols etc.
}
// IngressStatus describe the current state of the Ingress.
type IngressStatus struct {
// LoadBalancer contains the current status of the load-balancer.
// +optional
LoadBalancer api.LoadBalancerStatus
}
// IngressRule represents the rules mapping the paths under a specified host to
// the related backend services. Incoming requests are first evaluated for a host
// match, then routed to the backend associated with the matching IngressRuleValue.
type IngressRule struct {
// Host is the fully qualified domain name of a network host, as defined
// by RFC 3986. Note the following deviations from the "host" part of the
// URI as defined in the RFC:
// 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the
// IP in the Spec of the parent Ingress.
// 2. The `:` delimiter is not respected because ports are not allowed.
// Currently the port of an Ingress is implicitly :80 for http and
// :443 for https.
// Both these may change in the future.
// Incoming requests are matched against the host before the IngressRuleValue.
// If the host is unspecified, the Ingress routes all traffic based on the
// specified IngressRuleValue.
// +optional
Host string
// IngressRuleValue represents a rule to route requests for this IngressRule.
// If unspecified, the rule defaults to a http catch-all. Whether that sends
// just traffic matching the host to the default backend or all traffic to the
// default backend, is left to the controller fulfilling the Ingress. Http is
// currently the only supported IngressRuleValue.
// +optional
IngressRuleValue
}
// IngressRuleValue represents a rule to apply against incoming requests. If the
// rule is satisfied, the request is routed to the specified backend. Currently
// mixing different types of rules in a single Ingress is disallowed, so exactly
// one of the following must be set.
type IngressRuleValue struct {
//TODO:
// 1. Consider renaming this resource and the associated rules so they
// aren't tied to Ingress. They can be used to route intra-cluster traffic.
// 2. Consider adding fields for ingress-type specific global options
// usable by a loadbalancer, like http keep-alive.
// +optional
HTTP *HTTPIngressRuleValue
}
// HTTPIngressRuleValue is a list of http selectors pointing to backends.
// In the example http://<host>/<path>?<searchpart> -> backend, where parts of
// the URL correspond to RFC 3986, this resource will be used
// to match against everything after the last '/' and before the first '?'
// or '#'.
type HTTPIngressRuleValue struct {
// A collection of paths that map requests to backends.
Paths []HTTPIngressPath
// TODO: Consider adding fields for ingress-type specific global
// options usable by a loadbalancer, like http keep-alive.
}
// HTTPIngressPath associates a path regex with a backend. Incoming urls matching
// the path are forwarded to the backend.
type HTTPIngressPath struct {
// Path is an extended POSIX regex as defined by IEEE Std 1003.1,
// (i.e this follows the egrep/unix syntax, not the perl syntax)
// matched against the path of an incoming request. Currently it can
// contain characters disallowed from the conventional "path"
// part of a URL as defined by RFC 3986. Paths must begin with
// a '/'. If unspecified, the path defaults to a catch all sending
// traffic to the backend.
// +optional
Path string
// Backend defines the referenced service endpoint to which the traffic
// will be forwarded to.
Backend IngressBackend
}
// IngressBackend describes all endpoints for a given service and port.
type IngressBackend struct {
// Specifies the name of the referenced service.
ServiceName string
// Specifies the port of the referenced service.
ServicePort intstr.IntOrString
}
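Putting the Ingress types above together, a minimal rule-based spec is assembled roughly like this (hypothetical sketch, not part of the vendored file; the host, path, and service names are made up):

package example // hypothetical, for illustration only

import (
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/kubernetes/pkg/apis/extensions"
)

func exampleIngressSpec() extensions.IngressSpec {
	return extensions.IngressSpec{
		Rules: []extensions.IngressRule{{
			Host: "example.com",
			IngressRuleValue: extensions.IngressRuleValue{
				HTTP: &extensions.HTTPIngressRuleValue{
					Paths: []extensions.HTTPIngressPath{{
						Path: "/api",
						Backend: extensions.IngressBackend{
							ServiceName: "api-svc",
							ServicePort: intstr.FromInt(8080),
						},
					}},
				},
			},
		}},
	}
}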
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
type ReplicaSet struct {
metav1.TypeMeta
// +optional
metav1.ObjectMeta
// Spec defines the desired behavior of this ReplicaSet.
// +optional
Spec ReplicaSetSpec
// Status is the current status of this ReplicaSet. This data may be
// out of date by some window of time.
// +optional
Status ReplicaSetStatus
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicaSetList is a collection of ReplicaSets.
type ReplicaSetList struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
Items []ReplicaSet
}
// ReplicaSetSpec is the specification of a ReplicaSet.
// As the internal representation of a ReplicaSet, it must have
// a Template set.
type ReplicaSetSpec struct {
// Replicas is the number of desired replicas.
Replicas int32
// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
// +optional
MinReadySeconds int32
// Selector is a label query over pods that should match the replica count.
// Must match in order to be controlled.
// If empty, defaulted to labels on pod template.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
Selector *metav1.LabelSelector
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected.
// +optional
Template api.PodTemplateSpec
}
// ReplicaSetStatus represents the current status of a ReplicaSet.
type ReplicaSetStatus struct {
// Replicas is the number of actual replicas.
Replicas int32
// The number of pods that have labels matching the labels of the pod template of the replicaset.
// +optional
FullyLabeledReplicas int32
// The number of ready replicas for this replica set.
// +optional
ReadyReplicas int32
// The number of available replicas (ready for at least minReadySeconds) for this replica set.
// +optional
AvailableReplicas int32
// ObservedGeneration is the most recent generation observed by the controller.
// +optional
ObservedGeneration int64
// Represents the latest available observations of a replica set's current state.
// +optional
Conditions []ReplicaSetCondition
}
type ReplicaSetConditionType string
// These are valid conditions of a replica set.
const (
// ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created
// due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted
// due to kubelet being down or finalizers are failing.
ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure"
)
// ReplicaSetCondition describes the state of a replica set at a certain point.
type ReplicaSetCondition struct {
// Type of replica set condition.
Type ReplicaSetConditionType
// Status of the condition, one of True, False, Unknown.
Status api.ConditionStatus
// The last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time
// The reason for the condition's last transition.
// +optional
Reason string
// A human readable message indicating details about the transition.
// +optional
Message string
}

vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD generated vendored Normal file

@@ -0,0 +1,69 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"conversion.go",
"defaults.go",
"doc.go",
"register.go",
"zz_generated.conversion.go",
"zz_generated.defaults.go",
],
importpath = "k8s.io/kubernetes/pkg/apis/extensions/v1beta1",
deps = [
"//pkg/apis/autoscaling:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/v1:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/networking:go_default_library",
"//pkg/apis/policy:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
],
)
go_test(
name = "go_default_xtest",
srcs = ["defaults_test.go"],
deps = [
":go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//pkg/apis/extensions/install:go_default_library",
"//pkg/util/pointer:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go generated vendored Normal file

@@ -0,0 +1,504 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"fmt"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/pkg/apis/autoscaling"
api "k8s.io/kubernetes/pkg/apis/core"
k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/networking"
)
func addConversionFuncs(scheme *runtime.Scheme) error {
// Add non-generated conversion functions
err := scheme.AddConversionFuncs(
Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus,
Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus,
Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec,
Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec,
Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy,
Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy,
Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment,
Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment,
Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet,
Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet,
Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec,
Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec,
Convert_v1beta1_NetworkPolicy_To_networking_NetworkPolicy,
Convert_networking_NetworkPolicy_To_v1beta1_NetworkPolicy,
Convert_v1beta1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule,
Convert_networking_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule,
Convert_v1beta1_NetworkPolicyList_To_networking_NetworkPolicyList,
Convert_networking_NetworkPolicyList_To_v1beta1_NetworkPolicyList,
Convert_v1beta1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer,
Convert_networking_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer,
Convert_v1beta1_NetworkPolicyPort_To_networking_NetworkPolicyPort,
Convert_networking_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort,
Convert_v1beta1_NetworkPolicySpec_To_networking_NetworkPolicySpec,
Convert_networking_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec,
Convert_v1beta1_IPBlock_To_networking_IPBlock,
Convert_networking_IPBlock_To_v1beta1_IPBlock,
Convert_networking_NetworkPolicyEgressRule_To_v1beta1_NetworkPolicyEgressRule,
Convert_v1beta1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule,
)
if err != nil {
return err
}
return nil
}
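These conversion functions are normally invoked indirectly, through a scheme that has had the group installed. A minimal sketch of driving a v1beta1-to-internal conversion that way (hypothetical, assuming the install package shown earlier in this diff):

package example // hypothetical, for illustration only

import (
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/apis/extensions/install"
)

func toInternal(in *extensionsv1beta1.Deployment) (*extensions.Deployment, error) {
	scheme := runtime.NewScheme()
	install.Install(scheme) // registers the types plus the conversion funcs above

	out := &extensions.Deployment{}
	if err := scheme.Convert(in, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}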
func Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(in *autoscaling.ScaleStatus, out *extensionsv1beta1.ScaleStatus, s conversion.Scope) error {
out.Replicas = int32(in.Replicas)
out.TargetSelector = in.Selector
out.Selector = nil
selector, err := metav1.ParseToLabelSelector(in.Selector)
if err != nil {
return fmt.Errorf("failed to parse selector: %v", err)
}
if len(selector.MatchExpressions) == 0 {
out.Selector = selector.MatchLabels
}
return nil
}
func Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(in *extensionsv1beta1.ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error {
out.Replicas = in.Replicas
if in.TargetSelector != "" {
out.Selector = in.TargetSelector
} else if in.Selector != nil {
set := labels.Set{}
for key, val := range in.Selector {
set[key] = val
}
out.Selector = labels.SelectorFromSet(set).String()
} else {
out.Selector = ""
}
return nil
}
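The asymmetry handled above (a plain selector string internally versus a map-form Selector plus a TargetSelector string in v1beta1) is easiest to see with the label helpers involved; a hedged illustration, not part of the vendored file:

package example // hypothetical, for illustration only

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func selectorRoundTrip() {
	// The internal ScaleStatus.Selector is a plain string such as "app=web".
	internal := labels.SelectorFromSet(labels.Set{"app": "web"}).String()

	// Converting to v1beta1 parses it back into a LabelSelector; only pure
	// MatchLabels selectors survive into the map-form Selector field.
	parsed, err := metav1.ParseToLabelSelector(internal)
	if err != nil {
		panic(err)
	}
	fmt.Println(internal, parsed.MatchLabels) // "app=web" map[app:web]
}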
func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *extensionsv1beta1.DeploymentSpec, s conversion.Scope) error {
out.Replicas = &in.Replicas
out.Selector = in.Selector
if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err
}
if in.RevisionHistoryLimit != nil {
out.RevisionHistoryLimit = new(int32)
*out.RevisionHistoryLimit = int32(*in.RevisionHistoryLimit)
}
out.MinReadySeconds = int32(in.MinReadySeconds)
out.Paused = in.Paused
if in.RollbackTo != nil {
out.RollbackTo = new(extensionsv1beta1.RollbackConfig)
out.RollbackTo.Revision = int64(in.RollbackTo.Revision)
} else {
out.RollbackTo = nil
}
if in.ProgressDeadlineSeconds != nil {
out.ProgressDeadlineSeconds = new(int32)
*out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds
}
return nil
}
func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *extensionsv1beta1.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error {
if in.Replicas != nil {
out.Replicas = *in.Replicas
}
out.Selector = in.Selector
if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err
}
out.RevisionHistoryLimit = in.RevisionHistoryLimit
out.MinReadySeconds = in.MinReadySeconds
out.Paused = in.Paused
if in.RollbackTo != nil {
out.RollbackTo = new(extensions.RollbackConfig)
out.RollbackTo.Revision = in.RollbackTo.Revision
} else {
out.RollbackTo = nil
}
if in.ProgressDeadlineSeconds != nil {
out.ProgressDeadlineSeconds = new(int32)
*out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds
}
return nil
}
func Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *extensionsv1beta1.DeploymentStrategy, s conversion.Scope) error {
out.Type = extensionsv1beta1.DeploymentStrategyType(in.Type)
if in.RollingUpdate != nil {
out.RollingUpdate = new(extensionsv1beta1.RollingUpdateDeployment)
if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *extensionsv1beta1.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error {
out.Type = extensions.DeploymentStrategyType(in.Type)
if in.RollingUpdate != nil {
out.RollingUpdate = new(extensions.RollingUpdateDeployment)
if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
func Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *extensionsv1beta1.RollingUpdateDeployment, s conversion.Scope) error {
if out.MaxUnavailable == nil {
out.MaxUnavailable = &intstr.IntOrString{}
}
if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil {
return err
}
if out.MaxSurge == nil {
out.MaxSurge = &intstr.IntOrString{}
}
if err := s.Convert(&in.MaxSurge, out.MaxSurge, 0); err != nil {
return err
}
return nil
}
func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *extensionsv1beta1.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error {
if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil {
return err
}
if err := s.Convert(in.MaxSurge, &out.MaxSurge, 0); err != nil {
return err
}
return nil
}
func Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *extensionsv1beta1.RollingUpdateDaemonSet, s conversion.Scope) error {
if out.MaxUnavailable == nil {
out.MaxUnavailable = &intstr.IntOrString{}
}
if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil {
return err
}
return nil
}
func Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *extensionsv1beta1.RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error {
if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil {
return err
}
return nil
}
func Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *extensionsv1beta1.ReplicaSetSpec, s conversion.Scope) error {
out.Replicas = new(int32)
*out.Replicas = int32(in.Replicas)
out.MinReadySeconds = in.MinReadySeconds
out.Selector = in.Selector
if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
return nil
}
func Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *extensionsv1beta1.ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error {
if in.Replicas != nil {
out.Replicas = *in.Replicas
}
out.MinReadySeconds = in.MinReadySeconds
out.Selector = in.Selector
if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
return err
}
return nil
}
func Convert_v1beta1_NetworkPolicy_To_networking_NetworkPolicy(in *extensionsv1beta1.NetworkPolicy, out *networking.NetworkPolicy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
return Convert_v1beta1_NetworkPolicySpec_To_networking_NetworkPolicySpec(&in.Spec, &out.Spec, s)
}
func Convert_networking_NetworkPolicy_To_v1beta1_NetworkPolicy(in *networking.NetworkPolicy, out *extensionsv1beta1.NetworkPolicy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
return Convert_networking_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(&in.Spec, &out.Spec, s)
}
func Convert_v1beta1_NetworkPolicySpec_To_networking_NetworkPolicySpec(in *extensionsv1beta1.NetworkPolicySpec, out *networking.NetworkPolicySpec, s conversion.Scope) error {
if err := s.Convert(&in.PodSelector, &out.PodSelector, 0); err != nil {
return err
}
out.Ingress = make([]networking.NetworkPolicyIngressRule, len(in.Ingress))
for i := range in.Ingress {
if err := Convert_v1beta1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule(&in.Ingress[i], &out.Ingress[i], s); err != nil {
return err
}
}
out.Egress = make([]networking.NetworkPolicyEgressRule, len(in.Egress))
for i := range in.Egress {
if err := Convert_v1beta1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule(&in.Egress[i], &out.Egress[i], s); err != nil {
return err
}
}
if in.PolicyTypes != nil {
in, out := &in.PolicyTypes, &out.PolicyTypes
*out = make([]networking.PolicyType, len(*in))
for i := range *in {
if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil {
return err
}
}
}
return nil
}
func Convert_networking_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in *networking.NetworkPolicySpec, out *extensionsv1beta1.NetworkPolicySpec, s conversion.Scope) error {
if err := s.Convert(&in.PodSelector, &out.PodSelector, 0); err != nil {
return err
}
out.Ingress = make([]extensionsv1beta1.NetworkPolicyIngressRule, len(in.Ingress))
for i := range in.Ingress {
if err := Convert_networking_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(&in.Ingress[i], &out.Ingress[i], s); err != nil {
return err
}
}
out.Egress = make([]extensionsv1beta1.NetworkPolicyEgressRule, len(in.Egress))
for i := range in.Egress {
if err := Convert_networking_NetworkPolicyEgressRule_To_v1beta1_NetworkPolicyEgressRule(&in.Egress[i], &out.Egress[i], s); err != nil {
return err
}
}
if in.PolicyTypes != nil {
in, out := &in.PolicyTypes, &out.PolicyTypes
*out = make([]extensionsv1beta1.PolicyType, len(*in))
for i := range *in {
if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil {
return err
}
}
}
return nil
}
func Convert_v1beta1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngressRule(in *extensionsv1beta1.NetworkPolicyIngressRule, out *networking.NetworkPolicyIngressRule, s conversion.Scope) error {
out.Ports = make([]networking.NetworkPolicyPort, len(in.Ports))
for i := range in.Ports {
if err := Convert_v1beta1_NetworkPolicyPort_To_networking_NetworkPolicyPort(&in.Ports[i], &out.Ports[i], s); err != nil {
return err
}
}
if in.From != nil {
out.From = make([]networking.NetworkPolicyPeer, len(in.From))
for i := range in.From {
if err := Convert_v1beta1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(&in.From[i], &out.From[i], s); err != nil {
return err
}
}
}
return nil
}
func Convert_networking_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in *networking.NetworkPolicyIngressRule, out *extensionsv1beta1.NetworkPolicyIngressRule, s conversion.Scope) error {
out.Ports = make([]extensionsv1beta1.NetworkPolicyPort, len(in.Ports))
for i := range in.Ports {
if err := Convert_networking_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(&in.Ports[i], &out.Ports[i], s); err != nil {
return err
}
}
if in.From != nil {
out.From = make([]extensionsv1beta1.NetworkPolicyPeer, len(in.From))
for i := range in.From {
if err := Convert_networking_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(&in.From[i], &out.From[i], s); err != nil {
return err
}
}
}
return nil
}
func Convert_v1beta1_NetworkPolicyEgressRule_To_networking_NetworkPolicyEgressRule(in *extensionsv1beta1.NetworkPolicyEgressRule, out *networking.NetworkPolicyEgressRule, s conversion.Scope) error {
out.Ports = make([]networking.NetworkPolicyPort, len(in.Ports))
for i := range in.Ports {
if err := Convert_v1beta1_NetworkPolicyPort_To_networking_NetworkPolicyPort(&in.Ports[i], &out.Ports[i], s); err != nil {
return err
}
}
out.To = make([]networking.NetworkPolicyPeer, len(in.To))
for i := range in.To {
if err := Convert_v1beta1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(&in.To[i], &out.To[i], s); err != nil {
return err
}
}
return nil
}
func Convert_networking_NetworkPolicyEgressRule_To_v1beta1_NetworkPolicyEgressRule(in *networking.NetworkPolicyEgressRule, out *extensionsv1beta1.NetworkPolicyEgressRule, s conversion.Scope) error {
out.Ports = make([]extensionsv1beta1.NetworkPolicyPort, len(in.Ports))
for i := range in.Ports {
if err := Convert_networking_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(&in.Ports[i], &out.Ports[i], s); err != nil {
return err
}
}
out.To = make([]extensionsv1beta1.NetworkPolicyPeer, len(in.To))
for i := range in.To {
if err := Convert_networking_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(&in.To[i], &out.To[i], s); err != nil {
return err
}
}
return nil
}
func Convert_v1beta1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(in *extensionsv1beta1.NetworkPolicyPeer, out *networking.NetworkPolicyPeer, s conversion.Scope) error {
if in.PodSelector != nil {
out.PodSelector = new(metav1.LabelSelector)
if err := s.Convert(in.PodSelector, out.PodSelector, 0); err != nil {
return err
}
} else {
out.PodSelector = nil
}
if in.NamespaceSelector != nil {
out.NamespaceSelector = new(metav1.LabelSelector)
if err := s.Convert(in.NamespaceSelector, out.NamespaceSelector, 0); err != nil {
return err
}
} else {
out.NamespaceSelector = nil
}
if in.IPBlock != nil {
out.IPBlock = new(networking.IPBlock)
if err := s.Convert(in.IPBlock, out.IPBlock, 0); err != nil {
return err
}
} else {
out.IPBlock = nil
}
return nil
}
func Convert_networking_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in *networking.NetworkPolicyPeer, out *extensionsv1beta1.NetworkPolicyPeer, s conversion.Scope) error {
if in.PodSelector != nil {
out.PodSelector = new(metav1.LabelSelector)
if err := s.Convert(in.PodSelector, out.PodSelector, 0); err != nil {
return err
}
} else {
out.PodSelector = nil
}
if in.NamespaceSelector != nil {
out.NamespaceSelector = new(metav1.LabelSelector)
if err := s.Convert(in.NamespaceSelector, out.NamespaceSelector, 0); err != nil {
return err
}
} else {
out.NamespaceSelector = nil
}
if in.IPBlock != nil {
out.IPBlock = new(extensionsv1beta1.IPBlock)
if err := s.Convert(in.IPBlock, out.IPBlock, 0); err != nil {
return err
}
} else {
out.IPBlock = nil
}
return nil
}
func Convert_v1beta1_IPBlock_To_networking_IPBlock(in *extensionsv1beta1.IPBlock, out *networking.IPBlock, s conversion.Scope) error {
out.CIDR = in.CIDR
out.Except = make([]string, len(in.Except))
copy(out.Except, in.Except)
return nil
}
func Convert_networking_IPBlock_To_v1beta1_IPBlock(in *networking.IPBlock, out *extensionsv1beta1.IPBlock, s conversion.Scope) error {
out.CIDR = in.CIDR
out.Except = make([]string, len(in.Except))
copy(out.Except, in.Except)
return nil
}
func Convert_v1beta1_NetworkPolicyPort_To_networking_NetworkPolicyPort(in *extensionsv1beta1.NetworkPolicyPort, out *networking.NetworkPolicyPort, s conversion.Scope) error {
if in.Protocol != nil {
out.Protocol = new(api.Protocol)
*out.Protocol = api.Protocol(*in.Protocol)
} else {
out.Protocol = nil
}
out.Port = in.Port
return nil
}
func Convert_networking_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in *networking.NetworkPolicyPort, out *extensionsv1beta1.NetworkPolicyPort, s conversion.Scope) error {
if in.Protocol != nil {
out.Protocol = new(v1.Protocol)
*out.Protocol = v1.Protocol(*in.Protocol)
} else {
out.Protocol = nil
}
out.Port = in.Port
return nil
}
func Convert_v1beta1_NetworkPolicyList_To_networking_NetworkPolicyList(in *extensionsv1beta1.NetworkPolicyList, out *networking.NetworkPolicyList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = make([]networking.NetworkPolicy, len(in.Items))
for i := range in.Items {
if err := Convert_v1beta1_NetworkPolicy_To_networking_NetworkPolicy(&in.Items[i], &out.Items[i], s); err != nil {
return err
}
}
return nil
}
func Convert_networking_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in *networking.NetworkPolicyList, out *extensionsv1beta1.NetworkPolicyList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = make([]extensionsv1beta1.NetworkPolicy, len(in.Items))
for i := range in.Items {
if err := Convert_networking_NetworkPolicy_To_v1beta1_NetworkPolicy(&in.Items[i], &out.Items[i], s); err != nil {
return err
}
}
return nil
}
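// exampleConvertIPBlock is an illustrative sketch, not part of the upstream
// conversions: it shows one of the hand-written helpers above being called
// directly. For a plain field-for-field copy such as IPBlock the
// conversion.Scope argument is not consulted, so nil is passed here only for
// the purposes of the sketch; real callers go through the registered scheme.
func exampleConvertIPBlock() (*networking.IPBlock, error) {
in := &extensionsv1beta1.IPBlock{
CIDR: "10.0.0.0/8",
Except: []string{"10.0.1.0/24"},
}
out := &networking.IPBlock{}
if err := Convert_v1beta1_IPBlock_To_networking_IPBlock(in, out, nil); err != nil {
return nil, err
}
return out, nil
}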

View File

@@ -0,0 +1,153 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_DaemonSet(obj *extensionsv1beta1.DaemonSet) {
labels := obj.Spec.Template.Labels
// TODO: support templates defined elsewhere when we support them in the API
if labels != nil {
if obj.Spec.Selector == nil {
obj.Spec.Selector = &metav1.LabelSelector{
MatchLabels: labels,
}
}
if len(obj.Labels) == 0 {
obj.Labels = labels
}
}
updateStrategy := &obj.Spec.UpdateStrategy
if updateStrategy.Type == "" {
updateStrategy.Type = extensionsv1beta1.OnDeleteDaemonSetStrategyType
}
if updateStrategy.Type == extensionsv1beta1.RollingUpdateDaemonSetStrategyType {
if updateStrategy.RollingUpdate == nil {
rollingUpdate := extensionsv1beta1.RollingUpdateDaemonSet{}
updateStrategy.RollingUpdate = &rollingUpdate
}
if updateStrategy.RollingUpdate.MaxUnavailable == nil {
// Default MaxUnavailable to 1.
maxUnavailable := intstr.FromInt(1)
updateStrategy.RollingUpdate.MaxUnavailable = &maxUnavailable
}
}
if obj.Spec.RevisionHistoryLimit == nil {
obj.Spec.RevisionHistoryLimit = new(int32)
*obj.Spec.RevisionHistoryLimit = 10
}
}
func SetDefaults_PodSecurityPolicySpec(obj *extensionsv1beta1.PodSecurityPolicySpec) {
// This field was added after PodSecurityPolicy was released.
// Policies that do not include this field must remain as permissive as they were prior to the introduction of this field.
if obj.AllowPrivilegeEscalation == nil {
t := true
obj.AllowPrivilegeEscalation = &t
}
}
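// exampleDefaultPodSecurityPolicySpec is an illustrative sketch, not part of
// the upstream file: with AllowPrivilegeEscalation left unset, the defaulter
// above fills it in as true, preserving the permissive pre-field behavior.
func exampleDefaultPodSecurityPolicySpec() bool {
spec := &extensionsv1beta1.PodSecurityPolicySpec{}
SetDefaults_PodSecurityPolicySpec(spec)
return *spec.AllowPrivilegeEscalation // true
}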
func SetDefaults_Deployment(obj *extensionsv1beta1.Deployment) {
// Default labels and selector to labels from pod template spec.
labels := obj.Spec.Template.Labels
if labels != nil {
if obj.Spec.Selector == nil {
obj.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels}
}
if len(obj.Labels) == 0 {
obj.Labels = labels
}
}
// Set extensionsv1beta1.DeploymentSpec.Replicas to 1 if it is not set.
if obj.Spec.Replicas == nil {
obj.Spec.Replicas = new(int32)
*obj.Spec.Replicas = 1
}
strategy := &obj.Spec.Strategy
// Default the extensionsv1beta1.DeploymentStrategyType to RollingUpdate.
if strategy.Type == "" {
strategy.Type = extensionsv1beta1.RollingUpdateDeploymentStrategyType
}
if strategy.Type == extensionsv1beta1.RollingUpdateDeploymentStrategyType || strategy.RollingUpdate != nil {
if strategy.RollingUpdate == nil {
rollingUpdate := extensionsv1beta1.RollingUpdateDeployment{}
strategy.RollingUpdate = &rollingUpdate
}
if strategy.RollingUpdate.MaxUnavailable == nil {
// Default MaxUnavailable to 1.
maxUnavailable := intstr.FromInt(1)
strategy.RollingUpdate.MaxUnavailable = &maxUnavailable
}
if strategy.RollingUpdate.MaxSurge == nil {
// Default MaxSurge to 1.
maxSurge := intstr.FromInt(1)
strategy.RollingUpdate.MaxSurge = &maxSurge
}
}
}
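// exampleDefaultDeployment is an illustrative sketch, not part of the
// upstream file: an otherwise empty Deployment comes back with Replicas
// defaulted to 1, the strategy defaulted to RollingUpdate, and MaxUnavailable
// and MaxSurge each defaulted to 1.
func exampleDefaultDeployment() *extensionsv1beta1.Deployment {
d := &extensionsv1beta1.Deployment{}
SetDefaults_Deployment(d)
return d
}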
func SetDefaults_ReplicaSet(obj *extensionsv1beta1.ReplicaSet) {
labels := obj.Spec.Template.Labels
// TODO: support templates defined elsewhere when we support them in the API
if labels != nil {
if obj.Spec.Selector == nil {
obj.Spec.Selector = &metav1.LabelSelector{
MatchLabels: labels,
}
}
if len(obj.Labels) == 0 {
obj.Labels = labels
}
}
if obj.Spec.Replicas == nil {
obj.Spec.Replicas = new(int32)
*obj.Spec.Replicas = 1
}
}
func SetDefaults_NetworkPolicy(obj *extensionsv1beta1.NetworkPolicy) {
// Default any undefined Protocol fields to TCP.
// Iterate by index so the defaulted Protocol is written back to the
// original port entries; ranging by value would only mutate copies.
for idx := range obj.Spec.Ingress {
for j := range obj.Spec.Ingress[idx].Ports {
p := &obj.Spec.Ingress[idx].Ports[j]
if p.Protocol == nil {
proto := v1.ProtocolTCP
p.Protocol = &proto
}
}
}
if len(obj.Spec.PolicyTypes) == 0 {
// Any policy that does not specify policyTypes implies at least "Ingress".
obj.Spec.PolicyTypes = []extensionsv1beta1.PolicyType{extensionsv1beta1.PolicyTypeIngress}
if len(obj.Spec.Egress) != 0 {
obj.Spec.PolicyTypes = append(obj.Spec.PolicyTypes, extensionsv1beta1.PolicyTypeEgress)
}
}
}
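// exampleDefaultNetworkPolicy is an illustrative sketch, not part of the
// upstream file: a policy that declares only egress rules and leaves
// PolicyTypes unset is defaulted to both PolicyTypeIngress and
// PolicyTypeEgress, matching the comment above.
func exampleDefaultNetworkPolicy() []extensionsv1beta1.PolicyType {
np := &extensionsv1beta1.NetworkPolicy{
Spec: extensionsv1beta1.NetworkPolicySpec{
Egress: []extensionsv1beta1.NetworkPolicyEgressRule{{}},
},
}
SetDefaults_NetworkPolicy(np)
return np.Spec.PolicyTypes // [Ingress, Egress]
}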

View File

@@ -0,0 +1,745 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1_test
import (
"reflect"
"testing"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
"k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/pkg/api/legacyscheme"
api "k8s.io/kubernetes/pkg/apis/core"
_ "k8s.io/kubernetes/pkg/apis/core/install"
_ "k8s.io/kubernetes/pkg/apis/extensions/install"
. "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
)
func TestSetDefaultDaemonSetSpec(t *testing.T) {
defaultLabels := map[string]string{"foo": "bar"}
period := int64(v1.DefaultTerminationGracePeriodSeconds)
defaultTemplate := v1.PodTemplateSpec{
Spec: v1.PodSpec{
DNSPolicy: v1.DNSClusterFirst,
RestartPolicy: v1.RestartPolicyAlways,
SecurityContext: &v1.PodSecurityContext{},
TerminationGracePeriodSeconds: &period,
SchedulerName: api.DefaultSchedulerName,
},
ObjectMeta: metav1.ObjectMeta{
Labels: defaultLabels,
},
}
templateNoLabel := v1.PodTemplateSpec{
Spec: v1.PodSpec{
DNSPolicy: v1.DNSClusterFirst,
RestartPolicy: v1.RestartPolicyAlways,
SecurityContext: &v1.PodSecurityContext{},
TerminationGracePeriodSeconds: &period,
SchedulerName: api.DefaultSchedulerName,
},
}
tests := []struct {
original *extensionsv1beta1.DaemonSet
expected *extensionsv1beta1.DaemonSet
}{
{ // Labels change/defaulting test.
original: &extensionsv1beta1.DaemonSet{
Spec: extensionsv1beta1.DaemonSetSpec{
Template: defaultTemplate,
},
},
expected: &extensionsv1beta1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Labels: defaultLabels,
},
Spec: extensionsv1beta1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: defaultLabels,
},
Template: defaultTemplate,
UpdateStrategy: extensionsv1beta1.DaemonSetUpdateStrategy{
Type: extensionsv1beta1.OnDeleteDaemonSetStrategyType,
},
RevisionHistoryLimit: utilpointer.Int32Ptr(10),
},
},
},
{ // Labels change/defaulting test.
original: &extensionsv1beta1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"bar": "foo",
},
},
Spec: extensionsv1beta1.DaemonSetSpec{
Template: defaultTemplate,
RevisionHistoryLimit: utilpointer.Int32Ptr(1),
},
},
expected: &extensionsv1beta1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"bar": "foo",
},
},
Spec: extensionsv1beta1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: defaultLabels,
},
Template: defaultTemplate,
UpdateStrategy: extensionsv1beta1.DaemonSetUpdateStrategy{
Type: extensionsv1beta1.OnDeleteDaemonSetStrategyType,
},
RevisionHistoryLimit: utilpointer.Int32Ptr(1),
},
},
},
{ // Update strategy.
original: &extensionsv1beta1.DaemonSet{},
expected: &extensionsv1beta1.DaemonSet{
Spec: extensionsv1beta1.DaemonSetSpec{
Template: templateNoLabel,
UpdateStrategy: extensionsv1beta1.DaemonSetUpdateStrategy{
Type: extensionsv1beta1.OnDeleteDaemonSetStrategyType,
},
RevisionHistoryLimit: utilpointer.Int32Ptr(10),
},
},
},
{ // Custom unique label key.
original: &extensionsv1beta1.DaemonSet{
Spec: extensionsv1beta1.DaemonSetSpec{},
},
expected: &extensionsv1beta1.DaemonSet{
Spec: extensionsv1beta1.DaemonSetSpec{
Template: templateNoLabel,
UpdateStrategy: extensionsv1beta1.DaemonSetUpdateStrategy{
Type: extensionsv1beta1.OnDeleteDaemonSetStrategyType,
},
RevisionHistoryLimit: utilpointer.Int32Ptr(10),
},
},
},
}
for i, test := range tests {
original := test.original
expected := test.expected
obj2 := roundTrip(t, runtime.Object(original))
got, ok := obj2.(*extensionsv1beta1.DaemonSet)
if !ok {
t.Errorf("(%d) unexpected object: %v", i, got)
t.FailNow()
}
if !apiequality.Semantic.DeepEqual(got.Spec, expected.Spec) {
t.Errorf("(%d) got different than expected\ngot:\n\t%+v\nexpected:\n\t%+v", i, got.Spec, expected.Spec)
}
}
}
func TestSetDefaultDeployment(t *testing.T) {
defaultIntOrString := intstr.FromInt(1)
differentIntOrString := intstr.FromInt(5)
period := int64(v1.DefaultTerminationGracePeriodSeconds)
defaultTemplate := v1.PodTemplateSpec{
Spec: v1.PodSpec{
DNSPolicy: v1.DNSClusterFirst,
RestartPolicy: v1.RestartPolicyAlways,
SecurityContext: &v1.PodSecurityContext{},
TerminationGracePeriodSeconds: &period,
SchedulerName: api.DefaultSchedulerName,
},
}
tests := []struct {
original *extensionsv1beta1.Deployment
expected *extensionsv1beta1.Deployment
}{
{
original: &extensionsv1beta1.Deployment{},
expected: &extensionsv1beta1.Deployment{
Spec: extensionsv1beta1.DeploymentSpec{
Replicas: utilpointer.Int32Ptr(1),
Strategy: extensionsv1beta1.DeploymentStrategy{
Type: extensionsv1beta1.RollingUpdateDeploymentStrategyType,
RollingUpdate: &extensionsv1beta1.RollingUpdateDeployment{
MaxSurge: &defaultIntOrString,
MaxUnavailable: &defaultIntOrString,
},
},
Template: defaultTemplate,
},
},
},
{
original: &extensionsv1beta1.Deployment{
Spec: extensionsv1beta1.DeploymentSpec{
Replicas: utilpointer.Int32Ptr(5),
Strategy: extensionsv1beta1.DeploymentStrategy{
RollingUpdate: &extensionsv1beta1.RollingUpdateDeployment{
MaxSurge: &differentIntOrString,
},
},
},
},
expected: &extensionsv1beta1.Deployment{
Spec: extensionsv1beta1.DeploymentSpec{
Replicas: utilpointer.Int32Ptr(5),
Strategy: extensionsv1beta1.DeploymentStrategy{
Type: extensionsv1beta1.RollingUpdateDeploymentStrategyType,
RollingUpdate: &extensionsv1beta1.RollingUpdateDeployment{
MaxSurge: &differentIntOrString,
MaxUnavailable: &defaultIntOrString,
},
},
Template: defaultTemplate,
},
},
},
{
original: &extensionsv1beta1.Deployment{
Spec: extensionsv1beta1.DeploymentSpec{
Replicas: utilpointer.Int32Ptr(3),
Strategy: extensionsv1beta1.DeploymentStrategy{
Type: extensionsv1beta1.RollingUpdateDeploymentStrategyType,
RollingUpdate: nil,
},
},
},
expected: &extensionsv1beta1.Deployment{
Spec: extensionsv1beta1.DeploymentSpec{
Replicas: utilpointer.Int32Ptr(3),
Strategy: extensionsv1beta1.DeploymentStrategy{
Type: extensionsv1beta1.RollingUpdateDeploymentStrategyType,
RollingUpdate: &extensionsv1beta1.RollingUpdateDeployment{
MaxSurge: &defaultIntOrString,
MaxUnavailable: &defaultIntOrString,
},
},
Template: defaultTemplate,
},
},
},
{
original: &extensionsv1beta1.Deployment{
Spec: extensionsv1beta1.DeploymentSpec{
Replicas: utilpointer.Int32Ptr(5),
Strategy: extensionsv1beta1.DeploymentStrategy{
Type: extensionsv1beta1.RecreateDeploymentStrategyType,
},
},
},
expected: &extensionsv1beta1.Deployment{
Spec: extensionsv1beta1.DeploymentSpec{
Replicas: utilpointer.Int32Ptr(5),
Strategy: extensionsv1beta1.DeploymentStrategy{
Type: extensionsv1beta1.RecreateDeploymentStrategyType,
},
Template: defaultTemplate,
},
},
},
{
original: &extensionsv1beta1.Deployment{
Spec: extensionsv1beta1.DeploymentSpec{
Replicas: utilpointer.Int32Ptr(5),
Strategy: extensionsv1beta1.DeploymentStrategy{
Type: extensionsv1beta1.RecreateDeploymentStrategyType,
},
ProgressDeadlineSeconds: utilpointer.Int32Ptr(30),
},
},
expected: &extensionsv1beta1.Deployment{
Spec: extensionsv1beta1.DeploymentSpec{
Replicas: utilpointer.Int32Ptr(5),
Strategy: extensionsv1beta1.DeploymentStrategy{
Type: extensionsv1beta1.RecreateDeploymentStrategyType,
},
Template: defaultTemplate,
ProgressDeadlineSeconds: utilpointer.Int32Ptr(30),
},
},
},
}
for _, test := range tests {
original := test.original
expected := test.expected
obj2 := roundTrip(t, runtime.Object(original))
got, ok := obj2.(*extensionsv1beta1.Deployment)
if !ok {
t.Errorf("unexpected object: %v", got)
t.FailNow()
}
if !apiequality.Semantic.DeepEqual(got.Spec, expected.Spec) {
t.Errorf("object mismatch!\nexpected:\n\t%+v\ngot:\n\t%+v", got.Spec, expected.Spec)
}
}
}
func TestSetDefaultReplicaSet(t *testing.T) {
tests := []struct {
rs *extensionsv1beta1.ReplicaSet
expectLabels bool
expectSelector bool
}{
{
rs: &extensionsv1beta1.ReplicaSet{
Spec: extensionsv1beta1.ReplicaSetSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
},
},
},
expectLabels: true,
expectSelector: true,
},
{
rs: &extensionsv1beta1.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"bar": "foo",
},
},
Spec: extensionsv1beta1.ReplicaSetSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
},
},
},
expectLabels: false,
expectSelector: true,
},
{
rs: &extensionsv1beta1.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"bar": "foo",
},
},
Spec: extensionsv1beta1.ReplicaSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"some": "other",
},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
},
},
},
expectLabels: false,
expectSelector: false,
},
{
rs: &extensionsv1beta1.ReplicaSet{
Spec: extensionsv1beta1.ReplicaSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"some": "other",
},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
},
},
},
expectLabels: true,
expectSelector: false,
},
}
for _, test := range tests {
rs := test.rs
obj2 := roundTrip(t, runtime.Object(rs))
rs2, ok := obj2.(*extensionsv1beta1.ReplicaSet)
if !ok {
t.Errorf("unexpected object: %v", rs2)
t.FailNow()
}
if test.expectSelector != reflect.DeepEqual(rs2.Spec.Selector.MatchLabels, rs2.Spec.Template.Labels) {
if test.expectSelector {
t.Errorf("expected: %v, got: %v", rs2.Spec.Template.Labels, rs2.Spec.Selector)
} else {
t.Errorf("unexpected equality: %v", rs.Spec.Selector)
}
}
if test.expectLabels != reflect.DeepEqual(rs2.Labels, rs2.Spec.Template.Labels) {
if test.expectLabels {
t.Errorf("expected: %v, got: %v", rs2.Spec.Template.Labels, rs2.Labels)
} else {
t.Errorf("unexpected equality: %v", rs.Labels)
}
}
}
}
func TestSetDefaultReplicaSetReplicas(t *testing.T) {
tests := []struct {
rs extensionsv1beta1.ReplicaSet
expectReplicas int32
}{
{
rs: extensionsv1beta1.ReplicaSet{
Spec: extensionsv1beta1.ReplicaSetSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
},
},
},
expectReplicas: 1,
},
{
rs: extensionsv1beta1.ReplicaSet{
Spec: extensionsv1beta1.ReplicaSetSpec{
Replicas: utilpointer.Int32Ptr(0),
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
},
},
},
expectReplicas: 0,
},
{
rs: extensionsv1beta1.ReplicaSet{
Spec: extensionsv1beta1.ReplicaSetSpec{
Replicas: utilpointer.Int32Ptr(3),
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
},
},
},
expectReplicas: 3,
},
}
for _, test := range tests {
rs := &test.rs
obj2 := roundTrip(t, runtime.Object(rs))
rs2, ok := obj2.(*extensionsv1beta1.ReplicaSet)
if !ok {
t.Errorf("unexpected object: %v", rs2)
t.FailNow()
}
if rs2.Spec.Replicas == nil {
t.Errorf("unexpected nil Replicas")
} else if test.expectReplicas != *rs2.Spec.Replicas {
t.Errorf("expected: %d replicas, got: %d", test.expectReplicas, *rs2.Spec.Replicas)
}
}
}
func TestDefaultRequestIsNotSetForReplicaSet(t *testing.T) {
s := v1.PodSpec{}
s.Containers = []v1.Container{
{
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
},
},
},
}
rs := &extensionsv1beta1.ReplicaSet{
Spec: extensionsv1beta1.ReplicaSetSpec{
Replicas: utilpointer.Int32Ptr(3),
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
Spec: s,
},
},
}
output := roundTrip(t, runtime.Object(rs))
rs2 := output.(*extensionsv1beta1.ReplicaSet)
defaultRequest := rs2.Spec.Template.Spec.Containers[0].Resources.Requests
requestValue := defaultRequest[v1.ResourceCPU]
if requestValue.String() != "0" {
t.Errorf("Expected 0 request value, got: %s", requestValue.String())
}
}
func TestDefaultAllowPrivilegeEscalationForPodSecurityPolicy(t *testing.T) {
psp := &extensionsv1beta1.PodSecurityPolicy{}
output := roundTrip(t, runtime.Object(psp))
psp2 := output.(*extensionsv1beta1.PodSecurityPolicy)
if psp2.Spec.AllowPrivilegeEscalation == nil || *psp2.Spec.AllowPrivilegeEscalation != true {
t.Errorf("Expected default to true, got: %#v", psp2.Spec.AllowPrivilegeEscalation)
}
}
func TestSetDefaultNetworkPolicy(t *testing.T) {
tests := []struct {
original *extensionsv1beta1.NetworkPolicy
expected *extensionsv1beta1.NetworkPolicy
}{
{ // Empty NetworkPolicy should be set to PolicyTypes Ingress
original: &extensionsv1beta1.NetworkPolicy{
Spec: extensionsv1beta1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{"a": "b"},
},
},
},
expected: &extensionsv1beta1.NetworkPolicy{
Spec: extensionsv1beta1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{"a": "b"},
},
PolicyTypes: []extensionsv1beta1.PolicyType{extensionsv1beta1.PolicyTypeIngress},
},
},
},
{ // Empty Ingress NetworkPolicy should be set to PolicyTypes Ingress
original: &extensionsv1beta1.NetworkPolicy{
Spec: extensionsv1beta1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{"a": "b"},
},
Ingress: []extensionsv1beta1.NetworkPolicyIngressRule{},
},
},
expected: &extensionsv1beta1.NetworkPolicy{
Spec: extensionsv1beta1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{"a": "b"},
},
Ingress: []extensionsv1beta1.NetworkPolicyIngressRule{},
PolicyTypes: []extensionsv1beta1.PolicyType{extensionsv1beta1.PolicyTypeIngress},
},
},
},
{ // Defined Ingress and Egress should be set to Ingress,Egress
original: &extensionsv1beta1.NetworkPolicy{
Spec: extensionsv1beta1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{"a": "b"},
},
Ingress: []extensionsv1beta1.NetworkPolicyIngressRule{
{
From: []extensionsv1beta1.NetworkPolicyPeer{
{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"c": "d"},
},
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"c": "d"},
},
},
},
},
},
Egress: []extensionsv1beta1.NetworkPolicyEgressRule{
{
To: []extensionsv1beta1.NetworkPolicyPeer{
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"c": "d"},
},
},
},
},
},
},
},
expected: &extensionsv1beta1.NetworkPolicy{
Spec: extensionsv1beta1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{"a": "b"},
},
Ingress: []extensionsv1beta1.NetworkPolicyIngressRule{
{
From: []extensionsv1beta1.NetworkPolicyPeer{
{
PodSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"c": "d"},
},
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"c": "d"},
},
},
},
},
},
Egress: []extensionsv1beta1.NetworkPolicyEgressRule{
{
To: []extensionsv1beta1.NetworkPolicyPeer{
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"c": "d"},
},
},
},
},
},
PolicyTypes: []extensionsv1beta1.PolicyType{extensionsv1beta1.PolicyTypeIngress, extensionsv1beta1.PolicyTypeEgress},
},
},
},
{ // Egress only with unset PolicyTypes should be set to Ingress, Egress
original: &extensionsv1beta1.NetworkPolicy{
Spec: extensionsv1beta1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{"a": "b"},
},
Egress: []extensionsv1beta1.NetworkPolicyEgressRule{
{
To: []extensionsv1beta1.NetworkPolicyPeer{
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"c": "d"},
},
},
},
},
},
},
},
expected: &extensionsv1beta1.NetworkPolicy{
Spec: extensionsv1beta1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{"a": "b"},
},
Egress: []extensionsv1beta1.NetworkPolicyEgressRule{
{
To: []extensionsv1beta1.NetworkPolicyPeer{
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"c": "d"},
},
},
},
},
},
PolicyTypes: []extensionsv1beta1.PolicyType{extensionsv1beta1.PolicyTypeIngress, extensionsv1beta1.PolicyTypeEgress},
},
},
},
{ // Egress only with PolicyTypes set to Egress should be set to only Egress
original: &extensionsv1beta1.NetworkPolicy{
Spec: extensionsv1beta1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{"a": "b"},
},
Egress: []extensionsv1beta1.NetworkPolicyEgressRule{
{
To: []extensionsv1beta1.NetworkPolicyPeer{
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"Egress": "only"},
},
},
},
},
},
PolicyTypes: []extensionsv1beta1.PolicyType{extensionsv1beta1.PolicyTypeEgress},
},
},
expected: &extensionsv1beta1.NetworkPolicy{
Spec: extensionsv1beta1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{
MatchLabels: map[string]string{"a": "b"},
},
Egress: []extensionsv1beta1.NetworkPolicyEgressRule{
{
To: []extensionsv1beta1.NetworkPolicyPeer{
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"Egress": "only"},
},
},
},
},
},
PolicyTypes: []extensionsv1beta1.PolicyType{extensionsv1beta1.PolicyTypeEgress},
},
},
},
}
for i, test := range tests {
original := test.original
expected := test.expected
obj2 := roundTrip(t, runtime.Object(original))
got, ok := obj2.(*extensionsv1beta1.NetworkPolicy)
if !ok {
t.Errorf("(%d) unexpected object: %v", i, got)
t.FailNow()
}
if !apiequality.Semantic.DeepEqual(got.Spec, expected.Spec) {
t.Errorf("(%d) got different than expected\ngot:\n\t%+v\nexpected:\n\t%+v", i, got.Spec, expected.Spec)
}
}
}
func roundTrip(t *testing.T, obj runtime.Object) runtime.Object {
data, err := runtime.Encode(legacyscheme.Codecs.LegacyCodec(SchemeGroupVersion), obj)
if err != nil {
t.Errorf("%v\n %#v", err, obj)
return nil
}
obj2, err := runtime.Decode(legacyscheme.Codecs.UniversalDecoder(), data)
if err != nil {
t.Errorf("%v\nData: %s\nSource: %#v", err, string(data), obj)
return nil
}
obj3 := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object)
err = legacyscheme.Scheme.Convert(obj2, obj3, nil)
if err != nil {
t.Errorf("%v\nSource: %#v", err, obj2)
return nil
}
return obj3
}

View File

@@ -0,0 +1,24 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/policy
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/extensions
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/autoscaling
// +k8s:conversion-gen-external-types=k8s.io/api/extensions/v1beta1
// +k8s:defaulter-gen=TypeMeta
// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/extensions/v1beta1
package v1beta1 // import "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"

View File

@@ -0,0 +1,45 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "extensions"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
localSchemeBuilder = &extensionsv1beta1.SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
}
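// Illustrative usage sketch, not part of the upstream file, assuming a
// *runtime.Scheme from k8s.io/apimachinery/pkg/runtime is available:
//
//	scheme := runtime.NewScheme()
//	if err := AddToScheme(scheme); err != nil {
//		// handle registration error
//	}
//
// AddToScheme runs every function registered on the shared SchemeBuilder,
// which includes the manually wired addDefaultingFuncs and addConversionFuncs
// from init() above as well as the generated registrations.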

File diff suppressed because it is too large

View File

@@ -0,0 +1,507 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta1
import (
v1beta1 "k8s.io/api/extensions/v1beta1"
runtime "k8s.io/apimachinery/pkg/runtime"
v1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
// RegisterDefaults adds defaulting functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&v1beta1.DaemonSet{}, func(obj interface{}) { SetObjectDefaults_DaemonSet(obj.(*v1beta1.DaemonSet)) })
scheme.AddTypeDefaultingFunc(&v1beta1.DaemonSetList{}, func(obj interface{}) { SetObjectDefaults_DaemonSetList(obj.(*v1beta1.DaemonSetList)) })
scheme.AddTypeDefaultingFunc(&v1beta1.Deployment{}, func(obj interface{}) { SetObjectDefaults_Deployment(obj.(*v1beta1.Deployment)) })
scheme.AddTypeDefaultingFunc(&v1beta1.DeploymentList{}, func(obj interface{}) { SetObjectDefaults_DeploymentList(obj.(*v1beta1.DeploymentList)) })
scheme.AddTypeDefaultingFunc(&v1beta1.NetworkPolicy{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicy(obj.(*v1beta1.NetworkPolicy)) })
scheme.AddTypeDefaultingFunc(&v1beta1.NetworkPolicyList{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicyList(obj.(*v1beta1.NetworkPolicyList)) })
scheme.AddTypeDefaultingFunc(&v1beta1.PodSecurityPolicy{}, func(obj interface{}) { SetObjectDefaults_PodSecurityPolicy(obj.(*v1beta1.PodSecurityPolicy)) })
scheme.AddTypeDefaultingFunc(&v1beta1.PodSecurityPolicyList{}, func(obj interface{}) { SetObjectDefaults_PodSecurityPolicyList(obj.(*v1beta1.PodSecurityPolicyList)) })
scheme.AddTypeDefaultingFunc(&v1beta1.ReplicaSet{}, func(obj interface{}) { SetObjectDefaults_ReplicaSet(obj.(*v1beta1.ReplicaSet)) })
scheme.AddTypeDefaultingFunc(&v1beta1.ReplicaSetList{}, func(obj interface{}) { SetObjectDefaults_ReplicaSetList(obj.(*v1beta1.ReplicaSetList)) })
return nil
}
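// Illustrative sketch, not generated code: the covering defaulters below are
// normally reached through a scheme populated by RegisterDefaults, but each
// one can also be invoked directly, e.g.
//
//	d := &v1beta1.Deployment{}
//	SetObjectDefaults_Deployment(d)
//
// which runs SetDefaults_Deployment and then the nested volume, container,
// and probe defaulters across the pod template.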
func SetObjectDefaults_DaemonSet(in *v1beta1.DaemonSet) {
SetDefaults_DaemonSet(in)
v1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
for i := range in.Spec.Template.Spec.Volumes {
a := &in.Spec.Template.Spec.Volumes[i]
v1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
v1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI)
}
if a.VolumeSource.RBD != nil {
v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD)
}
if a.VolumeSource.DownwardAPI != nil {
v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
v1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk)
}
if a.VolumeSource.Projected != nil {
v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
v1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
}
}
for i := range in.Spec.Template.Spec.InitContainers {
a := &in.Spec.Template.Spec.InitContainers[i]
v1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
v1.SetDefaults_ContainerPort(b)
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
}
}
v1.SetDefaults_ResourceList(&a.Resources.Limits)
v1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
v1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.Handler.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
}
}
if a.ReadinessProbe != nil {
v1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.Handler.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.Containers {
a := &in.Spec.Template.Spec.Containers[i]
v1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
v1.SetDefaults_ContainerPort(b)
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
}
}
v1.SetDefaults_ResourceList(&a.Resources.Limits)
v1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
v1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.Handler.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
}
}
if a.ReadinessProbe != nil {
v1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.Handler.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
}
func SetObjectDefaults_DaemonSetList(in *v1beta1.DaemonSetList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_DaemonSet(a)
}
}
func SetObjectDefaults_Deployment(in *v1beta1.Deployment) {
SetDefaults_Deployment(in)
v1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
for i := range in.Spec.Template.Spec.Volumes {
a := &in.Spec.Template.Spec.Volumes[i]
v1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
v1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI)
}
if a.VolumeSource.RBD != nil {
v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD)
}
if a.VolumeSource.DownwardAPI != nil {
v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
v1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk)
}
if a.VolumeSource.Projected != nil {
v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
v1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
}
}
for i := range in.Spec.Template.Spec.InitContainers {
a := &in.Spec.Template.Spec.InitContainers[i]
v1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
v1.SetDefaults_ContainerPort(b)
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
}
}
v1.SetDefaults_ResourceList(&a.Resources.Limits)
v1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
v1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.Handler.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
}
}
if a.ReadinessProbe != nil {
v1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.Handler.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.Containers {
a := &in.Spec.Template.Spec.Containers[i]
v1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
v1.SetDefaults_ContainerPort(b)
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
}
}
v1.SetDefaults_ResourceList(&a.Resources.Limits)
v1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
v1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.Handler.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
}
}
if a.ReadinessProbe != nil {
v1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.Handler.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
}
func SetObjectDefaults_DeploymentList(in *v1beta1.DeploymentList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_Deployment(a)
}
}
func SetObjectDefaults_NetworkPolicy(in *v1beta1.NetworkPolicy) {
SetDefaults_NetworkPolicy(in)
}
func SetObjectDefaults_NetworkPolicyList(in *v1beta1.NetworkPolicyList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_NetworkPolicy(a)
}
}
func SetObjectDefaults_PodSecurityPolicy(in *v1beta1.PodSecurityPolicy) {
SetDefaults_PodSecurityPolicySpec(&in.Spec)
}
func SetObjectDefaults_PodSecurityPolicyList(in *v1beta1.PodSecurityPolicyList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_PodSecurityPolicy(a)
}
}
func SetObjectDefaults_ReplicaSet(in *v1beta1.ReplicaSet) {
SetDefaults_ReplicaSet(in)
v1.SetDefaults_PodSpec(&in.Spec.Template.Spec)
for i := range in.Spec.Template.Spec.Volumes {
a := &in.Spec.Template.Spec.Volumes[i]
v1.SetDefaults_Volume(a)
if a.VolumeSource.HostPath != nil {
v1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath)
}
if a.VolumeSource.Secret != nil {
v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI)
}
if a.VolumeSource.RBD != nil {
v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD)
}
if a.VolumeSource.DownwardAPI != nil {
v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
for j := range a.VolumeSource.DownwardAPI.Items {
b := &a.VolumeSource.DownwardAPI.Items[j]
if b.FieldRef != nil {
v1.SetDefaults_ObjectFieldSelector(b.FieldRef)
}
}
}
if a.VolumeSource.ConfigMap != nil {
v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk)
}
if a.VolumeSource.Projected != nil {
v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
for j := range a.VolumeSource.Projected.Sources {
b := &a.VolumeSource.Projected.Sources[j]
if b.DownwardAPI != nil {
for k := range b.DownwardAPI.Items {
c := &b.DownwardAPI.Items[k]
if c.FieldRef != nil {
v1.SetDefaults_ObjectFieldSelector(c.FieldRef)
}
}
}
if b.ServiceAccountToken != nil {
v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken)
}
}
}
if a.VolumeSource.ScaleIO != nil {
v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
}
}
for i := range in.Spec.Template.Spec.InitContainers {
a := &in.Spec.Template.Spec.InitContainers[i]
v1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
v1.SetDefaults_ContainerPort(b)
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
}
}
v1.SetDefaults_ResourceList(&a.Resources.Limits)
v1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
v1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.Handler.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
}
}
if a.ReadinessProbe != nil {
v1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.Handler.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
for i := range in.Spec.Template.Spec.Containers {
a := &in.Spec.Template.Spec.Containers[i]
v1.SetDefaults_Container(a)
for j := range a.Ports {
b := &a.Ports[j]
v1.SetDefaults_ContainerPort(b)
}
for j := range a.Env {
b := &a.Env[j]
if b.ValueFrom != nil {
if b.ValueFrom.FieldRef != nil {
v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef)
}
}
}
v1.SetDefaults_ResourceList(&a.Resources.Limits)
v1.SetDefaults_ResourceList(&a.Resources.Requests)
if a.LivenessProbe != nil {
v1.SetDefaults_Probe(a.LivenessProbe)
if a.LivenessProbe.Handler.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
}
}
if a.ReadinessProbe != nil {
v1.SetDefaults_Probe(a.ReadinessProbe)
if a.ReadinessProbe.Handler.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
}
}
if a.Lifecycle != nil {
if a.Lifecycle.PostStart != nil {
if a.Lifecycle.PostStart.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet)
}
}
if a.Lifecycle.PreStop != nil {
if a.Lifecycle.PreStop.HTTPGet != nil {
v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet)
}
}
}
}
}
func SetObjectDefaults_ReplicaSetList(in *v1beta1.ReplicaSetList) {
for i := range in.Items {
a := &in.Items[i]
SetObjectDefaults_ReplicaSet(a)
}
}

View File

@@ -0,0 +1,52 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["validation.go"],
importpath = "k8s.io/kubernetes/pkg/apis/extensions/validation",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/validation:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["validation_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -0,0 +1,608 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"net"
"regexp"
"strconv"
"strings"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
api "k8s.io/kubernetes/pkg/apis/core"
apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/apis/extensions"
)
// ValidateDaemonSet tests if required fields in the DaemonSet are set.
func ValidateDaemonSet(ds *extensions.DaemonSet) field.ErrorList {
allErrs := apivalidation.ValidateObjectMeta(&ds.ObjectMeta, true, ValidateDaemonSetName, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"))...)
return allErrs
}
// ValidateDaemonSetUpdate tests if required fields in the DaemonSet are set.
func ValidateDaemonSetUpdate(ds, oldDS *extensions.DaemonSet) field.ErrorList {
allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateDaemonSetSpecUpdate(&ds.Spec, &oldDS.Spec, field.NewPath("spec"))...)
allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"))...)
return allErrs
}
func ValidateDaemonSetSpecUpdate(newSpec, oldSpec *extensions.DaemonSetSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// TemplateGeneration shouldn't be decremented
if newSpec.TemplateGeneration < oldSpec.TemplateGeneration {
allErrs = append(allErrs, field.Invalid(fldPath.Child("templateGeneration"), newSpec.TemplateGeneration, "must not be decremented"))
}
// TemplateGeneration should be increased when and only when template is changed
templateUpdated := !apiequality.Semantic.DeepEqual(newSpec.Template, oldSpec.Template)
if newSpec.TemplateGeneration == oldSpec.TemplateGeneration && templateUpdated {
allErrs = append(allErrs, field.Invalid(fldPath.Child("templateGeneration"), newSpec.TemplateGeneration, "must be incremented upon template update"))
} else if newSpec.TemplateGeneration > oldSpec.TemplateGeneration && !templateUpdated {
allErrs = append(allErrs, field.Invalid(fldPath.Child("templateGeneration"), newSpec.TemplateGeneration, "must not be incremented without template update"))
}
return allErrs
}
// validateDaemonSetStatus validates a DaemonSetStatus
func validateDaemonSetStatus(status *extensions.DaemonSetStatus, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentNumberScheduled), fldPath.Child("currentNumberScheduled"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberMisscheduled), fldPath.Child("numberMisscheduled"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredNumberScheduled), fldPath.Child("desiredNumberScheduled"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberReady), fldPath.Child("numberReady"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(status.ObservedGeneration, fldPath.Child("observedGeneration"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedNumberScheduled), fldPath.Child("updatedNumberScheduled"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberAvailable), fldPath.Child("numberAvailable"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberUnavailable), fldPath.Child("numberUnavailable"))...)
if status.CollisionCount != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.CollisionCount), fldPath.Child("collisionCount"))...)
}
return allErrs
}
// ValidateDaemonSetStatusUpdate tests if required fields in the DaemonSet Status section are set.
func ValidateDaemonSetStatusUpdate(ds, oldDS *extensions.DaemonSet) field.ErrorList {
allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, validateDaemonSetStatus(&ds.Status, field.NewPath("status"))...)
if apivalidation.IsDecremented(ds.Status.CollisionCount, oldDS.Status.CollisionCount) {
value := int32(0)
if ds.Status.CollisionCount != nil {
value = *ds.Status.CollisionCount
}
allErrs = append(allErrs, field.Invalid(field.NewPath("status").Child("collisionCount"), value, "cannot be decremented"))
}
return allErrs
}
// ValidateDaemonSetSpec tests if required fields in the DaemonSetSpec are set.
func ValidateDaemonSetSpec(spec *extensions.DaemonSetSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
if err == nil && !selector.Matches(labels.Set(spec.Template.Labels)) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "metadata", "labels"), spec.Template.Labels, "`selector` does not match template `labels`"))
}
if spec.Selector != nil && len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for daemonset."))
}
allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(&spec.Template, fldPath.Child("template"))...)
// Daemons typically run on more than one node, so mark Read-Write persistent disks as invalid.
allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(spec.Template.Spec.Volumes, fldPath.Child("template", "spec", "volumes"))...)
// RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))
}
if spec.Template.Spec.ActiveDeadlineSeconds != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "spec", "activeDeadlineSeconds"), spec.Template.Spec.ActiveDeadlineSeconds, "must not be specified"))
}
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.TemplateGeneration), fldPath.Child("templateGeneration"))...)
allErrs = append(allErrs, ValidateDaemonSetUpdateStrategy(&spec.UpdateStrategy, fldPath.Child("updateStrategy"))...)
if spec.RevisionHistoryLimit != nil {
// zero is a valid RevisionHistoryLimit
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.RevisionHistoryLimit), fldPath.Child("revisionHistoryLimit"))...)
}
return allErrs
}
func ValidateRollingUpdateDaemonSet(rollingUpdate *extensions.RollingUpdateDaemonSet, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
if getIntOrPercentValue(rollingUpdate.MaxUnavailable) == 0 {
// MaxUnavailable cannot be 0.
allErrs = append(allErrs, field.Invalid(fldPath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable, "cannot be 0"))
}
// Validate that MaxUnavailable is not more than 100%.
allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
return allErrs
}
func ValidateDaemonSetUpdateStrategy(strategy *extensions.DaemonSetUpdateStrategy, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
switch strategy.Type {
case extensions.OnDeleteDaemonSetStrategyType:
case extensions.RollingUpdateDaemonSetStrategyType:
// Make sure RollingUpdate field isn't nil.
if strategy.RollingUpdate == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("rollingUpdate"), ""))
return allErrs
}
allErrs = append(allErrs, ValidateRollingUpdateDaemonSet(strategy.RollingUpdate, fldPath.Child("rollingUpdate"))...)
default:
validValues := []string{string(extensions.RollingUpdateDaemonSetStrategyType), string(extensions.OnDeleteDaemonSetStrategyType)}
allErrs = append(allErrs, field.NotSupported(fldPath, strategy, validValues))
}
return allErrs
}
// ValidateDaemonSetName can be used to check whether the given daemon set name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateDaemonSetName = apivalidation.NameIsDNSSubdomain
// ValidateDeploymentName validates that the given name can be used as a deployment name.
var ValidateDeploymentName = apivalidation.NameIsDNSSubdomain
func ValidatePositiveIntOrPercent(intOrPercent intstr.IntOrString, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
switch intOrPercent.Type {
case intstr.String:
for _, msg := range validation.IsValidPercent(intOrPercent.StrVal) {
allErrs = append(allErrs, field.Invalid(fldPath, intOrPercent, msg))
}
case intstr.Int:
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(intOrPercent.IntValue()), fldPath)...)
default:
allErrs = append(allErrs, field.Invalid(fldPath, intOrPercent, "must be an integer or percentage (e.g '5%%')"))
}
return allErrs
}
func getPercentValue(intOrStringValue intstr.IntOrString) (int, bool) {
if intOrStringValue.Type != intstr.String {
return 0, false
}
if len(validation.IsValidPercent(intOrStringValue.StrVal)) != 0 {
return 0, false
}
value, _ := strconv.Atoi(intOrStringValue.StrVal[:len(intOrStringValue.StrVal)-1])
return value, true
}
func getIntOrPercentValue(intOrStringValue intstr.IntOrString) int {
value, isPercent := getPercentValue(intOrStringValue)
if isPercent {
return value
}
return intOrStringValue.IntValue()
}
func IsNotMoreThan100Percent(intOrStringValue intstr.IntOrString, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
value, isPercent := getPercentValue(intOrStringValue)
if !isPercent || value <= 100 {
return nil
}
allErrs = append(allErrs, field.Invalid(fldPath, intOrStringValue, "must not be greater than 100%"))
return allErrs
}
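// exampleMaxUnavailablePercentCheck is an illustrative sketch, not part of
// the upstream validation code: it shows how the helpers above compose. The
// string "150%" is accepted by ValidatePositiveIntOrPercent as a well-formed
// percentage but rejected by IsNotMoreThan100Percent.
func exampleMaxUnavailablePercentCheck() field.ErrorList {
fldPath := field.NewPath("maxUnavailable")
val := intstr.FromString("150%")
allErrs := ValidatePositiveIntOrPercent(val, fldPath)
allErrs = append(allErrs, IsNotMoreThan100Percent(val, fldPath)...)
return allErrs
}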
func ValidateRollingUpdateDeployment(rollingUpdate *extensions.RollingUpdateDeployment, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxSurge, fldPath.Child("maxSurge"))...)
if getIntOrPercentValue(rollingUpdate.MaxUnavailable) == 0 && getIntOrPercentValue(rollingUpdate.MaxSurge) == 0 {
// Both MaxSurge and MaxUnavailable cannot be zero.
allErrs = append(allErrs, field.Invalid(fldPath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable, "may not be 0 when `maxSurge` is 0"))
}
// Validate that MaxUnavailable is not more than 100%.
allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...)
return allErrs
}
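// ValidateDeploymentStrategy validates the deployment strategy type, forbidding
// rollingUpdate parameters for Recreate and requiring them for RollingUpdate.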
func ValidateDeploymentStrategy(strategy *extensions.DeploymentStrategy, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
switch strategy.Type {
case extensions.RecreateDeploymentStrategyType:
if strategy.RollingUpdate != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("rollingUpdate"), "may not be specified when strategy `type` is '"+string(extensions.RecreateDeploymentStrategyType+"'")))
}
case extensions.RollingUpdateDeploymentStrategyType:
// This should never happen since it's set and checked in defaults.go
if strategy.RollingUpdate == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("rollingUpdate"), "this should be defaulted and never be nil"))
} else {
allErrs = append(allErrs, ValidateRollingUpdateDeployment(strategy.RollingUpdate, fldPath.Child("rollingUpdate"))...)
}
default:
validValues := []string{string(extensions.RecreateDeploymentStrategyType), string(extensions.RollingUpdateDeploymentStrategyType)}
allErrs = append(allErrs, field.NotSupported(fldPath, strategy, validValues))
}
return allErrs
}
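// ValidateRollback validates a rollback configuration; the requested revision must be
// non-negative.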
func ValidateRollback(rollback *extensions.RollbackConfig, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
v := rollback.Revision
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(v), fldPath.Child("version"))...)
return allErrs
}
// ValidateDeploymentSpec validates the given deployment spec.
func ValidateDeploymentSpec(spec *extensions.DeploymentSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
if spec.Selector == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("selector"), ""))
} else {
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for deployment."))
}
}
selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector."))
} else {
allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"))...)
}
allErrs = append(allErrs, ValidateDeploymentStrategy(&spec.Strategy, fldPath.Child("strategy"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
if spec.RevisionHistoryLimit != nil {
// zero is a valid RevisionHistoryLimit
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.RevisionHistoryLimit), fldPath.Child("revisionHistoryLimit"))...)
}
if spec.RollbackTo != nil {
allErrs = append(allErrs, ValidateRollback(spec.RollbackTo, fldPath.Child("rollback"))...)
}
if spec.ProgressDeadlineSeconds != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.ProgressDeadlineSeconds), fldPath.Child("progressDeadlineSeconds"))...)
if *spec.ProgressDeadlineSeconds <= spec.MinReadySeconds {
allErrs = append(allErrs, field.Invalid(fldPath.Child("progressDeadlineSeconds"), spec.ProgressDeadlineSeconds, "must be greater than minReadySeconds."))
}
}
return allErrs
}
// ValidateDeploymentStatus validates the given deployment status.
func ValidateDeploymentStatus(status *extensions.DeploymentStatus, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(status.ObservedGeneration, fldPath.Child("observedGeneration"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Replicas), fldPath.Child("replicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedReplicas), fldPath.Child("updatedReplicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ReadyReplicas), fldPath.Child("readyReplicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.AvailableReplicas), fldPath.Child("availableReplicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UnavailableReplicas), fldPath.Child("unavailableReplicas"))...)
if status.CollisionCount != nil {
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*status.CollisionCount), fldPath.Child("collisionCount"))...)
}
msg := "cannot be greater than status.replicas"
if status.UpdatedReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(fldPath.Child("updatedReplicas"), status.UpdatedReplicas, msg))
}
if status.ReadyReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(fldPath.Child("readyReplicas"), status.ReadyReplicas, msg))
}
if status.AvailableReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, msg))
}
if status.AvailableReplicas > status.ReadyReplicas {
allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, "cannot be greater than readyReplicas"))
}
return allErrs
}
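// ValidateDeploymentUpdate validates an update to a Deployment's metadata and spec.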
func ValidateDeploymentUpdate(update, old *extensions.Deployment) field.ErrorList {
allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateDeploymentSpec(&update.Spec, field.NewPath("spec"))...)
return allErrs
}
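// ValidateDeploymentStatusUpdate validates an update to a Deployment's status, including
// that collisionCount is never decremented.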
func ValidateDeploymentStatusUpdate(update, old *extensions.Deployment) field.ErrorList {
allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))
fldPath := field.NewPath("status")
allErrs = append(allErrs, ValidateDeploymentStatus(&update.Status, fldPath)...)
if apivalidation.IsDecremented(update.Status.CollisionCount, old.Status.CollisionCount) {
value := int32(0)
if update.Status.CollisionCount != nil {
value = *update.Status.CollisionCount
}
allErrs = append(allErrs, field.Invalid(fldPath.Child("collisionCount"), value, "cannot be decremented"))
}
return allErrs
}
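// ValidateDeployment validates a newly constructed Deployment, covering its metadata
// and spec.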
func ValidateDeployment(obj *extensions.Deployment) field.ErrorList {
allErrs := apivalidation.ValidateObjectMeta(&obj.ObjectMeta, true, ValidateDeploymentName, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateDeploymentSpec(&obj.Spec, field.NewPath("spec"))...)
return allErrs
}
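// ValidateDeploymentRollback validates a DeploymentRollback request: its updated
// annotations, the target deployment name, and the rollback configuration.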
func ValidateDeploymentRollback(obj *extensions.DeploymentRollback) field.ErrorList {
allErrs := apivalidation.ValidateAnnotations(obj.UpdatedAnnotations, field.NewPath("updatedAnnotations"))
if len(obj.Name) == 0 {
allErrs = append(allErrs, field.Required(field.NewPath("name"), "name is required"))
}
allErrs = append(allErrs, ValidateRollback(&obj.RollbackTo, field.NewPath("rollback"))...)
return allErrs
}
// ValidateIngress tests if required fields in the Ingress are set.
func ValidateIngress(ingress *extensions.Ingress) field.ErrorList {
allErrs := apivalidation.ValidateObjectMeta(&ingress.ObjectMeta, true, ValidateIngressName, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateIngressSpec(&ingress.Spec, field.NewPath("spec"))...)
return allErrs
}
// ValidateIngressName validates that the given name can be used as an Ingress name.
var ValidateIngressName = apivalidation.NameIsDNSSubdomain
func validateIngressTLS(spec *extensions.IngressSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// TODO: Perform a more thorough validation of spec.TLS.Hosts that takes
// the wildcard spec from RFC 6125 into account.
for _, itls := range spec.TLS {
for i, host := range itls.Hosts {
if strings.Contains(host, "*") {
for _, msg := range validation.IsWildcardDNS1123Subdomain(host) {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("hosts"), host, msg))
}
continue
}
for _, msg := range validation.IsDNS1123Subdomain(host) {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("hosts"), host, msg))
}
}
}
return allErrs
}
// ValidateIngressSpec tests if required fields in the IngressSpec are set.
func ValidateIngressSpec(spec *extensions.IngressSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// TODO: Is a default backend mandatory?
if spec.Backend != nil {
allErrs = append(allErrs, validateIngressBackend(spec.Backend, fldPath.Child("backend"))...)
} else if len(spec.Rules) == 0 {
allErrs = append(allErrs, field.Invalid(fldPath, spec.Rules, "either `backend` or `rules` must be specified"))
}
if len(spec.Rules) > 0 {
allErrs = append(allErrs, validateIngressRules(spec.Rules, fldPath.Child("rules"))...)
}
if len(spec.TLS) > 0 {
allErrs = append(allErrs, validateIngressTLS(spec, fldPath.Child("tls"))...)
}
return allErrs
}
// ValidateIngressUpdate tests if required fields in the Ingress are set.
func ValidateIngressUpdate(ingress, oldIngress *extensions.Ingress) field.ErrorList {
allErrs := apivalidation.ValidateObjectMetaUpdate(&ingress.ObjectMeta, &oldIngress.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateIngressSpec(&ingress.Spec, field.NewPath("spec"))...)
return allErrs
}
// ValidateIngressStatusUpdate tests if required fields in the Ingress are set when updating status.
func ValidateIngressStatusUpdate(ingress, oldIngress *extensions.Ingress) field.ErrorList {
allErrs := apivalidation.ValidateObjectMetaUpdate(&ingress.ObjectMeta, &oldIngress.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, apivalidation.ValidateLoadBalancerStatus(&ingress.Status.LoadBalancer, field.NewPath("status", "loadBalancer"))...)
return allErrs
}
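// validateIngressRules validates a list of ingress rules: hosts must be (optionally
// wildcard) DNS subdomains rather than IP addresses, and each rule value must be valid.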
func validateIngressRules(ingressRules []extensions.IngressRule, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(ingressRules) == 0 {
return append(allErrs, field.Required(fldPath, ""))
}
for i, ih := range ingressRules {
if len(ih.Host) > 0 {
if isIP := (net.ParseIP(ih.Host) != nil); isIP {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, "must be a DNS name, not an IP address"))
}
// TODO: Ports and ips are allowed in the host part of a url
// according to RFC 3986, consider allowing them.
if strings.Contains(ih.Host, "*") {
for _, msg := range validation.IsWildcardDNS1123Subdomain(ih.Host) {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, msg))
}
continue
}
for _, msg := range validation.IsDNS1123Subdomain(ih.Host) {
allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, msg))
}
}
allErrs = append(allErrs, validateIngressRuleValue(&ih.IngressRuleValue, fldPath.Index(i))...)
}
return allErrs
}
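// validateIngressRuleValue validates the HTTP rule value of an ingress rule, if present.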
func validateIngressRuleValue(ingressRule *extensions.IngressRuleValue, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if ingressRule.HTTP != nil {
allErrs = append(allErrs, validateHTTPIngressRuleValue(ingressRule.HTTP, fldPath.Child("http"))...)
}
return allErrs
}
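// validateHTTPIngressRuleValue validates the paths of an HTTP ingress rule: each path
// must be absolute, compile as a POSIX regular expression, and reference a valid backend.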
func validateHTTPIngressRuleValue(httpIngressRuleValue *extensions.HTTPIngressRuleValue, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(httpIngressRuleValue.Paths) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("paths"), ""))
}
for i, rule := range httpIngressRuleValue.Paths {
if len(rule.Path) > 0 {
if !strings.HasPrefix(rule.Path, "/") {
allErrs = append(allErrs, field.Invalid(fldPath.Child("paths").Index(i).Child("path"), rule.Path, "must be an absolute path"))
}
// TODO: More draconian path regex validation.
// Path must be a valid regex. This is the basic requirement.
// In addition to this any characters not allowed in a path per
// RFC 3986 section-3.3 cannot appear as a literal in the regex.
// Consider the example: http://host/valid?#bar, everything after
// the last '/' is a valid regex that matches valid#bar, which
// isn't a valid path, because the path terminates at the first ?
// or #. A more sophisticated form of validation would detect that
// the user is confusing url regexes with path regexes.
_, err := regexp.CompilePOSIX(rule.Path)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("paths").Index(i).Child("path"), rule.Path, "must be a valid regex"))
}
}
allErrs = append(allErrs, validateIngressBackend(&rule.Backend, fldPath.Child("backend"))...)
}
return allErrs
}
// validateIngressBackend tests if a given backend is valid.
func validateIngressBackend(backend *extensions.IngressBackend, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// All backends must reference a single local service by name, and a single service port by name or number.
if len(backend.ServiceName) == 0 {
return append(allErrs, field.Required(fldPath.Child("serviceName"), ""))
} else {
for _, msg := range apivalidation.ValidateServiceName(backend.ServiceName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceName"), backend.ServiceName, msg))
}
}
allErrs = append(allErrs, apivalidation.ValidatePortNumOrName(backend.ServicePort, fldPath.Child("servicePort"))...)
return allErrs
}
// ValidateReplicaSetName can be used to check whether the given ReplicaSet
// name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
var ValidateReplicaSetName = apivalidation.NameIsDNSSubdomain
// ValidateReplicaSet tests if required fields in the ReplicaSet are set.
func ValidateReplicaSet(rs *extensions.ReplicaSet) field.ErrorList {
allErrs := apivalidation.ValidateObjectMeta(&rs.ObjectMeta, true, ValidateReplicaSetName, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, field.NewPath("spec"))...)
return allErrs
}
// ValidateReplicaSetUpdate tests if required fields in the ReplicaSet are set.
func ValidateReplicaSetUpdate(rs, oldRs *extensions.ReplicaSet) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...)
allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, field.NewPath("spec"))...)
return allErrs
}
// ValidateReplicaSetStatusUpdate tests if required fields in the ReplicaSet are set.
func ValidateReplicaSetStatusUpdate(rs, oldRs *extensions.ReplicaSet) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...)
allErrs = append(allErrs, ValidateReplicaSetStatus(rs.Status, field.NewPath("status"))...)
return allErrs
}
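// ValidateReplicaSetStatus validates the replica counters of a ReplicaSet status and
// their consistency with status.replicas and status.readyReplicas.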
func ValidateReplicaSetStatus(status extensions.ReplicaSetStatus, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Replicas), fldPath.Child("replicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.FullyLabeledReplicas), fldPath.Child("fullyLabeledReplicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ReadyReplicas), fldPath.Child("readyReplicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.AvailableReplicas), fldPath.Child("availableReplicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.ObservedGeneration), fldPath.Child("observedGeneration"))...)
msg := "cannot be greater than status.replicas"
if status.FullyLabeledReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(fldPath.Child("fullyLabeledReplicas"), status.FullyLabeledReplicas, msg))
}
if status.ReadyReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(fldPath.Child("readyReplicas"), status.ReadyReplicas, msg))
}
if status.AvailableReplicas > status.Replicas {
allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, msg))
}
if status.AvailableReplicas > status.ReadyReplicas {
allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, "cannot be greater than readyReplicas"))
}
return allErrs
}
// ValidateReplicaSetSpec tests if required fields in the ReplicaSet spec are set.
func ValidateReplicaSetSpec(spec *extensions.ReplicaSetSpec, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
if spec.Selector == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("selector"), ""))
} else {
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for replica set."))
}
}
selector, err := metav1.LabelSelectorAsSelector(spec.Selector)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector."))
} else {
allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"))...)
}
return allErrs
}
// ValidatePodTemplateSpecForReplicaSet validates the given template and ensures that it is in accordance with the desired selector and replicas.
func ValidatePodTemplateSpecForReplicaSet(template *api.PodTemplateSpec, selector labels.Selector, replicas int32, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if template == nil {
allErrs = append(allErrs, field.Required(fldPath, ""))
} else {
if !selector.Empty() {
// Verify that the ReplicaSet selector matches the labels in template.
labels := labels.Set(template.Labels)
if !selector.Matches(labels) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`"))
}
}
allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(template, fldPath)...)
if replicas > 1 {
allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...)
}
// RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
if template.Spec.RestartPolicy != api.RestartPolicyAlways {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)}))
}
if template.Spec.ActiveDeadlineSeconds != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("spec", "activeDeadlineSeconds"), template.Spec.ActiveDeadlineSeconds, "must not be specified"))
}
}
return allErrs
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,938 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package extensions
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomMetricCurrentStatus) DeepCopyInto(out *CustomMetricCurrentStatus) {
*out = *in
out.CurrentValue = in.CurrentValue.DeepCopy()
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatus.
func (in *CustomMetricCurrentStatus) DeepCopy() *CustomMetricCurrentStatus {
if in == nil {
return nil
}
out := new(CustomMetricCurrentStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomMetricCurrentStatusList) DeepCopyInto(out *CustomMetricCurrentStatusList) {
*out = *in
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CustomMetricCurrentStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatusList.
func (in *CustomMetricCurrentStatusList) DeepCopy() *CustomMetricCurrentStatusList {
if in == nil {
return nil
}
out := new(CustomMetricCurrentStatusList)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomMetricTarget) DeepCopyInto(out *CustomMetricTarget) {
*out = *in
out.TargetValue = in.TargetValue.DeepCopy()
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTarget.
func (in *CustomMetricTarget) DeepCopy() *CustomMetricTarget {
if in == nil {
return nil
}
out := new(CustomMetricTarget)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CustomMetricTargetList) DeepCopyInto(out *CustomMetricTargetList) {
*out = *in
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CustomMetricTarget, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTargetList.
func (in *CustomMetricTargetList) DeepCopy() *CustomMetricTargetList {
if in == nil {
return nil
}
out := new(CustomMetricTargetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSet) DeepCopyInto(out *DaemonSet) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet.
func (in *DaemonSet) DeepCopy() *DaemonSet {
if in == nil {
return nil
}
out := new(DaemonSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DaemonSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition.
func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition {
if in == nil {
return nil
}
out := new(DaemonSetCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]DaemonSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList.
func (in *DaemonSetList) DeepCopy() *DaemonSetList {
if in == nil {
return nil
}
out := new(DaemonSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DaemonSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
if *in == nil {
*out = nil
} else {
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
in.Template.DeepCopyInto(&out.Template)
in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
if in.RevisionHistoryLimit != nil {
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec.
func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec {
if in == nil {
return nil
}
out := new(DaemonSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) {
*out = *in
if in.CollisionCount != nil {
in, out := &in.CollisionCount, &out.CollisionCount
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]DaemonSetCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus.
func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus {
if in == nil {
return nil
}
out := new(DaemonSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) {
*out = *in
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
if *in == nil {
*out = nil
} else {
*out = new(RollingUpdateDaemonSet)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy.
func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy {
if in == nil {
return nil
}
out := new(DaemonSetUpdateStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Deployment) DeepCopyInto(out *Deployment) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment.
func (in *Deployment) DeepCopy() *Deployment {
if in == nil {
return nil
}
out := new(Deployment)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Deployment) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) {
*out = *in
in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition.
func (in *DeploymentCondition) DeepCopy() *DeploymentCondition {
if in == nil {
return nil
}
out := new(DeploymentCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentList) DeepCopyInto(out *DeploymentList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Deployment, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList.
func (in *DeploymentList) DeepCopy() *DeploymentList {
if in == nil {
return nil
}
out := new(DeploymentList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeploymentList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.UpdatedAnnotations != nil {
in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.RollbackTo = in.RollbackTo
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback.
func (in *DeploymentRollback) DeepCopy() *DeploymentRollback {
if in == nil {
return nil
}
out := new(DeploymentRollback)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeploymentRollback) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
if *in == nil {
*out = nil
} else {
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
in.Template.DeepCopyInto(&out.Template)
in.Strategy.DeepCopyInto(&out.Strategy)
if in.RevisionHistoryLimit != nil {
in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
if in.RollbackTo != nil {
in, out := &in.RollbackTo, &out.RollbackTo
if *in == nil {
*out = nil
} else {
*out = new(RollbackConfig)
**out = **in
}
}
if in.ProgressDeadlineSeconds != nil {
in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec.
func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
if in == nil {
return nil
}
out := new(DeploymentSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]DeploymentCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.CollisionCount != nil {
in, out := &in.CollisionCount, &out.CollisionCount
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus.
func (in *DeploymentStatus) DeepCopy() *DeploymentStatus {
if in == nil {
return nil
}
out := new(DeploymentStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) {
*out = *in
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
if *in == nil {
*out = nil
} else {
*out = new(RollingUpdateDeployment)
**out = **in
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy.
func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy {
if in == nil {
return nil
}
out := new(DeploymentStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) {
*out = *in
out.Backend = in.Backend
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressPath.
func (in *HTTPIngressPath) DeepCopy() *HTTPIngressPath {
if in == nil {
return nil
}
out := new(HTTPIngressPath)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) {
*out = *in
if in.Paths != nil {
in, out := &in.Paths, &out.Paths
*out = make([]HTTPIngressPath, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressRuleValue.
func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue {
if in == nil {
return nil
}
out := new(HTTPIngressRuleValue)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Ingress) DeepCopyInto(out *Ingress) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress.
func (in *Ingress) DeepCopy() *Ingress {
if in == nil {
return nil
}
out := new(Ingress)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Ingress) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressBackend) DeepCopyInto(out *IngressBackend) {
*out = *in
out.ServicePort = in.ServicePort
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressBackend.
func (in *IngressBackend) DeepCopy() *IngressBackend {
if in == nil {
return nil
}
out := new(IngressBackend)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressList) DeepCopyInto(out *IngressList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Ingress, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList.
func (in *IngressList) DeepCopy() *IngressList {
if in == nil {
return nil
}
out := new(IngressList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IngressList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressRule) DeepCopyInto(out *IngressRule) {
*out = *in
in.IngressRuleValue.DeepCopyInto(&out.IngressRuleValue)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule.
func (in *IngressRule) DeepCopy() *IngressRule {
if in == nil {
return nil
}
out := new(IngressRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressRuleValue) DeepCopyInto(out *IngressRuleValue) {
*out = *in
if in.HTTP != nil {
in, out := &in.HTTP, &out.HTTP
if *in == nil {
*out = nil
} else {
*out = new(HTTPIngressRuleValue)
(*in).DeepCopyInto(*out)
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRuleValue.
func (in *IngressRuleValue) DeepCopy() *IngressRuleValue {
if in == nil {
return nil
}
out := new(IngressRuleValue)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressSpec) DeepCopyInto(out *IngressSpec) {
*out = *in
if in.Backend != nil {
in, out := &in.Backend, &out.Backend
if *in == nil {
*out = nil
} else {
*out = new(IngressBackend)
**out = **in
}
}
if in.TLS != nil {
in, out := &in.TLS, &out.TLS
*out = make([]IngressTLS, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]IngressRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec.
func (in *IngressSpec) DeepCopy() *IngressSpec {
if in == nil {
return nil
}
out := new(IngressSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressStatus) DeepCopyInto(out *IngressStatus) {
*out = *in
in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus.
func (in *IngressStatus) DeepCopy() *IngressStatus {
if in == nil {
return nil
}
out := new(IngressStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressTLS) DeepCopyInto(out *IngressTLS) {
*out = *in
if in.Hosts != nil {
in, out := &in.Hosts, &out.Hosts
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressTLS.
func (in *IngressTLS) DeepCopy() *IngressTLS {
if in == nil {
return nil
}
out := new(IngressTLS)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet.
func (in *ReplicaSet) DeepCopy() *ReplicaSet {
if in == nil {
return nil
}
out := new(ReplicaSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ReplicaSet) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition.
func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition {
if in == nil {
return nil
}
out := new(ReplicaSetCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ReplicaSet, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList.
func (in *ReplicaSetList) DeepCopy() *ReplicaSetList {
if in == nil {
return nil
}
out := new(ReplicaSetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ReplicaSetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
if *in == nil {
*out = nil
} else {
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
}
in.Template.DeepCopyInto(&out.Template)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec.
func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
if in == nil {
return nil
}
out := new(ReplicaSetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]ReplicaSetCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus.
func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus {
if in == nil {
return nil
}
out := new(ReplicaSetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicationControllerDummy) DeepCopyInto(out *ReplicationControllerDummy) {
*out = *in
out.TypeMeta = in.TypeMeta
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerDummy.
func (in *ReplicationControllerDummy) DeepCopy() *ReplicationControllerDummy {
if in == nil {
return nil
}
out := new(ReplicationControllerDummy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ReplicationControllerDummy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig.
func (in *RollbackConfig) DeepCopy() *RollbackConfig {
if in == nil {
return nil
}
out := new(RollbackConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) {
*out = *in
out.MaxUnavailable = in.MaxUnavailable
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet.
func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet {
if in == nil {
return nil
}
out := new(RollingUpdateDaemonSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) {
*out = *in
out.MaxUnavailable = in.MaxUnavailable
out.MaxSurge = in.MaxSurge
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment.
func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment {
if in == nil {
return nil
}
out := new(RollingUpdateDeployment)
in.DeepCopyInto(out)
return out
}