Bumping k8s dependencies to 1.13
vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/autoscaling.go (generated, vendored): 50 lines changed
@@ -21,19 +21,22 @@ limitations under the License.
 package app
 
 import (
+    "net/http"
+
     "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/client-go/dynamic"
     "k8s.io/client-go/scale"
     "k8s.io/kubernetes/pkg/controller/podautoscaler"
     "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
-    resourceclient "k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1"
+
+    resourceclient "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
     "k8s.io/metrics/pkg/client/custom_metrics"
     "k8s.io/metrics/pkg/client/external_metrics"
 )
 
-func startHPAController(ctx ControllerContext) (bool, error) {
+func startHPAController(ctx ControllerContext) (http.Handler, bool, error) {
     if !ctx.AvailableResources[schema.GroupVersionResource{Group: "autoscaling", Version: "v1", Resource: "horizontalpodautoscalers"}] {
-        return false, nil
+        return nil, false, nil
     }
 
     if ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerUseRESTClients {
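The signature change above follows the 1.13 kube-controller-manager convention that each controller init function returns an http.Handler for debugging endpoints in addition to the enabled flag and error; the HPA controller has nothing to expose, so it returns nil. A minimal, self-contained sketch of how a manager loop consumes that shape (controllerContext, initFunc, and the mux path are stand-ins for this example, not the vendored types):

package controllers

import (
    "fmt"
    "net/http"
)

// Stand-in for kube-controller-manager's ControllerContext; the real struct
// carries client builders, informers, and component config.
type controllerContext struct{}

// Mirrors the 1.13 init-function shape: an optional debugging handler, an
// "enabled" flag, and an error.
type initFunc func(ctx controllerContext) (http.Handler, bool, error)

// startControllers shows why the extra return value exists: controllers that
// expose debug state hand back a handler for the manager to mount on a mux.
func startControllers(ctx controllerContext, controllers map[string]initFunc, mux *http.ServeMux) error {
    for name, start := range controllers {
        handler, enabled, err := start(ctx)
        if err != nil {
            return fmt.Errorf("error starting %q: %v", name, err)
        }
        if !enabled {
            // backing API group is unavailable on this cluster
            // (the "return nil, false, nil" case above); skip quietly
            continue
        }
        if handler != nil {
            mux.Handle("/debug/controllers/"+name+"/", handler)
        }
    }
    return nil
}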
@@ -44,17 +47,27 @@ func startHPAController(ctx ControllerContext) (bool, error) {
     return startHPAControllerWithLegacyClient(ctx)
 }
 
-func startHPAControllerWithRESTClient(ctx ControllerContext) (bool, error) {
+func startHPAControllerWithRESTClient(ctx ControllerContext) (http.Handler, bool, error) {
     clientConfig := ctx.ClientBuilder.ConfigOrDie("horizontal-pod-autoscaler")
+    hpaClient := ctx.ClientBuilder.ClientOrDie("horizontal-pod-autoscaler")
+
+    apiVersionsGetter := custom_metrics.NewAvailableAPIsGetter(hpaClient.Discovery())
+    // invalidate the discovery information roughly once per resync interval our API
+    // information is *at most* two resync intervals old.
+    go custom_metrics.PeriodicallyInvalidate(
+        apiVersionsGetter,
+        ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerSyncPeriod.Duration,
+        ctx.Stop)
+
     metricsClient := metrics.NewRESTMetricsClient(
         resourceclient.NewForConfigOrDie(clientConfig),
-        custom_metrics.NewForConfigOrDie(clientConfig),
+        custom_metrics.NewForConfig(clientConfig, ctx.RESTMapper, apiVersionsGetter),
         external_metrics.NewForConfigOrDie(clientConfig),
     )
     return startHPAControllerWithMetricsClient(ctx, metricsClient)
 }
 
-func startHPAControllerWithLegacyClient(ctx ControllerContext) (bool, error) {
+func startHPAControllerWithLegacyClient(ctx ControllerContext) (http.Handler, bool, error) {
     hpaClient := ctx.ClientBuilder.ClientOrDie("horizontal-pod-autoscaler")
     metricsClient := metrics.NewHeapsterMetricsClient(
         hpaClient,
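The main functional change in this hunk: the custom metrics client is no longer built with custom_metrics.NewForConfigOrDie, which was pinned to a single API version, but with custom_metrics.NewForConfig plus an AvailableAPIsGetter that discovers, caches, and (via PeriodicallyInvalidate) periodically refreshes which custom.metrics.k8s.io versions the cluster serves. A rough standalone sketch of the same wiring, assuming a kubeconfig at the default path and the 1.13-era k8s.io/metrics and client-go APIs; the 30-second interval stands in for the controller's resync period:

package main

import (
    "time"

    "k8s.io/client-go/discovery"
    "k8s.io/client-go/restmapper"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/metrics/pkg/client/custom_metrics"
)

func main() {
    // assumption: kubeconfig at the default ~/.kube/config location
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    dc, err := discovery.NewDiscoveryClientForConfig(config)
    if err != nil {
        panic(err)
    }

    stop := make(chan struct{})
    defer close(stop)

    // caches which custom.metrics.k8s.io versions the cluster serves; the
    // invalidation goroutine keeps that cache at most ~2 intervals stale
    apiVersionsGetter := custom_metrics.NewAvailableAPIsGetter(dc)
    go custom_metrics.PeriodicallyInvalidate(apiVersionsGetter, 30*time.Second, stop)

    // a RESTMapper built from discovery, standing in for ctx.RESTMapper
    groupResources, err := restmapper.GetAPIGroupResources(dc)
    if err != nil {
        panic(err)
    }
    mapper := restmapper.NewDiscoveryRESTMapper(groupResources)

    // unlike NewForConfigOrDie, this client negotiates a served metrics API
    // version per request through the getter
    metricsClient := custom_metrics.NewForConfig(config, mapper, apiVersionsGetter)
    _ = metricsClient // e.g. metricsClient.NamespacedMetrics("default")
}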
@@ -66,34 +79,31 @@ func startHPAControllerWithLegacyClient(ctx ControllerContext) (bool, error) {
     return startHPAControllerWithMetricsClient(ctx, metricsClient)
 }
 
-func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient metrics.MetricsClient) (bool, error) {
-    hpaClientGoClient := ctx.ClientBuilder.ClientGoClientOrDie("horizontal-pod-autoscaler")
+func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient metrics.MetricsClient) (http.Handler, bool, error) {
     hpaClient := ctx.ClientBuilder.ClientOrDie("horizontal-pod-autoscaler")
     hpaClientConfig := ctx.ClientBuilder.ConfigOrDie("horizontal-pod-autoscaler")
 
     // we don't use cached discovery because DiscoveryScaleKindResolver does its own caching,
     // so we want to re-fetch every time when we actually ask for it
-    scaleKindResolver := scale.NewDiscoveryScaleKindResolver(hpaClientGoClient.Discovery())
+    scaleKindResolver := scale.NewDiscoveryScaleKindResolver(hpaClient.Discovery())
     scaleClient, err := scale.NewForConfig(hpaClientConfig, ctx.RESTMapper, dynamic.LegacyAPIPathResolverFunc, scaleKindResolver)
     if err != nil {
-        return false, err
+        return nil, false, err
     }
 
-    replicaCalc := podautoscaler.NewReplicaCalculator(
-        metricsClient,
-        hpaClient.CoreV1(),
-        ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerTolerance,
-    )
     go podautoscaler.NewHorizontalController(
-        hpaClientGoClient.CoreV1(),
+        hpaClient.CoreV1(),
         scaleClient,
         hpaClient.AutoscalingV1(),
         ctx.RESTMapper,
-        replicaCalc,
+        metricsClient,
         ctx.InformerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
+        ctx.InformerFactory.Core().V1().Pods(),
         ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerSyncPeriod.Duration,
-        ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration,
-        ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration,
+        ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerDownscaleStabilizationWindow.Duration,
+        ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerTolerance,
+        ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerCPUInitializationPeriod.Duration,
+        ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerInitialReadinessDelay.Duration,
     ).Run(ctx.Stop)
-    return true, nil
+    return nil, true, nil
 }
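Besides the new return values, this hunk removes the separate client-go clientset (ClientGoClientOrDie) now that the main clientset serves everywhere, and drops the explicit NewReplicaCalculator call: in 1.13, NewHorizontalController takes the metrics client and tolerance settings directly and builds the replica calculator internally. The scale-client wiring itself is unchanged and is the piece most reusable outside the controller. A sketch of it standalone, under the same assumptions as above plus a hypothetical Deployment "web" in namespace "default"; the Scales(...).Get signature shown is the 1.13 one, without a context argument:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/discovery"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/restmapper"
    "k8s.io/client-go/scale"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    dc, err := discovery.NewDiscoveryClientForConfig(config)
    if err != nil {
        panic(err)
    }

    // deliberately uncached discovery: DiscoveryScaleKindResolver memoizes
    // per-resource results itself, as the vendored comment explains
    scaleKindResolver := scale.NewDiscoveryScaleKindResolver(dc)

    groupResources, err := restmapper.GetAPIGroupResources(dc)
    if err != nil {
        panic(err)
    }
    mapper := restmapper.NewDiscoveryRESTMapper(groupResources)

    scaleClient, err := scale.NewForConfig(config, mapper, dynamic.LegacyAPIPathResolverFunc, scaleKindResolver)
    if err != nil {
        panic(err)
    }

    // read the /scale subresource the way the HPA does before resizing a target
    gr := schema.GroupResource{Group: "apps", Resource: "deployments"}
    s, err := scaleClient.Scales("default").Get(gr, "web")
    if err != nil {
        panic(err)
    }
    fmt.Printf("current replicas for deployments/web: %d\n", s.Spec.Replicas)
}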