Bumping k8s dependencies to 1.13
This commit is contained in:
48
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD
generated
vendored
48
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD
generated
vendored
@@ -59,6 +59,24 @@ go_library(
|
||||
"//pkg/version:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
"//pkg/volume/util:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/informers:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
|
||||
"//vendor/cloud.google.com/go/compute/metadata:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
|
||||
@@ -71,24 +89,6 @@ go_library(
|
||||
"//vendor/google.golang.org/api/googleapi:go_default_library",
|
||||
"//vendor/google.golang.org/api/tpu/v1:go_default_library",
|
||||
"//vendor/gopkg.in/gcfg.v1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -116,6 +116,12 @@ go_test(
|
||||
"//pkg/cloudprovider/providers/gce/cloud/mock:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//pkg/util/net/sets:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/require:go_default_library",
|
||||
"//vendor/golang.org/x/oauth2/google:go_default_library",
|
||||
@@ -123,12 +129,6 @@ go_test(
|
||||
"//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
|
||||
"//vendor/google.golang.org/api/compute/v1:go_default_library",
|
||||
"//vendor/google.golang.org/api/googleapi:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
1
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/BUILD
generated
vendored
@@ -6,7 +6,6 @@ go_library(
|
||||
"constants.go",
|
||||
"context.go",
|
||||
"doc.go",
|
||||
"errors.go",
|
||||
"gce_projects.go",
|
||||
"gen.go",
|
||||
"op.go",
|
||||
|
48
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/errors.go
generated
vendored
48
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/errors.go
generated
vendored
@@ -1,48 +0,0 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloud
|
||||
|
||||
import "fmt"
|
||||
|
||||
// OperationPollingError occurs when the GCE Operation cannot be retrieved for a prolonged period.
|
||||
type OperationPollingError struct {
|
||||
LastPollError error
|
||||
}
|
||||
|
||||
// Error returns a string representation including the last poll error encountered.
|
||||
func (e *OperationPollingError) Error() string {
|
||||
return fmt.Sprintf("GCE operation polling error: %v", e.LastPollError)
|
||||
}
|
||||
|
||||
// GCEOperationError occurs when the GCE Operation finishes with an error.
|
||||
type GCEOperationError struct {
|
||||
// HTTPStatusCode is the HTTP status code of the final error.
|
||||
// For example, a failed operation may have 400 - BadRequest.
|
||||
HTTPStatusCode int
|
||||
// Code is GCE's code of what went wrong.
|
||||
// For example, RESOURCE_IN_USE_BY_ANOTHER_RESOURCE
|
||||
Code string
|
||||
// Message is a human readable message.
|
||||
// For example, "The network resource 'xxx' is already being used by 'xxx'"
|
||||
Message string
|
||||
}
|
||||
|
||||
// Error returns a string representation including the HTTP Status code, GCE's error code
|
||||
// and a human readable message.
|
||||
func (e *GCEOperationError) Error() string {
|
||||
return fmt.Sprintf("GCE %v - %v: %v", e.HTTPStatusCode, e.Code, e.Message)
|
||||
}
|
1034
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen.go
generated
vendored
1034
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen.go
generated
vendored
File diff suppressed because it is too large
Load Diff
4
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen/main.go
generated
vendored
4
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen/main.go
generated
vendored
@@ -1207,7 +1207,7 @@ func Test{{.Service}}Group(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("Alpha{{.Service}}().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("Beta{{.Service}}().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1227,7 +1227,7 @@ func Test{{.Service}}Group(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("Alpha{{.Service}}().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("{{.Service}}().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
124
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen_test.go
generated
vendored
124
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/gen_test.go
generated
vendored
@@ -127,7 +127,7 @@ func TestAddressesGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("BetaAddresses().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -141,7 +141,7 @@ func TestAddressesGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaAddresses().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("Addresses().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -262,7 +262,7 @@ func TestBackendServicesGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaBackendServices().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("BetaBackendServices().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -276,7 +276,7 @@ func TestBackendServicesGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaBackendServices().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("BackendServices().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -351,7 +351,7 @@ func TestDisksGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaDisks().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("Disks().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -414,7 +414,7 @@ func TestFirewallsGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaFirewalls().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("Firewalls().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -507,7 +507,7 @@ func TestForwardingRulesGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaForwardingRules().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("ForwardingRules().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -576,7 +576,7 @@ func TestGlobalAddressesGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaGlobalAddresses().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("GlobalAddresses().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -639,7 +639,7 @@ func TestGlobalForwardingRulesGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaGlobalForwardingRules().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("GlobalForwardingRules().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -665,6 +665,8 @@ func TestHealthChecksGroup(t *testing.T) {
|
||||
var key *meta.Key
|
||||
keyAlpha := meta.GlobalKey("key-alpha")
|
||||
key = keyAlpha
|
||||
keyBeta := meta.GlobalKey("key-beta")
|
||||
key = keyBeta
|
||||
keyGA := meta.GlobalKey("key-ga")
|
||||
key = keyGA
|
||||
// Ignore unused variables.
|
||||
@@ -674,6 +676,9 @@ func TestHealthChecksGroup(t *testing.T) {
|
||||
if _, err := mock.AlphaHealthChecks().Get(ctx, key); err == nil {
|
||||
t.Errorf("AlphaHealthChecks().Get(%v, %v) = _, nil; want error", ctx, key)
|
||||
}
|
||||
if _, err := mock.BetaHealthChecks().Get(ctx, key); err == nil {
|
||||
t.Errorf("BetaHealthChecks().Get(%v, %v) = _, nil; want error", ctx, key)
|
||||
}
|
||||
if _, err := mock.HealthChecks().Get(ctx, key); err == nil {
|
||||
t.Errorf("HealthChecks().Get(%v, %v) = _, nil; want error", ctx, key)
|
||||
}
|
||||
@@ -685,6 +690,12 @@ func TestHealthChecksGroup(t *testing.T) {
|
||||
t.Errorf("AlphaHealthChecks().Insert(%v, %v, %v) = %v; want nil", ctx, keyAlpha, obj, err)
|
||||
}
|
||||
}
|
||||
{
|
||||
obj := &beta.HealthCheck{}
|
||||
if err := mock.BetaHealthChecks().Insert(ctx, keyBeta, obj); err != nil {
|
||||
t.Errorf("BetaHealthChecks().Insert(%v, %v, %v) = %v; want nil", ctx, keyBeta, obj, err)
|
||||
}
|
||||
}
|
||||
{
|
||||
obj := &ga.HealthCheck{}
|
||||
if err := mock.HealthChecks().Insert(ctx, keyGA, obj); err != nil {
|
||||
@@ -696,15 +707,20 @@ func TestHealthChecksGroup(t *testing.T) {
|
||||
if obj, err := mock.AlphaHealthChecks().Get(ctx, key); err != nil {
|
||||
t.Errorf("AlphaHealthChecks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
|
||||
}
|
||||
if obj, err := mock.BetaHealthChecks().Get(ctx, key); err != nil {
|
||||
t.Errorf("BetaHealthChecks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
|
||||
}
|
||||
if obj, err := mock.HealthChecks().Get(ctx, key); err != nil {
|
||||
t.Errorf("HealthChecks().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
|
||||
}
|
||||
|
||||
// List.
|
||||
mock.MockAlphaHealthChecks.Objects[*keyAlpha] = mock.MockAlphaHealthChecks.Obj(&alpha.HealthCheck{Name: keyAlpha.Name})
|
||||
mock.MockBetaHealthChecks.Objects[*keyBeta] = mock.MockBetaHealthChecks.Obj(&beta.HealthCheck{Name: keyBeta.Name})
|
||||
mock.MockHealthChecks.Objects[*keyGA] = mock.MockHealthChecks.Obj(&ga.HealthCheck{Name: keyGA.Name})
|
||||
want := map[string]bool{
|
||||
"key-alpha": true,
|
||||
"key-beta": true,
|
||||
"key-ga": true,
|
||||
}
|
||||
_ = want // ignore unused variables.
|
||||
@@ -722,6 +738,20 @@ func TestHealthChecksGroup(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
objs, err := mock.BetaHealthChecks().List(ctx, filter.None)
|
||||
if err != nil {
|
||||
t.Errorf("BetaHealthChecks().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
|
||||
} else {
|
||||
got := map[string]bool{}
|
||||
for _, obj := range objs {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("BetaHealthChecks().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
objs, err := mock.HealthChecks().List(ctx, filter.None)
|
||||
if err != nil {
|
||||
@@ -732,7 +762,7 @@ func TestHealthChecksGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaHealthChecks().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("HealthChecks().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -741,6 +771,9 @@ func TestHealthChecksGroup(t *testing.T) {
|
||||
if err := mock.AlphaHealthChecks().Delete(ctx, keyAlpha); err != nil {
|
||||
t.Errorf("AlphaHealthChecks().Delete(%v, %v) = %v; want nil", ctx, keyAlpha, err)
|
||||
}
|
||||
if err := mock.BetaHealthChecks().Delete(ctx, keyBeta); err != nil {
|
||||
t.Errorf("BetaHealthChecks().Delete(%v, %v) = %v; want nil", ctx, keyBeta, err)
|
||||
}
|
||||
if err := mock.HealthChecks().Delete(ctx, keyGA); err != nil {
|
||||
t.Errorf("HealthChecks().Delete(%v, %v) = %v; want nil", ctx, keyGA, err)
|
||||
}
|
||||
@@ -749,6 +782,9 @@ func TestHealthChecksGroup(t *testing.T) {
|
||||
if err := mock.AlphaHealthChecks().Delete(ctx, keyAlpha); err == nil {
|
||||
t.Errorf("AlphaHealthChecks().Delete(%v, %v) = nil; want error", ctx, keyAlpha)
|
||||
}
|
||||
if err := mock.BetaHealthChecks().Delete(ctx, keyBeta); err == nil {
|
||||
t.Errorf("BetaHealthChecks().Delete(%v, %v) = nil; want error", ctx, keyBeta)
|
||||
}
|
||||
if err := mock.HealthChecks().Delete(ctx, keyGA); err == nil {
|
||||
t.Errorf("HealthChecks().Delete(%v, %v) = nil; want error", ctx, keyGA)
|
||||
}
|
||||
@@ -801,7 +837,7 @@ func TestHttpHealthChecksGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaHttpHealthChecks().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("HttpHealthChecks().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -864,7 +900,7 @@ func TestHttpsHealthChecksGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaHttpsHealthChecks().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("HttpsHealthChecks().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -927,7 +963,7 @@ func TestInstanceGroupsGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaInstanceGroups().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("InstanceGroups().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1036,7 +1072,7 @@ func TestInstancesGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("BetaInstances().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1050,7 +1086,7 @@ func TestInstancesGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaInstances().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("Instances().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1088,6 +1124,8 @@ func TestNetworkEndpointGroupsGroup(t *testing.T) {
|
||||
var key *meta.Key
|
||||
keyAlpha := meta.ZonalKey("key-alpha", "location")
|
||||
key = keyAlpha
|
||||
keyBeta := meta.ZonalKey("key-beta", "location")
|
||||
key = keyBeta
|
||||
// Ignore unused variables.
|
||||
_, _, _ = ctx, mock, key
|
||||
|
||||
@@ -1095,6 +1133,9 @@ func TestNetworkEndpointGroupsGroup(t *testing.T) {
|
||||
if _, err := mock.AlphaNetworkEndpointGroups().Get(ctx, key); err == nil {
|
||||
t.Errorf("AlphaNetworkEndpointGroups().Get(%v, %v) = _, nil; want error", ctx, key)
|
||||
}
|
||||
if _, err := mock.BetaNetworkEndpointGroups().Get(ctx, key); err == nil {
|
||||
t.Errorf("BetaNetworkEndpointGroups().Get(%v, %v) = _, nil; want error", ctx, key)
|
||||
}
|
||||
|
||||
// Insert.
|
||||
{
|
||||
@@ -1103,16 +1144,27 @@ func TestNetworkEndpointGroupsGroup(t *testing.T) {
|
||||
t.Errorf("AlphaNetworkEndpointGroups().Insert(%v, %v, %v) = %v; want nil", ctx, keyAlpha, obj, err)
|
||||
}
|
||||
}
|
||||
{
|
||||
obj := &beta.NetworkEndpointGroup{}
|
||||
if err := mock.BetaNetworkEndpointGroups().Insert(ctx, keyBeta, obj); err != nil {
|
||||
t.Errorf("BetaNetworkEndpointGroups().Insert(%v, %v, %v) = %v; want nil", ctx, keyBeta, obj, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Get across versions.
|
||||
if obj, err := mock.AlphaNetworkEndpointGroups().Get(ctx, key); err != nil {
|
||||
t.Errorf("AlphaNetworkEndpointGroups().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
|
||||
}
|
||||
if obj, err := mock.BetaNetworkEndpointGroups().Get(ctx, key); err != nil {
|
||||
t.Errorf("BetaNetworkEndpointGroups().Get(%v, %v) = %v, %v; want nil", ctx, key, obj, err)
|
||||
}
|
||||
|
||||
// List.
|
||||
mock.MockAlphaNetworkEndpointGroups.Objects[*keyAlpha] = mock.MockAlphaNetworkEndpointGroups.Obj(&alpha.NetworkEndpointGroup{Name: keyAlpha.Name})
|
||||
mock.MockBetaNetworkEndpointGroups.Objects[*keyBeta] = mock.MockBetaNetworkEndpointGroups.Obj(&beta.NetworkEndpointGroup{Name: keyBeta.Name})
|
||||
want := map[string]bool{
|
||||
"key-alpha": true,
|
||||
"key-beta": true,
|
||||
}
|
||||
_ = want // ignore unused variables.
|
||||
{
|
||||
@@ -1129,16 +1181,36 @@ func TestNetworkEndpointGroupsGroup(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
objs, err := mock.BetaNetworkEndpointGroups().List(ctx, location, filter.None)
|
||||
if err != nil {
|
||||
t.Errorf("BetaNetworkEndpointGroups().List(%v, %v, %v) = %v, %v; want _, nil", ctx, location, filter.None, objs, err)
|
||||
} else {
|
||||
got := map[string]bool{}
|
||||
for _, obj := range objs {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("BetaNetworkEndpointGroups().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Delete across versions.
|
||||
if err := mock.AlphaNetworkEndpointGroups().Delete(ctx, keyAlpha); err != nil {
|
||||
t.Errorf("AlphaNetworkEndpointGroups().Delete(%v, %v) = %v; want nil", ctx, keyAlpha, err)
|
||||
}
|
||||
if err := mock.BetaNetworkEndpointGroups().Delete(ctx, keyBeta); err != nil {
|
||||
t.Errorf("BetaNetworkEndpointGroups().Delete(%v, %v) = %v; want nil", ctx, keyBeta, err)
|
||||
}
|
||||
|
||||
// Delete not found.
|
||||
if err := mock.AlphaNetworkEndpointGroups().Delete(ctx, keyAlpha); err == nil {
|
||||
t.Errorf("AlphaNetworkEndpointGroups().Delete(%v, %v) = nil; want error", ctx, keyAlpha)
|
||||
}
|
||||
if err := mock.BetaNetworkEndpointGroups().Delete(ctx, keyBeta); err == nil {
|
||||
t.Errorf("BetaNetworkEndpointGroups().Delete(%v, %v) = nil; want error", ctx, keyBeta)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProjectsGroup(t *testing.T) {
|
||||
@@ -1249,7 +1321,7 @@ func TestRegionBackendServicesGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaRegionBackendServices().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("RegionBackendServices().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1318,7 +1390,7 @@ func TestRegionDisksGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaRegionDisks().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("BetaRegionDisks().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1372,7 +1444,7 @@ func TestRegionsGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaRegions().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("Regions().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1429,7 +1501,7 @@ func TestRoutesGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaRoutes().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("Routes().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1492,7 +1564,7 @@ func TestSecurityPoliciesGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaSecurityPolicies().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("BetaSecurityPolicies().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1555,7 +1627,7 @@ func TestSslCertificatesGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaSslCertificates().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("SslCertificates().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1618,7 +1690,7 @@ func TestTargetHttpProxiesGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaTargetHttpProxies().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("TargetHttpProxies().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1681,7 +1753,7 @@ func TestTargetHttpsProxiesGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaTargetHttpsProxies().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("TargetHttpsProxies().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1744,7 +1816,7 @@ func TestTargetPoolsGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaTargetPools().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("TargetPools().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1807,7 +1879,7 @@ func TestUrlMapsGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaUrlMaps().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("UrlMaps().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1861,7 +1933,7 @@ func TestZonesGroup(t *testing.T) {
|
||||
got[obj.Name] = true
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("AlphaZones().List(); got %+v, want %+v", got, want)
|
||||
t.Errorf("Zones().List(); got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
26
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/meta.go
generated
vendored
26
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta/meta.go
generated
vendored
@@ -115,6 +115,7 @@ var AllServices = []*ServiceInfo{
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&beta.BackendServicesService{}),
|
||||
additionalMethods: []string{
|
||||
"Update",
|
||||
"SetSecurityPolicy",
|
||||
},
|
||||
},
|
||||
@@ -220,6 +221,17 @@ var AllServices = []*ServiceInfo{
|
||||
"Update",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "HealthCheck",
|
||||
Service: "HealthChecks",
|
||||
Resource: "healthChecks",
|
||||
version: VersionBeta,
|
||||
keyType: Global,
|
||||
serviceType: reflect.TypeOf(&beta.HealthChecksService{}),
|
||||
additionalMethods: []string{
|
||||
"Update",
|
||||
},
|
||||
},
|
||||
{
|
||||
Object: "HealthCheck",
|
||||
Service: "HealthChecks",
|
||||
@@ -315,6 +327,20 @@ var AllServices = []*ServiceInfo{
|
||||
},
|
||||
options: AggregatedList,
|
||||
},
|
||||
{
|
||||
Object: "NetworkEndpointGroup",
|
||||
Service: "NetworkEndpointGroups",
|
||||
Resource: "networkEndpointGroups",
|
||||
version: VersionBeta,
|
||||
keyType: Zonal,
|
||||
serviceType: reflect.TypeOf(&beta.NetworkEndpointGroupsService{}),
|
||||
additionalMethods: []string{
|
||||
"AttachNetworkEndpoints",
|
||||
"DetachNetworkEndpoints",
|
||||
"ListNetworkEndpoints",
|
||||
},
|
||||
options: AggregatedList,
|
||||
},
|
||||
{
|
||||
Object: "Project",
|
||||
Service: "Projects",
|
||||
|
7
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/op.go
generated
vendored
7
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/op.go
generated
vendored
@@ -25,6 +25,7 @@ import (
|
||||
alpha "google.golang.org/api/compute/v0.alpha"
|
||||
beta "google.golang.org/api/compute/v0.beta"
|
||||
ga "google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
)
|
||||
@@ -85,7 +86,7 @@ func (o *gaOperation) isDone(ctx context.Context) (bool, error) {
|
||||
|
||||
if op.Error != nil && len(op.Error.Errors) > 0 && op.Error.Errors[0] != nil {
|
||||
e := op.Error.Errors[0]
|
||||
o.err = &GCEOperationError{HTTPStatusCode: op.HTTPStatusCode, Code: e.Code, Message: e.Message}
|
||||
o.err = &googleapi.Error{Code: int(op.HttpErrorStatusCode), Message: fmt.Sprintf("%v - %v", e.Code, e.Message)}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
@@ -142,7 +143,7 @@ func (o *alphaOperation) isDone(ctx context.Context) (bool, error) {
|
||||
|
||||
if op.Error != nil && len(op.Error.Errors) > 0 && op.Error.Errors[0] != nil {
|
||||
e := op.Error.Errors[0]
|
||||
o.err = &GCEOperationError{HTTPStatusCode: op.HTTPStatusCode, Code: e.Code, Message: e.Message}
|
||||
o.err = &googleapi.Error{Code: int(op.HttpErrorStatusCode), Message: fmt.Sprintf("%v - %v", e.Code, e.Message)}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
@@ -199,7 +200,7 @@ func (o *betaOperation) isDone(ctx context.Context) (bool, error) {
|
||||
|
||||
if op.Error != nil && len(op.Error.Errors) > 0 && op.Error.Errors[0] != nil {
|
||||
e := op.Error.Errors[0]
|
||||
o.err = &GCEOperationError{HTTPStatusCode: op.HTTPStatusCode, Code: e.Code, Message: e.Message}
|
||||
o.err = &googleapi.Error{Code: int(op.HttpErrorStatusCode), Message: fmt.Sprintf("%v - %v", e.Code, e.Message)}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
59
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go
generated
vendored
59
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go
generated
vendored
@@ -114,6 +114,7 @@ type GCECloud struct {
|
||||
eventRecorder record.EventRecorder
|
||||
projectID string
|
||||
region string
|
||||
regional bool
|
||||
localZone string // The zone in which we are running
|
||||
// managedZones will be set to the 1 zone if running a single zone cluster
|
||||
// it will be set to ALL zones in region for any multi-zone cluster
|
||||
@@ -174,10 +175,14 @@ type ConfigGlobal struct {
|
||||
SecondaryRangeName string `gcfg:"secondary-range-name"`
|
||||
NodeTags []string `gcfg:"node-tags"`
|
||||
NodeInstancePrefix string `gcfg:"node-instance-prefix"`
|
||||
Regional bool `gcfg:"regional"`
|
||||
Multizone bool `gcfg:"multizone"`
|
||||
// ApiEndpoint is the GCE compute API endpoint to use. If this is blank,
|
||||
// then the default endpoint is used.
|
||||
ApiEndpoint string `gcfg:"api-endpoint"`
|
||||
// ContainerApiEndpoint is the GCE container API endpoint to use. If this is blank,
|
||||
// then the default endpoint is used.
|
||||
ContainerApiEndpoint string `gcfg:"container-api-endpoint"`
|
||||
// LocalZone specifies the GCE zone that gce cloud client instance is
|
||||
// located in (i.e. where the controller will be running). If this is
|
||||
// blank, then the local zone will be discovered via the metadata server.
|
||||
@@ -194,22 +199,24 @@ type ConfigFile struct {
|
||||
|
||||
// CloudConfig includes all the necessary configuration for creating GCECloud
|
||||
type CloudConfig struct {
|
||||
ApiEndpoint string
|
||||
ProjectID string
|
||||
NetworkProjectID string
|
||||
Region string
|
||||
Zone string
|
||||
ManagedZones []string
|
||||
NetworkName string
|
||||
NetworkURL string
|
||||
SubnetworkName string
|
||||
SubnetworkURL string
|
||||
SecondaryRangeName string
|
||||
NodeTags []string
|
||||
NodeInstancePrefix string
|
||||
TokenSource oauth2.TokenSource
|
||||
UseMetadataServer bool
|
||||
AlphaFeatureGate *AlphaFeatureGate
|
||||
ApiEndpoint string
|
||||
ContainerApiEndpoint string
|
||||
ProjectID string
|
||||
NetworkProjectID string
|
||||
Region string
|
||||
Regional bool
|
||||
Zone string
|
||||
ManagedZones []string
|
||||
NetworkName string
|
||||
NetworkURL string
|
||||
SubnetworkName string
|
||||
SubnetworkURL string
|
||||
SecondaryRangeName string
|
||||
NodeTags []string
|
||||
NodeInstancePrefix string
|
||||
TokenSource oauth2.TokenSource
|
||||
UseMetadataServer bool
|
||||
AlphaFeatureGate *AlphaFeatureGate
|
||||
}
|
||||
|
||||
func init() {
|
||||
@@ -238,6 +245,11 @@ func (g *GCECloud) Compute() cloud.Cloud {
|
||||
return g.c
|
||||
}
|
||||
|
||||
// ContainerService returns the container service.
|
||||
func (g *GCECloud) ContainerService() *container.Service {
|
||||
return g.containerService
|
||||
}
|
||||
|
||||
// newGCECloud creates a new instance of GCECloud.
|
||||
func newGCECloud(config io.Reader) (gceCloud *GCECloud, err error) {
|
||||
var cloudConfig *CloudConfig
|
||||
@@ -278,6 +290,10 @@ func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err
|
||||
cloudConfig.ApiEndpoint = configFile.Global.ApiEndpoint
|
||||
}
|
||||
|
||||
if configFile.Global.ContainerApiEndpoint != "" {
|
||||
cloudConfig.ContainerApiEndpoint = configFile.Global.ContainerApiEndpoint
|
||||
}
|
||||
|
||||
if configFile.Global.TokenURL != "" {
|
||||
// if tokenURL is nil, set tokenSource to nil. This will force the OAuth client to fall
|
||||
// back to use DefaultTokenSource. This allows running gceCloud remotely.
|
||||
@@ -319,9 +335,14 @@ func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Determine if its a regional cluster
|
||||
if configFile != nil && configFile.Global.Regional {
|
||||
cloudConfig.Regional = true
|
||||
}
|
||||
|
||||
// generate managedZones
|
||||
cloudConfig.ManagedZones = []string{cloudConfig.Zone}
|
||||
if configFile != nil && configFile.Global.Multizone {
|
||||
if configFile != nil && (configFile.Global.Multizone || configFile.Global.Regional) {
|
||||
cloudConfig.ManagedZones = nil // Use all zones in region
|
||||
}
|
||||
|
||||
@@ -419,6 +440,9 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) {
|
||||
return nil, err
|
||||
}
|
||||
containerService.UserAgent = userAgent
|
||||
if config.ContainerApiEndpoint != "" {
|
||||
containerService.BasePath = config.ContainerApiEndpoint
|
||||
}
|
||||
|
||||
tpuService, err := newTPUService(client)
|
||||
if err != nil {
|
||||
@@ -496,6 +520,7 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) {
|
||||
networkProjectID: netProjID,
|
||||
onXPN: onXPN,
|
||||
region: config.Region,
|
||||
regional: config.Regional,
|
||||
localZone: config.Zone,
|
||||
managedZones: config.ManagedZones,
|
||||
networkURL: networkURL,
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_address_manager.go
generated
vendored
@@ -169,7 +169,7 @@ func (am *addressManager) ensureAddressReservation() (string, error) {
|
||||
if am.isManagedAddress(addr) {
|
||||
// The address with this name is checked at the beginning of 'HoldAddress()', but for some reason
|
||||
// it was re-created by this point. May be possible that two controllers are running.
|
||||
glog.Warning("%v: address %q unexpectedly existed with IP %q.", am.logPrefix, addr.Name, am.targetIP)
|
||||
glog.Warningf("%v: address %q unexpectedly existed with IP %q.", am.logPrefix, addr.Name, am.targetIP)
|
||||
} else {
|
||||
// If the retrieved address is not named with the loadbalancer name, then the controller does not own it, but will allow use of it.
|
||||
glog.V(4).Infof("%v: address %q was already reserved with name: %q, description: %q", am.logPrefix, am.targetIP, addr.Name, addr.Description)
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go
generated
vendored
@@ -26,8 +26,6 @@ const (
|
||||
// Allows Services backed by a GCP load balancer to choose what network
|
||||
// tier to use. Currently supports "Standard" and "Premium" (default).
|
||||
AlphaFeatureNetworkTiers = "NetworkTiers"
|
||||
|
||||
AlphaFeatureNetworkEndpointGroup = "NetworkEndpointGroup"
|
||||
)
|
||||
|
||||
type AlphaFeatureGate struct {
|
||||
|
23
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go
generated
vendored
23
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go
generated
vendored
@@ -74,13 +74,23 @@ func (gce *GCECloud) UpdateGlobalBackendService(bg *compute.BackendService) erro
|
||||
return mc.Observe(gce.c.BackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg))
|
||||
}
|
||||
|
||||
// UpdateBetaGlobalBackendService applies the given beta BackendService as an
|
||||
// update to an existing service.
|
||||
func (gce *GCECloud) UpdateBetaGlobalBackendService(bg *computebeta.BackendService) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContextWithVersion("update", "", computeBetaVersion)
|
||||
return mc.Observe(gce.c.BetaBackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg))
|
||||
}
|
||||
|
||||
// UpdateAlphaGlobalBackendService applies the given alpha BackendService as an
|
||||
// update to an existing service.
|
||||
func (gce *GCECloud) UpdateAlphaGlobalBackendService(bg *computealpha.BackendService) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContext("update", "")
|
||||
mc := newBackendServiceMetricContextWithVersion("update", "", computeAlphaVersion)
|
||||
return mc.Observe(gce.c.AlphaBackendServices().Update(ctx, meta.GlobalKey(bg.Name), bg))
|
||||
}
|
||||
|
||||
@@ -102,12 +112,21 @@ func (gce *GCECloud) CreateGlobalBackendService(bg *compute.BackendService) erro
|
||||
return mc.Observe(gce.c.BackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg))
|
||||
}
|
||||
|
||||
// CreateBetaGlobalBackendService creates the given beta BackendService.
|
||||
func (gce *GCECloud) CreateBetaGlobalBackendService(bg *computebeta.BackendService) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContextWithVersion("create", "", computeBetaVersion)
|
||||
return mc.Observe(gce.c.BetaBackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg))
|
||||
}
|
||||
|
||||
// CreateAlphaGlobalBackendService creates the given alpha BackendService.
|
||||
func (gce *GCECloud) CreateAlphaGlobalBackendService(bg *computealpha.BackendService) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newBackendServiceMetricContext("create", "")
|
||||
mc := newBackendServiceMetricContextWithVersion("create", "", computeAlphaVersion)
|
||||
return mc.Observe(gce.c.AlphaBackendServices().Insert(ctx, meta.GlobalKey(bg.Name), bg))
|
||||
}
|
||||
|
||||
|
59
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusters.go
generated
vendored
59
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusters.go
generated
vendored
@@ -16,7 +16,14 @@ limitations under the License.
|
||||
|
||||
package gce
|
||||
|
||||
import "context"
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
container "google.golang.org/api/container/v1"
|
||||
)
|
||||
|
||||
func newClustersMetricContext(request, zone string) *metricContext {
|
||||
return newGenericMetricContext("clusters", request, unusedMetricLabel, zone, computeV1Version)
|
||||
@@ -37,21 +44,59 @@ func (gce *GCECloud) ListClusters(ctx context.Context) ([]string, error) {
|
||||
return allClusters, nil
|
||||
}
|
||||
|
||||
func (gce *GCECloud) GetManagedClusters(ctx context.Context) ([]*container.Cluster, error) {
|
||||
managedClusters := []*container.Cluster{}
|
||||
|
||||
if gce.regional {
|
||||
var err error
|
||||
managedClusters, err = gce.getClustersInLocation(gce.region)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if len(gce.managedZones) >= 1 {
|
||||
for _, zone := range gce.managedZones {
|
||||
clusters, err := gce.getClustersInLocation(zone)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
managedClusters = append(managedClusters, clusters...)
|
||||
}
|
||||
} else {
|
||||
return nil, errors.New(fmt.Sprintf("no zones associated with this cluster(%s)", gce.ProjectID()))
|
||||
}
|
||||
|
||||
return managedClusters, nil
|
||||
}
|
||||
|
||||
func (gce *GCECloud) Master(ctx context.Context, clusterName string) (string, error) {
|
||||
return "k8s-" + clusterName + "-master.internal", nil
|
||||
}
|
||||
|
||||
func (gce *GCECloud) listClustersInZone(zone string) ([]string, error) {
|
||||
mc := newClustersMetricContext("list_zone", zone)
|
||||
// TODO: use PageToken to list all not just the first 500
|
||||
list, err := gce.containerService.Projects.Zones.Clusters.List(gce.projectID, zone).Do()
|
||||
clusters, err := gce.getClustersInLocation(zone)
|
||||
if err != nil {
|
||||
return nil, mc.Observe(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := []string{}
|
||||
for _, cluster := range list.Clusters {
|
||||
for _, cluster := range clusters {
|
||||
result = append(result, cluster.Name)
|
||||
}
|
||||
return result, mc.Observe(nil)
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (gce *GCECloud) getClustersInLocation(zoneOrRegion string) ([]*container.Cluster, error) {
|
||||
// TODO: Issue/68913 migrate metric to list_location instead of list_zone.
|
||||
mc := newClustersMetricContext("list_zone", zoneOrRegion)
|
||||
// TODO: use PageToken to list all not just the first 500
|
||||
location := getLocationName(gce.projectID, zoneOrRegion)
|
||||
list, err := gce.containerService.Projects.Locations.Clusters.List(location).Do()
|
||||
if err != nil {
|
||||
return nil, mc.Observe(err)
|
||||
}
|
||||
if list.Header.Get("nextPageToken") != "" {
|
||||
glog.Errorf("Failed to get all clusters for request, received next page token %s", list.Header.Get("nextPageToken"))
|
||||
}
|
||||
|
||||
return list.Clusters, mc.Observe(nil)
|
||||
}
|
||||
|
13
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go
generated
vendored
13
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go
generated
vendored
@@ -746,13 +746,12 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity,
|
||||
return oldSize, err
|
||||
}
|
||||
|
||||
requestBytes := newSize.Value()
|
||||
// GCE resizes in chunks of GBs (not GiB)
|
||||
requestGB := volumeutil.RoundUpSize(requestBytes, 1000*1000*1000)
|
||||
newSizeQuant := resource.MustParse(fmt.Sprintf("%dG", requestGB))
|
||||
// GCE resizes in chunks of GiBs
|
||||
requestGIB := volumeutil.RoundUpToGiB(newSize)
|
||||
newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGIB))
|
||||
|
||||
// If disk is already of size equal or greater than requested size, we simply return
|
||||
if disk.SizeGb >= requestGB {
|
||||
if disk.SizeGb >= requestGIB {
|
||||
return newSizeQuant, nil
|
||||
}
|
||||
|
||||
@@ -761,7 +760,7 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity,
|
||||
switch zoneInfo := disk.ZoneInfo.(type) {
|
||||
case singleZone:
|
||||
mc = newDiskMetricContextZonal("resize", disk.Region, zoneInfo.zone)
|
||||
err := gce.manager.ResizeDiskOnCloudProvider(disk, requestGB, zoneInfo.zone)
|
||||
err := gce.manager.ResizeDiskOnCloudProvider(disk, requestGIB, zoneInfo.zone)
|
||||
|
||||
if err != nil {
|
||||
return oldSize, mc.Observe(err)
|
||||
@@ -774,7 +773,7 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity,
|
||||
}
|
||||
|
||||
mc = newDiskMetricContextRegional("resize", disk.Region)
|
||||
err := gce.manager.RegionalResizeDiskOnCloudProvider(disk, requestGB)
|
||||
err := gce.manager.RegionalResizeDiskOnCloudProvider(disk, requestGIB)
|
||||
|
||||
if err != nil {
|
||||
return oldSize, mc.Observe(err)
|
||||
|
29
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go
generated
vendored
29
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go
generated
vendored
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/golang/glog"
|
||||
|
||||
computealpha "google.golang.org/api/compute/v0.alpha"
|
||||
computebeta "google.golang.org/api/compute/v0.beta"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
@@ -173,6 +174,16 @@ func (gce *GCECloud) GetAlphaHealthCheck(name string) (*computealpha.HealthCheck
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// GetBetaHealthCheck returns the given beta HealthCheck by name.
|
||||
func (gce *GCECloud) GetBetaHealthCheck(name string) (*computebeta.HealthCheck, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContextWithVersion("get", computeBetaVersion)
|
||||
v, err := gce.c.BetaHealthChecks().Get(ctx, meta.GlobalKey(name))
|
||||
return v, mc.Observe(err)
|
||||
}
|
||||
|
||||
// UpdateHealthCheck applies the given HealthCheck as an update.
|
||||
func (gce *GCECloud) UpdateHealthCheck(hc *compute.HealthCheck) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
@@ -191,6 +202,15 @@ func (gce *GCECloud) UpdateAlphaHealthCheck(hc *computealpha.HealthCheck) error
|
||||
return mc.Observe(gce.c.AlphaHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc))
|
||||
}
|
||||
|
||||
// UpdateBetaHealthCheck applies the given beta HealthCheck as an update.
|
||||
func (gce *GCECloud) UpdateBetaHealthCheck(hc *computebeta.HealthCheck) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContextWithVersion("update", computeBetaVersion)
|
||||
return mc.Observe(gce.c.BetaHealthChecks().Update(ctx, meta.GlobalKey(hc.Name), hc))
|
||||
}
|
||||
|
||||
// DeleteHealthCheck deletes the given HealthCheck by name.
|
||||
func (gce *GCECloud) DeleteHealthCheck(name string) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
@@ -218,6 +238,15 @@ func (gce *GCECloud) CreateAlphaHealthCheck(hc *computealpha.HealthCheck) error
|
||||
return mc.Observe(gce.c.AlphaHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc))
|
||||
}
|
||||
|
||||
// CreateBetaHealthCheck creates the given beta HealthCheck.
|
||||
func (gce *GCECloud) CreateBetaHealthCheck(hc *computebeta.HealthCheck) error {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
defer cancel()
|
||||
|
||||
mc := newHealthcheckMetricContextWithVersion("create", computeBetaVersion)
|
||||
return mc.Observe(gce.c.BetaHealthChecks().Insert(ctx, meta.GlobalKey(hc.Name), hc))
|
||||
}
|
||||
|
||||
// ListHealthChecks lists all HealthCheck in the project.
|
||||
func (gce *GCECloud) ListHealthChecks() ([]*compute.HealthCheck, error) {
|
||||
ctx, cancel := cloud.ContextWithCallTimeout()
|
||||
|
14
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instances.go
generated
vendored
14
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instances.go
generated
vendored
@@ -91,10 +91,20 @@ func (gce *GCECloud) NodeAddresses(_ context.Context, _ types.NodeName) ([]v1.No
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't get external IP: %v", err)
|
||||
}
|
||||
return []v1.NodeAddress{
|
||||
addresses := []v1.NodeAddress{
|
||||
{Type: v1.NodeInternalIP, Address: internalIP},
|
||||
{Type: v1.NodeExternalIP, Address: externalIP},
|
||||
}, nil
|
||||
}
|
||||
|
||||
if internalDNSFull, err := metadata.Get("instance/hostname"); err != nil {
|
||||
glog.Warningf("couldn't get full internal DNS name: %v", err)
|
||||
} else {
|
||||
addresses = append(addresses,
|
||||
v1.NodeAddress{Type: v1.NodeInternalDNS, Address: internalDNSFull},
|
||||
v1.NodeAddress{Type: v1.NodeHostName, Address: internalDNSFull},
|
||||
)
|
||||
}
|
||||
return addresses, nil
|
||||
}
|
||||
|
||||
// NodeAddressesByProviderID will not be called from the node that is requesting this ID.
|
||||
|
14
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer.go
generated
vendored
14
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer.go
generated
vendored
@@ -92,7 +92,7 @@ func LoadBalancerSrcRanges() []string {
|
||||
|
||||
// GetLoadBalancer is an implementation of LoadBalancer.GetLoadBalancer
|
||||
func (gce *GCECloud) GetLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
|
||||
loadBalancerName := cloudprovider.GetLoadBalancerName(svc)
|
||||
loadBalancerName := gce.GetLoadBalancerName(ctx, clusterName, svc)
|
||||
fwd, err := gce.GetRegionForwardingRule(loadBalancerName, gce.region)
|
||||
if err == nil {
|
||||
status := &v1.LoadBalancerStatus{}
|
||||
@@ -103,9 +103,15 @@ func (gce *GCECloud) GetLoadBalancer(ctx context.Context, clusterName string, sv
|
||||
return nil, false, ignoreNotFound(err)
|
||||
}
|
||||
|
||||
// GetLoadBalancerName is an implementation of LoadBalancer.GetLoadBalancerName.
|
||||
func (gce *GCECloud) GetLoadBalancerName(ctx context.Context, clusterName string, svc *v1.Service) string {
|
||||
// TODO: replace DefaultLoadBalancerName to generate more meaningful loadbalancer names.
|
||||
return cloudprovider.DefaultLoadBalancerName(svc)
|
||||
}
|
||||
|
||||
// EnsureLoadBalancer is an implementation of LoadBalancer.EnsureLoadBalancer.
|
||||
func (gce *GCECloud) EnsureLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
|
||||
loadBalancerName := cloudprovider.GetLoadBalancerName(svc)
|
||||
loadBalancerName := gce.GetLoadBalancerName(ctx, clusterName, svc)
|
||||
desiredScheme := getSvcScheme(svc)
|
||||
clusterID, err := gce.ClusterID.GetID()
|
||||
if err != nil {
|
||||
@@ -154,7 +160,7 @@ func (gce *GCECloud) EnsureLoadBalancer(ctx context.Context, clusterName string,
|
||||
|
||||
// UpdateLoadBalancer is an implementation of LoadBalancer.UpdateLoadBalancer.
|
||||
func (gce *GCECloud) UpdateLoadBalancer(ctx context.Context, clusterName string, svc *v1.Service, nodes []*v1.Node) error {
|
||||
loadBalancerName := cloudprovider.GetLoadBalancerName(svc)
|
||||
loadBalancerName := gce.GetLoadBalancerName(ctx, clusterName, svc)
|
||||
scheme := getSvcScheme(svc)
|
||||
clusterID, err := gce.ClusterID.GetID()
|
||||
if err != nil {
|
||||
@@ -175,7 +181,7 @@ func (gce *GCECloud) UpdateLoadBalancer(ctx context.Context, clusterName string,
|
||||
|
||||
// EnsureLoadBalancerDeleted is an implementation of LoadBalancer.EnsureLoadBalancerDeleted.
|
||||
func (gce *GCECloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, svc *v1.Service) error {
|
||||
loadBalancerName := cloudprovider.GetLoadBalancerName(svc)
|
||||
loadBalancerName := gce.GetLoadBalancerName(ctx, clusterName, svc)
|
||||
scheme := getSvcScheme(svc)
|
||||
clusterID, err := gce.ClusterID.GetID()
|
||||
if err != nil {
|
||||
|
10
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go
generated
vendored
10
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go
generated
vendored
@@ -17,6 +17,7 @@ limitations under the License.
|
||||
package gce
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
@@ -27,7 +28,6 @@ import (
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
apiservice "k8s.io/kubernetes/pkg/api/v1/service"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
netsets "k8s.io/kubernetes/pkg/util/net/sets"
|
||||
|
||||
@@ -44,7 +44,7 @@ import (
|
||||
// Due to an interesting series of design decisions, this handles both creating
|
||||
// new load balancers and updating existing load balancers, recognizing when
|
||||
// each is needed.
|
||||
func (gce *GCECloud) ensureExternalLoadBalancer(clusterName, clusterID string, apiService *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
|
||||
func (gce *GCECloud) ensureExternalLoadBalancer(clusterName string, clusterID string, apiService *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
|
||||
if len(nodes) == 0 {
|
||||
return nil, fmt.Errorf("Cannot EnsureLoadBalancer() with no hosts")
|
||||
}
|
||||
@@ -56,7 +56,7 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName, clusterID string, a
|
||||
return nil, err
|
||||
}
|
||||
|
||||
loadBalancerName := cloudprovider.GetLoadBalancerName(apiService)
|
||||
loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, apiService)
|
||||
requestedIP := apiService.Spec.LoadBalancerIP
|
||||
ports := apiService.Spec.Ports
|
||||
portStr := []string{}
|
||||
@@ -281,13 +281,13 @@ func (gce *GCECloud) updateExternalLoadBalancer(clusterName string, service *v1.
|
||||
return err
|
||||
}
|
||||
|
||||
loadBalancerName := cloudprovider.GetLoadBalancerName(service)
|
||||
loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, service)
|
||||
return gce.updateTargetPool(loadBalancerName, hosts)
|
||||
}
|
||||
|
||||
// ensureExternalLoadBalancerDeleted is the external implementation of LoadBalancer.EnsureLoadBalancerDeleted
|
||||
func (gce *GCECloud) ensureExternalLoadBalancerDeleted(clusterName, clusterID string, service *v1.Service) error {
|
||||
loadBalancerName := cloudprovider.GetLoadBalancerName(service)
|
||||
loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, service)
|
||||
serviceName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
|
||||
lbRefStr := fmt.Sprintf("%v(%v)", loadBalancerName, serviceName)
|
||||
|
||||
|
@@ -31,7 +31,6 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock"
|
||||
@@ -330,7 +329,7 @@ func TestUpdateExternalLoadBalancer(t *testing.T) {
|
||||
err = gce.updateExternalLoadBalancer("", svc, newNodes)
|
||||
assert.NoError(t, err)
|
||||
|
||||
lbName := cloudprovider.GetLoadBalancerName(svc)
|
||||
lbName := gce.GetLoadBalancerName(context.TODO(), "", svc)
|
||||
|
||||
pool, err := gce.GetTargetPool(lbName, gce.region)
|
||||
require.NoError(t, err)
|
||||
@@ -401,7 +400,7 @@ func TestLoadBalancerWrongTierResourceDeletion(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, cloud.NetworkTierPremium, desiredTier)
|
||||
|
||||
lbName := cloudprovider.GetLoadBalancerName(svc)
|
||||
lbName := gce.GetLoadBalancerName(context.TODO(), "", svc)
|
||||
serviceName := types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name}
|
||||
|
||||
// create ForwardingRule and Address with the wrong tier
|
||||
@@ -484,7 +483,7 @@ func TestForwardingRuleNeedsUpdate(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
svc := fakeLoadbalancerService("")
|
||||
lbName := cloudprovider.GetLoadBalancerName(svc)
|
||||
lbName := gce.GetLoadBalancerName(context.TODO(), "", svc)
|
||||
ipAddr := status.Ingress[0].IP
|
||||
|
||||
lbIP := svc.Spec.LoadBalancerIP
|
||||
@@ -566,7 +565,7 @@ func TestTargetPoolNeedsRecreation(t *testing.T) {
|
||||
|
||||
svc := fakeLoadbalancerService("")
|
||||
serviceName := svc.ObjectMeta.Name
|
||||
lbName := cloudprovider.GetLoadBalancerName(svc)
|
||||
lbName := gce.GetLoadBalancerName(context.TODO(), "", svc)
|
||||
nodes, err := createAndInsertNodes(gce, []string{"test-node-1"}, vals.ZoneName)
|
||||
require.NoError(t, err)
|
||||
hostNames := nodeNames(nodes)
|
||||
@@ -619,7 +618,7 @@ func TestFirewallNeedsUpdate(t *testing.T) {
|
||||
region := vals.Region
|
||||
|
||||
ipAddr := status.Ingress[0].IP
|
||||
lbName := cloudprovider.GetLoadBalancerName(svc)
|
||||
lbName := gce.GetLoadBalancerName(context.TODO(), "", svc)
|
||||
|
||||
ipnet, err := netsets.ParseIPNets("0.0.0.0/0")
|
||||
require.NoError(t, err)
|
||||
@@ -804,7 +803,7 @@ func TestEnsureTargetPoolAndHealthCheck(t *testing.T) {
|
||||
clusterID := vals.ClusterID
|
||||
|
||||
ipAddr := status.Ingress[0].IP
|
||||
lbName := cloudprovider.GetLoadBalancerName(svc)
|
||||
lbName := gce.GetLoadBalancerName(context.TODO(), "", svc)
|
||||
region := vals.Region
|
||||
|
||||
hcToCreate := makeHttpHealthCheck(MakeNodesHealthCheckName(clusterID), GetNodesHealthCheckPath(), GetNodesHealthCheckPort())
|
||||
@@ -869,7 +868,7 @@ func TestCreateAndUpdateFirewallSucceedsOnXPN(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
gce.createFirewall(
|
||||
svc,
|
||||
cloudprovider.GetLoadBalancerName(svc),
|
||||
gce.GetLoadBalancerName(context.TODO(), "", svc),
|
||||
gce.region,
|
||||
"A sad little firewall",
|
||||
ipnet,
|
||||
@@ -882,7 +881,7 @@ func TestCreateAndUpdateFirewallSucceedsOnXPN(t *testing.T) {
|
||||
|
||||
gce.updateFirewall(
|
||||
svc,
|
||||
cloudprovider.GetLoadBalancerName(svc),
|
||||
gce.GetLoadBalancerName(context.TODO(), "", svc),
|
||||
gce.region,
|
||||
"A sad little firewall",
|
||||
ipnet,
|
||||
|
11
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go
generated
vendored
@@ -17,6 +17,7 @@ limitations under the License.
package gce

import (
"context"
"fmt"
"strconv"
"strings"
@@ -27,7 +28,6 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
v1_service "k8s.io/kubernetes/pkg/api/v1/service"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
)

@@ -38,8 +38,11 @@ const (
func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
nm := types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace}
ports, protocol := getPortsAndProtocol(svc.Spec.Ports)
if protocol != v1.ProtocolTCP && protocol != v1.ProtocolUDP {
return nil, fmt.Errorf("Invalid protocol %s, only TCP and UDP are supported", string(protocol))
}
scheme := cloud.SchemeInternal
loadBalancerName := cloudprovider.GetLoadBalancerName(svc)
loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, svc)
sharedBackend := shareBackendService(svc)
backendServiceName := makeBackendServiceName(loadBalancerName, clusterID, sharedBackend, scheme, protocol, svc.Spec.SessionAffinity)
backendServiceLink := gce.getBackendServiceLink(backendServiceName)
@@ -210,14 +213,14 @@ func (gce *GCECloud) updateInternalLoadBalancer(clusterName, clusterID string, s
// Generate the backend service name
_, protocol := getPortsAndProtocol(svc.Spec.Ports)
scheme := cloud.SchemeInternal
loadBalancerName := cloudprovider.GetLoadBalancerName(svc)
loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, svc)
backendServiceName := makeBackendServiceName(loadBalancerName, clusterID, shareBackendService(svc), scheme, protocol, svc.Spec.SessionAffinity)
// Ensure the backend service has the proper backend/instance-group links
return gce.ensureInternalBackendServiceGroups(backendServiceName, igLinks)
}

func (gce *GCECloud) ensureInternalLoadBalancerDeleted(clusterName, clusterID string, svc *v1.Service) error {
loadBalancerName := cloudprovider.GetLoadBalancerName(svc)
loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, svc)
_, protocol := getPortsAndProtocol(svc.Spec.Ports)
scheme := cloud.SchemeInternal
sharedBackend := shareBackendService(svc)
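For orientation, a hypothetical sketch of the internal load balancer's naming pipeline as it now stands: the context- and cluster-scoped load-balancer name feeds makeBackendServiceName together with the cluster ID and service attributes (signatures are taken from the diff; the wrapper function itself is not part of it):

```go
package gce

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
)

// exampleInternalBackendServiceName is a hypothetical helper tracing how the
// code above derives the backend service name for an internal load balancer.
func exampleInternalBackendServiceName(gce *GCECloud, clusterName, clusterID string, svc *v1.Service) string {
	loadBalancerName := gce.GetLoadBalancerName(context.TODO(), clusterName, svc)
	_, protocol := getPortsAndProtocol(svc.Spec.Ports)
	return makeBackendServiceName(loadBalancerName, clusterID, shareBackendService(svc),
		cloud.SchemeInternal, protocol, svc.Spec.SessionAffinity)
}
```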
@@ -17,6 +17,7 @@ limitations under the License.
package gce

import (
"context"
"fmt"
"strings"
"testing"
@@ -29,7 +30,6 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
v1_service "k8s.io/kubernetes/pkg/api/v1/service"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock"
)
@@ -59,7 +59,7 @@ func TestEnsureInternalBackendServiceUpdates(t *testing.T) {
require.NoError(t, err)

svc := fakeLoadbalancerService(string(LBTypeInternal))
lbName := cloudprovider.GetLoadBalancerName(svc)
lbName := gce.GetLoadBalancerName(context.TODO(), "", svc)
nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName)
igName := makeInstanceGroupName(vals.ClusterID)
igLinks, err := gce.ensureInternalInstanceGroups(igName, nodes)
@@ -80,6 +80,8 @@ func TestEnsureInternalBackendServiceUpdates(t *testing.T) {
}

func TestEnsureInternalBackendServiceGroups(t *testing.T) {
t.Parallel()

for desc, tc := range map[string]struct {
mockModifier func(*cloud.MockGCE)
}{
@@ -96,8 +98,6 @@ func TestEnsureInternalBackendServiceGroups(t *testing.T) {
},
} {
t.Run(desc, func(t *testing.T) {
t.Parallel()

vals := DefaultTestClusterValues()
nodeNames := []string{"test-node-1"}

@@ -105,7 +105,7 @@ func TestEnsureInternalBackendServiceGroups(t *testing.T) {
require.NoError(t, err)

svc := fakeLoadbalancerService(string(LBTypeInternal))
lbName := cloudprovider.GetLoadBalancerName(svc)
lbName := gce.GetLoadBalancerName(context.TODO(), "", svc)
nodes, err := createAndInsertNodes(gce, nodeNames, vals.ZoneName)
igName := makeInstanceGroupName(vals.ClusterID)
igLinks, err := gce.ensureInternalInstanceGroups(igName, nodes)
@@ -117,12 +117,12 @@ func TestEnsureInternalBackendServiceGroups(t *testing.T) {
err = gce.ensureInternalBackendService(bsName, "description", svc.Spec.SessionAffinity, cloud.SchemeInternal, "TCP", igLinks, "")
require.NoError(t, err)

// Update the BackendService with new Instances
// Update the BackendService with new InstanceGroups
if tc.mockModifier != nil {
tc.mockModifier(gce.c.(*cloud.MockGCE))
}
newNodeNames := []string{"new-test-node-1", "new-test-node-2"}
err = gce.ensureInternalBackendServiceGroups(bsName, newNodeNames)
newIGLinks := []string{"new-test-ig-1", "new-test-ig-2"}
err = gce.ensureInternalBackendServiceGroups(bsName, newIGLinks)
if tc.mockModifier != nil {
assert.Error(t, err)
return
@@ -132,10 +132,8 @@ func TestEnsureInternalBackendServiceGroups(t *testing.T) {
bs, err := gce.GetRegionBackendService(bsName, gce.region)
assert.NoError(t, err)

// Check that the instances are updated
newNodes, err := createAndInsertNodes(gce, newNodeNames, vals.ZoneName)
newIgLinks, err := gce.ensureInternalInstanceGroups(igName, newNodes)
backends := backendsFromGroupLinks(newIgLinks)
// Check that the Backends reflect the new InstanceGroups
backends := backendsFromGroupLinks(newIGLinks)
assert.Equal(t, bs.Backends, backends)
})
}
@@ -169,7 +167,7 @@ func TestEnsureInternalLoadBalancerWithExistingResources(t *testing.T) {

// Create the expected resources necessary for an Internal Load Balancer
nm := types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace}
lbName := cloudprovider.GetLoadBalancerName(svc)
lbName := gce.GetLoadBalancerName(context.TODO(), "", svc)

sharedHealthCheck := !v1_service.RequestsOnlyLocalTraffic(svc)
hcName := makeHealthCheckName(lbName, vals.ClusterID, sharedHealthCheck)
@@ -201,7 +199,7 @@ func TestEnsureInternalLoadBalancerClearPreviousResources(t *testing.T) {
require.NoError(t, err)

svc := fakeLoadbalancerService(string(LBTypeInternal))
lbName := cloudprovider.GetLoadBalancerName(svc)
lbName := gce.GetLoadBalancerName(context.TODO(), "", svc)

// Create a ForwardingRule that's missing an IP address
existingFwdRule := &compute.ForwardingRule{
@@ -287,7 +285,7 @@ func TestUpdateInternalLoadBalancerBackendServices(t *testing.T) {
// incorrect (missing) attributes.
// ensureInternalBackendServiceGroups is called and creates the correct
// BackendService
lbName := cloudprovider.GetLoadBalancerName(svc)
lbName := gce.GetLoadBalancerName(context.TODO(), "", svc)
sharedBackend := shareBackendService(svc)
backendServiceName := makeBackendServiceName(lbName, vals.ClusterID, sharedBackend, cloud.SchemeInternal, "TCP", svc.Spec.SessionAffinity)
existingBS := &compute.BackendService{
@@ -351,7 +349,7 @@ func TestUpdateInternalLoadBalancerNodes(t *testing.T) {
err = gce.updateInternalLoadBalancer(vals.ClusterName, vals.ClusterID, svc, nodes)
assert.NoError(t, err)

lbName := cloudprovider.GetLoadBalancerName(svc)
lbName := gce.GetLoadBalancerName(context.TODO(), "", svc)
sharedBackend := shareBackendService(svc)
backendServiceName := makeBackendServiceName(lbName, vals.ClusterID, sharedBackend, cloud.SchemeInternal, "TCP", svc.Spec.SessionAffinity)
bs, err := gce.GetRegionBackendService(backendServiceName, gce.region)
@@ -444,7 +442,7 @@ func TestEnsureInternalLoadBalancerWithSpecialHealthCheck(t *testing.T) {
assert.NoError(t, err)
assert.NotEmpty(t, status.Ingress)

loadBalancerName := cloudprovider.GetLoadBalancerName(svc)
loadBalancerName := gce.GetLoadBalancerName(context.TODO(), "", svc)
hc, err := gce.GetHealthCheck(loadBalancerName)
assert.NoError(t, err)
assert.NotNil(t, hc)
@@ -455,9 +453,9 @@ func TestClearPreviousInternalResources(t *testing.T) {
// Configure testing environment.
vals := DefaultTestClusterValues()
svc := fakeLoadbalancerService(string(LBTypeInternal))
loadBalancerName := cloudprovider.GetLoadBalancerName(svc)
nm := types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace}
gce, err := fakeGCECloud(vals)
loadBalancerName := gce.GetLoadBalancerName(context.TODO(), "", svc)
nm := types.NamespacedName{Name: svc.Name, Namespace: svc.Namespace}
c := gce.c.(*cloud.MockGCE)
require.NoError(t, err)

@@ -518,7 +516,7 @@ func TestEnsureInternalFirewallSucceedsOnXPN(t *testing.T) {
require.NoError(t, err)
vals := DefaultTestClusterValues()
svc := fakeLoadbalancerService(string(LBTypeInternal))
fwName := cloudprovider.GetLoadBalancerName(svc)
fwName := gce.GetLoadBalancerName(context.TODO(), "", svc)

c := gce.c.(*cloud.MockGCE)
c.MockFirewalls.InsertHook = mock.InsertFirewallsUnauthorizedErrHook
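The TestEnsureInternalBackendServiceGroups changes above make explicit that ensureInternalBackendServiceGroups is fed instance-group links, not node names, and that the resulting backends should match backendsFromGroupLinks over the same links. A hypothetical sketch of that flow (function names are from the diff; the wrapper is illustrative only):

```go
package gce

import "reflect"

// exampleSyncAndCheckBackends is a hypothetical sketch: sync a regional backend
// service to a set of instance-group links, read it back, and compare its
// backends against backendsFromGroupLinks over the same links.
func exampleSyncAndCheckBackends(gce *GCECloud, bsName string, igLinks []string) (bool, error) {
	if err := gce.ensureInternalBackendServiceGroups(bsName, igLinks); err != nil {
		return false, err
	}
	bs, err := gce.GetRegionBackendService(bsName, gce.region)
	if err != nil {
		return false, err
	}
	return reflect.DeepEqual(bs.Backends, backendsFromGroupLinks(igLinks)), nil
}
```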
7
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_naming.go
generated
vendored
@@ -31,7 +31,12 @@ import (

// Instance groups remain legacy named to stay consistent with ingress
func makeInstanceGroupName(clusterID string) string {
return fmt.Sprintf("k8s-ig--%s", clusterID)
prefix := "k8s-ig"
// clusterID might be empty for legacy clusters
if clusterID == "" {
return prefix
}
return fmt.Sprintf("%s--%s", prefix, clusterID)
}

func makeBackendServiceName(loadBalancerName, clusterID string, shared bool, scheme cloud.LbScheme, protocol v1.Protocol, svcAffinity v1.ServiceAffinity) string {
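The reworked makeInstanceGroupName keeps the legacy prefix but now tolerates an empty cluster ID. An illustrative test of the two cases, not part of the diff and using made-up cluster IDs:

```go
package gce

import "testing"

// TestMakeInstanceGroupNameExamples illustrates the new behaviour: an empty
// cluster ID falls back to the bare "k8s-ig" prefix, otherwise the legacy
// "k8s-ig--<clusterID>" form is preserved.
func TestMakeInstanceGroupNameExamples(t *testing.T) {
	if got := makeInstanceGroupName(""); got != "k8s-ig" {
		t.Errorf(`makeInstanceGroupName("") = %q, want "k8s-ig"`, got)
	}
	if got := makeInstanceGroupName("f00f"); got != "k8s-ig--f00f" {
		t.Errorf(`makeInstanceGroupName("f00f") = %q, want "k8s-ig--f00f"`, got)
	}
}
```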
10
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_utils_test.go
generated
vendored
@@ -21,6 +21,7 @@ limitations under the License.
package gce

import (
"context"
"fmt"
"net/http"
"strings"
@@ -37,7 +38,6 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
v1_service "k8s.io/kubernetes/pkg/api/v1/service"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock"
@@ -220,7 +220,7 @@ func fakeClusterID(clusterID string) ClusterID {
}

func assertExternalLbResources(t *testing.T, gce *GCECloud, apiService *v1.Service, vals TestClusterValues, nodeNames []string) {
lbName := cloudprovider.GetLoadBalancerName(apiService)
lbName := gce.GetLoadBalancerName(context.TODO(), "", apiService)
hcName := MakeNodesHealthCheckName(vals.ClusterID)

// Check that Firewalls are created for the LoadBalancer and the HealthCheck
@@ -257,7 +257,7 @@ func assertExternalLbResources(t *testing.T, gce *GCECloud, apiService *v1.Servi
}

func assertExternalLbResourcesDeleted(t *testing.T, gce *GCECloud, apiService *v1.Service, vals TestClusterValues, firewallsDeleted bool) {
lbName := cloudprovider.GetLoadBalancerName(apiService)
lbName := gce.GetLoadBalancerName(context.TODO(), "", apiService)
hcName := MakeNodesHealthCheckName(vals.ClusterID)

if firewallsDeleted {
@@ -292,7 +292,7 @@ func assertExternalLbResourcesDeleted(t *testing.T, gce *GCECloud, apiService *v
}

func assertInternalLbResources(t *testing.T, gce *GCECloud, apiService *v1.Service, vals TestClusterValues, nodeNames []string) {
lbName := cloudprovider.GetLoadBalancerName(apiService)
lbName := gce.GetLoadBalancerName(context.TODO(), "", apiService)

// Check that Instance Group is created
igName := makeInstanceGroupName(vals.ClusterID)
@@ -345,7 +345,7 @@ func assertInternalLbResources(t *testing.T, gce *GCECloud, apiService *v1.Servi
}

func assertInternalLbResourcesDeleted(t *testing.T, gce *GCECloud, apiService *v1.Service, vals TestClusterValues, firewallsDeleted bool) {
lbName := cloudprovider.GetLoadBalancerName(apiService)
lbName := gce.GetLoadBalancerName(context.TODO(), "", apiService)
sharedHealthCheck := !v1_service.RequestsOnlyLocalTraffic(apiService)
hcName := makeHealthCheckName(lbName, vals.ClusterID, sharedHealthCheck)
67
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_networkendpointgroup.go
generated
vendored
@@ -20,7 +20,7 @@ import (
"fmt"
"strings"

computealpha "google.golang.org/api/compute/v0.alpha"
computebeta "google.golang.org/api/compute/v0.beta"

"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
@@ -28,53 +28,43 @@ import (
)

const (
NEGLoadBalancerType = "LOAD_BALANCING"
NEGIPPortNetworkEndpointType = "GCE_VM_IP_PORT"
)

func newNetworkEndpointGroupMetricContext(request string, zone string) *metricContext {
return newGenericMetricContext("networkendpointgroup_", request, unusedMetricLabel, zone, computeAlphaVersion)
return newGenericMetricContext("networkendpointgroup_", request, unusedMetricLabel, zone, computeBetaVersion)
}

func (gce *GCECloud) GetNetworkEndpointGroup(name string, zone string) (*computealpha.NetworkEndpointGroup, error) {
func (gce *GCECloud) GetNetworkEndpointGroup(name string, zone string) (*computebeta.NetworkEndpointGroup, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()

mc := newNetworkEndpointGroupMetricContext("get", zone)
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
return nil, mc.Observe(err)
}
v, err := gce.c.AlphaNetworkEndpointGroups().Get(ctx, meta.ZonalKey(name, zone))
v, err := gce.c.BetaNetworkEndpointGroups().Get(ctx, meta.ZonalKey(name, zone))
return v, mc.Observe(err)
}

func (gce *GCECloud) ListNetworkEndpointGroup(zone string) ([]*computealpha.NetworkEndpointGroup, error) {
func (gce *GCECloud) ListNetworkEndpointGroup(zone string) ([]*computebeta.NetworkEndpointGroup, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()

mc := newNetworkEndpointGroupMetricContext("list", zone)
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
return nil, mc.Observe(err)
}
negs, err := gce.c.AlphaNetworkEndpointGroups().List(ctx, zone, filter.None)
negs, err := gce.c.BetaNetworkEndpointGroups().List(ctx, zone, filter.None)
return negs, mc.Observe(err)
}

// AggregatedListNetworkEndpointGroup returns a map of zone -> endpoint group.
func (gce *GCECloud) AggregatedListNetworkEndpointGroup() (map[string][]*computealpha.NetworkEndpointGroup, error) {
func (gce *GCECloud) AggregatedListNetworkEndpointGroup() (map[string][]*computebeta.NetworkEndpointGroup, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()

mc := newNetworkEndpointGroupMetricContext("aggregated_list", "")
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
return nil, mc.Observe(err)
}
// TODO: filter for the region the cluster is in.
all, err := gce.c.AlphaNetworkEndpointGroups().AggregatedList(ctx, filter.None)
all, err := gce.c.BetaNetworkEndpointGroups().AggregatedList(ctx, filter.None)
if err != nil {
return nil, mc.Observe(err)
}
ret := map[string][]*computealpha.NetworkEndpointGroup{}
ret := map[string][]*computebeta.NetworkEndpointGroup{}
for key, byZone := range all {
// key is "zones/<zone name>"
parts := strings.Split(key, "/")
@@ -87,71 +77,56 @@ func (gce *GCECloud) AggregatedListNetworkEndpointGroup() (map[string][]*compute
return ret, mc.Observe(nil)
}

func (gce *GCECloud) CreateNetworkEndpointGroup(neg *computealpha.NetworkEndpointGroup, zone string) error {
func (gce *GCECloud) CreateNetworkEndpointGroup(neg *computebeta.NetworkEndpointGroup, zone string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()

if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
return err
}
mc := newNetworkEndpointGroupMetricContext("create", zone)
return mc.Observe(gce.c.AlphaNetworkEndpointGroups().Insert(ctx, meta.ZonalKey(neg.Name, zone), neg))
return mc.Observe(gce.c.BetaNetworkEndpointGroups().Insert(ctx, meta.ZonalKey(neg.Name, zone), neg))
}

func (gce *GCECloud) DeleteNetworkEndpointGroup(name string, zone string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()

if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
return err
}
mc := newNetworkEndpointGroupMetricContext("delete", zone)
return mc.Observe(gce.c.AlphaNetworkEndpointGroups().Delete(ctx, meta.ZonalKey(name, zone)))
return mc.Observe(gce.c.BetaNetworkEndpointGroups().Delete(ctx, meta.ZonalKey(name, zone)))
}

func (gce *GCECloud) AttachNetworkEndpoints(name, zone string, endpoints []*computealpha.NetworkEndpoint) error {
func (gce *GCECloud) AttachNetworkEndpoints(name, zone string, endpoints []*computebeta.NetworkEndpoint) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()

mc := newNetworkEndpointGroupMetricContext("attach", zone)
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
return mc.Observe(err)
}
req := &computealpha.NetworkEndpointGroupsAttachEndpointsRequest{
req := &computebeta.NetworkEndpointGroupsAttachEndpointsRequest{
NetworkEndpoints: endpoints,
}
return mc.Observe(gce.c.AlphaNetworkEndpointGroups().AttachNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req))
return mc.Observe(gce.c.BetaNetworkEndpointGroups().AttachNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req))
}

func (gce *GCECloud) DetachNetworkEndpoints(name, zone string, endpoints []*computealpha.NetworkEndpoint) error {
func (gce *GCECloud) DetachNetworkEndpoints(name, zone string, endpoints []*computebeta.NetworkEndpoint) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()

mc := newNetworkEndpointGroupMetricContext("detach", zone)
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
return mc.Observe(err)
}
req := &computealpha.NetworkEndpointGroupsDetachEndpointsRequest{
req := &computebeta.NetworkEndpointGroupsDetachEndpointsRequest{
NetworkEndpoints: endpoints,
}
return mc.Observe(gce.c.AlphaNetworkEndpointGroups().DetachNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req))
return mc.Observe(gce.c.BetaNetworkEndpointGroups().DetachNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req))
}

func (gce *GCECloud) ListNetworkEndpoints(name, zone string, showHealthStatus bool) ([]*computealpha.NetworkEndpointWithHealthStatus, error) {
func (gce *GCECloud) ListNetworkEndpoints(name, zone string, showHealthStatus bool) ([]*computebeta.NetworkEndpointWithHealthStatus, error) {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()

mc := newNetworkEndpointGroupMetricContext("list_networkendpoints", zone)
if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil {
return nil, mc.Observe(err)
}
healthStatus := "SKIP"
if showHealthStatus {
healthStatus = "SHOW"
}
req := &computealpha.NetworkEndpointGroupsListEndpointsRequest{
req := &computebeta.NetworkEndpointGroupsListEndpointsRequest{
HealthStatus: healthStatus,
}
l, err := gce.c.AlphaNetworkEndpointGroups().ListNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req, filter.None)
l, err := gce.c.BetaNetworkEndpointGroups().ListNetworkEndpoints(ctx, meta.ZonalKey(name, zone), req, filter.None)
return l, mc.Observe(err)
}
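The NEG client surface above moves wholesale from the compute v0.alpha types and groups service to v0.beta, while the AlphaFeatureNetworkEndpointGroup gate stays in place. A hypothetical caller against the new beta-typed signatures (the NEG name and zone are placeholders, and the feature gate must still be enabled for the calls to succeed):

```go
package gce

import (
	computebeta "google.golang.org/api/compute/v0.beta"
)

// exampleEnsureNEG is a hypothetical sketch: create a GCE_VM_IP_PORT network
// endpoint group in a zone via the beta-typed methods above, then read it back.
func exampleEnsureNEG(gce *GCECloud, name, zone string) (*computebeta.NetworkEndpointGroup, error) {
	neg := &computebeta.NetworkEndpointGroup{
		Name:                name,
		NetworkEndpointType: NEGIPPortNetworkEndpointType,
	}
	if err := gce.CreateNetworkEndpointGroup(neg, zone); err != nil {
		return nil, err
	}
	return gce.GetNetworkEndpointGroup(name, zone)
}
```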
8
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_targetproxy.go
generated
vendored
@@ -48,11 +48,11 @@ func (gce *GCECloud) CreateTargetHttpProxy(proxy *compute.TargetHttpProxy) error
}

// SetUrlMapForTargetHttpProxy sets the given UrlMap for the given TargetHttpProxy.
func (gce *GCECloud) SetUrlMapForTargetHttpProxy(proxy *compute.TargetHttpProxy, urlMap *compute.UrlMap) error {
func (gce *GCECloud) SetUrlMapForTargetHttpProxy(proxy *compute.TargetHttpProxy, urlMapLink string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()

ref := &compute.UrlMapReference{UrlMap: urlMap.SelfLink}
ref := &compute.UrlMapReference{UrlMap: urlMapLink}
mc := newTargetProxyMetricContext("set_url_map")
return mc.Observe(gce.c.TargetHttpProxies().SetUrlMap(ctx, meta.GlobalKey(proxy.Name), ref))
}
@@ -98,12 +98,12 @@ func (gce *GCECloud) CreateTargetHttpsProxy(proxy *compute.TargetHttpsProxy) err
}

// SetUrlMapForTargetHttpsProxy sets the given UrlMap for the given TargetHttpsProxy.
func (gce *GCECloud) SetUrlMapForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, urlMap *compute.UrlMap) error {
func (gce *GCECloud) SetUrlMapForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, urlMapLink string) error {
ctx, cancel := cloud.ContextWithCallTimeout()
defer cancel()

mc := newTargetProxyMetricContext("set_url_map")
ref := &compute.UrlMapReference{UrlMap: urlMap.SelfLink}
ref := &compute.UrlMapReference{UrlMap: urlMapLink}
return mc.Observe(gce.c.TargetHttpsProxies().SetUrlMap(ctx, meta.GlobalKey(proxy.Name), ref))
}
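With the signature change above, callers hand the URL map's self-link (a string) to the two Set* methods instead of the whole *compute.UrlMap. A minimal hypothetical call site:

```go
package gce

import (
	compute "google.golang.org/api/compute/v1"
)

// exampleSetProxyURLMap is a hypothetical sketch of the new calling convention:
// pass urlMap.SelfLink rather than the UrlMap object itself.
func exampleSetProxyURLMap(gce *GCECloud, proxy *compute.TargetHttpProxy, urlMap *compute.UrlMap) error {
	return gce.SetUrlMapForTargetHttpProxy(proxy, urlMap.SelfLink)
}
```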
17
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_test.go
generated
vendored
@@ -39,6 +39,7 @@ secondary-range-name = my-secondary-range
node-tags = my-node-tag1
node-instance-prefix = my-prefix
multizone = true
regional = true
`
reader := strings.NewReader(s)
config, err := readConfig(reader)
@@ -57,6 +58,7 @@ multizone = true
NodeTags: []string{"my-node-tag1"},
NodeInstancePrefix: "my-prefix",
Multizone: true,
Regional: true,
}}

if !reflect.DeepEqual(expected, config) {
@@ -328,6 +330,7 @@ func TestGenerateCloudConfigs(t *testing.T) {
NodeTags: []string{"node-tag"},
NodeInstancePrefix: "node-prefix",
Multizone: false,
Regional: false,
ApiEndpoint: "",
LocalZone: "us-central1-a",
AlphaFeatures: []string{},
@@ -446,6 +449,20 @@ func TestGenerateCloudConfigs(t *testing.T) {
return v
},
},
{
name: "Regional",
config: func() ConfigGlobal {
v := configBoilerplate
v.Regional = true
return v
},
cloud: func() CloudConfig {
v := cloudBoilerplate
v.Regional = true
v.ManagedZones = nil
return v
},
},
{
name: "Secondary Range Name",
config: func() ConfigGlobal {
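The test changes above thread the new regional flag through the gce.conf fixture and the expected config structs. A hypothetical sketch of parsing such a config, assuming the readConfig helper and the ConfigFile/ConfigGlobal shapes exercised by the test (the [global] section header and the Global.Regional field layout are assumptions based on that fixture):

```go
package gce

import "strings"

// exampleReadRegionalConfig is a hypothetical sketch: decode a minimal gce.conf
// that sets the new regional flag and report the decoded value.
func exampleReadRegionalConfig() (bool, error) {
	s := `[global]
multizone = true
regional = true
`
	config, err := readConfig(strings.NewReader(s))
	if err != nil {
		return false, err
	}
	return config.Global.Regional, nil
}
```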
51
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_tpu.go
generated
vendored
@@ -38,16 +38,14 @@ func newTPUService(client *http.Client) (*tpuService, error) {
return nil, err
}
return &tpuService{
nodesService: tpuapi.NewProjectsLocationsNodesService(s),
operationsService: tpuapi.NewProjectsLocationsOperationsService(s),
projects: tpuapi.NewProjectsService(s),
}, nil
}

// tpuService encapsulates the TPU services on nodes and the operations on the
// nodes.
type tpuService struct {
nodesService *tpuapi.ProjectsLocationsNodesService
operationsService *tpuapi.ProjectsLocationsOperationsService
projects *tpuapi.ProjectsService
}

// CreateTPU creates the Cloud TPU node with the specified name in the
@@ -59,13 +57,13 @@ func (gce *GCECloud) CreateTPU(ctx context.Context, name, zone string, node *tpu

var op *tpuapi.Operation
parent := getTPUParentName(gce.projectID, zone)
op, err = gce.tpuService.nodesService.Create(parent, node).NodeId(name).Do()
op, err = gce.tpuService.projects.Locations.Nodes.Create(parent, node).NodeId(name).Do()
if err != nil {
return nil, err
}
glog.V(2).Infof("Creating Cloud TPU %q in zone %q with operation %q", name, zone, op.Name)

op, err = gce.waitForTPUOp(30*time.Second, 10*time.Minute, op)
op, err = gce.waitForTPUOp(ctx, op)
if err != nil {
return nil, err
}
@@ -92,13 +90,13 @@ func (gce *GCECloud) DeleteTPU(ctx context.Context, name, zone string) error {

var op *tpuapi.Operation
name = getTPUName(gce.projectID, zone, name)
op, err = gce.tpuService.nodesService.Delete(name).Do()
op, err = gce.tpuService.projects.Locations.Nodes.Delete(name).Do()
if err != nil {
return err
}
glog.V(2).Infof("Deleting Cloud TPU %q in zone %q with operation %q", name, zone, op.Name)

op, err = gce.waitForTPUOp(30*time.Second, 10*time.Minute, op)
op, err = gce.waitForTPUOp(ctx, op)
if err != nil {
return err
}
@@ -114,7 +112,7 @@ func (gce *GCECloud) GetTPU(ctx context.Context, name, zone string) (*tpuapi.Nod
mc := newTPUMetricContext("get", zone)

name = getTPUName(gce.projectID, zone, name)
node, err := gce.tpuService.nodesService.Get(name).Do()
node, err := gce.tpuService.projects.Locations.Nodes.Get(name).Do()
if err != nil {
return nil, mc.Observe(err)
}
@@ -126,17 +124,36 @@ func (gce *GCECloud) ListTPUs(ctx context.Context, zone string) ([]*tpuapi.Node,
mc := newTPUMetricContext("list", zone)

parent := getTPUParentName(gce.projectID, zone)
response, err := gce.tpuService.nodesService.List(parent).Do()
response, err := gce.tpuService.projects.Locations.Nodes.List(parent).Do()
if err != nil {
return nil, mc.Observe(err)
}
return response.Nodes, mc.Observe(nil)
}

// waitForTPUOp checks whether the op is done every interval before the timeout
// occurs.
func (gce *GCECloud) waitForTPUOp(interval, timeout time.Duration, op *tpuapi.Operation) (*tpuapi.Operation, error) {
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
// ListLocations returns the zones where Cloud TPUs are available.
func (gce *GCECloud) ListLocations(ctx context.Context) ([]*tpuapi.Location, error) {
mc := newTPUMetricContext("list_locations", "")
parent := getTPUProjectURL(gce.projectID)
response, err := gce.tpuService.projects.Locations.List(parent).Do()
if err != nil {
return nil, mc.Observe(err)
}
return response.Locations, mc.Observe(nil)
}

// waitForTPUOp checks whether the op is done every 30 seconds before the ctx
// is cancelled.
func (gce *GCECloud) waitForTPUOp(ctx context.Context, op *tpuapi.Operation) (*tpuapi.Operation, error) {
if err := wait.PollInfinite(30*time.Second, func() (bool, error) {
// Check if context has been cancelled.
select {
case <-ctx.Done():
glog.V(3).Infof("Context for operation %q has been cancelled: %s", op.Name, ctx.Err())
return true, ctx.Err()
default:
}

glog.V(3).Infof("Waiting for operation %q to complete...", op.Name)

start := time.Now()
@@ -147,7 +164,7 @@ func (gce *GCECloud) waitForTPUOp(interval, timeout time.Duration, op *tpuapi.Op
}

var err error
op, err = gce.tpuService.operationsService.Get(op.Name).Do()
op, err = gce.tpuService.projects.Locations.Operations.Get(op.Name).Do()
if err != nil {
return true, err
}
@@ -180,6 +197,10 @@ func getErrorFromTPUOp(op *tpuapi.Operation) error {
return nil
}

func getTPUProjectURL(project string) string {
return fmt.Sprintf("projects/%s", project)
}

func getTPUParentName(project, zone string) string {
return fmt.Sprintf("projects/%s/locations/%s", project, zone)
}
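The TPU calls above are now routed through the projects service and build their resource paths with getTPUProjectURL and getTPUParentName. An illustrative test of those path formats, not part of the diff and using made-up project and zone names:

```go
package gce

import "testing"

// TestTPUPathHelpersExample illustrates the resource paths used by the TPU
// calls above; the project and zone values are examples only.
func TestTPUPathHelpersExample(t *testing.T) {
	if got, want := getTPUProjectURL("my-project"), "projects/my-project"; got != want {
		t.Errorf("getTPUProjectURL: got %q, want %q", got, want)
	}
	if got, want := getTPUParentName("my-project", "us-central1-b"), "projects/my-project/locations/us-central1-b"; got != want {
		t.Errorf("getTPUParentName: got %q, want %q", got, want)
	}
}
```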
4
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_util.go
generated
vendored
@@ -281,3 +281,7 @@ func typeOfNetwork(network *compute.Network) netType {

return netTypeCustom
}

func getLocationName(project, zoneOrRegion string) string {
return fmt.Sprintf("projects/%s/locations/%s", project, zoneOrRegion)
}
6
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_util_test.go
generated
vendored
@@ -103,11 +103,15 @@ func TestFirewallToGcloudArgs(t *testing.T) {
IPProtocol: "tcp",
Ports: []string{"321", "123-456", "123"},
},
{
IPProtocol: "sctp",
Ports: []string{"321", "123-456", "123"},
},
},
}
got := firewallToGcloudArgs(&firewall, "my-project")

var e = `--description "Last Line of Defense" --allow tcp:123,tcp:123-456,tcp:321,udp:123,udp:123-456,udp:321 --source-ranges 1.1.1.1/20,2.2.2.2/20,3.3.3.3/20 --target-tags band-nodes,jock-nodes --project my-project`
var e = `--description "Last Line of Defense" --allow sctp:123,sctp:123-456,sctp:321,tcp:123,tcp:123-456,tcp:321,udp:123,udp:123-456,udp:321 --source-ranges 1.1.1.1/20,2.2.2.2/20,3.3.3.3/20 --target-tags band-nodes,jock-nodes --project my-project`
if got != e {
t.Errorf("%q does not equal %q", got, e)
}