Bumping k8s dependencies to 1.13
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD (generated, vendored): 38 changes
@@ -40,10 +40,26 @@ go_library(
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure/auth:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/version:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library",

@@ -55,15 +71,6 @@ go_library(
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
],
)

@@ -72,6 +79,7 @@ go_test(
srcs = [
"azure_backoff_test.go",
"azure_cache_test.go",
"azure_instances_test.go",
"azure_loadbalancer_test.go",
"azure_metrics_test.go",
"azure_routes_test.go",

@@ -82,6 +90,7 @@ go_test(
"azure_vmss_cache_test.go",
"azure_vmss_test.go",
"azure_wrap_test.go",
"azure_zones_test.go",
],
embed = [":go_default_library"],
deps = [

@@ -89,15 +98,16 @@ go_test(
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure/auth:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/OWNERS (generated, vendored): 5 changes
@@ -1,16 +1,13 @@
approvers:
- andyzhangx
- brendandburns
- colemickens
- feiskyer
- jdumars
- karataliu
- khenidak
reviewers:
- andyzhangx
- brendandburns
- colemickens
- feiskyer
- jdumars
- justaugustus
- karataliu
- khenidak
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/azure_auth.go (generated, vendored): 21 changes
@@ -44,28 +44,39 @@ type AzureAuthConfig struct {
AADClientCertPassword string `json:"aadClientCertPassword" yaml:"aadClientCertPassword"`
// Use managed service identity for the virtual machine to access Azure ARM APIs
UseManagedIdentityExtension bool `json:"useManagedIdentityExtension" yaml:"useManagedIdentityExtension"`
// UserAssignedIdentityID contains the Client ID of the user assigned MSI which is assigned to the underlying VMs. If empty the user assigned identity is not used.
// More details of the user assigned identity can be found at: https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/overview
// For the user assigned identity specified here to be used, the UseManagedIdentityExtension has to be set to true.
UserAssignedIdentityID string `json:"userAssignedIdentityID" yaml:"userAssignedIdentityID"`
// The ID of the Azure Subscription that the cluster is deployed in
SubscriptionID string `json:"subscriptionId" yaml:"subscriptionId"`
}

// GetServicePrincipalToken creates a new service principal token based on the configuration
func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (*adal.ServicePrincipalToken, error) {
oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID)
if err != nil {
return nil, fmt.Errorf("creating the OAuth config: %v", err)
}

if config.UseManagedIdentityExtension {
glog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
msiEndpoint, err := adal.GetMSIVMEndpoint()
if err != nil {
return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err)
}
if len(config.UserAssignedIdentityID) > 0 {
glog.V(4).Info("azure: using User Assigned MSI ID to retrieve access token")
return adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint,
env.ServiceManagementEndpoint,
config.UserAssignedIdentityID)
}
glog.V(4).Info("azure: using System Assigned MSI to retrieve access token")
return adal.NewServicePrincipalTokenFromMSI(
msiEndpoint,
env.ServiceManagementEndpoint)
}

oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID)
if err != nil {
return nil, fmt.Errorf("creating the OAuth config: %v", err)
}

if len(config.AADClientSecret) > 0 {
glog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token")
return adal.NewServicePrincipalToken(
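The new UserAssignedIdentityID field only takes effect when useManagedIdentityExtension is also set: GetServicePrincipalToken then calls adal.NewServicePrincipalTokenFromMSIWithUserAssignedID instead of adal.NewServicePrincipalTokenFromMSI. A minimal, self-contained sketch of a cloud-config fragment that would drive the new branch (the struct mirrors the json tags shown above; every ID below is a placeholder, not a value from this commit):

package main

import (
	"encoding/json"
	"fmt"
)

// Subset of AzureAuthConfig, mirroring the json tags in the hunk above.
type azureAuthConfig struct {
	UseManagedIdentityExtension bool   `json:"useManagedIdentityExtension"`
	UserAssignedIdentityID      string `json:"userAssignedIdentityID"`
	SubscriptionID              string `json:"subscriptionId"`
}

func main() {
	// Placeholder values; a real cluster reads this from --cloud-config.
	raw := `{
		"useManagedIdentityExtension": true,
		"userAssignedIdentityID": "00000000-0000-0000-0000-000000000000",
		"subscriptionId": "11111111-1111-1111-1111-111111111111"
	}`
	var cfg azureAuthConfig
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	// The user-assigned MSI path is taken only when both fields are set.
	fmt.Println("user-assigned MSI:", cfg.UseManagedIdentityExtension && cfg.UserAssignedIdentityID != "")
}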
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go (generated, vendored): 249 changes
@@ -21,13 +21,23 @@ import (
"io"
"io/ioutil"
"strings"
"sync"
"time"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth"
"k8s.io/kubernetes/pkg/controller"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/version"

"github.com/Azure/go-autorest/autorest"

@@ -52,6 +62,9 @@ const (

loadBalancerSkuBasic = "basic"
loadBalancerSkuStandard = "standard"

externalResourceGroupLabel = "kubernetes.azure.com/resource-group"
managedByAzureLabel = "kubernetes.azure.com/managed"
)

var (

@@ -59,6 +72,9 @@ var (
defaultExcludeMasterFromStandardLB = true
)

// Azure implements PVLabeler.
var _ cloudprovider.PVLabeler = (*Cloud)(nil)

// Config holds the configuration parsed from the --cloud-config flag
// All fields are required unless otherwise specified
type Config struct {

@@ -147,10 +163,34 @@ type Cloud struct {
metadata *InstanceMetadata
vmSet VMSet

// Lock for access to node caches, includes nodeZones, nodeResourceGroups, and unmanagedNodes.
nodeCachesLock sync.Mutex
// nodeZones is a mapping from Zone to a sets.String of Node's names in the Zone
// it is updated by the nodeInformer
nodeZones map[string]sets.String
// nodeResourceGroups holds nodes external resource groups
nodeResourceGroups map[string]string
// unmanagedNodes holds a list of nodes not managed by Azure cloud provider.
unmanagedNodes sets.String
// nodeInformerSynced is for determining if the informer has synced.
nodeInformerSynced cache.InformerSynced

// routeCIDRsLock holds lock for routeCIDRs cache.
routeCIDRsLock sync.Mutex
// routeCIDRs holds cache for route CIDRs.
routeCIDRs map[string]string

// Clients for vmss.
VirtualMachineScaleSetsClient VirtualMachineScaleSetsClient
VirtualMachineScaleSetVMsClient VirtualMachineScaleSetVMsClient

// client for vm sizes list
VirtualMachineSizesClient VirtualMachineSizesClient

kubeClient clientset.Interface
eventBroadcaster record.EventBroadcaster
eventRecorder record.EventRecorder

vmCache *timedCache
lbCache *timedCache
nsgCache *timedCache

@@ -238,8 +278,12 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
rateLimiterWriter: operationPollRateLimiterWrite,
}
az := Cloud{
Config: *config,
Environment: *env,
Config: *config,
Environment: *env,
nodeZones: map[string]sets.String{},
nodeResourceGroups: map[string]string{},
unmanagedNodes: sets.NewString(),
routeCIDRs: map[string]string{},

DisksClient: newAzDisksClient(azClientConfig),
RoutesClient: newAzRoutesClient(azClientConfig),

@@ -251,6 +295,7 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
StorageAccountClient: newAzStorageAccountClient(azClientConfig),
VirtualMachinesClient: newAzVirtualMachinesClient(azClientConfig),
PublicIPAddressesClient: newAzPublicIPAddressesClient(azClientConfig),
VirtualMachineSizesClient: newAzVirtualMachineSizesClient(azClientConfig),
VirtualMachineScaleSetsClient: newAzVirtualMachineScaleSetsClient(azClientConfig),
VirtualMachineScaleSetVMsClient: newAzVirtualMachineScaleSetVMsClient(azClientConfig),
FileClient: &azureFileClient{env: *env},

@@ -277,7 +322,7 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
Duration: time.Duration(az.CloudProviderBackoffDuration) * time.Second,
Jitter: az.CloudProviderBackoffJitter,
}
glog.V(2).Infof("Azure cloudprovider using retry backoff: retries=%d, exponent=%f, duration=%d, jitter=%f",
glog.V(2).Infof("Azure cloudprovider using try backoff: retries=%d, exponent=%f, duration=%d, jitter=%f",
az.CloudProviderBackoffRetries,
az.CloudProviderBackoffExponent,
az.CloudProviderBackoffDuration,

@@ -346,7 +391,12 @@ func parseConfig(configReader io.Reader) (*Config, error) {
}

// Initialize passes a Kubernetes clientBuilder interface to the cloud provider
func (az *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) {}
func (az *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) {
az.kubeClient = clientBuilder.ClientOrDie("azure-cloud-provider")
az.eventBroadcaster = record.NewBroadcaster()
az.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: az.kubeClient.CoreV1().Events("")})
az.eventRecorder = az.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "azure-cloud-provider"})
}

// LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise.
func (az *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {

@@ -424,3 +474,194 @@ func initDiskControllers(az *Cloud) error {

return nil
}

// SetInformers sets informers for Azure cloud provider.
func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) {
glog.Infof("Setting up informers for Azure cloud provider")
nodeInformer := informerFactory.Core().V1().Nodes().Informer()
nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
node := obj.(*v1.Node)
az.updateNodeCaches(nil, node)
},
UpdateFunc: func(prev, obj interface{}) {
prevNode := prev.(*v1.Node)
newNode := obj.(*v1.Node)
if newNode.Labels[kubeletapis.LabelZoneFailureDomain] ==
prevNode.Labels[kubeletapis.LabelZoneFailureDomain] {
return
}
az.updateNodeCaches(prevNode, newNode)
},
DeleteFunc: func(obj interface{}) {
node, isNode := obj.(*v1.Node)
// We can get DeletedFinalStateUnknown instead of *v1.Node here
// and we need to handle that correctly.
if !isNode {
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Received unexpected object: %v", obj)
return
}
node, ok = deletedState.Obj.(*v1.Node)
if !ok {
glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
return
}
}
az.updateNodeCaches(node, nil)
},
})
az.nodeInformerSynced = nodeInformer.HasSynced
}

// updateNodeCaches updates local cache for node's zones and external resource groups.
func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) {
az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()

if prevNode != nil {
// Remove from nodeZones cache.
prevZone, ok := prevNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain]
if ok && az.isAvailabilityZone(prevZone) {
az.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name)
if az.nodeZones[prevZone].Len() == 0 {
az.nodeZones[prevZone] = nil
}
}

// Remove from nodeResourceGroups cache.
_, ok = prevNode.ObjectMeta.Labels[externalResourceGroupLabel]
if ok {
delete(az.nodeResourceGroups, prevNode.ObjectMeta.Name)
}

// Remove from unmanagedNodes cache.
managed, ok := prevNode.ObjectMeta.Labels[managedByAzureLabel]
if ok && managed == "false" {
az.unmanagedNodes.Delete(prevNode.ObjectMeta.Name)
}
}

if newNode != nil {
// Add to nodeZones cache.
newZone, ok := newNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain]
if ok && az.isAvailabilityZone(newZone) {
if az.nodeZones[newZone] == nil {
az.nodeZones[newZone] = sets.NewString()
}
az.nodeZones[newZone].Insert(newNode.ObjectMeta.Name)
}

// Add to nodeResourceGroups cache.
newRG, ok := newNode.ObjectMeta.Labels[externalResourceGroupLabel]
if ok && len(newRG) > 0 {
az.nodeResourceGroups[newNode.ObjectMeta.Name] = newRG
}

// Add to unmanagedNodes cache.
managed, ok := newNode.ObjectMeta.Labels[managedByAzureLabel]
if ok && managed == "false" {
az.unmanagedNodes.Insert(newNode.ObjectMeta.Name)
}
}
}

// GetActiveZones returns all the zones in which k8s nodes are currently running.
func (az *Cloud) GetActiveZones() (sets.String, error) {
if az.nodeInformerSynced == nil {
return nil, fmt.Errorf("Azure cloud provider doesn't have informers set")
}

az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()
if !az.nodeInformerSynced() {
return nil, fmt.Errorf("node informer is not synced when trying to GetActiveZones")
}

zones := sets.NewString()
for zone, nodes := range az.nodeZones {
if len(nodes) > 0 {
zones.Insert(zone)
}
}
return zones, nil
}

// GetLocation returns the location in which k8s cluster is currently running.
func (az *Cloud) GetLocation() string {
return az.Location
}

// GetNodeResourceGroup gets resource group for given node.
func (az *Cloud) GetNodeResourceGroup(nodeName string) (string, error) {
// Kubelet won't set az.nodeInformerSynced, always return configured resourceGroup.
if az.nodeInformerSynced == nil {
return az.ResourceGroup, nil
}

az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()
if !az.nodeInformerSynced() {
return "", fmt.Errorf("node informer is not synced when trying to GetNodeResourceGroup")
}

// Return external resource group if it has been cached.
if cachedRG, ok := az.nodeResourceGroups[nodeName]; ok {
return cachedRG, nil
}

// Return resource group from cloud provider options.
return az.ResourceGroup, nil
}

// GetResourceGroups returns a set of resource groups that all nodes are running on.
func (az *Cloud) GetResourceGroups() (sets.String, error) {
// Kubelet won't set az.nodeInformerSynced, always return configured resourceGroup.
if az.nodeInformerSynced == nil {
return sets.NewString(az.ResourceGroup), nil
}

az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()
if !az.nodeInformerSynced() {
return nil, fmt.Errorf("node informer is not synced when trying to GetResourceGroups")
}

resourceGroups := sets.NewString(az.ResourceGroup)
for _, rg := range az.nodeResourceGroups {
resourceGroups.Insert(rg)
}

return resourceGroups, nil
}

// GetUnmanagedNodes returns a list of nodes not managed by Azure cloud provider (e.g. on-prem nodes).
func (az *Cloud) GetUnmanagedNodes() (sets.String, error) {
// Kubelet won't set az.nodeInformerSynced, always return nil.
if az.nodeInformerSynced == nil {
return nil, nil
}

az.nodeCachesLock.Lock()
defer az.nodeCachesLock.Unlock()
if !az.nodeInformerSynced() {
return nil, fmt.Errorf("node informer is not synced when trying to GetUnmanagedNodes")
}

return sets.NewString(az.unmanagedNodes.List()...), nil
}

// ShouldNodeExcludedFromLoadBalancer returns true if node is unmanaged or in external resource group.
func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(node *v1.Node) bool {
labels := node.ObjectMeta.Labels
if rg, ok := labels[externalResourceGroupLabel]; ok && rg != az.ResourceGroup {
return true
}

if managed, ok := labels[managedByAzureLabel]; ok && managed == "false" {
return true
}

return false
}
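The two labels added above control load-balancer membership without any Azure API call. A small sketch reimplementing the decision of ShouldNodeExcludedFromLoadBalancer on a plain label map (shouldExclude is an illustrative stand-in, not part of the vendored code):

package main

import "fmt"

// Label keys introduced in the const block above.
const (
	externalResourceGroupLabel = "kubernetes.azure.com/resource-group"
	managedByAzureLabel        = "kubernetes.azure.com/managed"
)

// shouldExclude mirrors ShouldNodeExcludedFromLoadBalancer's checks.
func shouldExclude(labels map[string]string, clusterResourceGroup string) bool {
	if rg, ok := labels[externalResourceGroupLabel]; ok && rg != clusterResourceGroup {
		return true // node lives in an external resource group
	}
	if managed, ok := labels[managedByAzureLabel]; ok && managed == "false" {
		return true // node is explicitly marked as not managed by Azure
	}
	return false
}

func main() {
	fmt.Println(shouldExclude(map[string]string{managedByAzureLabel: "false"}, "my-rg"))           // true
	fmt.Println(shouldExclude(map[string]string{externalResourceGroupLabel: "other-rg"}, "my-rg")) // true
	fmt.Println(shouldExclude(map[string]string{}, "my-rg"))                                       // false
}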
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go (generated, vendored): 111 changes
@@ -18,13 +18,15 @@ package azure

import (
"context"
"fmt"
"net/http"

"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest"
"github.com/golang/glog"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/cloudprovider"

@@ -45,6 +47,13 @@ func (az *Cloud) requestBackoff() (resourceRequestBackoff wait.Backoff) {
return resourceRequestBackoff
}

// Event creates a event for the specified object.
func (az *Cloud) Event(obj runtime.Object, eventtype, reason, message string) {
if obj != nil && reason != "" {
az.eventRecorder.Event(obj, eventtype, reason, message)
}
}

// GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry
func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, error) {
var machine compute.VirtualMachine

@@ -69,20 +78,20 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua
}

// VirtualMachineClientListWithRetry invokes az.VirtualMachinesClient.List with exponential backoff retry
func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine, error) {
func (az *Cloud) VirtualMachineClientListWithRetry(resourceGroup string) ([]compute.VirtualMachine, error) {
allNodes := []compute.VirtualMachine{}
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
var retryErr error
ctx, cancel := getContextWithCancel()
defer cancel()
allNodes, retryErr = az.VirtualMachinesClient.List(ctx, az.ResourceGroup)
allNodes, retryErr = az.VirtualMachinesClient.List(ctx, resourceGroup)
if retryErr != nil {
glog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v",
az.ResourceGroup,
resourceGroup,
retryErr)
return false, retryErr
}
glog.V(2).Infof("VirtualMachinesClient.List(%v) - backoff: success", az.ResourceGroup)
glog.V(2).Infof("VirtualMachinesClient.List(%v) - backoff: success", resourceGroup)
return true, nil
})
if err != nil {

@@ -109,14 +118,14 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string,
}

// CreateOrUpdateSGWithRetry invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error {
func (az *Cloud) CreateOrUpdateSGWithRetry(service *v1.Service, sg network.SecurityGroup) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.SecurityGroupsClient.CreateOrUpdate(ctx, az.ResourceGroup, *sg.Name, sg)
glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
done, err := processHTTPRetryResponse(resp, err)
done, err := az.processHTTPRetryResponse(service, "CreateOrUpdateSecurityGroup", resp, err)
if done && err == nil {
// Invalidate the cache right after updating
az.nsgCache.Delete(*sg.Name)

@@ -126,14 +135,14 @@ func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error {
}

// CreateOrUpdateLBWithRetry invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error {
func (az *Cloud) CreateOrUpdateLBWithRetry(service *v1.Service, lb network.LoadBalancer) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.LoadBalancerClient.CreateOrUpdate(ctx, az.ResourceGroup, *lb.Name, lb)
glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
done, err := processHTTPRetryResponse(resp, err)
done, err := az.processHTTPRetryResponse(service, "CreateOrUpdateLoadBalancer", resp, err)
if done && err == nil {
// Invalidate the cache right after updating
az.lbCache.Delete(*lb.Name)

@@ -143,7 +152,7 @@ func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error {
}

// ListLBWithRetry invokes az.LoadBalancerClient.List with exponential backoff retry
func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {
func (az *Cloud) ListLBWithRetry(service *v1.Service) ([]network.LoadBalancer, error) {
var allLBs []network.LoadBalancer

err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {

@@ -153,6 +162,7 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {

allLBs, retryErr = az.LoadBalancerClient.List(ctx, az.ResourceGroup)
if retryErr != nil {
az.Event(service, v1.EventTypeWarning, "ListLoadBalancers", retryErr.Error())
glog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v",
az.ResourceGroup,
retryErr)

@@ -169,7 +179,7 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {
}

// ListPIPWithRetry list the PIP resources in the given resource group
func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAddress, error) {
func (az *Cloud) ListPIPWithRetry(service *v1.Service, pipResourceGroup string) ([]network.PublicIPAddress, error) {
var allPIPs []network.PublicIPAddress

err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {

@@ -179,6 +189,7 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd

allPIPs, retryErr = az.PublicIPAddressesClient.List(ctx, pipResourceGroup)
if retryErr != nil {
az.Event(service, v1.EventTypeWarning, "ListPublicIPs", retryErr.Error())
glog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v",
pipResourceGroup,
retryErr)

@@ -195,48 +206,48 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd
}

// CreateOrUpdatePIPWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdatePIPWithRetry(pipResourceGroup string, pip network.PublicIPAddress) error {
func (az *Cloud) CreateOrUpdatePIPWithRetry(service *v1.Service, pipResourceGroup string, pip network.PublicIPAddress) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, *pip.Name, pip)
glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name)
return processHTTPRetryResponse(resp, err)
return az.processHTTPRetryResponse(service, "CreateOrUpdatePublicIPAddress", resp, err)
})
}

// CreateOrUpdateInterfaceWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error {
func (az *Cloud) CreateOrUpdateInterfaceWithRetry(service *v1.Service, nic network.Interface) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.InterfacesClient.CreateOrUpdate(ctx, az.ResourceGroup, *nic.Name, nic)
glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name)
return processHTTPRetryResponse(resp, err)
return az.processHTTPRetryResponse(service, "CreateOrUpdateInterface", resp, err)
})
}

// DeletePublicIPWithRetry invokes az.PublicIPAddressesClient.Delete with exponential backoff retry
func (az *Cloud) DeletePublicIPWithRetry(pipResourceGroup string, pipName string) error {
func (az *Cloud) DeletePublicIPWithRetry(service *v1.Service, pipResourceGroup string, pipName string) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.PublicIPAddressesClient.Delete(ctx, pipResourceGroup, pipName)
return processHTTPRetryResponse(resp, err)
return az.processHTTPRetryResponse(service, "DeletePublicIPAddress", resp, err)
})
}

// DeleteLBWithRetry invokes az.LoadBalancerClient.Delete with exponential backoff retry
func (az *Cloud) DeleteLBWithRetry(lbName string) error {
func (az *Cloud) DeleteLBWithRetry(service *v1.Service, lbName string) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.LoadBalancerClient.Delete(ctx, az.ResourceGroup, lbName)
done, err := processHTTPRetryResponse(resp, err)
done, err := az.processHTTPRetryResponse(service, "DeleteLoadBalancer", resp, err)
if done && err == nil {
// Invalidate the cache right after deleting
az.lbCache.Delete(lbName)

@@ -252,7 +263,7 @@ func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable
defer cancel()

resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, routeTable)
return processHTTPRetryResponse(resp, err)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}

@@ -264,7 +275,7 @@ func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error {

resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route)
glog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name)
return processHTTPRetryResponse(resp, err)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}

@@ -276,19 +287,19 @@ func (az *Cloud) DeleteRouteWithRetry(routeName string) error {

resp, err := az.RoutesClient.Delete(ctx, az.ResourceGroup, az.RouteTableName, routeName)
glog.V(10).Infof("RoutesClient.Delete(%s): end", az.RouteTableName)
return processHTTPRetryResponse(resp, err)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}

// CreateOrUpdateVMWithRetry invokes az.VirtualMachinesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualMachine) error {
func (az *Cloud) CreateOrUpdateVMWithRetry(resourceGroup, vmName string, newVM compute.VirtualMachine) error {
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
ctx, cancel := getContextWithCancel()
defer cancel()

resp, err := az.VirtualMachinesClient.CreateOrUpdate(ctx, az.ResourceGroup, vmName, newVM)
resp, err := az.VirtualMachinesClient.CreateOrUpdate(ctx, resourceGroup, vmName, newVM)
glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName)
return processHTTPRetryResponse(resp, err)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}

@@ -297,39 +308,12 @@ func (az *Cloud) UpdateVmssVMWithRetry(ctx context.Context, resourceGroupName st
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
resp, err := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters)
glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s,%s): end", VMScaleSetName, instanceID)
return processHTTPRetryResponse(resp, err)
return az.processHTTPRetryResponse(nil, "", resp, err)
})
}

// A wait.ConditionFunc function to deal with common HTTP backoff response conditions
func processRetryResponse(resp autorest.Response, err error) (bool, error) {
if isSuccessHTTPResponse(resp) {
glog.V(2).Infof("processRetryResponse: backoff success, HTTP response=%d", resp.StatusCode)
return true, nil
}
if shouldRetryAPIRequest(resp, err) {
glog.Errorf("processRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
// suppress the error object so that backoff process continues
return false, nil
}
// Fall-through: stop periodic backoff
return true, nil
}

// shouldRetryAPIRequest determines if the response from an HTTP request suggests periodic retry behavior
func shouldRetryAPIRequest(resp autorest.Response, err error) bool {
if err != nil {
return true
}
// HTTP 4xx or 5xx suggests we should retry
if 399 < resp.StatusCode && resp.StatusCode < 600 {
return true
}
return false
}

// isSuccessHTTPResponse determines if the response from an HTTP request suggests success
func isSuccessHTTPResponse(resp autorest.Response) bool {
func isSuccessHTTPResponse(resp http.Response) bool {
// HTTP 2xx suggests a successful response
if 199 < resp.StatusCode && resp.StatusCode < 300 {
return true

@@ -352,16 +336,21 @@ func shouldRetryHTTPRequest(resp *http.Response, err error) bool {
return false
}

func processHTTPRetryResponse(resp *http.Response, err error) (bool, error) {
if resp != nil {
func (az *Cloud) processHTTPRetryResponse(service *v1.Service, reason string, resp *http.Response, err error) (bool, error) {
if resp != nil && isSuccessHTTPResponse(*resp) {
// HTTP 2xx suggests a successful response
if 199 < resp.StatusCode && resp.StatusCode < 300 {
return true, nil
}
return true, nil
}

if shouldRetryHTTPRequest(resp, err) {
glog.Errorf("processHTTPRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
if err != nil {
az.Event(service, v1.EventTypeWarning, reason, err.Error())
glog.Errorf("processHTTPRetryResponse: backoff failure, will retry, err=%v", err)
} else {
az.Event(service, v1.EventTypeWarning, reason, fmt.Sprintf("Azure HTTP response %d", resp.StatusCode))
glog.Errorf("processHTTPRetryResponse: backoff failure, will retry, HTTP response=%d", resp.StatusCode)
}

// suppress the error object so that backoff process continues
return false, nil
}
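The refactor replaces the autorest.Response helpers (processRetryResponse, shouldRetryAPIRequest) with *http.Response equivalents, and processHTTPRetryResponse becomes a method so each failed attempt can also be surfaced as a warning Event on the Service. A runnable sketch of just the status-code classification (isSuccess and shouldRetry are illustrative names mirroring isSuccessHTTPResponse and shouldRetryHTTPRequest above):

package main

import (
	"fmt"
	"net/http"
)

// isSuccess mirrors isSuccessHTTPResponse: HTTP 2xx means success.
func isSuccess(resp *http.Response) bool {
	return resp != nil && 199 < resp.StatusCode && resp.StatusCode < 300
}

// shouldRetry mirrors shouldRetryHTTPRequest: transport errors and
// HTTP 4xx/5xx keep the exponential backoff loop going.
func shouldRetry(resp *http.Response, err error) bool {
	if err != nil {
		return true
	}
	return resp != nil && 399 < resp.StatusCode && resp.StatusCode < 600
}

func main() {
	for _, code := range []int{200, 404, 500} {
		resp := &http.Response{StatusCode: code}
		fmt.Printf("code=%d success=%v retry=%v\n", code, isSuccess(resp), shouldRetry(resp, nil))
	}
}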
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff_test.go (generated, vendored): 27 changes
@@ -20,11 +20,9 @@ import (
"fmt"
"net/http"
"testing"

"github.com/Azure/go-autorest/autorest"
)

func TestShouldRetry(t *testing.T) {
func TestShouldRetryHTTPRequest(t *testing.T) {
tests := []struct {
code int
err error

@@ -54,12 +52,10 @@ func TestShouldRetry(t *testing.T) {
}

for _, test := range tests {
resp := autorest.Response{
Response: &http.Response{
StatusCode: test.code,
},
resp := &http.Response{
StatusCode: test.code,
}
res := shouldRetryAPIRequest(resp, test.err)
res := shouldRetryHTTPRequest(resp, test.err)
if res != test.expected {
t.Errorf("expected: %v, saw: %v", test.expected, res)
}

@@ -86,10 +82,8 @@ func TestIsSuccessResponse(t *testing.T) {
}

for _, test := range tests {
resp := autorest.Response{
Response: &http.Response{
StatusCode: test.code,
},
resp := http.Response{
StatusCode: test.code,
}
res := isSuccessHTTPResponse(resp)
if res != test.expected {

@@ -99,6 +93,7 @@ func TestIsSuccessResponse(t *testing.T) {
}

func TestProcessRetryResponse(t *testing.T) {
az := &Cloud{}
tests := []struct {
code int
err error

@@ -132,12 +127,10 @@ func TestProcessRetryResponse(t *testing.T) {
}

for _, test := range tests {
resp := autorest.Response{
Response: &http.Response{
StatusCode: test.code,
},
resp := &http.Response{
StatusCode: test.code,
}
res, err := processRetryResponse(resp, test.err)
res, err := az.processHTTPRetryResponse(nil, "", resp, test.err)
if res != test.stop {
t.Errorf("expected: %v, saw: %v", test.stop, res)
}

@@ -32,6 +32,7 @@ import (
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/glog"
"github.com/rubiojr/go-vhd/vhd"

kwait "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume"
)

@@ -79,7 +80,7 @@ func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error
// If no storage account is given, search all the storage accounts associated with the resource group and pick one that
// fits storage type and location.
func (c *BlobDiskController) CreateVolume(blobName, accountName, accountType, location string, requestGB int) (string, string, int, error) {
account, key, err := c.common.cloud.ensureStorageAccount(accountName, accountType, location, dedicatedDiskAccountNamePrefix)
account, key, err := c.common.cloud.ensureStorageAccount(accountName, accountType, c.common.resourceGroup, location, dedicatedDiskAccountNamePrefix)
if err != nil {
return "", "", 0, fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
}

@@ -107,7 +108,7 @@ func (c *BlobDiskController) DeleteVolume(diskURI string) error {
if err != nil {
return fmt.Errorf("failed to parse vhd URI %v", err)
}
key, err := c.common.cloud.getStorageAccesskey(accountName)
key, err := c.common.cloud.getStorageAccesskey(accountName, c.common.resourceGroup)
if err != nil {
return fmt.Errorf("no key for storage account %s, err %v", accountName, err)
}

@@ -488,7 +489,9 @@ func (c *BlobDiskController) createStorageAccount(storageAccountName string, sto
glog.V(2).Infof("azureDisk - Creating storage account %s type %s", storageAccountName, string(storageAccountType))

cp := storage.AccountCreateParameters{
Sku: &storage.Sku{Name: storageAccountType},
Sku: &storage.Sku{Name: storageAccountType},
// switch to use StorageV2 as it's recommended according to https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options
Kind: storage.StorageV2,
Tags: map[string]*string{"created-by": to.StringPtr("azure-dd")},
Location: &location}
ctx, cancel := getContextWithCancel()
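createStorageAccount now requests a StorageV2 account kind in addition to the SKU. A minimal sketch building the same AccountCreateParameters outside the controller (the SKU name and location below are arbitrary demo choices, not taken from this commit):

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	location := "eastus" // placeholder region
	cp := storage.AccountCreateParameters{
		Sku: &storage.Sku{Name: storage.StandardLRS},
		// StorageV2 is the kind the updated controller asks for.
		Kind:     storage.StorageV2,
		Tags:     map[string]*string{"created-by": to.StringPtr("azure-dd")},
		Location: &location,
	}
	fmt.Println(*cp.Location, cp.Kind, cp.Sku.Name)
}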
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_client.go (generated, vendored): 179 changes
@@ -22,7 +22,7 @@ import (
"net/http"
"time"

"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/Azure/go-autorest/autorest"

@@ -33,12 +33,12 @@ import (
)

// Helpers for rate limiting error/error channel creation
func createARMRateLimitErr(isWrite bool, opName string) error {
func createRateLimitErr(isWrite bool, opName string) error {
opType := "read"
if isWrite {
opType = "write"
}
return fmt.Errorf("azure - ARM rate limited(%s) for operation:%s", opType, opName)
return fmt.Errorf("azure - cloud provider rate limited(%s) for operation:%s", opType, opName)
}

// VirtualMachinesClient defines needed functions for azure compute.VirtualMachinesClient

@@ -131,6 +131,11 @@ type DisksClient interface {
Get(ctx context.Context, resourceGroupName string, diskName string) (result compute.Disk, err error)
}

// VirtualMachineSizesClient defines needed functions for azure compute.VirtualMachineSizesClient
type VirtualMachineSizesClient interface {
List(ctx context.Context, location string) (result compute.VirtualMachineSizeListResult, err error)
}

// azClientConfig contains all essential information to create an Azure client.
type azClientConfig struct {
subscriptionID string

@@ -170,7 +175,7 @@ func newAzVirtualMachinesClient(config *azClientConfig) *azVirtualMachinesClient
func (az *azVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine) (resp *http.Response, err error) {
// /* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "VMCreateOrUpdate")
err = createRateLimitErr(true, "VMCreateOrUpdate")
return
}

@@ -185,14 +190,14 @@ func (az *azVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceG
return future.Response(), err
}

err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}

func (az *azVirtualMachinesClient) Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "VMGet")
err = createRateLimitErr(false, "VMGet")
return
}

@@ -209,7 +214,7 @@ func (az *azVirtualMachinesClient) Get(ctx context.Context, resourceGroupName st

func (az *azVirtualMachinesClient) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachine, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "VMList")
err = createRateLimitErr(false, "VMList")
return
}

@@ -261,7 +266,7 @@ func newAzInterfacesClient(config *azClientConfig) *azInterfacesClient {
func (az *azInterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkInterfaceName string, parameters network.Interface) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "NiCreateOrUpdate")
err = createRateLimitErr(true, "NiCreateOrUpdate")
return
}

@@ -277,14 +282,14 @@ func (az *azInterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupN
return future.Response(), err
}

err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}

func (az *azInterfacesClient) Get(ctx context.Context, resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "NicGet")
err = createRateLimitErr(false, "NicGet")
return
}

@@ -301,7 +306,7 @@ func (az *azInterfacesClient) Get(ctx context.Context, resourceGroupName string,

func (az *azInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "NicGetVirtualMachineScaleSetNetworkInterface")
err = createRateLimitErr(false, "NicGetVirtualMachineScaleSetNetworkInterface")
return
}

@@ -340,7 +345,7 @@ func newAzLoadBalancersClient(config *azClientConfig) *azLoadBalancersClient {
func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "LBCreateOrUpdate")
err = createRateLimitErr(true, "LBCreateOrUpdate")
return nil, err
}

@@ -356,7 +361,7 @@ func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGro
return future.Response(), err
}

err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}

@@ -364,7 +369,7 @@ func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGro
func (az *azLoadBalancersClient) Delete(ctx context.Context, resourceGroupName string, loadBalancerName string) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "LBDelete")
err = createRateLimitErr(true, "LBDelete")
return nil, err
}

@@ -380,14 +385,14 @@ func (az *azLoadBalancersClient) Delete(ctx context.Context, resourceGroupName s
return future.Response(), err
}

err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}

func (az *azLoadBalancersClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "LBGet")
err = createRateLimitErr(false, "LBGet")
return
}

@@ -404,7 +409,7 @@ func (az *azLoadBalancersClient) Get(ctx context.Context, resourceGroupName stri

func (az *azLoadBalancersClient) List(ctx context.Context, resourceGroupName string) ([]network.LoadBalancer, error) {
if !az.rateLimiterReader.TryAccept() {
err := createARMRateLimitErr(false, "LBList")
err := createRateLimitErr(false, "LBList")
return nil, err
}

@@ -456,7 +461,7 @@ func newAzPublicIPAddressesClient(config *azClientConfig) *azPublicIPAddressesCl
func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "PublicIPCreateOrUpdate")
err = createRateLimitErr(true, "PublicIPCreateOrUpdate")
return nil, err
}

@@ -472,7 +477,7 @@ func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourc
return future.Response(), err
}

err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}

@@ -480,7 +485,7 @@ func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourc
func (az *azPublicIPAddressesClient) Delete(ctx context.Context, resourceGroupName string, publicIPAddressName string) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "PublicIPDelete")
err = createRateLimitErr(true, "PublicIPDelete")
return nil, err
}

@@ -496,14 +501,14 @@ func (az *azPublicIPAddressesClient) Delete(ctx context.Context, resourceGroupNa
return future.Response(), err
}

err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}

func (az *azPublicIPAddressesClient) Get(ctx context.Context, resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "PublicIPGet")
err = createRateLimitErr(false, "PublicIPGet")
return
}

@@ -520,7 +525,7 @@ func (az *azPublicIPAddressesClient) Get(ctx context.Context, resourceGroupName

func (az *azPublicIPAddressesClient) List(ctx context.Context, resourceGroupName string) ([]network.PublicIPAddress, error) {
if !az.rateLimiterReader.TryAccept() {
return nil, createARMRateLimitErr(false, "PublicIPList")
return nil, createRateLimitErr(false, "PublicIPList")
}

glog.V(10).Infof("azPublicIPAddressesClient.List(%q): start", resourceGroupName)

@@ -571,7 +576,7 @@ func newAzSubnetsClient(config *azClientConfig) *azSubnetsClient {
func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "SubnetCreateOrUpdate")
err = createRateLimitErr(true, "SubnetCreateOrUpdate")
return
}

@@ -587,7 +592,7 @@ func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName
return future.Response(), err
}

err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}

@@ -595,7 +600,7 @@ func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName
func (az *azSubnetsClient) Delete(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "SubnetDelete")
err = createRateLimitErr(true, "SubnetDelete")
return
}

@@ -611,14 +616,14 @@ func (az *azSubnetsClient) Delete(ctx context.Context, resourceGroupName string,
return future.Response(), err
}

err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}

func (az *azSubnetsClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "SubnetGet")
err = createRateLimitErr(false, "SubnetGet")
return
}

@@ -635,7 +640,7 @@ func (az *azSubnetsClient) Get(ctx context.Context, resourceGroupName string, vi

func (az *azSubnetsClient) List(ctx context.Context, resourceGroupName string, virtualNetworkName string) ([]network.Subnet, error) {
if !az.rateLimiterReader.TryAccept() {
return nil, createARMRateLimitErr(false, "SubnetList")
return nil, createRateLimitErr(false, "SubnetList")
}

glog.V(10).Infof("azSubnetsClient.List(%q,%q): start", resourceGroupName, virtualNetworkName)

@@ -686,7 +691,7 @@ func newAzSecurityGroupsClient(config *azClientConfig) *azSecurityGroupsClient {
func (az *azSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "NSGCreateOrUpdate")
err = createRateLimitErr(true, "NSGCreateOrUpdate")
return
}

@@ -702,7 +707,7 @@ func (az *azSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGr
return future.Response(), err
}

err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}

@@ -710,7 +715,7 @@ func (az *azSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGr
func (az *azSecurityGroupsClient) Delete(ctx context.Context, resourceGroupName string, networkSecurityGroupName string) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "NSGDelete")
err = createRateLimitErr(true, "NSGDelete")
return
}

@@ -726,14 +731,14 @@ func (az *azSecurityGroupsClient) Delete(ctx context.Context, resourceGroupName
return future.Response(), err
}

err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}

func (az *azSecurityGroupsClient) Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "NSGGet")
err = createRateLimitErr(false, "NSGGet")
return
}

@@ -750,7 +755,7 @@ func (az *azSecurityGroupsClient) Get(ctx context.Context, resourceGroupName str

func (az *azSecurityGroupsClient) List(ctx context.Context, resourceGroupName string) ([]network.SecurityGroup, error) {
if !az.rateLimiterReader.TryAccept() {
return nil, createARMRateLimitErr(false, "NSGList")
return nil, createRateLimitErr(false, "NSGList")
}

glog.V(10).Infof("azSecurityGroupsClient.List(%q): start", resourceGroupName)

@@ -801,7 +806,7 @@ func newAzVirtualMachineScaleSetsClient(config *azClientConfig) *azVirtualMachin
func (az *azVirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "VMSSCreateOrUpdate")
err = createRateLimitErr(true, "VMSSCreateOrUpdate")
return
}

@@ -817,14 +822,14 @@ func (az *azVirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, r
return future.Response(), err
}

err = future.WaitForCompletion(ctx, az.client.Client)
err = future.WaitForCompletionRef(ctx, az.client.Client)
mc.Observe(err)
return future.Response(), err
}

func (az *azVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "VMSSGet")
err = createRateLimitErr(false, "VMSSGet")
return
}

@@ -841,13 +846,13 @@ func (az *azVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGrou

func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachineScaleSet, err error) {
if !az.rateLimiterReader.TryAccept() {
err = createARMRateLimitErr(false, "VMSSList")
err = createRateLimitErr(false, "VMSSList")
return
}

glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q,%q): start", resourceGroupName)
glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): start", resourceGroupName)
defer func() {
glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q,%q): end", resourceGroupName)
glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): end", resourceGroupName)
}()

mc := newMetricContext("vmss", "list", resourceGroupName, az.client.SubscriptionID)

@@ -872,13 +877,13 @@ func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGro
func (az *azVirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (resp *http.Response, err error) {
/* Write rate limiting */
if !az.rateLimiterWriter.TryAccept() {
err = createARMRateLimitErr(true, "VMSSUpdateInstances")
err = createRateLimitErr(true, "VMSSUpdateInstances")
return
}

glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%q): start", resourceGroupName, VMScaleSetName, VMInstanceIDs)
glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): start", resourceGroupName, VMScaleSetName, VMInstanceIDs)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%q): end", resourceGroupName, VMScaleSetName, VMInstanceIDs)
|
||||
glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): end", resourceGroupName, VMScaleSetName, VMInstanceIDs)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("vmss", "update_instances", resourceGroupName, az.client.SubscriptionID)
|
||||
@@ -888,7 +893,7 @@ func (az *azVirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context,
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
@@ -916,7 +921,7 @@ func newAzVirtualMachineScaleSetVMsClient(config *azClientConfig) *azVirtualMach
|
||||
|
||||
func (az *azVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "VMSSGet")
|
||||
err = createRateLimitErr(false, "VMSSGet")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -933,7 +938,7 @@ func (az *azVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGr
|
||||
|
||||
func (az *azVirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "VMSSGetInstanceView")
|
||||
err = createRateLimitErr(false, "VMSSGetInstanceView")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -950,7 +955,7 @@ func (az *azVirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context
|
||||
|
||||
func (az *azVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result []compute.VirtualMachineScaleSetVM, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "VMSSList")
|
||||
err = createRateLimitErr(false, "VMSSList")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -980,7 +985,7 @@ func (az *azVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceG
|
||||
|
||||
func (az *azVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) (resp *http.Response, err error) {
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "VMSSUpdate")
|
||||
err = createRateLimitErr(true, "VMSSUpdate")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -996,7 +1001,7 @@ func (az *azVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourc
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
@@ -1025,7 +1030,7 @@ func newAzRoutesClient(config *azClientConfig) *azRoutesClient {
|
||||
func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "RouteCreateOrUpdate")
|
||||
err = createRateLimitErr(true, "RouteCreateOrUpdate")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1041,7 +1046,7 @@ func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
@@ -1049,7 +1054,7 @@ func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName
|
||||
func (az *azRoutesClient) Delete(ctx context.Context, resourceGroupName string, routeTableName string, routeName string) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "RouteDelete")
|
||||
err = createRateLimitErr(true, "RouteDelete")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1065,7 +1070,7 @@ func (az *azRoutesClient) Delete(ctx context.Context, resourceGroupName string,
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
@@ -1094,7 +1099,7 @@ func newAzRouteTablesClient(config *azClientConfig) *azRouteTablesClient {
|
||||
func (az *azRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, parameters network.RouteTable) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "RouteTableCreateOrUpdate")
|
||||
err = createRateLimitErr(true, "RouteTableCreateOrUpdate")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1110,14 +1115,14 @@ func (az *azRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroup
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
func (az *azRouteTablesClient) Get(ctx context.Context, resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "GetRouteTable")
|
||||
err = createRateLimitErr(false, "GetRouteTable")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1155,7 +1160,7 @@ func newAzStorageAccountClient(config *azClientConfig) *azStorageAccountClient {
|
||||
func (az *azStorageAccountClient) Create(ctx context.Context, resourceGroupName string, accountName string, parameters storage.AccountCreateParameters) (result *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "StorageAccountCreate")
|
||||
err = createRateLimitErr(true, "StorageAccountCreate")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1170,14 +1175,14 @@ func (az *azStorageAccountClient) Create(ctx context.Context, resourceGroupName
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
func (az *azStorageAccountClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "DeleteStorageAccount")
|
||||
err = createRateLimitErr(false, "DeleteStorageAccount")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1194,7 +1199,7 @@ func (az *azStorageAccountClient) Delete(ctx context.Context, resourceGroupName
|
||||
|
||||
func (az *azStorageAccountClient) ListKeys(ctx context.Context, resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "ListStorageAccountKeys")
|
||||
err = createRateLimitErr(false, "ListStorageAccountKeys")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1211,7 +1216,7 @@ func (az *azStorageAccountClient) ListKeys(ctx context.Context, resourceGroupNam
|
||||
|
||||
func (az *azStorageAccountClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result storage.AccountListResult, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "ListStorageAccountsByResourceGroup")
|
||||
err = createRateLimitErr(false, "ListStorageAccountsByResourceGroup")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1228,7 +1233,7 @@ func (az *azStorageAccountClient) ListByResourceGroup(ctx context.Context, resou
|
||||
|
||||
func (az *azStorageAccountClient) GetProperties(ctx context.Context, resourceGroupName string, accountName string) (result storage.Account, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "GetStorageAccount/Properties")
|
||||
err = createRateLimitErr(false, "GetStorageAccount/Properties")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1266,7 +1271,7 @@ func newAzDisksClient(config *azClientConfig) *azDisksClient {
|
||||
func (az *azDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.Disk) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "DiskCreateOrUpdate")
|
||||
err = createRateLimitErr(true, "DiskCreateOrUpdate")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1282,7 +1287,7 @@ func (az *azDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName s
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
@@ -1290,7 +1295,7 @@ func (az *azDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName s
|
||||
func (az *azDisksClient) Delete(ctx context.Context, resourceGroupName string, diskName string) (resp *http.Response, err error) {
|
||||
/* Write rate limiting */
|
||||
if !az.rateLimiterWriter.TryAccept() {
|
||||
err = createARMRateLimitErr(true, "DiskDelete")
|
||||
err = createRateLimitErr(true, "DiskDelete")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1306,14 +1311,14 @@ func (az *azDisksClient) Delete(ctx context.Context, resourceGroupName string, d
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
err = future.WaitForCompletion(ctx, az.client.Client)
|
||||
err = future.WaitForCompletionRef(ctx, az.client.Client)
|
||||
mc.Observe(err)
|
||||
return future.Response(), err
|
||||
}
|
||||
|
||||
func (az *azDisksClient) Get(ctx context.Context, resourceGroupName string, diskName string) (result compute.Disk, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createARMRateLimitErr(false, "GetDisk")
|
||||
err = createRateLimitErr(false, "GetDisk")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1327,3 +1332,41 @@ func (az *azDisksClient) Get(ctx context.Context, resourceGroupName string, disk
|
||||
mc.Observe(err)
|
||||
return
|
||||
}
|
||||
|
||||
// azVirtualMachineSizesClient implements VirtualMachineSizesClient.
|
||||
type azVirtualMachineSizesClient struct {
|
||||
client compute.VirtualMachineSizesClient
|
||||
rateLimiterReader flowcontrol.RateLimiter
|
||||
rateLimiterWriter flowcontrol.RateLimiter
|
||||
}
|
||||
|
||||
func newAzVirtualMachineSizesClient(config *azClientConfig) *azVirtualMachineSizesClient {
|
||||
VirtualMachineSizesClient := compute.NewVirtualMachineSizesClient(config.subscriptionID)
|
||||
VirtualMachineSizesClient.BaseURI = config.resourceManagerEndpoint
|
||||
VirtualMachineSizesClient.Authorizer = autorest.NewBearerAuthorizer(config.servicePrincipalToken)
|
||||
VirtualMachineSizesClient.PollingDelay = 5 * time.Second
|
||||
configureUserAgent(&VirtualMachineSizesClient.Client)
|
||||
|
||||
return &azVirtualMachineSizesClient{
|
||||
rateLimiterReader: config.rateLimiterReader,
|
||||
rateLimiterWriter: config.rateLimiterWriter,
|
||||
client: VirtualMachineSizesClient,
|
||||
}
|
||||
}
|
||||
|
||||
func (az *azVirtualMachineSizesClient) List(ctx context.Context, location string) (result compute.VirtualMachineSizeListResult, err error) {
|
||||
if !az.rateLimiterReader.TryAccept() {
|
||||
err = createRateLimitErr(false, "VMSizesList")
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(10).Infof("azVirtualMachineSizesClient.List(%q): start", location)
|
||||
defer func() {
|
||||
glog.V(10).Infof("azVirtualMachineSizesClient.List(%q): end", location)
|
||||
}()
|
||||
|
||||
mc := newMetricContext("vmsizes", "list", "", az.client.SubscriptionID)
|
||||
result, err = az.client.List(ctx, location)
|
||||
mc.Observe(err)
|
||||
return
|
||||
}
|
||||
|
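Every wrapper in this file follows the same gate-then-observe shape: take a token from the reader or writer rate limiter, fail fast with a rate-limit error if none is available, then run the SDK call and record its outcome on a metric context. The repeated WaitForCompletion to WaitForCompletionRef edits track the rename of that method in go-autorest. Below is a minimal, self-contained sketch of the gate in the same style; the fakeClient type and the error helper's message are illustrative stand-ins, with only the client-go flowcontrol limiter taken from the real dependency set.

```go
package main

import (
	"fmt"

	"k8s.io/client-go/util/flowcontrol"
)

// createRateLimitErr mimics the helper the wrappers call when no token is
// available; the exact upstream message format may differ.
func createRateLimitErr(isWrite bool, opName string) error {
	op := "read"
	if isWrite {
		op = "write"
	}
	return fmt.Errorf("azure - cloud provider rate limited(%s) for operation %q", op, opName)
}

type fakeClient struct {
	rateLimiterReader flowcontrol.RateLimiter
}

// Get follows the wrapped-client shape: try to take a token, fail fast if
// rate limited, then perform the call (elided here).
func (c *fakeClient) Get(name string) (string, error) {
	if !c.rateLimiterReader.TryAccept() {
		return "", createRateLimitErr(false, "ExampleGet")
	}
	// A real wrapper would invoke the Azure SDK here and mc.Observe(err).
	return "value-of-" + name, nil
}

func main() {
	c := &fakeClient{rateLimiterReader: flowcontrol.NewTokenBucketRateLimiter(1, 1)}
	for i := 0; i < 3; i++ {
		v, err := c.Get("disk0")
		fmt.Println(v, err) // later iterations are likely rejected by the limiter
	}
}
```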
@@ -20,7 +20,7 @@ import (
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
	"github.com/golang/glog"

	"k8s.io/apimachinery/pkg/types"
@@ -128,7 +128,7 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N
			(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
			(disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
			// found the disk
			glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
			glog.V(2).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
			return *disk.Lun, nil
		}
	}
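The GetDiskLun hunk only raises a log line's visibility, but the matching rule around it is worth spelling out: a data disk counts as found when its name, its unmanaged VHD URI, or its managed-disk resource ID matches the request. A trimmed sketch of that rule, with dataDisk as an illustrative stand-in for the SDK's compute.DataDisk:

```go
package main

import "fmt"

// dataDisk keeps just the fields the lookup compares.
type dataDisk struct {
	Name          string
	VhdURI        string
	ManagedDiskID string
	Lun           int32
}

// findLun mirrors the matching logic in GetDiskLun: match by name, by
// unmanaged VHD URI, or by managed-disk resource ID.
func findLun(disks []dataDisk, diskName, diskURI string) (int32, error) {
	for _, d := range disks {
		if (diskName != "" && d.Name == diskName) ||
			(diskURI != "" && d.VhdURI == diskURI) ||
			(diskURI != "" && d.ManagedDiskID == diskURI) {
			return d.Lun, nil
		}
	}
	return -1, fmt.Errorf("cannot find LUN for disk %q", diskName)
}

func main() {
	disks := []dataDisk{{Name: "data0", Lun: 0}, {Name: "data1", Lun: 1}}
	lun, err := findLun(disks, "data1", "")
	fmt.Println(lun, err) // 1 <nil>
}
```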
36
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_standard.go
generated
vendored
@@ -20,7 +20,7 @@ import (
	"fmt"
	"strings"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
	"github.com/golang/glog"

	"k8s.io/apimachinery/pkg/types"
@@ -34,6 +34,12 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
		return err
	}

	vmName := mapNodeNameToVMName(nodeName)
	nodeResourceGroup, err := as.GetNodeResourceGroup(vmName)
	if err != nil {
		return err
	}

	disks := *vm.StorageProfile.DataDisks
	if isManagedDisk {
		disks = append(disks,
@@ -67,17 +73,16 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
			},
		},
	}
	vmName := mapNodeNameToVMName(nodeName)
	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", as.resourceGroup, vmName)
	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", nodeResourceGroup, vmName)
	ctx, cancel := getContextWithCancel()
	defer cancel()
	resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, as.resourceGroup, vmName, newVM)
	resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM)
	if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", as.resourceGroup, vmName)
		retryErr := as.CreateOrUpdateVMWithRetry(vmName, newVM)
		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", nodeResourceGroup, vmName)
		retryErr := as.CreateOrUpdateVMWithRetry(nodeResourceGroup, vmName, newVM)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", as.resourceGroup, vmName)
			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", nodeResourceGroup, vmName)
		}
	}
	if err != nil {
@@ -106,6 +111,12 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t
		return nil
	}

	vmName := mapNodeNameToVMName(nodeName)
	nodeResourceGroup, err := as.GetNodeResourceGroup(vmName)
	if err != nil {
		return err
	}

	disks := *vm.StorageProfile.DataDisks
	bFoundDisk := false
	for i, disk := range disks {
@@ -132,17 +143,16 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t
			},
		},
	}
	vmName := mapNodeNameToVMName(nodeName)
	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", as.resourceGroup, vmName)
	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", nodeResourceGroup, vmName)
	ctx, cancel := getContextWithCancel()
	defer cancel()
	resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, as.resourceGroup, vmName, newVM)
	resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM)
	if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", as.resourceGroup, vmName)
		retryErr := as.CreateOrUpdateVMWithRetry(vmName, newVM)
		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", nodeResourceGroup, vmName)
		retryErr := as.CreateOrUpdateVMWithRetry(nodeResourceGroup, vmName, newVM)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", as.ResourceGroup, vmName)
			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", nodeResourceGroup, vmName)
		}
	}
	if err != nil {
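The recurring change in this file is that attach and detach now target the node's own resource group (via GetNodeResourceGroup) rather than the cluster-wide as.resourceGroup, since with k8s 1.13 nodes may live in a different resource group than the one the cloud config names. The helper below only illustrates where such a group can come from; it parses the documented Azure provider-ID format and is not the upstream GetNodeResourceGroup implementation.

```go
package main

import (
	"fmt"
	"strings"
)

// vmProviderIDToResourceGroup extracts the resource group from an Azure
// provider ID of the form:
// azure:///subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/virtualMachines/<name>
func vmProviderIDToResourceGroup(providerID string) (string, error) {
	parts := strings.Split(providerID, "/")
	for i := 0; i < len(parts)-1; i++ {
		if strings.EqualFold(parts[i], "resourceGroups") {
			return parts[i+1], nil
		}
	}
	return "", fmt.Errorf("no resource group in provider ID %q", providerID)
}

func main() {
	id := "azure:///subscriptions/sub/resourceGroups/nodes-rg/providers/Microsoft.Compute/virtualMachines/vm1"
	rg, err := vmProviderIDToResourceGroup(id)
	fmt.Println(rg, err) // nodes-rg <nil>
}
```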
44
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_vmss.go
generated
vendored
@@ -20,7 +20,7 @@ import (
	"fmt"
	"strings"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
	"github.com/golang/glog"

	"k8s.io/apimachinery/pkg/types"
@@ -29,7 +29,13 @@ import (
// AttachDisk attaches a vhd to vm
// the vhd must exist, can be identified by diskName, diskURI, and lun.
func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
	ssName, instanceID, vm, err := ss.getVmssVM(string(nodeName))
	vmName := mapNodeNameToVMName(nodeName)
	ssName, instanceID, vm, err := ss.getVmssVM(vmName)
	if err != nil {
		return err
	}

	nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName)
	if err != nil {
		return err
	}
@@ -65,14 +71,14 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod

	ctx, cancel := getContextWithCancel()
	defer cancel()
	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", ss.resourceGroup, nodeName)
	resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, ss.resourceGroup, ssName, instanceID, vm)
	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", nodeResourceGroup, nodeName)
	resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm)
	if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", ss.resourceGroup, nodeName)
		retryErr := ss.UpdateVmssVMWithRetry(ctx, ss.resourceGroup, ssName, instanceID, vm)
		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", nodeResourceGroup, nodeName)
		retryErr := ss.UpdateVmssVMWithRetry(ctx, nodeResourceGroup, ssName, instanceID, vm)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", ss.resourceGroup, nodeName)
			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", nodeResourceGroup, nodeName)
		}
	}
	if err != nil {
@@ -85,7 +91,8 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
	} else {
		glog.V(4).Info("azureDisk - azure attach succeeded")
		// Invalidate the cache right after updating
		ss.vmssVMCache.Delete(ss.makeVmssVMName(ssName, instanceID))
		key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID))
		ss.vmssVMCache.Delete(key)
	}
	return err
}
@@ -93,7 +100,13 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
// DetachDiskByName detaches a vhd from host
// the vhd can be identified by diskName or diskURI
func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
	ssName, instanceID, vm, err := ss.getVmssVM(string(nodeName))
	vmName := mapNodeNameToVMName(nodeName)
	ssName, instanceID, vm, err := ss.getVmssVM(vmName)
	if err != nil {
		return err
	}

	nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName)
	if err != nil {
		return err
	}
@@ -122,14 +135,14 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
	vm.StorageProfile.DataDisks = &disks
	ctx, cancel := getContextWithCancel()
	defer cancel()
	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", ss.resourceGroup, nodeName)
	resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, ss.resourceGroup, ssName, instanceID, vm)
	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", nodeResourceGroup, nodeName)
	resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm)
	if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", ss.resourceGroup, nodeName)
		retryErr := ss.UpdateVmssVMWithRetry(ctx, ss.resourceGroup, ssName, instanceID, vm)
		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", nodeResourceGroup, nodeName)
		retryErr := ss.UpdateVmssVMWithRetry(ctx, nodeResourceGroup, ssName, instanceID, vm)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", ss.resourceGroup, nodeName)
			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", nodeResourceGroup, nodeName)
		}
	}
	if err != nil {
@@ -137,7 +150,8 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
	} else {
		glog.V(4).Info("azureDisk - azure detach succeeded")
		// Invalidate the cache right after updating
		ss.vmssVMCache.Delete(ss.makeVmssVMName(ssName, instanceID))
		key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID))
		ss.vmssVMCache.Delete(key)
	}

	return err
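The cache-invalidation change above has the same motivation: once VMs may come from several resource groups, a cache keyed on the VM name alone can collide, so the key is now built from the resource group plus the scale-set VM name. A tiny sketch of why the composite key matters; the separator is illustrative, upstream uses its own buildVmssCacheKey helper.

```go
package main

import "fmt"

// buildKey mirrors the shape of the new cache key: resource group plus the
// scale-set VM name, so identically named VMs in different resource groups
// no longer overwrite each other's cache entries.
func buildKey(resourceGroup, vmName string) string {
	return fmt.Sprintf("%s/%s", resourceGroup, vmName)
}

func main() {
	// Keyed on vmName alone, these two entries would have collided.
	fmt.Println(buildKey("rg-east", "vmss000001"))
	fmt.Println(buildKey("rg-west", "vmss000001"))
}
```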
43
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_fakes.go
generated
vendored
@@ -29,7 +29,7 @@ import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
	"github.com/Azure/go-autorest/autorest"
@@ -207,6 +207,13 @@ func (fAPC *fakeAzurePIPClient) List(ctx context.Context, resourceGroupName stri
	return value, nil
}

func (fAPC *fakeAzurePIPClient) setFakeStore(store map[string]map[string]network.PublicIPAddress) {
	fAPC.mutex.Lock()
	defer fAPC.mutex.Unlock()

	fAPC.FakeStore = store
}

type fakeAzureInterfacesClient struct {
	mutex     *sync.Mutex
	FakeStore map[string]map[string]network.Interface
@@ -247,7 +254,24 @@ func (fIC *fakeAzureInterfacesClient) Get(ctx context.Context, resourceGroupName
}

func (fIC *fakeAzureInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) {
	return result, nil
	fIC.mutex.Lock()
	defer fIC.mutex.Unlock()
	if _, ok := fIC.FakeStore[resourceGroupName]; ok {
		if entity, ok := fIC.FakeStore[resourceGroupName][networkInterfaceName]; ok {
			return entity, nil
		}
	}
	return result, autorest.DetailedError{
		StatusCode: http.StatusNotFound,
		Message:    "Not such Interface",
	}
}

func (fIC *fakeAzureInterfacesClient) setFakeStore(store map[string]map[string]network.Interface) {
	fIC.mutex.Lock()
	defer fIC.mutex.Unlock()

	fIC.FakeStore = store
}

type fakeAzureVirtualMachinesClient struct {
@@ -302,6 +326,13 @@ func (fVMC *fakeAzureVirtualMachinesClient) List(ctx context.Context, resourceGr
	return result, nil
}

func (fVMC *fakeAzureVirtualMachinesClient) setFakeStore(store map[string]map[string]compute.VirtualMachine) {
	fVMC.mutex.Lock()
	defer fVMC.mutex.Unlock()

	fVMC.FakeStore = store
}

type fakeAzureSubnetsClient struct {
	mutex     *sync.Mutex
	FakeStore map[string]map[string]network.Subnet
@@ -867,11 +898,11 @@ func (f *fakeVMSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availa
	return nil, fmt.Errorf("unimplemented")
}

func (f *fakeVMSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
func (f *fakeVMSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
	return fmt.Errorf("unimplemented")
}

func (f *fakeVMSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
func (f *fakeVMSet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
	return fmt.Errorf("unimplemented")
}

@@ -886,3 +917,7 @@ func (f *fakeVMSet) DetachDiskByName(diskName, diskURI string, nodeName types.No
func (f *fakeVMSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
	return nil, fmt.Errorf("unimplemented")
}

func (f *fakeVMSet) GetPowerStatusByNodeName(name string) (string, error) {
	return "", fmt.Errorf("unimplemented")
}
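The fake clients share one layout: a two-level map from resource group to resource name, guarded by a mutex, with the new setFakeStore setters letting tests seed it wholesale. A minimal standalone version of that pattern, with a plain string standing in for the SDK struct:

```go
package main

import (
	"fmt"
	"sync"
)

// fakeStore demonstrates the two-level map the fake Azure clients use:
// resource group -> resource name -> object.
type fakeStore struct {
	mu    sync.Mutex
	items map[string]map[string]string
}

// set replaces the whole store, like the setFakeStore helpers above.
func (s *fakeStore) set(store map[string]map[string]string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.items = store
}

// get looks an entry up the same way the fake Get methods do.
func (s *fakeStore) get(rg, name string) (string, bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if byName, ok := s.items[rg]; ok {
		v, ok := byName[name]
		return v, ok
	}
	return "", false
}

func main() {
	s := &fakeStore{}
	s.set(map[string]map[string]string{"rg": {"nic-1": "10.0.0.4"}})
	fmt.Println(s.get("rg", "nic-1")) // 10.0.0.4 true
	fmt.Println(s.get("rg", "nic-2")) //  false
}
```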
@@ -59,6 +59,13 @@ type InstanceMetadata struct {
	baseURL string
}

// ComputeMetadata represents compute information
type ComputeMetadata struct {
	Name   string `json:"name,omitempty"`
	Zone   string `json:"zone,omitempty"`
	VMSize string `json:"vmSize,omitempty"`
}

// NewInstanceMetadata creates an instance of the InstanceMetadata accessor object.
func NewInstanceMetadata() *InstanceMetadata {
	return &InstanceMetadata{
@@ -100,7 +107,7 @@ func (i *InstanceMetadata) queryMetadataBytes(path, format string) ([]byte, erro

	q := req.URL.Query()
	q.Add("format", format)
	q.Add("api-version", "2017-04-02")
	q.Add("api-version", "2017-12-01")
	req.URL.RawQuery = q.Encode()

	resp, err := client.Do(req)
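The hunk above bumps the IMDS api-version; for context, here is a sketch of the request the metadata accessor ends up making. The Metadata header is required by the Azure Instance Metadata Service, and this only returns real data when run on an Azure VM; the function name is illustrative, not the provider's queryMetadataBytes.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
)

// ComputeMetadata mirrors the struct added in the diff above.
type ComputeMetadata struct {
	Name   string `json:"name,omitempty"`
	Zone   string `json:"zone,omitempty"`
	VMSize string `json:"vmSize,omitempty"`
}

func fetchComputeMetadata() (*ComputeMetadata, error) {
	req, err := http.NewRequest("GET", "http://169.254.169.254/metadata/instance/compute", nil)
	if err != nil {
		return nil, err
	}
	// IMDS rejects requests without this header.
	req.Header.Add("Metadata", "true")

	q := req.URL.Query()
	q.Add("format", "json")
	q.Add("api-version", "2017-12-01") // the version the diff switches to
	req.URL.RawQuery = q.Encode()

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	out := &ComputeMetadata{}
	if err := json.Unmarshal(data, out); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	md, err := fetchComputeMetadata()
	fmt.Println(md, err)
}
```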
146
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go
generated
vendored
@@ -28,8 +28,24 @@ import (
	"k8s.io/apimachinery/pkg/types"
)

const (
	vmPowerStatePrefix      = "PowerState/"
	vmPowerStateStopped     = "stopped"
	vmPowerStateDeallocated = "deallocated"
)

// NodeAddresses returns the addresses of the specified instance.
func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
	// Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them.
	unmanaged, err := az.IsNodeUnmanaged(string(name))
	if err != nil {
		return nil, err
	}
	if unmanaged {
		glog.V(4).Infof("NodeAddresses: omitting unmanaged node %q", name)
		return nil, nil
	}

	addressGetter := func(nodeName types.NodeName) ([]v1.NodeAddress, error) {
		ip, publicIP, err := az.GetIPForMachineWithRetry(nodeName)
		if err != nil {
@@ -51,7 +67,12 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
	}

	if az.UseInstanceMetadata {
		isLocalInstance, err := az.isCurrentInstance(name)
		computeMetadata, err := az.getComputeMetadata()
		if err != nil {
			return nil, err
		}

		isLocalInstance, err := az.isCurrentInstance(name, computeMetadata.Name)
		if err != nil {
			return nil, err
		}
@@ -66,6 +87,15 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
			if err != nil {
				return nil, err
			}

			// Fall back to ARM API if the address is empty string.
			// TODO: this is a workaround because IMDS is not stable enough.
			// It should be removed after IMDS fixing the issue.
			if strings.TrimSpace(ipAddress.PrivateIP) == "" {
				return addressGetter(name)
			}

			// Use ip address got from instance metadata.
			addresses := []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: ipAddress.PrivateIP},
				{Type: v1.NodeHostName, Address: string(name)},
@@ -87,6 +117,12 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
	// Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them.
	if az.IsNodeUnmanagedByProviderID(providerID) {
		glog.V(4).Infof("NodeAddressesByProviderID: omitting unmanaged node %q", providerID)
		return nil, nil
	}

	name, err := az.vmSet.GetNodeNameByProviderID(providerID)
	if err != nil {
		return nil, err
@@ -98,6 +134,12 @@ func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID strin
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
	// Returns true for unmanaged nodes because azure cloud provider always assumes them exists.
	if az.IsNodeUnmanagedByProviderID(providerID) {
		glog.V(4).Infof("InstanceExistsByProviderID: assuming unmanaged node %q exists", providerID)
		return true, nil
	}

	name, err := az.vmSet.GetNodeNameByProviderID(providerID)
	if err != nil {
		return false, err
@@ -116,35 +158,67 @@ func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID stri

// InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes
func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
	return false, cloudprovider.NotImplemented
}

func (az *Cloud) isCurrentInstance(name types.NodeName) (bool, error) {
	nodeName := mapNodeNameToVMName(name)
	metadataName, err := az.metadata.Text("instance/compute/name")
	nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID)
	if err != nil {
		return false, err
	}

	powerStatus, err := az.vmSet.GetPowerStatusByNodeName(string(nodeName))
	if err != nil {
		return false, err
	}
	glog.V(5).Infof("InstanceShutdownByProviderID gets power status %q for node %q", powerStatus, nodeName)

	return strings.ToLower(powerStatus) == vmPowerStateStopped || strings.ToLower(powerStatus) == vmPowerStateDeallocated, nil
}

// getComputeMetadata gets compute information from instance metadata.
func (az *Cloud) getComputeMetadata() (*ComputeMetadata, error) {
	computeInfo := ComputeMetadata{}
	err := az.metadata.Object(computeMetadataURI, &computeInfo)
	if err != nil {
		return nil, err
	}

	return &computeInfo, nil
}

func (az *Cloud) isCurrentInstance(name types.NodeName, metadataVMName string) (bool, error) {
	var err error
	nodeName := mapNodeNameToVMName(name)
	if az.VMType == vmTypeVMSS {
		// VMSS vmName is not same with hostname, use hostname instead.
		metadataName, err = os.Hostname()
		metadataVMName, err = os.Hostname()
		if err != nil {
			return false, err
		}
	}

	metadataName = strings.ToLower(metadataName)
	return (metadataName == nodeName), err
	metadataVMName = strings.ToLower(metadataVMName)
	return (metadataVMName == nodeName), err
}
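The new InstanceShutdownByProviderID treats only stopped and deallocated VMs as safe for volume detach. A standalone sketch of the two steps involved: deriving a power state from instance-view status codes such as "PowerState/deallocated", then applying the same rule; the helper names here are illustrative.

```go
package main

import (
	"fmt"
	"strings"
)

const (
	vmPowerStatePrefix      = "PowerState/"
	vmPowerStateStopped     = "stopped"
	vmPowerStateDeallocated = "deallocated"
)

// statusToPowerState finds the PowerState/* code among a VM's instance-view
// statuses and strips the prefix.
func statusToPowerState(codes []string) string {
	for _, c := range codes {
		if strings.HasPrefix(c, vmPowerStatePrefix) {
			return strings.TrimPrefix(c, vmPowerStatePrefix)
		}
	}
	return ""
}

// isShutdown applies the rule from InstanceShutdownByProviderID: only
// stopped and deallocated states count as shut down.
func isShutdown(powerStatus string) bool {
	s := strings.ToLower(powerStatus)
	return s == vmPowerStateStopped || s == vmPowerStateDeallocated
}

func main() {
	codes := []string{"ProvisioningState/succeeded", "PowerState/Deallocated"}
	state := statusToPowerState(codes)
	fmt.Println(state, isShutdown(state)) // Deallocated true
}
```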
// InstanceID returns the cloud provider ID of the specified instance.
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
	nodeName := mapNodeNameToVMName(name)
	unmanaged, err := az.IsNodeUnmanaged(nodeName)
	if err != nil {
		return "", err
	}
	if unmanaged {
		// InstanceID is same with nodeName for unmanaged nodes.
		glog.V(4).Infof("InstanceID: getting ID %q for unmanaged node %q", name, name)
		return nodeName, nil
	}

	if az.UseInstanceMetadata {
		isLocalInstance, err := az.isCurrentInstance(name)
		computeMetadata, err := az.getComputeMetadata()
		if err != nil {
			return "", err
		}

		isLocalInstance, err := az.isCurrentInstance(name, computeMetadata.Name)
		if err != nil {
			return "", err
		}
@@ -154,26 +228,28 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
			return az.vmSet.GetInstanceIDByNodeName(nodeName)
		}

		// Compose instanceID based on nodeName for standard instance.
		if az.VMType == vmTypeStandard {
			return az.getStandardMachineID(nodeName), nil
		}

		// Get scale set name and instanceID from vmName for vmss.
		metadataName, err := az.metadata.Text("instance/compute/name")
		// Get resource group name.
		resourceGroup, err := az.metadata.Text("instance/compute/resourceGroupName")
		if err != nil {
			return "", err
		}
		ssName, instanceID, err := extractVmssVMName(metadataName)

		// Compose instanceID based on nodeName for standard instance.
		if az.VMType == vmTypeStandard {
			return az.getStandardMachineID(resourceGroup, nodeName), nil
		}

		// Get scale set name and instanceID from vmName for vmss.
		ssName, instanceID, err := extractVmssVMName(computeMetadata.Name)
		if err != nil {
			if err == ErrorNotVmssInstance {
				// Compose machineID for standard Node.
				return az.getStandardMachineID(nodeName), nil
				return az.getStandardMachineID(resourceGroup, nodeName), nil
			}
			return "", err
		}
		// Compose instanceID based on ssName and instanceID for vmss instance.
		return az.getVmssMachineID(ssName, instanceID), nil
		return az.getVmssMachineID(resourceGroup, ssName, instanceID), nil
	}

	return az.vmSet.GetInstanceIDByNodeName(nodeName)
@@ -183,6 +259,12 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
// This method will not be called from the node that is requesting this ID. i.e. metadata service
// and other local methods cannot be used here
func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
	// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
	if az.IsNodeUnmanagedByProviderID(providerID) {
		glog.V(4).Infof("InstanceTypeByProviderID: omitting unmanaged node %q", providerID)
		return "", nil
	}

	name, err := az.vmSet.GetNodeNameByProviderID(providerID)
	if err != nil {
		return "", err
@@ -196,15 +278,29 @@ func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string
// (Implementer Note): This is used by kubelet. Kubelet will label the node. Real log from kubelet:
// Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value]
func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, error) {
	// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
	unmanaged, err := az.IsNodeUnmanaged(string(name))
	if err != nil {
		return "", err
	}
	if unmanaged {
		glog.V(4).Infof("InstanceType: omitting unmanaged node %q", name)
		return "", nil
	}

	if az.UseInstanceMetadata {
		isLocalInstance, err := az.isCurrentInstance(name)
		computeMetadata, err := az.getComputeMetadata()
		if err != nil {
			return "", err
		}

		isLocalInstance, err := az.isCurrentInstance(name, computeMetadata.Name)
		if err != nil {
			return "", err
		}
		if isLocalInstance {
			machineType, err := az.metadata.Text("instance/compute/vmSize")
			if err == nil {
				return machineType, nil
			if computeMetadata.VMSize != "" {
				return computeMetadata.VMSize, nil
			}
		}
	}
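InstanceID now composes ARM resource IDs with the resource group read from instance metadata. The formats below match the IDs visible in the test file that follows; subscription handling is reduced to a plain parameter for this sketch, whereas the upstream helpers take it from the cloud config.

```go
package main

import "fmt"

// standardMachineID sketches the ID format implied by getStandardMachineID.
func standardMachineID(subscription, resourceGroup, vmName string) string {
	return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s",
		subscription, resourceGroup, vmName)
}

// vmssMachineID sketches the ID format implied by getVmssMachineID.
func vmssMachineID(subscription, resourceGroup, ssName, instanceID string) string {
	return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s",
		subscription, resourceGroup, ssName, instanceID)
}

func main() {
	fmt.Println(standardMachineID("sub", "rg", "vm1"))
	fmt.Println(vmssMachineID("sub", "rg", "agents", "3"))
}
```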
216
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances_test.go
generated
vendored
Normal file
@@ -0,0 +1,216 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"testing"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
	"k8s.io/apimachinery/pkg/types"
)

// setTestVirtualMachines sets test virtual machine with powerstate.
func setTestVirtualMachines(c *Cloud, vmList map[string]string) {
	virtualMachineClient := c.VirtualMachinesClient.(*fakeAzureVirtualMachinesClient)
	store := map[string]map[string]compute.VirtualMachine{
		"rg": make(map[string]compute.VirtualMachine),
	}

	for nodeName, powerState := range vmList {
		instanceID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/%s", nodeName)
		vm := compute.VirtualMachine{
			Name:     &nodeName,
			ID:       &instanceID,
			Location: &c.Location,
		}
		if powerState != "" {
			status := []compute.InstanceViewStatus{
				{
					Code: to.StringPtr(powerState),
				},
				{
					Code: to.StringPtr("ProvisioningState/succeeded"),
				},
			}
			vm.VirtualMachineProperties = &compute.VirtualMachineProperties{
				InstanceView: &compute.VirtualMachineInstanceView{
					Statuses: &status,
				},
			}
		}

		store["rg"][nodeName] = vm
	}

	virtualMachineClient.setFakeStore(store)
}

func TestInstanceID(t *testing.T) {
	cloud := getTestCloud()
	cloud.metadata = &InstanceMetadata{}

	testcases := []struct {
		name         string
		vmList       []string
		nodeName     string
		metadataName string
		expected     string
		expectError  bool
	}{
		{
			name:         "InstanceID should get instanceID if node's name are equal to metadataName",
			vmList:       []string{"vm1"},
			nodeName:     "vm1",
			metadataName: "vm1",
			expected:     "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1",
		},
		{
			name:         "InstanceID should get instanceID from Azure API if node is not local instance",
			vmList:       []string{"vm2"},
			nodeName:     "vm2",
			metadataName: "vm1",
			expected:     "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm2",
		},
		{
			name:        "InstanceID should report error if VM doesn't exist",
			vmList:      []string{"vm1"},
			nodeName:    "vm3",
			expectError: true,
		},
	}

	for _, test := range testcases {
		listener, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			t.Errorf("Test [%s] unexpected error: %v", test.name, err)
		}

		mux := http.NewServeMux()
		mux.Handle("/instance/compute", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintf(w, fmt.Sprintf("{\"name\":\"%s\"}", test.metadataName))
		}))
		go func() {
			http.Serve(listener, mux)
		}()
		defer listener.Close()

		cloud.metadata.baseURL = "http://" + listener.Addr().String() + "/"
		vmListWithPowerState := make(map[string]string)
		for _, vm := range test.vmList {
			vmListWithPowerState[vm] = ""
		}
		setTestVirtualMachines(cloud, vmListWithPowerState)
		instanceID, err := cloud.InstanceID(context.Background(), types.NodeName(test.nodeName))
		if test.expectError {
			if err == nil {
				t.Errorf("Test [%s] unexpected nil err", test.name)
			}
		} else {
			if err != nil {
				t.Errorf("Test [%s] unexpected error: %v", test.name, err)
			}
		}

		if instanceID != test.expected {
			t.Errorf("Test [%s] unexpected instanceID: %s, expected %q", test.name, instanceID, test.expected)
		}
	}
}

func TestInstanceShutdownByProviderID(t *testing.T) {
	testcases := []struct {
		name        string
		vmList      map[string]string
		nodeName    string
		expected    bool
		expectError bool
	}{
		{
			name:     "InstanceShutdownByProviderID should return false if the vm is in PowerState/Running status",
			vmList:   map[string]string{"vm1": "PowerState/Running"},
			nodeName: "vm1",
			expected: false,
		},
		{
			name:     "InstanceShutdownByProviderID should return true if the vm is in PowerState/Deallocated status",
			vmList:   map[string]string{"vm2": "PowerState/Deallocated"},
			nodeName: "vm2",
			expected: true,
		},
		{
			name:     "InstanceShutdownByProviderID should return false if the vm is in PowerState/Deallocating status",
			vmList:   map[string]string{"vm3": "PowerState/Deallocating"},
			nodeName: "vm3",
			expected: false,
		},
		{
			name:     "InstanceShutdownByProviderID should return false if the vm is in PowerState/Starting status",
			vmList:   map[string]string{"vm4": "PowerState/Starting"},
			nodeName: "vm4",
			expected: false,
		},
		{
			name:     "InstanceShutdownByProviderID should return true if the vm is in PowerState/Stopped status",
			vmList:   map[string]string{"vm5": "PowerState/Stopped"},
			nodeName: "vm5",
			expected: true,
		},
		{
			name:     "InstanceShutdownByProviderID should return false if the vm is in PowerState/Stopping status",
			vmList:   map[string]string{"vm6": "PowerState/Stopping"},
			nodeName: "vm6",
			expected: false,
		},
		{
			name:     "InstanceShutdownByProviderID should return false if the vm is in PowerState/Unknown status",
			vmList:   map[string]string{"vm7": "PowerState/Unknown"},
			nodeName: "vm7",
			expected: false,
		},
		{
			name:        "InstanceShutdownByProviderID should report error if VM doesn't exist",
			vmList:      map[string]string{"vm1": "PowerState/running"},
			nodeName:    "vm8",
			expectError: true,
		},
	}

	for _, test := range testcases {
		cloud := getTestCloud()
		setTestVirtualMachines(cloud, test.vmList)
		providerID := "azure://" + cloud.getStandardMachineID("rg", test.nodeName)
		hasShutdown, err := cloud.InstanceShutdownByProviderID(context.Background(), providerID)
		if test.expectError {
			if err == nil {
				t.Errorf("Test [%s] unexpected nil err", test.name)
			}
		} else {
			if err != nil {
				t.Errorf("Test [%s] unexpected error: %v", test.name, err)
			}
		}

		if hasShutdown != test.expected {
			t.Errorf("Test [%s] unexpected hasShutdown: %v, expected %v", test.name, hasShutdown, test.expected)
		}
	}
}
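TestInstanceID fakes the metadata endpoint with a hand-rolled listener plus http.Serve; net/http/httptest offers an equivalent, slightly tidier way to get the same behavior, sketched here as an alternative rather than a suggested upstream change.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Serve the same fake payload the test writes by hand.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, `{"name":"vm1"}`)
	}))
	defer ts.Close()

	// ts.URL plays the role of cloud.metadata.baseURL in the test.
	resp, err := http.Get(ts.URL + "/instance/compute")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // 200 {"name":"vm1"}
}
```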
383
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
generated
vendored
@@ -27,6 +27,7 @@ import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	serviceapi "k8s.io/kubernetes/pkg/api/v1/service"
	"k8s.io/kubernetes/pkg/cloudprovider"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
	"github.com/Azure/go-autorest/autorest/to"
@@ -74,6 +75,14 @@ const (
	// ServiceAnnotationAllowedServiceTag is the annotation used on the service
	// to specify a list of allowed service tags separated by comma
	ServiceAnnotationAllowedServiceTag = "service.beta.kubernetes.io/azure-allowed-service-tags"

	// ServiceAnnotationLoadBalancerIdleTimeout is the annotation used on the service
	// to specify the idle timeout for connections on the load balancer in minutes.
	ServiceAnnotationLoadBalancerIdleTimeout = "service.beta.kubernetes.io/azure-load-balancer-tcp-idle-timeout"

	// ServiceAnnotationLoadBalancerMixedProtocols is the annotation used on the service
	// to create both TCP and UDP protocols when creating load balancer rules.
	ServiceAnnotationLoadBalancerMixedProtocols = "service.beta.kubernetes.io/azure-load-balancer-mixed-protocols"
)

var (
@@ -112,14 +121,6 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser
	// Here we'll firstly ensure service do not lie in the opposite LB.
	serviceName := getServiceName(service)
	glog.V(5).Infof("ensureloadbalancer(%s): START clusterName=%q", serviceName, clusterName)
	flippedService := flipServiceInternalAnnotation(service)
	if _, err := az.reconcileLoadBalancer(clusterName, flippedService, nil, false /* wantLb */); err != nil {
		return nil, err
	}

	if _, err := az.reconcilePublicIP(clusterName, service, true /* wantLb */); err != nil {
		return nil, err
	}

	lb, err := az.reconcileLoadBalancer(clusterName, service, nodes, true /* wantLb */)
	if err != nil {
@@ -140,6 +141,16 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser
		return nil, err
	}

	updateService := updateServiceLoadBalancerIP(service, to.String(serviceIP))
	flippedService := flipServiceInternalAnnotation(updateService)
	if _, err := az.reconcileLoadBalancer(clusterName, flippedService, nil, false /* wantLb */); err != nil {
		return nil, err
	}

	if _, err := az.reconcilePublicIP(clusterName, updateService, true /* wantLb */); err != nil {
		return nil, err
	}

	return lbStatus, nil
}
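The EnsureLoadBalancer reshuffle above moves the cleanup of the flipped (internal versus external) load balancer and the public-IP reconcile to after the main reconcile, so both run with the service IP discovered from the freshly reconciled LB. A toy sketch of that ordering; all names are illustrative stand-ins and the real functions take richer types.

```go
package main

import "fmt"

// ensure mimics the reordered flow: reconcile the desired LB first, learn
// the service IP from it, then clean up the opposite-flavor LB and the
// public IP using a service copy that carries that IP.
func ensure(service string, internal bool) {
	ip := reconcileLoadBalancer(service, internal, true /* wantLb */)
	updated := service + " (lb ip " + ip + ")"
	reconcileLoadBalancer(updated, !internal, false /* wantLb */)
	reconcilePublicIP(updated)
}

func reconcileLoadBalancer(service string, internal, wantLb bool) string {
	fmt.Printf("reconcile LB: service=%q internal=%v wantLb=%v\n", service, internal, wantLb)
	if wantLb {
		return "10.240.0.4" // stand-in for the discovered frontend IP
	}
	return ""
}

func reconcilePublicIP(service string) {
	fmt.Printf("reconcile PIP: service=%q\n", service)
}

func main() {
	ensure("default/frontend", false)
}
```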
@@ -182,6 +193,12 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetLoadBalancerName returns the LoadBalancer name.
|
||||
func (az *Cloud) GetLoadBalancerName(ctx context.Context, clusterName string, service *v1.Service) string {
|
||||
// TODO: replace DefaultLoadBalancerName to generate more meaningful loadbalancer names.
|
||||
return cloudprovider.DefaultLoadBalancerName(service)
|
||||
}
|
||||
|
||||
// getServiceLoadBalancer gets the loadbalancer for the service if it already exists.
|
||||
// If wantLb is TRUE then -it selects a new load balancer.
|
||||
// In case the selected load balancer does not exist it returns network.LoadBalancer struct
|
||||
@@ -191,9 +208,9 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
|
||||
isInternal := requiresInternalLoadBalancer(service)
|
||||
var defaultLB *network.LoadBalancer
|
||||
primaryVMSetName := az.vmSet.GetPrimaryVMSetName()
|
||||
defaultLBName := az.getLoadBalancerName(clusterName, primaryVMSetName, isInternal)
|
||||
defaultLBName := az.getAzureLoadBalancerName(clusterName, primaryVMSetName, isInternal)
|
||||
|
||||
existingLBs, err := az.ListLBWithRetry()
|
||||
existingLBs, err := az.ListLBWithRetry(service)
|
||||
if err != nil {
|
||||
return nil, nil, false, err
|
||||
}
|
||||
@@ -213,7 +230,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
return nil, nil, false, err
}
if status == nil {
// service is not om this load balancer
// service is not on this load balancer
continue
}

@@ -262,7 +279,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) {
isInternal := requiresInternalLoadBalancer(service)
serviceName := getServiceName(service)
glog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%s) - start", serviceName, isInternal)
glog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%v) - start", serviceName, isInternal)
vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes)
if err != nil {
glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)
@@ -276,7 +293,7 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi
}
selectedLBRuleCount := math.MaxInt32
for _, currASName := range *vmSetNames {
currLBName := az.getLoadBalancerName(clusterName, currASName, isInternal)
currLBName := az.getAzureLoadBalancerName(clusterName, currASName, isInternal)
lb, exists := mapExistingLBs[currLBName]
if !exists {
// select this LB as this is a new LB and will have minimum rules
@@ -326,7 +343,7 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L
return nil, nil
}
isInternal := requiresInternalLoadBalancer(service)
lbFrontendIPConfigName := getFrontendIPConfigName(service, subnet(service))
lbFrontendIPConfigName := az.getFrontendIPConfigName(service, subnet(service))
serviceName := getServiceName(service)
for _, ipConfiguration := range *lb.FrontendIPConfigurations {
if lbFrontendIPConfigName == *ipConfiguration.Name {
@@ -354,8 +371,8 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L
}
}

glog.V(2).Infof("getServiceLoadBalancerStatus gets ingress IP %q from frontendIPConfiguration %q for service %q", *lbIP, lbFrontendIPConfigName, serviceName)
return &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: *lbIP}}}, nil
glog.V(2).Infof("getServiceLoadBalancerStatus gets ingress IP %q from frontendIPConfiguration %q for service %q", to.String(lbIP), lbFrontendIPConfigName, serviceName)
return &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: to.String(lbIP)}}}, nil
}
}
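
The change from raw pointer dereferences (*lbIP) to to.String(lbIP) above is about nil-safety: the go-autorest "to" helpers return the zero value for a nil pointer instead of panicking. A minimal runnable sketch:

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	var ip *string // nil: SDK response fields are often unset
	fmt.Printf("%q\n", to.String(ip)) // "" rather than a nil-dereference panic
	fmt.Printf("%q\n", to.String(to.StringPtr("10.0.0.4"))) // "10.0.0.4"
}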

@@ -365,12 +382,12 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L
func (az *Cloud) determinePublicIPName(clusterName string, service *v1.Service) (string, error) {
loadBalancerIP := service.Spec.LoadBalancerIP
if len(loadBalancerIP) == 0 {
return getPublicIPName(clusterName, service), nil
return az.getPublicIPName(clusterName, service), nil
}

pipResourceGroup := az.getPublicIPAddressResourceGroup(service)

pips, err := az.ListPIPWithRetry(pipResourceGroup)
pips, err := az.ListPIPWithRetry(service, pipResourceGroup)
if err != nil {
return "", err
}
@@ -399,6 +416,14 @@ func flipServiceInternalAnnotation(service *v1.Service) *v1.Service {
return copyService
}

func updateServiceLoadBalancerIP(service *v1.Service, serviceIP string) *v1.Service {
copyService := service.DeepCopy()
if len(serviceIP) > 0 && copyService != nil {
copyService.Spec.LoadBalancerIP = serviceIP
}
return copyService
}

func (az *Cloud) findServiceIPAddress(ctx context.Context, clusterName string, service *v1.Service, isInternalLb bool) (string, error) {
if len(service.Spec.LoadBalancerIP) > 0 {
return service.Spec.LoadBalancerIP, nil
@@ -450,7 +475,7 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai

glog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name)
glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): start", pipResourceGroup, *pip.Name)
err = az.CreateOrUpdatePIPWithRetry(pipResourceGroup, pip)
err = az.CreateOrUpdatePIPWithRetry(service, pipResourceGroup, pip)
if err != nil {
glog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - creating", serviceName, *pip.Name)
return nil, err
@@ -463,10 +488,81 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
if err != nil {
return nil, err
}

return &pip, nil
}

func getIdleTimeout(s *v1.Service) (*int32, error) {
const (
min = 4
max = 30
)

val, ok := s.Annotations[ServiceAnnotationLoadBalancerIdleTimeout]
if !ok {
// Return a nil here as this will set the value to the azure default
return nil, nil
}

errInvalidTimeout := fmt.Errorf("idle timeout value must be a whole number representing minutes between %d and %d", min, max)
to, err := strconv.Atoi(val)
if err != nil {
return nil, fmt.Errorf("error parsing idle timeout value: %v: %v", err, errInvalidTimeout)
}
to32 := int32(to)

if to32 < min || to32 > max {
return nil, errInvalidTimeout
}
return &to32, nil
}
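
A hedged usage sketch of the new idle-timeout annotation. The key below is what ServiceAnnotationLoadBalancerIdleTimeout is expected to hold; treat the literal as an assumption if your vendored tree differs. Absent means "use the Azure default" (nil), present values must parse to a whole number of minutes in [4, 30].

package main

import (
	"fmt"
	"strconv"
)

// parseIdleTimeout mirrors getIdleTimeout above for a bare annotation map.
func parseIdleTimeout(annotations map[string]string) (*int32, error) {
	const key = "service.beta.kubernetes.io/azure-load-balancer-tcp-idle-timeout" // assumed constant value
	val, ok := annotations[key]
	if !ok {
		return nil, nil // nil selects the Azure default
	}
	n, err := strconv.Atoi(val)
	if err != nil || n < 4 || n > 30 {
		return nil, fmt.Errorf("idle timeout must be a whole number of minutes between 4 and 30, got %q", val)
	}
	t := int32(n)
	return &t, nil
}

func main() {
	t, err := parseIdleTimeout(map[string]string{
		"service.beta.kubernetes.io/azure-load-balancer-tcp-idle-timeout": "24",
	})
	fmt.Println(*t, err) // 24 <nil>
}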

func (az *Cloud) isFrontendIPChanged(clusterName string, config network.FrontendIPConfiguration, service *v1.Service, lbFrontendIPConfigName string) (bool, error) {
if az.serviceOwnsFrontendIP(config, service) && !strings.EqualFold(to.String(config.Name), lbFrontendIPConfigName) {
return true, nil
}
if !strings.EqualFold(to.String(config.Name), lbFrontendIPConfigName) {
return false, nil
}
loadBalancerIP := service.Spec.LoadBalancerIP
isInternal := requiresInternalLoadBalancer(service)
if isInternal {
// Judge subnet
subnetName := subnet(service)
if subnetName != nil {
subnet, existsSubnet, err := az.getSubnet(az.VnetName, *subnetName)
if err != nil {
return false, err
}
if !existsSubnet {
return false, fmt.Errorf("failed to get subnet")
}
if config.Subnet != nil && !strings.EqualFold(to.String(config.Subnet.Name), to.String(subnet.Name)) {
return true, nil
}
}
if loadBalancerIP == "" {
return config.PrivateIPAllocationMethod == network.Static, nil
}
return config.PrivateIPAllocationMethod != network.Static || !strings.EqualFold(loadBalancerIP, to.String(config.PrivateIPAddress)), nil
}
if loadBalancerIP == "" {
return false, nil
}
pipName, err := az.determinePublicIPName(clusterName, service)
if err != nil {
return false, err
}
pipResourceGroup := az.getPublicIPAddressResourceGroup(service)
pip, existsPip, err := az.getPublicIPAddress(pipResourceGroup, pipName)
if err != nil {
return false, err
}
if !existsPip {
return true, nil
}
return config.PublicIPAddress != nil && !strings.EqualFold(to.String(pip.ID), to.String(config.PublicIPAddress.ID)), nil
}

// This ensures load balancer exists and the frontend ip config is setup.
// This also reconciles the Service's Ports with the LoadBalancer config.
// This entails adding rules/probes for expected Ports and removing stale rules/ports.
@@ -482,11 +578,16 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
}
lbName := *lb.Name
glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb)
lbFrontendIPConfigName := getFrontendIPConfigName(service, subnet(service))
lbFrontendIPConfigName := az.getFrontendIPConfigName(service, subnet(service))
lbFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbFrontendIPConfigName)
lbBackendPoolName := getBackendPoolName(clusterName)
lbBackendPoolID := az.getBackendPoolID(lbName, lbBackendPoolName)

lbIdleTimeout, err := getIdleTimeout(service)
if err != nil {
return nil, err
}

dirtyLb := false

// Ensure LoadBalancer's Backend Pool Configuration
@@ -527,21 +628,23 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
if !wantLb {
for i := len(newConfigs) - 1; i >= 0; i-- {
config := newConfigs[i]
if serviceOwnsFrontendIP(config, service) {
if az.serviceOwnsFrontendIP(config, service) {
glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName)
newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
dirtyConfigs = true
}
}
} else {
if isInternal {
for i := len(newConfigs) - 1; i >= 0; i-- {
config := newConfigs[i]
if serviceOwnsFrontendIP(config, service) && !strings.EqualFold(*config.Name, lbFrontendIPConfigName) {
glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name)
newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
dirtyConfigs = true
}
for i := len(newConfigs) - 1; i >= 0; i-- {
config := newConfigs[i]
isFipChanged, err := az.isFrontendIPChanged(clusterName, config, service, lbFrontendIPConfigName)
if err != nil {
return nil, err
}
if isFipChanged {
glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name)
newConfigs = append(newConfigs[:i], newConfigs[i+1:]...)
dirtyConfigs = true
}
}
foundConfig := false
@@ -612,87 +715,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
}

// update probes/rules
var ports []v1.ServicePort
if wantLb {
ports = service.Spec.Ports
} else {
ports = []v1.ServicePort{}
}

var expectedProbes []network.Probe
var expectedRules []network.LoadBalancingRule
for _, port := range ports {
lbRuleName := getLoadBalancerRuleName(service, port, subnet(service))

transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol)
if err != nil {
return nil, err
}

if serviceapi.NeedsHealthCheck(service) {
if port.Protocol == v1.ProtocolUDP {
// ERROR: this isn't supported
// health check (aka source ip preservation) is not
// compatible with UDP (it uses an HTTP check)
return nil, fmt.Errorf("services requiring health checks are incompatible with UDP ports")
}

podPresencePath, podPresencePort := serviceapi.GetServiceHealthCheckPathPort(service)

expectedProbes = append(expectedProbes, network.Probe{
Name: &lbRuleName,
ProbePropertiesFormat: &network.ProbePropertiesFormat{
RequestPath: to.StringPtr(podPresencePath),
Protocol: network.ProbeProtocolHTTP,
Port: to.Int32Ptr(podPresencePort),
IntervalInSeconds: to.Int32Ptr(5),
NumberOfProbes: to.Int32Ptr(2),
},
})
} else if port.Protocol != v1.ProtocolUDP {
// we only add the expected probe if we're doing TCP
expectedProbes = append(expectedProbes, network.Probe{
Name: &lbRuleName,
ProbePropertiesFormat: &network.ProbePropertiesFormat{
Protocol: *probeProto,
Port: to.Int32Ptr(port.NodePort),
IntervalInSeconds: to.Int32Ptr(5),
NumberOfProbes: to.Int32Ptr(2),
},
})
}

loadDistribution := network.Default
if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP {
loadDistribution = network.SourceIP
}

expectedRule := network.LoadBalancingRule{
Name: &lbRuleName,
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
Protocol: *transportProto,
FrontendIPConfiguration: &network.SubResource{
ID: to.StringPtr(lbFrontendIPConfigID),
},
BackendAddressPool: &network.SubResource{
ID: to.StringPtr(lbBackendPoolID),
},
LoadDistribution: loadDistribution,
FrontendPort: to.Int32Ptr(port.Port),
BackendPort: to.Int32Ptr(port.Port),
EnableFloatingIP: to.BoolPtr(true),
},
}

// we didn't construct the probe objects for UDP because they're not used/needed/allowed
if port.Protocol != v1.ProtocolUDP {
expectedRule.Probe = &network.SubResource{
ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, lbRuleName)),
}
}

expectedRules = append(expectedRules, expectedRule)
}
expectedProbes, expectedRules, err := az.reconcileLoadBalancerRule(service, wantLb, lbFrontendIPConfigID, lbBackendPoolID, lbName, lbIdleTimeout)

// remove unwanted probes
dirtyProbes := false
@@ -702,7 +725,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
}
for i := len(updatedProbes) - 1; i >= 0; i-- {
existingProbe := updatedProbes[i]
if serviceOwnsRule(service, *existingProbe.Name) {
if az.serviceOwnsRule(service, *existingProbe.Name) {
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name)
keepProbe := false
if findProbe(expectedProbes, existingProbe) {
@@ -743,7 +766,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
// update rules: remove unwanted
for i := len(updatedRules) - 1; i >= 0; i-- {
existingRule := updatedRules[i]
if serviceOwnsRule(service, *existingRule.Name) {
if az.serviceOwnsRule(service, *existingRule.Name) {
keepRule := false
glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
if findRule(expectedRules, existingRule) {
@@ -787,7 +810,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
// Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB.
vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): start", lbBackendPoolID, vmSetName)
err := az.vmSet.EnsureBackendPoolDeleted(lbBackendPoolID, vmSetName, lb.BackendAddressPools)
err := az.vmSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools)
if err != nil {
glog.Errorf("EnsureBackendPoolDeleted(%s, %s) failed: %v", lbBackendPoolID, vmSetName, err)
return nil, err
@@ -796,7 +819,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,

// Remove the LB.
glog.V(10).Infof("reconcileLoadBalancer: az.DeleteLBWithRetry(%q): start", lbName)
err = az.DeleteLBWithRetry(lbName)
err = az.DeleteLBWithRetry(service, lbName)
if err != nil {
glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName)
return nil, err
@@ -804,7 +827,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
glog.V(10).Infof("az.DeleteLBWithRetry(%q): end", lbName)
} else {
glog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName)
err := az.CreateOrUpdateLBWithRetry(*lb)
err := az.CreateOrUpdateLBWithRetry(service, *lb)
if err != nil {
glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - updating", serviceName, lbName)
return nil, err
@@ -828,7 +851,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
if wantLb && nodes != nil {
// Add the machines to the backend pool if they're not already
vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
err := az.vmSet.EnsureHostsInPool(serviceName, nodes, lbBackendPoolID, vmSetName, isInternal)
err := az.vmSet.EnsureHostsInPool(service, nodes, lbBackendPoolID, vmSetName, isInternal)
if err != nil {
return nil, err
}
@@ -838,11 +861,102 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
return lb, nil
}

func (az *Cloud) reconcileLoadBalancerRule(
service *v1.Service,
wantLb bool,
lbFrontendIPConfigID string,
lbBackendPoolID string,
lbName string,
lbIdleTimeout *int32) ([]network.Probe, []network.LoadBalancingRule, error) {

var ports []v1.ServicePort
if wantLb {
ports = service.Spec.Ports
} else {
ports = []v1.ServicePort{}
}

var expectedProbes []network.Probe
var expectedRules []network.LoadBalancingRule
for _, port := range ports {
lbRuleName := az.getLoadBalancerRuleName(service, port, subnet(service))

glog.V(2).Infof("reconcileLoadBalancerRule lb name (%s) rule name (%s)", lbName, lbRuleName)

transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol)
if err != nil {
return expectedProbes, expectedRules, err
}

if serviceapi.NeedsHealthCheck(service) {
podPresencePath, podPresencePort := serviceapi.GetServiceHealthCheckPathPort(service)

expectedProbes = append(expectedProbes, network.Probe{
Name: &lbRuleName,
ProbePropertiesFormat: &network.ProbePropertiesFormat{
RequestPath: to.StringPtr(podPresencePath),
Protocol: network.ProbeProtocolHTTP,
Port: to.Int32Ptr(podPresencePort),
IntervalInSeconds: to.Int32Ptr(5),
NumberOfProbes: to.Int32Ptr(2),
},
})
} else if port.Protocol != v1.ProtocolUDP && port.Protocol != v1.ProtocolSCTP {
// we only add the expected probe if we're doing TCP
expectedProbes = append(expectedProbes, network.Probe{
Name: &lbRuleName,
ProbePropertiesFormat: &network.ProbePropertiesFormat{
Protocol: *probeProto,
Port: to.Int32Ptr(port.NodePort),
IntervalInSeconds: to.Int32Ptr(5),
NumberOfProbes: to.Int32Ptr(2),
},
})
}

loadDistribution := network.Default
if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP {
loadDistribution = network.SourceIP
}

expectedRule := network.LoadBalancingRule{
Name: &lbRuleName,
LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
Protocol: *transportProto,
FrontendIPConfiguration: &network.SubResource{
ID: to.StringPtr(lbFrontendIPConfigID),
},
BackendAddressPool: &network.SubResource{
ID: to.StringPtr(lbBackendPoolID),
},
LoadDistribution: loadDistribution,
FrontendPort: to.Int32Ptr(port.Port),
BackendPort: to.Int32Ptr(port.Port),
EnableFloatingIP: to.BoolPtr(true),
},
}
if port.Protocol == v1.ProtocolTCP {
expectedRule.LoadBalancingRulePropertiesFormat.IdleTimeoutInMinutes = lbIdleTimeout
}

// we didn't construct the probe objects for UDP or SCTP because they're not used/needed/allowed
if port.Protocol != v1.ProtocolUDP && port.Protocol != v1.ProtocolSCTP {
expectedRule.Probe = &network.SubResource{
ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, lbRuleName)),
}
}

expectedRules = append(expectedRules, expectedRule)
}

return expectedProbes, expectedRules, nil
}
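
To make the UDP/SCTP probe exclusion above concrete, here is a minimal self-contained sketch (plain strings instead of the k8s and network SDK types):

package main

import "fmt"

// wantsProbe mirrors the rule used by reconcileLoadBalancerRule: Azure health
// probes are only attached for TCP-style ports; UDP and SCTP rules get none.
func wantsProbe(protocol string) bool {
	return protocol != "UDP" && protocol != "SCTP"
}

func main() {
	for _, p := range []string{"TCP", "UDP", "SCTP"} {
		fmt.Printf("%s -> probe: %v\n", p, wantsProbe(p))
	}
}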

// This reconciles the Network Security Group similar to how the LB is reconciled.
// This entails adding required, missing SecurityRules and removing stale rules.
func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, wantLb bool) (*network.SecurityGroup, error) {
serviceName := getServiceName(service)
glog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q lbName=%q", serviceName, clusterName)
glog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q", serviceName, clusterName)

ports := service.Spec.Ports
if ports == nil {
@@ -902,7 +1016,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
}
for j := range sourceAddressPrefixes {
ix := i*len(sourceAddressPrefixes) + j
securityRuleName := getSecurityRuleName(service, port, sourceAddressPrefixes[j])
securityRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefixes[j])
expectedSecurityRules[ix] = network.SecurityRule{
Name: to.StringPtr(securityRuleName),
SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
@@ -938,7 +1052,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
// to this service
for i := len(updatedRules) - 1; i >= 0; i-- {
existingRule := updatedRules[i]
if serviceOwnsRule(service, *existingRule.Name) {
if az.serviceOwnsRule(service, *existingRule.Name) {
glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name)
keepRule := false
if findSecurityRule(expectedSecurityRules, existingRule) {
@@ -957,7 +1071,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
if useSharedSecurityRule(service) && !wantLb {
for _, port := range ports {
for _, sourceAddressPrefix := range sourceAddressPrefixes {
sharedRuleName := getSecurityRuleName(service, port, sourceAddressPrefix)
sharedRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefix)
sharedIndex, sharedRule, sharedRuleFound := findSecurityRuleByName(updatedRules, sharedRuleName)
if !sharedRuleFound {
glog.V(4).Infof("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name)
@@ -1030,7 +1144,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
sg.SecurityRules = &updatedRules
glog.V(2).Infof("reconcileSecurityGroup for service(%s): sg(%s) - updating", serviceName, *sg.Name)
glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): start", *sg.Name)
err := az.CreateOrUpdateSGWithRetry(sg)
err := az.CreateOrUpdateSGWithRetry(service, sg)
if err != nil {
glog.V(2).Infof("ensure(%s) abort backoff: sg(%s) - updating", serviceName, *sg.Name)
// TODO (Nov 2017): remove when augmented security rules are out of preview
@@ -1084,13 +1198,13 @@ func findIndex(strs []string, s string) (int, bool) {
}

func allowsConsolidation(rule network.SecurityRule) bool {
return strings.HasPrefix(*rule.Name, "shared")
return strings.HasPrefix(to.String(rule.Name), "shared")
}
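
A small sketch of the "shared" naming convention the consolidation logic keys on: only rules whose names start with "shared" may be merged across services.

package main

import (
	"fmt"
	"strings"
)

// allowsConsolidation mirrors the check above on a bare rule name.
func allowsConsolidation(ruleName string) bool {
	return strings.HasPrefix(ruleName, "shared")
}

func main() {
	fmt.Println(allowsConsolidation("shared-TCP-80-Internet")) // true
	fmt.Println(allowsConsolidation("aservice1-TCP-80"))       // false
}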

func findConsolidationCandidate(rules []network.SecurityRule, rule network.SecurityRule) (int, bool) {
for index, r := range rules {
if allowsConsolidation(r) {
if strings.EqualFold(*r.Name, *rule.Name) {
if strings.EqualFold(to.String(r.Name), to.String(rule.Name)) {
return index, true
}
}
@@ -1201,7 +1315,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want

pipResourceGroup := az.getPublicIPAddressResourceGroup(service)

pips, err := az.ListPIPWithRetry(pipResourceGroup)
pips, err := az.ListPIPWithRetry(service, pipResourceGroup)
if err != nil {
return nil, err
}
@@ -1218,7 +1332,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want
} else {
glog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - deleting", serviceName, pipName)
glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): start", pipResourceGroup, pipName)
err = az.DeletePublicIPWithRetry(pipResourceGroup, pipName)
err = az.DeletePublicIPWithRetry(service, pipResourceGroup, pipName)
if err != nil {
glog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - deleting", serviceName, pipName)
// We let err to pass through
@@ -1250,7 +1364,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want

func findProbe(probes []network.Probe, probe network.Probe) bool {
for _, existingProbe := range probes {
if strings.EqualFold(*existingProbe.Name, *probe.Name) && *existingProbe.Port == *probe.Port {
if strings.EqualFold(to.String(existingProbe.Name), to.String(probe.Name)) && to.Int32(existingProbe.Port) == to.Int32(probe.Port) {
return true
}
}
@@ -1259,7 +1373,7 @@ func findProbe(probes []network.Probe, probe network.Probe) bool {

func findRule(rules []network.LoadBalancingRule, rule network.LoadBalancingRule) bool {
for _, existingRule := range rules {
if strings.EqualFold(*existingRule.Name, *rule.Name) &&
if strings.EqualFold(to.String(existingRule.Name), to.String(rule.Name)) &&
equalLoadBalancingRulePropertiesFormat(existingRule.LoadBalancingRulePropertiesFormat, rule.LoadBalancingRulePropertiesFormat) {
return true
}
@@ -1280,7 +1394,8 @@ func equalLoadBalancingRulePropertiesFormat(s, t *network.LoadBalancingRulePrope
reflect.DeepEqual(s.LoadDistribution, t.LoadDistribution) &&
reflect.DeepEqual(s.FrontendPort, t.FrontendPort) &&
reflect.DeepEqual(s.BackendPort, t.BackendPort) &&
reflect.DeepEqual(s.EnableFloatingIP, t.EnableFloatingIP)
reflect.DeepEqual(s.EnableFloatingIP, t.EnableFloatingIP) &&
reflect.DeepEqual(s.IdleTimeoutInMinutes, t.IdleTimeoutInMinutes)
}

// This compares rule's Name, Protocol, SourcePortRange, DestinationPortRange, SourceAddressPrefix, Access, and Direction.
@@ -1289,23 +1404,23 @@ func equalLoadBalancingRulePropertiesFormat(s, t *network.LoadBalancingRulePrope
// despite different DestinationAddressPrefixes, in order to give it a chance to consolidate the two rules.
func findSecurityRule(rules []network.SecurityRule, rule network.SecurityRule) bool {
for _, existingRule := range rules {
if !strings.EqualFold(*existingRule.Name, *rule.Name) {
if !strings.EqualFold(to.String(existingRule.Name), to.String(rule.Name)) {
continue
}
if existingRule.Protocol != rule.Protocol {
continue
}
if !strings.EqualFold(*existingRule.SourcePortRange, *rule.SourcePortRange) {
if !strings.EqualFold(to.String(existingRule.SourcePortRange), to.String(rule.SourcePortRange)) {
continue
}
if !strings.EqualFold(*existingRule.DestinationPortRange, *rule.DestinationPortRange) {
if !strings.EqualFold(to.String(existingRule.DestinationPortRange), to.String(rule.DestinationPortRange)) {
continue
}
if !strings.EqualFold(*existingRule.SourceAddressPrefix, *rule.SourceAddressPrefix) {
if !strings.EqualFold(to.String(existingRule.SourceAddressPrefix), to.String(rule.SourceAddressPrefix)) {
continue
}
if !allowsConsolidation(existingRule) && !allowsConsolidation(rule) {
if !strings.EqualFold(*existingRule.DestinationAddressPrefix, *rule.DestinationAddressPrefix) {
if !strings.EqualFold(to.String(existingRule.DestinationAddressPrefix), to.String(rule.DestinationAddressPrefix)) {
continue
}
}
77
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.md
generated
vendored
77
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.md
generated
vendored
@@ -1,77 +0,0 @@
# Azure LoadBalancer

Azure defines a LoadBalancer differently from GCE or AWS: an Azure LB can have multiple frontend IP references, while GCE and AWS allow only one per LB (if you want more, you need another LB). As a consequence, the Public IP is not part of the LB in Azure, and neither is the NSG. They cannot be deleted in parallel, however: a Public IP can only be deleted after the LB's frontend IP reference is removed.

Azure resources such as the LB, Public IP, and NSG are peers at the same tier. We need to make sure there is no coupling between their ensure loops; in other words, each should eventually be reconciled regardless of the other resources' state, depending only on the service state.

Despite the ideal philosophy above, we have to face reality: the NSG depends on the LB's frontend IP to adjust its rules, so when we reconcile the NSG, the LB must already contain the corresponding frontend IP config.

Also, for Azure we cannot afford more than one service_controller worker. Different services can operate on the same LB, so concurrent execution could conflict or produce unexpected results. AWS and GCE do not have this problem: they use one LB per service, so there is no such conflict.

There are two load balancers per availability set, internal and external, and there is a limit on the number of services that can be associated with a single load balancer.
By default the primary load balancer is selected. Services can be annotated to allow auto-selection among available load balancers, or to name the specific availability sets that host the load balancers. Note that with auto-selection or specific availability-set selection, if the availability set is lost to downtime or cluster scale-down, the services are currently not automatically reassigned to an available load balancer.
Service annotation for auto and specific load balancer mode (see the usage sketch after this file):

- service.beta.kubernetes.io/azure-load-balancer-mode (__auto__|as1,as2...)

## Introduced Functions

- reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error)
  - Go through the LB's properties and update them based on wantLb
  - If anything changed on the LB, whether or not the LB already exists
    - Call the az cloud to CreateOrUpdate this LB, or Delete it if nothing is left
  - return lb, err

- reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, wantLb bool) (*network.SecurityGroup, error)
  - Go through the NSG's properties and update them based on wantLb
    - Use destinationIPAddress as the target address if possible
    - Consolidate NSG rules if possible
  - If anything changed on the NSG (the NSG should always exist)
    - Call the az cloud to CreateOrUpdate this NSG
  - return sg, err

- reconcilePublicIP(clusterName string, service *v1.Service, wantLb bool) (*network.PublicIPAddress, error)
  - List all public IPs in the resource group
  - Make sure we only touch Public IP resources that have tags[service] = "namespace/serviceName"
    - skip for wantLb && !isInternal && pipName == desiredPipName
    - delete other public IP resources, if any
  - if !isInternal && wantLb
    - ensure a Public IP with desiredPipName exists

- getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool) (lb, status, exists, error)
  - gets the load balancer for the service if it already exists
  - If wantLb is TRUE then it selects a new load balancer; the selection helps distribute the services across load balancers
  - In case the selected load balancer does not exist, it returns a network.LoadBalancer struct with added metadata (such as name, location) and existsLB set to FALSE
  - By default, the cluster default LB is returned

## Interface behaviors

### GetLoadBalancer

- Get LoadBalancer status, return status, error
  - returns the load balancer status for this service
  - it will not create, update, or delete any resource

### EnsureLoadBalancer

- Reconcile the LB for the flipped service
  - Call reconcileLoadBalancer(clusterName, flippedService, nil, false /* wantLb */)
- Reconcile the Public IP
  - Call reconcilePublicIP(cluster, service, true)
- Reconcile the LB's related and owned resources, such as FrontEndIPConfig, Rules, Probe.
  - Call reconcileLoadBalancer(clusterName, service, nodes, true /* wantLb */)
- Reconcile NSG rules; this needs to be called after reconcileLoadBalancer
  - Call reconcileSecurityGroup(clusterName, service, lbStatus, true /* wantLb */)

### UpdateLoadBalancer

- No different from EnsureLoadBalancer

### EnsureLoadBalancerDeleted

- Reconcile the NSG first, before reconciling the LB, because the NSG needs the LB to be there
  - Call reconcileSecurityGroup(clusterName, service, nil, false /* wantLb */)
- Reconcile the LB's related and owned resources, such as FrontEndIPConfig, Rules, Probe.
  - Call reconcileLoadBalancer(clusterName, service, nodes, false)
- Reconcile the Public IP; the Public IP needs the related LB reconciled first
  - Call reconcilePublicIP(cluster, service, false)
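
The azure-load-balancer-mode annotation described in the removed document above, shown on a plain annotation map; the availability-set names are made up for illustration.

package main

import "fmt"

func main() {
	annotations := map[string]string{
		// "__auto__" asks the provider to pick among available load balancers;
		// a comma-separated list pins the service to specific availability sets.
		"service.beta.kubernetes.io/azure-load-balancer-mode": "__auto__", // or "as1,as2"
	}
	fmt.Println(annotations)
}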
33
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go
generated
vendored
33
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go
generated
vendored
@@ -18,11 +18,13 @@ package azure

import (
"fmt"
"reflect"
"testing"

"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
)

func TestFindProbe(t *testing.T) {
@@ -210,3 +212,34 @@ func TestFindRule(t *testing.T) {
assert.Equal(t, test.expected, findResult, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
}
}

func TestGetIdleTimeout(t *testing.T) {
for _, c := range []struct {
desc string
annotations map[string]string
i *int32
err bool
}{
{desc: "no annotation"},
{desc: "annotation empty value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: ""}, err: true},
{desc: "annotation not a number", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "cookies"}, err: true},
{desc: "annotation negative value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "-6"}, err: true},
{desc: "annotation zero value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "0"}, err: true},
{desc: "annotation too low value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "3"}, err: true},
{desc: "annotation too high value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "31"}, err: true},
{desc: "annotation good value", annotations: map[string]string{ServiceAnnotationLoadBalancerIdleTimeout: "24"}, i: to.Int32Ptr(24)},
} {
t.Run(c.desc, func(t *testing.T) {
s := &v1.Service{}
s.Annotations = c.annotations
i, err := getIdleTimeout(s)

if !reflect.DeepEqual(c.i, i) {
t.Fatalf("got unexpected value: %d", to.Int32(i))
}
if (err != nil) != c.err {
t.Fatalf("expected error=%v, got %v", c.err, err)
}
})
}
}
153
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_managedDiskController.go
generated
vendored
153
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_managedDiskController.go
generated
vendored
@@ -17,16 +17,21 @@ limitations under the License.
package azure

import (
"context"
"fmt"
"path"
"strconv"
"strings"

"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/golang/glog"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
kwait "k8s.io/apimachinery/pkg/util/wait"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)

@@ -35,21 +40,45 @@ type ManagedDiskController struct {
common *controllerCommon
}

// ManagedDiskOptions specifies the options of managed disks.
type ManagedDiskOptions struct {
// The name of the disk.
DiskName string
// The size in GB.
SizeGB int
// The name of PVC.
PVCName string
// The name of resource group.
ResourceGroup string
// The AvailabilityZone to create the disk.
AvailabilityZone string
// The tags of the disk.
Tags map[string]string
// The SKU of storage account.
StorageAccountType storage.SkuName
}
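
A hedged sketch of how a caller might populate the new options struct; the struct is mirrored locally (with a plain string SKU) so the snippet runs standalone, and every literal value below is made up for illustration.

package main

import "fmt"

// managedDiskOptions mirrors ManagedDiskOptions above for demonstration.
type managedDiskOptions struct {
	DiskName           string
	SizeGB             int
	PVCName            string
	ResourceGroup      string
	AvailabilityZone   string
	Tags               map[string]string
	StorageAccountType string
}

func main() {
	opts := managedDiskOptions{
		DiskName:           "pvc-1234-disk", // hypothetical
		SizeGB:             128,
		PVCName:            "data-pvc",  // hypothetical
		ResourceGroup:      "",          // empty: the controller falls back to its default group
		AvailabilityZone:   "eastus-1",  // hypothetical zone
		Tags:               map[string]string{"team": "storage"},
		StorageAccountType: "Premium_LRS",
	}
	fmt.Printf("%+v\n", opts)
}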

func newManagedDiskController(common *controllerCommon) (*ManagedDiskController, error) {
return &ManagedDiskController{common: common}, nil
}

//CreateManagedDisk : create managed disk
func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error) {
glog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB)
func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) (string, error) {
var err error
glog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB)

var createZones *[]string
if len(options.AvailabilityZone) > 0 {
zoneList := []string{c.common.cloud.GetZoneID(options.AvailabilityZone)}
createZones = &zoneList
}

// insert original tags to newTags
newTags := make(map[string]*string)
azureDDTag := "kubernetes-azure-dd"
newTags["created-by"] = &azureDDTag

// insert original tags to newTags
if tags != nil {
for k, v := range tags {
if options.Tags != nil {
for k, v := range options.Tags {
// Azure won't allow / (forward slash) in tags
newKey := strings.Replace(k, "/", "-", -1)
newValue := strings.Replace(v, "/", "-", -1)
@@ -57,20 +86,27 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
}
}

diskSizeGB := int32(sizeGB)
diskSizeGB := int32(options.SizeGB)
model := compute.Disk{
Location: &c.common.location,
Tags: newTags,
Zones: createZones,
Sku: &compute.DiskSku{
Name: compute.StorageAccountTypes(storageAccountType),
Name: compute.StorageAccountTypes(options.StorageAccountType),
},
DiskProperties: &compute.DiskProperties{
DiskSizeGB: &diskSizeGB,
CreationData: &compute.CreationData{CreateOption: compute.Empty},
}}
},
}

if options.ResourceGroup == "" {
options.ResourceGroup = c.common.resourceGroup
}

ctx, cancel := getContextWithCancel()
defer cancel()
_, err := c.common.cloud.DisksClient.CreateOrUpdate(ctx, c.common.resourceGroup, diskName, model)
_, err = c.common.cloud.DisksClient.CreateOrUpdate(ctx, options.ResourceGroup, options.DiskName, model)
if err != nil {
return "", err
}
@@ -78,7 +114,7 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
diskID := ""

err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
provisionState, id, err := c.getDisk(diskName)
provisionState, id, err := c.getDisk(options.ResourceGroup, options.DiskName)
diskID = id
// We are waiting for provisioningState==Succeeded
// We don't want to hand-off managed disks to k8s while they are
@@ -93,9 +129,9 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
})

if err != nil {
glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", diskName, storageAccountType, sizeGB)
glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", options.DiskName, options.StorageAccountType, options.SizeGB)
} else {
glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB)
glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB)
}

return diskID, nil
@@ -104,10 +140,15 @@ func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccoun
//DeleteManagedDisk : delete managed disk
func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error {
diskName := path.Base(diskURI)
resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
if err != nil {
return err
}

ctx, cancel := getContextWithCancel()
defer cancel()

_, err := c.common.cloud.DisksClient.Delete(ctx, c.common.resourceGroup, diskName)
_, err = c.common.cloud.DisksClient.Delete(ctx, resourceGroup, diskName)
if err != nil {
return err
}
@@ -120,11 +161,11 @@ func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error {
}

// return: disk provisionState, diskID, error
func (c *ManagedDiskController) getDisk(diskName string) (string, string, error) {
func (c *ManagedDiskController) getDisk(resourceGroup, diskName string) (string, string, error) {
ctx, cancel := getContextWithCancel()
defer cancel()

result, err := c.common.cloud.DisksClient.Get(ctx, c.common.resourceGroup, diskName)
result, err := c.common.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
if err != nil {
return "", "", err
}
@@ -137,11 +178,17 @@ func (c *ManagedDiskController) getDisk(diskName string) (string, string, error)
}

// ResizeDisk Expand the disk to new size
func (c *ManagedDiskController) ResizeDisk(diskName string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
ctx, cancel := getContextWithCancel()
defer cancel()

result, err := c.common.cloud.DisksClient.Get(ctx, c.common.resourceGroup, diskName)
diskName := path.Base(diskURI)
resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
if err != nil {
return oldSize, err
}

result, err := c.common.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
if err != nil {
return oldSize, err
}
@@ -165,7 +212,7 @@ func (c *ManagedDiskController) ResizeDisk(diskName string, oldSize resource.Qua

ctx, cancel = getContextWithCancel()
defer cancel()
if _, err := c.common.cloud.DisksClient.CreateOrUpdate(ctx, c.common.resourceGroup, diskName, result); err != nil {
if _, err := c.common.cloud.DisksClient.CreateOrUpdate(ctx, resourceGroup, diskName, result); err != nil {
return oldSize, err
}

@@ -173,3 +220,69 @@ func (c *ManagedDiskController) ResizeDisk(diskName string, oldSize resource.Qua

return newSizeQuant, nil
}

// get resource group name from a managed disk URI, e.g. return {group-name} according to
// /subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}
// according to https://docs.microsoft.com/en-us/rest/api/compute/disks/get
func getResourceGroupFromDiskURI(diskURI string) (string, error) {
fields := strings.Split(diskURI, "/")
if len(fields) != 9 || fields[3] != "resourceGroups" {
return "", fmt.Errorf("invalid disk URI: %s", diskURI)
}
return fields[4], nil
}
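
A runnable mirror of the URI parsing above, with a made-up subscription and group name to show which path segment is extracted. Note the exact check is case-sensitive on "resourceGroups", so a URI spelled "resourcegroups" (as in the doc comment) would be rejected by this code as written.

package main

import (
	"fmt"
	"strings"
)

// resourceGroupFromDiskURI extracts the resource group segment of
// /subscriptions/{sub}/resourceGroups/{group}/providers/Microsoft.Compute/disks/{disk}
func resourceGroupFromDiskURI(diskURI string) (string, error) {
	fields := strings.Split(diskURI, "/")
	if len(fields) != 9 || fields[3] != "resourceGroups" {
		return "", fmt.Errorf("invalid disk URI: %s", diskURI)
	}
	return fields[4], nil
}

func main() {
	uri := "/subscriptions/0000/resourceGroups/my-group/providers/Microsoft.Compute/disks/my-disk"
	g, err := resourceGroupFromDiskURI(uri)
	fmt.Println(g, err) // my-group <nil>
}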

// GetLabelsForVolume implements PVLabeler.GetLabelsForVolume
func (c *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) {
// Ignore if not AzureDisk.
if pv.Spec.AzureDisk == nil {
return nil, nil
}

// Ignore any volumes that are being provisioned
if pv.Spec.AzureDisk.DiskName == volume.ProvisionedVolumeName {
return nil, nil
}

return c.GetAzureDiskLabels(pv.Spec.AzureDisk.DataDiskURI)
}

// GetAzureDiskLabels gets availability zone labels for Azuredisk.
func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) {
// Get disk's resource group.
diskName := path.Base(diskURI)
resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
if err != nil {
glog.Errorf("Failed to get resource group for AzureDisk %q: %v", diskName, err)
return nil, err
}

// Get information of the disk.
ctx, cancel := getContextWithCancel()
defer cancel()
disk, err := c.DisksClient.Get(ctx, resourceGroup, diskName)
if err != nil {
glog.Errorf("Failed to get information for AzureDisk %q: %v", diskName, err)
return nil, err
}

// Check whether availability zone is specified.
if disk.Zones == nil || len(*disk.Zones) == 0 {
glog.V(4).Infof("Azure disk %q is not zoned", diskName)
return nil, nil
}

zones := *disk.Zones
zoneID, err := strconv.Atoi(zones[0])
if err != nil {
return nil, fmt.Errorf("failed to parse zone %v for AzureDisk %v: %v", zones, diskName, err)
}

zone := c.makeZone(zoneID)
glog.V(4).Infof("Got zone %q for Azure disk %q", zone, diskName)
labels := map[string]string{
kubeletapis.LabelZoneRegion: c.Location,
kubeletapis.LabelZoneFailureDomain: zone,
}
return labels, nil
}
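
The label shape this produces, under the assumption that makeZone formats "<lowercased location>-<zone number>"; the helper below is a hypothetical mirror for illustration, not the vendored implementation.

package main

import (
	"fmt"
	"strings"
)

// makeZone is an assumed mirror of Cloud.makeZone.
func makeZone(location string, zoneID int) string {
	return fmt.Sprintf("%s-%d", strings.ToLower(location), zoneID)
}

func main() {
	labels := map[string]string{
		"failure-domain.beta.kubernetes.io/region": "eastus2",
		"failure-domain.beta.kubernetes.io/zone":   makeZone("eastus2", 1),
	}
	fmt.Println(labels) // zone label becomes "eastus2-1"
}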
52
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go
generated
vendored
52
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go
generated
vendored
@@ -32,7 +32,29 @@ import (
func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
glog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName)
routeTable, existsRouteTable, err := az.getRouteTable()
return processRoutes(routeTable, existsRouteTable, err)
routes, err := processRoutes(routeTable, existsRouteTable, err)
if err != nil {
return nil, err
}

// Compose routes for unmanaged routes so that node controller won't retry creating routes for them.
unmanagedNodes, err := az.GetUnmanagedNodes()
if err != nil {
return nil, err
}
az.routeCIDRsLock.Lock()
defer az.routeCIDRsLock.Unlock()
for _, nodeName := range unmanagedNodes.List() {
if cidr, ok := az.routeCIDRs[nodeName]; ok {
routes = append(routes, &cloudprovider.Route{
Name: nodeName,
TargetNode: mapRouteNameToNodeName(nodeName),
DestinationCIDR: cidr,
})
}
}

return routes, nil
}
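
A self-contained sketch of the unmanaged-node bookkeeping used above and in CreateRoute/DeleteRoute below: routes for nodes the provider cannot see in ARM are served from a lock-guarded in-memory map instead.

package main

import (
	"fmt"
	"sync"
)

// routeCache mirrors the routeCIDRs map plus routeCIDRsLock pattern.
type routeCache struct {
	mu    sync.Mutex
	cidrs map[string]string // nodeName -> destination CIDR
}

func (c *routeCache) add(node, cidr string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.cidrs[node] = cidr
}

func (c *routeCache) drop(node string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.cidrs, node)
}

func main() {
	c := &routeCache{cidrs: map[string]string{}}
	c.add("node1", "4.3.2.1/24") // CreateRoute for an unmanaged node
	fmt.Println(c.cidrs)         // map[node1:4.3.2.1/24]
	c.drop("node1")              // DeleteRoute for an unmanaged node
	fmt.Println(c.cidrs)         // map[]
}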

// Injectable for testing
@@ -107,6 +129,20 @@ func (az *Cloud) createRouteTable() error {
// route.Name will be ignored, although the cloud-provider may use nameHint
// to create a more user-meaningful name.
func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
// Returns for unmanaged nodes because azure cloud provider couldn't fetch information for them.
nodeName := string(kubeRoute.TargetNode)
unmanaged, err := az.IsNodeUnmanaged(nodeName)
if err != nil {
return err
}
if unmanaged {
glog.V(2).Infof("CreateRoute: omitting unmanaged node %q", kubeRoute.TargetNode)
az.routeCIDRsLock.Lock()
defer az.routeCIDRsLock.Unlock()
az.routeCIDRs[nodeName] = kubeRoute.DestinationCIDR
return nil
}

glog.V(2).Infof("CreateRoute: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
if err := az.createRouteTableIfNotExists(clusterName, kubeRoute); err != nil {
return err
@@ -150,6 +186,20 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s
// DeleteRoute deletes the specified managed route
// Route should be as returned by ListRoutes
func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute *cloudprovider.Route) error {
// Returns for unmanaged nodes because azure cloud provider couldn't fetch information for them.
nodeName := string(kubeRoute.TargetNode)
unmanaged, err := az.IsNodeUnmanaged(nodeName)
if err != nil {
return err
}
if unmanaged {
glog.V(2).Infof("DeleteRoute: omitting unmanaged node %q", kubeRoute.TargetNode)
az.routeCIDRsLock.Lock()
defer az.routeCIDRsLock.Unlock()
delete(az.routeCIDRs, nodeName)
return nil
}

glog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)

ctx, cancel := getContextWithCancel()
50
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes_test.go
generated
vendored
50
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes_test.go
generated
vendored
@@ -22,6 +22,7 @@ import (
"reflect"
"testing"

"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/cloudprovider"

"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
@@ -38,6 +39,8 @@ func TestDeleteRoute(t *testing.T) {
RouteTableName: "bar",
Location: "location",
},
unmanagedNodes: sets.NewString(),
nodeInformerSynced: func() bool { return true },
}
route := cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/24"}
routeName := mapNodeNameToRouteName(route.TargetNode)
@@ -62,6 +65,28 @@ func TestDeleteRoute(t *testing.T) {
ob, found := mp[routeName]
if found {
t.Errorf("unexpectedly found: %v that should have been deleted.", ob)
t.FailNow()
}

// test delete route for unmanaged nodes.
nodeName := "node1"
nodeCIDR := "4.3.2.1/24"
cloud.unmanagedNodes.Insert(nodeName)
cloud.routeCIDRs = map[string]string{
nodeName: nodeCIDR,
}
route1 := cloudprovider.Route{
TargetNode: mapRouteNameToNodeName(nodeName),
DestinationCIDR: nodeCIDR,
}
err = cloud.DeleteRoute(context.TODO(), "cluster", &route1)
if err != nil {
t.Errorf("unexpected error deleting route: %v", err)
t.FailNow()
}
cidr, found := cloud.routeCIDRs[nodeName]
if found {
t.Errorf("unexpected CIDR item (%q) for %s", cidr, nodeName)
}
}

@@ -79,6 +104,8 @@ func TestCreateRoute(t *testing.T) {
RouteTableName: "bar",
Location: "location",
},
unmanagedNodes: sets.NewString(),
nodeInformerSynced: func() bool { return true },
}
cache, _ := cloud.newRouteTableCache()
cloud.rtCache = cache
@@ -122,6 +149,29 @@ func TestCreateRoute(t *testing.T) {
if *routeInfo.NextHopIPAddress != nodeIP {
t.Errorf("Expected IP address: %s, saw %s", nodeIP, *routeInfo.NextHopIPAddress)
}

// test create route for unmanaged nodes.
nodeName := "node1"
nodeCIDR := "4.3.2.1/24"
cloud.unmanagedNodes.Insert(nodeName)
cloud.routeCIDRs = map[string]string{}
route1 := cloudprovider.Route{
TargetNode: mapRouteNameToNodeName(nodeName),
DestinationCIDR: nodeCIDR,
}
err = cloud.CreateRoute(context.TODO(), "cluster", "unused", &route1)
if err != nil {
t.Errorf("unexpected error creating route: %v", err)
t.FailNow()
}
cidr, found := cloud.routeCIDRs[nodeName]
if !found {
t.Errorf("unexpected missing item for %s", nodeName)
t.FailNow()
}
if cidr != nodeCIDR {
t.Errorf("unexpected cidr %s, saw %s", nodeCIDR, cidr)
}
}

func TestCreateRouteTableIfNotExists_Exists(t *testing.T) {
192
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard.go
generated
vendored
192
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard.go
generated
vendored
@@ -17,6 +17,7 @@ limitations under the License.
package azure

import (
"context"
"errors"
"fmt"
"hash/crc32"
@@ -28,7 +29,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/cloudprovider"

"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/glog"
@@ -52,7 +53,8 @@ const (
InternalLoadBalancerNameSuffix = "-internal"

// nodeLabelRole specifies the role of a node
nodeLabelRole = "kubernetes.io/role"
nodeLabelRole = "kubernetes.io/role"
nicFailedState = "Failed"

storageAccountNameMaxLength = 24
)
@@ -60,22 +62,24 @@ const (
var errNotInVMSet = errors.New("vm is not in the vmset")
var providerIDRE = regexp.MustCompile(`^` + CloudProviderName + `://(?:.*)/Microsoft.Compute/virtualMachines/(.+)$`)
var backendPoolIDRE = regexp.MustCompile(`^/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Network/loadBalancers/(.+)/backendAddressPools/(?:.*)`)
var nicResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Network/networkInterfaces/(?:.*)`)
var publicIPResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Network/publicIPAddresses/(?:.*)`)

// getStandardMachineID returns the full identifier of a virtual machine.
func (az *Cloud) getStandardMachineID(machineName string) string {
func (az *Cloud) getStandardMachineID(resourceGroup, machineName string) string {
return fmt.Sprintf(
machineIDTemplate,
az.SubscriptionID,
az.ResourceGroup,
resourceGroup,
machineName)
}

// returns the full identifier of an availabilitySet
func (az *Cloud) getAvailabilitySetID(availabilitySetName string) string {
func (az *Cloud) getAvailabilitySetID(resourceGroup, availabilitySetName string) string {
return fmt.Sprintf(
availabilitySetIDTemplate,
az.SubscriptionID,
az.ResourceGroup,
resourceGroup,
availabilitySetName)
}

@@ -122,7 +126,7 @@ func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) (
// Thus Azure do not allow mixed type (public and internal) load balancer.
// So we'd have a separate name for internal load balancer.
// This would be the name for Azure LoadBalancer resource.
func (az *Cloud) getLoadBalancerName(clusterName string, vmSetName string, isInternal bool) string {
func (az *Cloud) getAzureLoadBalancerName(clusterName string, vmSetName string, isInternal bool) string {
lbNamePrefix := vmSetName
if strings.EqualFold(vmSetName, az.vmSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() {
lbNamePrefix = clusterName
@@ -219,20 +223,22 @@ func getBackendPoolName(clusterName string) string {
return clusterName
}

func getLoadBalancerRuleName(service *v1.Service, port v1.ServicePort, subnetName *string) string {
func (az *Cloud) getLoadBalancerRuleName(service *v1.Service, port v1.ServicePort, subnetName *string) string {
prefix := az.getRulePrefix(service)
if subnetName == nil {
return fmt.Sprintf("%s-%s-%d", getRulePrefix(service), port.Protocol, port.Port)
return fmt.Sprintf("%s-%s-%d", prefix, port.Protocol, port.Port)
}
return fmt.Sprintf("%s-%s-%s-%d", getRulePrefix(service), *subnetName, port.Protocol, port.Port)
return fmt.Sprintf("%s-%s-%s-%d", prefix, *subnetName, port.Protocol, port.Port)
}

func getSecurityRuleName(service *v1.Service, port v1.ServicePort, sourceAddrPrefix string) string {
func (az *Cloud) getSecurityRuleName(service *v1.Service, port v1.ServicePort, sourceAddrPrefix string) string {
if useSharedSecurityRule(service) {
safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1)
return fmt.Sprintf("shared-%s-%d-%s", port.Protocol, port.Port, safePrefix)
}
safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1)
return fmt.Sprintf("%s-%s-%d-%s", getRulePrefix(service), port.Protocol, port.Port, safePrefix)
rulePrefix := az.getRulePrefix(service)
return fmt.Sprintf("%s-%s-%d-%s", rulePrefix, port.Protocol, port.Port, safePrefix)
}
|
||||
|
||||
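Editor's note: a small illustration of the name sanitization these helpers rely on — "/" in a CIDR source is not valid inside an Azure rule name, hence the replacement seen above:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Shared rules key on protocol/port/source; per-service rules prepend the
	// service's load-balancer name.
	sourceAddrPrefix := "10.0.0.0/16"
	safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1)
	fmt.Printf("shared-%s-%d-%s\n", "TCP", 443, safePrefix) // shared-TCP-443-10.0.0.0_16
}
```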
// This returns a human-readable version of the Service used to tag some resources.
@@ -242,26 +248,26 @@ func getServiceName(service *v1.Service) string {
}

// This returns a prefix for loadbalancer/security rules.
func getRulePrefix(service *v1.Service) string {
return cloudprovider.GetLoadBalancerName(service)
func (az *Cloud) getRulePrefix(service *v1.Service) string {
return az.GetLoadBalancerName(context.TODO(), "", service)
}

func getPublicIPName(clusterName string, service *v1.Service) string {
return fmt.Sprintf("%s-%s", clusterName, cloudprovider.GetLoadBalancerName(service))
func (az *Cloud) getPublicIPName(clusterName string, service *v1.Service) string {
return fmt.Sprintf("%s-%s", clusterName, az.GetLoadBalancerName(context.TODO(), clusterName, service))
}

func serviceOwnsRule(service *v1.Service, rule string) bool {
prefix := getRulePrefix(service)
func (az *Cloud) serviceOwnsRule(service *v1.Service, rule string) bool {
prefix := az.getRulePrefix(service)
return strings.HasPrefix(strings.ToUpper(rule), strings.ToUpper(prefix))
}

func serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, service *v1.Service) bool {
baseName := cloudprovider.GetLoadBalancerName(service)
func (az *Cloud) serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, service *v1.Service) bool {
baseName := az.GetLoadBalancerName(context.TODO(), "", service)
return strings.HasPrefix(*fip.Name, baseName)
}

func getFrontendIPConfigName(service *v1.Service, subnetName *string) string {
baseName := cloudprovider.GetLoadBalancerName(service)
func (az *Cloud) getFrontendIPConfigName(service *v1.Service, subnetName *string) string {
baseName := az.GetLoadBalancerName(context.TODO(), "", service)
if subnetName != nil {
return fmt.Sprintf("%s-%s", baseName, *subnetName)
}
@@ -302,51 +308,6 @@ func MakeCRC32(str string) string {
return strconv.FormatUint(uint64(hash), 10)
}

//ExtractVMData : extract dataDisks, storageProfile from a map struct
func ExtractVMData(vmData map[string]interface{}) (dataDisks []interface{},
storageProfile map[string]interface{},
hardwareProfile map[string]interface{}, err error) {
props, ok := vmData["properties"].(map[string]interface{})
if !ok {
return nil, nil, nil, fmt.Errorf("convert vmData(properties) to map error")
}

storageProfile, ok = props["storageProfile"].(map[string]interface{})
if !ok {
return nil, nil, nil, fmt.Errorf("convert vmData(storageProfile) to map error")
}

hardwareProfile, ok = props["hardwareProfile"].(map[string]interface{})
if !ok {
return nil, nil, nil, fmt.Errorf("convert vmData(hardwareProfile) to map error")
}

dataDisks, ok = storageProfile["dataDisks"].([]interface{})
if !ok {
return nil, nil, nil, fmt.Errorf("convert vmData(dataDisks) to map error")
}
return dataDisks, storageProfile, hardwareProfile, nil
}

//ExtractDiskData : extract provisioningState, diskState from a map struct
func ExtractDiskData(diskData interface{}) (provisioningState string, diskState string, err error) {
fragment, ok := diskData.(map[string]interface{})
if !ok {
return "", "", fmt.Errorf("convert diskData to map error")
}

properties, ok := fragment["properties"].(map[string]interface{})
if !ok {
return "", "", fmt.Errorf("convert diskData(properties) to map error")
}

provisioningState, ok = properties["provisioningState"].(string) // if there is a disk, provisioningState property will be there
if ref, ok := properties["diskState"]; ok {
diskState = ref.(string)
}
return provisioningState, diskState, nil
}
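Editor's note: the deleted ExtractVMData/ExtractDiskData helpers worked by type-asserting their way through untyped JSON maps. A minimal, self-contained sketch of that pattern (hypothetical input), for readers tracing why the removal is safe once typed SDK structs are used instead:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	raw := []byte(`{"properties": {"provisioningState": "Succeeded"}}`)
	var vmData map[string]interface{}
	if err := json.Unmarshal(raw, &vmData); err != nil {
		panic(err)
	}

	// Each level of the untyped map must be asserted to its expected shape.
	props, ok := vmData["properties"].(map[string]interface{})
	if !ok {
		panic("properties is not an object")
	}
	state, _ := props["provisioningState"].(string)
	fmt.Println(state) // Succeeded
}
```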
// availabilitySet implements VMSet interface for Azure availability sets.
type availabilitySet struct {
*Cloud
@@ -385,6 +346,26 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error)
return *machine.ID, nil
}

// GetPowerStatusByNodeName returns the power state of the specified node.
func (as *availabilitySet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
vm, err := as.getVirtualMachine(types.NodeName(name))
if err != nil {
return powerState, err
}

if vm.InstanceView != nil && vm.InstanceView.Statuses != nil {
statuses := *vm.InstanceView.Statuses
for _, status := range statuses {
state := to.String(status.Code)
if strings.HasPrefix(state, vmPowerStatePrefix) {
return strings.TrimPrefix(state, vmPowerStatePrefix), nil
}
}
}

return "", fmt.Errorf("failed to get power status for node %q", name)
}
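Editor's note: Azure instance views report status codes such as "PowerState/running" alongside provisioning statuses; the new method keeps only the power-state suffix. A sketch, where the "PowerState/" value of vmPowerStatePrefix is an assumption (the real constant is declared elsewhere in this package):

```go
package main

import (
	"fmt"
	"strings"
)

// Assumed value; not part of this diff.
const vmPowerStatePrefix = "PowerState/"

func main() {
	for _, code := range []string{"ProvisioningState/succeeded", "PowerState/running"} {
		// Only codes carrying the power-state prefix are of interest here.
		if strings.HasPrefix(code, vmPowerStatePrefix) {
			fmt.Println(strings.TrimPrefix(code, vmPowerStatePrefix)) // running
		}
	}
}
```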
// GetNodeNameByProviderID gets the node name by provider ID.
func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
// NodeName is part of providerID for standard instances.
@@ -407,14 +388,29 @@ func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error
return string(machine.HardwareProfile.VMSize), nil
}

// GetZoneByNodeName gets zone from instance view.
// GetZoneByNodeName gets availability zone for the specified node. If the node is not running
// with availability zone, then it returns fault domain.
func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
vm, err := as.getVirtualMachine(types.NodeName(name))
if err != nil {
return cloudprovider.Zone{}, err
}

failureDomain := strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain))
var failureDomain string
if vm.Zones != nil && len(*vm.Zones) > 0 {
// Get availability zone for the node.
zones := *vm.Zones
zoneID, err := strconv.Atoi(zones[0])
if err != nil {
return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone %q: %v", zones, err)
}

failureDomain = as.makeZone(zoneID)
} else {
// Availability zone is not used for the node, falling back to fault domain.
failureDomain = strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain))
}

zone := cloudprovider.Zone{
FailureDomain: failureDomain,
Region: *(vm.Location),
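Editor's note: the zone-or-fault-domain fallback above is the heart of this change. A self-contained sketch of the same decision, where makeZone is assumed to produce "&lt;location&gt;-&lt;zoneID&gt;" (consistent with the "eastus-1" expectations in the tests later in this diff):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Assumed to mirror Cloud.makeZone; not shown in this diff.
func makeZone(location string, zoneID int) string {
	return fmt.Sprintf("%s-%d", strings.ToLower(location), zoneID)
}

func main() {
	zones := []string{"1"}         // vm.Zones when an availability zone is set
	var faultDomain int32 = 2      // instance-view fault domain otherwise

	var failureDomain string
	if len(zones) > 0 {
		zoneID, err := strconv.Atoi(zones[0])
		if err != nil {
			panic(err)
		}
		failureDomain = makeZone("eastus", zoneID)
	} else {
		failureDomain = strconv.Itoa(int(faultDomain))
	}
	fmt.Println(failureDomain) // eastus-1
}
```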
@@ -464,7 +460,7 @@ func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error)
// getAgentPoolAvailabiliySets lists the virtual machines for the resource group and then builds
// a list of availability sets that match the nodes available to k8s.
func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
vms, err := as.VirtualMachineClientListWithRetry()
vms, err := as.VirtualMachineClientListWithRetry(as.ResourceGroup)
if err != nil {
glog.Errorf("as.getNodeAvailabilitySet - VirtualMachineClientListWithRetry failed, err=%v", err)
return nil, err
@@ -559,6 +555,26 @@ func (as *availabilitySet) GetPrimaryInterface(nodeName string) (network.Interfa
return as.getPrimaryInterfaceWithVMSet(nodeName, "")
}

// extractResourceGroupByNicID extracts the resource group name by nicID.
func extractResourceGroupByNicID(nicID string) (string, error) {
matches := nicResourceGroupRE.FindStringSubmatch(nicID)
if len(matches) != 2 {
return "", fmt.Errorf("error of extracting resourceGroup from nicID %q", nicID)
}

return matches[1], nil
}

// extractResourceGroupByPipID extracts the resource group name by publicIP ID.
func extractResourceGroupByPipID(pipID string) (string, error) {
matches := publicIPResourceGroupRE.FindStringSubmatch(pipID)
if len(matches) != 2 {
return "", fmt.Errorf("error of extracting resourceGroup from pipID %q", pipID)
}

return matches[1], nil
}
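Editor's note: both helpers reduce to one FindStringSubmatch over the patterns declared at the top of the file. A quick demonstration with a made-up NIC ID in a non-default resource group:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as nicResourceGroupRE above.
var nicResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Network/networkInterfaces/(?:.*)`)

func main() {
	nicID := "/subscriptions/sub/resourceGroups/node-rg/providers/Microsoft.Network/networkInterfaces/k8s-agent-0-nic"
	matches := nicResourceGroupRE.FindStringSubmatch(nicID)
	if len(matches) == 2 {
		fmt.Println(matches[1]) // node-rg
	}
}
```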
// getPrimaryInterfaceWithVMSet gets machine primary network interface by node name and vmSet.
func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName string) (network.Interface, error) {
var machine compute.VirtualMachine
@@ -577,6 +593,10 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri
if err != nil {
return network.Interface{}, err
}
nodeResourceGroup, err := as.GetNodeResourceGroup(nodeName)
if err != nil {
return network.Interface{}, err
}

// Check availability set name. Note that vmSetName is empty string when getting
// the Node's IP address. While vmSetName is not empty, it should be checked with
@@ -586,7 +606,7 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri
// - For standard SKU load balancer, backend could belong to multiple VMAS, so we
// don't check vmSet for it.
if vmSetName != "" && !as.useStandardLoadBalancer() {
expectedAvailabilitySetName := as.getAvailabilitySetID(vmSetName)
expectedAvailabilitySetName := as.getAvailabilitySetID(nodeResourceGroup, vmSetName)
if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) {
glog.V(3).Infof(
"GetPrimaryInterface: nic (%s) is not in the availabilitySet(%s)", nicName, vmSetName)
@@ -594,9 +614,14 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri
}
}

nicResourceGroup, err := extractResourceGroupByNicID(primaryNicID)
if err != nil {
return network.Interface{}, err
}

ctx, cancel := getContextWithCancel()
defer cancel()
nic, err := as.InterfacesClient.Get(ctx, as.ResourceGroup, nicName, "")
nic, err := as.InterfacesClient.Get(ctx, nicResourceGroup, nicName, "")
if err != nil {
return network.Interface{}, err
}
@@ -606,8 +631,9 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri

// ensureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is
// participating in the specified LoadBalancer Backend Pool.
func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error {
func (as *availabilitySet) ensureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error {
vmName := mapNodeNameToVMName(nodeName)
serviceName := getServiceName(service)
nic, err := as.getPrimaryInterfaceWithVMSet(vmName, vmSetName)
if err != nil {
if err == errNotInVMSet {
@@ -619,6 +645,11 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N
return err
}

if nic.ProvisioningState != nil && *nic.ProvisioningState == nicFailedState {
glog.V(3).Infof("ensureHostInPool skips node %s because its primary nic %s is in Failed state", nodeName, *nic.Name)
return nil
}

var primaryIPConfig *network.InterfaceIPConfiguration
primaryIPConfig, err = getPrimaryIPConfig(nic)
if err != nil {
@@ -670,7 +701,7 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N
glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name)
if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err)
retryErr := as.CreateOrUpdateInterfaceWithRetry(nic)
retryErr := as.CreateOrUpdateInterfaceWithRetry(service, nic)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("nicupdate(%s) abort backoff: nic(%s) - updating", serviceName, nicName)
@@ -685,7 +716,7 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N

// EnsureHostsInPool ensures the given Node's primary IP configurations are
// participating in the specified LoadBalancer Backend Pool.
func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
hostUpdates := make([]func() error, 0, len(nodes))
for _, node := range nodes {
localNodeName := node.Name
@@ -694,10 +725,15 @@ func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Nod
continue
}

if as.ShouldNodeExcludedFromLoadBalancer(node) {
glog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
continue
}

f := func() error {
err := as.ensureHostInPool(serviceName, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal)
err := as.ensureHostInPool(service, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal)
if err != nil {
return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %q", serviceName, backendPoolID, err)
return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %q", getServiceName(service), backendPoolID, err)
}
return nil
}
@@ -713,7 +749,7 @@ func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Nod
}

// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
func (as *availabilitySet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
// Do nothing for availability set.
return nil
}
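Editor's note: the CloudProviderBackoff/*WithRetry flow above follows the standard wait.ExponentialBackoff shape from k8s.io/apimachinery. A minimal sketch with a made-up transient failure, to show how the condition function drives retries:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	backoff := wait.Backoff{Duration: 100 * time.Millisecond, Factor: 2.0, Steps: 4}

	attempts := 0
	// The condition returns (done, err): done=true stops retrying,
	// a non-nil err aborts immediately, (false, nil) retries after a delay.
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempts++
		if attempts < 3 {
			return false, nil // simulate a retriable HTTP failure
		}
		return true, nil
	})
	if err == wait.ErrWaitTimeout {
		fmt.Println("gave up after retries")
		return
	}
	fmt.Printf("succeeded after %d attempts\n", attempts)
}
```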
4
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard_test.go
generated
vendored
@@ -171,7 +171,7 @@ func TestMapLoadBalancerNameToVMSet(t *testing.T) {
}
}

func TestGetLoadBalancerName(t *testing.T) {
func TestGetAzureLoadBalancerName(t *testing.T) {
az := getTestCloud()
az.PrimaryAvailabilitySetName = "primary"

@@ -247,7 +247,7 @@ func TestGetLoadBalancerName(t *testing.T) {
} else {
az.Config.LoadBalancerSku = loadBalancerSkuBasic
}
loadbalancerName := az.getLoadBalancerName(c.clusterName, c.vmSet, c.isInternal)
loadbalancerName := az.getAzureLoadBalancerName(c.clusterName, c.vmSet, c.isInternal)
assert.Equal(t, c.expected, loadbalancerName, c.description)
}
}
8
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go
generated
vendored
@@ -31,8 +31,12 @@ const (
)

// CreateFileShare creates a file share, using a matching storage account
func (az *Cloud) CreateFileShare(shareName, accountName, accountType, location string, requestGiB int) (string, string, error) {
account, key, err := az.ensureStorageAccount(accountName, accountType, location, fileShareAccountNamePrefix)
func (az *Cloud) CreateFileShare(shareName, accountName, accountType, resourceGroup, location string, requestGiB int) (string, string, error) {
if resourceGroup == "" {
resourceGroup = az.resourceGroup
}

account, key, err := az.ensureStorageAccount(accountName, accountType, resourceGroup, location, fileShareAccountNamePrefix)
if err != nil {
return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
}
2
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage_test.go
generated
vendored
@@ -116,7 +116,7 @@ func TestCreateFileShare(t *testing.T) {
fake.Keys = test.keys
fake.Err = test.err

account, key, err := cloud.CreateFileShare(test.name, test.acct, test.acctType, test.loc, test.gb)
account, key, err := cloud.CreateFileShare(test.name, test.acct, test.acctType, "rg", test.loc, test.gb)
if test.expectErr && err == nil {
t.Errorf("unexpected non-error")
continue
25
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount.go
generated
vendored
@@ -30,15 +30,15 @@ type accountWithLocation struct {
}

// getStorageAccounts gets name, type, location of all storage accounts in a resource group which matches matchingAccountType, matchingLocation
func (az *Cloud) getStorageAccounts(matchingAccountType, matchingLocation string) ([]accountWithLocation, error) {
func (az *Cloud) getStorageAccounts(matchingAccountType, resourceGroup, matchingLocation string) ([]accountWithLocation, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
result, err := az.StorageAccountClient.ListByResourceGroup(ctx, az.ResourceGroup)
result, err := az.StorageAccountClient.ListByResourceGroup(ctx, resourceGroup)
if err != nil {
return nil, err
}
if result.Value == nil {
return nil, fmt.Errorf("unexpected error when listing storage accounts from resource group %s", az.ResourceGroup)
return nil, fmt.Errorf("unexpected error when listing storage accounts from resource group %s", resourceGroup)
}

accounts := []accountWithLocation{}
@@ -61,11 +61,11 @@ func (az *Cloud) getStorageAccounts(matchingAccountType, matchingLocation string
}

// getStorageAccesskey gets the storage account access key
func (az *Cloud) getStorageAccesskey(account string) (string, error) {
func (az *Cloud) getStorageAccesskey(account, resourceGroup string) (string, error) {
ctx, cancel := getContextWithCancel()
defer cancel()

result, err := az.StorageAccountClient.ListKeys(ctx, az.ResourceGroup, account)
result, err := az.StorageAccountClient.ListKeys(ctx, resourceGroup, account)
if err != nil {
return "", err
}
@@ -86,10 +86,10 @@ func (az *Cloud) getStorageAccesskey(account string) (string, error) {
}

// ensureStorageAccount search storage account, create one storage account(with genAccountNamePrefix) if not found, return accountName, accountKey
func (az *Cloud) ensureStorageAccount(accountName, accountType, location, genAccountNamePrefix string) (string, string, error) {
func (az *Cloud) ensureStorageAccount(accountName, accountType, resourceGroup, location, genAccountNamePrefix string) (string, string, error) {
if len(accountName) == 0 {
// find a storage account that matches accountType
accounts, err := az.getStorageAccounts(accountType, location)
accounts, err := az.getStorageAccounts(accountType, resourceGroup, location)
if err != nil {
return "", "", fmt.Errorf("could not list storage accounts for account type %s: %v", accountType, err)
}
@@ -110,15 +110,18 @@ func (az *Cloud) ensureStorageAccount(accountName, accountType, location, genAcc
}

glog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s",
accountName, az.ResourceGroup, location, accountType)
accountName, resourceGroup, location, accountType)
cp := storage.AccountCreateParameters{
Sku: &storage.Sku{Name: storage.SkuName(accountType)},
Sku: &storage.Sku{Name: storage.SkuName(accountType)},
// switch to use StorageV2 as it's recommended according to https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options
Kind: storage.StorageV2,
AccountPropertiesCreateParameters: &storage.AccountPropertiesCreateParameters{EnableHTTPSTrafficOnly: to.BoolPtr(true)},
Tags: map[string]*string{"created-by": to.StringPtr("azure")},
Location: &location}

ctx, cancel := getContextWithCancel()
defer cancel()
_, err := az.StorageAccountClient.Create(ctx, az.ResourceGroup, accountName, cp)
_, err := az.StorageAccountClient.Create(ctx, resourceGroup, accountName, cp)
if err != nil {
return "", "", fmt.Errorf(fmt.Sprintf("Failed to create storage account %s, error: %s", accountName, err))
}
@@ -126,7 +129,7 @@ func (az *Cloud) ensureStorageAccount(accountName, accountType, location, genAcc
}

// find the access key with this account
accountKey, err := az.getStorageAccesskey(accountName)
accountKey, err := az.getStorageAccesskey(accountName, resourceGroup)
if err != nil {
return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
}
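Editor's note: the creation parameters above switch new accounts to the StorageV2 kind with HTTPS-only traffic. A standalone sketch of building those parameters, assuming the 2017-10-01 storage management SDK that this vendor tree carries (the Standard_LRS SKU is an example value, not taken from the diff):

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	location := "eastus"
	// Mirrors ensureStorageAccount above: StorageV2 kind, HTTPS-only traffic,
	// and a tag so operators can spot accounts created by the cloud provider.
	cp := storage.AccountCreateParameters{
		Sku:  &storage.Sku{Name: storage.StandardLRS},
		Kind: storage.StorageV2,
		AccountPropertiesCreateParameters: &storage.AccountPropertiesCreateParameters{
			EnableHTTPSTrafficOnly: to.BoolPtr(true),
		},
		Tags:     map[string]*string{"created-by": to.StringPtr("azure")},
		Location: &location,
	}
	fmt.Printf("%+v\n", *cp.Sku)
}
```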
@@ -64,7 +64,7 @@ func TestGetStorageAccessKeys(t *testing.T) {
expectedKey := test.expectedKey
fake.Keys = test.results
fake.Err = test.err
key, err := cloud.getStorageAccesskey("acct")
key, err := cloud.getStorageAccesskey("acct", "rg")
if test.expectErr && err == nil {
t.Errorf("Unexpected non-error")
continue
258
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_test.go
generated
vendored
@@ -22,6 +22,7 @@ import (
"encoding/json"
"fmt"
"math"
"net"
"net/http"
"net/http/httptest"
"reflect"
@@ -31,11 +32,12 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
serviceapi "k8s.io/kubernetes/pkg/api/v1/service"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"

"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
"github.com/stretchr/testify/assert"
@@ -952,6 +954,11 @@ func getTestCloud() (az *Cloud) {
MaximumLoadBalancerRuleCount: 250,
VMType: vmTypeStandard,
},
nodeZones: map[string]sets.String{},
nodeInformerSynced: func() bool { return true },
nodeResourceGroups: map[string]string{},
unmanagedNodes: sets.NewString(),
routeCIDRs: map[string]string{},
}
az.DisksClient = newFakeDisksClient()
az.InterfacesClient = newFakeAzureInterfacesClient()
@@ -1061,7 +1068,7 @@ func getClusterResources(az *Cloud, vmCount int, availabilitySetCount int) (clus
az.InterfacesClient.CreateOrUpdate(ctx, az.Config.ResourceGroup, nicName, newNIC)

// create vm
asID := az.getAvailabilitySetID(asName)
asID := az.getAvailabilitySetID(az.Config.ResourceGroup, asName)
newVM := compute.VirtualMachine{
Name: &vmName,
Location: &az.Config.Location,
@@ -1159,7 +1166,7 @@ func getTestSecurityGroup(az *Cloud, services ...v1.Service) *network.SecurityGr
for _, port := range service.Spec.Ports {
sources := getServiceSourceRanges(&service)
for _, src := range sources {
ruleName := getSecurityRuleName(&service, port, src)
ruleName := az.getSecurityRuleName(&service, port, src)
rules = append(rules, network.SecurityRule{
Name: to.StringPtr(ruleName),
SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
@@ -1190,6 +1197,7 @@ func getTestSecurityGroup(az *Cloud, services ...v1.Service) *network.SecurityGr
}

func validateLoadBalancer(t *testing.T, loadBalancer *network.LoadBalancer, services ...v1.Service) {
az := getTestCloud()
expectedRuleCount := 0
expectedFrontendIPCount := 0
expectedProbeCount := 0
@@ -1198,14 +1206,14 @@ func validateLoadBalancer(t *testing.T, loadBalancer *network.LoadBalancer, serv
if len(svc.Spec.Ports) > 0 {
expectedFrontendIPCount++
expectedFrontendIP := ExpectedFrontendIPInfo{
Name: getFrontendIPConfigName(&svc, subnet(&svc)),
Name: az.getFrontendIPConfigName(&svc, subnet(&svc)),
Subnet: subnet(&svc),
}
expectedFrontendIPs = append(expectedFrontendIPs, expectedFrontendIP)
}
for _, wantedRule := range svc.Spec.Ports {
expectedRuleCount++
wantedRuleName := getLoadBalancerRuleName(&svc, wantedRule, subnet(&svc))
wantedRuleName := az.getLoadBalancerRuleName(&svc, wantedRule, subnet(&svc))
foundRule := false
for _, actualRule := range *loadBalancer.LoadBalancingRules {
if strings.EqualFold(*actualRule.Name, wantedRuleName) &&
@@ -1396,12 +1404,13 @@ func securityRuleMatches(serviceSourceRange string, servicePort v1.ServicePort,
}

func validateSecurityGroup(t *testing.T, securityGroup *network.SecurityGroup, services ...v1.Service) {
az := getTestCloud()
seenRules := make(map[string]string)
for _, svc := range services {
for _, wantedRule := range svc.Spec.Ports {
sources := getServiceSourceRanges(&svc)
for _, source := range sources {
wantedRuleName := getSecurityRuleName(&svc, wantedRule, source)
wantedRuleName := az.getSecurityRuleName(&svc, wantedRule, source)
seenRules[wantedRuleName] = wantedRuleName
foundRule := false
for _, actualRule := range *securityGroup.SecurityRules {
@@ -1667,35 +1676,82 @@ func validateEmptyConfig(t *testing.T, config string) {
t.Errorf("got incorrect value for CloudProviderRateLimit")
}
}

func TestGetZone(t *testing.T) {
data := `{"ID":"_azdev","UD":"0","FD":"99"}`
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, data)
}))
defer ts.Close()

cloud := &Cloud{}
cloud.Location = "eastus"

zone, err := cloud.getZoneFromURL(ts.URL)
if err != nil {
t.Errorf("Unexpected error: %v", err)
cloud := &Cloud{
Config: Config{
Location: "eastus",
},
metadata: &InstanceMetadata{},
}
if zone.FailureDomain != "99" {
t.Errorf("Unexpected value: %s, expected '99'", zone.FailureDomain)
testcases := []struct {
name string
zone string
faultDomain string
expected string
}{
{
name: "GetZone should get real zone if only node's zone is set",
zone: "1",
expected: "eastus-1",
},
{
name: "GetZone should get real zone if both node's zone and FD are set",
zone: "1",
faultDomain: "99",
expected: "eastus-1",
},
{
name: "GetZone should get faultDomain if node's zone isn't set",
faultDomain: "99",
expected: "99",
},
}
if zone.Region != cloud.Location {
t.Errorf("Expected: %s, saw: %s", cloud.Location, zone.Region)

for _, test := range testcases {
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
}

mux := http.NewServeMux()
mux.Handle("/v1/InstanceInfo/FD", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, test.faultDomain)
}))
mux.Handle("/instance/compute", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, fmt.Sprintf("{\"zone\":\"%s\"}", test.zone))
}))
go func() {
http.Serve(listener, mux)
}()
defer listener.Close()

cloud.metadata.baseURL = "http://" + listener.Addr().String() + "/"
zone, err := cloud.GetZone(context.Background())
if err != nil {
t.Errorf("Test [%s] unexpected error: %v", test.name, err)
}
if zone.FailureDomain != test.expected {
t.Errorf("Test [%s] unexpected zone: %s, expected %q", test.name, zone.FailureDomain, test.expected)
}
if zone.Region != cloud.Location {
t.Errorf("Test [%s] unexpected region: %s, expected: %s", test.name, zone.Region, cloud.Location)
}
}
}
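Editor's note: the rewritten test points the instance-metadata client at a local server instead of calling getZoneFromURL directly. A self-contained sketch of stubbing the same two endpoints (paths taken from the test above; the minimal JSON shape for /instance/compute is an assumption):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	mux := http.NewServeMux()
	// Endpoint consulted for the fault domain (fallback path).
	mux.HandleFunc("/v1/InstanceInfo/FD", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "99")
	})
	// Endpoint consulted for the availability zone (preferred path).
	mux.HandleFunc("/instance/compute", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, `{"zone":"1"}`)
	})
	ts := httptest.NewServer(mux)
	defer ts.Close()

	resp, err := http.Get(ts.URL + "/instance/compute")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // {"zone":"1"}
}
```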
func TestFetchFaultDomain(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, `{"ID":"_azdev","UD":"0","FD":"99"}`)
fmt.Fprint(w, "99")
}))
defer ts.Close()

faultDomain, err := fetchFaultDomain(ts.URL)
cloud := &Cloud{}
cloud.metadata = &InstanceMetadata{
baseURL: ts.URL + "/",
}

faultDomain, err := cloud.fetchFaultDomain()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
@@ -1707,23 +1763,6 @@ func TestFetchFaultDomain(t *testing.T) {
}
}

func TestDecodeInstanceInfo(t *testing.T) {
response := `{"ID":"_azdev","UD":"0","FD":"99"}`

faultDomain, err := readFaultDomain(strings.NewReader(response))
if err != nil {
t.Errorf("Unexpected error in ReadFaultDomain: %v", err)
}

if faultDomain == nil {
t.Error("Fault domain was unexpectedly nil")
}

if *faultDomain != "99" {
t.Error("got incorrect fault domain")
}
}

func TestGetNodeNameByProviderID(t *testing.T) {
az := getTestCloud()
providers := []struct {
@@ -2537,8 +2576,8 @@ func TestCanCombineSharedAndPrivateRulesInSameGroup(t *testing.T) {

expectedRuleName13 := "shared-TCP-4444-Internet"
expectedRuleName2 := "shared-TCP-8888-Internet"
expectedRuleName4 := getSecurityRuleName(&svc4, v1.ServicePort{Port: 4444, Protocol: v1.ProtocolTCP}, "Internet")
expectedRuleName5 := getSecurityRuleName(&svc5, v1.ServicePort{Port: 8888, Protocol: v1.ProtocolTCP}, "Internet")
expectedRuleName4 := az.getSecurityRuleName(&svc4, v1.ServicePort{Port: 4444, Protocol: v1.ProtocolTCP}, "Internet")
expectedRuleName5 := az.getSecurityRuleName(&svc5, v1.ServicePort{Port: 8888, Protocol: v1.ProtocolTCP}, "Internet")

sg := getTestSecurityGroup(az)

@@ -2699,3 +2738,136 @@ func TestCanCombineSharedAndPrivateRulesInSameGroup(t *testing.T) {
// func TestIfServiceIsEditedFromSharedRuleToOwnRuleThenItIsRemovedFromSharedRuleAndOwnRuleIsCreated(t *testing.T) {
// t.Error()
// }

func TestGetResourceGroupFromDiskURI(t *testing.T) {
tests := []struct {
diskURL string
expectedResult string
expectError bool
}{
{
diskURL: "/subscriptions/4be8920b-2978-43d7-axyz-04d8549c1d05/resourceGroups/azure-k8s1102/providers/Microsoft.Compute/disks/andy-mghyb1102-dynamic-pvc-f7f014c9-49f4-11e8-ab5c-000d3af7b38e",
expectedResult: "azure-k8s1102",
expectError: false,
},
{
diskURL: "/4be8920b-2978-43d7-axyz-04d8549c1d05/resourceGroups/azure-k8s1102/providers/Microsoft.Compute/disks/andy-mghyb1102-dynamic-pvc-f7f014c9-49f4-11e8-ab5c-000d3af7b38e",
expectedResult: "",
expectError: true,
},
{
diskURL: "",
expectedResult: "",
expectError: true,
},
}

for _, test := range tests {
result, err := getResourceGroupFromDiskURI(test.diskURL)
assert.Equal(t, result, test.expectedResult, "Expect result not equal with getResourceGroupFromDiskURI(%s) return: %q, expected: %q",
test.diskURL, result, test.expectedResult)

if test.expectError {
assert.NotNil(t, err, "Expect error during getResourceGroupFromDiskURI(%s)", test.diskURL)
} else {
assert.Nil(t, err, "Expect error is nil during getResourceGroupFromDiskURI(%s)", test.diskURL)
}
}
}

func TestGetResourceGroups(t *testing.T) {
tests := []struct {
name string
nodeResourceGroups map[string]string
expected sets.String
informerSynced bool
expectError bool
}{
{
name: "cloud provider configured RG should be returned by default",
nodeResourceGroups: map[string]string{},
informerSynced: true,
expected: sets.NewString("rg"),
},
{
name: "cloud provider configured RG and node RGs should be returned",
nodeResourceGroups: map[string]string{"node1": "rg1", "node2": "rg2"},
informerSynced: true,
expected: sets.NewString("rg", "rg1", "rg2"),
},
{
name: "error should be returned if informer hasn't synced yet",
nodeResourceGroups: map[string]string{"node1": "rg1", "node2": "rg2"},
informerSynced: false,
expectError: true,
},
}

az := getTestCloud()
for _, test := range tests {
az.nodeResourceGroups = test.nodeResourceGroups
if test.informerSynced {
az.nodeInformerSynced = func() bool { return true }
} else {
az.nodeInformerSynced = func() bool { return false }
}
actual, err := az.GetResourceGroups()
if test.expectError {
assert.NotNil(t, err, test.name)
continue
}

assert.Nil(t, err, test.name)
assert.Equal(t, test.expected, actual, test.name)
}
}

func TestGetNodeResourceGroup(t *testing.T) {
tests := []struct {
name string
nodeResourceGroups map[string]string
node string
expected string
informerSynced bool
expectError bool
}{
{
name: "cloud provider configured RG should be returned by default",
nodeResourceGroups: map[string]string{},
informerSynced: true,
node: "node1",
expected: "rg",
},
{
name: "node RGs should be returned",
nodeResourceGroups: map[string]string{"node1": "rg1", "node2": "rg2"},
informerSynced: true,
node: "node1",
expected: "rg1",
},
{
name: "error should be returned if informer hasn't synced yet",
nodeResourceGroups: map[string]string{"node1": "rg1", "node2": "rg2"},
informerSynced: false,
expectError: true,
},
}

az := getTestCloud()
for _, test := range tests {
az.nodeResourceGroups = test.nodeResourceGroups
if test.informerSynced {
az.nodeInformerSynced = func() bool { return true }
} else {
az.nodeInformerSynced = func() bool { return false }
}
actual, err := az.GetNodeResourceGroup(test.node)
if test.expectError {
assert.NotNil(t, err, test.name)
continue
}

assert.Nil(t, err, test.name)
assert.Equal(t, test.expected, actual, test.name)
}
}
9
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmsets.go
generated
vendored
@@ -17,7 +17,7 @@ limitations under the License.
package azure

import (
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"

"k8s.io/api/core/v1"
@@ -54,9 +54,9 @@ type VMSet interface {
GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error)
// EnsureHostsInPool ensures the given Node's primary IP configurations are
// participating in the specified LoadBalancer Backend Pool.
EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error
EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error
EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error

// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
@@ -64,4 +64,7 @@ type VMSet interface {
DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error
// GetDataDisks gets a list of data disks attached to the node.
GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error)

// GetPowerStatusByNodeName returns the power state of the specified node.
GetPowerStatusByNodeName(name string) (string, error)
}
203
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss.go
generated
vendored
@@ -24,7 +24,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
@@ -40,8 +40,10 @@ var (
|
||||
// ErrorNotVmssInstance indicates an instance is not belongint to any vmss.
|
||||
ErrorNotVmssInstance = errors.New("not a vmss instance")
|
||||
|
||||
scaleSetNameRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`)
|
||||
vmssMachineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s"
|
||||
scaleSetNameRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`)
|
||||
resourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(?:.*)/virtualMachines(?:.*)`)
|
||||
vmssNicResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(?:.*)/virtualMachines/(?:.*)/networkInterfaces/(?:.*)`)
|
||||
vmssMachineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s"
|
||||
)
|
||||
|
||||
// scaleSet implements VMSet interface for Azure scale set.
|
||||
@@ -106,8 +108,14 @@ func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm co
|
||||
return "", "", vm, cloudprovider.InstanceNotFound
|
||||
}
|
||||
|
||||
resourceGroup, err := ss.GetNodeResourceGroup(nodeName)
|
||||
if err != nil {
|
||||
return "", "", vm, err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("getVmssVM gets scaleSetName (%q) and instanceID (%q) for node %q", ssName, instanceID, nodeName)
|
||||
cachedVM, err := ss.vmssVMCache.Get(ss.makeVmssVMName(ssName, instanceID))
|
||||
key := buildVmssCacheKey(resourceGroup, ss.makeVmssVMName(ssName, instanceID))
|
||||
cachedVM, err := ss.vmssVMCache.Get(key)
|
||||
if err != nil {
|
||||
return ssName, instanceID, vm, err
|
||||
}
|
||||
@@ -120,11 +128,32 @@ func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm co
|
||||
return ssName, instanceID, *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil
|
||||
}
|
||||
|
||||
// GetPowerStatusByNodeName returns the power state of the specified node.
|
||||
func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
|
||||
_, _, vm, err := ss.getVmssVM(name)
|
||||
if err != nil {
|
||||
return powerState, err
|
||||
}
|
||||
|
||||
if vm.InstanceView != nil && vm.InstanceView.Statuses != nil {
|
||||
statuses := *vm.InstanceView.Statuses
|
||||
for _, status := range statuses {
|
||||
state := to.String(status.Code)
|
||||
if strings.HasPrefix(state, vmPowerStatePrefix) {
|
||||
return strings.TrimPrefix(state, vmPowerStatePrefix), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("failed to get power status for node %q", name)
|
||||
}
|
||||
|
||||
// getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache.
|
||||
// The node must belong to one of scale sets.
|
||||
func (ss *scaleSet) getVmssVMByInstanceID(scaleSetName, instanceID string) (vm compute.VirtualMachineScaleSetVM, err error) {
|
||||
func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string) (vm compute.VirtualMachineScaleSetVM, err error) {
|
||||
vmName := ss.makeVmssVMName(scaleSetName, instanceID)
|
||||
cachedVM, err := ss.vmssVMCache.Get(vmName)
|
||||
key := buildVmssCacheKey(resourceGroup, vmName)
|
||||
cachedVM, err := ss.vmssVMCache.Get(key)
|
||||
if err != nil {
|
||||
return vm, err
|
||||
}
|
||||
@@ -168,13 +197,18 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName,
|
||||
return ss.availabilitySet.GetNodeNameByProviderID(providerID)
|
||||
}
|
||||
|
||||
resourceGroup, err := extractResourceGroupByProviderID(providerID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error of extracting resource group for node %q", providerID)
|
||||
}
|
||||
|
||||
instanceID, err := getLastSegment(providerID)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Can not extract instanceID from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err)
|
||||
return ss.availabilitySet.GetNodeNameByProviderID(providerID)
|
||||
}
|
||||
|
||||
vm, err := ss.getVmssVMByInstanceID(scaleSetName, instanceID)
|
||||
vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -211,7 +245,8 @@ func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// GetZoneByNodeName gets cloudprovider.Zone by node name.
|
||||
// GetZoneByNodeName gets availability zone for the specified node. If the node is not running
|
||||
// with availability zone, then it returns fault domain.
|
||||
func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
|
||||
if err != nil {
|
||||
@@ -228,14 +263,25 @@ func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
|
||||
return cloudprovider.Zone{}, err
|
||||
}
|
||||
|
||||
if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil {
|
||||
return cloudprovider.Zone{
|
||||
FailureDomain: strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain)),
|
||||
Region: *vm.Location,
|
||||
}, nil
|
||||
var failureDomain string
|
||||
if vm.Zones != nil && len(*vm.Zones) > 0 {
|
||||
// Get availability zone for the node.
|
||||
zones := *vm.Zones
|
||||
zoneID, err := strconv.Atoi(zones[0])
|
||||
if err != nil {
|
||||
return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone %q: %v", zones, err)
|
||||
}
|
||||
|
||||
failureDomain = ss.makeZone(zoneID)
|
||||
} else if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil {
|
||||
// Availability zone is not used for the node, falling back to fault domain.
|
||||
failureDomain = strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain))
|
||||
}
|
||||
|
||||
return cloudprovider.Zone{}, nil
|
||||
return cloudprovider.Zone{
|
||||
FailureDomain: failureDomain,
|
||||
Region: *vm.Location,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
|
||||
@@ -245,8 +291,6 @@ func (ss *scaleSet) GetPrimaryVMSetName() string {
|
||||
}
|
||||
|
||||
// GetIPByNodeName gets machine private IP and public IP by node name.
|
||||
// TODO(feiskyer): Azure vmss doesn't support associating a public IP to single virtual machine yet,
|
||||
// fix this after it is supported.
|
||||
func (ss *scaleSet) GetIPByNodeName(nodeName string) (string, string, error) {
|
||||
nic, err := ss.GetPrimaryInterface(nodeName)
|
||||
if err != nil {
|
||||
@@ -260,8 +304,30 @@ func (ss *scaleSet) GetIPByNodeName(nodeName string) (string, string, error) {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
targetIP := *ipConfig.PrivateIPAddress
|
||||
return targetIP, "", nil
|
||||
internalIP := *ipConfig.PrivateIPAddress
|
||||
publicIP := ""
|
||||
if ipConfig.PublicIPAddress != nil && ipConfig.PublicIPAddress.ID != nil {
|
||||
pipID := *ipConfig.PublicIPAddress.ID
|
||||
pipName, err := getLastSegment(pipID)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("failed to get publicIP name for node %q with pipID %q", nodeName, pipID)
|
||||
}
|
||||
|
||||
resourceGroup, err := ss.GetNodeResourceGroup(nodeName)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
pip, existsPip, err := ss.getPublicIPAddress(resourceGroup, pipName)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if existsPip {
|
||||
publicIP = *pip.IPAddress
|
||||
}
|
||||
}
|
||||
|
||||
return internalIP, publicIP, nil
|
||||
}
|
||||
|
||||
// This returns the full identifier of the primary NIC for the given VM.
|
||||
@@ -296,7 +362,7 @@ func getScaleSetVMInstanceID(machineName string) (string, error) {
|
||||
return fmt.Sprintf("%d", instanceID), nil
|
||||
}
|
||||
|
||||
// extractScaleSetNameByProviderID extracts the scaleset name by node's ProviderID.
|
||||
// extractScaleSetNameByProviderID extracts the scaleset name by vmss node's ProviderID.
|
||||
func extractScaleSetNameByProviderID(providerID string) (string, error) {
|
||||
matches := scaleSetNameRE.FindStringSubmatch(providerID)
|
||||
if len(matches) != 2 {
|
||||
@@ -306,13 +372,23 @@ func extractScaleSetNameByProviderID(providerID string) (string, error) {
|
||||
return matches[1], nil
|
||||
}
|
||||
|
||||
// extractResourceGroupByProviderID extracts the resource group name by vmss node's ProviderID.
|
||||
func extractResourceGroupByProviderID(providerID string) (string, error) {
|
||||
matches := resourceGroupRE.FindStringSubmatch(providerID)
|
||||
if len(matches) != 2 {
|
||||
return "", ErrorNotVmssInstance
|
||||
}
|
||||
|
||||
return matches[1], nil
|
||||
}
|
||||
|
||||
// listScaleSets lists all scale sets.
|
||||
func (ss *scaleSet) listScaleSets() ([]string, error) {
|
||||
func (ss *scaleSet) listScaleSets(resourceGroup string) ([]string, error) {
|
||||
var err error
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
allScaleSets, err := ss.VirtualMachineScaleSetsClient.List(ctx, ss.ResourceGroup)
|
||||
allScaleSets, err := ss.VirtualMachineScaleSetsClient.List(ctx, resourceGroup)
|
||||
if err != nil {
|
||||
glog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", err)
|
||||
return nil, err
|
||||
@@ -327,12 +403,12 @@ func (ss *scaleSet) listScaleSets() ([]string, error) {
|
||||
}
|
||||
|
||||
// listScaleSetVMs lists VMs belonging to the specified scale set.
|
||||
func (ss *scaleSet) listScaleSetVMs(scaleSetName string) ([]compute.VirtualMachineScaleSetVM, error) {
|
||||
func (ss *scaleSet) listScaleSetVMs(scaleSetName, resourceGroup string) ([]compute.VirtualMachineScaleSetVM, error) {
|
||||
var err error
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
allVMs, err := ss.VirtualMachineScaleSetVMsClient.List(ctx, ss.ResourceGroup, scaleSetName, "", "", string(compute.InstanceView))
|
||||
allVMs, err := ss.VirtualMachineScaleSetVMsClient.List(ctx, resourceGroup, scaleSetName, "", "", string(compute.InstanceView))
|
||||
if err != nil {
|
||||
glog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", err)
|
||||
return nil, err
|
||||
@@ -350,6 +426,10 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
|
||||
continue
|
||||
}
|
||||
|
||||
if ss.ShouldNodeExcludedFromLoadBalancer(nodes[nx]) {
|
||||
continue
|
||||
}
|
||||
|
||||
nodeName := nodes[nx].Name
|
||||
ssName, err := ss.getScaleSetNameByNodeName(nodeName)
|
||||
if err != nil {
|
||||
@@ -417,6 +497,16 @@ func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetN
|
||||
return vmSetNames, nil
|
||||
}
|
||||
|
||||
// extractResourceGroupByVMSSNicID extracts the resource group name by vmss nicID.
|
||||
func extractResourceGroupByVMSSNicID(nicID string) (string, error) {
|
||||
matches := vmssNicResourceGroupRE.FindStringSubmatch(nicID)
|
||||
if len(matches) != 2 {
|
||||
return "", fmt.Errorf("error of extracting resourceGroup from nicID %q", nicID)
|
||||
}
|
||||
|
||||
return matches[1], nil
|
||||
}
|
||||
|
||||
// GetPrimaryInterface gets machine primary network interface by node name and vmSet.
|
||||
func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName)
|
||||
@@ -431,6 +521,11 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err
|
||||
|
||||
ssName, instanceID, vm, err := ss.getVmssVM(nodeName)
|
||||
if err != nil {
|
||||
// VM is availability set, but not cached yet in availabilitySetNodesCache.
|
||||
if err == ErrorNotVmssInstance {
|
||||
return ss.availabilitySet.GetPrimaryInterface(nodeName)
|
||||
}
|
||||
|
||||
glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getVmssVM(%s), err=%v", nodeName, nodeName, err)
|
||||
return network.Interface{}, err
|
||||
}
|
||||
@@ -446,12 +541,16 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err
|
||||
glog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, primaryInterfaceID, err)
|
||||
return network.Interface{}, err
|
||||
}
|
||||
resourceGroup, err := extractResourceGroupByVMSSNicID(primaryInterfaceID)
|
||||
if err != nil {
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ctx, ss.ResourceGroup, ssName, instanceID, nicName, "")
|
||||
nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ctx, resourceGroup, ssName, instanceID, nicName, "")
|
||||
if err != nil {
|
||||
glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, ss.ResourceGroup, ssName, nicName, err)
|
||||
glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, resourceGroup, ssName, nicName, err)
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
@@ -465,17 +564,18 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err
|
||||
}
|
||||
|
||||
// getScaleSetWithRetry gets scale set with exponential backoff retry
|
||||
func (ss *scaleSet) getScaleSetWithRetry(name string) (compute.VirtualMachineScaleSet, bool, error) {
|
||||
func (ss *scaleSet) getScaleSetWithRetry(service *v1.Service, name string) (compute.VirtualMachineScaleSet, bool, error) {
|
||||
var result compute.VirtualMachineScaleSet
|
||||
var exists bool
|
||||
|
||||
err := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
|
||||
cached, retryErr := ss.vmssCache.Get(name)
|
||||
if retryErr != nil {
|
||||
ss.Event(service, v1.EventTypeWarning, "GetVirtualMachineScaleSet", retryErr.Error())
|
||||
glog.Errorf("backoff: failure for scale set %q, will retry,err=%v", name, retryErr)
|
||||
return false, nil
|
||||
}
|
||||
glog.V(4).Info("backoff: success for scale set %q", name)
|
||||
glog.V(4).Infof("backoff: success for scale set %q", name)
|
||||
|
||||
if cached != nil {
|
||||
exists = true
|
||||
@@ -522,24 +622,24 @@ func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *compute.VirtualMachine
|
||||
}
|
||||
|
||||
// createOrUpdateVMSSWithRetry invokes ss.VirtualMachineScaleSetsClient.CreateOrUpdate with exponential backoff retry.
|
||||
func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet compute.VirtualMachineScaleSet) error {
|
||||
func (ss *scaleSet) createOrUpdateVMSSWithRetry(service *v1.Service, virtualMachineScaleSet compute.VirtualMachineScaleSet) error {
|
||||
return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, *virtualMachineScaleSet.Name, virtualMachineScaleSet)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", *virtualMachineScaleSet.Name)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
return ss.processHTTPRetryResponse(service, "CreateOrUpdateVMSS", resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// updateVMSSInstancesWithRetry invokes ss.VirtualMachineScaleSetsClient.UpdateInstances with exponential backoff retry.
|
||||
func (ss *scaleSet) updateVMSSInstancesWithRetry(scaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) error {
|
||||
func (ss *scaleSet) updateVMSSInstancesWithRetry(service *v1.Service, scaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) error {
|
||||
return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, scaleSetName, vmInstanceIDs)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%s): end", scaleSetName)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
return ss.processHTTPRetryResponse(service, "CreateOrUpdateVMSSInstance", resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -554,6 +654,11 @@ func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String,
continue
}

if ss.ShouldNodeExcludedFromLoadBalancer(curNode) {
glog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", curNode.Name)
continue
}

curScaleSetName, err := extractScaleSetNameByProviderID(curNode.Spec.ProviderID)
if err != nil {
glog.V(4).Infof("Node %q is not belonging to any scale sets, assuming it is belong to availability sets", curNode.Name)
@@ -579,9 +684,10 @@ func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String,

// ensureHostsInVMSetPool ensures the given Node's primary IP configurations are
// participating in the vmSet's LoadBalancer Backend Pool.
func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID string, vmSetName string, instanceIDs []string, isInternal bool) error {
func (ss *scaleSet) ensureHostsInVMSetPool(service *v1.Service, backendPoolID string, vmSetName string, instanceIDs []string, isInternal bool) error {
glog.V(3).Infof("ensuring hosts %q of scaleset %q in LB backendpool %q", instanceIDs, vmSetName, backendPoolID)
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(vmSetName)
serviceName := getServiceName(service)
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(service, vmSetName)
if err != nil {
glog.Errorf("ss.getScaleSetWithRetry(%s) for service %q failed: %v", vmSetName, serviceName, err)
return err
@@ -649,7 +755,7 @@ func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID str
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err)
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName)
@@ -670,7 +776,7 @@ func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID str
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) {
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err)
retryErr := ss.updateVMSSInstancesWithRetry(vmSetName, vmInstanceIDs)
retryErr := ss.updateVMSSInstancesWithRetry(service, vmSetName, vmInstanceIDs)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName)
@@ -685,7 +791,8 @@ func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID str

// EnsureHostsInPool ensures the given Node's primary IP configurations are
// participating in the specified LoadBalancer Backend Pool.
func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
serviceName := getServiceName(service)
scalesets, standardNodes, err := ss.getNodesScaleSets(nodes)
if err != nil {
glog.Errorf("getNodesScaleSets() for service %q failed: %v", serviceName, err)
@@ -705,7 +812,7 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back
instanceIDs.Insert("*")
}

err := ss.ensureHostsInVMSetPool(serviceName, backendPoolID, ssName, instanceIDs.List(), isInternal)
err := ss.ensureHostsInVMSetPool(service, backendPoolID, ssName, instanceIDs.List(), isInternal)
if err != nil {
glog.Errorf("ensureHostsInVMSetPool() with scaleSet %q for service %q failed: %v", ssName, serviceName, err)
return err
@@ -713,7 +820,7 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back
}

if ss.useStandardLoadBalancer() && len(standardNodes) > 0 {
err := ss.availabilitySet.EnsureHostsInPool(serviceName, standardNodes, backendPoolID, "", isInternal)
err := ss.availabilitySet.EnsureHostsInPool(service, standardNodes, backendPoolID, "", isInternal)
if err != nil {
glog.Errorf("availabilitySet.EnsureHostsInPool() for service %q failed: %v", serviceName, err)
return err
@@ -724,9 +831,9 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back
}

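Note the instanceIDs.Insert("*") above: when every instance in the set should join the pool, the code passes the wildcard instance ID rather than enumerating VMs, which the compute API's UpdateInstances accepts. A small sketch of building that request body with the SDK version this diff moves to (assumed usage, not code from the commit):

package main

import (
    "fmt"

    "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
)

func main() {
    // "*" addresses every VM in the scale set; explicit IDs ("0", "3", ...) address a subset.
    ids := []string{"*"}
    vmInstanceIDs := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{
        InstanceIds: &ids,
    }
    fmt.Println(*vmInstanceIDs.InstanceIds) // [*]
}
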
// ensureScaleSetBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified scaleset.
func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) error {
func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(service *v1.Service, poolID, ssName string) error {
glog.V(3).Infof("ensuring backend pool %q deleted from scaleset %q", poolID, ssName)
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(ssName)
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(service, ssName)
if err != nil {
glog.Errorf("ss.ensureScaleSetBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, ssName, ssName, err)
return err
@@ -778,7 +885,7 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName)
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err)
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName)
@@ -799,7 +906,7 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro
glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", ssName)
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) {
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v", ssName, err)
retryErr := ss.updateVMSSInstancesWithRetry(ssName, vmInstanceIDs)
retryErr := ss.updateVMSSInstancesWithRetry(service, ssName, vmInstanceIDs)
if retryErr != nil {
err = retryErr
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", ssName)
@@ -819,7 +926,7 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName)
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err)
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet)
if retryErr != nil {
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName)
}
@@ -830,7 +937,7 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro
}

// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
if backendAddressPools == nil {
return nil
}
@@ -845,7 +952,7 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAd

ssName, err := extractScaleSetNameByProviderID(*ipConfigurations.ID)
if err != nil {
glog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it")
glog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it", *ipConfigurations.ID)
continue
}

@@ -861,7 +968,7 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAd
continue
}

err := ss.ensureScaleSetBackendPoolDeleted(poolID, ssName)
err := ss.ensureScaleSetBackendPoolDeleted(service, poolID, ssName)
if err != nil {
glog.Errorf("ensureScaleSetBackendPoolDeleted() with scaleSet %q failed: %v", ssName, err)
return err
@@ -872,11 +979,11 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAd
}

// getVmssMachineID returns the full identifier of a vmss virtual machine.
func (az *Cloud) getVmssMachineID(scaleSetName, instanceID string) string {
func (az *Cloud) getVmssMachineID(resourceGroup, scaleSetName, instanceID string) string {
return fmt.Sprintf(
vmssMachineIDTemplate,
az.SubscriptionID,
az.ResourceGroup,
resourceGroup,
scaleSetName,
instanceID)
}
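The last hunk threads the node's resource group into getVmssMachineID instead of always using az.ResourceGroup. Assuming vmssMachineIDTemplate follows the standard ARM resource-ID layout (the constant itself is defined outside this diff), the resulting ID looks like this sketch:

package main

import "fmt"

// Assumed template mirroring the standard ARM resource-ID layout; the actual
// vmssMachineIDTemplate constant lives elsewhere in this package.
const vmssMachineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s"

func main() {
    id := fmt.Sprintf(vmssMachineIDTemplate, "sub-id", "my-rg", "ss", "0")
    fmt.Println(id)
    // /subscriptions/sub-id/resourceGroups/my-rg/providers/Microsoft.Compute/virtualMachineScaleSets/ss/virtualMachines/0
}
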
105
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache.go
generated
vendored
@@ -27,15 +27,16 @@ import (
)

var (
vmssNameSeparator = "_"
vmssNameSeparator = "_"
vmssCacheSeparator = "#"

nodeNameToScaleSetMappingKey = "k8sNodeNameToScaleSetMappingKey"
availabilitySetNodesKey = "k8sAvailabilitySetNodesKey"

vmssCacheTTL = time.Minute
vmssVMCacheTTL = time.Minute
availabilitySetNodesCacheTTL = 15 * time.Minute
nodeNameToScaleSetMappingCacheTTL = 15 * time.Minute
availabilitySetNodesCacheTTL = 5 * time.Minute
nodeNameToScaleSetMappingCacheTTL = 5 * time.Minute
)

// nodeNameToScaleSetMapping maps nodeName to scaleSet name.
@@ -49,19 +50,19 @@ func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string {
func extractVmssVMName(name string) (string, string, error) {
split := strings.SplitAfter(name, vmssNameSeparator)
if len(split) < 2 {
glog.Errorf("Failed to extract vmssVMName %q", name)
glog.V(3).Infof("Failed to extract vmssVMName %q", name)
return "", "", ErrorNotVmssInstance
}

ssName := strings.Join(split[0:len(split)-1], "")
// removing the trailing `vmssNameSeparator` since we used SplitAfter
ssName = ssName[:len(ssName)-1]

instanceID := split[len(split)-1]

return ssName, instanceID, nil
}

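extractVmssVMName deliberately uses strings.SplitAfter plus a re-join rather than a plain Split: scale set names may themselves contain the "_" separator, so only the final separator delimits the instance ID. A standalone sketch of the same logic:

package main

import (
    "errors"
    "fmt"
    "strings"
)

// extractVmssVMName mirrors the logic above: everything before the last "_"
// is the scale set name, the remainder is the instance ID.
func extractVmssVMName(name string) (string, string, error) {
    split := strings.SplitAfter(name, "_")
    if len(split) < 2 {
        return "", "", errors.New("not a vmss instance name")
    }
    ssName := strings.Join(split[0:len(split)-1], "")
    ssName = ssName[:len(ssName)-1] // drop the trailing separator kept by SplitAfter
    instanceID := split[len(split)-1]
    return ssName, instanceID, nil
}

func main() {
    // Scale set name "my_ss" survives even though it contains the separator.
    fmt.Println(extractVmssVMName("my_ss_3")) // my_ss 3 <nil>
}
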
// vmssCache only holds vmss from ss.ResourceGroup because nodes from other resourceGroups
// will be excluded from LB backends.
func (ss *scaleSet) newVmssCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
ctx, cancel := getContextWithCancel()
@@ -85,26 +86,34 @@ func (ss *scaleSet) newVmssCache() (*timedCache, error) {

func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
scaleSetNames, err := ss.listScaleSets()
localCache := make(nodeNameToScaleSetMapping)

allResourceGroups, err := ss.GetResourceGroups()
if err != nil {
return nil, err
}

localCache := make(nodeNameToScaleSetMapping)
for _, ssName := range scaleSetNames {
vms, err := ss.listScaleSetVMs(ssName)
for _, resourceGroup := range allResourceGroups.List() {
scaleSetNames, err := ss.listScaleSets(resourceGroup)
if err != nil {
return nil, err
}

for _, vm := range vms {
if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
glog.Warningf("failed to get computerName for vmssVM (%q)", vm.Name)
continue
for _, ssName := range scaleSetNames {
vms, err := ss.listScaleSetVMs(ssName, resourceGroup)
if err != nil {
return nil, err
}

computerName := strings.ToLower(*vm.OsProfile.ComputerName)
localCache[computerName] = ssName
for _, vm := range vms {
if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
glog.Warningf("failed to get computerName for vmssVM (%q)", ssName)
continue
}

computerName := strings.ToLower(*vm.OsProfile.ComputerName)
localCache[computerName] = ssName
}
}
}

@@ -116,14 +125,23 @@ func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {

func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
vmList, err := ss.Cloud.VirtualMachineClientListWithRetry()
localCache := sets.NewString()
resourceGroups, err := ss.GetResourceGroups()
if err != nil {
return nil, err
}

localCache := sets.NewString()
for _, vm := range vmList {
localCache.Insert(*vm.Name)
for _, resourceGroup := range resourceGroups.List() {
vmList, err := ss.Cloud.VirtualMachineClientListWithRetry(resourceGroup)
if err != nil {
return nil, err
}

for _, vm := range vmList {
if vm.Name != nil {
localCache.Insert(*vm.Name)
}
}
}

return localCache, nil
@@ -132,10 +150,33 @@ func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) {
return newTimedcache(availabilitySetNodesCacheTTL, getter)
}

func buildVmssCacheKey(resourceGroup, name string) string {
// key is composed of <resourceGroup>#<vmName>
return fmt.Sprintf("%s%s%s", resourceGroup, vmssCacheSeparator, name)
}

func extractVmssCacheKey(key string) (string, string, error) {
// key is composed of <resourceGroup>#<vmName>
keyItems := strings.Split(key, vmssCacheSeparator)
if len(keyItems) != 2 {
return "", "", fmt.Errorf("key %q is not in format '<resouceGroup>#<vmName>'", key)
}

resourceGroup := keyItems[0]
vmName := keyItems[1]
return resourceGroup, vmName, nil
}

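With these helpers the per-VM cache key becomes a two-level encoding, <resourceGroup>#<scaleSetName>_<instanceID>: "#" separates the resource group from the VM name, and the VM name's last "_" separates the scale set from the instance ID. A quick round-trip sketch, using strings.LastIndex as a compact stand-in for the SplitAfter logic above:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Build: resource group and VM name joined by "#".
    key := fmt.Sprintf("%s#%s", "my-rg", "ss_3")

    // Extract: first split on "#", then split the VM name on its last "_".
    parts := strings.Split(key, "#")
    resourceGroup, vmName := parts[0], parts[1]
    idx := strings.LastIndex(vmName, "_")
    ssName, instanceID := vmName[:idx], vmName[idx+1:]
    fmt.Println(resourceGroup, ssName, instanceID) // my-rg ss 3
}
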
func (ss *scaleSet) newVmssVMCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
// vmssVM name's format is 'scaleSetName_instanceID'
ssName, instanceID, err := extractVmssVMName(key)
// key is composed of <resourceGroup>#<vmName>
resourceGroup, vmName, err := extractVmssCacheKey(key)
if err != nil {
return nil, err
}

// vmName's format is 'scaleSetName_instanceID'
ssName, instanceID, err := extractVmssVMName(vmName)
if err != nil {
return nil, err
}
@@ -147,7 +188,7 @@ func (ss *scaleSet) newVmssVMCache() (*timedCache, error) {

ctx, cancel := getContextWithCancel()
defer cancel()
result, err := ss.VirtualMachineScaleSetVMsClient.Get(ctx, ss.ResourceGroup, ssName, instanceID)
result, err := ss.VirtualMachineScaleSetVMsClient.Get(ctx, resourceGroup, ssName, instanceID)
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
@@ -158,6 +199,24 @@ func (ss *scaleSet) newVmssVMCache() (*timedCache, error) {
return nil, nil
}

// Get instanceView for vmssVM.
if result.InstanceView == nil {
viewCtx, viewCancel := getContextWithCancel()
defer viewCancel()
view, err := ss.VirtualMachineScaleSetVMsClient.GetInstanceView(viewCtx, resourceGroup, ssName, instanceID)
// It is possible that the vmssVM gets removed just before this call. So check whether the VM exist again.
exists, message, realErr = checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}
if !exists {
glog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message)
return nil, nil
}

result.InstanceView = &view
}

return &result, nil
}

175
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_test.go
generated
vendored
@@ -20,13 +20,20 @@ import (
"fmt"
"testing"

compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest/to"
"github.com/stretchr/testify/assert"
)

func newTestScaleSet(scaleSetName string, vmList []string) (*scaleSet, error) {
const (
fakePrivateIP = "10.240.0.10"
fakePublicIP = "10.10.10.10"
)

func newTestScaleSet(scaleSetName, zone string, faultDomain int32, vmList []string) (*scaleSet, error) {
cloud := getTestCloud()
setTestVirtualMachineCloud(cloud, scaleSetName, vmList)
setTestVirtualMachineCloud(cloud, scaleSetName, zone, faultDomain, vmList)
ss, err := newScaleSet(cloud)
if err != nil {
return nil, err
@@ -35,8 +42,13 @@ func newTestScaleSet(scaleSetName string, vmList []string) (*scaleSet, error) {
return ss.(*scaleSet), nil
}

func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string) {
func setTestVirtualMachineCloud(ss *Cloud, scaleSetName, zone string, faultDomain int32, vmList []string) {
virtualMachineScaleSetsClient := newFakeVirtualMachineScaleSetsClient()
virtualMachineScaleSetVMsClient := newFakeVirtualMachineScaleSetVMsClient()
publicIPAddressesClient := newFakeAzurePIPClient("rg")
interfaceClient := newFakeAzureInterfacesClient()

// set test scale sets.
scaleSets := make(map[string]map[string]compute.VirtualMachineScaleSet)
scaleSets["rg"] = map[string]compute.VirtualMachineScaleSet{
scaleSetName: {
@@ -45,20 +57,30 @@ func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string)
}
virtualMachineScaleSetsClient.setFakeStore(scaleSets)

virtualMachineScaleSetVMsClient := newFakeVirtualMachineScaleSetVMsClient()
ssVMs := make(map[string]map[string]compute.VirtualMachineScaleSetVM)
ssVMs["rg"] = make(map[string]compute.VirtualMachineScaleSetVM)
testInterfaces := map[string]map[string]network.Interface{
"rg": make(map[string]network.Interface),
}
testPIPs := map[string]map[string]network.PublicIPAddress{
"rg": make(map[string]network.PublicIPAddress),
}
ssVMs := map[string]map[string]compute.VirtualMachineScaleSetVM{
"rg": make(map[string]compute.VirtualMachineScaleSetVM),
}
for i := range vmList {
ID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d", scaleSetName, i)
nodeName := vmList[i]
ID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d", scaleSetName, i)
interfaceID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d/networkInterfaces/%s", scaleSetName, i, nodeName)
publicAddressID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d/networkInterfaces/%s/ipConfigurations/ipconfig1/publicIPAddresses/%s", scaleSetName, i, nodeName, nodeName)
instanceID := fmt.Sprintf("%d", i)
vmName := fmt.Sprintf("%s_%s", scaleSetName, instanceID)

// set vmss virtual machine.
networkInterfaces := []compute.NetworkInterfaceReference{
{
ID: &nodeName,
ID: &interfaceID,
},
}
ssVMs["rg"][vmName] = compute.VirtualMachineScaleSetVM{
vmssVM := compute.VirtualMachineScaleSetVM{
VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
OsProfile: &compute.OSProfile{
ComputerName: &nodeName,
@@ -66,17 +88,55 @@ func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string)
NetworkProfile: &compute.NetworkProfile{
NetworkInterfaces: &networkInterfaces,
},
InstanceView: &compute.VirtualMachineScaleSetVMInstanceView{
PlatformFaultDomain: &faultDomain,
},
},
ID: &ID,
InstanceID: &instanceID,
Name: &vmName,
Location: &ss.Location,
}
if zone != "" {
zones := []string{zone}
vmssVM.Zones = &zones
}
ssVMs["rg"][vmName] = vmssVM

// set interfaces.
testInterfaces["rg"][nodeName] = network.Interface{
ID: &interfaceID,
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
Primary: to.BoolPtr(true),
PrivateIPAddress: to.StringPtr(fakePrivateIP),
PublicIPAddress: &network.PublicIPAddress{
ID: to.StringPtr(publicAddressID),
},
},
},
},
},
}

// set public IPs.
testPIPs["rg"][nodeName] = network.PublicIPAddress{
ID: to.StringPtr(publicAddressID),
PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
IPAddress: to.StringPtr(fakePublicIP),
},
}
}
virtualMachineScaleSetVMsClient.setFakeStore(ssVMs)
interfaceClient.setFakeStore(testInterfaces)
publicIPAddressesClient.setFakeStore(testPIPs)

ss.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient
ss.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient
ss.InterfacesClient = interfaceClient
ss.PublicIPAddressesClient = publicIPAddressesClient
}

func TestGetScaleSetVMInstanceID(t *testing.T) {
@@ -141,7 +201,7 @@ func TestGetInstanceIDByNodeName(t *testing.T) {
}

for _, test := range testCases {
ss, err := newTestScaleSet(test.scaleSet, test.vmList)
ss, err := newTestScaleSet(test.scaleSet, "", 0, test.vmList)
assert.NoError(t, err, test.description)

real, err := ss.GetInstanceIDByNodeName(test.nodeName)
@@ -154,3 +214,96 @@ func TestGetInstanceIDByNodeName(t *testing.T) {
assert.Equal(t, test.expected, real, test.description)
}
}

func TestGetZoneByNodeName(t *testing.T) {
testCases := []struct {
description string
scaleSet string
vmList []string
nodeName string
zone string
faultDomain int32
expected string
expectError bool
}{
{
description: "scaleSet should get faultDomain for non-zoned nodes",
scaleSet: "ss",
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
nodeName: "vmssee6c2000000",
faultDomain: 3,
expected: "3",
},
{
description: "scaleSet should get availability zone for zoned nodes",
scaleSet: "ss",
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
nodeName: "vmssee6c2000000",
zone: "2",
faultDomain: 3,
expected: "westus-2",
},
{
description: "scaleSet should return error for non-exist nodes",
scaleSet: "ss",
faultDomain: 3,
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
nodeName: "agente6c2000005",
expectError: true,
},
}

for _, test := range testCases {
ss, err := newTestScaleSet(test.scaleSet, test.zone, test.faultDomain, test.vmList)
assert.NoError(t, err, test.description)

real, err := ss.GetZoneByNodeName(test.nodeName)
if test.expectError {
assert.Error(t, err, test.description)
continue
}

assert.NoError(t, err, test.description)
assert.Equal(t, test.expected, real.FailureDomain, test.description)
}
}

func TestGetIPByNodeName(t *testing.T) {
testCases := []struct {
description string
scaleSet string
vmList []string
nodeName string
expected []string
expectError bool
}{
{
description: "GetIPByNodeName should get node's privateIP and publicIP",
scaleSet: "ss",
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
nodeName: "vmssee6c2000000",
expected: []string{fakePrivateIP, fakePublicIP},
},
{
description: "GetIPByNodeName should return error for non-exist nodes",
scaleSet: "ss",
vmList: []string{"vmssee6c2000000", "vmssee6c2000001"},
nodeName: "agente6c2000005",
expectError: true,
},
}

for _, test := range testCases {
ss, err := newTestScaleSet(test.scaleSet, "", 0, test.vmList)
assert.NoError(t, err, test.description)

privateIP, publicIP, err := ss.GetIPByNodeName(test.nodeName)
if test.expectError {
assert.Error(t, err, test.description)
continue
}

assert.NoError(t, err, test.description)
assert.Equal(t, test.expected, []string{privateIP, publicIP}, test.description)
}
}
31
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go
generated
vendored
@@ -19,10 +19,11 @@ package azure
import (
"fmt"
"net/http"
"regexp"
"strings"
"time"

"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
"github.com/Azure/go-autorest/autorest"
"github.com/golang/glog"
@@ -36,6 +37,8 @@ var (
lbCacheTTL = 2 * time.Minute
nsgCacheTTL = 2 * time.Minute
rtCacheTTL = 2 * time.Minute

azureNodeProviderIDRE = regexp.MustCompile(`^azure:///subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/(?:.*)`)
)

// checkExistsFromError inspects an error and returns a true if err is nil,
@@ -189,7 +192,13 @@ func (az *Cloud) newVMCache() (*timedCache, error) {
// Consider adding separate parameter for controlling 'InstanceView' once node update issue #56276 is fixed
ctx, cancel := getContextWithCancel()
defer cancel()
vm, err := az.VirtualMachinesClient.Get(ctx, az.ResourceGroup, key, compute.InstanceView)

resourceGroup, err := az.GetNodeResourceGroup(key)
if err != nil {
return nil, err
}

vm, err := az.VirtualMachinesClient.Get(ctx, resourceGroup, key, compute.InstanceView)
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
@@ -277,3 +286,21 @@ func (az *Cloud) useStandardLoadBalancer() bool {
func (az *Cloud) excludeMasterNodesFromStandardLB() bool {
return az.ExcludeMasterFromStandardLB != nil && *az.ExcludeMasterFromStandardLB
}

// IsNodeUnmanaged returns true if the node is not managed by Azure cloud provider.
// Those nodes includes on-prem or VMs from other clouds. They will not be added to load balancer
// backends. Azure routes and managed disks are also not supported for them.
func (az *Cloud) IsNodeUnmanaged(nodeName string) (bool, error) {
unmanagedNodes, err := az.GetUnmanagedNodes()
if err != nil {
return false, err
}

return unmanagedNodes.Has(nodeName), nil
}

// IsNodeUnmanagedByProviderID returns true if the node is not managed by Azure cloud provider.
// All managed node's providerIDs are in format 'azure:///subscriptions/<id>/resourceGroups/<rg>/providers/Microsoft.Compute/.*'
func (az *Cloud) IsNodeUnmanagedByProviderID(providerID string) bool {
return azureNodeProviderIDRE.Match([]byte(providerID))
}
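azureNodeProviderIDRE encodes the providerID shape that Azure-managed nodes carry; providerIDs that fail the match (on-prem machines, VMs from other clouds) have no Azure resource behind them. A small check against representative values — the sample IDs below are illustrative:

package main

import (
    "fmt"
    "regexp"
)

// Same pattern as the azureNodeProviderIDRE added in the hunk above.
var azureNodeProviderIDRE = regexp.MustCompile(`^azure:///subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/(?:.*)`)

func main() {
    managed := "azure:///subscriptions/sub-id/resourceGroups/my-rg/providers/Microsoft.Compute/virtualMachineScaleSets/ss/virtualMachines/0"
    fmt.Println(azureNodeProviderIDRE.MatchString(managed))                    // true
    fmt.Println(azureNodeProviderIDRE.MatchString("aws:///us-east-1a/i-0abc")) // false
}
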
56
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap_test.go
generated
vendored
@@ -23,6 +23,8 @@ import (
"testing"

"github.com/Azure/go-autorest/autorest"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/util/sets"
)

func TestExtractNotFound(t *testing.T) {
@@ -51,3 +53,57 @@ func TestExtractNotFound(t *testing.T) {
}
}
}

func TestIsNodeUnmanaged(t *testing.T) {
tests := []struct {
name string
unmanagedNodes sets.String
node string
expected bool
expectErr bool
}{
{
name: "unmanaged node should return true",
unmanagedNodes: sets.NewString("node1", "node2"),
node: "node1",
expected: true,
},
{
name: "managed node should return false",
unmanagedNodes: sets.NewString("node1", "node2"),
node: "node3",
expected: false,
},
{
name: "empty unmanagedNodes should return true",
unmanagedNodes: sets.NewString(),
node: "node3",
expected: false,
},
{
name: "no synced informer should report error",
unmanagedNodes: sets.NewString(),
node: "node1",
expectErr: true,
},
}

az := getTestCloud()
for _, test := range tests {
az.unmanagedNodes = test.unmanagedNodes
if test.expectErr {
az.nodeInformerSynced = func() bool {
return false
}
}

real, err := az.IsNodeUnmanaged(test.node)
if test.expectErr {
assert.Error(t, err, test.name)
continue
}

assert.NoError(t, err, test.name)
assert.Equal(t, test.expected, real, test.name)
}
}
100
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones.go
generated
vendored
@@ -18,39 +18,76 @@ package azure

import (
"context"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"fmt"
"strconv"
"strings"
"sync"

"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
)

const instanceInfoURL = "http://169.254.169.254/metadata/v1/InstanceInfo"
const (
faultDomainURI = "v1/InstanceInfo/FD"
computeMetadataURI = "instance/compute"
)

var faultMutex = &sync.Mutex{}
var faultDomain *string

type instanceInfo struct {
ID string `json:"ID"`
UpdateDomain string `json:"UD"`
FaultDomain string `json:"FD"`
// makeZone returns the zone value in format of <region>-<zone-id>.
func (az *Cloud) makeZone(zoneID int) string {
return fmt.Sprintf("%s-%d", strings.ToLower(az.Location), zoneID)
}

// GetZone returns the Zone containing the current failure zone and locality region that the program is running in
// isAvailabilityZone returns true if the zone is in format of <region>-<zone-id>.
func (az *Cloud) isAvailabilityZone(zone string) bool {
return strings.HasPrefix(zone, fmt.Sprintf("%s-", az.Location))
}

// GetZoneID returns the ID of zone from node's zone label.
func (az *Cloud) GetZoneID(zoneLabel string) string {
if !az.isAvailabilityZone(zoneLabel) {
return ""
}

return strings.TrimPrefix(zoneLabel, fmt.Sprintf("%s-", az.Location))
}

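makeZone, isAvailabilityZone, and GetZoneID all round-trip through the same <region>-<zoneID> string. A compact sketch of the encoding and its inverse, with an illustrative location value:

package main

import (
    "fmt"
    "strings"
)

const location = "westus"

// makeZone: region plus numeric zone ID, e.g. "westus-2".
func makeZone(zoneID int) string {
    return fmt.Sprintf("%s-%d", strings.ToLower(location), zoneID)
}

// getZoneID: inverse of makeZone; returns "" for labels from other regions
// or for fault-domain style labels that lack the region prefix.
func getZoneID(zoneLabel string) string {
    prefix := fmt.Sprintf("%s-", location)
    if !strings.HasPrefix(zoneLabel, prefix) {
        return ""
    }
    return strings.TrimPrefix(zoneLabel, prefix)
}

func main() {
    z := makeZone(2)
    fmt.Printf("%q %q %q\n", z, getZoneID(z), getZoneID("3")) // "westus-2" "2" ""
}
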
// GetZone returns the Zone containing the current availability zone and locality region that the program is running in.
// If the node is not running with availability zones, then it will fall back to fault domain.
func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
return az.getZoneFromURL(instanceInfoURL)
computeInfo := ComputeMetadata{}
err := az.metadata.Object(computeMetadataURI, &computeInfo)
if err != nil {
return cloudprovider.Zone{}, err
}

if computeInfo.Zone == "" {
glog.V(3).Infof("Availability zone is not enabled for the node, falling back to fault domain")
return az.getZoneFromFaultDomain()
}

zoneID, err := strconv.Atoi(computeInfo.Zone)
if err != nil {
return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone ID %q: %v", computeInfo.Zone, err)
}

return cloudprovider.Zone{
FailureDomain: az.makeZone(zoneID),
Region: az.Location,
}, nil
}

// This is injectable for testing.
func (az *Cloud) getZoneFromURL(url string) (cloudprovider.Zone, error) {
// getZoneFromFaultDomain gets fault domain for the instance.
// Fault domain is the fallback when availability zone is not enabled for the node.
func (az *Cloud) getZoneFromFaultDomain() (cloudprovider.Zone, error) {
faultMutex.Lock()
defer faultMutex.Unlock()
if faultDomain == nil {
var err error
faultDomain, err = fetchFaultDomain(url)
faultDomain, err = az.fetchFaultDomain()
if err != nil {
return cloudprovider.Zone{}, err
}
@@ -66,6 +103,12 @@ func (az *Cloud) getZoneFromURL(url string) (cloudprovider.Zone, error) {
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) {
// Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them.
if az.IsNodeUnmanagedByProviderID(providerID) {
glog.V(2).Infof("GetZoneByProviderID: omitting unmanaged node %q", providerID)
return cloudprovider.Zone{}, nil
}

nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID)
if err != nil {
return cloudprovider.Zone{}, err
@@ -78,27 +121,24 @@ func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cl
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
func (az *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) {
// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
unmanaged, err := az.IsNodeUnmanaged(string(nodeName))
if err != nil {
return cloudprovider.Zone{}, err
}
if unmanaged {
glog.V(2).Infof("GetZoneByNodeName: omitting unmanaged node %q", nodeName)
return cloudprovider.Zone{}, nil
}

return az.vmSet.GetZoneByNodeName(string(nodeName))
}

func fetchFaultDomain(url string) (*string, error) {
resp, err := http.Get(url)
func (az *Cloud) fetchFaultDomain() (*string, error) {
faultDomain, err := az.metadata.Text(faultDomainURI)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return readFaultDomain(resp.Body)
}

func readFaultDomain(reader io.Reader) (*string, error) {
var instanceInfo instanceInfo
body, err := ioutil.ReadAll(reader)
if err != nil {
return nil, err
}
err = json.Unmarshal(body, &instanceInfo)
if err != nil {
return nil, err
}
return &instanceInfo.FaultDomain, nil
return &faultDomain, nil
}
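The rewritten file reads zone and fault-domain data through the az.metadata helper, which wraps the Azure Instance Metadata Service, rather than fetching the legacy InstanceInfo URL directly. A hedged sketch of such a raw IMDS read; the api-version and query parameters follow public IMDS documentation and are assumptions, not values taken from this diff:

package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
)

func main() {
    // IMDS is only reachable from inside an Azure VM and requires the Metadata header.
    req, err := http.NewRequest("GET",
        "http://169.254.169.254/metadata/instance/compute?api-version=2017-12-01&format=json", nil)
    if err != nil {
        panic(err)
    }
    req.Header.Add("Metadata", "true")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err) // expected off-VM: the link-local address is unreachable
    }
    defer resp.Body.Close()
    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(body)) // JSON including fields such as "zone" and "platformFaultDomain"
}
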
73
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones_test.go
generated
vendored
Normal file
@@ -0,0 +1,73 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
"testing"
)

func TestIsAvailabilityZone(t *testing.T) {
location := "eastus"
az := &Cloud{
Config: Config{
Location: location,
},
}
tests := []struct {
desc string
zone string
expected bool
}{
{"empty string should return false", "", false},
{"wrong farmat should return false", "123", false},
{"wrong location should return false", "chinanorth-1", false},
{"correct zone should return true", "eastus-1", true},
}

for _, test := range tests {
actual := az.isAvailabilityZone(test.zone)
if actual != test.expected {
t.Errorf("test [%q] get unexpected result: %v != %v", test.desc, actual, test.expected)
}
}
}

func TestGetZoneID(t *testing.T) {
location := "eastus"
az := &Cloud{
Config: Config{
Location: location,
},
}
tests := []struct {
desc string
zone string
expected string
}{
{"empty string should return empty string", "", ""},
{"wrong farmat should return empty string", "123", ""},
{"wrong location should return empty string", "chinanorth-1", ""},
{"correct zone should return zone ID", "eastus-1", "1"},
}

for _, test := range tests {
actual := az.GetZoneID(test.zone)
if actual != test.expected {
t.Errorf("test [%q] get unexpected result: %q != %q", test.desc, actual, test.expected)
}
}
}
Block a user