Add generated file
This PR adds generated files under pkg/client and vendor folder.
This commit is contained in:
118
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD
generated
vendored
Normal file
118
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD
generated
vendored
Normal file
@@ -0,0 +1,118 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"azure.go",
|
||||
"azure_backoff.go",
|
||||
"azure_blobDiskController.go",
|
||||
"azure_cache.go",
|
||||
"azure_client.go",
|
||||
"azure_controller_common.go",
|
||||
"azure_controller_standard.go",
|
||||
"azure_controller_vmss.go",
|
||||
"azure_fakes.go",
|
||||
"azure_file.go",
|
||||
"azure_instance_metadata.go",
|
||||
"azure_instances.go",
|
||||
"azure_loadbalancer.go",
|
||||
"azure_managedDiskController.go",
|
||||
"azure_metrics.go",
|
||||
"azure_routes.go",
|
||||
"azure_standard.go",
|
||||
"azure_storage.go",
|
||||
"azure_storageaccount.go",
|
||||
"azure_vmsets.go",
|
||||
"azure_vmss.go",
|
||||
"azure_vmss_cache.go",
|
||||
"azure_wrap.go",
|
||||
"azure_zones.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure",
|
||||
deps = [
|
||||
"//pkg/api/v1/service:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/cloudprovider/providers/azure/auth:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/version:go_default_library",
|
||||
"//pkg/volume:go_default_library",
|
||||
"//pkg/volume/util:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
|
||||
"//vendor/github.com/ghodss/yaml:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
|
||||
"//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"azure_backoff_test.go",
|
||||
"azure_cache_test.go",
|
||||
"azure_loadbalancer_test.go",
|
||||
"azure_metrics_test.go",
|
||||
"azure_routes_test.go",
|
||||
"azure_standard_test.go",
|
||||
"azure_storage_test.go",
|
||||
"azure_storageaccount_test.go",
|
||||
"azure_test.go",
|
||||
"azure_vmss_cache_test.go",
|
||||
"azure_vmss_test.go",
|
||||
"azure_wrap_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/api/v1/service:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/cloudprovider/providers/azure/auth:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library",
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//pkg/cloudprovider/providers/azure/auth:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
)
|
16
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/OWNERS
generated
vendored
Normal file
16
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/OWNERS
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
approvers:
|
||||
- andyzhangx
|
||||
- brendandburns
|
||||
- colemickens
|
||||
- feiskyer
|
||||
- jdumars
|
||||
- karataliu
|
||||
- khenidak
|
||||
reviewers:
|
||||
- andyzhangx
|
||||
- brendandburns
|
||||
- colemickens
|
||||
- feiskyer
|
||||
- jdumars
|
||||
- karataliu
|
||||
- khenidak
|
28
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/BUILD
generated
vendored
Normal file
28
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/BUILD
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["azure_auth.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/crypto/pkcs12:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
124
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/azure_auth.go
generated
vendored
Normal file
124
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth/azure_auth.go
generated
vendored
Normal file
@@ -0,0 +1,124 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest/adal"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/golang/glog"
|
||||
"golang.org/x/crypto/pkcs12"
|
||||
)
|
||||
|
||||
// AzureAuthConfig holds auth related part of cloud config
|
||||
type AzureAuthConfig struct {
|
||||
// The cloud environment identifier. Takes values from https://github.com/Azure/go-autorest/blob/ec5f4903f77ed9927ac95b19ab8e44ada64c1356/autorest/azure/environments.go#L13
|
||||
Cloud string `json:"cloud" yaml:"cloud"`
|
||||
// The AAD Tenant ID for the Subscription that the cluster is deployed in
|
||||
TenantID string `json:"tenantId" yaml:"tenantId"`
|
||||
// The ClientID for an AAD application with RBAC access to talk to Azure RM APIs
|
||||
AADClientID string `json:"aadClientId" yaml:"aadClientId"`
|
||||
// The ClientSecret for an AAD application with RBAC access to talk to Azure RM APIs
|
||||
AADClientSecret string `json:"aadClientSecret" yaml:"aadClientSecret"`
|
||||
// The path of a client certificate for an AAD application with RBAC access to talk to Azure RM APIs
|
||||
AADClientCertPath string `json:"aadClientCertPath" yaml:"aadClientCertPath"`
|
||||
// The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs
|
||||
AADClientCertPassword string `json:"aadClientCertPassword" yaml:"aadClientCertPassword"`
|
||||
// Use managed service identity for the virtual machine to access Azure ARM APIs
|
||||
UseManagedIdentityExtension bool `json:"useManagedIdentityExtension" yaml:"useManagedIdentityExtension"`
|
||||
// The ID of the Azure Subscription that the cluster is deployed in
|
||||
SubscriptionID string `json:"subscriptionId" yaml:"subscriptionId"`
|
||||
}
|
||||
|
||||
// GetServicePrincipalToken creates a new service principal token based on the configuration
|
||||
func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (*adal.ServicePrincipalToken, error) {
|
||||
oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating the OAuth config: %v", err)
|
||||
}
|
||||
|
||||
if config.UseManagedIdentityExtension {
|
||||
glog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
|
||||
msiEndpoint, err := adal.GetMSIVMEndpoint()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err)
|
||||
}
|
||||
return adal.NewServicePrincipalTokenFromMSI(
|
||||
msiEndpoint,
|
||||
env.ServiceManagementEndpoint)
|
||||
}
|
||||
|
||||
if len(config.AADClientSecret) > 0 {
|
||||
glog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token")
|
||||
return adal.NewServicePrincipalToken(
|
||||
*oauthConfig,
|
||||
config.AADClientID,
|
||||
config.AADClientSecret,
|
||||
env.ServiceManagementEndpoint)
|
||||
}
|
||||
|
||||
if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
|
||||
glog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token")
|
||||
certData, err := ioutil.ReadFile(config.AADClientCertPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err)
|
||||
}
|
||||
certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("decoding the client certificate: %v", err)
|
||||
}
|
||||
return adal.NewServicePrincipalTokenFromCertificate(
|
||||
*oauthConfig,
|
||||
config.AADClientID,
|
||||
certificate,
|
||||
privateKey,
|
||||
env.ServiceManagementEndpoint)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("No credentials provided for AAD application %s", config.AADClientID)
|
||||
}
|
||||
|
||||
// ParseAzureEnvironment returns azure environment by name
|
||||
func ParseAzureEnvironment(cloudName string) (*azure.Environment, error) {
|
||||
var env azure.Environment
|
||||
var err error
|
||||
if cloudName == "" {
|
||||
env = azure.PublicCloud
|
||||
} else {
|
||||
env, err = azure.EnvironmentFromName(cloudName)
|
||||
}
|
||||
return &env, err
|
||||
}
|
||||
|
||||
// decodePkcs12 decodes a PKCS#12 client certificate by extracting the public certificate and
|
||||
// the private RSA key
|
||||
func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
|
||||
privateKey, certificate, err := pkcs12.Decode(pkcs, password)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("decoding the PKCS#12 client certificate: %v", err)
|
||||
}
|
||||
rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey)
|
||||
if !isRsaKey {
|
||||
return nil, nil, fmt.Errorf("PKCS#12 certificate must contain a RSA private key")
|
||||
}
|
||||
|
||||
return certificate, rsaPrivateKey, nil
|
||||
}
|
426
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go
generated
vendored
Normal file
426
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go
generated
vendored
Normal file
@@ -0,0 +1,426 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/version"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/ghodss/yaml"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const (
|
||||
// CloudProviderName is the value used for the --cloud-provider flag
|
||||
CloudProviderName = "azure"
|
||||
rateLimitQPSDefault = 1.0
|
||||
rateLimitBucketDefault = 5
|
||||
backoffRetriesDefault = 6
|
||||
backoffExponentDefault = 1.5
|
||||
backoffDurationDefault = 5 // in seconds
|
||||
backoffJitterDefault = 1.0
|
||||
maximumLoadBalancerRuleCount = 148 // According to Azure LB rule default limit
|
||||
|
||||
vmTypeVMSS = "vmss"
|
||||
vmTypeStandard = "standard"
|
||||
|
||||
loadBalancerSkuBasic = "basic"
|
||||
loadBalancerSkuStandard = "standard"
|
||||
)
|
||||
|
||||
var (
|
||||
// Master nodes are not added to standard load balancer by default.
|
||||
defaultExcludeMasterFromStandardLB = true
|
||||
)
|
||||
|
||||
// Config holds the configuration parsed from the --cloud-config flag
|
||||
// All fields are required unless otherwise specified
|
||||
type Config struct {
|
||||
auth.AzureAuthConfig
|
||||
|
||||
// The name of the resource group that the cluster is deployed in
|
||||
ResourceGroup string `json:"resourceGroup" yaml:"resourceGroup"`
|
||||
// The location of the resource group that the cluster is deployed in
|
||||
Location string `json:"location" yaml:"location"`
|
||||
// The name of the VNet that the cluster is deployed in
|
||||
VnetName string `json:"vnetName" yaml:"vnetName"`
|
||||
// The name of the resource group that the Vnet is deployed in
|
||||
VnetResourceGroup string `json:"vnetResourceGroup" yaml:"vnetResourceGroup"`
|
||||
// The name of the subnet that the cluster is deployed in
|
||||
SubnetName string `json:"subnetName" yaml:"subnetName"`
|
||||
// The name of the security group attached to the cluster's subnet
|
||||
SecurityGroupName string `json:"securityGroupName" yaml:"securityGroupName"`
|
||||
// (Optional in 1.6) The name of the route table attached to the subnet that the cluster is deployed in
|
||||
RouteTableName string `json:"routeTableName" yaml:"routeTableName"`
|
||||
// (Optional) The name of the availability set that should be used as the load balancer backend
|
||||
// If this is set, the Azure cloudprovider will only add nodes from that availability set to the load
|
||||
// balancer backend pool. If this is not set, and multiple agent pools (availability sets) are used, then
|
||||
// the cloudprovider will try to add all nodes to a single backend pool which is forbidden.
|
||||
// In other words, if you use multiple agent pools (availability sets), you MUST set this field.
|
||||
PrimaryAvailabilitySetName string `json:"primaryAvailabilitySetName" yaml:"primaryAvailabilitySetName"`
|
||||
// The type of azure nodes. Candidate values are: vmss and standard.
|
||||
// If not set, it will be default to standard.
|
||||
VMType string `json:"vmType" yaml:"vmType"`
|
||||
// The name of the scale set that should be used as the load balancer backend.
|
||||
// If this is set, the Azure cloudprovider will only add nodes from that scale set to the load
|
||||
// balancer backend pool. If this is not set, and multiple agent pools (scale sets) are used, then
|
||||
// the cloudprovider will try to add all nodes to a single backend pool which is forbidden.
|
||||
// In other words, if you use multiple agent pools (scale sets), you MUST set this field.
|
||||
PrimaryScaleSetName string `json:"primaryScaleSetName" yaml:"primaryScaleSetName"`
|
||||
// Enable exponential backoff to manage resource request retries
|
||||
CloudProviderBackoff bool `json:"cloudProviderBackoff" yaml:"cloudProviderBackoff"`
|
||||
// Backoff retry limit
|
||||
CloudProviderBackoffRetries int `json:"cloudProviderBackoffRetries" yaml:"cloudProviderBackoffRetries"`
|
||||
// Backoff exponent
|
||||
CloudProviderBackoffExponent float64 `json:"cloudProviderBackoffExponent" yaml:"cloudProviderBackoffExponent"`
|
||||
// Backoff duration
|
||||
CloudProviderBackoffDuration int `json:"cloudProviderBackoffDuration" yaml:"cloudProviderBackoffDuration"`
|
||||
// Backoff jitter
|
||||
CloudProviderBackoffJitter float64 `json:"cloudProviderBackoffJitter" yaml:"cloudProviderBackoffJitter"`
|
||||
// Enable rate limiting
|
||||
CloudProviderRateLimit bool `json:"cloudProviderRateLimit" yaml:"cloudProviderRateLimit"`
|
||||
// Rate limit QPS (Read)
|
||||
CloudProviderRateLimitQPS float32 `json:"cloudProviderRateLimitQPS" yaml:"cloudProviderRateLimitQPS"`
|
||||
// Rate limit Bucket Size
|
||||
CloudProviderRateLimitBucket int `json:"cloudProviderRateLimitBucket" yaml:"cloudProviderRateLimitBucket"`
|
||||
// Rate limit QPS (Write)
|
||||
CloudProviderRateLimitQPSWrite float32 `json:"cloudProviderRateLimitQPSWrite" yaml:"cloudProviderRateLimitQPSWrite"`
|
||||
// Rate limit Bucket Size
|
||||
CloudProviderRateLimitBucketWrite int `json:"cloudProviderRateLimitBucketWrite" yaml:"cloudProviderRateLimitBucketWrite"`
|
||||
|
||||
// Use instance metadata service where possible
|
||||
UseInstanceMetadata bool `json:"useInstanceMetadata" yaml:"useInstanceMetadata"`
|
||||
|
||||
// Sku of Load Balancer and Public IP. Candidate values are: basic and standard.
|
||||
// If not set, it will be default to basic.
|
||||
LoadBalancerSku string `json:"loadBalancerSku" yaml:"loadBalancerSku"`
|
||||
// ExcludeMasterFromStandardLB excludes master nodes from standard load balancer.
|
||||
// If not set, it will be default to true.
|
||||
ExcludeMasterFromStandardLB *bool `json:"excludeMasterFromStandardLB" yaml:"excludeMasterFromStandardLB"`
|
||||
|
||||
// Maximum allowed LoadBalancer Rule Count is the limit enforced by Azure Load balancer
|
||||
MaximumLoadBalancerRuleCount int `json:"maximumLoadBalancerRuleCount" yaml:"maximumLoadBalancerRuleCount"`
|
||||
}
|
||||
|
||||
// Cloud holds the config and clients
|
||||
type Cloud struct {
|
||||
Config
|
||||
Environment azure.Environment
|
||||
RoutesClient RoutesClient
|
||||
SubnetsClient SubnetsClient
|
||||
InterfacesClient InterfacesClient
|
||||
RouteTablesClient RouteTablesClient
|
||||
LoadBalancerClient LoadBalancersClient
|
||||
PublicIPAddressesClient PublicIPAddressesClient
|
||||
SecurityGroupsClient SecurityGroupsClient
|
||||
VirtualMachinesClient VirtualMachinesClient
|
||||
StorageAccountClient StorageAccountClient
|
||||
DisksClient DisksClient
|
||||
FileClient FileClient
|
||||
resourceRequestBackoff wait.Backoff
|
||||
metadata *InstanceMetadata
|
||||
vmSet VMSet
|
||||
|
||||
// Clients for vmss.
|
||||
VirtualMachineScaleSetsClient VirtualMachineScaleSetsClient
|
||||
VirtualMachineScaleSetVMsClient VirtualMachineScaleSetVMsClient
|
||||
|
||||
vmCache *timedCache
|
||||
lbCache *timedCache
|
||||
nsgCache *timedCache
|
||||
rtCache *timedCache
|
||||
|
||||
*BlobDiskController
|
||||
*ManagedDiskController
|
||||
*controllerCommon
|
||||
}
|
||||
|
||||
func init() {
|
||||
cloudprovider.RegisterCloudProvider(CloudProviderName, NewCloud)
|
||||
}
|
||||
|
||||
// NewCloud returns a Cloud with initialized clients
|
||||
func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
|
||||
config, err := parseConfig(configReader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if config.VMType == "" {
|
||||
// default to standard vmType if not set.
|
||||
config.VMType = vmTypeStandard
|
||||
}
|
||||
|
||||
env, err := auth.ParseAzureEnvironment(config.Cloud)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
servicePrincipalToken, err := auth.GetServicePrincipalToken(&config.AzureAuthConfig, env)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// operationPollRateLimiter.Accept() is a no-op if rate limits are configured off.
|
||||
operationPollRateLimiter := flowcontrol.NewFakeAlwaysRateLimiter()
|
||||
operationPollRateLimiterWrite := flowcontrol.NewFakeAlwaysRateLimiter()
|
||||
|
||||
// If reader is provided (and no writer) we will
|
||||
// use the same value for both.
|
||||
if config.CloudProviderRateLimit {
|
||||
// Assign rate limit defaults if no configuration was passed in
|
||||
if config.CloudProviderRateLimitQPS == 0 {
|
||||
config.CloudProviderRateLimitQPS = rateLimitQPSDefault
|
||||
}
|
||||
if config.CloudProviderRateLimitBucket == 0 {
|
||||
config.CloudProviderRateLimitBucket = rateLimitBucketDefault
|
||||
}
|
||||
if config.CloudProviderRateLimitQPSWrite == 0 {
|
||||
config.CloudProviderRateLimitQPSWrite = rateLimitQPSDefault
|
||||
}
|
||||
if config.CloudProviderRateLimitBucketWrite == 0 {
|
||||
config.CloudProviderRateLimitBucketWrite = rateLimitBucketDefault
|
||||
}
|
||||
|
||||
operationPollRateLimiter = flowcontrol.NewTokenBucketRateLimiter(
|
||||
config.CloudProviderRateLimitQPS,
|
||||
config.CloudProviderRateLimitBucket)
|
||||
|
||||
operationPollRateLimiterWrite = flowcontrol.NewTokenBucketRateLimiter(
|
||||
config.CloudProviderRateLimitQPSWrite,
|
||||
config.CloudProviderRateLimitBucketWrite)
|
||||
|
||||
glog.V(2).Infof("Azure cloudprovider (read ops) using rate limit config: QPS=%g, bucket=%d",
|
||||
config.CloudProviderRateLimitQPS,
|
||||
config.CloudProviderRateLimitBucket)
|
||||
|
||||
glog.V(2).Infof("Azure cloudprovider (write ops) using rate limit config: QPS=%g, bucket=%d",
|
||||
config.CloudProviderRateLimitQPSWrite,
|
||||
config.CloudProviderRateLimitBucketWrite)
|
||||
}
|
||||
|
||||
// Do not add master nodes to standard LB by default.
|
||||
if config.ExcludeMasterFromStandardLB == nil {
|
||||
config.ExcludeMasterFromStandardLB = &defaultExcludeMasterFromStandardLB
|
||||
}
|
||||
|
||||
azClientConfig := &azClientConfig{
|
||||
subscriptionID: config.SubscriptionID,
|
||||
resourceManagerEndpoint: env.ResourceManagerEndpoint,
|
||||
servicePrincipalToken: servicePrincipalToken,
|
||||
rateLimiterReader: operationPollRateLimiter,
|
||||
rateLimiterWriter: operationPollRateLimiterWrite,
|
||||
}
|
||||
az := Cloud{
|
||||
Config: *config,
|
||||
Environment: *env,
|
||||
|
||||
DisksClient: newAzDisksClient(azClientConfig),
|
||||
RoutesClient: newAzRoutesClient(azClientConfig),
|
||||
SubnetsClient: newAzSubnetsClient(azClientConfig),
|
||||
InterfacesClient: newAzInterfacesClient(azClientConfig),
|
||||
RouteTablesClient: newAzRouteTablesClient(azClientConfig),
|
||||
LoadBalancerClient: newAzLoadBalancersClient(azClientConfig),
|
||||
SecurityGroupsClient: newAzSecurityGroupsClient(azClientConfig),
|
||||
StorageAccountClient: newAzStorageAccountClient(azClientConfig),
|
||||
VirtualMachinesClient: newAzVirtualMachinesClient(azClientConfig),
|
||||
PublicIPAddressesClient: newAzPublicIPAddressesClient(azClientConfig),
|
||||
VirtualMachineScaleSetsClient: newAzVirtualMachineScaleSetsClient(azClientConfig),
|
||||
VirtualMachineScaleSetVMsClient: newAzVirtualMachineScaleSetVMsClient(azClientConfig),
|
||||
FileClient: &azureFileClient{env: *env},
|
||||
}
|
||||
|
||||
// Conditionally configure resource request backoff
|
||||
if az.CloudProviderBackoff {
|
||||
// Assign backoff defaults if no configuration was passed in
|
||||
if az.CloudProviderBackoffRetries == 0 {
|
||||
az.CloudProviderBackoffRetries = backoffRetriesDefault
|
||||
}
|
||||
if az.CloudProviderBackoffExponent == 0 {
|
||||
az.CloudProviderBackoffExponent = backoffExponentDefault
|
||||
}
|
||||
if az.CloudProviderBackoffDuration == 0 {
|
||||
az.CloudProviderBackoffDuration = backoffDurationDefault
|
||||
}
|
||||
if az.CloudProviderBackoffJitter == 0 {
|
||||
az.CloudProviderBackoffJitter = backoffJitterDefault
|
||||
}
|
||||
az.resourceRequestBackoff = wait.Backoff{
|
||||
Steps: az.CloudProviderBackoffRetries,
|
||||
Factor: az.CloudProviderBackoffExponent,
|
||||
Duration: time.Duration(az.CloudProviderBackoffDuration) * time.Second,
|
||||
Jitter: az.CloudProviderBackoffJitter,
|
||||
}
|
||||
glog.V(2).Infof("Azure cloudprovider using retry backoff: retries=%d, exponent=%f, duration=%d, jitter=%f",
|
||||
az.CloudProviderBackoffRetries,
|
||||
az.CloudProviderBackoffExponent,
|
||||
az.CloudProviderBackoffDuration,
|
||||
az.CloudProviderBackoffJitter)
|
||||
}
|
||||
|
||||
az.metadata = NewInstanceMetadata()
|
||||
|
||||
if az.MaximumLoadBalancerRuleCount == 0 {
|
||||
az.MaximumLoadBalancerRuleCount = maximumLoadBalancerRuleCount
|
||||
}
|
||||
|
||||
if strings.EqualFold(vmTypeVMSS, az.Config.VMType) {
|
||||
az.vmSet, err = newScaleSet(&az)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
az.vmSet = newAvailabilitySet(&az)
|
||||
}
|
||||
|
||||
az.vmCache, err = az.newVMCache()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
az.lbCache, err = az.newLBCache()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
az.nsgCache, err = az.newNSGCache()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
az.rtCache, err = az.newRouteTableCache()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := initDiskControllers(&az); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &az, nil
|
||||
}
|
||||
|
||||
// parseConfig returns a parsed configuration for an Azure cloudprovider config file
|
||||
func parseConfig(configReader io.Reader) (*Config, error) {
|
||||
var config Config
|
||||
|
||||
if configReader == nil {
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
configContents, err := ioutil.ReadAll(configReader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = yaml.Unmarshal(configContents, &config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
// Initialize passes a Kubernetes clientBuilder interface to the cloud provider
|
||||
func (az *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) {}
|
||||
|
||||
// LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise.
|
||||
func (az *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
|
||||
return az, true
|
||||
}
|
||||
|
||||
// Instances returns an instances interface. Also returns true if the interface is supported, false otherwise.
|
||||
func (az *Cloud) Instances() (cloudprovider.Instances, bool) {
|
||||
return az, true
|
||||
}
|
||||
|
||||
// Zones returns a zones interface. Also returns true if the interface is supported, false otherwise.
|
||||
func (az *Cloud) Zones() (cloudprovider.Zones, bool) {
|
||||
return az, true
|
||||
}
|
||||
|
||||
// Clusters returns a clusters interface. Also returns true if the interface is supported, false otherwise.
|
||||
func (az *Cloud) Clusters() (cloudprovider.Clusters, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Routes returns a routes interface along with whether the interface is supported.
|
||||
func (az *Cloud) Routes() (cloudprovider.Routes, bool) {
|
||||
return az, true
|
||||
}
|
||||
|
||||
// HasClusterID returns true if the cluster has a clusterID
|
||||
func (az *Cloud) HasClusterID() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// ProviderName returns the cloud provider ID.
|
||||
func (az *Cloud) ProviderName() string {
|
||||
return CloudProviderName
|
||||
}
|
||||
|
||||
// configureUserAgent configures the autorest client with a user agent that
|
||||
// includes "kubernetes" and the full kubernetes git version string
|
||||
// example:
|
||||
// Azure-SDK-for-Go/7.0.1-beta arm-network/2016-09-01; kubernetes-cloudprovider/v1.7.0-alpha.2.711+a2fadef8170bb0-dirty;
|
||||
func configureUserAgent(client *autorest.Client) {
|
||||
k8sVersion := version.Get().GitVersion
|
||||
client.UserAgent = fmt.Sprintf("%s; kubernetes-cloudprovider/%s", client.UserAgent, k8sVersion)
|
||||
}
|
||||
|
||||
func initDiskControllers(az *Cloud) error {
|
||||
// Common controller contains the function
|
||||
// needed by both blob disk and managed disk controllers
|
||||
|
||||
common := &controllerCommon{
|
||||
location: az.Location,
|
||||
storageEndpointSuffix: az.Environment.StorageEndpointSuffix,
|
||||
resourceGroup: az.ResourceGroup,
|
||||
subscriptionID: az.SubscriptionID,
|
||||
cloud: az,
|
||||
}
|
||||
|
||||
// BlobDiskController: contains the function needed to
|
||||
// create/attach/detach/delete blob based (unmanaged disks)
|
||||
blobController, err := newBlobDiskController(common)
|
||||
if err != nil {
|
||||
return fmt.Errorf("AzureDisk - failed to init Blob Disk Controller with error (%s)", err.Error())
|
||||
}
|
||||
|
||||
// ManagedDiskController: contains the functions needed to
|
||||
// create/attach/detach/delete managed disks
|
||||
managedController, err := newManagedDiskController(common)
|
||||
if err != nil {
|
||||
return fmt.Errorf("AzureDisk - failed to init Managed Disk Controller with error (%s)", err.Error())
|
||||
}
|
||||
|
||||
az.BlobDiskController = blobController
|
||||
az.ManagedDiskController = managedController
|
||||
az.controllerCommon = common
|
||||
|
||||
return nil
|
||||
}
|
371
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go
generated
vendored
Normal file
371
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go
generated
vendored
Normal file
@@ -0,0 +1,371 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
// requestBackoff if backoff is disabled in cloud provider it
|
||||
// returns a new Backoff object steps = 1
|
||||
// This is to make sure that the requested command executes
|
||||
// at least once
|
||||
func (az *Cloud) requestBackoff() (resourceRequestBackoff wait.Backoff) {
|
||||
if az.CloudProviderBackoff {
|
||||
return az.resourceRequestBackoff
|
||||
}
|
||||
resourceRequestBackoff = wait.Backoff{
|
||||
Steps: 1,
|
||||
}
|
||||
|
||||
return resourceRequestBackoff
|
||||
}
|
||||
|
||||
// GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry
|
||||
func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, error) {
|
||||
var machine compute.VirtualMachine
|
||||
var retryErr error
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
machine, retryErr = az.getVirtualMachine(name)
|
||||
if retryErr == cloudprovider.InstanceNotFound {
|
||||
return true, cloudprovider.InstanceNotFound
|
||||
}
|
||||
if retryErr != nil {
|
||||
glog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr)
|
||||
return false, nil
|
||||
}
|
||||
glog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name)
|
||||
return true, nil
|
||||
})
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = retryErr
|
||||
}
|
||||
|
||||
return machine, err
|
||||
}
|
||||
|
||||
// VirtualMachineClientListWithRetry invokes az.VirtualMachinesClient.List with exponential backoff retry
|
||||
func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine, error) {
|
||||
allNodes := []compute.VirtualMachine{}
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
allNodes, retryErr = az.VirtualMachinesClient.List(ctx, az.ResourceGroup)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v",
|
||||
az.ResourceGroup,
|
||||
retryErr)
|
||||
return false, retryErr
|
||||
}
|
||||
glog.V(2).Infof("VirtualMachinesClient.List(%v) - backoff: success", az.ResourceGroup)
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return allNodes, err
|
||||
}
|
||||
|
||||
// GetIPForMachineWithRetry invokes az.getIPForMachine with exponential backoff retry
|
||||
func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string, error) {
|
||||
var ip, publicIP string
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
ip, publicIP, retryErr = az.getIPForMachine(name)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr)
|
||||
return false, nil
|
||||
}
|
||||
glog.V(2).Infof("GetIPForMachineWithRetry(%s): backoff success", name)
|
||||
return true, nil
|
||||
})
|
||||
return ip, publicIP, err
|
||||
}
|
||||
|
||||
// CreateOrUpdateSGWithRetry invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.SecurityGroupsClient.CreateOrUpdate(ctx, az.ResourceGroup, *sg.Name, sg)
|
||||
glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
|
||||
done, err := processHTTPRetryResponse(resp, err)
|
||||
if done && err == nil {
|
||||
// Invalidate the cache right after updating
|
||||
az.nsgCache.Delete(*sg.Name)
|
||||
}
|
||||
return done, err
|
||||
})
|
||||
}
|
||||
|
||||
// CreateOrUpdateLBWithRetry invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.LoadBalancerClient.CreateOrUpdate(ctx, az.ResourceGroup, *lb.Name, lb)
|
||||
glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
|
||||
done, err := processHTTPRetryResponse(resp, err)
|
||||
if done && err == nil {
|
||||
// Invalidate the cache right after updating
|
||||
az.lbCache.Delete(*lb.Name)
|
||||
}
|
||||
return done, err
|
||||
})
|
||||
}
|
||||
|
||||
// ListLBWithRetry invokes az.LoadBalancerClient.List with exponential backoff retry
|
||||
func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) {
|
||||
var allLBs []network.LoadBalancer
|
||||
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
allLBs, retryErr = az.LoadBalancerClient.List(ctx, az.ResourceGroup)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v",
|
||||
az.ResourceGroup,
|
||||
retryErr)
|
||||
return false, retryErr
|
||||
}
|
||||
glog.V(2).Infof("LoadBalancerClient.List(%v) - backoff: success", az.ResourceGroup)
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return allLBs, nil
|
||||
}
|
||||
|
||||
// ListPIPWithRetry list the PIP resources in the given resource group
|
||||
func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAddress, error) {
|
||||
var allPIPs []network.PublicIPAddress
|
||||
|
||||
err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
allPIPs, retryErr = az.PublicIPAddressesClient.List(ctx, pipResourceGroup)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v",
|
||||
pipResourceGroup,
|
||||
retryErr)
|
||||
return false, retryErr
|
||||
}
|
||||
glog.V(2).Infof("PublicIPAddressesClient.List(%v) - backoff: success", pipResourceGroup)
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return allPIPs, nil
|
||||
}
|
||||
|
||||
// CreateOrUpdatePIPWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdatePIPWithRetry(pipResourceGroup string, pip network.PublicIPAddress) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, *pip.Name, pip)
|
||||
glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// CreateOrUpdateInterfaceWithRetry invokes az.InterfacesClient.CreateOrUpdate with exponential backoff retry.
func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error {
	return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
		ctx, cancel := getContextWithCancel()
		defer cancel()

		resp, err := az.InterfacesClient.CreateOrUpdate(ctx, az.ResourceGroup, *nic.Name, nic)
		glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name)
		return processHTTPRetryResponse(resp, err)
	})
}
|
||||
|
||||
// DeletePublicIPWithRetry invokes az.PublicIPAddressesClient.Delete with exponential backoff retry
|
||||
func (az *Cloud) DeletePublicIPWithRetry(pipResourceGroup string, pipName string) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.PublicIPAddressesClient.Delete(ctx, pipResourceGroup, pipName)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// DeleteLBWithRetry invokes az.LoadBalancerClient.Delete with exponential backoff retry
|
||||
func (az *Cloud) DeleteLBWithRetry(lbName string) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.LoadBalancerClient.Delete(ctx, az.ResourceGroup, lbName)
|
||||
done, err := processHTTPRetryResponse(resp, err)
|
||||
if done && err == nil {
|
||||
// Invalidate the cache right after deleting
|
||||
az.lbCache.Delete(lbName)
|
||||
}
|
||||
return done, err
|
||||
})
|
||||
}
|
||||
|
||||
// CreateOrUpdateRouteTableWithRetry invokes az.RouteTablesClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, routeTable)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// CreateOrUpdateRouteWithRetry invokes az.RoutesClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route)
|
||||
glog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// DeleteRouteWithRetry invokes az.RoutesClient.Delete with exponential backoff retry
|
||||
func (az *Cloud) DeleteRouteWithRetry(routeName string) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.RoutesClient.Delete(ctx, az.ResourceGroup, az.RouteTableName, routeName)
|
||||
glog.V(10).Infof("RoutesClient.Delete(%s): end", az.RouteTableName)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// CreateOrUpdateVMWithRetry invokes az.VirtualMachinesClient.CreateOrUpdate with exponential backoff retry
|
||||
func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualMachine) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
resp, err := az.VirtualMachinesClient.CreateOrUpdate(ctx, az.ResourceGroup, vmName, newVM)
|
||||
glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateVmssVMWithRetry invokes az.VirtualMachineScaleSetVMsClient.Update with exponential backoff retry
|
||||
func (az *Cloud) UpdateVmssVMWithRetry(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) error {
|
||||
return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
|
||||
resp, err := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters)
|
||||
glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s,%s): end", VMScaleSetName, instanceID)
|
||||
return processHTTPRetryResponse(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// A wait.ConditionFunc function to deal with common HTTP backoff response conditions
|
||||
func processRetryResponse(resp autorest.Response, err error) (bool, error) {
|
||||
if isSuccessHTTPResponse(resp) {
|
||||
glog.V(2).Infof("processRetryResponse: backoff success, HTTP response=%d", resp.StatusCode)
|
||||
return true, nil
|
||||
}
|
||||
if shouldRetryAPIRequest(resp, err) {
|
||||
glog.Errorf("processRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
|
||||
// suppress the error object so that backoff process continues
|
||||
return false, nil
|
||||
}
|
||||
// Fall-through: stop periodic backoff
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// shouldRetryAPIRequest determines if the response from an HTTP request suggests periodic retry behavior
|
||||
func shouldRetryAPIRequest(resp autorest.Response, err error) bool {
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
// HTTP 4xx or 5xx suggests we should retry
|
||||
if 399 < resp.StatusCode && resp.StatusCode < 600 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isSuccessHTTPResponse determines if the response from an HTTP request suggests success
|
||||
func isSuccessHTTPResponse(resp autorest.Response) bool {
|
||||
// HTTP 2xx suggests a successful response
|
||||
if 199 < resp.StatusCode && resp.StatusCode < 300 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func shouldRetryHTTPRequest(resp *http.Response, err error) bool {
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
// HTTP 4xx or 5xx suggests we should retry
|
||||
if 399 < resp.StatusCode && resp.StatusCode < 600 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func processHTTPRetryResponse(resp *http.Response, err error) (bool, error) {
|
||||
if resp != nil {
|
||||
// HTTP 2xx suggests a successful response
|
||||
if 199 < resp.StatusCode && resp.StatusCode < 300 {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
if shouldRetryHTTPRequest(resp, err) {
|
||||
glog.Errorf("processHTTPRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err)
|
||||
// suppress the error object so that backoff process continues
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Fall-through: stop periodic backoff
|
||||
return true, nil
|
||||
}
|
148
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff_test.go
generated
vendored
Normal file
148
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff_test.go
generated
vendored
Normal file
@@ -0,0 +1,148 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
)
|
||||
|
||||
func TestShouldRetry(t *testing.T) {
|
||||
tests := []struct {
|
||||
code int
|
||||
err error
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
code: http.StatusBadRequest,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
code: http.StatusInternalServerError,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
code: http.StatusOK,
|
||||
err: fmt.Errorf("some error"),
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
code: http.StatusOK,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
code: 399,
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
resp := autorest.Response{
|
||||
Response: &http.Response{
|
||||
StatusCode: test.code,
|
||||
},
|
||||
}
|
||||
res := shouldRetryAPIRequest(resp, test.err)
|
||||
if res != test.expected {
|
||||
t.Errorf("expected: %v, saw: %v", test.expected, res)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsSuccessResponse(t *testing.T) {
|
||||
tests := []struct {
|
||||
code int
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
code: http.StatusNotFound,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
code: http.StatusInternalServerError,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
code: http.StatusOK,
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
resp := autorest.Response{
|
||||
Response: &http.Response{
|
||||
StatusCode: test.code,
|
||||
},
|
||||
}
|
||||
res := isSuccessHTTPResponse(resp)
|
||||
if res != test.expected {
|
||||
t.Errorf("expected: %v, saw: %v", test.expected, res)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessRetryResponse(t *testing.T) {
|
||||
tests := []struct {
|
||||
code int
|
||||
err error
|
||||
stop bool
|
||||
}{
|
||||
{
|
||||
code: http.StatusBadRequest,
|
||||
stop: false,
|
||||
},
|
||||
{
|
||||
code: http.StatusInternalServerError,
|
||||
stop: false,
|
||||
},
|
||||
{
|
||||
code: http.StatusSeeOther,
|
||||
err: fmt.Errorf("some error"),
|
||||
stop: false,
|
||||
},
|
||||
{
|
||||
code: http.StatusSeeOther,
|
||||
stop: true,
|
||||
},
|
||||
{
|
||||
code: http.StatusOK,
|
||||
stop: true,
|
||||
},
|
||||
{
|
||||
code: 399,
|
||||
stop: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
resp := autorest.Response{
|
||||
Response: &http.Response{
|
||||
StatusCode: test.code,
|
||||
},
|
||||
}
|
||||
res, err := processRetryResponse(resp, test.err)
|
||||
if res != test.stop {
|
||||
t.Errorf("expected: %v, saw: %v", test.stop, res)
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
633
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go
generated
vendored
Normal file
633
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go
generated
vendored
Normal file
@@ -0,0 +1,633 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
azstorage "github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
"github.com/rubiojr/go-vhd/vhd"
|
||||
kwait "k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
)
|
||||
|
||||
const (
	// vhdContainerName is the blob container in which VHD page blobs are stored.
	vhdContainerName = "vhds"
	// useHTTPSForBlobBasedDisk selects the URI scheme used when building and
	// parsing blob disk URIs ("https" when true, "http" otherwise).
	useHTTPSForBlobBasedDisk = true
	// blobServiceName is the DNS label of the Azure blob service endpoint,
	// used when composing host names like <account>.blob.<suffix>.
	blobServiceName = "blob"
)
|
||||
|
||||
// storageAccountState is the per-storage-account bookkeeping cached by
// BlobDiskController while placing and removing blob-based disks.
type storageAccountState struct {
	// name is the storage account name.
	name string
	// saType is the account SKU (storage account type).
	saType storage.SkuName
	// key is the cached access key; "" until fetched via getStorageAccountKey.
	key string
	// diskCount is the number of disks in the account; -1 means unknown and
	// is refreshed lazily (see DeleteBlobDisk).
	diskCount int32
	// isValidating is used as an atomic CAS flag to serialize account
	// readiness validation.
	isValidating int32
	// defaultContainerCreated records that the default vhds container is
	// known to exist, short-circuiting repeated checks.
	defaultContainerCreated bool
}
|
||||
|
||||
// BlobDiskController creates, deletes and tracks blob-based (unmanaged) VHD
// disks across the storage accounts of the resource group.
type BlobDiskController struct {
	// common carries the cloud handle and shared configuration
	// (resource group, storage endpoint suffix, ...).
	common *controllerCommon
	// accounts caches per-account state, keyed by storage account name.
	accounts map[string]*storageAccountState
}
|
||||
|
||||
var (
	// accountsLock serializes access to shared storage-account bookkeeping.
	// NOTE(review): its use is not visible in this portion of the file —
	// confirm which state it actually guards before relying on it.
	accountsLock = &sync.Mutex{}
)
|
||||
|
||||
func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error) {
|
||||
c := BlobDiskController{common: common}
|
||||
|
||||
// get accounts
|
||||
accounts, err := c.getAllStorageAccounts()
|
||||
if err != nil {
|
||||
glog.Errorf("azureDisk - getAllStorageAccounts error: %v", err)
|
||||
c.accounts = make(map[string]*storageAccountState)
|
||||
return &c, nil
|
||||
}
|
||||
c.accounts = accounts
|
||||
return &c, nil
|
||||
}
|
||||
|
||||
// CreateVolume creates a VHD blob in a storage account that has storageType and location using the given storage account.
|
||||
// If no storage account is given, search all the storage accounts associated with the resource group and pick one that
|
||||
// fits storage type and location.
|
||||
func (c *BlobDiskController) CreateVolume(blobName, accountName, accountType, location string, requestGB int) (string, string, int, error) {
|
||||
account, key, err := c.common.cloud.ensureStorageAccount(accountName, accountType, location, dedicatedDiskAccountNamePrefix)
|
||||
if err != nil {
|
||||
return "", "", 0, fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
|
||||
}
|
||||
|
||||
client, err := azstorage.NewBasicClientOnSovereignCloud(account, key, c.common.cloud.Environment)
|
||||
if err != nil {
|
||||
return "", "", 0, err
|
||||
}
|
||||
blobClient := client.GetBlobService()
|
||||
|
||||
// create a page blob in this account's vhd container
|
||||
diskName, diskURI, err := c.createVHDBlobDisk(blobClient, account, blobName, vhdContainerName, int64(requestGB))
|
||||
if err != nil {
|
||||
return "", "", 0, err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI)
|
||||
return diskName, diskURI, requestGB, err
|
||||
}
|
||||
|
||||
// DeleteVolume deletes a VHD blob
|
||||
func (c *BlobDiskController) DeleteVolume(diskURI string) error {
|
||||
glog.V(4).Infof("azureDisk - begin to delete volume %s", diskURI)
|
||||
accountName, blob, err := c.common.cloud.getBlobNameAndAccountFromURI(diskURI)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse vhd URI %v", err)
|
||||
}
|
||||
key, err := c.common.cloud.getStorageAccesskey(accountName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("no key for storage account %s, err %v", accountName, err)
|
||||
}
|
||||
err = c.common.cloud.deleteVhdBlob(accountName, key, blob)
|
||||
if err != nil {
|
||||
glog.Warningf("azureDisk - failed to delete blob %s err: %v", diskURI, err)
|
||||
detail := err.Error()
|
||||
if strings.Contains(detail, errLeaseIDMissing) {
|
||||
// disk is still being used
|
||||
// see https://msdn.microsoft.com/en-us/library/microsoft.windowsazure.storage.blob.protocol.bloberrorcodestrings.leaseidmissing.aspx
|
||||
return volume.NewDeletedVolumeInUseError(fmt.Sprintf("disk %q is still in use while being deleted", diskURI))
|
||||
}
|
||||
return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", diskURI, accountName, blob, err)
|
||||
}
|
||||
glog.V(4).Infof("azureDisk - blob %s deleted", diskURI)
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// get diskURI https://foo.blob.core.windows.net/vhds/bar.vhd and return foo (account) and bar.vhd (blob name)
|
||||
func (c *BlobDiskController) getBlobNameAndAccountFromURI(diskURI string) (string, string, error) {
|
||||
scheme := "http"
|
||||
if useHTTPSForBlobBasedDisk {
|
||||
scheme = "https"
|
||||
}
|
||||
host := fmt.Sprintf("%s://(.*).%s.%s", scheme, blobServiceName, c.common.storageEndpointSuffix)
|
||||
reStr := fmt.Sprintf("%s/%s/(.*)", host, vhdContainerName)
|
||||
re := regexp.MustCompile(reStr)
|
||||
res := re.FindSubmatch([]byte(diskURI))
|
||||
if len(res) < 3 {
|
||||
return "", "", fmt.Errorf("invalid vhd URI for regex %s: %s", reStr, diskURI)
|
||||
}
|
||||
return string(res[1]), string(res[2]), nil
|
||||
}
|
||||
|
||||
// createVHDBlobDisk creates a fixed-size VHD page blob named vhdName+".vhd"
// in the given container of accountName: it allocates a page blob of
// sizeGB plus the VHD header, writes the VHD footer signature into the final
// header-sized range, and returns the blob name and its full URI. If the
// container does not exist yet it is created and the upload is retried once.
func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageClient, accountName, vhdName, containerName string, sizeGB int64) (string, string, error) {
	container := blobClient.GetContainerReference(containerName)
	size := 1024 * 1024 * 1024 * sizeGB
	vhdSize := size + vhd.VHD_HEADER_SIZE /* header size */
	// Blob name in URL must end with '.vhd' extension.
	vhdName = vhdName + ".vhd"

	tags := make(map[string]string)
	tags["createdby"] = "k8sAzureDataDisk"
	glog.V(4).Infof("azureDisk - creating page blob %s in container %s account %s", vhdName, containerName, accountName)

	blob := container.GetBlobReference(vhdName)
	blob.Properties.ContentLength = vhdSize
	blob.Metadata = tags
	err := blob.PutPageBlob(nil)
	if err != nil {
		// if container doesn't exist, create one and retry PutPageBlob
		detail := err.Error()
		if strings.Contains(detail, errContainerNotFound) {
			err = container.Create(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
			if err == nil {
				err = blob.PutPageBlob(nil)
			}
		}
	}
	if err != nil {
		return "", "", fmt.Errorf("failed to put page blob %s in container %s: %v", vhdName, containerName, err)
	}

	// add VHD signature to the blob
	h, err := createVHDHeader(uint64(size))
	if err != nil {
		// Clean up the partially-created blob before reporting failure.
		blob.DeleteIfExists(nil)
		return "", "", fmt.Errorf("failed to create vhd header, err: %v", err)
	}

	// The fixed-VHD footer occupies the last header-sized range of the blob.
	blobRange := azstorage.BlobRange{
		Start: uint64(size),
		End:   uint64(vhdSize - 1),
	}
	if err = blob.WriteRange(blobRange, bytes.NewBuffer(h[:vhd.VHD_HEADER_SIZE]), nil); err != nil {
		glog.Infof("azureDisk - failed to put header page for data disk %s in container %s account %s, error was %s\n",
			vhdName, containerName, accountName, err.Error())
		return "", "", err
	}

	scheme := "http"
	if useHTTPSForBlobBasedDisk {
		scheme = "https"
	}

	host := fmt.Sprintf("%s://%s.%s.%s", scheme, accountName, blobServiceName, c.common.storageEndpointSuffix)
	uri := fmt.Sprintf("%s/%s/%s", host, containerName, vhdName)
	return vhdName, uri, nil
}
|
||||
|
||||
// delete a vhd blob
|
||||
func (c *BlobDiskController) deleteVhdBlob(accountName, accountKey, blobName string) error {
|
||||
client, err := azstorage.NewBasicClientOnSovereignCloud(accountName, accountKey, c.common.cloud.Environment)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blobSvc := client.GetBlobService()
|
||||
|
||||
container := blobSvc.GetContainerReference(vhdContainerName)
|
||||
blob := container.GetBlobReference(blobName)
|
||||
return blob.Delete(nil)
|
||||
}
|
||||
|
||||
//CreateBlobDisk : create a blob disk in a node
|
||||
func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error) {
|
||||
glog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s", dataDiskName, storageAccountType)
|
||||
|
||||
storageAccountName, err := c.findSANameForDisk(storageAccountType)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
blobClient, err := c.getBlobSvcClient(storageAccountName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
_, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccountName, dataDiskName, vhdContainerName, int64(sizeGB))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
atomic.AddInt32(&c.accounts[storageAccountName].diskCount, 1)
|
||||
|
||||
return diskURI, nil
|
||||
}
|
||||
|
||||
//DeleteBlobDisk : delete a blob disk from a node
|
||||
func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
|
||||
storageAccountName, vhdName, err := diskNameandSANameFromURI(diskURI)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, ok := c.accounts[storageAccountName]
|
||||
if !ok {
|
||||
// the storage account is specified by user
|
||||
glog.V(4).Infof("azureDisk - deleting volume %s", diskURI)
|
||||
return c.DeleteVolume(diskURI)
|
||||
}
|
||||
|
||||
blobSvc, err := c.getBlobSvcClient(storageAccountName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, vhdContainerName)
|
||||
|
||||
container := blobSvc.GetContainerReference(vhdContainerName)
|
||||
blob := container.GetBlobReference(vhdName)
|
||||
_, err = blob.DeleteIfExists(nil)
|
||||
|
||||
if c.accounts[storageAccountName].diskCount == -1 {
|
||||
if diskCount, err := c.getDiskCount(storageAccountName); err != nil {
|
||||
c.accounts[storageAccountName].diskCount = int32(diskCount)
|
||||
} else {
|
||||
glog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName)
|
||||
return nil // we have failed to acquire a new count. not an error condition
|
||||
}
|
||||
}
|
||||
atomic.AddInt32(&c.accounts[storageAccountName].diskCount, -1)
|
||||
return err
|
||||
}
|
||||
|
||||
// getStorageAccountKey returns an access key for the given storage account,
// serving it from the local cache when one was previously fetched.
func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error) {
	if account, exists := c.accounts[SAName]; exists && account.key != "" {
		return c.accounts[SAName].key, nil
	}

	ctx, cancel := getContextWithCancel()
	defer cancel()
	listKeysResult, err := c.common.cloud.StorageAccountClient.ListKeys(ctx, c.common.resourceGroup, SAName)
	if err != nil {
		return "", err
	}
	if listKeysResult.Keys == nil {
		return "", fmt.Errorf("azureDisk - empty listKeysResult in storage account:%s keys", SAName)
	}
	// NOTE(review): this loop unconditionally returns on its first iteration,
	// so only the first listed key is ever considered, and the condition
	// below compares the key *value* against the literal "key1" while the
	// trailing error message suggests the key *name* was intended — confirm
	// against the storage ListKeys API before changing either.
	for _, v := range *listKeysResult.Keys {
		if v.Value != nil && *v.Value == "key1" {
			if _, ok := c.accounts[SAName]; !ok {
				glog.Warningf("azureDisk - account %s was not cached while getting keys", SAName)
				return *v.Value, nil
			}
		}

		// NOTE(review): *v.Value is dereferenced without a nil check here; a
		// nil value on the first listed key would panic.
		c.accounts[SAName].key = *v.Value
		return c.accounts[SAName].key, nil
	}

	return "", fmt.Errorf("couldn't find key named key1 in storage account:%s keys", SAName)
}
|
||||
|
||||
func (c *BlobDiskController) getBlobSvcClient(SAName string) (azstorage.BlobStorageClient, error) {
|
||||
key := ""
|
||||
var client azstorage.Client
|
||||
var blobSvc azstorage.BlobStorageClient
|
||||
var err error
|
||||
if key, err = c.getStorageAccountKey(SAName); err != nil {
|
||||
return blobSvc, err
|
||||
}
|
||||
|
||||
if client, err = azstorage.NewBasicClientOnSovereignCloud(SAName, key, c.common.cloud.Environment); err != nil {
|
||||
return blobSvc, err
|
||||
}
|
||||
|
||||
blobSvc = client.GetBlobService()
|
||||
return blobSvc, nil
|
||||
}
|
||||
|
||||
func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) error {
|
||||
var err error
|
||||
var blobSvc azstorage.BlobStorageClient
|
||||
|
||||
// short circuit the check via local cache
|
||||
// we are forgiving the fact that account may not be in cache yet
|
||||
if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated {
|
||||
return nil
|
||||
}
|
||||
|
||||
// not cached, check existence and readiness
|
||||
bExist, provisionState, _ := c.getStorageAccountState(storageAccountName)
|
||||
|
||||
// account does not exist
|
||||
if !bExist {
|
||||
return fmt.Errorf("azureDisk - account %s does not exist while trying to create/ensure default container", storageAccountName)
|
||||
}
|
||||
|
||||
// account exists but not ready yet
|
||||
if provisionState != storage.Succeeded {
|
||||
// we don't want many attempts to validate the account readiness
|
||||
// here hence we are locking
|
||||
counter := 1
|
||||
for swapped := atomic.CompareAndSwapInt32(&c.accounts[storageAccountName].isValidating, 0, 1); swapped != true; {
|
||||
time.Sleep(3 * time.Second)
|
||||
counter = counter + 1
|
||||
// check if we passed the max sleep
|
||||
if counter >= 20 {
|
||||
return fmt.Errorf("azureDisk - timeout waiting to acquire lock to validate account:%s readiness", storageAccountName)
|
||||
}
|
||||
}
|
||||
|
||||
// swapped
|
||||
defer func() {
|
||||
c.accounts[storageAccountName].isValidating = 0
|
||||
}()
|
||||
|
||||
// short circuit the check again.
|
||||
if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated {
|
||||
return nil
|
||||
}
|
||||
|
||||
err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
|
||||
_, provisionState, err := c.getStorageAccountState(storageAccountName)
|
||||
|
||||
if err != nil {
|
||||
glog.V(4).Infof("azureDisk - GetStorageAccount:%s err %s", storageAccountName, err.Error())
|
||||
return false, nil // error performing the query - retryable
|
||||
}
|
||||
|
||||
if provisionState == storage.Succeeded {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
glog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet (not flagged Succeeded by ARM)", storageAccountName)
|
||||
return false, nil // back off and see if the account becomes ready on next retry
|
||||
})
|
||||
// we have failed to ensure that account is ready for us to create
|
||||
// the default vhd container
|
||||
if err != nil {
|
||||
if err == kwait.ErrWaitTimeout {
|
||||
return fmt.Errorf("azureDisk - timed out waiting for storage account %s to become ready", storageAccountName)
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if blobSvc, err = c.getBlobSvcClient(storageAccountName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
container := blobSvc.GetContainerReference(vhdContainerName)
|
||||
bCreated, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bCreated {
|
||||
glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, vhdContainerName)
|
||||
}
|
||||
|
||||
// flag so we no longer have to check on ARM
|
||||
c.accounts[storageAccountName].defaultContainerCreated = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Gets Disk counts per storage account
|
||||
func (c *BlobDiskController) getDiskCount(SAName string) (int, error) {
|
||||
// if we have it in cache
|
||||
if c.accounts[SAName].diskCount != -1 {
|
||||
return int(c.accounts[SAName].diskCount), nil
|
||||
}
|
||||
|
||||
var err error
|
||||
var blobSvc azstorage.BlobStorageClient
|
||||
|
||||
if err = c.ensureDefaultContainer(SAName); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if blobSvc, err = c.getBlobSvcClient(SAName); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
params := azstorage.ListBlobsParameters{}
|
||||
|
||||
container := blobSvc.GetContainerReference(vhdContainerName)
|
||||
response, err := container.ListBlobs(params)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
glog.V(4).Infof("azure-Disk - refreshed data count for account %s and found %v", SAName, len(response.Blobs))
|
||||
c.accounts[SAName].diskCount = int32(len(response.Blobs))
|
||||
|
||||
return int(c.accounts[SAName].diskCount), nil
|
||||
}
|
||||
|
||||
func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccountState, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
accountListResult, err := c.common.cloud.StorageAccountClient.ListByResourceGroup(ctx, c.common.resourceGroup)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if accountListResult.Value == nil {
|
||||
return nil, fmt.Errorf("azureDisk - empty accountListResult")
|
||||
}
|
||||
|
||||
accounts := make(map[string]*storageAccountState)
|
||||
for _, v := range *accountListResult.Value {
|
||||
if v.Name == nil || v.Sku == nil {
|
||||
glog.Info("azureDisk - accountListResult Name or Sku is nil")
|
||||
continue
|
||||
}
|
||||
if !strings.HasPrefix(*v.Name, sharedDiskAccountNamePrefix) {
|
||||
continue
|
||||
}
|
||||
glog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name)
|
||||
|
||||
sastate := &storageAccountState{
|
||||
name: *v.Name,
|
||||
saType: (*v.Sku).Name,
|
||||
diskCount: -1,
|
||||
}
|
||||
|
||||
accounts[*v.Name] = sastate
|
||||
}
|
||||
|
||||
return accounts, nil
|
||||
}
|
||||
|
||||
func (c *BlobDiskController) createStorageAccount(storageAccountName string, storageAccountType storage.SkuName, location string, checkMaxAccounts bool) error {
|
||||
bExist, _, _ := c.getStorageAccountState(storageAccountName)
|
||||
if bExist {
|
||||
newAccountState := &storageAccountState{
|
||||
diskCount: -1,
|
||||
saType: storageAccountType,
|
||||
name: storageAccountName,
|
||||
}
|
||||
|
||||
c.addAccountState(storageAccountName, newAccountState)
|
||||
}
|
||||
// Account Does not exist
|
||||
if !bExist {
|
||||
if len(c.accounts) == maxStorageAccounts && checkMaxAccounts {
|
||||
return fmt.Errorf("azureDisk - can not create new storage account, current storage accounts count:%v Max is:%v", len(c.accounts), maxStorageAccounts)
|
||||
}
|
||||
|
||||
glog.V(2).Infof("azureDisk - Creating storage account %s type %s", storageAccountName, string(storageAccountType))
|
||||
|
||||
cp := storage.AccountCreateParameters{
|
||||
Sku: &storage.Sku{Name: storageAccountType},
|
||||
Tags: map[string]*string{"created-by": to.StringPtr("azure-dd")},
|
||||
Location: &location}
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
_, err := c.common.cloud.StorageAccountClient.Create(ctx, c.common.resourceGroup, storageAccountName, cp)
|
||||
if err != nil {
|
||||
return fmt.Errorf(fmt.Sprintf("Create Storage Account: %s, error: %s", storageAccountName, err))
|
||||
}
|
||||
|
||||
newAccountState := &storageAccountState{
|
||||
diskCount: -1,
|
||||
saType: storageAccountType,
|
||||
name: storageAccountName,
|
||||
}
|
||||
|
||||
c.addAccountState(storageAccountName, newAccountState)
|
||||
}
|
||||
|
||||
// finally, make sure that we default container is created
|
||||
// before handing it back over
|
||||
return c.ensureDefaultContainer(storageAccountName)
|
||||
}
|
||||
|
||||
// finds a new suitable storageAccount for this disk
|
||||
func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuName) (string, error) {
|
||||
maxDiskCount := maxDisksPerStorageAccounts
|
||||
SAName := ""
|
||||
totalDiskCounts := 0
|
||||
countAccounts := 0 // account of this type.
|
||||
for _, v := range c.accounts {
|
||||
// filter out any stand-alone disks/accounts
|
||||
if !strings.HasPrefix(v.name, sharedDiskAccountNamePrefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
// note: we compute avg stratified by type.
|
||||
// this is to enable user to grow per SA type to avoid low
|
||||
// avg utilization on one account type skewing all data.
|
||||
|
||||
if v.saType == storageAccountType {
|
||||
// compute average
|
||||
dCount, err := c.getDiskCount(v.name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
totalDiskCounts = totalDiskCounts + dCount
|
||||
countAccounts = countAccounts + 1
|
||||
// empty account
|
||||
if dCount == 0 {
|
||||
glog.V(2).Infof("azureDisk - account %s identified for a new disk is because it has 0 allocated disks", v.name)
|
||||
return v.name, nil // short circuit, avg is good and no need to adjust
|
||||
}
|
||||
// if this account is less allocated
|
||||
if dCount < maxDiskCount {
|
||||
maxDiskCount = dCount
|
||||
SAName = v.name
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if we failed to find storageaccount
|
||||
if SAName == "" {
|
||||
glog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account")
|
||||
SAName = generateStorageAccountName(sharedDiskAccountNamePrefix)
|
||||
err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return SAName, nil
|
||||
}
|
||||
|
||||
disksAfter := totalDiskCounts + 1 // with the new one!
|
||||
|
||||
avgUtilization := float64(disksAfter) / float64(countAccounts*maxDisksPerStorageAccounts)
|
||||
aboveAvg := (avgUtilization > storageAccountUtilizationBeforeGrowing)
|
||||
|
||||
// avg are not create and we should create more accounts if we can
|
||||
if aboveAvg && countAccounts < maxStorageAccounts {
|
||||
glog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
|
||||
SAName = generateStorageAccountName(sharedDiskAccountNamePrefix)
|
||||
err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return SAName, nil
|
||||
}
|
||||
|
||||
// averages are not ok and we are at capacity (max storage accounts allowed)
|
||||
if aboveAvg && countAccounts == maxStorageAccounts {
|
||||
glog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
|
||||
avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts)
|
||||
}
|
||||
|
||||
// we found a storage accounts && [ avg are ok || we reached max sa count ]
|
||||
return SAName, nil
|
||||
}
|
||||
|
||||
//Gets storage account exist, provisionStatus, Error if any
|
||||
func (c *BlobDiskController) getStorageAccountState(storageAccountName string) (bool, storage.ProvisioningState, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
account, err := c.common.cloud.StorageAccountClient.GetProperties(ctx, c.common.resourceGroup, storageAccountName)
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
return true, account.AccountProperties.ProvisioningState, nil
|
||||
}
|
||||
|
||||
func (c *BlobDiskController) addAccountState(key string, state *storageAccountState) {
|
||||
accountsLock.Lock()
|
||||
defer accountsLock.Unlock()
|
||||
|
||||
if _, ok := c.accounts[key]; !ok {
|
||||
c.accounts[key] = state
|
||||
}
|
||||
}
|
||||
|
||||
func createVHDHeader(size uint64) ([]byte, error) {
|
||||
h := vhd.CreateFixedHeader(size, &vhd.VHDOptions{})
|
||||
b := new(bytes.Buffer)
|
||||
err := binary.Write(b, binary.BigEndian, h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// diskNameandSANameFromURI splits a blob URI such as
// https://<account>.blob.core.windows.net/vhds/<disk>.vhd into the storage
// account name and the vhd blob name, returned in that order.
func diskNameandSANameFromURI(diskURI string) (string, string, error) {
	parsed, err := url.Parse(diskURI)
	if err != nil {
		return "", "", err
	}

	// The account name is the first DNS label of the host.
	saName := strings.Split(parsed.Host, ".")[0]

	// The blob (disk) name is the last path segment.
	segments := strings.Split(parsed.Path, "/")
	vhdName := segments[len(segments)-1]

	return saName, vhdName, nil
}
|
124
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_cache.go
generated
vendored
Normal file
124
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_cache.go
generated
vendored
Normal file
@@ -0,0 +1,124 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// getFunc is the loader used by timedCache to fetch a value that is not
// cached yet.
type getFunc func(key string) (interface{}, error)

// cacheEntry is the internal structure stored inside the TTLStore.
type cacheEntry struct {
	key  string
	data interface{}

	// lock ensures a single entry is not populated by two goroutines at once.
	lock sync.Mutex
}

// cacheKeyFunc is the key function required by TTLStore: it maps a stored
// object back to its cache key.
func cacheKeyFunc(obj interface{}) (string, error) {
	return obj.(*cacheEntry).key, nil
}
|
||||
|
||||
// timedCache is a cache with TTL.
|
||||
type timedCache struct {
|
||||
store cache.Store
|
||||
lock sync.Mutex
|
||||
getter getFunc
|
||||
}
|
||||
|
||||
// newTimedcache creates a new timedCache.
|
||||
func newTimedcache(ttl time.Duration, getter getFunc) (*timedCache, error) {
|
||||
if getter == nil {
|
||||
return nil, fmt.Errorf("getter is not provided")
|
||||
}
|
||||
|
||||
return &timedCache{
|
||||
getter: getter,
|
||||
store: cache.NewTTLStore(cacheKeyFunc, ttl),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// getInternal returns cacheEntry by key. If the key is not cached yet,
|
||||
// it returns a cacheEntry with nil data.
|
||||
func (t *timedCache) getInternal(key string) (*cacheEntry, error) {
|
||||
entry, exists, err := t.store.GetByKey(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if exists {
|
||||
return entry.(*cacheEntry), nil
|
||||
}
|
||||
|
||||
t.lock.Lock()
|
||||
defer t.lock.Unlock()
|
||||
entry, exists, err = t.store.GetByKey(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if exists {
|
||||
return entry.(*cacheEntry), nil
|
||||
}
|
||||
|
||||
// Still not found, add new entry with nil data.
|
||||
// Note the data will be filled later by getter.
|
||||
newEntry := &cacheEntry{
|
||||
key: key,
|
||||
data: nil,
|
||||
}
|
||||
t.store.Add(newEntry)
|
||||
return newEntry, nil
|
||||
}
|
||||
|
||||
// Get returns the requested item by key.
|
||||
func (t *timedCache) Get(key string) (interface{}, error) {
|
||||
entry, err := t.getInternal(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Data is still not cached yet, cache it by getter.
|
||||
if entry.data == nil {
|
||||
entry.lock.Lock()
|
||||
defer entry.lock.Unlock()
|
||||
|
||||
if entry.data == nil {
|
||||
data, err := t.getter(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
entry.data = data
|
||||
}
|
||||
}
|
||||
|
||||
return entry.data, nil
|
||||
}
|
||||
|
||||
// Delete removes an item from the cache.
|
||||
func (t *timedCache) Delete(key string) error {
|
||||
return t.store.Delete(&cacheEntry{
|
||||
key: key,
|
||||
})
|
||||
}
|
160
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_cache_test.go
generated
vendored
Normal file
160
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_cache_test.go
generated
vendored
Normal file
@@ -0,0 +1,160 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var (
	// fakeCacheTTL is the TTL used by caches built in these tests.
	fakeCacheTTL = 2 * time.Second
)

type fakeDataObj struct{}

// fakeDataSource is a getter backend that records how many times it has
// been consulted.
type fakeDataSource struct {
	called int
	data   map[string]*fakeDataObj
	lock   sync.Mutex
}
|
||||
|
||||
func (fake *fakeDataSource) get(key string) (interface{}, error) {
|
||||
fake.lock.Lock()
|
||||
defer fake.lock.Unlock()
|
||||
|
||||
fake.called = fake.called + 1
|
||||
if v, ok := fake.data[key]; ok {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (fake *fakeDataSource) set(data map[string]*fakeDataObj) {
|
||||
fake.lock.Lock()
|
||||
defer fake.lock.Unlock()
|
||||
|
||||
fake.data = data
|
||||
fake.called = 0
|
||||
}
|
||||
|
||||
func newFakeCache(t *testing.T) (*fakeDataSource, *timedCache) {
|
||||
dataSource := &fakeDataSource{
|
||||
data: make(map[string]*fakeDataObj),
|
||||
}
|
||||
getter := dataSource.get
|
||||
cache, err := newTimedcache(fakeCacheTTL, getter)
|
||||
assert.NoError(t, err)
|
||||
return dataSource, cache
|
||||
}
|
||||
|
||||
func TestCacheGet(t *testing.T) {
|
||||
val := &fakeDataObj{}
|
||||
cases := []struct {
|
||||
name string
|
||||
data map[string]*fakeDataObj
|
||||
key string
|
||||
expected interface{}
|
||||
}{
|
||||
{
|
||||
name: "cache should return nil for empty data source",
|
||||
key: "key1",
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
name: "cache should return nil for non exist key",
|
||||
data: map[string]*fakeDataObj{"key2": val},
|
||||
key: "key1",
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
name: "cache should return data for existing key",
|
||||
data: map[string]*fakeDataObj{"key1": val},
|
||||
key: "key1",
|
||||
expected: val,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
dataSource, cache := newFakeCache(t)
|
||||
dataSource.set(c.data)
|
||||
val, err := cache.Get(c.key)
|
||||
assert.NoError(t, err, c.name)
|
||||
assert.Equal(t, c.expected, val, c.name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheGetError(t *testing.T) {
|
||||
getError := fmt.Errorf("getError")
|
||||
getter := func(key string) (interface{}, error) {
|
||||
return nil, getError
|
||||
}
|
||||
cache, err := newTimedcache(fakeCacheTTL, getter)
|
||||
assert.NoError(t, err)
|
||||
|
||||
val, err := cache.Get("key")
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, getError, err)
|
||||
assert.Nil(t, val)
|
||||
}
|
||||
|
||||
func TestCacheDelete(t *testing.T) {
|
||||
key := "key1"
|
||||
val := &fakeDataObj{}
|
||||
data := map[string]*fakeDataObj{
|
||||
key: val,
|
||||
}
|
||||
dataSource, cache := newFakeCache(t)
|
||||
dataSource.set(data)
|
||||
|
||||
v, err := cache.Get(key)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, val, v, "cache should get correct data")
|
||||
|
||||
dataSource.set(nil)
|
||||
cache.Delete(key)
|
||||
v, err = cache.Get(key)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, dataSource.called)
|
||||
assert.Equal(t, nil, v, "cache should get nil after data is removed")
|
||||
}
|
||||
|
||||
func TestCacheExpired(t *testing.T) {
|
||||
key := "key1"
|
||||
val := &fakeDataObj{}
|
||||
data := map[string]*fakeDataObj{
|
||||
key: val,
|
||||
}
|
||||
dataSource, cache := newFakeCache(t)
|
||||
dataSource.set(data)
|
||||
|
||||
v, err := cache.Get(key)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, dataSource.called)
|
||||
assert.Equal(t, val, v, "cache should get correct data")
|
||||
|
||||
time.Sleep(fakeCacheTTL)
|
||||
v, err = cache.Get(key)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 2, dataSource.called)
|
||||
assert.Equal(t, val, v, "cache should get correct data even after expired")
|
||||
}
|
1329
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_client.go
generated
vendored
Normal file
1329
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_client.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
188
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_common.go
generated
vendored
Normal file
188
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_common.go
generated
vendored
Normal file
@@ -0,0 +1,188 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kwait "k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
const (
	storageAccountNameTemplate = "pvc%s"

	// Storage-account limits; see
	// https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits#storage-limits
	maxStorageAccounts                     = 100 // max # is 200 (250 with special request). this allows 100 for everything else including stand alone disks
	maxDisksPerStorageAccounts             = 60
	storageAccountUtilizationBeforeGrowing = 0.5

	maxLUN = 64 // max number of LUNs per VM

	// Error substrings matched against ARM / storage-service failures.
	errLeaseFailed       = "AcquireDiskLeaseFailed"
	errLeaseIDMissing    = "LeaseIdMissing"
	errContainerNotFound = "ContainerNotFound"
	errDiskBlobNotFound  = "DiskBlobNotFound"
)
|
||||
|
||||
var defaultBackOff = kwait.Backoff{
|
||||
Steps: 20,
|
||||
Duration: 2 * time.Second,
|
||||
Factor: 1.5,
|
||||
Jitter: 0.0,
|
||||
}
|
||||
|
||||
type controllerCommon struct {
|
||||
subscriptionID string
|
||||
location string
|
||||
storageEndpointSuffix string
|
||||
resourceGroup string
|
||||
cloud *Cloud
|
||||
}
|
||||
|
||||
// getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type.
|
||||
func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName) (VMSet, error) {
|
||||
// 1. vmType is standard, return cloud.vmSet directly.
|
||||
if c.cloud.VMType == vmTypeStandard {
|
||||
return c.cloud.vmSet, nil
|
||||
}
|
||||
|
||||
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
|
||||
ss, ok := c.cloud.vmSet.(*scaleSet)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
|
||||
}
|
||||
|
||||
// 3. If the node is managed by availability set, then return ss.availabilitySet.
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if managedByAS {
|
||||
// vm is managed by availability set.
|
||||
return ss.availabilitySet, nil
|
||||
}
|
||||
|
||||
// 4. Node is managed by vmss
|
||||
return ss, nil
|
||||
}
|
||||
|
||||
// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
|
||||
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
|
||||
vmset, err := c.getNodeVMSet(nodeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return vmset.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode)
|
||||
}
|
||||
|
||||
// DetachDiskByName detaches a vhd from host. The vhd can be identified by diskName or diskURI.
|
||||
func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
|
||||
vmset, err := c.getNodeVMSet(nodeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return vmset.DetachDiskByName(diskName, diskURI, nodeName)
|
||||
}
|
||||
|
||||
// getNodeDataDisks invokes vmSet interfaces to get data disks for the node.
|
||||
func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
|
||||
vmset, err := c.getNodeVMSet(nodeName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return vmset.GetDataDisks(nodeName)
|
||||
}
|
||||
|
||||
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
|
||||
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
|
||||
disks, err := c.getNodeDataDisks(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
|
||||
return -1, err
|
||||
}
|
||||
|
||||
for _, disk := range disks {
|
||||
if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
|
||||
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
|
||||
(disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
|
||||
// found the disk
|
||||
glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
|
||||
return *disk.Lun, nil
|
||||
}
|
||||
}
|
||||
return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
|
||||
}
|
||||
|
||||
// GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
|
||||
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
|
||||
disks, err := c.getNodeDataDisks(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
|
||||
return -1, err
|
||||
}
|
||||
|
||||
used := make([]bool, maxLUN)
|
||||
for _, disk := range disks {
|
||||
if disk.Lun != nil {
|
||||
used[*disk.Lun] = true
|
||||
}
|
||||
}
|
||||
for k, v := range used {
|
||||
if !v {
|
||||
return int32(k), nil
|
||||
}
|
||||
}
|
||||
return -1, fmt.Errorf("all luns are used")
|
||||
}
|
||||
|
||||
// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName.
|
||||
func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
|
||||
attached := make(map[string]bool)
|
||||
for _, diskName := range diskNames {
|
||||
attached[diskName] = false
|
||||
}
|
||||
|
||||
disks, err := c.getNodeDataDisks(nodeName)
|
||||
if err != nil {
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
// if host doesn't exist, no need to detach
|
||||
glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
|
||||
nodeName, diskNames)
|
||||
return attached, nil
|
||||
}
|
||||
|
||||
return attached, err
|
||||
}
|
||||
|
||||
for _, disk := range disks {
|
||||
for _, diskName := range diskNames {
|
||||
if disk.Name != nil && diskName != "" && *disk.Name == diskName {
|
||||
attached[diskName] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return attached, nil
|
||||
}
|
170
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_standard.go
generated
vendored
Normal file
170
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_standard.go
generated
vendored
Normal file
@@ -0,0 +1,170 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// AttachDisk attaches a vhd to vm
|
||||
// the vhd must exist, can be identified by diskName, diskURI, and lun.
|
||||
func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
|
||||
vm, err := as.getVirtualMachine(nodeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
if isManagedDisk {
|
||||
disks = append(disks,
|
||||
compute.DataDisk{
|
||||
Name: &diskName,
|
||||
Lun: &lun,
|
||||
Caching: cachingMode,
|
||||
CreateOption: "attach",
|
||||
ManagedDisk: &compute.ManagedDiskParameters{
|
||||
ID: &diskURI,
|
||||
},
|
||||
})
|
||||
} else {
|
||||
disks = append(disks,
|
||||
compute.DataDisk{
|
||||
Name: &diskName,
|
||||
Vhd: &compute.VirtualHardDisk{
|
||||
URI: &diskURI,
|
||||
},
|
||||
Lun: &lun,
|
||||
Caching: cachingMode,
|
||||
CreateOption: "attach",
|
||||
})
|
||||
}
|
||||
|
||||
newVM := compute.VirtualMachine{
|
||||
Location: vm.Location,
|
||||
VirtualMachineProperties: &compute.VirtualMachineProperties{
|
||||
StorageProfile: &compute.StorageProfile{
|
||||
DataDisks: &disks,
|
||||
},
|
||||
},
|
||||
}
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", as.resourceGroup, vmName)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, as.resourceGroup, vmName, newVM)
|
||||
if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", as.resourceGroup, vmName)
|
||||
retryErr := as.CreateOrUpdateVMWithRetry(vmName, newVM)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", as.resourceGroup, vmName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
glog.Errorf("azureDisk - azure attach failed, err: %v", err)
|
||||
detail := err.Error()
|
||||
if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
|
||||
// if lease cannot be acquired or disk not found, immediately detach the disk and return the original error
|
||||
glog.Infof("azureDisk - err %s, try detach", detail)
|
||||
as.DetachDiskByName(diskName, diskURI, nodeName)
|
||||
}
|
||||
} else {
|
||||
glog.V(4).Info("azureDisk - azure attach succeeded")
|
||||
// Invalidate the cache right after updating
|
||||
as.cloud.vmCache.Delete(vmName)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// DetachDiskByName detaches a vhd from host
|
||||
// the vhd can be identified by diskName or diskURI
|
||||
func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
|
||||
vm, err := as.getVirtualMachine(nodeName)
|
||||
if err != nil {
|
||||
// if host doesn't exist, no need to detach
|
||||
glog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName)
|
||||
return nil
|
||||
}
|
||||
|
||||
disks := *vm.StorageProfile.DataDisks
|
||||
bFoundDisk := false
|
||||
for i, disk := range disks {
|
||||
if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
|
||||
(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
|
||||
(disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) {
|
||||
// found the disk
|
||||
glog.V(4).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
|
||||
disks = append(disks[:i], disks[i+1:]...)
|
||||
bFoundDisk = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !bFoundDisk {
|
||||
return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI)
|
||||
}
|
||||
|
||||
newVM := compute.VirtualMachine{
|
||||
Location: vm.Location,
|
||||
VirtualMachineProperties: &compute.VirtualMachineProperties{
|
||||
StorageProfile: &compute.StorageProfile{
|
||||
DataDisks: &disks,
|
||||
},
|
||||
},
|
||||
}
|
||||
vmName := mapNodeNameToVMName(nodeName)
|
||||
glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", as.resourceGroup, vmName)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, as.resourceGroup, vmName, newVM)
|
||||
if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", as.resourceGroup, vmName)
|
||||
retryErr := as.CreateOrUpdateVMWithRetry(vmName, newVM)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", as.ResourceGroup, vmName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
glog.Errorf("azureDisk - azure disk detach failed, err: %v", err)
|
||||
} else {
|
||||
glog.V(4).Info("azureDisk - azure disk detach succeeded")
|
||||
// Invalidate the cache right after updating
|
||||
as.cloud.vmCache.Delete(vmName)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// GetDataDisks gets a list of data disks attached to the node.
|
||||
func (as *availabilitySet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
|
||||
vm, err := as.getVirtualMachine(nodeName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if vm.StorageProfile.DataDisks == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return *vm.StorageProfile.DataDisks, nil
|
||||
}
|
158
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_vmss.go
generated
vendored
Normal file
158
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controller_vmss.go
generated
vendored
Normal file
@@ -0,0 +1,158 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// AttachDisk attaches a vhd to vm
|
||||
// the vhd must exist, can be identified by diskName, diskURI, and lun.
|
||||
func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
|
||||
ssName, instanceID, vm, err := ss.getVmssVM(string(nodeName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
disks := []compute.DataDisk{}
|
||||
if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil {
|
||||
disks = *vm.StorageProfile.DataDisks
|
||||
}
|
||||
if isManagedDisk {
|
||||
disks = append(disks,
|
||||
compute.DataDisk{
|
||||
Name: &diskName,
|
||||
Lun: &lun,
|
||||
Caching: compute.CachingTypes(cachingMode),
|
||||
CreateOption: "attach",
|
||||
ManagedDisk: &compute.ManagedDiskParameters{
|
||||
ID: &diskURI,
|
||||
},
|
||||
})
|
||||
} else {
|
||||
disks = append(disks,
|
||||
compute.DataDisk{
|
||||
Name: &diskName,
|
||||
Vhd: &compute.VirtualHardDisk{
|
||||
URI: &diskURI,
|
||||
},
|
||||
Lun: &lun,
|
||||
Caching: compute.CachingTypes(cachingMode),
|
||||
CreateOption: "attach",
|
||||
})
|
||||
}
|
||||
vm.StorageProfile.DataDisks = &disks
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", ss.resourceGroup, nodeName)
|
||||
resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, ss.resourceGroup, ssName, instanceID, vm)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", ss.resourceGroup, nodeName)
|
||||
retryErr := ss.UpdateVmssVMWithRetry(ctx, ss.resourceGroup, ssName, instanceID, vm)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", ss.resourceGroup, nodeName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
detail := err.Error()
|
||||
if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
|
||||
// if lease cannot be acquired or disk not found, immediately detach the disk and return the original error
|
||||
glog.Infof("azureDisk - err %s, try detach", detail)
|
||||
ss.DetachDiskByName(diskName, diskURI, nodeName)
|
||||
}
|
||||
} else {
|
||||
glog.V(4).Info("azureDisk - azure attach succeeded")
|
||||
// Invalidate the cache right after updating
|
||||
ss.vmssVMCache.Delete(ss.makeVmssVMName(ssName, instanceID))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// DetachDiskByName detaches a vhd from host
// the vhd can be identified by diskName or diskURI
func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
	// Resolve the scale-set name, instance ID and VM model for this node.
	ssName, instanceID, vm, err := ss.getVmssVM(string(nodeName))
	if err != nil {
		return err
	}

	disks := []compute.DataDisk{}
	if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil {
		disks = *vm.StorageProfile.DataDisks
	}
	bFoundDisk := false
	for i, disk := range disks {
		// Match by name, by unmanaged VHD URI, or by managed-disk ID.
		// NOTE(review): '&&' binds tighter than '||', so the disk.Lun nil
		// guard applies only to the name clause, not the two URI clauses —
		// confirm that is intended.
		if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
			(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
			(disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) {
			// found the disk
			glog.V(4).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
			disks = append(disks[:i], disks[i+1:]...)
			bFoundDisk = true
			break
		}
	}

	if !bFoundDisk {
		return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI)
	}

	// Write the shortened disk list back and push the whole VM model.
	vm.StorageProfile.DataDisks = &disks
	ctx, cancel := getContextWithCancel()
	defer cancel()
	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", ss.resourceGroup, nodeName)
	resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, ss.resourceGroup, ssName, instanceID, vm)
	// Retry with exponential backoff when the HTTP response indicates a
	// retriable failure and backoff is enabled for this cloud.
	if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", ss.resourceGroup, nodeName)
		retryErr := ss.UpdateVmssVMWithRetry(ctx, ss.resourceGroup, ssName, instanceID, vm)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", ss.resourceGroup, nodeName)
		}
	}
	if err != nil {
		glog.Errorf("azureDisk - azure disk detach %q from %s failed, err: %v", diskName, nodeName, err)
	} else {
		glog.V(4).Info("azureDisk - azure detach succeeded")
		// Invalidate the cache right after updating
		ss.vmssVMCache.Delete(ss.makeVmssVMName(ssName, instanceID))
	}

	return err
}
|
||||
|
||||
// GetDataDisks gets a list of data disks attached to the node.
|
||||
func (ss *scaleSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
|
||||
_, _, vm, err := ss.getVmssVM(string(nodeName))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if vm.StorageProfile == nil || vm.StorageProfile.DataDisks == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return *vm.StorageProfile.DataDisks, nil
|
||||
}
|
888
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_fakes.go
generated
vendored
Normal file
888
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_fakes.go
generated
vendored
Normal file
@@ -0,0 +1,888 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
)
|
||||
|
||||
type fakeAzureLBClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]network.LoadBalancer
|
||||
}
|
||||
|
||||
func newFakeAzureLBClient() *fakeAzureLBClient {
|
||||
fLBC := &fakeAzureLBClient{}
|
||||
fLBC.FakeStore = make(map[string]map[string]network.LoadBalancer)
|
||||
fLBC.mutex = &sync.Mutex{}
|
||||
return fLBC
|
||||
}
|
||||
|
||||
func (fLBC *fakeAzureLBClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer) (resp *http.Response, err error) {
|
||||
fLBC.mutex.Lock()
|
||||
defer fLBC.mutex.Unlock()
|
||||
|
||||
if _, ok := fLBC.FakeStore[resourceGroupName]; !ok {
|
||||
fLBC.FakeStore[resourceGroupName] = make(map[string]network.LoadBalancer)
|
||||
}
|
||||
|
||||
// For dynamic ip allocation, just fill in the PrivateIPAddress
|
||||
if parameters.FrontendIPConfigurations != nil {
|
||||
for idx, config := range *parameters.FrontendIPConfigurations {
|
||||
if config.PrivateIPAllocationMethod == network.Dynamic {
|
||||
// Here we randomly assign an ip as private ip
|
||||
// It doesn't smart enough to know whether it is in the subnet's range
|
||||
(*parameters.FrontendIPConfigurations)[idx].PrivateIPAddress = getRandomIPPtr()
|
||||
}
|
||||
}
|
||||
}
|
||||
fLBC.FakeStore[resourceGroupName][loadBalancerName] = parameters
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (fLBC *fakeAzureLBClient) Delete(ctx context.Context, resourceGroupName string, loadBalancerName string) (resp *http.Response, err error) {
|
||||
fLBC.mutex.Lock()
|
||||
defer fLBC.mutex.Unlock()
|
||||
|
||||
if rgLBs, ok := fLBC.FakeStore[resourceGroupName]; ok {
|
||||
if _, ok := rgLBs[loadBalancerName]; ok {
|
||||
delete(rgLBs, loadBalancerName)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusNotFound,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (fLBC *fakeAzureLBClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) {
|
||||
fLBC.mutex.Lock()
|
||||
defer fLBC.mutex.Unlock()
|
||||
if _, ok := fLBC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := fLBC.FakeStore[resourceGroupName][loadBalancerName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such LB",
|
||||
}
|
||||
}
|
||||
|
||||
func (fLBC *fakeAzureLBClient) List(ctx context.Context, resourceGroupName string) (result []network.LoadBalancer, err error) {
|
||||
fLBC.mutex.Lock()
|
||||
defer fLBC.mutex.Unlock()
|
||||
|
||||
var value []network.LoadBalancer
|
||||
if _, ok := fLBC.FakeStore[resourceGroupName]; ok {
|
||||
for _, v := range fLBC.FakeStore[resourceGroupName] {
|
||||
value = append(value, v)
|
||||
}
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
type fakeAzurePIPClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]network.PublicIPAddress
|
||||
SubscriptionID string
|
||||
}
|
||||
|
||||
const publicIPAddressIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/publicIPAddresses/%s"
|
||||
|
||||
// returns the full identifier of a publicIPAddress.
|
||||
func getpublicIPAddressID(subscriptionID string, resourceGroupName, pipName string) string {
|
||||
return fmt.Sprintf(
|
||||
publicIPAddressIDTemplate,
|
||||
subscriptionID,
|
||||
resourceGroupName,
|
||||
pipName)
|
||||
}
|
||||
|
||||
func newFakeAzurePIPClient(subscriptionID string) *fakeAzurePIPClient {
|
||||
fAPC := &fakeAzurePIPClient{}
|
||||
fAPC.FakeStore = make(map[string]map[string]network.PublicIPAddress)
|
||||
fAPC.SubscriptionID = subscriptionID
|
||||
fAPC.mutex = &sync.Mutex{}
|
||||
return fAPC
|
||||
}
|
||||
|
||||
func (fAPC *fakeAzurePIPClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress) (resp *http.Response, err error) {
|
||||
fAPC.mutex.Lock()
|
||||
defer fAPC.mutex.Unlock()
|
||||
|
||||
if _, ok := fAPC.FakeStore[resourceGroupName]; !ok {
|
||||
fAPC.FakeStore[resourceGroupName] = make(map[string]network.PublicIPAddress)
|
||||
}
|
||||
|
||||
// assign id
|
||||
pipID := getpublicIPAddressID(fAPC.SubscriptionID, resourceGroupName, publicIPAddressName)
|
||||
parameters.ID = &pipID
|
||||
|
||||
// only create in the case user has not provided
|
||||
if parameters.PublicIPAddressPropertiesFormat != nil &&
|
||||
parameters.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod == network.Static {
|
||||
// assign ip
|
||||
parameters.IPAddress = getRandomIPPtr()
|
||||
}
|
||||
|
||||
fAPC.FakeStore[resourceGroupName][publicIPAddressName] = parameters
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (fAPC *fakeAzurePIPClient) Delete(ctx context.Context, resourceGroupName string, publicIPAddressName string) (resp *http.Response, err error) {
|
||||
fAPC.mutex.Lock()
|
||||
defer fAPC.mutex.Unlock()
|
||||
|
||||
if rgPIPs, ok := fAPC.FakeStore[resourceGroupName]; ok {
|
||||
if _, ok := rgPIPs[publicIPAddressName]; ok {
|
||||
delete(rgPIPs, publicIPAddressName)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusNotFound,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (fAPC *fakeAzurePIPClient) Get(ctx context.Context, resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) {
|
||||
fAPC.mutex.Lock()
|
||||
defer fAPC.mutex.Unlock()
|
||||
if _, ok := fAPC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := fAPC.FakeStore[resourceGroupName][publicIPAddressName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such PIP",
|
||||
}
|
||||
}
|
||||
|
||||
func (fAPC *fakeAzurePIPClient) List(ctx context.Context, resourceGroupName string) (result []network.PublicIPAddress, err error) {
|
||||
fAPC.mutex.Lock()
|
||||
defer fAPC.mutex.Unlock()
|
||||
|
||||
var value []network.PublicIPAddress
|
||||
if _, ok := fAPC.FakeStore[resourceGroupName]; ok {
|
||||
for _, v := range fAPC.FakeStore[resourceGroupName] {
|
||||
value = append(value, v)
|
||||
}
|
||||
}
|
||||
|
||||
return value, nil
|
||||
}
|
||||
|
||||
type fakeAzureInterfacesClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]network.Interface
|
||||
}
|
||||
|
||||
func newFakeAzureInterfacesClient() *fakeAzureInterfacesClient {
|
||||
fIC := &fakeAzureInterfacesClient{}
|
||||
fIC.FakeStore = make(map[string]map[string]network.Interface)
|
||||
fIC.mutex = &sync.Mutex{}
|
||||
|
||||
return fIC
|
||||
}
|
||||
|
||||
func (fIC *fakeAzureInterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkInterfaceName string, parameters network.Interface) (resp *http.Response, err error) {
|
||||
fIC.mutex.Lock()
|
||||
defer fIC.mutex.Unlock()
|
||||
|
||||
if _, ok := fIC.FakeStore[resourceGroupName]; !ok {
|
||||
fIC.FakeStore[resourceGroupName] = make(map[string]network.Interface)
|
||||
}
|
||||
fIC.FakeStore[resourceGroupName][networkInterfaceName] = parameters
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (fIC *fakeAzureInterfacesClient) Get(ctx context.Context, resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) {
|
||||
fIC.mutex.Lock()
|
||||
defer fIC.mutex.Unlock()
|
||||
if _, ok := fIC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := fIC.FakeStore[resourceGroupName][networkInterfaceName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such Interface",
|
||||
}
|
||||
}
|
||||
|
||||
func (fIC *fakeAzureInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
type fakeAzureVirtualMachinesClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]compute.VirtualMachine
|
||||
}
|
||||
|
||||
func newFakeAzureVirtualMachinesClient() *fakeAzureVirtualMachinesClient {
|
||||
fVMC := &fakeAzureVirtualMachinesClient{}
|
||||
fVMC.FakeStore = make(map[string]map[string]compute.VirtualMachine)
|
||||
fVMC.mutex = &sync.Mutex{}
|
||||
return fVMC
|
||||
}
|
||||
|
||||
func (fVMC *fakeAzureVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine) (resp *http.Response, err error) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
|
||||
if _, ok := fVMC.FakeStore[resourceGroupName]; !ok {
|
||||
fVMC.FakeStore[resourceGroupName] = make(map[string]compute.VirtualMachine)
|
||||
}
|
||||
fVMC.FakeStore[resourceGroupName][VMName] = parameters
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (fVMC *fakeAzureVirtualMachinesClient) Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
if _, ok := fVMC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := fVMC.FakeStore[resourceGroupName][VMName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such VM",
|
||||
}
|
||||
}
|
||||
|
||||
func (fVMC *fakeAzureVirtualMachinesClient) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachine, err error) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
|
||||
result = []compute.VirtualMachine{}
|
||||
if _, ok := fVMC.FakeStore[resourceGroupName]; ok {
|
||||
for _, v := range fVMC.FakeStore[resourceGroupName] {
|
||||
result = append(result, v)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
type fakeAzureSubnetsClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]network.Subnet
|
||||
}
|
||||
|
||||
func newFakeAzureSubnetsClient() *fakeAzureSubnetsClient {
|
||||
fASC := &fakeAzureSubnetsClient{}
|
||||
fASC.FakeStore = make(map[string]map[string]network.Subnet)
|
||||
fASC.mutex = &sync.Mutex{}
|
||||
return fASC
|
||||
}
|
||||
|
||||
func (fASC *fakeAzureSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet) (resp *http.Response, err error) {
|
||||
fASC.mutex.Lock()
|
||||
defer fASC.mutex.Unlock()
|
||||
|
||||
rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND")
|
||||
if _, ok := fASC.FakeStore[rgVnet]; !ok {
|
||||
fASC.FakeStore[rgVnet] = make(map[string]network.Subnet)
|
||||
}
|
||||
fASC.FakeStore[rgVnet][subnetName] = subnetParameters
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (fASC *fakeAzureSubnetsClient) Delete(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string) (resp *http.Response, err error) {
|
||||
fASC.mutex.Lock()
|
||||
defer fASC.mutex.Unlock()
|
||||
|
||||
rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND")
|
||||
if rgSubnets, ok := fASC.FakeStore[rgVnet]; ok {
|
||||
if _, ok := rgSubnets[subnetName]; ok {
|
||||
delete(rgSubnets, subnetName)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusNotFound,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (fASC *fakeAzureSubnetsClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) {
|
||||
fASC.mutex.Lock()
|
||||
defer fASC.mutex.Unlock()
|
||||
rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND")
|
||||
if _, ok := fASC.FakeStore[rgVnet]; ok {
|
||||
if entity, ok := fASC.FakeStore[rgVnet][subnetName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such Subnet",
|
||||
}
|
||||
}
|
||||
|
||||
func (fASC *fakeAzureSubnetsClient) List(ctx context.Context, resourceGroupName string, virtualNetworkName string) (result []network.Subnet, err error) {
|
||||
fASC.mutex.Lock()
|
||||
defer fASC.mutex.Unlock()
|
||||
|
||||
rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND")
|
||||
var value []network.Subnet
|
||||
if _, ok := fASC.FakeStore[rgVnet]; ok {
|
||||
for _, v := range fASC.FakeStore[rgVnet] {
|
||||
value = append(value, v)
|
||||
}
|
||||
}
|
||||
|
||||
return value, nil
|
||||
}
|
||||
|
||||
type fakeAzureNSGClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]network.SecurityGroup
|
||||
}
|
||||
|
||||
func newFakeAzureNSGClient() *fakeAzureNSGClient {
|
||||
fNSG := &fakeAzureNSGClient{}
|
||||
fNSG.FakeStore = make(map[string]map[string]network.SecurityGroup)
|
||||
fNSG.mutex = &sync.Mutex{}
|
||||
return fNSG
|
||||
}
|
||||
|
||||
func (fNSG *fakeAzureNSGClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup) (resp *http.Response, err error) {
|
||||
fNSG.mutex.Lock()
|
||||
defer fNSG.mutex.Unlock()
|
||||
|
||||
if _, ok := fNSG.FakeStore[resourceGroupName]; !ok {
|
||||
fNSG.FakeStore[resourceGroupName] = make(map[string]network.SecurityGroup)
|
||||
}
|
||||
fNSG.FakeStore[resourceGroupName][networkSecurityGroupName] = parameters
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (fNSG *fakeAzureNSGClient) Delete(ctx context.Context, resourceGroupName string, networkSecurityGroupName string) (resp *http.Response, err error) {
|
||||
fNSG.mutex.Lock()
|
||||
defer fNSG.mutex.Unlock()
|
||||
|
||||
if rgSGs, ok := fNSG.FakeStore[resourceGroupName]; ok {
|
||||
if _, ok := rgSGs[networkSecurityGroupName]; ok {
|
||||
delete(rgSGs, networkSecurityGroupName)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusNotFound,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (fNSG *fakeAzureNSGClient) Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) {
|
||||
fNSG.mutex.Lock()
|
||||
defer fNSG.mutex.Unlock()
|
||||
if _, ok := fNSG.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := fNSG.FakeStore[resourceGroupName][networkSecurityGroupName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such NSG",
|
||||
}
|
||||
}
|
||||
|
||||
func (fNSG *fakeAzureNSGClient) List(ctx context.Context, resourceGroupName string) (result []network.SecurityGroup, err error) {
|
||||
fNSG.mutex.Lock()
|
||||
defer fNSG.mutex.Unlock()
|
||||
|
||||
var value []network.SecurityGroup
|
||||
if _, ok := fNSG.FakeStore[resourceGroupName]; ok {
|
||||
for _, v := range fNSG.FakeStore[resourceGroupName] {
|
||||
value = append(value, v)
|
||||
}
|
||||
}
|
||||
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// getRandomIPPtr returns a pointer to a pseudo-randomly generated IPv4-style
// string "a.b.c.d" where each octet is in [0, 255].
// NOTE(review): rand.Seed is re-invoked with the wall clock on every call;
// two calls landing in the same nanosecond would reseed identically and
// return the same address. Acceptable for a test fake, but confirm no caller
// relies on uniqueness.
func getRandomIPPtr() *string {
	rand.Seed(time.Now().UnixNano())
	return to.StringPtr(fmt.Sprintf("%d.%d.%d.%d", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)))
}
|
||||
|
||||
type fakeVirtualMachineScaleSetVMsClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]compute.VirtualMachineScaleSetVM
|
||||
}
|
||||
|
||||
func newFakeVirtualMachineScaleSetVMsClient() *fakeVirtualMachineScaleSetVMsClient {
|
||||
fVMC := &fakeVirtualMachineScaleSetVMsClient{}
|
||||
fVMC.FakeStore = make(map[string]map[string]compute.VirtualMachineScaleSetVM)
|
||||
fVMC.mutex = &sync.Mutex{}
|
||||
|
||||
return fVMC
|
||||
}
|
||||
|
||||
func (fVMC *fakeVirtualMachineScaleSetVMsClient) setFakeStore(store map[string]map[string]compute.VirtualMachineScaleSetVM) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
|
||||
fVMC.FakeStore = store
|
||||
}
|
||||
|
||||
func (fVMC *fakeVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result []compute.VirtualMachineScaleSetVM, err error) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
|
||||
result = []compute.VirtualMachineScaleSetVM{}
|
||||
if _, ok := fVMC.FakeStore[resourceGroupName]; ok {
|
||||
for _, v := range fVMC.FakeStore[resourceGroupName] {
|
||||
result = append(result, v)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (fVMC *fakeVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVM, err error) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
|
||||
vmKey := fmt.Sprintf("%s_%s", VMScaleSetName, instanceID)
|
||||
if scaleSetMap, ok := fVMC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := scaleSetMap[vmKey]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "No such VirtualMachineScaleSetVM",
|
||||
}
|
||||
}
|
||||
|
||||
func (fVMC *fakeVirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result compute.VirtualMachineScaleSetVMInstanceView, err error) {
|
||||
_, err = fVMC.Get(ctx, resourceGroupName, VMScaleSetName, instanceID)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (fVMC *fakeVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) (resp *http.Response, err error) {
|
||||
fVMC.mutex.Lock()
|
||||
defer fVMC.mutex.Unlock()
|
||||
|
||||
vmKey := fmt.Sprintf("%s_%s", VMScaleSetName, instanceID)
|
||||
if scaleSetMap, ok := fVMC.FakeStore[resourceGroupName]; ok {
|
||||
if _, ok := scaleSetMap[vmKey]; ok {
|
||||
scaleSetMap[vmKey] = parameters
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type fakeVirtualMachineScaleSetsClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]compute.VirtualMachineScaleSet
|
||||
}
|
||||
|
||||
func newFakeVirtualMachineScaleSetsClient() *fakeVirtualMachineScaleSetsClient {
|
||||
fVMSSC := &fakeVirtualMachineScaleSetsClient{}
|
||||
fVMSSC.FakeStore = make(map[string]map[string]compute.VirtualMachineScaleSet)
|
||||
fVMSSC.mutex = &sync.Mutex{}
|
||||
|
||||
return fVMSSC
|
||||
}
|
||||
|
||||
func (fVMSSC *fakeVirtualMachineScaleSetsClient) setFakeStore(store map[string]map[string]compute.VirtualMachineScaleSet) {
|
||||
fVMSSC.mutex.Lock()
|
||||
defer fVMSSC.mutex.Unlock()
|
||||
|
||||
fVMSSC.FakeStore = store
|
||||
}
|
||||
|
||||
func (fVMSSC *fakeVirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) (resp *http.Response, err error) {
|
||||
fVMSSC.mutex.Lock()
|
||||
defer fVMSSC.mutex.Unlock()
|
||||
|
||||
if _, ok := fVMSSC.FakeStore[resourceGroupName]; !ok {
|
||||
fVMSSC.FakeStore[resourceGroupName] = make(map[string]compute.VirtualMachineScaleSet)
|
||||
}
|
||||
fVMSSC.FakeStore[resourceGroupName][VMScaleSetName] = parameters
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (fVMSSC *fakeVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) {
|
||||
fVMSSC.mutex.Lock()
|
||||
defer fVMSSC.mutex.Unlock()
|
||||
|
||||
if scaleSetMap, ok := fVMSSC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := scaleSetMap[VMScaleSetName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "No such ScaleSet",
|
||||
}
|
||||
}
|
||||
|
||||
func (fVMSSC *fakeVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachineScaleSet, err error) {
|
||||
fVMSSC.mutex.Lock()
|
||||
defer fVMSSC.mutex.Unlock()
|
||||
|
||||
result = []compute.VirtualMachineScaleSet{}
|
||||
if _, ok := fVMSSC.FakeStore[resourceGroupName]; ok {
|
||||
for _, v := range fVMSSC.FakeStore[resourceGroupName] {
|
||||
result = append(result, v)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (fVMSSC *fakeVirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (resp *http.Response, err error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type fakeRoutesClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]network.Route
|
||||
}
|
||||
|
||||
func newFakeRoutesClient() *fakeRoutesClient {
|
||||
fRC := &fakeRoutesClient{}
|
||||
fRC.FakeStore = make(map[string]map[string]network.Route)
|
||||
fRC.mutex = &sync.Mutex{}
|
||||
return fRC
|
||||
}
|
||||
|
||||
func (fRC *fakeRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route) (resp *http.Response, err error) {
|
||||
fRC.mutex.Lock()
|
||||
defer fRC.mutex.Unlock()
|
||||
|
||||
if _, ok := fRC.FakeStore[routeTableName]; !ok {
|
||||
fRC.FakeStore[routeTableName] = make(map[string]network.Route)
|
||||
}
|
||||
fRC.FakeStore[routeTableName][routeName] = routeParameters
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (fRC *fakeRoutesClient) Delete(ctx context.Context, resourceGroupName string, routeTableName string, routeName string) (resp *http.Response, err error) {
|
||||
fRC.mutex.Lock()
|
||||
defer fRC.mutex.Unlock()
|
||||
|
||||
if routes, ok := fRC.FakeStore[routeTableName]; ok {
|
||||
if _, ok := routes[routeName]; ok {
|
||||
delete(routes, routeName)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusNotFound,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type fakeRouteTablesClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]network.RouteTable
|
||||
Calls []string
|
||||
}
|
||||
|
||||
func newFakeRouteTablesClient() *fakeRouteTablesClient {
|
||||
fRTC := &fakeRouteTablesClient{}
|
||||
fRTC.FakeStore = make(map[string]map[string]network.RouteTable)
|
||||
fRTC.mutex = &sync.Mutex{}
|
||||
return fRTC
|
||||
}
|
||||
|
||||
func (fRTC *fakeRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, parameters network.RouteTable) (resp *http.Response, err error) {
|
||||
fRTC.mutex.Lock()
|
||||
defer fRTC.mutex.Unlock()
|
||||
|
||||
fRTC.Calls = append(fRTC.Calls, "CreateOrUpdate")
|
||||
|
||||
if _, ok := fRTC.FakeStore[resourceGroupName]; !ok {
|
||||
fRTC.FakeStore[resourceGroupName] = make(map[string]network.RouteTable)
|
||||
}
|
||||
fRTC.FakeStore[resourceGroupName][routeTableName] = parameters
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (fRTC *fakeRouteTablesClient) Get(ctx context.Context, resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) {
|
||||
fRTC.mutex.Lock()
|
||||
defer fRTC.mutex.Unlock()
|
||||
|
||||
fRTC.Calls = append(fRTC.Calls, "Get")
|
||||
|
||||
if _, ok := fRTC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := fRTC.FakeStore[resourceGroupName][routeTableName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such RouteTable",
|
||||
}
|
||||
}
|
||||
|
||||
type fakeFileClient struct {
|
||||
}
|
||||
|
||||
func (fFC *fakeFileClient) createFileShare(accountName, accountKey, name string, sizeGiB int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fFC *fakeFileClient) deleteFileShare(accountName, accountKey, name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fFC *fakeFileClient) resizeFileShare(accountName, accountKey, name string, sizeGiB int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type fakeStorageAccountClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]storage.Account
|
||||
Keys storage.AccountListKeysResult
|
||||
Accounts storage.AccountListResult
|
||||
Err error
|
||||
}
|
||||
|
||||
func newFakeStorageAccountClient() *fakeStorageAccountClient {
|
||||
fSAC := &fakeStorageAccountClient{}
|
||||
fSAC.FakeStore = make(map[string]map[string]storage.Account)
|
||||
fSAC.mutex = &sync.Mutex{}
|
||||
return fSAC
|
||||
}
|
||||
|
||||
func (fSAC *fakeStorageAccountClient) Create(ctx context.Context, resourceGroupName string, accountName string, parameters storage.AccountCreateParameters) (resp *http.Response, err error) {
|
||||
fSAC.mutex.Lock()
|
||||
defer fSAC.mutex.Unlock()
|
||||
|
||||
if _, ok := fSAC.FakeStore[resourceGroupName]; !ok {
|
||||
fSAC.FakeStore[resourceGroupName] = make(map[string]storage.Account)
|
||||
}
|
||||
fSAC.FakeStore[resourceGroupName][accountName] = storage.Account{
|
||||
Name: &accountName,
|
||||
Sku: parameters.Sku,
|
||||
Kind: parameters.Kind,
|
||||
Location: parameters.Location,
|
||||
Identity: parameters.Identity,
|
||||
Tags: parameters.Tags,
|
||||
AccountProperties: &storage.AccountProperties{},
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (fSAC *fakeStorageAccountClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) {
|
||||
fSAC.mutex.Lock()
|
||||
defer fSAC.mutex.Unlock()
|
||||
|
||||
if rgAccounts, ok := fSAC.FakeStore[resourceGroupName]; ok {
|
||||
if _, ok := rgAccounts[accountName]; ok {
|
||||
delete(rgAccounts, accountName)
|
||||
result.Response = &http.Response{
|
||||
StatusCode: http.StatusAccepted,
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
}
|
||||
|
||||
result.Response = &http.Response{
|
||||
StatusCode: http.StatusNotFound,
|
||||
}
|
||||
err = autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such StorageAccount",
|
||||
}
|
||||
return result, err
|
||||
}
|
||||
|
||||
func (fSAC *fakeStorageAccountClient) ListKeys(ctx context.Context, resourceGroupName string, accountName string) (result storage.AccountListKeysResult, err error) {
|
||||
return fSAC.Keys, fSAC.Err
|
||||
}
|
||||
|
||||
func (fSAC *fakeStorageAccountClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result storage.AccountListResult, err error) {
|
||||
return fSAC.Accounts, fSAC.Err
|
||||
}
|
||||
|
||||
func (fSAC *fakeStorageAccountClient) GetProperties(ctx context.Context, resourceGroupName string, accountName string) (result storage.Account, err error) {
|
||||
fSAC.mutex.Lock()
|
||||
defer fSAC.mutex.Unlock()
|
||||
|
||||
if _, ok := fSAC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := fSAC.FakeStore[resourceGroupName][accountName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such StorageAccount",
|
||||
}
|
||||
}
|
||||
|
||||
type fakeDisksClient struct {
|
||||
mutex *sync.Mutex
|
||||
FakeStore map[string]map[string]compute.Disk
|
||||
}
|
||||
|
||||
func newFakeDisksClient() *fakeDisksClient {
|
||||
fDC := &fakeDisksClient{}
|
||||
fDC.FakeStore = make(map[string]map[string]compute.Disk)
|
||||
fDC.mutex = &sync.Mutex{}
|
||||
return fDC
|
||||
}
|
||||
|
||||
func (fDC *fakeDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.Disk) (resp *http.Response, err error) {
|
||||
fDC.mutex.Lock()
|
||||
defer fDC.mutex.Unlock()
|
||||
|
||||
if _, ok := fDC.FakeStore[resourceGroupName]; !ok {
|
||||
fDC.FakeStore[resourceGroupName] = make(map[string]compute.Disk)
|
||||
}
|
||||
fDC.FakeStore[resourceGroupName][diskName] = diskParameter
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (fDC *fakeDisksClient) Delete(ctx context.Context, resourceGroupName string, diskName string) (resp *http.Response, err error) {
|
||||
fDC.mutex.Lock()
|
||||
defer fDC.mutex.Unlock()
|
||||
|
||||
if rgDisks, ok := fDC.FakeStore[resourceGroupName]; ok {
|
||||
if _, ok := rgDisks[diskName]; ok {
|
||||
delete(rgDisks, diskName)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusAccepted,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (fDC *fakeDisksClient) Get(ctx context.Context, resourceGroupName string, diskName string) (result compute.Disk, err error) {
|
||||
fDC.mutex.Lock()
|
||||
defer fDC.mutex.Unlock()
|
||||
|
||||
if _, ok := fDC.FakeStore[resourceGroupName]; ok {
|
||||
if entity, ok := fDC.FakeStore[resourceGroupName][diskName]; ok {
|
||||
return entity, nil
|
||||
}
|
||||
}
|
||||
|
||||
return result, autorest.DetailedError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Message: "Not such Disk",
|
||||
}
|
||||
}
|
||||
|
||||
type fakeVMSet struct {
|
||||
NodeToIP map[string]string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetInstanceIDByNodeName(name string) (string, error) {
|
||||
return "", fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetInstanceTypeByNodeName(name string) (string, error) {
|
||||
return "", fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetIPByNodeName(name string) (string, string, error) {
|
||||
ip, found := f.NodeToIP[name]
|
||||
if !found {
|
||||
return "", "", fmt.Errorf("not found")
|
||||
}
|
||||
|
||||
return ip, "", nil
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
|
||||
return network.Interface{}, fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
|
||||
return types.NodeName(""), fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
|
||||
return cloudprovider.Zone{}, fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetPrimaryVMSetName() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) {
|
||||
return nil, fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
|
||||
return fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
|
||||
return fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
|
||||
return fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
|
||||
return fmt.Errorf("unimplemented")
|
||||
}
|
||||
|
||||
func (f *fakeVMSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) {
|
||||
return nil, fmt.Errorf("unimplemented")
|
||||
}
|
115
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_file.go
generated
vendored
Normal file
115
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_file.go
generated
vendored
Normal file
@@ -0,0 +1,115 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
azs "github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const (
|
||||
useHTTPS = true
|
||||
)
|
||||
|
||||
// FileClient is the interface for creating file shares, interface for test
|
||||
// injection.
|
||||
type FileClient interface {
|
||||
createFileShare(accountName, accountKey, name string, sizeGiB int) error
|
||||
deleteFileShare(accountName, accountKey, name string) error
|
||||
resizeFileShare(accountName, accountKey, name string, sizeGiB int) error
|
||||
}
|
||||
|
||||
// create file share
|
||||
func (az *Cloud) createFileShare(accountName, accountKey, name string, sizeGiB int) error {
|
||||
return az.FileClient.createFileShare(accountName, accountKey, name, sizeGiB)
|
||||
}
|
||||
|
||||
func (az *Cloud) deleteFileShare(accountName, accountKey, name string) error {
|
||||
return az.FileClient.deleteFileShare(accountName, accountKey, name)
|
||||
}
|
||||
|
||||
func (az *Cloud) resizeFileShare(accountName, accountKey, name string, sizeGiB int) error {
|
||||
return az.FileClient.resizeFileShare(accountName, accountKey, name, sizeGiB)
|
||||
}
|
||||
|
||||
type azureFileClient struct {
|
||||
env azure.Environment
|
||||
}
|
||||
|
||||
func (f *azureFileClient) createFileShare(accountName, accountKey, name string, sizeGiB int) error {
|
||||
fileClient, err := f.getFileSvcClient(accountName, accountKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// create a file share and set quota
|
||||
// Note. Per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share,
|
||||
// setting x-ms-share-quota can set quota on the new share, but in reality, setting quota in CreateShare
|
||||
// receives error "The metadata specified is invalid. It has characters that are not permitted."
|
||||
// As a result,breaking into two API calls: create share and set quota
|
||||
share := fileClient.GetShareReference(name)
|
||||
if err = share.Create(nil); err != nil {
|
||||
return fmt.Errorf("failed to create file share, err: %v", err)
|
||||
}
|
||||
share.Properties.Quota = sizeGiB
|
||||
if err = share.SetProperties(nil); err != nil {
|
||||
if err := share.Delete(nil); err != nil {
|
||||
glog.Errorf("Error deleting share: %v", err)
|
||||
}
|
||||
return fmt.Errorf("failed to set quota on file share %s, err: %v", name, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// delete a file share
|
||||
func (f *azureFileClient) deleteFileShare(accountName, accountKey, name string) error {
|
||||
fileClient, err := f.getFileSvcClient(accountName, accountKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fileClient.GetShareReference(name).Delete(nil)
|
||||
}
|
||||
|
||||
func (f *azureFileClient) resizeFileShare(accountName, accountKey, name string, sizeGiB int) error {
|
||||
fileClient, err := f.getFileSvcClient(accountName, accountKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
share := fileClient.GetShareReference(name)
|
||||
if share.Properties.Quota >= sizeGiB {
|
||||
glog.Warningf("file share size(%dGi) is already greater or equal than requested size(%dGi), accountName: %s, shareName: %s",
|
||||
share.Properties.Quota, sizeGiB, accountName, name)
|
||||
return nil
|
||||
}
|
||||
share.Properties.Quota = sizeGiB
|
||||
if err = share.SetProperties(nil); err != nil {
|
||||
return fmt.Errorf("failed to set quota on file share %s, err: %v", name, err)
|
||||
}
|
||||
glog.V(4).Infof("resize file share completed, accountName: %s, shareName: %s, sizeGiB: %d", accountName, name, sizeGiB)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *azureFileClient) getFileSvcClient(accountName, accountKey string) (*azs.FileServiceClient, error) {
|
||||
fileClient, err := azs.NewClient(accountName, accountKey, f.env.StorageEndpointSuffix, azs.DefaultAPIVersion, useHTTPS)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating azure client: %v", err)
|
||||
}
|
||||
fc := fileClient.GetFileService()
|
||||
return &fc, nil
|
||||
}
|
113
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instance_metadata.go
generated
vendored
Normal file
113
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instance_metadata.go
generated
vendored
Normal file
@@ -0,0 +1,113 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
const metadataURL = "http://169.254.169.254/metadata/"
|
||||
|
||||
// NetworkMetadata contains metadata about an instance's network
|
||||
type NetworkMetadata struct {
|
||||
Interface []NetworkInterface `json:"interface"`
|
||||
}
|
||||
|
||||
// NetworkInterface represents an instances network interface.
|
||||
type NetworkInterface struct {
|
||||
IPV4 NetworkData `json:"ipv4"`
|
||||
IPV6 NetworkData `json:"ipv6"`
|
||||
MAC string `json:"macAddress"`
|
||||
}
|
||||
|
||||
// NetworkData contains IP information for a network.
|
||||
type NetworkData struct {
|
||||
IPAddress []IPAddress `json:"ipAddress"`
|
||||
Subnet []Subnet `json:"subnet"`
|
||||
}
|
||||
|
||||
// IPAddress represents IP address information.
|
||||
type IPAddress struct {
|
||||
PrivateIP string `json:"privateIPAddress"`
|
||||
PublicIP string `json:"publicIPAddress"`
|
||||
}
|
||||
|
||||
// Subnet represents subnet information.
|
||||
type Subnet struct {
|
||||
Address string `json:"address"`
|
||||
Prefix string `json:"prefix"`
|
||||
}
|
||||
|
||||
// InstanceMetadata knows how to query the Azure instance metadata server.
|
||||
type InstanceMetadata struct {
|
||||
baseURL string
|
||||
}
|
||||
|
||||
// NewInstanceMetadata creates an instance of the InstanceMetadata accessor object.
|
||||
func NewInstanceMetadata() *InstanceMetadata {
|
||||
return &InstanceMetadata{
|
||||
baseURL: metadataURL,
|
||||
}
|
||||
}
|
||||
|
||||
// makeMetadataURL makes a complete metadata URL from the given path.
|
||||
func (i *InstanceMetadata) makeMetadataURL(path string) string {
|
||||
return i.baseURL + path
|
||||
}
|
||||
|
||||
// Object queries the metadata server and populates the passed in object
|
||||
func (i *InstanceMetadata) Object(path string, obj interface{}) error {
|
||||
data, err := i.queryMetadataBytes(path, "json")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return json.Unmarshal(data, obj)
|
||||
}
|
||||
|
||||
// Text queries the metadata server and returns the corresponding text
|
||||
func (i *InstanceMetadata) Text(path string) (string, error) {
|
||||
data, err := i.queryMetadataBytes(path, "text")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(data), err
|
||||
}
|
||||
|
||||
func (i *InstanceMetadata) queryMetadataBytes(path, format string) ([]byte, error) {
|
||||
client := &http.Client{}
|
||||
|
||||
req, err := http.NewRequest("GET", i.makeMetadataURL(path), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Add("Metadata", "True")
|
||||
|
||||
q := req.URL.Query()
|
||||
q.Add("format", format)
|
||||
q.Add("api-version", "2017-04-02")
|
||||
req.URL.RawQuery = q.Encode()
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return ioutil.ReadAll(resp.Body)
|
||||
}
|
231
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go
generated
vendored
Normal file
231
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go
generated
vendored
Normal file
@@ -0,0 +1,231 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// NodeAddresses returns the addresses of the specified instance.
|
||||
func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
|
||||
addressGetter := func(nodeName types.NodeName) ([]v1.NodeAddress, error) {
|
||||
ip, publicIP, err := az.GetIPForMachineWithRetry(nodeName)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("NodeAddresses(%s) abort backoff: %v", nodeName, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
addresses := []v1.NodeAddress{
|
||||
{Type: v1.NodeInternalIP, Address: ip},
|
||||
{Type: v1.NodeHostName, Address: string(name)},
|
||||
}
|
||||
if len(publicIP) > 0 {
|
||||
addresses = append(addresses, v1.NodeAddress{
|
||||
Type: v1.NodeExternalIP,
|
||||
Address: publicIP,
|
||||
})
|
||||
}
|
||||
return addresses, nil
|
||||
}
|
||||
|
||||
if az.UseInstanceMetadata {
|
||||
isLocalInstance, err := az.isCurrentInstance(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Not local instance, get addresses from Azure ARM API.
|
||||
if !isLocalInstance {
|
||||
return addressGetter(name)
|
||||
}
|
||||
|
||||
ipAddress := IPAddress{}
|
||||
err = az.metadata.Object("instance/network/interface/0/ipv4/ipAddress/0", &ipAddress)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addresses := []v1.NodeAddress{
|
||||
{Type: v1.NodeInternalIP, Address: ipAddress.PrivateIP},
|
||||
{Type: v1.NodeHostName, Address: string(name)},
|
||||
}
|
||||
if len(ipAddress.PublicIP) > 0 {
|
||||
addr := v1.NodeAddress{
|
||||
Type: v1.NodeExternalIP,
|
||||
Address: ipAddress.PublicIP,
|
||||
}
|
||||
addresses = append(addresses, addr)
|
||||
}
|
||||
return addresses, nil
|
||||
}
|
||||
|
||||
return addressGetter(name)
|
||||
}
|
||||
|
||||
// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
|
||||
// This method will not be called from the node that is requesting this ID. i.e. metadata service
|
||||
// and other local methods cannot be used here
|
||||
func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
|
||||
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return az.NodeAddresses(ctx, name)
|
||||
}
|
||||
|
||||
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
|
||||
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
|
||||
func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
|
||||
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
_, err = az.InstanceID(ctx, name)
|
||||
if err != nil {
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes
|
||||
func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
|
||||
return false, cloudprovider.NotImplemented
|
||||
}
|
||||
|
||||
func (az *Cloud) isCurrentInstance(name types.NodeName) (bool, error) {
|
||||
nodeName := mapNodeNameToVMName(name)
|
||||
metadataName, err := az.metadata.Text("instance/compute/name")
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if az.VMType == vmTypeVMSS {
|
||||
// VMSS vmName is not same with hostname, use hostname instead.
|
||||
metadataName, err = os.Hostname()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
metadataName = strings.ToLower(metadataName)
|
||||
return (metadataName == nodeName), err
|
||||
}
|
||||
|
||||
// InstanceID returns the cloud provider ID of the specified instance.
|
||||
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
|
||||
func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
|
||||
nodeName := mapNodeNameToVMName(name)
|
||||
|
||||
if az.UseInstanceMetadata {
|
||||
isLocalInstance, err := az.isCurrentInstance(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Not local instance, get instanceID from Azure ARM API.
|
||||
if !isLocalInstance {
|
||||
return az.vmSet.GetInstanceIDByNodeName(nodeName)
|
||||
}
|
||||
|
||||
// Compose instanceID based on nodeName for standard instance.
|
||||
if az.VMType == vmTypeStandard {
|
||||
return az.getStandardMachineID(nodeName), nil
|
||||
}
|
||||
|
||||
// Get scale set name and instanceID from vmName for vmss.
|
||||
metadataName, err := az.metadata.Text("instance/compute/name")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
ssName, instanceID, err := extractVmssVMName(metadataName)
|
||||
if err != nil {
|
||||
if err == ErrorNotVmssInstance {
|
||||
// Compose machineID for standard Node.
|
||||
return az.getStandardMachineID(nodeName), nil
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
// Compose instanceID based on ssName and instanceID for vmss instance.
|
||||
return az.getVmssMachineID(ssName, instanceID), nil
|
||||
}
|
||||
|
||||
return az.vmSet.GetInstanceIDByNodeName(nodeName)
|
||||
}
|
||||
|
||||
// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
|
||||
// This method will not be called from the node that is requesting this ID. i.e. metadata service
|
||||
// and other local methods cannot be used here
|
||||
func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
|
||||
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return az.InstanceType(ctx, name)
|
||||
}
|
||||
|
||||
// InstanceType returns the type of the specified instance.
|
||||
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
|
||||
// (Implementer Note): This is used by kubelet. Kubelet will label the node. Real log from kubelet:
|
||||
// Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value]
|
||||
func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, error) {
|
||||
if az.UseInstanceMetadata {
|
||||
isLocalInstance, err := az.isCurrentInstance(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if isLocalInstance {
|
||||
machineType, err := az.metadata.Text("instance/compute/vmSize")
|
||||
if err == nil {
|
||||
return machineType, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return az.vmSet.GetInstanceTypeByNodeName(string(name))
|
||||
}
|
||||
|
||||
// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
|
||||
// expected format for the key is standard ssh-keygen format: <protocol> <blob>
|
||||
func (az *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
|
||||
return cloudprovider.NotImplemented
|
||||
}
|
||||
|
||||
// CurrentNodeName returns the name of the node we are currently running on.
|
||||
// On Azure this is the hostname, so we just return the hostname.
|
||||
func (az *Cloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
|
||||
return types.NodeName(hostname), nil
|
||||
}
|
||||
|
||||
// mapNodeNameToVMName maps a k8s NodeName to an Azure VM Name
|
||||
// This is a simple string cast.
|
||||
func mapNodeNameToVMName(nodeName types.NodeName) string {
|
||||
return string(nodeName)
|
||||
}
|
1400
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
generated
vendored
Normal file
1400
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
77
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.md
generated
vendored
Normal file
77
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.md
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
# Azure LoadBalancer
|
||||
|
||||
The way Azure defines a LoadBalancer is different from GCE or AWS. Azure's LB can have multiple frontend IP refs, while GCE and AWS only allow one; if you want more, you need another LB. Because of this, a Public IP is not part of the LB in Azure, and the NSG is not part of the LB either. However, you cannot delete them in parallel: a Public IP can only be deleted after the LB's frontend IP ref is removed.
|
||||
|
||||
Different Azure resources, such as LB, Public IP, and NSG, are same-tier Azure resources. We need to make sure there is no coupling between their individual ensure loops. In other words, each would eventually be reconciled regardless of the other resources' state; each should depend only on the service state.
|
||||
|
||||
Despite the ideal philosophy above, we have to face the reality. NSG depends on LB's frontend ip to adjust NSG rules. So when we want to reconcile NSG, the LB should contain the corresponding frontend ip config.
|
||||
|
||||
Also, for Azure we cannot afford more than 1 worker of service_controller: different services can operate on the same LB, so concurrent execution could result in conflicts or unexpected results. AWS and GCE apparently don't have this problem — they use one LB per service, so there is no such conflict.
|
||||
|
||||
There are two load balancers per availability set internal and external. There is a limit on number of services that can be associated with a single load balancer.
|
||||
By default primary load balancer is selected. Services can be annotated to allow auto selection of available load balancers. Service annotations can also be used to provide specific availability sets that host the load balancers. Note that in case of auto selection or specific availability set selection, when the availability set is lost in case of downtime or cluster scale down the services are currently not auto assigned to an available load balancer.
|
||||
Service Annotation for Auto and specific load balancer mode
|
||||
|
||||
- service.beta.kubernetes.io/azure-load-balancer-mode" (__auto__|as1,as2...)
|
||||
|
||||
## Introduce Functions
|
||||
|
||||
- reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error)
|
||||
- Go through lb's properties, update based on wantLb
|
||||
- If any change on the lb, no matter if the lb exists or not
|
||||
- Call az cloud to CreateOrUpdate on this lb, or Delete if nothing left
|
||||
- return lb, err
|
||||
|
||||
- reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, wantLb bool) (*network.SecurityGroup, error)
|
||||
  - Go through the NSG's properties, update based on wantLb
|
||||
- Use destinationIPAddress as target address if possible
|
||||
- Consolidate NSG rules if possible
|
||||
  - If any change on the NSG (the NSG should always exist)
|
||||
- Call az cloud to CreateOrUpdate on this NSG
|
||||
- return sg, err
|
||||
|
||||
- reconcilePublicIP(clusterName string, service *v1.Service, wantLb bool) (*network.PublicIPAddress, error)
|
||||
- List all the public ip in the resource group
|
||||
- Make sure we only touch Public IP resources has tags[service] = "namespace/serviceName"
|
||||
- skip for wantLb && !isInternal && pipName == desiredPipName
|
||||
- delete other public ip resources if any
|
||||
- if !isInternal && wantLb
|
||||
- ensure Public IP with desiredPipName exists
|
||||
|
||||
- getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool) (lb, status, exists, error)
|
||||
- gets the loadbalancer for the service if it already exists
|
||||
- If wantLb is TRUE then -it selects a new load balancer, the selection helps distribute the services across load balancers
|
||||
  - In case the selected load balancer does not exist, it returns a network.LoadBalancer struct with added metadata (such as name, location) and existsLB set to FALSE
|
||||
- By default - cluster default LB is returned
|
||||
|
||||
## Define interface behaviors
|
||||
|
||||
### GetLoadBalancer
|
||||
|
||||
- Get LoadBalancer status, return status, error
|
||||
- return the load balancer status for this service
|
||||
- it will not create or update or delete any resource
|
||||
|
||||
### EnsureLoadBalancer
|
||||
|
||||
- Reconcile LB for the flipped service
|
||||
  - Call reconcileLoadBalancer(clusterName, flippedService, nil, false /* wantLb */)
|
||||
- Reconcile Public IP
|
||||
- Call reconcilePublicIP(cluster, service, true)
|
||||
- Reconcile LB's related and owned resources, such as FrontEndIPConfig, Rules, Probe.
|
||||
- Call reconcileLoadBalancer(clusterName, service, nodes, true /* wantLb */)
|
||||
- Reconcile NSG rules, it need to be called after reconcileLB
|
||||
- Call reconcileSecurityGroup(clusterName, service, lbStatus, true /* wantLb */)
|
||||
|
||||
### UpdateLoadBalancer
|
||||
|
||||
- Has no difference with EnsureLoadBalancer
|
||||
|
||||
### EnsureLoadBalancerDeleted
|
||||
|
||||
- Reconcile NSG first, before reconcile LB, because SG need LB to be there
|
||||
- Call reconcileSecurityGroup(clusterName, service, nil, false /* wantLb */)
|
||||
- Reconcile LB's related and owned resources, such as FrontEndIPConfig, Rules, Probe.
|
||||
- Call reconcileLoadBalancer(clusterName, service, nodes, false)
|
||||
- Reconcile Public IP, public IP needs related LB reconciled first
|
||||
- Call reconcilePublicIP(cluster, service, false)
|
212
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go
generated
vendored
Normal file
212
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go
generated
vendored
Normal file
@@ -0,0 +1,212 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestFindProbe exercises findProbe's matching rules: a probe counts as
// "found" only when an existing probe shares both its name and its port.
func TestFindProbe(t *testing.T) {
	tests := []struct {
		msg           string        // human-readable description of the case
		existingProbe []network.Probe
		curProbe      network.Probe
		expected      bool          // expected findProbe result
	}{
		{
			msg:      "empty existing probes should return false",
			expected: false,
		},
		{
			msg: "probe names match while ports unmatch should return false",
			existingProbe: []network.Probe{
				{
					Name: to.StringPtr("httpProbe"),
					ProbePropertiesFormat: &network.ProbePropertiesFormat{
						Port: to.Int32Ptr(1),
					},
				},
			},
			curProbe: network.Probe{
				Name: to.StringPtr("httpProbe"),
				ProbePropertiesFormat: &network.ProbePropertiesFormat{
					Port: to.Int32Ptr(2),
				},
			},
			expected: false,
		},
		{
			msg: "probe ports match while names unmatch should return false",
			existingProbe: []network.Probe{
				{
					Name: to.StringPtr("probe1"),
					ProbePropertiesFormat: &network.ProbePropertiesFormat{
						Port: to.Int32Ptr(1),
					},
				},
			},
			curProbe: network.Probe{
				Name: to.StringPtr("probe2"),
				ProbePropertiesFormat: &network.ProbePropertiesFormat{
					Port: to.Int32Ptr(1),
				},
			},
			expected: false,
		},
		{
			msg: "both probe ports and names match should return true",
			existingProbe: []network.Probe{
				{
					Name: to.StringPtr("matchName"),
					ProbePropertiesFormat: &network.ProbePropertiesFormat{
						Port: to.Int32Ptr(1),
					},
				},
			},
			curProbe: network.Probe{
				Name: to.StringPtr("matchName"),
				ProbePropertiesFormat: &network.ProbePropertiesFormat{
					Port: to.Int32Ptr(1),
				},
			},
			expected: true,
		},
	}

	// Each case reports its index and message on failure for easy lookup.
	for i, test := range tests {
		findResult := findProbe(test.existingProbe, test.curProbe)
		assert.Equal(t, test.expected, findResult, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
	}
}
|
||||
|
||||
// TestFindRule exercises findRule: a load-balancing rule is "found" only when
// an existing rule matches both its name and its relevant properties
// (frontend port, backend port, load distribution).
func TestFindRule(t *testing.T) {
	tests := []struct {
		msg          string                      // human-readable description of the case
		existingRule []network.LoadBalancingRule
		curRule      network.LoadBalancingRule
		expected     bool                        // expected findRule result
	}{
		{
			msg:      "empty existing rules should return false",
			expected: false,
		},
		{
			msg: "rule names unmatch should return false",
			existingRule: []network.LoadBalancingRule{
				{
					Name: to.StringPtr("httpProbe1"),
					LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
						FrontendPort: to.Int32Ptr(1),
					},
				},
			},
			curRule: network.LoadBalancingRule{
				Name: to.StringPtr("httpProbe2"),
				LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
					FrontendPort: to.Int32Ptr(1),
				},
			},
			expected: false,
		},
		{
			msg: "rule names match while frontend ports unmatch should return false",
			existingRule: []network.LoadBalancingRule{
				{
					Name: to.StringPtr("httpProbe"),
					LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
						FrontendPort: to.Int32Ptr(1),
					},
				},
			},
			curRule: network.LoadBalancingRule{
				Name: to.StringPtr("httpProbe"),
				LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
					FrontendPort: to.Int32Ptr(2),
				},
			},
			expected: false,
		},
		{
			msg: "rule names match while backend ports unmatch should return false",
			existingRule: []network.LoadBalancingRule{
				{
					Name: to.StringPtr("httpProbe"),
					LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
						BackendPort: to.Int32Ptr(1),
					},
				},
			},
			curRule: network.LoadBalancingRule{
				Name: to.StringPtr("httpProbe"),
				LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
					BackendPort: to.Int32Ptr(2),
				},
			},
			expected: false,
		},
		{
			// NOTE(review): the msg says names match, but the fixture uses
			// "probe1" vs "probe2" — so this case also differs by name.
			// Confirm whether LoadDistribution mismatch alone is covered.
			msg: "rule names match while LoadDistribution unmatch should return false",
			existingRule: []network.LoadBalancingRule{
				{
					Name: to.StringPtr("probe1"),
					LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
						LoadDistribution: network.Default,
					},
				},
			},
			curRule: network.LoadBalancingRule{
				Name: to.StringPtr("probe2"),
				LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
					LoadDistribution: network.SourceIP,
				},
			},
			expected: false,
		},
		{
			msg: "both rule names and LoadBalancingRulePropertiesFormats match should return true",
			existingRule: []network.LoadBalancingRule{
				{
					Name: to.StringPtr("matchName"),
					LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
						BackendPort:      to.Int32Ptr(2),
						FrontendPort:     to.Int32Ptr(2),
						LoadDistribution: network.SourceIP,
					},
				},
			},
			curRule: network.LoadBalancingRule{
				Name: to.StringPtr("matchName"),
				LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
					BackendPort:      to.Int32Ptr(2),
					FrontendPort:     to.Int32Ptr(2),
					LoadDistribution: network.SourceIP,
				},
			},
			expected: true,
		},
	}

	// Each case reports its index and message on failure for easy lookup.
	for i, test := range tests {
		findResult := findRule(test.existingRule, test.curRule)
		assert.Equal(t, test.expected, findResult, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
	}
}
|
175
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_managedDiskController.go
generated
vendored
Normal file
175
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_managedDiskController.go
generated
vendored
Normal file
@@ -0,0 +1,175 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
kwait "k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
)
|
||||
|
||||
//ManagedDiskController : managed disk controller struct
// It wraps the shared controllerCommon state (clients, resource group,
// location) used for managed-disk create/delete/resize operations.
type ManagedDiskController struct {
	common *controllerCommon
}
|
||||
|
||||
// newManagedDiskController builds a ManagedDiskController around the shared
// controller state. It never fails; the error return exists to match the
// sibling controller constructors.
func newManagedDiskController(common *controllerCommon) (*ManagedDiskController, error) {
	return &ManagedDiskController{common: common}, nil
}
|
||||
|
||||
//CreateManagedDisk : create managed disk
// It issues an ARM CreateOrUpdate for a disk with the requested SKU and size,
// then polls (with defaultBackOff) until provisioningState reports Succeeded,
// and returns the ARM disk ID.
func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error) {
	glog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB)

	// Always tag the disk with its creator so orphaned disks can be traced.
	newTags := make(map[string]*string)
	azureDDTag := "kubernetes-azure-dd"
	newTags["created-by"] = &azureDDTag

	// insert original tags to newTags
	if tags != nil {
		for k, v := range tags {
			// Azure won't allow / (forward slash) in tags
			newKey := strings.Replace(k, "/", "-", -1)
			newValue := strings.Replace(v, "/", "-", -1)
			newTags[newKey] = &newValue
		}
	}

	diskSizeGB := int32(sizeGB)
	model := compute.Disk{
		Location: &c.common.location,
		Tags:     newTags,
		Sku: &compute.DiskSku{
			Name: compute.StorageAccountTypes(storageAccountType),
		},
		DiskProperties: &compute.DiskProperties{
			DiskSizeGB:   &diskSizeGB,
			CreationData: &compute.CreationData{CreateOption: compute.Empty},
		}}
	ctx, cancel := getContextWithCancel()
	defer cancel()
	_, err := c.common.cloud.DisksClient.CreateOrUpdate(ctx, c.common.resourceGroup, diskName, model)
	if err != nil {
		return "", err
	}

	diskID := ""

	err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
		provisionState, id, err := c.getDisk(diskName)
		diskID = id
		// We are waiting for provisioningState==Succeeded
		// We don't want to hand-off managed disks to k8s while they are
		//still being provisioned, this is to avoid some race conditions
		if err != nil {
			return false, err
		}
		if strings.ToLower(provisionState) == "succeeded" {
			return true, nil
		}
		return false, nil
	})

	// NOTE(review): a poll failure/timeout is only logged, not returned — the
	// (possibly empty) diskID is handed back with a nil error. Confirm that
	// callers tolerate an unconfirmed provisioning state.
	if err != nil {
		glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", diskName, storageAccountType, sizeGB)
	} else {
		glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB)
	}

	return diskID, nil
}
|
||||
|
||||
//DeleteManagedDisk : delete managed disk
// diskURI is an ARM resource URI; only its final path segment (the disk name)
// is used for the delete call.
func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error {
	diskName := path.Base(diskURI)
	ctx, cancel := getContextWithCancel()
	defer cancel()

	_, err := c.common.cloud.DisksClient.Delete(ctx, c.common.resourceGroup, diskName)
	if err != nil {
		return err
	}
	// We don't need poll here, k8s will immediately stop referencing the disk
	// the disk will be eventually deleted - cleanly - by ARM

	glog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI)

	return nil
}
|
||||
|
||||
// return: disk provisionState, diskID, error
// getDisk fetches the named managed disk and reports its ARM provisioning
// state and resource ID.
func (c *ManagedDiskController) getDisk(diskName string) (string, string, error) {
	ctx, cancel := getContextWithCancel()
	defer cancel()

	result, err := c.common.cloud.DisksClient.Get(ctx, c.common.resourceGroup, diskName)
	if err != nil {
		return "", "", err
	}

	if result.DiskProperties != nil && (*result.DiskProperties).ProvisioningState != nil {
		return *(*result.DiskProperties).ProvisioningState, *result.ID, nil
	}

	// NOTE(review): err is nil here, so a disk with missing properties yields
	// ("", "", nil); callers polling on provisionState will simply retry until
	// their backoff expires. Confirm an explicit error wouldn't be better.
	return "", "", err
}
|
||||
|
||||
// ResizeDisk Expand the disk to new size
// Returns the size actually granted (rounded up to whole GiB). If the disk is
// already at least as large as requested, it returns the rounded size without
// calling the API; on any failure it returns oldSize unchanged.
func (c *ManagedDiskController) ResizeDisk(diskName string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
	ctx, cancel := getContextWithCancel()
	defer cancel()

	result, err := c.common.cloud.DisksClient.Get(ctx, c.common.resourceGroup, diskName)
	if err != nil {
		return oldSize, err
	}

	if result.DiskProperties == nil || result.DiskProperties.DiskSizeGB == nil {
		return oldSize, fmt.Errorf("DiskProperties of disk(%s) is nil", diskName)
	}

	requestBytes := newSize.Value()
	// Azure resizes in chunks of GiB (not GB)
	requestGiB := int32(util.RoundUpSize(requestBytes, 1024*1024*1024))
	newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB))

	glog.V(2).Infof("azureDisk - begin to resize disk(%s) with new size(%d), old size(%v)", diskName, requestGiB, oldSize)
	// If disk already of greater or equal size than requested we return
	if *result.DiskProperties.DiskSizeGB >= requestGiB {
		return newSizeQuant, nil
	}

	result.DiskProperties.DiskSizeGB = &requestGiB

	// A fresh context for the update call; the first one may be near expiry.
	ctx, cancel = getContextWithCancel()
	defer cancel()
	if _, err := c.common.cloud.DisksClient.CreateOrUpdate(ctx, c.common.resourceGroup, diskName, result); err != nil {
		return oldSize, err
	}

	glog.V(2).Infof("azureDisk - resize disk(%s) with new size(%d) completed", diskName, requestGiB)

	return newSizeQuant, nil
}
|
82
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_metrics.go
generated
vendored
Normal file
82
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_metrics.go
generated
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// apiCallMetrics bundles the two Prometheus collectors maintained for Azure
// API calls: a latency histogram and an error counter, both sharing the same
// label set (see metricLabels).
type apiCallMetrics struct {
	latency *prometheus.HistogramVec
	errors  *prometheus.CounterVec
}
|
||||
|
||||
var (
	// metricLabels is the fixed label set attached to every API-call metric.
	// Its order must match the attribute order built by newMetricContext.
	metricLabels = []string{
		"request",         // API function that is being invoked
		"resource_group",  // Resource group of the resource being monitored
		"subscription_id", // Subscription ID of the resource being monitored
	}

	// apiMetrics is the process-wide registered collector pair used by
	// every metricContext in this package.
	apiMetrics = registerAPIMetrics(metricLabels...)
)
|
||||
|
||||
// metricContext captures the start time and pre-computed label values for a
// single in-flight Azure API call; Observe finalizes the measurement.
type metricContext struct {
	start      time.Time // when the API call began
	attributes []string  // label values, in metricLabels order
}
|
||||
|
||||
func newMetricContext(prefix, request, resourceGroup, subscriptionID string) *metricContext {
|
||||
return &metricContext{
|
||||
start: time.Now(),
|
||||
attributes: []string{prefix + "_" + request, resourceGroup, subscriptionID},
|
||||
}
|
||||
}
|
||||
|
||||
// Observe records the elapsed time since the context was created into the
// shared latency histogram, and bumps the error counter when err is non-nil.
func (mc *metricContext) Observe(err error) {
	apiMetrics.latency.WithLabelValues(mc.attributes...).Observe(
		time.Since(mc.start).Seconds())
	if err != nil {
		apiMetrics.errors.WithLabelValues(mc.attributes...).Inc()
	}
}
|
||||
|
||||
// registerAPIMetrics builds and registers the latency histogram and error
// counter with the given label names. MustRegister panics on duplicate
// registration, so this must run only once per process (it is invoked from
// the package-level apiMetrics initializer).
func registerAPIMetrics(attributes ...string) *apiCallMetrics {
	metrics := &apiCallMetrics{
		latency: prometheus.NewHistogramVec(
			prometheus.HistogramOpts{
				Name: "cloudprovider_azure_api_request_duration_seconds",
				Help: "Latency of an Azure API call",
			},
			attributes,
		),
		errors: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Name: "cloudprovider_azure_api_request_errors",
				Help: "Number of errors for an Azure API call",
			},
			attributes,
		),
	}

	prometheus.MustRegister(metrics.latency)
	prometheus.MustRegister(metrics.errors)

	return metrics
}
|
39
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_metrics_test.go
generated
vendored
Normal file
39
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_metrics_test.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestAzureMetricLabelCardinality guards against label/value drift: the
// attributes built by newMetricContext must line up 1:1 with metricLabels,
// otherwise Prometheus WithLabelValues calls would panic at runtime.
func TestAzureMetricLabelCardinality(t *testing.T) {
	mc := newMetricContext("test", "create", "resource_group", "subscription_id")
	assert.Len(t, mc.attributes, len(metricLabels), "cardinalities of labels and values must match")
}
|
||||
|
||||
func TestAzureMetricLabelPrefix(t *testing.T) {
|
||||
mc := newMetricContext("prefix", "request", "resource_group", "subscription_id")
|
||||
found := false
|
||||
for _, attribute := range mc.attributes {
|
||||
if attribute == "prefix_request" {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "request label must be prefixed")
|
||||
}
|
188
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go
generated
vendored
Normal file
188
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go
generated
vendored
Normal file
@@ -0,0 +1,188 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// ListRoutes lists all managed routes that belong to the specified clusterName
// The route-table lookup and the translation to cloudprovider routes are
// split so processRoutes can be unit-tested with a fabricated table.
func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
	glog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName)
	routeTable, existsRouteTable, err := az.getRouteTable()
	return processRoutes(routeTable, existsRouteTable, err)
}
|
||||
|
||||
// Injectable for testing
// processRoutes converts an Azure route table into cloudprovider routes.
// err is passed through verbatim so callers can chain the table lookup;
// a missing table yields an empty (non-nil) slice.
func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cloudprovider.Route, error) {
	if err != nil {
		return nil, err
	}
	if !exists {
		return []*cloudprovider.Route{}, nil
	}

	var kubeRoutes []*cloudprovider.Route
	if routeTable.RouteTablePropertiesFormat != nil && routeTable.Routes != nil {
		kubeRoutes = make([]*cloudprovider.Route, len(*routeTable.Routes))
		for i, route := range *routeTable.Routes {
			// The node name is stashed in the route name (see
			// mapNodeNameToRouteName) because Azure routes carry no tags.
			instance := mapRouteNameToNodeName(*route.Name)
			cidr := *route.AddressPrefix
			glog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr)

			kubeRoutes[i] = &cloudprovider.Route{
				Name:            *route.Name,
				TargetNode:      instance,
				DestinationCIDR: cidr,
			}
		}
	}

	glog.V(10).Info("ListRoutes: FINISH")
	return kubeRoutes, nil
}
|
||||
|
||||
func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error {
|
||||
if _, existsRouteTable, err := az.getRouteTable(); err != nil {
|
||||
glog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
|
||||
return err
|
||||
} else if existsRouteTable {
|
||||
return nil
|
||||
}
|
||||
return az.createRouteTable()
|
||||
}
|
||||
|
||||
// createRouteTable creates the cluster route table (az.RouteTableName) in the
// configured location, retrying with the package backoff helper when the
// initial ARM call returns a retryable HTTP status, and invalidates the
// route-table cache on success.
func (az *Cloud) createRouteTable() error {
	routeTable := network.RouteTable{
		Name:                       to.StringPtr(az.RouteTableName),
		Location:                   to.StringPtr(az.Location),
		RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
	}

	glog.V(3).Infof("createRouteTableIfNotExists: creating routetable. routeTableName=%q", az.RouteTableName)
	ctx, cancel := getContextWithCancel()
	defer cancel()
	resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, routeTable)
	glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName)
	// Fall back to the retrying path only when backoff is enabled and the
	// response status is one the helper considers retryable.
	if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
		glog.V(2).Infof("createRouteTableIfNotExists backing off: creating routetable. routeTableName=%q", az.RouteTableName)
		retryErr := az.CreateOrUpdateRouteTableWithRetry(routeTable)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("createRouteTableIfNotExists abort backoff: creating routetable. routeTableName=%q", az.RouteTableName)
		}
	}
	if err != nil {
		return err
	}

	// Invalidate the cache right after updating
	az.rtCache.Delete(az.RouteTableName)
	return nil
}
|
||||
|
||||
// CreateRoute creates the described managed route
// route.Name will be ignored, although the cloud-provider may use nameHint
// to create a more user-meaningful name.
// The route's next hop is the target node's IP, with the node name encoded
// into the route name (see mapNodeNameToRouteName) for later listing.
func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
	glog.V(2).Infof("CreateRoute: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
	if err := az.createRouteTableIfNotExists(clusterName, kubeRoute); err != nil {
		return err
	}
	targetIP, _, err := az.getIPForMachine(kubeRoute.TargetNode)
	if err != nil {
		return err
	}

	routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
	route := network.Route{
		Name: to.StringPtr(routeName),
		RoutePropertiesFormat: &network.RoutePropertiesFormat{
			AddressPrefix:    to.StringPtr(kubeRoute.DestinationCIDR),
			NextHopType:      network.RouteNextHopTypeVirtualAppliance,
			NextHopIPAddress: to.StringPtr(targetIP),
		},
	}

	glog.V(3).Infof("CreateRoute: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
	// NOTE: the incoming ctx is shadowed by a fresh cancellable context here.
	ctx, cancel := getContextWithCancel()
	defer cancel()
	resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route)
	glog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): end", az.RouteTableName)
	// Retry via the backoff helper when enabled and the response is retryable.
	if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
		glog.V(2).Infof("CreateRoute backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
		retryErr := az.CreateOrUpdateRouteWithRetry(route)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("CreateRoute abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
		}
	}
	if err != nil {
		return err
	}

	glog.V(2).Infof("CreateRoute: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
	return nil
}
|
||||
|
||||
// DeleteRoute deletes the specified managed route
// Route should be as returned by ListRoutes
// The route name is derived from the target node name, mirroring CreateRoute.
func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute *cloudprovider.Route) error {
	glog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)

	// NOTE: the incoming ctx is shadowed by a fresh cancellable context here.
	ctx, cancel := getContextWithCancel()
	defer cancel()
	routeName := mapNodeNameToRouteName(kubeRoute.TargetNode)
	resp, err := az.RoutesClient.Delete(ctx, az.ResourceGroup, az.RouteTableName, routeName)
	glog.V(10).Infof("RoutesClient.Delete(%q): end", az.RouteTableName)

	// Retry via the backoff helper when enabled and the response is retryable.
	if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
		glog.V(2).Infof("DeleteRoute backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
		retryErr := az.DeleteRouteWithRetry(routeName)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("DeleteRoute abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
		}
	}
	if err != nil {
		return err
	}

	glog.V(2).Infof("DeleteRoute: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
	return nil
}
|
||||
|
||||
// This must be kept in sync with mapRouteNameToNodeName.
|
||||
// These two functions enable stashing the instance name in the route
|
||||
// and then retrieving it later when listing. This is needed because
|
||||
// Azure does not let you put tags/descriptions on the Route itself.
|
||||
func mapNodeNameToRouteName(nodeName types.NodeName) string {
|
||||
return fmt.Sprintf("%s", nodeName)
|
||||
}
|
||||
|
||||
// Used with mapNodeNameToRouteName. See comment on mapNodeNameToRouteName.
|
||||
func mapRouteNameToNodeName(routeName string) types.NodeName {
|
||||
return types.NodeName(fmt.Sprintf("%s", routeName))
|
||||
}
|
342
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes_test.go
generated
vendored
Normal file
342
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes_test.go
generated
vendored
Normal file
@@ -0,0 +1,342 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
)
|
||||
|
||||
// TestDeleteRoute verifies that DeleteRoute removes a pre-seeded route
// entry from the fake routes client's store while keeping the table entry.
func TestDeleteRoute(t *testing.T) {
	fakeRoutes := newFakeRoutesClient()

	cloud := &Cloud{
		RoutesClient: fakeRoutes,
		Config: Config{
			ResourceGroup:  "foo",
			RouteTableName: "bar",
			Location:       "location",
		},
	}
	route := cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/24"}
	routeName := mapNodeNameToRouteName(route.TargetNode)

	// Seed the fake store with the route that DeleteRoute should remove.
	fakeRoutes.FakeStore = map[string]map[string]network.Route{
		cloud.RouteTableName: {
			routeName: {},
		},
	}

	err := cloud.DeleteRoute(context.TODO(), "cluster", &route)
	if err != nil {
		t.Errorf("unexpected error deleting route: %v", err)
		t.FailNow()
	}

	// The route table map must still exist...
	mp, found := fakeRoutes.FakeStore[cloud.RouteTableName]
	if !found {
		t.Errorf("unexpected missing item for %s", cloud.RouteTableName)
		t.FailNow()
	}
	// ...but the route itself must be gone.
	ob, found := mp[routeName]
	if found {
		t.Errorf("unexpectedly found: %v that should have been deleted.", ob)
	}
}
|
||||
|
||||
func TestCreateRoute(t *testing.T) {
|
||||
fakeTable := newFakeRouteTablesClient()
|
||||
fakeVM := &fakeVMSet{}
|
||||
fakeRoutes := newFakeRoutesClient()
|
||||
|
||||
cloud := &Cloud{
|
||||
RouteTablesClient: fakeTable,
|
||||
RoutesClient: fakeRoutes,
|
||||
vmSet: fakeVM,
|
||||
Config: Config{
|
||||
ResourceGroup: "foo",
|
||||
RouteTableName: "bar",
|
||||
Location: "location",
|
||||
},
|
||||
}
|
||||
cache, _ := cloud.newRouteTableCache()
|
||||
cloud.rtCache = cache
|
||||
|
||||
expectedTable := network.RouteTable{
|
||||
Name: &cloud.RouteTableName,
|
||||
Location: &cloud.Location,
|
||||
}
|
||||
fakeTable.FakeStore = map[string]map[string]network.RouteTable{}
|
||||
fakeTable.FakeStore[cloud.ResourceGroup] = map[string]network.RouteTable{
|
||||
cloud.RouteTableName: expectedTable,
|
||||
}
|
||||
route := cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/24"}
|
||||
|
||||
nodeIP := "2.4.6.8"
|
||||
fakeVM.NodeToIP = map[string]string{
|
||||
"node": nodeIP,
|
||||
}
|
||||
|
||||
err := cloud.CreateRoute(context.TODO(), "cluster", "unused", &route)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error create if not exists route table: %v", err)
|
||||
t.FailNow()
|
||||
}
|
||||
if len(fakeTable.Calls) != 1 || fakeTable.Calls[0] != "Get" {
|
||||
t.Errorf("unexpected calls create if not exists, exists: %v", fakeTable.Calls)
|
||||
}
|
||||
|
||||
routeName := mapNodeNameToRouteName(route.TargetNode)
|
||||
routeInfo, found := fakeRoutes.FakeStore[cloud.RouteTableName][routeName]
|
||||
if !found {
|
||||
t.Errorf("could not find route: %v in %v", routeName, fakeRoutes.FakeStore)
|
||||
t.FailNow()
|
||||
}
|
||||
if *routeInfo.AddressPrefix != route.DestinationCIDR {
|
||||
t.Errorf("Expected cidr: %s, saw %s", *routeInfo.AddressPrefix, route.DestinationCIDR)
|
||||
}
|
||||
if routeInfo.NextHopType != network.RouteNextHopTypeVirtualAppliance {
|
||||
t.Errorf("Expected next hop: %v, saw %v", network.RouteNextHopTypeVirtualAppliance, routeInfo.NextHopType)
|
||||
}
|
||||
if *routeInfo.NextHopIPAddress != nodeIP {
|
||||
t.Errorf("Expected IP address: %s, saw %s", nodeIP, *routeInfo.NextHopIPAddress)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCreateRouteTableIfNotExists_Exists verifies that when the route table
// already exists, createRouteTableIfNotExists issues only a Get and never a
// CreateOrUpdate.
func TestCreateRouteTableIfNotExists_Exists(t *testing.T) {
	fake := newFakeRouteTablesClient()
	cloud := &Cloud{
		RouteTablesClient: fake,
		Config: Config{
			ResourceGroup:  "foo",
			RouteTableName: "bar",
			Location:       "location",
		},
	}
	cache, _ := cloud.newRouteTableCache()
	cloud.rtCache = cache

	// Seed the fake store so the table is already present.
	expectedTable := network.RouteTable{
		Name:     &cloud.RouteTableName,
		Location: &cloud.Location,
	}
	fake.FakeStore = map[string]map[string]network.RouteTable{}
	fake.FakeStore[cloud.ResourceGroup] = map[string]network.RouteTable{
		cloud.RouteTableName: expectedTable,
	}
	err := cloud.createRouteTableIfNotExists("clusterName", &cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/16"})
	if err != nil {
		t.Errorf("unexpected error create if not exists route table: %v", err)
		t.FailNow()
	}
	// Exactly one call is expected: the existence check.
	if len(fake.Calls) != 1 || fake.Calls[0] != "Get" {
		t.Errorf("unexpected calls create if not exists, exists: %v", fake.Calls)
	}
}
|
||||
|
||||
// TestCreateRouteTableIfNotExists_NotExists verifies that when the route
// table is absent, createRouteTableIfNotExists creates it (Get followed by
// CreateOrUpdate) with the configured name and location.
func TestCreateRouteTableIfNotExists_NotExists(t *testing.T) {
	fake := newFakeRouteTablesClient()
	cloud := &Cloud{
		RouteTablesClient: fake,
		Config: Config{
			ResourceGroup:  "foo",
			RouteTableName: "bar",
			Location:       "location",
		},
	}
	cache, _ := cloud.newRouteTableCache()
	cloud.rtCache = cache

	// The fake store is intentionally left empty: the table must be created.
	expectedTable := network.RouteTable{
		Name:     &cloud.RouteTableName,
		Location: &cloud.Location,
	}

	err := cloud.createRouteTableIfNotExists("clusterName", &cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/16"})
	if err != nil {
		t.Errorf("unexpected error create if not exists route table: %v", err)
		t.FailNow()
	}

	// The created table must carry the configured name and location.
	table := fake.FakeStore[cloud.ResourceGroup][cloud.RouteTableName]
	if *table.Location != *expectedTable.Location {
		t.Errorf("mismatch: %s vs %s", *table.Location, *expectedTable.Location)
	}
	if *table.Name != *expectedTable.Name {
		t.Errorf("mismatch: %s vs %s", *table.Name, *expectedTable.Name)
	}
	if len(fake.Calls) != 2 || fake.Calls[0] != "Get" || fake.Calls[1] != "CreateOrUpdate" {
		t.Errorf("unexpected calls create if not exists, exists: %v", fake.Calls)
	}
}
|
||||
|
||||
// TestCreateRouteTable verifies that createRouteTable stores a table with
// the configured name and location in the fake route tables client.
func TestCreateRouteTable(t *testing.T) {
	fake := newFakeRouteTablesClient()
	cloud := &Cloud{
		RouteTablesClient: fake,
		Config: Config{
			ResourceGroup:  "foo",
			RouteTableName: "bar",
			Location:       "location",
		},
	}
	cache, _ := cloud.newRouteTableCache()
	cloud.rtCache = cache

	expectedTable := network.RouteTable{
		Name:     &cloud.RouteTableName,
		Location: &cloud.Location,
	}

	err := cloud.createRouteTable()
	if err != nil {
		t.Errorf("unexpected error in creating route table: %v", err)
		t.FailNow()
	}

	// The table should land under the configured resource group / name.
	table := fake.FakeStore["foo"]["bar"]
	if *table.Location != *expectedTable.Location {
		t.Errorf("mismatch: %s vs %s", *table.Location, *expectedTable.Location)
	}
	if *table.Name != *expectedTable.Name {
		t.Errorf("mismatch: %s vs %s", *table.Name, *expectedTable.Name)
	}
}
|
||||
|
||||
// TestProcessRoutes is a table-driven test for processRoutes, covering the
// error path, a missing table, nil/empty route lists, and one- and two-route
// tables whose entries map back to cloudprovider.Route values.
func TestProcessRoutes(t *testing.T) {
	tests := []struct {
		rt            network.RouteTable // input route table
		exists        bool               // whether the table was found
		err           error              // error passed through to processRoutes
		expectErr     bool
		expectedError string
		expectedRoute []cloudprovider.Route
		name          string
	}{
		{
			err:           fmt.Errorf("test error"),
			expectErr:     true,
			expectedError: "test error",
		},
		{
			exists: false,
			name:   "doesn't exist",
		},
		{
			rt:     network.RouteTable{},
			exists: true,
			name:   "nil routes",
		},
		{
			rt: network.RouteTable{
				RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
			},
			exists: true,
			name:   "no routes",
		},
		{
			rt: network.RouteTable{
				RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
					Routes: &[]network.Route{
						{
							Name: to.StringPtr("name"),
							RoutePropertiesFormat: &network.RoutePropertiesFormat{
								AddressPrefix: to.StringPtr("1.2.3.4/16"),
							},
						},
					},
				},
			},
			exists: true,
			expectedRoute: []cloudprovider.Route{
				{
					Name:            "name",
					TargetNode:      mapRouteNameToNodeName("name"),
					DestinationCIDR: "1.2.3.4/16",
				},
			},
			name: "one route",
		},
		{
			rt: network.RouteTable{
				RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
					Routes: &[]network.Route{
						{
							Name: to.StringPtr("name"),
							RoutePropertiesFormat: &network.RoutePropertiesFormat{
								AddressPrefix: to.StringPtr("1.2.3.4/16"),
							},
						},
						{
							Name: to.StringPtr("name2"),
							RoutePropertiesFormat: &network.RoutePropertiesFormat{
								AddressPrefix: to.StringPtr("5.6.7.8/16"),
							},
						},
					},
				},
			},
			exists: true,
			expectedRoute: []cloudprovider.Route{
				{
					Name:            "name",
					TargetNode:      mapRouteNameToNodeName("name"),
					DestinationCIDR: "1.2.3.4/16",
				},
				{
					Name:            "name2",
					TargetNode:      mapRouteNameToNodeName("name2"),
					DestinationCIDR: "5.6.7.8/16",
				},
			},
			name: "more routes",
		},
	}
	for _, test := range tests {
		routes, err := processRoutes(test.rt, test.exists, test.err)
		// Error expectations first; a wrong/missing error skips the rest.
		if test.expectErr {
			if err == nil {
				t.Errorf("%s: unexpected non-error", test.name)
				continue
			}
			if err.Error() != test.expectedError {
				t.Errorf("%s: Expected error: %v, saw error: %v", test.name, test.expectedError, err.Error())
				continue
			}
		}
		if !test.expectErr && err != nil {
			t.Errorf("%s; unexpected error: %v", test.name, err)
			continue
		}
		// Compare returned routes element-wise against expectations.
		if len(routes) != len(test.expectedRoute) {
			t.Errorf("%s: Unexpected difference: %#v vs %#v", test.name, routes, test.expectedRoute)
			continue
		}
		for ix := range test.expectedRoute {
			if !reflect.DeepEqual(test.expectedRoute[ix], *routes[ix]) {
				t.Errorf("%s: Unexpected difference: %#v vs %#v", test.name, test.expectedRoute[ix], *routes[ix])
			}
		}
	}
}
|
729
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard.go
generated
vendored
Normal file
729
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard.go
generated
vendored
Normal file
@@ -0,0 +1,729 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
)
|
||||
|
||||
const (
	// Valid NSG rule priority range used when allocating new rule priorities.
	loadBalancerMinimumPriority = 500
	loadBalancerMaximumPriority = 4096

	// ARM resource-ID printf templates: subscription, resource group, then
	// the resource-specific name segments.
	machineIDTemplate           = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s"
	availabilitySetIDTemplate   = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/availabilitySets/%s"
	frontendIPConfigIDTemplate  = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/frontendIPConfigurations/%s"
	backendPoolIDTemplate       = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/backendAddressPools/%s"
	loadBalancerProbeIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/probes/%s"

	// InternalLoadBalancerNameSuffix is the suffix appended to internal load
	// balancer names.
	InternalLoadBalancerNameSuffix = "-internal"

	// nodeLabelRole specifies the role of a node
	nodeLabelRole = "kubernetes.io/role"

	storageAccountNameMaxLength = 24
)
|
||||
|
||||
// errNotInVMSet is returned when a VM does not belong to the expected vmset.
var errNotInVMSet = errors.New("vm is not in the vmset")

// providerIDRE captures the VM name from a provider ID string.
var providerIDRE = regexp.MustCompile(`^` + CloudProviderName + `://(?:.*)/Microsoft.Compute/virtualMachines/(.+)$`)

// backendPoolIDRE captures the load balancer name from a backend pool ID.
var backendPoolIDRE = regexp.MustCompile(`^/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Network/loadBalancers/(.+)/backendAddressPools/(?:.*)`)
|
||||
|
||||
// getStandardMachineID returns the full identifier of a virtual machine.
// It fills machineIDTemplate with the cloud's subscription and resource
// group plus the given machine name.
func (az *Cloud) getStandardMachineID(machineName string) string {
	return fmt.Sprintf(
		machineIDTemplate,
		az.SubscriptionID,
		az.ResourceGroup,
		machineName)
}
|
||||
|
||||
// getAvailabilitySetID returns the full ARM identifier of an availability
// set in the cloud's subscription and resource group.
func (az *Cloud) getAvailabilitySetID(availabilitySetName string) string {
	return fmt.Sprintf(
		availabilitySetIDTemplate,
		az.SubscriptionID,
		az.ResourceGroup,
		availabilitySetName)
}
|
||||
|
||||
// returns the full identifier of a loadbalancer frontendipconfiguration.
|
||||
func (az *Cloud) getFrontendIPConfigID(lbName, backendPoolName string) string {
|
||||
return fmt.Sprintf(
|
||||
frontendIPConfigIDTemplate,
|
||||
az.SubscriptionID,
|
||||
az.ResourceGroup,
|
||||
lbName,
|
||||
backendPoolName)
|
||||
}
|
||||
|
||||
// getBackendPoolID returns the full ARM identifier of a load balancer
// backend address pool.
func (az *Cloud) getBackendPoolID(lbName, backendPoolName string) string {
	return fmt.Sprintf(
		backendPoolIDTemplate,
		az.SubscriptionID,
		az.ResourceGroup,
		lbName,
		backendPoolName)
}
|
||||
|
||||
// getLoadBalancerProbeID returns the full ARM identifier of a load balancer
// probe. The probe is named after the load balancer rule it backs.
func (az *Cloud) getLoadBalancerProbeID(lbName, lbRuleName string) string {
	return fmt.Sprintf(
		loadBalancerProbeIDTemplate,
		az.SubscriptionID,
		az.ResourceGroup,
		lbName,
		lbRuleName)
}
|
||||
|
||||
// mapLoadBalancerNameToVMSet derives the VM set name from a load balancer
// name: the "-internal" suffix is stripped, and a name equal to the cluster
// name maps to the primary VM set.
func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) (vmSetName string) {
	vmSetName = strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix)
	if strings.EqualFold(clusterName, vmSetName) {
		vmSetName = az.vmSet.GetPrimaryVMSetName()
	}

	return vmSetName
}
|
||||
|
||||
// For a load balancer, all frontend ip should reference either a subnet or publicIpAddress.
// Thus Azure do not allow mixed type (public and internal) load balancer.
// So we'd have a separate name for internal load balancer.
// This would be the name for Azure LoadBalancer resource.
func (az *Cloud) getLoadBalancerName(clusterName string, vmSetName string, isInternal bool) string {
	// The primary VM set (or any set under a standard LB) shares the
	// cluster-named load balancer; other sets get their own.
	lbNamePrefix := vmSetName
	if strings.EqualFold(vmSetName, az.vmSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() {
		lbNamePrefix = clusterName
	}
	if isInternal {
		return fmt.Sprintf("%s%s", lbNamePrefix, InternalLoadBalancerNameSuffix)
	}
	return lbNamePrefix
}
|
||||
|
||||
// isMasterNode returns true if the node has a master role label.
|
||||
// The master role is determined by looking for:
|
||||
// * a kubernetes.io/role="master" label
|
||||
func isMasterNode(node *v1.Node) bool {
|
||||
if val, ok := node.Labels[nodeLabelRole]; ok && val == "master" {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// getLastSegment returns the deepest child's identifier from a full
// identifier string: the text following the final "/" (or the whole string
// when it contains no "/"). An empty last segment is an error.
func getLastSegment(ID string) (string, error) {
	name := ID[strings.LastIndex(ID, "/")+1:]
	if name == "" {
		return "", fmt.Errorf("resource name was missing from identifier")
	}

	return name, nil
}
|
||||
|
||||
// returns the equivalent LoadBalancerRule, SecurityRule and LoadBalancerProbe
// protocol types for the given Kubernetes protocol type.
//
// For UDP no probe protocol is returned (nil pointer). On the
// unsupported-protocol error path the returned pointers are non-nil zero
// values, so callers must check the error before using them.
func getProtocolsFromKubernetesProtocol(protocol v1.Protocol) (*network.TransportProtocol, *network.SecurityRuleProtocol, *network.ProbeProtocol, error) {
	var transportProto network.TransportProtocol
	var securityProto network.SecurityRuleProtocol
	var probeProto network.ProbeProtocol

	switch protocol {
	case v1.ProtocolTCP:
		transportProto = network.TransportProtocolTCP
		securityProto = network.SecurityRuleProtocolTCP
		probeProto = network.ProbeProtocolTCP
		return &transportProto, &securityProto, &probeProto, nil
	case v1.ProtocolUDP:
		transportProto = network.TransportProtocolUDP
		securityProto = network.SecurityRuleProtocolUDP
		return &transportProto, &securityProto, nil, nil
	default:
		return &transportProto, &securityProto, &probeProto, fmt.Errorf("only TCP and UDP are supported for Azure LoadBalancers")
	}

}
|
||||
|
||||
// This returns the full identifier of the primary NIC for the given VM.
// With exactly one NIC it is returned directly; otherwise the NIC whose
// Primary flag is set wins. Assumes machine.NetworkProfile.NetworkInterfaces
// and the Primary/ID pointers are non-nil — TODO confirm callers guarantee
// this (a nil here would panic).
func getPrimaryInterfaceID(machine compute.VirtualMachine) (string, error) {
	if len(*machine.NetworkProfile.NetworkInterfaces) == 1 {
		return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil
	}

	for _, ref := range *machine.NetworkProfile.NetworkInterfaces {
		if *ref.Primary {
			return *ref.ID, nil
		}
	}

	return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", *machine.Name)
}
|
||||
|
||||
// getPrimaryIPConfig returns the primary IP configuration of a NIC: the sole
// configuration when there is exactly one, otherwise the one whose Primary
// flag is set. Errors if the configurations are nil or none is primary.
func getPrimaryIPConfig(nic network.Interface) (*network.InterfaceIPConfiguration, error) {
	if nic.IPConfigurations == nil {
		return nil, fmt.Errorf("nic.IPConfigurations for nic (nicname=%q) is nil", *nic.Name)
	}

	if len(*nic.IPConfigurations) == 1 {
		return &((*nic.IPConfigurations)[0]), nil
	}

	for _, ref := range *nic.IPConfigurations {
		if *ref.Primary {
			// Returning the address of the loop variable is safe here: we
			// return immediately, so it refers to this iteration's copy.
			return &ref, nil
		}
	}

	return nil, fmt.Errorf("failed to determine the primary ipconfig. nicname=%q", *nic.Name)
}
|
||||
|
||||
// isInternalLoadBalancer reports whether the load balancer's name carries
// the "-internal" suffix (see InternalLoadBalancerNameSuffix).
func isInternalLoadBalancer(lb *network.LoadBalancer) bool {
	return strings.HasSuffix(*lb.Name, InternalLoadBalancerNameSuffix)
}
|
||||
|
||||
// getBackendPoolName returns the backend address pool name for a cluster;
// it is simply the cluster name.
func getBackendPoolName(clusterName string) string {
	return clusterName
}
|
||||
|
||||
// getLoadBalancerRuleName builds the LB rule name from the service's rule
// prefix, protocol and port, with the subnet name interposed for internal
// (subnet-scoped) load balancers.
func getLoadBalancerRuleName(service *v1.Service, port v1.ServicePort, subnetName *string) string {
	if subnetName == nil {
		return fmt.Sprintf("%s-%s-%d", getRulePrefix(service), port.Protocol, port.Port)
	}
	return fmt.Sprintf("%s-%s-%s-%d", getRulePrefix(service), *subnetName, port.Protocol, port.Port)
}
|
||||
|
||||
func getSecurityRuleName(service *v1.Service, port v1.ServicePort, sourceAddrPrefix string) string {
|
||||
if useSharedSecurityRule(service) {
|
||||
safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1)
|
||||
return fmt.Sprintf("shared-%s-%d-%s", port.Protocol, port.Port, safePrefix)
|
||||
}
|
||||
safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1)
|
||||
return fmt.Sprintf("%s-%s-%d-%s", getRulePrefix(service), port.Protocol, port.Port, safePrefix)
|
||||
}
|
||||
|
||||
// This returns a human-readable version of the Service used to tag some resources.
// This is only used for human-readable convenience, and not to filter.
func getServiceName(service *v1.Service) string {
	return fmt.Sprintf("%s/%s", service.Namespace, service.Name)
}
|
||||
|
||||
// This returns a prefix for loadbalancer/security rules, derived from the
// cloud-provider load balancer name for the service.
func getRulePrefix(service *v1.Service) string {
	return cloudprovider.GetLoadBalancerName(service)
}
|
||||
|
||||
// getPublicIPName builds the public IP resource name for a service:
// "<clusterName>-<loadBalancerName>".
func getPublicIPName(clusterName string, service *v1.Service) string {
	return fmt.Sprintf("%s-%s", clusterName, cloudprovider.GetLoadBalancerName(service))
}
|
||||
|
||||
// serviceOwnsRule reports whether the given rule name belongs to the
// service, by case-insensitive prefix match against the service's rule
// prefix.
func serviceOwnsRule(service *v1.Service, rule string) bool {
	prefix := getRulePrefix(service)
	return strings.HasPrefix(strings.ToUpper(rule), strings.ToUpper(prefix))
}
|
||||
|
||||
// serviceOwnsFrontendIP reports whether a frontend IP configuration belongs
// to the service, by (case-sensitive) prefix match against the service's
// load-balancer name.
func serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, service *v1.Service) bool {
	baseName := cloudprovider.GetLoadBalancerName(service)
	return strings.HasPrefix(*fip.Name, baseName)
}
|
||||
|
||||
// getFrontendIPConfigName builds the frontend IP configuration name for a
// service, appending the subnet name when one is supplied (internal LBs).
func getFrontendIPConfigName(service *v1.Service, subnetName *string) string {
	baseName := cloudprovider.GetLoadBalancerName(service)
	if subnetName != nil {
		return fmt.Sprintf("%s-%s", baseName, *subnetName)
	}
	return baseName
}
|
||||
|
||||
// This returns the next available rule priority level for a given set of security rules.
|
||||
func getNextAvailablePriority(rules []network.SecurityRule) (int32, error) {
|
||||
var smallest int32 = loadBalancerMinimumPriority
|
||||
var spread int32 = 1
|
||||
|
||||
outer:
|
||||
for smallest < loadBalancerMaximumPriority {
|
||||
for _, rule := range rules {
|
||||
if *rule.Priority == smallest {
|
||||
smallest += spread
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
// no one else had it
|
||||
return smallest, nil
|
||||
}
|
||||
|
||||
return -1, fmt.Errorf("securityGroup priorities are exhausted")
|
||||
}
|
||||
|
||||
// getIPForMachine returns the (private IP, public IP) pair for the named
// node, delegating to the configured VM set implementation.
func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, string, error) {
	return az.vmSet.GetIPByNodeName(string(nodeName))
}
|
||||
|
||||
var polyTable = crc32.MakeTable(crc32.Koopman)
|
||||
|
||||
//MakeCRC32 : convert string to CRC32 format
|
||||
func MakeCRC32(str string) string {
|
||||
crc := crc32.New(polyTable)
|
||||
crc.Write([]byte(str))
|
||||
hash := crc.Sum32()
|
||||
return strconv.FormatUint(uint64(hash), 10)
|
||||
}
|
||||
|
||||
// ExtractVMData : extract dataDisks, storageProfile, hardwareProfile from an
// unmarshalled VM JSON map. A descriptive error is returned when any
// expected fragment is missing or of the wrong type.
func ExtractVMData(vmData map[string]interface{}) (dataDisks []interface{},
	storageProfile map[string]interface{},
	hardwareProfile map[string]interface{}, err error) {
	props, ok := vmData["properties"].(map[string]interface{})
	if !ok {
		return nil, nil, nil, fmt.Errorf("convert vmData(properties) to map error")
	}

	if storageProfile, ok = props["storageProfile"].(map[string]interface{}); !ok {
		return nil, nil, nil, fmt.Errorf("convert vmData(storageProfile) to map error")
	}

	if hardwareProfile, ok = props["hardwareProfile"].(map[string]interface{}); !ok {
		return nil, nil, nil, fmt.Errorf("convert vmData(hardwareProfile) to map error")
	}

	if dataDisks, ok = storageProfile["dataDisks"].([]interface{}); !ok {
		return nil, nil, nil, fmt.Errorf("convert vmData(dataDisks) to map error")
	}
	return dataDisks, storageProfile, hardwareProfile, nil
}
|
||||
|
||||
// ExtractDiskData : extract provisioningState, diskState from an
// unmarshalled disk JSON fragment.
func ExtractDiskData(diskData interface{}) (provisioningState string, diskState string, err error) {
	fragment, ok := diskData.(map[string]interface{})
	if !ok {
		return "", "", fmt.Errorf("convert diskData to map error")
	}

	properties, ok := fragment["properties"].(map[string]interface{})
	if !ok {
		return "", "", fmt.Errorf("convert diskData(properties) to map error")
	}

	// if there is a disk, provisioningState property will be there; a missing
	// or non-string value yields "" (matching prior behavior).
	provisioningState, _ = properties["provisioningState"].(string)
	if ref, ok := properties["diskState"]; ok {
		diskState = ref.(string)
	}
	return provisioningState, diskState, nil
}
|
||||
|
||||
// availabilitySet implements VMSet interface for Azure availability sets.
// It embeds *Cloud so all client/config accessors are available directly.
type availabilitySet struct {
	*Cloud
}
|
||||
|
||||
// newAvailabilitySet creates a new availabilitySet backed by the given
// Cloud. (The previous comment referred to a nonexistent "newStandardSet".)
func newAvailabilitySet(az *Cloud) VMSet {
	return &availabilitySet{
		Cloud: az,
	}
}
|
||||
|
||||
// GetInstanceIDByNodeName gets the cloud provider ID by node name.
// It must return ("", cloudprovider.InstanceNotFound) if the instance does
// not exist or is no longer running.
func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error) {
	var machine compute.VirtualMachine
	var err error

	machine, err = as.getVirtualMachine(types.NodeName(name))
	if err == cloudprovider.InstanceNotFound {
		// Not-found is terminal; do not fall through to the retry path.
		return "", cloudprovider.InstanceNotFound
	}
	if err != nil {
		if as.CloudProviderBackoff {
			// Other failures are retried with backoff when enabled.
			glog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name)
			machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name))
			if err != nil {
				glog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name)
				return "", err
			}
		} else {
			return "", err
		}
	}
	return *machine.ID, nil
}
|
||||
|
||||
// GetNodeNameByProviderID gets the node name by provider ID.
func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
	// NodeName is part of providerID for standard instances.
	// providerIDRE has one capture group, so a match yields exactly 2 items.
	matches := providerIDRE.FindStringSubmatch(providerID)
	if len(matches) != 2 {
		return "", errors.New("error splitting providerID")
	}

	return types.NodeName(matches[1]), nil
}
|
||||
|
||||
// GetInstanceTypeByNodeName gets the instance type (VM size) by node name.
func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) {
	machine, err := as.getVirtualMachine(types.NodeName(name))
	if err != nil {
		glog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err)
		return "", err
	}

	return string(machine.HardwareProfile.VMSize), nil
}
|
||||
|
||||
// GetZoneByNodeName gets zone from instance view: the platform fault domain
// becomes the failure domain and the VM's location becomes the region.
// NOTE(review): assumes the instance view and PlatformFaultDomain pointers
// are populated — a VM fetched without the instance view would panic here.
func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
	vm, err := as.getVirtualMachine(types.NodeName(name))
	if err != nil {
		return cloudprovider.Zone{}, err
	}

	failureDomain := strconv.Itoa(int(*vm.VirtualMachineProperties.InstanceView.PlatformFaultDomain))
	zone := cloudprovider.Zone{
		FailureDomain: failureDomain,
		Region:        *(vm.Location),
	}
	return zone, nil
}
|
||||
|
||||
// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType.
func (as *availabilitySet) GetPrimaryVMSetName() string {
	return as.Config.PrimaryAvailabilitySetName
}
|
||||
|
||||
// GetIPByNodeName gets machine private IP and public IP by node name.
|
||||
func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error) {
|
||||
nic, err := as.GetPrimaryInterface(name)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
ipConfig, err := getPrimaryIPConfig(nic)
|
||||
if err != nil {
|
||||
glog.Errorf("as.GetIPByNodeName(%s) failed: getPrimaryIPConfig(%v), err=%v", name, nic, err)
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
privateIP := *ipConfig.PrivateIPAddress
|
||||
publicIP := ""
|
||||
if ipConfig.PublicIPAddress != nil && ipConfig.PublicIPAddress.ID != nil {
|
||||
pipID := *ipConfig.PublicIPAddress.ID
|
||||
pipName, err := getLastSegment(pipID)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("failed to publicIP name for node %q with pipID %q", name, pipID)
|
||||
}
|
||||
pip, existsPip, err := as.getPublicIPAddress(as.ResourceGroup, pipName)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if existsPip {
|
||||
publicIP = *pip.IPAddress
|
||||
}
|
||||
}
|
||||
|
||||
return privateIP, publicIP, nil
|
||||
}
|
||||
|
||||
// getAgentPoolAvailabiliySets lists the virtual machines for the resource group and then builds
// a list of availability sets that match the nodes available to k8s.
// Master nodes are skipped; every non-master node must map to an
// availability set or an error is returned.
// NOTE(review): the name has a typo ("Availabiliy"); it is kept because
// callers elsewhere reference it.
func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) {
	vms, err := as.VirtualMachineClientListWithRetry()
	if err != nil {
		glog.Errorf("as.getNodeAvailabilitySet - VirtualMachineClientListWithRetry failed, err=%v", err)
		return nil, err
	}
	// Index every VM by name so node lookups below are O(1).
	vmNameToAvailabilitySetID := make(map[string]string, len(vms))
	for vmx := range vms {
		vm := vms[vmx]
		if vm.AvailabilitySet != nil {
			vmNameToAvailabilitySetID[*vm.Name] = *vm.AvailabilitySet.ID
		}
	}
	// availabilitySetIDs de-duplicates; the result preserves first-seen order.
	availabilitySetIDs := sets.NewString()
	agentPoolAvailabilitySets = &[]string{}
	for nx := range nodes {
		nodeName := (*nodes[nx]).Name
		if isMasterNode(nodes[nx]) {
			continue
		}
		asID, ok := vmNameToAvailabilitySetID[nodeName]
		if !ok {
			glog.Errorf("as.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName)
			return nil, fmt.Errorf("Node (%s) - has no availability sets", nodeName)
		}
		if availabilitySetIDs.Has(asID) {
			// already added in the list
			continue
		}
		asName, err := getLastSegment(asID)
		if err != nil {
			glog.Errorf("as.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err)
			return nil, err
		}
		// AvailabilitySet ID is currently upper cased in a indeterministic way
		// We want to keep it lower case, before the ID get fixed
		asName = strings.ToLower(asName)

		*agentPoolAvailabilitySets = append(*agentPoolAvailabilitySets, asName)
	}

	return agentPoolAvailabilitySets, nil
}
|
||||
|
||||
// GetVMSetNames selects all possible availability sets or scale sets
|
||||
// (depending vmType configured) for service load balancer, if the service has
|
||||
// no loadbalancer mode annotaion returns the primary VMSet. If service annotation
|
||||
// for loadbalancer exists then return the eligible VMSet.
|
||||
func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) {
|
||||
hasMode, isAuto, serviceAvailabilitySetNames := getServiceLoadBalancerMode(service)
|
||||
if !hasMode {
|
||||
// no mode specified in service annotation default to PrimaryAvailabilitySetName
|
||||
availabilitySetNames = &[]string{as.Config.PrimaryAvailabilitySetName}
|
||||
return availabilitySetNames, nil
|
||||
}
|
||||
availabilitySetNames, err = as.getAgentPoolAvailabiliySets(nodes)
|
||||
if err != nil {
|
||||
glog.Errorf("as.GetVMSetNames - getAgentPoolAvailabiliySets failed err=(%v)", err)
|
||||
return nil, err
|
||||
}
|
||||
if len(*availabilitySetNames) == 0 {
|
||||
glog.Errorf("as.GetVMSetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes))
|
||||
return nil, fmt.Errorf("No availability sets found for nodes, node count(%d)", len(nodes))
|
||||
}
|
||||
// sort the list to have deterministic selection
|
||||
sort.Strings(*availabilitySetNames)
|
||||
if !isAuto {
|
||||
if serviceAvailabilitySetNames == nil || len(serviceAvailabilitySetNames) == 0 {
|
||||
return nil, fmt.Errorf("service annotation for LoadBalancerMode is empty, it should have __auto__ or availability sets value")
|
||||
}
|
||||
// validate availability set exists
|
||||
var found bool
|
||||
for sasx := range serviceAvailabilitySetNames {
|
||||
for asx := range *availabilitySetNames {
|
||||
if strings.EqualFold((*availabilitySetNames)[asx], serviceAvailabilitySetNames[sasx]) {
|
||||
found = true
|
||||
serviceAvailabilitySetNames[sasx] = (*availabilitySetNames)[asx]
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
glog.Errorf("as.GetVMSetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetNames[sasx])
|
||||
return nil, fmt.Errorf("availability set (%s) - not found", serviceAvailabilitySetNames[sasx])
|
||||
}
|
||||
}
|
||||
availabilitySetNames = &serviceAvailabilitySetNames
|
||||
}
|
||||
|
||||
return availabilitySetNames, nil
|
||||
}
|
||||
|
||||
// GetPrimaryInterface gets machine primary network interface by node name.
// It delegates to getPrimaryInterfaceWithVMSet with an empty vmSetName, which
// skips the availability-set membership check performed for load balancers.
func (as *availabilitySet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
	return as.getPrimaryInterfaceWithVMSet(nodeName, "")
}
|
||||
|
||||
// getPrimaryInterfaceWithVMSet gets machine primary network interface by node name and vmSet.
|
||||
func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName string) (network.Interface, error) {
|
||||
var machine compute.VirtualMachine
|
||||
|
||||
machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName))
|
||||
if err != nil {
|
||||
glog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName)
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
primaryNicID, err := getPrimaryInterfaceID(machine)
|
||||
if err != nil {
|
||||
return network.Interface{}, err
|
||||
}
|
||||
nicName, err := getLastSegment(primaryNicID)
|
||||
if err != nil {
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
// Check availability set name. Note that vmSetName is empty string when getting
|
||||
// the Node's IP address. While vmSetName is not empty, it should be checked with
|
||||
// Node's real availability set name:
|
||||
// - For basic SKU load balancer, errNotInVMSet should be returned if the node's
|
||||
// availability set is mismatched with vmSetName.
|
||||
// - For standard SKU load balancer, backend could belong to multiple VMAS, so we
|
||||
// don't check vmSet for it.
|
||||
if vmSetName != "" && !as.useStandardLoadBalancer() {
|
||||
expectedAvailabilitySetName := as.getAvailabilitySetID(vmSetName)
|
||||
if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) {
|
||||
glog.V(3).Infof(
|
||||
"GetPrimaryInterface: nic (%s) is not in the availabilitySet(%s)", nicName, vmSetName)
|
||||
return network.Interface{}, errNotInVMSet
|
||||
}
|
||||
}
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
nic, err := as.InterfacesClient.Get(ctx, as.ResourceGroup, nicName, "")
|
||||
if err != nil {
|
||||
return network.Interface{}, err
|
||||
}
|
||||
|
||||
return nic, nil
|
||||
}
|
||||
|
||||
// ensureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is
// participating in the specified LoadBalancer Backend Pool.
//
// Nodes that are not in vmSetName are skipped without error (errNotInVMSet is
// swallowed); any other failure is returned to the caller. The NIC is only
// written back to Azure when the pool is not already present.
func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error {
	vmName := mapNodeNameToVMName(nodeName)
	nic, err := as.getPrimaryInterfaceWithVMSet(vmName, vmSetName)
	if err != nil {
		if err == errNotInVMSet {
			// Membership mismatch is expected for basic-SKU LBs; not an error.
			glog.V(3).Infof("ensureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName)
			return nil
		}

		glog.Errorf("error: az.ensureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err)
		return err
	}

	var primaryIPConfig *network.InterfaceIPConfiguration
	primaryIPConfig, err = getPrimaryIPConfig(nic)
	if err != nil {
		return err
	}

	// Idempotency check: if the backend pool is already attached, do nothing.
	foundPool := false
	newBackendPools := []network.BackendAddressPool{}
	if primaryIPConfig.LoadBalancerBackendAddressPools != nil {
		newBackendPools = *primaryIPConfig.LoadBalancerBackendAddressPools
	}
	for _, existingPool := range newBackendPools {
		if strings.EqualFold(backendPoolID, *existingPool.ID) {
			foundPool = true
			break
		}
	}
	if !foundPool {
		if as.useStandardLoadBalancer() && len(newBackendPools) > 0 {
			// Although standard load balancer supports backends from multiple availability
			// sets, the same network interface couldn't be added to more than one load balancer of
			// the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain
			// about this.
			for _, pool := range newBackendPools {
				backendPool := *pool.ID
				matches := backendPoolIDRE.FindStringSubmatch(backendPool)
				if len(matches) == 2 {
					lbName := matches[1]
					// Same LB type (internal vs external) already holds this NIC.
					if strings.HasSuffix(lbName, InternalLoadBalancerNameSuffix) == isInternal {
						glog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, lbName)
						return nil
					}
				}
			}
		}

		newBackendPools = append(newBackendPools,
			network.BackendAddressPool{
				ID: to.StringPtr(backendPoolID),
			})

		primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools

		nicName := *nic.Name
		glog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName)
		ctx, cancel := getContextWithCancel()
		defer cancel()
		resp, err := as.InterfacesClient.CreateOrUpdate(ctx, as.ResourceGroup, *nic.Name, nic)
		glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name)
		// On retriable HTTP failures, fall back to the backoff-wrapped update;
		// retryErr (if any) replaces err so the final error reflects the retry.
		if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
			glog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err)
			retryErr := as.CreateOrUpdateInterfaceWithRetry(nic)
			if retryErr != nil {
				err = retryErr
				glog.V(2).Infof("nicupdate(%s) abort backoff: nic(%s) - updating", serviceName, nicName)
			}
		}
		if err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
// EnsureHostsInPool ensures the given Node's primary IP configurations are
|
||||
// participating in the specified LoadBalancer Backend Pool.
|
||||
func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
|
||||
hostUpdates := make([]func() error, 0, len(nodes))
|
||||
for _, node := range nodes {
|
||||
localNodeName := node.Name
|
||||
if as.useStandardLoadBalancer() && as.excludeMasterNodesFromStandardLB() && isMasterNode(node) {
|
||||
glog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID)
|
||||
continue
|
||||
}
|
||||
|
||||
f := func() error {
|
||||
err := as.ensureHostInPool(serviceName, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %q", serviceName, backendPoolID, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
hostUpdates = append(hostUpdates, f)
|
||||
}
|
||||
|
||||
errs := utilerrors.AggregateGoroutines(hostUpdates...)
|
||||
if errs != nil {
|
||||
return utilerrors.Flatten(errs)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
// For availability sets this is deliberately a no-op (see the comment below);
// the method exists to satisfy the VMSet interface.
func (as *availabilitySet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
	// Do nothing for availability set.
	return nil
}
|
||||
|
||||
// get a storage account by UUID
//
// generateStorageAccountName builds a candidate storage account name from the
// given prefix plus a random 32-hex-character suffix (a UUID with dashes
// stripped), lower-cased, truncated to fit the account-name length limit.
func generateStorageAccountName(accountNamePrefix string) string {
	uniqueID := strings.Replace(string(uuid.NewUUID()), "-", "", -1)
	accountName := strings.ToLower(accountNamePrefix + uniqueID)
	if len(accountName) > storageAccountNameMaxLength {
		// NOTE(review): this truncates to storageAccountNameMaxLength-1 characters,
		// one shorter than the constant suggests — looks like an off-by-one, but
		// behavior is preserved here; confirm intent upstream before changing.
		return accountName[:storageAccountNameMaxLength-1]
	}
	return accountName
}
|
253
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard_test.go
generated
vendored
Normal file
253
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_standard_test.go
generated
vendored
Normal file
@@ -0,0 +1,253 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func TestIsMasterNode(t *testing.T) {
|
||||
if isMasterNode(&v1.Node{}) {
|
||||
t.Errorf("Empty node should not be master!")
|
||||
}
|
||||
if isMasterNode(&v1.Node{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelRole: "worker",
|
||||
},
|
||||
},
|
||||
}) {
|
||||
t.Errorf("Node labelled 'worker' should not be master!")
|
||||
}
|
||||
if !isMasterNode(&v1.Node{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelRole: "master",
|
||||
},
|
||||
},
|
||||
}) {
|
||||
t.Errorf("Node should be master!")
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetLastSegment exercises getLastSegment over empty, trailing-slash, and
// multi-segment IDs: the final non-empty path segment is returned, and an
// empty final segment (or empty input) is an error.
func TestGetLastSegment(t *testing.T) {
	tests := []struct {
		ID        string // input resource ID
		expected  string // expected last segment ("" when an error is expected)
		expectErr bool   // whether getLastSegment should fail
	}{
		{
			ID:        "",
			expected:  "",
			expectErr: true,
		},
		{
			ID:        "foo/",
			expected:  "",
			expectErr: true,
		},
		{
			ID:        "foo/bar",
			expected:  "bar",
			expectErr: false,
		},
		{
			ID:        "foo/bar/baz",
			expected:  "baz",
			expectErr: false,
		},
	}

	for _, test := range tests {
		s, e := getLastSegment(test.ID)
		if test.expectErr && e == nil {
			t.Errorf("Expected err, but it was nil")
			continue
		}
		if !test.expectErr && e != nil {
			t.Errorf("Unexpected error: %v", e)
			continue
		}
		if s != test.expected {
			t.Errorf("expected: %s, got %s", test.expected, s)
		}
	}
}
|
||||
|
||||
// TestGenerateStorageAccountName checks that generated account names stay
// within [3, storageAccountNameMaxLength] characters and contain only
// lowercase letters and digits, for empty, short, and over-long prefixes.
func TestGenerateStorageAccountName(t *testing.T) {
	tests := []struct {
		prefix string
	}{
		{
			prefix: "",
		},
		{
			prefix: "pvc",
		},
		{
			// Longer than the length limit on its own; must be truncated.
			prefix: "1234512345123451234512345",
		},
	}

	for _, test := range tests {
		accountName := generateStorageAccountName(test.prefix)
		if len(accountName) > storageAccountNameMaxLength || len(accountName) < 3 {
			t.Errorf("input prefix: %s, output account name: %s, length not in [3,%d]", test.prefix, accountName, storageAccountNameMaxLength)
		}

		// Every character must be a lowercase letter or a digit.
		for _, char := range accountName {
			if (char < 'a' || char > 'z') && (char < '0' || char > '9') {
				t.Errorf("input prefix: %s, output account name: %s, there is non-digit or non-letter(%q)", test.prefix, accountName, char)
				break
			}
		}
	}
}
|
||||
|
||||
func TestMapLoadBalancerNameToVMSet(t *testing.T) {
|
||||
az := getTestCloud()
|
||||
az.PrimaryAvailabilitySetName = "primary"
|
||||
|
||||
cases := []struct {
|
||||
description string
|
||||
lbName string
|
||||
useStandardLB bool
|
||||
clusterName string
|
||||
expectedVMSet string
|
||||
}{
|
||||
{
|
||||
description: "default external LB should map to primary vmset",
|
||||
lbName: "azure",
|
||||
clusterName: "azure",
|
||||
expectedVMSet: "primary",
|
||||
},
|
||||
{
|
||||
description: "default internal LB should map to primary vmset",
|
||||
lbName: "azure-internal",
|
||||
clusterName: "azure",
|
||||
expectedVMSet: "primary",
|
||||
},
|
||||
{
|
||||
description: "non-default external LB should map to its own vmset",
|
||||
lbName: "azuretest-internal",
|
||||
clusterName: "azure",
|
||||
expectedVMSet: "azuretest",
|
||||
},
|
||||
{
|
||||
description: "non-default internal LB should map to its own vmset",
|
||||
lbName: "azuretest-internal",
|
||||
clusterName: "azure",
|
||||
expectedVMSet: "azuretest",
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
if c.useStandardLB {
|
||||
az.Config.LoadBalancerSku = loadBalancerSkuStandard
|
||||
} else {
|
||||
az.Config.LoadBalancerSku = loadBalancerSkuBasic
|
||||
}
|
||||
vmset := az.mapLoadBalancerNameToVMSet(c.lbName, c.clusterName)
|
||||
assert.Equal(t, c.expectedVMSet, vmset, c.description)
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetLoadBalancerName covers getLoadBalancerName across the four axes:
// primary vs non-primary vmset, internal vs external, and basic vs standard
// SKU. With the standard SKU the LB is always named after the cluster; with
// the basic SKU a non-primary vmset gets its own LB name. Internal LBs carry
// the "-internal" suffix in every case.
func TestGetLoadBalancerName(t *testing.T) {
	az := getTestCloud()
	az.PrimaryAvailabilitySetName = "primary"

	cases := []struct {
		description   string
		vmSet         string
		isInternal    bool
		useStandardLB bool
		clusterName   string
		expected      string
	}{
		{
			description: "default external LB should get primary vmset",
			vmSet:       "primary",
			clusterName: "azure",
			expected:    "azure",
		},
		{
			description: "default internal LB should get primary vmset",
			vmSet:       "primary",
			clusterName: "azure",
			isInternal:  true,
			expected:    "azure-internal",
		},
		{
			description: "non-default external LB should get its own vmset",
			vmSet:       "as",
			clusterName: "azure",
			expected:    "as",
		},
		{
			description: "non-default internal LB should get its own vmset",
			vmSet:       "as",
			clusterName: "azure",
			isInternal:  true,
			expected:    "as-internal",
		},
		{
			description:   "default standard external LB should get cluster name",
			vmSet:         "primary",
			useStandardLB: true,
			clusterName:   "azure",
			expected:      "azure",
		},
		{
			description:   "default standard internal LB should get cluster name",
			vmSet:         "primary",
			useStandardLB: true,
			isInternal:    true,
			clusterName:   "azure",
			expected:      "azure-internal",
		},
		{
			description:   "non-default standard external LB should get cluster-name",
			vmSet:         "as",
			useStandardLB: true,
			clusterName:   "azure",
			expected:      "azure",
		},
		{
			description:   "non-default standard internal LB should get cluster-name",
			vmSet:         "as",
			useStandardLB: true,
			isInternal:    true,
			clusterName:   "azure",
			expected:      "azure-internal",
		},
	}

	for _, c := range cases {
		if c.useStandardLB {
			az.Config.LoadBalancerSku = loadBalancerSkuStandard
		} else {
			az.Config.LoadBalancerSku = loadBalancerSkuBasic
		}
		loadbalancerName := az.getLoadBalancerName(c.clusterName, c.vmSet, c.isInternal)
		assert.Equal(t, c.expected, loadbalancerName, c.description)
	}
}
|
59
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go
generated
vendored
Normal file
59
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// Defaults and account-name prefixes for the storage accounts this provider
// creates: "f" for file shares, "ds" for shared (blob) disks, "dd" for
// dedicated (blob) disks.
const (
	defaultStorageAccountType      = string(storage.StandardLRS)
	fileShareAccountNamePrefix     = "f"
	sharedDiskAccountNamePrefix    = "ds"
	dedicatedDiskAccountNamePrefix = "dd"
)
|
||||
|
||||
// CreateFileShare creates a file share, using a matching storage account
|
||||
func (az *Cloud) CreateFileShare(shareName, accountName, accountType, location string, requestGiB int) (string, string, error) {
|
||||
account, key, err := az.ensureStorageAccount(accountName, accountType, location, fileShareAccountNamePrefix)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
|
||||
}
|
||||
|
||||
if err := az.createFileShare(account, key, shareName, requestGiB); err != nil {
|
||||
return "", "", fmt.Errorf("failed to create share %s in account %s: %v", shareName, account, err)
|
||||
}
|
||||
glog.V(4).Infof("created share %s in account %s", shareName, account)
|
||||
return account, key, nil
|
||||
}
|
||||
|
||||
// DeleteFileShare deletes a file share using storage account name and key
|
||||
func (az *Cloud) DeleteFileShare(accountName, accountKey, shareName string) error {
|
||||
if err := az.deleteFileShare(accountName, accountKey, shareName); err != nil {
|
||||
return err
|
||||
}
|
||||
glog.V(4).Infof("share %s deleted", shareName)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResizeFileShare resizes a file share
// to sizeGiB; it is a thin wrapper over the internal resizeFileShare helper.
func (az *Cloud) ResizeFileShare(accountName, accountKey, name string, sizeGiB int) error {
	return az.resizeFileShare(accountName, accountKey, name, sizeGiB)
}
|
135
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage_test.go
generated
vendored
Normal file
135
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage_test.go
generated
vendored
Normal file
@@ -0,0 +1,135 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
)
|
||||
|
||||
// TestCreateFileShare drives CreateFileShare against fake storage-account and
// file clients. Cases cover: a named account the fake does not know (error),
// no matching account of the requested type (error), a matching account whose
// key is returned (success, key is the text after the space in "foo key"),
// and accounts that mismatch on name or location (error).
func TestCreateFileShare(t *testing.T) {
	cloud := &Cloud{}
	fake := newFakeStorageAccountClient()
	cloud.StorageAccountClient = fake
	cloud.FileClient = &fakeFileClient{}

	// Shared fixture values; taken by address in the SDK structs below.
	name := "baz"
	sku := "sku"
	location := "centralus"
	value := "foo key"
	bogus := "bogus"

	tests := []struct {
		name     string // share name
		acct     string // account name ("" = pick/create one)
		acctType string
		loc      string
		gb       int
		accounts storage.AccountListResult
		keys     storage.AccountListKeysResult
		err      error

		expectErr  bool
		expectAcct string
		expectKey  string
	}{
		{
			name:      "foo",
			acct:      "bar",
			acctType:  "type",
			loc:       "eastus",
			gb:        10,
			expectErr: true,
		},
		{
			name:      "foo",
			acct:      "",
			acctType:  "type",
			loc:       "eastus",
			gb:        10,
			expectErr: true,
		},
		{
			name:     "foo",
			acct:     "",
			acctType: sku,
			loc:      location,
			gb:       10,
			accounts: storage.AccountListResult{
				Value: &[]storage.Account{
					{Name: &name, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Location: &location},
				},
			},
			keys: storage.AccountListKeysResult{
				Keys: &[]storage.AccountKey{
					{Value: &value},
				},
			},
			expectAcct: "baz",
			expectKey:  "key",
		},
		{
			name:     "foo",
			acct:     "",
			acctType: sku,
			loc:      location,
			gb:       10,
			accounts: storage.AccountListResult{
				Value: &[]storage.Account{
					{Name: &bogus, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Location: &location},
				},
			},
			expectErr: true,
		},
		{
			name:     "foo",
			acct:     "",
			acctType: sku,
			loc:      location,
			gb:       10,
			accounts: storage.AccountListResult{
				Value: &[]storage.Account{
					{Name: &name, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Location: &bogus},
				},
			},
			expectErr: true,
		},
	}

	for _, test := range tests {
		// Reconfigure the shared fake per case.
		fake.Accounts = test.accounts
		fake.Keys = test.keys
		fake.Err = test.err

		account, key, err := cloud.CreateFileShare(test.name, test.acct, test.acctType, test.loc, test.gb)
		if test.expectErr && err == nil {
			t.Errorf("unexpected non-error")
			continue
		}
		if !test.expectErr && err != nil {
			t.Errorf("unexpected error: %v", err)
			continue
		}
		if test.expectAcct != account {
			t.Errorf("Expected: %s, got %s", test.expectAcct, account)
		}
		if test.expectKey != key {
			t.Errorf("Expected: %s, got %s", test.expectKey, key)
		}
	}
}
|
135
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount.go
generated
vendored
Normal file
135
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount.go
generated
vendored
Normal file
@@ -0,0 +1,135 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// accountWithLocation pairs a storage account's name with its SKU type and
// Azure region, as collected by getStorageAccounts.
type accountWithLocation struct {
	Name, StorageType, Location string
}
|
||||
|
||||
// getStorageAccounts gets name, type, location of all storage accounts in a resource group which matches matchingAccountType, matchingLocation
|
||||
func (az *Cloud) getStorageAccounts(matchingAccountType, matchingLocation string) ([]accountWithLocation, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
result, err := az.StorageAccountClient.ListByResourceGroup(ctx, az.ResourceGroup)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if result.Value == nil {
|
||||
return nil, fmt.Errorf("unexpected error when listing storage accounts from resource group %s", az.ResourceGroup)
|
||||
}
|
||||
|
||||
accounts := []accountWithLocation{}
|
||||
for _, acct := range *result.Value {
|
||||
if acct.Name != nil && acct.Location != nil && acct.Sku != nil {
|
||||
storageType := string((*acct.Sku).Name)
|
||||
if matchingAccountType != "" && !strings.EqualFold(matchingAccountType, storageType) {
|
||||
continue
|
||||
}
|
||||
|
||||
location := *acct.Location
|
||||
if matchingLocation != "" && !strings.EqualFold(matchingLocation, location) {
|
||||
continue
|
||||
}
|
||||
accounts = append(accounts, accountWithLocation{Name: *acct.Name, StorageType: storageType, Location: location})
|
||||
}
|
||||
}
|
||||
|
||||
return accounts, nil
|
||||
}
|
||||
|
||||
// getStorageAccesskey gets the storage account access key
|
||||
func (az *Cloud) getStorageAccesskey(account string) (string, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
|
||||
result, err := az.StorageAccountClient.ListKeys(ctx, az.ResourceGroup, account)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if result.Keys == nil {
|
||||
return "", fmt.Errorf("empty keys")
|
||||
}
|
||||
|
||||
for _, k := range *result.Keys {
|
||||
if k.Value != nil && *k.Value != "" {
|
||||
v := *k.Value
|
||||
if ind := strings.LastIndex(v, " "); ind >= 0 {
|
||||
v = v[(ind + 1):]
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("no valid keys")
|
||||
}
|
||||
|
||||
// ensureStorageAccount search storage account, create one storage account(with genAccountNamePrefix) if not found, return accountName, accountKey
|
||||
func (az *Cloud) ensureStorageAccount(accountName, accountType, location, genAccountNamePrefix string) (string, string, error) {
|
||||
if len(accountName) == 0 {
|
||||
// find a storage account that matches accountType
|
||||
accounts, err := az.getStorageAccounts(accountType, location)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("could not list storage accounts for account type %s: %v", accountType, err)
|
||||
}
|
||||
|
||||
if len(accounts) > 0 {
|
||||
accountName = accounts[0].Name
|
||||
glog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location)
|
||||
}
|
||||
|
||||
if len(accountName) == 0 {
|
||||
// not found a matching account, now create a new account in current resource group
|
||||
accountName = generateStorageAccountName(genAccountNamePrefix)
|
||||
if location == "" {
|
||||
location = az.Location
|
||||
}
|
||||
if accountType == "" {
|
||||
accountType = defaultStorageAccountType
|
||||
}
|
||||
|
||||
glog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s",
|
||||
accountName, az.ResourceGroup, location, accountType)
|
||||
cp := storage.AccountCreateParameters{
|
||||
Sku: &storage.Sku{Name: storage.SkuName(accountType)},
|
||||
Tags: map[string]*string{"created-by": to.StringPtr("azure")},
|
||||
Location: &location}
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
_, err := az.StorageAccountClient.Create(ctx, az.ResourceGroup, accountName, cp)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(fmt.Sprintf("Failed to create storage account %s, error: %s", accountName, err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// find the access key with this account
|
||||
accountKey, err := az.getStorageAccesskey(accountName)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
|
||||
}
|
||||
|
||||
return accountName, accountKey, nil
|
||||
}
|
80
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount_test.go
generated
vendored
Normal file
80
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storageaccount_test.go
generated
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
|
||||
)
|
||||
|
||||
// TestGetStorageAccessKeys drives getStorageAccesskey against a fake client:
// no keys is an error; a key whose value is "foo bar" yields "bar" (only the
// text after the last space is kept); empty key entries are skipped; and a
// client error propagates.
func TestGetStorageAccessKeys(t *testing.T) {
	cloud := &Cloud{}
	fake := newFakeStorageAccountClient()
	cloud.StorageAccountClient = fake
	value := "foo bar"

	tests := []struct {
		results     storage.AccountListKeysResult
		expectedKey string
		expectErr   bool
		err         error
	}{
		{storage.AccountListKeysResult{}, "", true, nil},
		{
			storage.AccountListKeysResult{
				Keys: &[]storage.AccountKey{
					{Value: &value},
				},
			},
			"bar",
			false,
			nil,
		},
		{
			// First entry has no value; the second, valid entry is used.
			storage.AccountListKeysResult{
				Keys: &[]storage.AccountKey{
					{},
					{Value: &value},
				},
			},
			"bar",
			false,
			nil,
		},
		{storage.AccountListKeysResult{}, "", true, fmt.Errorf("test error")},
	}

	for _, test := range tests {
		expectedKey := test.expectedKey
		fake.Keys = test.results
		fake.Err = test.err
		key, err := cloud.getStorageAccesskey("acct")
		if test.expectErr && err == nil {
			t.Errorf("Unexpected non-error")
			continue
		}
		if !test.expectErr && err != nil {
			t.Errorf("Unexpected error: %v", err)
			continue
		}
		if key != expectedKey {
			t.Errorf("expected: %s, saw %s", expectedKey, key)
		}
	}
}
|
2701
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_test.go
generated
vendored
Normal file
2701
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
67
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmsets.go
generated
vendored
Normal file
67
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmsets.go
generated
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
// VMSet defines functions all vmsets (including scale set and availability
// set) should be implemented.
// Implementations back both the vmss ("scale set") and standard
// ("availability set") vmTypes of the Azure cloud provider.
type VMSet interface {
	// GetInstanceIDByNodeName gets the cloud provider ID by node name.
	// It must return ("", cloudprovider.InstanceNotFound) if the instance does
	// not exist or is no longer running.
	GetInstanceIDByNodeName(name string) (string, error)
	// GetInstanceTypeByNodeName gets the instance type by node name.
	GetInstanceTypeByNodeName(name string) (string, error)
	// GetIPByNodeName gets machine private IP and public IP by node name.
	GetIPByNodeName(name string) (string, string, error)
	// GetPrimaryInterface gets machine primary network interface by node name.
	GetPrimaryInterface(nodeName string) (network.Interface, error)
	// GetNodeNameByProviderID gets the node name by provider ID.
	GetNodeNameByProviderID(providerID string) (types.NodeName, error)

	// GetZoneByNodeName gets cloudprovider.Zone by node name.
	GetZoneByNodeName(name string) (cloudprovider.Zone, error)

	// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
	// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType.
	GetPrimaryVMSetName() string
	// GetVMSetNames selects all possible availability sets or scale sets
	// (depending vmType configured) for service load balancer, if the service has
	// no loadbalancer mode annotation returns the primary VMSet. If service annotation
	// for loadbalancer exists then return the eligible VMSet.
	GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error)
	// EnsureHostsInPool ensures the given Node's primary IP configurations are
	// participating in the specified LoadBalancer Backend Pool.
	EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error
	// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
	EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error

	// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
	AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
	// DetachDiskByName detaches a vhd from host. The vhd can be identified by diskName or diskURI.
	DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error
	// GetDataDisks gets a list of data disks attached to the node.
	GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error)
}
|
882
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss.go
generated
vendored
Normal file
882
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss.go
generated
vendored
Normal file
@@ -0,0 +1,882 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
var (
	// ErrorNotVmssInstance indicates an instance does not belong to any vmss.
	ErrorNotVmssInstance = errors.New("not a vmss instance")

	// scaleSetNameRE extracts the scale set name from a vmss virtual machine
	// resource ID (the first capture group).
	scaleSetNameRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines(?:.*)`)
	// vmssMachineIDTemplate is the resource ID format of a vmss virtual
	// machine: subscription, resource group, scale set name, instance ID.
	vmssMachineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s"
)
|
||||
|
||||
// scaleSet implements VMSet interface for Azure scale set.
type scaleSet struct {
	*Cloud

	// availabilitySet is also required for scaleSet because some instances
	// (e.g. master nodes) may not belong to any scale sets.
	availabilitySet VMSet

	// Timed caches for scale sets, scale set VMs, the node-name-to-scale-set
	// mapping, and the set of nodes managed by availability sets.
	vmssCache                      *timedCache
	vmssVMCache                    *timedCache
	nodeNameToScaleSetMappingCache *timedCache
	availabilitySetNodesCache      *timedCache
}
|
||||
|
||||
// newScaleSet creates a new scaleSet backed by the given Cloud.
// It wires up an availability-set implementation as fallback (some instances,
// e.g. master nodes, may not belong to any scale set) and initializes every
// timed cache the scaleSet relies on; any cache construction error aborts.
func newScaleSet(az *Cloud) (VMSet, error) {
	var err error
	ss := &scaleSet{
		Cloud:           az,
		availabilitySet: newAvailabilitySet(az),
	}

	ss.nodeNameToScaleSetMappingCache, err = ss.newNodeNameToScaleSetMappingCache()
	if err != nil {
		return nil, err
	}

	ss.availabilitySetNodesCache, err = ss.newAvailabilitySetNodesCache()
	if err != nil {
		return nil, err
	}

	ss.vmssCache, err = ss.newVmssCache()
	if err != nil {
		return nil, err
	}

	ss.vmssVMCache, err = ss.newVmssVMCache()
	if err != nil {
		return nil, err
	}

	return ss, nil
}
|
||||
|
||||
// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache.
// It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets.
func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm compute.VirtualMachineScaleSetVM, err error) {
	// Derive the instance ID from the node name's vmss suffix.
	instanceID, err = getScaleSetVMInstanceID(nodeName)
	if err != nil {
		return ssName, instanceID, vm, err
	}

	// Resolve which scale set (if any) the node belongs to.
	ssName, err = ss.getScaleSetNameByNodeName(nodeName)
	if err != nil {
		return ssName, instanceID, vm, err
	}

	// Empty scale set name means the node is not a vmss instance.
	if ssName == "" {
		return "", "", vm, cloudprovider.InstanceNotFound
	}

	glog.V(4).Infof("getVmssVM gets scaleSetName (%q) and instanceID (%q) for node %q", ssName, instanceID, nodeName)
	cachedVM, err := ss.vmssVMCache.Get(ss.makeVmssVMName(ssName, instanceID))
	if err != nil {
		return ssName, instanceID, vm, err
	}

	// A nil cache entry means the VM could not be found in any scale set.
	if cachedVM == nil {
		glog.Errorf("Can't find node (%q) in any scale sets", nodeName)
		return ssName, instanceID, vm, cloudprovider.InstanceNotFound
	}

	return ssName, instanceID, *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil
}
|
||||
|
||||
// getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache.
|
||||
// The node must belong to one of scale sets.
|
||||
func (ss *scaleSet) getVmssVMByInstanceID(scaleSetName, instanceID string) (vm compute.VirtualMachineScaleSetVM, err error) {
|
||||
vmName := ss.makeVmssVMName(scaleSetName, instanceID)
|
||||
cachedVM, err := ss.vmssVMCache.Get(vmName)
|
||||
if err != nil {
|
||||
return vm, err
|
||||
}
|
||||
|
||||
if cachedVM == nil {
|
||||
glog.Errorf("cound't find vmss virtual machine by scaleSetName (%q) and instanceID (%q)", scaleSetName, instanceID)
|
||||
return vm, cloudprovider.InstanceNotFound
|
||||
}
|
||||
|
||||
return *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil
|
||||
}
|
||||
|
||||
// GetInstanceIDByNodeName gets the cloud provider ID by node name.
// It must return ("", cloudprovider.InstanceNotFound) if the instance does
// not exist or is no longer running.
// Nodes managed by availability sets are delegated to the availabilitySet
// implementation; vmss nodes return the cached VM's resource ID.
func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) {
	managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
	if err != nil {
		glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
		return "", err
	}
	if managedByAS {
		// vm is managed by availability set.
		return ss.availabilitySet.GetInstanceIDByNodeName(name)
	}

	_, _, vm, err := ss.getVmssVM(name)
	if err != nil {
		return "", err
	}

	return *vm.ID, nil
}
|
||||
|
||||
// GetNodeNameByProviderID gets the node name by provider ID.
// NodeName is not part of providerID for vmss instances, so it is resolved by
// looking up the VM's OS profile computer name; providerIDs that don't match
// the vmss pattern are delegated to the availability set implementation.
func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
	// NodeName is not part of providerID for vmss instances.
	scaleSetName, err := extractScaleSetNameByProviderID(providerID)
	if err != nil {
		glog.V(4).Infof("Can not extract scale set name from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err)
		return ss.availabilitySet.GetNodeNameByProviderID(providerID)
	}

	instanceID, err := getLastSegment(providerID)
	if err != nil {
		glog.V(4).Infof("Can not extract instanceID from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err)
		return ss.availabilitySet.GetNodeNameByProviderID(providerID)
	}

	vm, err := ss.getVmssVMByInstanceID(scaleSetName, instanceID)
	if err != nil {
		return "", err
	}

	if vm.OsProfile != nil && vm.OsProfile.ComputerName != nil {
		// Node names are lowercased computer names.
		nodeName := strings.ToLower(*vm.OsProfile.ComputerName)
		return types.NodeName(nodeName), nil
	}

	// No computer name available; return an empty node name without error.
	return "", nil
}
|
||||
|
||||
// GetInstanceTypeByNodeName gets the instance type by node name.
// Nodes managed by availability sets are delegated to the availabilitySet
// implementation; otherwise the type is read from the cached vmss VM's SKU.
func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
	managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
	if err != nil {
		glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
		return "", err
	}
	if managedByAS {
		// vm is managed by availability set.
		return ss.availabilitySet.GetInstanceTypeByNodeName(name)
	}

	_, _, vm, err := ss.getVmssVM(name)
	if err != nil {
		return "", err
	}

	if vm.Sku != nil && vm.Sku.Name != nil {
		return *vm.Sku.Name, nil
	}

	// No SKU information available; return an empty type without error.
	return "", nil
}
|
||||
|
||||
// GetZoneByNodeName gets cloudprovider.Zone by node name.
// Nodes managed by availability sets are delegated to the availabilitySet
// implementation; vmss nodes map the VM's platform fault domain to
// FailureDomain and its location to Region.
func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
	managedByAS, err := ss.isNodeManagedByAvailabilitySet(name)
	if err != nil {
		glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
		return cloudprovider.Zone{}, err
	}
	if managedByAS {
		// vm is managed by availability set.
		return ss.availabilitySet.GetZoneByNodeName(name)
	}

	_, _, vm, err := ss.getVmssVM(name)
	if err != nil {
		return cloudprovider.Zone{}, err
	}

	if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil {
		return cloudprovider.Zone{
			FailureDomain: strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain)),
			Region:        *vm.Location,
		}, nil
	}

	// No fault-domain information available; return the zero Zone without error.
	return cloudprovider.Zone{}, nil
}
|
||||
|
||||
// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType.
// For the scale set implementation this is always the configured primary scale set.
func (ss *scaleSet) GetPrimaryVMSetName() string {
	return ss.Config.PrimaryScaleSetName
}
|
||||
|
||||
// GetIPByNodeName gets machine private IP and public IP by node name.
// TODO(feiskyer): Azure vmss doesn't support associating a public IP to single virtual machine yet,
// fix this after it is supported. Until then the public IP is always "".
func (ss *scaleSet) GetIPByNodeName(nodeName string) (string, string, error) {
	nic, err := ss.GetPrimaryInterface(nodeName)
	if err != nil {
		glog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q), err=%v", nodeName, nodeName, err)
		return "", "", err
	}

	ipConfig, err := getPrimaryIPConfig(nic)
	if err != nil {
		glog.Errorf("error: ss.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err)
		return "", "", err
	}

	targetIP := *ipConfig.PrivateIPAddress
	return targetIP, "", nil
}
|
||||
|
||||
// This returns the full identifier of the primary NIC for the given VM.
|
||||
func (ss *scaleSet) getPrimaryInterfaceID(machine compute.VirtualMachineScaleSetVM) (string, error) {
|
||||
if len(*machine.NetworkProfile.NetworkInterfaces) == 1 {
|
||||
return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil
|
||||
}
|
||||
|
||||
for _, ref := range *machine.NetworkProfile.NetworkInterfaces {
|
||||
if *ref.Primary {
|
||||
return *ref.ID, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", *machine.Name)
|
||||
}
|
||||
|
||||
// machineName is composed of computerNamePrefix and 36-based instanceID.
|
||||
// And instanceID part if in fixed length of 6 characters.
|
||||
// Refer https://msftstack.wordpress.com/2017/05/10/figuring-out-azure-vm-scale-set-machine-names/.
|
||||
func getScaleSetVMInstanceID(machineName string) (string, error) {
|
||||
nameLength := len(machineName)
|
||||
if nameLength < 6 {
|
||||
return "", ErrorNotVmssInstance
|
||||
}
|
||||
|
||||
instanceID, err := strconv.ParseUint(machineName[nameLength-6:], 36, 64)
|
||||
if err != nil {
|
||||
return "", ErrorNotVmssInstance
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%d", instanceID), nil
|
||||
}
|
||||
|
||||
// extractScaleSetNameByProviderID extracts the scaleset name by node's ProviderID.
|
||||
func extractScaleSetNameByProviderID(providerID string) (string, error) {
|
||||
matches := scaleSetNameRE.FindStringSubmatch(providerID)
|
||||
if len(matches) != 2 {
|
||||
return "", ErrorNotVmssInstance
|
||||
}
|
||||
|
||||
return matches[1], nil
|
||||
}
|
||||
|
||||
// listScaleSets lists all scale sets in the configured resource group and
// returns their names.
func (ss *scaleSet) listScaleSets() ([]string, error) {
	var err error
	ctx, cancel := getContextWithCancel()
	defer cancel()

	allScaleSets, err := ss.VirtualMachineScaleSetsClient.List(ctx, ss.ResourceGroup)
	if err != nil {
		glog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", err)
		return nil, err
	}

	ssNames := make([]string, len(allScaleSets))
	for i := range allScaleSets {
		ssNames[i] = *(allScaleSets[i].Name)
	}

	return ssNames, nil
}
|
||||
|
||||
// listScaleSetVMs lists VMs belonging to the specified scale set, including
// their instance views.
func (ss *scaleSet) listScaleSetVMs(scaleSetName string) ([]compute.VirtualMachineScaleSetVM, error) {
	var err error
	ctx, cancel := getContextWithCancel()
	defer cancel()

	allVMs, err := ss.VirtualMachineScaleSetVMsClient.List(ctx, ss.ResourceGroup, scaleSetName, "", "", string(compute.InstanceView))
	if err != nil {
		glog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", err)
		return nil, err
	}

	return allVMs, nil
}
|
||||
|
||||
// getAgentPoolScaleSets lists the virtual machines for the resource group and then builds
// a list of scale sets that match the nodes available to k8s.
// Master nodes and nodes not belonging to any scale set are skipped; the
// returned list may contain duplicates when several nodes share a scale set.
func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
	agentPoolScaleSets := &[]string{}
	for nx := range nodes {
		// Master nodes are never part of an agent pool.
		if isMasterNode(nodes[nx]) {
			continue
		}

		nodeName := nodes[nx].Name
		ssName, err := ss.getScaleSetNameByNodeName(nodeName)
		if err != nil {
			return nil, err
		}

		if ssName == "" {
			glog.V(3).Infof("Node %q is not belonging to any known scale sets", nodeName)
			continue
		}

		*agentPoolScaleSets = append(*agentPoolScaleSets, ssName)
	}

	return agentPoolScaleSets, nil
}
|
||||
|
||||
// GetVMSetNames selects all possible availability sets or scale sets
|
||||
// (depending vmType configured) for service load balancer. If the service has
|
||||
// no loadbalancer mode annotation returns the primary VMSet. If service annotation
|
||||
// for loadbalancer exists then return the eligible VMSet.
|
||||
func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetNames *[]string, err error) {
|
||||
hasMode, isAuto, serviceVMSetNames := getServiceLoadBalancerMode(service)
|
||||
if !hasMode {
|
||||
// no mode specified in service annotation default to PrimaryScaleSetName.
|
||||
scaleSetNames := &[]string{ss.Config.PrimaryScaleSetName}
|
||||
return scaleSetNames, nil
|
||||
}
|
||||
|
||||
scaleSetNames, err := ss.getAgentPoolScaleSets(nodes)
|
||||
if err != nil {
|
||||
glog.Errorf("ss.GetVMSetNames - getAgentPoolScaleSets failed err=(%v)", err)
|
||||
return nil, err
|
||||
}
|
||||
if len(*scaleSetNames) == 0 {
|
||||
glog.Errorf("ss.GetVMSetNames - No scale sets found for nodes in the cluster, node count(%d)", len(nodes))
|
||||
return nil, fmt.Errorf("No scale sets found for nodes, node count(%d)", len(nodes))
|
||||
}
|
||||
|
||||
// sort the list to have deterministic selection
|
||||
sort.Strings(*scaleSetNames)
|
||||
|
||||
if !isAuto {
|
||||
if serviceVMSetNames == nil || len(serviceVMSetNames) == 0 {
|
||||
return nil, fmt.Errorf("service annotation for LoadBalancerMode is empty, it should have __auto__ or availability sets value")
|
||||
}
|
||||
// validate scale set exists
|
||||
var found bool
|
||||
for sasx := range serviceVMSetNames {
|
||||
for asx := range *scaleSetNames {
|
||||
if strings.EqualFold((*scaleSetNames)[asx], serviceVMSetNames[sasx]) {
|
||||
found = true
|
||||
serviceVMSetNames[sasx] = (*scaleSetNames)[asx]
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
glog.Errorf("ss.GetVMSetNames - scale set (%s) in service annotation not found", serviceVMSetNames[sasx])
|
||||
return nil, fmt.Errorf("scale set (%s) - not found", serviceVMSetNames[sasx])
|
||||
}
|
||||
}
|
||||
vmSetNames = &serviceVMSetNames
|
||||
}
|
||||
|
||||
return vmSetNames, nil
|
||||
}
|
||||
|
||||
// GetPrimaryInterface gets machine primary network interface by node name and vmSet.
// Nodes managed by availability sets are delegated to the availabilitySet
// implementation; vmss nodes fetch the NIC via the scale-set interfaces API.
func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
	managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName)
	if err != nil {
		glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
		return network.Interface{}, err
	}
	if managedByAS {
		// vm is managed by availability set.
		return ss.availabilitySet.GetPrimaryInterface(nodeName)
	}

	ssName, instanceID, vm, err := ss.getVmssVM(nodeName)
	if err != nil {
		glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getVmssVM(%s), err=%v", nodeName, nodeName, err)
		return network.Interface{}, err
	}

	primaryInterfaceID, err := ss.getPrimaryInterfaceID(vm)
	if err != nil {
		glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err)
		return network.Interface{}, err
	}

	// The NIC name is the last segment of its resource ID.
	nicName, err := getLastSegment(primaryInterfaceID)
	if err != nil {
		glog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, primaryInterfaceID, err)
		return network.Interface{}, err
	}

	ctx, cancel := getContextWithCancel()
	defer cancel()
	nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ctx, ss.ResourceGroup, ssName, instanceID, nicName, "")
	if err != nil {
		glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, ss.ResourceGroup, ssName, nicName, err)
		return network.Interface{}, err
	}

	// Fix interface's location, which is required when updating the interface.
	// TODO: is this a bug of azure SDK?
	if nic.Location == nil || *nic.Location == "" {
		nic.Location = vm.Location
	}

	return nic, nil
}
|
||||
|
||||
// getScaleSetWithRetry gets scale set with exponential backoff retry
|
||||
func (ss *scaleSet) getScaleSetWithRetry(name string) (compute.VirtualMachineScaleSet, bool, error) {
|
||||
var result compute.VirtualMachineScaleSet
|
||||
var exists bool
|
||||
|
||||
err := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
|
||||
cached, retryErr := ss.vmssCache.Get(name)
|
||||
if retryErr != nil {
|
||||
glog.Errorf("backoff: failure for scale set %q, will retry,err=%v", name, retryErr)
|
||||
return false, nil
|
||||
}
|
||||
glog.V(4).Info("backoff: success for scale set %q", name)
|
||||
|
||||
if cached != nil {
|
||||
exists = true
|
||||
result = *(cached.(*compute.VirtualMachineScaleSet))
|
||||
}
|
||||
|
||||
return true, nil
|
||||
})
|
||||
|
||||
return result, exists, err
|
||||
}
|
||||
|
||||
// getPrimaryNetworkConfiguration gets primary network interface configuration for scale sets.
|
||||
func (ss *scaleSet) getPrimaryNetworkConfiguration(networkConfigurationList *[]compute.VirtualMachineScaleSetNetworkConfiguration, scaleSetName string) (*compute.VirtualMachineScaleSetNetworkConfiguration, error) {
|
||||
networkConfigurations := *networkConfigurationList
|
||||
if len(networkConfigurations) == 1 {
|
||||
return &networkConfigurations[0], nil
|
||||
}
|
||||
|
||||
for idx := range networkConfigurations {
|
||||
networkConfig := &networkConfigurations[idx]
|
||||
if networkConfig.Primary != nil && *networkConfig.Primary == true {
|
||||
return networkConfig, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to find a primary network configuration for the scale set %q", scaleSetName)
|
||||
}
|
||||
|
||||
func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *compute.VirtualMachineScaleSetNetworkConfiguration, scaleSetName string) (*compute.VirtualMachineScaleSetIPConfiguration, error) {
|
||||
ipConfigurations := *config.IPConfigurations
|
||||
if len(ipConfigurations) == 1 {
|
||||
return &ipConfigurations[0], nil
|
||||
}
|
||||
|
||||
for idx := range ipConfigurations {
|
||||
ipConfig := &ipConfigurations[idx]
|
||||
if ipConfig.Primary != nil && *ipConfig.Primary == true {
|
||||
return ipConfig, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to find a primary IP configuration for the scale set %q", scaleSetName)
|
||||
}
|
||||
|
||||
// createOrUpdateVMSSWithRetry invokes ss.VirtualMachineScaleSetsClient.CreateOrUpdate
// with exponential backoff retry, retrying according to the HTTP response.
func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet compute.VirtualMachineScaleSet) error {
	return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
		ctx, cancel := getContextWithCancel()
		defer cancel()
		resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, *virtualMachineScaleSet.Name, virtualMachineScaleSet)
		glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", *virtualMachineScaleSet.Name)
		// processHTTPRetryResponse decides whether the backoff should stop.
		return processHTTPRetryResponse(resp, err)
	})
}
|
||||
|
||||
// updateVMSSInstancesWithRetry invokes ss.VirtualMachineScaleSetsClient.UpdateInstances
// with exponential backoff retry, retrying according to the HTTP response.
func (ss *scaleSet) updateVMSSInstancesWithRetry(scaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) error {
	return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) {
		ctx, cancel := getContextWithCancel()
		defer cancel()
		resp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, scaleSetName, vmInstanceIDs)
		glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%s): end", scaleSetName)
		// processHTTPRetryResponse decides whether the backoff should stop.
		return processHTTPRetryResponse(resp, err)
	})
}
|
||||
|
||||
// getNodesScaleSets returns scalesets with instanceIDs and standard node names
// for given nodes. Nodes whose ProviderID does not match the vmss pattern are
// returned separately as standard (availability set) nodes; masters may be
// excluded entirely for standard load balancers.
func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String, []*v1.Node, error) {
	scalesets := make(map[string]sets.String)
	standardNodes := []*v1.Node{}

	for _, curNode := range nodes {
		if ss.useStandardLoadBalancer() && ss.excludeMasterNodesFromStandardLB() && isMasterNode(curNode) {
			glog.V(4).Infof("Excluding master node %q from load balancer backendpool", curNode.Name)
			continue
		}

		curScaleSetName, err := extractScaleSetNameByProviderID(curNode.Spec.ProviderID)
		if err != nil {
			// Not a vmss instance; treat it as an availability-set node.
			glog.V(4).Infof("Node %q is not belonging to any scale sets, assuming it is belong to availability sets", curNode.Name)
			standardNodes = append(standardNodes, curNode)
			continue
		}

		if _, ok := scalesets[curScaleSetName]; !ok {
			scalesets[curScaleSetName] = sets.NewString()
		}

		// The instance ID is the last segment of the ProviderID.
		instanceID, err := getLastSegment(curNode.Spec.ProviderID)
		if err != nil {
			glog.Errorf("Failed to get instance ID for node %q: %v", curNode.Spec.ProviderID, err)
			return nil, nil, err
		}

		scalesets[curScaleSetName].Insert(instanceID)
	}

	return scalesets, standardNodes, nil
}
|
||||
|
||||
// ensureHostsInVMSetPool ensures the given Node's primary IP configurations are
|
||||
// participating in the vmSet's LoadBalancer Backend Pool.
|
||||
func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID string, vmSetName string, instanceIDs []string, isInternal bool) error {
|
||||
glog.V(3).Infof("ensuring hosts %q of scaleset %q in LB backendpool %q", instanceIDs, vmSetName, backendPoolID)
|
||||
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(vmSetName)
|
||||
if err != nil {
|
||||
glog.Errorf("ss.getScaleSetWithRetry(%s) for service %q failed: %v", vmSetName, serviceName, err)
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
errorMessage := fmt.Errorf("Scale set %q not found", vmSetName)
|
||||
glog.Errorf("%v", errorMessage)
|
||||
return errorMessage
|
||||
}
|
||||
|
||||
// Find primary network interface configuration.
|
||||
networkConfigureList := virtualMachineScaleSet.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
|
||||
primaryNetworkConfiguration, err := ss.getPrimaryNetworkConfiguration(networkConfigureList, vmSetName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Find primary IP configuration.
|
||||
primaryIPConfiguration, err := ss.getPrimaryIPConfigForScaleSet(primaryNetworkConfiguration, vmSetName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update primary IP configuration's LoadBalancerBackendAddressPools.
|
||||
foundPool := false
|
||||
newBackendPools := []compute.SubResource{}
|
||||
if primaryIPConfiguration.LoadBalancerBackendAddressPools != nil {
|
||||
newBackendPools = *primaryIPConfiguration.LoadBalancerBackendAddressPools
|
||||
}
|
||||
for _, existingPool := range newBackendPools {
|
||||
if strings.EqualFold(backendPoolID, *existingPool.ID) {
|
||||
foundPool = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundPool {
|
||||
if ss.useStandardLoadBalancer() && len(newBackendPools) > 0 {
|
||||
// Although standard load balancer supports backends from multiple vmss,
|
||||
// the same network interface couldn't be added to more than one load balancer of
|
||||
// the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain
|
||||
// about this.
|
||||
for _, pool := range newBackendPools {
|
||||
backendPool := *pool.ID
|
||||
matches := backendPoolIDRE.FindStringSubmatch(backendPool)
|
||||
if len(matches) == 2 {
|
||||
lbName := matches[1]
|
||||
if strings.HasSuffix(lbName, InternalLoadBalancerNameSuffix) == isInternal {
|
||||
glog.V(4).Infof("vmss %q has already been added to LB %q, omit adding it to a new one", vmSetName, lbName)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
newBackendPools = append(newBackendPools,
|
||||
compute.SubResource{
|
||||
ID: to.StringPtr(backendPoolID),
|
||||
})
|
||||
primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating", serviceName, vmSetName)
|
||||
resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, vmSetName, virtualMachineScaleSet)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err)
|
||||
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Update instances to latest VMSS model.
|
||||
vmInstanceIDs := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{
|
||||
InstanceIds: &instanceIDs,
|
||||
}
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, vmSetName, vmInstanceIDs)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err)
|
||||
retryErr := ss.updateVMSSInstancesWithRetry(vmSetName, vmInstanceIDs)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureHostsInPool ensures the given Node's primary IP configurations are
// participating in the specified LoadBalancer Backend Pool.
func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error {
	// Partition the nodes into scale-set members (keyed by scale set name,
	// each with a set of instance IDs) and standalone availability-set VMs.
	scalesets, standardNodes, err := ss.getNodesScaleSets(nodes)
	if err != nil {
		glog.Errorf("getNodesScaleSets() for service %q failed: %v", serviceName, err)
		return err
	}

	for ssName, instanceIDs := range scalesets {
		// Only add nodes belonging to specified vmSet for basic SKU LB.
		if !ss.useStandardLoadBalancer() && !strings.EqualFold(ssName, vmSetName) {
			continue
		}

		if instanceIDs.Len() == 0 {
			// This may happen when scaling a vmss capacity to 0.
			glog.V(3).Infof("scale set %q has 0 nodes, adding it to load balancer anyway", ssName)
			// InstanceIDs is required to update vmss, use * instead here since there are no nodes actually.
			instanceIDs.Insert("*")
		}

		err := ss.ensureHostsInVMSetPool(serviceName, backendPoolID, ssName, instanceIDs.List(), isInternal)
		if err != nil {
			glog.Errorf("ensureHostsInVMSetPool() with scaleSet %q for service %q failed: %v", ssName, serviceName, err)
			return err
		}
	}

	// Standard SKU load balancers may also back availability-set VMs; delegate
	// those to the availability-set implementation.
	// NOTE(review): vmSetName is passed as "" here — presumably meaning the
	// default/primary vm set; confirm against availabilitySet.EnsureHostsInPool.
	if ss.useStandardLoadBalancer() && len(standardNodes) > 0 {
		err := ss.availabilitySet.EnsureHostsInPool(serviceName, standardNodes, backendPoolID, "", isInternal)
		if err != nil {
			glog.Errorf("availabilitySet.EnsureHostsInPool() for service %q failed: %v", serviceName, err)
			return err
		}
	}

	return nil
}
|
||||
|
||||
// ensureScaleSetBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified scaleset.
|
||||
func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) error {
|
||||
glog.V(3).Infof("ensuring backend pool %q deleted from scaleset %q", poolID, ssName)
|
||||
virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(ssName)
|
||||
if err != nil {
|
||||
glog.Errorf("ss.ensureScaleSetBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, ssName, ssName, err)
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
glog.V(2).Infof("ss.ensureScaleSetBackendPoolDeleted(%s, %s), scale set %s has already been non-exist", poolID, ssName, ssName)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Find primary network interface configuration.
|
||||
networkConfigureList := virtualMachineScaleSet.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
|
||||
primaryNetworkConfiguration, err := ss.getPrimaryNetworkConfiguration(networkConfigureList, ssName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Find primary IP configuration.
|
||||
primaryIPConfiguration, err := ss.getPrimaryIPConfigForScaleSet(primaryNetworkConfiguration, ssName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Construct new loadBalancerBackendAddressPools and remove backendAddressPools from primary IP configuration.
|
||||
if primaryIPConfiguration.LoadBalancerBackendAddressPools == nil || len(*primaryIPConfiguration.LoadBalancerBackendAddressPools) == 0 {
|
||||
return nil
|
||||
}
|
||||
existingBackendPools := *primaryIPConfiguration.LoadBalancerBackendAddressPools
|
||||
newBackendPools := []compute.SubResource{}
|
||||
foundPool := false
|
||||
for i := len(existingBackendPools) - 1; i >= 0; i-- {
|
||||
curPool := existingBackendPools[i]
|
||||
if strings.EqualFold(poolID, *curPool.ID) {
|
||||
glog.V(10).Infof("ensureScaleSetBackendPoolDeleted gets unwanted backend pool %q for scale set %q", poolID, ssName)
|
||||
foundPool = true
|
||||
newBackendPools = append(existingBackendPools[:i], existingBackendPools[i+1:]...)
|
||||
}
|
||||
}
|
||||
if !foundPool {
|
||||
// Pool not found, assume it has been already removed.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update scale set with backoff.
|
||||
primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools
|
||||
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", ssName)
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, ssName, virtualMachineScaleSet)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err)
|
||||
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update instances to latest VMSS model.
|
||||
instanceIDs := []string{"*"}
|
||||
vmInstanceIDs := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{
|
||||
InstanceIds: &instanceIDs,
|
||||
}
|
||||
instanceCtx, instanceCancel := getContextWithCancel()
|
||||
defer instanceCancel()
|
||||
instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(instanceCtx, ss.ResourceGroup, ssName, vmInstanceIDs)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", ssName)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v", ssName, err)
|
||||
retryErr := ss.updateVMSSInstancesWithRetry(ssName, vmInstanceIDs)
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", ssName)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update virtualMachineScaleSet again. This is a workaround for removing VMSS reference from LB.
|
||||
// TODO: remove this workaround when figuring out the root cause.
|
||||
if len(newBackendPools) == 0 {
|
||||
updateCtx, updateCancel := getContextWithCancel()
|
||||
defer updateCancel()
|
||||
glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", ssName)
|
||||
resp, err = ss.VirtualMachineScaleSetsClient.CreateOrUpdate(updateCtx, ss.ResourceGroup, ssName, virtualMachineScaleSet)
|
||||
glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName)
|
||||
if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err)
|
||||
retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet)
|
||||
if retryErr != nil {
|
||||
glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet.
|
||||
func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
|
||||
if backendAddressPools == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
scalesets := sets.NewString()
|
||||
for _, backendPool := range *backendAddressPools {
|
||||
if strings.EqualFold(*backendPool.ID, poolID) && backendPool.BackendIPConfigurations != nil {
|
||||
for _, ipConfigurations := range *backendPool.BackendIPConfigurations {
|
||||
if ipConfigurations.ID == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
ssName, err := extractScaleSetNameByProviderID(*ipConfigurations.ID)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it")
|
||||
continue
|
||||
}
|
||||
|
||||
scalesets.Insert(ssName)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for ssName := range scalesets {
|
||||
// Only remove nodes belonging to specified vmSet to basic LB backends.
|
||||
if !ss.useStandardLoadBalancer() && !strings.EqualFold(ssName, vmSetName) {
|
||||
continue
|
||||
}
|
||||
|
||||
err := ss.ensureScaleSetBackendPoolDeleted(poolID, ssName)
|
||||
if err != nil {
|
||||
glog.Errorf("ensureScaleSetBackendPoolDeleted() with scaleSet %q failed: %v", ssName, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getVmssMachineID returns the full identifier of a vmss virtual machine.
// NOTE(review): assumes vmssMachineIDTemplate takes four verbs in the order
// subscription, resource group, scale set name, instance ID — confirm at the
// template's declaration.
func (az *Cloud) getVmssMachineID(scaleSetName, instanceID string) string {
	return fmt.Sprintf(
		vmssMachineIDTemplate,
		az.SubscriptionID,
		az.ResourceGroup,
		scaleSetName,
		instanceID)
}
|
205
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache.go
generated
vendored
Normal file
205
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache.go
generated
vendored
Normal file
@@ -0,0 +1,205 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
)
|
||||
|
||||
var (
	// vmssNameSeparator joins a scale set name and an instance ID into a
	// vmss VM name ("<scaleSetName>_<instanceID>").
	vmssNameSeparator = "_"

	// Cache keys for the singleton entries of the node-name-mapping and
	// availability-set-nodes caches.
	nodeNameToScaleSetMappingKey = "k8sNodeNameToScaleSetMappingKey"
	availabilitySetNodesKey      = "k8sAvailabilitySetNodesKey"

	// TTLs controlling how long each timed cache serves an entry before
	// re-fetching from the Azure API.
	vmssCacheTTL                      = time.Minute
	vmssVMCacheTTL                    = time.Minute
	availabilitySetNodesCacheTTL      = 15 * time.Minute
	nodeNameToScaleSetMappingCacheTTL = 15 * time.Minute
)

// nodeNameToScaleSetMapping maps nodeName to scaleSet name.
// The map is required because vmss nodeName is not equal to its vmName.
type nodeNameToScaleSetMapping map[string]string
|
||||
|
||||
// makeVmssVMName composes a vmss VM name ("<scaleSetName>_<instanceID>")
// from a scale set name and an instance ID; inverse of extractVmssVMName.
func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string {
	return fmt.Sprintf("%s%s%s", scaleSetName, vmssNameSeparator, instanceID)
}
|
||||
|
||||
func extractVmssVMName(name string) (string, string, error) {
|
||||
split := strings.SplitAfter(name, vmssNameSeparator)
|
||||
if len(split) < 2 {
|
||||
glog.Errorf("Failed to extract vmssVMName %q", name)
|
||||
return "", "", ErrorNotVmssInstance
|
||||
}
|
||||
|
||||
ssName := strings.Join(split[0:len(split)-1], "")
|
||||
// removing the trailing `vmssNameSeparator` since we used SplitAfter
|
||||
ssName = ssName[:len(ssName)-1]
|
||||
|
||||
instanceID := split[len(split)-1]
|
||||
|
||||
return ssName, instanceID, nil
|
||||
}
|
||||
|
||||
func (ss *scaleSet) newVmssCache() (*timedCache, error) {
|
||||
getter := func(key string) (interface{}, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
result, err := ss.VirtualMachineScaleSetsClient.Get(ctx, ss.ResourceGroup, key)
|
||||
exists, message, realErr := checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return nil, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Virtual machine scale set %q not found with message: %q", key, message)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
return newTimedcache(vmssCacheTTL, getter)
|
||||
}
|
||||
|
||||
// newNodeNameToScaleSetMappingCache builds a timed cache whose single entry
// (keyed by nodeNameToScaleSetMappingKey) maps each vmss VM's lowercased
// computer name to the name of the scale set it belongs to. Building the
// entry lists every scale set and every VM within each one.
func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		scaleSetNames, err := ss.listScaleSets()
		if err != nil {
			return nil, err
		}

		localCache := make(nodeNameToScaleSetMapping)
		for _, ssName := range scaleSetNames {
			vms, err := ss.listScaleSetVMs(ssName)
			if err != nil {
				return nil, err
			}

			for _, vm := range vms {
				if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
					// A VM without an OS profile cannot be mapped; warn and skip.
					glog.Warningf("failed to get computerName for vmssVM (%q)", vm.Name)
					continue
				}

				// Lowercase the computer name — presumably node names are
				// lower-cased hostnames; confirm against the lookup callers.
				computerName := strings.ToLower(*vm.OsProfile.ComputerName)
				localCache[computerName] = ssName
			}
		}

		return localCache, nil
	}

	return newTimedcache(nodeNameToScaleSetMappingCacheTTL, getter)
}
|
||||
|
||||
func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) {
|
||||
getter := func(key string) (interface{}, error) {
|
||||
vmList, err := ss.Cloud.VirtualMachineClientListWithRetry()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
localCache := sets.NewString()
|
||||
for _, vm := range vmList {
|
||||
localCache.Insert(*vm.Name)
|
||||
}
|
||||
|
||||
return localCache, nil
|
||||
}
|
||||
|
||||
return newTimedcache(availabilitySetNodesCacheTTL, getter)
|
||||
}
|
||||
|
||||
// newVmssVMCache builds a timed cache mapping a vmss VM name
// ("<scaleSetName>_<instanceID>") to *compute.VirtualMachineScaleSetVM.
// A nil entry is stored when the key is not a vmss VM or the VM is missing.
func (ss *scaleSet) newVmssVMCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		// vmssVM name's format is 'scaleSetName_instanceID'
		ssName, instanceID, err := extractVmssVMName(key)
		if err != nil {
			return nil, err
		}

		// Not found, the VM doesn't belong to any known scale sets.
		if ssName == "" {
			return nil, nil
		}

		ctx, cancel := getContextWithCancel()
		defer cancel()
		result, err := ss.VirtualMachineScaleSetVMsClient.Get(ctx, ss.ResourceGroup, ssName, instanceID)
		exists, message, realErr := checkResourceExistsFromError(err)
		if realErr != nil {
			return nil, realErr
		}

		if !exists {
			glog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message)
			return nil, nil
		}

		return &result, nil
	}

	return newTimedcache(vmssVMCacheTTL, getter)
}
|
||||
|
||||
// getScaleSetNameByNodeName resolves the scale set a node belongs to via the
// node-name-to-scale-set cache. If the node is absent from the cached mapping,
// the entry is invalidated and rebuilt once (to pick up newly created nodes)
// before giving up. An empty name with a nil error means the node is not part
// of any known scale set.
func (ss *scaleSet) getScaleSetNameByNodeName(nodeName string) (string, error) {
	getScaleSetName := func(nodeName string) (string, error) {
		nodeNameMapping, err := ss.nodeNameToScaleSetMappingCache.Get(nodeNameToScaleSetMappingKey)
		if err != nil {
			return "", err
		}

		realMapping := nodeNameMapping.(nodeNameToScaleSetMapping)
		if ssName, ok := realMapping[nodeName]; ok {
			return ssName, nil
		}

		// Not present in the cached mapping.
		return "", nil
	}

	ssName, err := getScaleSetName(nodeName)
	if err != nil {
		return "", err
	}

	if ssName != "" {
		return ssName, nil
	}

	// ssName is still not found, it is likely that new Nodes are created.
	// Force refresh the cache and try again.
	ss.nodeNameToScaleSetMappingCache.Delete(nodeNameToScaleSetMappingKey)
	return getScaleSetName(nodeName)
}
|
||||
|
||||
func (ss *scaleSet) isNodeManagedByAvailabilitySet(nodeName string) (bool, error) {
|
||||
cached, err := ss.availabilitySetNodesCache.Get(availabilitySetNodesKey)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
availabilitySetNodes := cached.(sets.String)
|
||||
return availabilitySetNodes.Has(nodeName), nil
|
||||
}
|
67
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache_test.go
generated
vendored
Normal file
67
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_cache_test.go
generated
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestExtractVmssVMName verifies that extractVmssVMName splits vmss VM names
// of the form "<scaleSetName>_<instanceID>" on the LAST separator (scale set
// names may themselves contain '_') and rejects names with no separator.
func TestExtractVmssVMName(t *testing.T) {
	cases := []struct {
		description        string
		vmName             string
		expectError        bool
		expectedScaleSet   string
		expectedInstanceID string
	}{
		{
			description: "wrong vmss VM name should report error",
			vmName:      "vm1234",
			expectError: true,
		},
		{
			description: "wrong VM name separator should report error",
			vmName:      "vm-1234",
			expectError: true,
		},
		{
			description:        "correct vmss VM name should return correct scaleSet and instanceID",
			vmName:             "vm_1234",
			expectedScaleSet:   "vm",
			expectedInstanceID: "1234",
		},
		{
			description:        "correct vmss VM name with Extra Separator should return correct scaleSet and instanceID",
			vmName:             "vm_test_1234",
			expectedScaleSet:   "vm_test",
			expectedInstanceID: "1234",
		},
	}

	for _, c := range cases {
		ssName, instanceID, err := extractVmssVMName(c.vmName)
		if c.expectError {
			assert.Error(t, err, c.description)
			continue
		}

		assert.Equal(t, c.expectedScaleSet, ssName, c.description)
		assert.Equal(t, c.expectedInstanceID, instanceID, c.description)
	}
}
|
156
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_test.go
generated
vendored
Normal file
156
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_vmss_test.go
generated
vendored
Normal file
@@ -0,0 +1,156 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func newTestScaleSet(scaleSetName string, vmList []string) (*scaleSet, error) {
|
||||
cloud := getTestCloud()
|
||||
setTestVirtualMachineCloud(cloud, scaleSetName, vmList)
|
||||
ss, err := newScaleSet(cloud)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ss.(*scaleSet), nil
|
||||
}
|
||||
|
||||
// setTestVirtualMachineCloud wires fake scale-set and scale-set-VM clients
// into the given test Cloud. One scale set named scaleSetName is registered
// under resource group "rg", and each entry of vmList becomes a vmss VM whose
// computer name is the entry and whose instance ID is its index in the list.
func setTestVirtualMachineCloud(ss *Cloud, scaleSetName string, vmList []string) {
	virtualMachineScaleSetsClient := newFakeVirtualMachineScaleSetsClient()
	scaleSets := make(map[string]map[string]compute.VirtualMachineScaleSet)
	scaleSets["rg"] = map[string]compute.VirtualMachineScaleSet{
		scaleSetName: {
			Name: &scaleSetName,
		},
	}
	virtualMachineScaleSetsClient.setFakeStore(scaleSets)

	virtualMachineScaleSetVMsClient := newFakeVirtualMachineScaleSetVMsClient()
	ssVMs := make(map[string]map[string]compute.VirtualMachineScaleSetVM)
	ssVMs["rg"] = make(map[string]compute.VirtualMachineScaleSetVM)
	for i := range vmList {
		// Provider-ID-style identifier for the fake VM.
		ID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d", scaleSetName, i)
		nodeName := vmList[i]
		instanceID := fmt.Sprintf("%d", i)
		// vmss VM names follow the "<scaleSetName>_<instanceID>" convention.
		vmName := fmt.Sprintf("%s_%s", scaleSetName, instanceID)
		networkInterfaces := []compute.NetworkInterfaceReference{
			{
				ID: &nodeName,
			},
		}
		ssVMs["rg"][vmName] = compute.VirtualMachineScaleSetVM{
			VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
				OsProfile: &compute.OSProfile{
					ComputerName: &nodeName,
				},
				NetworkProfile: &compute.NetworkProfile{
					NetworkInterfaces: &networkInterfaces,
				},
			},
			ID:         &ID,
			InstanceID: &instanceID,
			Name:       &vmName,
			Location:   &ss.Location,
		}
	}
	virtualMachineScaleSetVMsClient.setFakeStore(ssVMs)

	ss.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient
	ss.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient
}
|
||||
|
||||
// TestGetScaleSetVMInstanceID verifies decoding an instance ID from a vmss
// machine name suffix ("vm00000Z" -> "35" — presumably base-36; confirm in
// getScaleSetVMInstanceID) and the error path for malformed names.
func TestGetScaleSetVMInstanceID(t *testing.T) {
	tests := []struct {
		msg                string
		machineName        string
		expectError        bool
		expectedInstanceID string
	}{{
		msg:         "invalid vmss instance name",
		machineName: "vmvm",
		expectError: true,
	},
		{
			msg:                "valid vmss instance name",
			machineName:        "vm00000Z",
			expectError:        false,
			expectedInstanceID: "35",
		},
	}

	for i, test := range tests {
		instanceID, err := getScaleSetVMInstanceID(test.machineName)
		if test.expectError {
			assert.Error(t, err, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
		} else {
			assert.Equal(t, test.expectedInstanceID, instanceID, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
		}
	}
}
|
||||
|
||||
// TestGetInstanceIDByNodeName verifies node-name -> Azure instance ID
// resolution against the fake vmss clients, including case-insensitive
// hostname matching and the not-found error path.
func TestGetInstanceIDByNodeName(t *testing.T) {
	testCases := []struct {
		description string
		scaleSet    string
		vmList      []string
		nodeName    string
		expected    string
		expectError bool
	}{
		{
			description: "scaleSet should get instance by node name",
			scaleSet:    "ss",
			vmList:      []string{"vmssee6c2000000", "vmssee6c2000001"},
			nodeName:    "vmssee6c2000001",
			expected:    "/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/ss/virtualMachines/1",
		},
		{
			description: "scaleSet should get instance by node name with upper cases hostname",
			scaleSet:    "ss",
			vmList:      []string{"VMSSEE6C2000000", "VMSSEE6C2000001"},
			nodeName:    "vmssee6c2000000",
			expected:    "/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/ss/virtualMachines/0",
		},
		{
			description: "scaleSet should not get instance for non-exist nodes",
			scaleSet:    "ss",
			vmList:      []string{"vmssee6c2000000", "vmssee6c2000001"},
			nodeName:    "agente6c2000005",
			expectError: true,
		},
	}

	for _, test := range testCases {
		ss, err := newTestScaleSet(test.scaleSet, test.vmList)
		assert.NoError(t, err, test.description)

		real, err := ss.GetInstanceIDByNodeName(test.nodeName)
		if test.expectError {
			assert.Error(t, err, test.description)
			continue
		}

		assert.NoError(t, err, test.description)
		assert.Equal(t, test.expected, real, test.description)
	}
}
|
279
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go
generated
vendored
Normal file
279
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go
generated
vendored
Normal file
@@ -0,0 +1,279 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
|
||||
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
var (
	// TTLs for the per-resource timed caches below; an entry older than its
	// TTL triggers a fresh fetch from the Azure API on the next read.
	vmCacheTTL  = time.Minute
	lbCacheTTL  = 2 * time.Minute
	nsgCacheTTL = 2 * time.Minute
	rtCacheTTL  = 2 * time.Minute
)
|
||||
|
||||
// checkResourceExistsFromError inspects an error from an Azure API call and
// returns:
//   - (true, "", nil) when err is nil — the resource exists;
//   - (false, message, nil) when err is an autorest.DetailedError with
//     StatusCode=404 — the resource does not exist; message carries the
//     original error text;
//   - (false, "", err) for any other error, which is passed back to the caller.
func checkResourceExistsFromError(err error) (bool, string, error) {
	if err == nil {
		return true, "", nil
	}
	v, ok := err.(autorest.DetailedError)
	if !ok {
		return false, "", err
	}
	if v.StatusCode == http.StatusNotFound {
		return false, err.Error(), nil
	}
	return false, "", v
}
|
||||
|
||||
// If it is StatusNotFound return nil,
|
||||
// Otherwise, return what it is
|
||||
func ignoreStatusNotFoundFromError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
v, ok := err.(autorest.DetailedError)
|
||||
if ok && v.StatusCode == http.StatusNotFound {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache.
// The service side has throttling control that delays responses if there're
// multiple requests onto certain vm resource request in short period.
// Returns cloudprovider.InstanceNotFound when the VM does not exist.
func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachine, err error) {
	vmName := string(nodeName)
	cachedVM, err := az.vmCache.Get(vmName)
	if err != nil {
		return vm, err
	}

	// The cache stores nil for VMs that were looked up but not found.
	if cachedVM == nil {
		return vm, cloudprovider.InstanceNotFound
	}

	return *(cachedVM.(*compute.VirtualMachine)), nil
}
|
||||
|
||||
func (az *Cloud) getRouteTable() (routeTable network.RouteTable, exists bool, err error) {
|
||||
cachedRt, err := az.rtCache.Get(az.RouteTableName)
|
||||
if err != nil {
|
||||
return routeTable, false, err
|
||||
}
|
||||
|
||||
if cachedRt == nil {
|
||||
return routeTable, false, nil
|
||||
}
|
||||
|
||||
return *(cachedRt.(*network.RouteTable)), true, nil
|
||||
}
|
||||
|
||||
// getPublicIPAddress fetches a public IP, looking in pipResourceGroup when
// given and falling back to the cloud's default resource group otherwise.
// exists is false (with a nil error) when the IP is not found; any other API
// failure is returned as an error.
func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (pip network.PublicIPAddress, exists bool, err error) {
	resourceGroup := az.ResourceGroup
	if pipResourceGroup != "" {
		resourceGroup = pipResourceGroup
	}

	var realErr error
	var message string
	ctx, cancel := getContextWithCancel()
	defer cancel()
	pip, err = az.PublicIPAddressesClient.Get(ctx, resourceGroup, pipName, "")
	exists, message, realErr = checkResourceExistsFromError(err)
	if realErr != nil {
		return pip, false, realErr
	}

	if !exists {
		glog.V(2).Infof("Public IP %q not found with message: %q", pipName, message)
		return pip, false, nil
	}

	return pip, exists, err
}
|
||||
|
||||
func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet network.Subnet, exists bool, err error) {
|
||||
var realErr error
|
||||
var message string
|
||||
var rg string
|
||||
|
||||
if len(az.VnetResourceGroup) > 0 {
|
||||
rg = az.VnetResourceGroup
|
||||
} else {
|
||||
rg = az.ResourceGroup
|
||||
}
|
||||
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
subnet, err = az.SubnetsClient.Get(ctx, rg, virtualNetworkName, subnetName, "")
|
||||
exists, message, realErr = checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return subnet, false, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Subnet %q not found with message: %q", subnetName, message)
|
||||
return subnet, false, nil
|
||||
}
|
||||
|
||||
return subnet, exists, err
|
||||
}
|
||||
|
||||
func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exists bool, err error) {
|
||||
cachedLB, err := az.lbCache.Get(name)
|
||||
if err != nil {
|
||||
return lb, false, err
|
||||
}
|
||||
|
||||
if cachedLB == nil {
|
||||
return lb, false, nil
|
||||
}
|
||||
|
||||
return *(cachedLB.(*network.LoadBalancer)), true, nil
|
||||
}
|
||||
|
||||
func (az *Cloud) getSecurityGroup() (nsg network.SecurityGroup, err error) {
|
||||
if az.SecurityGroupName == "" {
|
||||
return nsg, fmt.Errorf("securityGroupName is not configured")
|
||||
}
|
||||
|
||||
securityGroup, err := az.nsgCache.Get(az.SecurityGroupName)
|
||||
if err != nil {
|
||||
return nsg, err
|
||||
}
|
||||
|
||||
if securityGroup == nil {
|
||||
return nsg, fmt.Errorf("nsg %q not found", az.SecurityGroupName)
|
||||
}
|
||||
|
||||
return *(securityGroup.(*network.SecurityGroup)), nil
|
||||
}
|
||||
|
||||
// newVMCache builds the timed cache mapping a VM name to
// *compute.VirtualMachine. A nil entry is stored for VMs that do not exist.
func (az *Cloud) newVMCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		// Currently InstanceView request are used by azure_zones, while the calls come after non-InstanceView
		// request. If we first send an InstanceView request and then a non InstanceView request, the second
		// request will still hit throttling. This is what happens now for cloud controller manager: In this
		// case we do get instance view every time to fulfill the azure_zones requirement without hitting
		// throttling.
		// Consider adding separate parameter for controlling 'InstanceView' once node update issue #56276 is fixed
		ctx, cancel := getContextWithCancel()
		defer cancel()
		vm, err := az.VirtualMachinesClient.Get(ctx, az.ResourceGroup, key, compute.InstanceView)
		exists, message, realErr := checkResourceExistsFromError(err)
		if realErr != nil {
			return nil, realErr
		}

		if !exists {
			glog.V(2).Infof("Virtual machine %q not found with message: %q", key, message)
			return nil, nil
		}

		return &vm, nil
	}

	return newTimedcache(vmCacheTTL, getter)
}
|
||||
|
||||
// newLBCache builds the timed cache mapping a load balancer name to
// *network.LoadBalancer. A nil entry is stored for load balancers that do
// not exist.
func (az *Cloud) newLBCache() (*timedCache, error) {
	getter := func(key string) (interface{}, error) {
		ctx, cancel := getContextWithCancel()
		defer cancel()

		lb, err := az.LoadBalancerClient.Get(ctx, az.ResourceGroup, key, "")
		exists, message, realErr := checkResourceExistsFromError(err)
		if realErr != nil {
			return nil, realErr
		}

		if !exists {
			glog.V(2).Infof("Load balancer %q not found with message: %q", key, message)
			return nil, nil
		}

		return &lb, nil
	}

	return newTimedcache(lbCacheTTL, getter)
}
|
||||
|
||||
func (az *Cloud) newNSGCache() (*timedCache, error) {
|
||||
getter := func(key string) (interface{}, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
nsg, err := az.SecurityGroupsClient.Get(ctx, az.ResourceGroup, key, "")
|
||||
exists, message, realErr := checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return nil, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Security group %q not found with message: %q", key, message)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &nsg, nil
|
||||
}
|
||||
|
||||
return newTimedcache(nsgCacheTTL, getter)
|
||||
}
|
||||
|
||||
func (az *Cloud) newRouteTableCache() (*timedCache, error) {
|
||||
getter := func(key string) (interface{}, error) {
|
||||
ctx, cancel := getContextWithCancel()
|
||||
defer cancel()
|
||||
rt, err := az.RouteTablesClient.Get(ctx, az.ResourceGroup, key, "")
|
||||
exists, message, realErr := checkResourceExistsFromError(err)
|
||||
if realErr != nil {
|
||||
return nil, realErr
|
||||
}
|
||||
|
||||
if !exists {
|
||||
glog.V(2).Infof("Route table %q not found with message: %q", key, message)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &rt, nil
|
||||
}
|
||||
|
||||
return newTimedcache(rtCacheTTL, getter)
|
||||
}
|
||||
|
||||
func (az *Cloud) useStandardLoadBalancer() bool {
|
||||
return strings.EqualFold(az.LoadBalancerSku, loadBalancerSkuStandard)
|
||||
}
|
||||
|
||||
func (az *Cloud) excludeMasterNodesFromStandardLB() bool {
|
||||
return az.ExcludeMasterFromStandardLB != nil && *az.ExcludeMasterFromStandardLB
|
||||
}
|
53
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap_test.go
generated
vendored
Normal file
53
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap_test.go
generated
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
)
|
||||
|
||||
func TestExtractNotFound(t *testing.T) {
|
||||
notFound := autorest.DetailedError{StatusCode: http.StatusNotFound}
|
||||
otherHTTP := autorest.DetailedError{StatusCode: http.StatusForbidden}
|
||||
otherErr := fmt.Errorf("other error")
|
||||
|
||||
tests := []struct {
|
||||
err error
|
||||
expectedErr error
|
||||
exists bool
|
||||
}{
|
||||
{nil, nil, true},
|
||||
{otherErr, otherErr, false},
|
||||
{notFound, nil, false},
|
||||
{otherHTTP, otherHTTP, false},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
exists, _, err := checkResourceExistsFromError(test.err)
|
||||
if test.exists != exists {
|
||||
t.Errorf("expected: %v, saw: %v", test.exists, exists)
|
||||
}
|
||||
if !reflect.DeepEqual(test.expectedErr, err) {
|
||||
t.Errorf("expected err: %v, saw: %v", test.expectedErr, err)
|
||||
}
|
||||
}
|
||||
}
|
104
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones.go
generated
vendored
Normal file
104
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones.go
generated
vendored
Normal file
@@ -0,0 +1,104 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package azure
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
)
|
||||
|
||||
// instanceInfoURL is the Azure instance metadata endpoint serving the
// per-instance InstanceInfo document (ID, update domain, fault domain).
const instanceInfoURL = "http://169.254.169.254/metadata/v1/InstanceInfo"

// faultMutex guards faultDomain. faultDomain caches the fault domain
// fetched from the metadata service for the lifetime of the process
// (populated lazily by getZoneFromURL).
var faultMutex = &sync.Mutex{}
var faultDomain *string
|
||||
|
||||
// instanceInfo mirrors the JSON document returned by the instance metadata
// InstanceInfo endpoint (see instanceInfoURL).
type instanceInfo struct {
	ID           string `json:"ID"`
	UpdateDomain string `json:"UD"`
	FaultDomain  string `json:"FD"`
}
|
||||
|
||||
// GetZone returns the Zone containing the current failure zone and locality region that the program is running in
// The failure domain is read from the instance metadata service via
// getZoneFromURL; ctx is currently unused.
func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
	return az.getZoneFromURL(instanceInfoURL)
}
|
||||
|
||||
// This is injectable for testing.
|
||||
func (az *Cloud) getZoneFromURL(url string) (cloudprovider.Zone, error) {
|
||||
faultMutex.Lock()
|
||||
defer faultMutex.Unlock()
|
||||
if faultDomain == nil {
|
||||
var err error
|
||||
faultDomain, err = fetchFaultDomain(url)
|
||||
if err != nil {
|
||||
return cloudprovider.Zone{}, err
|
||||
}
|
||||
}
|
||||
zone := cloudprovider.Zone{
|
||||
FailureDomain: *faultDomain,
|
||||
Region: az.Location,
|
||||
}
|
||||
return zone, nil
|
||||
}
|
||||
|
||||
// GetZoneByProviderID implements Zones.GetZoneByProviderID
|
||||
// This is particularly useful in external cloud providers where the kubelet
|
||||
// does not initialize node data.
|
||||
func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) {
|
||||
nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
return cloudprovider.Zone{}, err
|
||||
}
|
||||
|
||||
return az.GetZoneByNodeName(ctx, nodeName)
|
||||
}
|
||||
|
||||
// GetZoneByNodeName implements Zones.GetZoneByNodeName
// This is particularly useful in external cloud providers where the kubelet
// does not initialize node data.
// Delegates to the configured vmSet implementation; ctx is currently unused.
func (az *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) {
	return az.vmSet.GetZoneByNodeName(string(nodeName))
}
|
||||
|
||||
func fetchFaultDomain(url string) (*string, error) {
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return readFaultDomain(resp.Body)
|
||||
}
|
||||
|
||||
func readFaultDomain(reader io.Reader) (*string, error) {
|
||||
var instanceInfo instanceInfo
|
||||
body, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = json.Unmarshal(body, &instanceInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &instanceInfo.FaultDomain, nil
|
||||
}
|
Reference in New Issue
Block a user