Bumping k8s dependencies to 1.13

Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions

View File

@@ -34,6 +34,16 @@ go_library(
"//pkg/kubelet/apis:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/credentials:go_default_library",
@@ -51,16 +61,6 @@ go_library(
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/gopkg.in/gcfg.v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)
@@ -78,16 +78,16 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/kubelet/apis:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/elb:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/github.com/stretchr/testify/mock:go_default_library",
"//vendor/github.com/stretchr/testify/require:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)

View File

@@ -9,3 +9,5 @@ reviewers:
- justinsb
- zmerlynn
- chrislovecnm
- nckturner
- micahhausler

View File

@@ -220,6 +220,13 @@ const (
createTagFactor = 2.0
createTagSteps = 9
// encryptedCheck* constants configure the poll that verifies a newly created
// volume has not been silently removed by AWS.
// On a random AWS account (shared among several developers) it took 4s on
// average.
encryptedCheckInterval = 1 * time.Second
encryptedCheckTimeout = 30 * time.Second
// Number of node names that can be added to a filter. The AWS limit is 200
// but we are using a lower limit on purpose
filterNodeLimit = 150
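filterNodeLimit is what forces callers such as getInstancesByNodeNames to split large node lists into several DescribeInstances calls. A minimal sketch of that batching, assuming a hypothetical chunkNames helper rather than the provider's actual code:

// chunkNames splits node names into batches of at most limit entries,
// mirroring how the provider issues one DescribeInstances call per batch
// of filterNodeLimit (150) names.
func chunkNames(names []string, limit int) [][]string {
	var batches [][]string
	for len(names) > 0 {
		n := limit
		if n > len(names) {
			n = len(names)
		}
		batches = append(batches, names[:n])
		names = names[n:]
	}
	return batches
}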
@@ -406,14 +413,10 @@ const (
// VolumeOptions specifies capacity and tags for a volume.
type VolumeOptions struct {
CapacityGB int
Tags map[string]string
PVCName string
VolumeType string
ZonePresent bool
ZonesPresent bool
AvailabilityZone string
AvailabilityZones string
CapacityGB int
Tags map[string]string
VolumeType string
AvailabilityZone string
// IOPSPerGB x CapacityGB will give total IOPS of the volume to create.
// Calculated total IOPS will be capped at MaxTotalIOPS.
IOPSPerGB int
@@ -643,7 +646,11 @@ func (p *awsSDKProvider) Compute(regionName string) (EC2, error) {
}
awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true)
service := ec2.New(session.New(awsConfig))
sess, err := session.NewSession(awsConfig)
if err != nil {
return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
}
service := ec2.New(sess)
p.addHandlers(regionName, &service.Handlers)
@@ -660,8 +667,11 @@ func (p *awsSDKProvider) LoadBalancing(regionName string) (ELB, error) {
}
awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true)
elbClient := elb.New(session.New(awsConfig))
sess, err := session.NewSession(awsConfig)
if err != nil {
return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
}
elbClient := elb.New(sess)
p.addHandlers(regionName, &elbClient.Handlers)
return elbClient, nil
@@ -674,7 +684,11 @@ func (p *awsSDKProvider) LoadBalancingV2(regionName string) (ELBV2, error) {
}
awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true)
elbClient := elbv2.New(session.New(awsConfig))
sess, err := session.NewSession(awsConfig)
if err != nil {
return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
}
elbClient := elbv2.New(sess)
p.addHandlers(regionName, &elbClient.Handlers)
@@ -688,7 +702,11 @@ func (p *awsSDKProvider) Autoscaling(regionName string) (ASG, error) {
}
awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true)
client := autoscaling.New(session.New(awsConfig))
sess, err := session.NewSession(awsConfig)
if err != nil {
return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
}
client := autoscaling.New(sess)
p.addHandlers(regionName, &client.Handlers)
@@ -696,7 +714,11 @@ func (p *awsSDKProvider) Autoscaling(regionName string) (ASG, error) {
}
func (p *awsSDKProvider) Metadata() (EC2Metadata, error) {
client := ec2metadata.New(session.New(&aws.Config{}))
sess, err := session.NewSession(&aws.Config{})
if err != nil {
return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
}
client := ec2metadata.New(sess)
p.addAPILoggingHandlers(&client.Handlers)
return client, nil
}
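Every constructor in this series of hunks gets the same treatment: session.New swallows configuration errors, while session.NewSession returns them so they can be propagated. A minimal sketch of the pattern on its own (newEC2Client is a hypothetical name; the imports listed in the comment are assumed):

// Assumes: import ("fmt"; "github.com/aws/aws-sdk-go/aws";
// "github.com/aws/aws-sdk-go/aws/session"; "github.com/aws/aws-sdk-go/service/ec2")
// newEC2Client propagates the session-creation error instead of discarding it,
// which is the change applied to Compute, LoadBalancing, LoadBalancingV2,
// Autoscaling, Metadata, and KeyManagement in this file.
func newEC2Client(cfg *aws.Config) (*ec2.EC2, error) {
	sess, err := session.NewSession(cfg)
	if err != nil {
		return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
	}
	return ec2.New(sess), nil
}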
@@ -708,22 +730,17 @@ func (p *awsSDKProvider) KeyManagement(regionName string) (KMS, error) {
}
awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true)
kmsClient := kms.New(session.New(awsConfig))
sess, err := session.NewSession(awsConfig)
if err != nil {
return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
}
kmsClient := kms.New(sess)
p.addHandlers(regionName, &kmsClient.Handlers)
return kmsClient, nil
}
// stringPointerArray creates a slice of string pointers from a slice of strings
// Deprecated: consider using aws.StringSlice - but note the slightly different behaviour with a nil input
func stringPointerArray(orig []string) []*string {
if orig == nil {
return nil
}
return aws.StringSlice(orig)
}
func newEc2Filter(name string, values ...string) *ec2.Filter {
filter := &ec2.Filter{
Name: aws.String(name),
@@ -1228,6 +1245,7 @@ func (c *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.No
glog.V(4).Info("Could not determine private DNS from AWS metadata.")
} else {
addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalDNS, Address: internalDNS})
addresses = append(addresses, v1.NodeAddress{Type: v1.NodeHostName, Address: internalDNS})
}
externalDNS, err := c.metadata.GetMetadata("public-hostname")
@@ -1290,6 +1308,7 @@ func extractNodeAddresses(instance *ec2.Instance) ([]v1.NodeAddress, error) {
privateDNSName := aws.StringValue(instance.PrivateDnsName)
if privateDNSName != "" {
addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalDNS, Address: privateDNSName})
addresses = append(addresses, v1.NodeAddress{Type: v1.NodeHostName, Address: privateDNSName})
}
publicDNSName := aws.StringValue(instance.PublicDnsName)
@@ -1317,7 +1336,7 @@ func (c *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string
return extractNodeAddresses(instance)
}
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists.
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
func (c *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
instanceID, err := kubernetesInstanceID(providerID).mapToAWSInstanceID()
@@ -1341,8 +1360,8 @@ func (c *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID strin
}
state := instances[0].State.Name
if *state != "running" {
glog.Warningf("the instance %s is not running", instanceID)
if *state == ec2.InstanceStateNameTerminated {
glog.Warningf("the instance %s is terminated", instanceID)
return false, nil
}
@@ -1351,7 +1370,36 @@ func (c *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID strin
// InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes
func (c *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
return false, cloudprovider.NotImplemented
instanceID, err := kubernetesInstanceID(providerID).mapToAWSInstanceID()
if err != nil {
return false, err
}
request := &ec2.DescribeInstancesInput{
InstanceIds: []*string{instanceID.awsString()},
}
instances, err := c.ec2.DescribeInstances(request)
if err != nil {
return false, err
}
if len(instances) == 0 {
glog.Warningf("the instance %s does not exist anymore", providerID)
return true, nil
}
if len(instances) > 1 {
return false, fmt.Errorf("multiple instances found for instance: %s", instanceID)
}
instance := instances[0]
if instance.State != nil {
state := aws.StringValue(instance.State.Name)
// valid state for detaching volumes
if state == ec2.InstanceStateNameStopped || state == ec2.InstanceStateNameTerminated {
return true, nil
}
}
return false, nil
}
// InstanceID returns the cloud provider ID of the node with the specified nodeName.
@@ -1401,9 +1449,9 @@ func (c *Cloud) InstanceType(ctx context.Context, nodeName types.NodeName) (stri
return aws.StringValue(inst.InstanceType), nil
}
// getCandidateZonesForDynamicVolume retrieves a list of all the zones in which nodes are running
// GetCandidateZonesForDynamicVolume retrieves a list of all the zones in which nodes are running
// It currently involves querying all instances
func (c *Cloud) getCandidateZonesForDynamicVolume() (sets.String, error) {
func (c *Cloud) GetCandidateZonesForDynamicVolume() (sets.String, error) {
// We don't currently cache this; it is currently used only in volume
// creation which is expected to be a comparatively rare occurrence.
@@ -1766,7 +1814,9 @@ func (d *awsDisk) modifyVolume(requestGiB int64) (int64, error) {
return false, err
}
if aws.StringValue(volumeModification.ModificationState) == ec2.VolumeModificationStateCompleted {
// According to https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring_mods.html
// Size changes usually take a few seconds to complete and take effect after a volume is in the Optimizing state.
if aws.StringValue(volumeModification.ModificationState) == ec2.VolumeModificationStateOptimizing {
return true, nil
}
return false, nil
@@ -2127,29 +2177,6 @@ func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName)
// CreateDisk implements Volumes.CreateDisk
func (c *Cloud) CreateDisk(volumeOptions *VolumeOptions) (KubernetesVolumeID, error) {
allZones, err := c.getCandidateZonesForDynamicVolume()
if err != nil {
return "", fmt.Errorf("error querying for all zones: %v", err)
}
var createAZ string
if !volumeOptions.ZonePresent && !volumeOptions.ZonesPresent {
createAZ = volumeutil.ChooseZoneForVolume(allZones, volumeOptions.PVCName)
}
if !volumeOptions.ZonePresent && volumeOptions.ZonesPresent {
if adminSetOfZones, err := volumeutil.ZonesToSet(volumeOptions.AvailabilityZones); err != nil {
return "", err
} else {
createAZ = volumeutil.ChooseZoneForVolume(adminSetOfZones, volumeOptions.PVCName)
}
}
if volumeOptions.ZonePresent && !volumeOptions.ZonesPresent {
if err := volumeutil.ValidateZone(volumeOptions.AvailabilityZone); err != nil {
return "", err
}
createAZ = volumeOptions.AvailabilityZone
}
var createType string
var iops int64
switch volumeOptions.VolumeType {
@@ -2181,19 +2208,11 @@ func (c *Cloud) CreateDisk(volumeOptions *VolumeOptions) (KubernetesVolumeID, er
// TODO: Should we tag this with the cluster id (so it gets deleted when the cluster does?)
request := &ec2.CreateVolumeInput{}
request.AvailabilityZone = aws.String(createAZ)
request.AvailabilityZone = aws.String(volumeOptions.AvailabilityZone)
request.Size = aws.Int64(int64(volumeOptions.CapacityGB))
request.VolumeType = aws.String(createType)
request.Encrypted = aws.Bool(volumeOptions.Encrypted)
if len(volumeOptions.KmsKeyId) > 0 {
if missing, err := c.checkEncryptionKey(volumeOptions.KmsKeyId); err != nil {
if missing {
// KSM key is missing, provisioning would fail
return "", err
}
// Log checkEncryptionKey error and try provisioning anyway.
glog.Warningf("Cannot check KSM key %s: %v", volumeOptions.KmsKeyId, err)
}
request.KmsKeyId = aws.String(volumeOptions.KmsKeyId)
request.Encrypted = aws.Bool(true)
}
@@ -2222,24 +2241,50 @@ func (c *Cloud) CreateDisk(volumeOptions *VolumeOptions) (KubernetesVolumeID, er
return "", fmt.Errorf("error tagging volume %s: %q", volumeName, err)
}
// AWS has a bad habit of reporting success when creating a volume with
// encryption keys that either don't exist or have the wrong permissions.
// Such a volume lives for a couple of seconds and is then silently deleted
// by AWS. There is no other check to ensure that the given KMS key is correct,
// because Kubernetes may have limited permissions to the key.
if len(volumeOptions.KmsKeyId) > 0 {
err := c.waitUntilVolumeAvailable(volumeName)
if err != nil {
if isAWSErrorVolumeNotFound(err) {
err = fmt.Errorf("failed to create encrypted volume: the volume disappeared after creation, most likely due to inaccessible KMS encryption key")
}
return "", err
}
}
return volumeName, nil
}
// checkEncryptionKey tests that given encryption key exists.
func (c *Cloud) checkEncryptionKey(keyId string) (missing bool, err error) {
input := &kms.DescribeKeyInput{
KeyId: aws.String(keyId),
func (c *Cloud) waitUntilVolumeAvailable(volumeName KubernetesVolumeID) error {
disk, err := newAWSDisk(c, volumeName)
if err != nil {
// Unreachable code
return err
}
_, err = c.kms.DescribeKey(input)
if err == nil {
return false, nil
}
if awsError, ok := err.(awserr.Error); ok {
if awsError.Code() == "NotFoundException" {
return true, fmt.Errorf("KMS key %s not found: %q", keyId, err)
err = wait.Poll(encryptedCheckInterval, encryptedCheckTimeout, func() (done bool, err error) {
vol, err := disk.describeVolume()
if err != nil {
return true, err
}
}
return false, fmt.Errorf("Error checking KSM key %s: %q", keyId, err)
if vol.State != nil {
switch *vol.State {
case "available":
// The volume is Available, it won't be deleted now.
return true, nil
case "creating":
return false, nil
default:
return true, fmt.Errorf("unexpected State of newly created AWS EBS volume %s: %q", volumeName, *vol.State)
}
}
return false, nil
})
return err
}
// DeleteDisk implements Volumes.DeleteDisk
@@ -2333,7 +2378,7 @@ func (c *Cloud) GetVolumeLabels(volumeName KubernetesVolumeID) (map[string]strin
labels := make(map[string]string)
az := aws.StringValue(info.AvailabilityZone)
if az == "" {
return nil, fmt.Errorf("volume did not have AZ information: %q", info.VolumeId)
return nil, fmt.Errorf("volume did not have AZ information: %q", aws.StringValue(info.VolumeId))
}
labels[kubeletapis.LabelZoneFailureDomain] = az
@@ -2462,9 +2507,8 @@ func (c *Cloud) ResizeDisk(
descErr := fmt.Errorf("AWS.ResizeDisk Error describing volume %s with %v", diskName, err)
return oldSize, descErr
}
requestBytes := newSize.Value()
// AWS resizes in chunks of GiB (not GB)
requestGiB := volumeutil.RoundUpSize(requestBytes, 1024*1024*1024)
requestGiB := volumeutil.RoundUpToGiB(newSize)
newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB))
// If the disk is already of greater or equal size than requested, we return
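volumeutil.RoundUpToGiB wraps the same ceiling the old RoundUpSize call expressed by hand. A sketch of the arithmetic, assuming only the resource.Quantity API (not the util package's exact code):

// Assumes: import "k8s.io/apimachinery/pkg/api/resource"
// roundUpToGiB sketches the GiB ceiling the resize path relies on: for example,
// a 10GB request (10^10 bytes) becomes 10 GiB, since 10^10 / 2^30 ≈ 9.31.
func roundUpToGiB(size resource.Quantity) int64 {
	const giB = int64(1024 * 1024 * 1024)
	bytes := size.Value()
	return (bytes + giB - 1) / giB
}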
@@ -3313,7 +3357,7 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS
return nil, fmt.Errorf("could not find any suitable subnets for creating the ELB")
}
loadBalancerName := cloudprovider.GetLoadBalancerName(apiService)
loadBalancerName := c.GetLoadBalancerName(ctx, clusterName, apiService)
serviceName := types.NamespacedName{Namespace: apiService.Namespace, Name: apiService.Name}
instanceIDs := []string{}
@@ -3475,7 +3519,7 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS
return nil, fmt.Errorf("could not find any suitable subnets for creating the ELB")
}
loadBalancerName := cloudprovider.GetLoadBalancerName(apiService)
loadBalancerName := c.GetLoadBalancerName(ctx, clusterName, apiService)
serviceName := types.NamespacedName{Namespace: apiService.Namespace, Name: apiService.Name}
securityGroupIDs, err := c.buildELBSecurityGroupList(serviceName, loadBalancerName, annotations)
if err != nil {
@@ -3511,7 +3555,7 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS
IpProtocol: aws.String("icmp"),
FromPort: aws.Int64(3),
ToPort: aws.Int64(4),
IpRanges: []*ec2.IpRange{{CidrIp: aws.String("0.0.0.0/0")}},
IpRanges: ec2SourceRanges,
}
permissions.Insert(permission)
@@ -3598,7 +3642,7 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS
// GetLoadBalancer is an implementation of LoadBalancer.GetLoadBalancer
func (c *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
loadBalancerName := cloudprovider.GetLoadBalancerName(service)
loadBalancerName := c.GetLoadBalancerName(ctx, clusterName, service)
if isNLB(service.Annotations) {
lb, err := c.describeLoadBalancerv2(loadBalancerName)
@@ -3624,6 +3668,12 @@ func (c *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, service
return status, true, nil
}
// GetLoadBalancerName is an implementation of LoadBalancer.GetLoadBalancerName
func (c *Cloud) GetLoadBalancerName(ctx context.Context, clusterName string, service *v1.Service) string {
// TODO: replace DefaultLoadBalancerName to generate more meaningful loadbalancer names.
return cloudprovider.DefaultLoadBalancerName(service)
}
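For reference, cloudprovider.DefaultLoadBalancerName derives the name from the Service UID; a rough sketch of that behavior (not a verbatim copy of the cloudprovider package):

// Assumes: import ("strings"; v1 "k8s.io/api/core/v1")
// defaultLoadBalancerName sketches the scheme the wrapper delegates to:
// "a" + the Service UID with dashes removed, truncated to 32 characters,
// since ELB names are limited to 32 characters and must start with a letter.
func defaultLoadBalancerName(service *v1.Service) string {
	name := "a" + strings.Replace(string(service.UID), "-", "", -1)
	if len(name) > 32 {
		name = name[:32]
	}
	return name
}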
func toStatus(lb *elb.LoadBalancerDescription) *v1.LoadBalancerStatus {
status := &v1.LoadBalancerStatus{}
@@ -3862,7 +3912,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer
// EnsureLoadBalancerDeleted implements LoadBalancer.EnsureLoadBalancerDeleted.
func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
loadBalancerName := cloudprovider.GetLoadBalancerName(service)
loadBalancerName := c.GetLoadBalancerName(ctx, clusterName, service)
if isNLB(service.Annotations) {
lb, err := c.describeLoadBalancerv2(loadBalancerName)
@@ -4110,7 +4160,7 @@ func (c *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, serv
return err
}
loadBalancerName := cloudprovider.GetLoadBalancerName(service)
loadBalancerName := c.GetLoadBalancerName(ctx, clusterName, service)
if isNLB(service.Annotations) {
lb, err := c.describeLoadBalancerv2(loadBalancerName)
if err != nil {
@@ -4270,13 +4320,22 @@ func mapInstanceToNodeName(i *ec2.Instance) types.NodeName {
return types.NodeName(aws.StringValue(i.PrivateDnsName))
}
var aliveFilter = []string{
ec2.InstanceStateNamePending,
ec2.InstanceStateNameRunning,
ec2.InstanceStateNameShuttingDown,
ec2.InstanceStateNameStopping,
ec2.InstanceStateNameStopped,
}
// Returns the instance with the specified node name
// Returns nil if it does not exist
func (c *Cloud) findInstanceByNodeName(nodeName types.NodeName) (*ec2.Instance, error) {
privateDNSName := mapNodeNameToPrivateDNSName(nodeName)
filters := []*ec2.Filter{
newEc2Filter("private-dns-name", privateDNSName),
newEc2Filter("instance-state-name", "running"),
// exclude instances in "terminated" state
newEc2Filter("instance-state-name", aliveFilter...),
}
instances, err := c.describeInstances(filters)

View File

@@ -912,9 +912,17 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
// We are supposed to specify one subnet per AZ.
// TODO: What happens if we have more than one subnet per AZ?
createRequest.Subnets = stringPointerArray(subnetIDs)
if subnetIDs == nil {
createRequest.Subnets = nil
} else {
createRequest.Subnets = aws.StringSlice(subnetIDs)
}
createRequest.SecurityGroups = stringPointerArray(securityGroupIDs)
if securityGroupIDs == nil {
createRequest.SecurityGroups = nil
} else {
createRequest.SecurityGroups = aws.StringSlice(securityGroupIDs)
}
// Get additional tags set by the user
tags := getLoadBalancerAdditionalTags(annotations)
@@ -996,7 +1004,11 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
// This call just replaces the security groups, unlike e.g. subnets (!)
request := &elb.ApplySecurityGroupsToLoadBalancerInput{}
request.LoadBalancerName = aws.String(loadBalancerName)
request.SecurityGroups = stringPointerArray(securityGroupIDs)
if securityGroupIDs == nil {
request.SecurityGroups = nil
} else {
request.SecurityGroups = aws.StringSlice(securityGroupIDs)
}
glog.V(2).Info("Applying updated security groups to load balancer")
_, err := c.elb.ApplySecurityGroupsToLoadBalancer(request)
if err != nil {
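The explicit nil checks above preserve what the removed stringPointerArray helper guaranteed: a nil input stays nil, whereas aws.StringSlice(nil) yields an empty non-nil slice. A standalone sketch of that difference (not this file's code):

// Assumes: import "github.com/aws/aws-sdk-go/aws"
// stringSliceOrNil keeps a nil input nil; aws.StringSlice would instead
// allocate an empty (non-nil) []*string, which is why the call sites above
// branch on nil before converting.
func stringSliceOrNil(orig []string) []*string {
	if orig == nil {
		return nil
	}
	return aws.StringSlice(orig)
}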

View File

@@ -348,13 +348,14 @@ func TestNodeAddresses(t *testing.T) {
if err3 != nil {
t.Errorf("Should not error when instance found")
}
if len(addrs3) != 4 {
t.Errorf("Should return exactly 4 NodeAddresses")
if len(addrs3) != 5 {
t.Errorf("Should return exactly 5 NodeAddresses")
}
testHasNodeAddress(t, addrs3, v1.NodeInternalIP, "192.168.0.1")
testHasNodeAddress(t, addrs3, v1.NodeExternalIP, "1.2.3.4")
testHasNodeAddress(t, addrs3, v1.NodeExternalDNS, "instance-same.ec2.external")
testHasNodeAddress(t, addrs3, v1.NodeInternalDNS, "instance-same.ec2.internal")
testHasNodeAddress(t, addrs3, v1.NodeHostName, "instance-same.ec2.internal")
}
func TestNodeAddressesWithMetadata(t *testing.T) {
@@ -797,6 +798,18 @@ func TestIpPermissionExistsHandlesMultipleGroupIdsWithUserIds(t *testing.T) {
}
func TestFindInstanceByNodeNameExcludesTerminatedInstances(t *testing.T) {
awsStates := []struct {
id int64
state string
expected bool
}{
{0, ec2.InstanceStateNamePending, true},
{16, ec2.InstanceStateNameRunning, true},
{32, ec2.InstanceStateNameShuttingDown, true},
{48, ec2.InstanceStateNameTerminated, false},
{64, ec2.InstanceStateNameStopping, true},
{80, ec2.InstanceStateNameStopped, true},
}
awsServices := newMockedFakeAWSServices(TestClusterId)
nodeName := types.NodeName("my-dns.internal")
@@ -806,36 +819,41 @@ func TestFindInstanceByNodeNameExcludesTerminatedInstances(t *testing.T) {
tag.Value = aws.String(TestClusterId)
tags := []*ec2.Tag{&tag}
var runningInstance ec2.Instance
runningInstance.InstanceId = aws.String("i-running")
runningInstance.PrivateDnsName = aws.String(string(nodeName))
runningInstance.State = &ec2.InstanceState{Code: aws.Int64(16), Name: aws.String("running")}
runningInstance.Tags = tags
var testInstance ec2.Instance
testInstance.PrivateDnsName = aws.String(string(nodeName))
testInstance.Tags = tags
var terminatedInstance ec2.Instance
terminatedInstance.InstanceId = aws.String("i-terminated")
terminatedInstance.PrivateDnsName = aws.String(string(nodeName))
terminatedInstance.State = &ec2.InstanceState{Code: aws.Int64(48), Name: aws.String("terminated")}
terminatedInstance.Tags = tags
awsDefaultInstances := awsServices.instances
for _, awsState := range awsStates {
id := "i-" + awsState.state
testInstance.InstanceId = aws.String(id)
testInstance.State = &ec2.InstanceState{Code: aws.Int64(awsState.id), Name: aws.String(awsState.state)}
instances := []*ec2.Instance{&terminatedInstance, &runningInstance}
awsServices.instances = append(awsServices.instances, instances...)
awsServices.instances = append(awsDefaultInstances, &testInstance)
c, err := newAWSCloud(CloudConfig{}, awsServices)
if err != nil {
t.Errorf("Error building aws cloud: %v", err)
return
}
c, err := newAWSCloud(CloudConfig{}, awsServices)
if err != nil {
t.Errorf("Error building aws cloud: %v", err)
return
}
instance, err := c.findInstanceByNodeName(nodeName)
resultInstance, err := c.findInstanceByNodeName(nodeName)
if err != nil {
t.Errorf("Failed to find instance: %v", err)
return
}
if *instance.InstanceId != "i-running" {
t.Errorf("Expected running instance but got %v", *instance.InstanceId)
if awsState.expected {
if err != nil || resultInstance == nil {
t.Errorf("Expected to find instance %v", *testInstance.InstanceId)
return
}
if *resultInstance.InstanceId != *testInstance.InstanceId {
t.Errorf("Wrong instance returned by findInstanceByNodeName() expected: %v, actual: %v", *testInstance.InstanceId, *resultInstance.InstanceId)
return
}
} else {
if err == nil && resultInstance != nil {
t.Errorf("Did not expect to find instance %v", *resultInstance.InstanceId)
return
}
}
}
}
@@ -862,6 +880,7 @@ func TestGetInstanceByNodeNameBatching(t *testing.T) {
}
instances, err := c.getInstancesByNodeNames(nodeNames)
assert.Nil(t, err, "Error getting instances by nodeNames %v: %v", nodeNames, err)
assert.NotEmpty(t, instances)
assert.Equal(t, 200, len(instances), "Expected 200 but got less")
}

View File

@@ -119,7 +119,7 @@ func (c *Cloud) checkIfAttachedToNode(diskName KubernetesVolumeID, nodeName type
info, err := disk.describeVolume()
if err != nil {
glog.Warning("Error describing volume %s with %v", diskName, err)
glog.Warningf("Error describing volume %s with %v", diskName, err)
awsDiskInfo.volumeState = "unknown"
return awsDiskInfo, false, err
}