Bumping k8s dependencies to 1.13
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/BUILD: 26 lines changed (generated, vendored)
@@ -28,6 +28,13 @@ go_library(
         "//pkg/util/mount:go_default_library",
         "//pkg/volume:go_default_library",
         "//pkg/volume/util:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/util/cert:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud/openstack:go_default_library",
@@ -56,13 +63,6 @@ go_library(
         "//vendor/github.com/mitchellh/mapstructure:go_default_library",
         "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
         "//vendor/gopkg.in/gcfg.v1:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/client-go/util/cert:go_default_library",
         "//vendor/k8s.io/utils/exec:go_default_library",
     ],
 )
@@ -77,15 +77,15 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/cloudprovider:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers:go_default_library",
         "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
     ],
 )
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack.go: 20 lines changed (generated, vendored)
@@ -26,6 +26,7 @@ import (
 	"net"
 	"net/http"
 	"os"
+	"reflect"
 	"regexp"
 	"strings"
 	"time"
@@ -52,7 +53,10 @@ import (
 
 const (
 	// ProviderName is the name of the openstack provider
-	ProviderName     = "openstack"
+	ProviderName = "openstack"
+
+	// TypeHostName is the name type of openstack instance
+	TypeHostName     = "hostname"
 	availabilityZone = "availability_zone"
 	defaultTimeOut   = 60 * time.Second
 )
@@ -497,6 +501,15 @@ func nodeAddresses(srv *servers.Server) ([]v1.NodeAddress, error) {
 		)
 	}
 
+	if srv.Metadata[TypeHostName] != "" {
+		v1helper.AddToNodeAddresses(&addrs,
+			v1.NodeAddress{
+				Type:    v1.NodeHostName,
+				Address: srv.Metadata[TypeHostName],
+			},
+		)
+	}
+
 	return addrs, nil
 }
 
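The new metadata-driven hostname address is easy to see in isolation. Below is a minimal, self-contained sketch (local stand-in types, not the vendored code) of the check this hunk adds: a hostname address is emitted only when the server metadata carries the "hostname" key, which the TypeHostName constant names.

```go
package main

import "fmt"

// NodeAddress is a local stand-in for v1.NodeAddress.
type NodeAddress struct {
	Type    string
	Address string
}

// hostnameAddress mirrors the added check: only emit a Hostname address when
// the server's metadata carries the "hostname" key.
func hostnameAddress(metadata map[string]string) (NodeAddress, bool) {
	const typeHostName = "hostname"
	if name := metadata[typeHostName]; name != "" {
		return NodeAddress{Type: "Hostname", Address: name}, true
	}
	return NodeAddress{}, false
}

func main() {
	md := map[string]string{"hostname": "minion-0.novalocal"}
	if addr, ok := hostnameAddress(md); ok {
		fmt.Printf("%s: %s\n", addr.Type, addr.Address) // Hostname: minion-0.novalocal
	}
}
```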
@@ -573,6 +586,11 @@ func (os *OpenStack) HasClusterID() bool {
 
 func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
 	glog.V(4).Info("openstack.LoadBalancer() called")
 
+	if reflect.DeepEqual(os.lbOpts, LoadBalancerOpts{}) {
+		glog.V(4).Info("LoadBalancer section is empty/not defined in cloud-config")
+		return nil, false
+	}
+
 	network, err := os.NewNetworkV2()
 	if err != nil {
 		return nil, false
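The guard above compares the parsed options against an empty struct literal with reflect.DeepEqual to detect a [LoadBalancer] section that was never set in cloud-config. A standalone illustration of that zero-value check, with LoadBalancerOpts as a local stand-in type:

```go
package main

import (
	"fmt"
	"reflect"
)

// LoadBalancerOpts is a stand-in for the provider's config section.
type LoadBalancerOpts struct {
	LBVersion string
	SubnetID  string
}

func main() {
	var opts LoadBalancerOpts
	// true: nothing was set, the section is absent from cloud-config
	fmt.Println(reflect.DeepEqual(opts, LoadBalancerOpts{}))

	opts.SubnetID = "subnet-1234"
	// false: at least one field is configured
	fmt.Println(reflect.DeepEqual(opts, LoadBalancerOpts{}))
}
```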
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_instances.go: 29 lines changed (generated, vendored)
@@ -36,6 +36,10 @@ type Instances struct {
 	opts    MetadataOpts
 }
 
+const (
+	instanceShutoff = "SHUTOFF"
+)
+
 // Instances returns an implementation of Instances for OpenStack.
 func (os *OpenStack) Instances() (cloudprovider.Instances, bool) {
 	glog.V(4).Info("openstack.Instances() called")
@@ -106,7 +110,7 @@ func (i *Instances) NodeAddressesByProviderID(ctx context.Context, providerID st
 	return addresses, nil
 }
 
-// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
+// InstanceExistsByProviderID returns true if the instance with the given provider id still exist.
 // If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
 func (i *Instances) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
 	instanceID, err := instanceIDFromProviderID(providerID)
@@ -114,7 +118,7 @@ func (i *Instances) InstanceExistsByProviderID(ctx context.Context, providerID s
 		return false, err
 	}
 
-	server, err := servers.Get(i.compute, instanceID).Extract()
+	_, err = servers.Get(i.compute, instanceID).Extract()
 	if err != nil {
 		if isNotFound(err) {
 			return false, nil
@@ -122,17 +126,26 @@ func (i *Instances) InstanceExistsByProviderID(ctx context.Context, providerID s
 		return false, err
 	}
 
-	if server.Status != "ACTIVE" {
-		glog.Warningf("the instance %s is not active", instanceID)
-		return false, nil
-	}
-
 	return true, nil
 }
 
 // InstanceShutdownByProviderID returns true if the instances is in safe state to detach volumes
 func (i *Instances) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
-	return false, cloudprovider.NotImplemented
+	instanceID, err := instanceIDFromProviderID(providerID)
+	if err != nil {
+		return false, err
+	}
+
+	server, err := servers.Get(i.compute, instanceID).Extract()
+	if err != nil {
+		return false, err
+	}
+
+	// SHUTOFF is the only state where we can detach volumes immediately
+	if server.Status == instanceShutoff {
+		return true, nil
+	}
+	return false, nil
 }
 
 // InstanceID returns the kubelet's cloud provider ID.
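The newly implemented InstanceShutdownByProviderID boils down to a single state predicate once the server has been fetched. A minimal sketch of that rule, reusing the same SHUTOFF constant:

```go
package main

import "fmt"

const instanceShutoff = "SHUTOFF"

// shutdownSafeForDetach mirrors the new logic: SHUTOFF is the only Nova
// state treated as safe for immediately detaching volumes.
func shutdownSafeForDetach(status string) bool {
	return status == instanceShutoff
}

func main() {
	for _, s := range []string{"ACTIVE", "SHUTOFF", "PAUSED"} {
		fmt.Printf("%-7s -> %v\n", s, shutdownSafeForDetach(s))
	}
}
```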
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go: 45 lines changed (generated, vendored)
@@ -460,7 +460,7 @@ func (lbaas *LbaasV2) createLoadBalancer(service *v1.Service, name string, inter
 
 // GetLoadBalancer returns whether the specified load balancer exists and its status
 func (lbaas *LbaasV2) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
-	loadBalancerName := cloudprovider.GetLoadBalancerName(service)
+	loadBalancerName := lbaas.GetLoadBalancerName(ctx, clusterName, service)
 	loadbalancer, err := getLoadbalancerByName(lbaas.lb, loadBalancerName)
 	if err == ErrNotFound {
 		return nil, false, nil
@@ -474,10 +474,13 @@ func (lbaas *LbaasV2) GetLoadBalancer(ctx context.Context, clusterName string, s
 	portID := loadbalancer.VipPortID
 	if portID != "" {
 		floatIP, err := getFloatingIPByPortID(lbaas.network, portID)
-		if err != nil {
+		if err != nil && err != ErrNotFound {
 			return nil, false, fmt.Errorf("error getting floating ip for port %s: %v", portID, err)
 		}
-		status.Ingress = []v1.LoadBalancerIngress{{IP: floatIP.FloatingIP}}
+
+		if floatIP != nil {
+			status.Ingress = []v1.LoadBalancerIngress{{IP: floatIP.FloatingIP}}
+		}
 	} else {
 		status.Ingress = []v1.LoadBalancerIngress{{IP: loadbalancer.VipAddress}}
 	}
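The floating-IP handling above changes from fail-on-any-error to tolerate-not-found: a VIP port without a floating IP is now a valid state with an empty ingress list. A self-contained sketch of the pattern, with errNotFound and floatingIP as local stand-ins for the package's ErrNotFound sentinel and gophercloud's floating IP type:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

type floatingIP struct{ FloatingIP string }

// ingressIPs mirrors the new behavior: a not-found lookup is no longer an
// error, and ingress is populated only when a floating IP actually exists.
func ingressIPs(lookup func() (*floatingIP, error)) ([]string, error) {
	fip, err := lookup()
	if err != nil && err != errNotFound {
		return nil, err
	}
	var ingress []string
	if fip != nil {
		ingress = append(ingress, fip.FloatingIP)
	}
	return ingress, nil
}

func main() {
	attached := func() (*floatingIP, error) { return &floatingIP{FloatingIP: "203.0.113.7"}, nil }
	missing := func() (*floatingIP, error) { return nil, errNotFound }

	got, _ := ingressIPs(attached)
	fmt.Println(got) // [203.0.113.7]
	got, _ = ingressIPs(missing)
	fmt.Println(got) // []: empty ingress, but no error
}
```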
@@ -485,6 +488,12 @@ func (lbaas *LbaasV2) GetLoadBalancer(ctx context.Context, clusterName string, s
 	return status, true, err
 }
 
+// GetLoadBalancerName is an implementation of LoadBalancer.GetLoadBalancerName.
+func (lbaas *LbaasV2) GetLoadBalancerName(ctx context.Context, clusterName string, service *v1.Service) string {
+	// TODO: replace DefaultLoadBalancerName to generate more meaningful loadbalancer names.
+	return cloudprovider.DefaultLoadBalancerName(service)
+}
+
 // The LB needs to be configured with instance addresses on the same
 // subnet as the LB (aka opts.SubnetID). Currently we're just
 // guessing that the node's InternalIP is the right address.
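For reference, cloudprovider.DefaultLoadBalancerName derives the name from the Service UID. The sketch below reproduces that derivation as an assumption about the upstream helper of this era (prefix "a", dashes stripped, truncated to 32 characters); it is an illustration, not the vendored code.

```go
package main

import (
	"fmt"
	"strings"
)

// defaultLoadBalancerName sketches the assumed derivation: "a" plus the
// Service UID with dashes removed, capped at 32 characters.
func defaultLoadBalancerName(serviceUID string) string {
	name := "a" + strings.Replace(serviceUID, "-", "", -1)
	if len(name) > 32 {
		name = name[:32]
	}
	return name
}

func main() {
	fmt.Println(defaultLoadBalancerName("9a3f1e2c-0d4b-11e9-b2ad-fa163e0e0f12"))
	// a9a3f1e2c0d4b11e9b2adfa163e0e0f1 (32 chars)
}
```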
@@ -554,14 +563,14 @@ func getSubnetIDForLB(compute *gophercloud.ServiceClient, node v1.Node) (string,
 }
 
 // getNodeSecurityGroupIDForLB lists node-security-groups for specific nodes
-func getNodeSecurityGroupIDForLB(compute *gophercloud.ServiceClient, nodes []*v1.Node) ([]string, error) {
-	nodeSecurityGroupIDs := sets.NewString()
+func getNodeSecurityGroupIDForLB(compute *gophercloud.ServiceClient, network *gophercloud.ServiceClient, nodes []*v1.Node) ([]string, error) {
+	secGroupNames := sets.NewString()
 
 	for _, node := range nodes {
 		nodeName := types.NodeName(node.Name)
 		srv, err := getServerByName(compute, nodeName)
 		if err != nil {
-			return nodeSecurityGroupIDs.List(), err
+			return []string{}, err
 		}
 
 		// use the first node-security-groups
@@ -569,11 +578,19 @@ func getNodeSecurityGroupIDForLB(compute *gophercloud.ServiceClient, nodes []*v1
 		// case 1: node1:SG1 node2:SG2 return SG1,SG2
 		// case 2: node1:SG1,SG2 node2:SG3,SG4 return SG1,SG3
 		// case 3: node1:SG1,SG2 node2:SG2,SG3 return SG1,SG2
-		securityGroupName := srv.SecurityGroups[0]["name"]
-		nodeSecurityGroupIDs.Insert(securityGroupName.(string))
+		secGroupNames.Insert(srv.SecurityGroups[0]["name"].(string))
 	}
 
-	return nodeSecurityGroupIDs.List(), nil
+	secGroupIDs := make([]string, secGroupNames.Len())
+	for i, name := range secGroupNames.List() {
+		secGroupID, err := groups.IDFromName(network, name)
+		if err != nil {
+			return []string{}, err
+		}
+		secGroupIDs[i] = secGroupID
+	}
+
+	return secGroupIDs, nil
 }
 
 // isSecurityGroupNotFound return true while 'err' is object of gophercloud.ErrResourceNotFound
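The reworked function now gathers unique security-group names first, then resolves each name to an ID in a second pass via groups.IDFromName, failing fast on the first lookup error. A stand-in sketch of that two-phase flow, with idFromName replacing the gophercloud call and a plain map replacing sets.String:

```go
package main

import (
	"fmt"
	"sort"
)

// resolveSecurityGroupIDs mirrors the two-phase flow: unique names first,
// then one ID lookup per name, returning early on the first lookup error.
func resolveSecurityGroupIDs(names map[string]bool, idFromName func(string) (string, error)) ([]string, error) {
	sorted := make([]string, 0, len(names))
	for name := range names {
		sorted = append(sorted, name)
	}
	sort.Strings(sorted) // sets.String.List() returns sorted order; mirror that

	ids := make([]string, len(sorted))
	for i, name := range sorted {
		id, err := idFromName(name)
		if err != nil {
			return []string{}, err
		}
		ids[i] = id
	}
	return ids, nil
}

func main() {
	lookup := func(name string) (string, error) { return "sg-id-for-" + name, nil }
	ids, err := resolveSecurityGroupIDs(map[string]bool{"default": true, "k8s-nodes": true}, lookup)
	fmt.Println(ids, err) // [sg-id-for-default sg-id-for-k8s-nodes] <nil>
}
```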
@@ -723,7 +740,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string
 		return nil, fmt.Errorf("unsupported load balancer affinity: %v", affinity)
 	}
 
-	name := cloudprovider.GetLoadBalancerName(apiService)
+	name := lbaas.GetLoadBalancerName(ctx, clusterName, apiService)
 	loadbalancer, err := getLoadbalancerByName(lbaas.lb, name)
 	if err != nil {
 		if err != ErrNotFound {
@@ -997,7 +1014,7 @@ func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *v1.Ser
 	// find node-security-group for service
 	var err error
 	if len(lbaas.opts.NodeSecurityGroupIDs) == 0 {
-		lbaas.opts.NodeSecurityGroupIDs, err = getNodeSecurityGroupIDForLB(lbaas.compute, nodes)
+		lbaas.opts.NodeSecurityGroupIDs, err = getNodeSecurityGroupIDForLB(lbaas.compute, lbaas.network, nodes)
 		if err != nil {
 			return fmt.Errorf("failed to find node-security-group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err)
 		}
@@ -1165,7 +1182,7 @@ func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *v1.Ser
 
 // UpdateLoadBalancer updates hosts under the specified load balancer.
 func (lbaas *LbaasV2) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
-	loadBalancerName := cloudprovider.GetLoadBalancerName(service)
+	loadBalancerName := lbaas.GetLoadBalancerName(ctx, clusterName, service)
 	glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, nodes)
 
 	lbaas.opts.SubnetID = getStringFromServiceAnnotation(service, ServiceAnnotationLoadBalancerSubnetID, lbaas.opts.SubnetID)
@@ -1311,7 +1328,7 @@ func (lbaas *LbaasV2) updateSecurityGroup(clusterName string, apiService *v1.Ser
 	originalNodeSecurityGroupIDs := lbaas.opts.NodeSecurityGroupIDs
 
 	var err error
-	lbaas.opts.NodeSecurityGroupIDs, err = getNodeSecurityGroupIDForLB(lbaas.compute, nodes)
+	lbaas.opts.NodeSecurityGroupIDs, err = getNodeSecurityGroupIDForLB(lbaas.compute, lbaas.network, nodes)
 	if err != nil {
 		return fmt.Errorf("failed to find node-security-group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err)
 	}
@@ -1388,7 +1405,7 @@ func (lbaas *LbaasV2) updateSecurityGroup(clusterName string, apiService *v1.Ser
 
 // EnsureLoadBalancerDeleted deletes the specified load balancer
 func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
-	loadBalancerName := cloudprovider.GetLoadBalancerName(service)
+	loadBalancerName := lbaas.GetLoadBalancerName(ctx, clusterName, service)
 	glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v)", clusterName, loadBalancerName)
 
 	loadbalancer, err := getLoadbalancerByName(lbaas.lb, loadBalancerName)
@@ -87,6 +87,9 @@ func TestRoutes(t *testing.T) {
 
 func getServers(os *OpenStack) []servers.Server {
 	c, err := os.NewComputeV2()
+	if err != nil {
+		panic(err)
+	}
 	allPages, err := servers.List(c, servers.ListOpts{}).AllPages()
 	if err != nil {
 		panic(err)
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_test.go: 12 lines changed (generated, vendored)
@@ -104,6 +104,7 @@ func TestReadConfig(t *testing.T) {
 auth-url = http://auth.url
 user-id = user
 tenant-name = demo
+region = RegionOne
 [LoadBalancer]
 create-monitor = yes
 monitor-delay = 1m
@@ -136,6 +137,10 @@ func TestReadConfig(t *testing.T) {
 		t.Errorf("incorrect tenant name: %s", cfg.Global.TenantName)
 	}
 
+	if cfg.Global.Region != "RegionOne" {
+		t.Errorf("incorrect region: %s", cfg.Global.Region)
+	}
+
 	if !cfg.LoadBalancer.CreateMonitor {
 		t.Errorf("incorrect lb.createmonitor: %t", cfg.LoadBalancer.CreateMonitor)
 	}
@@ -439,6 +444,10 @@ func TestNodeAddresses(t *testing.T) {
 			},
 		},
 	},
+	Metadata: map[string]string{
+		"name":       "a1-yinvcez57-0-bvynoyawrhcg-kube-minion-fg5i4jwcc2yy",
+		TypeHostName: "a1-yinvcez57-0-bvynoyawrhcg-kube-minion-fg5i4jwcc2yy.novalocal",
+	},
 }
 
 addrs, err := nodeAddresses(&srv)
@@ -457,6 +466,7 @@ func TestNodeAddresses(t *testing.T) {
 		{Type: v1.NodeExternalIP, Address: "50.56.176.35"},
 		{Type: v1.NodeExternalIP, Address: "50.56.176.36"},
 		{Type: v1.NodeExternalIP, Address: "50.56.176.99"},
+		{Type: v1.NodeHostName, Address: "a1-yinvcez57-0-bvynoyawrhcg-kube-minion-fg5i4jwcc2yy.novalocal"},
 	}
 
 	if !reflect.DeepEqual(want, addrs) {
@@ -554,7 +564,7 @@ func TestVolumes(t *testing.T) {
 	tags := map[string]string{
 		"test": "value",
 	}
-	vol, _, _, err := os.CreateVolume("kubernetes-test-volume-"+rand.String(10), 1, "", "", &tags)
+	vol, _, _, _, err := os.CreateVolume("kubernetes-test-volume-"+rand.String(10), 1, "", "", &tags)
 	if err != nil {
 		t.Fatalf("Cannot create a new Cinder volume: %v", err)
 	}
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go: 23 lines changed (generated, vendored)
@@ -235,6 +235,7 @@ func (volumes *VolumesV3) getVolume(volumeID string) (Volume, error) {
 		ID:     volumeV3.ID,
 		Name:   volumeV3.Name,
 		Status: volumeV3.Status,
+		Size:   volumeV3.Size,
 	}
 
 	if len(volumeV3.Attachments) > 0 {
@@ -411,13 +412,15 @@ func (os *OpenStack) ExpandVolume(volumeID string, oldSize resource.Quantity, ne
 		return oldSize, fmt.Errorf("volume status is not available")
 	}
 
-	volSizeBytes := newSize.Value()
 	// Cinder works with gigabytes, convert to GiB with rounding up
-	volSizeGB := int(volumeutil.RoundUpSize(volSizeBytes, 1024*1024*1024))
-	newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", volSizeGB))
+	volSizeGiB, err := volumeutil.RoundUpToGiBInt(newSize)
+	if err != nil {
+		return oldSize, err
+	}
+	newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", volSizeGiB))
 
 	// if volume size equals to or greater than the newSize, return nil
-	if volume.Size >= volSizeGB {
+	if volume.Size >= volSizeGiB {
 		return newSizeQuant, nil
 	}
 
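RoundUpToGiBInt replaces the hand-rolled RoundUpSize call, but the underlying arithmetic is the same: integer ceiling division of the byte count by 2^30. A small worked example (a sketch, not the volumeutil implementation):

```go
package main

import "fmt"

const GiB = 1024 * 1024 * 1024

// roundUpToGiB performs integer ceiling division by 2^30: any partial GiB
// rounds the result up to the next whole GiB.
func roundUpToGiB(sizeBytes int64) int64 {
	return (sizeBytes + GiB - 1) / GiB
}

func main() {
	fmt.Println(roundUpToGiB(1))       // 1: a single byte still costs a full GiB
	fmt.Println(roundUpToGiB(GiB))     // 1: exact multiples are unchanged
	fmt.Println(roundUpToGiB(GiB + 1)) // 2: one byte over rounds up
	fmt.Println(roundUpToGiB(5 * GiB)) // 5
}
```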
@@ -426,7 +429,7 @@ func (os *OpenStack) ExpandVolume(volumeID string, oldSize resource.Quantity, ne
 		return oldSize, err
 	}
 
-	err = volumes.expandVolume(volumeID, volSizeGB)
+	err = volumes.expandVolume(volumeID, volSizeGiB)
 	if err != nil {
 		return oldSize, err
 	}
@@ -443,10 +446,10 @@ func (os *OpenStack) getVolume(volumeID string) (Volume, error) {
 }
 
 // CreateVolume creates a volume of given size (in GiB)
-func (os *OpenStack) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error) {
+func (os *OpenStack) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, string, bool, error) {
 	volumes, err := os.volumeService("")
 	if err != nil {
-		return "", "", os.bsOpts.IgnoreVolumeAZ, fmt.Errorf("unable to initialize cinder client for region: %s, err: %v", os.region, err)
+		return "", "", "", os.bsOpts.IgnoreVolumeAZ, fmt.Errorf("unable to initialize cinder client for region: %s, err: %v", os.region, err)
 	}
 
 	opts := volumeCreateOpts{
@@ -462,11 +465,11 @@ func (os *OpenStack) CreateVolume(name string, size int, vtype, availability str
 	volumeID, volumeAZ, err := volumes.createVolume(opts)
 
 	if err != nil {
-		return "", "", os.bsOpts.IgnoreVolumeAZ, fmt.Errorf("failed to create a %d GB volume: %v", size, err)
+		return "", "", "", os.bsOpts.IgnoreVolumeAZ, fmt.Errorf("failed to create a %d GB volume: %v", size, err)
 	}
 
-	glog.Infof("Created volume %v in Availability Zone: %v Ignore volume AZ: %v", volumeID, volumeAZ, os.bsOpts.IgnoreVolumeAZ)
-	return volumeID, volumeAZ, os.bsOpts.IgnoreVolumeAZ, nil
+	glog.Infof("Created volume %v in Availability Zone: %v Region: %v Ignore volume AZ: %v", volumeID, volumeAZ, os.region, os.bsOpts.IgnoreVolumeAZ)
+	return volumeID, volumeAZ, os.region, os.bsOpts.IgnoreVolumeAZ, nil
 }
 
 // GetDevicePathBySerialID returns the path of an attached block storage volume, specified by its id.
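Callers of CreateVolume now receive five values, with the region inserted between the availability zone and the ignore-AZ flag, as the test change above reflects. A hypothetical caller inside this package (createTestVolume is an invented name; the receiver and glog come from the vendored package):

```go
// createTestVolume sketches the widened call site: the region is now the
// third return value, before the ignore-AZ flag and the error.
func createTestVolume(os *OpenStack) (string, error) {
	tags := map[string]string{"test": "value"}
	volumeID, volumeAZ, volumeRegion, ignoreAZ, err := os.CreateVolume("kubernetes-test-volume", 1, "", "", &tags)
	if err != nil {
		return "", err
	}
	glog.Infof("created %s (AZ: %s, region: %s, ignore AZ: %v)", volumeID, volumeAZ, volumeRegion, ignoreAZ)
	return volumeID, nil
}
```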