Bumping k8s dependencies to 1.13

Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions

View File

@@ -21,18 +21,19 @@ go_library(
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/file:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
@@ -43,6 +44,7 @@ go_test(
"attacher_test.go",
"gce_pd_block_test.go",
"gce_pd_test.go",
"gce_util_test.go",
],
embed = [":go_default_library"],
deps = [
@@ -51,14 +53,14 @@ go_test(
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
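
In both rules the k8s.io dependencies now resolve through //staging/src instead of //vendor, matching the upstream Bazel layout in which the staging trees are the source of truth for the published k8s.io repos; only code vendored from outside the main repo, such as github.com/golang/glog and k8s.io/utils, keeps its vendor path.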

View File

@@ -29,7 +29,6 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
@@ -41,8 +40,12 @@ type gcePersistentDiskAttacher struct {
var _ volume.Attacher = &gcePersistentDiskAttacher{}
var _ volume.DeviceMounter = &gcePersistentDiskAttacher{}
var _ volume.AttachableVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.DeviceMountableVolumePlugin = &gcePersistentDiskPlugin{}
func (plugin *gcePersistentDiskPlugin) NewAttacher() (volume.Attacher, error) {
gceCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
@@ -55,9 +58,13 @@ func (plugin *gcePersistentDiskPlugin) NewAttacher() (volume.Attacher, error) {
}, nil
}
func (plugin *gcePersistentDiskPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
func (plugin *gcePersistentDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
return mounter.GetMountRefs(deviceMountPath)
}
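
GetMountRefs moved from a package-level helper in pkg/util/mount onto mount.Interface itself, which is why the pkg/util/mount import is dropped at the top of this file; the old line passed the mounter into the helper, the new line calls the method directly. A minimal call-site sketch (names as in the function above):

	mounter := plugin.host.GetMounter(plugin.GetPluginName())
	// before: refs, err := mount.GetMountRefs(mounter, deviceMountPath)
	refs, err := mounter.GetMountRefs(deviceMountPath)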
// Attach checks with the GCE cloud provider if the specified volume is already
@@ -158,7 +165,7 @@ func (attacher *gcePersistentDiskAttacher) WaitForAttach(spec *volume.Spec, devi
select {
case <-ticker.C:
glog.V(5).Infof("Checking GCE PD %q is attached.", pdName)
path, err := verifyDevicePath(devicePaths, sdBeforeSet)
path, err := verifyDevicePath(devicePaths, sdBeforeSet, pdName)
if err != nil {
// Log error, if any, and continue checking periodically. See issue #11321
glog.Errorf("Error verifying GCE PD (%q) is attached: %v", pdName, err)
@@ -227,6 +234,8 @@ type gcePersistentDiskDetacher struct {
var _ volume.Detacher = &gcePersistentDiskDetacher{}
var _ volume.DeviceUnmounter = &gcePersistentDiskDetacher{}
func (plugin *gcePersistentDiskPlugin) NewDetacher() (volume.Detacher, error) {
gceCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
@@ -239,6 +248,10 @@ func (plugin *gcePersistentDiskPlugin) NewDetacher() (volume.Detacher, error) {
}, nil
}
func (plugin *gcePersistentDiskPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}
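
The delegation above is deliberate: the existing GCE PD attacher and detacher already satisfy the DeviceMounter and DeviceUnmounter interfaces that were split out of Attacher and Detacher, as the var _ assertions earlier in the file record.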
// Detach checks with the GCE cloud provider if the specified volume is already
// attached to the specified node. If the volume is not attached, it succeeds
// (returns nil). If it is attached, Detach issues a call to the GCE cloud

View File

@@ -32,6 +32,7 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
@@ -303,7 +304,7 @@ func (plugin *gcePersistentDiskPlugin) ConstructVolumeSpec(volumeName, mountPath
// Abstract interface to PD operations.
type pdManager interface {
// Creates a volume
CreateVolume(provisioner *gcePersistentDiskProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error)
CreateVolume(provisioner *gcePersistentDiskProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error)
// Deletes a volume
DeleteVolume(deleter *gcePersistentDiskDeleter) error
}
@@ -471,7 +472,7 @@ func (c *gcePersistentDiskProvisioner) Provision(selectedNode *v1.Node, allowedT
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
}
volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c)
volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c, selectedNode, allowedTopologies)
if err != nil {
return nil, err
}
@@ -480,6 +481,15 @@ func (c *gcePersistentDiskProvisioner) Provision(selectedNode *v1.Node, allowedT
fstype = "ext4"
}
var volumeMode *v1.PersistentVolumeMode
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
volumeMode = c.options.PVC.Spec.VolumeMode
if volumeMode != nil && *volumeMode == v1.PersistentVolumeBlock {
// Block volumes should not have any FSType
fstype = ""
}
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: c.options.PVName,
@@ -492,8 +502,9 @@ func (c *gcePersistentDiskProvisioner) Provision(selectedNode *v1.Node, allowedT
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
AccessModes: c.options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dG", sizeGB)),
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
VolumeMode: volumeMode,
PersistentVolumeSource: v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: volumeID,
@@ -509,17 +520,31 @@ func (c *gcePersistentDiskProvisioner) Provision(selectedNode *v1.Node, allowedT
pv.Spec.AccessModes = c.plugin.GetAccessModes()
}
requirements := make([]v1.NodeSelectorRequirement, 0)
if len(labels) != 0 {
if pv.Labels == nil {
pv.Labels = make(map[string]string)
}
for k, v := range labels {
pv.Labels[k] = v
var values []string
if k == kubeletapis.LabelZoneFailureDomain {
values, err = util.LabelZonesToList(v)
if err != nil {
return nil, fmt.Errorf("failed to convert label string for Zone: %s to a List: %v", v, err)
}
} else {
values = []string{v}
}
requirements = append(requirements, v1.NodeSelectorRequirement{Key: k, Operator: v1.NodeSelectorOpIn, Values: values})
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
pv.Spec.VolumeMode = c.options.PVC.Spec.VolumeMode
}
if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) && len(requirements) > 0 {
pv.Spec.NodeAffinity = new(v1.VolumeNodeAffinity)
pv.Spec.NodeAffinity.Required = new(v1.NodeSelector)
pv.Spec.NodeAffinity.Required.NodeSelectorTerms = make([]v1.NodeSelectorTerm, 1)
pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions = requirements
}
return pv, nil
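
Each PD label becomes one NodeSelectorRequirement in the loop above, with the multi-zone failure-domain label expanded into a list of zones; a worked example of the conversion (zone names illustrative; "__" is the separator util.LabelZonesToList splits on):

	// label: failure-domain.beta.kubernetes.io/zone = "us-central1-a__us-central1-b"
	requirement := v1.NodeSelectorRequirement{
		Key:      kubeletapis.LabelZoneFailureDomain,
		Operator: v1.NodeSelectorOpIn,
		Values:   []string{"us-central1-a", "us-central1-b"},
	}
	// With VolumeScheduling enabled, this lands in
	// pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions.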

View File

@@ -20,6 +20,8 @@ import (
"fmt"
"os"
"path"
"reflect"
"sort"
"testing"
"k8s.io/api/core/v1"
@@ -27,10 +29,12 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
func TestCanSupport(t *testing.T) {
@@ -78,9 +82,10 @@ func TestGetAccessModes(t *testing.T) {
type fakePDManager struct {
}
func (fake *fakePDManager) CreateVolume(c *gcePersistentDiskProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) {
func (fake *fakePDManager) CreateVolume(c *gcePersistentDiskProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) {
labels = make(map[string]string)
labels["fakepdmanager"] = "yes"
labels[kubeletapis.LabelZoneFailureDomain] = "zone1__zone2"
return "test-gce-volume-name", 100, labels, "", nil
}
@@ -91,6 +96,15 @@ func (fake *fakePDManager) DeleteVolume(cd *gcePersistentDiskDeleter) error {
return nil
}
func getNodeSelectorRequirementWithKey(key string, term v1.NodeSelectorTerm) (*v1.NodeSelectorRequirement, error) {
for _, r := range term.MatchExpressions {
if r.Key == key {
return &r, nil
}
}
return nil, fmt.Errorf("key %s not found", key)
}
func TestPlugin(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("gcepdTest")
if err != nil {
@@ -177,12 +191,40 @@ func TestPlugin(t *testing.T) {
}
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
size := cap.Value()
if size != 100*util.GB {
if size != 100*util.GIB {
t.Errorf("Provision() returned unexpected volume size: %v", size)
}
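// For scale: util.GB is 1000^3 bytes while util.GIB is 1024^3 bytes, so the
// expected value is 100*util.GIB = 107374182400, matching the "%dGi" capacity
// string the provisioner now writes instead of "%dG".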
if persistentSpec.Labels["fakepdmanager"] != "yes" {
t.Errorf("Provision() returned unexpected labels: %v", persistentSpec.Labels)
t.Errorf("Provision() returned unexpected value for fakepdmanager: %v", persistentSpec.Labels["fakepdmanager"])
}
if persistentSpec.Labels[kubeletapis.LabelZoneFailureDomain] != "zone1__zone2" {
t.Errorf("Provision() returned unexpected value for %s: %v", kubeletapis.LabelZoneFailureDomain, persistentSpec.Labels[kubeletapis.LabelZoneFailureDomain])
}
if persistentSpec.Spec.NodeAffinity == nil {
t.Errorf("Unexpected nil NodeAffinity found")
}
if len(persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms) != 1 {
t.Errorf("Unexpected number of NodeSelectorTerms")
}
term := persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms[0]
if len(term.MatchExpressions) != 2 {
t.Errorf("Unexpected number of NodeSelectorRequirements in volume NodeAffinity: %d", len(term.MatchExpressions))
}
r, _ := getNodeSelectorRequirementWithKey("fakepdmanager", term)
if r == nil || r.Values[0] != "yes" || r.Operator != v1.NodeSelectorOpIn {
t.Errorf("NodeSelectorRequirement fakepdmanager-in-yes not found in volume NodeAffinity")
}
zones, _ := volumeutil.ZonesToSet("zone1,zone2")
r, _ = getNodeSelectorRequirementWithKey(kubeletapis.LabelZoneFailureDomain, term)
if r == nil {
t.Errorf("NodeSelectorRequirement %s-in-%v not found in volume NodeAffinity", kubeletapis.LabelZoneFailureDomain, zones)
}
sort.Strings(r.Values)
if !reflect.DeepEqual(r.Values, zones.List()) {
t.Errorf("ZoneFailureDomain elements %v does not match zone labels %v", r.Values, zones)
}
// Test Deleter

View File

@@ -20,6 +20,7 @@ import (
"fmt"
"path"
"path/filepath"
"regexp"
"strings"
"time"
@@ -31,6 +32,7 @@ import (
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
utilfile "k8s.io/kubernetes/pkg/util/file"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/utils/exec"
@@ -50,12 +52,19 @@ const (
// Replication type constants must be lower case.
replicationTypeNone = "none"
replicationTypeRegionalPD = "regional-pd"
// scsi_id output should be in the form of:
// 0Google PersistentDisk <disk name>
scsiPattern = `^0Google\s+PersistentDisk\s+([\S]+)\s*$`
)
// These variables are modified only in unit tests and should be constant
// otherwise.
var (
// errorSleepDuration is modified only in unit tests and should be constant
// otherwise.
errorSleepDuration time.Duration = 5 * time.Second
// regex to parse scsi_id output and extract the serial
scsiRegex = regexp.MustCompile(scsiPattern)
)
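// Example: scsi_id output "0Google  PersistentDisk  my-disk\n" matches
// scsiPattern and captures "my-disk" as the serial; the trailing \s*$ also
// absorbs the newline.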
type GCEDiskUtil struct{}
@@ -78,7 +87,7 @@ func (util *GCEDiskUtil) DeleteVolume(d *gcePersistentDiskDeleter) error {
// CreateVolume creates a GCE PD.
// Returns: gcePDName, volumeSizeGB, labels, fsType, error
func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (string, int, map[string]string, string, error) {
func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (string, int, map[string]string, string, error) {
cloud, err := getCloudProvider(c.gcePersistentDisk.plugin.host.GetCloudProvider())
if err != nil {
return "", 0, nil, "", err
@@ -86,8 +95,8 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
name := volumeutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
// GCE PDs are allocated in chunks of GBs (not GiBs)
requestGB := volumeutil.RoundUpToGB(capacity)
// GCE PDs are allocated in chunks of GiBs
requestGB := volumeutil.RoundUpToGiB(capacity)
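// Example: a PVC request of "100G" (100 * 10^9 bytes) rounds up to
// ceil(100e9 / 2^30) = 94, so requestGB is 94 GiB here (the variable keeps
// its old GB-flavored name).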
// Apply Parameters.
// Values for parameter "replication-type" are canonicalized to lower case.
@@ -95,7 +104,7 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
// to the cloud provider.
diskType := ""
configuredZone := ""
configuredZones := ""
var configuredZones sets.String
zonePresent := false
zonesPresent := false
replicationType := replicationTypeNone
@@ -109,7 +118,10 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
configuredZone = v
case "zones":
zonesPresent = true
configuredZones = v
configuredZones, err = volumeutil.ZonesToSet(v)
if err != nil {
return "", 0, nil, "", err
}
case "replication-type":
if !utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) {
return "", 0, nil, "",
@@ -124,76 +136,49 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
}
}
if zonePresent && zonesPresent {
return "", 0, nil, "", fmt.Errorf("the 'zone' and 'zones' StorageClass parameters must not be used at the same time")
}
if replicationType == replicationTypeRegionalPD && zonePresent {
// If a user accidentally types 'zone' instead of 'zones', we want to throw an error
// instead of assuming that 'zones' is empty and proceed by randomly selecting zones.
return "", 0, nil, "", fmt.Errorf("the '%s' replication type does not support the 'zone' parameter; use 'zones' instead", replicationTypeRegionalPD)
}
// TODO: implement PVC.Selector parsing
if c.options.PVC.Spec.Selector != nil {
return "", 0, nil, "", fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on GCE")
}
var activezones sets.String
activezones, err = cloud.GetAllCurrentZones()
if err != nil {
return "", 0, nil, "", err
}
switch replicationType {
case replicationTypeRegionalPD:
err = createRegionalPD(
name,
c.options.PVC.Name,
diskType,
configuredZones,
requestGB,
c.options.CloudTags,
cloud)
selectedZones, err := volumeutil.SelectZonesForVolume(zonePresent, zonesPresent, configuredZone, configuredZones, activezones, node, allowedTopologies, c.options.PVC.Name, maxRegionalPDZones)
if err != nil {
glog.V(2).Infof("Error selecting zones for regional GCE PD volume: %v", err)
return "", 0, nil, "", err
}
if err = cloud.CreateRegionalDisk(
name,
diskType,
selectedZones,
int64(requestGB),
*c.options.CloudTags); err != nil {
glog.V(2).Infof("Error creating regional GCE PD volume: %v", err)
return "", 0, nil, "", err
}
glog.V(2).Infof("Successfully created Regional GCE PD volume %s", name)
case replicationTypeNone:
var zones sets.String
if !zonePresent && !zonesPresent {
// 00 - neither "zone" or "zones" specified
// Pick a zone randomly selected from all active zones where
// Kubernetes cluster has a node.
zones, err = cloud.GetAllCurrentZones()
if err != nil {
glog.V(2).Infof("error getting zone information from GCE: %v", err)
return "", 0, nil, "", err
}
} else if !zonePresent && zonesPresent {
// 01 - "zones" specified
// Pick a zone randomly selected from specified set.
if zones, err = volumeutil.ZonesToSet(configuredZones); err != nil {
return "", 0, nil, "", err
}
} else if zonePresent && !zonesPresent {
// 10 - "zone" specified
// Use specified zone
if err := volumeutil.ValidateZone(configuredZone); err != nil {
return "", 0, nil, "", err
}
zones = make(sets.String)
zones.Insert(configuredZone)
}
zone := volumeutil.ChooseZoneForVolume(zones, c.options.PVC.Name)
selectedZone, err := volumeutil.SelectZoneForVolume(zonePresent, zonesPresent, configuredZone, configuredZones, activezones, node, allowedTopologies, c.options.PVC.Name)
if err != nil {
return "", 0, nil, "", err
}
if err := cloud.CreateDisk(
name,
diskType,
zone,
selectedZone,
int64(requestGB),
*c.options.CloudTags); err != nil {
glog.V(2).Infof("Error creating single-zone GCE PD volume: %v", err)
return "", 0, nil, "", err
}
glog.V(2).Infof("Successfully created single-zone GCE PD volume %s", name)
default:
@@ -209,61 +194,12 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
return name, int(requestGB), labels, fstype, nil
}
// Creates a Regional PD
func createRegionalPD(
diskName string,
pvcName string,
diskType string,
zonesString string,
requestGB int64,
cloudTags *map[string]string,
cloud *gcecloud.GCECloud) error {
var replicaZones sets.String
var err error
if zonesString == "" {
// Consider all zones
replicaZones, err = cloud.GetAllCurrentZones()
if err != nil {
glog.V(2).Infof("error getting zone information from GCE: %v", err)
return err
}
} else {
replicaZones, err = volumeutil.ZonesToSet(zonesString)
if err != nil {
return err
}
}
zoneCount := replicaZones.Len()
var selectedReplicaZones sets.String
if zoneCount < maxRegionalPDZones {
return fmt.Errorf("cannot specify only %d zone(s) for Regional PDs.", zoneCount)
} else if zoneCount == maxRegionalPDZones {
selectedReplicaZones = replicaZones
} else {
// Must randomly select zones
selectedReplicaZones = volumeutil.ChooseZonesForVolume(
replicaZones, pvcName, maxRegionalPDZones)
}
if err = cloud.CreateRegionalDisk(
diskName,
diskType,
selectedReplicaZones,
int64(requestGB),
*cloudTags); err != nil {
return err
}
return nil
}
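
The deleted helper is not lost behavior: its zone-count check against maxRegionalPDZones and its pseudo-random zone selection moved, in broadly the same form, into volumeutil.SelectZonesForVolume, which the regional-PD branch of CreateVolume now calls before cloud.CreateRegionalDisk.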
// Returns the first path that exists, or empty string if none exist.
func verifyDevicePath(devicePaths []string, sdBeforeSet sets.String) (string, error) {
func verifyDevicePath(devicePaths []string, sdBeforeSet sets.String, diskName string) (string, error) {
if err := udevadmChangeToNewDrives(sdBeforeSet); err != nil {
// udevadm errors should not block disk detachment, log and continue
// It's possible udevadm was called on other disks so it should not block this
// call. If it did fail on this disk, then the devicePath will either
// not exist or be wrong. If it's wrong, then the scsi_id check below will fail.
glog.Errorf("udevadmChangeToNewDrives failed with: %v", err)
}
@@ -271,6 +207,22 @@ func verifyDevicePath(devicePaths []string, sdBeforeSet sets.String) (string, er
if pathExists, err := volumeutil.PathExists(path); err != nil {
return "", fmt.Errorf("Error checking if path exists: %v", err)
} else if pathExists {
// validate that the path actually resolves to the correct disk
serial, err := getScsiSerial(path, diskName)
if err != nil {
return "", fmt.Errorf("failed to get scsi serial %v", err)
}
if serial != diskName {
// The device link is not pointing to the correct device
// Trigger udev on this device to try to fix the link
if udevErr := udevadmChangeToDrive(path); udevErr != nil {
glog.Errorf("udevadmChangeToDrive %q failed with: %v", path, udevErr)
}
// Return error to retry WaitForAttach and verifyDevicePath
return "", fmt.Errorf("scsi_id serial %q for device %q doesn't match disk %q", serial, path, diskName)
}
// The device link is correct
return path, nil
}
}
@@ -278,22 +230,38 @@ func verifyDevicePath(devicePaths []string, sdBeforeSet sets.String) (string, er
return "", nil
}
// Returns the first path that exists, or empty string if none exist.
func verifyAllPathsRemoved(devicePaths []string) (bool, error) {
allPathsRemoved := true
for _, path := range devicePaths {
if err := udevadmChangeToDrive(path); err != nil {
// udevadm errors should not block disk detachment, log and continue
glog.Errorf("%v", err)
}
if exists, err := volumeutil.PathExists(path); err != nil {
return false, fmt.Errorf("Error checking if path exists: %v", err)
} else {
allPathsRemoved = allPathsRemoved && !exists
}
}
return allPathsRemoved, nil
}
// Calls scsi_id on the given devicePath to get the serial number reported by that device.
func getScsiSerial(devicePath, diskName string) (string, error) {
exists, err := utilfile.FileExists("/lib/udev/scsi_id")
if err != nil {
return "", fmt.Errorf("failed to check scsi_id existence: %v", err)
}
if !exists {
glog.V(6).Infof("scsi_id doesn't exist; skipping check for %v", devicePath)
return diskName, nil
}
out, err := exec.New().Command(
"/lib/udev/scsi_id",
"--page=0x83",
"--whitelisted",
fmt.Sprintf("--device=%v", devicePath)).CombinedOutput()
if err != nil {
return "", fmt.Errorf("scsi_id failed for device %q with %v.", devicePath, err)
}
return parseScsiSerial(string(out))
}
// Parse the output returned by scsi_id and extract the serial number
func parseScsiSerial(output string) (string, error) {
substrings := scsiRegex.FindStringSubmatch(output)
if substrings == nil {
return "", fmt.Errorf("scsi_id output cannot be parsed: %q", output)
}
return substrings[1], nil
}
// Returns list of all /dev/disk/by-id/* paths for given PD.

View File

@@ -0,0 +1,62 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gce_pd
import "testing"
func TestParseScsiSerial(t *testing.T) {
cases := []struct {
name string
output string
diskName string
expectErr bool
}{
{
name: "valid",
output: "0Google PersistentDisk test-disk",
diskName: "test-disk",
},
{
name: "valid with newline",
output: "0Google PersistentDisk test-disk\n",
diskName: "test-disk",
},
{
name: "invalid prefix",
output: "00Google PersistentDisk test-disk",
expectErr: true,
},
{
name: "invalid suffix",
output: "0Google PersistentDisk test-disk more",
expectErr: true,
},
}
for _, test := range cases {
serial, err := parseScsiSerial(test.output)
if err != nil && !test.expectErr {
t.Errorf("test %v failed: %v", test.name, err)
}
if err == nil && test.expectErr {
t.Errorf("test %q failed: got success", test.name)
}
if serial != test.diskName {
t.Errorf("test %v failed: expected serial %q, got %q", test.name, test.diskName, serial)
}
}
}