Bumping k8s dependencies to 1.13
vendor/k8s.io/kubernetes/pkg/volume/util/util.go (generated, vendored): 240 changed lines
@@ -39,6 +39,7 @@ import (
    "k8s.io/kubernetes/pkg/features"
    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
    "k8s.io/kubernetes/pkg/util/mount"
    utilstrings "k8s.io/kubernetes/pkg/util/strings"
    "k8s.io/kubernetes/pkg/volume"

    "reflect"
@@ -79,6 +80,16 @@ const (
    VolumeDynamicallyCreatedByKey = "kubernetes.io/createdby"
)

// VolumeZoneConfig contains config information about zonal volume.
type VolumeZoneConfig struct {
    ZonePresent                bool
    ZonesPresent               bool
    ReplicaZoneFromNodePresent bool
    Zone                       string
    Zones                      string
    ReplicaZoneFromNode        string
}

// IsReady checks for the existence of a regular file
// called 'ready' in the given directory and returns
// true if that file exists.
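
For orientation, a minimal sketch of how a provisioner might fill the new VolumeZoneConfig from StorageClass parameters. The params map, the buildZoneConfig name, and the volumeutil import alias are assumptions for illustration, not part of this commit.

// Illustrative only: copy zone-related StorageClass parameters into VolumeZoneConfig.
// Assumes: import volumeutil "k8s.io/kubernetes/pkg/volume/util"
func buildZoneConfig(params map[string]string) volumeutil.VolumeZoneConfig {
    cfg := volumeutil.VolumeZoneConfig{}
    if zone, ok := params["zone"]; ok {
        cfg.ZonePresent = true
        cfg.Zone = zone
    }
    if zones, ok := params["zones"]; ok {
        cfg.ZonesPresent = true
        cfg.Zones = zones
    }
    return cfg
}
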
@@ -132,7 +143,7 @@ func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMount
        glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath)
        return nil
    }
    corruptedMnt := isCorruptedMnt(pathErr)
    corruptedMnt := IsCorruptedMnt(pathErr)
    if pathErr != nil && !corruptedMnt {
        return fmt.Errorf("Error checking path: %v", pathErr)
    }
@@ -188,15 +199,15 @@ func PathExists(path string) (bool, error) {
        return true, nil
    } else if os.IsNotExist(err) {
        return false, nil
    } else if isCorruptedMnt(err) {
    } else if IsCorruptedMnt(err) {
        return true, err
    } else {
        return false, err
    }
}

// isCorruptedMnt return true if err is about corrupted mount point
func isCorruptedMnt(err error) bool {
// IsCorruptedMnt return true if err is about corrupted mount point
func IsCorruptedMnt(err error) bool {
    if err == nil {
        return false
    }
@@ -211,7 +222,8 @@ func isCorruptedMnt(err error) bool {
    case *os.SyscallError:
        underlyingError = pe.Err
    }
    return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE

    return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO
}

// GetSecretForPod locates secret by name in the pod's namespace and returns secret map
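
The rename exports IsCorruptedMnt, and the added syscall.EIO case treats I/O errors as corrupted mounts as well. A minimal sketch of the calling pattern UnmountMountPoint uses above, assuming the package is imported as volumeutil; the helper name here is hypothetical.

// Illustrative only; mirrors the UnmountMountPoint pattern above.
// Assumes: import volumeutil "k8s.io/kubernetes/pkg/volume/util"
func canCleanupMountPoint(mountPath string) (bool, error) {
    exists, pathErr := volumeutil.PathExists(mountPath)
    if pathErr != nil && !volumeutil.IsCorruptedMnt(pathErr) {
        return false, fmt.Errorf("error checking path: %v", pathErr)
    }
    // A corrupted mount (ENOTCONN, ESTALE, and now EIO) still reports exists=true,
    // so the caller proceeds to unmount and clean up the stale mount point.
    return exists, nil
}
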
@@ -308,13 +320,128 @@ func LoadPodFromFile(filePath string) (*v1.Pod, error) {
    return pod, nil
}

// SelectZoneForVolume is a wrapper around SelectZonesForVolume
// to select a single zone for a volume based on parameters
func SelectZoneForVolume(zoneParameterPresent, zonesParameterPresent bool, zoneParameter string, zonesParameter, zonesWithNodes sets.String, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm, pvcName string) (string, error) {
    zones, err := SelectZonesForVolume(zoneParameterPresent, zonesParameterPresent, zoneParameter, zonesParameter, zonesWithNodes, node, allowedTopologies, pvcName, 1)
    if err != nil {
        return "", err
    }
    zone, ok := zones.PopAny()
    if !ok {
        return "", fmt.Errorf("could not determine a zone to provision volume in")
    }
    return zone, nil
}

// SelectZonesForVolume selects zones for a volume based on several factors:
// node.zone, allowedTopologies, zone/zones parameters from storageclass,
// zones with active nodes from the cluster. The number of zones = replicas.
func SelectZonesForVolume(zoneParameterPresent, zonesParameterPresent bool, zoneParameter string, zonesParameter, zonesWithNodes sets.String, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm, pvcName string, numReplicas uint32) (sets.String, error) {
    if zoneParameterPresent && zonesParameterPresent {
        return nil, fmt.Errorf("both zone and zones StorageClass parameters must not be used at the same time")
    }

    var zoneFromNode string
    // pick one zone from node if present
    if node != nil {
        // VolumeScheduling implicit since node is not nil
        if zoneParameterPresent || zonesParameterPresent {
            return nil, fmt.Errorf("zone[s] cannot be specified in StorageClass if VolumeBindingMode is set to WaitForFirstConsumer. Please specify allowedTopologies in StorageClass for constraining zones")
        }

        // pick node's zone for one of the replicas
        var ok bool
        zoneFromNode, ok = node.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain]
        if !ok {
            return nil, fmt.Errorf("%s Label for node missing", kubeletapis.LabelZoneFailureDomain)
        }
        // if single replica volume and node with zone found, return immediately
        if numReplicas == 1 {
            return sets.NewString(zoneFromNode), nil
        }
    }

    // pick zone from allowedZones if specified
    allowedZones, err := ZonesFromAllowedTopologies(allowedTopologies)
    if err != nil {
        return nil, err
    }

    if (len(allowedTopologies) > 0) && (allowedZones.Len() == 0) {
        return nil, fmt.Errorf("no matchLabelExpressions with %s key found in allowedTopologies. Please specify matchLabelExpressions with %s key", kubeletapis.LabelZoneFailureDomain, kubeletapis.LabelZoneFailureDomain)
    }

    if allowedZones.Len() > 0 {
        // VolumeScheduling implicit since allowedZones present
        if zoneParameterPresent || zonesParameterPresent {
            return nil, fmt.Errorf("zone[s] cannot be specified in StorageClass if allowedTopologies specified")
        }
        // scheduler will guarantee if node != null above, zoneFromNode is member of allowedZones.
        // so if zoneFromNode != "", we can safely assume it is part of allowedZones.
        if zones, err := chooseZonesForVolumeIncludingZone(allowedZones, pvcName, zoneFromNode, numReplicas); err != nil {
            return nil, fmt.Errorf("cannot process zones in allowedTopologies: %v", err)
        } else {
            return zones, nil
        }
    }

    // pick zone from parameters if present
    if zoneParameterPresent {
        if numReplicas > 1 {
            return nil, fmt.Errorf("zone cannot be specified if desired number of replicas for pv is greather than 1. Please specify zones or allowedTopologies to specify desired zones")
        }
        return sets.NewString(zoneParameter), nil
    }

    if zonesParameterPresent {
        if uint32(zonesParameter.Len()) < numReplicas {
            return nil, fmt.Errorf("not enough zones found in zones parameter to provision a volume with %d replicas. Found %d zones, need %d zones", numReplicas, zonesParameter.Len(), numReplicas)
        }
        // directly choose from zones parameter; no zone from node need to be considered
        return ChooseZonesForVolume(zonesParameter, pvcName, numReplicas), nil
    }

    // pick zone from zones with nodes
    if zonesWithNodes.Len() > 0 {
        // If node != null (and thus zoneFromNode != ""), zoneFromNode will be member of zonesWithNodes
        if zones, err := chooseZonesForVolumeIncludingZone(zonesWithNodes, pvcName, zoneFromNode, numReplicas); err != nil {
            return nil, fmt.Errorf("cannot process zones where nodes exist in the cluster: %v", err)
        } else {
            return zones, nil
        }
    }
    return nil, fmt.Errorf("cannot determine zones to provision volume in")
}

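
A hedged example of calling the new SelectZonesForVolume from a provisioner with immediate binding (no node, no zone/zones parameters, no allowedTopologies); the claim name, zone names, wrapper function, and volumeutil alias are hypothetical.

// Illustrative only: choose 2 replica zones from the zones that currently have nodes.
func pickReplicaZones(zonesWithNodes sets.String) (sets.String, error) {
    return volumeutil.SelectZonesForVolume(
        false, false, // zoneParameterPresent, zonesParameterPresent
        "", nil, // zoneParameter, zonesParameter
        zonesWithNodes,
        nil,             // node: nil unless VolumeBindingMode is WaitForFirstConsumer
        nil,             // allowedTopologies: none
        "pvc-data-db-0", // hypothetical claim name
        2)               // numReplicas
}
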
// ZonesFromAllowedTopologies returns a list of zones specified in allowedTopologies
func ZonesFromAllowedTopologies(allowedTopologies []v1.TopologySelectorTerm) (sets.String, error) {
    zones := make(sets.String)
    for _, term := range allowedTopologies {
        for _, exp := range term.MatchLabelExpressions {
            if exp.Key == kubeletapis.LabelZoneFailureDomain {
                for _, value := range exp.Values {
                    zones.Insert(value)
                }
            } else {
                return nil, fmt.Errorf("unsupported key found in matchLabelExpressions: %s", exp.Key)
            }
        }
    }
    return zones, nil
}

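
ZonesFromAllowedTopologies only accepts the zone failure-domain key; any other key is an error. A small sketch with illustrative zone names, assuming the core/v1, kubeletapis, and volumeutil imports shown in the import hunk above.

// Illustrative: extract the allowed zones from a StorageClass allowedTopologies block.
allowedTopologies := []v1.TopologySelectorTerm{{
    MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{{
        Key:    kubeletapis.LabelZoneFailureDomain, // "failure-domain.beta.kubernetes.io/zone"
        Values: []string{"us-central1-a", "us-central1-b"},
    }},
}}
zones, err := volumeutil.ZonesFromAllowedTopologies(allowedTopologies)
// err == nil; zones.List() == ["us-central1-a", "us-central1-b"].
// A term using any other key returns the "unsupported key" error instead.
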
func ZonesSetToLabelValue(strSet sets.String) string {
    return strings.Join(strSet.UnsortedList(), kubeletapis.LabelMultiZoneDelimiter)
}

// ZonesToSet converts a string containing a comma separated list of zones to set
func ZonesToSet(zonesString string) (sets.String, error) {
    return stringToSet(zonesString, ",")
    zones, err := stringToSet(zonesString, ",")
    if err != nil {
        return nil, fmt.Errorf("error parsing zones %s, must be strings separated by commas: %v", zonesString, err)
    }
    return zones, nil
}

// LabelZonesToSet converts a PV label value from string containing a delimited list of zones to set
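
ZonesToSet parses the comma-separated form used in StorageClass parameters, and ZonesSetToLabelValue joins a set with the multi-zone delimiter used in PV zone labels. A brief sketch with illustrative values, assuming the volumeutil alias:

zoneSet, err := volumeutil.ZonesToSet("us-east-1a,us-east-1b")
if err != nil {
    return err // an empty element between commas is rejected
}
// LabelMultiZoneDelimiter is "__", so the PV label value is e.g. "us-east-1a__us-east-1b"
// (UnsortedList, so the order is not guaranteed).
fmt.Println(volumeutil.ZonesSetToLabelValue(zoneSet))
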
@@ -322,7 +449,7 @@ func LabelZonesToSet(labelZonesValue string) (sets.String, error) {
    return stringToSet(labelZonesValue, kubeletapis.LabelMultiZoneDelimiter)
}

// StringToSet converts a string containing list separated by specified delimiter to to a set
// StringToSet converts a string containing list separated by specified delimiter to a set
func stringToSet(str, delimiter string) (sets.String, error) {
    zonesSlice := strings.Split(str, delimiter)
    zonesSet := make(sets.String)
@@ -339,6 +466,27 @@ func stringToSet(str, delimiter string) (sets.String, error) {
    return zonesSet, nil
}

// LabelZonesToList converts a PV label value from string containing a delimited list of zones to list
func LabelZonesToList(labelZonesValue string) ([]string, error) {
    return stringToList(labelZonesValue, kubeletapis.LabelMultiZoneDelimiter)
}

// StringToList converts a string containing list separated by specified delimiter to a list
func stringToList(str, delimiter string) ([]string, error) {
    zonesSlice := make([]string, 0)
    for _, zone := range strings.Split(str, delimiter) {
        trimmedZone := strings.TrimSpace(zone)
        if trimmedZone == "" {
            return nil, fmt.Errorf(
                "%q separated list (%q) must not contain an empty string",
                delimiter,
                str)
        }
        zonesSlice = append(zonesSlice, trimmedZone)
    }
    return zonesSlice, nil
}

// CalculateTimeoutForVolume calculates time for a Recycler pod to complete a
// recycle operation. The calculation and return value is either the
// minimumTimeout or the timeoutIncrement per Gi of storage size, whichever is
@@ -361,7 +509,11 @@ func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.Pers
// RoundUpSize(1500 * 1024*1024, 1024*1024*1024) returns '2'
// (2 GiB is the smallest allocatable volume that can hold 1500MiB)
func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
    return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
    roundedUp := volumeSizeBytes / allocationUnitBytes
    if volumeSizeBytes%allocationUnitBytes > 0 {
        roundedUp += 1
    }
    return roundedUp
}

// RoundUpToGB rounds up given quantity to chunks of GB
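
The RoundUpSize rewrite divides first and only bumps the result when there is a remainder, presumably to avoid overflowing int64 when volumeSizeBytes is near the maximum; for ordinary sizes the result matches the old formula. A worked example following the doc comment, with the volumeutil alias assumed:

const GiB = 1024 * 1024 * 1024
fmt.Println(volumeutil.RoundUpSize(1500*1024*1024, GiB)) // 2: 1500MiB needs two 1GiB units
fmt.Println(volumeutil.RoundUpSize(2*GiB, GiB))          // 2: exact multiples are not rounded up
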
@@ -376,6 +528,32 @@ func RoundUpToGiB(size resource.Quantity) int64 {
    return RoundUpSize(requestBytes, GIB)
}

// RoundUpSizeInt calculates how many allocation units are needed to accommodate
// a volume of given size. It returns an int instead of an int64 and an error if
// there's overflow
func RoundUpSizeInt(volumeSizeBytes int64, allocationUnitBytes int64) (int, error) {
    roundedUp := RoundUpSize(volumeSizeBytes, allocationUnitBytes)
    roundedUpInt := int(roundedUp)
    if int64(roundedUpInt) != roundedUp {
        return 0, fmt.Errorf("capacity %v is too great, casting results in integer overflow", roundedUp)
    }
    return roundedUpInt, nil
}

// RoundUpToGBInt rounds up given quantity to chunks of GB. It returns an
// int instead of an int64 and an error if there's overflow
func RoundUpToGBInt(size resource.Quantity) (int, error) {
    requestBytes := size.Value()
    return RoundUpSizeInt(requestBytes, GB)
}

// RoundUpToGiBInt rounds up given quantity upto chunks of GiB. It returns an
// int instead of an int64 and an error if there's overflow
func RoundUpToGiBInt(size resource.Quantity) (int, error) {
    requestBytes := size.Value()
    return RoundUpSizeInt(requestBytes, GIB)
}

// GenerateVolumeName returns a PV name with clusterName prefix. The function
// should be used to generate a name of GCE PD or Cinder volume. It basically
// adds "<clusterName>-dynamic-" before the PV name, making sure the resulting
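
The new *Int variants go through RoundUpSizeInt, which reports an error instead of silently truncating when the result does not fit in an int. A sketch using resource.Quantity; the usual k8s.io/apimachinery/pkg/api/resource import and the volumeutil alias are assumed:

size := resource.MustParse("1500Mi")
gib, err := volumeutil.RoundUpToGiBInt(size)
if err != nil {
    return err // only when the rounded value overflows int
}
fmt.Println(gib) // 2
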
@@ -407,6 +585,11 @@ func GetPath(mounter volume.Mounter) (string, error) {
// This means that a StatefulSet's volumes (`claimname-statefulsetname-id`) will spread across available zones,
// assuming the id values are consecutive.
func ChooseZoneForVolume(zones sets.String, pvcName string) string {
    // No zones available, return empty string.
    if zones.Len() == 0 {
        return ""
    }

    // We create the volume in a zone determined by the name
    // Eventually the scheduler will coordinate placement into an available zone
    hash, index := getPVCNameHashAndIndexOffset(pvcName)
@@ -426,8 +609,41 @@ func ChooseZoneForVolume(zones sets.String, pvcName string) string {
    return zone
}

// chooseZonesForVolumeIncludingZone is a wrapper around ChooseZonesForVolume that ensures zoneToInclude is chosen
// zoneToInclude can either be empty in which case it is ignored. If non-empty, zoneToInclude is expected to be member of zones.
// numReplicas is expected to be > 0 and <= zones.Len()
func chooseZonesForVolumeIncludingZone(zones sets.String, pvcName, zoneToInclude string, numReplicas uint32) (sets.String, error) {
    if numReplicas == 0 {
        return nil, fmt.Errorf("invalid number of replicas passed")
    }
    if uint32(zones.Len()) < numReplicas {
        return nil, fmt.Errorf("not enough zones found to provision a volume with %d replicas. Need at least %d distinct zones for a volume with %d replicas", numReplicas, numReplicas, numReplicas)
    }
    if zoneToInclude != "" && !zones.Has(zoneToInclude) {
        return nil, fmt.Errorf("zone to be included: %s needs to be member of set: %v", zoneToInclude, zones)
    }
    if uint32(zones.Len()) == numReplicas {
        return zones, nil
    }
    if zoneToInclude != "" {
        zones.Delete(zoneToInclude)
        numReplicas = numReplicas - 1
    }
    zonesChosen := ChooseZonesForVolume(zones, pvcName, numReplicas)
    if zoneToInclude != "" {
        zonesChosen.Insert(zoneToInclude)
    }
    return zonesChosen, nil
}

// ChooseZonesForVolume is identical to ChooseZoneForVolume, but selects a multiple zones, for multi-zone disks.
func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) sets.String {
    // No zones available, return empty set.
    replicaZones := sets.NewString()
    if zones.Len() == 0 {
        return replicaZones
    }

    // We create the volume in a zone determined by the name
    // Eventually the scheduler will coordinate placement into an available zone
    hash, index := getPVCNameHashAndIndexOffset(pvcName)
@@ -441,7 +657,6 @@ func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) se
    // PVC placement (which could also e.g. avoid putting volumes in overloaded or
    // unhealthy zones)
    zoneSlice := zones.List()
    replicaZones := sets.NewString()

    startingIndex := index * numZones
    for index = startingIndex; index < startingIndex+numZones; index++ {
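
ChooseZonesForVolume derives a hash and index offset from the PVC name and picks numZones entries from the sorted zone list starting at startingIndex = index * numZones, so consecutive StatefulSet ordinals spread across the available zones (per the ChooseZoneForVolume comment above). An illustrative call, assuming the apimachinery sets import and the volumeutil alias; the zone and claim names are hypothetical and the printed result is only an example:

zones := sets.NewString("us-central1-a", "us-central1-b", "us-central1-c")
// Deterministic for a given claim name; "data-db-0", "data-db-1", ... select
// shifted zone groups, spreading a StatefulSet's volumes across zones.
replicaZones := volumeutil.ChooseZonesForVolume(zones, "data-db-0", 2)
fmt.Println(replicaZones.List()) // e.g. [us-central1-a us-central1-c]
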
@@ -661,7 +876,7 @@ func notRunning(statuses []v1.ContainerStatus) bool {
}

// SplitUniqueName splits the unique name to plugin name and volume name strings. It expects the uniqueName to follow
// the fromat plugin_name/volume_name and the plugin name must be namespaced as described by the plugin interface,
// the format plugin_name/volume_name and the plugin name must be namespaced as described by the plugin interface,
// i.e. namespace/plugin containing exactly one '/'. This means the unique name will always be in the form of
// plugin_namespace/plugin/volume_name, see k8s.io/kubernetes/pkg/volume/plugins.go VolumePlugin interface
// description and pkg/volume/util/volumehelper/volumehelper.go GetUniqueVolumeNameFromSpec that constructs
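
The comment fix above clarifies the expected format: plugin_namespace/plugin/volume_name. A hypothetical re-implementation of that documented split, only to illustrate the contract; it is not the vendored code, and the function name is made up.

// splitUniqueNameSketch mirrors the documented format: the first two
// '/'-separated segments are the namespaced plugin name, the rest is the volume name.
func splitUniqueNameSketch(uniqueName string) (pluginName, volumeName string, err error) {
    parts := strings.SplitN(uniqueName, "/", 3)
    if len(parts) != 3 {
        return "", "", fmt.Errorf("cannot split unique volume name %q", uniqueName)
    }
    return parts[0] + "/" + parts[1], parts[2], nil
}

// splitUniqueNameSketch("kubernetes.io/gce-pd/pv-gce-123")
//   -> "kubernetes.io/gce-pd", "pv-gce-123", nil
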
@@ -704,6 +919,11 @@ func GetPersistentVolumeClaimVolumeMode(claim *v1.PersistentVolumeClaim) (v1.Per
    return "", fmt.Errorf("cannot get volumeMode from pvc: %v", claim.Name)
}

// GetPersistentVolumeClaimQualifiedName returns a qualified name for pvc.
func GetPersistentVolumeClaimQualifiedName(claim *v1.PersistentVolumeClaim) string {
    return utilstrings.JoinQualifiedName(claim.GetNamespace(), claim.GetName())
}

// CheckVolumeModeFilesystem checks VolumeMode.
// If the mode is Filesystem, return true otherwise return false.
func CheckVolumeModeFilesystem(volumeSpec *volume.Spec) (bool, error) {
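
GetPersistentVolumeClaimQualifiedName just joins the claim's namespace and name, which is handy for log and error messages. A short sketch, assuming the usual core/v1 and metav1 imports plus the volumeutil alias; the claim values are illustrative:

claim := &v1.PersistentVolumeClaim{
    ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "myclaim"},
}
// JoinQualifiedName produces "default/myclaim".
fmt.Println(volumeutil.GetPersistentVolumeClaimQualifiedName(claim))
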