Add generated file

This PR adds generated files under the pkg/client and vendor folders.
xing-yang
2018-07-12 10:55:15 -07:00
parent 36b1de0341
commit e213d1890d
17729 changed files with 5090889 additions and 0 deletions

cmd/kubeadm/app/phases/upgrade/BUILD

@@ -0,0 +1,98 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"compute.go",
"health.go",
"policy.go",
"postupgrade.go",
"prepull.go",
"selfhosted.go",
"staticpods.go",
"versiongetter.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade",
visibility = ["//visibility:public"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1alpha2:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/features:go_default_library",
"//cmd/kubeadm/app/images:go_default_library",
"//cmd/kubeadm/app/phases/addons/dns:go_default_library",
"//cmd/kubeadm/app/phases/addons/proxy:go_default_library",
"//cmd/kubeadm/app/phases/bootstraptoken/clusterinfo:go_default_library",
"//cmd/kubeadm/app/phases/bootstraptoken/node:go_default_library",
"//cmd/kubeadm/app/phases/certs:go_default_library",
"//cmd/kubeadm/app/phases/controlplane:go_default_library",
"//cmd/kubeadm/app/phases/etcd:go_default_library",
"//cmd/kubeadm/app/phases/kubelet:go_default_library",
"//cmd/kubeadm/app/phases/patchnode:go_default_library",
"//cmd/kubeadm/app/phases/selfhosting:go_default_library",
"//cmd/kubeadm/app/phases/uploadconfig:go_default_library",
"//cmd/kubeadm/app/preflight:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//cmd/kubeadm/app/util/dryrun:go_default_library",
"//cmd/kubeadm/app/util/etcd:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/version:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = [
"compute_test.go",
"policy_test.go",
"postupgrade_test.go",
"prepull_test.go",
"staticpods_test.go",
],
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/scheme:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1alpha2:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/phases/certs:go_default_library",
"//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library",
"//cmd/kubeadm/app/phases/controlplane:go_default_library",
"//cmd/kubeadm/app/phases/etcd:go_default_library",
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//cmd/kubeadm/app/util/etcd:go_default_library",
"//cmd/kubeadm/test:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/transport:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)

cmd/kubeadm/app/phases/upgrade/compute.go

@@ -0,0 +1,309 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"strings"
clientset "k8s.io/client-go/kubernetes"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/features"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns"
etcdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd"
versionutil "k8s.io/kubernetes/pkg/util/version"
)
// Upgrade defines a possible upgrade from a current version to a new one
type Upgrade struct {
Description string
Before ClusterState
After ClusterState
}
// CanUpgradeKubelets returns whether an upgrade of any kubelet in the cluster is possible
func (u *Upgrade) CanUpgradeKubelets() bool {
// If there are multiple different versions now, an upgrade is possible (even if only for a subset of the nodes)
if len(u.Before.KubeletVersions) > 1 {
return true
}
// Don't report something available for upgrade if we don't know the current state
if len(u.Before.KubeletVersions) == 0 {
return false
}
// if the same version number existed both before and after, we don't have to upgrade it
_, sameVersionFound := u.Before.KubeletVersions[u.After.KubeVersion]
return !sameVersionFound
}
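// exampleCanUpgradeKubelets is a minimal usage sketch of the kubelet upgrade policy above
// (not part of kubeadm's API); the version strings and counts are hypothetical.
func exampleCanUpgradeKubelets() {
u := Upgrade{
Before: ClusterState{KubeletVersions: map[string]uint16{"v1.10.1": 3}},
After: ClusterState{KubeVersion: "v1.10.3"},
}
// No kubelet runs the target version yet, so an upgrade is reported
fmt.Println(u.CanUpgradeKubelets()) // true
// All kubelets already run the target version: nothing to upgrade
u.Before.KubeletVersions = map[string]uint16{"v1.10.3": 3}
fmt.Println(u.CanUpgradeKubelets()) // false
// Unknown state (no kubelet versions discovered): don't report an upgrade
u.Before.KubeletVersions = map[string]uint16{}
fmt.Println(u.CanUpgradeKubelets()) // false
}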
// CanUpgradeEtcd returns whether an upgrade of etcd is possible
func (u *Upgrade) CanUpgradeEtcd() bool {
return u.Before.EtcdVersion != u.After.EtcdVersion
}
// ActiveDNSAddon returns the name of the DNS addon in use: CoreDNS or kube-dns
func ActiveDNSAddon(featureGates map[string]bool) string {
if features.Enabled(featureGates, features.CoreDNS) {
return kubeadmconstants.CoreDNS
}
return kubeadmconstants.KubeDNS
}
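// exampleActiveDNSAddon is a minimal sketch of how the feature gate map above selects
// the DNS addon; the gate values shown are hypothetical.
func exampleActiveDNSAddon() {
fmt.Println(ActiveDNSAddon(map[string]bool{features.CoreDNS: true})) // "coredns"
fmt.Println(ActiveDNSAddon(map[string]bool{features.CoreDNS: false})) // "kube-dns"
}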
// ClusterState describes the state of certain versions for a cluster
type ClusterState struct {
// KubeVersion describes the version of the Kubernetes API Server, Controller Manager, Scheduler and Proxy.
KubeVersion string
// DNSType describes the DNS addon type used in the cluster: CoreDNS or kube-dns
DNSType string
// DNSVersion describes the version of the DNS addon image and manifest in use
DNSVersion string
// KubeadmVersion describes the version of the kubeadm CLI
KubeadmVersion string
// KubeletVersions maps a kubelet version to the number of kubelets running that version in the cluster
KubeletVersions map[string]uint16
// EtcdVersion represents the version of etcd used in the cluster
EtcdVersion string
}
// GetAvailableUpgrades fetches all versions from the specified VersionGetter and computes which
// kinds of upgrades can be performed
func GetAvailableUpgrades(versionGetterImpl VersionGetter, experimentalUpgradesAllowed, rcUpgradesAllowed bool, etcdClient etcdutil.ClusterInterrogator, featureGates map[string]bool, client clientset.Interface) ([]Upgrade, error) {
fmt.Println("[upgrade] Fetching available versions to upgrade to")
// Collect the upgrades kubeadm can do in this list
upgrades := []Upgrade{}
// Get the cluster version
clusterVersionStr, clusterVersion, err := versionGetterImpl.ClusterVersion()
if err != nil {
return upgrades, err
}
// Get current kubeadm CLI version
kubeadmVersionStr, kubeadmVersion, err := versionGetterImpl.KubeadmVersion()
if err != nil {
return upgrades, err
}
// Get and output the current latest stable version
stableVersionStr, stableVersion, err := versionGetterImpl.VersionFromCILabel("stable", "stable version")
if err != nil {
fmt.Printf("[upgrade/versions] WARNING: %v\n", err)
fmt.Println("[upgrade/versions] WARNING: Falling back to current kubeadm version as latest stable version")
stableVersionStr, stableVersion = kubeadmVersionStr, kubeadmVersion
}
// Get the kubelet versions in the cluster
kubeletVersions, err := versionGetterImpl.KubeletVersions()
if err != nil {
return upgrades, err
}
// Get current etcd version
etcdVersion, err := etcdClient.GetVersion()
if err != nil {
return upgrades, err
}
dnsType, dnsVersion, err := dns.DeployedDNSAddon(client)
if err != nil {
return nil, err
}
// Construct a descriptor for the current state of the world
beforeState := ClusterState{
KubeVersion: clusterVersionStr,
DNSType: dnsType,
DNSVersion: dnsVersion,
KubeadmVersion: kubeadmVersionStr,
KubeletVersions: kubeletVersions,
EtcdVersion: etcdVersion,
}
// Do a "dumb guess" that a new minor upgrade is available just because the latest stable version is higher than the cluster version
// This guess will be corrected once we know if there is a patch version available
canDoMinorUpgrade := clusterVersion.LessThan(stableVersion)
// A patch version doesn't exist if the cluster version is higher than or equal to the current stable version
// in the case that a user is trying to upgrade from, let's say, v1.8.0-beta.2 to v1.8.0-rc.1 (given we support such upgrades experimentally)
// a stable-1.8 branch doesn't exist yet. Hence this check.
if patchVersionBranchExists(clusterVersion, stableVersion) {
currentBranch := getBranchFromVersion(clusterVersionStr)
versionLabel := fmt.Sprintf("stable-%s", currentBranch)
description := fmt.Sprintf("version in the v%s series", currentBranch)
// Get and output the latest patch version for the cluster branch
patchVersionStr, patchVersion, err := versionGetterImpl.VersionFromCILabel(versionLabel, description)
if err != nil {
fmt.Printf("[upgrade/versions] WARNING: %v\n", err)
} else {
// Check if a minor version upgrade is possible when a patch release exists
// It's only possible if the latest patch version is higher than the current patch version
// If that's the case, they must be on different branches => a newer minor version can be upgraded to
canDoMinorUpgrade = minorUpgradePossibleWithPatchRelease(stableVersion, patchVersion)
// If the cluster version is lower than the newest patch version, we should inform about the possible upgrade
if patchUpgradePossible(clusterVersion, patchVersion) {
// The kubeadm version has to be upgraded to the latest patch version
newKubeadmVer := patchVersionStr
if kubeadmVersion.AtLeast(patchVersion) {
// In this case, the kubeadm CLI version is new enough. Don't display an update suggestion for kubeadm by making .NewKubeadmVersion equal .CurrentKubeadmVersion
newKubeadmVer = kubeadmVersionStr
}
upgrades = append(upgrades, Upgrade{
Description: description,
Before: beforeState,
After: ClusterState{
KubeVersion: patchVersionStr,
DNSType: ActiveDNSAddon(featureGates),
DNSVersion: kubeadmconstants.GetDNSVersion(ActiveDNSAddon(featureGates)),
KubeadmVersion: newKubeadmVer,
EtcdVersion: getSuggestedEtcdVersion(patchVersionStr),
// KubeletVersions is unset here as it is not used anywhere in .After
},
})
}
}
}
if canDoMinorUpgrade {
upgrades = append(upgrades, Upgrade{
Description: "stable version",
Before: beforeState,
After: ClusterState{
KubeVersion: stableVersionStr,
DNSType: ActiveDNSAddon(featureGates),
DNSVersion: kubeadmconstants.GetDNSVersion(ActiveDNSAddon(featureGates)),
KubeadmVersion: stableVersionStr,
EtcdVersion: getSuggestedEtcdVersion(stableVersionStr),
// KubeletVersions is unset here as it is not used anywhere in .After
},
})
}
if experimentalUpgradesAllowed || rcUpgradesAllowed {
// dl.k8s.io/release/latest.txt is ALWAYS an alpha.X version
// dl.k8s.io/release/latest-1.X.txt is first v1.X.0-alpha.0 -> v1.X.0-alpha.Y, then v1.X.0-beta.0 to v1.X.0-beta.Z, then v1.X.0-rc.1 to v1.X.0-rc.W.
// After the v1.X.0 release, latest-1.X.txt is always a beta.0 version. For example, if the latest stable version on the v1.7 branch is v1.7.3, then the
// latest-1.7 version is v1.7.4-beta.0
// Worth noting is that when the release-1.X branch is cut, there are two versions tagged: v1.X.0-beta.0 AND v1.(X+1).0-alpha.0
// The v1.(X+1).0-alpha.0 version is pretty much useless and should just be ignored, as more betas may be released that have more features than the initial v1.(X+1).0-alpha.0
// So what we do below is get the latest overall version, always a v1.X.0-alpha.Y version. Then we get the latest-1.(X-1) version. This version may be anything
// between v1.(X-1).0-beta.0 and v1.(X-1).Z-beta.0. At some point in time, latest-1.(X-1) will point to v1.(X-1).0-rc.1. Then we should show it.
// The flow looks like this (with time on the X axis):
// v1.8.0-alpha.1 -> v1.8.0-alpha.2 -> v1.8.0-alpha.3 | release-1.8 branch | v1.8.0-beta.0 -> v1.8.0-beta.1 -> v1.8.0-beta.2 -> v1.8.0-rc.1 -> v1.8.0 -> v1.8.1
// v1.9.0-alpha.0 -> v1.9.0-alpha.1 -> v1.9.0-alpha.2
// Get and output the current latest unstable version
latestVersionStr, latestVersion, err := versionGetterImpl.VersionFromCILabel("latest", "experimental version")
if err != nil {
return upgrades, err
}
minorUnstable := latestVersion.Components()[1]
// Get and output the latest version of the previous branch
previousBranch := fmt.Sprintf("latest-1.%d", minorUnstable-1)
previousBranchLatestVersionStr, previousBranchLatestVersion, err := versionGetterImpl.VersionFromCILabel(previousBranch, "")
if err != nil {
return upgrades, err
}
// If that previous latest version is an RC, RCs are allowed and the cluster version is lower than the RC version, show the upgrade
if rcUpgradesAllowed && rcUpgradePossible(clusterVersion, previousBranchLatestVersion) {
upgrades = append(upgrades, Upgrade{
Description: "release candidate version",
Before: beforeState,
After: ClusterState{
KubeVersion: previousBranchLatestVersionStr,
DNSType: ActiveDNSAddon(featureGates),
DNSVersion: kubeadmconstants.GetDNSVersion(ActiveDNSAddon(featureGates)),
KubeadmVersion: previousBranchLatestVersionStr,
EtcdVersion: getSuggestedEtcdVersion(previousBranchLatestVersionStr),
// KubeletVersions is unset here as it is not used anywhere in .After
},
})
}
// Show the possibility if experimental upgrades are allowed
if experimentalUpgradesAllowed && clusterVersion.LessThan(latestVersion) {
// Default to assume that the experimental version to show is the unstable one
unstableKubeVersion := latestVersionStr
unstableKubeDNSVersion := kubeadmconstants.GetDNSVersion(ActiveDNSAddon(featureGates))
// We should not display alpha.0. The previous branch's beta/rc versions are more relevant due to how the kube branching process works.
if latestVersion.PreRelease() == "alpha.0" {
unstableKubeVersion = previousBranchLatestVersionStr
unstableKubeDNSVersion = kubeadmconstants.GetDNSVersion(ActiveDNSAddon(featureGates))
}
upgrades = append(upgrades, Upgrade{
Description: "experimental version",
Before: beforeState,
After: ClusterState{
KubeVersion: unstableKubeVersion,
DNSType: ActiveDNSAddon(featureGates),
DNSVersion: unstableKubeDNSVersion,
KubeadmVersion: unstableKubeVersion,
EtcdVersion: getSuggestedEtcdVersion(unstableKubeVersion),
// KubeletVersions is unset here as it is not used anywhere in .After
},
})
}
}
// Add a newline at the end of this output to leave some space before the next output section
fmt.Println("")
return upgrades, nil
}
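// exampleListUpgrades is a minimal sketch of how a caller (such as the "kubeadm upgrade plan"
// command) might consume GetAvailableUpgrades; the getter, etcd client and clientset are
// assumed to be constructed elsewhere.
func exampleListUpgrades(getter VersionGetter, etcdClient etcdutil.ClusterInterrogator, client clientset.Interface) error {
upgrades, err := GetAvailableUpgrades(getter, false /* experimental */, false /* RCs */, etcdClient, map[string]bool{}, client)
if err != nil {
return err
}
for _, u := range upgrades {
fmt.Printf("%s: %s -> %s (kubeadm %s -> %s)\n", u.Description, u.Before.KubeVersion, u.After.KubeVersion, u.Before.KubeadmVersion, u.After.KubeadmVersion)
}
return nil
}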
func getBranchFromVersion(version string) string {
v := versionutil.MustParseGeneric(version)
return fmt.Sprintf("%d.%d", v.Major(), v.Minor())
}
func patchVersionBranchExists(clusterVersion, stableVersion *versionutil.Version) bool {
return stableVersion.AtLeast(clusterVersion)
}
func patchUpgradePossible(clusterVersion, patchVersion *versionutil.Version) bool {
return clusterVersion.LessThan(patchVersion)
}
func rcUpgradePossible(clusterVersion, previousBranchLatestVersion *versionutil.Version) bool {
return strings.HasPrefix(previousBranchLatestVersion.PreRelease(), "rc") && clusterVersion.LessThan(previousBranchLatestVersion)
}
func minorUpgradePossibleWithPatchRelease(stableVersion, patchVersion *versionutil.Version) bool {
return patchVersion.LessThan(stableVersion)
}
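// examplePolicyPredicates is a minimal sketch of the helper predicates above, using
// hypothetical versions.
func examplePolicyPredicates() {
cluster := versionutil.MustParseSemantic("v1.10.1")
patch := versionutil.MustParseSemantic("v1.10.3")
stable := versionutil.MustParseSemantic("v1.11.0")
fmt.Println(patchUpgradePossible(cluster, patch)) // true: v1.10.1 is lower than v1.10.3
fmt.Println(minorUpgradePossibleWithPatchRelease(stable, patch)) // true: v1.10.3 is lower than v1.11.0
fmt.Println(getBranchFromVersion("v1.10.1")) // "1.10"
}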
func getSuggestedEtcdVersion(kubernetesVersion string) string {
etcdVersion, err := kubeadmconstants.EtcdSupportedVersion(kubernetesVersion)
if err != nil {
fmt.Printf("[upgrade/versions] WARNING: No recommended etcd for requested kubernetes version (%s)\n", kubernetesVersion)
return "N/A"
}
return etcdVersion.String()
}

cmd/kubeadm/app/phases/upgrade/compute_test.go

@@ -0,0 +1,881 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"reflect"
"testing"
"time"
"github.com/coreos/etcd/clientv3"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientsetfake "k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
etcdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd"
versionutil "k8s.io/kubernetes/pkg/util/version"
)
type fakeVersionGetter struct {
clusterVersion, kubeadmVersion, stableVersion, latestVersion, latestDevBranchVersion, stablePatchVersion, kubeletVersion string
}
var _ VersionGetter = &fakeVersionGetter{}
// ClusterVersion gets a fake API server version
func (f *fakeVersionGetter) ClusterVersion() (string, *versionutil.Version, error) {
return f.clusterVersion, versionutil.MustParseSemantic(f.clusterVersion), nil
}
// KubeadmVersion gets a fake kubeadm version
func (f *fakeVersionGetter) KubeadmVersion() (string, *versionutil.Version, error) {
return f.kubeadmVersion, versionutil.MustParseSemantic(f.kubeadmVersion), nil
}
// VersionFromCILabel gets fake latest versions from CI
func (f *fakeVersionGetter) VersionFromCILabel(ciVersionLabel, _ string) (string, *versionutil.Version, error) {
if ciVersionLabel == "stable" {
return f.stableVersion, versionutil.MustParseSemantic(f.stableVersion), nil
}
if ciVersionLabel == "latest" {
return f.latestVersion, versionutil.MustParseSemantic(f.latestVersion), nil
}
if ciVersionLabel == "latest-1.11" {
return f.latestDevBranchVersion, versionutil.MustParseSemantic(f.latestDevBranchVersion), nil
}
return f.stablePatchVersion, versionutil.MustParseSemantic(f.stablePatchVersion), nil
}
// KubeletVersions gets the versions of the kubelets in the cluster
func (f *fakeVersionGetter) KubeletVersions() (map[string]uint16, error) {
return map[string]uint16{
f.kubeletVersion: 1,
}, nil
}
type fakeEtcdClient struct {
TLS bool
mismatchedVersions bool
}
func (f fakeEtcdClient) HasTLS() bool { return f.TLS }
func (f fakeEtcdClient) ClusterAvailable() (bool, error) { return true, nil }
func (f fakeEtcdClient) WaitForClusterAvailable(delay time.Duration, retries int, retryInterval time.Duration) (bool, error) {
return true, nil
}
func (f fakeEtcdClient) GetClusterStatus() (map[string]*clientv3.StatusResponse, error) {
return make(map[string]*clientv3.StatusResponse), nil
}
func (f fakeEtcdClient) GetVersion() (string, error) {
versions, _ := f.GetClusterVersions()
if f.mismatchedVersions {
return "", fmt.Errorf("etcd cluster contains endpoints with mismatched versions: %v", versions)
}
return "3.1.12", nil
}
func (f fakeEtcdClient) GetClusterVersions() (map[string]string, error) {
if f.mismatchedVersions {
return map[string]string{
"foo": "3.1.12",
"bar": "3.2.0",
}, nil
}
return map[string]string{
"foo": "3.1.12",
"bar": "3.1.12",
}, nil
}
func TestGetAvailableUpgrades(t *testing.T) {
etcdClient := fakeEtcdClient{}
tests := []struct {
name string
vg VersionGetter
expectedUpgrades []Upgrade
allowExperimental, allowRCs bool
errExpected bool
etcdClient etcdutil.ClusterInterrogator
beforeDNSType string
beforeDNSVersion string
featureGates map[string]bool
}{
{
name: "no action needed, already up-to-date",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.10.3",
stablePatchVersion: "v1.10.3",
stableVersion: "v1.10.3",
},
beforeDNSType: constants.CoreDNS,
beforeDNSVersion: "v1.0.6",
featureGates: make(map[string]bool),
expectedUpgrades: []Upgrade{},
allowExperimental: false,
errExpected: false,
etcdClient: etcdClient,
},
{
name: "simple patch version upgrade",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.1",
kubeletVersion: "v1.10.1", // the kubelet are on the same version as the control plane
kubeadmVersion: "v1.10.2",
stablePatchVersion: "v1.10.3",
stableVersion: "v1.10.3",
},
beforeDNSType: constants.CoreDNS,
beforeDNSVersion: "1.0.6",
featureGates: make(map[string]bool),
expectedUpgrades: []Upgrade{
{
Description: "version in the v1.10 series",
Before: ClusterState{
KubeVersion: "v1.10.1",
KubeletVersions: map[string]uint16{
"v1.10.1": 1,
},
KubeadmVersion: "v1.10.2",
DNSType: "coredns",
DNSVersion: "1.0.6",
EtcdVersion: "3.1.12",
},
After: ClusterState{
KubeVersion: "v1.10.3",
KubeadmVersion: "v1.10.3",
DNSType: "coredns",
DNSVersion: "1.1.3",
EtcdVersion: "3.1.12",
},
},
},
allowExperimental: false,
errExpected: false,
etcdClient: etcdClient,
},
{
name: "no version provided to offline version getter does not change behavior",
vg: NewOfflineVersionGetter(&fakeVersionGetter{
clusterVersion: "v1.10.1",
kubeletVersion: "v1.10.1", // the kubelet are on the same version as the control plane
kubeadmVersion: "v1.10.2",
stablePatchVersion: "v1.10.3",
stableVersion: "v1.10.3",
}, ""),
beforeDNSType: constants.CoreDNS,
beforeDNSVersion: "1.0.6",
featureGates: make(map[string]bool),
expectedUpgrades: []Upgrade{
{
Description: "version in the v1.10 series",
Before: ClusterState{
KubeVersion: "v1.10.1",
KubeletVersions: map[string]uint16{
"v1.10.1": 1,
},
KubeadmVersion: "v1.10.2",
DNSType: "coredns",
DNSVersion: "1.0.6",
EtcdVersion: "3.1.12",
},
After: ClusterState{
KubeVersion: "v1.10.3",
KubeadmVersion: "v1.10.3",
DNSType: "coredns",
DNSVersion: "1.1.3",
EtcdVersion: "3.1.12",
},
},
},
allowExperimental: false,
errExpected: false,
etcdClient: etcdClient,
},
{
name: "minor version upgrade only",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.1",
kubeletVersion: "v1.10.1", // the kubelet are on the same version as the control plane
kubeadmVersion: "v1.11.0",
stablePatchVersion: "v1.10.1",
stableVersion: "v1.11.0",
},
beforeDNSType: constants.CoreDNS,
beforeDNSVersion: "1.0.6",
featureGates: make(map[string]bool),
expectedUpgrades: []Upgrade{
{
Description: "stable version",
Before: ClusterState{
KubeVersion: "v1.10.1",
KubeletVersions: map[string]uint16{
"v1.10.1": 1,
},
KubeadmVersion: "v1.11.0",
DNSType: "coredns",
DNSVersion: "1.0.6",
EtcdVersion: "3.1.12",
},
After: ClusterState{
KubeVersion: "v1.11.0",
KubeadmVersion: "v1.11.0",
DNSType: "coredns",
DNSVersion: "1.1.3",
EtcdVersion: "3.2.18",
},
},
},
allowExperimental: false,
errExpected: false,
etcdClient: etcdClient,
},
{
name: "both minor version upgrade and patch version upgrade available",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3", // the kubelet are on the same version as the control plane
kubeadmVersion: "v1.10.5",
stablePatchVersion: "v1.10.5",
stableVersion: "v1.11.1",
},
beforeDNSType: constants.CoreDNS,
beforeDNSVersion: "1.0.6",
featureGates: make(map[string]bool),
expectedUpgrades: []Upgrade{
{
Description: "version in the v1.10 series",
Before: ClusterState{
KubeVersion: "v1.10.3",
KubeletVersions: map[string]uint16{
"v1.10.3": 1,
},
KubeadmVersion: "v1.10.5",
DNSType: "coredns",
DNSVersion: "1.0.6",
EtcdVersion: "3.1.12",
},
After: ClusterState{
KubeVersion: "v1.10.5",
KubeadmVersion: "v1.10.5", // Note: The kubeadm version mustn't be "downgraded" here
DNSType: "coredns",
DNSVersion: "1.1.3",
EtcdVersion: "3.1.12",
},
},
{
Description: "stable version",
Before: ClusterState{
KubeVersion: "v1.10.3",
KubeletVersions: map[string]uint16{
"v1.10.3": 1,
},
KubeadmVersion: "v1.10.5",
DNSType: "coredns",
DNSVersion: "1.0.6",
EtcdVersion: "3.1.12",
},
After: ClusterState{
KubeVersion: "v1.11.1",
KubeadmVersion: "v1.11.1",
DNSType: "coredns",
DNSVersion: "1.1.3",
EtcdVersion: "3.2.18",
},
},
},
allowExperimental: false,
errExpected: false,
etcdClient: etcdClient,
},
{
name: "allow experimental upgrades, but no upgrade available",
vg: &fakeVersionGetter{
clusterVersion: "v1.11.0-alpha.2",
kubeletVersion: "v1.10.5",
kubeadmVersion: "v1.10.5",
stablePatchVersion: "v1.10.5",
stableVersion: "v1.10.5",
latestVersion: "v1.11.0-alpha.2",
},
beforeDNSType: constants.CoreDNS,
beforeDNSVersion: "v1.0.6",
featureGates: make(map[string]bool),
expectedUpgrades: []Upgrade{},
allowExperimental: true,
errExpected: false,
etcdClient: etcdClient,
},
{
name: "upgrade to an unstable version should be supported",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.5",
kubeletVersion: "v1.10.5",
kubeadmVersion: "v1.10.5",
stablePatchVersion: "v1.10.5",
stableVersion: "v1.10.5",
latestVersion: "v1.11.0-alpha.2",
},
beforeDNSType: constants.CoreDNS,
beforeDNSVersion: "1.0.6",
featureGates: make(map[string]bool),
expectedUpgrades: []Upgrade{
{
Description: "experimental version",
Before: ClusterState{
KubeVersion: "v1.10.5",
KubeletVersions: map[string]uint16{
"v1.10.5": 1,
},
KubeadmVersion: "v1.10.5",
DNSType: "coredns",
DNSVersion: "1.0.6",
EtcdVersion: "3.1.12",
},
After: ClusterState{
KubeVersion: "v1.11.0-alpha.2",
KubeadmVersion: "v1.11.0-alpha.2",
DNSType: "coredns",
DNSVersion: "1.1.3",
EtcdVersion: "3.2.18",
},
},
},
allowExperimental: true,
errExpected: false,
etcdClient: etcdClient,
},
{
name: "upgrade from an unstable version to an unstable version should be supported",
vg: &fakeVersionGetter{
clusterVersion: "v1.11.0-alpha.1",
kubeletVersion: "v1.10.5",
kubeadmVersion: "v1.10.5",
stablePatchVersion: "v1.10.5",
stableVersion: "v1.10.5",
latestVersion: "v1.11.0-alpha.2",
},
beforeDNSType: constants.CoreDNS,
beforeDNSVersion: "1.0.6",
featureGates: make(map[string]bool),
expectedUpgrades: []Upgrade{
{
Description: "experimental version",
Before: ClusterState{
KubeVersion: "v1.11.0-alpha.1",
KubeletVersions: map[string]uint16{
"v1.10.5": 1,
},
KubeadmVersion: "v1.10.5",
DNSType: "coredns",
DNSVersion: "1.0.6",
EtcdVersion: "3.1.12",
},
After: ClusterState{
KubeVersion: "v1.11.0-alpha.2",
KubeadmVersion: "v1.11.0-alpha.2",
DNSType: "coredns",
DNSVersion: "1.1.3",
EtcdVersion: "3.2.18",
},
},
},
allowExperimental: true,
errExpected: false,
etcdClient: etcdClient,
},
{
name: "v1.X.0-alpha.0 should be ignored",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.5",
kubeletVersion: "v1.10.5",
kubeadmVersion: "v1.10.5",
stablePatchVersion: "v1.10.5",
stableVersion: "v1.10.5",
latestDevBranchVersion: "v1.11.0-beta.1",
latestVersion: "v1.12.0-alpha.0",
},
beforeDNSType: constants.CoreDNS,
beforeDNSVersion: "1.0.6",
featureGates: make(map[string]bool),
expectedUpgrades: []Upgrade{
{
Description: "experimental version",
Before: ClusterState{
KubeVersion: "v1.10.5",
KubeletVersions: map[string]uint16{
"v1.10.5": 1,
},
KubeadmVersion: "v1.10.5",
DNSType: "coredns",
DNSVersion: "1.0.6",
EtcdVersion: "3.1.12",
},
After: ClusterState{
KubeVersion: "v1.11.0-beta.1",
KubeadmVersion: "v1.11.0-beta.1",
DNSType: "coredns",
DNSVersion: "1.1.3",
EtcdVersion: "3.2.18",
},
},
},
allowExperimental: true,
errExpected: false,
etcdClient: etcdClient,
},
{
name: "upgrade to an RC version should be supported",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.5",
kubeletVersion: "v1.10.5",
kubeadmVersion: "v1.10.5",
stablePatchVersion: "v1.10.5",
stableVersion: "v1.10.5",
latestDevBranchVersion: "v1.11.0-rc.1",
latestVersion: "v1.12.0-alpha.1",
},
beforeDNSType: constants.CoreDNS,
beforeDNSVersion: "1.0.6",
featureGates: make(map[string]bool),
expectedUpgrades: []Upgrade{
{
Description: "release candidate version",
Before: ClusterState{
KubeVersion: "v1.10.5",
KubeletVersions: map[string]uint16{
"v1.10.5": 1,
},
KubeadmVersion: "v1.10.5",
DNSType: "coredns",
DNSVersion: "1.0.6",
EtcdVersion: "3.1.12",
},
After: ClusterState{
KubeVersion: "v1.11.0-rc.1",
KubeadmVersion: "v1.11.0-rc.1",
DNSType: "coredns",
DNSVersion: "1.1.3",
EtcdVersion: "3.2.18",
},
},
},
allowRCs: true,
errExpected: false,
etcdClient: etcdClient,
},
{
name: "it is possible (but very uncommon) that the latest version from the previous branch is an rc and the current latest version is alpha.0. In that case, show the RC",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.5",
kubeletVersion: "v1.10.5",
kubeadmVersion: "v1.10.5",
stablePatchVersion: "v1.10.5",
stableVersion: "v1.10.5",
latestDevBranchVersion: "v1.11.6-rc.1",
latestVersion: "v1.12.1-alpha.0",
},
beforeDNSType: constants.CoreDNS,
beforeDNSVersion: "1.0.6",
featureGates: make(map[string]bool),
expectedUpgrades: []Upgrade{
{
Description: "experimental version", // Note that this is considered an experimental version in this uncommon scenario
Before: ClusterState{
KubeVersion: "v1.10.5",
KubeletVersions: map[string]uint16{
"v1.10.5": 1,
},
KubeadmVersion: "v1.10.5",
DNSType: "coredns",
DNSVersion: "1.0.6",
EtcdVersion: "3.1.12",
},
After: ClusterState{
KubeVersion: "v1.11.6-rc.1",
KubeadmVersion: "v1.11.6-rc.1",
DNSType: "coredns",
DNSVersion: "1.1.3",
EtcdVersion: "3.2.18",
},
},
},
allowExperimental: true,
errExpected: false,
etcdClient: etcdClient,
},
{
name: "upgrade to an RC version should be supported. There may also be an even newer unstable version.",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.5",
kubeletVersion: "v1.10.5",
kubeadmVersion: "v1.10.5",
stablePatchVersion: "v1.10.5",
stableVersion: "v1.10.5",
latestDevBranchVersion: "v1.11.0-rc.1",
latestVersion: "v1.12.0-alpha.2",
},
beforeDNSType: constants.CoreDNS,
beforeDNSVersion: "1.0.6",
featureGates: make(map[string]bool),
expectedUpgrades: []Upgrade{
{
Description: "release candidate version",
Before: ClusterState{
KubeVersion: "v1.10.5",
KubeletVersions: map[string]uint16{
"v1.10.5": 1,
},
KubeadmVersion: "v1.10.5",
DNSType: "coredns",
DNSVersion: "1.0.6",
EtcdVersion: "3.1.12",
},
After: ClusterState{
KubeVersion: "v1.11.0-rc.1",
KubeadmVersion: "v1.11.0-rc.1",
DNSType: "coredns",
DNSVersion: "1.1.3",
EtcdVersion: "3.2.18",
},
},
{
Description: "experimental version",
Before: ClusterState{
KubeVersion: "v1.10.5",
KubeletVersions: map[string]uint16{
"v1.10.5": 1,
},
KubeadmVersion: "v1.10.5",
DNSType: "coredns",
DNSVersion: "1.0.6",
EtcdVersion: "3.1.12",
},
After: ClusterState{
KubeVersion: "v1.12.0-alpha.2",
KubeadmVersion: "v1.12.0-alpha.2",
DNSType: "coredns",
DNSVersion: "1.1.3",
EtcdVersion: "3.2.18",
},
},
},
allowRCs: true,
allowExperimental: true,
errExpected: false,
etcdClient: etcdClient,
},
{
name: "Upgrades with external etcd with mismatched versions should not be allowed.",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.10.3",
stablePatchVersion: "v1.10.3",
stableVersion: "v1.10.3",
},
allowRCs: false,
allowExperimental: false,
etcdClient: fakeEtcdClient{mismatchedVersions: true},
expectedUpgrades: []Upgrade{},
errExpected: true,
},
{
name: "offline version getter",
vg: NewOfflineVersionGetter(&fakeVersionGetter{
clusterVersion: "v1.11.1",
kubeletVersion: "v1.11.0",
kubeadmVersion: "v1.11.1",
}, "v1.12.1"),
etcdClient: etcdClient,
beforeDNSType: constants.CoreDNS,
beforeDNSVersion: "1.0.6",
featureGates: make(map[string]bool),
expectedUpgrades: []Upgrade{
{
Description: "version in the v1.11 series",
Before: ClusterState{
KubeVersion: "v1.11.1",
KubeletVersions: map[string]uint16{
"v1.11.0": 1,
},
KubeadmVersion: "v1.11.1",
DNSType: "coredns",
DNSVersion: "1.0.6",
EtcdVersion: "3.1.12",
},
After: ClusterState{
KubeVersion: "v1.12.1",
KubeadmVersion: "v1.12.1",
DNSType: "coredns",
DNSVersion: "1.1.3",
EtcdVersion: "3.2.18",
},
},
},
},
{
name: "kubedns to coredns",
vg: &fakeVersionGetter{
clusterVersion: "v1.11.2",
kubeletVersion: "v1.11.2", // the kubelet are on the same version as the control plane
kubeadmVersion: "v1.12.0",
stablePatchVersion: "v1.12.0",
stableVersion: "v1.12.0",
},
etcdClient: etcdClient,
beforeDNSType: constants.KubeDNS,
beforeDNSVersion: "1.14.7",
featureGates: make(map[string]bool),
expectedUpgrades: []Upgrade{
{
Description: "version in the v1.11 series",
Before: ClusterState{
KubeVersion: "v1.11.2",
KubeletVersions: map[string]uint16{
"v1.11.2": 1,
},
KubeadmVersion: "v1.12.0",
DNSType: "kube-dns",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.12",
},
After: ClusterState{
KubeVersion: "v1.12.0",
KubeadmVersion: "v1.12.0",
DNSType: "coredns",
DNSVersion: "1.1.3",
EtcdVersion: "3.2.18",
},
},
},
},
{
name: "keep coredns",
vg: &fakeVersionGetter{
clusterVersion: "v1.11.2",
kubeletVersion: "v1.11.2", // the kubelet are on the same version as the control plane
kubeadmVersion: "v1.12.0",
stablePatchVersion: "v1.12.0",
stableVersion: "v1.12.0",
},
etcdClient: etcdClient,
beforeDNSType: constants.KubeDNS,
beforeDNSVersion: "1.14.7",
featureGates: map[string]bool{"CoreDNS": false},
expectedUpgrades: []Upgrade{
{
Description: "version in the v1.11 series",
Before: ClusterState{
KubeVersion: "v1.11.2",
KubeletVersions: map[string]uint16{
"v1.11.2": 1,
},
KubeadmVersion: "v1.12.0",
DNSType: "kube-dns",
DNSVersion: "1.14.7",
EtcdVersion: "3.1.12",
},
After: ClusterState{
KubeVersion: "v1.12.0",
KubeadmVersion: "v1.12.0",
DNSType: "kube-dns",
DNSVersion: "1.14.10",
EtcdVersion: "3.2.18",
},
},
},
},
}
// For each test case, instantiate a fake clientset with a DNS Deployment so that the
// deployed DNS addon type and version can be detected.
for _, rt := range tests {
t.Run(rt.name, func(t *testing.T) {
client := clientsetfake.NewSimpleClientset(&apps.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: rt.beforeDNSType,
Namespace: "kube-system",
Labels: map[string]string{
"k8s-app": "kube-dns",
},
},
Spec: apps.DeploymentSpec{
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "test:" + rt.beforeDNSVersion,
},
},
},
},
},
})
actualUpgrades, actualErr := GetAvailableUpgrades(rt.vg, rt.allowExperimental, rt.allowRCs, rt.etcdClient, rt.featureGates, client)
if !reflect.DeepEqual(actualUpgrades, rt.expectedUpgrades) {
t.Errorf("failed TestGetAvailableUpgrades\n\texpected upgrades: %v\n\tgot: %v", rt.expectedUpgrades, actualUpgrades)
}
if (actualErr != nil) != rt.errExpected {
t.Errorf("failed TestGetAvailableUpgrades\n\texpected error: %t\n\tgot error: %t", rt.errExpected, (actualErr != nil))
}
})
}
}
func TestKubeletUpgrade(t *testing.T) {
tests := []struct {
before map[string]uint16
after string
expected bool
}{
{ // upgrade available
before: map[string]uint16{
"v1.10.1": 1,
},
after: "v1.10.3",
expected: true,
},
{ // upgrade available
before: map[string]uint16{
"v1.10.1": 1,
"v1.10.3": 100,
},
after: "v1.10.3",
expected: true,
},
{ // upgrade not available
before: map[string]uint16{
"v1.10.3": 1,
},
after: "v1.10.3",
expected: false,
},
{ // upgrade not available
before: map[string]uint16{
"v1.10.3": 100,
},
after: "v1.10.3",
expected: false,
},
{ // upgrade not available if we don't know anything about the earlier state
before: map[string]uint16{},
after: "v1.10.3",
expected: false,
},
}
for _, rt := range tests {
upgrade := Upgrade{
Before: ClusterState{
KubeletVersions: rt.before,
},
After: ClusterState{
KubeVersion: rt.after,
},
}
actual := upgrade.CanUpgradeKubelets()
if actual != rt.expected {
t.Errorf("failed TestKubeletUpgrade\n\texpected: %t\n\tgot: %t\n\ttest object: %v", rt.expected, actual, upgrade)
}
}
}
func TestGetBranchFromVersion(t *testing.T) {
testCases := []struct {
version string
expectedVersion string
}{
{
version: "v1.9.5",
expectedVersion: "1.9",
},
{
version: "v1.9.0-alpha.2",
expectedVersion: "1.9",
},
{
version: "v1.9.0-beta.0",
expectedVersion: "1.9",
},
{
version: "v1.9.0-rc.1",
expectedVersion: "1.9",
},
{
version: "v1.12.5",
expectedVersion: "1.12",
},
{
version: "v1.11.0-alpha.0",
expectedVersion: "1.11",
},
{
version: "v1.11.0-beta.1",
expectedVersion: "1.11",
},
{
version: "v1.11.0-rc.0",
expectedVersion: "1.11",
},
{
version: "1.12.5",
expectedVersion: "1.12",
},
}
for _, tc := range testCases {
v := getBranchFromVersion(tc.version)
if v != tc.expectedVersion {
t.Errorf("expected version %s, got %s", tc.expectedVersion, v)
}
}
}

cmd/kubeadm/app/phases/upgrade/health.go

@@ -0,0 +1,214 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"net/http"
"os"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/preflight"
)
// healthCheck is a helper struct for easily performing healthchecks against the cluster and printing the output
type healthCheck struct {
name string
client clientset.Interface
// f is invoked with a k8s client passed to it. Should return an optional error
f func(clientset.Interface) error
}
// Check is part of the preflight.Checker interface
func (c *healthCheck) Check() (warnings, errors []error) {
if err := c.f(c.client); err != nil {
return nil, []error{err}
}
return nil, nil
}
// Name is part of the preflight.Checker interface
func (c *healthCheck) Name() string {
return c.name
}
// CheckClusterHealth makes sure:
// - the API /healthz endpoint is healthy
// - all master Nodes are Ready
// - (if self-hosted) that there are DaemonSets with at least one Pod for all control plane components
// - (if static pod-hosted) that all required Static Pod manifests exist on disk
func CheckClusterHealth(client clientset.Interface, ignoreChecksErrors sets.String) error {
fmt.Println("[upgrade] Making sure the cluster is healthy:")
healthChecks := []preflight.Checker{
&healthCheck{
name: "APIServerHealth",
client: client,
f: apiServerHealthy,
},
&healthCheck{
name: "MasterNodesReady",
client: client,
f: masterNodesReady,
},
// TODO: Add a check for ComponentStatuses here?
}
// Run slightly different health checks depending on control plane hosting type
if IsControlPlaneSelfHosted(client) {
healthChecks = append(healthChecks, &healthCheck{
name: "ControlPlaneHealth",
client: client,
f: controlPlaneHealth,
})
} else {
healthChecks = append(healthChecks, &healthCheck{
name: "StaticPodManifest",
client: client,
f: staticPodManifestHealth,
})
}
return preflight.RunChecks(healthChecks, os.Stderr, ignoreChecksErrors)
}
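// exampleCustomCheck is a minimal sketch of how an additional health check could be
// composed from the healthCheck helper above; the check name and body are hypothetical.
func exampleCustomCheck(client clientset.Interface) preflight.Checker {
return &healthCheck{
name: "ExampleCheck",
client: client,
f: func(c clientset.Interface) error {
// A real check would query the cluster through c here; returning nil means healthy
return nil
},
}
}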
// apiServerHealthy checks whether the API server's /healthz endpoint is healthy
func apiServerHealthy(client clientset.Interface) error {
healthStatus := 0
// If client.Discovery().RESTClient() is nil, the fake client is used, and that means we are dry-running. Just proceed
if client.Discovery().RESTClient() == nil {
return nil
}
client.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus)
if healthStatus != http.StatusOK {
return fmt.Errorf("the API Server is unhealthy; /healthz didn't return %q", "ok")
}
return nil
}
// masterNodesReady checks whether all master Nodes in the cluster are Ready
func masterNodesReady(client clientset.Interface) error {
selector := labels.SelectorFromSet(labels.Set(map[string]string{
constants.LabelNodeRoleMaster: "",
}))
masters, err := client.CoreV1().Nodes().List(metav1.ListOptions{
LabelSelector: selector.String(),
})
if err != nil {
return fmt.Errorf("couldn't list masters in cluster: %v", err)
}
if len(masters.Items) == 0 {
return fmt.Errorf("failed to find any nodes with master role")
}
notReadyMasters := getNotReadyNodes(masters.Items)
if len(notReadyMasters) != 0 {
return fmt.Errorf("there are NotReady masters in the cluster: %v", notReadyMasters)
}
return nil
}
// controlPlaneHealth ensures all control plane DaemonSets are healthy
func controlPlaneHealth(client clientset.Interface) error {
notReadyDaemonSets, err := getNotReadyDaemonSets(client)
if err != nil {
return err
}
if len(notReadyDaemonSets) != 0 {
return fmt.Errorf("there are control plane DaemonSets in the cluster that are not ready: %v", notReadyDaemonSets)
}
return nil
}
// staticPodManifestHealth makes sure the required Static Pod manifests are present on disk
func staticPodManifestHealth(_ clientset.Interface) error {
nonExistentManifests := []string{}
for _, component := range constants.MasterComponents {
manifestFile := constants.GetStaticPodFilepath(component, constants.GetStaticPodDirectory())
if _, err := os.Stat(manifestFile); os.IsNotExist(err) {
nonExistentManifests = append(nonExistentManifests, manifestFile)
}
}
if len(nonExistentManifests) == 0 {
return nil
}
return fmt.Errorf("The control plane seems to be Static Pod-hosted, but some of the manifests don't seem to exist on disk. This probably means you're running 'kubeadm upgrade' on a remote machine, which is not supported for a Static Pod-hosted cluster. Manifest files not found: %v", nonExistentManifests)
}
// IsControlPlaneSelfHosted returns whether the control plane is self hosted or not
func IsControlPlaneSelfHosted(client clientset.Interface) bool {
notReadyDaemonSets, err := getNotReadyDaemonSets(client)
if err != nil {
return false
}
// If there are no NotReady DaemonSets, we are using self-hosting
return len(notReadyDaemonSets) == 0
}
// getNotReadyDaemonSets returns the list of errors for control plane DaemonSets that are not ready
func getNotReadyDaemonSets(client clientset.Interface) ([]error, error) {
notReadyDaemonSets := []error{}
for _, component := range constants.MasterComponents {
dsName := constants.AddSelfHostedPrefix(component)
ds, err := client.AppsV1().DaemonSets(metav1.NamespaceSystem).Get(dsName, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("couldn't get daemonset %q in the %s namespace", dsName, metav1.NamespaceSystem)
}
if err := daemonSetHealth(&ds.Status); err != nil {
notReadyDaemonSets = append(notReadyDaemonSets, fmt.Errorf("DaemonSet %q not healthy: %v", dsName, err))
}
}
return notReadyDaemonSets, nil
}
// daemonSetHealth is a helper function for getting the health of a DaemonSet's status
func daemonSetHealth(dsStatus *apps.DaemonSetStatus) error {
if dsStatus.CurrentNumberScheduled != dsStatus.DesiredNumberScheduled {
return fmt.Errorf("current number of scheduled Pods ('%d') doesn't match the amount of desired Pods ('%d')", dsStatus.CurrentNumberScheduled, dsStatus.DesiredNumberScheduled)
}
if dsStatus.NumberAvailable == 0 {
return fmt.Errorf("no available Pods for DaemonSet")
}
if dsStatus.NumberReady == 0 {
return fmt.Errorf("no ready Pods for DaemonSet")
}
return nil
}
// getNotReadyNodes returns a string slice of nodes in the cluster that are NotReady
func getNotReadyNodes(nodes []v1.Node) []string {
notReadyNodes := []string{}
for _, node := range nodes {
for _, condition := range node.Status.Conditions {
if condition.Type == v1.NodeReady && condition.Status != v1.ConditionTrue {
notReadyNodes = append(notReadyNodes, node.ObjectMeta.Name)
}
}
}
return notReadyNodes
}

cmd/kubeadm/app/phases/upgrade/policy.go

@@ -0,0 +1,184 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"strings"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/pkg/util/version"
)
const (
// MaximumAllowedMinorVersionUpgradeSkew describes how many minor versions kubeadm can upgrade the control plane version in one go
MaximumAllowedMinorVersionUpgradeSkew = 1
// MaximumAllowedMinorVersionDowngradeSkew describes how many minor versions kubeadm can downgrade the control plane version in one go
MaximumAllowedMinorVersionDowngradeSkew = 1
// MaximumAllowedMinorVersionKubeletSkew describes how many minor versions the control plane version and the kubelet can skew in a kubeadm cluster
MaximumAllowedMinorVersionKubeletSkew = 1
)
// VersionSkewPolicyErrors describes version skew errors that might be seen during the validation process in EnforceVersionPolicies
type VersionSkewPolicyErrors struct {
Mandatory []error
Skippable []error
}
// EnforceVersionPolicies enforces that the proposed new version is compatible with all the different version skew policies
func EnforceVersionPolicies(versionGetter VersionGetter, newK8sVersionStr string, newK8sVersion *version.Version, allowExperimentalUpgrades, allowRCUpgrades bool) *VersionSkewPolicyErrors {
skewErrors := &VersionSkewPolicyErrors{
Mandatory: []error{},
Skippable: []error{},
}
clusterVersionStr, clusterVersion, err := versionGetter.ClusterVersion()
if err != nil {
// This case can't be forced: kubeadm has to be able to look up the cluster version for upgrades to work
skewErrors.Mandatory = append(skewErrors.Mandatory, fmt.Errorf("Unable to fetch cluster version: %v", err))
return skewErrors
}
kubeadmVersionStr, kubeadmVersion, err := versionGetter.KubeadmVersion()
if err != nil {
// This case can't be forced: kubeadm has to be able to look up its own version for upgrades to work
skewErrors.Mandatory = append(skewErrors.Mandatory, fmt.Errorf("Unable to fetch kubeadm version: %v", err))
return skewErrors
}
kubeletVersions, err := versionGetter.KubeletVersions()
if err != nil {
// This is a non-critical error; continue although kubeadm couldn't look this up
skewErrors.Skippable = append(skewErrors.Skippable, fmt.Errorf("Unable to fetch kubelet version: %v", err))
}
// Make sure the new version is a supported version (higher than the minimum one supported)
if constants.MinimumControlPlaneVersion.AtLeast(newK8sVersion) {
// This must not happen; kubeadm always supports a minimum version, and we can't go below that
skewErrors.Mandatory = append(skewErrors.Mandatory, fmt.Errorf("Specified version to upgrade to %q is equal to or lower than the minimum supported version %q. Please specify a higher version to upgrade to", newK8sVersionStr, constants.MinimumControlPlaneVersion))
}
// kubeadm doesn't support upgrades that skip a minor version; e.g. a v1.7 -> v1.9 upgrade is not supported right away
if newK8sVersion.Minor() > clusterVersion.Minor()+MaximumAllowedMinorVersionUpgradeSkew {
tooLargeUpgradeSkewErr := fmt.Errorf("Specified version to upgrade to %q is too high; kubeadm can upgrade only %d minor version at a time", newK8sVersionStr, MaximumAllowedMinorVersionUpgradeSkew)
// If the version that we're about to upgrade to is a released version, we should fully enforce this policy
// If the version is a CI/dev/experimental version, it's okay to jump two minor version steps, but then require the -f flag
if len(newK8sVersion.PreRelease()) == 0 {
skewErrors.Mandatory = append(skewErrors.Mandatory, tooLargeUpgradeSkewErr)
} else {
skewErrors.Skippable = append(skewErrors.Skippable, tooLargeUpgradeSkewErr)
}
}
// kubeadm doesn't support downgrades that skip a minor version; e.g. a v1.9 -> v1.7 downgrade is not supported right away
if newK8sVersion.Minor() < clusterVersion.Minor()-MaximumAllowedMinorVersionDowngradeSkew {
tooLargeDowngradeSkewErr := fmt.Errorf("Specified version to downgrade to %q is too low; kubeadm can downgrade only %d minor version at a time", newK8sVersionStr, MaximumAllowedMinorVersionDowngradeSkew)
// If the version that we're about to downgrade to is a released version, we should fully enforce this policy
// If the version is a CI/dev/experimental version, it's okay to jump two minor version steps, but then require the -f flag
if len(newK8sVersion.PreRelease()) == 0 {
skewErrors.Mandatory = append(skewErrors.Mandatory, tooLargeDowngradeSkewErr)
} else {
skewErrors.Skippable = append(skewErrors.Skippable, tooLargeDowngradeSkewErr)
}
}
// If the kubeadm version is lower than what we want to upgrade to, error out
if kubeadmVersion.LessThan(newK8sVersion) {
if newK8sVersion.Minor() > kubeadmVersion.Minor() {
tooLargeKubeadmSkew := fmt.Errorf("Specified version to upgrade to %q is at least one minor release higher than the kubeadm minor release (%d > %d). Such an upgrade is not supported", newK8sVersionStr, newK8sVersion.Minor(), kubeadmVersion.Minor())
// This is unsupported; kubeadm has no idea how it should handle a newer minor release than itself
// If the version is a CI/dev/experimental version though, lower the severity of this check, but then require the -f flag
if len(newK8sVersion.PreRelease()) == 0 {
skewErrors.Mandatory = append(skewErrors.Mandatory, tooLargeKubeadmSkew)
} else {
skewErrors.Skippable = append(skewErrors.Skippable, tooLargeKubeadmSkew)
}
} else {
// Upgrading to a higher patch version than kubeadm is ok if the user specifies --force. Not recommended, but possible.
skewErrors.Skippable = append(skewErrors.Skippable, fmt.Errorf("Specified version to upgrade to %q is higher than the kubeadm version %q. Upgrade kubeadm first using the tool you used to install kubeadm", newK8sVersionStr, kubeadmVersionStr))
}
}
if kubeadmVersion.Major() > newK8sVersion.Major() ||
kubeadmVersion.Minor() > newK8sVersion.Minor() {
skewErrors.Skippable = append(skewErrors.Skippable, fmt.Errorf("Kubeadm version %s can only be used to upgrade to Kubernetes version %d.%d", kubeadmVersionStr, kubeadmVersion.Major(), kubeadmVersion.Minor()))
}
// Detect if the version is unstable and the user didn't allow that
if err = detectUnstableVersionError(newK8sVersion, newK8sVersionStr, allowExperimentalUpgrades, allowRCUpgrades); err != nil {
skewErrors.Skippable = append(skewErrors.Skippable, err)
}
// Detect if there are too old kubelets in the cluster
// Check for nil here, since kubeletVersions can only be nil if KubeletVersions() returned an error above.
// In that case it's okay to skip this check.
if kubeletVersions != nil {
if err = detectTooOldKubelets(newK8sVersion, kubeletVersions); err != nil {
skewErrors.Skippable = append(skewErrors.Skippable, err)
}
}
// If we did not see any errors, return nil
if len(skewErrors.Skippable) == 0 && len(skewErrors.Mandatory) == 0 {
return nil
}
// Uh oh, we encountered one or more errors, return them
return skewErrors
}
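// exampleHandlePolicyErrors is a minimal sketch of how a caller might act on the returned
// skew errors; the force flag is hypothetical and mirrors the -f/--force behavior referenced
// in the comments above.
func exampleHandlePolicyErrors(skewErrors *VersionSkewPolicyErrors, force bool) error {
if skewErrors == nil {
// No policy violations at all
return nil
}
if len(skewErrors.Mandatory) > 0 {
// Mandatory errors can never be overridden
return fmt.Errorf("mandatory version skew errors: %v", skewErrors.Mandatory)
}
if !force {
// Skippable errors block the upgrade unless the user forces it
return fmt.Errorf("version skew errors (can be overridden with force): %v", skewErrors.Skippable)
}
return nil
}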
// detectUnstableVersionError is a helper function for detecting if the unstable version (if specified) is allowed to be used
func detectUnstableVersionError(newK8sVersion *version.Version, newK8sVersionStr string, allowExperimentalUpgrades, allowRCUpgrades bool) error {
// Short-circuit quickly if this is not an unstable version
if len(newK8sVersion.PreRelease()) == 0 {
return nil
}
// If the user has specified that unstable versions are fine, then no error should be returned
if allowExperimentalUpgrades {
return nil
}
// If this is a release candidate and we allow such ones, everything's fine
if strings.HasPrefix(newK8sVersion.PreRelease(), "rc") && allowRCUpgrades {
return nil
}
return fmt.Errorf("Specified version to upgrade to %q is an unstable version and such upgrades weren't allowed via setting the --allow-*-upgrades flags", newK8sVersionStr)
}
// detectTooOldKubelets errors out if the kubelet versions are so old that an unsupported skew would happen if the cluster was upgraded
func detectTooOldKubelets(newK8sVersion *version.Version, kubeletVersions map[string]uint16) error {
tooOldKubeletVersions := []string{}
for versionStr := range kubeletVersions {
kubeletVersion, err := version.ParseSemantic(versionStr)
if err != nil {
return fmt.Errorf("couldn't parse kubelet version %s", versionStr)
}
if newK8sVersion.Minor() > kubeletVersion.Minor()+MaximumAllowedMinorVersionKubeletSkew {
tooOldKubeletVersions = append(tooOldKubeletVersions, versionStr)
}
}
if len(tooOldKubeletVersions) == 0 {
return nil
}
return fmt.Errorf("There are kubelets in this cluster that are too old that have these versions %v", tooOldKubeletVersions)
}

cmd/kubeadm/app/phases/upgrade/policy_test.go

@@ -0,0 +1,222 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"testing"
"k8s.io/kubernetes/pkg/util/version"
)
func TestEnforceVersionPolicies(t *testing.T) {
tests := []struct {
name string
vg *fakeVersionGetter
expectedMandatoryErrs int
expectedSkippableErrs int
allowExperimental, allowRCs bool
newK8sVersion string
}{
{
name: "minor upgrade",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.10.5",
},
newK8sVersion: "v1.10.5",
},
{
name: "major upgrade",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.2",
kubeadmVersion: "v1.11.1",
},
newK8sVersion: "v1.11.0",
},
{
name: "downgrade",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.10.3",
},
newK8sVersion: "v1.10.2",
},
{
name: "same version upgrade",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.10.3",
},
newK8sVersion: "v1.10.3",
},
{
name: "new version must be higher than v1.10.0",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.10.3",
},
newK8sVersion: "v1.9.10",
expectedMandatoryErrs: 1, // version must be higher than v1.10.0
expectedSkippableErrs: 1, // can't upgrade old k8s with newer kubeadm
},
{
name: "upgrading two minor versions in one go is not supported",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.12.0",
},
newK8sVersion: "v1.12.0",
expectedMandatoryErrs: 1, // can't upgrade two minor versions
expectedSkippableErrs: 1, // kubelet <-> apiserver skew too large
},
{
name: "downgrading two minor versions in one go is not supported",
vg: &fakeVersionGetter{
clusterVersion: "v1.12.3",
kubeletVersion: "v1.12.3",
kubeadmVersion: "v1.12.0",
},
newK8sVersion: "v1.10.3",
expectedMandatoryErrs: 1, // can't downgrade two minor versions
expectedSkippableErrs: 1, // can't upgrade old k8s with newer kubeadm
},
{
name: "kubeadm version must be higher than the new kube version. However, patch version skews may be forced",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.10.3",
},
newK8sVersion: "v1.10.5",
expectedSkippableErrs: 1,
},
{
name: "kubeadm version must be higher than the new kube version. Trying to upgrade k8s to a higher minor version than kubeadm itself should never be supported",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.10.3",
},
newK8sVersion: "v1.11.0",
expectedMandatoryErrs: 1,
},
{
name: "the maximum skew between the cluster version and the kubelet versions should be one minor version. This may be forced through though.",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.9.8",
kubeadmVersion: "v1.11.0",
},
newK8sVersion: "v1.11.0",
expectedSkippableErrs: 1,
},
{
name: "experimental upgrades supported if the flag is set",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.11.0-beta.1",
},
newK8sVersion: "v1.11.0-beta.1",
allowExperimental: true,
},
{
name: "release candidate upgrades supported if the flag is set",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.11.0-rc.1",
},
newK8sVersion: "v1.11.0-rc.1",
allowRCs: true,
},
{
name: "release candidate upgrades supported if the flag is set",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.11.0-rc.1",
},
newK8sVersion: "v1.11.0-rc.1",
allowExperimental: true,
},
{
name: "the user should not be able to upgrade to an experimental version if they haven't opted into that",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.11.0-beta.1",
},
newK8sVersion: "v1.11.0-beta.1",
allowRCs: true,
expectedSkippableErrs: 1,
},
{
name: "the user should not be able to upgrade to an release candidate version if they haven't opted into that",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.11.0-rc.1",
},
newK8sVersion: "v1.11.0-rc.1",
expectedSkippableErrs: 1,
},
{
name: "the user can't use a newer minor version of kubeadm to upgrade an older version of kubeadm",
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.11.0",
},
newK8sVersion: "v1.10.6",
expectedSkippableErrs: 1, // can't upgrade old k8s with newer kubeadm
},
}
for _, rt := range tests {
t.Run(rt.name, func(t *testing.T) {
newK8sVer, err := version.ParseSemantic(rt.newK8sVersion)
if err != nil {
t.Fatalf("couldn't parse version %s: %v", rt.newK8sVersion, err)
}
actualSkewErrs := EnforceVersionPolicies(rt.vg, rt.newK8sVersion, newK8sVer, rt.allowExperimental, rt.allowRCs)
if actualSkewErrs == nil {
// No errors were seen. Report unit test failure if we expected to see errors
if rt.expectedMandatoryErrs+rt.expectedSkippableErrs > 0 {
t.Errorf("failed TestEnforceVersionPolicies\n\texpected errors but got none")
}
// Otherwise, just move on with the next test
return
}
if len(actualSkewErrs.Skippable) != rt.expectedSkippableErrs {
t.Errorf("failed TestEnforceVersionPolicies\n\texpected skippable errors: %d\n\tgot skippable errors: %d %v", rt.expectedSkippableErrs, len(actualSkewErrs.Skippable), *rt.vg)
}
if len(actualSkewErrs.Mandatory) != rt.expectedMandatoryErrs {
t.Errorf("failed TestEnforceVersionPolicies\n\texpected mandatory errors: %d\n\tgot mandatory errors: %d %v", rt.expectedMandatoryErrs, len(actualSkewErrs.Mandatory), *rt.vg)
}
})
}
}
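// exampleMinorSkewTooLarge is a hypothetical, illustrative sketch of the semver
// arithmetic the policy checks above build on; MustParseSemantic and Minor come
// from the same pkg/util/version package this test already uses, and the version
// values are made up.
func exampleMinorSkewTooLarge() bool {
cluster := version.MustParseSemantic("v1.10.3")
target := version.MustParseSemantic("v1.12.0")
// Upgrading more than one minor version in one go is rejected as a mandatory error.
return target.Minor()-cluster.Minor() > 1
}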

View File

@@ -0,0 +1,309 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
certutil "k8s.io/client-go/util/cert"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiv1alpha2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/features"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/proxy"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo"
nodebootstraptoken "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node"
certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
kubeletphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet"
patchnodephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/patchnode"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
dryrunutil "k8s.io/kubernetes/cmd/kubeadm/app/util/dryrun"
"k8s.io/kubernetes/pkg/util/version"
)
var expiry = 180 * 24 * time.Hour
// PerformPostUpgradeTasks runs nearly the same functions as 'kubeadm init' would do
// Note that the markmaster phase is left out since it isn't needed, and no token is created, as token creation doesn't belong in an upgrade
func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterConfiguration, newK8sVer *version.Version, dryRun bool) error {
errs := []error{}
// Upload currently used configuration to the cluster
// Note: This is done right in the beginning of cluster initialization; as we might want to make other phases
// depend on centralized information from this source in the future
if err := uploadconfig.UploadConfiguration(cfg, client); err != nil {
errs = append(errs, err)
}
// Create the new, version-branched kubelet ComponentConfig ConfigMap
if err := kubeletphase.CreateConfigMap(cfg, client); err != nil {
errs = append(errs, fmt.Errorf("error creating kubelet configuration ConfigMap: %v", err))
}
// Write the new kubelet config down to disk and the env file if needed
if err := writeKubeletConfigFiles(client, cfg, newK8sVer, dryRun); err != nil {
errs = append(errs, err)
}
// Annotate the node with the crisocket information, sourced either from the MasterConfiguration struct or
// --cri-socket.
// TODO: In the future we want to use something more official like NodeStatus or similar for detecting this properly
if err := patchnodephase.AnnotateCRISocket(client, cfg.NodeRegistration.Name, cfg.NodeRegistration.CRISocket); err != nil {
errs = append(errs, fmt.Errorf("error uploading crisocket: %v", err))
}
// Create/update RBAC rules that make the bootstrap tokens able to post CSRs
if err := nodebootstraptoken.AllowBootstrapTokensToPostCSRs(client); err != nil {
errs = append(errs, err)
}
// Create/update RBAC rules that make the bootstrap tokens able to get their CSRs approved automatically
if err := nodebootstraptoken.AutoApproveNodeBootstrapTokens(client); err != nil {
errs = append(errs, err)
}
// Create/update RBAC rules that allow the nodes to rotate certificates and get their CSRs approved automatically
if err := nodebootstraptoken.AutoApproveNodeCertificateRotation(client); err != nil {
errs = append(errs, err)
}
// Upgrade to a self-hosted control plane if possible
if err := upgradeToSelfHosting(client, cfg, dryRun); err != nil {
errs = append(errs, err)
}
// TODO: Is this needed here? I think that updating cluster info should probably be separate from a normal upgrade
// Create the cluster-info ConfigMap with the associated RBAC rules
// if err := clusterinfo.CreateBootstrapConfigMapIfNotExists(client, kubeadmconstants.GetAdminKubeConfigPath()); err != nil {
// return err
//}
// Create/update RBAC rules that make the cluster-info ConfigMap reachable
if err := clusterinfo.CreateClusterInfoRBACRules(client); err != nil {
errs = append(errs, err)
}
// Rotate the kube-apiserver cert and key if needed
if err := backupAPIServerCertIfNeeded(cfg, dryRun); err != nil {
errs = append(errs, err)
}
// Upgrade kube-dns/CoreDNS and kube-proxy
if err := dns.EnsureDNSAddon(cfg, client); err != nil {
errs = append(errs, err)
}
// Remove the old DNS deployment if a new DNS service is now used (kube-dns to CoreDNS or vice versa)
if err := removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg, client, dryRun); err != nil {
errs = append(errs, err)
}
if err := proxy.EnsureProxyAddon(cfg, client); err != nil {
errs = append(errs, err)
}
return errors.NewAggregate(errs)
}
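// exampleAggregateErrors is a minimal, hypothetical sketch of the collect-then-aggregate
// error pattern used above: each step appends to errs instead of returning early, so one
// failed step doesn't hide the others. The step names are made up for illustration.
func exampleAggregateErrors() error {
errs := []error{}
for _, step := range []string{"upload-config", "kubelet-config", "rbac"} {
if step == "kubelet-config" { // simulated failure
errs = append(errs, fmt.Errorf("step %s failed", step))
}
}
// NewAggregate returns nil for an empty slice and a flattened error otherwise.
return errors.NewAggregate(errs)
}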
func removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface, dryRun bool) error {
return apiclient.TryRunCommand(func() error {
installedDeploymentName := kubeadmconstants.KubeDNS
deploymentToDelete := kubeadmconstants.CoreDNS
if features.Enabled(cfg.FeatureGates, features.CoreDNS) {
installedDeploymentName = kubeadmconstants.CoreDNS
deploymentToDelete = kubeadmconstants.KubeDNS
}
// If we're dry-running, we don't need to wait for the new DNS addon to become ready
if !dryRun {
dnsDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(installedDeploymentName, metav1.GetOptions{})
if err != nil {
return err
}
if dnsDeployment.Status.ReadyReplicas == 0 {
return fmt.Errorf("the DNS deployment isn't ready yet")
}
}
// We don't want to wait for the DNS deployment above to become ready when dryrunning (as it never will)
// but here we should execute the DELETE command against the dryrun clientset, as it will only be logged
err := apiclient.DeleteDeploymentForeground(client, metav1.NamespaceSystem, deploymentToDelete)
if err != nil && !apierrors.IsNotFound(err) {
return err
}
return nil
}, 10)
}
func upgradeToSelfHosting(client clientset.Interface, cfg *kubeadmapi.MasterConfiguration, dryRun bool) error {
if features.Enabled(cfg.FeatureGates, features.SelfHosting) && !IsControlPlaneSelfHosted(client) {
waiter := getWaiter(dryRun, client)
// kubeadm will now convert the static Pod-hosted control plane into a self-hosted one
fmt.Println("[self-hosted] Creating self-hosted control plane.")
if err := selfhosting.CreateSelfHostedControlPlane(kubeadmconstants.GetStaticPodDirectory(), kubeadmconstants.KubernetesDir, cfg, client, waiter, dryRun); err != nil {
return fmt.Errorf("error creating self hosted control plane: %v", err)
}
}
return nil
}
func backupAPIServerCertIfNeeded(cfg *kubeadmapi.MasterConfiguration, dryRun bool) error {
certAndKeyDir := kubeadmapiv1alpha2.DefaultCertificatesDir
shouldBackup, err := shouldBackupAPIServerCertAndKey(certAndKeyDir)
if err != nil {
// Don't fail the upgrade phase if we fail to determine whether to back up the kube-apiserver cert and key.
return fmt.Errorf("[postupgrade] WARNING: failed to determine whether to back up kube-apiserver cert and key: %v", err)
}
if !shouldBackup {
return nil
}
// If dry-running, just say that this would happen to the user and exit
if dryRun {
fmt.Println("[postupgrade] Would rotate the API server certificate and key.")
return nil
}
// Don't fail the upgrade phase if backing up the kube-apiserver cert and key fails; just continue rotating the cert
// TODO: We might want to reconsider this choice.
if err := backupAPIServerCertAndKey(certAndKeyDir); err != nil {
fmt.Printf("[postupgrade] WARNING: failed to backup kube-apiserver cert and key: %v", err)
}
return certsphase.CreateAPIServerCertAndKeyFiles(cfg)
}
func writeKubeletConfigFiles(client clientset.Interface, cfg *kubeadmapi.MasterConfiguration, newK8sVer *version.Version, dryRun bool) error {
kubeletDir, err := getKubeletDir(dryRun)
if err != nil {
// The error here should never occur in reality, would only be thrown if /tmp doesn't exist on the machine.
return err
}
errs := []error{}
// Write the configuration for the kubelet down to disk so the upgraded kubelet can start with fresh config
if err := kubeletphase.DownloadConfig(client, newK8sVer, kubeletDir); err != nil {
// Tolerate a NotFound error when dry-running, as there is a pretty common scenario: the dry-run process
// *would* post the new kubelet-config-1.X ConfigMap, which doesn't exist yet when we then try to download it
// again.
if !(apierrors.IsNotFound(err) && dryRun) {
errs = append(errs, fmt.Errorf("error downloading kubelet configuration from the ConfigMap: %v", err))
}
}
if dryRun { // Print what contents would be written
dryrunutil.PrintDryRunFile(kubeadmconstants.KubeletConfigurationFileName, kubeletDir, kubeadmconstants.KubeletRunDirectory, os.Stdout)
}
envFilePath := filepath.Join(kubeadmconstants.KubeletRunDirectory, kubeadmconstants.KubeletEnvFileName)
if _, err := os.Stat(envFilePath); os.IsNotExist(err) {
// Write env file with flags for the kubelet to use. We do not need to write the --register-with-taints for the master,
// as we handle that ourselves in the markmaster phase
// TODO: Maybe we want to do that some time in the future, in order to remove some logic from the markmaster phase?
if err := kubeletphase.WriteKubeletDynamicEnvFile(&cfg.NodeRegistration, cfg.FeatureGates, false, kubeletDir); err != nil {
errs = append(errs, fmt.Errorf("error writing a dynamic environment file for the kubelet: %v", err))
}
if dryRun { // Print what contents would be written
dryrunutil.PrintDryRunFile(kubeadmconstants.KubeletEnvFileName, kubeletDir, kubeadmconstants.KubeletRunDirectory, os.Stdout)
}
}
return errors.NewAggregate(errs)
}
// getWaiter gets the right waiter implementation for the right occasion
// TODO: Consolidate this with what's in init.go?
func getWaiter(dryRun bool, client clientset.Interface) apiclient.Waiter {
if dryRun {
return dryrunutil.NewWaiter()
}
return apiclient.NewKubeWaiter(client, 30*time.Minute, os.Stdout)
}
// getKubeletDir gets the kubelet directory based on whether the user is dry-running this command or not.
// TODO: Consolidate this with similar funcs?
func getKubeletDir(dryRun bool) (string, error) {
if dryRun {
return ioutil.TempDir("", "kubeadm-upgrade-dryrun")
}
return kubeadmconstants.KubeletRunDirectory, nil
}
// backupAPIServerCertAndKey backs up the old cert and key of kube-apiserver to a specified directory.
func backupAPIServerCertAndKey(certAndKeyDir string) error {
subDir := filepath.Join(certAndKeyDir, "expired")
if err := os.Mkdir(subDir, 0766); err != nil {
return fmt.Errorf("failed to created backup directory %s: %v", subDir, err)
}
filesToMove := map[string]string{
filepath.Join(certAndKeyDir, kubeadmconstants.APIServerCertName): filepath.Join(subDir, kubeadmconstants.APIServerCertName),
filepath.Join(certAndKeyDir, kubeadmconstants.APIServerKeyName): filepath.Join(subDir, kubeadmconstants.APIServerKeyName),
}
return moveFiles(filesToMove)
}
// moveFiles moves files from one directory to another.
func moveFiles(files map[string]string) error {
filesToRecover := map[string]string{}
for from, to := range files {
if err := os.Rename(from, to); err != nil {
return rollbackFiles(filesToRecover, err)
}
filesToRecover[to] = from
}
return nil
}
// rollbackFiles moves the files back to the original directory.
func rollbackFiles(files map[string]string, originalErr error) error {
errs := []error{originalErr}
for from, to := range files {
if err := os.Rename(from, to); err != nil {
errs = append(errs, err)
}
}
return fmt.Errorf("couldn't move these files: %v. Got errors: %v", files, errors.NewAggregate(errs))
}
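// exampleMoveWithRollback is a hypothetical usage sketch of the moveFiles/rollbackFiles
// pair above: if any os.Rename fails halfway through, every file moved so far is renamed
// back. The directory and file names here are made up for illustration.
func exampleMoveWithRollback(certDir string) error {
backupDir := filepath.Join(certDir, "expired")
filesToMove := map[string]string{
filepath.Join(certDir, "apiserver.crt"): filepath.Join(backupDir, "apiserver.crt"),
filepath.Join(certDir, "apiserver.key"): filepath.Join(backupDir, "apiserver.key"),
}
// moveFiles records each successful rename so rollbackFiles can undo them all on failure.
return moveFiles(filesToMove)
}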
// shouldBackupAPIServerCertAndKey checks whether the kube-apiserver cert was issued more than 180 days ago and should therefore be backed up and rotated.
func shouldBackupAPIServerCertAndKey(certAndKeyDir string) (bool, error) {
apiServerCert := filepath.Join(certAndKeyDir, kubeadmconstants.APIServerCertName)
certs, err := certutil.CertsFromFile(apiServerCert)
if err != nil {
return false, fmt.Errorf("couldn't load the certificate file %s: %v", apiServerCert, err)
}
if len(certs) == 0 {
return false, fmt.Errorf("no certificate data found")
}
if time.Now().Sub(certs[0].NotBefore) > expiry {
return true, nil
}
return false, nil
}
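// exampleCertOlderThanExpiry is a hedged, illustrative sketch of the age check above:
// a certificate qualifies for backup once more than `expiry` (180 days) has passed since
// its NotBefore timestamp; time.Since(t) is shorthand for time.Now().Sub(t).
func exampleCertOlderThanExpiry(notBefore time.Time) bool {
return time.Since(notBefore) > expiry
}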

View File

@@ -0,0 +1,184 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"errors"
"os"
"path/filepath"
"strings"
"testing"
"time"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil"
testutil "k8s.io/kubernetes/cmd/kubeadm/test"
)
func TestBackupAPIServerCertAndKey(t *testing.T) {
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
os.Chmod(tmpdir, 0766)
certPath := filepath.Join(tmpdir, constants.APIServerCertName)
certFile, err := os.OpenFile(certPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)
if err != nil {
t.Fatalf("Failed to create cert file %s: %v", certPath, err)
}
defer certFile.Close()
keyPath := filepath.Join(tmpdir, constants.APIServerKeyName)
keyFile, err := os.OpenFile(keyPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)
if err != nil {
t.Fatalf("Failed to create key file %s: %v", keyPath, err)
}
defer keyFile.Close()
if err := backupAPIServerCertAndKey(tmpdir); err != nil {
t.Fatalf("Failed to backup cert and key in dir %s: %v", tmpdir, err)
}
}
func TestMoveFiles(t *testing.T) {
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
os.Chmod(tmpdir, 0766)
certPath := filepath.Join(tmpdir, constants.APIServerCertName)
certFile, err := os.OpenFile(certPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)
if err != nil {
t.Fatalf("Failed to create cert file %s: %v", certPath, err)
}
defer certFile.Close()
keyPath := filepath.Join(tmpdir, constants.APIServerKeyName)
keyFile, err := os.OpenFile(keyPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)
if err != nil {
t.Fatalf("Failed to create key file %s: %v", keyPath, err)
}
defer keyFile.Close()
subDir := filepath.Join(tmpdir, "expired")
if err := os.Mkdir(subDir, 0766); err != nil {
t.Fatalf("Failed to create backup directory %s: %v", subDir, err)
}
filesToMove := map[string]string{
filepath.Join(tmpdir, constants.APIServerCertName): filepath.Join(subDir, constants.APIServerCertName),
filepath.Join(tmpdir, constants.APIServerKeyName): filepath.Join(subDir, constants.APIServerKeyName),
}
if err := moveFiles(filesToMove); err != nil {
t.Fatalf("Failed to move files %v: %v", filesToMove, err)
}
}
func TestRollbackFiles(t *testing.T) {
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
os.Chmod(tmpdir, 0766)
subDir := filepath.Join(tmpdir, "expired")
if err := os.Mkdir(subDir, 0766); err != nil {
t.Fatalf("Failed to create backup directory %s: %v", subDir, err)
}
certPath := filepath.Join(subDir, constants.APIServerCertName)
certFile, err := os.OpenFile(certPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)
if err != nil {
t.Fatalf("Failed to create cert file %s: %v", certPath, err)
}
defer certFile.Close()
keyPath := filepath.Join(subDir, constants.APIServerKeyName)
keyFile, err := os.OpenFile(keyPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)
if err != nil {
t.Fatalf("Failed to create key file %s: %v", keyPath, err)
}
defer keyFile.Close()
filesToRollBack := map[string]string{
filepath.Join(subDir, constants.APIServerCertName): filepath.Join(tmpdir, constants.APIServerCertName),
filepath.Join(subDir, constants.APIServerKeyName): filepath.Join(tmpdir, constants.APIServerKeyName),
}
errString := "there are files need roll back"
originalErr := errors.New(errString)
err = rollbackFiles(filesToRollBack, originalErr)
if err == nil {
t.Fatalf("Expected error contains %q, got nil", errString)
}
if !strings.Contains(err.Error(), errString) {
t.Fatalf("Expected error contains %q, got %v", errString, err)
}
}
func TestShouldBackupAPIServerCertAndKey(t *testing.T) {
cfg := &kubeadmapi.MasterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"},
Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"},
NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "test-node"},
}
for desc, test := range map[string]struct {
adjustedExpiry time.Duration
expected bool
}{
"default: cert not older than 180 days doesn't needs to backup": {
expected: false,
},
"cert older than 180 days need to backup": {
adjustedExpiry: expiry + 100*time.Hour,
expected: true,
},
} {
caCert, caKey, err := certsphase.NewCACertAndKey()
if err != nil {
t.Fatalf("failed creation of ca cert and key: %v", err)
}
caCert.NotBefore = caCert.NotBefore.Add(-test.adjustedExpiry).UTC()
apiCert, apiKey, err := certsphase.NewAPIServerCertAndKey(cfg, caCert, caKey)
if err != nil {
t.Fatalf("Test %s: failed creation of cert and key: %v", desc, err)
}
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
if err := pkiutil.WriteCertAndKey(tmpdir, constants.APIServerCertAndKeyBaseName, apiCert, apiKey); err != nil {
t.Fatalf("Test %s: failure while saving %s certificate and key: %v", desc, constants.APIServerCertAndKeyBaseName, err)
}
certAndKey := []string{filepath.Join(tmpdir, constants.APIServerCertName), filepath.Join(tmpdir, constants.APIServerKeyName)}
for _, path := range certAndKey {
if _, err := os.Stat(path); os.IsNotExist(err) {
t.Fatalf("Test %s: %s not exist: %v", desc, path, err)
}
}
shouldBackup, err := shouldBackupAPIServerCertAndKey(tmpdir)
if err != nil {
t.Fatalf("Test %s: failed to check shouldBackupAPIServerCertAndKey: %v", desc, err)
}
if shouldBackup != test.expected {
t.Fatalf("Test %s: shouldBackupAPIServerCertAndKey expected %v, got %v", desc, test.expected, shouldBackup)
}
}
}

View File

@@ -0,0 +1,180 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"time"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/images"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
)
const (
prepullPrefix = "upgrade-prepull-"
)
// Prepuller defines an interface for performing a prepull operation in a create-wait-delete fashion in parallel
type Prepuller interface {
CreateFunc(string) error
WaitFunc(string)
DeleteFunc(string) error
}
// DaemonSetPrepuller makes sure the control plane images are available on all masters
type DaemonSetPrepuller struct {
client clientset.Interface
cfg *kubeadmapi.MasterConfiguration
waiter apiclient.Waiter
}
// NewDaemonSetPrepuller creates a new instance of the DaemonSetPrepuller struct
func NewDaemonSetPrepuller(client clientset.Interface, waiter apiclient.Waiter, cfg *kubeadmapi.MasterConfiguration) *DaemonSetPrepuller {
return &DaemonSetPrepuller{
client: client,
cfg: cfg,
waiter: waiter,
}
}
// CreateFunc creates a DaemonSet for making the image available on every relevant node
func (d *DaemonSetPrepuller) CreateFunc(component string) error {
image := images.GetCoreImage(component, d.cfg.GetControlPlaneImageRepository(), d.cfg.KubernetesVersion, d.cfg.UnifiedControlPlaneImage)
ds := buildPrePullDaemonSet(component, image)
// Create the DaemonSet in the API Server
if err := apiclient.CreateOrUpdateDaemonSet(d.client, ds); err != nil {
return fmt.Errorf("unable to create a DaemonSet for prepulling the component %q: %v", component, err)
}
return nil
}
// WaitFunc waits for all Pods in the specified DaemonSet to be in the Running state
func (d *DaemonSetPrepuller) WaitFunc(component string) {
fmt.Printf("[upgrade/prepull] Prepulling image for component %s.\n", component)
d.waiter.WaitForPodsWithLabel("k8s-app=upgrade-prepull-" + component)
}
// DeleteFunc deletes the DaemonSet used for making the image available on every relevant node
func (d *DaemonSetPrepuller) DeleteFunc(component string) error {
dsName := addPrepullPrefix(component)
if err := apiclient.DeleteDaemonSetForeground(d.client, metav1.NamespaceSystem, dsName); err != nil {
return fmt.Errorf("unable to cleanup the DaemonSet used for prepulling %s: %v", component, err)
}
fmt.Printf("[upgrade/prepull] Prepulled image for component %s.\n", component)
return nil
}
// PrepullImagesInParallel creates DaemonSets synchronously but waits in parallel for the images to pull
func PrepullImagesInParallel(kubePrepuller Prepuller, timeout time.Duration) error {
componentsToPrepull := append(constants.MasterComponents, constants.Etcd)
fmt.Printf("[upgrade/prepull] Will prepull images for components %v\n", componentsToPrepull)
timeoutChan := time.After(timeout)
// Synchronously create the DaemonSets
for _, component := range componentsToPrepull {
if err := kubePrepuller.CreateFunc(component); err != nil {
return err
}
}
// Create a channel for streaming data from goroutines that run in parallel to a blocking for loop that cleans up
prePulledChan := make(chan string, len(componentsToPrepull))
for _, component := range componentsToPrepull {
go func(c string) {
// Wait as long as needed. This WaitFunc call should be blocking until completion
kubePrepuller.WaitFunc(c)
// When the task is done, go ahead and cleanup by sending the name to the channel
prePulledChan <- c
}(component)
}
// This call blocks until all expected messages are received from the channel or errors out if timeoutChan fires.
// For every successful wait, kubePrepuller.DeleteFunc is executed
if err := waitForItemsFromChan(timeoutChan, prePulledChan, len(componentsToPrepull), kubePrepuller.DeleteFunc); err != nil {
return err
}
fmt.Println("[upgrade/prepull] Successfully prepulled the images for all the control plane components")
return nil
}
// waitForItemsFromChan waits for n elements from stringChan with a timeout. For every item received from stringChan, cleanupFunc is executed
func waitForItemsFromChan(timeoutChan <-chan time.Time, stringChan chan string, n int, cleanupFunc func(string) error) error {
i := 0
for {
select {
case <-timeoutChan:
return fmt.Errorf("The prepull operation timed out")
case result := <-stringChan:
i++
// If the cleanup function errors, error out here as well
if err := cleanupFunc(result); err != nil {
return err
}
if i == n {
return nil
}
}
}
}
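// exampleFanOutFanIn is a hedged, illustrative sketch of the fan-out/fan-in pattern used
// by PrepullImagesInParallel and waitForItemsFromChan: one goroutine per component blocks
// on its wait, then reports on a channel; the collector loop takes n results or times out.
// The component list and log text are made up for illustration.
func exampleFanOutFanIn(components []string, timeout time.Duration) error {
done := make(chan string, len(components))
timeoutChan := time.After(timeout)
for _, c := range components {
go func(name string) {
// Stands in for the blocking WaitFunc call.
done <- name
}(c)
}
for i := 0; i < len(components); i++ {
select {
case <-timeoutChan:
return fmt.Errorf("timed out waiting for components %v", components)
case name := <-done:
// Stands in for the per-component DeleteFunc cleanup.
fmt.Printf("[upgrade/prepull] example: component %s done\n", name)
}
}
return nil
}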
// addPrepullPrefix adds the prepull prefix for this functionality; can be used in names, labels, etc.
func addPrepullPrefix(component string) string {
return fmt.Sprintf("%s%s", prepullPrefix, component)
}
// buildPrePullDaemonSet builds the DaemonSet that ensures the control plane image is available
func buildPrePullDaemonSet(component, image string) *apps.DaemonSet {
var gracePeriodSecs int64
return &apps.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: addPrepullPrefix(component),
Namespace: metav1.NamespaceSystem,
},
Spec: apps.DaemonSetSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"k8s-app": addPrepullPrefix(component),
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: component,
Image: image,
Command: []string{"/bin/sleep", "3600"},
},
},
NodeSelector: map[string]string{
constants.LabelNodeRoleMaster: "",
},
Tolerations: []v1.Toleration{constants.MasterToleration},
TerminationGracePeriodSeconds: &gracePeriodSecs,
},
},
},
}
}

View File

@@ -0,0 +1,145 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"testing"
"time"
//"k8s.io/kubernetes/pkg/util/version"
)
// failedCreatePrepuller is a fake prepuller that errors for kube-controller-manager in the CreateFunc call
type failedCreatePrepuller struct{}
func NewFailedCreatePrepuller() Prepuller {
return &failedCreatePrepuller{}
}
func (p *failedCreatePrepuller) CreateFunc(component string) error {
if component == "kube-controller-manager" {
return fmt.Errorf("boo")
}
return nil
}
func (p *failedCreatePrepuller) WaitFunc(component string) {}
func (p *failedCreatePrepuller) DeleteFunc(component string) error {
return nil
}
// foreverWaitPrepuller is a fake prepuller that basically waits "forever" (10 mins, but longer than the 10sec timeout)
type foreverWaitPrepuller struct{}
func NewForeverWaitPrepuller() Prepuller {
return &foreverWaitPrepuller{}
}
func (p *foreverWaitPrepuller) CreateFunc(component string) error {
return nil
}
func (p *foreverWaitPrepuller) WaitFunc(component string) {
time.Sleep(10 * time.Minute)
}
func (p *foreverWaitPrepuller) DeleteFunc(component string) error {
return nil
}
// failedDeletePrepuller is a fake prepuller that errors for kube-scheduler in the DeleteFunc call
type failedDeletePrepuller struct{}
func NewFailedDeletePrepuller() Prepuller {
return &failedDeletePrepuller{}
}
func (p *failedDeletePrepuller) CreateFunc(component string) error {
return nil
}
func (p *failedDeletePrepuller) WaitFunc(component string) {}
func (p *failedDeletePrepuller) DeleteFunc(component string) error {
if component == "kube-scheduler" {
return fmt.Errorf("boo")
}
return nil
}
// goodPrepuller is a fake prepuller that works as expected
type goodPrepuller struct{}
func NewGoodPrepuller() Prepuller {
return &goodPrepuller{}
}
func (p *goodPrepuller) CreateFunc(component string) error {
time.Sleep(300 * time.Millisecond)
return nil
}
func (p *goodPrepuller) WaitFunc(component string) {
time.Sleep(300 * time.Millisecond)
}
func (p *goodPrepuller) DeleteFunc(component string) error {
time.Sleep(300 * time.Millisecond)
return nil
}
func TestPrepullImagesInParallel(t *testing.T) {
tests := []struct {
p Prepuller
timeout time.Duration
expectedErr bool
}{
{ // should error out; create failed
p: NewFailedCreatePrepuller(),
timeout: 10 * time.Second,
expectedErr: true,
},
{ // should error out; timeout exceeded
p: NewForeverWaitPrepuller(),
timeout: 10 * time.Second,
expectedErr: true,
},
{ // should error out; delete failed
p: NewFailedDeletePrepuller(),
timeout: 10 * time.Second,
expectedErr: true,
},
{ // should work just fine
p: NewGoodPrepuller(),
timeout: 10 * time.Second,
expectedErr: false,
},
}
for _, rt := range tests {
actualErr := PrepullImagesInParallel(rt.p, rt.timeout)
if (actualErr != nil) != rt.expectedErr {
t.Errorf(
"failed TestPrepullImagesInParallel\n\texpected error: %t\n\tgot: %t",
rt.expectedErr,
(actualErr != nil),
)
}
}
}

View File

@@ -0,0 +1,272 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"time"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
"k8s.io/kubernetes/pkg/util/version"
)
const (
// upgradeTempDSPrefix is the prefix added to the temporary DaemonSet's name used during the upgrade
upgradeTempDSPrefix = "temp-upgrade-"
// upgradeTempLabel is the label key used for identifying the temporary component's DaemonSet
upgradeTempLabel = "temp-upgrade-component"
// selfHostingWaitTimeout describes the maximum amount of time a self-hosting wait process should wait before timing out
selfHostingWaitTimeout = 2 * time.Minute
// selfHostingFailureThreshold describes how many times kubeadm will retry creating the DaemonSets
selfHostingFailureThreshold int = 10
)
// controlPlaneComponentResources holds the relevant Pod and DaemonSet associated with a control plane component
type controlPlaneComponentResources struct {
pod *v1.Pod
daemonSet *apps.DaemonSet
}
// SelfHostedControlPlane upgrades a self-hosted control plane
// It works as follows:
// - The client gets the currently running DaemonSets and their associated Pods used for self-hosting the control plane
// - A temporary DaemonSet for the component in question is created, nearly identical to the DaemonSet for the self-hosted component running right now
// - Why use this temporary DaemonSet? Because the RollingUpdate strategy for upgrading DaemonSets first kills the old Pod, and then adds the new one
// - This doesn't work for self-hosted upgrades: if you remove the only API server instance in the cluster, for example, the cluster essentially goes down
// - So instead, a nearly identical copy of the pre-upgrade DaemonSet is created and applied to the cluster. In the beginning, this duplicate DS is just idle
// - kubeadm waits for the temporary DaemonSet's Pod to become Running
// - kubeadm updates the real, self-hosted component. This will result in the pre-upgrade component Pod being removed from the cluster
// - Luckily, the temporary/backup DaemonSet now kicks in, takes over, and acts as the control plane. It recognizes that a new Pod should be created,
// as the "real" DaemonSet is being updated.
// - kubeadm waits for the pre-upgrade Pod to become deleted. It now takes advantage of the backup/temporary component
// - kubeadm waits for the new, upgraded DaemonSet to become Running.
// - Now that the new, upgraded DaemonSet is Running, we can delete the backup/temporary DaemonSet
// - Lastly, make sure the API /healthz endpoint still is reachable
//
// TL;DR: This is what the flow looks like in pseudo-code:
// for [kube-apiserver, kube-controller-manager, kube-scheduler], do:
// 1. Self-Hosted component v1 Running
// -> Duplicate the DaemonSet manifest
// 2. Self-Hosted component v1 Running (active). Backup component v1 Running (passive)
// -> Upgrade the Self-Hosted component v1 to v2.
// -> Self-Hosted component v1 is Deleted from the cluster
// 3. Backup component v1 Running becomes active and completes the upgrade by creating the Self-Hosted component v2 Pod (passive)
// -> Wait for Self-Hosted component v2 to become Running
// 4. Backup component v1 Running (active). Self-Hosted component v2 Running (passive)
// -> Backup component v1 is Deleted
// 5. Wait for Self-Hosted component v2 Running to become active
// 6. Repeat for all control plane components
func SelfHostedControlPlane(client clientset.Interface, waiter apiclient.Waiter, cfg *kubeadmapi.MasterConfiguration, k8sVersion *version.Version) error {
// Adjust the timeout slightly to something self-hosting specific
waiter.SetTimeout(selfHostingWaitTimeout)
// This function returns a map of DaemonSet objects ready to post to the API server
newControlPlaneDaemonSets := BuildUpgradedDaemonSetsFromConfig(cfg, k8sVersion)
controlPlaneResources, err := getCurrentControlPlaneComponentResources(client)
if err != nil {
return err
}
for _, component := range constants.MasterComponents {
// Make a shallow copy of the current DaemonSet in order to create a new, temporary one
tempDS := *controlPlaneResources[component].daemonSet
// Mutate the temp daemonset a little to be suitable for this usage (change label selectors, etc)
mutateTempDaemonSet(&tempDS, component)
// Create or update the DaemonSet in the API Server, and retry selfHostingFailureThreshold times if it errors out
if err := apiclient.TryRunCommand(func() error {
return apiclient.CreateOrUpdateDaemonSet(client, &tempDS)
}, selfHostingFailureThreshold); err != nil {
return err
}
// Wait for the temporary/backup self-hosted component to come up
if err := waiter.WaitForPodsWithLabel(buildTempUpgradeDSLabelQuery(component)); err != nil {
return err
}
newDS := newControlPlaneDaemonSets[component]
// Upgrade the component's self-hosted resource
// During this upgrade; the temporary/backup component will take over
if err := apiclient.TryRunCommand(func() error {
if _, err := client.AppsV1().DaemonSets(newDS.ObjectMeta.Namespace).Update(newDS); err != nil {
return fmt.Errorf("couldn't update self-hosted component's DaemonSet: %v", err)
}
return nil
}, selfHostingFailureThreshold); err != nil {
return err
}
// Wait for the component's old Pod to disappear
oldPod := controlPlaneResources[component].pod
if err := waiter.WaitForPodToDisappear(oldPod.ObjectMeta.Name); err != nil {
return err
}
// Wait for the main, upgraded self-hosted component to come up
// Here we're talking to the temporary/backup component; the upgraded component is in the process of starting up
if err := waiter.WaitForPodsWithLabel(selfhosting.BuildSelfHostedComponentLabelQuery(component)); err != nil {
return err
}
// Delete the temporary DaemonSet, and retry selfHostingFailureThreshold times if it errors out
// In order to pivot back to the upgraded API server, we kill the temporary/backup component
if err := apiclient.TryRunCommand(func() error {
return apiclient.DeleteDaemonSetForeground(client, tempDS.ObjectMeta.Namespace, tempDS.ObjectMeta.Name)
}, selfHostingFailureThreshold); err != nil {
return err
}
// Just as an extra safety check; make sure the API server is returning ok at the /healthz endpoint
if err := waiter.WaitForAPI(); err != nil {
return err
}
fmt.Printf("[upgrade/apply] Self-hosted component %q upgraded successfully!\n", component)
}
return nil
}
// BuildUpgradedDaemonSetsFromConfig takes a config object and the current version and returns the DaemonSet objects to post to the master
func BuildUpgradedDaemonSetsFromConfig(cfg *kubeadmapi.MasterConfiguration, k8sVersion *version.Version) map[string]*apps.DaemonSet {
// Here the map of different mutators to use for the control plane's podspec is stored
mutators := selfhosting.GetMutatorsFromFeatureGates(cfg.FeatureGates)
// Get the new PodSpecs to use
controlPlanePods := controlplane.GetStaticPodSpecs(cfg, k8sVersion)
// Store the created DaemonSets in this map
controlPlaneDaemonSets := map[string]*apps.DaemonSet{}
for _, component := range constants.MasterComponents {
podSpec := controlPlanePods[component].Spec
// Build the full DaemonSet object from the PodSpec generated from the control plane phase and
// using the self-hosting mutators available from the selfhosting phase
ds := selfhosting.BuildDaemonSet(component, &podSpec, mutators)
controlPlaneDaemonSets[component] = ds
}
return controlPlaneDaemonSets
}
// addTempUpgradeDSPrefix adds the upgradeTempDSPrefix to the specified DaemonSet name
func addTempUpgradeDSPrefix(currentName string) string {
return fmt.Sprintf("%s%s", upgradeTempDSPrefix, currentName)
}
// buildTempUpgradeLabels returns the label string-string map for identifying the temporary (backup) DaemonSet
func buildTempUpgradeLabels(component string) map[string]string {
return map[string]string{
upgradeTempLabel: component,
}
}
// buildTempUpgradeDSLabelQuery creates the right label query for matching the temporary (backup) DaemonSet's Pods
func buildTempUpgradeDSLabelQuery(component string) string {
return fmt.Sprintf("%s=%s", upgradeTempLabel, component)
}
// mutateTempDaemonSet mutates the specified self-hosted DaemonSet for the specified component
// in a way that makes it possible to post a nearly identical, temporary DaemonSet as a backup
func mutateTempDaemonSet(tempDS *apps.DaemonSet, component string) {
// Prefix the name of the temporary DaemonSet with upgradeTempDSPrefix
tempDS.ObjectMeta.Name = addTempUpgradeDSPrefix(tempDS.ObjectMeta.Name)
// Set .Labels to something else than the "real" self-hosted components have
tempDS.ObjectMeta.Labels = buildTempUpgradeLabels(component)
tempDS.Spec.Selector.MatchLabels = buildTempUpgradeLabels(component)
tempDS.Spec.Template.ObjectMeta.Labels = buildTempUpgradeLabels(component)
// Clean all unnecessary ObjectMeta fields
tempDS.ObjectMeta = extractRelevantObjectMeta(tempDS.ObjectMeta)
// Reset .Status as we're posting a new object
tempDS.Status = apps.DaemonSetStatus{}
}
// extractRelevantObjectMeta returns only the relevant parts of ObjectMeta required when creating
// a new, identical resource. We should not POST ResourceVersion, UUIDs, etc., only the name, labels,
// namespace and annotations should be preserved.
func extractRelevantObjectMeta(ob metav1.ObjectMeta) metav1.ObjectMeta {
return metav1.ObjectMeta{
Name: ob.Name,
Namespace: ob.Namespace,
Labels: ob.Labels,
Annotations: ob.Annotations,
}
}
// listPodsWithLabelSelector returns the relevant Pods for the given LabelSelector
func listPodsWithLabelSelector(client clientset.Interface, kvLabel string) (*v1.PodList, error) {
return client.CoreV1().Pods(metav1.NamespaceSystem).List(metav1.ListOptions{
LabelSelector: kvLabel,
})
}
// getCurrentControlPlaneComponentResources returns a string-(Pod|DaemonSet) map for later use
func getCurrentControlPlaneComponentResources(client clientset.Interface) (map[string]controlPlaneComponentResources, error) {
controlPlaneResources := map[string]controlPlaneComponentResources{}
for _, component := range constants.MasterComponents {
var podList *v1.PodList
var currentDS *apps.DaemonSet
// Get the self-hosted pod associated with the component
podLabelSelector := selfhosting.BuildSelfHostedComponentLabelQuery(component)
if err := apiclient.TryRunCommand(func() error {
var tryrunerr error
podList, tryrunerr = listPodsWithLabelSelector(client, podLabelSelector)
return tryrunerr // note that tryrunerr is most likely nil here (in successful cases)
}, selfHostingFailureThreshold); err != nil {
return nil, err
}
// Make sure exactly one Pod matches this label selector; otherwise unexpected things can happen
// (zero matches would make the podList.Items[0] access below panic)
if len(podList.Items) != 1 {
return nil, fmt.Errorf("expected exactly one Pod with label selector %q in the %s namespace, found %d", podLabelSelector, metav1.NamespaceSystem, len(podList.Items))
}
}
// Get the component's DaemonSet object
dsName := constants.AddSelfHostedPrefix(component)
if err := apiclient.TryRunCommand(func() error {
var tryrunerr error
// Try to get the current self-hosted component
currentDS, tryrunerr = client.AppsV1().DaemonSets(metav1.NamespaceSystem).Get(dsName, metav1.GetOptions{})
return tryrunerr // note that tryrunerr is most likely nil here (in successful cases)
}, selfHostingFailureThreshold); err != nil {
return nil, err
}
// Add the associated resources to the map to return later
controlPlaneResources[component] = controlPlaneComponentResources{
pod: &podList.Items[0],
daemonSet: currentDS,
}
}
return controlPlaneResources, nil
}

View File

@@ -0,0 +1,516 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"os"
"strings"
"time"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
controlplanephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane"
etcdphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd"
"k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
etcdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd"
"k8s.io/kubernetes/pkg/util/version"
)
// StaticPodPathManager is responsible for tracking the directories used in the static pod upgrade transition
type StaticPodPathManager interface {
// MoveFile should move a file from oldPath to newPath
MoveFile(oldPath, newPath string) error
// RealManifestPath gets the file path for the component in the "real" static pod manifest directory used by the kubelet
RealManifestPath(component string) string
// RealManifestDir should point to the static pod manifest directory used by the kubelet
RealManifestDir() string
// TempManifestPath gets the file path for the component in the temporary directory created for generating new manifests for the upgrade
TempManifestPath(component string) string
// TempManifestDir should point to the temporary directory created for generating new manifests for the upgrade
TempManifestDir() string
// BackupManifestPath gets the file path for the component in the backup directory used for backing up manifests during the transition
BackupManifestPath(component string) string
// BackupManifestDir should point to the backup directory used for backing up manifests during the transition
BackupManifestDir() string
// BackupEtcdDir should point to the backup directory used for backing up the etcd data during the transition
BackupEtcdDir() string
// CleanupDirs cleans up all temporary directories except those the user has asked to keep
CleanupDirs() error
}
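// fakeStaticPodPathManager is a hypothetical in-memory stub of the interface above, the
// kind a unit test might use to exercise the upgrade flow without touching the real
// manifest directories; all directory values are illustrative, and the manifest paths
// reuse the same constants.GetStaticPodFilepath helper as the real implementation below.
type fakeStaticPodPathManager struct {
realDir, tempDir, backupDir, backupEtcd string
}
func (f *fakeStaticPodPathManager) MoveFile(oldPath, newPath string) error {
return os.Rename(oldPath, newPath)
}
func (f *fakeStaticPodPathManager) RealManifestPath(component string) string {
return constants.GetStaticPodFilepath(component, f.realDir)
}
func (f *fakeStaticPodPathManager) RealManifestDir() string { return f.realDir }
func (f *fakeStaticPodPathManager) TempManifestPath(component string) string {
return constants.GetStaticPodFilepath(component, f.tempDir)
}
func (f *fakeStaticPodPathManager) TempManifestDir() string { return f.tempDir }
func (f *fakeStaticPodPathManager) BackupManifestPath(component string) string {
return constants.GetStaticPodFilepath(component, f.backupDir)
}
func (f *fakeStaticPodPathManager) BackupManifestDir() string { return f.backupDir }
func (f *fakeStaticPodPathManager) BackupEtcdDir() string { return f.backupEtcd }
func (f *fakeStaticPodPathManager) CleanupDirs() error { return nil }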
// KubeStaticPodPathManager is a real implementation of StaticPodPathManager that is used when upgrading a static pod cluster
type KubeStaticPodPathManager struct {
realManifestDir string
tempManifestDir string
backupManifestDir string
backupEtcdDir string
keepManifestDir bool
keepEtcdDir bool
}
// NewKubeStaticPodPathManager creates a new instance of KubeStaticPodPathManager
func NewKubeStaticPodPathManager(realDir, tempDir, backupDir, backupEtcdDir string, keepManifestDir, keepEtcdDir bool) StaticPodPathManager {
return &KubeStaticPodPathManager{
realManifestDir: realDir,
tempManifestDir: tempDir,
backupManifestDir: backupDir,
backupEtcdDir: backupEtcdDir,
keepManifestDir: keepManifestDir,
keepEtcdDir: keepEtcdDir,
}
}
// NewKubeStaticPodPathManagerUsingTempDirs creates a new instance of KubeStaticPodPathManager with temporary directories backing it
func NewKubeStaticPodPathManagerUsingTempDirs(realManifestDir string, saveManifestsDir, saveEtcdDir bool) (StaticPodPathManager, error) {
upgradedManifestsDir, err := constants.CreateTempDirForKubeadm("kubeadm-upgraded-manifests")
if err != nil {
return nil, err
}
backupManifestsDir, err := constants.CreateTimestampDirForKubeadm("kubeadm-backup-manifests")
if err != nil {
return nil, err
}
backupEtcdDir, err := constants.CreateTimestampDirForKubeadm("kubeadm-backup-etcd")
if err != nil {
return nil, err
}
return NewKubeStaticPodPathManager(realManifestDir, upgradedManifestsDir, backupManifestsDir, backupEtcdDir, saveManifestsDir, saveEtcdDir), nil
}
// MoveFile should move a file from oldPath to newPath
func (spm *KubeStaticPodPathManager) MoveFile(oldPath, newPath string) error {
return os.Rename(oldPath, newPath)
}
// RealManifestPath gets the file path for the component in the "real" static pod manifest directory used by the kubelet
func (spm *KubeStaticPodPathManager) RealManifestPath(component string) string {
return constants.GetStaticPodFilepath(component, spm.realManifestDir)
}
// RealManifestDir should point to the static pod manifest directory used by the kubelet
func (spm *KubeStaticPodPathManager) RealManifestDir() string {
return spm.realManifestDir
}
// TempManifestPath gets the file path for the component in the temporary directory created for generating new manifests for the upgrade
func (spm *KubeStaticPodPathManager) TempManifestPath(component string) string {
return constants.GetStaticPodFilepath(component, spm.tempManifestDir)
}
// TempManifestDir should point to the temporary directory created for generating new manifests for the upgrade
func (spm *KubeStaticPodPathManager) TempManifestDir() string {
return spm.tempManifestDir
}
// BackupManifestPath gets the file path for the component in the backup directory used for backing up manifests during the transition
func (spm *KubeStaticPodPathManager) BackupManifestPath(component string) string {
return constants.GetStaticPodFilepath(component, spm.backupManifestDir)
}
// BackupManifestDir should point to the backup directory used for backing up manifests during the transition
func (spm *KubeStaticPodPathManager) BackupManifestDir() string {
return spm.backupManifestDir
}
// BackupEtcdDir should point to the backup directory used for backing up the etcd data during the transition
func (spm *KubeStaticPodPathManager) BackupEtcdDir() string {
return spm.backupEtcdDir
}
// CleanupDirs cleans up all temporary directories except those the user has requested to keep around
func (spm *KubeStaticPodPathManager) CleanupDirs() error {
if err := os.RemoveAll(spm.TempManifestDir()); err != nil {
return err
}
if !spm.keepManifestDir {
if err := os.RemoveAll(spm.BackupManifestDir()); err != nil {
return err
}
}
if !spm.keepEtcdDir {
if err := os.RemoveAll(spm.BackupEtcdDir()); err != nil {
return err
}
}
return nil
}
func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.MasterConfiguration, beforePodHash string, recoverManifests map[string]string, isTLSUpgrade bool) error {
// Special treatment is required for the etcd case: rollbackOldManifests should roll back the etcd
// manifest only when the component being upgraded is etcd
recoverEtcd := false
waitForComponentRestart := true
if component == constants.Etcd {
recoverEtcd = true
}
if isTLSUpgrade {
// We currently depend on getting the Etcd mirror Pod hash from the KubeAPIServer;
// Upgrading the Etcd protocol takes down the apiserver, so we can't verify component restarts if we restart Etcd independently.
// Skip waiting for Etcd to restart and immediately move on to updating the apiserver.
if component == constants.Etcd {
waitForComponentRestart = false
}
// Normally, if an Etcd upgrade is successful, but the apiserver upgrade fails, Etcd is not rolled back.
// In the case of a TLS upgrade, the old KubeAPIServer config is incompatible with the new Etcd config, so we rollback Etcd
// if the APIServer upgrade fails.
if component == constants.KubeAPIServer {
recoverEtcd = true
fmt.Printf("[upgrade/staticpods] The %s manifest will be restored if component %q fails to upgrade\n", constants.Etcd, component)
}
}
// ensure etcd certs are generated for etcd and kube-apiserver
if component == constants.Etcd || component == constants.KubeAPIServer {
if err := certsphase.CreateEtcdCACertAndKeyFiles(cfg); err != nil {
return fmt.Errorf("failed to upgrade the %s CA certificate and key: %v", constants.Etcd, err)
}
}
if component == constants.Etcd {
if err := certsphase.CreateEtcdServerCertAndKeyFiles(cfg); err != nil {
return fmt.Errorf("failed to upgrade the %s certificate and key: %v", constants.Etcd, err)
}
if err := certsphase.CreateEtcdPeerCertAndKeyFiles(cfg); err != nil {
return fmt.Errorf("failed to upgrade the %s peer certificate and key: %v", constants.Etcd, err)
}
if err := certsphase.CreateEtcdHealthcheckClientCertAndKeyFiles(cfg); err != nil {
return fmt.Errorf("failed to upgrade the %s healthcheck certificate and key: %v", constants.Etcd, err)
}
}
if component == constants.KubeAPIServer {
if err := certsphase.CreateAPIServerEtcdClientCertAndKeyFiles(cfg); err != nil {
return fmt.Errorf("failed to upgrade the %s %s-client certificate and key: %v", constants.KubeAPIServer, constants.Etcd, err)
}
}
// The old manifest is here, in /etc/kubernetes/manifests/
currentManifestPath := pathMgr.RealManifestPath(component)
// The new, upgraded manifest will be written here
newManifestPath := pathMgr.TempManifestPath(component)
// The old manifest will be moved here, into a subfolder of the temporary directory
// If a rollback is needed, these manifests will be put back to where they were initially
backupManifestPath := pathMgr.BackupManifestPath(component)
// Store the backup path in the recover list. If something goes wrong now, this component will be rolled back.
recoverManifests[component] = backupManifestPath
// Move the old manifest into the old-manifests directory
if err := pathMgr.MoveFile(currentManifestPath, backupManifestPath); err != nil {
return rollbackOldManifests(recoverManifests, err, pathMgr, recoverEtcd)
}
// Move the new manifest into the manifests directory
if err := pathMgr.MoveFile(newManifestPath, currentManifestPath); err != nil {
return rollbackOldManifests(recoverManifests, err, pathMgr, recoverEtcd)
}
fmt.Printf("[upgrade/staticpods] Moved new manifest to %q and backed up old manifest to %q\n", currentManifestPath, backupManifestPath)
if waitForComponentRestart {
fmt.Println("[upgrade/staticpods] Waiting for the kubelet to restart the component")
// Wait for the mirror Pod hash to change; otherwise we'll run into race conditions here when the kubelet hasn't had time to
// notice the removal of the Static Pod, leading to a false positive below where we check that the API endpoint is healthy.
// If we don't do this, there is a case where we remove the Static Pod manifest, the kubelet is slow to react, and kubeadm checks the
// API endpoint of the OLD Static Pod component below and proceeds quickly enough, which might lead to unexpected results.
if err := waiter.WaitForStaticPodHashChange(cfg.NodeRegistration.Name, component, beforePodHash); err != nil {
return rollbackOldManifests(recoverManifests, err, pathMgr, recoverEtcd)
}
// Wait for the static pod component to come up and register itself as a mirror pod
if err := waiter.WaitForPodsWithLabel("component=" + component); err != nil {
return rollbackOldManifests(recoverManifests, err, pathMgr, recoverEtcd)
}
fmt.Printf("[upgrade/staticpods] Component %q upgraded successfully!\n", component)
} else {
fmt.Printf("[upgrade/staticpods] Not waiting for pod-hash change for component %q\n", component)
}
return nil
}
// performEtcdStaticPodUpgrade performs the upgrade of etcd; it returns a bool indicating whether the error is fatal, plus the actual error.
func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.MasterConfiguration, recoverManifests map[string]string, isTLSUpgrade bool, oldEtcdClient, newEtcdClient etcdutil.ClusterInterrogator) (bool, error) {
// Bail out early if external etcd is configured; kubeadm won't try to change any etcd state in that case
if cfg.Etcd.External != nil {
return false, fmt.Errorf("external etcd detected, won't try to change any etcd state")
}
// Checking health state of etcd before proceeding with the upgrade
_, err := oldEtcdClient.GetClusterStatus()
if err != nil {
return true, fmt.Errorf("etcd cluster is not healthy: %v", err)
}
// Backing up etcd data store
backupEtcdDir := pathMgr.BackupEtcdDir()
runningEtcdDir := cfg.Etcd.Local.DataDir
if err := util.CopyDir(runningEtcdDir, backupEtcdDir); err != nil {
return true, fmt.Errorf("failed to back up etcd data: %v", err)
}
// Check the currently used version against the version from constants; if they differ, upgrade
desiredEtcdVersion, err := constants.EtcdSupportedVersion(cfg.KubernetesVersion)
if err != nil {
return true, fmt.Errorf("failed to retrieve an etcd version for the target kubernetes version: %v", err)
}
currentEtcdVersionStr, err := oldEtcdClient.GetVersion()
if err != nil {
return true, fmt.Errorf("failed to retrieve the current etcd version: %v", err)
}
currentEtcdVersion, err := version.ParseSemantic(currentEtcdVersionStr)
if err != nil {
return true, fmt.Errorf("failed to parse the current etcd version(%s): %v", currentEtcdVersionStr, err)
}
// Comparing current etcd version with desired to catch the same version or downgrade condition and fail on them.
if desiredEtcdVersion.LessThan(currentEtcdVersion) {
return false, fmt.Errorf("the desired etcd version for this Kubernetes version %q is %q, but the current etcd version is %q. Won't downgrade etcd, instead just continue", cfg.KubernetesVersion, desiredEtcdVersion.String(), currentEtcdVersion.String())
}
// If the desired etcd version is already running, there is nothing to upgrade
if strings.Compare(desiredEtcdVersion.String(), currentEtcdVersion.String()) == 0 {
return false, nil
}
beforeEtcdPodHash, err := waiter.WaitForStaticPodSingleHash(cfg.NodeRegistration.Name, constants.Etcd)
if err != nil {
return true, fmt.Errorf("failed to get etcd pod's hash: %v", err)
}
// Write the updated etcd static Pod manifest into the temporary directory; at this point no etcd change
// has occurred in any aspect.
if err := etcdphase.CreateLocalEtcdStaticPodManifestFile(pathMgr.TempManifestDir(), cfg); err != nil {
return true, fmt.Errorf("error creating local etcd static pod manifest file: %v", err)
}
// Waiter configurations for checking etcd status
noDelay := 0 * time.Second
podRestartDelay := noDelay
if isTLSUpgrade {
// If we are upgrading TLS we need to wait for old static pod to be removed.
// This is needed because we are not able to currently verify that the static pod
// has been updated through the apiserver across an etcd TLS upgrade.
// This value is arbitrary but seems to be long enough in manual testing.
podRestartDelay = 30 * time.Second
}
retries := 10
retryInterval := 15 * time.Second
// Perform etcd upgrade using common to all control plane components function
if err := upgradeComponent(constants.Etcd, waiter, pathMgr, cfg, beforeEtcdPodHash, recoverManifests, isTLSUpgrade); err != nil {
fmt.Printf("[upgrade/etcd] Failed to upgrade etcd: %v\n", err)
// Since the component upgrade failed, the old etcd manifest has either been restored or was never touched
// Now check whether the etcd cluster is healthy with the old manifest in place
fmt.Println("[upgrade/etcd] Waiting for previous etcd to become available")
if _, err := oldEtcdClient.WaitForClusterAvailable(noDelay, retries, retryInterval); err != nil {
fmt.Printf("[upgrade/etcd] Failed to healthcheck previous etcd: %v\n", err)
// At this point we know the etcd cluster is down, so it is safe to restore the backup datastore and roll back the old etcd manifest
fmt.Println("[upgrade/etcd] Rolling back etcd data")
if err := rollbackEtcdData(cfg, pathMgr); err != nil {
// Even restoring the datastore failed; no recovery options left, bailing out
return true, fmt.Errorf("fatal error rolling back local etcd cluster datadir: %v, the backup of the etcd database is stored at %s", err, backupEtcdDir)
}
fmt.Println("[upgrade/etcd] Etcd data rollback successful")
// Now that we've rolled back the data, let's check if the cluster comes up
fmt.Println("[upgrade/etcd] Waiting for previous etcd to become available")
if _, err := oldEtcdClient.WaitForClusterAvailable(noDelay, retries, retryInterval); err != nil {
fmt.Printf("[upgrade/etcd] Failed to healthcheck previous etcd: %v\n", err)
// Nothing else left to try to recover the etcd cluster
return true, fmt.Errorf("fatal error rolling back local etcd cluster manifest: %v, the backup of the etcd database is stored at %s", err, backupEtcdDir)
}
// The previous etcd has been recovered at this point
}
fmt.Println("[upgrade/etcd] Etcd was rolled back and is now available")
// The etcd cluster came back up with the old manifest, so surface the upgrade failure as fatal
return true, fmt.Errorf("fatal error when trying to upgrade the etcd cluster: %v, the state was rolled back to the pre-upgrade state", err)
}
// Initialize the new etcd client if it wasn't pre-initialized
if newEtcdClient == nil {
client, err := etcdutil.NewFromStaticPod(
[]string{"localhost:2379"},
constants.GetStaticPodDirectory(),
cfg.CertificatesDir,
)
if err != nil {
return true, fmt.Errorf("fatal error creating etcd client: %v", err)
}
newEtcdClient = client
}
// Checking health state of etcd after the upgrade
fmt.Println("[upgrade/etcd] Waiting for etcd to become available")
if _, err = newEtcdClient.WaitForClusterAvailable(podRestartDelay, retries, retryInterval); err != nil {
fmt.Printf("[upgrade/etcd] Failed to healthcheck etcd: %v\n", err)
// Despite upgradeComponent succeeding, something is wrong with the etcd cluster
// The first step is to restore the backup of the datastore
fmt.Println("[upgrade/etcd] Rolling back etcd data")
if err := rollbackEtcdData(cfg, pathMgr); err != nil {
// Even restoring the datastore failed; no recovery options left, bailing out
return true, fmt.Errorf("fatal error rolling back local etcd cluster datadir: %v, the backup of the etcd database is stored at %s", err, backupEtcdDir)
}
fmt.Println("[upgrade/etcd] Etcd data rollback successful")
// The old datastore has been restored; now roll back the old manifests
fmt.Println("[upgrade/etcd] Rolling back etcd manifest")
rollbackOldManifests(recoverManifests, err, pathMgr, true)
// rollbackOldManifests() always returns an error -- ignore it and continue
// Assuming rollback of the old etcd manifest was successful, check the status of etcd cluster again
fmt.Println("[upgrade/etcd] Waiting for previous etcd to become available")
if _, err := oldEtcdClient.WaitForClusterAvailable(noDelay, retries, retryInterval); err != nil {
fmt.Printf("[upgrade/etcd] Failed to healthcheck previous etcd: %v\n", err)
// Nothing else left to try to recover the etcd cluster
return true, fmt.Errorf("fatal error rolling back local etcd cluster manifest: %v, the backup of the etcd database is stored at %s", err, backupEtcdDir)
}
fmt.Println("[upgrade/etcd] Etcd was rolled back and is now available")
// We've successfully rolled back etcd; now return an error describing that the upgrade failed
return true, fmt.Errorf("fatal error upgrading local etcd cluster: %v, the state was rolled back to the pre-upgrade state", err)
}
return false, nil
}
// StaticPodControlPlane upgrades a static pod-hosted control plane
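// A minimal wiring sketch, assuming the path manager is constructed over the
// real manifest directory (the directory variables are illustrative, and the
// nil etcd clients make this function construct its own from the configuration):
//
//  pathMgr := NewKubeStaticPodPathManager(realDir, tempDir, backupDir, backupEtcdDir, true, true)
//  if err := StaticPodControlPlane(waiter, pathMgr, internalcfg, true, nil, nil); err != nil {
//      return err
//  }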
func StaticPodControlPlane(waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.MasterConfiguration, etcdUpgrade bool, oldEtcdClient, newEtcdClient etcdutil.ClusterInterrogator) error {
recoverManifests := map[string]string{}
var isTLSUpgrade bool
var isExternalEtcd bool
beforePodHashMap, err := waiter.WaitForStaticPodControlPlaneHashes(cfg.NodeRegistration.Name)
if err != nil {
return err
}
if oldEtcdClient == nil {
if cfg.Etcd.External != nil {
// External etcd
isExternalEtcd = true
client, err := etcdutil.New(
cfg.Etcd.External.Endpoints,
cfg.Etcd.External.CAFile,
cfg.Etcd.External.CertFile,
cfg.Etcd.External.KeyFile,
)
if err != nil {
return fmt.Errorf("failed to create etcd client for external etcd: %v", err)
}
oldEtcdClient = client
// Since etcd is managed externally, the new etcd client will be the same as the old client
if newEtcdClient == nil {
newEtcdClient = client
}
} else {
// etcd Static Pod
client, err := etcdutil.NewFromStaticPod(
[]string{"localhost:2379"},
constants.GetStaticPodDirectory(),
cfg.CertificatesDir,
)
if err != nil {
return fmt.Errorf("failed to create etcd client: %v", err)
}
oldEtcdClient = client
}
}
// etcd upgrade is done prior to other control plane components
if !isExternalEtcd && etcdUpgrade {
previousEtcdHasTLS := oldEtcdClient.HasTLS()
// set the TLS upgrade flag for all components
isTLSUpgrade = !previousEtcdHasTLS
if isTLSUpgrade {
fmt.Printf("[upgrade/etcd] Upgrading to TLS for %s\n", constants.Etcd)
}
// Perform the etcd upgrade using the function common to all control plane components
fatal, err := performEtcdStaticPodUpgrade(waiter, pathMgr, cfg, recoverManifests, isTLSUpgrade, oldEtcdClient, newEtcdClient)
if err != nil {
if fatal {
return err
}
fmt.Printf("[upgrade/etcd] non fatal issue encountered during upgrade: %v\n", err)
}
}
// Write the updated static Pod manifests into the temporary directory
fmt.Printf("[upgrade/staticpods] Writing new Static Pod manifests to %q\n", pathMgr.TempManifestDir())
err = controlplanephase.CreateInitStaticPodManifestFiles(pathMgr.TempManifestDir(), cfg)
if err != nil {
return fmt.Errorf("error creating init static pod manifest files: %v", err)
}
for _, component := range constants.MasterComponents {
if err = upgradeComponent(component, waiter, pathMgr, cfg, beforePodHashMap[component], recoverManifests, isTLSUpgrade); err != nil {
return err
}
}
// Remove the temporary directories on a best-effort basis (don't fail if the calls error out)
// The calls are placed here by design; we should _not_ use "defer" above as that would remove the directories
// even in the "fail and rollback" case, where we want the directories preserved for the user.
return pathMgr.CleanupDirs()
}
// rollbackOldManifests rolls back the backed-up manifests if something went wrong.
// It always returns an error to the caller.
func rollbackOldManifests(oldManifests map[string]string, origErr error, pathMgr StaticPodPathManager, restoreEtcd bool) error {
errs := []error{origErr}
for component, backupPath := range oldManifests {
// Restore the etcd manifest only if it was explicitly requested by setting restoreEtcd to true
if component == constants.Etcd && !restoreEtcd {
continue
}
// Where the backed-up manifest should be put back
realManifestPath := pathMgr.RealManifestPath(component)
// Move the backup manifest back into the manifests directory
err := pathMgr.MoveFile(backupPath, realManifestPath)
if err != nil {
errs = append(errs, err)
}
}
// Let the user know there were problems, but we tried to recover
return fmt.Errorf("couldn't upgrade control plane. kubeadm has tried to recover everything into the earlier state. Errors faced: %v", errs)
}
// rollbackEtcdData rolls back the content of the etcd folder if something went wrong.
// When the folder contents are successfully rolled back, nil is returned; otherwise an error is returned.
func rollbackEtcdData(cfg *kubeadmapi.MasterConfiguration, pathMgr StaticPodPathManager) error {
backupEtcdDir := pathMgr.BackupEtcdDir()
runningEtcdDir := cfg.Etcd.Local.DataDir
if err := util.CopyDir(backupEtcdDir, runningEtcdDir); err != nil {
// Let the user know there were problems, but we tried to recover
return fmt.Errorf("couldn't recover etcd database: %v, the etcd backup is located at %s", err, backupEtcdDir)
}
return nil
}

View File

@@ -0,0 +1,603 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"crypto/sha256"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/pkg/transport"
"k8s.io/apimachinery/pkg/runtime"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
kubeadmapiv1alpha2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
controlplanephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane"
etcdphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
etcdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd"
)
const (
waitForHashes = "wait-for-hashes"
waitForHashChange = "wait-for-hash-change"
waitForPodsWithLabel = "wait-for-pods-with-label"
testConfiguration = `
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
api:
advertiseAddress: 1.2.3.4
bindPort: 6443
apiServerCertSANs: null
apiServerExtraArgs: null
certificatesDir: %s
controllerManagerExtraArgs: null
etcd:
local:
dataDir: %s
image: ""
featureFlags: null
imageRepository: k8s.gcr.io
kubernetesVersion: %s
networking:
dnsDomain: cluster.local
podSubnet: ""
serviceSubnet: 10.96.0.0/12
nodeRegistration:
name: foo
criSocket: ""
schedulerExtraArgs: null
token: ce3aa5.5ec8455bb76b379f
tokenTTL: 24h
unifiedControlPlaneImage: ""
`
)
// fakeWaiter is a fake apiclient.Waiter that returns errors it was initialized with
type fakeWaiter struct {
errsToReturn map[string]error
}
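// NewFakeStaticPodWaiter returns a fake apiclient.Waiter keyed by the label
// constants above. For example, to make the upgrade fail at the hash-change
// step (mirroring the test table below):
//
//  waiter := NewFakeStaticPodWaiter(map[string]error{
//      waitForHashChange: fmt.Errorf("boo! failed"),
//  })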
func NewFakeStaticPodWaiter(errsToReturn map[string]error) apiclient.Waiter {
return &fakeWaiter{
errsToReturn: errsToReturn,
}
}
// WaitForAPI just returns a dummy nil, to indicate that the program should proceed
func (w *fakeWaiter) WaitForAPI() error {
return nil
}
// WaitForPodsWithLabel just returns an error if set from errsToReturn
func (w *fakeWaiter) WaitForPodsWithLabel(kvLabel string) error {
return w.errsToReturn[waitForPodsWithLabel]
}
// WaitForPodToDisappear just returns a dummy nil, to indicate that the program should proceed
func (w *fakeWaiter) WaitForPodToDisappear(podName string) error {
return nil
}
// SetTimeout is a no-op; we don't use it in this implementation
func (w *fakeWaiter) SetTimeout(_ time.Duration) {}
// WaitForStaticPodControlPlaneHashes returns an error if set from errsToReturn
func (w *fakeWaiter) WaitForStaticPodControlPlaneHashes(_ string) (map[string]string, error) {
return map[string]string{}, w.errsToReturn[waitForHashes]
}
// WaitForStaticPodSingleHash returns an error if set from errsToReturn
func (w *fakeWaiter) WaitForStaticPodSingleHash(_ string, _ string) (string, error) {
return "", w.errsToReturn[waitForHashes]
}
// WaitForStaticPodHashChange returns an error if set from errsToReturn
func (w *fakeWaiter) WaitForStaticPodHashChange(_, _, _ string) error {
return w.errsToReturn[waitForHashChange]
}
// WaitForHealthyKubelet returns a dummy nil just to implement the interface
func (w *fakeWaiter) WaitForHealthyKubelet(_ time.Duration, _ string) error {
return nil
}
type fakeStaticPodPathManager struct {
kubernetesDir string
realManifestDir string
tempManifestDir string
backupManifestDir string
backupEtcdDir string
MoveFileFunc func(string, string) error
}
func NewFakeStaticPodPathManager(moveFileFunc func(string, string) error) (StaticPodPathManager, error) {
kubernetesDir, err := ioutil.TempDir("", "kubeadm-pathmanager-")
if err != nil {
return nil, fmt.Errorf("couldn't create a temporary directory for the upgrade: %v", err)
}
realManifestDir := filepath.Join(kubernetesDir, constants.ManifestsSubDirName)
if err := os.Mkdir(realManifestDir, 0700); err != nil {
return nil, fmt.Errorf("couldn't create a realManifestDir for the upgrade: %v", err)
}
upgradedManifestDir := filepath.Join(kubernetesDir, "upgraded-manifests")
if err := os.Mkdir(upgradedManifestDir, 0700); err != nil {
return nil, fmt.Errorf("couldn't create a upgradedManifestDir for the upgrade: %v", err)
}
backupManifestDir := filepath.Join(kubernetesDir, "backup-manifests")
if err := os.Mkdir(backupManifestDir, 0700); err != nil {
return nil, fmt.Errorf("couldn't create a backupManifestDir for the upgrade: %v", err)
}
backupEtcdDir := filepath.Join(kubernetesDir, "kubeadm-backup-etcd")
if err := os.Mkdir(backupEtcdDir, 0700); err != nil {
return nil, err
}
return &fakeStaticPodPathManager{
kubernetesDir: kubernetesDir,
realManifestDir: realManifestDir,
tempManifestDir: upgradedManifestDir,
backupManifestDir: backupManifestDir,
backupEtcdDir: backupEtcdDir,
MoveFileFunc: moveFileFunc,
}, nil
}
func (spm *fakeStaticPodPathManager) MoveFile(oldPath, newPath string) error {
return spm.MoveFileFunc(oldPath, newPath)
}
func (spm *fakeStaticPodPathManager) KubernetesDir() string {
return spm.kubernetesDir
}
func (spm *fakeStaticPodPathManager) RealManifestPath(component string) string {
return constants.GetStaticPodFilepath(component, spm.realManifestDir)
}
func (spm *fakeStaticPodPathManager) RealManifestDir() string {
return spm.realManifestDir
}
func (spm *fakeStaticPodPathManager) TempManifestPath(component string) string {
return constants.GetStaticPodFilepath(component, spm.tempManifestDir)
}
func (spm *fakeStaticPodPathManager) TempManifestDir() string {
return spm.tempManifestDir
}
func (spm *fakeStaticPodPathManager) BackupManifestPath(component string) string {
return constants.GetStaticPodFilepath(component, spm.backupManifestDir)
}
func (spm *fakeStaticPodPathManager) BackupManifestDir() string {
return spm.backupManifestDir
}
func (spm *fakeStaticPodPathManager) BackupEtcdDir() string {
return spm.backupEtcdDir
}
func (spm *fakeStaticPodPathManager) CleanupDirs() error {
if err := os.RemoveAll(spm.TempManifestDir()); err != nil {
return err
}
if err := os.RemoveAll(spm.BackupManifestDir()); err != nil {
return err
}
return os.RemoveAll(spm.BackupEtcdDir())
}
type fakeTLSEtcdClient struct{ TLS bool }
func (c fakeTLSEtcdClient) HasTLS() bool {
return c.TLS
}
func (c fakeTLSEtcdClient) ClusterAvailable() (bool, error) { return true, nil }
func (c fakeTLSEtcdClient) WaitForClusterAvailable(delay time.Duration, retries int, retryInterval time.Duration) (bool, error) {
return true, nil
}
func (c fakeTLSEtcdClient) GetClusterStatus() (map[string]*clientv3.StatusResponse, error) {
return map[string]*clientv3.StatusResponse{
"foo": {
Version: "3.1.12",
}}, nil
}
func (c fakeTLSEtcdClient) GetClusterVersions() (map[string]string, error) {
return map[string]string{
"foo": "3.1.12",
}, nil
}
func (c fakeTLSEtcdClient) GetVersion() (string, error) {
return "3.1.12", nil
}
type fakePodManifestEtcdClient struct{ ManifestDir, CertificatesDir string }
func (c fakePodManifestEtcdClient) HasTLS() bool {
hasTLS, _ := etcdutil.PodManifestsHaveTLS(c.ManifestDir)
return hasTLS
}
func (c fakePodManifestEtcdClient) ClusterAvailable() (bool, error) { return true, nil }
func (c fakePodManifestEtcdClient) WaitForClusterAvailable(delay time.Duration, retries int, retryInterval time.Duration) (bool, error) {
return true, nil
}
func (c fakePodManifestEtcdClient) GetClusterStatus() (map[string]*clientv3.StatusResponse, error) {
// Make sure the certificates generated from the upgrade are readable from disk
tlsInfo := transport.TLSInfo{
CertFile: filepath.Join(c.CertificatesDir, constants.EtcdHealthcheckClientCertName),
KeyFile: filepath.Join(c.CertificatesDir, constants.EtcdHealthcheckClientKeyName),
TrustedCAFile: filepath.Join(c.CertificatesDir, constants.EtcdCACertName),
}
_, err := tlsInfo.ClientConfig()
if err != nil {
return nil, err
}
return map[string]*clientv3.StatusResponse{
"foo": {Version: "3.1.12"},
}, nil
}
func (c fakePodManifestEtcdClient) GetClusterVersions() (map[string]string, error) {
return map[string]string{
"foo": "3.1.12",
}, nil
}
func (c fakePodManifestEtcdClient) GetVersion() (string, error) {
return "3.1.12", nil
}
func TestStaticPodControlPlane(t *testing.T) {
tests := []struct {
description string
waitErrsToReturn map[string]error
moveFileFunc func(string, string) error
expectedErr bool
manifestShouldChange bool
}{
{
description: "error-free case should succeed",
waitErrsToReturn: map[string]error{
waitForHashes: nil,
waitForHashChange: nil,
waitForPodsWithLabel: nil,
},
moveFileFunc: func(oldPath, newPath string) error {
return os.Rename(oldPath, newPath)
},
expectedErr: false,
manifestShouldChange: true,
},
{
description: "any wait error should result in a rollback and an abort",
waitErrsToReturn: map[string]error{
waitForHashes: fmt.Errorf("boo! failed"),
waitForHashChange: nil,
waitForPodsWithLabel: nil,
},
moveFileFunc: func(oldPath, newPath string) error {
return os.Rename(oldPath, newPath)
},
expectedErr: true,
manifestShouldChange: false,
},
{
description: "any wait error should result in a rollback and an abort",
waitErrsToReturn: map[string]error{
waitForHashes: nil,
waitForHashChange: fmt.Errorf("boo! failed"),
waitForPodsWithLabel: nil,
},
moveFileFunc: func(oldPath, newPath string) error {
return os.Rename(oldPath, newPath)
},
expectedErr: true,
manifestShouldChange: false,
},
{
description: "any wait error should result in a rollback and an abort",
waitErrsToReturn: map[string]error{
waitForHashes: nil,
waitForHashChange: nil,
waitForPodsWithLabel: fmt.Errorf("boo! failed"),
},
moveFileFunc: func(oldPath, newPath string) error {
return os.Rename(oldPath, newPath)
},
expectedErr: true,
manifestShouldChange: false,
},
{
description: "any path-moving error should result in a rollback and an abort",
waitErrsToReturn: map[string]error{
waitForHashes: nil,
waitForHashChange: nil,
waitForPodsWithLabel: nil,
},
moveFileFunc: func(oldPath, newPath string) error {
// fail for kube-apiserver move
if strings.Contains(newPath, "kube-apiserver") {
return fmt.Errorf("moving the kube-apiserver file failed")
}
return os.Rename(oldPath, newPath)
},
expectedErr: true,
manifestShouldChange: false,
},
{
description: "any path-moving error should result in a rollback and an abort",
waitErrsToReturn: map[string]error{
waitForHashes: nil,
waitForHashChange: nil,
waitForPodsWithLabel: nil,
},
moveFileFunc: func(oldPath, newPath string) error {
// fail for kube-controller-manager move
if strings.Contains(newPath, "kube-controller-manager") {
return fmt.Errorf("moving the kube-apiserver file failed")
}
return os.Rename(oldPath, newPath)
},
expectedErr: true,
manifestShouldChange: false,
},
{
description: "any path-moving error should result in a rollback and an abort; even though this is the last component (kube-apiserver and kube-controller-manager healthy)",
waitErrsToReturn: map[string]error{
waitForHashes: nil,
waitForHashChange: nil,
waitForPodsWithLabel: nil,
},
moveFileFunc: func(oldPath, newPath string) error {
// fail for kube-scheduler move
if strings.Contains(newPath, "kube-scheduler") {
return fmt.Errorf("moving the kube-apiserver file failed")
}
return os.Rename(oldPath, newPath)
},
expectedErr: true,
manifestShouldChange: false,
},
}
for _, rt := range tests {
waiter := NewFakeStaticPodWaiter(rt.waitErrsToReturn)
pathMgr, err := NewFakeStaticPodPathManager(rt.moveFileFunc)
if err != nil {
t.Fatalf("couldn't run NewFakeStaticPodPathManager: %v", err)
}
defer os.RemoveAll(pathMgr.(*fakeStaticPodPathManager).KubernetesDir())
constants.KubernetesDir = pathMgr.(*fakeStaticPodPathManager).KubernetesDir()
tempCertsDir, err := ioutil.TempDir("", "kubeadm-certs")
if err != nil {
t.Fatalf("couldn't create temporary certificates directory: %v", err)
}
defer os.RemoveAll(tempCertsDir)
tmpEtcdDataDir, err := ioutil.TempDir("", "kubeadm-etcd-data")
if err != nil {
t.Fatalf("couldn't create temporary etcd data directory: %v", err)
}
defer os.RemoveAll(tmpEtcdDataDir)
oldcfg, err := getConfig("v1.9.0", tempCertsDir, tmpEtcdDataDir)
if err != nil {
t.Fatalf("couldn't create config: %v", err)
}
// Initialize PKI minus any etcd certificates to simulate etcd PKI upgrade
certActions := []func(cfg *kubeadmapi.MasterConfiguration) error{
certsphase.CreateCACertAndKeyFiles,
certsphase.CreateAPIServerCertAndKeyFiles,
certsphase.CreateAPIServerKubeletClientCertAndKeyFiles,
// certsphase.CreateEtcdCACertAndKeyFiles,
// certsphase.CreateEtcdServerCertAndKeyFiles,
// certsphase.CreateEtcdPeerCertAndKeyFiles,
// certsphase.CreateEtcdHealthcheckClientCertAndKeyFiles,
// certsphase.CreateAPIServerEtcdClientCertAndKeyFiles,
certsphase.CreateServiceAccountKeyAndPublicKeyFiles,
certsphase.CreateFrontProxyCACertAndKeyFiles,
certsphase.CreateFrontProxyClientCertAndKeyFiles,
}
for _, action := range certActions {
err := action(oldcfg)
if err != nil {
t.Fatalf("couldn't initialize pre-upgrade certificate: %v", err)
}
}
fmt.Printf("Wrote certs to %s\n", oldcfg.CertificatesDir)
// Initialize the directory with the v1.9 manifests; the test then upgrades them to v1.10
err = controlplanephase.CreateInitStaticPodManifestFiles(pathMgr.RealManifestDir(), oldcfg)
if err != nil {
t.Fatalf("couldn't run CreateInitStaticPodManifestFiles: %v", err)
}
err = etcdphase.CreateLocalEtcdStaticPodManifestFile(pathMgr.RealManifestDir(), oldcfg)
if err != nil {
t.Fatalf("couldn't run CreateLocalEtcdStaticPodManifestFile: %v", err)
}
// Get a hash of the v1.9 API server manifest to compare later (whether the file was rewritten)
oldHash, err := getAPIServerHash(pathMgr.RealManifestDir())
if err != nil {
t.Fatalf("couldn't read temp file: %v", err)
}
newcfg, err := getConfig("v1.10.0", tempCertsDir, tmpEtcdDataDir)
if err != nil {
t.Fatalf("couldn't create config: %v", err)
}
actualErr := StaticPodControlPlane(
waiter,
pathMgr,
newcfg,
true,
fakeTLSEtcdClient{
TLS: false,
},
fakePodManifestEtcdClient{
ManifestDir: pathMgr.RealManifestDir(),
CertificatesDir: newcfg.CertificatesDir,
},
)
if (actualErr != nil) != rt.expectedErr {
t.Errorf(
"failed UpgradeStaticPodControlPlane\n%s\n\texpected error: %t\n\tgot: %t\n\tactual error: %v",
rt.description,
rt.expectedErr,
(actualErr != nil),
actualErr,
)
}
newHash, err := getAPIServerHash(pathMgr.RealManifestDir())
if err != nil {
t.Fatalf("couldn't read temp file: %v", err)
}
if (oldHash != newHash) != rt.manifestShouldChange {
t.Errorf(
"failed StaticPodControlPlane\n%s\n\texpected manifest change: %t\n\tgot: %t",
rt.description,
rt.manifestShouldChange,
(oldHash != newHash),
)
}
}
}
func getAPIServerHash(dir string) (string, error) {
manifestPath := constants.GetStaticPodFilepath(constants.KubeAPIServer, dir)
fileBytes, err := ioutil.ReadFile(manifestPath)
if err != nil {
return "", err
}
return fmt.Sprintf("%x", sha256.Sum256(fileBytes)), nil
}
// TODO: Make this test function use the rest of the "official" API machinery helper funcs we have inside of kubeadm
func getConfig(version, certsDir, etcdDataDir string) (*kubeadmapi.MasterConfiguration, error) {
externalcfg := &kubeadmapiv1alpha2.MasterConfiguration{}
internalcfg := &kubeadmapi.MasterConfiguration{}
if err := runtime.DecodeInto(kubeadmscheme.Codecs.UniversalDecoder(), []byte(fmt.Sprintf(testConfiguration, certsDir, etcdDataDir, version)), externalcfg); err != nil {
return nil, fmt.Errorf("unable to decode config: %v", err)
}
if err := kubeadmscheme.Scheme.Convert(externalcfg, internalcfg, nil); err != nil {
return nil, fmt.Errorf("unable to convert config: %v", err)
}
return internalcfg, nil
}
func getTempDir(t *testing.T, name string) (string, func()) {
dir, err := ioutil.TempDir(os.TempDir(), name)
if err != nil {
t.Fatalf("couldn't make temporary directory: %v", err)
}
return dir, func() {
os.RemoveAll(dir)
}
}
func TestCleanupDirs(t *testing.T) {
tests := []struct {
name string
keepManifest, keepEtcd bool
}{
{
name: "save manifest backup",
keepManifest: true,
},
{
name: "save both etcd and manifest",
keepManifest: true,
keepEtcd: true,
},
{
name: "save nothing",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
realManifestDir, cleanup := getTempDir(t, "realManifestDir")
defer cleanup()
tempManifestDir, cleanup := getTempDir(t, "tempManifestDir")
defer cleanup()
backupManifestDir, cleanup := getTempDir(t, "backupManifestDir")
defer cleanup()
backupEtcdDir, cleanup := getTempDir(t, "backupEtcdDir")
defer cleanup()
mgr := NewKubeStaticPodPathManager(realManifestDir, tempManifestDir, backupManifestDir, backupEtcdDir, test.keepManifest, test.keepEtcd)
err := mgr.CleanupDirs()
if err != nil {
t.Errorf("unexpected error cleaning up: %v", err)
}
if _, err := os.Stat(tempManifestDir); !os.IsNotExist(err) {
t.Errorf("%q should not have existed", tempManifestDir)
}
_, err = os.Stat(backupManifestDir)
if test.keepManifest {
if err != nil {
t.Errorf("unexpected error getting backup manifest dir")
}
} else {
if !os.IsNotExist(err) {
t.Error("expected backup manifest to not exist")
}
}
_, err = os.Stat(backupEtcdDir)
if test.keepEtcd {
if err != nil {
t.Errorf("unexpected error getting backup etcd dir")
}
} else {
if !os.IsNotExist(err) {
t.Error("expected backup etcd dir to not exist")
}
}
})
}
}

View File

@@ -0,0 +1,151 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade
import (
"fmt"
"io"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
versionutil "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/pkg/version"
)
// VersionGetter defines an interface for fetching different versions.
// It is easy to implement a fake variant of this interface for unit testing.
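// A fake might look like this sketch (a hypothetical type, not part of this
// package):
//
//  type fakeVersionGetter struct{ cluster string }
//
//  func (f fakeVersionGetter) ClusterVersion() (string, *versionutil.Version, error) {
//      return f.cluster, versionutil.MustParseSemantic(f.cluster), nil
//  }
//
// with the remaining methods stubbed out in the same way.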
type VersionGetter interface {
// ClusterVersion should return the version of the cluster i.e. the API Server version
ClusterVersion() (string, *versionutil.Version, error)
// KubeadmVersion should return the version of the kubeadm CLI
KubeadmVersion() (string, *versionutil.Version, error)
// VersionFromCILabel should resolve CI labels like `latest`, `stable`, `stable-1.8`, etc. to real versions
VersionFromCILabel(string, string) (string, *versionutil.Version, error)
// KubeletVersions should return a map from kubelet version to the number of kubelets running that version
KubeletVersions() (map[string]uint16, error)
}
// KubeVersionGetter handles the version-fetching mechanism from external sources
type KubeVersionGetter struct {
client clientset.Interface
w io.Writer
}
// NewKubeVersionGetter returns a new instance of KubeVersionGetter
func NewKubeVersionGetter(client clientset.Interface, writer io.Writer) VersionGetter {
return &KubeVersionGetter{
client: client,
w: writer,
}
}
// ClusterVersion gets API server version
func (g *KubeVersionGetter) ClusterVersion() (string, *versionutil.Version, error) {
clusterVersionInfo, err := g.client.Discovery().ServerVersion()
if err != nil {
return "", nil, fmt.Errorf("Couldn't fetch cluster version from the API Server: %v", err)
}
fmt.Fprintf(g.w, "[upgrade/versions] Cluster version: %s\n", clusterVersionInfo.String())
clusterVersion, err := versionutil.ParseSemantic(clusterVersionInfo.String())
if err != nil {
return "", nil, fmt.Errorf("Couldn't parse cluster version: %v", err)
}
return clusterVersionInfo.String(), clusterVersion, nil
}
// KubeadmVersion gets kubeadm version
func (g *KubeVersionGetter) KubeadmVersion() (string, *versionutil.Version, error) {
kubeadmVersionInfo := version.Get()
fmt.Fprintf(g.w, "[upgrade/versions] kubeadm version: %s\n", kubeadmVersionInfo.String())
kubeadmVersion, err := versionutil.ParseSemantic(kubeadmVersionInfo.String())
if err != nil {
return "", nil, fmt.Errorf("Couldn't parse kubeadm version: %v", err)
}
return kubeadmVersionInfo.String(), kubeadmVersion, nil
}
// VersionFromCILabel resolves a version label like "latest" or "stable" to an actual version using the public Kubernetes CI uploads
func (g *KubeVersionGetter) VersionFromCILabel(ciVersionLabel, description string) (string, *versionutil.Version, error) {
versionStr, err := kubeadmutil.KubernetesReleaseVersion(ciVersionLabel)
if err != nil {
return "", nil, fmt.Errorf("Couldn't fetch latest %s from the internet: %v", description, err)
}
if description != "" {
fmt.Fprintf(g.w, "[upgrade/versions] Latest %s: %s\n", description, versionStr)
}
ver, err := versionutil.ParseSemantic(versionStr)
if err != nil {
return "", nil, fmt.Errorf("Couldn't parse latest %s: %v", description, err)
}
return versionStr, ver, nil
}
// KubeletVersions gets the versions of the kubelets in the cluster
func (g *KubeVersionGetter) KubeletVersions() (map[string]uint16, error) {
nodes, err := g.client.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("couldn't list all nodes in cluster")
}
return computeKubeletVersions(nodes.Items), nil
}
// computeKubeletVersions returns a map from kubelet version to the number of nodes running that version
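// For example, three nodes whose kubelets report v1.10.0, v1.10.0 and v1.9.3
// yield map[string]uint16{"v1.10.0": 2, "v1.9.3": 1}.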
func computeKubeletVersions(nodes []v1.Node) map[string]uint16 {
kubeletVersions := map[string]uint16{}
for _, node := range nodes {
// A missing key reads as zero, so a plain increment also handles the first occurrence
kubeletVersions[node.Status.NodeInfo.KubeletVersion]++
}
return kubeletVersions
}
// OfflineVersionGetter will use the version provided, or delegate to the wrapped VersionGetter when no version is set
type OfflineVersionGetter struct {
VersionGetter
version string
}
// NewOfflineVersionGetter wraps a VersionGetter and skips online communication if a version is supplied.
// If version is "", the behavior is identical to that of the versionGetter passed in.
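// Illustrative usage (the client and version value are hypothetical): resolving
// the "stable" label without any network round-trip:
//
//  getter := NewOfflineVersionGetter(NewKubeVersionGetter(client, os.Stdout), "v1.11.0")
//  str, _, err := getter.VersionFromCILabel("stable", "stable version")
//  // str == "v1.11.0"; the wrapped getter is never consulted.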
func NewOfflineVersionGetter(versionGetter VersionGetter, version string) VersionGetter {
return &OfflineVersionGetter{
VersionGetter: versionGetter,
version: version,
}
}
// VersionFromCILabel returns the version that was passed into the struct, if set; otherwise it delegates to the wrapped VersionGetter
func (o *OfflineVersionGetter) VersionFromCILabel(ciVersionLabel, description string) (string, *versionutil.Version, error) {
if o.version == "" {
return o.VersionGetter.VersionFromCILabel(ciVersionLabel, description)
}
ver, err := versionutil.ParseSemantic(o.version)
if err != nil {
return "", nil, fmt.Errorf("Couldn't parse version %s: %v", description, err)
}
return o.version, ver, nil
}