update vendor csi-lib-utils@v0.6.1
Signed-off-by: Andrew Sy Kim <kiman@vmware.com>
Gopkg.lock (generated), 42 changed lines
@@ -2,12 +2,12 @@


 [[projects]]
-  digest = "1:94ffc0947c337d618b6ff5ed9abaddc1217b090c1b3a1ae4739b35b7b25851d5"
+  digest = "1:7f21fa1f8ab9a529dba26a7e9cf20de217c307fa1d96cb599d3afd9e5c83e9d6"
   name = "github.com/container-storage-interface/spec"
   packages = ["lib/go/csi"]
   pruneopts = "NUT"
-  revision = "ed0bb0e1557548aa028307f48728767cfe8f6345"
-  version = "v1.0.0"
+  revision = "f750e6765f5f6b4ac0e13e95214d58901290fb4b"
+  version = "v1.1.0"

 [[projects]]
   digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
@@ -117,27 +117,28 @@
   version = "v1.1.6"

 [[projects]]
-  digest = "1:a0892607b4f5385bb9fb12759facc8fad4e61b8b557384e4a078150c6ba43623"
+  digest = "1:cfbb7ea18e982b7f97205743fcbdd7828f42ce1ac667192b713ada9fc62a958d"
   name = "github.com/kubernetes-csi/csi-lib-utils"
   packages = [
     "connection",
+    "leaderelection",
     "protosanitizer",
     "rpc",
   ]
   pruneopts = "NUT"
-  revision = "0ce529dbd3fa19d36b0c9a579f2819db64d430ad"
-  version = "0.4.0"
+  revision = "b8b7a89535d80e12f2c0f4c53cfb981add8aaca2"
+  version = "v0.6.1"

 [[projects]]
-  digest = "1:cab5d1fe86e273b35887f707dbec779d77d87613d9f2f14ea23002912197ce81"
+  digest = "1:189ada87769e6bd22de67cf8d5ac2f348070e7bcc4cdc5be3aaa5b19758e8fc2"
   name = "github.com/kubernetes-csi/csi-test"
   packages = [
     "driver",
     "utils",
   ]
   pruneopts = "NUT"
-  revision = "5b1e3786b7c8f7ca514b40e882a0b5dc36e4c842"
-  version = "v1.1.0"
+  revision = "5421d9f3c37be3b95b241b44a094a3db11bee789"
+  version = "v2.0.0"

 [[projects]]
   digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f"
@@ -186,7 +187,7 @@
     "trace",
   ]
   pruneopts = "NUT"
-  revision = "74de082e2cca95839e88aa0aeee5aadf6ce7710f"
+  revision = "b630fd6fe46bcfc98f989005d8b8ec1400e60a6e"

 [[projects]]
   branch = "master"
@@ -197,18 +198,18 @@
     "internal",
   ]
   pruneopts = "NUT"
-  revision = "c85d3e98c914e3a33234ad863dcbff5dbc425bb8"
+  revision = "9f3314589c9a9136388751d9adae6b0ed400978a"

 [[projects]]
   branch = "master"
-  digest = "1:af393301820919efc2288c08608a9aab90a3b293aa64e65a06739b7edbb7082f"
+  digest = "1:992976af3618fd8fb06e5e4c4fa177800a1e0880cbbbc91df3f952520808806c"
   name = "golang.org/x/sys"
   packages = [
     "unix",
     "windows",
   ]
   pruneopts = "NUT"
-  revision = "9eb1bfa1ce65ae8a6ff3114b0aaf9a41a6cf3560"
+  revision = "81d4e9dc473e5e8c933f2aaeba2a3d81efb9aed2"

 [[projects]]
   digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619"
@@ -243,7 +244,7 @@

 [[projects]]
   branch = "master"
-  digest = "1:664bc16fa5d9231f2790aa23270750da8e3f1739f538fd162185fbb8a18175ec"
+  digest = "1:59b401940f79c311efa01c10e2fa8e78522a978538127f369fb39690a03c3eb4"
   name = "golang.org/x/tools"
   packages = [
     "go/ast/astutil",
@@ -259,7 +260,7 @@
     "internal/semver",
   ]
   pruneopts = "NUT"
-  revision = "a96101f168f4ff1e2cd559ba4f61dbd091684e41"
+  revision = "8a44e74612bcf51f0d4407df3d3a8377cb99c2d8"

 [[projects]]
   digest = "1:372cd8eba449f9b6db06677d0e73fa193ec5b19aaee148f355503ab6127045ca"
@@ -283,7 +284,7 @@
   name = "google.golang.org/genproto"
   packages = ["googleapis/rpc/status"]
   pruneopts = "NUT"
-  revision = "d831d65fe17df2e52bcc4316d4a9f7a418701f43"
+  revision = "f467c93bbac2133ff463e1f93d18d8f9f3f04451"

 [[projects]]
   digest = "1:119dce30813e7fe92f031c730bd2fb3ff1bac6fc6bb9d1450507e8bf0ad77620"
@@ -455,7 +456,7 @@
   version = "kubernetes-1.14.0"

 [[projects]]
-  digest = "1:baa7b54b63153d6fe64d41b78ecd91f00fa760f9bbc33f806bd924d9a9b7938d"
+  digest = "1:85f25c196bc700354adc889a397f6550105865ad2588704094276f78f6e9d9ba"
   name = "k8s.io/client-go"
   packages = [
     "discovery",
@@ -632,6 +633,8 @@
     "tools/clientcmd/api",
     "tools/clientcmd/api/latest",
     "tools/clientcmd/api/v1",
+    "tools/leaderelection",
+    "tools/leaderelection/resourcelock",
     "tools/metrics",
     "tools/pager",
     "tools/record",
@@ -706,11 +709,11 @@

 [[projects]]
   branch = "master"
-  digest = "1:03a96603922fc1f6895ae083e1e16d943b55ef0656b56965351bd87e7d90485f"
+  digest = "1:42674e29bf0cf4662d49bd9528e24b9ecc4895b32d0be281f9cf04d3a7671846"
   name = "k8s.io/kube-openapi"
   packages = ["pkg/util/proto"]
   pruneopts = "NUT"
-  revision = "5e45bb682580c9be5ffa4d27d367f0eeba125c7b"
+  revision = "94e1e7b7574c44c4c0f2007de6fe617e259191f3"

 [[projects]]
   digest = "1:15eb7bbf576cc9517ba70b469f9097948ac32f6766bbf6f8b982b81a468b1b74"
@@ -753,6 +756,7 @@
     "github.com/golang/protobuf/ptypes",
     "github.com/golang/protobuf/ptypes/timestamp",
     "github.com/kubernetes-csi/csi-lib-utils/connection",
+    "github.com/kubernetes-csi/csi-lib-utils/leaderelection",
     "github.com/kubernetes-csi/csi-lib-utils/rpc",
     "github.com/kubernetes-csi/csi-test/driver",
     "google.golang.org/grpc",
Gopkg.toml

@@ -11,8 +11,7 @@ required = [

 [[constraint]]
   name = "github.com/container-storage-interface/spec"
-  version = "=1.0.0" # Set this to exactly 1.0.0 until we can update test code to handle 1.1.0
-
+  version = "=1.1.0"

 # The dependency on external-provisioner should be removed with #60.
 [[constraint]]
@@ -37,7 +36,7 @@ required = [

 [[constraint]]
   name = "github.com/kubernetes-csi/csi-lib-utils"
-  version = ">=0.4.0"
+  version = ">=v0.6.1"

 [prune]
   non-go = true
vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go (generated, vendored), 972 changed lines
File diff suppressed because it is too large.
vendor/github.com/kubernetes-csi/csi-lib-utils/leaderelection/leader_election.go (generated, vendored, new file), 211 lines
@@ -0,0 +1,211 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package leaderelection

import (
    "context"
    "fmt"
    "io/ioutil"
    "os"
    "regexp"
    "strings"
    "time"

    "k8s.io/api/core/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/client-go/tools/leaderelection"
    "k8s.io/client-go/tools/leaderelection/resourcelock"
    "k8s.io/client-go/tools/record"
    "k8s.io/klog"
)

const (
    defaultLeaseDuration = 15 * time.Second
    defaultRenewDeadline = 10 * time.Second
    defaultRetryPeriod   = 5 * time.Second
)

// leaderElection is a convenience wrapper around client-go's leader election library.
type leaderElection struct {
    runFunc func(ctx context.Context)

    // the lockName identifies the leader election config and should be shared across all members
    lockName string
    // the identity is the unique identity of the currently running member
    identity string
    // the namespace to store the lock resource
    namespace string
    // resourceLock defines the type of leaderelection that should be used
    // valid options are resourcelock.LeasesResourceLock, resourcelock.EndpointsResourceLock,
    // and resourcelock.ConfigMapsResourceLock
    resourceLock string

    leaseDuration time.Duration
    renewDeadline time.Duration
    retryPeriod   time.Duration

    clientset kubernetes.Interface
}

// NewLeaderElection returns the default & preferred leader election type
func NewLeaderElection(clientset kubernetes.Interface, lockName string, runFunc func(ctx context.Context)) *leaderElection {
    return NewLeaderElectionWithLeases(clientset, lockName, runFunc)
}

// NewLeaderElectionWithLeases returns an implementation of leader election using Leases
func NewLeaderElectionWithLeases(clientset kubernetes.Interface, lockName string, runFunc func(ctx context.Context)) *leaderElection {
    return &leaderElection{
        runFunc:       runFunc,
        lockName:      lockName,
        resourceLock:  resourcelock.LeasesResourceLock,
        leaseDuration: defaultLeaseDuration,
        renewDeadline: defaultRenewDeadline,
        retryPeriod:   defaultRetryPeriod,
        clientset:     clientset,
    }
}

// NewLeaderElectionWithEndpoints returns an implementation of leader election using Endpoints
func NewLeaderElectionWithEndpoints(clientset kubernetes.Interface, lockName string, runFunc func(ctx context.Context)) *leaderElection {
    return &leaderElection{
        runFunc:       runFunc,
        lockName:      lockName,
        resourceLock:  resourcelock.EndpointsResourceLock,
        leaseDuration: defaultLeaseDuration,
        renewDeadline: defaultRenewDeadline,
        retryPeriod:   defaultRetryPeriod,
        clientset:     clientset,
    }
}

// NewLeaderElectionWithConfigMaps returns an implementation of leader election using ConfigMaps
func NewLeaderElectionWithConfigMaps(clientset kubernetes.Interface, lockName string, runFunc func(ctx context.Context)) *leaderElection {
    return &leaderElection{
        runFunc:       runFunc,
        lockName:      lockName,
        resourceLock:  resourcelock.ConfigMapsResourceLock,
        leaseDuration: defaultLeaseDuration,
        renewDeadline: defaultRenewDeadline,
        retryPeriod:   defaultRetryPeriod,
        clientset:     clientset,
    }
}

func (l *leaderElection) WithIdentity(identity string) {
    l.identity = identity
}

func (l *leaderElection) WithNamespace(namespace string) {
    l.namespace = namespace
}

func (l *leaderElection) WithLeaseDuration(leaseDuration time.Duration) {
    l.leaseDuration = leaseDuration
}

func (l *leaderElection) WithRenewDeadline(renewDeadline time.Duration) {
    l.renewDeadline = renewDeadline
}

func (l *leaderElection) WithRetryPeriod(retryPeriod time.Duration) {
    l.retryPeriod = retryPeriod
}

func (l *leaderElection) Run() error {
    if l.identity == "" {
        id, err := defaultLeaderElectionIdentity()
        if err != nil {
            return fmt.Errorf("error getting the default leader identity: %v", err)
        }

        l.identity = id
    }

    if l.namespace == "" {
        l.namespace = inClusterNamespace()
    }

    broadcaster := record.NewBroadcaster()
    broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: l.clientset.CoreV1().Events(l.namespace)})
    eventRecorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: fmt.Sprintf("%s/%s", l.lockName, string(l.identity))})

    rlConfig := resourcelock.ResourceLockConfig{
        Identity:      sanitizeName(l.identity),
        EventRecorder: eventRecorder,
    }

    lock, err := resourcelock.New(l.resourceLock, l.namespace, sanitizeName(l.lockName), l.clientset.CoreV1(), l.clientset.CoordinationV1(), rlConfig)
    if err != nil {
        return err
    }

    leaderConfig := leaderelection.LeaderElectionConfig{
        Lock:          lock,
        LeaseDuration: l.leaseDuration,
        RenewDeadline: l.renewDeadline,
        RetryPeriod:   l.retryPeriod,
        Callbacks: leaderelection.LeaderCallbacks{
            OnStartedLeading: func(ctx context.Context) {
                klog.V(2).Info("became leader, starting")
                l.runFunc(ctx)
            },
            OnStoppedLeading: func() {
                klog.Fatal("stopped leading")
            },
            OnNewLeader: func(identity string) {
                klog.V(3).Infof("new leader detected, current leader: %s", identity)
            },
        },
    }

    leaderelection.RunOrDie(context.TODO(), leaderConfig)
    return nil // should never reach here
}

func defaultLeaderElectionIdentity() (string, error) {
    return os.Hostname()
}

// sanitizeName sanitizes the provided string so it can be consumed by leader election library
func sanitizeName(name string) string {
    re := regexp.MustCompile("[^a-zA-Z0-9-]")
    name = re.ReplaceAllString(name, "-")
    if name[len(name)-1] == '-' {
        // name must not end with '-'
        name = name + "X"
    }
    return name
}

// inClusterNamespace returns the namespace in which the pod is running in by checking
// the env var POD_NAMESPACE, then the file /var/run/secrets/kubernetes.io/serviceaccount/namespace.
// if neither returns a valid namespace, the "default" namespace is returned
func inClusterNamespace() string {
    if ns := os.Getenv("POD_NAMESPACE"); ns != "" {
        return ns
    }

    if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
        if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
            return ns
        }
    }

    return "default"
}
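Editor's note, not part of the commit: a minimal usage sketch of how a CSI sidecar might consume the newly vendored leaderelection helper. The lock name, the kube-system namespace, and runController are illustrative assumptions; the constructors and With* setters are the ones defined in the file above.

package main

import (
    "context"

    "github.com/kubernetes-csi/csi-lib-utils/leaderelection"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/klog"
)

// runController stands in for the sidecar's real work loop (assumption).
func runController(ctx context.Context) {
    <-ctx.Done()
}

func main() {
    cfg, err := rest.InClusterConfig() // assumes the sidecar runs inside a pod
    if err != nil {
        klog.Fatalf("building in-cluster config: %v", err)
    }
    clientset, err := kubernetes.NewForConfig(cfg)
    if err != nil {
        klog.Fatalf("building clientset: %v", err)
    }

    // Lease-based locking is the default returned by NewLeaderElection.
    le := leaderelection.NewLeaderElection(clientset, "my-csi-sidecar", runController)
    le.WithNamespace("kube-system") // optional; otherwise inClusterNamespace() is used

    // Run blocks; on a lost lease the helper calls klog.Fatal and exits the process.
    if err := le.Run(); err != nil {
        klog.Fatalf("leader election failed: %v", err)
    }
}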
vendor/github.com/kubernetes-csi/csi-test/.travis.yml (generated, vendored, new symbolic link), 1 line

@@ -0,0 +1 @@
release-tools/travis.yml
vendor/github.com/kubernetes-csi/csi-test/driver/driver-controller.go (generated, vendored, new file), 110 lines
@@ -0,0 +1,110 @@
|
||||
/*
|
||||
Copyright 2019 Kubernetes Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/reflection"
|
||||
|
||||
csi "github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// CSIDriverControllerServer is the Controller service component of the driver.
|
||||
type CSIDriverControllerServer struct {
|
||||
Controller csi.ControllerServer
|
||||
Identity csi.IdentityServer
|
||||
}
|
||||
|
||||
// CSIDriverController is the CSI Driver Controller backend.
|
||||
type CSIDriverController struct {
|
||||
listener net.Listener
|
||||
server *grpc.Server
|
||||
controllerServer *CSIDriverControllerServer
|
||||
wg sync.WaitGroup
|
||||
running bool
|
||||
lock sync.Mutex
|
||||
creds *CSICreds
|
||||
}
|
||||
|
||||
func NewCSIDriverController(controllerServer *CSIDriverControllerServer) *CSIDriverController {
|
||||
return &CSIDriverController{
|
||||
controllerServer: controllerServer,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *CSIDriverController) goServe(started chan<- bool) {
|
||||
goServe(c.server, &c.wg, c.listener, started)
|
||||
}
|
||||
|
||||
func (c *CSIDriverController) Address() string {
|
||||
return c.listener.Addr().String()
|
||||
}
|
||||
|
||||
func (c *CSIDriverController) Start(l net.Listener) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
// Set listener.
|
||||
c.listener = l
|
||||
|
||||
// Create a new grpc server.
|
||||
c.server = grpc.NewServer(
|
||||
grpc.UnaryInterceptor(c.callInterceptor),
|
||||
)
|
||||
|
||||
if c.controllerServer.Controller != nil {
|
||||
csi.RegisterControllerServer(c.server, c.controllerServer.Controller)
|
||||
}
|
||||
if c.controllerServer.Identity != nil {
|
||||
csi.RegisterIdentityServer(c.server, c.controllerServer.Identity)
|
||||
}
|
||||
|
||||
reflection.Register(c.server)
|
||||
|
||||
waitForServer := make(chan bool)
|
||||
c.goServe(waitForServer)
|
||||
<-waitForServer
|
||||
c.running = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *CSIDriverController) Stop() {
|
||||
stop(&c.lock, &c.wg, c.server, c.running)
|
||||
}
|
||||
|
||||
func (c *CSIDriverController) Close() {
|
||||
c.server.Stop()
|
||||
}
|
||||
|
||||
func (c *CSIDriverController) IsRunning() bool {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
return c.running
|
||||
}
|
||||
|
||||
func (c *CSIDriverController) SetDefaultCreds() {
|
||||
setDefaultCreds(c.creds)
|
||||
}
|
||||
|
||||
func (c *CSIDriverController) callInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
return callInterceptor(ctx, c.creds, req, info, handler)
|
||||
}
|
vendor/github.com/kubernetes-csi/csi-test/driver/driver-node.go (generated, vendored, new file), 109 lines
@@ -0,0 +1,109 @@
|
||||
/*
|
||||
Copyright 2019 Kubernetes Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
context "context"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
csi "github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/reflection"
|
||||
)
|
||||
|
||||
// CSIDriverNodeServer is the Node service component of the driver.
|
||||
type CSIDriverNodeServer struct {
|
||||
Node csi.NodeServer
|
||||
Identity csi.IdentityServer
|
||||
}
|
||||
|
||||
// CSIDriverNode is the CSI Driver Node backend.
|
||||
type CSIDriverNode struct {
|
||||
listener net.Listener
|
||||
server *grpc.Server
|
||||
nodeServer *CSIDriverNodeServer
|
||||
wg sync.WaitGroup
|
||||
running bool
|
||||
lock sync.Mutex
|
||||
creds *CSICreds
|
||||
}
|
||||
|
||||
func NewCSIDriverNode(nodeServer *CSIDriverNodeServer) *CSIDriverNode {
|
||||
return &CSIDriverNode{
|
||||
nodeServer: nodeServer,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *CSIDriverNode) goServe(started chan<- bool) {
|
||||
goServe(c.server, &c.wg, c.listener, started)
|
||||
}
|
||||
|
||||
func (c *CSIDriverNode) Address() string {
|
||||
return c.listener.Addr().String()
|
||||
}
|
||||
|
||||
func (c *CSIDriverNode) Start(l net.Listener) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
// Set listener.
|
||||
c.listener = l
|
||||
|
||||
// Create a new grpc server.
|
||||
c.server = grpc.NewServer(
|
||||
grpc.UnaryInterceptor(c.callInterceptor),
|
||||
)
|
||||
|
||||
if c.nodeServer.Node != nil {
|
||||
csi.RegisterNodeServer(c.server, c.nodeServer.Node)
|
||||
}
|
||||
if c.nodeServer.Identity != nil {
|
||||
csi.RegisterIdentityServer(c.server, c.nodeServer.Identity)
|
||||
}
|
||||
|
||||
reflection.Register(c.server)
|
||||
|
||||
waitForServer := make(chan bool)
|
||||
c.goServe(waitForServer)
|
||||
<-waitForServer
|
||||
c.running = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *CSIDriverNode) Stop() {
|
||||
stop(&c.lock, &c.wg, c.server, c.running)
|
||||
}
|
||||
|
||||
func (c *CSIDriverNode) Close() {
|
||||
c.server.Stop()
|
||||
}
|
||||
|
||||
func (c *CSIDriverNode) IsRunning() bool {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
return c.running
|
||||
}
|
||||
|
||||
func (c *CSIDriverNode) SetDefaultCreds() {
|
||||
setDefaultCreds(c.creds)
|
||||
}
|
||||
|
||||
func (c *CSIDriverNode) callInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
return callInterceptor(ctx, c.creds, req, info, handler)
|
||||
}
|
vendor/github.com/kubernetes-csi/csi-test/driver/driver.go (generated, vendored), 109 changed lines
@@ -41,6 +41,8 @@ var (
|
||||
ErrAuthFailed = errors.New("authentication failed")
|
||||
)
|
||||
|
||||
// CSIDriverServers is a unified driver component with both Controller and Node
|
||||
// services.
|
||||
type CSIDriverServers struct {
|
||||
Controller csi.ControllerServer
|
||||
Identity csi.IdentityServer
|
||||
@@ -54,14 +56,15 @@ const secretField = "secretKey"
|
||||
// secrets. This mock driver has a single string secret with secretField as the
|
||||
// key.
|
||||
type CSICreds struct {
|
||||
CreateVolumeSecret string
|
||||
DeleteVolumeSecret string
|
||||
ControllerPublishVolumeSecret string
|
||||
ControllerUnpublishVolumeSecret string
|
||||
NodeStageVolumeSecret string
|
||||
NodePublishVolumeSecret string
|
||||
CreateSnapshotSecret string
|
||||
DeleteSnapshotSecret string
|
||||
CreateVolumeSecret string
|
||||
DeleteVolumeSecret string
|
||||
ControllerPublishVolumeSecret string
|
||||
ControllerUnpublishVolumeSecret string
|
||||
NodeStageVolumeSecret string
|
||||
NodePublishVolumeSecret string
|
||||
CreateSnapshotSecret string
|
||||
DeleteSnapshotSecret string
|
||||
ControllerValidateVolumeCapabilitiesSecret string
|
||||
}
|
||||
|
||||
type CSIDriver struct {
|
||||
@@ -81,15 +84,7 @@ func NewCSIDriver(servers *CSIDriverServers) *CSIDriver {
|
||||
}
|
||||
|
||||
func (c *CSIDriver) goServe(started chan<- bool) {
|
||||
c.wg.Add(1)
|
||||
go func() {
|
||||
defer c.wg.Done()
|
||||
started <- true
|
||||
err := c.server.Serve(c.listener)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
}()
|
||||
goServe(c.server, &c.wg, c.listener, started)
|
||||
}
|
||||
|
||||
func (c *CSIDriver) Address() string {
|
||||
@@ -128,15 +123,7 @@ func (c *CSIDriver) Start(l net.Listener) error {
|
||||
}
|
||||
|
||||
func (c *CSIDriver) Stop() {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if !c.running {
|
||||
return
|
||||
}
|
||||
|
||||
c.server.Stop()
|
||||
c.wg.Wait()
|
||||
stop(&c.lock, &c.wg, c.server, c.running)
|
||||
}
|
||||
|
||||
func (c *CSIDriver) Close() {
|
||||
@@ -152,20 +139,56 @@ func (c *CSIDriver) IsRunning() bool {
|
||||
|
||||
// SetDefaultCreds sets the default secrets for CSI creds.
|
||||
func (c *CSIDriver) SetDefaultCreds() {
|
||||
c.creds = &CSICreds{
|
||||
CreateVolumeSecret: "secretval1",
|
||||
DeleteVolumeSecret: "secretval2",
|
||||
ControllerPublishVolumeSecret: "secretval3",
|
||||
ControllerUnpublishVolumeSecret: "secretval4",
|
||||
NodeStageVolumeSecret: "secretval5",
|
||||
NodePublishVolumeSecret: "secretval6",
|
||||
CreateSnapshotSecret: "secretval7",
|
||||
DeleteSnapshotSecret: "secretval8",
|
||||
}
|
||||
setDefaultCreds(c.creds)
|
||||
}
|
||||
|
||||
func (c *CSIDriver) callInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
err := c.authInterceptor(req)
|
||||
return callInterceptor(ctx, c.creds, req, info, handler)
|
||||
}
|
||||
|
||||
// goServe starts a grpc server.
|
||||
func goServe(server *grpc.Server, wg *sync.WaitGroup, listener net.Listener, started chan<- bool) {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
started <- true
|
||||
err := server.Serve(listener)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// stop stops a grpc server.
|
||||
func stop(lock *sync.Mutex, wg *sync.WaitGroup, server *grpc.Server, running bool) {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
|
||||
if !running {
|
||||
return
|
||||
}
|
||||
|
||||
server.Stop()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// setDefaultCreds sets the default credentials, given a CSICreds instance.
|
||||
func setDefaultCreds(creds *CSICreds) {
|
||||
creds = &CSICreds{
|
||||
CreateVolumeSecret: "secretval1",
|
||||
DeleteVolumeSecret: "secretval2",
|
||||
ControllerPublishVolumeSecret: "secretval3",
|
||||
ControllerUnpublishVolumeSecret: "secretval4",
|
||||
NodeStageVolumeSecret: "secretval5",
|
||||
NodePublishVolumeSecret: "secretval6",
|
||||
CreateSnapshotSecret: "secretval7",
|
||||
DeleteSnapshotSecret: "secretval8",
|
||||
ControllerValidateVolumeCapabilitiesSecret: "secretval9",
|
||||
}
|
||||
}
|
||||
|
||||
func callInterceptor(ctx context.Context, creds *CSICreds, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
|
||||
err := authInterceptor(creds, req)
|
||||
if err != nil {
|
||||
logGRPC(info.FullMethod, req, nil, err)
|
||||
return nil, err
|
||||
@@ -175,9 +198,9 @@ func (c *CSIDriver) callInterceptor(ctx context.Context, req interface{}, info *
|
||||
return rsp, err
|
||||
}
|
||||
|
||||
func (c *CSIDriver) authInterceptor(req interface{}) error {
|
||||
if c.creds != nil {
|
||||
authenticated, authErr := isAuthenticated(req, c.creds)
|
||||
func authInterceptor(creds *CSICreds, req interface{}) error {
|
||||
if creds != nil {
|
||||
authenticated, authErr := isAuthenticated(req, creds)
|
||||
if !authenticated {
|
||||
if authErr == ErrNoCredentials {
|
||||
return status.Error(codes.InvalidArgument, authErr.Error())
|
||||
@@ -227,6 +250,8 @@ func isAuthenticated(req interface{}, creds *CSICreds) (bool, error) {
|
||||
return authenticateCreateSnapshot(r, creds)
|
||||
case *csi.DeleteSnapshotRequest:
|
||||
return authenticateDeleteSnapshot(r, creds)
|
||||
case *csi.ValidateVolumeCapabilitiesRequest:
|
||||
return authenticateControllerValidateVolumeCapabilities(r, creds)
|
||||
default:
|
||||
return true, nil
|
||||
}
|
||||
@@ -264,6 +289,10 @@ func authenticateDeleteSnapshot(req *csi.DeleteSnapshotRequest, creds *CSICreds)
|
||||
return credsCheck(req.GetSecrets(), creds.DeleteSnapshotSecret)
|
||||
}
|
||||
|
||||
func authenticateControllerValidateVolumeCapabilities(req *csi.ValidateVolumeCapabilitiesRequest, creds *CSICreds) (bool, error) {
|
||||
return credsCheck(req.GetSecrets(), creds.ControllerValidateVolumeCapabilitiesSecret)
|
||||
}
|
||||
|
||||
func credsCheck(secrets map[string]string, secretVal string) (bool, error) {
|
||||
if len(secrets) == 0 {
|
||||
return false, ErrNoCredentials
|
||||
|
vendor/github.com/kubernetes-csi/csi-test/driver/driver.mock.go (generated, vendored), 26 changed lines
@@ -96,6 +96,19 @@ func (m *MockControllerServer) EXPECT() *MockControllerServerMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// ControllerExpandVolume mocks base method
|
||||
func (m *MockControllerServer) ControllerExpandVolume(arg0 context.Context, arg1 *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
|
||||
ret := m.ctrl.Call(m, "ControllerExpandVolume", arg0, arg1)
|
||||
ret0, _ := ret[0].(*csi.ControllerExpandVolumeResponse)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// ControllerExpandVolume indicates an expected call of ControllerExpandVolume
|
||||
func (mr *MockControllerServerMockRecorder) ControllerExpandVolume(arg0, arg1 interface{}) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerExpandVolume", reflect.TypeOf((*MockControllerServer)(nil).ControllerExpandVolume), arg0, arg1)
|
||||
}
|
||||
|
||||
// ControllerGetCapabilities mocks base method
|
||||
func (m *MockControllerServer) ControllerGetCapabilities(arg0 context.Context, arg1 *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
|
||||
ret := m.ctrl.Call(m, "ControllerGetCapabilities", arg0, arg1)
|
||||
@@ -262,6 +275,19 @@ func (m *MockNodeServer) EXPECT() *MockNodeServerMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// NodeExpandVolume mocks base method
|
||||
func (m *MockNodeServer) NodeExpandVolume(arg0 context.Context, arg1 *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
|
||||
ret := m.ctrl.Call(m, "NodeExpandVolume", arg0, arg1)
|
||||
ret0, _ := ret[0].(*csi.NodeExpandVolumeResponse)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// NodeExpandVolume indicates an expected call of NodeExpandVolume
|
||||
func (mr *MockNodeServerMockRecorder) NodeExpandVolume(arg0, arg1 interface{}) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeExpandVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeExpandVolume), arg0, arg1)
|
||||
}
|
||||
|
||||
// NodeGetCapabilities mocks base method
|
||||
func (m *MockNodeServer) NodeGetCapabilities(arg0 context.Context, arg1 *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
|
||||
ret := m.ctrl.Call(m, "NodeGetCapabilities", arg0, arg1)
|
||||
|
vendor/github.com/kubernetes-csi/csi-test/driver/mock.go (generated, vendored), 12 changed lines
@@ -46,9 +46,9 @@ func NewMockCSIDriver(servers *MockCSIDriverServers) *MockCSIDriver {
     }
 }

-func (m *MockCSIDriver) Start() error {
-    // Listen on a port assigned by the net package
-    l, err := net.Listen("tcp", "127.0.0.1:0")
+// StartOnAddress starts a new gRPC server listening on given address.
+func (m *MockCSIDriver) StartOnAddress(network, address string) error {
+    l, err := net.Listen(network, address)
     if err != nil {
         return err
     }
@@ -61,6 +61,12 @@ func (m *MockCSIDriver) Start() error {
     return nil
 }

+// Start starts a new gRPC server listening on a random TCP loopback port.
+func (m *MockCSIDriver) Start() error {
+    // Listen on a port assigned by the net package
+    return m.StartOnAddress("tcp", "127.0.0.1:0")
+}
+
 func (m *MockCSIDriver) Nexus() (*grpc.ClientConn, error) {
     // Start server
     err := m.Start()
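Editor's note, not part of the commit: a hedged sketch of how a test could use the new StartOnAddress to serve the mock driver over a Unix socket instead of the random TCP port that Start picks. The gomock setup behind servers is elided, and the availability of Address/Stop on MockCSIDriver plus the "unix://" dial target are assumptions.

package mocktest

import (
    "google.golang.org/grpc"

    "github.com/kubernetes-csi/csi-test/driver"
)

// startOnSocket serves an already-populated mock driver on a Unix socket and
// returns a client connection to it.
func startOnSocket(servers *driver.MockCSIDriverServers, sock string) (*driver.MockCSIDriver, *grpc.ClientConn, error) {
    m := driver.NewMockCSIDriver(servers)

    // StartOnAddress takes the same (network, address) pair as net.Listen.
    if err := m.StartOnAddress("unix", sock); err != nil {
        return nil, nil, err
    }

    // Dial the socket the listener bound to (assumed dialing convention).
    conn, err := grpc.Dial("unix://"+m.Address(), grpc.WithInsecure())
    if err != nil {
        m.Stop()
        return nil, nil, err
    }
    return m, conn, nil
}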
vendor/github.com/kubernetes-csi/csi-test/release-tools/LICENSE (generated, vendored, new file), 201 lines
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
vendor/golang.org/x/sys/unix/mksyscall.go (generated, vendored), 5 changed lines
@@ -153,6 +153,11 @@ func main() {
         }
         funct, inps, outps, sysname := f[2], f[3], f[4], f[5]

+        // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers.
+        if goos == "darwin" && !libc && funct == "ClockGettime" {
+            continue
+        }
+
         // Split argument lists on comma.
         in := parseParamList(inps)
         out := parseParamList(outps)
vendor/golang.org/x/sys/unix/syscall_darwin.go (generated, vendored), 17 changed lines
@@ -144,6 +144,23 @@ func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) (
|
||||
|
||||
//sys getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error)
|
||||
|
||||
func SysctlClockinfo(name string) (*Clockinfo, error) {
|
||||
mib, err := sysctlmib(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n := uintptr(SizeofClockinfo)
|
||||
var ci Clockinfo
|
||||
if err := sysctl(mib, (*byte)(unsafe.Pointer(&ci)), &n, nil, 0); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if n != SizeofClockinfo {
|
||||
return nil, EIO
|
||||
}
|
||||
return &ci, nil
|
||||
}
|
||||
|
||||
//sysnb pipe() (r int, w int, err error)
|
||||
|
||||
func Pipe(p []int) (err error) {
|
||||
|
vendor/golang.org/x/sys/unix/types_darwin.go (generated, vendored), 6 changed lines
@@ -275,3 +275,9 @@ const (
 // uname

 type Utsname C.struct_utsname
+
+// Clockinfo
+
+const SizeofClockinfo = C.sizeof_struct_clockinfo
+
+type Clockinfo C.struct_clockinfo
vendor/golang.org/x/sys/unix/ztypes_darwin_386.go (generated, vendored), 10 changed lines
@@ -487,3 +487,13 @@ type Utsname struct {
|
||||
Version [256]byte
|
||||
Machine [256]byte
|
||||
}
|
||||
|
||||
const SizeofClockinfo = 0x14
|
||||
|
||||
type Clockinfo struct {
|
||||
Hz int32
|
||||
Tick int32
|
||||
Tickadj int32
|
||||
Stathz int32
|
||||
Profhz int32
|
||||
}
|
||||
|
vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go (generated, vendored), 10 changed lines
@@ -497,3 +497,13 @@ type Utsname struct {
|
||||
Version [256]byte
|
||||
Machine [256]byte
|
||||
}
|
||||
|
||||
const SizeofClockinfo = 0x14
|
||||
|
||||
type Clockinfo struct {
|
||||
Hz int32
|
||||
Tick int32
|
||||
Tickadj int32
|
||||
Stathz int32
|
||||
Profhz int32
|
||||
}
|
||||
|
vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go (generated, vendored), 10 changed lines
@@ -488,3 +488,13 @@ type Utsname struct {
|
||||
Version [256]byte
|
||||
Machine [256]byte
|
||||
}
|
||||
|
||||
const SizeofClockinfo = 0x14
|
||||
|
||||
type Clockinfo struct {
|
||||
Hz int32
|
||||
Tick int32
|
||||
Tickadj int32
|
||||
Stathz int32
|
||||
Profhz int32
|
||||
}
|
||||
|
vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go (generated, vendored), 10 changed lines
@@ -497,3 +497,13 @@ type Utsname struct {
|
||||
Version [256]byte
|
||||
Machine [256]byte
|
||||
}
|
||||
|
||||
const SizeofClockinfo = 0x14
|
||||
|
||||
type Clockinfo struct {
|
||||
Hz int32
|
||||
Tick int32
|
||||
Tickadj int32
|
||||
Stathz int32
|
||||
Profhz int32
|
||||
}
|
||||
|
vendor/golang.org/x/tools/go/packages/external.go (generated, vendored), 2 changed lines
@@ -18,7 +18,7 @@ import (

 // Driver
 type driverRequest struct {
-    Command    string   `json "command"`
+    Command    string   `json:"command"`
     Mode       LoadMode `json:"mode"`
     Env        []string `json:"env"`
     BuildFlags []string `json:"build_flags"`
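Editor's note on the one-character fix above: a struct tag written as `json "command"` (no colon) does not parse as a key:"value" pair, so encoding/json ignores it and falls back to the exported field name. A standalone illustration, not part of the vendored package:

package main

import (
    "encoding/json"
    "fmt"
)

type brokenRequest struct {
    Command string `json "command"` // malformed tag: missing colon, encoding/json ignores it
}

type fixedRequest struct {
    Command string `json:"command"` // well-formed key:"value" tag
}

func main() {
    b, _ := json.Marshal(brokenRequest{Command: "list"})
    f, _ := json.Marshal(fixedRequest{Command: "list"})
    fmt.Println(string(b)) // {"Command":"list"} -- falls back to the exported field name
    fmt.Println(string(f)) // {"command":"list"}
}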
vendor/golang.org/x/tools/go/packages/golist.go (generated, vendored), 18 changed lines
@@ -78,7 +78,7 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
|
||||
var sizes types.Sizes
|
||||
var sizeserr error
|
||||
var sizeswg sync.WaitGroup
|
||||
if cfg.Mode&NeedTypesSizes != 0 {
|
||||
if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
|
||||
sizeswg.Add(1)
|
||||
go func() {
|
||||
sizes, sizeserr = getSizes(cfg)
|
||||
@@ -128,7 +128,7 @@ extractQueries:
|
||||
// patterns also requires a go list call, since it's the equivalent of
|
||||
// ".".
|
||||
if len(restPatterns) > 0 || len(patterns) == 0 {
|
||||
dr, err := golistDriverCurrent(cfg, restPatterns...)
|
||||
dr, err := golistDriver(cfg, restPatterns...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -147,13 +147,13 @@ extractQueries:
|
||||
var containsCandidates []string
|
||||
|
||||
if len(containFiles) != 0 {
|
||||
if err := runContainsQueries(cfg, golistDriverCurrent, response, containFiles); err != nil {
|
||||
if err := runContainsQueries(cfg, golistDriver, response, containFiles); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if len(packagesNamed) != 0 {
|
||||
if err := runNamedQueries(cfg, golistDriverCurrent, response, packagesNamed); err != nil {
|
||||
if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
@@ -168,7 +168,7 @@ extractQueries:
|
||||
}
|
||||
|
||||
if len(needPkgs) > 0 {
|
||||
addNeededOverlayPackages(cfg, golistDriverCurrent, response, needPkgs)
|
||||
addNeededOverlayPackages(cfg, golistDriver, response, needPkgs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -540,10 +540,10 @@ func otherFiles(p *jsonPackage) [][]string {
|
||||
return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
|
||||
}
|
||||
|
||||
// golistDriverCurrent uses the "go list" command to expand the
|
||||
// pattern words and return metadata for the specified packages.
|
||||
// dir may be "" and env may be nil, as per os/exec.Command.
|
||||
func golistDriverCurrent(cfg *Config, words ...string) (*driverResponse, error) {
|
||||
// golistDriver uses the "go list" command to expand the pattern
|
||||
// words and return metadata for the specified packages. dir may be
|
||||
// "" and env may be nil, as per os/exec.Command.
|
||||
func golistDriver(cfg *Config, words ...string) (*driverResponse, error) {
|
||||
// go list uses the following identifiers in ImportPath and Imports:
|
||||
//
|
||||
// "p" -- importable package or main (command)
|
||||
|
vendor/golang.org/x/tools/go/packages/packages.go (generated, vendored), 50 changed lines
@@ -88,14 +88,14 @@ const (
|
||||
|
||||
// LoadTypes adds type information for package-level
|
||||
// declarations in the packages matching the patterns.
|
||||
// Package fields added: Types, Fset, and IllTyped.
|
||||
// Package fields added: Types, TypesSizes, Fset, and IllTyped.
|
||||
// This mode uses type information provided by the build system when
|
||||
// possible, and may fill in the ExportFile field.
|
||||
LoadTypes = LoadImports | NeedTypes
|
||||
LoadTypes = LoadImports | NeedTypes | NeedTypesSizes
|
||||
|
||||
// LoadSyntax adds typed syntax trees for the packages matching the patterns.
|
||||
// Package fields added: Syntax, and TypesInfo, for direct pattern matches only.
|
||||
LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo | NeedTypesSizes
|
||||
LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo
|
||||
|
||||
// LoadAllSyntax adds typed syntax trees for the packages matching the patterns
|
||||
// and all dependencies.
|
||||
@@ -137,7 +137,7 @@ type Config struct {
|
||||
BuildFlags []string
|
||||
|
||||
// Fset provides source position information for syntax trees and types.
|
||||
// If Fset is nil, the loader will create a new FileSet.
|
||||
// If Fset is nil, Load will use a new fileset, but preserve Fset's value.
|
||||
Fset *token.FileSet
|
||||
|
||||
// ParseFile is called to read and parse each file
|
||||
@@ -420,6 +420,12 @@ type loader struct {
|
||||
Config
|
||||
sizes types.Sizes
|
||||
exportMu sync.Mutex // enforces mutual exclusion of exportdata operations
|
||||
|
||||
// TODO(matloob): Add an implied mode here and use that instead of mode.
|
||||
// Implied mode would contain all the fields we need the data for so we can
|
||||
// get the actually requested fields. We'll zero them out before returning
|
||||
// packages to the user. This will make it easier for us to get the conditions
|
||||
// where we need certain modes right.
|
||||
}
|
||||
|
||||
func newLoader(cfg *Config) *loader {
|
||||
@@ -563,7 +569,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
||||
return lpkg.needsrc
|
||||
}
|
||||
|
||||
if ld.Mode&NeedImports == 0 {
|
||||
if ld.Mode&(NeedImports|NeedDeps) == 0 {
|
||||
// We do this to drop the stub import packages that we are not even going to try to resolve.
|
||||
for _, lpkg := range initial {
|
||||
lpkg.Imports = nil
|
||||
@@ -574,7 +580,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
||||
visit(lpkg)
|
||||
}
|
||||
}
|
||||
if ld.Mode&NeedDeps != 0 {
|
||||
if ld.Mode&NeedDeps != 0 { // TODO(matloob): This is only the case if NeedTypes is also set, right?
|
||||
for _, lpkg := range srcPkgs {
|
||||
// Complete type information is required for the
|
||||
// immediate dependencies of each source package.
|
||||
@@ -602,46 +608,48 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
||||
importPlaceholders := make(map[string]*Package)
|
||||
for i, lpkg := range initial {
|
||||
result[i] = lpkg.Package
|
||||
}
|
||||
for i := range ld.pkgs {
|
||||
// Clear all unrequested fields, for extra de-Hyrum-ization.
|
||||
if ld.Mode&NeedName == 0 {
|
||||
result[i].Name = ""
|
||||
result[i].PkgPath = ""
|
||||
ld.pkgs[i].Name = ""
|
||||
ld.pkgs[i].PkgPath = ""
|
||||
}
|
||||
if ld.Mode&NeedFiles == 0 {
|
||||
result[i].GoFiles = nil
|
||||
result[i].OtherFiles = nil
|
||||
ld.pkgs[i].GoFiles = nil
|
||||
ld.pkgs[i].OtherFiles = nil
|
||||
}
|
||||
if ld.Mode&NeedCompiledGoFiles == 0 {
|
||||
result[i].CompiledGoFiles = nil
|
||||
ld.pkgs[i].CompiledGoFiles = nil
|
||||
}
|
||||
if ld.Mode&NeedImports == 0 {
|
||||
result[i].Imports = nil
|
||||
ld.pkgs[i].Imports = nil
|
||||
}
|
||||
if ld.Mode&NeedExportsFile == 0 {
|
||||
result[i].ExportFile = ""
|
||||
ld.pkgs[i].ExportFile = ""
|
||||
}
|
||||
if ld.Mode&NeedTypes == 0 {
|
||||
result[i].Types = nil
|
||||
result[i].Fset = nil
|
||||
result[i].IllTyped = false
|
||||
ld.pkgs[i].Types = nil
|
||||
ld.pkgs[i].Fset = nil
|
||||
ld.pkgs[i].IllTyped = false
|
||||
}
|
||||
if ld.Mode&NeedSyntax == 0 {
|
||||
result[i].Syntax = nil
|
||||
ld.pkgs[i].Syntax = nil
|
||||
}
|
||||
if ld.Mode&NeedTypesInfo == 0 {
|
||||
result[i].TypesInfo = nil
|
||||
ld.pkgs[i].TypesInfo = nil
|
||||
}
|
||||
if ld.Mode&NeedTypesSizes == 0 {
|
||||
result[i].TypesSizes = nil
|
||||
ld.pkgs[i].TypesSizes = nil
|
||||
}
|
||||
if ld.Mode&NeedDeps == 0 {
|
||||
for j, pkg := range result[i].Imports {
|
||||
for j, pkg := range ld.pkgs[i].Imports {
|
||||
ph, ok := importPlaceholders[pkg.ID]
|
||||
if !ok {
|
||||
ph = &Package{ID: pkg.ID}
|
||||
importPlaceholders[pkg.ID] = ph
|
||||
}
|
||||
result[i].Imports[j] = ph
|
||||
ld.pkgs[i].Imports[j] = ph
|
||||
}
|
||||
}
|
||||
}
|
||||
|
vendor/golang.org/x/tools/imports/mkstdlib.go (generated, vendored), 20 changed lines
@@ -14,6 +14,7 @@ import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
@@ -59,6 +60,10 @@ func main() {
|
||||
mustOpen(api("go1.10.txt")),
|
||||
mustOpen(api("go1.11.txt")),
|
||||
mustOpen(api("go1.12.txt")),
|
||||
|
||||
// The API of the syscall/js package needs to be computed explicitly,
|
||||
// because it's not included in the GOROOT/api/go1.*.txt files at this time.
|
||||
syscallJSAPI(),
|
||||
)
|
||||
sc := bufio.NewScanner(f)
|
||||
|
||||
@@ -110,3 +115,18 @@ func main() {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// syscallJSAPI returns the API of the syscall/js package.
|
||||
// It's computed from the contents of $(go env GOROOT)/src/syscall/js.
|
||||
func syscallJSAPI() io.Reader {
|
||||
var exeSuffix string
|
||||
if runtime.GOOS == "windows" {
|
||||
exeSuffix = ".exe"
|
||||
}
|
||||
cmd := exec.Command("go"+exeSuffix, "run", "cmd/api", "-contexts", "js-wasm", "syscall/js")
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
return bytes.NewReader(out)
|
||||
}
|
||||
|
vendor/golang.org/x/tools/imports/zstdlib.go (generated, vendored), 23 changed lines
@@ -9783,6 +9783,29 @@ var stdlib = map[string]map[string]bool{
"XP1_UNI_RECV": true,
"XP1_UNI_SEND": true,
},
"syscall/js": map[string]bool{
"Error": true,
"Func": true,
"FuncOf": true,
"Global": true,
"Null": true,
"Type": true,
"TypeBoolean": true,
"TypeFunction": true,
"TypeNull": true,
"TypeNumber": true,
"TypeObject": true,
"TypeString": true,
"TypeSymbol": true,
"TypeUndefined": true,
"TypedArray": true,
"TypedArrayOf": true,
"Undefined": true,
"Value": true,
"ValueError": true,
"ValueOf": true,
"Wrapper": true,
},
"testing": map[string]bool{
"AllocsPerRun": true,
"B": true,
vendor/k8s.io/client-go/tools/leaderelection/healthzadaptor.go (new file, 69 lines, generated, vendored)
@@ -0,0 +1,69 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package leaderelection

import (
"net/http"
"sync"
"time"
)

// HealthzAdaptor associates the /healthz endpoint with the LeaderElection object.
// It helps deal with the /healthz endpoint being set up prior to the LeaderElection.
// This contains the code needed to act as an adaptor between the leader
// election code the health check code. It allows us to provide health
// status about the leader election. Most specifically about if the leader
// has failed to renew without exiting the process. In that case we should
// report not healthy and rely on the kubelet to take down the process.
type HealthzAdaptor struct {
pointerLock sync.Mutex
le *LeaderElector
timeout time.Duration
}

// Name returns the name of the health check we are implementing.
func (l *HealthzAdaptor) Name() string {
return "leaderElection"
}

// Check is called by the healthz endpoint handler.
// It fails (returns an error) if we own the lease but had not been able to renew it.
func (l *HealthzAdaptor) Check(req *http.Request) error {
l.pointerLock.Lock()
defer l.pointerLock.Unlock()
if l.le == nil {
return nil
}
return l.le.Check(l.timeout)
}

// SetLeaderElection ties a leader election object to a HealthzAdaptor
func (l *HealthzAdaptor) SetLeaderElection(le *LeaderElector) {
l.pointerLock.Lock()
defer l.pointerLock.Unlock()
l.le = le
}

// NewLeaderHealthzAdaptor creates a basic healthz adaptor to monitor a leader election.
// timeout determines the time beyond the lease expiry to be allowed for timeout.
// checks within the timeout period after the lease expires will still return healthy.
func NewLeaderHealthzAdaptor(timeout time.Duration) *HealthzAdaptor {
result := &HealthzAdaptor{
timeout: timeout,
}
return result
}
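A minimal sketch of wiring the adaptor into a plain net/http health endpoint (not part of the vendored file; the port, path, and 20s timeout are illustrative):

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"k8s.io/client-go/tools/leaderelection"
)

func main() {
	adaptor := leaderelection.NewLeaderHealthzAdaptor(20 * time.Second)
	// Pass adaptor as LeaderElectionConfig.WatchDog when building the elector;
	// RunOrDie then calls SetLeaderElection on it.
	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		if err := adaptor.Check(r); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		fmt.Fprintln(w, "ok")
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}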
vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go (new file, 373 lines, generated, vendored)
@@ -0,0 +1,373 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package leaderelection implements leader election of a set of endpoints.
// It uses an annotation in the endpoints object to store the record of the
// election state.
//
// This implementation does not guarantee that only one client is acting as a
// leader (a.k.a. fencing). A client observes timestamps captured locally to
// infer the state of the leader election. Thus the implementation is tolerant
// to arbitrary clock skew, but is not tolerant to arbitrary clock skew rate.
//
// However the level of tolerance to skew rate can be configured by setting
// RenewDeadline and LeaseDuration appropriately. The tolerance expressed as a
// maximum tolerated ratio of time passed on the fastest node to time passed on
// the slowest node can be approximately achieved with a configuration that sets
// the same ratio of LeaseDuration to RenewDeadline. For example if a user wanted
// to tolerate some nodes progressing forward in time twice as fast as other nodes,
// the user could set LeaseDuration to 60 seconds and RenewDeadline to 30 seconds.
//
// While not required, some method of clock synchronization between nodes in the
// cluster is highly recommended. It's important to keep in mind when configuring
// this client that the tolerance to skew rate varies inversely to master
// availability.
//
// Larger clusters often have a more lenient SLA for API latency. This should be
// taken into account when configuring the client. The rate of leader transitions
// should be monitored and RetryPeriod and LeaseDuration should be increased
// until the rate is stable and acceptably low. It's important to keep in mind
// when configuring this client that the tolerance to API latency varies inversely
// to master availability.
//
// DISCLAIMER: this is an alpha API. This library will likely change significantly
// or even be removed entirely in subsequent releases. Depend on this API at
// your own risk.
package leaderelection

import (
"context"
"fmt"
"reflect"
"time"

"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
rl "k8s.io/client-go/tools/leaderelection/resourcelock"

"k8s.io/klog"
)

const (
JitterFactor = 1.2
)

// NewLeaderElector creates a LeaderElector from a LeaderElectionConfig
func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) {
if lec.LeaseDuration <= lec.RenewDeadline {
return nil, fmt.Errorf("leaseDuration must be greater than renewDeadline")
}
if lec.RenewDeadline <= time.Duration(JitterFactor*float64(lec.RetryPeriod)) {
return nil, fmt.Errorf("renewDeadline must be greater than retryPeriod*JitterFactor")
}
if lec.LeaseDuration < 1 {
return nil, fmt.Errorf("leaseDuration must be greater than zero")
}
if lec.RenewDeadline < 1 {
return nil, fmt.Errorf("renewDeadline must be greater than zero")
}
if lec.RetryPeriod < 1 {
return nil, fmt.Errorf("retryPeriod must be greater than zero")
}

if lec.Lock == nil {
return nil, fmt.Errorf("Lock must not be nil.")
}
le := LeaderElector{
config: lec,
clock: clock.RealClock{},
metrics: globalMetricsFactory.newLeaderMetrics(),
}
le.metrics.leaderOff(le.config.Name)
return &le, nil
}

type LeaderElectionConfig struct {
// Lock is the resource that will be used for locking
Lock rl.Interface

// LeaseDuration is the duration that non-leader candidates will
// wait to force acquire leadership. This is measured against time of
// last observed ack.
LeaseDuration time.Duration
// RenewDeadline is the duration that the acting master will retry
// refreshing leadership before giving up.
RenewDeadline time.Duration
// RetryPeriod is the duration the LeaderElector clients should wait
// between tries of actions.
RetryPeriod time.Duration

// Callbacks are callbacks that are triggered during certain lifecycle
// events of the LeaderElector
Callbacks LeaderCallbacks

// WatchDog is the associated health checker
// WatchDog may be null if its not needed/configured.
WatchDog *HealthzAdaptor

// ReleaseOnCancel should be set true if the lock should be released
// when the run context is cancelled. If you set this to true, you must
// ensure all code guarded by this lease has successfully completed
// prior to cancelling the context, or you may have two processes
// simultaneously acting on the critical path.
ReleaseOnCancel bool

// Name is the name of the resource lock for debugging
Name string
}

// LeaderCallbacks are callbacks that are triggered during certain
// lifecycle events of the LeaderElector. These are invoked asynchronously.
//
// possible future callbacks:
// * OnChallenge()
type LeaderCallbacks struct {
// OnStartedLeading is called when a LeaderElector client starts leading
OnStartedLeading func(context.Context)
// OnStoppedLeading is called when a LeaderElector client stops leading
OnStoppedLeading func()
// OnNewLeader is called when the client observes a leader that is
// not the previously observed leader. This includes the first observed
// leader when the client starts.
OnNewLeader func(identity string)
}

// LeaderElector is a leader election client.
type LeaderElector struct {
config LeaderElectionConfig
// internal bookkeeping
observedRecord rl.LeaderElectionRecord
observedTime time.Time
// used to implement OnNewLeader(), may lag slightly from the
// value observedRecord.HolderIdentity if the transition has
// not yet been reported.
reportedLeader string

// clock is wrapper around time to allow for less flaky testing
clock clock.Clock

metrics leaderMetricsAdapter

// name is the name of the resource lock for debugging
name string
}

// Run starts the leader election loop
func (le *LeaderElector) Run(ctx context.Context) {
defer func() {
runtime.HandleCrash()
le.config.Callbacks.OnStoppedLeading()
}()
if !le.acquire(ctx) {
return // ctx signalled done
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go le.config.Callbacks.OnStartedLeading(ctx)
le.renew(ctx)
}

// RunOrDie starts a client with the provided config or panics if the config
// fails to validate.
func RunOrDie(ctx context.Context, lec LeaderElectionConfig) {
le, err := NewLeaderElector(lec)
if err != nil {
panic(err)
}
if lec.WatchDog != nil {
lec.WatchDog.SetLeaderElection(le)
}
le.Run(ctx)
}

// GetLeader returns the identity of the last observed leader or returns the empty string if
// no leader has yet been observed.
func (le *LeaderElector) GetLeader() string {
return le.observedRecord.HolderIdentity
}

// IsLeader returns true if the last observed leader was this client else returns false.
func (le *LeaderElector) IsLeader() bool {
return le.observedRecord.HolderIdentity == le.config.Lock.Identity()
}

// acquire loops calling tryAcquireOrRenew and returns true immediately when tryAcquireOrRenew succeeds.
// Returns false if ctx signals done.
func (le *LeaderElector) acquire(ctx context.Context) bool {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
succeeded := false
desc := le.config.Lock.Describe()
klog.Infof("attempting to acquire leader lease %v...", desc)
wait.JitterUntil(func() {
succeeded = le.tryAcquireOrRenew()
le.maybeReportTransition()
if !succeeded {
klog.V(4).Infof("failed to acquire lease %v", desc)
return
}
le.config.Lock.RecordEvent("became leader")
le.metrics.leaderOn(le.config.Name)
klog.Infof("successfully acquired lease %v", desc)
cancel()
}, le.config.RetryPeriod, JitterFactor, true, ctx.Done())
return succeeded
}

// renew loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew fails or ctx signals done.
func (le *LeaderElector) renew(ctx context.Context) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
wait.Until(func() {
timeoutCtx, timeoutCancel := context.WithTimeout(ctx, le.config.RenewDeadline)
defer timeoutCancel()
err := wait.PollImmediateUntil(le.config.RetryPeriod, func() (bool, error) {
done := make(chan bool, 1)
go func() {
defer close(done)
done <- le.tryAcquireOrRenew()
}()

select {
case <-timeoutCtx.Done():
return false, fmt.Errorf("failed to tryAcquireOrRenew %s", timeoutCtx.Err())
case result := <-done:
return result, nil
}
}, timeoutCtx.Done())

le.maybeReportTransition()
desc := le.config.Lock.Describe()
if err == nil {
klog.V(5).Infof("successfully renewed lease %v", desc)
return
}
le.config.Lock.RecordEvent("stopped leading")
le.metrics.leaderOff(le.config.Name)
klog.Infof("failed to renew lease %v: %v", desc, err)
cancel()
}, le.config.RetryPeriod, ctx.Done())

// if we hold the lease, give it up
if le.config.ReleaseOnCancel {
le.release()
}
}

// release attempts to release the leader lease if we have acquired it.
func (le *LeaderElector) release() bool {
if !le.IsLeader() {
return true
}
leaderElectionRecord := rl.LeaderElectionRecord{
LeaderTransitions: le.observedRecord.LeaderTransitions,
}
if err := le.config.Lock.Update(leaderElectionRecord); err != nil {
klog.Errorf("Failed to release lock: %v", err)
return false
}
le.observedRecord = leaderElectionRecord
le.observedTime = le.clock.Now()
return true
}

// tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired,
// else it tries to renew the lease if it has already been acquired. Returns true
// on success else returns false.
func (le *LeaderElector) tryAcquireOrRenew() bool {
now := metav1.Now()
leaderElectionRecord := rl.LeaderElectionRecord{
HolderIdentity: le.config.Lock.Identity(),
LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second),
RenewTime: now,
AcquireTime: now,
}

// 1. obtain or create the ElectionRecord
oldLeaderElectionRecord, err := le.config.Lock.Get()
if err != nil {
if !errors.IsNotFound(err) {
klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err)
return false
}
if err = le.config.Lock.Create(leaderElectionRecord); err != nil {
klog.Errorf("error initially creating leader election record: %v", err)
return false
}
le.observedRecord = leaderElectionRecord
le.observedTime = le.clock.Now()
return true
}

// 2. Record obtained, check the Identity & Time
if !reflect.DeepEqual(le.observedRecord, *oldLeaderElectionRecord) {
le.observedRecord = *oldLeaderElectionRecord
le.observedTime = le.clock.Now()
}
if len(oldLeaderElectionRecord.HolderIdentity) > 0 &&
le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
!le.IsLeader() {
klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
return false
}

// 3. We're going to try to update. The leaderElectionRecord is set to it's default
// here. Let's correct it before updating.
if le.IsLeader() {
leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime
leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions
} else {
leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1
}

// update the lock itself
if err = le.config.Lock.Update(leaderElectionRecord); err != nil {
klog.Errorf("Failed to update lock: %v", err)
return false
}
le.observedRecord = leaderElectionRecord
le.observedTime = le.clock.Now()
return true
}

func (le *LeaderElector) maybeReportTransition() {
if le.observedRecord.HolderIdentity == le.reportedLeader {
return
}
le.reportedLeader = le.observedRecord.HolderIdentity
if le.config.Callbacks.OnNewLeader != nil {
go le.config.Callbacks.OnNewLeader(le.reportedLeader)
}
}

// Check will determine if the current lease is expired by more than timeout.
func (le *LeaderElector) Check(maxTolerableExpiredLease time.Duration) error {
if !le.IsLeader() {
// Currently not concerned with the case that we are hot standby
return nil
}
// If we are more than timeout seconds after the lease duration that is past the timeout
// on the lease renew. Time to start reporting ourselves as unhealthy. We should have
// died but conditions like deadlock can prevent this. (See #70819)
if le.clock.Since(le.observedTime) > le.config.LeaseDuration+maxTolerableExpiredLease {
return fmt.Errorf("failed election to renew leadership on lease %s", le.config.Name)
}

return nil
}
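A minimal sketch of driving this loop (not part of the vendored file); the durations are illustrative, and lock is assumed to be a resourcelock.Interface built elsewhere (see the sketch after interface.go below):

import (
	"context"
	"time"

	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog"
)

func runWithLease(ctx context.Context, lock resourcelock.Interface, id string) {
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second, // illustrative values; keep LeaseDuration > RenewDeadline > JitterFactor*RetryPeriod
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				// start the controller's work here
			},
			OnStoppedLeading: func() {
				klog.Infof("%s: lost leadership", id)
			},
			OnNewLeader: func(identity string) {
				klog.Infof("observed new leader: %s", identity)
			},
		},
	})
}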
vendor/k8s.io/client-go/tools/leaderelection/metrics.go (new file, 109 lines, generated, vendored)
@@ -0,0 +1,109 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package leaderelection

import (
"sync"
)

// This file provides abstractions for setting the provider (e.g., prometheus)
// of metrics.

type leaderMetricsAdapter interface {
leaderOn(name string)
leaderOff(name string)
}

// GaugeMetric represents a single numerical value that can arbitrarily go up
// and down.
type SwitchMetric interface {
On(name string)
Off(name string)
}

type noopMetric struct{}

func (noopMetric) On(name string) {}
func (noopMetric) Off(name string) {}

// defaultLeaderMetrics expects the caller to lock before setting any metrics.
type defaultLeaderMetrics struct {
// leader's value indicates if the current process is the owner of name lease
leader SwitchMetric
}

func (m *defaultLeaderMetrics) leaderOn(name string) {
if m == nil {
return
}
m.leader.On(name)
}

func (m *defaultLeaderMetrics) leaderOff(name string) {
if m == nil {
return
}
m.leader.Off(name)
}

type noMetrics struct{}

func (noMetrics) leaderOn(name string) {}
func (noMetrics) leaderOff(name string) {}

// MetricsProvider generates various metrics used by the leader election.
type MetricsProvider interface {
NewLeaderMetric() SwitchMetric
}

type noopMetricsProvider struct{}

func (_ noopMetricsProvider) NewLeaderMetric() SwitchMetric {
return noopMetric{}
}

var globalMetricsFactory = leaderMetricsFactory{
metricsProvider: noopMetricsProvider{},
}

type leaderMetricsFactory struct {
metricsProvider MetricsProvider

onlyOnce sync.Once
}

func (f *leaderMetricsFactory) setProvider(mp MetricsProvider) {
f.onlyOnce.Do(func() {
f.metricsProvider = mp
})
}

func (f *leaderMetricsFactory) newLeaderMetrics() leaderMetricsAdapter {
mp := f.metricsProvider
if mp == (noopMetricsProvider{}) {
return noMetrics{}
}
return &defaultLeaderMetrics{
leader: mp.NewLeaderMetric(),
}
}

// SetProvider sets the metrics provider for all subsequently created work
// queues. Only the first call has an effect.
func SetProvider(metricsProvider MetricsProvider) {
globalMetricsFactory.setProvider(metricsProvider)
}
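A sketch of plugging in a real provider (not part of the vendored file); the gauge type below is hypothetical and stands in for, e.g., a Prometheus gauge vector:

// leaderGauge is a hypothetical stand-in for a real gauge implementation.
type leaderGauge struct{}

func (leaderGauge) On(name string)  { /* set gauge{lease=name} to 1 */ }
func (leaderGauge) Off(name string) { /* set gauge{lease=name} to 0 */ }

type provider struct{}

func (provider) NewLeaderMetric() leaderelection.SwitchMetric { return leaderGauge{} }

func init() {
	// Only the first SetProvider call takes effect (guarded by sync.Once above).
	leaderelection.SetProvider(provider{})
}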
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go (new file, 112 lines, generated, vendored)
@@ -0,0 +1,112 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcelock

import (
"encoding/json"
"errors"
"fmt"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// TODO: This is almost a exact replica of Endpoints lock.
// going forwards as we self host more and more components
// and use ConfigMaps as the means to pass that configuration
// data we will likely move to deprecate the Endpoints lock.

type ConfigMapLock struct {
// ConfigMapMeta should contain a Name and a Namespace of a
// ConfigMapMeta object that the LeaderElector will attempt to lead.
ConfigMapMeta metav1.ObjectMeta
Client corev1client.ConfigMapsGetter
LockConfig ResourceLockConfig
cm *v1.ConfigMap
}

// Get returns the election record from a ConfigMap Annotation
func (cml *ConfigMapLock) Get() (*LeaderElectionRecord, error) {
var record LeaderElectionRecord
var err error
cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(cml.ConfigMapMeta.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if cml.cm.Annotations == nil {
cml.cm.Annotations = make(map[string]string)
}
if recordBytes, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey]; found {
if err := json.Unmarshal([]byte(recordBytes), &record); err != nil {
return nil, err
}
}
return &record, nil
}

// Create attempts to create a LeaderElectionRecord annotation
func (cml *ConfigMapLock) Create(ler LeaderElectionRecord) error {
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Create(&v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: cml.ConfigMapMeta.Name,
Namespace: cml.ConfigMapMeta.Namespace,
Annotations: map[string]string{
LeaderElectionRecordAnnotationKey: string(recordBytes),
},
},
})
return err
}

// Update will update an existing annotation on a given resource.
func (cml *ConfigMapLock) Update(ler LeaderElectionRecord) error {
if cml.cm == nil {
return errors.New("configmap not initialized, call get or create first")
}
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(cml.cm)
return err
}

// RecordEvent in leader election while adding meta-data
func (cml *ConfigMapLock) RecordEvent(s string) {
if cml.LockConfig.EventRecorder == nil {
return
}
events := fmt.Sprintf("%v %v", cml.LockConfig.Identity, s)
cml.LockConfig.EventRecorder.Eventf(&v1.ConfigMap{ObjectMeta: cml.cm.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events)
}

// Describe is used to convert details on current resource lock
// into a string
func (cml *ConfigMapLock) Describe() string {
return fmt.Sprintf("%v/%v", cml.ConfigMapMeta.Namespace, cml.ConfigMapMeta.Name)
}

// returns the Identity of the lock
func (cml *ConfigMapLock) Identity() string {
return cml.LockConfig.Identity
}
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go (new file, 107 lines, generated, vendored)
@@ -0,0 +1,107 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcelock

import (
"encoding/json"
"errors"
"fmt"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

type EndpointsLock struct {
// EndpointsMeta should contain a Name and a Namespace of an
// Endpoints object that the LeaderElector will attempt to lead.
EndpointsMeta metav1.ObjectMeta
Client corev1client.EndpointsGetter
LockConfig ResourceLockConfig
e *v1.Endpoints
}

// Get returns the election record from a Endpoints Annotation
func (el *EndpointsLock) Get() (*LeaderElectionRecord, error) {
var record LeaderElectionRecord
var err error
el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(el.EndpointsMeta.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if el.e.Annotations == nil {
el.e.Annotations = make(map[string]string)
}
if recordBytes, found := el.e.Annotations[LeaderElectionRecordAnnotationKey]; found {
if err := json.Unmarshal([]byte(recordBytes), &record); err != nil {
return nil, err
}
}
return &record, nil
}

// Create attempts to create a LeaderElectionRecord annotation
func (el *EndpointsLock) Create(ler LeaderElectionRecord) error {
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Create(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: el.EndpointsMeta.Name,
Namespace: el.EndpointsMeta.Namespace,
Annotations: map[string]string{
LeaderElectionRecordAnnotationKey: string(recordBytes),
},
},
})
return err
}

// Update will update and existing annotation on a given resource.
func (el *EndpointsLock) Update(ler LeaderElectionRecord) error {
if el.e == nil {
return errors.New("endpoint not initialized, call get or create first")
}
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(el.e)
return err
}

// RecordEvent in leader election while adding meta-data
func (el *EndpointsLock) RecordEvent(s string) {
if el.LockConfig.EventRecorder == nil {
return
}
events := fmt.Sprintf("%v %v", el.LockConfig.Identity, s)
el.LockConfig.EventRecorder.Eventf(&v1.Endpoints{ObjectMeta: el.e.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events)
}

// Describe is used to convert details on current resource lock
// into a string
func (el *EndpointsLock) Describe() string {
return fmt.Sprintf("%v/%v", el.EndpointsMeta.Namespace, el.EndpointsMeta.Name)
}

// returns the Identity of the lock
func (el *EndpointsLock) Identity() string {
return el.LockConfig.Identity
}
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go (new file, 126 lines, generated, vendored)
@@ -0,0 +1,126 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcelock

import (
"fmt"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)

const (
LeaderElectionRecordAnnotationKey = "control-plane.alpha.kubernetes.io/leader"
EndpointsResourceLock = "endpoints"
ConfigMapsResourceLock = "configmaps"
LeasesResourceLock = "leases"
)

// LeaderElectionRecord is the record that is stored in the leader election annotation.
// This information should be used for observational purposes only and could be replaced
// with a random string (e.g. UUID) with only slight modification of this code.
// TODO(mikedanese): this should potentially be versioned
type LeaderElectionRecord struct {
// HolderIdentity is the ID that owns the lease. If empty, no one owns this lease and
// all callers may acquire. Versions of this library prior to Kubernetes 1.14 will not
// attempt to acquire leases with empty identities and will wait for the full lease
// interval to expire before attempting to reacquire. This value is set to empty when
// a client voluntarily steps down.
HolderIdentity string `json:"holderIdentity"`
LeaseDurationSeconds int `json:"leaseDurationSeconds"`
AcquireTime metav1.Time `json:"acquireTime"`
RenewTime metav1.Time `json:"renewTime"`
LeaderTransitions int `json:"leaderTransitions"`
}

// EventRecorder records a change in the ResourceLock.
type EventRecorder interface {
Eventf(obj runtime.Object, eventType, reason, message string, args ...interface{})
}

// ResourceLockConfig common data that exists across different
// resource locks
type ResourceLockConfig struct {
// Identity is the unique string identifying a lease holder across
// all participants in an election.
Identity string
// EventRecorder is optional.
EventRecorder EventRecorder
}

// Interface offers a common interface for locking on arbitrary
// resources used in leader election. The Interface is used
// to hide the details on specific implementations in order to allow
// them to change over time. This interface is strictly for use
// by the leaderelection code.
type Interface interface {
// Get returns the LeaderElectionRecord
Get() (*LeaderElectionRecord, error)

// Create attempts to create a LeaderElectionRecord
Create(ler LeaderElectionRecord) error

// Update will update and existing LeaderElectionRecord
Update(ler LeaderElectionRecord) error

// RecordEvent is used to record events
RecordEvent(string)

// Identity will return the locks Identity
Identity() string

// Describe is used to convert details on current resource lock
// into a string
Describe() string
}

// Manufacture will create a lock of a given type according to the input parameters
func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interface, coordinationClient coordinationv1.CoordinationV1Interface, rlc ResourceLockConfig) (Interface, error) {
switch lockType {
case EndpointsResourceLock:
return &EndpointsLock{
EndpointsMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
},
Client: coreClient,
LockConfig: rlc,
}, nil
case ConfigMapsResourceLock:
return &ConfigMapLock{
ConfigMapMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
},
Client: coreClient,
LockConfig: rlc,
}, nil
case LeasesResourceLock:
return &LeaseLock{
LeaseMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
},
Client: coordinationClient,
LockConfig: rlc,
}, nil
default:
return nil, fmt.Errorf("Invalid lock-type %s", lockType)
}
}
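A sketch of building a lock through New (not part of the vendored file), assuming a configured client-go clientset and using the pod hostname as an illustrative identity; namespace and lock name are placeholders:

import (
	"os"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func newLeaseLock(clientset kubernetes.Interface) (resourcelock.Interface, error) {
	id, err := os.Hostname()
	if err != nil {
		return nil, err
	}
	return resourcelock.New(
		resourcelock.LeasesResourceLock,
		"kube-system",   // illustrative namespace
		"my-controller", // illustrative lock name
		clientset.CoreV1(),
		clientset.CoordinationV1(),
		resourcelock.ResourceLockConfig{Identity: id},
	)
}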
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go (new file, 124 lines, generated, vendored)
@@ -0,0 +1,124 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcelock

import (
"errors"
"fmt"

coordinationv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
coordinationv1client "k8s.io/client-go/kubernetes/typed/coordination/v1"
)

type LeaseLock struct {
// LeaseMeta should contain a Name and a Namespace of a
// LeaseMeta object that the LeaderElector will attempt to lead.
LeaseMeta metav1.ObjectMeta
Client coordinationv1client.LeasesGetter
LockConfig ResourceLockConfig
lease *coordinationv1.Lease
}

// Get returns the election record from a Lease spec
func (ll *LeaseLock) Get() (*LeaderElectionRecord, error) {
var err error
ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Get(ll.LeaseMeta.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return LeaseSpecToLeaderElectionRecord(&ll.lease.Spec), nil
}

// Create attempts to create a Lease
func (ll *LeaseLock) Create(ler LeaderElectionRecord) error {
var err error
ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Create(&coordinationv1.Lease{
ObjectMeta: metav1.ObjectMeta{
Name: ll.LeaseMeta.Name,
Namespace: ll.LeaseMeta.Namespace,
},
Spec: LeaderElectionRecordToLeaseSpec(&ler),
})
return err
}

// Update will update an existing Lease spec.
func (ll *LeaseLock) Update(ler LeaderElectionRecord) error {
if ll.lease == nil {
return errors.New("lease not initialized, call get or create first")
}
ll.lease.Spec = LeaderElectionRecordToLeaseSpec(&ler)
var err error
ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Update(ll.lease)
return err
}

// RecordEvent in leader election while adding meta-data
func (ll *LeaseLock) RecordEvent(s string) {
if ll.LockConfig.EventRecorder == nil {
return
}
events := fmt.Sprintf("%v %v", ll.LockConfig.Identity, s)
ll.LockConfig.EventRecorder.Eventf(&coordinationv1.Lease{ObjectMeta: ll.lease.ObjectMeta}, corev1.EventTypeNormal, "LeaderElection", events)
}

// Describe is used to convert details on current resource lock
// into a string
func (ll *LeaseLock) Describe() string {
return fmt.Sprintf("%v/%v", ll.LeaseMeta.Namespace, ll.LeaseMeta.Name)
}

// returns the Identity of the lock
func (ll *LeaseLock) Identity() string {
return ll.LockConfig.Identity
}

func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElectionRecord {
holderIdentity := ""
if spec.HolderIdentity != nil {
holderIdentity = *spec.HolderIdentity
}
leaseDurationSeconds := 0
if spec.LeaseDurationSeconds != nil {
leaseDurationSeconds = int(*spec.LeaseDurationSeconds)
}
leaseTransitions := 0
if spec.LeaseTransitions != nil {
leaseTransitions = int(*spec.LeaseTransitions)
}
return &LeaderElectionRecord{
HolderIdentity: holderIdentity,
LeaseDurationSeconds: leaseDurationSeconds,
AcquireTime: metav1.Time{spec.AcquireTime.Time},
RenewTime: metav1.Time{spec.RenewTime.Time},
LeaderTransitions: leaseTransitions,
}
}

func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.LeaseSpec {
leaseDurationSeconds := int32(ler.LeaseDurationSeconds)
leaseTransitions := int32(ler.LeaderTransitions)
return coordinationv1.LeaseSpec{
HolderIdentity: &ler.HolderIdentity,
LeaseDurationSeconds: &leaseDurationSeconds,
AcquireTime: &metav1.MicroTime{ler.AcquireTime.Time},
RenewTime: &metav1.MicroTime{ler.RenewTime.Time},
LeaseTransitions: &leaseTransitions,
}
}
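For reference, a tiny sketch of the conversion round trip between the annotation-style record and the Lease spec (not part of the vendored file; the identity and duration are illustrative):

rec := &resourcelock.LeaderElectionRecord{
	HolderIdentity:       "node-a", // illustrative
	LeaseDurationSeconds: 15,
	AcquireTime:          metav1.Now(),
	RenewTime:            metav1.Now(),
}
spec := resourcelock.LeaderElectionRecordToLeaseSpec(rec)
back := resourcelock.LeaseSpecToLeaderElectionRecord(&spec)
// back carries the same identity, duration, and transition count as rec;
// note that the timestamps travel as metav1.MicroTime inside the Lease spec.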
vendor/k8s.io/kube-openapi/pkg/util/proto/document.go (17 changed lines, generated, vendored)
@@ -92,13 +92,16 @@ func NewOpenAPIData(doc *openapi_v2.Document) (Models, error) {
// We believe the schema is a reference, verify that and returns a new
// Schema
func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, error) {
// TODO(wrong): a schema with a $ref can have properties. We can ignore them (would be incomplete), but we cannot return an error.
if len(s.GetProperties().GetAdditionalProperties()) > 0 {
return nil, newSchemaError(path, "unallowed embedded type definition")
}
// TODO(wrong): a schema with a $ref can have a type. We can ignore it (would be incomplete), but we cannot return an error.
if len(s.GetType().GetValue()) > 0 {
return nil, newSchemaError(path, "definition reference can't have a type")
}

// TODO(wrong): $refs outside of the definitions are completely valid. We can ignore them (would be incomplete), but we cannot return an error.
if !strings.HasPrefix(s.GetXRef(), "#/definitions/") {
return nil, newSchemaError(path, "unallowed reference to non-definition %q", s.GetXRef())
}
@@ -127,6 +130,7 @@ func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error)
return nil, newSchemaError(path, "invalid object type")
}
var sub Schema
// TODO(incomplete): this misses the boolean case as AdditionalProperties is a bool+schema sum type.
if s.GetAdditionalProperties().GetSchema() == nil {
sub = &Arbitrary{
BaseSchema: d.parseBaseSchema(s, path),
@@ -157,6 +161,7 @@ func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema,
case Number: // do nothing
case Integer: // do nothing
case Boolean: // do nothing
// TODO(wrong): this misses "null". Would skip the null case (would be incomplete), but we cannot return an error.
default:
return nil, newSchemaError(path, "Unknown primitive type: %q", t)
}
@@ -175,6 +180,8 @@ func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, erro
return nil, newSchemaError(path, `array should have type "array"`)
}
if len(s.GetItems().GetSchema()) != 1 {
// TODO(wrong): Items can have multiple elements. We can ignore Items then (would be incomplete), but we cannot return an error.
// TODO(wrong): "type: array" witohut any items at all is completely valid.
return nil, newSchemaError(path, "array should have exactly one sub-item")
}
sub, err := d.ParseSchema(s.GetItems().GetSchema()[0], path)
@@ -227,6 +234,8 @@ func (d *Definitions) parseArbitrary(s *openapi_v2.Schema, path *Path) (Schema,
// this function is public, it doesn't leak through the interface.
func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) {
if s.GetXRef() != "" {
// TODO(incomplete): ignoring the rest of s is wrong. As long as there are no conflict, everything from s must be considered
// Reference: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#path-item-object
return d.parseReference(s, path)
}
objectTypes := s.GetType().GetValue()
@@ -234,11 +243,15 @@ func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, err
case 0:
// in the OpenAPI schema served by older k8s versions, object definitions created from structs did not include
// the type:object property (they only included the "properties" property), so we need to handle this case
// TODO: validate that we ever published empty, non-nil properties. JSON roundtripping nils them.
if s.GetProperties() != nil {
// TODO(wrong): when verifying a non-object later against this, it will be rejected as invalid type.
// TODO(CRD validation schema publishing): we have to filter properties (empty or not) if type=object is not given
return d.parseKind(s, path)
} else {
// Definition has no type and no properties. Treat it as an arbitrary value
// TODO: what if it has additionalProperties or patternProperties?
// TODO(incomplete): what if it has additionalProperties=false or patternProperties?
// ANSWER: parseArbitrary is less strict than it has to be with patternProperties (which is ignored). So this is correct (of course not complete).
return d.parseArbitrary(s, path)
}
case 1:
@@ -256,6 +269,8 @@ func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, err
return d.parsePrimitive(s, path)
default:
// the OpenAPI generator never generates (nor it ever did in the past) OpenAPI type definitions with multiple types
// TODO(wrong): this is rejecting a completely valid OpenAPI spec
// TODO(CRD validation schema publishing): filter these out
return nil, newSchemaError(path, "definitions with multiple types aren't supported")
}
}