Bumping k8s dependencies to 1.13
103 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD generated vendored
@@ -12,8 +12,14 @@ go_library(
"doc.go",
"fake_kuberuntime_manager.go",
"helpers.go",
"helpers_linux.go",
"helpers_unsupported.go",
"helpers_windows.go",
"instrumented_services.go",
"kuberuntime_container.go",
"kuberuntime_container_linux.go",
"kuberuntime_container_unsupported.go",
"kuberuntime_container_windows.go",
"kuberuntime_gc.go",
"kuberuntime_image.go",
"kuberuntime_logs.go",
@@ -22,53 +28,7 @@ go_library(
"labels.go",
"legacy.go",
"security_context.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"helpers_unsupported.go",
"kuberuntime_container_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"helpers_unsupported.go",
"kuberuntime_container_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"helpers_unsupported.go",
"kuberuntime_container_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"helpers_unsupported.go",
"kuberuntime_container_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"helpers_linux.go",
"kuberuntime_container_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"helpers_unsupported.go",
"kuberuntime_container_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"helpers_unsupported.go",
"kuberuntime_container_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"helpers_unsupported.go",
"kuberuntime_container_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"helpers_unsupported.go",
"kuberuntime_container_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"helpers_unsupported.go",
"kuberuntime_container_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"helpers_windows.go",
"kuberuntime_container_windows.go",
],
"//conditions:default": [],
}),
],
importpath = "k8s.io/kubernetes/pkg/kubelet/kuberuntime",
deps = [
"//pkg/api/legacyscheme:go_default_library",
@@ -85,6 +45,7 @@ go_library(
"//pkg/kubelet/lifecycle:go_default_library",
"//pkg/kubelet/metrics:go_default_library",
"//pkg/kubelet/prober/results:go_default_library",
"//pkg/kubelet/runtimeclass:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/kubelet/util/cache:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
@@ -94,20 +55,20 @@ go_library(
"//pkg/util/selinux:go_default_library",
"//pkg/util/tail:go_default_library",
"//pkg/util/version:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/tools/reference:go_default_library",
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
"//vendor/github.com/armon/circbuf:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/tools/reference:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:linux": [
"//pkg/kubelet/qos:go_default_library",
@@ -123,8 +84,10 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"helpers_linux_test.go",
"helpers_test.go",
"instrumented_services_test.go",
"kuberuntime_container_linux_test.go",
"kuberuntime_container_test.go",
"kuberuntime_gc_test.go",
"kuberuntime_image_test.go",
@@ -133,12 +96,7 @@ go_test(
"labels_test.go",
"legacy_test.go",
"security_context_test.go",
] + select({
"@io_bazel_rules_go//go/platform:linux": [
"kuberuntime_container_linux_test.go",
],
"//conditions:default": [],
}),
],
embed = [":go_default_library"],
deps = [
"//pkg/credentialprovider:go_default_library",
@@ -149,19 +107,22 @@ go_test(
"//pkg/kubelet/container/testing:go_default_library",
"//pkg/kubelet/lifecycle:go_default_library",
"//pkg/kubelet/metrics:go_default_library",
"//pkg/kubelet/runtimeclass:go_default_library",
"//pkg/kubelet/runtimeclass/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
"//vendor/github.com/golang/mock/gomock:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/github.com/stretchr/testify/require:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)
2 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go generated vendored
@@ -21,6 +21,7 @@ import (
"time"

cadvisorapi "github.com/google/cadvisor/info/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
@@ -74,6 +75,7 @@ func NewFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS
kubeRuntimeManager := &kubeGenericRuntimeManager{
recorder: recorder,
cpuCFSQuota: false,
cpuCFSQuotaPeriod: metav1.Duration{Duration: time.Microsecond * 100},
livenessManager: proberesults.NewManager(),
containerRefManager: kubecontainer.NewRefManager(),
machineInfo: machineInfo,
2 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go generated vendored
@@ -79,6 +79,8 @@ func toRuntimeProtocol(protocol v1.Protocol) runtimeapi.Protocol {
return runtimeapi.Protocol_TCP
case v1.ProtocolUDP:
return runtimeapi.Protocol_UDP
case v1.ProtocolSCTP:
return runtimeapi.Protocol_SCTP
}

glog.Warningf("Unknown protocol %q: defaulting to TCP", protocol)
18 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers_linux.go generated vendored
@@ -18,6 +18,11 @@ limitations under the License.

package kuberuntime

import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
kubefeatures "k8s.io/kubernetes/pkg/features"
)

const (
// Taken from lmctfy https://github.com/google/lmctfy/blob/master/lmctfy/controllers/cpu_controller.cc
minShares = 2
@@ -25,7 +30,7 @@ const (
milliCPUToCPU = 1000

// 100000 is equivalent to 100ms
quotaPeriod = 100 * minQuotaPeriod
quotaPeriod = 100000
minQuotaPeriod = 1000
)

@@ -44,21 +49,22 @@ func milliCPUToShares(milliCPU int64) int64 {
}

// milliCPUToQuota converts milliCPU to CFS quota and period values
func milliCPUToQuota(milliCPU int64) (quota int64, period int64) {
func milliCPUToQuota(milliCPU int64, period int64) (quota int64) {
// CFS quota is measured in two values:
// - cfs_period_us=100ms (the amount of time to measure usage across)
// - cfs_quota=20ms (the amount of cpu time allowed to be used across a period)
// so in the above example, you are limited to 20% of a single CPU
// for multi-cpu environments, you just scale equivalent amounts
// see https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt for details
if milliCPU == 0 {
return
}

// we set the period to 100ms by default
period = quotaPeriod
if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUCFSQuotaPeriod) {
period = quotaPeriod
}

// we then convert your milliCPU to a value normalized over a period
quota = (milliCPU * quotaPeriod) / milliCPUToCPU
quota = (milliCPU * period) / milliCPUToCPU

// quota needs to be a minimum of 1ms.
if quota < minQuotaPeriod {
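The milliCPUToQuota change above now takes the CFS period as a parameter and only falls back to the hard-coded 100ms default when the CPUCFSQuotaPeriod feature gate is off. A minimal standalone sketch of the same arithmetic, using the constants from this file (the helper name quotaForLimit is illustrative, not part of the diff):

package main

import "fmt"

const (
	milliCPUToCPU  = 1000
	quotaPeriod    = 100000 // 100ms expressed in microseconds
	minQuotaPeriod = 1000   // 1ms minimum quota
)

// quotaForLimit mirrors the quota math above: scale the period by the
// requested CPU fraction, then clamp to the 1ms minimum.
func quotaForLimit(milliCPU, period int64) int64 {
	if milliCPU == 0 {
		return 0
	}
	quota := (milliCPU * period) / milliCPUToCPU
	if quota < minQuotaPeriod {
		quota = minQuotaPeriod
	}
	return quota
}

func main() {
	fmt.Println(quotaForLimit(500, quotaPeriod)) // 50000: a 500m limit may use half of each 100ms period
	fmt.Println(quotaForLimit(5, quotaPeriod))   // 1000: tiny limits round up to the 1ms minimum
}

These two values match the "500 input 50k quota" and "5 input default quota" cases in the new helpers_linux_test.go below.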
204 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers_linux_test.go generated vendored Normal file
@@ -0,0 +1,204 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kuberuntime

import (
"testing"

utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/kubernetes/pkg/features"
)

func TestMilliCPUToQuota(t *testing.T) {
for _, testCase := range []struct {
msg string
input int64
expected int64
period uint64
}{
{
msg: "all-zero",
input: int64(0),
expected: int64(0),
period: uint64(0),
},
{
msg: "5 input default quota and period",
input: int64(5),
expected: int64(1000),
period: uint64(100000),
},
{
msg: "9 input default quota and period",
input: int64(9),
expected: int64(1000),
period: uint64(100000),
},
{
msg: "10 input default quota and period",
input: int64(10),
expected: int64(1000),
period: uint64(100000),
},
{
msg: "200 input 20k quota and default period",
input: int64(200),
expected: int64(20000),
period: uint64(100000),
},
{
msg: "500 input 50k quota and default period",
input: int64(500),
expected: int64(50000),
period: uint64(100000),
},
{
msg: "1k input 100k quota and default period",
input: int64(1000),
expected: int64(100000),
period: uint64(100000),
},
{
msg: "1500 input 150k quota and default period",
input: int64(1500),
expected: int64(150000),
period: uint64(100000),
}} {
t.Run(testCase.msg, func(t *testing.T) {
quota := milliCPUToQuota(testCase.input, int64(testCase.period))
if quota != testCase.expected {
t.Errorf("Input %v and %v, expected quota %v, but got quota %v", testCase.input, testCase.period, testCase.expected, quota)
}
})
}
}

func TestMilliCPUToQuotaWithCustomCPUCFSQuotaPeriod(t *testing.T) {
utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CPUCFSQuotaPeriod, true)
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CPUCFSQuotaPeriod, false)

for _, testCase := range []struct {
msg string
input int64
expected int64
period uint64
}{
{
msg: "all-zero",
input: int64(0),
expected: int64(0),
period: uint64(0),
},
{
msg: "5 input default quota and period",
input: int64(5),
expected: minQuotaPeriod,
period: uint64(100000),
},
{
msg: "9 input default quota and period",
input: int64(9),
expected: minQuotaPeriod,
period: uint64(100000),
},
{
msg: "10 input default quota and period",
input: int64(10),
expected: minQuotaPeriod,
period: uint64(100000),
},
{
msg: "200 input 20k quota and default period",
input: int64(200),
expected: int64(20000),
period: uint64(100000),
},
{
msg: "500 input 50k quota and default period",
input: int64(500),
expected: int64(50000),
period: uint64(100000),
},
{
msg: "1k input 100k quota and default period",
input: int64(1000),
expected: int64(100000),
period: uint64(100000),
},
{
msg: "1500 input 150k quota and default period",
input: int64(1500),
expected: int64(150000),
period: uint64(100000),
},
{
msg: "5 input 10k period and default quota expected",
input: int64(5),
period: uint64(10000),
expected: minQuotaPeriod,
},
{
msg: "5 input 5k period and default quota expected",
input: int64(5),
period: uint64(5000),
expected: minQuotaPeriod,
},
{
msg: "9 input 10k period and default quota expected",
input: int64(9),
period: uint64(10000),
expected: minQuotaPeriod,
},
{
msg: "10 input 200k period and 2000 quota expected",
input: int64(10),
period: uint64(200000),
expected: int64(2000),
},
{
msg: "200 input 200k period and 40k quota",
input: int64(200),
period: uint64(200000),
expected: int64(40000),
},
{
msg: "500 input 20k period and 20k expected quota",
input: int64(500),
period: uint64(20000),
expected: int64(10000),
},
{
msg: "1000 input 10k period and 10k expected quota",
input: int64(1000),
period: uint64(10000),
expected: int64(10000),
},
{
msg: "1500 input 5000 period and 7500 expected quota",
input: int64(1500),
period: uint64(5000),
expected: int64(7500),
}} {
t.Run(testCase.msg, func(t *testing.T) {
quota := milliCPUToQuota(testCase.input, int64(testCase.period))
if quota != testCase.expected {
t.Errorf("Input %v and %v, expected quota %v, but got quota %v", testCase.input, testCase.period, testCase.expected, quota)
}
})
}
}
4 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/instrumented_services.go generated vendored
@@ -176,11 +176,11 @@ func (in instrumentedRuntimeService) Attach(req *runtimeapi.AttachRequest) (*run
return resp, err
}

func (in instrumentedRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (string, error) {
func (in instrumentedRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) {
const operation = "run_podsandbox"
defer recordOperation(operation, time.Now())

out, err := in.service.RunPodSandbox(config)
out, err := in.service.RunPodSandbox(config, runtimeHandler)
recordError(operation, err)
return out, err
}
12 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go generated vendored
@@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"errors"
"fmt"
"io"
@@ -51,6 +52,7 @@ import (
var (
ErrCreateContainerConfig = errors.New("CreateContainerConfigError")
ErrCreateContainer = errors.New("CreateContainerError")
ErrPreStartHook = errors.New("PreStartHookError")
ErrPostStartHook = errors.New("PostStartHookError")
)

@@ -122,8 +124,8 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
}
err = m.internalLifecycle.PreStartContainer(pod, container, containerID)
if err != nil {
m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToStartContainer, "Internal PreStartContainer hook failed: %v", err)
return "Internal PreStartContainer hook failed", err
m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Internal PreStartContainer hook failed: %v", grpc.ErrorDesc(err))
return grpc.ErrorDesc(err), ErrPreStartHook
}
m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.CreatedContainer, "Created container")

@@ -366,7 +368,7 @@ func getTerminationMessage(status *runtimeapi.ContainerStatus, terminationMessag
func (m *kubeGenericRuntimeManager) readLastStringFromContainerLogs(path string) string {
value := int64(kubecontainer.MaxContainerTerminationMessageLogLines)
buf, _ := circbuf.NewBuffer(kubecontainer.MaxContainerTerminationMessageLogLength)
if err := m.ReadLogs(path, "", &v1.PodLogOptions{TailLines: &value}, buf, buf); err != nil {
if err := m.ReadLogs(context.Background(), path, "", &v1.PodLogOptions{TailLines: &value}, buf, buf); err != nil {
return fmt.Sprintf("Error on reading termination message from logs: %v", err)
}
return buf.String()
@@ -730,13 +732,13 @@ func findNextInitContainerToRun(pod *v1.Pod, podStatus *kubecontainer.PodStatus)
}

// GetContainerLogs returns logs of a specific container.
func (m *kubeGenericRuntimeManager) GetContainerLogs(pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
func (m *kubeGenericRuntimeManager) GetContainerLogs(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
status, err := m.runtimeService.ContainerStatus(containerID.ID)
if err != nil {
glog.V(4).Infof("failed to get container status for %v: %v", containerID.String(), err)
return fmt.Errorf("Unable to retrieve container logs for %v", containerID.String())
}
return m.ReadLogs(status.GetLogPath(), containerID.ID, logOptions, stdout, stderr)
return m.ReadLogs(ctx, status.GetLogPath(), containerID.ID, logOptions, stdout, stderr)
}

// GetExec gets the endpoint the runtime will serve the exec request from.
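The startContainer hunk above replaces the free-form error return with a typed sentinel (ErrPreStartHook) plus the gRPC error description in the recorded event. A small sketch of that sentinel-error pattern; runPreStartHook and startContainerStep are illustrative stand-ins, not kubelet functions:

package main

import (
	"errors"
	"fmt"
)

// Sentinel declared the same way as ErrPreStartHook in the diff.
var ErrPreStartHook = errors.New("PreStartHookError")

func runPreStartHook() error {
	return fmt.Errorf("device plugin not ready") // stand-in failure
}

// startContainerStep returns a human-readable message for the event plus
// the sentinel, so callers can branch on the error identity.
func startContainerStep() (string, error) {
	if err := runPreStartHook(); err != nil {
		return err.Error(), ErrPreStartHook
	}
	return "", nil
}

func main() {
	msg, err := startContainerStep()
	fmt.Println(msg, err == ErrPreStartHook) // device plugin not ready true
}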
5 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go generated vendored
@@ -19,6 +19,8 @@ limitations under the License.
package kuberuntime

import (
"time"

"k8s.io/api/core/v1"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/qos"
@@ -65,7 +67,8 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C
if m.cpuCFSQuota {
// if cpuLimit.Amount is nil, then the appropriate default value is returned
// to allow full usage of cpu resource.
cpuQuota, cpuPeriod := milliCPUToQuota(cpuLimit.MilliValue())
cpuPeriod := int64(m.cpuCFSQuotaPeriod.Duration / time.Microsecond)
cpuQuota := milliCPUToQuota(cpuLimit.MilliValue(), cpuPeriod)
lc.Resources.CpuQuota = cpuQuota
lc.Resources.CpuPeriod = cpuPeriod
}
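For the hunk above: the configured period arrives as a metav1.Duration and has to be converted to integer microseconds before the CRI resource fields are filled. A hedged sketch of just that conversion, assuming the 100ms default mentioned in the kuberuntime_manager.go comment:

package main

import (
	"fmt"
	"time"
)

func main() {
	period := 100 * time.Millisecond              // default value of cpuCFSQuotaPeriod
	cpuPeriod := int64(period / time.Microsecond) // same expression as in the diff
	fmt.Println(cpuPeriod)                        // 100000, i.e. cpu.cfs_period_us = 100ms
}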
12 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go generated vendored
@@ -64,6 +64,8 @@ func TestGenerateContainerConfig(t *testing.T) {
_, imageService, m, err := createTestRuntimeManager()
assert.NoError(t, err)

runAsUser := int64(1000)
runAsGroup := int64(2000)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
@@ -78,6 +80,10 @@ func TestGenerateContainerConfig(t *testing.T) {
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{"testCommand"},
WorkingDir: "testWorkingDir",
SecurityContext: &v1.SecurityContext{
RunAsUser: &runAsUser,
RunAsGroup: &runAsGroup,
},
},
},
},
@@ -87,8 +93,10 @@ func TestGenerateContainerConfig(t *testing.T) {
containerConfig, _, err := m.generateContainerConfig(&pod.Spec.Containers[0], pod, 0, "", pod.Spec.Containers[0].Image, kubecontainer.ContainerTypeRegular)
assert.NoError(t, err)
assert.Equal(t, expectedConfig, containerConfig, "generate container config for kubelet runtime v1.")
assert.Equal(t, runAsUser, containerConfig.GetLinux().GetSecurityContext().GetRunAsUser().GetValue(), "RunAsUser should be set")
assert.Equal(t, runAsGroup, containerConfig.GetLinux().GetSecurityContext().GetRunAsGroup().GetValue(), "RunAsGroup should be set")

runAsUser := int64(0)
runAsRoot := int64(0)
runAsNonRootTrue := true
podWithContainerSecurityContext := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@@ -106,7 +114,7 @@ func TestGenerateContainerConfig(t *testing.T) {
WorkingDir: "testWorkingDir",
SecurityContext: &v1.SecurityContext{
RunAsNonRoot: &runAsNonRootTrue,
RunAsUser: &runAsUser,
RunAsUser: &runAsRoot,
},
},
},
5 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_logs.go generated vendored
@@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"io"
"time"

@@ -27,9 +28,9 @@ import (
// ReadLogs read the container log and redirect into stdout and stderr.
// Note that containerID is only needed when following the log, or else
// just pass in empty string "".
func (m *kubeGenericRuntimeManager) ReadLogs(path, containerID string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer) error {
func (m *kubeGenericRuntimeManager) ReadLogs(ctx context.Context, path, containerID string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer) error {
// Convert v1.PodLogOptions into internal log options.
opts := logs.NewLogOptions(apiOpts, time.Now())

return logs.ReadLogs(path, containerID, opts, m.runtimeService, stdout, stderr)
return logs.ReadLogs(ctx, path, containerID, opts, m.runtimeService, stdout, stderr)
}
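ReadLogs now takes a context.Context so a caller can stop a follow-style read; waitLogs in logs/logs.go (further down in this commit) gains a matching ctx.Done() case. A minimal illustration of the cancellation pattern this enables; followLogs here is a stand-in for the kubelet call, not part of the vendored code:

package main

import (
	"context"
	"fmt"
	"time"
)

// followLogs blocks until the context is cancelled, mimicking the shape of
// the ctx.Done() branch added to waitLogs.
func followLogs(ctx context.Context) error {
	for {
		select {
		case <-ctx.Done():
			return fmt.Errorf("context cancelled")
		case <-time.After(50 * time.Millisecond):
			// pretend we are waiting for the next log write
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	fmt.Println(followLogs(ctx)) // returns "context cancelled" once the timeout fires
}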
35 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go generated vendored
@@ -42,6 +42,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/runtimeclass"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/cache"
"k8s.io/kubernetes/pkg/kubelet/util/format"
@@ -98,6 +99,9 @@ type kubeGenericRuntimeManager struct {
// If true, enforce container cpu limits with CFS quota support
cpuCFSQuota bool

// CPUCFSQuotaPeriod sets the CPU CFS quota period value, cpu.cfs_period_us, defaults to 100ms
cpuCFSQuotaPeriod metav1.Duration

// wrapped image puller.
imagePuller images.ImageManager

@@ -116,6 +120,9 @@ type kubeGenericRuntimeManager struct {

// A shim to legacy functions for backward compatibility.
legacyLogProvider LegacyLogProvider

// Manage RuntimeClass resources.
runtimeClassManager *runtimeclass.Manager
}

type KubeGenericRuntime interface {
@@ -146,14 +153,17 @@ func NewKubeGenericRuntimeManager(
imagePullQPS float32,
imagePullBurst int,
cpuCFSQuota bool,
cpuCFSQuotaPeriod metav1.Duration,
runtimeService internalapi.RuntimeService,
imageService internalapi.ImageManagerService,
internalLifecycle cm.InternalContainerLifecycle,
legacyLogProvider LegacyLogProvider,
runtimeClassManager *runtimeclass.Manager,
) (KubeGenericRuntime, error) {
kubeRuntimeManager := &kubeGenericRuntimeManager{
recorder: recorder,
cpuCFSQuota: cpuCFSQuota,
cpuCFSQuotaPeriod: cpuCFSQuotaPeriod,
seccompProfileRoot: seccompProfileRoot,
livenessManager: livenessManager,
containerRefManager: containerRefManager,
@@ -165,6 +175,7 @@ func NewKubeGenericRuntimeManager(
keyring: credentialprovider.NewDockerKeyring(),
internalLifecycle: internalLifecycle,
legacyLogProvider: legacyLogProvider,
runtimeClassManager: runtimeClassManager,
}

typedVersion, err := kubeRuntimeManager.runtimeService.Version(kubeRuntimeAPIVersion)
@@ -510,7 +521,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {
if kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
message := fmt.Sprintf("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
glog.Info(message)
glog.V(3).Infof(message)
changes.ContainersToStart = append(changes.ContainersToStart, idx)
}
continue
@@ -754,7 +765,7 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Contain
return false, "", nil
}

glog.Infof("checking backoff for container %q in pod %q", container.Name, format.Pod(pod))
glog.V(3).Infof("checking backoff for container %q in pod %q", container.Name, format.Pod(pod))
// Use the finished time of the latest exited container as the start point to calculate whether to do back-off.
ts := cStatus.FinishedAt
// backOff requires a unique key to identify the container.
@@ -764,7 +775,7 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Contain
m.recorder.Eventf(ref, v1.EventTypeWarning, events.BackOffStartContainer, "Back-off restarting failed container")
}
err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(key), container.Name, format.Pod(pod))
glog.Infof("%s", err.Error())
glog.V(3).Infof("%s", err.Error())
return true, err.Error(), kubecontainer.ErrCrashLoopBackOff
}

@@ -803,24 +814,6 @@ func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *v1.Pod, runningPo
return
}

// isHostNetwork checks whether the pod is running in host-network mode.
func (m *kubeGenericRuntimeManager) isHostNetwork(podSandBoxID string, pod *v1.Pod) (bool, error) {
if pod != nil {
return kubecontainer.IsHostNetworkPod(pod), nil
}

podStatus, err := m.runtimeService.PodSandboxStatus(podSandBoxID)
if err != nil {
return false, err
}

if podStatus.GetLinux().GetNamespaces().GetOptions().GetNetwork() == runtimeapi.NamespaceMode_NODE {
return true, nil
}

return false, nil
}

// GetPodStatus retrieves the status of the pod, including the
// information of all containers in the pod that are visible in Runtime.
func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
14 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go generated vendored
@@ -50,7 +50,16 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32
return "", message, err
}

podSandBoxID, err := m.runtimeService.RunPodSandbox(podSandboxConfig)
runtimeHandler := ""
if utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClass) && m.runtimeClassManager != nil {
runtimeHandler, err = m.runtimeClassManager.LookupRuntimeHandler(pod.Spec.RuntimeClassName)
if err != nil {
message := fmt.Sprintf("CreatePodSandbox for pod %q failed: %v", format.Pod(pod), err)
return "", message, err
}
}

podSandBoxID, err := m.runtimeService.RunPodSandbox(podSandboxConfig, runtimeHandler)
if err != nil {
message := fmt.Sprintf("CreatePodSandbox for pod %q failed: %v", format.Pod(pod), err)
glog.Error(message)
@@ -152,6 +161,9 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) (
if sc.RunAsUser != nil {
lc.SecurityContext.RunAsUser = &runtimeapi.Int64Value{Value: int64(*sc.RunAsUser)}
}
if sc.RunAsGroup != nil {
lc.SecurityContext.RunAsGroup = &runtimeapi.Int64Value{Value: int64(*sc.RunAsGroup)}
}
lc.SecurityContext.NamespaceOptions = namespacesForPod(pod)

if sc.FSGroup != nil {
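The createPodSandbox hunk above resolves the pod's RuntimeClassName to a CRI runtime handler before calling RunPodSandbox, and only when the RuntimeClass feature gate is enabled. A hedged sketch of that lookup shape, with a map-backed resolver standing in for runtimeclass.Manager (the "gvisor"/"runsc" mapping is only an example, not from the diff):

package main

import "fmt"

// handlers stands in for the RuntimeClass objects the real manager watches.
var handlers = map[string]string{
	"gvisor": "runsc",
}

// lookupRuntimeHandler mirrors the behaviour relied on above: nil or empty
// means "use the default handler", an unknown name is an error.
func lookupRuntimeHandler(runtimeClassName *string) (string, error) {
	if runtimeClassName == nil || *runtimeClassName == "" {
		return "", nil
	}
	h, ok := handlers[*runtimeClassName]
	if !ok {
		return "", fmt.Errorf("RuntimeClass %q not found", *runtimeClassName)
	}
	return h, nil
}

func main() {
	rc := "gvisor"
	fmt.Println(lookupRuntimeHandler(&rc)) // runsc <nil>
	fmt.Println(lookupRuntimeHandler(nil)) // "" <nil> -> default runtime handler
}

The three cases in the new TestCreatePodSandbox_RuntimeClass below (unspecified, valid, missing) exercise exactly these branches.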
82 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_sandbox_test.go generated vendored
@@ -22,31 +22,24 @@ import (
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/kubernetes/pkg/features"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/runtimeclass"
rctest "k8s.io/kubernetes/pkg/kubelet/runtimeclass/testing"
"k8s.io/utils/pointer"
)

// TestCreatePodSandbox tests creating sandbox and its corresponding pod log directory.
func TestCreatePodSandbox(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "busybox",
ImagePullPolicy: v1.PullIfNotPresent,
},
},
},
}
require.NoError(t, err)
pod := newTestPod()

fakeOS := m.osInterface.(*containertest.FakeOS)
fakeOS.MkdirAllFn = func(path string, perm os.FileMode) error {
@@ -63,3 +56,60 @@ func TestCreatePodSandbox(t *testing.T) {
assert.Equal(t, len(sandboxes), 1)
// TODO Check pod sandbox configuration
}

// TestCreatePodSandbox_RuntimeClass tests creating sandbox with RuntimeClasses enabled.
func TestCreatePodSandbox_RuntimeClass(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.RuntimeClass, true)()

rcm := runtimeclass.NewManager(rctest.NewPopulatedDynamicClient())
defer rctest.StartManagerSync(t, rcm)()

fakeRuntime, _, m, err := createTestRuntimeManager()
require.NoError(t, err)
m.runtimeClassManager = rcm

tests := map[string]struct {
rcn *string
expectedHandler string
expectError bool
}{
"unspecified RuntimeClass": {rcn: nil, expectedHandler: ""},
"valid RuntimeClass": {rcn: pointer.StringPtr(rctest.SandboxRuntimeClass), expectedHandler: rctest.SandboxRuntimeHandler},
"missing RuntimeClass": {rcn: pointer.StringPtr("phantom"), expectError: true},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
pod := newTestPod()
pod.Spec.RuntimeClassName = test.rcn

id, _, err := m.createPodSandbox(pod, 1)
if test.expectError {
assert.Error(t, err)
assert.Contains(t, fakeRuntime.Called, "RunPodSandbox")
} else {
assert.NoError(t, err)
assert.Contains(t, fakeRuntime.Called, "RunPodSandbox")
assert.Equal(t, test.expectedHandler, fakeRuntime.Sandboxes[id].RuntimeHandler)
}
})
}
}

func newTestPod() *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "busybox",
ImagePullPolicy: v1.PullIfNotPresent,
},
},
},
}
}
8 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/BUILD generated vendored
@@ -9,10 +9,10 @@ go_library(
"//pkg/kubelet/apis/cri:go_default_library",
"//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
"//pkg/util/tail:go_default_library",
"//vendor/github.com/docker/docker/pkg/jsonlog:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog:go_default_library",
"//vendor/github.com/fsnotify/fsnotify:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
],
)

@@ -22,9 +22,9 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
],
)
11 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/logs.go generated vendored
@@ -19,6 +19,7 @@ package logs
import (
"bufio"
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
@@ -27,7 +28,7 @@ import (
"os"
"time"

"github.com/docker/docker/pkg/jsonlog"
"github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog"
"github.com/fsnotify/fsnotify"
"github.com/golang/glog"

@@ -266,7 +267,7 @@ func (w *logWriter) write(msg *logMessage) error {
// ReadLogs read the container log and redirect into stdout and stderr.
// Note that containerID is only needed when following the log, or else
// just pass in empty string "".
func ReadLogs(path, containerID string, opts *LogOptions, runtimeService internalapi.RuntimeService, stdout, stderr io.Writer) error {
func ReadLogs(ctx context.Context, path, containerID string, opts *LogOptions, runtimeService internalapi.RuntimeService, stdout, stderr io.Writer) error {
f, err := os.Open(path)
if err != nil {
return fmt.Errorf("failed to open log file %q: %v", path, err)
@@ -317,7 +318,7 @@ func ReadLogs(path, containerID string, opts *LogOptions, runtimeService interna
}
}
// Wait until the next log change.
if found, err := waitLogs(containerID, watcher, runtimeService); !found {
if found, err := waitLogs(ctx, containerID, watcher, runtimeService); !found {
return err
}
continue
@@ -371,7 +372,7 @@ func isContainerRunning(id string, r internalapi.RuntimeService) (bool, error) {

// waitLogs wait for the next log write. It returns a boolean and an error. The boolean
// indicates whether a new log is found; the error is error happens during waiting new logs.
func waitLogs(id string, w *fsnotify.Watcher, runtimeService internalapi.RuntimeService) (bool, error) {
func waitLogs(ctx context.Context, id string, w *fsnotify.Watcher, runtimeService internalapi.RuntimeService) (bool, error) {
// no need to wait if the pod is not running
if running, err := isContainerRunning(id, runtimeService); !running {
return false, err
@@ -379,6 +380,8 @@ func waitLogs(id string, w *fsnotify.Watcher, runtimeService internalapi.Runtime
errRetry := 5
for {
select {
case <-ctx.Done():
return false, fmt.Errorf("context cancelled")
case e := <-w.Events:
switch e.Op {
case fsnotify.Write:
11 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go generated vendored
@@ -30,7 +30,10 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Po
effectiveSc := securitycontext.DetermineEffectiveSecurityContext(pod, container)
synthesized := convertToRuntimeSecurityContext(effectiveSc)
if synthesized == nil {
synthesized = &runtimeapi.LinuxContainerSecurityContext{}
synthesized = &runtimeapi.LinuxContainerSecurityContext{
MaskedPaths: securitycontext.ConvertToRuntimeMaskedPaths(effectiveSc.ProcMount),
ReadonlyPaths: securitycontext.ConvertToRuntimeReadonlyPaths(effectiveSc.ProcMount),
}
}

// set SeccompProfilePath.
@@ -67,6 +70,9 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Po

synthesized.NoNewPrivs = securitycontext.AddNoNewPrivileges(effectiveSc)

synthesized.MaskedPaths = securitycontext.ConvertToRuntimeMaskedPaths(effectiveSc.ProcMount)
synthesized.ReadonlyPaths = securitycontext.ConvertToRuntimeReadonlyPaths(effectiveSc.ProcMount)

return synthesized
}

@@ -108,6 +114,9 @@ func convertToRuntimeSecurityContext(securityContext *v1.SecurityContext) *runti
if securityContext.RunAsUser != nil {
sc.RunAsUser = &runtimeapi.Int64Value{Value: int64(*securityContext.RunAsUser)}
}
if securityContext.RunAsGroup != nil {
sc.RunAsGroup = &runtimeapi.Int64Value{Value: int64(*securityContext.RunAsGroup)}
}
if securityContext.Privileged != nil {
sc.Privileged = *securityContext.Privileged
}