Bumping k8s dependencies to 1.13
vendor/k8s.io/kubernetes/test/e2e/auth/BUILD (generated, vendored): 50 lines changed
@@ -12,45 +12,49 @@ go_library(
        "certificates.go",
        "framework.go",
        "metadata_concealment.go",
        "node_authn.go",
        "node_authz.go",
        "pod_security_policy.go",
        "service_accounts.go",
    ],
    importpath = "k8s.io/kubernetes/test/e2e/auth",
    deps = [
        "//pkg/master/ports:go_default_library",
        "//pkg/security/apparmor:go_default_library",
        "//pkg/security/podsecuritypolicy/seccomp:go_default_library",
        "//pkg/security/podsecuritypolicy/util:go_default_library",
        "//pkg/util/pointer:go_default_library",
        "//plugin/pkg/admission/serviceaccount:go_default_library",
        "//staging/src/k8s.io/api/apps/v1:go_default_library",
        "//staging/src/k8s.io/api/batch/v1:go_default_library",
        "//staging/src/k8s.io/api/certificates/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
        "//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
        "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
        "//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/util/cert:go_default_library",
        "//test/e2e/common:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/utils:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/github.com/evanphx/json-patch:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/batch/v1:go_default_library",
        "//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
        "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
        "//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/util/cert:go_default_library",
        "//vendor/k8s.io/utils/pointer:go_default_library",
    ],
)
vendor/k8s.io/kubernetes/test/e2e/auth/audit.go (generated, vendored): 931 lines changed
File diff suppressed because it is too large.
vendor/k8s.io/kubernetes/test/e2e/auth/certificates.go (generated, vendored): 5 lines changed
@@ -91,9 +91,10 @@ var _ = SIGDescribe("Certificates API", func() {

        framework.Logf("waiting for CSR to be signed")
        framework.ExpectNoError(wait.Poll(5*time.Second, time.Minute, func() (bool, error) {
-           csr, _ = csrs.Get(csrName, metav1.GetOptions{})
+           csr, err = csrs.Get(csrName, metav1.GetOptions{})
            if err != nil {
-               return false, err
+               framework.Logf("error getting csr: %v", err)
+               return false, nil
            }
            if len(csr.Status.Certificate) == 0 {
                framework.Logf("csr not signed yet")
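The change above makes the CSR poll tolerant of transient Get failures: instead of returning the error, which aborts wait.Poll immediately, the new code logs it and retries until the timeout. A minimal standalone sketch of that retry pattern follows; the fetch helper is a hypothetical stand-in for the CSR client, not part of the e2e framework.

package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// fetch is a hypothetical stand-in for a client Get call that fails a few
// times before succeeding.
func fetch(attempt int) (string, error) {
	if attempt < 3 {
		return "", errors.New("transient error")
	}
	return "signed-certificate", nil
}

func main() {
	attempt := 0
	var cert string
	// Returning (false, nil) on a transient error keeps wait.Poll retrying,
	// which is what the updated CSR check does; returning a non-nil error,
	// as the old code did, aborts the poll on the first failure.
	err := wait.Poll(100*time.Millisecond, 5*time.Second, func() (bool, error) {
		attempt++
		c, err := fetch(attempt)
		if err != nil {
			fmt.Printf("error getting resource: %v\n", err)
			return false, nil // retry
		}
		if len(c) == 0 {
			return false, nil // not ready yet
		}
		cert = c
		return true, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("got:", cert)
}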
vendor/k8s.io/kubernetes/test/e2e/auth/metadata_concealment.go (generated, vendored): 2 lines changed
@@ -58,7 +58,7 @@ var _ = SIGDescribe("Metadata Concealment", func() {
        Expect(err).NotTo(HaveOccurred())

        By("Ensuring job reaches completions")
-       err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, int32(1))
+       err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, int32(1))
        Expect(err).NotTo(HaveOccurred())
    })
})
vendor/k8s.io/kubernetes/test/e2e/auth/node_authn.go (generated, vendored, new file): 107 lines
@@ -0,0 +1,107 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package auth

import (
    "fmt"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/master/ports"
    "k8s.io/kubernetes/test/e2e/framework"
    imageutils "k8s.io/kubernetes/test/utils/image"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {

    f := framework.NewDefaultFramework("node-authn")
    var ns string
    var nodeIPs []string
    BeforeEach(func() {
        ns = f.Namespace.Name

        nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
        Expect(err).NotTo(HaveOccurred())
        Expect(len(nodeList.Items)).NotTo(BeZero())

        pickedNode := nodeList.Items[0]
        nodeIPs = framework.GetNodeAddresses(&pickedNode, v1.NodeExternalIP)
        // The pods running in the cluster can see the internal addresses.
        nodeIPs = append(nodeIPs, framework.GetNodeAddresses(&pickedNode, v1.NodeInternalIP)...)

        // make sure ServiceAccount admission controller is enabled, so secret generation on SA creation works
        sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get("default", metav1.GetOptions{})
        Expect(err).NotTo(HaveOccurred())
        Expect(len(sa.Secrets)).NotTo(BeZero())
    })

    It("The kubelet's main port 10250 should reject requests with no credentials", func() {
        pod := createNodeAuthTestPod(f)
        for _, nodeIP := range nodeIPs {
            // Anonymous authentication is disabled by default
            result := framework.RunHostCmdOrDie(ns, pod.Name, fmt.Sprintf("curl -sIk -o /dev/null -w '%s' https://%s:%v/metrics", "%{http_code}", nodeIP, ports.KubeletPort))
            Expect(result).To(Or(Equal("401"), Equal("403")), "the kubelet's main port 10250 should reject requests with no credentials")
        }
    })

    It("The kubelet can delegate ServiceAccount tokens to the API server", func() {
        By("create a new ServiceAccount for authentication")
        trueValue := true
        newSA := &v1.ServiceAccount{
            ObjectMeta: metav1.ObjectMeta{
                Namespace: ns,
                Name:      "node-auth-newSA",
            },
            AutomountServiceAccountToken: &trueValue,
        }
        _, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Create(newSA)
        Expect(err).NotTo(HaveOccurred())

        pod := createNodeAuthTestPod(f)

        for _, nodeIP := range nodeIPs {
            result := framework.RunHostCmdOrDie(ns,
                pod.Name,
                fmt.Sprintf("curl -sIk -o /dev/null -w '%s' --header \"Authorization: Bearer `%s`\" https://%s:%v/metrics",
                    "%{http_code}",
                    "cat /var/run/secrets/kubernetes.io/serviceaccount/token",
                    nodeIP, ports.KubeletPort))
            Expect(result).To(Or(Equal("401"), Equal("403")), "the kubelet can delegate ServiceAccount tokens to the API server")
        }
    })
})

func createNodeAuthTestPod(f *framework.Framework) *v1.Pod {
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: "test-node-authn-",
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{{
                Name:    "test-node-authn",
                Image:   imageutils.GetE2EImage(imageutils.Hostexec),
                Command: []string{"sleep 3600"},
            }},
            RestartPolicy: v1.RestartPolicyNever,
        },
    }

    return f.PodClient().CreateSync(pod)
}
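The new test drives the check with curl from inside a hostexec pod. As a rough standalone illustration of the same probe in plain Go (the node IP is a placeholder and the snippet assumes the kubelet's port 10250 is reachable from wherever it runs):

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Placeholder address; the e2e test discovers node IPs through the API server.
	nodeIP := "10.0.0.1"

	client := &http.Client{
		Timeout: 10 * time.Second,
		// The kubelet serves a self-signed certificate by default, so skip
		// verification here, just as the test's `curl -k` does.
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}

	resp, err := client.Get(fmt.Sprintf("https://%s:10250/metrics", nodeIP))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// With anonymous authentication disabled, an unauthenticated request is
	// expected to be rejected with 401 or 403, which is what the test asserts.
	switch resp.StatusCode {
	case http.StatusUnauthorized, http.StatusForbidden:
		fmt.Println("kubelet rejected the anonymous request:", resp.StatusCode)
	default:
		fmt.Println("unexpected status:", resp.StatusCode)
	}
}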
vendor/k8s.io/kubernetes/test/e2e/auth/pod_security_policy.go (generated, vendored): 36 lines changed
@@ -32,10 +32,10 @@ import (
    "k8s.io/kubernetes/pkg/security/apparmor"
    "k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
    psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util"
-   utilpointer "k8s.io/kubernetes/pkg/util/pointer"
    "k8s.io/kubernetes/test/e2e/common"
    "k8s.io/kubernetes/test/e2e/framework"
    imageutils "k8s.io/kubernetes/test/utils/image"
+   utilpointer "k8s.io/utils/pointer"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
@@ -75,7 +75,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {

    It("should forbid pod creation when no PSP is available", func() {
        By("Running a restricted pod")
-       _, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "restricted"))
+       _, err := c.CoreV1().Pods(ns).Create(restrictedPod("restricted"))
        expectForbidden(err)
    })

@@ -87,11 +87,11 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
        defer cleanup()

        By("Running a restricted pod")
-       pod, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "allowed"))
+       pod, err := c.CoreV1().Pods(ns).Create(restrictedPod("allowed"))
        framework.ExpectNoError(err)
        framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))

-       testPrivilegedPods(f, func(pod *v1.Pod) {
+       testPrivilegedPods(func(pod *v1.Pod) {
            _, err := c.CoreV1().Pods(ns).Create(pod)
            expectForbidden(err)
        })
@@ -103,11 +103,11 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
        defer cleanup()

        By("Running a restricted pod")
-       pod, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "allowed"))
+       pod, err := c.CoreV1().Pods(ns).Create(restrictedPod("allowed"))
        framework.ExpectNoError(err)
        framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))

-       testPrivilegedPods(f, func(pod *v1.Pod) {
+       testPrivilegedPods(func(pod *v1.Pod) {
            _, err := c.CoreV1().Pods(ns).Create(pod)
            expectForbidden(err)
        })
@@ -121,7 +121,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
        expectedPSP, cleanup := createAndBindPSP(f, framework.PrivilegedPSP("permissive"))
        defer cleanup()

-       testPrivilegedPods(f, func(pod *v1.Pod) {
+       testPrivilegedPods(func(pod *v1.Pod) {
            p, err := c.CoreV1().Pods(ns).Create(pod)
            framework.ExpectNoError(err)
            framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))
@@ -143,7 +143,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
        expectedPSP, cleanup := createAndBindPSPInPolicy(f, privilegedPSPInPolicy("permissive"))
        defer cleanup()

-       testPrivilegedPods(f, func(pod *v1.Pod) {
+       testPrivilegedPods(func(pod *v1.Pod) {
            p, err := c.CoreV1().Pods(ns).Create(pod)
            framework.ExpectNoError(err)
            framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))
@@ -163,16 +163,16 @@ func expectForbidden(err error) {
    Expect(apierrs.IsForbidden(err)).To(BeTrue(), "should be forbidden error")
}

-func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
+func testPrivilegedPods(tester func(pod *v1.Pod)) {
    By("Running a privileged pod", func() {
-       privileged := restrictedPod(f, "privileged")
+       privileged := restrictedPod("privileged")
        privileged.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
        privileged.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation = nil
        tester(privileged)
    })

    By("Running a HostPath pod", func() {
-       hostpath := restrictedPod(f, "hostpath")
+       hostpath := restrictedPod("hostpath")
        hostpath.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{{
            Name:      "hp",
            MountPath: "/hp",
@@ -187,26 +187,26 @@ func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
    })

    By("Running a HostNetwork pod", func() {
-       hostnet := restrictedPod(f, "hostnet")
+       hostnet := restrictedPod("hostnet")
        hostnet.Spec.HostNetwork = true
        tester(hostnet)
    })

    By("Running a HostPID pod", func() {
-       hostpid := restrictedPod(f, "hostpid")
+       hostpid := restrictedPod("hostpid")
        hostpid.Spec.HostPID = true
        tester(hostpid)
    })

    By("Running a HostIPC pod", func() {
-       hostipc := restrictedPod(f, "hostipc")
+       hostipc := restrictedPod("hostipc")
        hostipc.Spec.HostIPC = true
        tester(hostipc)
    })

    if common.IsAppArmorSupported() {
        By("Running a custom AppArmor profile pod", func() {
-           aa := restrictedPod(f, "apparmor")
+           aa := restrictedPod("apparmor")
            // Every node is expected to have the docker-default profile.
            aa.Annotations[apparmor.ContainerAnnotationKeyPrefix+"pause"] = "localhost/docker-default"
            tester(aa)
@@ -214,13 +214,13 @@ func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
    }

    By("Running an unconfined Seccomp pod", func() {
-       unconfined := restrictedPod(f, "seccomp")
+       unconfined := restrictedPod("seccomp")
        unconfined.Annotations[v1.SeccompPodAnnotationKey] = "unconfined"
        tester(unconfined)
    })

    By("Running a SYS_ADMIN pod", func() {
-       sysadmin := restrictedPod(f, "sysadmin")
+       sysadmin := restrictedPod("sysadmin")
        sysadmin.Spec.Containers[0].SecurityContext.Capabilities = &v1.Capabilities{
            Add: []v1.Capability{"SYS_ADMIN"},
        }
@@ -311,7 +311,7 @@ func createAndBindPSPInPolicy(f *framework.Framework, pspTemplate *policy.PodSec
    }
}

-func restrictedPod(f *framework.Framework, name string) *v1.Pod {
+func restrictedPod(name string) *v1.Pod {
    return &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
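For orientation: the refactor above only drops the unused *framework.Framework parameter from restrictedPod and testPrivilegedPods; the tests still build a baseline non-privileged pod and flip one field per scenario. The full restrictedPod body is not part of this hunk, so the following is only an illustrative sketch of such a baseline spec; the image and field choices are assumptions, not the vendored helper's exact contents.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func boolPtr(b bool) *bool { return &b }

// restrictedPodSketch builds a minimal pod that requests no elevated
// privileges; it is illustrative only, not the vendored restrictedPod helper.
func restrictedPodSketch(name string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "pause",
				Image: "k8s.gcr.io/pause", // assumed image choice
				SecurityContext: &v1.SecurityContext{
					// These are the knobs the PSP tests flip to turn a
					// restricted pod into a privileged one.
					Privileged:               boolPtr(false),
					AllowPrivilegeEscalation: boolPtr(false),
				},
			}},
		},
	}
}

func main() {
	pod := restrictedPodSketch("demo")
	fmt.Println(pod.Name, *pod.Spec.Containers[0].SecurityContext.Privileged)
}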
vendor/k8s.io/kubernetes/test/e2e/auth/service_accounts.go (generated, vendored): 35 lines changed
@@ -153,6 +153,15 @@ var _ = SIGDescribe("ServiceAccounts", func() {
        }
    })

+   /*
+      Release: v1.9
+      Testname: Service Account Tokens Must AutoMount
+      Description: Ensure that Service Account keys are mounted into the Container. Pod
+                   contains three containers each will read Service Account token,
+                   root CA and default namespace respectively from the default API
+                   Token Mount path. All these three files MUST exist and the Service
+                   Account mount path MUST be auto mounted to the Container.
+   */
    framework.ConformanceIt("should mount an API token into pods ", func() {
        var tokenContent string
        var rootCAContent string
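The conformance description added above comes down to three files under the default service-account mount path inside the container. A minimal in-pod sketch of that check, using the standard Kubernetes paths rather than the e2e test's own plumbing:

package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
)

// Default in-pod mount path of the service-account volume; the conformance
// test reads these three files through its test containers.
const serviceAccountPath = "/var/run/secrets/kubernetes.io/serviceaccount"

func main() {
	for _, name := range []string{"token", "ca.crt", "namespace"} {
		data, err := ioutil.ReadFile(filepath.Join(serviceAccountPath, name))
		if err != nil {
			fmt.Printf("%s: not mounted (%v)\n", name, err)
			continue
		}
		fmt.Printf("%s: %d bytes\n", name, len(data))
	}
}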
@@ -235,7 +244,33 @@ var _ = SIGDescribe("ServiceAccounts", func() {
        })
    })

+   /*
+      Release: v1.9
+      Testname: Service account tokens auto mount optionally
+      Description: Ensure that Service Account keys are mounted into the Pod only
+                   when AutoMountServiceToken is not set to false. We test the
+                   following scenarios here.
+      1. Create Pod, Pod Spec has AutomountServiceAccountToken set to nil
+         a) Service Account with default value,
+         b) Service Account is an configured AutomountServiceAccountToken set to true,
+         c) Service Account is an configured AutomountServiceAccountToken set to false
+      2. Create Pod, Pod Spec has AutomountServiceAccountToken set to true
+         a) Service Account with default value,
+         b) Service Account is configured with AutomountServiceAccountToken set to true,
+         c) Service Account is configured with AutomountServiceAccountToken set to false
+      3. Create Pod, Pod Spec has AutomountServiceAccountToken set to false
+         a) Service Account with default value,
+         b) Service Account is configured with AutomountServiceAccountToken set to true,
+         c) Service Account is configured with AutomountServiceAccountToken set to false
+
+      The Containers running in these pods MUST verify that the ServiceTokenVolume path is
+      auto mounted only when Pod Spec has AutomountServiceAccountToken not set to false
+      and ServiceAccount object has AutomountServiceAccountToken not set to false, this
+      include test cases 1a,1b,2a,2b and 2c.
+      In the test cases 1c,3a,3b and 3c the ServiceTokenVolume MUST not be auto mounted.
+   */
    framework.ConformanceIt("should allow opting out of API token automount ", func() {

        var err error
        trueValue := true
        falseValue := false
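The scenario matrix in the new conformance comment reduces to a precedence rule: an explicit AutomountServiceAccountToken on the Pod spec wins, otherwise the ServiceAccount's value applies, and the default is to mount the token. A small illustrative sketch of that rule (not the admission-controller source):

package main

import "fmt"

// shouldAutomount mirrors the matrix above: an explicit setting on the Pod
// spec takes precedence, otherwise the ServiceAccount's setting applies, and
// the default is to mount the token. Illustrative only.
func shouldAutomount(podSetting, saSetting *bool) bool {
	if podSetting != nil {
		return *podSetting
	}
	if saSetting != nil {
		return *saSetting
	}
	return true
}

func main() {
	t, f := true, false
	fmt.Println(shouldAutomount(nil, nil)) // case 1a: mounted
	fmt.Println(shouldAutomount(nil, &f))  // case 1c: not mounted
	fmt.Println(shouldAutomount(&t, &f))   // case 2c: mounted (Pod spec wins)
	fmt.Println(shouldAutomount(&f, &t))   // case 3b: not mounted
}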