verify-shellcheck.sh: import from Kubernetes
This is an unmodified copy of kubernetes/hack/verify-shellcheck.sh at revision d5a3db003916b1d33b503ccd2e4897e094d8af77, together with the util.sh helper library it sources.
util.sh (Executable file, 839 lines added)
@@ -0,0 +1,839 @@
#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

function kube::util::sourced_variable {
  # Call this function to tell shellcheck that a variable is supposed to
  # be used from other calling context. This helps quiet an "unused
  # variable" warning from shellcheck and also document your code.
  true
}

kube::util::sortable_date() {
  date "+%Y%m%d-%H%M%S"
}

# arguments: target, item1, item2, item3, ...
# returns 0 if target is in the given items, 1 otherwise.
kube::util::array_contains() {
  local search="$1"
  local element
  shift
  for element; do
    if [[ "${element}" == "${search}" ]]; then
      return 0
    fi
  done
  return 1
}

kube::util::wait_for_url() {
  local url=$1
  local prefix=${2:-}
  local wait=${3:-1}
  local times=${4:-30}
  local maxtime=${5:-1}

  command -v curl >/dev/null || {
    kube::log::usage "curl must be installed"
    exit 1
  }

  local i
  for i in $(seq 1 "${times}"); do
    local out
    if out=$(curl --max-time "${maxtime}" -gkfs "${url}" 2>/dev/null); then
      kube::log::status "On try ${i}, ${prefix}: ${out}"
      return 0
    fi
    sleep "${wait}"
  done
  kube::log::error "Timed out waiting for ${prefix} to answer at ${url}; tried ${times} waiting ${wait} between each"
  return 1
}

# Example: kube::util::trap_add 'echo "in trap DEBUG"' DEBUG
# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal
kube::util::trap_add() {
  local trap_add_cmd
  trap_add_cmd=$1
  shift

  for trap_add_name in "$@"; do
    local existing_cmd
    local new_cmd

    # Grab the currently defined trap commands for this trap
    existing_cmd=$(trap -p "${trap_add_name}" | awk -F"'" '{print $2}')

    if [[ -z "${existing_cmd}" ]]; then
      new_cmd="${trap_add_cmd}"
    else
      new_cmd="${trap_add_cmd};${existing_cmd}"
    fi

    # Assign the test. Disable the shellcheck warning telling that trap
    # commands should be single quoted to avoid evaluating them at this
    # point instead evaluating them at run time. The logic of adding new
    # commands to a single trap requires them to be evaluated right away.
    # shellcheck disable=SC2064
    trap "${new_cmd}" "${trap_add_name}"
  done
}

# Opposite of kube::util::ensure-temp-dir()
kube::util::cleanup-temp-dir() {
  rm -rf "${KUBE_TEMP}"
}

# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
#   KUBE_TEMP
kube::util::ensure-temp-dir() {
  if [[ -z ${KUBE_TEMP-} ]]; then
    KUBE_TEMP=$(mktemp -d 2>/dev/null || mktemp -d -t kubernetes.XXXXXX)
    kube::util::trap_add kube::util::cleanup-temp-dir EXIT
  fi
}

kube::util::host_os() {
  local host_os
  case "$(uname -s)" in
    Darwin)
      host_os=darwin
      ;;
    Linux)
      host_os=linux
      ;;
    *)
      kube::log::error "Unsupported host OS. Must be Linux or Mac OS X."
      exit 1
      ;;
  esac
  echo "${host_os}"
}

kube::util::host_arch() {
  local host_arch
  case "$(uname -m)" in
    x86_64*)
      host_arch=amd64
      ;;
    i?86_64*)
      host_arch=amd64
      ;;
    amd64*)
      host_arch=amd64
      ;;
    aarch64*)
      host_arch=arm64
      ;;
    arm64*)
      host_arch=arm64
      ;;
    arm*)
      host_arch=arm
      ;;
    i?86*)
      host_arch=x86
      ;;
    s390x*)
      host_arch=s390x
      ;;
    ppc64le*)
      host_arch=ppc64le
      ;;
    *)
      kube::log::error "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le."
      exit 1
      ;;
  esac
  echo "${host_arch}"
}

# This figures out the host platform without relying on golang. We need this as
# we don't want a golang install to be a prerequisite to building yet we need
# this info to figure out where the final binaries are placed.
kube::util::host_platform() {
  echo "$(kube::util::host_os)/$(kube::util::host_arch)"
}

kube::util::find-binary-for-platform() {
  local -r lookfor="$1"
  local -r platform="$2"
  local locations=(
    "${KUBE_ROOT}/_output/bin/${lookfor}"
    "${KUBE_ROOT}/_output/dockerized/bin/${platform}/${lookfor}"
    "${KUBE_ROOT}/_output/local/bin/${platform}/${lookfor}"
    "${KUBE_ROOT}/platforms/${platform}/${lookfor}"
  )
  # Also search for binary in bazel build tree.
  # The bazel go rules place some binaries in subtrees like
  # "bazel-bin/source/path/linux_amd64_pure_stripped/binaryname", so make sure
  # the platform name is matched in the path.
  while IFS=$'\n' read -r location; do
    locations+=("$location");
  done < <(find "${KUBE_ROOT}/bazel-bin/" -type f -executable \
    \( -path "*/${platform/\//_}*/${lookfor}" -o -path "*/${lookfor}" \) 2>/dev/null || true)

  # List most recently-updated location.
  local -r bin=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 )
  echo -n "${bin}"
}

kube::util::find-binary() {
  kube::util::find-binary-for-platform "$1" "$(kube::util::host_platform)"
}

# Run all known doc generators (today gendocs and genman for kubectl)
# $1 is the directory to put those generated documents
kube::util::gen-docs() {
  local dest="$1"

  # Find binary
  gendocs=$(kube::util::find-binary "gendocs")
  genkubedocs=$(kube::util::find-binary "genkubedocs")
  genman=$(kube::util::find-binary "genman")
  genyaml=$(kube::util::find-binary "genyaml")
  genfeddocs=$(kube::util::find-binary "genfeddocs")

  # TODO: If ${genfeddocs} is not used from anywhere (it isn't used at
  # least from k/k tree), remove it completely.
  kube::util::sourced_variable "${genfeddocs}"

  mkdir -p "${dest}/docs/user-guide/kubectl/"
  "${gendocs}" "${dest}/docs/user-guide/kubectl/"
  mkdir -p "${dest}/docs/admin/"
  "${genkubedocs}" "${dest}/docs/admin/" "kube-apiserver"
  "${genkubedocs}" "${dest}/docs/admin/" "kube-controller-manager"
  "${genkubedocs}" "${dest}/docs/admin/" "cloud-controller-manager"
  "${genkubedocs}" "${dest}/docs/admin/" "kube-proxy"
  "${genkubedocs}" "${dest}/docs/admin/" "kube-scheduler"
  "${genkubedocs}" "${dest}/docs/admin/" "kubelet"
  "${genkubedocs}" "${dest}/docs/admin/" "kubeadm"

  mkdir -p "${dest}/docs/man/man1/"
  "${genman}" "${dest}/docs/man/man1/" "kube-apiserver"
  "${genman}" "${dest}/docs/man/man1/" "kube-controller-manager"
  "${genman}" "${dest}/docs/man/man1/" "cloud-controller-manager"
  "${genman}" "${dest}/docs/man/man1/" "kube-proxy"
  "${genman}" "${dest}/docs/man/man1/" "kube-scheduler"
  "${genman}" "${dest}/docs/man/man1/" "kubelet"
  "${genman}" "${dest}/docs/man/man1/" "kubectl"
  "${genman}" "${dest}/docs/man/man1/" "kubeadm"

  mkdir -p "${dest}/docs/yaml/kubectl/"
  "${genyaml}" "${dest}/docs/yaml/kubectl/"

  # create the list of generated files
  pushd "${dest}" > /dev/null || return 1
  touch docs/.generated_docs
  find . -type f | cut -sd / -f 2- | LC_ALL=C sort > docs/.generated_docs
  popd > /dev/null || return 1
}

# Removes previously generated docs-- we don't want to check them in. $KUBE_ROOT
# must be set.
kube::util::remove-gen-docs() {
  if [ -e "${KUBE_ROOT}/docs/.generated_docs" ]; then
    # remove all of the old docs; we don't want to check them in.
    while read -r file; do
      rm "${KUBE_ROOT}/${file}" 2>/dev/null || true
    done <"${KUBE_ROOT}/docs/.generated_docs"
    # The docs/.generated_docs file lists itself, so we don't need to explicitly
    # delete it.
  fi
}

# Takes a group/version and returns the path to its location on disk, sans
# "pkg". E.g.:
# * default behavior: extensions/v1beta1 -> apis/extensions/v1beta1
# * default behavior for only a group: experimental -> apis/experimental
# * Special handling for empty group: v1 -> api/v1, unversioned -> api/unversioned
# * Special handling for groups suffixed with ".k8s.io": foo.k8s.io/v1 -> apis/foo/v1
# * Very special handling for when both group and version are "": / -> api
kube::util::group-version-to-pkg-path() {
  local group_version="$1"

  while IFS=$'\n' read -r api; do
    if [[ "${api}" = "${group_version/.*k8s.io/}" ]]; then
      echo "vendor/k8s.io/api/${group_version/.*k8s.io/}"
      return
    fi
  done < <(cd "${KUBE_ROOT}/staging/src/k8s.io/api" && find . -name types.go -exec dirname {} \; | sed "s|\./||g" | sort)

  # "v1" is the API GroupVersion
  if [[ "${group_version}" == "v1" ]]; then
    echo "vendor/k8s.io/api/core/v1"
    return
  fi

  # Special cases first.
  # TODO(lavalamp): Simplify this by moving pkg/api/v1 and splitting pkg/api,
  # moving the results to pkg/apis/api.
  case "${group_version}" in
    # both group and version are "", this occurs when we generate deep copies for internal objects of the legacy v1 API.
    __internal)
      echo "pkg/apis/core"
      ;;
    meta/v1)
      echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1"
      ;;
    meta/v1beta1)
      echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1"
      ;;
    *.k8s.io)
      echo "pkg/apis/${group_version%.*k8s.io}"
      ;;
    *.k8s.io/*)
      echo "pkg/apis/${group_version/.*k8s.io/}"
      ;;
    *)
      echo "pkg/apis/${group_version%__internal}"
      ;;
  esac
}

# Takes a group/version and returns the swagger-spec file name.
# default behavior: extensions/v1beta1 -> extensions_v1beta1
# special case for v1: v1 -> v1
kube::util::gv-to-swagger-name() {
  local group_version="$1"
  case "${group_version}" in
    v1)
      echo "v1"
      ;;
    *)
      echo "${group_version%/*}_${group_version#*/}"
      ;;
  esac
}

# Returns the name of the upstream remote repository name for the local git
# repo, e.g. "upstream" or "origin".
kube::util::git_upstream_remote_name() {
  git remote -v | grep fetch |\
    grep -E 'github.com[/:]kubernetes/kubernetes|k8s.io/kubernetes' |\
    head -n 1 | awk '{print $1}'
}

# Ensures the current directory is a git tree for doing things like restoring or
# validating godeps
kube::util::create-fake-git-tree() {
  local -r target_dir=${1:-$(pwd)}

  pushd "${target_dir}" >/dev/null || return 1
  git init >/dev/null
  git config --local user.email "nobody@k8s.io"
  git config --local user.name "$0"
  git add . >/dev/null
  git commit -q -m "Snapshot" >/dev/null
  if (( ${KUBE_VERBOSE:-5} >= 6 )); then
    kube::log::status "${target_dir} is now a git tree."
  fi
  popd >/dev/null || return 1
}

# Checks whether godep restore was run in the current GOPATH, i.e. that all referenced repos exist
# and are checked out to the referenced rev.
kube::util::godep_restored() {
  local -r godeps_json=${1:-Godeps/Godeps.json}
  local -r gopath=${2:-${GOPATH%:*}}

  kube::util::require-jq

  local root
  local old_rev=""
  while read -r path rev; do
    rev="${rev//[\'\"]}" # remove quotes which are around revs sometimes

    if [[ "${rev}" == "${old_rev}" ]] && [[ "${path}" == "${root}"* ]]; then
      # avoid checking the same git/hg root again
      continue
    fi

    root="${path}"
    while [ "${root}" != "." ] && [ ! -d "${gopath}/src/${root}/.git" ] && [ ! -d "${gopath}/src/${root}/.hg" ]; do
      root=$(dirname "${root}")
    done
    if [ "${root}" == "." ]; then
      echo "No checkout of ${path} found in GOPATH \"${gopath}\"." 1>&2
      return 1
    fi
    local head
    if [ -d "${gopath}/src/${root}/.git" ]; then
      head="$(cd "${gopath}/src/${root}" && git rev-parse HEAD)"
    else
      head="$(cd "${gopath}/src/${root}" && hg parent --template '{node}')"
    fi
    if [ "${head}" != "${rev}" ]; then
      echo "Unexpected HEAD '${head}' at ${gopath}/src/${root}, expected '${rev}'." 1>&2
      return 1
    fi
    old_rev="${rev}"
  done < <(jq '.Deps|.[]|.ImportPath + " " + .Rev' -r < "${godeps_json}")
  return 0
}

# Exits script if working directory is dirty. If it's run interactively in the terminal
# the user can commit changes in a second terminal. This script will wait.
kube::util::ensure_clean_working_dir() {
  while ! git diff HEAD --exit-code &>/dev/null; do
    echo -e "\nUnexpected dirty working directory:\n"
    if tty -s; then
      git status -s
    else
      git diff -a # be more verbose in log files without tty
      exit 1
    fi | sed 's/^/ /'
    echo -e "\nCommit your changes in another terminal and then continue here by pressing enter."
    read -r
  done 1>&2
}

# Ensure that the given godep version is installed and in the path. Almost
# nobody should use any version but the default.
#
# Sets:
#  KUBE_GODEP: The path to the godep binary
#
kube::util::ensure_godep_version() {
  local godep_target_version=${1:-"v80-k8s-r1"} # this version is known to work

  # If KUBE_GODEP is already set, and it's the right version, then use it.
  if [[ -n "${KUBE_GODEP:-}" && "$(${KUBE_GODEP:?} version 2>/dev/null)" == *"godep ${godep_target_version}"* ]]; then
    kube::log::status "Using ${KUBE_GODEP}"
    return
  fi

  # Otherwise, install forked godep
  kube::log::status "Installing godep version ${godep_target_version}"
  GOBIN="${KUBE_OUTPUT_BINPATH}" go install k8s.io/kubernetes/third_party/forked/godep
  export KUBE_GODEP="${KUBE_OUTPUT_BINPATH}/godep"
  kube::log::status "Installed ${KUBE_GODEP}"

  # Verify that the installed godep from fork is what we expect
  if [[ "$(${KUBE_GODEP:?} version 2>/dev/null)" != *"godep ${godep_target_version}"* ]]; then
    kube::log::error "Expected godep ${godep_target_version} from ${KUBE_GODEP}, got $(${KUBE_GODEP:?} version)"
    return 1
  fi
}

# Ensure that none of the staging repos is checked out in the GOPATH because this
# easily confused godep.
kube::util::ensure_no_staging_repos_in_gopath() {
  kube::util::ensure_single_dir_gopath
  local error=0
  for repo_file in "${KUBE_ROOT}"/staging/src/k8s.io/*; do
    if [[ ! -d "${repo_file}" ]]; then
      # not a directory or there were no files
      continue;
    fi
    repo="$(basename "${repo_file}")"
    if [ -e "${GOPATH}/src/k8s.io/${repo}" ]; then
      echo "k8s.io/${repo} exists in GOPATH. Remove before running godep-save.sh." 1>&2
      error=1
    fi
  done
  if [ "${error}" = "1" ]; then
    exit 1
  fi
}

# Checks that the GOPATH is simple, i.e. consists only of one directory, not multiple.
kube::util::ensure_single_dir_gopath() {
  if [[ "${GOPATH}" == *:* ]]; then
    echo "GOPATH must consist of a single directory." 1>&2
    exit 1
  fi
}

# Find the base commit using:
# $PULL_BASE_SHA if set (from Prow)
# current ref from the remote upstream branch
kube::util::base_ref() {
  local -r git_branch=$1

  if [[ -n ${PULL_BASE_SHA:-} ]]; then
    echo "${PULL_BASE_SHA}"
    return
  fi

  full_branch="$(kube::util::git_upstream_remote_name)/${git_branch}"

  # make sure the branch is valid, otherwise the check will pass erroneously.
  if ! git describe "${full_branch}" >/dev/null; then
    # abort!
    exit 1
  fi

  echo "${full_branch}"
}

# Checks whether there are any files matching pattern $2 changed between the
# current branch and upstream branch named by $1.
# Returns 1 (false) if there are no changes
#         0 (true) if there are changes detected.
kube::util::has_changes() {
  local -r git_branch=$1
  local -r pattern=$2
  local -r not_pattern=${3:-totallyimpossiblepattern}

  local base_ref
  base_ref=$(kube::util::base_ref "${git_branch}")
  echo "Checking for '${pattern}' changes against '${base_ref}'"

  # notice this uses ... to find the first shared ancestor
  if git diff --name-only "${base_ref}...HEAD" | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
    return 0
  fi
  # also check for pending changes
  if git status --porcelain | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
    echo "Detected '${pattern}' uncommitted changes."
    return 0
  fi
  echo "No '${pattern}' changes detected."
  return 1
}

kube::util::download_file() {
  local -r url=$1
  local -r destination_file=$2

  rm "${destination_file}" 2&> /dev/null || true

  for i in $(seq 5)
  do
    if ! curl -fsSL --retry 3 --keepalive-time 2 "${url}" -o "${destination_file}"; then
      echo "Downloading ${url} failed. $((5-i)) retries left."
      sleep 1
    else
      echo "Downloading ${url} succeed"
      return 0
    fi
  done
  return 1
}

# Test whether openssl is installed.
# Sets:
#  OPENSSL_BIN: The path to the openssl binary to use
function kube::util::test_openssl_installed {
  if ! openssl version >& /dev/null; then
    echo "Failed to run openssl. Please ensure openssl is installed"
    exit 1
  fi

  OPENSSL_BIN=$(command -v openssl)
}

# creates a client CA, args are sudo, dest-dir, ca-id, purpose
# purpose is dropped in after "key encipherment", you usually want
# '"client auth"'
# '"server auth"'
# '"client auth","server auth"'
function kube::util::create_signing_certkey {
  local sudo=$1
  local dest_dir=$2
  local id=$3
  local purpose=$4
  # Create client ca
  ${sudo} /usr/bin/env bash -e <<EOF
rm -f "${dest_dir}/${id}-ca.crt" "${dest_dir}/${id}-ca.key"
${OPENSSL_BIN} req -x509 -sha256 -new -nodes -days 365 -newkey rsa:2048 -keyout "${dest_dir}/${id}-ca.key" -out "${dest_dir}/${id}-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment",${purpose}]}}}' > "${dest_dir}/${id}-ca-config.json"
EOF
}

# signs a client certificate: args are sudo, dest-dir, CA, filename (roughly), username, groups...
function kube::util::create_client_certkey {
  local sudo=$1
  local dest_dir=$2
  local ca=$3
  local id=$4
  local cn=${5:-$4}
  local groups=""
  local SEP=""
  shift 5
  while [ -n "${1:-}" ]; do
    groups+="${SEP}{\"O\":\"$1\"}"
    SEP=","
    shift 1
  done
  ${sudo} /usr/bin/env bash -e <<EOF
cd ${dest_dir}
echo '{"CN":"${cn}","names":[${groups}],"hosts":[""],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare client-${id}
mv "client-${id}-key.pem" "client-${id}.key"
mv "client-${id}.pem" "client-${id}.crt"
rm -f "client-${id}.csr"
EOF
}

# signs a serving certificate: args are sudo, dest-dir, ca, filename (roughly), subject, hosts...
function kube::util::create_serving_certkey {
  local sudo=$1
  local dest_dir=$2
  local ca=$3
  local id=$4
  local cn=${5:-$4}
  local hosts=""
  local SEP=""
  shift 5
  while [ -n "${1:-}" ]; do
    hosts+="${SEP}\"$1\""
    SEP=","
    shift 1
  done
  ${sudo} /usr/bin/env bash -e <<EOF
cd ${dest_dir}
echo '{"CN":"${cn}","hosts":[${hosts}],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare serving-${id}
mv "serving-${id}-key.pem" "serving-${id}.key"
mv "serving-${id}.pem" "serving-${id}.crt"
rm -f "serving-${id}.csr"
EOF
}

# creates a self-contained kubeconfig: args are sudo, dest-dir, ca file, host, port, client id, token(optional)
function kube::util::write_client_kubeconfig {
  local sudo=$1
  local dest_dir=$2
  local ca_file=$3
  local api_host=$4
  local api_port=$5
  local client_id=$6
  local token=${7:-}
  cat <<EOF | ${sudo} tee "${dest_dir}"/"${client_id}".kubeconfig > /dev/null
apiVersion: v1
kind: Config
clusters:
  - cluster:
      certificate-authority: ${ca_file}
      server: https://${api_host}:${api_port}/
    name: local-up-cluster
users:
  - user:
      token: ${token}
      client-certificate: ${dest_dir}/client-${client_id}.crt
      client-key: ${dest_dir}/client-${client_id}.key
    name: local-up-cluster
contexts:
  - context:
      cluster: local-up-cluster
      user: local-up-cluster
    name: local-up-cluster
current-context: local-up-cluster
EOF

  # flatten the kubeconfig files to make them self contained
  username=$(whoami)
  ${sudo} /usr/bin/env bash -e <<EOF
$(kube::util::find-binary kubectl) --kubeconfig="${dest_dir}/${client_id}.kubeconfig" config view --minify --flatten > "/tmp/${client_id}.kubeconfig"
mv -f "/tmp/${client_id}.kubeconfig" "${dest_dir}/${client_id}.kubeconfig"
chown ${username} "${dest_dir}/${client_id}.kubeconfig"
EOF
}

# Determines if docker can be run, failures may simply require that the user be added to the docker group.
function kube::util::ensure_docker_daemon_connectivity {
  IFS=" " read -ra DOCKER <<< "${DOCKER_OPTS}"
  # Expand ${DOCKER[@]} only if it's not unset. This is to work around
  # Bash 3 issue with unbound variable.
  DOCKER=(docker ${DOCKER[@]:+"${DOCKER[@]}"})
  if ! "${DOCKER[@]}" info > /dev/null 2>&1 ; then
    cat <<'EOF' >&2
Can't connect to 'docker' daemon. please fix and retry.

Possible causes:
  - Docker Daemon not started
    - Linux: confirm via your init system
    - macOS w/ docker-machine: run `docker-machine ls` and `docker-machine start <name>`
    - macOS w/ Docker for Mac: Check the menu bar and start the Docker application
  - DOCKER_HOST hasn't been set or is set incorrectly
    - Linux: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
    - macOS w/ docker-machine: run `eval "$(docker-machine env <name>)"`
    - macOS w/ Docker for Mac: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
  - Other things to check:
    - Linux: User isn't in 'docker' group. Add and relogin.
      - Something like 'sudo usermod -a -G docker ${USER}'
      - RHEL7 bug and workaround: https://bugzilla.redhat.com/show_bug.cgi?id=1119282#c8
EOF
    return 1
  fi
}

# Wait for background jobs to finish. Return with
# an error status if any of the jobs failed.
kube::util::wait-for-jobs() {
  local fail=0
  local job
  for job in $(jobs -p); do
    wait "${job}" || fail=$((fail + 1))
  done
  return ${fail}
}

# kube::util::join <delim> <list...>
# Concatenates the list elements with the delimiter passed as first parameter
#
# Ex: kube::util::join , a b c
#  -> a,b,c
function kube::util::join {
  local IFS="$1"
  shift
  echo "$*"
}

# Downloads cfssl/cfssljson into $1 directory if they do not already exist in PATH
#
# Assumed vars:
#   $1 (cfssl directory) (optional)
#
# Sets:
#  CFSSL_BIN: The path of the installed cfssl binary
#  CFSSLJSON_BIN: The path of the installed cfssljson binary
#
function kube::util::ensure-cfssl {
  if command -v cfssl &>/dev/null && command -v cfssljson &>/dev/null; then
    CFSSL_BIN=$(command -v cfssl)
    CFSSLJSON_BIN=$(command -v cfssljson)
    return 0
  fi

  host_arch=$(kube::util::host_arch)

  if [[ "${host_arch}" != "amd64" ]]; then
    echo "Cannot download cfssl on non-amd64 hosts and cfssl does not appear to be installed."
    echo "Please install cfssl and cfssljson and verify they are in \$PATH."
    echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
    exit 1
  fi

  # Create a temp dir for cfssl if no directory was given
  local cfssldir=${1:-}
  if [[ -z "${cfssldir}" ]]; then
    kube::util::ensure-temp-dir
    cfssldir="${KUBE_TEMP}/cfssl"
  fi

  mkdir -p "${cfssldir}"
  pushd "${cfssldir}" > /dev/null || return 1

  echo "Unable to successfully run 'cfssl' from ${PATH}; downloading instead..."
  kernel=$(uname -s)
  case "${kernel}" in
    Linux)
      curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
      curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
      ;;
    Darwin)
      curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_darwin-amd64
      curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_darwin-amd64
      ;;
    *)
      echo "Unknown, unsupported platform: ${kernel}." >&2
      echo "Supported platforms: Linux, Darwin." >&2
      exit 2
  esac

  chmod +x cfssl || true
  chmod +x cfssljson || true

  CFSSL_BIN="${cfssldir}/cfssl"
  CFSSLJSON_BIN="${cfssldir}/cfssljson"
  if [[ ! -x ${CFSSL_BIN} || ! -x ${CFSSLJSON_BIN} ]]; then
    echo "Failed to download 'cfssl'. Please install cfssl and cfssljson and verify they are in \$PATH."
    echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
    exit 1
  fi
  popd > /dev/null || return 1
}

# kube::util::ensure_dockerized
# Confirms that the script is being run inside a kube-build image
#
function kube::util::ensure_dockerized {
  if [[ -f /kube-build-image ]]; then
    return 0
  else
    echo "ERROR: This script is designed to be run inside a kube-build container"
    exit 1
  fi
}

# kube::util::ensure-gnu-sed
# Determines which sed binary is gnu-sed on linux/darwin
#
# Sets:
#  SED: The name of the gnu-sed binary
#
function kube::util::ensure-gnu-sed {
  if LANG=C sed --help 2>&1 | grep -q GNU; then
    SED="sed"
  elif command -v gsed &>/dev/null; then
    SED="gsed"
  else
    kube::log::error "Failed to find GNU sed as sed or gsed. If you are on Mac: brew install gnu-sed." >&2
    return 1
  fi
  kube::util::sourced_variable "${SED}"
}

# kube::util::check-file-in-alphabetical-order <file>
# Check that the file is in alphabetical order
#
function kube::util::check-file-in-alphabetical-order {
  local failure_file="$1"
  if ! diff -u "${failure_file}" <(LC_ALL=C sort "${failure_file}"); then
    {
      echo
      echo "${failure_file} is not in alphabetical order. Please sort it:"
      echo
      echo "  LC_ALL=C sort -o ${failure_file} ${failure_file}"
      echo
    } >&2
    false
  fi
}

# kube::util::require-jq
# Checks whether jq is installed.
function kube::util::require-jq {
  if ! command -v jq &>/dev/null; then
    echo "jq not found. Please install." 1>&2
    return 1
  fi
}

# Some useful colors.
if [[ -z "${color_start-}" ]]; then
  declare -r color_start="\033["
  declare -r color_red="${color_start}0;31m"
  declare -r color_yellow="${color_start}0;33m"
  declare -r color_green="${color_start}0;32m"
  declare -r color_blue="${color_start}1;34m"
  declare -r color_cyan="${color_start}1;36m"
  declare -r color_norm="${color_start}0m"

  kube::util::sourced_variable "${color_start}"
  kube::util::sourced_variable "${color_red}"
  kube::util::sourced_variable "${color_yellow}"
  kube::util::sourced_variable "${color_green}"
  kube::util::sourced_variable "${color_blue}"
  kube::util::sourced_variable "${color_cyan}"
  kube::util::sourced_variable "${color_norm}"
fi

# ex: ts=2 sw=2 et filetype=sh
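
For context, a consumer script sources this library and calls the namespaced helpers directly; verify-shellcheck.sh below does exactly that. A minimal sketch of a hypothetical caller (the hack/lib/util.sh path is an assumption matching the layout verify-shellcheck.sh expects, not something this commit mandates):

#!/usr/bin/env bash
# Hypothetical caller: locate the repo root relative to this script and pull in the helpers.
REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${REPO_ROOT}/hack/lib/util.sh"

# Stack a cleanup command onto EXIT without clobbering traps registered elsewhere.
kube::util::trap_add 'echo "cleaning up"' EXIT

# Check whether the detected platform is in a list of supported ones.
if kube::util::array_contains "$(kube::util::host_platform)" "linux/amd64" "darwin/amd64"; then
  echo "running on a supported platform"
fi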
verify-shellcheck.sh (Executable file, 187 lines added)
@@ -0,0 +1,187 @@
#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
source "${KUBE_ROOT}/hack/lib/util.sh"

# required version for this script, if not installed on the host we will
# use the official docker image instead. keep this in sync with SHELLCHECK_IMAGE
SHELLCHECK_VERSION="0.6.0"
# upstream shellcheck latest stable image as of January 10th, 2019
SHELLCHECK_IMAGE="koalaman/shellcheck-alpine:v0.6.0@sha256:7d4d712a2686da99d37580b4e2f45eb658b74e4b01caf67c1099adc294b96b52"

# fixed name for the shellcheck docker container so we can reliably clean it up
SHELLCHECK_CONTAINER="k8s-shellcheck"

# disabled lints
disabled=(
  # this lint disallows non-constant source, which we use extensively without
  # any known bugs
  1090
  # this lint prefers command -v to which, they are not the same
  2230
)
# comma separate for passing to shellcheck
join_by() {
  local IFS="$1";
  shift;
  echo "$*";
}
SHELLCHECK_DISABLED="$(join_by , "${disabled[@]}")"
readonly SHELLCHECK_DISABLED

# creates the shellcheck container for later use
create_container () {
  # TODO(bentheelder): this is a performance hack, we create the container with
  # a sleep MAX_INT32 so that it is effectively paused.
  # We then repeatedly exec to it to run each shellcheck, and later rm it when
  # we're done.
  # This is incredibly much faster than creating a container for each shellcheck
  # call ...
  docker run --name "${SHELLCHECK_CONTAINER}" -d --rm -v "${KUBE_ROOT}:${KUBE_ROOT}" -w "${KUBE_ROOT}" --entrypoint="sleep" "${SHELLCHECK_IMAGE}" 2147483647
}
# removes the shellcheck container
remove_container () {
  docker rm -f "${SHELLCHECK_CONTAINER}" &> /dev/null || true
}

# ensure we're linting the k8s source tree
cd "${KUBE_ROOT}"

# find all shell scripts excluding ./_*, ./.git/*, ./vendor*,
# and anything git-ignored
all_shell_scripts=()
while IFS=$'\n' read -r script;
  do git check-ignore -q "$script" || all_shell_scripts+=("$script");
done < <(find . -name "*.sh" \
  -not \( \
    -path ./_\* -o \
    -path ./.git\* -o \
    -path ./vendor\* \
  \))

# make sure known failures are sorted
failure_file="${KUBE_ROOT}/hack/.shellcheck_failures"
kube::util::check-file-in-alphabetical-order "${failure_file}"

# load known failure files
failing_files=()
while IFS=$'\n' read -r script;
  do failing_files+=("$script");
done < <(cat "${failure_file}")

# detect if the host machine has the required shellcheck version installed
# if so, we will use that instead.
HAVE_SHELLCHECK=false
if which shellcheck &>/dev/null; then
  detected_version="$(shellcheck --version | grep 'version: .*')"
  if [[ "${detected_version}" = "version: ${SHELLCHECK_VERSION}" ]]; then
    HAVE_SHELLCHECK=true
  fi
fi

# tell the user which we've selected and possibly set up the container
if ${HAVE_SHELLCHECK}; then
  echo "Using host shellcheck ${SHELLCHECK_VERSION} binary."
else
  echo "Using shellcheck ${SHELLCHECK_VERSION} docker image."
  # remove any previous container, ensure we will attempt to cleanup on exit,
  # and create the container
  remove_container
  kube::util::trap_add 'remove_container' EXIT
  if ! output="$(create_container 2>&1)"; then
    {
      echo "Failed to create shellcheck container with output: "
      echo ""
      echo "${output}"
    } >&2
    exit 1
  fi
fi

# lint each script, tracking failures
errors=()
not_failing=()
for f in "${all_shell_scripts[@]}"; do
  set +o errexit
  if ${HAVE_SHELLCHECK}; then
    failedLint=$(shellcheck --exclude="${SHELLCHECK_DISABLED}" "${f}")
  else
    failedLint=$(docker exec -t ${SHELLCHECK_CONTAINER} \
      shellcheck --exclude="${SHELLCHECK_DISABLED}" "${f}")
  fi
  set -o errexit
  kube::util::array_contains "${f}" "${failing_files[@]}" && in_failing=$? || in_failing=$?
  if [[ -n "${failedLint}" ]] && [[ "${in_failing}" -ne "0" ]]; then
    errors+=( "${failedLint}" )
  fi
  if [[ -z "${failedLint}" ]] && [[ "${in_failing}" -eq "0" ]]; then
    not_failing+=( "${f}" )
  fi
done

# Check to be sure all the packages that should pass lint are.
if [ ${#errors[@]} -eq 0 ]; then
  echo 'Congratulations! All shell files are passing lint (excluding those in hack/.shellcheck_failures).'
else
  {
    echo "Errors from shellcheck:"
    for err in "${errors[@]}"; do
      echo "$err"
    done
    echo
    echo 'Please review the above warnings. You can test via "./hack/verify-shellcheck"'
    echo 'If the above warnings do not make sense, you can exempt this package from shellcheck'
    echo 'checking by adding it to hack/.shellcheck_failures (if your reviewer is okay with it).'
    echo
  } >&2
  false
fi

if [[ ${#not_failing[@]} -gt 0 ]]; then
  {
    echo "Some packages in hack/.shellcheck_failures are passing shellcheck. Please remove them."
    echo
    for f in "${not_failing[@]}"; do
      echo "  $f"
    done
    echo
  } >&2
  false
fi

# Check that all failing_packages actually still exist
gone=()
for f in "${failing_files[@]}"; do
  kube::util::array_contains "$f" "${all_shell_scripts[@]}" || gone+=( "$f" )
done

if [[ ${#gone[@]} -gt 0 ]]; then
  {
    echo "Some files in hack/.shellcheck_failures do not exist anymore. Please remove them."
    echo
    for f in "${gone[@]}"; do
      echo "  $f"
    done
    echo
  } >&2
  false
fi
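
As imported, the checker is intended to be run from the repository root. A minimal usage sketch (assuming the script is installed as hack/verify-shellcheck.sh, matching the hack/lib paths it sources):

# Lints every tracked *.sh file; uses a host shellcheck 0.6.0 if one is on PATH,
# otherwise falls back to the pinned koalaman/shellcheck-alpine:v0.6.0 image via docker.
./hack/verify-shellcheck.sh

# Scripts with known, tolerated warnings are listed (alphabetically sorted) in
# hack/.shellcheck_failures; remove entries there once their warnings are fixed.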