diff --git a/.prow.sh b/.prow.sh
new file mode 100755
index 000000000..9ea8bee75
--- /dev/null
+++ b/.prow.sh
@@ -0,0 +1,37 @@
+#! /bin/bash
+
+# Only these tests work for csi-drivers-flex; E2E and sanity testing
+# will require further work.
+: ${CSI_PROW_TESTS:="unit serial parallel serial-alpha parallel-alpha"}
+
+# Customize deployment and E2E testing.
+: ${CSI_PROW_DEPLOY_SCRIPT:="deploy.sh"}
+: ${CSI_PROW_DEPLOYMENT:="kubernetes"}
+: ${CSI_PROW_E2E_TEST_PREFIX:="CSI Volumes"}
+
+. release-tools/prow.sh
+
+# Install custom E2E test suite as bin/tests.
+install_e2e () {
+    if [ -e "${CSI_PROW_WORK}/e2e.test" ]; then
+        return
+    fi
+
+    make build-tests && cp bin/tests "${CSI_PROW_WORK}/e2e.test"
+}
+
+# Invoke the custom E2E test suite for a certain subset of the tests (serial, parallel, ...)
+run_e2e () (
+    name="$1"
+    shift
+
+    install_e2e || die "building e2e.test failed"
+    install_ginkgo || die "installing ginkgo failed"
+
+    trap "move_junit '$name'" EXIT
+
+    cd "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" &&
+    run_with_loggers env KUBECONFIG="$KUBECONFIG" ginkgo -v "$@" "${CSI_PROW_WORK}/e2e.test" -- -report-dir "${ARTIFACTS}"
+)
+
+main
diff --git a/README.md b/README.md
index 4d3e76473..3501698b3 100644
--- a/README.md
+++ b/README.md
@@ -65,6 +65,7 @@ nfstestvol
 $ csc node get-id --endpoint tcp://127.0.0.1:10000
 CSINode
 ```
+
 ## Running Kubernetes End To End tests on an NFS Driver
 
 First, stand up a local cluster `ALLOW_PRIVILEGED=1 hack/local-up-cluster.sh` (from your Kubernetes repo)
@@ -74,18 +75,67 @@ For Fedora/RHEL clusters, the following might be required:
   sudo chown -R $USER:$USER /var/lib/kubelet
   sudo chcon -R -t svirt_sandbox_file_t /var/lib/kubelet
 ```
-If you are plannig to test using your own private image, you could either install your nfs driver using your own set of YAML files, or edit the existing YAML files to use that private image.
-When using the [existing set of YAML files](https://github.com/kubernetes-csi/csi-driver-nfs/tree/master/deploy/kubernetes), you would edit the [csi-attacher-nfsplugin.yaml](https://github.com/kubernetes-csi/csi-driver-nfs/blob/master/deploy/kubernetes/csi-attacher-nfsplugin.yaml#L46) and [csi-nodeplugin-nfsplugin.yaml](https://github.com/kubernetes-csi/csi-driver-nfs/blob/master/deploy/kubernetes/csi-nodeplugin-nfsplugin.yaml#L45) files to include your private image instead of the default one. After editing these files, skip to step 3 of the following steps.
+For all of the following commands, set the `KUBECONFIG` env variable as instructed by `local-up-cluster.sh` or as needed for some other cluster.
+
+`deploy/kubernetes/deploy.sh` will deploy the nfs driver using an
+image from quay.io which (at the time of writing) isn't available
+yet.
+
+It is possible to use a locally built image without any registry:
+``` sh
+$ make container
+...
+Successfully tagged nfsplugin:latest
+
+$ NFSPLUGIN_REGISTRY=none NFSPLUGIN_TAG=latest deploy/kubernetes/deploy.sh
+applying RBAC rules
+kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-attacher/v1.0.1/deploy/kubernetes/rbac.yaml
+serviceaccount/csi-attacher created
+clusterrole.rbac.authorization.k8s.io/external-attacher-runner created
+clusterrolebinding.rbac.authorization.k8s.io/csi-attacher-role created
+role.rbac.authorization.k8s.io/external-attacher-cfg created
+rolebinding.rbac.authorization.k8s.io/csi-attacher-role-cfg created
+deploying nfs plugin components
+   deploy/kubernetes/csi-attacher-nfsplugin.yaml
+        using image: quay.io/k8scsi/csi-attacher:v1.0.1
+        using image: nfsplugin:latest
+service/csi-attacher-nfsplugin created
+statefulset.apps/csi-attacher-nfsplugin created
+   deploy/kubernetes/csi-nodeplugin-nfsplugin.yaml
+        using image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.2
+        using image: nfsplugin:latest
+daemonset.apps/csi-nodeplugin-nfsplugin created
+   deploy/kubernetes/csi-nodeplugin-rbac.yaml
+serviceaccount/csi-nodeplugin created
+clusterrole.rbac.authorization.k8s.io/csi-nodeplugin created
+clusterrolebinding.rbac.authorization.k8s.io/csi-nodeplugin created
+10:53:11 waiting for nfs deployment to complete, attempt #0
+10:53:21 waiting for nfs deployment to complete, attempt #1
+```
-If you already have a driver installed, skip to step 4 of the following steps.
+Other clusters may need a registry to pull from:
+``` sh
+$ make push REGISTRY_NAME=my-registry:5000
+...
+$ NFSPLUGIN_REGISTRY=my-registry:5000 NFSPLUGIN_TAG=latest deploy/kubernetes/deploy.sh
+```
-1) Build the nfs driver by running `make`
-2) Create NFS Driver Image, where the image tag would be whatever that is required by your YAML deployment files `docker build -t quay.io/k8scsi/nfsplugin:v1.0.0 .`
-3) Install the Driver: `kubectl create -f deploy/kubernetes`
-4) Build E2E test binary: `make build-tests`
-5) Run E2E Tests using the following command: `./bin/tests --ginkgo.v --ginkgo.progress --kubeconfig=/var/run/kubernetes/admin.kubeconfig`
+Once you have the driver installed, tests can be run with:
+``` sh
+$ make build-tests
+mkdir -p bin
+CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-X main.version=4fa924a251193c9eef937042112462433089d658 -extldflags "-static"' -o ./bin/tests ./cmd/tests
+$ ./bin/tests --ginkgo.v --ginkgo.progress
+Jun  7 10:57:39.667: INFO: The --provider flag is not set. Continuing as if --provider=skeleton had been used.
+Running Suite: CSI Suite
+========================
+Random Seed: 1559897859 - Will randomize all specs
+Will run 103 of 103 specs
+...
+
+```
 
 ## Community, discussion, contribution, and support
diff --git a/cmd/tests/nfs-e2e.go b/cmd/tests/nfs-e2e.go
index ea8be6345..bff3cc25a 100644
--- a/cmd/tests/nfs-e2e.go
+++ b/cmd/tests/nfs-e2e.go
@@ -14,25 +14,16 @@ limitations under the License.
 package main
 
 import (
-	"flag"
 	_ "github.com/kubernetes-csi/csi-driver-nfs/test"
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"testing"
 )
 
-func init() {
+func main() {
 	framework.HandleFlags()
 	framework.AfterReadingAllFlags(&framework.TestContext)
-}
-
-func Test(t *testing.T) {
-	flag.Parse()
-	RegisterFailHandler(Fail)
-	RunSpecs(t, "CSI Suite")
-}
-
-func main() {
-	Test(&testing.T{})
+	gomega.RegisterFailHandler(ginkgo.Fail)
+	ginkgo.RunSpecs(&testing.T{}, "CSI Suite")
 }
diff --git a/deploy/kubernetes/csi-attacher-rbac.yaml b/deploy/kubernetes/csi-attacher-rbac.yaml
deleted file mode 100644
index 975fdd67f..000000000
--- a/deploy/kubernetes/csi-attacher-rbac.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-# This YAML file contains RBAC API objects that are necessary to run external
-# CSI attacher for nfs flex adapter
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: csi-attacher
-
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: external-attacher-runner
-rules:
-  - apiGroups: [""]
-    resources: ["persistentvolumes"]
-    verbs: ["get", "list", "watch", "update"]
-  - apiGroups: [""]
-    resources: ["nodes"]
-    verbs: ["get", "list", "watch"]
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["volumeattachments"]
-    verbs: ["get", "list", "watch", "update"]
-
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: csi-attacher-role
-subjects:
-  - kind: ServiceAccount
-    name: csi-attacher
-    namespace: default
-roleRef:
-  kind: ClusterRole
-  name: external-attacher-runner
-  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/kubernetes/deploy.sh b/deploy/kubernetes/deploy.sh
new file mode 100755
index 000000000..f423100cc
--- /dev/null
+++ b/deploy/kubernetes/deploy.sh
@@ -0,0 +1,154 @@
+#!/usr/bin/env bash
+
+# This script captures the steps required to successfully
+# deploy the nfs plugin driver. This should be considered
+# authoritative and all updates for this process should be
+# done here and referenced elsewhere.
+
+# The script assumes that kubectl is available on the OS path
+# where it is executed.
+
+set -e
+set -o pipefail
+
+BASE_DIR=$(dirname "$0")
+
+# If set, the following env variables override image registry and/or tag for each of the images.
+# They are named after the image name, with hyphen replaced by underscore and in upper case.
+#
+# - CSI_ATTACHER_REGISTRY
+# - CSI_ATTACHER_TAG
+# - CSI_NODE_DRIVER_REGISTRAR_REGISTRY
+# - CSI_NODE_DRIVER_REGISTRAR_TAG
+# - NFSPLUGIN_REGISTRY
+# - NFSPLUGIN_TAG
+#
+# Alternatively, it is possible to override all registries or tags with:
+# - IMAGE_REGISTRY
+# - IMAGE_TAG
+# These are used as fallback when the more specific variables are unset or empty.
+#
+# Beware that the .yaml files do not have "imagePullPolicy: Always". That means that
+# even the "canary" images will only be pulled once. This is good for testing
+# (starting a pod multiple times will always run with the same canary image), but
+# implies that refreshing that image has to be done manually.
+#
+# As a special case, 'none' as registry removes the registry name.
+
+# The default is to use the RBAC rules that match the image that is
+# being used, even in the case that the image gets overridden. This
+# way if there are breaking changes in the RBAC rules, the deployment
+# will continue to work.
+#
+# However, such breaking changes should be rare and only occur when updating
+# to a new major version of a sidecar.
Nonetheless, to allow testing the scenario +# where the image gets overridden but not the RBAC rules, updating the RBAC +# rules can be disabled. +: ${UPDATE_RBAC_RULES:=true} +function rbac_version () { + yaml="$1" + image="$2" + update_rbac="$3" + + # get version from `image: quay.io/k8scsi/csi-attacher:v1.0.1`, ignoring comments + version="$(sed -e 's/ *#.*$//' "$yaml" | grep "image:.*$image" | sed -e 's/ *#.*//' -e 's/.*://')" + + if $update_rbac; then + # apply overrides + varname=$(echo $image | tr - _ | tr a-z A-Z) + eval version=\${${varname}_TAG:-\${IMAGE_TAG:-\$version}} + fi + + # When using canary images, we have to assume that the + # canary images were built from the corresponding branch. + case "$version" in canary) version=master;; + *-canary) version="$(echo "$version" | sed -e 's/\(.*\)-canary/release-\1/')";; + esac + + echo "$version" +} + +# In addition, the RBAC rules can be overridden separately. +CSI_ATTACHER_RBAC_YAML="https://raw.githubusercontent.com/kubernetes-csi/external-attacher/$(rbac_version "${BASE_DIR}/csi-attacher-nfsplugin.yaml" csi-attacher false)/deploy/kubernetes/rbac.yaml" +: ${CSI_ATTACHER_RBAC:=https://raw.githubusercontent.com/kubernetes-csi/external-attacher/$(rbac_version "${BASE_DIR}/csi-attacher-nfsplugin.yaml" csi-attacher "${UPDATE_RBAC_RULES}")/deploy/kubernetes/rbac.yaml} + +INSTALL_CRD=${INSTALL_CRD:-"false"} + +# Some images are not affected by *_REGISTRY/*_TAG and IMAGE_* variables. +# The default is to update unless explicitly excluded. +update_image () { + case "$1" in socat) return 1;; esac +} + +run () { + echo "$@" >&2 + "$@" +} + +# rbac rules +echo "applying RBAC rules" +for component in CSI_ATTACHER; do + eval current="\${${component}_RBAC}" + eval original="\${${component}_RBAC_YAML}" + if [ "$current" != "$original" ]; then + echo "Using non-default RBAC rules for $component. Changes from $original to $current are:" + diff -c <(wget --quiet -O - "$original") <(if [[ "$current" =~ ^http ]]; then wget --quiet -O - "$current"; else cat "$current"; fi) || true + fi + run kubectl apply -f "${current}" +done + +# deploy nfs plugin and registrar sidecar +echo "deploying nfs plugin components" +for i in $(ls ${BASE_DIR}/*.yaml | sort); do + echo " $i" + modified="$(cat "$i" | while IFS= read -r line; do + nocomments="$(echo "$line" | sed -e 's/ *#.*$//')" + if echo "$nocomments" | grep -q '^[[:space:]]*image:[[:space:]]*'; then + # Split 'image: quay.io/k8scsi/csi-attacher:v1.0.1' + # into image (quay.io/k8scsi/csi-attacher:v1.0.1), + # registry (quay.io/k8scsi), + # name (csi-attacher), + # tag (v1.0.1). + image=$(echo "$nocomments" | sed -e 's;.*image:[[:space:]]*;;') + registry=$(echo "$image" | sed -e 's;\(.*\)/.*;\1;') + name=$(echo "$image" | sed -e 's;.*/\([^:]*\).*;\1;') + tag=$(echo "$image" | sed -e 's;.*:;;') + + # Variables are with underscores and upper case. + varname=$(echo $name | tr - _ | tr a-z A-Z) + + # Now replace registry and/or tag, if set as env variables. + # If not set, the replacement is the same as the original value. + # Only do this for the images which are meant to be configurable. + if update_image "$name"; then + prefix=$(eval echo \${${varname}_REGISTRY:-${IMAGE_REGISTRY:-${registry}}}/ | sed -e 's;none/;;') + suffix=$(eval echo :\${${varname}_TAG:-${IMAGE_TAG:-${tag}}}) + line="$(echo "$nocomments" | sed -e "s;$image;${prefix}${name}${suffix};")" + fi + echo " using $line" >&2 + fi + echo "$line" + done)" + if ! 
echo "$modified" | kubectl apply -f -; then
+        echo "modified version of $i:"
+        echo "$modified"
+        exit 1
+    fi
+done
+
+# Wait until all pods are running. We have to make some assumptions
+# about the deployment here, otherwise we wouldn't know what to wait
+# for: the expectation is that we run attacher and nfs plugin in the default namespace.
+cnt=0
+while [ $(kubectl get pods 2>/dev/null | grep '^csi-.*nfsplugin.* Running ' | wc -l) -lt 2 ]; do
+    if [ $cnt -gt 30 ]; then
+        echo "Running pods:"
+        kubectl describe pods
+
+        echo >&2 "ERROR: nfs deployment not ready after over 5min"
+        exit 1
+    fi
+    echo $(date +%H:%M:%S) "waiting for nfs deployment to complete, attempt #$cnt"
+    cnt=$(($cnt + 1))
+    sleep 10
+done
diff --git a/release-tools/.prow.sh b/release-tools/.prow.sh
new file mode 100755
index 000000000..b18c53581
--- /dev/null
+++ b/release-tools/.prow.sh
@@ -0,0 +1,7 @@
+#! /bin/bash -e
+#
+# This is for testing csi-release-tools itself in Prow. All other
+# repos use prow.sh for that, but as csi-release-tools isn't a normal
+# repo with some Go code in it, it has a custom Prow test script.
+
+./verify-shellcheck.sh "$(pwd)"
diff --git a/release-tools/README.md b/release-tools/README.md
index 56d2248c0..bc061aeeb 100644
--- a/release-tools/README.md
+++ b/release-tools/README.md
@@ -49,3 +49,60 @@ Cheat sheet:
 - `git subtree add --prefix=release-tools https://github.com/kubernetes-csi/csi-release-tools.git master` - add release tools to a repo which does not have them yet (only once)
 - `git subtree pull --prefix=release-tools https://github.com/kubernetes-csi/csi-release-tools.git master` - update local copy to latest upstream (whenever upstream changes)
 - edit, `git commit`, `git subtree push --prefix=release-tools git@github.com:<user>/csi-release-tools.git <branch>` - push to a new branch before submitting a PR
+
+verify-shellcheck.sh
+--------------------
+
+The [verify-shellcheck.sh](./verify-shellcheck.sh) script in this repo
+is a stripped down copy of the [corresponding
+script](https://github.com/kubernetes/kubernetes/blob/release-1.14/hack/verify-shellcheck.sh)
+in the Kubernetes repository. It can be used to check for certain
+errors in shell scripts, like missing quotation marks. The default
+`test-shellcheck` target in [build.make](./build.make) only checks the
+scripts in this directory. Components can add more directories to
+`TEST_SHELLCHECK_DIRS` to also check other scripts.
+
+End-to-end testing
+------------------
+
+A repo that wants to opt into testing via Prow must set up a top-level
+`.prow.sh`. Typically that will source `prow.sh` and then transfer
+control to it:
+
+``` bash
+#! /bin/bash -e
+
+. release-tools/prow.sh
+main
+```
+
+All Kubernetes-CSI repos are expected to switch to Prow. For details
+on what is enabled in Prow, see
+https://github.com/kubernetes/test-infra/tree/master/config/jobs/kubernetes-csi
+
+Test results for periodic jobs are visible in
+https://testgrid.k8s.io/sig-storage-csi
+
+It is possible to reproduce the Prow testing locally on a suitable machine:
+- Linux host
+- Docker installed
+- code to be tested checked out in `$GOPATH/src/<repo name>`
+- `cd $GOPATH/src/<repo name> && ./.prow.sh`
+
+Beware that the script intentionally doesn't clean up after itself and
+modifies the content of `$GOPATH`, in particular the `kubernetes` and
+`kind` repositories there. Better run it in an empty, disposable
+`$GOPATH`.
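+
+For example, a local run could look like this (illustrative only; any
+empty scratch directory works as the disposable `$GOPATH`):
+
+``` sh
+# Throw-away GOPATH so that prow.sh may modify it freely.
+export GOPATH="$(mktemp -d /tmp/csiprow-gopath.XXXXXX)"
+mkdir -p "$GOPATH/src"
+git clone https://github.com/kubernetes-csi/csi-driver-nfs "$GOPATH/src/csi-driver-nfs"
+cd "$GOPATH/src/csi-driver-nfs" && ./.prow.sh
+```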
+ +When it terminates, the following command can be used to get access to +the Kubernetes cluster that was brought up for testing (assuming that +this step succeeded): + + export KUBECONFIG="$(kind get kubeconfig-path --name="csi-prow")" + +It is possible to control the execution via environment variables. See +`prow.sh` for details. Particularly useful is testing against different +Kubernetes releases: + + CSI_PROW_KUBERNETES_VERSION=1.13.3 ./.prow.sh + CSI_PROW_KUBERNETES_VERSION=latest ./.prow.sh diff --git a/release-tools/build.make b/release-tools/build.make index 8ca0b2c2f..142c85780 100644 --- a/release-tools/build.make +++ b/release-tools/build.make @@ -98,7 +98,7 @@ test: test: test-go test-go: @ echo; echo "### $@:" - go test `go list ./... | grep -v 'vendor' $(TEST_GO_FILTER_CMD)` $(TESTARGS) + go test `go list ./... | grep -v -e 'vendor' -e '/test/e2e$$' $(TEST_GO_FILTER_CMD)` $(TESTARGS) .PHONY: test-vet test: test-vet @@ -117,8 +117,62 @@ test-fmt: false; \ fi +# This test only runs when dep >= 0.5 is installed, which is the case for the CI setup. +# When using 'go mod', we allow the test to be skipped in the Prow CI under some special +# circumstances, because it depends on accessing all remote repos and thus +# running it all the time would defeat the purpose of vendoring: +# - not handling a PR or +# - the fabricated merge commit leaves go.mod, go.sum and vendor dir unchanged +# - release-tools also didn't change (changing rules or Go version might lead to +# a different result and thus must be tested) +.PHONY: test-vendor +test: test-vendor +test-vendor: + @ echo; echo "### $@:" + @ if [ -f Gopkg.toml ]; then \ + echo "Repo uses 'dep' for vendoring."; \ + case "$$(dep version 2>/dev/null | grep 'version *:')" in \ + *v0.[56789]*) dep check && echo "vendor up-to-date" || false;; \ + *) echo "skipping check, dep >= 0.5 required";; \ + esac; \ + else \ + echo "Repo uses 'go mod' for vendoring."; \ + if [ "$${JOB_NAME}" ] && \ + ( [ "$${JOB_TYPE}" != "presubmit" ] || \ + [ $$(git diff "${PULL_BASE_SHA}..HEAD" -- go.mod go.sum vendor release-tools | wc -l) -eq 0 ] ); then \ + echo "Skipping vendor check because the Prow pre-submit job does not change vendoring."; \ + elif ! GO111MODULE=on go mod vendor; then \ + echo "ERROR: vendor check failed."; \ + false; \ + elif [ $$(git status --porcelain -- vendor | wc -l) -gt 0 ]; then \ + echo "ERROR: vendor directory *not* up-to-date, it did get modified by 'GO111MODULE=on go mod vendor':"; \ + git status -- vendor; \ + git diff -- vendor; \ + false; \ + fi; \ + fi; + .PHONY: test-subtree test: test-subtree test-subtree: @ echo; echo "### $@:" ./release-tools/verify-subtree.sh release-tools + +# Components can extend the set of directories which must pass shellcheck. +# The default is to check only the release-tools directory itself. +TEST_SHELLCHECK_DIRS=release-tools +.PHONY: test-shellcheck +test: test-shellcheck +test-shellcheck: + @ echo; echo "### $@:" + @ ret=0; \ + if ! command -v docker; then \ + echo "skipped, no Docker"; \ + exit 0; \ + fi; \ + for dir in $(abspath $(TEST_SHELLCHECK_DIRS)); do \ + echo; \ + echo "$$dir:"; \ + ./release-tools/verify-shellcheck.sh "$$dir" || ret=1; \ + done; \ + exit $$ret diff --git a/release-tools/filter-junit.go b/release-tools/filter-junit.go new file mode 100644 index 000000000..2f51be00e --- /dev/null +++ b/release-tools/filter-junit.go @@ -0,0 +1,133 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+ * This command filters a JUnit file such that only tests with a name
+ * matching a regular expression are passed through. By concatenating
+ * multiple input files it is possible to merge them into a single file.
+ */
+package main
+
+import (
+	"encoding/xml"
+	"flag"
+	"io/ioutil"
+	"os"
+	"regexp"
+)
+
+var (
+	output = flag.String("o", "-", "junit file to write, - for stdout")
+	tests  = flag.String("t", "", "regular expression matching the test names that are to be included in the output")
+)
+
+/*
+ * TestSuite represents a JUnit file. Due to how encoding/xml works, we have
+ * to represent all fields that we want to be passed through. It's therefore
+ * not a complete solution, but good enough for Ginkgo + Spyglass.
+ */
+type TestSuite struct {
+	XMLName   string     `xml:"testsuite"`
+	TestCases []TestCase `xml:"testcase"`
+}
+
+type TestCase struct {
+	Name      string     `xml:"name,attr"`
+	Time      string     `xml:"time,attr"`
+	SystemOut string     `xml:"system-out,omitempty"`
+	Failure   string     `xml:"failure,omitempty"`
+	Skipped   SkipReason `xml:"skipped,omitempty"`
+}
+
+// SkipReason deals with the special <skipped></skipped> element:
+// if present, we must re-encode it, even if empty.
+type SkipReason string
+
+func (s *SkipReason) UnmarshalText(text []byte) error {
+	*s = SkipReason(text)
+	if *s == "" {
+		*s = " "
+	}
+	return nil
+}
+
+func (s SkipReason) MarshalText() ([]byte, error) {
+	if s == " " {
+		return []byte{}, nil
+	}
+	return []byte(s), nil
+}
+
+func main() {
+	var junit TestSuite
+	var data []byte
+
+	flag.Parse()
+
+	re := regexp.MustCompile(*tests)
+
+	// Read all input files.
+	for _, input := range flag.Args() {
+		if input == "-" {
+			var err error
+			// Read everything from stdin; a plain os.Stdin.Read into an
+			// empty slice would return without reading anything.
+			data, err = ioutil.ReadAll(os.Stdin)
+			if err != nil {
+				panic(err)
+			}
+		} else {
+			var err error
+			data, err = ioutil.ReadFile(input)
+			if err != nil {
+				panic(err)
+			}
+		}
+		if err := xml.Unmarshal(data, &junit); err != nil {
+			panic(err)
+		}
+	}
+
+	// Keep only matching testcases. Testcases skipped in all test runs are only stored once.
+	filtered := map[string]TestCase{}
+	for _, testcase := range junit.TestCases {
+		if !re.MatchString(testcase.Name) {
+			continue
+		}
+		entry, ok := filtered[testcase.Name]
+		if !ok || // not present yet
+			entry.Skipped != "" && testcase.Skipped == "" { // replaced skipped test with real test run
+			filtered[testcase.Name] = testcase
+		}
+	}
+	junit.TestCases = nil
+	for _, testcase := range filtered {
+		junit.TestCases = append(junit.TestCases, testcase)
+	}
+
+	// Re-encode.
+	data, err := xml.MarshalIndent(junit, "", "  ")
+	if err != nil {
+		panic(err)
+	}
+
+	// Write to output.
+	if *output == "-" {
+		if _, err := os.Stdout.Write(data); err != nil {
+			panic(err)
+		}
+	} else {
+		if err := ioutil.WriteFile(*output, data, 0644); err != nil {
+			panic(err)
+		}
+	}
+}
diff --git a/release-tools/prow.sh b/release-tools/prow.sh
new file mode 100755
index 000000000..95d6dd04a
--- /dev/null
+++ b/release-tools/prow.sh
@@ -0,0 +1,997 @@
+#! /bin/bash
+#
+# Copyright 2019 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This script runs inside a Prow job. It can run unit tests ("make test")
+# and E2E testing. This E2E testing covers different scenarios (see
+# https://github.com/kubernetes/enhancements/pull/807):
+# - running the stable hostpath example against a Kubernetes release
+# - running the canary hostpath example against a Kubernetes release
+# - building the component in the current repo and running the
+#   stable hostpath example with that one component replaced against
+#   a Kubernetes release
+#
+# The intended usage of this script is that individual repos import
+# csi-release-tools, then link their top-level prow.sh to this or
+# include it in that file. When including it, several of the variables
+# can be overridden in the top-level prow.sh to customize the script
+# for the repo.
+#
+# The expected environment is:
+# - $GOPATH/src/<import path> for the repository that is to be tested,
+#   with PR branch merged (when testing a PR)
+# - running on linux-amd64
+# - bazel installed (when testing against Kubernetes master), must be recent
+#   enough for Kubernetes master
+# - kind (https://github.com/kubernetes-sigs/kind) installed
+# - optional: Go already installed
+
+RELEASE_TOOLS_ROOT="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
+REPO_DIR="$(pwd)"
+
+# Sets the default value for a variable if not set already and logs the value.
+# Any variable set this way is usually something that a repo's .prow.sh
+# or the job can set.
+configvar () {
+    # Ignore: Word is of the form "A"B"C" (B indicated). Did you mean "ABC" or "A\"B\"C"?
+    # shellcheck disable=SC2140
+    eval : \$\{"$1":="\$2"\}
+    eval echo "\$3:" "$1=\${$1}"
+}
+
+# Prints the value of a variable + version suffix, falling back to variable + "LATEST".
+get_versioned_variable () {
+    local var="$1"
+    local version="$2"
+    local value
+
+    eval value="\${${var}_${version}}"
+    if ! [ "$value" ]; then
+        eval value="\${${var}_LATEST}"
+    fi
+    echo "$value"
+}
+
+# Go versions can be specified separately for different tasks.
+# If the pre-installed Go is missing or a different
+# version, the required version here will get installed
+# from https://golang.org/dl/.
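+#
+# Illustrative example (not executed): because configvar keeps values that
+# are already set, a repo's top-level .prow.sh can override these defaults
+# before sourcing this file, for instance:
+#
+#   CSI_PROW_GO_VERSION_BUILD=1.12.4   # hypothetical override
+#   . release-tools/prow.sh
+#   main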
+go_from_travis_yml () {
+    grep "^ *- go:" "${RELEASE_TOOLS_ROOT}/travis.yml" | sed -e 's/.*go: *//'
+}
+configvar CSI_PROW_GO_VERSION_BUILD "$(go_from_travis_yml)" "Go version for building the component" # depends on component's source code
+configvar CSI_PROW_GO_VERSION_E2E "" "override Go version for building the Kubernetes E2E test suite" # normally doesn't need to be set, see install_e2e
+configvar CSI_PROW_GO_VERSION_SANITY "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building the csi-sanity test suite" # depends on CSI_PROW_SANITY settings below
+configvar CSI_PROW_GO_VERSION_KIND "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building 'kind'" # depends on CSI_PROW_KIND_VERSION below
+configvar CSI_PROW_GO_VERSION_GINKGO "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building ginkgo" # depends on CSI_PROW_GINKGO_VERSION below
+
+# kind version to use. If the pre-installed version is different,
+# the desired version is downloaded from https://github.com/kubernetes-sigs/kind/releases/download/
+# (if available), otherwise it is built from source.
+configvar CSI_PROW_KIND_VERSION 0.2.1 "kind"
+
+# ginkgo test runner version to use. If the pre-installed version is
+# different, the desired version is built from source.
+configvar CSI_PROW_GINKGO_VERSION v1.7.0 "Ginkgo"
+
+# Ginkgo runs the E2E test in parallel. The default is based on the number
+# of CPUs, but typically this can be set to something higher in the job.
+configvar CSI_PROW_GINKO_PARALLEL "-p" "Ginkgo parallelism parameter(s)"
+
+# Enables building the code in the repository. On by default, can be
+# disabled in jobs which only use pre-built components.
+configvar CSI_PROW_BUILD_JOB true "building code in repo enabled"
+
+# Kubernetes version to test against. This must be a version number
+# (like 1.13.3) for which there is a pre-built kind image (see
+# https://hub.docker.com/r/kindest/node/tags), "latest" (builds
+# Kubernetes from the master branch) or "release-x.yy" (builds
+# Kubernetes from a release branch).
+#
+# This can also be a version that was not released yet at the time
+# that the settings below were chosen. The script will then
+# use the same settings as for "latest" Kubernetes. This works
+# as long as there are no breaking changes in Kubernetes, like
+# deprecating or changing the implementation of an alpha feature.
+configvar CSI_PROW_KUBERNETES_VERSION 1.13.3 "Kubernetes"
+
+# CSI_PROW_KUBERNETES_VERSION reduced to first two version numbers and
+# with underscore (1_13 instead of 1.13.3) and in uppercase (LATEST
+# instead of latest).
+#
+# This is used to derive the right defaults for the variables below
+# when a Prow job just defines the Kubernetes version.
+csi_prow_kubernetes_version_suffix="$(echo "${CSI_PROW_KUBERNETES_VERSION}" | tr . _ | tr '[:lower:]' '[:upper:]' | sed -e 's/^RELEASE-//' -e 's/\([0-9]*\)_\([0-9]*\).*/\1_\2/')"
+
+# Work directory. It has to allow running executables, therefore /tmp
+# is avoided. Cleaning up after the script is intentionally left to
+# the caller.
+configvar CSI_PROW_WORK "$(mkdir -p "$GOPATH/pkg" && mktemp -d "$GOPATH/pkg/csiprow.XXXXXXXXXX")" "work directory"
+
+# The hostpath deployment script is searched for in several places.
+#
+# - The "deploy" directory in the current repository: this is useful
+#   for the situation that a component becomes incompatible with the
+#   shared deployment, because then it can (temporarily!) provide its
+#   own example until the shared one can be updated; it's also how
+#   csi-driver-host-path itself provides the example.
+# +# - CSI_PROW_HOSTPATH_VERSION of the CSI_PROW_HOSTPATH_REPO is checked +# out: this allows other repos to reference a version of the example +# that is known to be compatible. +# +# - The csi-driver-host-path/deploy directory has multiple sub-directories, +# each with different deployments (stable set of images for Kubernetes 1.13, +# stable set of images for Kubernetes 1.14, canary for latest Kubernetes, etc.). +# This is necessary because there may be incompatible changes in the +# "API" of a component (for example, its command line options or RBAC rules) +# or in its support for different Kubernetes versions (CSIDriverInfo as +# CRD in Kubernetes 1.13 vs builtin API in Kubernetes 1.14). +# +# When testing an update for a component in a PR job, the +# CSI_PROW_DEPLOYMENT variable can be set in the +# .prow.sh of each component when there are breaking changes +# that require using a non-default deployment. The default +# is a deployment named "kubernetes-x.yy" (if available), +# otherwise "kubernetes-latest". +# "none" disables the deployment of the hostpath driver. +# +# When no deploy script is found (nothing in `deploy` directory, +# CSI_PROW_HOSTPATH_REPO=none), nothing gets deployed. +configvar CSI_PROW_HOSTPATH_VERSION fc52d13ba07922c80555a24616a5b16480350c3f "hostpath driver" # pre-1.1.0 +configvar CSI_PROW_HOSTPATH_REPO https://github.com/kubernetes-csi/csi-driver-host-path "hostpath repo" +configvar CSI_PROW_DEPLOYMENT "" "deployment" +configvar CSI_PROW_DEPLOY_SCRIPT "deploy-hostpath.sh" "deploy script inside the deployment directory" + +# If CSI_PROW_HOSTPATH_CANARY is set (typically to "canary", but also +# "1.0-canary"), then all image versions are replaced with that +# version tag. +configvar CSI_PROW_HOSTPATH_CANARY "" "hostpath image" + +# The E2E testing can come from an arbitrary repo. The expectation is that +# the repo supports "go test ./test/e2e -args --storage.testdriver" (https://github.com/kubernetes/kubernetes/pull/72836) +# after setting KUBECONFIG. As a special case, if the repository is Kubernetes, +# then `make WHAT=test/e2e/e2e.test` is called first to ensure that +# all generated files are present. +# +# CSI_PROW_E2E_REPO=none disables E2E testing. 
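+#
+# Illustrative example: with the default CSI_PROW_KUBERNETES_VERSION=1.13.3,
+# the version suffix computed above is "1_13", so get_versioned_variable
+# resolves CSI_PROW_E2E_VERSION to ${CSI_PROW_E2E_VERSION_1_13} below; for a
+# version without a dedicated entry it falls back to
+# ${CSI_PROW_E2E_VERSION_LATEST}.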
+configvar CSI_PROW_E2E_VERSION_1_13 v1.14.0 "E2E version for Kubernetes 1.13.x" # we can't use the one from 1.13.x because it didn't have --storage.testdriver
+configvar CSI_PROW_E2E_VERSION_1_14 v1.14.0 "E2E version for Kubernetes 1.14.x"
+# TODO: add new CSI_PROW_E2E_VERSION entry for future Kubernetes releases
+configvar CSI_PROW_E2E_VERSION_LATEST master "E2E version for Kubernetes master" # testing against Kubernetes master is already tracking a moving target, so we might as well use a moving E2E version
+configvar CSI_PROW_E2E_REPO_LATEST https://github.com/kubernetes/kubernetes "E2E repo for Kubernetes >= 1.13.x" # currently the same for all versions
+configvar CSI_PROW_E2E_IMPORT_PATH_LATEST k8s.io/kubernetes "E2E package for Kubernetes >= 1.13.x" # currently the same for all versions
+configvar CSI_PROW_E2E_VERSION "$(get_versioned_variable CSI_PROW_E2E_VERSION "${csi_prow_kubernetes_version_suffix}")" "E2E version"
+configvar CSI_PROW_E2E_REPO "$(get_versioned_variable CSI_PROW_E2E_REPO "${csi_prow_kubernetes_version_suffix}")" "E2E repo"
+configvar CSI_PROW_E2E_IMPORT_PATH "$(get_versioned_variable CSI_PROW_E2E_IMPORT_PATH "${csi_prow_kubernetes_version_suffix}")" "E2E package"
+configvar CSI_PROW_E2E_TEST_PREFIX "External Storage" "common name of all E2E tests"
+
+# csi-sanity testing from the csi-test repo can be run against the installed
+# CSI driver. For this to work, deploying the driver must expose the Unix domain
+# socket csi.sock as a TCP service for use by the csi-sanity command, which runs outside
+# of the cluster. The alternative would have been to (cross-)compile csi-sanity
+# and install it inside the cluster, which is not necessarily easier.
+configvar CSI_PROW_SANITY_REPO https://github.com/kubernetes-csi/csi-test "csi-test repo"
+configvar CSI_PROW_SANITY_VERSION 5421d9f3c37be3b95b241b44a094a3db11bee789 "csi-test version" # latest master
+configvar CSI_PROW_SANITY_IMPORT_PATH github.com/kubernetes-csi/csi-test "csi-test package"
+configvar CSI_PROW_SANITY_SERVICE "hostpath-service" "Kubernetes TCP service name that exposes csi.sock"
+configvar CSI_PROW_SANITY_POD "csi-hostpathplugin-0" "Kubernetes pod with CSI driver"
+configvar CSI_PROW_SANITY_CONTAINER "hostpath" "Kubernetes container with CSI driver"
+
+# Each job can run one or more of the following tests, identified by
+# a single word:
+# - unit testing
+# - parallel excluding alpha features
+# - serial excluding alpha features
+# - parallel, only alpha features
+# - serial, only alpha features
+# - sanity
+#
+# Unknown or unsupported entries are ignored.
+#
+# Sanity testing with csi-sanity only covers the CSI driver itself and
+# thus only makes sense in repos which provide their own CSI
+# driver. Repos can enable sanity testing by setting
+# CSI_PROW_TESTS_SANITY=sanity.
+configvar CSI_PROW_TESTS "unit parallel serial parallel-alpha serial-alpha sanity" "tests to run"
+tests_enabled () {
+    local t1 t2
+    # We want word-splitting here, so ignore: Quote to prevent word splitting, or split robustly with mapfile or read -a.
+    # shellcheck disable=SC2206
+    local tests=(${CSI_PROW_TESTS})
+    for t1 in "$@"; do
+        for t2 in "${tests[@]}"; do
+            if [ "$t1" = "$t2" ]; then
+                return
+            fi
+        done
+    done
+    return 1
+}
+sanity_enabled () {
+    [ "${CSI_PROW_TESTS_SANITY}" = "sanity" ] && tests_enabled "sanity"
+}
+tests_need_kind () {
+    tests_enabled "parallel" "serial" "serial-alpha" "parallel-alpha" ||
+        sanity_enabled
+}
+tests_need_non_alpha_cluster () {
+    tests_enabled "parallel" "serial" ||
+        sanity_enabled
+}
+tests_need_alpha_cluster () {
+    tests_enabled "parallel-alpha" "serial-alpha"
+}
+
+
+# Serial vs. parallel is always determined by these regular expressions.
+# Individual regular expressions are separated by spaces for readability
+# and expected to not contain spaces. Use dots instead. The complete
+# regex for Ginkgo will be created by joining the individual terms.
+configvar CSI_PROW_E2E_SERIAL '\[Serial\] \[Disruptive\]' "tags for serial E2E tests"
+regex_join () {
+    echo "$@" | sed -e 's/ */|/g' -e 's/^|*//' -e 's/|*$//' -e 's/^$/this-matches-nothing/g'
+}
+
+# Which tests are alpha depends on the Kubernetes version. We could
+# use the same E2E test for all Kubernetes versions. This would have
+# the advantage that new tests can be applied to older versions
+# without having to backport tests.
+#
+# But the feature tag gets removed from E2E tests when the corresponding
+# feature becomes beta, so we would have to track which tests were
+# alpha in previous Kubernetes releases. This was considered too
+# error prone. Therefore we use E2E tests that match the Kubernetes
+# version that is getting tested.
+#
+# However, for 1.13.x testing we have to use the E2E tests from 1.14
+# because 1.13 didn't have --storage.testdriver yet, so for that (and only
+# that version) we have to define alpha tests differently.
+configvar CSI_PROW_E2E_ALPHA_1_13 '\[Feature: \[Testpattern:.Dynamic.PV..block.volmode.\] should.create.and.delete.block.persistent.volumes' "alpha tests for Kubernetes 1.13" # Raw block was an alpha feature in 1.13.
+configvar CSI_PROW_E2E_ALPHA_LATEST '\[Feature:' "alpha tests for Kubernetes >= 1.14" # there's no need to update this, adding a new case for CSI_PROW_E2E for a new Kubernetes is enough
+configvar CSI_PROW_E2E_ALPHA "$(get_versioned_variable CSI_PROW_E2E_ALPHA "${csi_prow_kubernetes_version_suffix}")" "alpha tests"
+
+# After the parallel E2E test without alpha features, a test cluster
+# with alpha features is brought up and tests that were previously
+# disabled are run. The alpha gates in each release have to be listed
+# explicitly. If none are set (= variable empty), alpha testing
+# is skipped.
+#
+# Testing against "latest" Kubernetes is problematic because some alpha
+# features which used to work might stop working or change their behavior
+# such that the current tests no longer pass. If that happens,
+# kubernetes-csi components must be updated, either by disabling
+# the failing test for "latest" or by updating the test and not running
+# it anymore for older releases.
+configvar CSI_PROW_E2E_ALPHA_GATES_1_13 'VolumeSnapshotDataSource=true,BlockVolume=true,CSIBlockVolume=true' "alpha feature gates for Kubernetes 1.13"
+configvar CSI_PROW_E2E_ALPHA_GATES_1_14 'VolumeSnapshotDataSource=true,ExpandCSIVolumes=true' "alpha feature gates for Kubernetes 1.14"
+# TODO: add new CSI_PROW_ALPHA_GATES_xxx entry for future Kubernetes releases and
+# add new gates to CSI_PROW_E2E_ALPHA_GATES_LATEST.
+configvar CSI_PROW_E2E_ALPHA_GATES_LATEST 'VolumeSnapshotDataSource=true,ExpandCSIVolumes=true' "alpha feature gates for latest Kubernetes" +configvar CSI_PROW_E2E_ALPHA_GATES "$(get_versioned_variable CSI_PROW_E2E_ALPHA_GATES "${csi_prow_kubernetes_version_suffix}")" "alpha E2E feature gates" + +# Some tests are known to be unusable in a KinD cluster. For example, +# stopping kubelet with "ssh systemctl stop kubelet" simply +# doesn't work. Such tests should be written in a way that they verify +# whether they can run with the current cluster provider, but until +# they are, we filter them out by name. Like the other test selection +# variables, this is again a space separated list of regular expressions. +configvar CSI_PROW_E2E_SKIP 'while.kubelet.is.down.*Disruptive' "tests that need to be skipped" + +# This is the directory for additional result files. Usually set by Prow, but +# if not (for example, when invoking manually) it defaults to the work directory. +configvar ARTIFACTS "${CSI_PROW_WORK}/artifacts" "artifacts" +mkdir -p "${ARTIFACTS}" + +run () { + echo "$(date) $(go version | sed -e 's/.*version \(go[^ ]*\).*/\1/') $(if [ "$(pwd)" != "${REPO_DIR}" ]; then pwd; fi)\$" "$@" >&2 + "$@" +} + +info () { + echo >&2 INFO: "$@" +} + +warn () { + echo >&2 WARNING: "$@" +} + +die () { + echo >&2 ERROR: "$@" + exit 1 +} + +# For additional tools. +CSI_PROW_BIN="${CSI_PROW_WORK}/bin" +mkdir -p "${CSI_PROW_BIN}" +PATH="${CSI_PROW_BIN}:$PATH" + +# Ensure that PATH has the desired version of the Go tools, then run command given as argument. +# Empty parameter uses the already installed Go. In Prow, that version is kept up-to-date by +# bumping the container image regularly. +run_with_go () { + local version + version="$1" + shift + + if ! [ "$version" ] || go version 2>/dev/null | grep -q "go$version"; then + run "$@" + else + if ! [ -d "${CSI_PROW_WORK}/go-$version" ]; then + run curl --fail --location "https://dl.google.com/go/go$version.linux-amd64.tar.gz" | tar -C "${CSI_PROW_WORK}" -zxf - || die "installation of Go $version failed" + mv "${CSI_PROW_WORK}/go" "${CSI_PROW_WORK}/go-$version" + fi + PATH="${CSI_PROW_WORK}/go-$version/bin:$PATH" run "$@" + fi +} + +# Ensure that we have the desired version of kind. +install_kind () { + if kind --version 2>/dev/null | grep -q " ${CSI_PROW_KIND_VERSION}$"; then + return + fi + if run curl --fail --location -o "${CSI_PROW_WORK}/bin/kind" "https://github.com/kubernetes-sigs/kind/releases/download/${CSI_PROW_KIND_VERSION}/kind-linux-amd64"; then + chmod u+x "${CSI_PROW_WORK}/bin/kind" + else + git_checkout https://github.com/kubernetes-sigs/kind "$GOPATH/src/sigs.k8s.io/kind" "${CSI_PROW_KIND_VERSION}" --depth=1 && + run_with_go "${CSI_PROW_GO_VERSION_KIND}" go build -o "${CSI_PROW_WORK}/bin/kind" sigs.k8s.io/kind + fi +} + +# Ensure that we have the desired version of the ginkgo test runner. +install_ginkgo () { + # CSI_PROW_GINKGO_VERSION contains the tag with v prefix, the command line output does not. + if [ "v$(ginkgo version 2>/dev/null | sed -e 's/.* //')" = "${CSI_PROW_GINKGO_VERSION}" ]; then + return + fi + git_checkout https://github.com/onsi/ginkgo "$GOPATH/src/github.com/onsi/ginkgo" "${CSI_PROW_GINKGO_VERSION}" --depth=1 && + # We have to get dependencies and hence can't call just "go build". 
+ run_with_go "${CSI_PROW_GO_VERSION_GINKGO}" go get github.com/onsi/ginkgo/ginkgo || die "building ginkgo failed" && + mv "$GOPATH/bin/ginkgo" "${CSI_PROW_BIN}" +} + +# This checks out a repo ("https://github.com/kubernetes/kubernetes") +# in a certain location ("$GOPATH/src/k8s.io/kubernetes") at +# a certain revision (a hex commit hash, v1.13.1, master). It's okay +# for that directory to exist already. +git_checkout () { + local repo path revision + repo="$1" + shift + path="$1" + shift + revision="$1" + shift + + mkdir -p "$path" + if ! [ -d "$path/.git" ]; then + run git init "$path" + fi + if (cd "$path" && run git fetch "$@" "$repo" "$revision"); then + (cd "$path" && run git checkout FETCH_HEAD) || die "checking out $repo $revision failed" + else + # Might have been because fetching by revision is not + # supported by GitHub (https://github.com/isaacs/github/issues/436). + # Fall back to fetching everything. + (cd "$path" && run git fetch "$repo" '+refs/heads/*:refs/remotes/csiprow/heads/*' '+refs/tags/*:refs/tags/*') || die "fetching $repo failed" + (cd "$path" && run git checkout "$revision") || die "checking out $repo $revision failed" + fi + # This is useful for local testing or when switching between different revisions in the same + # repo. + (cd "$path" && run git clean -fdx) || die "failed to clean $path" +} + +list_gates () ( + set -f; IFS=',' + # Ignore: Double quote to prevent globbing and word splitting. + # shellcheck disable=SC2086 + set -- $1 + while [ "$1" ]; do + # Ignore: See if you can use ${variable//search/replace} instead. + # shellcheck disable=SC2001 + echo "$1" | sed -e 's/ *\([^ =]*\) *= *\([^ ]*\) */ \1: \2/' + shift + done +) + +go_version_for_kubernetes () ( + local path="$1" + local version="$2" + local go_version + + # We use the minimal Go version specified for each K8S release (= minimum_go_version in hack/lib/golang.sh). + # More recent versions might also work, but we don't want to count on that. + go_version="$(grep minimum_go_version= "$path/hack/lib/golang.sh" | sed -e 's/.*=go//')" + if ! [ "$go_version" ]; then + die "Unable to determine Go version for Kubernetes $version from hack/lib/golang.sh." + fi + echo "$go_version" +) + +csi_prow_kind_have_kubernetes=false +# Brings up a Kubernetes cluster and sets KUBECONFIG. +# Accepts additional feature gates in the form gate1=true|false,gate2=... +start_cluster () { + local image gates + gates="$1" + + if kind get clusters | grep -q csi-prow; then + run kind delete cluster --name=csi-prow || die "kind delete failed" + fi + + # Build from source? + if [[ "${CSI_PROW_KUBERNETES_VERSION}" =~ ^release-|^latest$ ]]; then + if ! ${csi_prow_kind_have_kubernetes}; then + local version="${CSI_PROW_KUBERNETES_VERSION}" + if [ "$version" = "latest" ]; then + version=master + fi + git_checkout https://github.com/kubernetes/kubernetes "$GOPATH/src/k8s.io/kubernetes" "$version" --depth=1 || die "checking out Kubernetes $version failed" + + # "kind build" and/or the Kubernetes build rules need at least one tag, which we don't have + # when doing a shallow fetch. Therefore we fake one: + # release-1.12 -> v1.12.0-release..csiprow + # latest or -> v1.14.0-.csiprow + case "${CSI_PROW_KUBERNETES_VERSION}" in + release-*) + # Ignore: See if you can use ${variable//search/replace} instead. + # shellcheck disable=SC2001 + tag="$(echo "${CSI_PROW_KUBERNETES_VERSION}" | sed -e 's/release-\(.*\)/v\1.0-release./')";; + *) + # We have to make something up. v1.0.0 did not work for some reasons. 
+ tag="v1.14.0-";; + esac + tag="$tag$(cd "$GOPATH/src/k8s.io/kubernetes" && git rev-list --abbrev-commit HEAD).csiprow" + (cd "$GOPATH/src/k8s.io/kubernetes" && run git tag -f "$tag") || die "git tag failed" + go_version="$(go_version_for_kubernetes "$GOPATH/src/k8s.io/kubernetes" "$version")" || die "cannot proceed without knowing Go version for Kubernetes" + run_with_go "$go_version" kind build node-image --type bazel --image csiprow/node:latest --kube-root "$GOPATH/src/k8s.io/kubernetes" || die "'kind build node-image' failed" + csi_prow_kind_have_kubernetes=true + fi + image="csiprow/node:latest" + else + image="kindest/node:v${CSI_PROW_KUBERNETES_VERSION}" + fi + cat >"${CSI_PROW_WORK}/kind-config.yaml" </dev/null; wait) + info "For container output see job artifacts." + die "deploying the CSI driver with ${deploy} failed" + fi +} + +# collect logs and cluster status (like the version of all components, Kubernetes version, test version) +collect_cluster_info () { + cat <>"${ARTIFACTS}/$pod/$container.log" & + echo "$!" + done + done +} + +# Makes the E2E test suite binary available as "${CSI_PROW_WORK}/e2e.test". +install_e2e () { + if [ -e "${CSI_PROW_WORK}/e2e.test" ]; then + return + fi + + git_checkout "${CSI_PROW_E2E_REPO}" "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_E2E_VERSION}" --depth=1 && + if [ "${CSI_PROW_E2E_IMPORT_PATH}" = "k8s.io/kubernetes" ]; then + go_version="${CSI_PROW_GO_VERSION_E2E:-$(go_version_for_kubernetes "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_E2E_VERSION}")}" && + run_with_go "$go_version" make WHAT=test/e2e/e2e.test "-C${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" && + ln -s "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}/_output/bin/e2e.test" "${CSI_PROW_WORK}" + else + run_with_go "${CSI_PROW_GO_VERSION_E2E}" go test -c -o "${CSI_PROW_WORK}/e2e.test" "${CSI_PROW_E2E_IMPORT_PATH}/test/e2e" + fi +} + +# Makes the csi-sanity test suite binary available as +# "${CSI_PROW_WORK}/csi-sanity". +install_sanity () ( + if [ -e "${CSI_PROW_WORK}/csi-sanity" ]; then + return + fi + + git_checkout "${CSI_PROW_SANITY_REPO}" "${GOPATH}/src/${CSI_PROW_SANITY_IMPORT_PATH}" "${CSI_PROW_SANITY_VERSION}" --depth=1 || die "checking out csi-sanity failed" + run_with_go "${CSI_PROW_GO_VERSION_SANITY}" go test -c -o "${CSI_PROW_WORK}/csi-sanity" "${CSI_PROW_SANITY_IMPORT_PATH}/cmd/csi-sanity" || die "building csi-sanity failed" +) + +# Whether the hostpath driver supports raw block devices depends on which version +# we are testing. It would be much nicer if we could determine that by querying the +# installed driver's capabilities instead of having to do a version check. +hostpath_supports_block () { + local result + result="$(docker exec csi-prow-control-plane docker image ls --format='{{.Repository}} {{.Tag}} {{.ID}}' | grep hostpath | while read -r repo tag id; do + if [ "$tag" == "v1.0.1" ]; then + # Old version because the revision label is missing: didn't have support yet. + echo "false" + return + fi + done)" + # If not set, then it must be a newer driver with support. + echo "${result:-true}" +} + +# Captures pod output while running some other command. +run_with_loggers () ( + loggers=$(start_loggers -f) + trap 'kill $loggers' EXIT + + run "$@" +) + +# Invokes the filter-junit.go tool. +run_filter_junit () { + run_with_go "${CSI_PROW_GO_VERSION_BUILD}" go run "${RELEASE_TOOLS_ROOT}/filter-junit.go" "$@" +} + +# Rename, merge and filter JUnit files. 
Necessary in case that we run the E2E suite again
+# and to avoid the large number of "skipped" tests that we get from using
+# the full Kubernetes E2E testsuite while only running a few tests.
+move_junit () {
+    local name="$1"
+    if ls "${ARTIFACTS}"/junit_[0-9]*.xml 2>/dev/null >/dev/null; then
+        run_filter_junit -t="${CSI_PROW_E2E_TEST_PREFIX}" -o "${ARTIFACTS}/junit_${name}.xml" "${ARTIFACTS}"/junit_[0-9]*.xml && rm -f "${ARTIFACTS}"/junit_[0-9]*.xml
+    fi
+}
+
+# Runs the E2E test suite in a sub-shell.
+run_e2e () (
+    name="$1"
+    shift
+
+    install_e2e || die "building e2e.test failed"
+    install_ginkgo || die "installing ginkgo failed"
+
+    # TODO (?): multi-node cluster (depends on https://github.com/kubernetes-csi/csi-driver-host-path/pull/14).
+    # When running on a multi-node cluster, we need to figure out where the
+    # hostpath driver was deployed and set ClientNodeName accordingly.
+
+    # The content of this file depends on both what the E2E suite expects and
+    # what the installed hostpath driver supports. Generating it here seems
+    # prone to breakage, but it is uncertain where a better place might be.
+    cat >"${CSI_PROW_WORK}/hostpath-test-driver.yaml" <"${CSI_PROW_WORK}/mkdir_in_pod.sh" <"${CSI_PROW_WORK}/rmdir_in_pod.sh" </\>/g' -e 's/\x1B...//g'
+}
+
+# The "make test" output starts each test with "### <test-target>:"
+# and then ends when the next test starts or with "make: ***
+# [<test-target>] Error 1" when there was a failure. Here we read each
+# line of that output, split it up into individual tests and generate
+# a make-test.xml file in JUnit format.
+make_test_to_junit () {
+    local ret out testname testoutput
+    ret=0
+    # Plain make-test.xml was not delivered as text/xml by the web
+    # server and ignored by spyglass. It seems that the name has to
+    # match junit*.xml.
+    out="${ARTIFACTS}/junit_make_test.xml"
+    testname=
+    echo "<testsuite>" >>"$out"
+
+    while IFS= read -r line; do
+        echo "$line" # pass through
+        if echo "$line" | grep -q "^### [^ ]*:$"; then
+            if [ "$testname" ]; then
+                # previous test successful
+                echo "    </system-out>" >>"$out"
+                echo "  </testcase>" >>"$out"
+            fi
+            # Ignore: See if you can use ${variable//search/replace} instead.
+            # shellcheck disable=SC2001
+            #
+            # start new test
+            testname="$(echo "$line" | sed -e 's/^### \([^ ]*\):$/\1/')"
+            testoutput=
+            echo "  <testcase name=\"$testname\">" >>"$out"
+            echo "    <system-out>" >>"$out"
+        elif echo "$line" | grep -q '^make: .*Error [0-9]*$'; then
+            if [ "$testname" ]; then
+                # Ignore: Consider using { cmd1; cmd2; } >> file instead of individual redirects.
+                # shellcheck disable=SC2129
+                #
+                # end test with failure
+                echo "    </system-out>" >>"$out"
+                # Include the same text as in <system-out> also in <failure>,
+                # because then it is easier to view in spyglass (shown directly
+                # instead of having to click through to stdout).
+                echo "    <failure>" >>"$out"
+                echo -n "$testoutput" | ascii_to_xml >>"$out"
+                echo "    </failure>" >>"$out"
+                echo "  </testcase>" >>"$out"
+            fi
+            # remember failure for exit code
+            ret=1
+            # not currently inside a test
+            testname=
+        else
+            if [ "$testname" ]; then
+                # Test output.
+                echo "$line" | ascii_to_xml >>"$out"
+                testoutput="$testoutput$line
+"
+            fi
+        fi
+    done
+    # if still in a test, close it now
+    if [ "$testname" ]; then
+        echo "    </system-out>" >>"$out"
+        echo "  </testcase>" >>"$out"
+    fi
+    echo "</testsuite>" >>"$out"
+
+    # this makes the error more visible in spyglass
+    if [ "$ret" -ne 0 ]; then
+        echo "ERROR: 'make test' failed"
+        return 1
+    fi
+}
+
+main () {
+    local images ret
+    ret=0
+
+    images=
+    if ${CSI_PROW_BUILD_JOB}; then
+        # A successful build is required for testing.
run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make all || die "'make all' failed"
+        # We don't want test failures to prevent E2E testing below, because the failure
+        # might have been minor or unavoidable, for example when experimenting with
+        # changes in "release-tools" in a PR (that fails the "is release-tools unmodified"
+        # test).
+        if tests_enabled "unit"; then
+            if ! run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make -k test 2>&1 | make_test_to_junit; then
+                warn "'make test' failed, proceeding anyway"
+                ret=1
+            fi
+        fi
+        # Required for E2E testing.
+        run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make container || die "'make container' failed"
+    fi
+
+    if tests_need_kind; then
+        install_kind || die "installing kind failed"
+
+        if ${CSI_PROW_BUILD_JOB}; then
+            cmds="$(grep '^\s*CMDS\s*=' Makefile | sed -e 's/\s*CMDS\s*=//')"
+            # Get the image that was just built (if any) from the
+            # top-level Makefile CMDS variable and set the
+            # deploy env variables for it. We also need to
+            # side-load those images into the cluster.
+            for i in $cmds; do
+                e=$(echo "$i" | tr '[:lower:]' '[:upper:]' | tr - _)
+                images="$images ${e}_REGISTRY=none ${e}_TAG=csiprow"
+
+                # We must avoid the tag "latest" because that implies
+                # always pulling the image
+                # (https://github.com/kubernetes-sigs/kind/issues/328).
+                docker tag "$i:latest" "$i:csiprow" || die "tagging the locally built container image for $i failed"
+            done
+
+            if [ -e deploy/kubernetes/rbac.yaml ]; then
+                # This is one of those components which has its own RBAC rules (like external-provisioner).
+                # We are testing a locally built image and also want to test with the current,
+                # potentially modified RBAC rules.
+                if [ "$(echo "$cmds" | wc -w)" != 1 ]; then
+                    die "ambiguous deploy/kubernetes/rbac.yaml: need exactly one command, got: $cmds"
+                fi
+                e=$(echo "$cmds" | tr '[:lower:]' '[:upper:]' | tr - _)
+                images="$images ${e}_RBAC=$(pwd)/deploy/kubernetes/rbac.yaml"
+            fi
+        fi
+
+        if tests_need_non_alpha_cluster; then
+            start_cluster || die "starting the non-alpha cluster failed"
+
+            # Installing the driver might be disabled.
+            if install_driver "$images"; then
+                collect_cluster_info
+
+                if sanity_enabled; then
+                    if ! run_sanity; then
+                        ret=1
+                    fi
+                fi
+
+                if tests_enabled "parallel"; then
+                    # Ignore: Double quote to prevent globbing and word splitting.
+                    # shellcheck disable=SC2086
+                    if ! run_e2e parallel ${CSI_PROW_GINKO_PARALLEL} \
+                        -focus="${CSI_PROW_E2E_TEST_PREFIX}" \
+                        -skip="$(regex_join "${CSI_PROW_E2E_SERIAL}" "${CSI_PROW_E2E_ALPHA}" "${CSI_PROW_E2E_SKIP}")"; then
+                        warn "E2E parallel failed"
+                        ret=1
+                    fi
+                fi
+
+                if tests_enabled "serial"; then
+                    if ! run_e2e serial \
+                        -focus="${CSI_PROW_E2E_TEST_PREFIX}.*($(regex_join "${CSI_PROW_E2E_SERIAL}"))" \
+                        -skip="$(regex_join "${CSI_PROW_E2E_ALPHA}" "${CSI_PROW_E2E_SKIP}")"; then
+                        warn "E2E serial failed"
+                        ret=1
+                    fi
+                fi
+            fi
+        fi
+
+        if tests_need_alpha_cluster && [ "${CSI_PROW_E2E_ALPHA_GATES}" ]; then
+            # Need to (re)create the cluster.
+            start_cluster "${CSI_PROW_E2E_ALPHA_GATES}" || die "starting alpha cluster failed"
+
+            # Installing the driver might be disabled.
+            if install_driver "$images"; then
+                collect_cluster_info
+
+                if tests_enabled "parallel-alpha"; then
+                    # Ignore: Double quote to prevent globbing and word splitting.
+                    # shellcheck disable=SC2086
+                    if !
run_e2e parallel-alpha ${CSI_PROW_GINKO_PARALLEL} \ + -focus="${CSI_PROW_E2E_TEST_PREFIX}.*($(regex_join "${CSI_PROW_E2E_ALPHA}"))" \ + -skip="$(regex_join "${CSI_PROW_E2E_SERIAL}" "${CSI_PROW_E2E_SKIP}")"; then + warn "E2E parallel alpha failed" + ret=1 + fi + fi + + if tests_enabled "serial-alpha"; then + if ! run_e2e serial-alpha \ + -focus="${CSI_PROW_E2E_TEST_PREFIX}.*(($(regex_join "${CSI_PROW_E2E_SERIAL}")).*($(regex_join "${CSI_PROW_E2E_ALPHA}"))|($(regex_join "${CSI_PROW_E2E_ALPHA}")).*($(regex_join "${CSI_PROW_E2E_SERIAL}")))" \ + -skip="$(regex_join "${CSI_PROW_E2E_SKIP}")"; then + warn "E2E serial alpha failed" + ret=1 + fi + fi + fi + fi + fi + + # Merge all junit files into one. This gets rid of duplicated "skipped" tests. + if ls "${ARTIFACTS}"/junit_*.xml 2>/dev/null >&2; then + run_filter_junit -o "${CSI_PROW_WORK}/junit_final.xml" "${ARTIFACTS}"/junit_*.xml && rm "${ARTIFACTS}"/junit_*.xml && mv "${CSI_PROW_WORK}/junit_final.xml" "${ARTIFACTS}" + fi + + return "$ret" +} diff --git a/release-tools/travis.yml b/release-tools/travis.yml index b5a360af8..5b0425226 100644 --- a/release-tools/travis.yml +++ b/release-tools/travis.yml @@ -4,7 +4,12 @@ services: - docker matrix: include: - - go: 1.11.1 + - go: 1.12.4 +before_script: +- mkdir -p bin +- wget https://github.com/golang/dep/releases/download/v0.5.1/dep-linux-amd64 -O bin/dep +- chmod u+x bin/dep +- export PATH=$PWD/bin:$PATH script: - make -k all test after_success: diff --git a/release-tools/util.sh b/release-tools/util.sh new file mode 100755 index 000000000..abeb1b2e1 --- /dev/null +++ b/release-tools/util.sh @@ -0,0 +1,148 @@ +#!/usr/bin/env bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +function kube::util::sourced_variable { + # Call this function to tell shellcheck that a variable is supposed to + # be used from other calling context. This helps quiet an "unused + # variable" warning from shellcheck and also document your code. + true +} + +kube::util::sortable_date() { + date "+%Y%m%d-%H%M%S" +} + +# arguments: target, item1, item2, item3, ... +# returns 0 if target is in the given items, 1 otherwise. +kube::util::array_contains() { + local search="$1" + local element + shift + for element; do + if [[ "${element}" == "${search}" ]]; then + return 0 + fi + done + return 1 +} + +# Example: kube::util::trap_add 'echo "in trap DEBUG"' DEBUG +# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal +kube::util::trap_add() { + local trap_add_cmd + trap_add_cmd=$1 + shift + + for trap_add_name in "$@"; do + local existing_cmd + local new_cmd + + # Grab the currently defined trap commands for this trap + existing_cmd=$(trap -p "${trap_add_name}" | awk -F"'" '{print $2}') + + if [[ -z "${existing_cmd}" ]]; then + new_cmd="${trap_add_cmd}" + else + new_cmd="${trap_add_cmd};${existing_cmd}" + fi + + # Assign the test. 
+
+kube::util::download_file() {
+  local -r url=$1
+  local -r destination_file=$2
+
+  rm "${destination_file}" &> /dev/null || true
+
+  for i in $(seq 5)
+  do
+    if ! curl -fsSL --retry 3 --keepalive-time 2 "${url}" -o "${destination_file}"; then
+      echo "Downloading ${url} failed. $((5-i)) retries left."
+      sleep 1
+    else
+      echo "Downloading ${url} succeeded"
+      return 0
+    fi
+  done
+  return 1
+}
+
+# Wait for background jobs to finish. Return with
+# an error status if any of the jobs failed.
+kube::util::wait-for-jobs() {
+  local fail=0
+  local job
+  for job in $(jobs -p); do
+    wait "${job}" || fail=$((fail + 1))
+  done
+  return ${fail}
+}
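For example (editor's sketch; the URLs and paths are placeholders), the two helpers above combine naturally for parallel downloads:

``` sh
#!/usr/bin/env bash
. release-tools/util.sh

# Start both downloads in the background, then wait for all of them.
kube::util::download_file "https://example.com/a.tar.gz" /tmp/a.tar.gz &
kube::util::download_file "https://example.com/b.tar.gz" /tmp/b.tar.gz &
if ! kube::util::wait-for-jobs; then
  # wait-for-jobs returns the number of failed jobs as its status.
  echo "one or more downloads failed" >&2
  exit 1
fi
```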
+ROOT="${1:-${TOOLS}/..}" + +# required version for this script, if not installed on the host we will +# use the official docker image instead. keep this in sync with SHELLCHECK_IMAGE +SHELLCHECK_VERSION="0.6.0" +# upstream shellcheck latest stable image as of January 10th, 2019 +SHELLCHECK_IMAGE="koalaman/shellcheck-alpine:v0.6.0@sha256:7d4d712a2686da99d37580b4e2f45eb658b74e4b01caf67c1099adc294b96b52" + +# fixed name for the shellcheck docker container so we can reliably clean it up +SHELLCHECK_CONTAINER="k8s-shellcheck" + +# disabled lints +disabled=( + # this lint disallows non-constant source, which we use extensively without + # any known bugs + 1090 + # this lint prefers command -v to which, they are not the same + 2230 +) +# comma separate for passing to shellcheck +join_by() { + local IFS="$1"; + shift; + echo "$*"; +} +SHELLCHECK_DISABLED="$(join_by , "${disabled[@]}")" +readonly SHELLCHECK_DISABLED + +# creates the shellcheck container for later use +create_container () { + # TODO(bentheelder): this is a performance hack, we create the container with + # a sleep MAX_INT32 so that it is effectively paused. + # We then repeatedly exec to it to run each shellcheck, and later rm it when + # we're done. + # This is incredibly much faster than creating a container for each shellcheck + # call ... + docker run --name "${SHELLCHECK_CONTAINER}" -d --rm -v "${ROOT}:${ROOT}" -w "${ROOT}" --entrypoint="sleep" "${SHELLCHECK_IMAGE}" 2147483647 +} +# removes the shellcheck container +remove_container () { + docker rm -f "${SHELLCHECK_CONTAINER}" &> /dev/null || true +} + +# ensure we're linting the source tree +cd "${ROOT}" + +# find all shell scripts excluding ./_*, ./.git/*, ./vendor*, +# and anything git-ignored +all_shell_scripts=() +while IFS=$'\n' read -r script; + do git check-ignore -q "$script" || all_shell_scripts+=("$script"); +done < <(find . -name "*.sh" \ + -not \( \ + -path ./_\* -o \ + -path ./.git\* -o \ + -path ./vendor\* \ + \)) + +# detect if the host machine has the required shellcheck version installed +# if so, we will use that instead. +HAVE_SHELLCHECK=false +if which shellcheck &>/dev/null; then + detected_version="$(shellcheck --version | grep 'version: .*')" + if [[ "${detected_version}" = "version: ${SHELLCHECK_VERSION}" ]]; then + HAVE_SHELLCHECK=true + fi +fi + +# tell the user which we've selected and possibly set up the container +if ${HAVE_SHELLCHECK}; then + echo "Using host shellcheck ${SHELLCHECK_VERSION} binary." +else + echo "Using shellcheck ${SHELLCHECK_VERSION} docker image." + # remove any previous container, ensure we will attempt to cleanup on exit, + # and create the container + remove_container + kube::util::trap_add 'remove_container' EXIT + if ! output="$(create_container 2>&1)"; then + { + echo "Failed to create shellcheck container with output: " + echo "" + echo "${output}" + } >&2 + exit 1 + fi +fi + +# lint each script, tracking failures +errors=() +for f in "${all_shell_scripts[@]}"; do + set +o errexit + if ${HAVE_SHELLCHECK}; then + failedLint=$(shellcheck --exclude="${SHELLCHECK_DISABLED}" "${f}") + else + failedLint=$(docker exec -t ${SHELLCHECK_CONTAINER} \ + shellcheck --exclude="${SHELLCHECK_DISABLED}" "${f}") + fi + set -o errexit + if [[ -n "${failedLint}" ]]; then + errors+=( "${failedLint}" ) + fi +done + +# Check to be sure all the packages that should pass lint are. +if [ ${#errors[@]} -eq 0 ]; then + echo 'Congratulations! All shell files are passing lint.' 
diff --git a/release-tools/verify-subtree.sh b/release-tools/verify-subtree.sh
index ce8375fc9..f04a9fa26 100755
--- a/release-tools/verify-subtree.sh
+++ b/release-tools/verify-subtree.sh
@@ -30,7 +30,7 @@ if [ ! "$DIR" ]; then
     exit 1
 fi
 
-REV=$(git log -n1 --format=format:%H --no-merges -- "$DIR")
+REV=$(git log -n1 --remove-empty --format=format:%H --no-merges -- "$DIR")
 if [ "$REV" ]; then
     echo "Directory '$DIR' contains non-upstream changes:"
     echo
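Editor's note on the `--remove-empty` change: per git's documentation, it stops history traversal once the given path disappears from the tree, so the check no longer walks commits that predate the subtree. Assuming `verify-subtree.sh` takes the directory as its first argument (the `$DIR` handling above suggests it does), a typical invocation would be:

``` sh
# Prints a commit list and fails if release-tools contains changes that
# were not made in the upstream csi-release-tools repository.
./release-tools/verify-subtree.sh release-tools
```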