diff --git a/.travis.yml b/.travis.yml index 5a8ac0065..49c9b093a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,6 +8,7 @@ addons: packages: - realpath - ruby + - socat services: - docker @@ -26,6 +27,7 @@ env: - TEST_COVERAGE=stdout - GO_METALINTER_THREADS=1 - GO_COVER_DIR=_output + - HELM_VERSION=v2.16.5 - VM_DRIVER=none - MINIKUBE_VERSION=v1.6.0 - CHANGE_MINIKUBE_NONE_USER=true @@ -102,6 +104,11 @@ jobs: # - Travis Arm64 CI job runs inside unprivileged LXD which blocks # launching minikube test environment - travis_terminate 0 # deploy only on x86 + - name: cephcsi helm charts with kube 1.17.0 + script: + - scripts/skip-doc-change.sh || travis_terminate 0; + - make image-cephcsi || travis_terminate 1; + - scripts/travis-helmtest.sh v1.17.0 || travis_terminate 1; deploy: - provider: script diff --git a/Makefile b/Makefile index d61c58270..9cf04fc30 100644 --- a/Makefile +++ b/Makefile @@ -58,7 +58,7 @@ static-check: ./scripts/gosec.sh func-test: - go test github.com/ceph/ceph-csi/e2e $(TESTOPTIONS) + go test -mod=vendor github.com/ceph/ceph-csi/e2e $(TESTOPTIONS) .PHONY: cephcsi cephcsi: diff --git a/e2e/README.md b/e2e/README.md index 144835e2f..8ee61f9a8 100644 --- a/e2e/README.md +++ b/e2e/README.md @@ -92,7 +92,7 @@ cluster or you can pass `kubeconfig`flag while running tests. Functional tests are run by the `go test` command. 
```console - $go test ./e2e/ -timeout=20m -v + $go test ./e2e/ -timeout=20m -v -mod=vendor ``` Functional tests can be invoked by `make` command diff --git a/e2e/cephfs.go b/e2e/cephfs.go index 4a26f3892..8c949d069 100644 --- a/e2e/cephfs.go +++ b/e2e/cephfs.go @@ -129,6 +129,8 @@ var _ = Describe("cephfs", func() { AfterEach(func() { if CurrentGinkgoTestDescription().Failed { + // log pods created by helm chart + logsCSIPods("app=ceph-csi-cephfs", c) // log provisoner logsCSIPods("app=csi-cephfsplugin-provisioner", c) // log node plugin diff --git a/e2e/deploy-vault.go b/e2e/deploy-vault.go index fcaa471bf..c01310364 100644 --- a/e2e/deploy-vault.go +++ b/e2e/deploy-vault.go @@ -19,6 +19,11 @@ var ( ) func deployVault(c kubernetes.Interface, deployTimeout int) { + // hack to make helm E2E pass as helm charts creates this configmap as part + // of cephcsi deployment + _, err := framework.RunKubectl("delete", "cm", "ceph-csi-encryption-kms-config", "--namespace", cephCSINamespace, "--ignore-not-found=true") + Expect(err).Should(BeNil()) + createORDeleteVault("create") opt := metav1.ListOptions{ LabelSelector: "app=vault", diff --git a/e2e/rbd.go b/e2e/rbd.go index 577a00311..cd548a76b 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -131,6 +131,8 @@ var _ = Describe("RBD", func() { AfterEach(func() { if CurrentGinkgoTestDescription().Failed { + // log pods created by helm chart + logsCSIPods("app=ceph-csi-rbd", c) // log provisoner logsCSIPods("app=csi-rbdplugin-provisioner", c) // log node plugin @@ -382,7 +384,7 @@ var _ = Describe("RBD", func() { } // delete rbd nodeplugin pods - err = deletePodWithLabel("app=csi-rbdplugin") + err = deletePodWithLabel("app=csi-rbdplugin", cephCSINamespace, false) if err != nil { Fail(err.Error()) } diff --git a/e2e/utils.go b/e2e/utils.go index e58f8176e..99ad3536a 100644 --- a/e2e/utils.go +++ b/e2e/utils.go @@ -775,8 +775,8 @@ func validateEncryptedPVCAndAppBinding(pvcPath, appPath, kms string, f *framewor } } -func 
deletePodWithLabel(label string) error { - _, err := framework.RunKubectl("delete", "po", "-l", label) +func deletePodWithLabel(label, ns string, skipNotFound bool) error { + _, err := framework.RunKubectl("delete", "po", "-l", label, fmt.Sprintf("--ignore-not-found=%t", skipNotFound), fmt.Sprintf("--namespace=%s", ns)) if err != nil { e2elog.Logf("failed to delete pod %v", err) } diff --git a/scripts/install-helm.sh b/scripts/install-helm.sh new file mode 100755 index 000000000..0c4eff357 --- /dev/null +++ b/scripts/install-helm.sh @@ -0,0 +1,193 @@ +#!/bin/bash -e + +#Based on ideas from https://github.com/rook/rook/blob/master/tests/scripts/helm.sh + +TEMP="/tmp/cephcsi-helm-test" + +HELM="helm" +HELM_VERSION=${HELM_VERSION:-"v2.16.5"} +arch="${ARCH:-}" +CEPHFS_CHART_NAME="ceph-csi-cephfs" +RBD_CHART_NAME="ceph-csi-rbd" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +DEPLOY_TIMEOUT=600 + +function check_deployment_status() { + LABEL=$1 + NAMESPACE=$2 + echo "Checking Deployment status for label $LABEL in Namespace $NAMESPACE" + for ((retry = 0; retry <= DEPLOY_TIMEOUT; retry = retry + 5)); do + total_replicas=$(kubectl get deployment -l "$LABEL" -n "$NAMESPACE" -o jsonpath='{.items[0].status.replicas}') + + ready_replicas=$(kubectl get deployment -l "$LABEL" -n "$NAMESPACE" -o jsonpath='{.items[0].status.readyReplicas}') + if [ "$total_replicas" != "$ready_replicas" ]; then + echo "Total replicas $total_replicas is not equal to ready count $ready_replicas" + kubectl get deployment -l "$LABEL" -n "$NAMESPACE" + sleep 10 + else + echo "Total replicas $total_replicas is equal to ready count $ready_replicas" + break + fi + done + + if [ "$retry" -gt "$DEPLOY_TIMEOUT" ]; then + echo "[Timeout] Failed to get deployment" + exit 1 + fi +} + +function check_daemonset_status() { + LABEL=$1 + NAMESPACE=$2 + echo "Checking Daemonset status for label $LABEL in Namespace $NAMESPACE" + for ((retry = 0; retry <= DEPLOY_TIMEOUT; retry = retry + 5)); 
do + total_replicas=$(kubectl get daemonset -l "$LABEL" -n "$NAMESPACE" -o jsonpath='{.items[0].status.numberAvailable}') + + ready_replicas=$(kubectl get daemonset -l "$LABEL" -n "$NAMESPACE" -o jsonpath='{.items[0].status.numberReady}') + if [ "$total_replicas" != "$ready_replicas" ]; then + echo "Total replicas $total_replicas is not equal to ready count $ready_replicas" + kubectl get daemonset -l "$LABEL" -n "$NAMESPACE" + sleep 10 + else + echo "Total replicas $total_replicas is equal to ready count $ready_replicas" + break + + fi + done + + if [ "$retry" -gt "$DEPLOY_TIMEOUT" ]; then + echo "[Timeout] Failed to get daemonset" + exit 1 + fi +} + +detectArch() { + case "$(uname -m)" in + "x86_64" | "amd64") + arch="amd64" + ;; + "aarch64") + arch="arm64" + ;; + "i386") + arch="i386" + ;; + *) + echo "Couldn't translate 'uname -m' output to an available arch." + echo "Try setting ARCH environment variable to your system arch:" + echo "amd64, x86_64. aarch64, i386" + exit 1 + ;; + esac +} + +install() { + if ! 
helm_loc="$(type -p "helm")" || [[ -z ${helm_loc} ]]; then + # Download and unpack helm + local dist + dist="$(uname -s)" + mkdir -p ${TEMP} + # shellcheck disable=SC2021 + dist=$(echo "${dist}" | tr "[A-Z]" "[a-z]") + wget "https://storage.googleapis.com/kubernetes-helm/helm-${HELM_VERSION}-${dist}-${arch}.tar.gz" -O "${TEMP}/helm.tar.gz" + tar -C "${TEMP}" -zxvf "${TEMP}/helm.tar.gz" + fi + + # set up RBAC for helm + kubectl --namespace kube-system create sa tiller + kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller + + # Init helm + "${HELM}" init --service-account tiller --output yaml | + sed 's@apiVersion: extensions/v1beta1@apiVersion: apps/v1@' | + sed 's@strategy: {}@selector: {"matchLabels": {"app": "helm", "name": "tiller"}}@' | kubectl apply -f - + + kubectl -n kube-system patch deploy/tiller-deploy -p '{"spec": {"template": {"spec": {"serviceAccountName": "tiller"}}}}' + + sleep 5 + + helm_ready=$(kubectl get pods -l app=helm -n kube-system -o jsonpath='{.items[0].status.containerStatuses[0].ready}') + INC=0 + until [[ "${helm_ready}" == "true" || $INC -gt 20 ]]; do + sleep 10 + ((++INC)) + helm_ready=$(kubectl get pods -l app=helm -n kube-system -o jsonpath='{.items[0].status.containerStatuses[0].ready}') + echo "helm pod status: ${helm_ready}" + done + + if [ "${helm_ready}" != "true" ]; then + echo "Helm init not successful" + kubectl get pods -l app=helm -n kube-system + kubectl logs -lapp=helm --all-containers=true -nkube-system + exit 1 + fi + + echo "Helm init successful" +} + +install_cephcsi_helm_charts() { + NAMESPACE=$1 + if [ -z "$NAMESPACE" ]; then + NAMESPACE="default" + fi + # install ceph-csi-cephfs and ceph-csi-rbd charts + "${HELM}" install "${SCRIPT_DIR}"/../charts/ceph-csi-cephfs --name ${CEPHFS_CHART_NAME} --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-cephfsplugin-provisioner --set nodeplugin.fullnameOverride=csi-cephfsplugin --set configMapName=ceph-csi-config 
--set provisioner.podSecurityPolicy.enabled=true --set nodeplugin.podSecurityPolicy.enabled=true + + check_deployment_status app=ceph-csi-cephfs ${NAMESPACE} + check_daemonset_status app=ceph-csi-cephfs ${NAMESPACE} + + # deleting configmap as a workaround to avoid configmap already present + # issue when installing ceph-csi-rbd + kubectl delete cm ceph-csi-config --namespace ${NAMESPACE} + "${HELM}" install "${SCRIPT_DIR}"/../charts/ceph-csi-rbd --name ${RBD_CHART_NAME} --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-rbdplugin-provisioner --set nodeplugin.fullnameOverride=csi-rbdplugin --set configMapName=ceph-csi-config --set provisioner.podSecurityPolicy.enabled=true --set nodeplugin.podSecurityPolicy.enabled=true + + check_deployment_status app=ceph-csi-rbd ${NAMESPACE} + check_daemonset_status app=ceph-csi-rbd ${NAMESPACE} + +} + +cleanup_cephcsi_helm_charts() { + "${HELM}" del --purge ${CEPHFS_CHART_NAME} + "${HELM}" del --purge ${RBD_CHART_NAME} +} + +helm_reset() { + "${HELM}" reset + # shellcheck disable=SC2021 + rm -rf "${TEMP}" + kubectl --namespace kube-system delete sa tiller + kubectl delete clusterrolebinding tiller +} + +if [ -z "${arch}" ]; then + detectArch +fi + +if ! 
helm_loc="$(type -p "helm")" || [[ -z ${helm_loc} ]]; then + dist="$(uname -s)" + # shellcheck disable=SC2021 + dist=$(echo "${dist}" | tr "[A-Z]" "[a-z]") + HELM="${TEMP}/${dist}-${arch}/helm" +fi + +case "${1:-}" in +up) + install + ;; +clean) + helm_reset + ;; +install-cephcsi) + install_cephcsi_helm_charts "$2" + ;; +cleanup-cephcsi) + cleanup_cephcsi_helm_charts + ;; +*) + echo "usage:" >&2 + echo " $0 up" >&2 + echo " $0 clean" >&2 + echo " $0 install-cephcsi" >&2 + echo " $0 cleanup-cephcsi" >&2 + ;; +esac diff --git a/scripts/travis-functest.sh b/scripts/travis-functest.sh index f0efd2762..07ae2022c 100755 --- a/scripts/travis-functest.sh +++ b/scripts/travis-functest.sh @@ -11,6 +11,6 @@ sudo scripts/minikube.sh cephcsi sudo scripts/minikube.sh k8s-sidecar sudo chown -R travis: "$HOME"/.minikube /usr/local/bin/kubectl # functional tests -go test github.com/ceph/ceph-csi/e2e --deploy-timeout=10 -timeout=30m --cephcsi-namespace=cephcsi-e2e-$RANDOM -v +go test github.com/ceph/ceph-csi/e2e --deploy-timeout=10 -timeout=30m --cephcsi-namespace=cephcsi-e2e-$RANDOM -v -mod=vendor sudo scripts/minikube.sh clean diff --git a/scripts/travis-helmtest.sh b/scripts/travis-helmtest.sh new file mode 100755 index 000000000..99a338b35 --- /dev/null +++ b/scripts/travis-helmtest.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -e + +# This script will be used by travis to run functional test +# against different kuberentes version +export KUBE_VERSION=$1 +sudo scripts/minikube.sh up +sudo scripts/minikube.sh deploy-rook +# pull docker images to speed up e2e +sudo scripts/minikube.sh cephcsi +sudo scripts/minikube.sh k8s-sidecar +sudo chown -R travis: "$HOME"/.minikube /usr/local/bin/kubectl + +NAMESPACE=cephcsi-e2e-$RANDOM +# set up helm +scripts/install-helm.sh up +# install cephcsi helm charts +scripts/install-helm.sh install-cephcsi ${NAMESPACE} +# functional tests +go test github.com/ceph/ceph-csi/e2e -mod=vendor --deploy-timeout=10 -timeout=30m --cephcsi-namespace=${NAMESPACE} 
--deploy-cephfs=false --deploy-rbd=false -v + +#cleanup +scripts/install-helm.sh cleanup-cephcsi +scripts/install-helm.sh clean +sudo scripts/minikube.sh clean