diff --git a/scripts/k8s-storage/README.md b/scripts/k8s-storage/README.md new file mode 100644 index 000000000..7c023f9ff --- /dev/null +++ b/scripts/k8s-storage/README.md @@ -0,0 +1,10 @@ +# Kubernetes external storage e2e test suite + +The files in this directory are used by the k8s-e2e-external-storage CI job. +This job runs the [Kubernetes end-to-end external storage tests][1] with +different driver configurations/manifests (in the `driver-*.yaml` files). Each +driver configuration refers to a StorageClass that is used while testing. The +StorageClasses are created with the `create-storageclasses.sh` script and the +`sc-*.yaml.in` templates. + +[1]: https://github.com/kubernetes/kubernetes/tree/master/test/e2e/storage/external diff --git a/scripts/k8s-storage/create-storageclasses.sh b/scripts/k8s-storage/create-storageclasses.sh new file mode 100755 index 000000000..326705572 --- /dev/null +++ b/scripts/k8s-storage/create-storageclasses.sh @@ -0,0 +1,27 @@ +#!/bin/sh +# +# Create StorageClasses from a template (sc-*.yaml.in) and replace keywords +# like @@CLUSTER_ID@@. +# +# These StorageClasses can then be used by driver-*.yaml manifests in the +# k8s-e2e-external-storage CI job. 
+# +# Requirements: +# - kubectl in the path +# - working KUBE_CONFIG either in environment, or default config files +# - deployment done with Rook +# + +# exit on error +set -e + +WORKDIR=$(dirname "${0}") + +TOOLBOX_POD=$(kubectl -n rook-ceph get pods --no-headers -l app=rook-ceph-tools -o=jsonpath='{.items[0].metadata.name}') +FS_ID=$(kubectl -n rook-ceph exec "${TOOLBOX_POD}" -- ceph fsid) + +for sc in "${WORKDIR}"/sc-*.yaml.in +do + sed "s/@@CLUSTER_ID@@/${FS_ID}/" "${sc}" | + kubectl create -f - +done diff --git a/scripts/k8s-storage/driver-cephfs.yaml b/scripts/k8s-storage/driver-cephfs.yaml new file mode 100644 index 000000000..44a9addfa --- /dev/null +++ b/scripts/k8s-storage/driver-cephfs.yaml @@ -0,0 +1,67 @@ +--- +ShortName: cephcsi-cephfs-test +StorageClass: + FromExistingClassName: k8s-storage-e2e-cephfs + # FromFile: sc-cephfs.yaml + +SnapshotClass: + # Must be set to enable snapshotting tests + FromName: false + +DriverInfo: + # Internal name of the driver, display name in the test case and test objects + Name: cephfs.csi.ceph.com + + # The range of disk size supported by this driver + SupportedSizeRange: + Min: 1Gi + Max: 16Ti + + # Map of strings for supported mount options + SupportedMountOption: + rw: {} + + # Map of strings for required mount options + RequiredMountOption: + rw: {} + + # Optional list of access modes required for provisioning. 
Default is RWO + # RequiredAccessModes: + + # Map that represents the capabilities the driver supports + Capabilities: + # Data is persisted across pod restarts + persistence: true + + # Volume ownership via fsGroup + fsGroup: true + + # Raw block mode + block: false + + # Exec a file in the volume + exec: true + + # Support for volume limits + volumeLimits: false + + # Support for volume expansion in controllers + controllerExpansion: false + + # Support for volume expansion in nodes + nodeExpansion: false + + # Support volume that can run on single node only (like hostpath) + singleNodeVolume: false + + # Support ReadWriteMany access modes + RWX: true + + # Support topology + topology: false + + # Support populate data from snapshot + snapshotDataSource: false + + # Support populated data from PVC + pvcDataSource: false diff --git a/scripts/k8s-storage/driver-rbd-rwo.yaml b/scripts/k8s-storage/driver-rbd-rwo.yaml new file mode 100644 index 000000000..afb3ae6ff --- /dev/null +++ b/scripts/k8s-storage/driver-rbd-rwo.yaml @@ -0,0 +1,72 @@ +--- +ShortName: cephcsi-rbd-rwo-test +StorageClass: + FromExistingClassName: k8s-storage-e2e-rbd-rwo + # FromFile: sc-rbd.yaml + +SnapshotClass: + # Must be set to enable snapshotting tests + FromName: true + +DriverInfo: + # Internal name of the driver, display name in the test case and test objects + Name: rbd-rwo.csi.ceph.com + + # The range of disk size supported by this driver + SupportedSizeRange: + Min: 1Gi + Max: 16Ti + + # Map of strings for supported FS types + SupportedFsType: + ext4: {} + xfs: {} + + # Map of strings for supported mount options + SupportedMountOption: + rw: {} + + # Map of strings for required mount options + RequiredMountOption: + rw: {} + + # Optional list of access modes required for provisioning. 
Default is RWO + # RequiredAccessModes: + + # Map that represents the capabilities the driver supports + Capabilities: + # Data is persisted across pod restarts + persistence: true + + # Volume ownership via fsGroup + fsGroup: false + + # Raw block mode + block: true + + # Exec a file in the volume + exec: true + + # Support for volume limits + volumeLimits: false + + # Support for volume expansion in controllers + controllerExpansion: false + + # Support for volume expansion in nodes + nodeExpansion: false + + # Support volume that can run on single node only (like hostpath) + singleNodeVolume: false + + # Support ReadWriteMany access modes + RWX: false + + # Support topology + topology: false + + # Support populate data from snapshot + snapshotDataSource: false + + # Support populated data from PVC + pvcDataSource: false diff --git a/scripts/k8s-storage/sc-cephfs.yaml.in b/scripts/k8s-storage/sc-cephfs.yaml.in new file mode 100644 index 000000000..020daf9ed --- /dev/null +++ b/scripts/k8s-storage/sc-cephfs.yaml.in @@ -0,0 +1,19 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: k8s-storage-e2e-cephfs +provisioner: cephfs.csi.ceph.com +parameters: + clusterID: @@CLUSTER_ID@@ + fsName: myfs + csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph + csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node + csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph +reclaimPolicy: Delete +allowVolumeExpansion: true +mountOptions: + - debug diff --git a/scripts/k8s-storage/sc-rbd.yaml.in b/scripts/k8s-storage/sc-rbd.yaml.in new file mode 100644 index 000000000..92f2775b8 --- /dev/null +++ b/scripts/k8s-storage/sc-rbd.yaml.in @@ -0,0 +1,21 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + 
name: k8s-storage-e2e-rbd-rwo +provisioner: rbd.csi.ceph.com +parameters: + clusterID: @@CLUSTER_ID@@ + pool: replicapool + imageFeatures: layering + csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph + csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node + csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph + csi.storage.k8s.io/fstype: ext4 +reclaimPolicy: Delete +allowVolumeExpansion: true +mountOptions: + - discard