diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 000000000..36f9c9db2 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,17 @@ +## How to test RBD and CephFS plugins with Kubernetes 1.11 + +Both `rbd` and `cephfs` directories contain `plugin-deploy.sh` and `plugin-teardown.sh` helper scripts. You can use those to help you deploy/tear down RBACs, sidecar containers and the plugin in one go. By default, they look for the YAML manifests in `../../deploy/{rbd,cephfs}/kubernetes`. You can override this path by running `$ ./plugin-deploy.sh /path/to/my/manifests`. + +Once the plugin is successfully deployed, you'll need to customize `storageclass.yaml` and `secret.yaml` manifests to reflect your Ceph cluster setup. Please consult the documentation for info about available parameters. + +After configuring the secrets, monitors, etc. you can deploy a testing Pod mounting an RBD image / CephFS volume: +```bash +$ kubectl create -f secret.yaml +$ kubectl create -f storageclass.yaml +$ kubectl create -f pvc.yaml +$ kubectl create -f pod.yaml +``` + +Other helper scripts: +* `logs.sh` output of the plugin +* `exec-bash.sh` logs into the plugin's container and runs bash diff --git a/examples/cephfs/deployment.yaml b/examples/cephfs/deployment.yaml new file mode 100644 index 000000000..8253743b5 --- /dev/null +++ b/examples/cephfs/deployment.yaml @@ -0,0 +1,27 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: csicephfs-demo-depl + labels: + app: web-server +spec: + replicas: 1 + selector: + matchLabels: + app: web-server + template: + metadata: + labels: + app: web-server + spec: + containers: + - name: web-server + image: nginx + volumeMounts: + - name: mypvc + mountPath: /var/lib/www/html + volumes: + - name: mypvc + persistentVolumeClaim: + claimName: csi-cephfs-pvc + readOnly: false diff --git a/examples/cephfs/exec-bash.sh b/examples/cephfs/exec-bash.sh new file mode 100755 index 000000000..9ed465768 --- /dev/null +++ 
b/examples/cephfs/exec-bash.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CONTAINER_NAME=csi-cephfsplugin +POD_NAME=$(kubectl get pods -l app=$CONTAINER_NAME -o=name | head -n 1) + +function get_pod_status() { + echo -n $(kubectl get $POD_NAME -o jsonpath="{.status.phase}") +} + +while [[ "$(get_pod_status)" != "Running" ]]; do + sleep 1 + echo "Waiting for $POD_NAME (status $(get_pod_status))" +done + +kubectl exec -it ${POD_NAME#*/} -c $CONTAINER_NAME bash diff --git a/examples/cephfs/logs.sh b/examples/cephfs/logs.sh new file mode 100755 index 000000000..25c507430 --- /dev/null +++ b/examples/cephfs/logs.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CONTAINER_NAME=csi-cephfsplugin +POD_NAME=$(kubectl get pods -l app=$CONTAINER_NAME -o=name | head -n 1) + +function get_pod_status() { + echo -n $(kubectl get $POD_NAME -o jsonpath="{.status.phase}") +} + +while [[ "$(get_pod_status)" != "Running" ]]; do + sleep 1 + echo "Waiting for $POD_NAME (status $(get_pod_status))" +done + +kubectl logs -f $POD_NAME -c $CONTAINER_NAME \ No newline at end of file diff --git a/examples/cephfs/plugin-deploy.sh b/examples/cephfs/plugin-deploy.sh new file mode 100755 index 000000000..328e95e58 --- /dev/null +++ b/examples/cephfs/plugin-deploy.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +deployment_base="${1}" + +if [[ -z $deployment_base ]]; then + deployment_base="../../deploy/cephfs/kubernetes" +fi + +cd "$deployment_base" || exit 1 + +objects=(csi-attacher-rbac csi-provisioner-rbac csi-nodeplugin-rbac csi-cephfsplugin-attacher csi-cephfsplugin-provisioner csi-cephfsplugin) + +for obj in ${objects[@]}; do + kubectl create -f "./$obj.yaml" +done diff --git a/examples/cephfs/plugin-teardown.sh b/examples/cephfs/plugin-teardown.sh new file mode 100755 index 000000000..763805b8e --- /dev/null +++ b/examples/cephfs/plugin-teardown.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +deployment_base="${1}" + +if [[ -z $deployment_base ]]; then + deployment_base="../../deploy/cephfs/kubernetes" +fi + +cd "$deployment_base" || exit 
1 + +objects=(csi-cephfsplugin-attacher csi-cephfsplugin-provisioner csi-cephfsplugin csi-attacher-rbac csi-provisioner-rbac csi-nodeplugin-rbac) + +for obj in ${objects[@]}; do + kubectl delete -f "./$obj.yaml" +done diff --git a/examples/cephfs/pod.yaml b/examples/cephfs/pod.yaml new file mode 100644 index 000000000..d78b6d012 --- /dev/null +++ b/examples/cephfs/pod.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: csicephfs-demo-pod +spec: + containers: + - name: web-server + image: nginx + volumeMounts: + - name: mypvc + mountPath: /var/lib/www + volumes: + - name: mypvc + persistentVolumeClaim: + claimName: csi-cephfs-pvc + readOnly: false + diff --git a/examples/cephfs/pvc.yaml b/examples/cephfs/pvc.yaml new file mode 100644 index 000000000..864b7f128 --- /dev/null +++ b/examples/cephfs/pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: csi-cephfs-pvc +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Gi + storageClassName: csi-cephfs diff --git a/examples/cephfs/secret.yaml b/examples/cephfs/secret.yaml new file mode 100644 index 000000000..cf82f6b4f --- /dev/null +++ b/examples/cephfs/secret.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + name: csi-cephfs-secret + namespace: default +data: + # Required if provisionVolume is set to false + userID: BASE64-ENCODED-VALUE + userKey: BASE64-ENCODED-VALUE + + # Required if provisionVolume is set to true + adminID: BASE64-ENCODED-VALUE + adminKey: BASE64-ENCODED-VALUE diff --git a/examples/cephfs/storageclass.yaml b/examples/cephfs/storageclass.yaml new file mode 100644 index 000000000..5d8e6a938 --- /dev/null +++ b/examples/cephfs/storageclass.yaml @@ -0,0 +1,33 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: csi-cephfs +provisioner: csi-cephfsplugin +parameters: + # Comma separated list of Ceph monitors + monitors: mon1:port,mon2:port,... 
+ + # If set to true, a new volume will be created along with a RADOS user - this requires admin access. + # If set to false, it is assumed the volume already exists and the user is expected to provide + # a rootPath to a cephfs volume and user credentials. + provisionVolume: "true" + + # Ceph pool into which the volume shall be created + # Required for provisionVolume: "true" + pool: cephfs_data + + # Root path of an existing CephFS volume + # Required for provisionVolume: "false" + # rootPath: /absolute/path + + # The secrets have to contain user and/or Ceph admin credentials. + csiProvisionerSecretName: csi-cephfs-secret + csiProvisionerSecretNamespace: default + csiNodeStageSecretName: csi-cephfs-secret + csiNodeStageSecretNamespace: default + + # (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel) + # If left out, default volume mounter will be used - this is determined by probing for ceph-fuse + # or by setting the default mounter explicitly via --volumemounter command-line argument. 
+ # mounter: kernel +reclaimPolicy: Delete diff --git a/examples/rbd/exec-bash.sh b/examples/rbd/exec-bash.sh new file mode 100755 index 000000000..a4b32fd7e --- /dev/null +++ b/examples/rbd/exec-bash.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CONTAINER_NAME=csi-rbdplugin +POD_NAME=$(kubectl get pods -l app=$CONTAINER_NAME -o=name | head -n 1) + +function get_pod_status() { + echo -n $(kubectl get $POD_NAME -o jsonpath="{.status.phase}") +} + +while [[ "$(get_pod_status)" != "Running" ]]; do + sleep 1 + echo "Waiting for $POD_NAME (status $(get_pod_status))" +done + +kubectl exec -it ${POD_NAME#*/} -c $CONTAINER_NAME bash \ No newline at end of file diff --git a/examples/rbd/logs.sh b/examples/rbd/logs.sh new file mode 100755 index 000000000..e103d70bf --- /dev/null +++ b/examples/rbd/logs.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CONTAINER_NAME=csi-rbdplugin +POD_NAME=$(kubectl get pods -l app=$CONTAINER_NAME -o=name | head -n 1) + +function get_pod_status() { + echo -n $(kubectl get $POD_NAME -o jsonpath="{.status.phase}") +} + +while [[ "$(get_pod_status)" != "Running" ]]; do + sleep 1 + echo "Waiting for $POD_NAME (status $(get_pod_status))" +done + +kubectl logs -f $POD_NAME -c $CONTAINER_NAME \ No newline at end of file diff --git a/examples/rbd/plugin-deploy.sh b/examples/rbd/plugin-deploy.sh new file mode 100755 index 000000000..b5bc40b18 --- /dev/null +++ b/examples/rbd/plugin-deploy.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +deployment_base="${1}" + +if [[ -z $deployment_base ]]; then + deployment_base="../../deploy/rbd/kubernetes" +fi + +cd "$deployment_base" || exit 1 + +objects=(csi-attacher-rbac csi-provisioner-rbac csi-nodeplugin-rbac csi-rbdplugin-attacher csi-rbdplugin-provisioner csi-rbdplugin) + +for obj in ${objects[@]}; do + kubectl create -f "./$obj.yaml" +done diff --git a/examples/rbd/plugin-teardown.sh b/examples/rbd/plugin-teardown.sh new file mode 100755 index 000000000..cf8d32a73 --- /dev/null +++ b/examples/rbd/plugin-teardown.sh @@ -0,0 +1,15 @@ 
+#!/bin/bash + +deployment_base="${1}" + +if [[ -z $deployment_base ]]; then + deployment_base="../../deploy/rbd/kubernetes" +fi + +cd "$deployment_base" || exit 1 + +objects=(csi-rbdplugin-attacher csi-rbdplugin-provisioner csi-rbdplugin csi-attacher-rbac csi-provisioner-rbac csi-nodeplugin-rbac) + +for obj in ${objects[@]}; do + kubectl delete -f "./$obj.yaml" +done diff --git a/examples/rbd/pod.yaml b/examples/rbd/pod.yaml new file mode 100644 index 000000000..4544bc142 --- /dev/null +++ b/examples/rbd/pod.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: csirbd-demo-pod +spec: + containers: + - name: web-server + image: nginx + volumeMounts: + - name: mypvc + mountPath: /var/lib/www/html + volumes: + - name: mypvc + persistentVolumeClaim: + claimName: rbd-pvc + readOnly: false + diff --git a/examples/rbd/pvc.yaml b/examples/rbd/pvc.yaml new file mode 100644 index 000000000..1a459f72f --- /dev/null +++ b/examples/rbd/pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: rbd-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: csi-rbd diff --git a/examples/rbd/secret.yaml b/examples/rbd/secret.yaml new file mode 100644 index 000000000..594fd8ab4 --- /dev/null +++ b/examples/rbd/secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: csi-rbd-secret + namespace: default +data: + # Key value corresponds to a user name defined in ceph cluster + admin: BASE64-ENCODED-PASSWORD diff --git a/examples/rbd/storageclass.yaml b/examples/rbd/storageclass.yaml new file mode 100644 index 000000000..476c2bef3 --- /dev/null +++ b/examples/rbd/storageclass.yaml @@ -0,0 +1,24 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: csi-rbd +provisioner: csi-rbdplugin +parameters: + # Comma separated list of Ceph monitors + monitors: mon1:port,mon2:port,... 
+ + # Ceph pool into which the RBD image shall be created + pool: rbd + + # RBD image format. Defaults to "2". + imageFormat: "2" + + # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature. + imageFeatures: layering + + # The secrets have to contain Ceph admin credentials. + csiProvisionerSecretName: csi-rbd-secret + csiProvisionerSecretNamespace: default + csiNodePublishSecretName: csi-rbd-secret + csiNodePublishSecretNamespace: default +reclaimPolicy: Delete