From 4c5c67b8f9d27541a6dd6bc942c151555743a5d1 Mon Sep 17 00:00:00 2001
From: gman
Date: Thu, 22 Mar 2018 14:11:51 +0100
Subject: [PATCH] cephfs: check volumeOptions.Mounter and choose ceph-fuse or
 mount.ceph accordingly

---
 .../kubernetes/cephfs-storage-class.yaml |  6 +-
 pkg/cephfs/controllerserver.go           |  6 --
 pkg/cephfs/nodeserver.go                 | 95 +++++++++++++------
 pkg/cephfs/volumeoptions.go              | 25 ++++-
 4 files changed, 95 insertions(+), 37 deletions(-)

diff --git a/deploy/cephfs/kubernetes/cephfs-storage-class.yaml b/deploy/cephfs/kubernetes/cephfs-storage-class.yaml
index c7f4ef448..26f3a2345 100644
--- a/deploy/cephfs/kubernetes/cephfs-storage-class.yaml
+++ b/deploy/cephfs/kubernetes/cephfs-storage-class.yaml
@@ -4,9 +4,13 @@ metadata:
   name: csi-cephfs
 provisioner: csi-cephfsplugin
 parameters:
-  monitors: 192.168.122.11:6789
+  # The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel)
+  mounter: fuse
+
+  monitors: mon1:port,mon2:port
   rootPath: /
   user: admin
+
   csiProvisionerSecretName: csi-cephfs-secret
   csiProvisionerSecretNameSpace: default
 reclaimPolicy: Delete
diff --git a/pkg/cephfs/controllerserver.go b/pkg/cephfs/controllerserver.go
index 4a0693059..18982022b 100644
--- a/pkg/cephfs/controllerserver.go
+++ b/pkg/cephfs/controllerserver.go
@@ -74,12 +74,6 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 
 	volId := newVolumeIdentifier(volOptions, req)
 
-	conf := cephConfigData{Monitors: volOptions.Monitors}
-	if err = conf.writeToFile(); err != nil {
-		glog.Errorf("couldn't generate ceph.conf: %v", err)
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-
 	glog.V(4).Infof("cephfs: volume %s successfuly created", volId.id)
 
 	return &csi.CreateVolumeResponse{
diff --git a/pkg/cephfs/nodeserver.go b/pkg/cephfs/nodeserver.go
index 175a1f729..02553fcc1 100644
--- a/pkg/cephfs/nodeserver.go
+++ b/pkg/cephfs/nodeserver.go
@@ -63,32 +63,63 @@ func validateNodeUnpublishVolumeRequest(req *csi.NodeUnpublishVolumeRequest) err
 	return nil
 }
 
+func newMounter(volOptions *volumeOptions, key string, readOnly bool) (volumeMounter, error) {
+	var m volumeMounter
+
+	if volOptions.Mounter == volumeMounter_fuse {
+		keyring := cephKeyringData{
+			User:     volOptions.User,
+			Key:      key,
+			RootPath: volOptions.RootPath,
+			ReadOnly: readOnly,
+		}
+
+		if err := keyring.writeToFile(); err != nil {
+			msg := fmt.Sprintf("couldn't write ceph keyring for user %s: %v", volOptions.User, err)
+			glog.Error(msg)
+			return nil, status.Error(codes.Internal, msg)
+		}
+
+		m = &fuseMounter{}
+	} else if volOptions.Mounter == volumeMounter_kernel {
+		secret := cephSecretData{
+			User: volOptions.User,
+			Key:  key,
+		}
+
+		if err := secret.writeToFile(); err != nil {
+			msg := fmt.Sprintf("couldn't write ceph secret for user %s: %v", volOptions.User, err)
+			glog.Error(msg)
+			return nil, status.Error(codes.Internal, msg)
+		}
+
+		m = &kernelMounter{}
+	}
+
+	return m, nil
+}
+
 func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
 	if err := validateNodePublishVolumeRequest(req); err != nil {
 		return nil, err
 	}
 
-	// Configuration
-
-	targetPath := req.GetTargetPath()
-
-	volOptions, err := newVolumeOptions(req.GetVolumeAttributes())
-	if err != nil {
-		glog.Errorf("error reading volume options: %v", err)
-		return nil, status.Error(codes.InvalidArgument, err.Error())
-	}
-
 	/*
-		volId := req.GetVolumeId()
 		if err = tryLock(volId, nsMtx, "NodeServer"); err != nil {
 			return nil, err
 		}
 		defer nsMtx.UnlockKey(volId)
 	*/
-	if err = createMountPoint(targetPath); err != nil {
-		glog.Errorf("failed to create mount point at %s: %v", targetPath, err)
-		return nil, status.Error(codes.Internal, err.Error())
+	// Configuration
+
+	targetPath := req.GetTargetPath()
+	volId := req.GetVolumeId()
+
+	volOptions, err := newVolumeOptions(req.GetVolumeAttributes())
+	if err != nil {
+		glog.Errorf("error reading volume options: %v", err)
+		return nil, status.Error(codes.InvalidArgument, err.Error())
 	}
 
 	key, err := getKeyFromCredentials(req.GetNodePublishSecrets())
 
@@ -97,17 +128,15 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 		return nil, status.Error(codes.InvalidArgument, err.Error())
 	}
 
-	keyring := cephKeyringData{
-		User:     volOptions.User,
-		Key:      key,
-		RootPath: volOptions.RootPath,
-		ReadOnly: req.GetReadonly(),
+	if err = createMountPoint(targetPath); err != nil {
+		glog.Errorf("failed to create mount point at %s: %v", targetPath, err)
+		return nil, status.Error(codes.Internal, err.Error())
 	}
 
-	if err = keyring.writeToFile(); err != nil {
-		msg := fmt.Sprintf("couldn't write ceph keyring for user %s: %v", volOptions.User, err)
-		glog.Error(msg)
-		return nil, status.Error(codes.Internal, msg)
+	conf := cephConfigData{Monitors: volOptions.Monitors}
+	if err = conf.writeToFile(); err != nil {
+		glog.Errorf("couldn't generate ceph.conf: %v", err)
+		return nil, status.Error(codes.Internal, err.Error())
 	}
 
 	// Check if the volume is already mounted
@@ -120,19 +149,24 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	}
 
 	if isMnt {
+		glog.V(4).Infof("cephfs: volume %s is already mounted to %s", volId, targetPath)
 		return &csi.NodePublishVolumeResponse{}, nil
 	}
 
 	// It's not, exec ceph-fuse now
 
-	vol := volume{RootPath: volOptions.RootPath, User: volOptions.User}
-
-	if err := vol.mount(targetPath); err != nil {
-		glog.Errorf("mounting volume %s to %s failed: %v", vol.RootPath, targetPath, err)
+	m, err := newMounter(volOptions, key, req.GetReadonly())
+	if err != nil {
+		glog.Errorf("error while creating volumeMounter: %v", err)
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 
-	glog.V(4).Infof("cephfs: volume %s successfuly mounted to %s", vol.RootPath, targetPath)
+	if err = m.mount(targetPath, volOptions); err != nil {
+		glog.Errorf("mounting volume %s to %s failed: %v", volId, targetPath, err)
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+
+	glog.V(4).Infof("cephfs: volume %s successfully mounted to %s", volId, targetPath)
 
 	return &csi.NodePublishVolumeResponse{}, nil
 }
@@ -142,8 +176,9 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
 		return nil, err
 	}
 
+	volId := req.GetVolumeId()
+
 	/*
-		volId := req.GetVolumeId()
 		if err := tryLock(volId, nsMtx, "NodeServer"); err != nil {
 			return nil, err
 		}
@@ -154,6 +189,8 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 
+	glog.V(4).Infof("cephfs: volume %s successfully unmounted from %s", volId, req.GetTargetPath())
+
 	return &csi.NodeUnpublishVolumeResponse{}, nil
 }
 
diff --git a/pkg/cephfs/volumeoptions.go b/pkg/cephfs/volumeoptions.go
index 31818138c..249e56afa 100644
--- a/pkg/cephfs/volumeoptions.go
+++ b/pkg/cephfs/volumeoptions.go
@@ -16,12 +16,16 @@ limitations under the License.
 
 package cephfs
 
-import "errors"
+import (
+	"errors"
+	"fmt"
+)
 
 type volumeOptions struct {
 	Monitors string `json:"monitors"`
 	RootPath string `json:"rootPath"`
 	User     string `json:"user"`
+	Mounter  string `json:"mounter"`
 }
 
 func extractOption(dest *string, optionLabel string, options map[string]string) error {
@@ -33,6 +37,17 @@ func extractOption(dest *string, optionLabel string, options map[string]string)
 	}
 }
 
+func validateMounter(m string) error {
+	switch m {
+	case volumeMounter_fuse:
+	case volumeMounter_kernel:
+	default:
+		return fmt.Errorf("Unknown mounter '%s'. Valid options are 'fuse' and 'kernel'", m)
+	}
+
+	return nil
+}
+
 func newVolumeOptions(volOptions map[string]string) (*volumeOptions, error) {
 	var opts volumeOptions
 
@@ -48,5 +63,13 @@ func newVolumeOptions(volOptions map[string]string) (*volumeOptions, error) {
 		return nil, err
 	}
 
+	if err := extractOption(&opts.Mounter, "mounter", volOptions); err != nil {
+		return nil, err
+	}
+
+	if err := validateMounter(opts.Mounter); err != nil {
+		return nil, err
+	}
+
 	return &opts, nil
 }
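
Note: fuseMounter, kernelMounter, and the volumeMounter_fuse / volumeMounter_kernel constants that this patch selects between are defined elsewhere in the package and are not part of the diff above. For context, here is a minimal sketch of what that interface and its two implementations might look like. The ceph-fuse and mount.ceph invocations are modeled on the standard CLIs rather than taken from the driver's code, and the secretfile path is a placeholder:

// Sketch only: an assumed shape for the mounter abstraction that this patch
// selects between; not part of the patch itself.
package cephfs

import (
	"fmt"
	"os/exec"
)

// Accepted values for the "mounter" storage-class parameter, per the
// comment in cephfs-storage-class.yaml above.
const (
	volumeMounter_fuse   = "fuse"
	volumeMounter_kernel = "kernel"
)

// volumeMounter hides the difference between ceph-fuse and the kernel client.
type volumeMounter interface {
	mount(mountPoint string, volOptions *volumeOptions) error
}

type fuseMounter struct{}

func (m *fuseMounter) mount(mountPoint string, volOptions *volumeOptions) error {
	// ceph-fuse reads the monitors from the generated ceph.conf and
	// authenticates with the keyring written by cephKeyringData.writeToFile().
	out, err := exec.Command("ceph-fuse",
		mountPoint,
		"-n", "client."+volOptions.User,
		"-r", volOptions.RootPath,
	).CombinedOutput()
	if err != nil {
		return fmt.Errorf("ceph-fuse exited with %v:\n%s", err, out)
	}
	return nil
}

type kernelMounter struct{}

func (m *kernelMounter) mount(mountPoint string, volOptions *volumeOptions) error {
	// mount.ceph takes "monitors:rootPath" as the device argument and reads
	// the secret written by cephSecretData.writeToFile(); the secretfile
	// path below is hypothetical.
	out, err := exec.Command("mount",
		"-t", "ceph",
		fmt.Sprintf("%s:%s", volOptions.Monitors, volOptions.RootPath),
		mountPoint,
		"-o", fmt.Sprintf("name=%s,secretfile=/etc/ceph/csi-secret", volOptions.User),
	).CombinedOutput()
	if err != nil {
		return fmt.Errorf("mount.ceph exited with %v:\n%s", err, out)
	}
	return nil
}

With a split along these lines, NodePublishVolume stays mounter-agnostic: newMounter writes the credentials in whichever format the chosen client expects (a keyring for ceph-fuse, a secret file for mount.ceph) and hands back a volumeMounter, so the publish path drives both clients through the same mount(targetPath, volOptions) call.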