From c2835183e5b9a68f2ef22dea32907409160a3fd5 Mon Sep 17 00:00:00 2001
From: Poornima G
Date: Thu, 20 Jun 2019 07:35:53 +0000
Subject: [PATCH] Remove user creation for every volume

Currently, the provisioner creates a ceph user for every volume, and the
nodeplugin uses that user to mount the volume. But the nodeplugin and the
provisioner already have admin credentials; hence, use the admin
credentials to mount the volume and get rid of per-volume user creation.

Signed-off-by: Poornima G
---
 pkg/cephfs/cephuser.go         | 74 ++--------------------------------
 pkg/cephfs/controllerserver.go | 15 +------
 pkg/cephfs/mountcache.go       |  7 ----
 pkg/cephfs/nodeserver.go       | 24 ++++-------
 pkg/cephfs/volume.go           |  5 ---
 5 files changed, 13 insertions(+), 112 deletions(-)

diff --git a/pkg/cephfs/cephuser.go b/pkg/cephfs/cephuser.go
index 3424aed82..7e174b669 100644
--- a/pkg/cephfs/cephuser.go
+++ b/pkg/cephfs/cephuser.go
@@ -17,8 +17,6 @@ limitations under the License.
 package cephfs
 
 import (
-	"fmt"
-
 	"github.com/ceph/ceph-csi/pkg/util"
 )
 
@@ -27,81 +25,15 @@ const (
 	cephEntityClientPrefix = "client."
 )
 
-type cephEntityCaps struct {
-	Mds string `json:"mds"`
-	Mon string `json:"mon"`
-	Osd string `json:"osd"`
-}
-
-type cephEntity struct {
-	Entity string         `json:"entity"`
-	Key    string         `json:"key"`
-	Caps   cephEntityCaps `json:"caps"`
-}
-
-func (ent *cephEntity) toCredentials() *util.Credentials {
-	return &util.Credentials{
-		ID:  ent.Entity[len(cephEntityClientPrefix):],
-		Key: ent.Key,
-	}
+func genUserIDs(adminCr *util.Credentials, volID volumeID) (adminID, userID string) {
+	return cephEntityClientPrefix + adminCr.ID, cephEntityClientPrefix + getCephUserName(volID)
 }
 
 func getCephUserName(volID volumeID) string {
 	return cephUserPrefix + string(volID)
 }
 
-func getSingleCephEntity(args ...string) (*cephEntity, error) {
-	var ents []cephEntity
-	if err := execCommandJSON(&ents, "ceph", args...); err != nil {
-		return nil, err
-	}
-
-	if len(ents) != 1 {
-		return nil, fmt.Errorf("got unexpected number of entities: expected 1, got %d", len(ents))
-	}
-
-	return &ents[0], nil
-}
-
-func genUserIDs(adminCr *util.Credentials, volID volumeID) (adminID, userID string) {
-	return cephEntityClientPrefix + adminCr.ID, cephEntityClientPrefix + getCephUserName(volID)
-}
-
-func getCephUser(volOptions *volumeOptions, adminCr *util.Credentials, volID volumeID) (*cephEntity, error) {
-	adminID, userID := genUserIDs(adminCr, volID)
-
-	return getSingleCephEntity(
-		"-m", volOptions.Monitors,
-		"-n", adminID,
-		"--key="+adminCr.Key,
-		"-c", util.CephConfigPath,
-		"-f", "json",
-		"auth", "get", userID,
-	)
-}
-
-func createCephUser(volOptions *volumeOptions, adminCr *util.Credentials, volID volumeID) (*cephEntity, error) {
-	adminID, userID := genUserIDs(adminCr, volID)
-	volRootPath, err := getVolumeRootPathCeph(volOptions, adminCr, volID)
-	if err != nil {
-		return nil, err
-	}
-
-	return getSingleCephEntity(
-		"-m", volOptions.Monitors,
-		"-n", adminID,
-		"--key="+adminCr.Key,
-		"-c", util.CephConfigPath,
-		"-f", "json",
-		"auth", "get-or-create", userID,
-		// User capabilities
-		"mds", fmt.Sprintf("allow rw path=%s", volRootPath),
-		"mon", "allow r",
-		"osd", fmt.Sprintf("allow rw pool=%s namespace=%s", volOptions.Pool, getVolumeNamespace(volID)),
-	)
-}
-
-func deleteCephUser(volOptions *volumeOptions, adminCr *util.Credentials, volID volumeID) error {
+func deleteCephUserDeprecated(volOptions *volumeOptions, adminCr *util.Credentials, volID volumeID) error {
	adminID, userID := genUserIDs(adminCr, volID)
 
 	// TODO: Need to return success if userID is not found
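
Note: the removed createCephUser helper shelled out to the real `ceph auth
get-or-create` command to mint one scoped entity per volume. A minimal
standalone sketch of that pattern, with a hypothetical monitor address,
pool, entity name, and subvolume path (not the driver's exact wiring):

    // sketch: per-volume entity creation, the pattern this patch removes
    package main

    import (
    	"fmt"
    	"os/exec"
    )

    func main() {
    	volRootPath := "/volumes/csi/csi-vol-example" // hypothetical subvolume path
    	args := []string{
    		"-m", "192.168.1.1:6789", // hypothetical monitor address
    		"-n", "client.admin",
    		"--key=<admin-key>", // placeholder admin key
    		"-f", "json",
    		"auth", "get-or-create", "client.csi-user-example", // hypothetical entity
    		// capabilities scoped to a single volume, as createCephUser set them
    		"mds", fmt.Sprintf("allow rw path=%s", volRootPath),
    		"mon", "allow r",
    		"osd", "allow rw pool=cephfs_data namespace=fsvolumens_example",
    	}
    	out, err := exec.Command("ceph", args...).CombinedOutput()
    	fmt.Println(string(out), err)
    }

Dropping this per-volume entity means mounts now authenticate with the
broader admin capabilities instead of an MDS cap restricted to the
volume's root path.
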
diff --git a/pkg/cephfs/controllerserver.go b/pkg/cephfs/controllerserver.go
index b75406e2d..3680be67c 100644
--- a/pkg/cephfs/controllerserver.go
+++ b/pkg/cephfs/controllerserver.go
@@ -44,8 +44,7 @@ var (
 	volumeNameLocker = util.NewIDLocker()
 )
 
-// createBackingVolume creates the backing subvolume and user/key for the given volOptions and vID,
-// and on any error cleans up any created entities
+// createBackingVolume creates the backing subvolume and on any error cleans up any created entities
 func (cs *ControllerServer) createBackingVolume(volOptions *volumeOptions, vID *volumeIdentifier, secret map[string]string) error {
 	cr, err := util.GetAdminCredentials(secret)
 	if err != nil {
@@ -64,11 +63,6 @@ func (cs *ControllerServer) createBackingVolume(volOptions *volumeOptions, vID *
 		}
 	}()
 
-	if _, err = createCephUser(volOptions, cr, volumeID(vID.FsSubvolName)); err != nil {
-		klog.Errorf("failed to create ceph user for volume %s: %v", volOptions.RequestName, err)
-		return status.Error(codes.Internal, err.Error())
-	}
-
 	return nil
 }
 
@@ -188,7 +182,7 @@ func (cs *ControllerServer) deleteVolumeDeprecated(req *csi.DeleteVolumeRequest)
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 
-	if err = deleteCephUser(&ce.VolOptions, cr, volID); err != nil {
+	if err = deleteCephUserDeprecated(&ce.VolOptions, cr, volID); err != nil {
 		klog.Errorf("failed to delete ceph user for volume %s: %v", volID, err)
 		return nil, status.Error(codes.Internal, err.Error())
 	}
@@ -247,11 +241,6 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
 		return nil, status.Error(codes.Internal, err.Error())
 	}
 
-	if err = deleteCephUser(volOptions, cr, volumeID(vID.FsSubvolName)); err != nil {
-		klog.Errorf("failed to delete ceph user for volume %s: %v", volID, err)
-		return nil, status.Error(codes.Internal, err.Error())
-	}
-
 	if err := undoVolReservation(volOptions, *vID, secrets); err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
 	}
diff --git a/pkg/cephfs/mountcache.go b/pkg/cephfs/mountcache.go
index 9991df1e7..267d17560 100644
--- a/pkg/cephfs/mountcache.go
+++ b/pkg/cephfs/mountcache.go
@@ -104,13 +104,6 @@ func mountOneCacheEntry(volOptions *volumeOptions, vid *volumeIdentifier, me *vo
 		if err != nil {
 			return err
 		}
-
-		var entity *cephEntity
-		entity, err = getCephUser(volOptions, cr, volumeID(vid.FsSubvolName))
-		if err != nil {
-			return err
-		}
-		cr = entity.toCredentials()
 	} else {
 		cr, err = util.GetUserCredentials(decodeCredentials(me.Secrets))
 		if err != nil {
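
Note: across the controller, mount cache, and node server (see the
nodeserver.go changes below), credential selection collapses to a two-way
choice: admin credentials for dynamically provisioned volumes, user
credentials from the node-stage secrets for pre-made ones. A
self-contained sketch of that selection, with a stand-in Credentials
struct and stand-in secret-parsing helpers (the driver's real ones live
in pkg/util):

    // sketch: simplified credential selection after this patch
    package main

    import (
    	"errors"
    	"fmt"
    )

    // Credentials is a stand-in for util.Credentials.
    type Credentials struct{ ID, Key string }

    // getAdminCredentials is a stand-in for util.GetAdminCredentials.
    func getAdminCredentials(s map[string]string) (*Credentials, error) {
    	if s["adminID"] == "" {
    		return nil, errors.New("missing adminID in secrets")
    	}
    	return &Credentials{ID: s["adminID"], Key: s["adminKey"]}, nil
    }

    // getUserCredentials is a stand-in for util.GetUserCredentials.
    func getUserCredentials(s map[string]string) (*Credentials, error) {
    	if s["userID"] == "" {
    		return nil, errors.New("missing userID in secrets")
    	}
    	return &Credentials{ID: s["userID"], Key: s["userKey"]}, nil
    }

    // credentialsForVolume mirrors the simplified flow: provisioned
    // volumes mount with the admin entity, pre-made volumes with the
    // user entity supplied in the node-stage secrets.
    func credentialsForVolume(provisioned bool, secrets map[string]string) (*Credentials, error) {
    	if provisioned {
    		return getAdminCredentials(secrets)
    	}
    	return getUserCredentials(secrets)
    }

    func main() {
    	cr, err := credentialsForVolume(true, map[string]string{
    		"adminID":  "admin",
    		"adminKey": "<admin-key>",
    	})
    	fmt.Println(cr, err)
    }
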
diff --git a/pkg/cephfs/nodeserver.go b/pkg/cephfs/nodeserver.go
index e5c7eb6c9..83f21141e 100644
--- a/pkg/cephfs/nodeserver.go
+++ b/pkg/cephfs/nodeserver.go
@@ -42,7 +42,7 @@ var (
 	nodeVolumeIDLocker = util.NewIDLocker()
 )
 
-func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi.NodeStageVolumeRequest) (*util.Credentials, error) {
+func getCredentialsForVolume(volOptions *volumeOptions, req *csi.NodeStageVolumeRequest) (*util.Credentials, error) {
 	var (
 		cr      *util.Credentials
 		secrets = req.GetSecrets()
@@ -58,14 +58,7 @@ func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi
 			return nil, fmt.Errorf("failed to get admin credentials from node stage secrets: %v", err)
 		}
 
-		// Then get the ceph user
-
-		entity, err := getCephUser(volOptions, adminCr, volID)
-		if err != nil {
-			return nil, fmt.Errorf("failed to get ceph user: %v", err)
-		}
-
-		cr = entity.toCredentials()
+		cr = adminCr
 	} else {
 		// The volume is pre-made, credentials are in node stage secrets
@@ -84,7 +77,6 @@ func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi
 func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
 	var (
 		volOptions *volumeOptions
-		vid        *volumeIdentifier
 	)
 	if err := util.ValidateNodeStageVolumeRequest(req); err != nil {
 		return nil, err
@@ -95,21 +87,21 @@ func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
 	stagingTargetPath := req.GetStagingTargetPath()
 	volID := volumeID(req.GetVolumeId())
 
-	volOptions, vid, err := newVolumeOptionsFromVolID(string(volID), req.GetVolumeContext(), req.GetSecrets())
+	volOptions, _, err := newVolumeOptionsFromVolID(string(volID), req.GetVolumeContext(), req.GetSecrets())
 	if err != nil {
 		if _, ok := err.(ErrInvalidVolID); !ok {
 			return nil, status.Error(codes.Internal, err.Error())
 		}
 
 		// check for pre-provisioned volumes (plugin versions > 1.0.0)
-		volOptions, vid, err = newVolumeOptionsFromStaticVolume(string(volID), req.GetVolumeContext())
+		volOptions, _, err = newVolumeOptionsFromStaticVolume(string(volID), req.GetVolumeContext())
 		if err != nil {
 			if _, ok := err.(ErrNonStaticVolume); !ok {
 				return nil, status.Error(codes.Internal, err.Error())
 			}
 
 			// check for volumes from plugin versions <= 1.0.0
-			volOptions, vid, err = newVolumeOptionsFromVersion1Context(string(volID), req.GetVolumeContext(),
+			volOptions, _, err = newVolumeOptionsFromVersion1Context(string(volID), req.GetVolumeContext(),
 				req.GetSecrets())
 			if err != nil {
 				return nil, status.Error(codes.Internal, err.Error())
@@ -140,7 +132,7 @@ func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
 	}
 
 	// It's not, mount now
-	if err = ns.mount(volOptions, req, vid); err != nil {
+	if err = ns.mount(volOptions, req); err != nil {
 		return nil, err
 	}
 
@@ -149,11 +141,11 @@ func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
 	return &csi.NodeStageVolumeResponse{}, nil
 }
 
-func (*NodeServer) mount(volOptions *volumeOptions, req *csi.NodeStageVolumeRequest, vid *volumeIdentifier) error {
+func (*NodeServer) mount(volOptions *volumeOptions, req *csi.NodeStageVolumeRequest) error {
 	stagingTargetPath := req.GetStagingTargetPath()
 	volID := volumeID(req.GetVolumeId())
 
-	cr, err := getCredentialsForVolume(volOptions, volumeID(vid.FsSubvolName), req)
+	cr, err := getCredentialsForVolume(volOptions, req)
 	if err != nil {
 		klog.Errorf("failed to get ceph credentials for volume %s: %v", volID, err)
 		return status.Error(codes.Internal, err.Error())
diff --git a/pkg/cephfs/volume.go b/pkg/cephfs/volume.go
index 3c34b9adc..d06a10a70 100644
--- a/pkg/cephfs/volume.go
+++ b/pkg/cephfs/volume.go
@@ -29,7 +29,6 @@ import (
 )
 
 const (
-	namespacePrefix   = "fsvolumens_"
 	csiSubvolumeGroup = "csi"
 )
 
@@ -74,10 +73,6 @@ func getVolumeRootPathCeph(volOptions *volumeOptions, cr *util.Credentials, volI
 	return strings.TrimSuffix(string(stdout), "\n"), nil
 }
 
-func getVolumeNamespace(volID volumeID) string {
-	return namespacePrefix + string(volID)
-}
-
 func createVolume(volOptions *volumeOptions, cr *util.Credentials, volID volumeID, bytesQuota int64) error {
 	//TODO: When we support multiple fs, need to hande subvolume group create for all fs's
 	if !cephfsInit {
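
Note: with admin credentials flowing into ns.mount, a staged CephFS
kernel mount amounts to the standard `mount -t ceph` invocation with the
admin entity selected via the name= option. A rough sketch of that end
state, where the monitor, subvolume path, staging target, and secretfile
location are all placeholders (the driver performs the mount through its
own helpers, not this code):

    // sketch: mounting CephFS with the admin entity instead of a
    // per-volume user
    package main

    import (
    	"fmt"
    	"os/exec"
    )

    func main() {
    	monitors := "192.168.1.1:6789"          // hypothetical monitor list
    	volRootPath := "/volumes/csi/csi-vol-x" // hypothetical subvolume root
    	target := "/var/lib/kubelet/staging"    // hypothetical staging path

    	// name= selects the ceph entity; with this patch it is the admin
    	// ID (without the "client." prefix) rather than a per-volume user.
    	args := []string{
    		"-t", "ceph",
    		fmt.Sprintf("%s:%s", monitors, volRootPath),
    		target,
    		"-o", "name=admin,secretfile=/etc/ceph/admin.secret",
    	}
    	out, err := exec.Command("mount", args...).CombinedOutput()
    	fmt.Println(string(out), err)
    }
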