diff --git a/internal/util/cryptsetup.go b/internal/util/cryptsetup.go
index a7c27f362..22bc30e72 100644
--- a/internal/util/cryptsetup.go
+++ b/internal/util/cryptsetup.go
@@ -25,7 +25,17 @@ import (
 
 // LuksFormat sets up volume as an encrypted LUKS partition.
 func LuksFormat(devicePath, passphrase string) (stdout, stderr []byte, err error) {
-	return execCryptsetupCommand(&passphrase, "-q", "luksFormat", "--type", "luks2", "--hash", "sha256", devicePath, "-d", "/dev/stdin")
+	return execCryptsetupCommand(
+		&passphrase,
+		"-q",
+		"luksFormat",
+		"--type",
+		"luks2",
+		"--hash",
+		"sha256",
+		devicePath,
+		"-d",
+		"/dev/stdin")
 }
 
 // LuksOpen opens LUKS encrypted partition and sets up a mapping.
diff --git a/internal/util/kms.go b/internal/util/kms.go
index 1b5913d57..660ccac88 100644
--- a/internal/util/kms.go
+++ b/internal/util/kms.go
@@ -252,7 +252,10 @@ func RegisterKMSProvider(provider KMSProvider) bool {
 // buildKMS creates a new KMSProvider instance, based on the configuration that
 // was passed. This uses getKMSProvider() internally to identify the
 // KMSProvider to instantiate.
-func (kf *kmsProviderList) buildKMS(tenant string, config map[string]interface{}, secrets map[string]string) (EncryptionKMS, error) {
+func (kf *kmsProviderList) buildKMS(
+	tenant string,
+	config map[string]interface{},
+	secrets map[string]string) (EncryptionKMS, error) {
 	providerName, err := getKMSProvider(config)
 	if err != nil {
 		return nil, err
diff --git a/internal/util/topology.go b/internal/util/topology.go
index 773b3efcd..299efae7d 100644
--- a/internal/util/topology.go
+++ b/internal/util/topology.go
@@ -131,7 +131,8 @@ type TopologyConstrainedPool struct {
 
 // GetTopologyFromRequest extracts TopologyConstrainedPools and passed in accessibility constraints
 // from a CSI CreateVolume request.
-func GetTopologyFromRequest(req *csi.CreateVolumeRequest) (*[]TopologyConstrainedPool, *csi.TopologyRequirement, error) {
+func GetTopologyFromRequest(
+	req *csi.CreateVolumeRequest) (*[]TopologyConstrainedPool, *csi.TopologyRequirement, error) {
 	var (
 		topologyPools []TopologyConstrainedPool
 	)
@@ -151,7 +152,10 @@ func GetTopologyFromRequest(req *csi.CreateVolumeRequest) (*[]TopologyConstraine
 	// extract topology based pools configuration
 	err := json.Unmarshal([]byte(strings.Replace(topologyPoolsStr, "\n", " ", -1)), &topologyPools)
 	if err != nil {
-		return nil, nil, fmt.Errorf("failed to parse JSON encoded topology constrained pools parameter (%s): %v", topologyPoolsStr, err)
+		return nil, nil, fmt.Errorf(
+			"failed to parse JSON encoded topology constrained pools parameter (%s): %v",
+			topologyPoolsStr,
+			err)
 	}
 
 	return &topologyPools, accessibilityRequirements, nil
diff --git a/internal/util/util.go b/internal/util/util.go
index 52b89fb6b..5039b03ba 100644
--- a/internal/util/util.go
+++ b/internal/util/util.go
@@ -76,8 +76,9 @@ type Config struct {
 	DomainLabels string // list of domain labels to read from the node
 
 	// metrics related flags
-	MetricsPath     string // path of prometheus endpoint where metrics will be available
-	HistogramOption string // Histogram option for grpc metrics, should be comma separated value, ex:= "0.5,2,6" where start=0.5 factor=2, count=6
+	MetricsPath     string // path of prometheus endpoint where metrics will be available
+	HistogramOption string // Histogram option for grpc metrics, should be comma separated value,
+	// ex:= "0.5,2,6" where start=0.5 factor=2, count=6
 	MetricsIP       string // TCP port for liveness/ metrics requests
 	PidLimit        int    // PID limit to configure through cgroups")
 	MetricsPort     int    // TCP port for liveness/grpc metrics requests
@@ -97,10 +98,12 @@ type Config struct {
 	// cephfs related flags
 	ForceKernelCephFS bool // force to use the ceph kernel client even if the kernel is < 4.17
 
-	// RbdHardMaxCloneDepth is the hard limit for maximum number of nested volume clones that are taken before a flatten occurs
+	// RbdHardMaxCloneDepth is the hard limit for maximum number of nested volume clones that are taken before a flatten
+	// occurs
 	RbdHardMaxCloneDepth uint
 
-	// RbdSoftMaxCloneDepth is the soft limit for maximum number of nested volume clones that are taken before a flatten occurs
+	// RbdSoftMaxCloneDepth is the soft limit for maximum number of nested volume clones that are taken before a flatten
+	// occurs
 	RbdSoftMaxCloneDepth uint
 
 	// MaxSnapshotsOnImage represents the maximum number of snapshots allowed
@@ -233,7 +236,13 @@ func CheckKernelSupport(release string, supportedVersions []KernelVersion) bool
 
 // GenerateVolID generates a volume ID based on passed in parameters and version, to be returned
 // to the CO system.
-func GenerateVolID(ctx context.Context, monitors string, cr *Credentials, locationID int64, pool, clusterID, objUUID string, volIDVersion uint16) (string, error) {
+func GenerateVolID(
+	ctx context.Context,
+	monitors string,
+	cr *Credentials,
+	locationID int64,
+	pool, clusterID, objUUID string,
+	volIDVersion uint16) (string, error) {
 	var err error
 
 	if locationID == InvalidPoolID {
diff --git a/internal/util/validate.go b/internal/util/validate.go
index 1083f831e..d0350b9ba 100644
--- a/internal/util/validate.go
+++ b/internal/util/validate.go
@@ -27,7 +27,10 @@ func ValidateNodeStageVolumeRequest(req *csi.NodeStageVolumeRequest) error {
 	// validate stagingpath exists
 	ok := checkDirExists(req.GetStagingTargetPath())
 	if !ok {
-		return status.Errorf(codes.InvalidArgument, "staging path %s does not exist on node", req.GetStagingTargetPath())
+		return status.Errorf(
+			codes.InvalidArgument,
+			"staging path %s does not exist on node",
+			req.GetStagingTargetPath())
 	}
 	return nil
 }
@@ -83,7 +86,8 @@ func ValidateNodeUnpublishVolumeRequest(req *csi.NodeUnpublishVolumeRequest) err
 // volume is from source as empty ReadOnlyMany is not supported.
 func CheckReadOnlyManyIsSupported(req *csi.CreateVolumeRequest) error {
 	for _, capability := range req.GetVolumeCapabilities() {
-		if m := capability.GetAccessMode().Mode; m == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY || m == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY {
+		if m := capability.GetAccessMode().Mode; m == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY ||
+			m == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY {
 			if req.GetVolumeContentSource() == nil {
 				return status.Error(codes.InvalidArgument, "readOnly accessMode is supported only with content source")
 			}
diff --git a/internal/util/vault_tokens.go b/internal/util/vault_tokens.go
index 80090ccfc..c3706fedc 100644
--- a/internal/util/vault_tokens.go
+++ b/internal/util/vault_tokens.go
@@ -340,7 +340,10 @@ func (kms *VaultTokensKMS) initCertificates(config map[string]interface{}) error
 	if vaultClientCertKeyFromSecret != "" {
 		certKey, err := getCertificate(kms.Tenant, vaultClientCertKeyFromSecret, "key")
 		if err != nil && !apierrs.IsNotFound(err) {
-			return fmt.Errorf("failed to get client certificate key from secret %s: %w", vaultClientCertKeyFromSecret, err)
+			return fmt.Errorf(
+				"failed to get client certificate key from secret %s: %w",
+				vaultClientCertKeyFromSecret,
+				err)
 		}
 		// if the certificate is not present in tenant namespace get it from
 		// cephcsi pod namespace
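
A minimal, self-contained Go sketch of the wrapping convention every hunk above applies (one argument per line, closing parenthesis kept on the final argument's line). It is not part of the patch; execCommand and formatDevice are hypothetical stand-ins, not ceph-csi APIs.

// Illustration only: hypothetical helpers showing the call-wrapping style
// used throughout this change, e.g. in LuksFormat and GenerateVolID.
package main

import (
	"errors"
	"fmt"
)

// execCommand is a hypothetical stand-in for a helper such as
// execCryptsetupCommand; it only echoes the arguments it received.
func execCommand(
	stdin *string,
	args ...string) (stdout, stderr []byte, err error) {
	if stdin == nil {
		return nil, nil, errors.New("no stdin data provided")
	}
	return []byte(fmt.Sprint(args)), nil, nil
}

// formatDevice mirrors the one-argument-per-line call style that the
// refactored LuksFormat uses after this change.
func formatDevice(devicePath, passphrase string) error {
	_, _, err := execCommand(
		&passphrase,
		"-q",
		"luksFormat",
		devicePath)
	return err
}

func main() {
	if err := formatDevice("/dev/sdb", "secret"); err != nil {
		fmt.Println("format failed:", err)
	}
}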