Mirror of https://github.com/minio/mc.git
cleanup commands to use Go 'err' as 'e' (#4232)
Many commands had stopped following the expected convention in `mc` of naming plain Go errors `e` to differentiate them from `*probe.Error` values. To keep things readable, this PR is a first attempt at such a cleanup.
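For readers unfamiliar with the convention: plain Go errors returned by library calls are named `e`, and they are wrapped via `probe.NewError` into `*probe.Error` values (conventionally named `err`) that carry a trace and feed `fatalIf`. A minimal, self-contained sketch of that shape — `probeError`, `newProbeError`, and `fatalIf` below are hypothetical stand-ins, since the real `probe` and `fatalIf` helpers are internal to `mc`:

    package main

    import (
    	"errors"
    	"fmt"
    	"os"
    )

    // probeError is a stand-in for *probe.Error (illustration only).
    type probeError struct{ cause error }

    // newProbeError mirrors the probe.NewError shape: wrap a plain Go error.
    func newProbeError(e error) *probeError {
    	if e == nil {
    		return nil
    	}
    	return &probeError{cause: e}
    }

    // fatalIf mirrors mc's helper: print the message and exit when set.
    func fatalIf(err *probeError, msg string) {
    	if err == nil {
    		return
    	}
    	fmt.Fprintf(os.Stderr, "%s: %v\n", msg, err.cause)
    	os.Exit(1)
    }

    func main() {
    	// Convention: 'e' names the plain Go error from an external call...
    	e := errors.New("invalid size")
    	// ...while 'err' names the wrapped *probe.Error handed to fatalIf.
    	err := newProbeError(e)
    	fatalIf(err, "Unable to parse quota")
    }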
@@ -133,9 +133,12 @@ func mainAdminBucketQuota(ctx *cli.Context) error {
 		quotaStr := ctx.String("hard")
 		quota, e := humanize.ParseBytes(quotaStr)
 		fatalIf(probe.NewError(e).Trace(quotaStr), "Unable to parse quota")
-		if e = client.SetBucketQuota(globalContext, targetURL, &madmin.BucketQuota{Quota: quota, Type: qType}); e != nil {
-			fatalIf(probe.NewError(e).Trace(args...), "Unable to set bucket quota")
-		}
+		fatalIf(probe.NewError(client.SetBucketQuota(globalContext, targetURL, &madmin.BucketQuota{
+			Quota: quota,
+			Type:  qType,
+		})).Trace(args...), "Unable to set bucket quota")
 
 		printMsg(quotaMessage{
 			op:     "set",
 			Bucket: targetURL,
@@ -144,8 +147,8 @@ func mainAdminBucketQuota(ctx *cli.Context) error {
 			Status: "success",
 		})
 	} else if ctx.Bool("clear") {
-		if err := client.SetBucketQuota(globalContext, targetURL, &madmin.BucketQuota{}); err != nil {
-			fatalIf(probe.NewError(err).Trace(args...), "Unable to clear bucket quota config")
+		if e := client.SetBucketQuota(globalContext, targetURL, &madmin.BucketQuota{}); e != nil {
+			fatalIf(probe.NewError(e).Trace(args...), "Unable to clear bucket quota config")
 		}
 		printMsg(quotaMessage{
 			op: "unset",
@@ -84,8 +84,8 @@ func mainClusterBucketExport(ctx *cli.Context) error {
 	aliasedURL = filepath.Clean(aliasedURL)
 	_, bucket := url2Alias(aliasedURL)
 
-	r, ierr := client.ExportBucketMetadata(context.Background(), bucket)
-	fatalIf(probe.NewError(ierr).Trace(aliasedURL), "Unable to export bucket metadata.")
+	r, e := client.ExportBucketMetadata(context.Background(), bucket)
+	fatalIf(probe.NewError(e).Trace(aliasedURL), "Unable to export bucket metadata.")
 
 	if bucket == "" {
 		bucket = "cluster"
@@ -109,8 +109,9 @@ func mainClusterBucketImport(ctx *cli.Context) error {
 	aliasedURL = filepath.Clean(aliasedURL)
 	_, bucket := url2Alias(aliasedURL)
 
-	rpt, ierr := client.ImportBucketMetadata(context.Background(), bucket, f)
-	fatalIf(probe.NewError(ierr).Trace(aliasedURL), "Unable to import bucket metadata.")
+	rpt, e := client.ImportBucketMetadata(context.Background(), bucket, f)
+	fatalIf(probe.NewError(e).Trace(aliasedURL), "Unable to import bucket metadata.")
 
 	printMsg(importMetaMsg{
 		BucketMetaImportErrs: rpt,
 		Status:               "success",
@@ -78,8 +78,8 @@ func mainClusterIAMExport(ctx *cli.Context) error {
 		return nil
 	}
 
-	r, ierr := client.ExportIAM(context.Background())
-	fatalIf(probe.NewError(ierr).Trace(aliasedURL), "Unable to export IAM info.")
+	r, e := client.ExportIAM(context.Background())
+	fatalIf(probe.NewError(e).Trace(aliasedURL), "Unable to export IAM info.")
 
 	// Create iam info zip file
 	tmpFile, e := ioutil.TempFile("", fmt.Sprintf("%s-iam-info", aliasedURL))
@@ -92,8 +92,9 @@ func mainClusterIAMImport(ctx *cli.Context) error {
 		return nil
 	}
 
-	ierr := client.ImportIAM(context.Background(), f)
-	fatalIf(probe.NewError(ierr).Trace(aliasedURL), "Unable to import IAM info.")
+	e = client.ImportIAM(context.Background(), f)
+	fatalIf(probe.NewError(e).Trace(aliasedURL), "Unable to import IAM info.")
 
 	if !globalJSON {
 		console.Infof("IAM info imported to %s from %s\n", aliasedURL, args.Get(1))
 	}
@@ -18,8 +18,6 @@
 package cmd
 
 import (
-	"errors"
-
 	"github.com/fatih/color"
 	"github.com/minio/cli"
 	"github.com/minio/madmin-go"
@@ -71,18 +69,17 @@ func mainAdminGroupEnableDisable(ctx *cli.Context) error {
 	fatalIf(err, "Unable to initialize admin connection.")
 
 	group := args.Get(1)
-	var err1 error
 	var status madmin.GroupStatus
-	if ctx.Command.Name == "enable" {
+	switch ctx.Command.Name {
+	case "enable":
 		status = madmin.GroupEnabled
-	} else if ctx.Command.Name == "disable" {
+	case "disable":
 		status = madmin.GroupDisabled
-	} else {
-		err1 = errors.New("cannot happen")
-		fatalIf(probe.NewError(err1).Trace(args...), "Could not get group enable")
+	default:
+		fatalIf(errInvalidArgument().Trace(ctx.Command.Name), "Invalid group status name")
 	}
-	err1 = client.SetGroupStatus(globalContext, group, status)
-	fatalIf(probe.NewError(err1).Trace(args...), "Could not get group enable")
+	e := client.SetGroupStatus(globalContext, group, status)
+	fatalIf(probe.NewError(e).Trace(args...), "Unable to set group status")
 
 	printMsg(groupMessage{
 		op: ctx.Command.Name,
@@ -68,8 +68,8 @@ func mainAdminGroupInfo(ctx *cli.Context) error {
 	fatalIf(err, "Unable to initialize admin connection.")
 
 	group := args.Get(1)
-	gd, err1 := client.GetGroupDescription(globalContext, group)
-	fatalIf(probe.NewError(err1).Trace(args...), "Could not get group info")
+	gd, e := client.GetGroupDescription(globalContext, group)
+	fatalIf(probe.NewError(e).Trace(args...), "Unable to fetch group info")
 
 	printMsg(groupMessage{
 		op: "info",
@@ -67,8 +67,8 @@ func mainAdminGroupList(ctx *cli.Context) error {
 	client, err := newAdminClient(aliasedURL)
 	fatalIf(err, "Unable to initialize admin connection.")
 
-	gs, err1 := client.ListGroups(globalContext)
-	fatalIf(probe.NewError(err1).Trace(args...), "Could not get group list")
+	gs, e := client.ListGroups(globalContext)
+	fatalIf(probe.NewError(e).Trace(args...), "Unable to list groups")
 
 	printMsg(groupMessage{
 		op: "list",
@@ -98,10 +98,7 @@ func mainAdminScannerInfo(ctx *cli.Context) error {
 
 	// Create a new MinIO Admin Client
 	client, err := newAdminClient(aliasedURL)
-	if err != nil {
-		fatalIf(err.Trace(aliasedURL), "Unable to initialize admin client.")
-		return nil
-	}
+	fatalIf(err.Trace(aliasedURL), "Unable to initialize admin client.")
 
 	ctxt, cancel := context.WithCancel(globalContext)
 	defer cancel()
@@ -123,7 +120,7 @@ func mainAdminScannerInfo(ctx *cli.Context) error {
 			os.Exit(0)
 		}()
 	}
-	cerr := client.Metrics(ctxt, opts, func(metrics madmin.RealtimeMetrics) {
+	e := client.Metrics(ctxt, opts, func(metrics madmin.RealtimeMetrics) {
 		if globalJSON {
 			printMsg(metricsMessage{RealtimeMetrics: metrics})
 			return
@@ -131,8 +128,8 @@ func mainAdminScannerInfo(ctx *cli.Context) error {
 		ui.Send(metrics)
 	})
 
-	if cerr != nil && !errors.Is(cerr, context.Canceled) {
-		fatalIf(probe.NewError(cerr).Trace(aliasedURL), "Error making request")
+	if e != nil && !errors.Is(e, context.Canceled) {
+		fatalIf(probe.NewError(e).Trace(aliasedURL), "Unable to fetch scanner metrics")
 		return nil
 	}
 
@@ -363,8 +363,8 @@ func mainAdminTierAdd(ctx *cli.Context) error {
 
 	args := ctx.Args()
 	tierTypeStr := args.Get(0)
-	tierType, err := madmin.NewTierType(tierTypeStr)
-	fatalIf(probe.NewError(err), "Unsupported tier type")
+	tierType, e := madmin.NewTierType(tierTypeStr)
+	fatalIf(probe.NewError(e), "Unsupported tier type")
 
 	aliasedURL := args.Get(1)
 	tierName := args.Get(2)
@@ -377,9 +377,7 @@ func mainAdminTierAdd(ctx *cli.Context) error {
 	fatalIf(cerr, "Unable to initialize admin connection.")
 
 	tCfg := fetchTierConfig(ctx, strings.ToUpper(tierName), tierType)
-	if err = client.AddTier(globalContext, tCfg); err != nil {
-		fatalIf(probe.NewError(err).Trace(args...), "Unable to configure remote tier target")
-	}
+	fatalIf(probe.NewError(client.AddTier(globalContext, tCfg)).Trace(args...), "Unable to configure remote tier target")
 
 	msg := &tierMessage{
 		op: "add",
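A recurring simplification in this diff collapses the three-line `if e := call(); e != nil { fatalIf(...) }` form into a single `fatalIf(probe.NewError(call())...)`. This works because `probe.NewError` presumably returns a nil `*probe.Error` for a nil input (as in minio's probe package), so `fatalIf` is a no-op on success. A compact sketch under that assumption, again with hypothetical stand-ins for the internal helpers:

    package main

    import (
    	"errors"
    	"fmt"
    	"os"
    )

    type probeError struct{ cause error }

    func newProbeError(e error) *probeError {
    	if e == nil {
    		return nil // the property the collapsed form relies on
    	}
    	return &probeError{cause: e}
    }

    func fatalIf(err *probeError, msg string) {
    	if err != nil {
    		fmt.Fprintf(os.Stderr, "%s: %v\n", msg, err.cause)
    		os.Exit(1)
    	}
    }

    // addTier is a placeholder for a call like client.AddTier(...).
    func addTier() error { return errors.New("tier already exists") }

    func main() {
    	// Wrap the call directly: a nil error wraps to a nil *probeError,
    	// so fatalIf does nothing on the success path.
    	fatalIf(newProbeError(addTier()), "Unable to configure remote tier target")
    }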
@@ -135,9 +135,8 @@ func mainAdminTierEdit(ctx *cli.Context) error {
 		fatalIf(errInvalidArgument().Trace(args.Tail()...), "Insufficient credential information supplied to update remote tier target credentials")
 	}
 
-	if err := client.EditTier(globalContext, tierName, creds); err != nil {
-		fatalIf(probe.NewError(err).Trace(args...), "Unable to edit remote tier")
-	}
+	e := client.EditTier(globalContext, tierName, creds)
+	fatalIf(probe.NewError(e).Trace(args...), "Unable to edit remote tier")
 
 	printMsg(&tierMessage{
 		op: "edit",
@@ -180,19 +180,18 @@ func mainAdminTierInfo(ctx *cli.Context) error {
 	checkAdminTierInfoSyntax(ctx)
 	args := ctx.Args()
 	aliasedURL := args.Get(0)
-	var err error
 
 	// Create a new MinIO Admin Client
 	client, cerr := newAdminClient(aliasedURL)
 	fatalIf(cerr, "Unable to initialize admin connection.")
 
 	var msg tierInfoMessage
-	tInfos, err := client.TierStats(globalContext)
-	if err != nil {
+	tInfos, e := client.TierStats(globalContext)
+	if e != nil {
 		msg = tierInfoMessage{
 			Status:  "error",
 			Context: ctx,
-			Error:   err.Error(),
+			Error:   e.Error(),
 		}
 	} else {
 		msg = tierInfoMessage{
@@ -178,10 +178,8 @@ func mainAdminTierList(ctx *cli.Context) error {
 	client, cerr := newAdminClient(aliasedURL)
 	fatalIf(cerr, "Unable to initialize admin connection.")
 
-	tiers, err := client.ListTiers(globalContext)
-	if err != nil {
-		fatalIf(probe.NewError(err).Trace(args...), "Unable to list configured remote tier targets")
-	}
+	tiers, e := client.ListTiers(globalContext)
+	fatalIf(probe.NewError(e).Trace(args...), "Unable to list configured remote tier targets")
 
 	printMsg(&tierListMessage{
 		Status: "success",
@@ -68,9 +68,8 @@ func mainAdminTierRm(ctx *cli.Context) error {
 	client, cerr := newAdminClient(aliasedURL)
 	fatalIf(cerr, "Unable to initialize admin connection.")
 
-	if err := client.RemoveTier(globalContext, tierName); err != nil {
-		fatalIf(probe.NewError(err).Trace(args...), "Unable to remove remote tier target")
-	}
+	e := client.RemoveTier(globalContext, tierName)
+	fatalIf(probe.NewError(e).Trace(args...), "Unable to remove remote tier target")
 
 	printMsg(&tierMessage{
 		op: "rm",
@@ -68,9 +68,8 @@ func mainAdminTierVerify(ctx *cli.Context) error {
 	client, cerr := newAdminClient(aliasedURL)
 	fatalIf(cerr, "Unable to initialize admin connection.")
 
-	if err := client.VerifyTier(globalContext, tierName); err != nil {
-		fatalIf(probe.NewError(err).Trace(args...), "Unable to verify remote tier target")
-	}
+	e := client.VerifyTier(globalContext, tierName)
+	fatalIf(probe.NewError(e).Trace(args...), "Unable to verify remote tier target")
 
 	printMsg(&tierMessage{
 		op: "verify",
@@ -136,7 +136,7 @@ func odCopy(ctx context.Context, odURLs URLs, args argKVS, odType string) (odMes
 
 	// Create reader from source.
 	reader, err := getSourceStreamFromURL(ctx, sourcePath, encKeyDB, getSourceOpts{GetOptions: getOpts})
-	fatalIf(err, "Unable to get source stream")
+	fatalIf(err.Trace(sourcePath), "Unable to get source stream")
 	defer reader.Close()
 
 	putOpts := PutOptions{
@@ -155,11 +155,11 @@ func odCopy(ctx context.Context, odURLs URLs, args argKVS, odType string) (odMes
 
 	// Write to target.
 	targetClnt, err := newClientFromAlias(targetAlias, targetURL.String())
-	fatalIf(err, "Unable to initialize target client")
+	fatalIf(err.Trace(targetURL.String()), "Unable to initialize target client")
 
 	// Put object.
 	total, err := targetClnt.PutPart(ctx, reader, combinedSize, pg, putOpts)
-	fatalIf(err, "Unable to put object")
+	fatalIf(err.Trace(targetURL.String()), "Unable to upload")
 
 	// Get upload time.
 	elapsed := time.Since(pg.startTime)
@@ -242,7 +242,7 @@ func odDownload(ctx context.Context, odURLs URLs, args argKVS) (odMessage, error
 	// Upload the file.
 	total, err := putTargetStream(ctx, "", targetPath, "", "", "",
 		reader, -1, pg, PutOptions{})
-	fatalIf(err, "Unable to download object")
+	fatalIf(err.Trace(targetPath), "Unable to upload an object")
 
 	// Get upload time.
 	elapsed := time.Since(pg.startTime)
@@ -264,7 +264,7 @@ func odDownload(ctx context.Context, odURLs URLs, args argKVS) (odMessage, error
 // singleGet helps odDownload download a single part.
 func singleGet(ctx context.Context, cli Client) io.ReadCloser {
 	reader, err := cli.GetPart(ctx, 0)
-	fatalIf(err, "Unable to get object reader")
+	fatalIf(err, "Unable to download object")
 
 	return reader
 }
@@ -276,7 +276,7 @@ func multiGet(ctx context.Context, cli Client, parts, skip int) io.Reader {
 	// Get reader for each part.
 	for i := 1 + skip; i <= parts; i++ {
 		reader, err := cli.GetPart(ctx, parts)
-		fatalIf(err, "Unable to get object reader")
+		fatalIf(err, "Unable to download part of an object")
 		readers = append(readers, reader)
 	}
 	reader := io.MultiReader(readers...)
@@ -105,9 +105,9 @@ func checkSupportDiagSyntax(ctx *cli.Context) {
 
 // compress and tar MinIO diagnostics output
 func tarGZ(healthInfo interface{}, version string, filename string) error {
-	f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, 0o666)
-	if err != nil {
-		return err
+	f, e := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, 0o666)
+	if e != nil {
+		return e
 	}
 	defer f.Close()
 
@@ -120,12 +120,12 @@ func tarGZ(healthInfo interface{}, version string, filename string) error {
 		Version string `json:"version"`
 	}{Version: version}
 
-	if err := enc.Encode(header); err != nil {
-		return err
+	if e := enc.Encode(header); e != nil {
+		return e
 	}
 
-	if err := enc.Encode(healthInfo); err != nil {
-		return err
+	if e := enc.Encode(healthInfo); e != nil {
+		return e
 	}
 
 	if globalAirgapped {
@@ -355,12 +355,11 @@ func fetchServerDiagInfo(ctx *cli.Context, client *madmin.AdminClient) (interfac
 		admin(len(info.Minio.Info.Servers) > 0)
 	}
 
-	var err error
 	// Fetch info of all servers (cluster or single server)
-	resp, version, err := client.ServerHealthInfo(cont, *opts, ctx.Duration("deadline"))
-	if err != nil {
+	resp, version, e := client.ServerHealthInfo(cont, *opts, ctx.Duration("deadline"))
+	if e != nil {
 		cancel()
-		return nil, "", err
+		return nil, "", e
 	}
 
 	var healthInfo interface{}
@@ -370,9 +369,9 @@ func fetchServerDiagInfo(ctx *cli.Context, client *madmin.AdminClient) (interfac
 	case madmin.HealthInfoVersion0:
 		info := madmin.HealthInfoV0{}
 		for {
-			if err = decoder.Decode(&info); err != nil {
-				if errors.Is(err, io.EOF) {
-					err = nil
+			if e = decoder.Decode(&info); e != nil {
+				if errors.Is(e, io.EOF) {
+					e = nil
 				}
 
 				break
@@ -383,9 +382,9 @@ func fetchServerDiagInfo(ctx *cli.Context, client *madmin.AdminClient) (interfac
 
 		// Old minio versions don't return the MinIO info in
 		// response of the healthinfo api. So fetch it separately
-		minioInfo, err := client.ServerInfo(globalContext)
-		if err != nil {
-			info.Minio.Error = err.Error()
+		minioInfo, e := client.ServerInfo(globalContext)
+		if e != nil {
+			info.Minio.Error = e.Error()
 		} else {
 			info.Minio.Info = minioInfo
 		}
@@ -395,9 +394,9 @@ func fetchServerDiagInfo(ctx *cli.Context, client *madmin.AdminClient) (interfac
 	case madmin.HealthInfoVersion2:
 		info := madmin.HealthInfoV2{}
 		for {
-			if err = decoder.Decode(&info); err != nil {
-				if errors.Is(err, io.EOF) {
-					err = nil
+			if e = decoder.Decode(&info); e != nil {
+				if errors.Is(e, io.EOF) {
+					e = nil
 				}
 
 				break
@@ -409,9 +408,9 @@ func fetchServerDiagInfo(ctx *cli.Context, client *madmin.AdminClient) (interfac
 	case madmin.HealthInfoVersion:
 		info := madmin.HealthInfo{}
 		for {
-			if err = decoder.Decode(&info); err != nil {
-				if errors.Is(err, io.EOF) {
-					err = nil
+			if e = decoder.Decode(&info); e != nil {
+				if errors.Is(e, io.EOF) {
+					e = nil
 				}
 
 				break
@@ -422,9 +421,9 @@ func fetchServerDiagInfo(ctx *cli.Context, client *madmin.AdminClient) (interfac
 		healthInfo = info
 	}
 
-	// cancel the context if obdChan has returned.
+	// cancel the context if supportDiagChan has returned.
 	cancel()
-	return healthInfo, version, err
+	return healthInfo, version, e
 }
 
 // HealthDataTypeSlice is a typed list of health tests
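The three decode loops above share one idiom: repeatedly decode the streamed health report into the same struct until `Decode` fails, treating `io.EOF` as a clean end of stream by clearing the error. A self-contained sketch of the idiom using only the standard library — the `record` type and the input stream are illustrative, not from `mc`:

    package main

    import (
    	"encoding/json"
    	"errors"
    	"fmt"
    	"io"
    	"strings"
    )

    func main() {
    	// Illustrative stand-in for a streamed response of JSON documents.
    	stream := strings.NewReader(`{"n":1} {"n":2} {"n":3}`)
    	type record struct{ N int }

    	var info record
    	decoder := json.NewDecoder(stream)
    	var e error
    	for {
    		// Each Decode overwrites 'info' with the latest document,
    		// so the final value wins once the stream ends.
    		if e = decoder.Decode(&info); e != nil {
    			if errors.Is(e, io.EOF) {
    				e = nil // EOF just means the stream is done
    			}
    			break
    		}
    	}
    	fmt.Println(info.N, e) // prints: 3 <nil>
    }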
@@ -433,8 +432,8 @@ type HealthDataTypeSlice []madmin.HealthDataType
 // Set - sets the flag to the given value
 func (d *HealthDataTypeSlice) Set(value string) error {
 	for _, v := range strings.Split(value, ",") {
-		if obdData, ok := madmin.HealthDataTypesMap[strings.Trim(v, " ")]; ok {
-			*d = append(*d, obdData)
+		if supportDiagData, ok := madmin.HealthDataTypesMap[strings.Trim(v, " ")]; ok {
+			*d = append(*d, supportDiagData)
 		} else {
 			return fmt.Errorf("valid options include %s", options.String())
 		}
@@ -445,14 +444,14 @@ func (d *HealthDataTypeSlice) Set(value string) error {
 // String - returns the string representation of the health datatypes
 func (d *HealthDataTypeSlice) String() string {
 	val := ""
-	for _, obdData := range *d {
+	for _, supportDiagData := range *d {
 		formatStr := "%s"
 		if val != "" {
 			formatStr = fmt.Sprintf("%s,%%s", formatStr)
 		} else {
 			formatStr = fmt.Sprintf("%s%%s", formatStr)
 		}
-		val = fmt.Sprintf(formatStr, val, string(obdData))
+		val = fmt.Sprintf(formatStr, val, string(supportDiagData))
 	}
 	return val
 }
@@ -518,8 +517,8 @@ func (f HealthDataTypeFlag) ApplyWithError(set *flag.FlagSet) error {
 		newVal := &HealthDataTypeSlice{}
 		for _, s := range strings.Split(envVal, ",") {
 			s = strings.TrimSpace(s)
-			if err := newVal.Set(s); err != nil {
-				return fmt.Errorf("could not parse %s as health datatype value for flag %s: %s", envVal, f.Name, err)
+			if e := newVal.Set(s); e != nil {
+				return fmt.Errorf("could not parse %s as health datatype value for flag %s: %s", envVal, f.Name, e)
 			}
 		}
 		f.Value = newVal
@@ -121,11 +121,11 @@ func mainSupportInspect(ctx *cli.Context) error {
 		console.Infoln("Your shell is auto determined as '" + shellName + "', wildcard patterns are only supported with 'bash' SHELL.")
 	}
 
-	key, r, ierr := client.Inspect(context.Background(), madmin.InspectOptions{
+	key, r, e := client.Inspect(context.Background(), madmin.InspectOptions{
 		Volume: bucket,
 		File:   prefix,
 	})
-	fatalIf(probe.NewError(ierr).Trace(aliasedURL), "Unable to inspect file.")
+	fatalIf(probe.NewError(e).Trace(aliasedURL), "Unable to inspect file.")
 
 	// Create profile zip file
 	tmpFile, e := ioutil.TempFile("", "mc-inspect-")
@@ -60,17 +60,17 @@ func mainAdminSpeedTestDrive(ctx *cli.Context, aliasedURL string) error {
 
 	serial := ctx.Bool("serial")
 
-	resultCh, speedTestErr := client.DriveSpeedtest(ctxt, madmin.DriveSpeedTestOpts{
+	resultCh, e := client.DriveSpeedtest(ctxt, madmin.DriveSpeedTestOpts{
 		Serial:    serial,
 		BlockSize: uint64(blocksize),
 		FileSize:  uint64(filesize),
 	})
 
 	if globalJSON {
-		if speedTestErr != nil {
+		if e != nil {
 			printMsg(speedTestResult{
 				Type:  driveSpeedTest,
-				Err:   speedTestErr.Error(),
+				Err:   e.Error(),
 				Final: true,
 			})
 			return nil
@@ -102,10 +102,10 @@ func mainAdminSpeedTestDrive(ctx *cli.Context, aliasedURL string) error {
 	}()
 
 	go func() {
-		if speedTestErr != nil {
+		if e != nil {
 			printMsg(speedTestResult{
 				Type: driveSpeedTest,
-				Err:  speedTestErr.Error(),
+				Err:  e.Error(),
 			})
 			return
 		}
@@ -54,19 +54,19 @@ func mainAdminSpeedTestNetperf(ctx *cli.Context, aliasedURL string) error {
 		defer close(resultCh)
 		defer close(errorCh)
 
-		result, err := client.Netperf(ctxt, duration)
-		if err != nil {
-			errorCh <- err
+		result, e := client.Netperf(ctxt, duration)
+		if e != nil {
+			errorCh <- e
 		}
 		resultCh <- result
 	}()
 
 	if globalJSON {
 		select {
-		case err := <-errorCh:
+		case e := <-errorCh:
 			printMsg(speedTestResult{
 				Type:  netSpeedTest,
-				Err:   err.Error(),
+				Err:   e.Error(),
 				Final: true,
 			})
 		case result := <-resultCh:
@@ -92,10 +92,10 @@ func mainAdminSpeedTestNetperf(ctx *cli.Context, aliasedURL string) error {
 	go func() {
 		for {
 			select {
-			case err := <-errorCh:
+			case e := <-errorCh:
 				p.Send(speedTestResult{
 					Type:  netSpeedTest,
-					Err:   err.Error(),
+					Err:   e.Error(),
 					Final: true,
 				})
 				return
@@ -85,7 +85,7 @@ func mainAdminSpeedTestObject(ctx *cli.Context, aliasedURL string) error {
 	// in all other scenarios keep auto-tuning on.
 	autotune := !ctx.IsSet("concurrent")
 
-	resultCh, speedTestErr := client.Speedtest(ctxt, madmin.SpeedtestOpts{
+	resultCh, e := client.Speedtest(ctxt, madmin.SpeedtestOpts{
 		Size:        int(size),
 		Duration:    duration,
 		Concurrency: concurrent,
@@ -94,10 +94,10 @@ func mainAdminSpeedTestObject(ctx *cli.Context, aliasedURL string) error {
 	})
 
 	if globalJSON {
-		if speedTestErr != nil {
+		if e != nil {
 			printMsg(speedTestResult{
 				Type:  objectSpeedTest,
-				Err:   speedTestErr.Error(),
+				Err:   e.Error(),
 				Final: true,
 			})
 			return nil
@@ -134,10 +134,10 @@ func mainAdminSpeedTestObject(ctx *cli.Context, aliasedURL string) error {
 	}()
 
 	go func() {
-		if speedTestErr != nil {
+		if e != nil {
 			p.Send(speedTestResult{
 				Type:  objectSpeedTest,
-				Err:   speedTestErr.Error(),
+				Err:   e.Error(),
 				Final: true,
 			})
 			return
@@ -180,9 +180,9 @@ func mainSupportProfile(ctx *cli.Context) error {
 	}
 
 	console.Infof("Profiling '%s' for %d seconds... ", aliasedURL, duration)
-	data, adminErr := client.Profile(globalContext, madmin.ProfilerType(profilers), time.Second*time.Duration(duration))
+	data, e := client.Profile(globalContext, madmin.ProfilerType(profilers), time.Second*time.Duration(duration))
 
-	fatalIf(probe.NewError(adminErr), "Unable to save profile data")
+	fatalIf(probe.NewError(e), "Unable to save profile data")
 	clr := color.New(color.FgGreen, color.Bold)
 	clr.Printf("saved successfully at '%s'\n", getProfileData(data))
 	return nil