mirror of https://github.com/minio/mc.git synced 2025-04-18 10:04:03 +03:00

upgrade golang-lint to v2 (#5178)

Harshavardhana 2025-03-28 01:05:40 -07:00 committed by GitHub
parent d249b65941
commit c652022dab
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
30 changed files with 154 additions and 150 deletions

.gitignore vendored (3 changes)
View File

@ -14,4 +14,5 @@ mc
mc.RELEASE*
mc.gz
.DS_Store
.vscode/
.vscode/
.bin/

View File

@ -1,32 +1,42 @@
version: "2"
linters:
disable-all: true
default: none
enable:
- typecheck
- goimports
- misspell
- staticcheck
- govet
- revive
- ineffassign
- gomodguard
- gofmt
- govet
- ineffassign
- misspell
- revive
- staticcheck
- unused
settings:
misspell:
locale: US
exclusions:
generated: lax
rules:
- path: (.+)\.go$
text: instead of using struct literal
- path: (.+)\.go$
text: should have a package comment
- path: (.+)\.go$
text: error strings should not be capitalized or end with punctuation or a newline
- path: (.+)\.go$
text: error strings should not end with punctuation or newlines
- path: (.+)\.go$
text: error strings should not be capitalized
paths:
- third_party$
- builtin$
- examples$
formatters:
enable:
- gofmt
- gofumpt
linters-settings:
golint:
min-confidence: 0
misspell:
locale: US
# Choose whether or not to use the extra rules that are disabled
# by default
extra-rules: false
issues:
exclude-use-default: false
exclude:
- instead of using struct literal
- should have a package comment
- error strings should not be capitalized or end with punctuation or a newline
- goimports
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$
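The migrated v2 config replaces the old `issues.exclude` strings with structured `exclusions.rules` and moves `gofmt`/`gofumpt` under a separate `formatters` section. As a rough illustration of what those exclusions silence (a hypothetical helper, not project code), revive/staticcheck would normally flag error strings like this:

```go
package main

import (
	"errors"
	"fmt"
)

// fetchConfig is a hypothetical helper, used only to show the kind of code
// the excluded diagnostics would otherwise report.
func fetchConfig(path string) error {
	if path == "" {
		// Normally flagged as "error strings should not be capitalized or end
		// with punctuation" — silenced project-wide by the exclusions above.
		return errors.New("Unable to load configuration.")
	}
	return nil
}

func main() {
	fmt.Println(fetchConfig(""))
}
```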

View File

@ -142,7 +142,7 @@ func statusTick(s madmin.MetaStatus) string {
}
func (i importMetaMsg) String() string {
m := i.BucketMetaImportErrs.Buckets
m := i.Buckets
totBuckets := len(m)
totErrs := 0
for _, st := range m {
@ -186,7 +186,7 @@ func (i importMetaMsg) JSON() string {
// Disable escaping special chars to display XML tags correctly
enc.SetEscapeHTML(false)
fatalIf(probe.NewError(enc.Encode(i.BucketMetaImportErrs.Buckets)), "Unable to marshal into JSON.")
fatalIf(probe.NewError(enc.Encode(i.Buckets)), "Unable to marshal into JSON.")
return buf.String()
}
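This hunk (and several below) drops the embedded type name from selector expressions, a simplification the upgraded staticcheck/revive pass suggests when a field is promoted through an embedded struct. A minimal sketch with hypothetical types, not the actual madmin definitions:

```go
package main

import "fmt"

// BucketMetaImportErrs is a hypothetical stand-in for the embedded madmin type.
type BucketMetaImportErrs struct {
	Buckets map[string]error
}

// importMetaMsg embeds it, so Buckets is promoted and reachable directly.
type importMetaMsg struct {
	BucketMetaImportErrs
}

func main() {
	i := importMetaMsg{BucketMetaImportErrs{Buckets: map[string]error{"b1": nil}}}

	// Both selectors name the same field; the shorter, promoted form on the
	// right is what the linter prefers.
	fmt.Println(len(i.BucketMetaImportErrs.Buckets), len(i.Buckets))
}
```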

View File

@ -87,11 +87,11 @@ func (h hri) getBucketHCCChange() (b, a col, err error) {
a, b = colGrey, colGrey
if len(h.HealResultItem.Before.Drives) > 0 {
b = getColCode(h.HealResultItem.Before.Drives)
if len(h.Before.Drives) > 0 {
b = getColCode(h.Before.Drives)
}
if len(h.HealResultItem.After.Drives) > 0 {
a = getColCode(h.HealResultItem.After.Drives)
if len(h.After.Drives) > 0 {
a = getColCode(h.After.Drives)
}
return
}

View File

@ -398,7 +398,6 @@ func (ui *uiData) UpdateDisplay(s *madmin.HealTaskStatus) {
default:
ui.updateUI(s)
}
return
}
func (ui *uiData) healResumeMsg(aliasedURL string) string {

View File

@ -107,7 +107,7 @@ func userAttachOrDetachPolicy(ctx *cli.Context, attach bool) error {
}
var emptyResp madmin.PolicyAssociationResp
if res.UpdatedAt == emptyResp.UpdatedAt {
if res.UpdatedAt.Equal(emptyResp.UpdatedAt) {
// Older minio does not send a result, so we populate res manually to
// simulate a result. TODO(aditya): remove this after newer minio is
// released in a few months (Older API Deprecated in Jun 2023)
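The new linters flag `==` on `time.Time` values because struct comparison also considers the monotonic reading and Location, not just the instant, while `Equal` compares the instant only. A small sketch of the difference (assuming `UpdatedAt` is a `time.Time`, as the `Equal` call implies):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t1 := time.Now()
	t2 := t1.Round(0).UTC() // same instant: monotonic reading stripped, location changed

	// == compares the internal representation (wall clock, monotonic reading,
	// Location), so it can report false for identical instants.
	fmt.Println(t1 == t2) // false here

	// Equal compares the instant itself, which is what the check above wants.
	fmt.Println(t1.Equal(t2)) // true
}
```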

View File

@ -83,13 +83,13 @@ func (i srRemoveStatus) String() string {
if i.RemoveAll {
return console.Colorize("UserMessage", "All site(s) were removed successfully")
}
if i.ReplicateRemoveStatus.Status == madmin.ReplicateRemoveStatusSuccess {
if i.Status == madmin.ReplicateRemoveStatusSuccess {
return console.Colorize("UserMessage", fmt.Sprintf("Following site(s) %s were removed successfully", i.sites))
}
if len(i.sites) == 1 {
return console.Colorize("UserMessage", fmt.Sprintf("Following site %s was removed partially, some operations failed:\nERROR: '%s'", i.sites, i.ReplicateRemoveStatus.ErrDetail))
return console.Colorize("UserMessage", fmt.Sprintf("Following site %s was removed partially, some operations failed:\nERROR: '%s'", i.sites, i.ErrDetail))
}
return console.Colorize("UserMessage", fmt.Sprintf("Following site(s) %s were removed partially, some operations failed: \nERROR: '%s'", i.sites, i.ReplicateRemoveStatus.ErrDetail))
return console.Colorize("UserMessage", fmt.Sprintf("Following site(s) %s were removed partially, some operations failed: \nERROR: '%s'", i.sites, i.ErrDetail))
}
func checkAdminReplicateRemoveSyntax(ctx *cli.Context) {

View File

@ -167,8 +167,8 @@ func (i srStatus) String() string {
if i.opts.Buckets {
messages = append(messages,
console.Colorize("SummaryHdr", "Bucket replication status:"))
switch {
case i.MaxBuckets == 0:
switch i.MaxBuckets {
case 0:
messages = append(messages, console.Colorize("Summary", "No Buckets present\n"))
default:
msg := console.Colorize(i.getTheme(len(info.BucketStats) == 0), fmt.Sprintf("%d/%d Buckets in sync", info.MaxBuckets-len(info.BucketStats), info.MaxBuckets)) + "\n"
@ -204,8 +204,8 @@ func (i srStatus) String() string {
if i.opts.Policies {
messages = append(messages,
console.Colorize("SummaryHdr", "Policy replication status:"))
switch {
case i.MaxPolicies == 0:
switch i.MaxPolicies {
case 0:
messages = append(messages, console.Colorize("Summary", "No Policies present\n"))
default:
msg := console.Colorize(i.getTheme(len(i.PolicyStats) == 0), fmt.Sprintf("%d/%d Policies in sync", info.MaxPolicies-len(info.PolicyStats), info.MaxPolicies)) + "\n"
@ -243,8 +243,8 @@ func (i srStatus) String() string {
if i.opts.Users {
messages = append(messages,
console.Colorize("SummaryHdr", "User replication status:"))
switch {
case i.MaxUsers == 0:
switch i.MaxUsers {
case 0:
messages = append(messages, console.Colorize("Summary", "No Users present\n"))
default:
msg := console.Colorize(i.getTheme(len(i.UserStats) == 0), fmt.Sprintf("%d/%d Users in sync", info.MaxUsers-len(i.UserStats), info.MaxUsers)) + "\n"
@ -284,8 +284,8 @@ func (i srStatus) String() string {
if i.opts.Groups {
messages = append(messages,
console.Colorize("SummaryHdr", "Group replication status:"))
switch {
case i.MaxGroups == 0:
switch i.MaxGroups {
case 0:
messages = append(messages, console.Colorize("Summary", "No Groups present\n"))
default:
msg := console.Colorize(i.getTheme(len(i.GroupStats) == 0), fmt.Sprintf("%d/%d Groups in sync", i.MaxGroups-len(i.GroupStats), i.MaxGroups)) + "\n"
@ -478,7 +478,7 @@ func (i srStatus) getBucketStatusSummary(siteNames []string, nameIDMap map[strin
var messages []string
coloredDot := console.Colorize("Status", dot)
var found bool
for _, st := range i.SRStatusInfo.BucketStats[i.opts.EntityValue] {
for _, st := range i.BucketStats[i.opts.EntityValue] {
if st.HasBucket {
found = true
break
@ -518,7 +518,7 @@ func (i srStatus) getBucketStatusSummary(siteNames []string, nameIDMap map[strin
rows := make([]string, len(rowLegend))
for j, sname := range siteNames {
dID := nameIDMap[sname]
ss := i.SRStatusInfo.BucketStats[i.opts.EntityValue][dID]
ss := i.BucketStats[i.opts.EntityValue][dID]
var theme, msgStr string
for r := range rowLegend {
switch r {
@ -582,7 +582,7 @@ func (i srStatus) getPolicyStatusSummary(siteNames []string, nameIDMap map[strin
var messages []string
coloredDot := console.Colorize("Status", dot)
var found bool
for _, st := range i.SRStatusInfo.PolicyStats[i.opts.EntityValue] {
for _, st := range i.PolicyStats[i.opts.EntityValue] {
if st.HasPolicy {
found = true
break
@ -603,7 +603,7 @@ func (i srStatus) getPolicyStatusSummary(siteNames []string, nameIDMap map[strin
rows := make([]string, len(rowLegend))
for j, sname := range siteNames {
dID := nameIDMap[sname]
ss := i.SRStatusInfo.PolicyStats[i.opts.EntityValue][dID]
ss := i.PolicyStats[i.opts.EntityValue][dID]
var theme, msgStr string
for r := range rowLegend {
switch r {
@ -634,7 +634,7 @@ func (i srStatus) getUserStatusSummary(siteNames []string, nameIDMap map[string]
var messages []string
coloredDot := console.Colorize("Status", dot)
var found bool
for _, st := range i.SRStatusInfo.UserStats[i.opts.EntityValue] {
for _, st := range i.UserStats[i.opts.EntityValue] {
if st.HasUser {
found = true
break
@ -662,7 +662,7 @@ func (i srStatus) getUserStatusSummary(siteNames []string, nameIDMap map[string]
rows := make([]string, len(rowLegend))
for j, sname := range siteNames {
dID := nameIDMap[sname]
ss := i.SRStatusInfo.UserStats[i.opts.EntityValue][dID]
ss := i.UserStats[i.opts.EntityValue][dID]
var theme, msgStr string
for r := range rowLegend {
switch r {
@ -702,7 +702,7 @@ func (i srStatus) getGroupStatusSummary(siteNames []string, nameIDMap map[string
rowLegend := []string{"Info", "Policy mapping"}
detailFields := make([][]Field, len(rowLegend))
var found bool
for _, st := range i.SRStatusInfo.GroupStats[i.opts.EntityValue] {
for _, st := range i.GroupStats[i.opts.EntityValue] {
if st.HasGroup {
found = true
break
@ -728,7 +728,7 @@ func (i srStatus) getGroupStatusSummary(siteNames []string, nameIDMap map[string
// b := i.opts.EntityValue
for j, sname := range siteNames {
dID := nameIDMap[sname]
ss := i.SRStatusInfo.GroupStats[i.opts.EntityValue][dID]
ss := i.GroupStats[i.opts.EntityValue][dID]
// sm := i.SRStatusInfo.StatsSummary
var theme, msgStr string
for r := range rowLegend {
@ -767,7 +767,7 @@ func (i srStatus) getILMExpiryStatusSummary(siteNames []string, nameIDMap map[st
var messages []string
coloredDot := console.Colorize("Status", dot)
var found bool
for _, st := range i.SRStatusInfo.ILMExpiryStats[i.opts.EntityValue] {
for _, st := range i.ILMExpiryStats[i.opts.EntityValue] {
if st.HasILMExpiryRules {
found = true
break
@ -788,7 +788,7 @@ func (i srStatus) getILMExpiryStatusSummary(siteNames []string, nameIDMap map[st
rows := make([]string, len(rowLegend))
for j, sname := range siteNames {
dID := nameIDMap[sname]
ss := i.SRStatusInfo.ILMExpiryStats[i.opts.EntityValue][dID]
ss := i.ILMExpiryStats[i.opts.EntityValue][dID]
var theme, msgStr string
for r := range rowLegend {
switch r {
@ -817,17 +817,7 @@ func (i srStatus) getILMExpiryStatusSummary(siteNames []string, nameIDMap map[st
// Calculate srstatus options for command line flags
func srStatusOpts(ctx *cli.Context) (opts madmin.SRStatusOptions) {
if !(ctx.IsSet("buckets") ||
ctx.IsSet("users") ||
ctx.IsSet("groups") ||
ctx.IsSet("policies") ||
ctx.IsSet("ilm-expiry-rules") ||
ctx.IsSet("bucket") ||
ctx.IsSet("user") ||
ctx.IsSet("group") ||
ctx.IsSet("policy") ||
ctx.IsSet("ilm-expiry-rule") ||
ctx.IsSet("all")) || ctx.IsSet("all") {
if (!ctx.IsSet("buckets") && !ctx.IsSet("users") && !ctx.IsSet("groups") && !ctx.IsSet("policies") && !ctx.IsSet("ilm-expiry-rules") && !ctx.IsSet("bucket") && !ctx.IsSet("user") && !ctx.IsSet("group") && !ctx.IsSet("policy") && !ctx.IsSet("ilm-expiry-rule") && !ctx.IsSet("all")) || ctx.IsSet("all") {
opts.Buckets = true
opts.Users = true
opts.Groups = true
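Several hunks in this file rewrite `switch { case x == 0: ... }` into a tagged switch on the value itself, which the upgraded linters prefer over the "switch true" form. A minimal standalone sketch of the pattern:

```go
package main

import "fmt"

func describe(maxBuckets int) string {
	// Before: switch { case maxBuckets == 0: ... } — an untagged "switch true".
	// After: switch directly on the value, as in the hunks above.
	switch maxBuckets {
	case 0:
		return "No Buckets present"
	default:
		return fmt.Sprintf("%d buckets tracked", maxBuckets)
	}
}

func main() {
	fmt.Println(describe(0))
	fmt.Println(describe(5))
}
```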

View File

@ -137,10 +137,7 @@ func newAnonymousClient(aliasedURL string) (*madmin.AnonymousClient, *probe.Erro
}
// By default enable HTTPs.
useTLS := true
if targetURL.Scheme == "http" {
useTLS = false
}
useTLS := targetURL.Scheme != "http"
// Construct an anonymous client
anonClient, e := madmin.NewAnonymousClient(targetURL.Host, useTLS)
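Here an if/else assignment collapses into a single boolean expression. A small sketch of the equivalence:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, _ := url.Parse("http://localhost:9000")

	// Before: declare a default, then flip it in an if block.
	useTLS := true
	if u.Scheme == "http" {
		useTLS = false
	}

	// After: assign the condition directly, as the hunk above does.
	useTLSSimplified := u.Scheme != "http"

	fmt.Println(useTLS, useTLSSimplified) // false false
}
```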

View File

@ -35,8 +35,8 @@ func (h adminPolicyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusForbidden)
return
}
switch {
case r.Method == "PUT":
switch r.Method {
case "PUT":
length, e := strconv.Atoi(r.Header.Get("Content-Length"))
if e != nil {
w.WriteHeader(http.StatusBadRequest)

View File

@ -35,8 +35,8 @@ type bucketHandler struct {
}
func (h bucketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
switch {
case r.Method == "GET":
switch r.Method {
case "GET":
// Handler for incoming getBucketLocation request.
if _, ok := r.URL.Query()["location"]; ok {
response := []byte("<LocationConstraint xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\"></LocationConstraint>")
@ -44,28 +44,28 @@ func (h bucketHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Write(response)
return
}
switch {
case r.URL.Path == "/":
switch r.URL.Path {
case "/":
// Handler for incoming ListBuckets request.
response := []byte("<ListAllMyBucketsResult xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\"><Buckets><Bucket><Name>bucket</Name><CreationDate>2015-05-20T23:05:09.230Z</CreationDate></Bucket></Buckets><Owner><ID>minio</ID><DisplayName>minio</DisplayName></Owner></ListAllMyBucketsResult>")
w.Header().Set("Content-Length", strconv.Itoa(len(response)))
w.Write(response)
case r.URL.Path == "/bucket/":
case "/bucket/":
// Handler for incoming ListObjects request.
response := []byte("<ListBucketResult xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\"><Contents><ETag>259d04a13802ae09c7e41be50ccc6baa</ETag><Key>object</Key><LastModified>2015-05-21T18:24:21.097Z</LastModified><Size>22061</Size><Owner><ID>minio</ID><DisplayName>minio</DisplayName></Owner><StorageClass>STANDARD</StorageClass></Contents><Delimiter></Delimiter><EncodingType></EncodingType><IsTruncated>false</IsTruncated><Marker></Marker><MaxKeys>1000</MaxKeys><Name>testbucket</Name><NextMarker></NextMarker><Prefix></Prefix></ListBucketResult>")
w.Header().Set("Content-Length", strconv.Itoa(len(response)))
w.Write(response)
}
case r.Method == "PUT":
switch {
case r.URL.Path == h.resource:
case "PUT":
switch r.URL.Path {
case h.resource:
w.WriteHeader(http.StatusOK)
default:
w.WriteHeader(http.StatusBadRequest)
}
case r.Method == "HEAD":
switch {
case r.URL.Path == h.resource:
case "HEAD":
switch r.URL.Path {
case h.resource:
w.WriteHeader(http.StatusOK)
default:
w.WriteHeader(http.StatusForbidden)
@ -85,8 +85,8 @@ func (h objectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
switch {
case r.Method == http.MethodPut:
switch r.Method {
case http.MethodPut:
// Handler for PUT object request.
length, e := strconv.Atoi(r.Header.Get("Content-Length"))
if e != nil {
@ -100,7 +100,7 @@ func (h objectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
w.Header().Set("ETag", "9af2f8218b150c351ad802c6f3d66abe")
w.WriteHeader(http.StatusOK)
case r.Method == http.MethodHead:
case http.MethodHead:
// Handler for Stat object request.
if r.URL.Path != h.resource {
w.WriteHeader(http.StatusNotFound)
@ -110,7 +110,7 @@ func (h objectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Last-Modified", UTCNow().Format(http.TimeFormat))
w.Header().Set("ETag", "9af2f8218b150c351ad802c6f3d66abe")
w.WriteHeader(http.StatusOK)
case r.Method == http.MethodPost:
case http.MethodPost:
// Handler for multipart upload request.
if _, ok := r.URL.Query()["uploads"]; ok {
if r.URL.Path == h.resource {
@ -132,7 +132,7 @@ func (h objectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNotFound)
return
}
case r.Method == http.MethodGet:
case http.MethodGet:
// Handler for get bucket location request.
if _, ok := r.URL.Query()["location"]; ok {
response := []byte("<LocationConstraint xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\"></LocationConstraint>")
@ -171,8 +171,8 @@ func (h stsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
return
}
switch {
case r.Method == http.MethodPost:
switch r.Method {
case http.MethodPost:
token := r.Form.Get("WebIdentityToken")
if token == string(h.jwt) {
response := []byte("<AssumeRoleWithWebIdentityResponse xmlns=\"https://sts.amazonaws.com/doc/2011-06-15/\"><AssumeRoleWithWebIdentityResult><AssumedRoleUser><Arn></Arn><AssumeRoleId></AssumeRoleId></AssumedRoleUser><Credentials><AccessKeyId>7NL5BR739GUQ0ZOD4JNB</AccessKeyId><SecretAccessKey>A2mxZSxPnHNhSduedUHczsXZpVSSssOLpDruUmTV</SecretAccessKey><Expiration>0001-01-01T00:00:00Z</Expiration><SessionToken>eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJhY2Nlc3NLZXkiOiI3Tkw1QlI3MzlHVVEwWk9ENEpOQiIsImV4cCI6MTY5OTYwMzMwNiwicGFyZW50IjoibWluaW8iLCJzZXNzaW9uUG9saWN5IjoiZXlKV1pYSnphVzl1SWpvaU1qQXhNaTB4TUMweE55SXNJbE4wWVhSbGJXVnVkQ0k2VzNzaVJXWm1aV04wSWpvaVFXeHNiM2NpTENKQlkzUnBiMjRpT2xzaVlXUnRhVzQ2S2lKZGZTeDdJa1ZtWm1WamRDSTZJa0ZzYkc5M0lpd2lRV04wYVc5dUlqcGJJbXR0Y3pvcUlsMTlMSHNpUldabVpXTjBJam9pUVd4c2IzY2lMQ0pCWTNScGIyNGlPbHNpY3pNNktpSmRMQ0pTWlhOdmRYSmpaU0k2V3lKaGNtNDZZWGR6T25Nek9qbzZLaUpkZlYxOSJ9.uuE_x7PO8QoPfUk9KzUELoAqxihIknZAvJLl5aYJjwpSjJYFTPLp6EvuyJX2hc18s9HzeiJ-vU0dPzsy50dXmg</SessionToken></Credentials></AssumeRoleWithWebIdentityResult><ResponseMetadata></ResponseMetadata></AssumeRoleWithWebIdentityResponse>")

View File

@ -65,8 +65,7 @@ func TestSTSS3Operation(t *testing.T) {
t.Fatal(err)
}
var reader io.Reader
reader = bytes.NewReader(object.data)
var reader io.Reader = bytes.NewReader(object.data)
n, err := s3c.Put(context.Background(), reader, int64(len(object.data)), nil, PutOptions{
metadata: map[string]string{
"Content-Type": "application/octet-stream",

View File

@ -366,7 +366,8 @@ func migrateConfigV6ToV7() {
continue
}
}
if host == "https://s3.amazonaws.com" {
switch host {
case "https://s3.amazonaws.com":
// Only one entry can exist for "s3" domain.
cfgV7.Hosts["s3"] = hostConfigV7{
URL: host,
@ -374,7 +375,7 @@ func migrateConfigV6ToV7() {
SecretKey: hostCfgV6.SecretAccessKey,
API: hostCfgV6.API,
}
} else if host == "https://storage.googleapis.com" {
case "https://storage.googleapis.com":
// Only one entry can exist for "gcs" domain.
cfgV7.Hosts["gcs"] = hostConfigV7{
URL: host,
@ -382,7 +383,7 @@ func migrateConfigV6ToV7() {
SecretKey: hostCfgV6.SecretAccessKey,
API: hostCfgV6.API,
}
} else {
default:
// Assign a generic "cloud1", cloud2..." key
// for all other entries that has valid keys set.
alias := fmt.Sprintf("cloud%d", aliasIndex)

View File

@ -248,9 +248,12 @@ func parseEnvURLStr(envURL string) (*url.URL, string, string, string, *probe.Err
return nil, "", "", "", probe.NewError(e)
}
// Look for if URL has invalid values and return error.
if !((u.Scheme == "http" || u.Scheme == "https") &&
(u.Path == "/" || u.Path == "") && u.Opaque == "" &&
!u.ForceQuery && u.RawQuery == "" && u.Fragment == "") {
if (u.Scheme != "http" && u.Scheme != "https") ||
(u.Path != "/" && u.Path != "") ||
u.Opaque != "" ||
u.ForceQuery ||
u.RawQuery != "" ||
u.Fragment != "" {
return nil, "", "", "", errInvalidArgument().Trace(u.String())
}
if accessKey == "" && secretKey == "" {
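The wrapped `!( ... )` condition is expanded with De Morgan's laws: each clause is negated individually and `&&`/`||` are swapped, which reads more directly. A reduced sketch of the equivalence (only two of the clauses shown):

```go
package main

import (
	"fmt"
	"net/url"
)

// invalidBefore mirrors the original !(...) form in a reduced shape.
func invalidBefore(u *url.URL) bool {
	return !((u.Scheme == "http" || u.Scheme == "https") && u.RawQuery == "")
}

// invalidAfter is the De Morgan expansion the linter prefers: negate each
// clause and flip && / ||.
func invalidAfter(u *url.URL) bool {
	return (u.Scheme != "http" && u.Scheme != "https") || u.RawQuery != ""
}

func main() {
	u, _ := url.Parse("ftp://example.com/")
	fmt.Println(invalidBefore(u), invalidAfter(u)) // true true — equivalent
}
```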

View File

@ -279,7 +279,7 @@ func doCopy(ctx context.Context, copyOpts doCopyOpts) URLs {
// doCopyFake - Perform a fake copy to update the progress bar appropriately.
func doCopyFake(cpURLs URLs, pg Progress) URLs {
if progressReader, ok := pg.(*progressBar); ok {
progressReader.ProgressBar.Add64(cpURLs.SourceContent.Size)
progressReader.Add64(cpURLs.SourceContent.Size)
}
return cpURLs
@ -493,13 +493,13 @@ loop:
errSeen = true
if progressReader, pgok := pg.(*progressBar); pgok {
if progressReader.ProgressBar.Get() > 0 {
if progressReader.Get() > 0 {
writeContSize := (int)(cpURLs.SourceContent.Size)
totalPGSize := (int)(progressReader.ProgressBar.Total)
written := (int)(progressReader.ProgressBar.Get())
totalPGSize := (int)(progressReader.Total)
written := (int)(progressReader.Get())
if totalPGSize > writeContSize && written > writeContSize {
progressReader.ProgressBar.Set((written - writeContSize))
progressReader.ProgressBar.Update()
progressReader.Set((written - writeContSize))
progressReader.Update()
}
}
}
@ -514,7 +514,7 @@ loop:
if !globalQuiet && !globalJSON {
console.Eraseline()
}
} else if progressReader.ProgressBar.Get() > 0 {
} else if progressReader.Get() > 0 {
progressReader.Finish()
}
} else {

View File

@ -48,11 +48,12 @@ func getMetaDataEntry(metadataString string) (map[string]string, *probe.Error) {
var key, value strings.Builder
writeRune := func(ch rune, pt pToken) {
if pt == KEY {
switch pt {
case KEY:
key.WriteRune(ch)
} else if pt == VALUE {
case VALUE:
value.WriteRune(ch)
} else {
default:
panic("Invalid parser token type")
}
}
@ -73,26 +74,28 @@ func getMetaDataEntry(metadataString string) (map[string]string, *probe.Error) {
}
if ch == '"' {
if ps == DQSTRING {
switch ps {
case DQSTRING:
ps = NORMAL
} else if ps == QSTRING {
case QSTRING:
writeRune(ch, pt)
} else if ps == NORMAL {
case NORMAL:
ps = DQSTRING
} else {
default:
break
}
continue
}
if ch == '\'' {
if ps == QSTRING {
switch ps {
case QSTRING:
ps = NORMAL
} else if ps == DQSTRING {
case DQSTRING:
writeRune(ch, pt)
} else if ps == NORMAL {
case NORMAL:
ps = QSTRING
} else {
default:
break
}
continue

View File

@ -125,7 +125,7 @@ func ParseDuration(s string) (Duration, error) {
var err error
// The next character must be [0-9.]
if !(s[0] == '.' || '0' <= s[0] && s[0] <= '9') {
if s[0] != '.' && ('0' > s[0] || s[0] > '9') {
return 0, errors.New("invalid duration " + orig)
}
// Consume [0-9]*

View File

@ -50,9 +50,9 @@ type findMessage struct {
// String calls tells the console what to print and how to print it.
func (f findMessage) String() string {
var msg string
msg += f.contentMessage.Key
msg += f.Key
if f.VersionID != "" {
msg += " (" + f.contentMessage.VersionID + ")"
msg += " (" + f.VersionID + ")"
}
return console.Colorize("Find", msg)
}
@ -118,7 +118,7 @@ func getExitStatus(err error) int {
return 0
}
if pe, ok := err.(*exec.ExitError); ok {
if es, ok := pe.ProcessState.Sys().(syscall.WaitStatus); ok {
if es, ok := pe.Sys().(syscall.WaitStatus); ok {
return es.ExitStatus()
}
}

View File

@ -185,7 +185,7 @@ func mainIDPLDAPRemove(ctx *cli.Context) error {
showCommandHelpAndExit(ctx, 1)
}
var cfgName string = madmin.Default
cfgName := madmin.Default
return idpRemove(ctx, false, cfgName)
}
@ -247,7 +247,7 @@ func mainIDPLDAPInfo(ctx *cli.Context) error {
showCommandHelpAndExit(ctx, 1)
}
var cfgName string = madmin.Default
cfgName := madmin.Default
return idpInfo(ctx, false, cfgName)
}
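`var cfgName string = madmin.Default` becomes a short variable declaration because the explicit type merely repeats what the initializer already implies. A tiny sketch (the constant value here is hypothetical):

```go
package main

import "fmt"

// defaultCfgName stands in for madmin.Default (hypothetical value).
const defaultCfgName = "default"

func main() {
	// Before: var cfgName string = defaultCfgName — redundant explicit type.
	// After: a short variable declaration, as in the hunks above.
	cfgName := defaultCfgName
	fmt.Println(cfgName)
}
```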

View File

@ -64,7 +64,7 @@ func getExpirationDays(rule lifecycle.Rule) int {
if rule.Expiration.Days > 0 {
return int(rule.Expiration.Days)
}
if !rule.Expiration.Date.Time.IsZero() {
if !rule.Expiration.Date.IsZero() {
return int(time.Until(rule.Expiration.Date.Time).Hours() / 24)
}

View File

@ -652,10 +652,11 @@ func (mj *mirrorJob) watchMirrorEvents(ctx context.Context, events []EventInfo)
}
}
eventPath := event.Path
if runtime.GOOS == "darwin" {
switch runtime.GOOS {
case "darwin":
// Strip the prefixes in the event path. Happens in darwin OS only
eventPath = eventPath[strings.Index(eventPath, sourceURLFull):]
} else if runtime.GOOS == "windows" {
case "windows":
// Shared folder as source URL and if event path is an absolute path.
eventPath = getEventPathURLWin(mj.sourceURL, eventPath)
}

View File

@ -45,7 +45,7 @@ func (p *cpuProfiler) Start() error {
func (p *cpuProfiler) Stop() error {
pprof.StopCPUProfile()
return p.File.Close()
return p.Close()
}
type memProfiler struct {
@ -65,7 +65,7 @@ func (p *memProfiler) Stop() error {
if e := pprof.Lookup("heap").WriteTo(p.File, 0); e != nil {
return e
}
return p.File.Close()
return p.Close()
}
type blockProfiler struct {
@ -86,7 +86,7 @@ func (p *blockProfiler) Stop() error {
return e
}
runtime.SetBlockProfileRate(0)
return p.File.Close()
return p.Close()
}
type goroutineProfiler struct {
@ -105,7 +105,7 @@ func (p *goroutineProfiler) Stop() error {
if e := pprof.Lookup("goroutine").WriteTo(p.File, 1); e != nil {
return e
}
return p.File.Close()
return p.Close()
}
var globalProfilers []profiler

View File

@ -94,8 +94,8 @@ func newProgressBar(total int64) *progressBar {
// Set caption.
func (p *progressBar) SetCaption(caption string) *progressBar {
caption = fixateBarCaption(caption, getFixedWidth(p.ProgressBar.GetWidth(), 18))
p.ProgressBar.Prefix(caption)
caption = fixateBarCaption(caption, getFixedWidth(p.GetWidth(), 18))
p.Prefix(caption)
return p
}
@ -111,7 +111,7 @@ func (p *progressBar) Set64(length int64) *progressBar {
func (p *progressBar) Read(buf []byte) (n int, err error) {
defer func() {
// Upload retry can read one object twice; Avoid read to be greater than Total
if n, t := p.ProgressBar.Get(), p.ProgressBar.Total; t > 0 && n > t {
if n, t := p.Get(), p.Total; t > 0 && n > t {
p.ProgressBar.Set64(t)
}
}()
@ -120,7 +120,7 @@ func (p *progressBar) Read(buf []byte) (n int, err error) {
}
func (p *progressBar) SetTotal(total int64) {
p.ProgressBar.Total = total
p.Total = total
}
// cursorAnimate - returns a animated rune through read channel for every read.

View File

@ -113,7 +113,7 @@ func (m replicateMRFMessage) String() string {
Field{getNodeTheme(m.ReplicationMRF.NodeName), len(m.ReplicationMRF.NodeName) + 3},
Field{"Count", 7},
Field{"Object", -1},
).buildRow(m.ReplicationMRF.NodeName, fmt.Sprintf("Retry=%d", m.ReplicationMRF.RetryCount), fmt.Sprintf("%s (%s)", m.ReplicationMRF.Object, m.ReplicationMRF.VersionID)))
).buildRow(m.NodeName, fmt.Sprintf("Retry=%d", m.RetryCount), fmt.Sprintf("%s (%s)", m.Object, m.VersionID)))
}
type replicateBacklogMessage struct {

View File

@ -357,7 +357,7 @@ func (m replicateXferMessage) String() string {
var rows []string
maxLen := 0
for _, rqs := range m.ReplQueueStats.Nodes {
for _, rqs := range m.Nodes {
if len(rqs.NodeName) > maxLen {
maxLen = len(rqs.NodeName)
}

View File

@ -224,7 +224,7 @@ func checkRmSyntax(ctx context.Context, cliCtx *cli.Context) {
"You cannot specify --version-id with any of --versions, --rewind and --recursive flags.")
}
if isNoncurrentVersion && !(isVersions && isRecursive) {
if isNoncurrentVersion && (!isVersions || !isRecursive) {
fatalIf(errDummy().Trace(),
"You cannot specify --non-current without --versions --recursive, please use --non-current --versions --recursive.")
}
@ -278,7 +278,7 @@ func checkRmSyntax(ctx context.Context, cliCtx *cli.Context) {
"Removal requires --force flag. This operation is *IRREVERSIBLE*. Please review carefully before performing this *DANGEROUS* operation.")
}
if isNamespaceRemoval && !(isDangerous && isForce) {
if isNamespaceRemoval && (!isDangerous || !isForce) {
fatalIf(errDummy().Trace(),
"This operation results in site-wide removal of objects. If you are really sure, retry this command with --dangerous and --force flags.")
}

View File

@ -86,9 +86,9 @@ func (s shareMessage) JSON() string {
// JSON encoding escapes ampersand into its unicode character
// which is not usable directly for share and fails with cloud
// storage. convert them back so that they are usable.
shareMessageBytes = bytes.Replace(shareMessageBytes, []byte("\\u0026"), []byte("&"), -1)
shareMessageBytes = bytes.Replace(shareMessageBytes, []byte("\\u003c"), []byte("<"), -1)
shareMessageBytes = bytes.Replace(shareMessageBytes, []byte("\\u003e"), []byte(">"), -1)
shareMessageBytes = bytes.ReplaceAll(shareMessageBytes, []byte("\\u0026"), []byte("&"))
shareMessageBytes = bytes.ReplaceAll(shareMessageBytes, []byte("\\u003c"), []byte("<"))
shareMessageBytes = bytes.ReplaceAll(shareMessageBytes, []byte("\\u003e"), []byte(">"))
return string(shareMessageBytes)
}
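`bytes.Replace(..., -1)` becomes `bytes.ReplaceAll`, dropping the `-1` sentinel that meant "replace every occurrence". A minimal sketch:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	payload := []byte(`{"url":"https://host/?a=1\u0026b=2"}`)

	// Before: bytes.Replace(s, old, new, -1), where -1 means "all occurrences".
	// After: bytes.ReplaceAll expresses the same intent without the sentinel.
	payload = bytes.ReplaceAll(payload, []byte(`\u0026`), []byte("&"))

	fmt.Println(string(payload))
}
```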

View File

@ -126,7 +126,7 @@ func (qs *QuietStatus) Start() {
// Finish displays the accounting summary
func (qs *QuietStatus) Finish() {
printMsg(qs.accounter.Stat())
printMsg(qs.Stat())
}
// Update is ignored for quietstatus
@ -203,7 +203,7 @@ func (ps *ProgressStatus) SetTotal(v int64) Status {
// Add bytes to current number of bytes
func (ps *ProgressStatus) Add(v int64) Status {
ps.progressBar.Add64(v)
ps.Add64(v)
return ps
}

View File

@ -578,19 +578,19 @@ func ShareURLUploadErrorTests(t *testing.T) {
shareMsg, err := parseShareMessageFromJSONOutput(out)
fatalIfErrorWMsg(err, out, t)
finalURL := strings.Replace(shareMsg.ShareURL, "<FILE>", file.diskFile.Name(), -1)
finalURL := strings.ReplaceAll(shareMsg.ShareURL, "<FILE>", file.diskFile.Name())
splitCommand := strings.Split(finalURL, " ")
if skipInsecure {
splitCommand = append(splitCommand, "--insecure")
}
bucketOnly := strings.Replace(shareURLErrorBucket, defaultAlias+"/", "", -1)
bucketOnly := strings.ReplaceAll(shareURLErrorBucket, defaultAlias+"/", "")
// Modify base url bucket path
newCmd := make([]string, len(splitCommand))
copy(newCmd, splitCommand)
newCmd[1] = strings.Replace(newCmd[1], bucketOnly, "fake-bucket-name", -1)
newCmd[1] = strings.ReplaceAll(newCmd[1], bucketOnly, "fake-bucket-name")
out, _ = RunCommand(newCmd[0], newCmd[1:]...)
curlFatalIfNoErrorTag(out, t)
@ -626,7 +626,7 @@ func ShareURLUploadTest(t *testing.T) {
shareMsg, err := parseShareMessageFromJSONOutput(out)
fatalIfErrorWMsg(err, out, t)
finalURL := strings.Replace(shareMsg.ShareURL, "<FILE>", file.diskFile.Name(), -1)
finalURL := strings.ReplaceAll(shareMsg.ShareURL, "<FILE>", file.diskFile.Name())
splitCommand := strings.Split(finalURL, " ")
if skipInsecure {
@ -3013,7 +3013,7 @@ func BuildCLI() error {
os.Remove(mcCmd)
out, err := exec.Command("go", "build", "-o", mcCmd, buildPath).CombinedOutput()
if err != nil {
log.Println("BUILD OUT:", out)
log.Println("BUILD OUT:", string(out))
log.Println(err)
panic(err)
}
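The test diffs switch `strings.Replace(..., -1)` to `strings.ReplaceAll` in the same way, and convert the `CombinedOutput` byte slice to a string before logging so the build output prints as text rather than raw byte values. A small sketch of the logging fix:

```go
package main

import "log"

func main() {
	out := []byte("go: cannot find main module")

	// A raw []byte logs as its byte values: [103 111 58 ...].
	log.Println("BUILD OUT:", out)

	// Converting to string logs the readable text, as the fixed line does.
	log.Println("BUILD OUT:", string(out))
}
```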

View File

@ -115,7 +115,7 @@ func validateClusterRegistered(alias string, cmdTalksToSubnet bool) string {
// command talks to subnet: dev+airgapped mode (both `--dev` and `--airgap` passed)
requireRegistration := !GlobalDevMode
if cmdTalksToSubnet {
requireRegistration = !(GlobalDevMode && globalAirgapped)
requireRegistration = !GlobalDevMode || !globalAirgapped
}
apiKey, e := getSubnetAPIKey(alias)