diff --git a/.github/workflows/automerge.yml b/.github/workflows/automerge.yml
index 4eaff9686..36d301e59 100644
--- a/.github/workflows/automerge.yml
+++ b/.github/workflows/automerge.yml
@@ -25,4 +25,4 @@ jobs:
       uses: "pascalgn/automerge-action@135f0bdb927d9807b5446f7ca9ecc2c51de03c4a"
       env:
         GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
-        MERGE_METHOD: rebase
\ No newline at end of file
+        MERGE_METHOD: rebase
diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml
index a07cf1154..8e720d96b 100644
--- a/.github/workflows/cd.yml
+++ b/.github/workflows/cd.yml
@@ -16,7 +16,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v1
         with:
-          go-version: 1.16.x
+          go-version: 1.18.x
       - name: Run goreleaser
         uses: goreleaser/goreleaser-action@v1
         env:
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 2176fef97..f933ddb1b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,5 +1,8 @@
 name: Continuous Integration
 
+env:
+  GO_VERSION: 1.18
+
 on:
   push:
     branches:
@@ -24,7 +27,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v1
         with:
-          go-version: 1.16.x
+          go-version: 1.18.x
       - name: Cache build
        uses: actions/cache@v1
        with:
@@ -46,7 +49,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v1
         with:
-          go-version: 1.16.x
+          go-version: 1.18.x
       - name: Cache build
         uses: actions/cache@v1
         with:
@@ -74,7 +77,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v1
         with:
-          go-version: 1.16.x
+          go-version: 1.18.x
       - name: Cache build
         uses: actions/cache@v1
         with:
@@ -87,11 +90,24 @@
           go run scripts/cheatsheet/main.go check
   lint:
     runs-on: ubuntu-latest
+    env:
+      GOFLAGS: -mod=vendor
     steps:
-      - name: Checkout
+      - name: Checkout code
         uses: actions/checkout@v2
+      - name: Setup Go
+        uses: actions/setup-go@v1
+        with:
+          go-version: 1.18.x
+      - name: Cache build
+        uses: actions/cache@v1
+        with:
+          path: ~/.cache/go-build
+          key: ${{runner.os}}-go-${{hashFiles('**/go.sum')}}-test
+          restore-keys: |
+            ${{runner.os}}-go-
       - name: Lint
-        uses: golangci/golangci-lint-action@v2
+        uses: golangci/golangci-lint-action@v3.1.0
         with:
           version: latest
       - name: Format code
diff --git a/.gitignore b/.gitignore
index 84258eeee..ea0475b55 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,8 +23,10 @@ lazygit.exe
 # Exceptions
 !.gitignore
 !.goreleaser.yml
+!.golangci.yml
 !.circleci/
 !.github/
+
 # these are for our integration tests
 !.git_keep
 !.gitmodules_keep
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 000000000..358a5d12a
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,24 @@
+linters:
+  disable:
+    - structcheck # gives false positives
+  enable:
+    - gofumpt
+    - thelper
+    - goimports
+    - tparallel
+    - wastedassign
+    - exportloopref
+    - unparam
+    - prealloc
+    - unconvert
+    - exhaustive
+    - makezero
+    # - goconst # TODO: enable and fix issues
+  fast: false
+
+linters-settings:
+  exhaustive:
+    default-signifies-exhaustive: true
+
+run:
+  go: 1.18
diff --git a/go.mod b/go.mod
index 677a482f9..3e7598384 100644
--- a/go.mod
+++ b/go.mod
@@ -30,6 +30,7 @@ require (
 	github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad
 	github.com/stretchr/testify v1.7.0
 	github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778
+	golang.org/x/exp v0.0.0-20220318154914-8dddf5d87bd8
 	gopkg.in/ozeidan/fuzzy-patricia.v3 v3.0.0
 )
 
@@ -63,5 +64,5 @@ require (
 	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
 	golang.org/x/text v0.3.7 // indirect
 	gopkg.in/warnings.v0 v0.1.2 // indirect
-	gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
+	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
 )
diff --git a/go.sum b/go.sum
index cc15e70e0..fc2d2400d 100644
--- a/go.sum
+++ b/go.sum
@@ -157,6 +157,8 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
 golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/exp v0.0.0-20220318154914-8dddf5d87bd8 h1:s/+U+w0teGzcoH2mdIlFQ6KfVKGaYpgyGdUefZrn9TU=
+golang.org/x/exp v0.0.0-20220318154914-8dddf5d87bd8/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -205,5 +207,6 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
 gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/pkg/app/app.go b/pkg/app/app.go
index 0ee7e4adf..2d279936f 100644
--- a/pkg/app/app.go
+++ b/pkg/app/app.go
@@ -65,7 +65,7 @@ func newDevelopmentLogger() *logrus.Logger {
 	if err != nil {
 		log.Fatal(err)
 	}
-	file, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+	file, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o666)
 	if err != nil {
 		log.Fatalf("Unable to log to log file: %v", err)
 	}
@@ -269,10 +269,9 @@ func (app *App) Rebase() error {
 	app.Log.Info("args: ", os.Args)
 
 	if strings.HasSuffix(os.Args[1], "git-rebase-todo") {
-		if err := ioutil.WriteFile(os.Args[1], []byte(os.Getenv("LAZYGIT_REBASE_TODO")), 0644); err != nil {
+		if err := ioutil.WriteFile(os.Args[1], []byte(os.Getenv("LAZYGIT_REBASE_TODO")), 0o644); err != nil {
 			return err
 		}
-
 	} else if strings.HasSuffix(os.Args[1], filepath.Join(gitDir(), "COMMIT_EDITMSG")) { // TODO: test
 		// if we are rebasing and squashing, we'll see a COMMIT_EDITMSG
 		// but in this case we don't need to edit it, so we'll just return
diff --git a/pkg/app/logging_windows.go b/pkg/app/logging_windows.go
index f8b3d4990..efbdfbbe1 100644
--- a/pkg/app/logging_windows.go
+++ b/pkg/app/logging_windows.go
@@ -5,11 +5,12 @@ package app
 
 import (
 	"bufio"
-	"github.com/aybabtme/humanlog"
 	"log"
 	"os"
 	"strings"
 	"time"
+
+	"github.com/aybabtme/humanlog"
 )
 
 func TailLogsForPlatform(logFilePath string, opts *humanlog.HandlerOptions) {
diff --git a/pkg/cheatsheet/check.go b/pkg/cheatsheet/check.go
index 03f65d910..ebcd0629f 100644
--- a/pkg/cheatsheet/check.go
+++ b/pkg/cheatsheet/check.go
@@ -19,7 +19,7 @@ func Check() {
 	if err != nil {
 		log.Fatalf("Error occured while checking if cheatsheets are up to date: %v", err)
 	}
-	err = os.Mkdir(tmpDir, 0700)
+	err = os.Mkdir(tmpDir, 0o700)
 	if err != nil {
 		log.Fatalf("Error occured while checking if cheatsheets are up to date: %v", err)
 	}
@@ -70,7 +70,6 @@ func obtainContent(dir string) string {
 
 		return nil
 	})
-
 	if err != nil {
 		log.Fatalf("Error occured while checking if cheatsheets are up to date: %v", err)
 	}
diff --git a/pkg/cheatsheet/generate.go b/pkg/cheatsheet/generate.go
index 804cb6b45..c7c2b0d37 100644
--- a/pkg/cheatsheet/generate.go
+++ b/pkg/cheatsheet/generate.go
@@ -174,7 +174,7 @@ outer:
 		bindings []*types.Binding
 	}
 
-	groupedBindings := make([]groupedBindingsType, len(contextAndViewBindingMap))
+	groupedBindings := make([]groupedBindingsType, 0, len(contextAndViewBindingMap))
 
 	for contextAndView, contextBindings := range contextAndViewBindingMap {
 		groupedBindings = append(groupedBindings, groupedBindingsType{contextAndView: contextAndView, bindings: contextBindings})
diff --git a/pkg/commands/git.go b/pkg/commands/git.go
index 3880e0dfc..6c6a3ac7c 100644
--- a/pkg/commands/git.go
+++ b/pkg/commands/git.go
@@ -223,7 +223,6 @@ func setupRepository(openGitRepository func(string) (*gogit.Repository, error),
 	}
 
 	repository, err := openGitRepository(path)
-
 	if err != nil {
 		if strings.Contains(err.Error(), `unquoted '\' must be followed by new line`) {
 			return nil, errors.New(gitConfigParseErrorStr)
diff --git a/pkg/commands/git_commands/rebase.go b/pkg/commands/git_commands/rebase.go
index c726cad7e..71c8b0e63 100644
--- a/pkg/commands/git_commands/rebase.go
+++ b/pkg/commands/git_commands/rebase.go
@@ -185,7 +185,7 @@ func (self *RebaseCommands) EditRebaseTodo(index int, action string) error {
 	content[contentIndex] = action + " " + strings.Join(splitLine[1:], " ")
 	result := strings.Join(content, "\n")
 
-	return ioutil.WriteFile(fileName, []byte(result), 0644)
+	return ioutil.WriteFile(fileName, []byte(result), 0o644)
 }
 
 func (self *RebaseCommands) getTodoCommitCount(content []string) int {
@@ -215,7 +215,7 @@ func (self *RebaseCommands) MoveTodoDown(index int) error {
 	rearrangedContent = append(rearrangedContent, content[contentIndex+1:]...)
 	result := strings.Join(rearrangedContent, "\n")
 
-	return ioutil.WriteFile(fileName, []byte(result), 0644)
+	return ioutil.WriteFile(fileName, []byte(result), 0o644)
 }
 
 // SquashAllAboveFixupCommits squashes all fixup! commits above the given one
diff --git a/pkg/commands/hosting_service/hosting_service.go b/pkg/commands/hosting_service/hosting_service.go
index 4a0a49681..b448e3925 100644
--- a/pkg/commands/hosting_service/hosting_service.go
+++ b/pkg/commands/hosting_service/hosting_service.go
@@ -9,6 +9,8 @@ import (
 	"github.com/jesseduffield/lazygit/pkg/i18n"
 	"github.com/jesseduffield/lazygit/pkg/utils"
 	"github.com/sirupsen/logrus"
+
+	"golang.org/x/exp/slices"
 )
 
 // This package is for handling logic specific to a git hosting service like github, gitlab, bitbucket, etc.
@@ -94,8 +96,7 @@ func (self *HostingServiceMgr) getCandidateServiceDomains() []ServiceDomain { serviceDefinitionByProvider[serviceDefinition.provider] = serviceDefinition } - var serviceDomains = make([]ServiceDomain, len(defaultServiceDomains)) - copy(serviceDomains, defaultServiceDomains) + serviceDomains := slices.Clone(defaultServiceDomains) if len(self.configServiceDomains) > 0 { for gitDomain, typeAndDomain := range self.configServiceDomains { diff --git a/pkg/commands/oscommands/cmd_obj_runner_win.go b/pkg/commands/oscommands/cmd_obj_runner_win.go index 9e3d1fd02..9a64dfa77 100644 --- a/pkg/commands/oscommands/cmd_obj_runner_win.go +++ b/pkg/commands/oscommands/cmd_obj_runner_win.go @@ -20,6 +20,7 @@ func (b *Buffer) Read(p []byte) (n int, err error) { defer b.m.Unlock() return b.b.Read(p) } + func (b *Buffer) Write(p []byte) (n int, err error) { b.m.Lock() defer b.m.Unlock() diff --git a/pkg/commands/oscommands/fake_cmd_obj_runner.go b/pkg/commands/oscommands/fake_cmd_obj_runner.go index b542bfee3..d06861251 100644 --- a/pkg/commands/oscommands/fake_cmd_obj_runner.go +++ b/pkg/commands/oscommands/fake_cmd_obj_runner.go @@ -21,7 +21,7 @@ type FakeCmdObjRunner struct { var _ ICmdObjRunner = &FakeCmdObjRunner{} -func NewFakeRunner(t *testing.T) *FakeCmdObjRunner { +func NewFakeRunner(t *testing.T) *FakeCmdObjRunner { //nolint:thelper return &FakeCmdObjRunner{t: t} } diff --git a/pkg/commands/oscommands/os.go b/pkg/commands/oscommands/os.go index 1c4f5bf28..f3df3956f 100644 --- a/pkg/commands/oscommands/os.go +++ b/pkg/commands/oscommands/os.go @@ -103,7 +103,7 @@ func (c *OSCommand) Quote(message string) string { // AppendLineToFile adds a new line in file func (c *OSCommand) AppendLineToFile(filename, line string) error { c.LogCommand(fmt.Sprintf("Appending '%s' to file '%s'", line, filename), false) - f, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600) + f, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0o600) if err != nil { return utils.WrapError(err) } @@ -145,7 +145,7 @@ func (c *OSCommand) CreateFileWithContent(path string, content string) error { return err } - if err := ioutil.WriteFile(path, []byte(content), 0644); err != nil { + if err := ioutil.WriteFile(path, []byte(content), 0o644); err != nil { c.Log.Error(err) return utils.WrapError(err) } diff --git a/pkg/commands/oscommands/os_test.go b/pkg/commands/oscommands/os_test.go index efda5a3a1..9c2d9a2a7 100644 --- a/pkg/commands/oscommands/os_test.go +++ b/pkg/commands/oscommands/os_test.go @@ -141,7 +141,7 @@ func TestOSCommandFileType(t *testing.T) { { "testDirectory", func() { - if err := os.Mkdir("testDirectory", 0644); err != nil { + if err := os.Mkdir("testDirectory", 0o644); err != nil { panic(err) } }, diff --git a/pkg/commands/patch/patch_manager.go b/pkg/commands/patch/patch_manager.go index c8e16a7fd..cbdf7b2d4 100644 --- a/pkg/commands/patch/patch_manager.go +++ b/pkg/commands/patch/patch_manager.go @@ -26,8 +26,10 @@ type fileInfo struct { diff string } -type applyPatchFunc func(patch string, flags ...string) error -type loadFileDiffFunc func(from string, to string, reverse bool, filename string, plain bool) (string, error) +type ( + applyPatchFunc func(patch string, flags ...string) error + loadFileDiffFunc func(from string, to string, reverse bool, filename string, plain bool) (string, error) +) // PatchManager manages the building of a patch for a commit to be applied to another commit (or the working tree, or removed from the current commit). 
We also support building patches from things like stashes, for which there is less flexibility type PatchManager struct { diff --git a/pkg/commands/patch/patch_modifier.go b/pkg/commands/patch/patch_modifier.go index 2109ad1f0..2d060ec18 100644 --- a/pkg/commands/patch/patch_modifier.go +++ b/pkg/commands/patch/patch_modifier.go @@ -8,8 +8,10 @@ import ( "github.com/sirupsen/logrus" ) -var hunkHeaderRegexp = regexp.MustCompile(`(?m)^@@ -(\d+)[^\+]+\+(\d+)[^@]+@@(.*)$`) -var patchHeaderRegexp = regexp.MustCompile(`(?ms)(^diff.*?)^@@`) +var ( + hunkHeaderRegexp = regexp.MustCompile(`(?m)^@@ -(\d+)[^\+]+\+(\d+)[^@]+@@(.*)$`) + patchHeaderRegexp = regexp.MustCompile(`(?ms)(^diff.*?)^@@`) +) func GetHeaderFromDiff(diff string) string { match := patchHeaderRegexp.FindStringSubmatch(diff) diff --git a/pkg/config/app_config.go b/pkg/config/app_config.go index 98620ad43..40509e86a 100644 --- a/pkg/config/app_config.go +++ b/pkg/config/app_config.go @@ -123,7 +123,7 @@ func configDirForVendor(vendor string) string { func findOrCreateConfigDir() (string, error) { folder := ConfigDir() - return folder, os.MkdirAll(folder, 0755) + return folder, os.MkdirAll(folder, 0o755) } func loadUserConfigWithDefaults(configFiles []string) (*UserConfig, error) { @@ -249,7 +249,7 @@ func (c *AppConfig) SaveAppState() error { return err } - err = ioutil.WriteFile(filepath, marshalledAppState, 0644) + err = ioutil.WriteFile(filepath, marshalledAppState, 0o644) if err != nil && os.IsPermission(err) { // apparently when people have read-only permissions they prefer us to fail silently return nil diff --git a/pkg/config/user_config.go b/pkg/config/user_config.go index ac8a2bbc1..51f443243 100644 --- a/pkg/config/user_config.go +++ b/pkg/config/user_config.go @@ -358,7 +358,8 @@ func GetDefaultConfig() *UserConfig { Paging: PagingConfig{ ColorArg: "always", Pager: "", - UseConfig: false}, + UseConfig: false, + }, Commit: CommitConfig{ SignOff: false, }, diff --git a/pkg/gui/context/local_commits_context.go b/pkg/gui/context/local_commits_context.go index d8d64392c..46e3be2cd 100644 --- a/pkg/gui/context/local_commits_context.go +++ b/pkg/gui/context/local_commits_context.go @@ -45,7 +45,8 @@ func NewLocalCommitsContext( viewTrait: NewViewTrait(view), getDisplayStrings: getDisplayStrings, c: c, - }}, + }, + }, } } diff --git a/pkg/gui/context/sub_commits_context.go b/pkg/gui/context/sub_commits_context.go index 0f16f1688..93a0c3593 100644 --- a/pkg/gui/context/sub_commits_context.go +++ b/pkg/gui/context/sub_commits_context.go @@ -45,7 +45,8 @@ func NewSubCommitsContext( viewTrait: NewViewTrait(view), getDisplayStrings: getDisplayStrings, c: c, - }}, + }, + }, } } diff --git a/pkg/gui/controllers/branches_controller.go b/pkg/gui/controllers/branches_controller.go index 08ea95119..c578a405b 100644 --- a/pkg/gui/controllers/branches_controller.go +++ b/pkg/gui/controllers/branches_controller.go @@ -185,7 +185,8 @@ func (self *BranchesController) checkoutByName() error { }) }, }) - }}, + }, + }, ) } @@ -377,8 +378,8 @@ func (self *BranchesController) createPullRequestMenu(selectedBranch *models.Bra FindSuggestionsFunc: self.helpers.Suggestions.GetBranchNameSuggestionsFunc(), HandleConfirm: func(targetBranchName string) error { return self.createPullRequest(branch.Name, targetBranchName) - }}, - ) + }, + }) }, }, } diff --git a/pkg/gui/controllers/commits_files_controller.go b/pkg/gui/controllers/commits_files_controller.go index 933d17321..978d6c6a7 100644 --- a/pkg/gui/controllers/commits_files_controller.go +++ 
b/pkg/gui/controllers/commits_files_controller.go @@ -170,7 +170,6 @@ func (self *CommitFilesController) toggleForPatch(node *filetree.CommitFileNode) return self.git.Patch.PatchManager.RemoveFile(file.Name) } }) - if err != nil { return self.c.Error(err) } diff --git a/pkg/gui/controllers/helpers/refs_helper.go b/pkg/gui/controllers/helpers/refs_helper.go index e3e050117..65c01d4a7 100644 --- a/pkg/gui/controllers/helpers/refs_helper.go +++ b/pkg/gui/controllers/helpers/refs_helper.go @@ -71,7 +71,6 @@ func (self *RefsHelper) CheckoutRef(ref string, options types.CheckoutRefOptions if strings.Contains(err.Error(), "Please commit your changes or stash them before you switch branch") { // offer to autostash changes return self.c.Ask(types.AskOpts{ - Title: self.c.Tr.AutoStashTitle, Prompt: self.c.Tr.AutoStashPrompt, HandleConfirm: func() error { diff --git a/pkg/gui/controllers/submodules_controller.go b/pkg/gui/controllers/submodules_controller.go index 83c05da4b..10b25df2b 100644 --- a/pkg/gui/controllers/submodules_controller.go +++ b/pkg/gui/controllers/submodules_controller.go @@ -90,7 +90,6 @@ func (self *SubmodulesController) add() error { Title: self.c.Tr.LcNewSubmoduleName, InitialContent: nameSuggestion, HandleConfirm: func(submoduleName string) error { - return self.c.Prompt(types.PromptOpts{ Title: self.c.Tr.LcNewSubmodulePath, InitialContent: submoduleName, diff --git a/pkg/gui/controllers/sync_controller.go b/pkg/gui/controllers/sync_controller.go index 74db3d527..8501c5484 100644 --- a/pkg/gui/controllers/sync_controller.go +++ b/pkg/gui/controllers/sync_controller.go @@ -194,7 +194,6 @@ func (self *SyncController) pushAux(opts pushOpts) error { UpstreamBranch: opts.upstreamBranch, SetUpstream: opts.setUpstream, }) - if err != nil { if !opts.force && strings.Contains(err.Error(), "Updates were rejected") { forcePushDisabled := self.c.UserConfig.Git.DisableForcePushing diff --git a/pkg/gui/filetree/commit_file_node.go b/pkg/gui/filetree/commit_file_node.go index 98428348e..a8f7d0a95 100644 --- a/pkg/gui/filetree/commit_file_node.go +++ b/pkg/gui/filetree/commit_file_node.go @@ -12,8 +12,10 @@ type CommitFileNode struct { CompressionLevel int // equal to the number of forward slashes you'll see in the path when it's rendered in tree mode } -var _ INode = &CommitFileNode{} -var _ types.ListItem = &CommitFileNode{} +var ( + _ INode = &CommitFileNode{} + _ types.ListItem = &CommitFileNode{} +) func (s *CommitFileNode) ID() string { return s.GetPath() diff --git a/pkg/gui/filetree/file_node.go b/pkg/gui/filetree/file_node.go index 5a99b3e12..841f723fc 100644 --- a/pkg/gui/filetree/file_node.go +++ b/pkg/gui/filetree/file_node.go @@ -12,8 +12,10 @@ type FileNode struct { CompressionLevel int // equal to the number of forward slashes you'll see in the path when it's rendered in tree mode } -var _ INode = &FileNode{} -var _ types.ListItem = &FileNode{} +var ( + _ INode = &FileNode{} + _ types.ListItem = &FileNode{} +) func (s *FileNode) ID() string { return s.GetPath() diff --git a/pkg/gui/gui.go b/pkg/gui/gui.go index b65493d49..144be8df5 100644 --- a/pkg/gui/gui.go +++ b/pkg/gui/gui.go @@ -5,9 +5,8 @@ import ( "io/ioutil" "log" "os" - "sync" - "strings" + "sync" "time" "github.com/jesseduffield/gocui" diff --git a/pkg/gui/gui_test.go b/pkg/gui/gui_test.go index e35ab1896..58f0b0958 100644 --- a/pkg/gui/gui_test.go +++ b/pkg/gui/gui_test.go @@ -55,6 +55,7 @@ func Test(t *testing.T) { mode, speedEnv, func(t *testing.T, expected string, actual string, prefix string) { + t.Helper() 
assert.Equal(t, expected, actual, fmt.Sprintf("Unexpected %s. Expected:\n%s\nActual:\n%s\n", prefix, expected, actual)) }, includeSkipped, diff --git a/pkg/gui/keybindings.go b/pkg/gui/keybindings.go index 2bf2b9815..13fdf7d26 100644 --- a/pkg/gui/keybindings.go +++ b/pkg/gui/keybindings.go @@ -4,7 +4,6 @@ import ( "fmt" "log" "strings" - "unicode/utf8" "github.com/jesseduffield/gocui" @@ -1021,7 +1020,8 @@ func (self *Gui) GetInitialKeybindings() ([]*types.Binding, []*gocui.ViewMouseBi ViewName: "", Key: opts.GetKey(opts.Config.Universal.JumpToBlock[i]), Modifier: gocui.ModNone, - Handler: self.goToSideWindow(window)}) + Handler: self.goToSideWindow(window), + }) } } diff --git a/pkg/gui/merge_panel.go b/pkg/gui/merge_panel.go index 60aa93a17..b9a00eaa2 100644 --- a/pkg/gui/merge_panel.go +++ b/pkg/gui/merge_panel.go @@ -54,7 +54,7 @@ func (gui *Gui) handleMergeConflictUndo() error { gui.c.LogAction("Restoring file to previous state") gui.LogCommand("Undoing last conflict resolution", false) - if err := ioutil.WriteFile(state.GetPath(), []byte(state.GetContent()), 0644); err != nil { + if err := ioutil.WriteFile(state.GetPath(), []byte(state.GetContent()), 0o644); err != nil { return err } @@ -127,7 +127,7 @@ func (gui *Gui) resolveConflict(selection mergeconflicts.Selection) (bool, error gui.c.LogAction("Resolve merge conflict") gui.LogCommand(logStr, false) state.PushContent(content) - return true, ioutil.WriteFile(state.GetPath(), []byte(content), 0644) + return true, ioutil.WriteFile(state.GetPath(), []byte(content), 0o644) } // precondition: we actually have conflicts to render diff --git a/pkg/gui/mergeconflicts/find_conflicts.go b/pkg/gui/mergeconflicts/find_conflicts.go index 14a08fd68..3802a66b7 100644 --- a/pkg/gui/mergeconflicts/find_conflicts.go +++ b/pkg/gui/mergeconflicts/find_conflicts.go @@ -57,10 +57,12 @@ func findConflicts(content string) []*mergeConflict { return conflicts } -var CONFLICT_START = "<<<<<<< " -var CONFLICT_END = ">>>>>>> " -var CONFLICT_START_BYTES = []byte(CONFLICT_START) -var CONFLICT_END_BYTES = []byte(CONFLICT_END) +var ( + CONFLICT_START = "<<<<<<< " + CONFLICT_END = ">>>>>>> " + CONFLICT_START_BYTES = []byte(CONFLICT_START) + CONFLICT_END_BYTES = []byte(CONFLICT_END) +) func determineLineType(line string) LineType { // TODO: find out whether we ever actually get this prefix diff --git a/pkg/gui/mergeconflicts/state.go b/pkg/gui/mergeconflicts/state.go index b40b979e9..3d0254e15 100644 --- a/pkg/gui/mergeconflicts/state.go +++ b/pkg/gui/mergeconflicts/state.go @@ -176,7 +176,6 @@ func (s *State) ContentAfterConflictResolve(selection Selection) (bool, string, content += line } }) - if err != nil { return false, "", err } diff --git a/pkg/gui/options_menu_panel.go b/pkg/gui/options_menu_panel.go index 85ed34b5d..0073bb041 100644 --- a/pkg/gui/options_menu_panel.go +++ b/pkg/gui/options_menu_panel.go @@ -11,9 +11,7 @@ import ( ) func (gui *Gui) getBindings(context types.Context) []*types.Binding { - var ( - bindingsGlobal, bindingsPanel, bindingsNavigation []*types.Binding - ) + var bindingsGlobal, bindingsPanel, bindingsNavigation []*types.Binding bindings, _ := gui.GetInitialKeybindings() customBindings, err := gui.CustomCommandsClient.GetCustomCommandKeybindings() diff --git a/pkg/gui/patch_building_panel.go b/pkg/gui/patch_building_panel.go index dd82f998a..e734433c4 100644 --- a/pkg/gui/patch_building_panel.go +++ b/pkg/gui/patch_building_panel.go @@ -88,7 +88,6 @@ func (gui *Gui) handleToggleSelectionForPatch() error { return nil }) - if err != 
nil { return err } diff --git a/pkg/gui/presentation/commits.go b/pkg/gui/presentation/commits.go index 2bc9f475c..2d5262e89 100644 --- a/pkg/gui/presentation/commits.go +++ b/pkg/gui/presentation/commits.go @@ -19,8 +19,10 @@ type pipeSetCacheKey struct { commitCount int } -var pipeSetCache = make(map[pipeSetCacheKey][][]*graph.Pipe) -var mutex sync.Mutex +var ( + pipeSetCache = make(map[pipeSetCacheKey][][]*graph.Pipe) + mutex sync.Mutex +) type bisectBounds struct { newIndex int @@ -226,6 +228,8 @@ func getBisectStatusText(bisectStatus BisectStatus, bisectInfo *git_commands.Bis return style.Sprintf("<-- skipped") case BisectStatusCandidate: return style.Sprintf("?") + case BisectStatusNone: + return "" } return "" diff --git a/pkg/gui/presentation/files.go b/pkg/gui/presentation/files.go index 3efb8d29b..be57a3510 100644 --- a/pkg/gui/presentation/files.go +++ b/pkg/gui/presentation/files.go @@ -12,13 +12,17 @@ import ( "github.com/jesseduffield/lazygit/pkg/utils" ) -const EXPANDED_ARROW = "▼" -const COLLAPSED_ARROW = "►" +const ( + EXPANDED_ARROW = "▼" + COLLAPSED_ARROW = "►" +) -const INNER_ITEM = "├─ " -const LAST_ITEM = "└─ " -const NESTED = "│ " -const NOTHING = " " +const ( + INNER_ITEM = "├─ " + LAST_ITEM = "└─ " + NESTED = "│ " + NOTHING = " " +) func RenderFileTree( tree filetree.IFileTree, diff --git a/pkg/gui/presentation/graph/cell.go b/pkg/gui/presentation/graph/cell.go index e970c6dd2..cc2ad53c3 100644 --- a/pkg/gui/presentation/graph/cell.go +++ b/pkg/gui/presentation/graph/cell.go @@ -8,8 +8,10 @@ import ( "github.com/jesseduffield/lazygit/pkg/gui/style" ) -const MergeSymbol = '⏣' -const CommitSymbol = '◯' +const ( + MergeSymbol = '⏣' + CommitSymbol = '◯' +) type cellType int @@ -66,8 +68,10 @@ type rgbCacheKey struct { str string } -var rgbCache = make(map[rgbCacheKey]string) -var rgbCacheMutex sync.RWMutex +var ( + rgbCache = make(map[rgbCacheKey]string) + rgbCacheMutex sync.RWMutex +) func cachedSprint(style style.TextStyle, str string) string { switch v := style.Style.(type) { diff --git a/pkg/gui/recording.go b/pkg/gui/recording.go index 0a7f723df..9edd50f08 100644 --- a/pkg/gui/recording.go +++ b/pkg/gui/recording.go @@ -70,5 +70,5 @@ func (gui *Gui) saveRecording(recording *gocui.Recording) error { path := recordEventsTo() - return ioutil.WriteFile(path, jsonEvents, 0600) + return ioutil.WriteFile(path, jsonEvents, 0o600) } diff --git a/pkg/gui/types/context.go b/pkg/gui/types/context.go index bf56cf5db..58dee1c0e 100644 --- a/pkg/gui/types/context.go +++ b/pkg/gui/types/context.go @@ -93,8 +93,10 @@ type KeybindingsOpts struct { Guards KeybindingGuards } -type KeybindingsFn func(opts KeybindingsOpts) []*Binding -type MouseKeybindingsFn func(opts KeybindingsOpts) []*gocui.ViewMouseBinding +type ( + KeybindingsFn func(opts KeybindingsOpts) []*Binding + MouseKeybindingsFn func(opts KeybindingsOpts) []*gocui.ViewMouseBinding +) type HasKeybindings interface { GetKeybindings(opts KeybindingsOpts) []*Binding diff --git a/pkg/integration/integration.go b/pkg/integration/integration.go index a55d460fe..86049a230 100644 --- a/pkg/integration/integration.go +++ b/pkg/integration/integration.go @@ -95,6 +95,7 @@ func RunTests( } fnWrapper(test, func(t *testing.T) error { + t.Helper() speeds := getTestSpeeds(test.Speed, mode, speedEnv) testPath := filepath.Join(testDir, test.Name) actualRepoDir := filepath.Join(testPath, "actual") @@ -218,7 +219,7 @@ func prepareIntegrationTestDir(actualDir string) { dir, err := ioutil.ReadDir(actualDir) if err != nil { if os.IsNotExist(err) 
{ - err = os.Mkdir(actualDir, 0777) + err = os.Mkdir(actualDir, 0o777) if err != nil { panic(err) } @@ -332,7 +333,7 @@ func findOrCreateDir(path string) { _, err := os.Stat(path) if err != nil { if os.IsNotExist(err) { - err = os.MkdirAll(path, 0777) + err = os.MkdirAll(path, 0o777) if err != nil { panic(err) } diff --git a/pkg/tasks/tasks.go b/pkg/tasks/tasks.go index 4a987039c..fe257ae96 100644 --- a/pkg/tasks/tasks.go +++ b/pkg/tasks/tasks.go @@ -30,7 +30,7 @@ type ViewBufferManager struct { waitingMutex sync.Mutex taskIDMutex sync.Mutex Log *logrus.Entry - newTaskId int + newTaskID int readLines chan int taskKey string onNewKey func() @@ -70,14 +70,14 @@ func NewViewBufferManager( } } -func (m *ViewBufferManager) ReadLines(n int) { +func (self *ViewBufferManager) ReadLines(n int) { go utils.Safe(func() { - m.readLines <- n + self.readLines <- n }) } // note: onDone may be called twice -func (m *ViewBufferManager) NewCmdTask(start func() (*exec.Cmd, io.Reader), prefix string, linesToRead int, onDone func()) func(chan struct{}) error { +func (self *ViewBufferManager) NewCmdTask(start func() (*exec.Cmd, io.Reader), prefix string, linesToRead int, onDone func()) func(chan struct{}) error { return func(stop chan struct{}) error { var once sync.Once var onDoneWrapper func() @@ -85,8 +85,8 @@ func (m *ViewBufferManager) NewCmdTask(start func() (*exec.Cmd, io.Reader), pref onDoneWrapper = func() { once.Do(onDone) } } - if m.throttle { - m.Log.Info("throttling task") + if self.throttle { + self.Log.Info("throttling task") time.Sleep(THROTTLE_TIME) } @@ -106,10 +106,10 @@ func (m *ViewBufferManager) NewCmdTask(start func() (*exec.Cmd, io.Reader), pref // are running slow at the moment. This is admittedly a crude estimate, but // the point is that we only want to throttle when things are running slow // and the user is flicking through a bunch of items. 
- m.throttle = time.Since(startTime) < THROTTLE_TIME && timeToStart > COMMAND_START_THRESHOLD + self.throttle = time.Since(startTime) < THROTTLE_TIME && timeToStart > COMMAND_START_THRESHOLD if err := oscommands.Kill(cmd); err != nil { if !strings.Contains(err.Error(), "process already finished") { - m.Log.Errorf("error when running cmd task: %v", err) + self.Log.Errorf("error when running cmd task: %v", err) } } @@ -122,7 +122,7 @@ func (m *ViewBufferManager) NewCmdTask(start func() (*exec.Cmd, io.Reader), pref loadingMutex := sync.Mutex{} // not sure if it's the right move to redefine this or not - m.readLines = make(chan int, 1024) + self.readLines = make(chan int, 1024) done := make(chan struct{}) @@ -140,9 +140,9 @@ func (m *ViewBufferManager) NewCmdTask(start func() (*exec.Cmd, io.Reader), pref case <-ticker.C: loadingMutex.Lock() if !loaded { - m.beforeStart() - _, _ = m.writer.Write([]byte("loading...")) - m.refreshView() + self.beforeStart() + _, _ = self.writer.Write([]byte("loading...")) + self.refreshView() } loadingMutex.Unlock() } @@ -154,7 +154,7 @@ func (m *ViewBufferManager) NewCmdTask(start func() (*exec.Cmd, io.Reader), pref select { case <-stop: break outer - case linesToRead := <-m.readLines: + case linesToRead := <-self.readLines: for i := 0; i < linesToRead; i++ { select { case <-stop: @@ -165,9 +165,9 @@ func (m *ViewBufferManager) NewCmdTask(start func() (*exec.Cmd, io.Reader), pref ok := scanner.Scan() loadingMutex.Lock() if !loaded { - m.beforeStart() + self.beforeStart() if prefix != "" { - _, _ = m.writer.Write([]byte(prefix)) + _, _ = self.writer.Write([]byte(prefix)) } loaded = true } @@ -176,21 +176,21 @@ func (m *ViewBufferManager) NewCmdTask(start func() (*exec.Cmd, io.Reader), pref if !ok { // if we're here then there's nothing left to scan from the source // so we're at the EOF and can flush the stale content - m.onEndOfInput() + self.onEndOfInput() break outer } - _, _ = m.writer.Write(append(scanner.Bytes(), '\n')) + _, _ = self.writer.Write(append(scanner.Bytes(), '\n')) } - m.refreshView() + self.refreshView() } } - m.refreshView() + self.refreshView() if err := cmd.Wait(); err != nil { // it's fine if we've killed this program ourselves if !strings.Contains(err.Error(), "signal: killed") { - m.Log.Error(err) + self.Log.Error(err) } } @@ -202,7 +202,7 @@ func (m *ViewBufferManager) NewCmdTask(start func() (*exec.Cmd, io.Reader), pref close(done) }) - m.readLines <- linesToRead + self.readLines <- linesToRead <-done @@ -211,15 +211,15 @@ func (m *ViewBufferManager) NewCmdTask(start func() (*exec.Cmd, io.Reader), pref } // Close closes the task manager, killing whatever task may currently be running -func (t *ViewBufferManager) Close() { - if t.stopCurrentTask == nil { +func (self *ViewBufferManager) Close() { + if self.stopCurrentTask == nil { return } c := make(chan struct{}) go utils.Safe(func() { - t.stopCurrentTask() + self.stopCurrentTask() c <- struct{}{} }) @@ -235,28 +235,28 @@ func (t *ViewBufferManager) Close() { // 1) command based, where the manager can be asked to read more lines, but the command can be killed // 2) string based, where the manager can also be asked to read more lines -func (m *ViewBufferManager) NewTask(f func(stop chan struct{}) error, key string) error { +func (self *ViewBufferManager) NewTask(f func(stop chan struct{}) error, key string) error { go utils.Safe(func() { - m.taskIDMutex.Lock() - m.newTaskId++ - taskID := m.newTaskId + self.taskIDMutex.Lock() + self.newTaskID++ + taskID := self.newTaskID - if 
m.GetTaskKey() != key && m.onNewKey != nil { - m.onNewKey() + if self.GetTaskKey() != key && self.onNewKey != nil { + self.onNewKey() } - m.taskKey = key + self.taskKey = key - m.taskIDMutex.Unlock() + self.taskIDMutex.Unlock() - m.waitingMutex.Lock() - defer m.waitingMutex.Unlock() + self.waitingMutex.Lock() + defer self.waitingMutex.Unlock() - if taskID < m.newTaskId { + if taskID < self.newTaskID { return } - if m.stopCurrentTask != nil { - m.stopCurrentTask() + if self.stopCurrentTask != nil { + self.stopCurrentTask() } stop := make(chan struct{}) @@ -268,11 +268,11 @@ func (m *ViewBufferManager) NewTask(f func(stop chan struct{}) error, key string <-notifyStopped } - m.stopCurrentTask = func() { once.Do(onStop) } + self.stopCurrentTask = func() { once.Do(onStop) } go utils.Safe(func() { if err := f(stop); err != nil { - m.Log.Error(err) // might need an onError callback + self.Log.Error(err) // might need an onError callback } close(notifyStopped) diff --git a/pkg/test/log.go b/pkg/test/log.go index 3b166bb5d..32d79b987 100644 --- a/pkg/test/log.go +++ b/pkg/test/log.go @@ -8,9 +8,7 @@ import ( "github.com/stretchr/testify/assert" ) -var ( - _ logrus.FieldLogger = &FakeFieldLogger{} -) +var _ logrus.FieldLogger = &FakeFieldLogger{} // for now we're just tracking calls to the Error and Errorf methods type FakeFieldLogger struct { @@ -37,5 +35,6 @@ func (self *FakeFieldLogger) Errorf(format string, args ...interface{}) { } func (self *FakeFieldLogger) AssertErrors(t *testing.T, expectedErrors []string) { + t.Helper() assert.EqualValues(t, expectedErrors, self.loggedErrors) } diff --git a/pkg/utils/color.go b/pkg/utils/color.go index 37c60179a..2eced49e2 100644 --- a/pkg/utils/color.go +++ b/pkg/utils/color.go @@ -8,8 +8,10 @@ import ( "github.com/jesseduffield/lazygit/pkg/gui/style" ) -var decoloriseCache = make(map[string]string) -var decoloriseMutex sync.RWMutex +var ( + decoloriseCache = make(map[string]string) + decoloriseMutex sync.RWMutex +) // Decolorise strips a string of color func Decolorise(str string) string { diff --git a/pkg/utils/color_test.go b/pkg/utils/color_test.go index 37144e955..1440f946c 100644 --- a/pkg/utils/color_test.go +++ b/pkg/utils/color_test.go @@ -5,7 +5,7 @@ import ( ) func TestDecolorise(t *testing.T) { - var tests = []struct { + tests := []struct { input string output string }{ diff --git a/pkg/utils/lines_test.go b/pkg/utils/lines_test.go index 6069b8f93..faafb863a 100644 --- a/pkg/utils/lines_test.go +++ b/pkg/utils/lines_test.go @@ -65,7 +65,7 @@ func TestNormalizeLinefeeds(t *testing.T) { byteArray []byte expected []byte } - var scenarios = []scenario{ + scenarios := []scenario{ { // \r\n []byte{97, 115, 100, 102, 13, 10}, diff --git a/test/runner/main.go b/test/runner/main.go index af6195cbc..509b66772 100644 --- a/test/runner/main.go +++ b/test/runner/main.go @@ -38,7 +38,7 @@ func main() { }, mode, speedEnv, - func(_t *testing.T, expected string, actual string, prefix string) { + func(_t *testing.T, expected string, actual string, prefix string) { //nolint:thelper assert.Equal(MockTestingT{}, expected, actual, fmt.Sprintf("Unexpected %s. Expected:\n%s\nActual:\n%s\n", prefix, expected, actual)) }, includeSkipped, diff --git a/vendor/golang.org/x/exp/AUTHORS b/vendor/golang.org/x/exp/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/golang.org/x/exp/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. 
+# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/exp/CONTRIBUTORS b/vendor/golang.org/x/exp/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/golang.org/x/exp/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/exp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/exp/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go new file mode 100644 index 000000000..2c033dff4 --- /dev/null +++ b/vendor/golang.org/x/exp/constraints/constraints.go @@ -0,0 +1,50 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package constraints defines a set of useful constraints to be used +// with type parameters. +package constraints + +// Signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. +type Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// Unsigned is a constraint that permits any unsigned integer type. +// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. +type Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// Integer is a constraint that permits any integer type. +// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type Integer interface { + Signed | Unsigned +} + +// Float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. +type Float interface { + ~float32 | ~float64 +} + +// Complex is a constraint that permits any complex numeric type. +// If future releases of Go add new predeclared complex numeric types, +// this constraint will be modified to include them. +type Complex interface { + ~complex64 | ~complex128 +} + +// Ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. +// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type Ordered interface { + Integer | Float | ~string +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go new file mode 100644 index 000000000..df78daf90 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -0,0 +1,213 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slices defines various functions useful with slices of any type. +// Unless otherwise specified, these functions all apply to the elements +// of a slice at index 0 <= i < len(s). +package slices + +import "golang.org/x/exp/constraints" + +// Equal reports whether two slices are equal: the same length and all +// elements equal. If the lengths are different, Equal returns false. +// Otherwise, the elements are compared in increasing index order, and the +// comparison stops at the first unequal pair. 
+// Floating point NaNs are not considered equal. +func Equal[E comparable](s1, s2 []E) bool { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + return true +} + +// EqualFunc reports whether two slices are equal using a comparison +// function on each pair of elements. If the lengths are different, +// EqualFunc returns false. Otherwise, the elements are compared in +// increasing index order, and the comparison stops at the first index +// for which eq returns false. +func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, v1 := range s1 { + v2 := s2[i] + if !eq(v1, v2) { + return false + } + } + return true +} + +// Compare compares the elements of s1 and s2. +// The elements are compared sequentially, starting at index 0, +// until one element is not equal to the other. +// The result of comparing the first non-matching elements is returned. +// If both slices are equal until one of them ends, the shorter slice is +// considered less than the longer one. +// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. +// Comparisons involving floating point NaNs are ignored. +func Compare[E constraints.Ordered](s1, s2 []E) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + switch { + case v1 < v2: + return -1 + case v1 > v2: + return +1 + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// CompareFunc is like Compare but uses a comparison function +// on each pair of elements. The elements are compared in increasing +// index order, and the comparisons stop after the first time cmp +// returns non-zero. +// The result is the first non-zero result of cmp; if cmp always +// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), +// and +1 if len(s1) > len(s2). +func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + if c := cmp(v1, v2); c != 0 { + return c + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// Index returns the index of the first occurrence of v in s, +// or -1 if not present. +func Index[E comparable](s []E, v E) int { + for i, vs := range s { + if v == vs { + return i + } + } + return -1 +} + +// IndexFunc returns the first index i satisfying f(s[i]), +// or -1 if none do. +func IndexFunc[E any](s []E, f func(E) bool) int { + for i, v := range s { + if f(v) { + return i + } + } + return -1 +} + +// Contains reports whether v is present in s. +func Contains[E comparable](s []E, v E) bool { + return Index(s, v) >= 0 +} + +// Insert inserts the values v... into s at index i, +// returning the modified slice. +// In the returned slice r, r[i] == v[0]. +// Insert panics if i is out of range. +// This function is O(len(s) + len(v)). +func Insert[S ~[]E, E any](s S, i int, v ...E) S { + tot := len(s) + len(v) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[i:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[i:]) + return s2 +} + +// Delete removes the elements s[i:j] from s, returning the modified slice. +// Delete panics if s[i:j] is not a valid slice of s. +// Delete modifies the contents of the slice s; it does not create a new slice. 
+// Delete is O(len(s)-(j-i)), so if many items must be deleted, it is better to +// make a single call deleting them all together than to delete one at a time. +func Delete[S ~[]E, E any](s S, i, j int) S { + return append(s[:i], s[j:]...) +} + +// Clone returns a copy of the slice. +// The elements are copied using assignment, so this is a shallow clone. +func Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Compact replaces consecutive runs of equal elements with a single copy. +// This is like the uniq command found on Unix. +// Compact modifies the contents of the slice s; it does not create a new slice. +func Compact[S ~[]E, E comparable](s S) S { + if len(s) == 0 { + return s + } + i := 1 + last := s[0] + for _, v := range s[1:] { + if v != last { + s[i] = v + i++ + last = v + } + } + return s[:i] +} + +// CompactFunc is like Compact but uses a comparison function. +func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { + if len(s) == 0 { + return s + } + i := 1 + last := s[0] + for _, v := range s[1:] { + if !eq(v, last) { + s[i] = v + i++ + last = v + } + } + return s[:i] +} + +// Grow increases the slice's capacity, if necessary, to guarantee space for +// another n elements. After Grow(n), at least n elements can be appended +// to the slice without another allocation. Grow may modify elements of the +// slice between the length and the capacity. If n is negative or too large to +// allocate the memory, Grow panics. +func Grow[S ~[]E, E any](s S, n int) S { + return append(s, make(S, n)...)[:len(s)] +} + +// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. +func Clip[S ~[]E, E any](s S) S { + return s[:len(s):len(s)] +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go new file mode 100644 index 000000000..b2035abe8 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -0,0 +1,95 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// Sort sorts a slice of any ordered type in ascending order. +func Sort[E constraints.Ordered](x []E) { + n := len(x) + quickSortOrdered(x, 0, n, maxDepth(n)) +} + +// Sort sorts the slice x in ascending order as determined by the less function. +// This sort is not guaranteed to be stable. +func SortFunc[E any](x []E, less func(a, b E) bool) { + n := len(x) + quickSortLessFunc(x, 0, n, maxDepth(n), less) +} + +// SortStable sorts the slice x while keeping the original order of equal +// elements, using less to compare elements. +func SortStableFunc[E any](x []E, less func(a, b E) bool) { + stableLessFunc(x, len(x), less) +} + +// IsSorted reports whether x is sorted in ascending order. +func IsSorted[E constraints.Ordered](x []E) bool { + for i := len(x) - 1; i > 0; i-- { + if x[i] < x[i-1] { + return false + } + } + return true +} + +// IsSortedFunc reports whether x is sorted in ascending order, with less as the +// comparison function. +func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { + for i := len(x) - 1; i > 0; i-- { + if less(x[i], x[i-1]) { + return false + } + } + return true +} + +// BinarySearch searches for target in a sorted slice and returns the smallest +// index at which target is found. 
If the target is not found, the index at +// which it could be inserted into the slice is returned; therefore, if the +// intention is to find target itself a separate check for equality with the +// element at the returned index is required. +func BinarySearch[E constraints.Ordered](x []E, target E) int { + return search(len(x), func(i int) bool { return x[i] >= target }) +} + +// BinarySearchFunc uses binary search to find and return the smallest index i +// in [0, n) at which ok(i) is true, assuming that on the range [0, n), +// ok(i) == true implies ok(i+1) == true. That is, BinarySearchFunc requires +// that ok is false for some (possibly empty) prefix of the input range [0, n) +// and then true for the (possibly empty) remainder; BinarySearchFunc returns +// the first true index. If there is no such index, BinarySearchFunc returns n. +// (Note that the "not found" return value is not -1 as in, for instance, +// strings.Index.) Search calls ok(i) only for i in the range [0, n). +func BinarySearchFunc[E any](x []E, ok func(E) bool) int { + return search(len(x), func(i int) bool { return ok(x[i]) }) +} + +// maxDepth returns a threshold at which quicksort should switch +// to heapsort. It returns 2*ceil(lg(n+1)). +func maxDepth(n int) int { + var depth int + for i := n; i > 0; i >>= 1 { + depth++ + } + return depth * 2 +} + +func search(n int, f func(int) bool) int { + // Define f(-1) == false and f(n) == true. + // Invariant: f(i-1) == false, f(j) == true. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if !f(h) { + i = h + 1 // preserves f(i-1) == false + } else { + j = h // preserves f(j) == true + } + } + // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. + return i +} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go new file mode 100644 index 000000000..82f156fd6 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortfunc.go @@ -0,0 +1,342 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +// insertionSortLessFunc sorts data[a:b] using insertion sort. +func insertionSortLessFunc[Elem any](data []Elem, a, b int, less func(a, b Elem) bool) { + for i := a + 1; i < b; i++ { + for j := i; j > a && less(data[j], data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownLessFunc implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. +func siftDownLessFunc[Elem any](data []Elem, lo, hi, first int, less func(a, b Elem) bool) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && less(data[first+child], data[first+child+1]) { + child++ + } + if !less(data[first+root], data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortLessFunc[Elem any](data []Elem, a, b int, less func(a, b Elem) bool) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownLessFunc(data, i, hi, first, less) + } + + // Pop elements, largest first, into end of data. 
+ for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownLessFunc(data, lo, i, first, less) + } +} + +// Quicksort, loosely following Bentley and McIlroy, +// "Engineering a Sort Function" SP&E November 1993. + +// medianOfThreeLessFunc moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. +func medianOfThreeLessFunc[Elem any](data []Elem, m1, m0, m2 int, less func(a, b Elem) bool) { + // sort 3 elements + if less(data[m1], data[m0]) { + data[m1], data[m0] = data[m0], data[m1] + } + // data[m0] <= data[m1] + if less(data[m2], data[m1]) { + data[m2], data[m1] = data[m1], data[m2] + // data[m0] <= data[m2] && data[m1] < data[m2] + if less(data[m1], data[m0]) { + data[m1], data[m0] = data[m0], data[m1] + } + } + // now data[m0] <= data[m1] <= data[m2] +} + +func swapRangeLessFunc[Elem any](data []Elem, a, b, n int, less func(a, b Elem) bool) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func doPivotLessFunc[Elem any](data []Elem, lo, hi int, less func(a, b Elem) bool) (midlo, midhi int) { + m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. + if hi-lo > 40 { + // Tukey's "Ninther" median of three medians of three. + s := (hi - lo) / 8 + medianOfThreeLessFunc(data, lo, lo+s, lo+2*s, less) + medianOfThreeLessFunc(data, m, m-s, m+s, less) + medianOfThreeLessFunc(data, hi-1, hi-1-s, hi-1-2*s, less) + } + medianOfThreeLessFunc(data, lo, m, hi-1, less) + + // Invariants are: + // data[lo] = pivot (set up by ChoosePivot) + // data[lo < i < a] < pivot + // data[a <= i < b] <= pivot + // data[b <= i < c] unexamined + // data[c <= i < hi-1] > pivot + // data[hi-1] >= pivot + pivot := lo + a, c := lo+1, hi-1 + + for ; a < c && less(data[a], data[pivot]); a++ { + } + b := a + for { + for ; b < c && !less(data[pivot], data[b]); b++ { // data[b] <= pivot + } + for ; b < c && less(data[pivot], data[c-1]); c-- { // data[c-1] > pivot + } + if b >= c { + break + } + // data[b] > pivot; data[c-1] <= pivot + data[b], data[c-1] = data[c-1], data[b] + b++ + c-- + } + // If hi-c<3 then there are duplicates (by property of median of nine). + // Let's be a bit more conservative, and set border to 5. 
+ protect := hi-c < 5 + if !protect && hi-c < (hi-lo)/4 { + // Lets test some points for equality to pivot + dups := 0 + if !less(data[pivot], data[hi-1]) { // data[hi-1] = pivot + data[c], data[hi-1] = data[hi-1], data[c] + c++ + dups++ + } + if !less(data[b-1], data[pivot]) { // data[b-1] = pivot + b-- + dups++ + } + // m-lo = (hi-lo)/2 > 6 + // b-lo > (hi-lo)*3/4-1 > 8 + // ==> m < b ==> data[m] <= pivot + if !less(data[m], data[pivot]) { // data[m] = pivot + data[m], data[b-1] = data[b-1], data[m] + b-- + dups++ + } + // if at least 2 points are equal to pivot, assume skewed distribution + protect = dups > 1 + } + if protect { + // Protect against a lot of duplicates + // Add invariant: + // data[a <= i < b] unexamined + // data[b <= i < c] = pivot + for { + for ; a < b && !less(data[b-1], data[pivot]); b-- { // data[b] == pivot + } + for ; a < b && less(data[a], data[pivot]); a++ { // data[a] < pivot + } + if a >= b { + break + } + // data[a] == pivot; data[b-1] < pivot + data[a], data[b-1] = data[b-1], data[a] + a++ + b-- + } + } + // Swap pivot into middle + data[pivot], data[b-1] = data[b-1], data[pivot] + return b - 1, c +} + +func quickSortLessFunc[Elem any](data []Elem, a, b, maxDepth int, less func(a, b Elem) bool) { + for b-a > 12 { // Use ShellSort for slices <= 12 elements + if maxDepth == 0 { + heapSortLessFunc(data, a, b, less) + return + } + maxDepth-- + mlo, mhi := doPivotLessFunc(data, a, b, less) + // Avoiding recursion on the larger subproblem guarantees + // a stack depth of at most lg(b-a). + if mlo-a < b-mhi { + quickSortLessFunc(data, a, mlo, maxDepth, less) + a = mhi // i.e., quickSortLessFunc(data, mhi, b) + } else { + quickSortLessFunc(data, mhi, b, maxDepth, less) + b = mlo // i.e., quickSortLessFunc(data, a, mlo) + } + } + if b-a > 1 { + // Do ShellSort pass with gap 6 + // It could be written in this simplified form cause b-a <= 12 + for i := a + 6; i < b; i++ { + if less(data[i], data[i-6]) { + data[i], data[i-6] = data[i-6], data[i] + } + } + insertionSortLessFunc(data, a, b, less) + } +} + +func stableLessFunc[Elem any](data []Elem, n int, less func(a, b Elem) bool) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortLessFunc(data, a, b, less) + a = b + b += blockSize + } + insertionSortLessFunc(data, a, n, less) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeLessFunc(data, a, a+blockSize, b, less) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeLessFunc(data, a, m, n, less) + } + blockSize *= 2 + } +} + +// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. 
+// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. +func symMergeLessFunc[Elem any](data []Elem, a, m, b int, less func(a, b Elem) bool) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if less(data[h], data[a]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !less(data[m], data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !less(data[p-c], data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateLessFunc(data, start, m, end, less) + } + if a < start && start < mid { + symMergeLessFunc(data, a, start, mid, less) + } + if mid < end && end < b { + symMergeLessFunc(data, mid, end, b, less) + } +} + +// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateLessFunc[Elem any](data []Elem, a, m, b int, less func(a, b Elem) bool) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeLessFunc(data, m-i, m, j, less) + i -= j + } else { + swapRangeLessFunc(data, m-i, m+j-i, i, less) + j -= i + } + } + // i == j + swapRangeLessFunc(data, m-i, m, i, less) +} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go new file mode 100644 index 000000000..6fa64a2e2 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -0,0 +1,344 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// insertionSortOrdered sorts data[a:b] using insertion sort. +func insertionSortOrdered[Elem constraints.Ordered](data []Elem, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j] < data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownOrdered implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
+func siftDownOrdered[Elem constraints.Ordered](data []Elem, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (data[first+child] < data[first+child+1]) { + child++ + } + if !(data[first+root] < data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortOrdered[Elem constraints.Ordered](data []Elem, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownOrdered(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownOrdered(data, lo, i, first) + } +} + +// Quicksort, loosely following Bentley and McIlroy, +// "Engineering a Sort Function" SP&E November 1993. + +// medianOfThreeOrdered moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. +func medianOfThreeOrdered[Elem constraints.Ordered](data []Elem, m1, m0, m2 int) { + // sort 3 elements + if data[m1] < data[m0] { + data[m1], data[m0] = data[m0], data[m1] + } + // data[m0] <= data[m1] + if data[m2] < data[m1] { + data[m2], data[m1] = data[m1], data[m2] + // data[m0] <= data[m2] && data[m1] < data[m2] + if data[m1] < data[m0] { + data[m1], data[m0] = data[m0], data[m1] + } + } + // now data[m0] <= data[m1] <= data[m2] +} + +func swapRangeOrdered[Elem constraints.Ordered](data []Elem, a, b, n int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func doPivotOrdered[Elem constraints.Ordered](data []Elem, lo, hi int) (midlo, midhi int) { + m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. + if hi-lo > 40 { + // Tukey's "Ninther" median of three medians of three. + s := (hi - lo) / 8 + medianOfThreeOrdered(data, lo, lo+s, lo+2*s) + medianOfThreeOrdered(data, m, m-s, m+s) + medianOfThreeOrdered(data, hi-1, hi-1-s, hi-1-2*s) + } + medianOfThreeOrdered(data, lo, m, hi-1) + + // Invariants are: + // data[lo] = pivot (set up by ChoosePivot) + // data[lo < i < a] < pivot + // data[a <= i < b] <= pivot + // data[b <= i < c] unexamined + // data[c <= i < hi-1] > pivot + // data[hi-1] >= pivot + pivot := lo + a, c := lo+1, hi-1 + + for ; a < c && (data[a] < data[pivot]); a++ { + } + b := a + for { + for ; b < c && !(data[pivot] < data[b]); b++ { // data[b] <= pivot + } + for ; b < c && (data[pivot] < data[c-1]); c-- { // data[c-1] > pivot + } + if b >= c { + break + } + // data[b] > pivot; data[c-1] <= pivot + data[b], data[c-1] = data[c-1], data[b] + b++ + c-- + } + // If hi-c<3 then there are duplicates (by property of median of nine). + // Let's be a bit more conservative, and set border to 5. 
+ protect := hi-c < 5 + if !protect && hi-c < (hi-lo)/4 { + // Lets test some points for equality to pivot + dups := 0 + if !(data[pivot] < data[hi-1]) { // data[hi-1] = pivot + data[c], data[hi-1] = data[hi-1], data[c] + c++ + dups++ + } + if !(data[b-1] < data[pivot]) { // data[b-1] = pivot + b-- + dups++ + } + // m-lo = (hi-lo)/2 > 6 + // b-lo > (hi-lo)*3/4-1 > 8 + // ==> m < b ==> data[m] <= pivot + if !(data[m] < data[pivot]) { // data[m] = pivot + data[m], data[b-1] = data[b-1], data[m] + b-- + dups++ + } + // if at least 2 points are equal to pivot, assume skewed distribution + protect = dups > 1 + } + if protect { + // Protect against a lot of duplicates + // Add invariant: + // data[a <= i < b] unexamined + // data[b <= i < c] = pivot + for { + for ; a < b && !(data[b-1] < data[pivot]); b-- { // data[b] == pivot + } + for ; a < b && (data[a] < data[pivot]); a++ { // data[a] < pivot + } + if a >= b { + break + } + // data[a] == pivot; data[b-1] < pivot + data[a], data[b-1] = data[b-1], data[a] + a++ + b-- + } + } + // Swap pivot into middle + data[pivot], data[b-1] = data[b-1], data[pivot] + return b - 1, c +} + +func quickSortOrdered[Elem constraints.Ordered](data []Elem, a, b, maxDepth int) { + for b-a > 12 { // Use ShellSort for slices <= 12 elements + if maxDepth == 0 { + heapSortOrdered(data, a, b) + return + } + maxDepth-- + mlo, mhi := doPivotOrdered(data, a, b) + // Avoiding recursion on the larger subproblem guarantees + // a stack depth of at most lg(b-a). + if mlo-a < b-mhi { + quickSortOrdered(data, a, mlo, maxDepth) + a = mhi // i.e., quickSortOrdered(data, mhi, b) + } else { + quickSortOrdered(data, mhi, b, maxDepth) + b = mlo // i.e., quickSortOrdered(data, a, mlo) + } + } + if b-a > 1 { + // Do ShellSort pass with gap 6 + // It could be written in this simplified form cause b-a <= 12 + for i := a + 6; i < b; i++ { + if data[i] < data[i-6] { + data[i], data[i-6] = data[i-6], data[i] + } + } + insertionSortOrdered(data, a, b) + } +} + +func stableOrdered[Elem constraints.Ordered](data []Elem, n int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortOrdered(data, a, b) + a = b + b += blockSize + } + insertionSortOrdered(data, a, n) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeOrdered(data, a, a+blockSize, b) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeOrdered(data, a, m, n) + } + blockSize *= 2 + } +} + +// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. 
+func symMergeOrdered[Elem constraints.Ordered](data []Elem, a, m, b int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if data[h] < data[a] { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !(data[m] < data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !(data[p-c] < data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateOrdered(data, start, m, end) + } + if a < start && start < mid { + symMergeOrdered(data, a, start, mid) + } + if mid < end && end < b { + symMergeOrdered(data, mid, end, b) + } +} + +// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. 
+func rotateOrdered[Elem constraints.Ordered](data []Elem, a, m, b int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeOrdered(data, m-i, m, j) + i -= j + } else { + swapRangeOrdered(data, m-i, m+j-i, i) + j -= i + } + } + // i == j + swapRangeOrdered(data, m-i, m, i) +} diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go index 65846e674..ae7d049f1 100644 --- a/vendor/gopkg.in/yaml.v3/apic.go +++ b/vendor/gopkg.in/yaml.v3/apic.go @@ -108,6 +108,7 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) { raw_buffer: make([]byte, 0, output_raw_buffer_size), states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, } } diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go index be63169b7..df36e3a30 100644 --- a/vendor/gopkg.in/yaml.v3/decode.go +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -35,6 +35,7 @@ type parser struct { doc *Node anchors map[string]*Node doneInit bool + textless bool } func newParser(b []byte) *parser { @@ -108,14 +109,18 @@ func (p *parser) peek() yaml_event_type_t { func (p *parser) fail() { var where string var line int - if p.parser.problem_mark.line != 0 { + if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.problem_mark.line != 0 { line = p.parser.problem_mark.line // Scanner errors don't iterate line before returning error if p.parser.error == yaml_SCANNER_ERROR { line++ } - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line } if line != 0 { where = "line " + strconv.Itoa(line) + ": " @@ -169,17 +174,20 @@ func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { } else if kind == ScalarNode { tag, _ = resolve("", value) } - return &Node{ - Kind: kind, - Tag: tag, - Value: value, - Style: style, - Line: p.event.start_mark.line + 1, - Column: p.event.start_mark.column + 1, - HeadComment: string(p.event.head_comment), - LineComment: string(p.event.line_comment), - FootComment: string(p.event.foot_comment), + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n } func (p *parser) parseChild(parent *Node) *Node { @@ -497,8 +505,13 @@ func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { good = d.mapping(n, out) case SequenceNode: good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough default: - panic("internal error: unknown node kind: " + strconv.Itoa(int(n.Kind))) + failf("cannot decode node with unknown kind %d", n.Kind) } return good } @@ -533,6 +546,17 @@ func resetMap(out reflect.Value) { } } +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + func (d *decoder) scalar(n *Node, out reflect.Value) bool { var tag string var resolved interface{} @@ -550,14 +574,7 @@ func (d *decoder) scalar(n *Node, out reflect.Value) bool { } } if resolved == nil { - if out.CanAddr() { - switch out.Kind() { - case 
reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - out.Set(reflect.Zero(out.Type())) - return true - } - } - return false + return d.null(out) } if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { // We've resolved to exactly the type we want, so use that. @@ -791,8 +808,10 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } } + mapIsNew := false if out.IsNil() { out.Set(reflect.MakeMap(outt)) + mapIsNew = true } for i := 0; i < l; i += 2 { if isMerge(n.Content[i]) { @@ -809,7 +828,7 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { failf("invalid map key: %#v", k.Interface()) } e := reflect.New(et).Elem() - if d.unmarshal(n.Content[i+1], e) { + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { out.SetMapIndex(k, e) } } diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go index ab2a06619..0f47c9ca8 100644 --- a/vendor/gopkg.in/yaml.v3/emitterc.go +++ b/vendor/gopkg.in/yaml.v3/emitterc.go @@ -235,10 +235,13 @@ func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool emitter.indent = 0 } } else if !indentless { - emitter.indent += emitter.best_indent - // [Go] If inside a block sequence item, discount the space taken by the indicator. - if emitter.best_indent > 2 && emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { - emitter.indent -= 2 + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. + emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) } } return true @@ -725,16 +728,9 @@ func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_e // Expect a block item node. func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { if first { - // [Go] The original logic here would not indent the sequence when inside a mapping. - // In Go we always indent it, but take the sequence indicator out of the indentation. - indentless := emitter.best_indent == 2 && emitter.mapping_context && (emitter.column == 0 || !emitter.indention) - original := emitter.indent - if !yaml_emitter_increase_indent(emitter, false, indentless) { + if !yaml_emitter_increase_indent(emitter, false, false) { return false } - if emitter.indent > original+2 { - emitter.indent -= 2 - } } if event.typ == yaml_SEQUENCE_END_EVENT { emitter.indent = emitter.indents[len(emitter.indents)-1] @@ -785,6 +781,13 @@ func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_ev if !yaml_emitter_write_indent(emitter) { return false } + if len(emitter.line_comment) > 0 { + // [Go] A line comment was provided for the key. That's unusual as the + // scanner associates line comments with the value. Either way, + // save the line comment and render it appropriately later. 
+ emitter.key_line_comment = emitter.line_comment + emitter.line_comment = nil + } if yaml_emitter_check_simple_key(emitter) { emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) return yaml_emitter_emit_node(emitter, event, false, false, true, true) @@ -810,6 +813,27 @@ func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_ return false } } + if len(emitter.key_line_comment) > 0 { + // [Go] Line comments are generally associated with the value, but when there's + // no value on the same line as a mapping key they end up attached to the + // key itself. + if event.typ == yaml_SCALAR_EVENT { + if len(emitter.line_comment) == 0 { + // A scalar is coming and it has no line comments by itself yet, + // so just let it handle the line comment as usual. If it has a + // line comment, we can't have both so the one from the key is lost. + emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } + } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { + // An indented block follows, so write the comment right now. + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + } + } emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { return false @@ -823,6 +847,10 @@ func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_ return true } +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + // Expect a node. func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, root bool, sequence bool, mapping bool, simple_key bool) bool { @@ -1866,7 +1894,7 @@ func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bo if !yaml_emitter_write_block_scalar_hints(emitter, value) { return false } - if !put_break(emitter) { + if !yaml_emitter_process_line_comment(emitter) { return false } //emitter.indention = true @@ -1903,10 +1931,10 @@ func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) boo if !yaml_emitter_write_block_scalar_hints(emitter, value) { return false } - - if !put_break(emitter) { + if !yaml_emitter_process_line_comment(emitter) { return false } + //emitter.indention = true emitter.whitespace = true diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go index 1f37271ce..de9e72a3e 100644 --- a/vendor/gopkg.in/yaml.v3/encode.go +++ b/vendor/gopkg.in/yaml.v3/encode.go @@ -119,6 +119,14 @@ func (e *encoder) marshal(tag string, in reflect.Value) { case *Node: e.nodev(in) return + case Node: + if !in.CanAddr() { + var n = reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + e.nodev(in.Addr()) + return case time.Time: e.timev(tag, in) return @@ -422,18 +430,23 @@ func (e *encoder) nodev(in reflect.Value) { } func (e *encoder) node(node *Node, tail string) { + // Zero nodes behave as nil. 
+ if node.Kind == 0 && node.IsZero() { + e.nilv() + return + } + // If the tag was not explicitly requested, and dropping it won't change the // implicit tag of the value, don't include it in the presentation. var tag = node.Tag var stag = shortTag(tag) - var rtag string var forceQuoting bool if tag != "" && node.Style&TaggedStyle == 0 { if node.Kind == ScalarNode { if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { tag = "" } else { - rtag, _ = resolve("", node.Value) + rtag, _ := resolve("", node.Value) if rtag == stag { tag = "" } else if stag == strTag { @@ -442,6 +455,7 @@ func (e *encoder) node(node *Node, tail string) { } } } else { + var rtag string switch node.Kind { case MappingNode: rtag = mapTag @@ -471,7 +485,7 @@ func (e *encoder) node(node *Node, tail string) { if node.Style&FlowStyle != 0 { style = yaml_FLOW_SEQUENCE_STYLE } - e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style)) + e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)) e.event.head_comment = []byte(node.HeadComment) e.emit() for _, node := range node.Content { @@ -487,7 +501,7 @@ func (e *encoder) node(node *Node, tail string) { if node.Style&FlowStyle != 0 { style = yaml_FLOW_MAPPING_STYLE } - yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style) + yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style) e.event.tail_comment = []byte(tail) e.event.head_comment = []byte(node.HeadComment) e.emit() @@ -528,11 +542,11 @@ func (e *encoder) node(node *Node, tail string) { case ScalarNode: value := node.Value if !utf8.ValidString(value) { - if tag == binaryTag { + if stag == binaryTag { failf("explicitly tagged !!binary data must be base64-encoded") } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + if stag != "" { + failf("cannot marshal invalid UTF-8 data as %s", stag) } // It can't be encoded directly as YAML so use a binary tag // and encode it as base64. 
@@ -557,5 +571,7 @@ func (e *encoder) node(node *Node, tail string) { } e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) } } diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go index aea9050b8..ac66fccc0 100644 --- a/vendor/gopkg.in/yaml.v3/parserc.go +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -648,6 +648,10 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i implicit: implicit, style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } return true } if len(anchor) > 0 || len(tag) > 0 { @@ -694,25 +698,13 @@ func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_e if token.typ == yaml_BLOCK_ENTRY_TOKEN { mark := token.end_mark - prior_head := len(parser.head_comment) + prior_head_len := len(parser.head_comment) skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) token = peek_token(parser) if token == nil { return false } - if prior_head > 0 && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - // [Go] It's a sequence under a sequence entry, so the former head comment - // is for the list itself, not the first list item under it. - parser.stem_comment = parser.head_comment[:prior_head] - if len(parser.head_comment) == prior_head { - parser.head_comment = nil - } else { - // Copy suffix to prevent very strange bugs if someone ever appends - // further bytes to the prefix in the stem_comment slice above. - parser.head_comment = append([]byte(nil), parser.head_comment[prior_head+1:]...) - } - - } if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) return yaml_parser_parse_node(parser, event, true, false) @@ -754,7 +746,9 @@ func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *y if token.typ == yaml_BLOCK_ENTRY_TOKEN { mark := token.end_mark + prior_head_len := len(parser.head_comment) skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) token = peek_token(parser) if token == nil { return false @@ -780,6 +774,32 @@ func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *y return true } +// Split stem comment from head comment. +// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. +func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) 
+ } +} + // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START // ******************* diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go index 57e954ca5..ca0070108 100644 --- a/vendor/gopkg.in/yaml.v3/scannerc.go +++ b/vendor/gopkg.in/yaml.v3/scannerc.go @@ -749,6 +749,11 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { if !ok { return } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } if !yaml_parser_scan_line_comment(parser, comment_mark) { ok = false return @@ -2255,10 +2260,9 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l } } if parser.buffer[parser.buffer_pos] == '#' { - // TODO Test this and then re-enable it. - //if !yaml_parser_scan_line_comment(parser, start_mark) { - // return false - //} + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } for !is_breakz(parser.buffer, parser.buffer_pos) { skip(parser) if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { @@ -2856,13 +2860,12 @@ func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t return false } skip_line(parser) - } else { - if parser.mark.index >= seen { - if len(text) == 0 { - start_mark = parser.mark - } - text = append(text, parser.buffer[parser.buffer_pos]) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark } + text = read(parser, text) + } else { skip(parser) } } @@ -2888,6 +2891,10 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo var token_mark = token.start_mark var start_mark yaml_mark_t + var next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } var recent_empty = false var first_empty = parser.newlines <= 1 @@ -2919,15 +2926,18 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo continue } c := parser.buffer[parser.buffer_pos+peek] - if is_breakz(parser.buffer, parser.buffer_pos+peek) || parser.flow_level > 0 && (c == ']' || c == '}') { + var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { // Got line break or terminator. - if !recent_empty { - if first_empty && (start_mark.line == foot_line || start_mark.column-1 < parser.indent) { + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { // This is the first empty line and there were no empty lines before, // so this initial part of the comment is a foot of the prior token // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. if len(text) > 0 { - if start_mark.column-1 < parser.indent { + if start_mark.column-1 < next_indent { // If dedented it's unrelated to the prior token. token_mark = start_mark } @@ -2958,7 +2968,7 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo continue } - if len(text) > 0 && column < parser.indent+1 && column != start_mark.column { + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { // The comment at the different indentation is a foot of the // preceding data rather than a head of the upcoming one. 
parser.comments = append(parser.comments, yaml_comment_t{ @@ -2999,10 +3009,9 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo return false } skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) } else { - if parser.mark.index >= seen { - text = append(text, parser.buffer[parser.buffer_pos]) - } skip(parser) } } @@ -3010,6 +3019,10 @@ func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) boo peek = 0 column = 0 line = parser.mark.line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } } if len(text) > 0 { diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go index b5d35a50d..8cec6da48 100644 --- a/vendor/gopkg.in/yaml.v3/yaml.go +++ b/vendor/gopkg.in/yaml.v3/yaml.go @@ -89,7 +89,7 @@ func Unmarshal(in []byte, out interface{}) (err error) { return unmarshal(in, out, false) } -// A Decorder reads and decodes YAML values from an input stream. +// A Decoder reads and decodes YAML values from an input stream. type Decoder struct { parser *parser knownFields bool @@ -194,7 +194,7 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // Zero valued structs will be omitted if all their public // fields are zero, unless they implement an IsZero // method (see the IsZeroer interface type), in which -// case the field will be included if that method returns true. +// case the field will be excluded if IsZero returns true. // // flow Marshal using a flow style (useful for structs, // sequences and maps). @@ -252,6 +252,24 @@ func (e *Encoder) Encode(v interface{}) (err error) { return nil } +// Encode encodes value v and stores its representation in n. +// +// See the documentation for Marshal for details about the +// conversion of Go values into YAML. +func (n *Node) Encode(v interface{}) (err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(v)) + e.finish() + p := newParser(e.out) + p.textless = true + defer p.destroy() + doc := p.parse() + *n = *doc.Content[0] + return nil +} + // SetIndent changes the used indentation used when encoding. func (e *Encoder) SetIndent(spaces int) { if spaces < 0 { @@ -328,6 +346,12 @@ const ( // and maps, Node is an intermediate representation that allows detailed // control over the content being decoded or encoded. // +// It's worth noting that although Node offers access into details such as +// line numbers, colums, and comments, the content when re-encoded will not +// have its original textual representation preserved. An effort is made to +// render the data plesantly, and to preserve comments near the data they +// describe, though. +// // Values that make use of the Node type interact with the yaml package in the // same way any other type would do, by encoding and decoding yaml data // directly or indirectly into them. @@ -391,6 +415,13 @@ type Node struct { Column int } +// IsZero returns whether the node has all of its fields unset. +func (n *Node) IsZero() bool { + return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil && + n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 +} + + // LongTag returns the long form of the tag that indicates the data type for // the node. If the Tag field isn't explicitly defined, one will be computed // based on the node properties. 
@@ -418,6 +449,11 @@ func (n *Node) ShortTag() string { case ScalarNode: tag, _ := resolve("", n.Value) return tag + case 0: + // Special case to make the zero value convenient. + if n.IsZero() { + return nullTag + } } return "" } diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go index 2719cfbb0..7c6d00770 100644 --- a/vendor/gopkg.in/yaml.v3/yamlh.go +++ b/vendor/gopkg.in/yaml.v3/yamlh.go @@ -787,6 +787,8 @@ type yaml_emitter_t struct { foot_comment []byte tail_comment []byte + key_line_comment []byte + // Dumper stuff opened bool // If the stream was already opened? diff --git a/vendor/modules.txt b/vendor/modules.txt index 95470036a..f24c14661 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -262,6 +262,10 @@ golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts +# golang.org/x/exp v0.0.0-20220318154914-8dddf5d87bd8 +## explicit; go 1.18 +golang.org/x/exp/constraints +golang.org/x/exp/slices # golang.org/x/net v0.0.0-20201002202402-0a1ea396d57c ## explicit; go 1.11 golang.org/x/net/context @@ -288,6 +292,6 @@ gopkg.in/ozeidan/fuzzy-patricia.v3/patricia # gopkg.in/warnings.v0 v0.1.2 ## explicit gopkg.in/warnings.v0 -# gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c +# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b ## explicit gopkg.in/yaml.v3
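
Usage illustration for the newly vendored golang.org/x/exp/slices package. The patch above only carries the generated sort internals (zsortfunc.go, zsortordered.go); the exported wrappers used below (Sort, SortStableFunc) are assumed from that snapshot of the package and are not part of this diff. The element type and fields are made up for the example.

// Sketch: sorting with the vendored slices package, assuming its exported API.
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

// branch is a hypothetical element type for the example.
type branch struct {
	Name    string
	Recency int
}

func main() {
	// Ordered element types go through the zsortordered.go variants.
	ids := []int{3, 1, 2}
	slices.Sort(ids)
	fmt.Println(ids) // [1 2 3]

	// A custom less function goes through the zsortfunc.go variants;
	// the stable variant uses the insertion-sort-plus-symMerge driver shown above.
	branches := []branch{{"feature", 5}, {"main", 1}, {"fix", 3}}
	slices.SortStableFunc(branches, func(a, b branch) bool {
		return a.Recency < b.Recency
	})
	fmt.Println(branches) // [{main 1} {fix 3} {feature 5}]
}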
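
Usage illustration for the yaml.v3 bump: the updated vendor adds Node.Encode, Node.IsZero, and marshaling of Node values (not just *Node). A minimal sketch follows; the config struct and its fields are invented for illustration, only the yaml API calls come from the patch.

// Sketch: encoding a Go value into an intermediate yaml.Node (new in this yaml.v3 revision).
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type guiConfig struct {
	Theme    string `yaml:"theme"`
	MouseOff bool   `yaml:"mouseOff"`
}

func main() {
	var node yaml.Node

	// Node.Encode builds a node tree from an arbitrary Go value.
	if err := node.Encode(guiConfig{Theme: "dark", MouseOff: true}); err != nil {
		panic(err)
	}

	// The node can be inspected or annotated before being written out,
	// e.g. attaching a comment that the emitter will render.
	node.HeadComment = "gui section"

	out, err := yaml.Marshal(&node)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}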
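
For reference, the block rotation that the vendored stable sort relies on ('x u v y' becomes 'x v u y', using only range swaps) can be exercised in isolation. This is a standalone sketch mirroring the unexported rotateOrdered/swapRangeOrdered pair shown in the diff, not the vendored code itself.

// Sketch: in-place rotation of two adjacent blocks, as used by symMerge.
package main

import "fmt"

func swapRange(data []int, a, b, n int) {
	for i := 0; i < n; i++ {
		data[a+i], data[b+i] = data[b+i], data[a+i]
	}
}

// rotate assumes non-degenerate arguments: a < m && m < b.
func rotate(data []int, a, m, b int) {
	i := m - a // length of block u = data[a:m]
	j := b - m // length of block v = data[m:b]
	for i != j {
		if i > j {
			swapRange(data, m-i, m, j)
			i -= j
		} else {
			swapRange(data, m-i, m+j-i, i)
			j -= i
		}
	}
	swapRange(data, m-i, m, i) // i == j
}

func main() {
	data := []int{0, 1, 2, 3, 10, 11, 99}
	rotate(data, 1, 4, 6) // u = [1 2 3], v = [10 11]
	fmt.Println(data)     // [0 10 11 1 2 3 99]
}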