mirror of https://github.com/regclient/regclient.git synced 2025-04-18 22:44:00 +03:00

Merge pull request #910 from sudo-bmitch/pr-go-1-22-modernize

Chore: Modernize Go to the 1.22 specs
Brandon Mitchell 2025-02-18 14:39:13 -05:00 committed by GitHub
commit a4aa2bd97f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
67 changed files with 325 additions and 463 deletions
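For context, a minimal standalone sketch (not from the repository) of the main patterns this commit applies, assuming Go 1.22 or later: ranging over an integer, any in place of interface{}, and dropping the x := x copies that per-iteration loop variables make unnecessary.

package main

import "fmt"

// printAll accepts any value; "any" is the alias for interface{}.
func printAll(vals ...any) {
	for _, v := range vals {
		fmt.Println(v)
	}
}

func main() {
	// Go 1.22: range over an integer replaces "for i := 0; i < 3; i++".
	for i := range 3 {
		fmt.Println("iteration", i)
	}
	// Go 1.22: each iteration gets its own copy of s, so the old
	// "s := s" shadowing before launching a goroutine is no longer needed.
	done := make(chan struct{})
	for _, s := range []string{"a", "b", "c"} {
		go func() {
			fmt.Println(s) // safe: s is per-iteration in Go 1.22+
			done <- struct{}{}
		}()
	}
	for range 3 {
		<-done
	}
	printAll(1, "two", 3.0)
}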

View File

@ -9,7 +9,7 @@ import (
)
// Export takes an input Go interface and converts it to a Lua value
func Export(ls *lua.LState, v interface{}) lua.LValue {
func Export(ls *lua.LState, v any) lua.LValue {
return exportReflect(ls, reflect.ValueOf(v))
}
@ -30,13 +30,13 @@ func exportReflect(ls *lua.LState, v reflect.Value) lua.LValue {
return lua.LString(v.String())
case reflect.Array:
lTab := ls.NewTable()
for i := 0; i < v.Len(); i++ {
for i := range v.Len() {
lTab.RawSetInt(i+1, exportReflect(ls, v.Index(i)))
}
return lTab
case reflect.Slice:
lTab := ls.NewTable()
for i := 0; i < v.Len(); i++ {
for i := range v.Len() {
lTab.RawSetInt(i+1, exportReflect(ls, v.Index(i)))
}
return lTab
@ -55,7 +55,7 @@ func exportReflect(ls *lua.LState, v reflect.Value) lua.LValue {
vType := v.Type()
lTab := ls.NewTable()
foundExported := false
for i := 0; i < vType.NumField(); i++ {
for i := range vType.NumField() {
field := vType.Field(i)
// skip unexported fields
if !v.FieldByName(field.Name).CanInterface() {

View File

@ -1,6 +1,7 @@
package go2lua
import (
"errors"
"fmt"
"reflect"
"runtime/debug"
@ -11,7 +12,7 @@ import (
// Import takes a Lua value and copies matching values into the provided Go interface.
// By providing the orig interface, values that cannot be imported from Lua will be copied from orig.
func Import(ls *lua.LState, lv lua.LValue, v, orig interface{}) (err error) {
func Import(ls *lua.LState, lv lua.LValue, v, orig any) (err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("go2lua import panic: %v\n%s", r, string(debug.Stack()))
@ -63,7 +64,7 @@ func importReflect(ls *lua.LState, lv lua.LValue, v, orig reflect.Value) error {
case reflect.Array:
// If we have an array, and lua is the expected table, iterate and recursively import contents.
if lvi, ok := lv.(*lua.LTable); ok {
for i := 0; i < v.Len(); i++ {
for i := range v.Len() {
// Orig is also iterated on if it has a matching type and length
var origI reflect.Value
if orig.IsValid() && orig.Type() == v.Type() && orig.Len() < i {
@ -80,7 +81,7 @@ func importReflect(ls *lua.LState, lv lua.LValue, v, orig reflect.Value) error {
// Slice follows the same pattern as array, except the slice is first created with the desired size.
if lvi, ok := lv.(*lua.LTable); ok {
newV := reflect.MakeSlice(v.Type(), lvi.Len(), lvi.Len())
for i := 0; i < newV.Len(); i++ {
for i := range newV.Len() {
var origI reflect.Value
if orig.IsValid() && orig.Type() == v.Type() && orig.Len() > i {
origI = orig.Index(i)
@ -94,8 +95,7 @@ func importReflect(ls *lua.LState, lv lua.LValue, v, orig reflect.Value) error {
}
return nil
case reflect.Map:
// TODO: with go 1.20, switch to error list and return errors.Join
var retErr error
var errs []error
if lvi, ok := lv.(*lua.LTable); ok {
newV := reflect.MakeMap(v.Type())
lvi.ForEach(func(lvtKey, lvtElem lua.LValue) {
@ -103,7 +103,7 @@ func importReflect(ls *lua.LState, lv lua.LValue, v, orig reflect.Value) error {
newElem := reflect.Indirect(reflect.New(v.Type().Elem()))
err := importReflect(ls, lvtKey, newKey, reflect.Value{})
if err != nil {
retErr = err
errs = append(errs, err)
}
var origElem reflect.Value
if orig.IsValid() && orig.Type() == v.Type() {
@ -111,18 +111,18 @@ func importReflect(ls *lua.LState, lv lua.LValue, v, orig reflect.Value) error {
}
err = importReflect(ls, lvtElem, newElem, origElem)
if err != nil {
retErr = err
errs = append(errs, err)
}
newV.SetMapIndex(newKey, newElem)
})
v.Set(newV)
}
return retErr
return errors.Join(errs...)
case reflect.Struct:
foundExported := false
if lvi, ok := lv.(*lua.LTable); ok {
vType := v.Type()
for i := 0; i < vType.NumField(); i++ {
for i := range vType.NumField() {
field := vType.Field(i)
// skip unexported fields
if !v.FieldByName(field.Name).CanInterface() {
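The map import above switches from keeping only the last error to collecting every failure and returning errors.Join. A minimal sketch of that pattern over assumed inputs (the parsing helper is hypothetical, not from the repo):

package main

import (
	"errors"
	"fmt"
	"strconv"
)

// parseAll converts every string and reports all failures at once,
// mirroring the errors.Join accumulation used in the import above.
func parseAll(in []string) ([]int, error) {
	var errs []error
	out := make([]int, 0, len(in))
	for _, s := range in {
		n, err := strconv.Atoi(s)
		if err != nil {
			errs = append(errs, err)
			continue
		}
		out = append(out, n)
	}
	// errors.Join returns nil when the slice is empty.
	return out, errors.Join(errs...)
}

func main() {
	nums, err := parseAll([]string{"1", "x", "3", "y"})
	fmt.Println(nums, err)
}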

View File

@ -135,7 +135,6 @@ func (rootOpts *rootCmd) runOnce(cmd *cobra.Command, args []string) error {
var wg sync.WaitGroup
var mainErr error
for _, s := range rootOpts.conf.Scripts {
s := s
if rootOpts.conf.Defaults.Parallel > 0 {
wg.Add(1)
go func() {
@ -174,7 +173,6 @@ func (rootOpts *rootCmd) runServer(cmd *cobra.Command, args []string) error {
cron.SkipIfStillRunning(cron.DefaultLogger),
))
for _, s := range rootOpts.conf.Scripts {
s := s
sched := s.Schedule
if sched == "" && s.Interval != 0 {
sched = "@every " + s.Interval.String()
@ -310,7 +308,7 @@ func (rootOpts *rootCmd) process(ctx context.Context, s ConfigScript) error {
rootOpts.log.Warn("Error running script",
slog.String("script", s.Name),
slog.String("error", err.Error()))
return ErrScriptFailed
return fmt.Errorf("%w%.0w", err, ErrScriptFailed)
}
rootOpts.log.Debug("Finished script",
slog.String("script", s.Name))

View File

@ -7,6 +7,7 @@ import (
lua "github.com/yuin/gopher-lua"
"github.com/regclient/regclient"
"github.com/regclient/regclient/cmd/regbot/internal/go2lua"
"github.com/regclient/regclient/types/manifest"
"github.com/regclient/regclient/types/platform"
@ -104,8 +105,7 @@ func (s *Sandbox) manifestDelete(ls *lua.LState) int {
m := s.checkManifest(ls, 1, true, true)
r := m.r
if r.Digest == "" {
d := m.m.GetDescriptor()
r.Digest = d.Digest.String()
r = r.AddDigest(m.m.GetDescriptor().Digest.String())
}
s.log.Info("Delete manifest",
slog.String("script", s.name),
@ -290,8 +290,7 @@ func (s *Sandbox) rcManifestGet(r ref.Ref, list bool, pStr string) (manifest.Man
if err != nil {
return m, err
}
r.Digest = desc.Digest.String()
m, err = s.rc.ManifestGet(s.ctx, r)
m, err = s.rc.ManifestGet(s.ctx, r, regclient.WithManifestDesc(*desc))
if err != nil {
return m, err
}

View File

@ -106,7 +106,7 @@ func (s *Sandbox) referenceString(ls *lua.LState) int {
func (s *Sandbox) referenceGetSetDigest(ls *lua.LState) int {
r := s.checkReference(ls, 1)
if ls.GetTop() == 2 {
r.r.Digest = ls.CheckString(2)
r.r = r.r.SetDigest(ls.CheckString(2))
return 0
}
ls.Push(lua.LString(r.r.Digest))
@ -116,7 +116,7 @@ func (s *Sandbox) referenceGetSetDigest(ls *lua.LState) int {
func (s *Sandbox) referenceGetSetTag(ls *lua.LState) int {
r := s.checkReference(ls, 1)
if ls.GetTop() == 2 {
r.r.Tag = ls.CheckString(2)
r.r = r.r.SetTag(ls.CheckString(2))
return 0
}
ls.Push(lua.LString(r.r.Tag))

View File

@ -3,6 +3,7 @@ package sandbox
import (
"context"
"fmt"
"log/slog"
"os"
@ -138,7 +139,7 @@ func (s *Sandbox) RunScript(script string) (err error) {
s.log.Error("Runtime error from script",
slog.String("script", s.name),
slog.Any("error", r))
err = ErrScriptFailed
err = fmt.Errorf("%w: %v", ErrScriptFailed, r)
}
}()
return s.ls.DoString(script)
@ -160,7 +161,7 @@ func (s *Sandbox) sandboxLog(ls *lua.LState) int {
// wrapUserData creates a userdata -> wrapped table -> userdata metatable
// structure. This allows references to a struct to resolve for read access,
// while providing access to only the desired methods on the userdata.
func wrapUserData(ls *lua.LState, udVal interface{}, wrapVal interface{}, udType string) (lua.LValue, error) {
func wrapUserData(ls *lua.LState, udVal any, wrapVal any, udType string) (lua.LValue, error) {
ud := ls.NewUserData()
ud.Value = udVal
udTypeMT, ok := (ls.GetTypeMetatable(udType)).(*lua.LTable)

View File

@ -9,6 +9,7 @@ import (
"os"
"path"
"path/filepath"
"slices"
"strings"
// crypto libraries included for go-digest
@ -382,8 +383,7 @@ func (artifactOpts *artifactCmd) runArtifactGet(cmd *cobra.Command, args []strin
if err != nil {
return fmt.Errorf("no matching artifacts found in index: %w", err)
}
r.Digest = d.Digest.String()
m, err = rc.ManifestGet(ctx, r)
m, err = rc.ManifestGet(ctx, r, regclient.WithManifestDesc(d))
if err != nil {
return err
}
@ -434,41 +434,23 @@ func (artifactOpts *artifactCmd) runArtifactGet(cmd *cobra.Command, args []strin
// filter by media-type if defined
if len(artifactOpts.artifactFileMT) > 0 {
for i := len(layers) - 1; i >= 0; i-- {
found := false
for _, mt := range artifactOpts.artifactFileMT {
if layers[i].MediaType == mt {
found = true
break
}
}
if !found {
// remove from slice
layers = append(layers[:i], layers[i+1:]...)
if !slices.Contains(artifactOpts.artifactFileMT, layers[i].MediaType) {
layers = slices.Delete(layers, i, i+1)
}
}
}
// filter by filename if defined
if len(artifactOpts.artifactFile) > 0 {
for i := len(layers) - 1; i >= 0; i-- {
found := false
af, ok := layers[i].Annotations[ociAnnotTitle]
if ok {
for _, f := range artifactOpts.artifactFile {
if af == f {
found = true
break
}
}
}
if !found {
// remove from slice
layers = append(layers[:i], layers[i+1:]...)
if !ok || !slices.Contains(artifactOpts.artifactFile, af) {
layers = slices.Delete(layers, i, i+1)
}
}
}
if len(layers) == 0 {
return fmt.Errorf("no matching layers found in the artifact, verify media-type and filename")
return fmt.Errorf("no matching layers found in the artifact, verify media-type and filename%.0w", errs.ErrNotFound)
}
if artifactOpts.outputDir != "" {
@ -639,7 +621,7 @@ func (artifactOpts *artifactCmd) runArtifactList(cmd *cobra.Command, args []stri
return fmt.Errorf("failed to compute fallback tag: %w", err)
}
for _, t := range tl.Tags {
if strings.HasPrefix(t, prefix.Tag) && !sliceHasStr(rl.Tags, t) {
if strings.HasPrefix(t, prefix.Tag) && !slices.Contains(rl.Tags, t) {
rTag := rl.Subject.SetTag(t)
mh, err := rc.ManifestHead(ctx, rTag, regclient.WithManifestRequireDigest())
if err != nil {
@ -973,8 +955,7 @@ func (artifactOpts *artifactCmd) runArtifactPut(cmd *cobra.Command, args []strin
}
if artifactOpts.byDigest || artifactOpts.index || rArt.IsZero() {
r.Tag = ""
r.Digest = mm.GetDescriptor().Digest.String()
r = r.SetDigest(mm.GetDescriptor().Digest.String())
}
// push manifest
@ -1123,13 +1104,11 @@ func (artifactOpts *artifactCmd) treeAddResult(ctx context.Context, rc *regclien
return nil, err
}
tr.Manifest = m
if r.Digest == "" {
r.Digest = m.GetDescriptor().Digest.String()
}
r = r.AddDigest(m.GetDescriptor().Digest.String())
// track already seen manifests
dig := m.GetDescriptor().Digest.String()
if sliceHasStr(seen, dig) {
if slices.Contains(seen, dig) {
return &tr, fmt.Errorf("%w, already processed %s", ErrLoopEncountered, dig)
}
seen = append(seen, dig)
@ -1199,7 +1178,7 @@ func (artifactOpts *artifactCmd) treeAddResult(ctx context.Context, rc *regclien
return &tr, fmt.Errorf("failed to compute fallback tag: %w", err)
}
for _, t := range tags {
if strings.HasPrefix(t, prefix.Tag) && !sliceHasStr(rl.Tags, t) {
if strings.HasPrefix(t, prefix.Tag) && !slices.Contains(rl.Tags, t) {
rTag := r.SetTag(t)
tReferrer, err := artifactOpts.treeAddResult(ctx, rc, rTag, seen, rOpts, tags)
if tReferrer != nil {
@ -1216,15 +1195,6 @@ func (artifactOpts *artifactCmd) treeAddResult(ctx context.Context, rc *regclien
return &tr, nil
}
func sliceHasStr(list []string, search string) bool {
for _, el := range list {
if el == search {
return true
}
}
return false
}
type treeResult struct {
Ref ref.Ref `json:"reference"`
Manifest manifest.Manifest `json:"manifest"`
@ -1240,7 +1210,7 @@ func (tr *treeResult) MarshalPretty() ([]byte, error) {
if err != nil {
return nil, err
}
return []byte(fmt.Sprintf("Ref: %s\nDigest: %s", tr.Ref.CommonName(), mp)), nil
return fmt.Appendf(nil, "Ref: %s\nDigest: %s", tr.Ref.CommonName(), mp), nil
}
func (tr *treeResult) marshalPretty(indent string) ([]byte, error) {
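The layer filtering above replaces hand-rolled membership loops and append-based removal with slices.Contains and slices.Delete. A minimal sketch of the same in-place, reverse-order filter over made-up data:

package main

import (
	"fmt"
	"slices"
)

type layer struct {
	MediaType string
}

func main() {
	layers := []layer{
		{"application/example.layer.1"},
		{"application/example.layer.2"},
		{"application/example.layer.3"},
	}
	keep := []string{"application/example.layer.2"}
	// Iterate in reverse so deleting index i does not shift the
	// entries that have not been examined yet.
	for i := len(layers) - 1; i >= 0; i-- {
		if !slices.Contains(keep, layers[i].MediaType) {
			layers = slices.Delete(layers, i, i+1)
		}
	}
	fmt.Println(layers) // [{application/example.layer.2}]
}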

View File

@ -40,6 +40,26 @@ func TestArtifactGet(t *testing.T) {
args: []string{"artifact", "get", "ocidir://../../testdata/testrepo:a1"},
expectOut: "eggs",
},
{
name: "By Manifest filter layer media type",
args: []string{"artifact", "get", "ocidir://../../testdata/testrepo:a3", "--file-media-type", "application/example.layer.2"},
expectOut: "2",
},
{
name: "By Manifest filter layer filename",
args: []string{"artifact", "get", "ocidir://../../testdata/testrepo:a3", "--file", "layer3.txt"},
expectOut: "3",
},
{
name: "By Manifest filter layer media type missing",
args: []string{"artifact", "get", "ocidir://../../testdata/testrepo:a3", "--file-media-type", "application/example.missing"},
expectErr: errs.ErrNotFound,
},
{
name: "By Manifest filter layer filename missing",
args: []string{"artifact", "get", "ocidir://../../testdata/testrepo:a3", "--file", "missing.txt"},
expectErr: errs.ErrNotFound,
},
{
name: "By Subject",
args: []string{"artifact", "get", "--subject", "ocidir://../../testdata/testrepo:v2", "--filter-artifact-type", "application/example.sbom"},

View File

@ -71,7 +71,7 @@ func (rootOpts *rootCmd) completeArgTag(cmd *cobra.Command, args []string, toCom
}
for _, tag := range tags {
resultRef, _ := ref.New(input)
resultRef.Tag = tag
resultRef = resultRef.SetTag(tag)
resultCN := resultRef.CommonName()
if strings.HasPrefix(resultCN, toComplete) {
result = append(result, resultCN)

View File

@ -3,6 +3,7 @@ package main
import (
"archive/tar"
"bytes"
"cmp"
"context"
"encoding/json"
"errors"
@ -11,7 +12,7 @@ import (
"log/slog"
"os"
"regexp"
"sort"
"slices"
"strconv"
"strings"
"sync"
@ -1081,7 +1082,7 @@ func (imageOpts *imageCmd) runImageCopy(cmd *cobra.Command, args []string) error
if err != nil {
return err
}
rSrc = rSrc.SetDigest(m.GetDescriptor().Digest.String())
rSrc = rSrc.AddDigest(m.GetDescriptor().Digest.String())
}
imageOpts.rootOpts.log.Debug("Image copy",
slog.String("source", rSrc.CommonName()),
@ -1223,13 +1224,18 @@ func (ip *imageProgress) display(final bool) {
for k := range ip.entries {
keys = append(keys, k)
}
sort.Slice(keys, func(a, b int) bool {
if ip.entries[keys[a]].state != ip.entries[keys[b]].state {
return ip.entries[keys[a]].state > ip.entries[keys[b]].state
} else if ip.entries[keys[a]].state != types.CallbackActive {
return ip.entries[keys[a]].last.Before(ip.entries[keys[b]].last)
slices.SortFunc(keys, func(a, b string) int {
// show finished entries at the top, queued entries on the bottom
if ip.entries[a].state > ip.entries[b].state {
return -1
} else if ip.entries[a].state < ip.entries[b].state {
return 1
} else if ip.entries[a].state != types.CallbackActive {
// sort inactive entries by finish time
return ip.entries[a].last.Compare(ip.entries[b].last)
} else {
return ip.entries[keys[a]].cur > ip.entries[keys[b]].cur
// sort bytes sent descending
return cmp.Compare(ip.entries[a].cur, ip.entries[b].cur) * -1
}
})
startCount, startLimit := 0, 2
@ -1276,15 +1282,14 @@ func (ip *imageProgress) display(final bool) {
}
}
// show stats summary
ip.asciiOut.Add([]byte(fmt.Sprintf("Manifests: %d/%d | Blobs: %s copied, %s skipped",
ip.asciiOut.Add(fmt.Appendf(nil, "Manifests: %d/%d | Blobs: %s copied, %s skipped",
manifestFinished, manifestTotal,
units.HumanSize(float64(sum)),
units.HumanSize(float64(skipped)))))
units.HumanSize(float64(skipped))))
if queued > 0 {
ip.asciiOut.Add([]byte(fmt.Sprintf(", %s queued",
units.HumanSize(float64(queued)))))
ip.asciiOut.Add(fmt.Appendf(nil, ", %s queued", units.HumanSize(float64(queued))))
}
ip.asciiOut.Add([]byte(fmt.Sprintf(" | Elapsed: %ds\n", int64(time.Since(ip.start).Seconds()))))
ip.asciiOut.Add(fmt.Appendf(nil, " | Elapsed: %ds\n", int64(time.Since(ip.start).Seconds())))
ip.asciiOut.Flush()
if !final {
ip.asciiOut.Return()
@ -1406,8 +1411,7 @@ func (imageOpts *imageCmd) runImageCreate(cmd *cobra.Command, args []string) err
// push the image
if imageOpts.byDigest {
r.Tag = ""
r.Digest = mm.GetDescriptor().Digest.String()
r = r.SetDigest(mm.GetDescriptor().Digest.String())
}
err = rc.ManifestPut(ctx, r, mm)
if err != nil {
@ -1457,7 +1461,7 @@ func (imageOpts *imageCmd) runImageExport(cmd *cobra.Command, args []string) err
if err != nil {
return err
}
r = r.SetDigest(m.GetDescriptor().Digest.String())
r = r.AddDigest(m.GetDescriptor().Digest.String())
}
if imageOpts.exportCompress {
opts = append(opts, regclient.ImageWithExportCompress())
@ -1660,8 +1664,7 @@ func (imageOpts *imageCmd) runImageMod(cmd *cobra.Command, args []string) error
} else if imageOpts.replace {
rTgt = rSrc
} else {
rTgt = rSrc
rTgt.Tag = ""
rTgt = rSrc.SetTag("")
}
imageOpts.modOpts = append(imageOpts.modOpts, mod.WithRefTgt(rTgt))
rc := imageOpts.rootOpts.newRegClient()
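The progress display above moves from sort.Slice's less-than callback to slices.SortFunc, whose comparator returns a negative, zero, or positive int; cmp.Compare supplies that for ordered types. A small sketch with invented entries:

package main

import (
	"cmp"
	"fmt"
	"slices"
)

type entry struct {
	name  string
	state int   // higher state sorts first
	cur   int64 // bytes sent, descending within a state
}

func main() {
	entries := []entry{
		{"queued", 0, 0},
		{"active-small", 1, 10},
		{"finished", 2, 500},
		{"active-big", 1, 900},
	}
	slices.SortFunc(entries, func(a, b entry) int {
		if a.state != b.state {
			return cmp.Compare(b.state, a.state) // state descending
		}
		return cmp.Compare(b.cur, a.cur) // bytes descending
	})
	for _, e := range entries {
		fmt.Printf("%s state=%d cur=%d\n", e.name, e.state, e.cur)
	}
}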

View File

@ -3,6 +3,7 @@ package main
import (
"context"
"fmt"
"slices"
"strings"
"github.com/opencontainers/go-digest"
@ -191,8 +192,8 @@ func (indexOpts *indexCmd) runIndexAdd(cmd *cobra.Command, args []string) error
}
// push the index
if r.Tag == "" && r.Digest != "" {
r.Digest = m.GetDescriptor().Digest.String()
if r.Digest != "" {
r = r.AddDigest(m.GetDescriptor().Digest.String())
}
err = rc.ManifestPut(ctx, r, m)
if err != nil {
@ -301,8 +302,7 @@ func (indexOpts *indexCmd) runIndexCreate(cmd *cobra.Command, args []string) err
// push the index
if indexOpts.byDigest {
r.Tag = ""
r.Digest = mm.GetDescriptor().Digest.String()
r = r.SetDigest(mm.GetDescriptor().Digest.String())
}
err = rc.ManifestPut(ctx, r, mm)
if err != nil {
@ -357,11 +357,7 @@ func (indexOpts *indexCmd) runIndexDelete(cmd *cobra.Command, args []string) err
i := len(curDesc) - 1
for i >= 0 {
if curDesc[i].Digest.String() == dig {
if i < len(curDesc)-1 {
curDesc = append(curDesc[:i], curDesc[i+1:]...)
} else {
curDesc = curDesc[:i]
}
curDesc = slices.Delete(curDesc, i, i+1)
}
i--
}
@ -374,11 +370,7 @@ func (indexOpts *indexCmd) runIndexDelete(cmd *cobra.Command, args []string) err
i := len(curDesc) - 1
for i >= 0 {
if curDesc[i].Platform != nil && platform.Match(plat, *curDesc[i].Platform) {
if i < len(curDesc)-1 {
curDesc = append(curDesc[:i], curDesc[i+1:]...)
} else {
curDesc = curDesc[:i]
}
curDesc = slices.Delete(curDesc, i, i+1)
}
i--
}
@ -391,8 +383,8 @@ func (indexOpts *indexCmd) runIndexDelete(cmd *cobra.Command, args []string) err
}
// push the index
if r.Tag == "" && r.Digest != "" {
r.Digest = m.GetDescriptor().Digest.String()
if r.Digest != "" {
r = r.AddDigest(m.GetDescriptor().Digest.String())
}
err = rc.ManifestPut(ctx, r, m)
if err != nil {
@ -556,11 +548,7 @@ func indexDescListRmDup(dl []descriptor.Descriptor) []descriptor.Descriptor {
j := len(dl) - 1
for j > i {
if dl[i].Equal(dl[j]) {
if j < len(dl)-1 {
dl = append(dl[:j], dl[j+1:]...)
} else {
dl = dl[:j]
}
dl = slices.Delete(dl, j, j+1)
}
j--
}

View File

@ -195,10 +195,10 @@ func (manifestOpts *manifestCmd) runManifestDelete(cmd *cobra.Command, args []st
if err != nil {
return err
}
r.Digest = manifest.GetDigest(m).String()
r = r.AddDigest(manifest.GetDigest(m).String())
manifestOpts.rootOpts.log.Debug("Forced dereference of tag",
slog.String("tag", r.Tag),
slog.String("digest", r.Digest))
slog.String("orig", args[0]),
slog.String("resolved", r.CommonName()))
}
manifestOpts.rootOpts.log.Debug("Manifest delete",
@ -391,8 +391,7 @@ func (manifestOpts *manifestCmd) runManifestPut(cmd *cobra.Command, args []strin
return err
}
if manifestOpts.byDigest {
r.Tag = ""
r.Digest = rcM.GetDescriptor().Digest.String()
r = r.SetDigest(rcM.GetDescriptor().Digest.String())
}
err = rc.ManifestPut(ctx, r, rcM)

View File

@ -69,5 +69,4 @@ func TestManifestHead(t *testing.T) {
}
})
}
}

View File

@ -7,6 +7,7 @@ import (
"log/slog"
"os"
"regexp"
"slices"
"strings"
"sync"
"time"
@ -196,7 +197,6 @@ func (rootOpts *rootCmd) runOnce(cmd *cobra.Command, args []string) error {
var wg sync.WaitGroup
var mainErr error
for _, s := range rootOpts.conf.Sync {
s := s
if rootOpts.conf.Defaults.Parallel > 0 {
wg.Add(1)
go func() {
@ -236,7 +236,6 @@ func (rootOpts *rootCmd) runServer(cmd *cobra.Command, args []string) error {
cron.SkipIfStillRunning(cron.DefaultLogger),
))
for _, s := range rootOpts.conf.Sync {
s := s
sched := s.Schedule
if sched == "" && s.Interval != 0 {
sched = "@every " + s.Interval.String()
@ -535,7 +534,7 @@ func (rootOpts *rootCmd) processRepo(ctx context.Context, s ConfigSync, src, tgt
for sI >= 0 && tI >= 0 {
switch strings.Compare(sTagList[sI], tTagList[tI]) {
case 0:
sTagList = append(sTagList[:sI], sTagList[sI+1:]...)
sTagList = slices.Delete(sTagList, sI, sI+1)
sI--
tI--
case -1:
@ -628,14 +627,7 @@ func (rootOpts *rootCmd) processRef(ctx context.Context, s ConfigSync, src, tgt
// skip when source manifest is an unsupported type
smt := manifest.GetMediaType(mSrc)
found := false
for _, mt := range s.MediaTypes {
if mt == smt {
found = true
break
}
}
if !found {
if !slices.Contains(s.MediaTypes, smt) {
rootOpts.log.Info("Skipping unsupported media type",
slog.String("ref", src.CommonName()),
slog.String("mediaType", manifest.GetMediaType(mSrc)),
@ -767,7 +759,7 @@ func (rootOpts *rootCmd) processRef(ctx context.Context, s ConfigSync, src, tgt
}
} else {
// else parse backup string as just a tag
backupRef.Tag = backupStr
backupRef = backupRef.SetTag(backupStr)
}
defer rootOpts.rc.Close(ctx, backupRef)
// run copy from tgt ref to backup ref

View File

@ -23,7 +23,6 @@ func TestDocker(t *testing.T) {
}
hostMap := map[string]*Host{}
for _, h := range hosts {
h := h // shadow h for unique var/pointer
hostMap[h.Name] = &h
}
tt := []struct {

View File

@ -6,6 +6,7 @@ import (
"fmt"
"io"
"log/slog"
"slices"
"strings"
"time"
@ -411,7 +412,7 @@ func (host *Host) Merge(newHost Host, log *slog.Logger) error {
}
if len(newHost.Mirrors) > 0 {
if len(host.Mirrors) > 0 && !stringSliceEq(host.Mirrors, newHost.Mirrors) {
if len(host.Mirrors) > 0 && !slices.Equal(host.Mirrors, newHost.Mirrors) {
log.Warn("Changing mirror settings for registry",
slog.Any("orig", host.Mirrors),
slog.Any("new", newHost.Mirrors),
@ -510,15 +511,3 @@ func copyMapString(src map[string]string) map[string]string {
}
return copy
}
func stringSliceEq(a, b []string) bool {
if len(a) != len(b) {
return false
}
for i, v := range a {
if v != b[i] {
return false
}
}
return true
}

View File

@ -2,6 +2,7 @@ package regclient
import (
"archive/tar"
"cmp"
"compress/gzip"
"context"
"encoding/json"
@ -11,6 +12,7 @@ import (
"log/slog"
"net/url"
"path/filepath"
"slices"
"strings"
"sync"
"time"
@ -325,8 +327,7 @@ func (rc *RegClient) ImageCheckBase(ctx context.Context, r ref.Ref, opts ...Imag
if err != nil {
return err
}
rp := r
rp.Digest = d.Digest.String()
rp := r.AddDigest(d.Digest.String())
m, err = rc.ManifestGet(ctx, rp)
if err != nil {
return err
@ -342,9 +343,8 @@ func (rc *RegClient) ImageCheckBase(ctx context.Context, r ref.Ref, opts ...Imag
if err != nil {
return err
}
rp := r
for _, d := range dl {
rp.Digest = d.Digest.String()
rp := r.AddDigest(d.Digest.String())
optP := append(opts, ImageWithPlatform(d.Platform.String()))
err = rc.ImageCheckBase(ctx, rp, optP...)
if err != nil {
@ -374,9 +374,7 @@ func (rc *RegClient) ImageCheckBase(ctx context.Context, r ref.Ref, opts ...Imag
if err != nil {
return err
}
rp := baseR
rp.Digest = d.Digest.String()
baseM, err = rc.ManifestGet(ctx, rp)
baseM, err = rc.ManifestGet(ctx, baseR, WithManifestDesc(*d))
if err != nil {
return err
}
@ -656,7 +654,6 @@ func (rc *RegClient) imageCopyOpt(ctx context.Context, refSrc ref.Ref, refTgt re
continue
}
}
dEntry := dEntry
waitCount++
go func() {
var err error
@ -737,7 +734,6 @@ func (rc *RegClient) imageCopyOpt(ctx context.Context, refSrc ref.Ref, refTgt re
continue
}
waitCount++
layerSrc := layerSrc
go func() {
rc.slog.Info("Copy layer",
slog.String("source", refSrc.Reference),
@ -830,7 +826,6 @@ func (rc *RegClient) imageCopyOpt(ctx context.Context, refSrc ref.Ref, refTgt re
}
referrerSrc := referrerSrc.SetDigest(rDesc.Digest.String())
referrerTgt := referrerTgt.SetDigest(rDesc.Digest.String())
rDesc := rDesc
waitCount++
go func() {
err := rc.imageCopyOpt(ctx, referrerSrc, referrerTgt, rDesc, true, parentsNew, opt)
@ -886,19 +881,11 @@ func (rc *RegClient) imageCopyOpt(ctx context.Context, refSrc ref.Ref, refTgt re
for _, tag := range opt.tagList {
if strings.HasPrefix(tag, prefix) {
// skip referrers that were copied above
found := false
for _, referrerTag := range referrerTags {
if referrerTag == tag {
found = true
break
}
}
if found {
if slices.Contains(referrerTags, tag) {
continue
}
refTagSrc := refSrc.SetTag(tag)
refTagTgt := refTgt.SetTag(tag)
tag := tag
waitCount++
go func() {
err := rc.imageCopyOpt(ctx, refTagSrc, refTagTgt, descriptor.Descriptor{}, false, parentsNew, opt)
@ -1121,12 +1108,7 @@ func (rc *RegClient) ImageExport(ctx context.Context, r ref.Ref, outStream io.Wr
return err
}
refTag := opt.exportRef.ToReg()
if refTag.Digest != "" {
refTag.Digest = ""
}
if refTag.Tag == "" {
refTag.Tag = "latest"
}
refTag = refTag.SetTag(cmp.Or(refTag.Tag, "latest"))
dockerManifest := dockerTarManifest{
RepoTags: []string{refTag.CommonName()},
Config: tarOCILayoutDescPath(conf),
@ -1384,14 +1366,9 @@ func (rc *RegClient) imageImportDockerAddLayerHandlers(ctx context.Context, r re
tags := []string{}
for i, entry := range trd.dockerManifestList {
tags = append(tags, entry.RepoTags...)
for _, tag := range entry.RepoTags {
if tag == trd.name {
index = i
found = true
break
}
}
if found {
if slices.Contains(entry.RepoTags, trd.name) {
index = i
found = true
break
}
}
@ -1674,8 +1651,7 @@ func (rc *RegClient) imageImportOCIHandleManifest(ctx context.Context, r ref.Ref
// add a finish func to push the manifest, this gets skipped for the index.json
if push {
trd.finish = append(trd.finish, func() error {
mRef := r
mRef.Digest = string(m.GetDescriptor().Digest)
mRef := r.SetDigest(m.GetDescriptor().Digest.String())
_, err := rc.ManifestHead(ctx, mRef)
if err == nil {
return nil
@ -1706,10 +1682,8 @@ func (rc *RegClient) imageImportOCIPushManifests(_ context.Context, _ ref.Ref, t
func imagePlatformInList(target *platform.Platform, list []string) (bool, error) {
// special case for an unset platform
if target == nil || target.OS == "" {
for _, entry := range list {
if entry == "" {
return true, nil
}
if slices.Contains(list, "") {
return true, nil
}
return false, nil
}
@ -1818,10 +1792,8 @@ func (trd *tarReadData) tarReadAll(rs io.ReadSeeker) error {
}
func (trd *tarReadData) linkAdd(src, tgt string) bool {
for _, entry := range trd.links[tgt] {
if entry == src {
return false
}
if slices.Contains(trd.links[tgt], src) {
return false
}
trd.links[tgt] = append(trd.links[tgt], src)
return true
@ -1839,7 +1811,7 @@ func (trd *tarReadData) linkList(tgt string) ([]string, error) {
}
// tarReadFileJSON reads the current tar entry and unmarshals json into provided interface.
func (trd *tarReadData) tarReadFileJSON(data interface{}) error {
func (trd *tarReadData) tarReadFileJSON(data any) error {
b, err := io.ReadAll(trd.tr)
if err != nil {
return err
@ -1895,7 +1867,7 @@ func (td *tarWriteData) tarWriteHeader(filename string, size int64) error {
return td.tw.WriteHeader(&header)
}
func (td *tarWriteData) tarWriteFileJSON(filename string, data interface{}) error {
func (td *tarWriteData) tarWriteFileJSON(filename string, data any) error {
dataJSON, err := json.Marshal(data)
if err != nil {
return err
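The export path above uses cmp.Or to pick the first non-zero value when defaulting an empty tag to "latest", replacing an if/else chain. A minimal sketch, assuming Go 1.22+ where cmp.Or was added:

package main

import (
	"cmp"
	"fmt"
)

func main() {
	// cmp.Or returns the first argument that is not the zero value.
	fmt.Println(cmp.Or("", "latest"))       // latest
	fmt.Println(cmp.Or("v1.2.3", "latest")) // v1.2.3
	fmt.Println(cmp.Or(0, 42))              // 42
}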

View File

@ -401,7 +401,6 @@ func TestCopy(t *testing.T) {
},
}
for _, tc := range tt {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
rSrc, err := ref.New(tc.src)

View File

@ -42,15 +42,11 @@ func (p *ProgressBar) Generate(pct float64, pre, post string) []byte {
pct = 1
}
curWidth := p.Width - (len(pre) + len(post) + 2)
if curWidth < p.Min {
curWidth = p.Min
} else if curWidth > p.Max {
curWidth = p.Max
}
curWidth = min(max(curWidth, p.Min), p.Max)
buf := make([]byte, curWidth)
doneLen := int(float64(curWidth) * pct)
for i := 0; i < doneLen; i++ {
for i := range doneLen {
buf[i] = p.Done
}
if doneLen < curWidth {
@ -59,5 +55,5 @@ func (p *ProgressBar) Generate(pct float64, pre, post string) []byte {
for i := doneLen + 1; i < curWidth; i++ {
buf[i] = p.Pending
}
return []byte(fmt.Sprintf("%s%c%s%c%s\n", pre, p.Start, buf, p.End, post))
return fmt.Appendf(nil, "%s%c%s%c%s\n", pre, p.Start, buf, p.End, post)
}
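The bar width above is clamped with the built-in min and max (Go 1.21+), and the final line is built with fmt.Appendf instead of []byte(fmt.Sprintf(...)). A short sketch of both, under assumed bounds:

package main

import "fmt"

// clamp keeps v within [lo, hi] using the Go 1.21+ built-ins.
func clamp(v, lo, hi int) int {
	return min(max(v, lo), hi)
}

func main() {
	fmt.Println(clamp(150, 10, 80)) // 80
	fmt.Println(clamp(3, 10, 80))   // 10
	// fmt.Appendf appends formatted output to a byte slice,
	// avoiding the intermediate string from fmt.Sprintf.
	buf := fmt.Appendf(nil, "width=%d\n", clamp(42, 10, 80))
	fmt.Print(string(buf)) // width=42
}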

View File

@ -36,7 +36,7 @@ const (
)
func init() {
for c := 0; c < 256; c++ {
for c := range 256 {
charLUs[c] = 0
if strings.ContainsRune(" \t\r\n", rune(c)) {
charLUs[c] |= isSpace

View File

@ -25,7 +25,7 @@ const (
)
func init() {
for c := 0; c < 256; c++ {
for c := range 256 {
charLUs[c] = 0
if strings.ContainsRune(" \t\r\n", rune(c)) {
charLUs[c] |= isSpace

View File

@ -4,6 +4,7 @@ package pqueue
import (
"context"
"fmt"
"slices"
"sync"
)
@ -63,18 +64,11 @@ func (q *Queue[T]) Acquire(ctx context.Context, e T) (func(), error) {
case <-ctx.Done():
// context abort, remove queued entry
q.mu.Lock()
for i := range q.queued {
if q.queued[i] == &e {
if len(q.queued) >= i+1 {
q.queued = q.queued[:i]
q.wait = q.wait[:i]
} else {
q.queued = append(q.queued[:i], q.queued[i+1:]...)
q.wait = append(q.wait[:i], q.wait[i+1:]...)
}
q.mu.Unlock()
return nil, ctx.Err()
}
if i := slices.Index(q.queued, &e); i >= 0 {
q.queued = slices.Delete(q.queued, i, i+1)
q.wait = slices.Delete(q.wait, i, i+1)
q.mu.Unlock()
return nil, ctx.Err()
}
q.mu.Unlock()
// queued entry found, assume race condition with context and entry being released, release next entry
@ -113,15 +107,8 @@ func (q *Queue[T]) release(prev *T) {
q.mu.Lock()
defer q.mu.Unlock()
// remove prev entry from active list
for i := range q.active {
if q.active[i] == prev {
if i == len(q.active)+1 {
q.active = q.active[:i]
} else {
q.active = append(q.active[:i], q.active[i+1:]...)
}
break
}
if i := slices.Index(q.active, prev); i >= 0 {
q.active = slices.Delete(q.active, i, i+1)
}
// skip checks when at limit or nothing queued
if len(q.queued) == 0 {
@ -140,23 +127,13 @@ func (q *Queue[T]) release(prev *T) {
if q.next != nil && len(q.queued) > 1 {
i = q.next(q.queued, q.active)
// validate response
if i < 0 {
i = 0
}
if i >= len(q.queued) {
i = len(q.queued) - 1
}
i = max(min(i, len(q.queued)-1), 0)
}
// release queued entry, move to active list, and remove from queued/wait lists
close(*q.wait[i])
q.active = append(q.active, q.queued[i])
if i == len(q.queued)-1 {
q.queued = q.queued[:i]
q.wait = q.wait[:i]
} else {
q.queued = append(q.queued[:i], q.queued[i+1:]...)
q.wait = append(q.wait[:i], q.wait[i+1:]...)
}
q.queued = slices.Delete(q.queued, i, i+1)
q.wait = slices.Delete(q.wait, i, i+1)
}
// releaseFn is a convenience wrapper around [release].
@ -191,11 +168,7 @@ func AcquireMulti[T any](ctx context.Context, e T, qList ...*Queue[T]) (context.
// delete nil entries
for i := len(qList) - 1; i >= 0; i-- {
if qList[i] == nil {
if i == len(qList)-1 {
qList = qList[:i]
} else {
qList = append(qList[:i], qList[i+1:]...)
}
qList = slices.Delete(qList, i, i+1)
}
}
// empty/nil list is a noop
@ -276,11 +249,9 @@ func (q *Queue[T]) checkContext(ctx context.Context) (bool, error) {
if qCtxVal.qList == nil {
return false, nil
}
for _, cur := range qCtxVal.qList {
if cur == q {
// instance already locked
return true, nil
}
if slices.Contains(qCtxVal.qList, q) {
// instance already locked
return true, nil
}
return true, fmt.Errorf("cannot acquire new locks during a transaction")
}
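The queue bookkeeping above replaces manual index loops with slices.Index to locate an entry by pointer identity and slices.Delete to drop it. A minimal sketch with a hypothetical list of int pointers:

package main

import (
	"fmt"
	"slices"
)

func main() {
	a, b, c := 1, 2, 3
	active := []*int{&a, &b, &c}
	// slices.Index compares with ==, so for pointers this is identity.
	if i := slices.Index(active, &b); i >= 0 {
		active = slices.Delete(active, i, i+1)
	}
	for _, p := range active {
		fmt.Println(*p) // 1 then 3
	}
}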

View File

@ -63,14 +63,14 @@ func TestQueue(t *testing.T) {
}
// background acquire two more, which should block
for _, i := range []int{2, 3} {
go func(i int) {
go func() {
done, err := q.Acquire(ctx, eList[i])
if err != nil {
t.Errorf("failed to acquire queue %d: %v", i, err)
}
finished <- i
done()
}(i)
}()
}
// verify background jobs blocked
sleepMS(2)

View File

@ -16,6 +16,7 @@ import (
"os"
"path/filepath"
"regexp"
"slices"
"sort"
"strconv"
"strings"
@ -528,7 +529,7 @@ func (resp *Resp) next() error {
}
err = loopErr
if dropHost {
hosts = append(hosts[:curHost], hosts[curHost+1:]...)
hosts = slices.Delete(hosts, curHost, curHost+1)
} else if !retryHost {
curHost++
}
@ -643,9 +644,7 @@ func (resp *Resp) backoffGet() time.Time {
defer ch.mu.Unlock()
if ch.backoffCur > 0 {
delay := c.delayInit << ch.backoffCur
if delay > c.delayMax {
delay = c.delayMax
}
delay = min(delay, c.delayMax)
next := ch.backoffLast.Add(delay)
now := time.Now()
if now.After(next) {

View File

@ -1644,7 +1644,7 @@ func TestRegHttp(t *testing.T) {
Headers: headers,
}
chResults := make(chan error)
for i := 0; i < count; i++ {
for range count {
go func() {
resp, err := hc.Do(ctxTimeout, getReq)
if err == nil {
@ -1653,7 +1653,7 @@ func TestRegHttp(t *testing.T) {
chResults <- err
}()
}
for i := 0; i < count; i++ {
for range count {
err := <-chResults
if err == nil {
t.Errorf("unexpected success on get for missing manifest")
@ -1670,7 +1670,7 @@ func TestRegHttp(t *testing.T) {
}
start := time.Now()
count := 10
for i := 0; i < count; i++ {
for range count {
resp, err := hc.Do(ctx, getReq)
if err != nil {
t.Fatalf("failed to run get: %v", err)

View File

@ -8,6 +8,7 @@ import (
"math/rand"
"net/http"
"regexp"
"slices"
"sync"
"testing"
@ -66,14 +67,7 @@ func strMapMatch(a, b map[string][]string) bool {
return false
}
for _, ave := range av {
found := false
for _, bve := range bv {
if ave == bve {
found = true
break
}
}
if !found {
if !slices.Contains(bv, ave) {
return false
}
}
@ -81,18 +75,6 @@ func strMapMatch(a, b map[string][]string) bool {
return true
}
func stateMatch(state string, list []string) bool {
if len(list) == 0 {
return true
}
for _, entry := range list {
if entry == state {
return true
}
}
return false
}
func (r *rrHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
reqBody, err := io.ReadAll(req.Body)
if err != nil {
@ -105,7 +87,7 @@ func (r *rrHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
defer r.mu.Unlock()
for i, rr := range r.rrs {
reqMatch := rr.ReqEntry
if !stateMatch(r.state, reqMatch.IfState) ||
if (len(reqMatch.IfState) > 0 && !slices.Contains(reqMatch.IfState, r.state)) ||
reqMatch.Method != req.Method ||
(reqMatch.PathRE != nil && !reqMatch.PathRE.MatchString(req.URL.Path)) ||
(reqMatch.Path != "" && reqMatch.Path != req.URL.Path) ||
@ -129,7 +111,7 @@ func (r *rrHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
// for single use test cases, delete this entry
if reqMatch.DelOnUse {
r.rrs = append(r.rrs[:i], r.rrs[i+1:]...)
r.rrs = slices.Delete(r.rrs, i, i+1)
}
// update current state
if reqMatch.SetState != "" {

View File

@ -20,7 +20,7 @@ func (d Duration) MarshalJSON() ([]byte, error) {
// UnmarshalJSON converts json to a duration
func (d *Duration) UnmarshalJSON(b []byte) error {
var v interface{}
var v any
if err := json.Unmarshal(b, &v); err != nil {
return err
}

View File

@ -99,7 +99,7 @@ func (rc *RegClient) ManifestGet(ctx context.Context, r ref.Ref, opts ...Manifes
fn(&opt)
}
if opt.d.Digest != "" {
r.Digest = opt.d.Digest.String()
r = r.AddDigest(opt.d.Digest.String())
data, err := opt.d.GetData()
if err == nil {
return manifest.New(

View File

@ -4,6 +4,7 @@ import (
"context"
"fmt"
"regexp"
"slices"
"strconv"
"strings"
"time"
@ -29,7 +30,7 @@ func WithBuildArgRm(arg string, value *regexp.Regexp) Opts {
for i := len(oc.History) - 1; i >= 0; i-- {
if argexp.MatchString(oc.History[i].CreatedBy) && oc.History[i].EmptyLayer {
// delete empty build arg history entry
oc.History = append(oc.History[:i], oc.History[i+1:]...)
oc.History = slices.Delete(oc.History, i, i+1)
changed = true
} else if match := runexp.FindStringSubmatch(oc.History[i].CreatedBy); len(match) == 4 {
// delete arg from run steps
@ -61,7 +62,7 @@ func WithConfigCmd(cmd []string) Opts {
return func(dc *dagConfig, dm *dagManifest) error {
dc.stepsOCIConfig = append(dc.stepsOCIConfig, func(ctx context.Context, rc *regclient.RegClient, rSrc, rTgt ref.Ref, doc *dagOCIConfig) error {
oc := doc.oc.GetConfig()
if eqStrSlice(cmd, oc.Config.Cmd) {
if slices.Equal(cmd, oc.Config.Cmd) {
return nil
}
oc.Config.Cmd = cmd
@ -112,7 +113,7 @@ func WithConfigEntrypoint(entrypoint []string) Opts {
return func(dc *dagConfig, dm *dagManifest) error {
dc.stepsOCIConfig = append(dc.stepsOCIConfig, func(ctx context.Context, rc *regclient.RegClient, rSrc, rTgt ref.Ref, doc *dagOCIConfig) error {
oc := doc.oc.GetConfig()
if eqStrSlice(entrypoint, oc.Config.Entrypoint) {
if slices.Equal(entrypoint, oc.Config.Entrypoint) {
return nil
}
oc.Config.Entrypoint = entrypoint
@ -320,11 +321,7 @@ func WithEnv(name, value string) Opts {
found = true
if value == "" {
// delete an entry
if i < len(oc.Config.Env)-1 {
oc.Config.Env = append(oc.Config.Env[:i], oc.Config.Env[i+1:]...)
} else {
oc.Config.Env = oc.Config.Env[:i]
}
oc.Config.Env = slices.Delete(oc.Config.Env, i, i+1)
changed = true
} else if len(kvSplit) < 2 || value != kvSplit[1] {
// change an entry

View File

@ -3,10 +3,12 @@ package mod
import (
"archive/tar"
"bytes"
"cmp"
"context"
"errors"
"fmt"
"io"
"slices"
"github.com/opencontainers/go-digest"
@ -96,7 +98,7 @@ func dagGet(ctx context.Context, rc *regclient.RegClient, rSrc ref.Ref, d descri
cd, err := mi.GetConfig()
if err != nil && !errors.Is(err, errs.ErrUnsupportedMediaType) {
return nil, err
} else if err == nil && inListStr(cd.MediaType, mtKnownConfig) {
} else if err == nil && slices.Contains(mtKnownConfig, cd.MediaType) {
oc, err := rc.BlobGetOCIConfig(ctx, rSrc, cd)
if err != nil {
return nil, err
@ -202,7 +204,7 @@ func dagPut(ctx context.Context, rc *regclient.RegClient, mc dagConfig, rSrc, rT
if child.mod != deleted {
continue
}
ociI.Manifests = append(ociI.Manifests[:i], ociI.Manifests[i+1:]...)
ociI.Manifests = slices.Delete(ociI.Manifests, i, i+1)
changed = true
}
err = manifest.OCIIndexToAny(ociI, &om)
@ -315,12 +317,12 @@ func dagPut(ctx context.Context, rc *regclient.RegClient, mc dagConfig, rSrc, rT
}
continue
}
ociM.Layers = append(ociM.Layers[:i], ociM.Layers[i+1:]...)
ociM.Layers = slices.Delete(ociM.Layers, i, i+1)
if oc.RootFS.DiffIDs != nil && len(oc.RootFS.DiffIDs) >= i+1 {
oc.RootFS.DiffIDs = append(oc.RootFS.DiffIDs[:i], oc.RootFS.DiffIDs[i+1:]...)
oc.RootFS.DiffIDs = slices.Delete(oc.RootFS.DiffIDs, i, i+1)
}
if iConfig >= 0 {
oc.History = append(oc.History[:iConfig], oc.History[iConfig+1:]...)
oc.History = slices.Delete(oc.History, iConfig, iConfig+1)
iConfig--
}
changed = true
@ -435,18 +437,10 @@ func dagPut(ctx context.Context, rc *regclient.RegClient, mc dagConfig, rSrc, rT
rPut := rTgt
if !dm.top {
mpOpts = append(mpOpts, regclient.WithManifestChild())
rPut.Tag = ""
}
if rPut.Tag == "" {
// push by digest
if dm.newDesc.Digest != "" {
rPut.Digest = dm.newDesc.Digest.String()
} else {
rPut.Digest = dm.origDesc.Digest.String()
}
} else {
// push by tag
rPut.Digest = ""
rPut = rPut.SetDigest(cmp.Or(dm.newDesc.Digest.String(), dm.origDesc.Digest.String()))
} else if rPut.Tag == "" || rPut.Digest != "" {
// update digest, prefer newDesc if set
rPut = rPut.AddDigest(cmp.Or(dm.newDesc.Digest.String(), dm.origDesc.Digest.String()))
}
err = rc.ManifestPut(ctx, rPut, dm.m, mpOpts...)
if err != nil {

View File

@ -3,6 +3,7 @@ package mod
import (
"context"
"fmt"
"slices"
"strings"
"github.com/opencontainers/go-digest"
@ -830,17 +831,14 @@ func rebaseAddStep(dc *dagConfig, rBaseOld, rBaseNew ref.Ref) error {
pruneNum := len(layersOld)
i := 0
for pruneNum > 0 {
if i >= len(dm.layers) {
break
}
if dm.layers[i].mod == added {
i++
continue
}
if i == 0 {
dm.layers = dm.layers[1:]
} else if i >= len(dm.layers)-1 {
dm.layers = dm.layers[:i]
} else {
dm.layers = append(dm.layers[:i], dm.layers[i+1:]...)
}
dm.layers = slices.Delete(dm.layers, i, i+1)
pruneNum--
}
layers = layers[len(layersOld):]

View File

@ -8,6 +8,7 @@ import (
"fmt"
"io"
"os"
"slices"
"time"
"github.com/klauspost/compress/zstd"
@ -65,9 +66,7 @@ func Apply(ctx context.Context, rc *regclient.RegClient, rSrc ref.Ref, opts ...O
dm.top = true
// load the options
rTgt := rSrc
rTgt.Tag = ""
rTgt.Digest = ""
rTgt := rSrc.SetTag("")
dc := dagConfig{
stepsManifest: []func(context.Context, *regclient.RegClient, ref.Ref, ref.Ref, *dagManifest) error{},
stepsOCIConfig: []func(context.Context, *regclient.RegClient, ref.Ref, ref.Ref, *dagOCIConfig) error{},
@ -147,7 +146,7 @@ func Apply(ctx context.Context, rc *regclient.RegClient, rSrc ref.Ref, opts ...O
}
}
// changes for files within layers require extracting the tar and then repackaging it
if len(dc.stepsLayerFile) > 0 && inListStr(dl.desc.MediaType, mtKnownTar) {
if len(dc.stepsLayerFile) > 0 && slices.Contains(mtKnownTar, dl.desc.MediaType) {
if dl.mod == deleted {
return dl, nil
}
@ -343,8 +342,8 @@ func Apply(ctx context.Context, rc *regclient.RegClient, rSrc ref.Ref, opts ...O
if err != nil {
return rTgt, err
}
if rTgt.Tag == "" {
rTgt.Digest = dm.m.GetDescriptor().Digest.String()
if rTgt.Tag == "" || rTgt.Digest != "" {
rTgt = rTgt.AddDigest(dm.m.GetDescriptor().Digest.String())
}
return rTgt, nil
}
@ -388,24 +387,3 @@ func WithDigestAlgo(algo digest.Algorithm) Opts {
return nil
}
}
func inListStr(str string, list []string) bool {
for _, s := range list {
if str == s {
return true
}
}
return false
}
func eqStrSlice(a, b []string) bool {
if len(a) != len(b) {
return false
}
for i := range a {
if a[i] != b[i] {
return false
}
}
return true
}

View File

@ -24,7 +24,6 @@ import (
"github.com/regclient/regclient/scheme/reg"
"github.com/regclient/regclient/types/errs"
"github.com/regclient/regclient/types/manifest"
"github.com/regclient/regclient/types/mediatype"
"github.com/regclient/regclient/types/platform"
"github.com/regclient/regclient/types/ref"
)
@ -1090,17 +1089,3 @@ func TestMod(t *testing.T) {
})
}
}
func TestInList(t *testing.T) {
t.Parallel()
t.Run("match", func(t *testing.T) {
if !inListStr(mediatype.Docker2LayerGzip, mtKnownTar) {
t.Errorf("did not find docker layer in known tar list")
}
})
t.Run("mismatch", func(t *testing.T) {
if inListStr(mediatype.Docker2LayerGzip, mtKnownConfig) {
t.Errorf("found docker layer in known config list")
}
})
}

View File

@ -42,7 +42,6 @@ func TestRoundtrip(t *testing.T) {
},
}
for _, algo := range []CompressType{CompressNone, CompressGzip, CompressXz, CompressZstd} {
algo := algo
t.Run(algo.String(), func(t *testing.T) {
for _, tc := range tt {
tc := tc

View File

@ -9,7 +9,7 @@ type prettyPrinter interface {
MarshalPretty() ([]byte, error)
}
func printPretty(v interface{}) string {
func printPretty(v any) string {
if pp, ok := v.(prettyPrinter); ok {
b, err := pp.MarshalPretty()
if err != nil {

View File

@ -12,7 +12,7 @@ import (
)
var tmplFuncs = gotemplate.FuncMap{
"default": func(def, orig interface{}) interface{} {
"default": func(def, orig any) any {
if orig == nil || orig == reflect.Zero(reflect.TypeOf(orig)).Interface() {
return def
}
@ -30,14 +30,14 @@ var tmplFuncs = gotemplate.FuncMap{
return strings.TrimSpace(string(b))
},
"join": strings.Join,
"json": func(v interface{}) string {
"json": func(v any) string {
buf := &bytes.Buffer{}
enc := json.NewEncoder(buf)
enc.SetEscapeHTML(false)
_ = enc.Encode(v)
return buf.String()
},
"jsonPretty": func(v interface{}) string {
"jsonPretty": func(v any) string {
buf := &bytes.Buffer{}
enc := json.NewEncoder(buf)
enc.SetEscapeHTML(false)
@ -57,7 +57,7 @@ var tmplFuncs = gotemplate.FuncMap{
type Opt func(*gotemplate.Template) (*gotemplate.Template, error)
// Writer outputs a template to an io.Writer
func Writer(out io.Writer, tmpl string, data interface{}, opts ...Opt) error {
func Writer(out io.Writer, tmpl string, data any, opts ...Opt) error {
var err error
t := gotemplate.New("out").Funcs(tmplFuncs)
for _, opt := range opts {
@ -74,7 +74,7 @@ func Writer(out io.Writer, tmpl string, data interface{}, opts ...Opt) error {
}
// String converts a template to a string
func String(tmpl string, data interface{}, opts ...Opt) (string, error) {
func String(tmpl string, data any, opts ...Opt) (string, error) {
var sb strings.Builder
err := Writer(&sb, tmpl, data)
if err != nil {

View File

@ -112,7 +112,6 @@ func TestReferrerList(t *testing.T) {
},
}
for _, tcServer := range ttServers {
tcServer := tcServer
t.Run(tcServer.name, func(t *testing.T) {
t.Parallel()
refTag, err := ref.New(fmt.Sprintf("%s/%s:%s", tcServer.reg, testRepo, testTag))
@ -168,7 +167,6 @@ func TestReferrerList(t *testing.T) {
},
}
for _, tc := range tt {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
rl, err := rc.ReferrerList(ctx, tc.ref, tc.opts...)

View File

@ -156,7 +156,7 @@ func TestBlob(t *testing.T) {
count := 5
var wg sync.WaitGroup
wg.Add(count)
for i := 0; i < 5; i++ {
for range 5 {
go func() {
defer wg.Done()
bRdr := bytes.NewReader(bBytes)

View File

@ -10,6 +10,7 @@ import (
"log/slog"
"os"
"path"
"slices"
// crypto libraries included for go-digest
_ "crypto/sha256"
@ -69,7 +70,7 @@ func (o *OCIDir) ManifestDelete(ctx context.Context, r ref.Ref, opts ...scheme.M
// remove matching entry from index
if r.Digest != "" && index.Manifests[i].Digest.String() == r.Digest {
changed = true
index.Manifests = append(index.Manifests[:i], index.Manifests[i+1:]...)
index.Manifests = slices.Delete(index.Manifests, i, i+1)
}
}
// push manifest back out
@ -104,7 +105,7 @@ func (o *OCIDir) manifestGet(_ context.Context, r ref.Ref) (manifest.Manifest, e
return nil, fmt.Errorf("unable to read oci index: %w", err)
}
if r.Digest == "" && r.Tag == "" {
r.Tag = "latest"
r = r.SetTag("latest")
}
desc, err := indexGet(index, r)
if err != nil {
@ -151,7 +152,7 @@ func (o *OCIDir) ManifestHead(ctx context.Context, r ref.Ref) (manifest.Manifest
return nil, fmt.Errorf("unable to read oci index: %w", err)
}
if r.Digest == "" && r.Tag == "" {
r.Tag = "latest"
r = r.SetTag("latest")
}
desc, err := indexGet(index, r)
if err != nil {
@ -181,9 +182,9 @@ func (o *OCIDir) ManifestHead(ctx context.Context, r ref.Ref) (manifest.Manifest
return nil, err
}
mt := struct {
MediaType string `json:"mediaType,omitempty"`
SchemaVersion int `json:"schemaVersion,omitempty"`
Signatures []interface{} `json:"signatures,omitempty"`
MediaType string `json:"mediaType,omitempty"`
SchemaVersion int `json:"schemaVersion,omitempty"`
Signatures []any `json:"signatures,omitempty"`
}{}
err = json.Unmarshal(raw, &mt)
if err != nil {
@ -218,7 +219,7 @@ func (o *OCIDir) manifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest
opt(&config)
}
if !config.Child && r.Digest == "" && r.Tag == "" {
r.Tag = "latest"
r = r.SetTag("latest")
}
err := o.initIndex(r, true)
if err != nil {
@ -232,9 +233,13 @@ func (o *OCIDir) manifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest
if err != nil {
return fmt.Errorf("could not serialize manifest: %w", err)
}
if r.Tag == "" {
// force digest to match manifest value
r.Digest = desc.Digest.String()
if r.Digest != "" && desc.Digest.String() != r.Digest {
// Digest algorithm may have changed, try recreating the manifest with the provided ref.
// This will fail if the ref digest does not match the manifest.
m, err = manifest.New(manifest.WithRef(r), manifest.WithRaw(b))
if err != nil {
return fmt.Errorf("failed to rebuilding manifest with ref \"%s\": %w", r.CommonName(), err)
}
}
if r.Tag != "" {
desc.Annotations = map[string]string{

View File

@ -10,6 +10,8 @@ import (
"path/filepath"
"testing"
"github.com/opencontainers/go-digest"
"github.com/regclient/regclient/internal/copyfs"
"github.com/regclient/regclient/types/manifest"
"github.com/regclient/regclient/types/mediatype"
@ -47,6 +49,10 @@ func TestManifest(t *testing.T) {
if !ml.IsList() {
t.Errorf("expected manifest list")
}
mlb, err := ml.RawBody()
if err != nil {
t.Fatalf("failed to get body of manifest: %v", err)
}
mli, ok := ml.(manifest.Indexer)
if !ok {
t.Fatalf("manifest doesn't support index methods")
@ -61,11 +67,18 @@ func TestManifest(t *testing.T) {
if err != nil {
t.Errorf("manifest head failed on child digest: %v", err)
}
rMissing := r.SetDigest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
digMissing := digest.Canonical.FromString("missing")
digSHA512 := digest.SHA512.FromBytes(mlb)
rMissing := r.SetDigest(digMissing.String())
rSHA512 := r.SetDigest(digSHA512.String())
_, err = o.ManifestHead(ctx, rMissing)
if err == nil {
t.Errorf("manifest head succeeded on missing digest: %s", rMissing.CommonName())
}
_, err = o.ManifestHead(ctx, rSHA512)
if err == nil {
t.Errorf("manifest head succeeded on alternate algorithm: %s", rSHA512.CommonName())
}
// image manifest
m, err := o.ManifestGet(ctx, r)
if err != nil {
@ -201,6 +214,17 @@ func TestManifest(t *testing.T) {
if err != nil {
t.Errorf("failed pushing manifest: %v", err)
}
// push invalid digest
err = o.manifestPut(ctx, rMissing, ml)
if err == nil {
t.Errorf("succeeded pushing with invalid digest")
}
// push with alternate digest
err = o.manifestPut(ctx, rSHA512, ml)
if err != nil {
t.Errorf("failed pushing with alternate digest algorithm: %v", err)
}
// close and reopen
err = o.Close(ctx, r)
if err != nil {
t.Errorf("failed closing: %v", err)

View File

@ -10,6 +10,7 @@ import (
"log/slog"
"os"
"path"
"slices"
"strings"
"sync"
@ -335,7 +336,7 @@ func indexCreate() v1.Index {
func indexGet(index v1.Index, r ref.Ref) (descriptor.Descriptor, error) {
if r.Digest == "" && r.Tag == "" {
r.Tag = "latest"
r = r.SetTag("latest")
}
if r.Digest != "" {
for _, im := range index.Manifests {
@ -395,7 +396,7 @@ func indexSet(index *v1.Index, r ref.Ref, d descriptor.Descriptor) error {
// prune entries without any tag and a matching digest
// or entries with a matching tag
if (name == "" && index.Manifests[i].Digest == d.Digest) || (r.Tag != "" && name == r.Tag) {
index.Manifests = append(index.Manifests[:i], index.Manifests[i+1:]...)
index.Manifests = slices.Delete(index.Manifests, i, i+1)
}
}
} else {

View File

@ -158,12 +158,12 @@ func TestReferrer(t *testing.T) {
if err != nil {
t.Fatalf("Failed running ManifestPut on Manifest again: %v", err)
}
r.Digest = artifactBM.GetDescriptor().Digest.String()
r = r.AddDigest(artifactBM.GetDescriptor().Digest.String())
err = o.ManifestPut(ctx, r, artifactBM, scheme.WithManifestChild())
if err != nil {
t.Fatalf("Failed running ManifestPut on Artifact: %v", err)
}
r.Digest = artifactCM.GetDescriptor().Digest.String()
r = r.AddDigest(artifactCM.GetDescriptor().Digest.String())
err = o.ManifestPut(ctx, r, artifactCM, scheme.WithManifestChild())
if err != nil {
t.Fatalf("Failed running ManifestPut on Artifact: %v", err)

View File

@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"slices"
"sort"
"strings"
@ -34,7 +35,7 @@ func (o *OCIDir) tagDelete(_ context.Context, r ref.Ref) error {
for i, desc := range index.Manifests {
if t, ok := desc.Annotations[aOCIRefName]; ok && t == r.Tag {
// remove matching entry from index
index.Manifests = append(index.Manifests[:i], index.Manifests[i+1:]...)
index.Manifests = slices.Delete(index.Manifests, i, i+1)
changed = true
}
}
@ -63,14 +64,7 @@ func (o *OCIDir) TagList(ctx context.Context, r ref.Ref, opts ...scheme.TagOpts)
if i := strings.LastIndex(t, ":"); i >= 0 {
t = t[i+1:]
}
found := false
for _, cur := range tl {
if cur == t {
found = true
break
}
}
if !found {
if !slices.Contains(tl, t) {
tl = append(tl, t)
}
}

View File

@ -4,6 +4,7 @@ import (
"context"
"errors"
"path/filepath"
"slices"
"testing"
"github.com/regclient/regclient/internal/copyfs"
@ -37,7 +38,7 @@ func TestTag(t *testing.T) {
t.Fatalf("failed to get tags: %v", err)
}
for _, exTag := range exTags {
if !inListStr(exTag, tlTags) {
if !slices.Contains(tlTags, exTag) {
t.Errorf("missing tag: %s", exTag)
}
}
@ -67,23 +68,14 @@ func TestTag(t *testing.T) {
t.Errorf("failed to get tags: %v", err)
}
for _, keep := range keepTags {
if !inListStr(keep, tlTags) {
if !slices.Contains(tlTags, keep) {
t.Errorf("missing tag: %s", keep)
}
}
for _, rm := range rmTags {
if inListStr(rm, tlTags) {
if slices.Contains(tlTags, rm) {
t.Errorf("tag not removed: %s", rm)
}
}
})
}
func inListStr(str string, list []string) bool {
for _, s := range list {
if str == s {
return true
}
}
return false
}

View File

@ -266,16 +266,14 @@ func (reg *Reg) blobGetUploadURL(ctx context.Context, r ref.Ref, d descriptor.De
slog.String("err", err.Error()))
} else {
host := reg.hostGet(r.Registry)
reg.muHost.Lock()
if (host.BlobChunk > 0 && minSize > host.BlobChunk) || (host.BlobChunk <= 0 && minSize > reg.blobChunkSize) {
if minSize > reg.blobChunkLimit {
host.BlobChunk = reg.blobChunkLimit
} else {
host.BlobChunk = minSize
}
host.BlobChunk = min(minSize, reg.blobChunkLimit)
reg.slog.Debug("Registry requested min chunk size",
slog.Int64("size", host.BlobChunk),
slog.String("host", host.Name))
}
reg.muHost.Unlock()
}
}
// Extract the location into a new putURL based on whether it's relative, fqdn with a scheme, or without a scheme.
@ -335,17 +333,14 @@ func (reg *Reg) blobMount(ctx context.Context, rTgt ref.Ref, d descriptor.Descri
slog.String("err", err.Error()))
} else {
host := reg.hostGet(rTgt.Registry)
reg.muHost.Lock()
if (host.BlobChunk > 0 && minSize > host.BlobChunk) || (host.BlobChunk <= 0 && minSize > reg.blobChunkSize) {
// TODO(bmitch): potential race condition, may need a lock before setting/using values in host
if minSize > reg.blobChunkLimit {
host.BlobChunk = reg.blobChunkLimit
} else {
host.BlobChunk = minSize
}
host.BlobChunk = min(minSize, reg.blobChunkLimit)
reg.slog.Debug("Registry requested min chunk size",
slog.Int64("size", host.BlobChunk),
slog.String("host", host.Name))
}
reg.muHost.Unlock()
}
}
// 201 indicates the blob mount succeeded

View File

@ -94,7 +94,7 @@ func TestRepo(t *testing.T) {
},
RespEntry: reqresp.RespEntry{
Status: http.StatusOK,
Body: []byte(fmt.Sprintf(`{"repositories":["%s"]}`, strings.Join(listRegistry[:partialLen], `","`))),
Body: fmt.Appendf(nil, `{"repositories":["%s"]}`, strings.Join(listRegistry[:partialLen], `","`)),
Headers: http.Header{
"Content-Type": {"text/plain; charset=utf-8"},
},
@ -111,7 +111,7 @@ func TestRepo(t *testing.T) {
},
RespEntry: reqresp.RespEntry{
Status: http.StatusOK,
Body: []byte(fmt.Sprintf(`{"repositories":["%s"]}`, strings.Join(listRegistry[partialLen:], `","`))),
Body: fmt.Appendf(nil, `{"repositories":["%s"]}`, strings.Join(listRegistry[partialLen:], `","`)),
Headers: http.Header{
"Content-Type": {"text/plain; charset=utf-8"},
},
@ -125,7 +125,7 @@ func TestRepo(t *testing.T) {
},
RespEntry: reqresp.RespEntry{
Status: http.StatusOK,
Body: []byte(fmt.Sprintf(`{"repositories":["%s"]}`, strings.Join(listRegistry, `","`))),
Body: fmt.Appendf(nil, `{"repositories":["%s"]}`, strings.Join(listRegistry, `","`)),
Headers: http.Header{
"Content-Type": {"text/plain; charset=utf-8"},
},

View File

@ -181,9 +181,8 @@ func (reg *Reg) TagDelete(ctx context.Context, r ref.Ref) error {
return fmt.Errorf("failed sending dummy manifest to delete %s: %w", r.CommonName(), err)
}
r.Digest = tempManifest.GetDescriptor().Digest.String()
// delete manifest by digest
r = r.AddDigest(tempManifest.GetDescriptor().Digest.String())
reg.slog.Debug("Deleting dummy manifest",
slog.String("ref", r.Reference),
slog.String("digest", r.Digest))

View File

@ -32,15 +32,15 @@ func TestTag(t *testing.T) {
repoPath2 := "/proj2"
pageLen := 2
listTagList := []string{"latest", "v1", "v1.1", "v1.1.1"}
listTagBody := []byte(fmt.Sprintf("{\"name\":\"%s\",\"tags\":[\"%s\"]}",
listTagBody := fmt.Appendf(nil, "{\"name\":\"%s\",\"tags\":[\"%s\"]}",
strings.TrimLeft(repoPath, "/"),
strings.Join(listTagList, "\",\"")))
listTagBody1 := []byte(fmt.Sprintf("{\"name\":\"%s\",\"tags\":[\"%s\"]}",
strings.Join(listTagList, "\",\""))
listTagBody1 := fmt.Appendf(nil, "{\"name\":\"%s\",\"tags\":[\"%s\"]}",
strings.TrimLeft(repoPath, "/"),
strings.Join(listTagList[:pageLen], "\",\"")))
listTagBody2 := []byte(fmt.Sprintf("{\"name\":\"%s\",\"tags\":[\"%s\"]}",
strings.Join(listTagList[:pageLen], "\",\""))
listTagBody2 := fmt.Appendf(nil, "{\"name\":\"%s\",\"tags\":[\"%s\"]}",
strings.TrimLeft(repoPath, "/"),
strings.Join(listTagList[pageLen:], "\",\"")))
strings.Join(listTagList[pageLen:], "\",\""))
missingRepo := "/missing"
delOCITag := "del-oci"
delFallbackTag := "del-fallback"

View File

@ -105,6 +105,14 @@ echo 64 arms | regctl artifact put \
--artifact-type application/example.arms -m application/example.arms \
--subject ocidir://testrepo:v2 --platform linux/arm64
# another standalone artifact with multiple layers
regctl artifact put \
--artifact-type application/example.layers \
-m application/example.layer.1 --file-title -f layer1.txt \
-m application/example.layer.2 --file-title -f layer2.txt \
-m application/example.layer.3 --file-title -f layer3.txt \
ocidir://testrepo:a3
# include external artifacts
echo "bacon" | regctl artifact put \
--artifact-type application/example.sbom -m application/example.sbom.breakfast \

View File

@ -0,0 +1 @@
{"schemaVersion":2,"mediaType":"application/vnd.oci.image.manifest.v1+json","artifactType":"application/example.layers","config":{"mediaType":"application/vnd.oci.empty.v1+json","digest":"sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a","size":2},"layers":[{"mediaType":"application/example.layer.1","digest":"sha256:4355a46b19d348dc2f57c046f8ef63d4538ebb936000f3c9ee954a27460dd865","size":2,"annotations":{"org.opencontainers.image.title":"layer1.txt"}},{"mediaType":"application/example.layer.2","digest":"sha256:53c234e5e8472b6ac51c1ae1cab3fe06fad053beb8ebfd8977b010655bfdd3c3","size":2,"annotations":{"org.opencontainers.image.title":"layer2.txt"}},{"mediaType":"application/example.layer.3","digest":"sha256:1121cfccd5913f0a63fec40a6ffd44ea64f9dc135c66634ba001d10bcf4302a2","size":2,"annotations":{"org.opencontainers.image.title":"layer3.txt"}}]}

File diff suppressed because one or more lines are too long

View File

@ -342,7 +342,7 @@ func TestReader(t *testing.T) {
go func() {
defer wg.Done()
out := make([]byte, exLen)
for i := 0; i < chunkCount-1; i++ {
for i := range chunkCount - 1 {
l, err := b.Read(out[i*int(chunkLen) : (i+1)*int(chunkLen)])
if l != int(chunkLen) {
t.Errorf("did not read enough bytes: expected %d, received %d", chunkLen, l)

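The loop rewrite above uses the Go 1.22 range-over-int form: for i := range n iterates i over 0..n-1, matching the classic three-clause loop. A small standalone sketch (illustrative, not repository code):

package main

import "fmt"

func main() {
	chunkCount := 4
	// Classic form.
	for i := 0; i < chunkCount-1; i++ {
		fmt.Println("classic:", i)
	}
	// Go 1.22 range-over-int form; the range expression is chunkCount-1 as a whole.
	for i := range chunkCount - 1 {
		fmt.Println("range:  ", i)
	}
}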
View File

@ -82,10 +82,10 @@ func (m *docker1SignedManifest) GetLayers() ([]descriptor.Descriptor, error) {
return dl, nil
}
func (m *docker1Manifest) GetOrig() interface{} {
func (m *docker1Manifest) GetOrig() any {
return m.Manifest
}
func (m *docker1SignedManifest) GetOrig() interface{} {
func (m *docker1SignedManifest) GetOrig() any {
return m.SignedManifest
}
@ -186,7 +186,7 @@ func (m *docker1SignedManifest) SetLayers(dl []descriptor.Descriptor) error {
return fmt.Errorf("set methods not supported for for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType)
}
func (m *docker1Manifest) SetOrig(origIn interface{}) error {
func (m *docker1Manifest) SetOrig(origIn any) error {
orig, ok := origIn.(schema1.Manifest)
if !ok {
return errs.ErrUnsupportedMediaType
@ -211,7 +211,7 @@ func (m *docker1Manifest) SetOrig(origIn interface{}) error {
return nil
}
func (m *docker1SignedManifest) SetOrig(origIn interface{}) error {
func (m *docker1SignedManifest) SetOrig(origIn any) error {
orig, ok := origIn.(schema1.SignedManifest)
if !ok {
return errs.ErrUnsupportedMediaType

View File

@ -88,10 +88,10 @@ func (m *docker2ManifestList) GetLayers() ([]descriptor.Descriptor, error) {
return []descriptor.Descriptor{}, fmt.Errorf("layers are not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType)
}
func (m *docker2Manifest) GetOrig() interface{} {
func (m *docker2Manifest) GetOrig() any {
return m.Manifest
}
func (m *docker2ManifestList) GetOrig() interface{} {
func (m *docker2ManifestList) GetOrig() any {
return m.ManifestList
}
@ -229,7 +229,7 @@ func (m *docker2ManifestList) MarshalPretty() ([]byte, error) {
fmt.Fprintf(tw, "\t\n")
dRef := m.r
if dRef.Reference != "" {
dRef.Digest = d.Digest.String()
dRef = dRef.AddDigest(d.Digest.String())
fmt.Fprintf(tw, " Name:\t%s\n", dRef.CommonName())
}
err := d.MarshalPrettyTW(tw, " ")
@ -294,7 +294,7 @@ func (m *docker2ManifestList) SetManifestList(dl []descriptor.Descriptor) error
return m.updateDesc()
}
func (m *docker2Manifest) SetOrig(origIn interface{}) error {
func (m *docker2Manifest) SetOrig(origIn any) error {
orig, ok := origIn.(schema2.Manifest)
if !ok {
return errs.ErrUnsupportedMediaType
@ -308,7 +308,7 @@ func (m *docker2Manifest) SetOrig(origIn interface{}) error {
return m.updateDesc()
}
func (m *docker2ManifestList) SetOrig(origIn interface{}) error {
func (m *docker2ManifestList) SetOrig(origIn any) error {
orig, ok := origIn.(schema2.ManifestList)
if !ok {
return errs.ErrUnsupportedMediaType

View File

@ -31,14 +31,14 @@ import (
// many calls are only supported by certain underlying media types.
type Manifest interface {
GetDescriptor() descriptor.Descriptor
GetOrig() interface{}
GetOrig() any
GetRef() ref.Ref
IsList() bool
IsSet() bool
MarshalJSON() ([]byte, error)
RawBody() ([]byte, error)
RawHeaders() (http.Header, error)
SetOrig(interface{}) error
SetOrig(any) error
// Deprecated: GetConfig should be accessed using [Imager] interface.
GetConfig() (descriptor.Descriptor, error)
@ -96,7 +96,7 @@ type manifestConfig struct {
r ref.Ref
desc descriptor.Descriptor
raw []byte
orig interface{}
orig any
header http.Header
}
type Opts func(*manifestConfig)
@ -156,7 +156,7 @@ func WithHeader(header http.Header) Opts {
}
// WithOrig provides the original manifest variable.
func WithOrig(orig interface{}) Opts {
func WithOrig(orig any) Opts {
return func(mc *manifestConfig) {
mc.orig = orig
}
@ -275,7 +275,7 @@ func HasRateLimit(m Manifest) bool {
}
// OCIIndexFromAny converts manifest lists to an OCI index.
func OCIIndexFromAny(orig interface{}) (v1.Index, error) {
func OCIIndexFromAny(orig any) (v1.Index, error) {
ociI := v1.Index{
Versioned: v1.IndexSchemaVersion,
MediaType: mediatype.OCI1ManifestList,
@ -293,8 +293,8 @@ func OCIIndexFromAny(orig interface{}) (v1.Index, error) {
}
// OCIIndexToAny converts from an OCI index back to the manifest list.
func OCIIndexToAny(ociI v1.Index, origP interface{}) error {
// reflect is used to handle both *interface{} and *Manifest
func OCIIndexToAny(ociI v1.Index, origP any) error {
// reflect is used to handle both *any and *Manifest
rv := reflect.ValueOf(origP)
for rv.IsValid() && rv.Type().Kind() == reflect.Ptr {
rv = rv.Elem()
@ -321,7 +321,7 @@ func OCIIndexToAny(ociI v1.Index, origP interface{}) error {
}
// OCIManifestFromAny converts an image manifest to an OCI manifest.
func OCIManifestFromAny(orig interface{}) (v1.Manifest, error) {
func OCIManifestFromAny(orig any) (v1.Manifest, error) {
ociM := v1.Manifest{
Versioned: v1.ManifestSchemaVersion,
MediaType: mediatype.OCI1Manifest,
@ -341,8 +341,8 @@ func OCIManifestFromAny(orig interface{}) (v1.Manifest, error) {
}
// OCIManifestToAny converts an OCI manifest back to the image manifest.
func OCIManifestToAny(ociM v1.Manifest, origP interface{}) error {
// reflect is used to handle both *interface{} and *Manifest
func OCIManifestToAny(ociM v1.Manifest, origP any) error {
// reflect is used to handle both *any and *Manifest
rv := reflect.ValueOf(origP)
for rv.IsValid() && rv.Type().Kind() == reflect.Ptr {
rv = rv.Elem()
@ -372,7 +372,7 @@ func OCIManifestToAny(ociM v1.Manifest, origP interface{}) error {
// FromOrig creates a new manifest from the original upstream manifest type.
// This method should be used if you are creating a new manifest rather than pulling one from a registry.
func fromOrig(c common, orig interface{}) (Manifest, error) {
func fromOrig(c common, orig any) (Manifest, error) {
var mt string
var m Manifest
origDigest := c.desc.Digest
@ -473,7 +473,7 @@ func fromCommon(c common) (Manifest, error) {
mt := struct {
MediaType string `json:"mediaType,omitempty"`
SchemaVersion int `json:"schemaVersion,omitempty"`
Signatures []interface{} `json:"signatures,omitempty"`
Signatures []any `json:"signatures,omitempty"`
Manifests []descriptor.Descriptor `json:"manifests,omitempty"`
Layers []descriptor.Descriptor `json:"layers,omitempty"`
}{}

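The interface{} to any swap throughout this file is purely cosmetic: any has been a predeclared alias for interface{} since Go 1.18, so method sets and type identity are unchanged. A small sketch with hypothetical types (not regclient code):

package main

import "fmt"

type oldStyle struct{ v interface{} }
type newStyle struct{ v any }

// Identical signatures: any is an alias for interface{}.
func (o oldStyle) GetOrig() interface{} { return o.v }
func (n newStyle) GetOrig() any         { return n.v }

type origGetter interface{ GetOrig() any }

func main() {
	// Both types satisfy the same interface, before and after the rename.
	for _, g := range []origGetter{oldStyle{v: 1}, newStyle{v: "two"}} {
		fmt.Println(g.GetOrig())
	}
}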
View File

@ -115,13 +115,13 @@ func (m *oci1Artifact) GetLayers() ([]descriptor.Descriptor, error) {
return m.Blobs, nil
}
func (m *oci1Manifest) GetOrig() interface{} {
func (m *oci1Manifest) GetOrig() any {
return m.Manifest
}
func (m *oci1Index) GetOrig() interface{} {
func (m *oci1Index) GetOrig() any {
return m.Index
}
func (m *oci1Artifact) GetOrig() interface{} {
func (m *oci1Artifact) GetOrig() any {
return m.ArtifactManifest
}
@ -301,7 +301,7 @@ func (m *oci1Index) MarshalPretty() ([]byte, error) {
fmt.Fprintf(tw, "\t\n")
dRef := m.r
if dRef.Reference != "" {
dRef.Digest = d.Digest.String()
dRef = dRef.AddDigest(d.Digest.String())
fmt.Fprintf(tw, " Name:\t%s\n", dRef.CommonName())
}
err := d.MarshalPrettyTW(tw, " ")
@ -473,7 +473,7 @@ func (m *oci1Index) SetManifestList(dl []descriptor.Descriptor) error {
return m.updateDesc()
}
func (m *oci1Manifest) SetOrig(origIn interface{}) error {
func (m *oci1Manifest) SetOrig(origIn any) error {
orig, ok := origIn.(v1.Manifest)
if !ok {
return errs.ErrUnsupportedMediaType
@ -488,7 +488,7 @@ func (m *oci1Manifest) SetOrig(origIn interface{}) error {
return m.updateDesc()
}
func (m *oci1Index) SetOrig(origIn interface{}) error {
func (m *oci1Index) SetOrig(origIn any) error {
orig, ok := origIn.(v1.Index)
if !ok {
return errs.ErrUnsupportedMediaType
@ -525,7 +525,7 @@ func (m *oci1Index) SetSubject(d *descriptor.Descriptor) error {
return m.updateDesc()
}
func (m *oci1Artifact) SetOrig(origIn interface{}) error {
func (m *oci1Artifact) SetOrig(origIn any) error {
orig, ok := origIn.(v1.ArtifactManifest)
if !ok {
return errs.ErrUnsupportedMediaType

View File

@ -161,6 +161,14 @@ func NewHost(parse string) (Ref, error) {
return ret, nil
}
// AddDigest returns a ref with the requested digest set.
// The tag will NOT be unset and the reference value will be reset.
func (r Ref) AddDigest(digest string) Ref {
r.Digest = digest
r.Reference = r.CommonName()
return r
}
// CommonName outputs a parsable name from a reference.
func (r Ref) CommonName() string {
cn := ""

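A minimal usage sketch of the new AddDigest helper, based on the behavior exercised in the test below (the import path and digest value are assumptions for illustration):

package main

import (
	"fmt"

	"github.com/regclient/regclient/types/ref"
)

func main() {
	r, err := ref.New("example.com/repo:v1")
	if err != nil {
		panic(err)
	}
	dig := "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a"
	// AddDigest keeps the tag and rebuilds Reference from CommonName.
	r = r.AddDigest(dig)
	fmt.Println(r.Tag)       // v1
	fmt.Println(r.Digest)    // sha256:4413...
	fmt.Println(r.Reference) // example.com/repo:v1@sha256:4413...
}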
View File

@ -902,11 +902,12 @@ func TestIsSet(t *testing.T) {
}
}
func TestSet(t *testing.T) {
func TestSetAndAdd(t *testing.T) {
t.Parallel()
rStr := "example.com/repo:v1"
rDigStr := "example.com/repo@" + testDigest
rTagStr := "example.com/repo:v2"
rAddStr := "example.com/repo:v2@" + testDigest
r, err := New(rStr)
if err != nil {
t.Fatalf("unexpected parse failure: %v", err)
@ -931,6 +932,17 @@ func TestSet(t *testing.T) {
if r.Reference != rTagStr {
t.Errorf("SetTag reference mismatch, expected %s, received %s", rTagStr, r.Reference)
}
r = r.AddDigest(testDigest)
if r.Tag != "v2" {
t.Errorf("AddDigest tag mismatch, expected v2, received %s", r.Tag)
}
if r.Digest != testDigest {
t.Errorf("AddDigest digest mismatch, expected %s, received %s", testDigest, r.Digest)
}
if r.Reference != rAddStr {
t.Errorf("AddDigest reference mismatch, expected %s, received %s", rAddStr, r.Reference)
}
}
func TestToReg(t *testing.T) {

View File

@ -4,6 +4,7 @@ package referrer
import (
"bytes"
"fmt"
"slices"
"sort"
"text/tabwriter"
@ -79,11 +80,7 @@ func (rl *ReferrerList) Delete(m manifest.Manifest) error {
found := false
for i := len(rlM.Manifests) - 1; i >= 0; i-- {
if rlM.Manifests[i].Digest == mDesc.Digest {
if i < len(rlM.Manifests)-1 {
rlM.Manifests = append(rlM.Manifests[:i], rlM.Manifests[i+1:]...)
} else {
rlM.Manifests = rlM.Manifests[:i]
}
rlM.Manifests = slices.Delete(rlM.Manifests, i, i+1)
found = true
}
}

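The two-branch append/truncate removal above is collapsed into slices.Delete (stdlib since Go 1.21), and the same package's slices.Contains replaces the hand-written dedup loop later in this commit. A standalone sketch of both helpers (illustrative data, not repository code):

package main

import (
	"fmt"
	"slices"
)

func main() {
	manifests := []string{"m1", "m2", "m3"}
	i := 1
	// Remove the element at index i; Delete shifts the tail left and shortens the slice.
	manifests = slices.Delete(manifests, i, i+1)
	fmt.Println(manifests) // [m1 m3]

	warnings := []string{"deprecated endpoint"}
	msg := "deprecated endpoint"
	// Dedup check before appending, replacing the manual range loop.
	if !slices.Contains(warnings, msg) {
		warnings = append(warnings, msg)
	}
	fmt.Println(warnings) // [deprecated endpoint]
}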
View File

@ -21,7 +21,7 @@ type RepoList struct {
type repoCommon struct {
host string
mt string
orig interface{}
orig any
rawHeader http.Header
rawBody []byte
}
@ -92,7 +92,7 @@ type RepoRegistryList struct {
Repositories []string `json:"repositories"`
}
func (r repoCommon) GetOrig() interface{} {
func (r repoCommon) GetOrig() any {
return r.orig
}

View File

@ -15,7 +15,7 @@ func TestNew(t *testing.T) {
t.Parallel()
emptyRaw := []byte("{}")
registryList := []string{"library/alpine", "library/debian", "library/golang"}
registryRaw := []byte(fmt.Sprintf(`{"repositories":["%s"]}`, strings.Join(registryList, `","`)))
registryRaw := fmt.Appendf(nil, `{"repositories":["%s"]}`, strings.Join(registryList, `","`))
registryHost := "localhost:5000"
registryMT := "application/json; charset=utf-8"
registryHeaders := http.Header{

View File

@ -29,7 +29,7 @@ type List struct {
type tagCommon struct {
r ref.Ref
mt string
orig interface{}
orig any
rawHeader http.Header
rawBody []byte
url *url.URL
@ -206,7 +206,7 @@ func (l *List) Append(add *List) error {
}
// GetOrig returns the underlying tag data structure if defined.
func (t tagCommon) GetOrig() interface{} {
func (t tagCommon) GetOrig() any {
return t.orig
}

View File

@ -24,8 +24,8 @@ func TestNew(t *testing.T) {
}
registryRef, _ := ref.New("localhost:5000/regclient/test")
registryRepoName := "regclient/test"
registryRaw := []byte(fmt.Sprintf(`{"name":"%s","tags":["%s"]}`, registryRepoName,
strings.Join(registryTags, `","`)))
registryRaw := fmt.Appendf(nil, `{"name":"%s","tags":["%s"]}`, registryRepoName,
strings.Join(registryTags, `","`))
registryMediaType := "application/json"
registryHeaders := http.Header{
"Content-Type": {registryMediaType},
@ -104,11 +104,11 @@ func TestNew(t *testing.T) {
gcrTags := []string{"v0.3.0", "sha256-96ef6fb02c5a56901dc3c2e0ca34eec9ed926ab8d936ea30ec38f9ec9db017a5.sig"}
gcrRef, _ := ref.New("gcr.io/example/test")
gcrRepoName := "example/test"
gcrRaw := []byte(fmt.Sprintf(`{"child": ["%s"], "manifest":{%s}, "name":"%s", "tags":["%s"]}`,
gcrRaw := fmt.Appendf(nil, `{"child": ["%s"], "manifest":{%s}, "name":"%s", "tags":["%s"]}`,
strings.Join(gcrChild, `","`),
gcrManifestRaw,
gcrRepoName,
strings.Join(gcrTags, `","`)))
strings.Join(gcrTags, `","`))
gcrMediaType := "application/json"
gcrHeaders := http.Header{
"Content-Type": {gcrMediaType},

View File

@ -4,6 +4,7 @@ package warning
import (
"context"
"log/slog"
"slices"
"sync"
)
@ -20,11 +21,8 @@ type Warning struct {
func (w *Warning) Handle(ctx context.Context, slog *slog.Logger, msg string) {
w.mu.Lock()
defer w.mu.Unlock()
// dedup
for _, entry := range w.List {
if entry == msg {
return
}
if slices.Contains(w.List, msg) {
return
}
w.List = append(w.List, msg)
// handle new warning if hook defined