
apply x/tools/modernize fixes

Autogenerated with a couple of manual patches.

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Tonis Tiigi 2025-03-06 17:55:05 -08:00
parent e62f1ed8bc
commit b5286f8dcb
GPG Key ID: AFA9DE5F8AB7AF39
109 changed files with 439 additions and 497 deletions
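For context: these rewrites come from the modernize analyzer that ships with gopls. Below is a minimal, self-contained sketch (not BuildKit code) of the idioms it introduces; the invocation noted in the comment is an assumption about the current x/tools layout.

// Assumed invocation of the analyzer behind this commit:
//
//	go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix -test ./...
package main

import (
	"fmt"
	"slices"
)

func main() {
	var v any = "x" // interface{} -> any
	for i := range 3 { // for i := 0; i < 3; i++ -> range over int (Go 1.22+)
		fmt.Println(i, v)
	}
	s := []string{"a", "b"}
	fmt.Println(slices.Contains(s, "a"))            // hand-rolled search loop -> slices.Contains
	fmt.Println(slices.Clone(s))                    // append([]string{}, s...) -> slices.Clone
	fmt.Println(string(fmt.Appendf(nil, "%d", 42))) // []byte(fmt.Sprintf(...)) -> fmt.Appendf(nil, ...)
}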

cache/blobs.go vendored

@ -5,6 +5,7 @@ import (
"fmt"
"maps"
"os"
"slices"
"strconv"
"github.com/containerd/containerd/v2/core/diff"
@ -418,10 +419,8 @@ func isTypeWindows(sr *immutableRef) bool {
}
switch sr.kind() {
case Merge:
for _, p := range sr.mergeParents {
if isTypeWindows(p) {
return true
}
if slices.ContainsFunc(sr.mergeParents, isTypeWindows) {
return true
}
case Layer:
return isTypeWindows(sr.layerParent)
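Reviewer note: the hunk above is the slices.ContainsFunc pattern, where a manual any-element-matches loop collapses into one call. A standalone sketch of the same shape, with hypothetical names:

package main

import (
	"fmt"
	"slices"
)

func isWindows(p string) bool { return p == "windows" }

func main() {
	mergeParents := []string{"linux", "windows"}
	// Before: for _, p := range mergeParents { if isWindows(p) { return true } }
	fmt.Println(slices.ContainsFunc(mergeParents, isWindows)) // true
}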


@ -8,6 +8,7 @@ import (
"os"
"path"
"path/filepath"
"slices"
"strings"
"sync"
"sync/atomic"
@ -542,7 +543,7 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
if origPrefix != "" {
if keyOk {
iter.SeekLowerBound(append(append([]byte{}, k...), 0))
iter.SeekLowerBound(append(slices.Clone(k), 0))
}
resolvedPrefix = convertKeyToPath(k)
@ -883,7 +884,7 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node[*CacheRe
h := sha256.New()
next := append(k, 0)
iter := root.Iterator()
iter.SeekLowerBound(append(append([]byte{}, next...), 0))
iter.SeekLowerBound(append(slices.Clone(next), 0))
subk := next
ok := true
for {
@ -1246,7 +1247,7 @@ func ensureOriginMetadata(md cache.RefMetadata) cache.RefMetadata {
}
var pool32K = sync.Pool{
New: func() interface{} {
New: func() any {
buf := make([]byte, 32*1024) // 32K
return &buf
},
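The SeekLowerBound changes above swap append(append([]byte{}, k...), 0) for append(slices.Clone(k), 0); both copy the key before appending the 0 byte, so the iterator never aliases the caller's backing array. A quick equivalence check:

package main

import (
	"bytes"
	"fmt"
	"slices"
)

func main() {
	k := []byte("key")
	a := append(append([]byte{}, k...), 0) // old spelling
	b := append(slices.Clone(k), 0)        // new spelling
	fmt.Println(bytes.Equal(a, b), string(k)) // true key (k itself is untouched)
}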


@ -112,6 +112,6 @@ func (s *statInfo) IsDir() bool {
return s.Mode().IsDir()
}
func (s *statInfo) Sys() interface{} {
func (s *statInfo) Sys() any {
return s.Stat
}

cache/manager.go vendored

@ -1505,7 +1505,7 @@ func IsNotFound(err error) bool {
return errors.Is(err, errNotFound)
}
type RefOption interface{}
type RefOption any
type cachePolicy int
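Since Go 1.18, any is a predeclared alias for interface{}, so declarations like type RefOption any change spelling only, never behavior:

package main

import "fmt"

type RefOption any // identical to: type RefOption interface{}

func main() {
	var a interface{} = 42
	var b any = a       // the two names denote the same type, assignable both ways
	fmt.Println(a == b) // true
}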

cache/manager_test.go vendored

@ -391,10 +391,10 @@ func TestMergeBlobchainID(t *testing.T) {
var mergeInputs []ImmutableRef
var descs []ocispecs.Descriptor
descHandlers := DescHandlers(map[digest.Digest]*DescHandler{})
for i := 0; i < 3; i++ {
for i := range 3 {
contentBuffer := contentutil.NewBuffer()
var curBlob ImmutableRef
for j := 0; j < 3; j++ {
for j := range 3 {
blobBytes, desc, err := mapToBlob(map[string]string{strconv.Itoa(i): strconv.Itoa(j)}, true)
require.NoError(t, err)
cw, err := contentBuffer.Writer(ctx)
@ -1194,7 +1194,7 @@ func TestLoopLeaseContent(t *testing.T) {
gotChain := []digest.Digest{orgDesc.Digest}
cur := orgDesc
previous := chain[len(chain)-1].Digest
for i := 0; i < 1000; i++ {
for range 1000 {
dgst := cur.Digest
visited[dgst] = struct{}{}
info, err := co.cs.Info(ctx, dgst)
@ -1632,7 +1632,7 @@ func TestGetRemotes(t *testing.T) {
// make some lazy refs from blobs
expectedContent := map[digest.Digest]struct{}{}
var descs []ocispecs.Descriptor
for i := 0; i < 2; i++ {
for i := range 2 {
blobmap := map[string]string{"foo": strconv.Itoa(i)}
blobBytes, desc, err := mapToBlob(blobmap, true)
require.NoError(t, err)
@ -1670,10 +1670,10 @@ func TestGetRemotes(t *testing.T) {
require.NoError(t, err)
refs := []ImmutableRef{lazyRef}
for i := 0; i < 3; i++ {
for i := range 3 {
var newRefs []ImmutableRef
for j, ir := range refs {
for k := 0; k < 2; k++ {
for k := range 2 {
mutRef, err := cm.New(ctx, ir, nil, descHandlers)
require.NoError(t, err)
@ -1834,7 +1834,7 @@ func TestGetRemotes(t *testing.T) {
require.Equal(t, 1, len(mainOnly))
mainRemote := mainOnly[0]
require.Equal(t, len(mainRemote.Descriptors), len(gotMain.Descriptors))
for i := 0; i < len(mainRemote.Descriptors); i++ {
for i := range mainRemote.Descriptors {
require.Equal(t, mainRemote.Descriptors[i].Digest, gotMain.Descriptors[i].Digest)
}
@ -2053,7 +2053,7 @@ func TestMergeOp(t *testing.T) {
require.Nil(t, emptyMerge)
var baseRefs []ImmutableRef
for i := 0; i < 6; i++ {
for i := range 6 {
active, err := cm.New(ctx, nil, nil)
require.NoError(t, err)
m, err := active.Mount(ctx, false, nil)
@ -2352,7 +2352,7 @@ func TestMountReadOnly(t *testing.T) {
mutRef, err := cm.New(ctx, nil, nil)
require.NoError(t, err)
for i := 0; i < 2; i++ {
for range 2 {
rwMntable, err := mutRef.Mount(ctx, false, nil)
require.NoError(t, err)
rwMnts, release, err := rwMntable.Mount()
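The loop rewrites in this test file rely on Go 1.22 range-over-int: for i := range n iterates 0..n-1, and the index can be dropped when unused. A minimal sketch:

package main

import "fmt"

func main() {
	for i := range 3 { // replaces: for i := 0; i < 3; i++
		fmt.Print(i, " ") // 0 1 2
	}
	n := 2
	for range n { // any integer expression works; index dropped when unused
		fmt.Print("x ")
	}
	fmt.Println()
}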

cache/metadata.go vendored

@ -434,7 +434,7 @@ func (md *cacheMetadata) updateLastUsed() error {
})
}
func (md *cacheMetadata) queueValue(key string, value interface{}, index string) error {
func (md *cacheMetadata) queueValue(key string, value any, index string) error {
v, err := metadata.NewValue(value)
if err != nil {
return errors.Wrap(err, "failed to create value")
@ -450,7 +450,7 @@ func (md *cacheMetadata) SetString(key, value string, index string) error {
return md.setValue(key, value, index)
}
func (md *cacheMetadata) setValue(key string, value interface{}, index string) error {
func (md *cacheMetadata) setValue(key string, value any, index string) error {
v, err := metadata.NewValue(value)
if err != nil {
return errors.Wrap(err, "failed to create value")


@ -432,7 +432,7 @@ type Value struct {
Index string `json:"index,omitempty"`
}
func NewValue(v interface{}) (*Value, error) {
func NewValue(v any) (*Value, error) {
dt, err := json.Marshal(v)
if err != nil {
return nil, errors.WithStack(err)
@ -440,7 +440,7 @@ func NewValue(v interface{}) (*Value, error) {
return &Value{Value: json.RawMessage(dt)}, nil
}
func (v *Value) Unmarshal(target interface{}) error {
func (v *Value) Unmarshal(target any) error {
return errors.WithStack(json.Unmarshal(v.Value, target))
}

cache/refs.go vendored

@ -6,6 +6,7 @@ import (
"maps"
"os"
"path/filepath"
"slices"
"strings"
"sync"
"time"
@ -1714,14 +1715,8 @@ func (sm *sharableMountable) Mount() (_ []mount.Mount, _ func() error, retErr er
release()
}
}()
var isOverlay bool
for _, m := range mounts {
if overlay.IsOverlayMountType(m) {
isOverlay = true
break
}
}
if !isOverlay {
if !slices.ContainsFunc(mounts, overlay.IsOverlayMountType) {
// Don't need temporary mount wrapper for non-overlayfs mounts
return mounts, release, nil
}

cache/remote.go vendored

@ -5,6 +5,7 @@ import (
"fmt"
"maps"
"net/url"
"slices"
"strings"
"github.com/containerd/containerd/v2/core/content"
@ -199,14 +200,7 @@ func (sr *immutableRef) getRemote(ctx context.Context, createIfNeeded bool, refC
if existings, ok := desc.Annotations[dslKey]; ok {
existingRepos = strings.Split(existings, ",")
}
addNewRepo := true
for _, existing := range existingRepos {
if existing == repo {
addNewRepo = false
break
}
}
if addNewRepo {
if !slices.Contains(existingRepos, repo) {
existingRepos = append(existingRepos, repo)
}
desc.Annotations[dslKey] = strings.Join(existingRepos, ",")
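The getRemote hunk above replaces a found-flag-and-break loop with slices.Contains. The same shape in isolation, with hypothetical values:

package main

import (
	"fmt"
	"slices"
)

func main() {
	existingRepos := []string{"docker.io/library/busybox"}
	repo := "docker.io/library/alpine"
	if !slices.Contains(existingRepos, repo) { // before: addNewRepo flag + break
		existingRepos = append(existingRepos, repo)
	}
	fmt.Println(existingRepos)
}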


@ -155,9 +155,9 @@ func testBasicGhaCacheImportExportExtraTimeout(t *testing.T, sb integration.Sand
}
func ensurePruneAll(t *testing.T, c *client.Client, sb integration.Sandbox) {
for i := 0; i < 2; i++ {
for i := range 2 {
require.NoError(t, c.Prune(sb.Context(), nil, client.PruneAll))
for j := 0; j < 20; j++ {
for range 20 {
du, err := c.DiskUsage(sb.Context())
require.NoError(t, err)
if len(du) == 0 {


@ -106,13 +106,7 @@ func (ce *exporter) ExportForLayers(ctx context.Context, layers []digest.Digest)
if len(resultBlobs) <= len(layers) {
match = true
for k, resultBlob := range resultBlobs {
matchesBlob := false
for _, layerBlob := range layerBlobDigests[k] {
if layerBlob == resultBlob {
matchesBlob = true
break
}
}
matchesBlob := slices.Contains(layerBlobDigests[k], resultBlob)
if !matchesBlob {
match = false
break


@ -214,7 +214,7 @@ func (e *exporter) Finalize(ctx context.Context) (map[string]string, error) {
close(tasks)
}()
for k := 0; k < e.config.UploadParallelism; k++ {
for range e.config.UploadParallelism {
eg.Go(func() error {
for index := range tasks {
blob := cacheConfig.Layers[index].Blob


@ -2,6 +2,7 @@ package cacheimport
import (
"context"
"slices"
"time"
"github.com/moby/buildkit/identity"
@ -194,10 +195,8 @@ func (cs *cacheKeyStorage) HasLink(id string, link solver.CacheInfoLink, target
selector: link.Selector.String(),
}
if it, ok := cs.byID[id]; ok {
for _, id := range it.links[l] {
if id == target {
return true
}
if slices.Contains(it.links[l], target) {
return true
}
}
return false


@ -15,12 +15,12 @@ import (
)
func NewCacheChains() *CacheChains {
return &CacheChains{visited: map[interface{}]struct{}{}}
return &CacheChains{visited: map[any]struct{}{}}
}
type CacheChains struct {
items []*item
visited map[interface{}]struct{}
visited map[any]struct{}
}
var _ solver.CacheExporterTarget = &CacheChains{}


@ -113,7 +113,7 @@ func sortConfig(cc *CacheConfig) {
}
func outputKey(dgst digest.Digest, idx int) digest.Digest {
return digest.FromBytes([]byte(fmt.Sprintf("%s@%d", dgst, idx)))
return digest.FromBytes(fmt.Appendf(nil, "%s@%d", dgst, idx))
}
type nlink struct {
@ -232,7 +232,7 @@ func normalizeItem(it *item, state *normalizeState) (*item, error) {
} else {
// keep tmp IDs deterministic
state.next++
id = digest.FromBytes([]byte(fmt.Sprintf("%d", state.next)))
id = digest.FromBytes(fmt.Appendf(nil, "%d", state.next))
state.byKey[id] = it
it.links = make([]map[link]struct{}, len(it.links))
for i := range it.links {
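outputKey and the tmp-ID generation above move from []byte(fmt.Sprintf(...)) to fmt.Appendf(nil, ...), which formats straight into a byte slice (Go 1.19+) instead of building a string and copying it. The two spellings produce identical bytes:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	dgst, idx := "sha256:abcd", 1
	a := []byte(fmt.Sprintf("%s@%d", dgst, idx))
	b := fmt.Appendf(nil, "%s@%d", dgst, idx)
	fmt.Println(bytes.Equal(a, b)) // true
}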


@ -68,7 +68,7 @@ func TestClientGatewayIntegration(t *testing.T) {
testClientGatewayContainerSecurityModeCaps,
testClientGatewayContainerSecurityModeValidation,
), integration.WithMirroredImages(integration.OfficialImages("busybox:latest")),
integration.WithMatrix("secmode", map[string]interface{}{
integration.WithMatrix("secmode", map[string]any{
"sandbox": securitySandbox,
"insecure": securityInsecure,
}),
@ -79,7 +79,7 @@ func TestClientGatewayIntegration(t *testing.T) {
testClientGatewayContainerHostNetworkingValidation,
),
integration.WithMirroredImages(integration.OfficialImages("busybox:latest")),
integration.WithMatrix("netmode", map[string]interface{}{
integration.WithMatrix("netmode", map[string]any{
"default": defaultNetwork,
"host": hostNetwork,
}),
@ -1061,7 +1061,7 @@ func newTestPrompt(ctx context.Context, t *testing.T, input io.Writer, output *b
func (p *testPrompt) String() string { return p.prompt }
func (p *testPrompt) SendExit(status int) {
p.input.Write([]byte(fmt.Sprintf("exit %d\n", status)))
p.input.Write(fmt.Appendf(nil, "exit %d\n", status))
}
func (p *testPrompt) Send(cmd string) {


@ -253,7 +253,7 @@ func testIntegration(t *testing.T, funcs ...func(t *testing.T, sb integration.Sa
testSecurityModeErrors,
),
mirrors,
integration.WithMatrix("secmode", map[string]interface{}{
integration.WithMatrix("secmode", map[string]any{
"sandbox": securitySandbox,
"insecure": securityInsecure,
}),
@ -263,7 +263,7 @@ func testIntegration(t *testing.T, funcs ...func(t *testing.T, sb integration.Sa
testHostNetworking,
),
mirrors,
integration.WithMatrix("netmode", map[string]interface{}{
integration.WithMatrix("netmode", map[string]any{
"default": defaultNetwork,
"host": hostNetwork,
}),
@ -273,7 +273,7 @@ func testIntegration(t *testing.T, funcs ...func(t *testing.T, sb integration.Sa
t,
integration.TestFuncs(testBridgeNetworkingDNSNoRootless),
mirrors,
integration.WithMatrix("netmode", map[string]interface{}{
integration.WithMatrix("netmode", map[string]any{
"dns": bridgeDNSNetwork,
}),
)
@ -8065,9 +8065,9 @@ func requiresLinux(t *testing.T) {
// cleanup cache because some records still haven't been released.
// This function tries to ensure prune by retrying it.
func ensurePruneAll(t *testing.T, c *Client, sb integration.Sandbox) {
for i := 0; i < 2; i++ {
for i := range 2 {
require.NoError(t, c.Prune(sb.Context(), nil, PruneAll))
for j := 0; j < 20; j++ {
for range 20 {
du, err := c.DiskUsage(sb.Context())
require.NoError(t, err)
if len(du) == 0 {
@ -8308,7 +8308,7 @@ func testParallelLocalBuilds(t *testing.T, sb integration.Sandbox) {
eg, ctx := errgroup.WithContext(ctx)
for i := 0; i < 3; i++ {
for i := range 3 {
func(i int) {
eg.Go(func() error {
fn := fmt.Sprintf("test%d", i)
@ -8944,7 +8944,7 @@ func testExportAttestations(t *testing.T, sb integration.Sandbox, ociArtifact bo
// build image
st := llb.Scratch().File(
llb.Mkfile("/greeting", 0600, []byte(fmt.Sprintf("hello %s!", pk))),
llb.Mkfile("/greeting", 0600, fmt.Appendf(nil, "hello %s!", pk)),
)
def, err := st.Marshal(ctx)
if err != nil {
@ -9056,7 +9056,7 @@ func testExportAttestations(t *testing.T, sb integration.Sandbox, ociArtifact bo
require.NotNil(t, img)
require.Equal(t, pk, platforms.Format(*img.Desc.Platform))
require.Equal(t, 1, len(img.Layers))
require.Equal(t, []byte(fmt.Sprintf("hello %s!", pk)), img.Layers[0]["greeting"].Data)
require.Equal(t, fmt.Appendf(nil, "hello %s!", pk), img.Layers[0]["greeting"].Data)
bases = append(bases, img)
}
@ -9103,7 +9103,7 @@ func testExportAttestations(t *testing.T, sb integration.Sandbox, ociArtifact bo
require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType)
require.Equal(t, map[string]interface{}{"success": true}, attest.Predicate)
require.Equal(t, map[string]any{"success": true}, attest.Predicate)
subjects := []intoto.Subject{
{
Name: purls[targets[0]],
@ -9174,7 +9174,7 @@ func testExportAttestations(t *testing.T, sb integration.Sandbox, ociArtifact bo
require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType)
require.Equal(t, map[string]interface{}{"success": true}, attest.Predicate)
require.Equal(t, map[string]any{"success": true}, attest.Predicate)
require.Equal(t, []intoto.Subject{{
Name: "greeting",
@ -9232,7 +9232,7 @@ func testExportAttestations(t *testing.T, sb integration.Sandbox, ociArtifact bo
require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType)
require.Equal(t, map[string]interface{}{"success": true}, attest.Predicate)
require.Equal(t, map[string]any{"success": true}, attest.Predicate)
require.Equal(t, []intoto.Subject{{
Name: "greeting",
@ -9287,7 +9287,7 @@ func testAttestationDefaultSubject(t *testing.T, sb integration.Sandbox) {
// build image
st := llb.Scratch().File(
llb.Mkfile("/greeting", 0600, []byte(fmt.Sprintf("hello %s!", pk))),
llb.Mkfile("/greeting", 0600, fmt.Appendf(nil, "hello %s!", pk)),
)
def, err := st.Marshal(ctx)
if err != nil {
@ -9383,7 +9383,7 @@ func testAttestationDefaultSubject(t *testing.T, sb integration.Sandbox) {
require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType)
require.Equal(t, map[string]interface{}{"success": true}, attest.Predicate)
require.Equal(t, map[string]any{"success": true}, attest.Predicate)
name := fmt.Sprintf("pkg:docker/%s/buildkit/testattestationsemptysubject@latest?platform=%s", url.QueryEscape(registry), url.QueryEscape(platforms.Format(ps[i])))
subjects := []intoto.Subject{{
@ -9423,7 +9423,7 @@ func testAttestationBundle(t *testing.T, sb integration.Sandbox) {
// build image
st := llb.Scratch().File(
llb.Mkfile("/greeting", 0600, []byte(fmt.Sprintf("hello %s!", pk))),
llb.Mkfile("/greeting", 0600, fmt.Appendf(nil, "hello %s!", pk)),
)
def, err := st.Marshal(ctx)
if err != nil {
@ -9450,7 +9450,7 @@ func testAttestationBundle(t *testing.T, sb integration.Sandbox) {
Type: intoto.StatementInTotoV01,
PredicateType: "https://example.com/attestations/v1.0",
},
Predicate: map[string]interface{}{
Predicate: map[string]any{
"foo": "1",
},
}
@ -9535,7 +9535,7 @@ func testAttestationBundle(t *testing.T, sb integration.Sandbox) {
require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType)
require.Equal(t, map[string]interface{}{"foo": "1"}, attest.Predicate)
require.Equal(t, map[string]any{"foo": "1"}, attest.Predicate)
name := fmt.Sprintf("pkg:docker/%s/buildkit/testattestationsbundle@latest?platform=%s", url.QueryEscape(registry), url.QueryEscape(platforms.Format(ps[i])))
subjects := []intoto.Subject{{
Name: name,
@ -9765,7 +9765,7 @@ EOF
require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
require.Equal(t, intoto.PredicateSPDX, attest.PredicateType)
require.Subset(t, attest.Predicate, map[string]interface{}{"name": "frontend"})
require.Subset(t, attest.Predicate, map[string]any{"name": "frontend"})
// test the specified fallback scanner
target = registry + "/buildkit/testsbom3:latest"
@ -9797,7 +9797,7 @@ EOF
require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
require.Equal(t, intoto.PredicateSPDX, attest.PredicateType)
require.Subset(t, attest.Predicate, map[string]interface{}{"name": "fallback"})
require.Subset(t, attest.Predicate, map[string]any{"name": "fallback"})
// test the builtin frontend scanner and the specified fallback scanner together
target = registry + "/buildkit/testsbom3:latest"
@ -9829,7 +9829,7 @@ EOF
require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
require.Equal(t, intoto.PredicateSPDX, attest.PredicateType)
require.Subset(t, attest.Predicate, map[string]interface{}{"name": "frontend"})
require.Subset(t, attest.Predicate, map[string]any{"name": "frontend"})
// test configuring the scanner (simple)
target = registry + "/buildkit/testsbom4:latest"
@ -9861,8 +9861,8 @@ EOF
require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
require.Equal(t, intoto.PredicateSPDX, attest.PredicateType)
require.Subset(t, attest.Predicate, map[string]interface{}{
"extraParams": map[string]interface{}{"ARG1": "foo", "ARG2": "bar"},
require.Subset(t, attest.Predicate, map[string]any{
"extraParams": map[string]any{"ARG1": "foo", "ARG2": "bar"},
})
// test configuring the scanner (complex)
@ -9895,8 +9895,8 @@ EOF
require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
require.Equal(t, intoto.PredicateSPDX, attest.PredicateType)
require.Subset(t, attest.Predicate, map[string]interface{}{
"extraParams": map[string]interface{}{"ARG1": "foo", "ARG2": "hello,world"},
require.Subset(t, attest.Predicate, map[string]any{
"extraParams": map[string]any{"ARG1": "foo", "ARG2": "hello,world"},
})
}
@ -10065,7 +10065,7 @@ EOF
require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
require.Equal(t, intoto.PredicateSPDX, attest.PredicateType)
require.Subset(t, attest.Predicate, map[string]interface{}{"name": "fallback"})
require.Subset(t, attest.Predicate, map[string]any{"name": "fallback"})
}
func testSBOMSupplements(t *testing.T, sb integration.Sandbox) {
@ -11025,7 +11025,7 @@ func testLLBMountPerformance(t *testing.T, sb integration.Sandbox) {
mntInput := llb.Image("busybox:latest")
st := llb.Image("busybox:latest")
var mnts []llb.State
for i := 0; i < 20; i++ {
for range 20 {
execSt := st.Run(
llb.Args([]string{"true"}),
)
@ -11058,7 +11058,7 @@ func testLayerLimitOnMounts(t *testing.T, sb integration.Sandbox) {
const numLayers = 110
for i := 0; i < numLayers; i++ {
for range numLayers {
base = base.Run(llb.Shlex("sh -c 'echo hello >> /hello'")).Root()
}
@ -11085,7 +11085,7 @@ func testClientCustomGRPCOpts(t *testing.T, sb integration.Sandbox) {
ctx context.Context,
method string,
req,
reply interface{},
reply any,
cc *grpc.ClientConn,
invoker grpc.UnaryInvoker,
opts ...grpc.CallOption,


@ -49,7 +49,7 @@ func TestDefinitionEquivalence(t *testing.T) {
require.Equal(t, len(def.Def), len(def2.Def))
require.Equal(t, len(def.Metadata), len(def2.Metadata))
for i := 0; i < len(def.Def); i++ {
for i := range def.Def {
res := bytes.Compare(def.Def[i], def2.Def[i])
require.Equal(t, 0, res)
}
@ -121,9 +121,9 @@ func TestDefinitionInputCache(t *testing.T) {
// make sure that walking vertices in parallel doesn't cause panic
var all []RunOption
for i := 0; i < 100; i++ {
for i := range 100 {
var sts []RunOption
for j := 0; j < 100; j++ {
for j := range 100 {
sts = append(sts, AddMount("/mnt", Scratch().Run(Shlex(fmt.Sprintf("%d-%d", i, j))).Root()))
}
all = append(all, AddMount("/mnt", Scratch().Run(append([]RunOption{Shlex("args")}, sts...)...).Root()))
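Alongside range-over-int, this file also shows the classic index loop over a slice (for i := 0; i < len(def.Def); i++) becoming for i := range def.Def. Sketch:

package main

import "fmt"

func main() {
	def := [][]byte{[]byte("a"), []byte("b")}
	for i := range def { // replaces: for i := 0; i < len(def); i++
		fmt.Println(i, string(def[i]))
	}
}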


@ -614,7 +614,7 @@ func Shlex(str string) RunOption {
})
}
func Shlexf(str string, v ...interface{}) RunOption {
func Shlexf(str string, v ...any) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
ei.State = shlexf(str, true, v...)(ei.State)
})


@ -59,7 +59,7 @@ func TestExecOpMarshalConsistency(t *testing.T) {
AddMount("/b", Scratch().File(Mkfile("file1", 0644, []byte("file1 contents")))),
).AddMount("/a", Scratch().File(Mkfile("file2", 0644, []byte("file2 contents"))))
for i := 0; i < 100; i++ {
for range 100 {
def, err := st.Marshal(context.TODO())
require.NoError(t, err)


@ -219,7 +219,7 @@ func TestFileSymlink(t *testing.T) {
{Oldpath: "/src/dir/subdir", Newpath: "/src/dir/subdir/nested"},
}
for i := 0; i < numActions; i++ {
for i := range numActions {
expectedOutput := -1
if i == numActions-1 {
expectedOutput = 0
@ -788,7 +788,7 @@ func TestFileOpMarshalConsistency(t *testing.T) {
File(Copy(f1, "/foo", "/bar")).
File(Copy(f2, "/a", "/b"))
for i := 0; i < 100; i++ {
for range 100 {
def, err := st.Marshal(context.TODO())
require.NoError(t, err)
@ -803,7 +803,7 @@ func TestFileOpMarshalConsistency(t *testing.T) {
func TestParallelMarshal(t *testing.T) {
st := Scratch().File(Mkfile("/tmp", 0644, []byte("tmp 1")))
eg, ctx := errgroup.WithContext(context.Background())
for i := 0; i < 100; i++ {
for range 100 {
eg.Go(func() error {
_, err := st.Marshal(ctx)
return err


@ -2,6 +2,7 @@ package llb
import (
"io"
"slices"
"sync"
cerrdefs "github.com/containerd/errdefs"
@ -84,7 +85,7 @@ func ReadFrom(r io.Reader) (*Definition, error) {
func MarshalConstraints(base, override *Constraints) (*pb.Op, *pb.OpMetadata) {
c := *base
c.WorkerConstraints = append([]string{}, c.WorkerConstraints...)
c.WorkerConstraints = slices.Clone(c.WorkerConstraints)
if p := override.Platform; p != nil {
c.Platform = p
@ -105,7 +106,7 @@ func MarshalConstraints(base, override *Constraints) (*pb.Op, *pb.OpMetadata) {
OSVersion: c.Platform.OSVersion,
}
if c.Platform.OSFeatures != nil {
opPlatform.OSFeatures = append([]string{}, c.Platform.OSFeatures...)
opPlatform.OSFeatures = slices.Clone(c.Platform.OSFeatures)
}
return &pb.Op{
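One subtlety worth knowing when reviewing the slices.Clone rewrites of append([]string{}, xs...): Clone of a nil slice stays nil, while the append spelling yields an empty non-nil slice. The OSFeatures sites above are guarded by != nil checks, so the difference is moot there; elsewhere it only matters if callers distinguish nil from empty:

package main

import (
	"fmt"
	"slices"
)

func main() {
	var src []string                // nil
	a := append([]string{}, src...) // empty but non-nil
	b := slices.Clone(src)          // stays nil
	fmt.Println(a == nil, b == nil) // false true
}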


@ -35,7 +35,7 @@ var (
// AddEnvf is the same as [AddEnv] but allows for a format string.
// This is the equivalent of `[State.AddEnvf]`
func AddEnvf(key, value string, v ...interface{}) StateOption {
func AddEnvf(key, value string, v ...any) StateOption {
return addEnvf(key, value, true, v...)
}
@ -46,12 +46,12 @@ func AddEnv(key, value string) StateOption {
return addEnvf(key, value, false)
}
func addEnvf(key, value string, replace bool, v ...interface{}) StateOption {
func addEnvf(key, value string, replace bool, v ...any) StateOption {
if replace {
value = fmt.Sprintf(value, v...)
}
return func(s State) State {
return s.withValue(keyEnv, func(ctx context.Context, c *Constraints) (interface{}, error) {
return s.withValue(keyEnv, func(ctx context.Context, c *Constraints) (any, error) {
env, err := getEnv(s)(ctx, c)
if err != nil {
return nil, err
@ -69,16 +69,16 @@ func Dir(str string) StateOption {
}
// Dirf is the same as [Dir] but allows for a format string.
func Dirf(str string, v ...interface{}) StateOption {
func Dirf(str string, v ...any) StateOption {
return dirf(str, true, v...)
}
func dirf(value string, replace bool, v ...interface{}) StateOption {
func dirf(value string, replace bool, v ...any) StateOption {
if replace {
value = fmt.Sprintf(value, v...)
}
return func(s State) State {
return s.withValue(keyDir, func(ctx context.Context, c *Constraints) (interface{}, error) {
return s.withValue(keyDir, func(ctx context.Context, c *Constraints) (any, error) {
if !path.IsAbs(value) {
prev, err := getDir(s)(ctx, c)
if err != nil {
@ -213,7 +213,7 @@ func args(args ...string) StateOption {
}
}
func shlexf(str string, replace bool, v ...interface{}) StateOption {
func shlexf(str string, replace bool, v ...any) StateOption {
if replace {
str = fmt.Sprintf(str, v...)
}
@ -248,7 +248,7 @@ func getPlatform(s State) func(context.Context, *Constraints) (*ocispecs.Platfor
func extraHost(host string, ip net.IP) StateOption {
return func(s State) State {
return s.withValue(keyExtraHost, func(ctx context.Context, c *Constraints) (interface{}, error) {
return s.withValue(keyExtraHost, func(ctx context.Context, c *Constraints) (any, error) {
v, err := getExtraHosts(s)(ctx, c)
if err != nil {
return nil, err
@ -278,7 +278,7 @@ type HostIP struct {
func ulimit(name UlimitName, soft int64, hard int64) StateOption {
return func(s State) State {
return s.withValue(keyUlimit, func(ctx context.Context, c *Constraints) (interface{}, error) {
return s.withValue(keyUlimit, func(ctx context.Context, c *Constraints) (any, error) {
v, err := getUlimit(s)(ctx, c)
if err != nil {
return nil, err


@ -6,6 +6,7 @@ import (
"fmt"
"maps"
"net"
"slices"
"strings"
"github.com/containerd/platforms"
@ -59,8 +60,8 @@ func NewState(o Output) State {
type State struct {
out Output
prev *State
key interface{}
value func(context.Context, *Constraints) (interface{}, error)
key any
value func(context.Context, *Constraints) (any, error)
opts []ConstraintsOpt
async *asyncState
}
@ -76,13 +77,13 @@ func (s State) ensurePlatform() State {
return s
}
func (s State) WithValue(k, v interface{}) State {
return s.withValue(k, func(context.Context, *Constraints) (interface{}, error) {
func (s State) WithValue(k, v any) State {
return s.withValue(k, func(context.Context, *Constraints) (any, error) {
return v, nil
})
}
func (s State) withValue(k interface{}, v func(context.Context, *Constraints) (interface{}, error)) State {
func (s State) withValue(k any, v func(context.Context, *Constraints) (any, error)) State {
return State{
out: s.Output(),
prev: &s, // doesn't need to be original pointer
@ -91,7 +92,7 @@ func (s State) withValue(k interface{}, v func(context.Context, *Constraints) (i
}
}
func (s State) Value(ctx context.Context, k interface{}, co ...ConstraintsOpt) (interface{}, error) {
func (s State) Value(ctx context.Context, k any, co ...ConstraintsOpt) (any, error) {
c := &Constraints{}
for _, f := range co {
f.SetConstraintsOption(c)
@ -99,12 +100,12 @@ func (s State) Value(ctx context.Context, k interface{}, co ...ConstraintsOpt) (
return s.getValue(k)(ctx, c)
}
func (s State) getValue(k interface{}) func(context.Context, *Constraints) (interface{}, error) {
func (s State) getValue(k any) func(context.Context, *Constraints) (any, error) {
if s.key == k {
return s.value
}
if s.async != nil {
return func(ctx context.Context, c *Constraints) (interface{}, error) {
return func(ctx context.Context, c *Constraints) (any, error) {
target, err := s.async.Do(ctx, c)
if err != nil {
return nil, err
@ -271,7 +272,7 @@ func (s State) WithImageConfig(c []byte) (State, error) {
OSVersion: img.OSVersion,
}
if img.OSFeatures != nil {
plat.OSFeatures = append([]string{}, img.OSFeatures...)
plat.OSFeatures = slices.Clone(img.OSFeatures)
}
s = s.Platform(plat)
}
@ -321,7 +322,7 @@ func (s State) AddEnv(key, value string) State {
}
// AddEnvf is the same as [State.AddEnv] but with a format string.
func (s State) AddEnvf(key, value string, v ...interface{}) State {
func (s State) AddEnvf(key, value string, v ...any) State {
return AddEnvf(key, value, v...)(s)
}
@ -332,7 +333,7 @@ func (s State) Dir(str string) State {
}
// Dirf is the same as [State.Dir] but with a format string.
func (s State) Dirf(str string, v ...interface{}) State {
func (s State) Dirf(str string, v ...any) State {
return Dirf(str, v...)(s)
}
@ -608,7 +609,7 @@ func WithCustomName(name string) ConstraintsOpt {
})
}
func WithCustomNamef(name string, a ...interface{}) ConstraintsOpt {
func WithCustomNamef(name string, a ...any) ConstraintsOpt {
return WithCustomName(fmt.Sprintf(name, a...))
}
@ -746,6 +747,6 @@ func Require(filters ...string) ConstraintsOpt {
})
}
func nilValue(context.Context, *Constraints) (interface{}, error) {
func nilValue(context.Context, *Constraints) (any, error) {
return nil, nil
}


@ -433,14 +433,14 @@ func buildAction(clicontext *cli.Context) error {
}
func writeMetadataFile(filename string, exporterResponse map[string]string) error {
out := make(map[string]interface{})
out := make(map[string]any)
for k, v := range exporterResponse {
dt, err := base64.StdEncoding.DecodeString(v)
if err != nil {
out[k] = v
continue
}
var raw map[string]interface{}
var raw map[string]any
if err = json.Unmarshal(dt, &raw); err != nil || len(raw) == 0 {
out[k] = v
continue
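writeMetadataFile above decodes each exporter value and, when it is valid base64-encoded JSON, unmarshals it into a map[string]any. The decode path in isolation, reusing the sample value from the test file below:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	v := "eyJmb28iOm51bGwsImJhciI6ImJheiJ9" // {"foo":null,"bar":"baz"}
	dt, err := base64.StdEncoding.DecodeString(v)
	if err != nil {
		panic(err)
	}
	var raw map[string]any // JSON objects of unknown shape decode into map[string]any
	if err := json.Unmarshal(dt, &raw); err != nil {
		panic(err)
	}
	fmt.Println(raw["bar"]) // baz
}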


@ -140,7 +140,7 @@ func testBuildMetadataFile(t *testing.T, sb integration.Sandbox) {
metadataBytes, err := os.ReadFile(metadataFile)
require.NoError(t, err)
var metadata map[string]interface{}
var metadata map[string]any
err = json.Unmarshal(metadataBytes, &metadata)
require.NoError(t, err)


@ -43,7 +43,7 @@ func TestWriteMetadataFile(t *testing.T) {
cases := []struct {
name string
exporterResponse map[string]string
expected map[string]interface{}
expected map[string]any
}{
{
name: "common",
@ -52,10 +52,10 @@ func TestWriteMetadataFile(t *testing.T) {
"containerimage.descriptor": "eyJtZWRpYVR5cGUiOiJhcHBsaWNhdGlvbi92bmQub2NpLmltYWdlLm1hbmlmZXN0LnYxK2pzb24iLCJkaWdlc3QiOiJzaGEyNTY6MTlmZmVhYjZmOGJjOTI5M2FjMmMzZmRmOTRlYmUyODM5NjI1NGM5OTNhZWEwYjVhNTQyY2ZiMDJlMDg4M2ZhMyIsInNpemUiOjUwNiwiYW5ub3RhdGlvbnMiOnsib3JnLm9wZW5jb250YWluZXJzLmltYWdlLmNyZWF0ZWQiOiIyMDIyLTAyLTA4VDE5OjIxOjAzWiJ9fQ==", // {"mediaType":"application/vnd.oci.image.manifest.v1+json","digest":"sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3","size":506,"annotations":{"org.opencontainers.image.created":"2022-02-08T19:21:03Z"}}
"containerimage.digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3",
},
expected: map[string]interface{}{
expected: map[string]any{
"containerimage.config.digest": "sha256:2937f66a9722f7f4a2df583de2f8cb97fc9196059a410e7f00072fc918930e66",
"containerimage.descriptor": map[string]interface{}{
"annotations": map[string]interface{}{
"containerimage.descriptor": map[string]any{
"annotations": map[string]any{
"org.opencontainers.image.created": "2022-02-08T19:21:03Z",
},
"digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3",
@ -71,7 +71,7 @@ func TestWriteMetadataFile(t *testing.T) {
"key": "MTI=", // 12
"containerimage.digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3",
},
expected: map[string]interface{}{
expected: map[string]any{
"key": "MTI=",
"containerimage.digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3",
},
@ -82,7 +82,7 @@ func TestWriteMetadataFile(t *testing.T) {
"key": "e30=", // {}
"containerimage.digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3",
},
expected: map[string]interface{}{
expected: map[string]any{
"key": "e30=",
"containerimage.digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3",
},
@ -93,7 +93,7 @@ func TestWriteMetadataFile(t *testing.T) {
"key": "W10=", // []
"containerimage.digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3",
},
expected: map[string]interface{}{
expected: map[string]any{
"key": "W10=",
"containerimage.digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3",
},
@ -104,8 +104,8 @@ func TestWriteMetadataFile(t *testing.T) {
"key": "eyJmb28iOm51bGwsImJhciI6ImJheiJ9", // {"foo":null,"bar":"baz"}
"containerimage.digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3",
},
expected: map[string]interface{}{
"key": map[string]interface{}{
expected: map[string]any{
"key": map[string]any{
"foo": nil,
"bar": "baz",
},
@ -121,7 +121,7 @@ func TestWriteMetadataFile(t *testing.T) {
require.NoError(t, writeMetadataFile(fname, tt.exporterResponse))
current, err := os.ReadFile(fname)
require.NoError(t, err)
var raw map[string]interface{}
var raw map[string]any
require.NoError(t, json.Unmarshal(current, &raw))
require.Equal(t, tt.expected, raw)
})


@ -114,7 +114,7 @@ func ParseTemplate(format string) (*template.Template, error) {
}
// funcs is from https://github.com/docker/cli/blob/v20.10.12/templates/templates.go#L12-L20 (Apache License 2.0)
funcs := template.FuncMap{
"json": func(v interface{}) string {
"json": func(v any) string {
buf := &bytes.Buffer{}
enc := json.NewEncoder(buf)
enc.SetEscapeHTML(false)


@ -75,7 +75,7 @@ func diskUsage(clicontext *cli.Context) error {
return nil
}
func printKV(w io.Writer, k string, v interface{}) {
func printKV(w io.Writer, k string, v any) {
fmt.Fprintf(w, "%s:\t%v\n", k, v)
}


@ -121,7 +121,7 @@ type OCIConfig struct {
// StargzSnapshotterConfig is configuration for stargz snapshotter.
// We use a generic map[string]interface{} in order to remove the dependency
// on stargz snapshotter's config pkg from our config.
StargzSnapshotterConfig map[string]interface{} `toml:"stargzSnapshotter"`
StargzSnapshotterConfig map[string]any `toml:"stargzSnapshotter"`
// ApparmorProfile is the name of the apparmor profile that should be used to constrain build containers.
// The profile should already be loaded (by a higher level system) before creating a worker.
@ -160,9 +160,9 @@ type ContainerdConfig struct {
}
type ContainerdRuntime struct {
Name string `toml:"name"`
Path string `toml:"path"`
Options map[string]interface{} `toml:"options"`
Name string `toml:"name"`
Path string `toml:"path"`
Options map[string]any `toml:"options"`
}
type GCPolicy struct {


@ -9,7 +9,7 @@ import (
)
// getRuntimeOptionsType gets empty runtime options by the runtime type name.
func getRuntimeOptionsType(t string) interface{} {
func getRuntimeOptionsType(t string) any {
if t == plugins.RuntimeRuncV2 {
return &runcoptions.Options{}
}


@ -7,7 +7,7 @@ import (
)
// getRuntimeOptionsType gets empty runtime options by the runtime type name.
func getRuntimeOptionsType(t string) interface{} {
func getRuntimeOptionsType(t string) any {
if t == plugins.RuntimeRunhcsV1 {
return &runhcsoptions.Options{}
}


@ -74,7 +74,7 @@ func xmain() error {
return nil
}
func writeJSON(f string, x interface{}) error {
func writeJSON(f string, x any) error {
b, err := json.Marshal(x)
if err != nil {
return err


@ -49,7 +49,7 @@ func makeHostsFile(stateDir string, extraHosts []executor.HostIP, idmap *idtools
}
for _, h := range extraHosts {
if _, err := b.Write([]byte(fmt.Sprintf("%s\t%s\n", h.IP.String(), h.Host))); err != nil {
if _, err := b.Write(fmt.Appendf(nil, "%s\t%s\n", h.IP.String(), h.Host)); err != nil {
return "", nil, errors.WithStack(err)
}
}


@ -111,7 +111,7 @@ func TestResolvConf(t *testing.T) {
t.Cleanup(func() {
resolvconfPath = oldResolvconfPath
})
for i := 0; i < tt.execution; i++ {
for i := range tt.execution {
resolvconfPath = func(netMode pb.NetMode) string {
if tt.dt == nil {
return "no-such-file"


@ -3,6 +3,7 @@ package oci
import (
"context"
"os"
"slices"
"strconv"
"strings"
@ -112,10 +113,8 @@ func setProcess(s *containerdoci.Spec) {
// From https://github.com/containerd/containerd/blob/v1.7.0-beta.4/oci/spec_opts.go#L124-L133
func ensureAdditionalGids(s *containerdoci.Spec) {
setProcess(s)
for _, f := range s.Process.User.AdditionalGids {
if f == s.Process.User.GID {
return
}
if slices.Contains(s.Process.User.AdditionalGids, s.Process.User.GID) {
return
}
s.Process.User.AdditionalGids = append([]uint32{s.Process.User.GID}, s.Process.User.AdditionalGids...)
}


@ -43,7 +43,7 @@ func CheckInvalidPlatforms[T comparable](ctx context.Context, res *result.Result
p, err := platforms.Parse(v)
if err != nil {
warnings = append(warnings, client.VertexWarning{
Short: []byte(fmt.Sprintf("Invalid platform result requested %q: %s", v, err.Error())),
Short: fmt.Appendf(nil, "Invalid platform result requested %q: %s", v, err.Error()),
})
}
p = platforms.Normalize(p)
@ -51,7 +51,7 @@ func CheckInvalidPlatforms[T comparable](ctx context.Context, res *result.Result
_, ok := reqMap[formatted]
if ok {
warnings = append(warnings, client.VertexWarning{
Short: []byte(fmt.Sprintf("Duplicate platform result requested %q", v)),
Short: fmt.Appendf(nil, "Duplicate platform result requested %q", v),
})
}
reqMap[formatted] = struct{}{}
@ -80,7 +80,7 @@ func CheckInvalidPlatforms[T comparable](ctx context.Context, res *result.Result
if _, ok := reqMap[platforms.FormatAll(pp)]; !ok {
return []client.VertexWarning{{
Short: []byte(fmt.Sprintf("Requested platform %q does not match result platform %q", req.Platforms[0], platforms.FormatAll(pp))),
Short: fmt.Appendf(nil, "Requested platform %q does not match result platform %q", req.Platforms[0], platforms.FormatAll(pp)),
}}, nil
}
}
@ -107,7 +107,7 @@ func CheckInvalidPlatforms[T comparable](ctx context.Context, res *result.Result
if mismatch {
return []client.VertexWarning{{
Short: []byte(fmt.Sprintf("Requested platforms %s do not match result platforms %s", platformsString(reqList), platformsString(ps.Platforms))),
Short: fmt.Appendf(nil, "Requested platforms %s do not match result platforms %s", platformsString(reqList), platformsString(ps.Platforms)),
}}, nil
}


@ -503,7 +503,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
OSVersion: img.OSVersion,
}
if img.OSFeatures != nil {
d.platform.OSFeatures = append([]string{}, img.OSFeatures...)
d.platform.OSFeatures = slices.Clone(img.OSFeatures)
}
}
}
@ -812,7 +812,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS
target.image.OSVersion = platformOpt.targetPlatform.OSVersion
}
if platformOpt.targetPlatform.OSFeatures != nil {
target.image.OSFeatures = append([]string{}, platformOpt.targetPlatform.OSFeatures...)
target.image.OSFeatures = slices.Clone(platformOpt.targetPlatform.OSFeatures)
}
}
target.image.Platform = platforms.Normalize(target.image.Platform)
@ -1644,7 +1644,7 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error {
copyOpts = append(copyOpts, fileOpt...)
copyOpts = append(copyOpts, llb.ProgressGroup(pgID, pgName, true))
mergeOpts := append([]llb.ConstraintsOpt{}, fileOpt...)
mergeOpts := slices.Clone(fileOpt)
d.cmdIndex--
mergeOpts = append(mergeOpts, llb.ProgressGroup(pgID, pgName, false), llb.WithCustomName(prefixCommand(d, "LINK "+name, d.prefixPlatform, &platform, env)))
@ -1911,7 +1911,7 @@ func parseKeyValue(env string) (string, string) {
return parts[0], v
}
func dfCmd(cmd interface{}) llb.ConstraintsOpt {
func dfCmd(cmd any) llb.ConstraintsOpt {
// TODO: add fmt.Stringer to instructions.Command to remove interface{}
var cmdStr string
if cmd, ok := cmd.(fmt.Stringer); ok {
@ -2054,9 +2054,7 @@ func normalizeContextPaths(paths map[string]struct{}) []string {
pathSlice = append(pathSlice, path.Join(".", p))
}
sort.Slice(pathSlice, func(i, j int) bool {
return pathSlice[i] < pathSlice[j]
})
slices.Sort(pathSlice)
return pathSlice
}
@ -2107,7 +2105,7 @@ type mutableOutput struct {
func withShell(img dockerspec.DockerOCIImage, args []string) []string {
var shell []string
if len(img.Config.Shell) > 0 {
shell = append([]string{}, img.Config.Shell...)
shell = slices.Clone(img.Config.Shell)
} else {
shell = defaultShell(img.OS)
}
@ -2272,12 +2270,7 @@ func isEnabledForStage(stage string, value string) bool {
}
vv := strings.Split(value, ",")
for _, v := range vv {
if v == stage {
return true
}
}
return false
return slices.Contains(vv, stage)
}
func isSelfConsistentCasing(s string) bool {
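normalizeContextPaths above drops a sort.Slice call with a hand-written less function in favor of slices.Sort, which handles any ordered element type directly:

package main

import (
	"fmt"
	"slices"
)

func main() {
	pathSlice := []string{"b", "a", "c"}
	// Before: sort.Slice(pathSlice, func(i, j int) bool { return pathSlice[i] < pathSlice[j] })
	slices.Sort(pathSlice)
	fmt.Println(pathSlice) // [a b c]
}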


@ -1,6 +1,8 @@
package dockerfile2llb
import (
"slices"
"github.com/moby/buildkit/util/system"
dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
@ -9,10 +11,10 @@ import (
func clone(src dockerspec.DockerOCIImage) dockerspec.DockerOCIImage {
img := src
img.Config = src.Config
img.Config.Env = append([]string{}, src.Config.Env...)
img.Config.Cmd = append([]string{}, src.Config.Cmd...)
img.Config.Entrypoint = append([]string{}, src.Config.Entrypoint...)
img.Config.OnBuild = append([]string{}, src.Config.OnBuild...)
img.Config.Env = slices.Clone(src.Config.Env)
img.Config.Cmd = slices.Clone(src.Config.Cmd)
img.Config.Entrypoint = slices.Clone(src.Config.Entrypoint)
img.Config.OnBuild = slices.Clone(src.Config.OnBuild)
return img
}
@ -30,7 +32,7 @@ func emptyImage(platform ocispecs.Platform) dockerspec.DockerOCIImage {
img.OS = platform.OS
img.OSVersion = platform.OSVersion
if platform.OSFeatures != nil {
img.OSFeatures = append([]string{}, platform.OSFeatures...)
img.OSFeatures = slices.Clone(platform.OSFeatures)
}
img.Variant = platform.Variant
img.RootFS.Type = "layers"


@ -42,10 +42,10 @@ func testAddChecksum(t *testing.T, sb integration.Sandbox) {
defer c.Close()
t.Run("Valid", func(t *testing.T) {
dockerfile := []byte(fmt.Sprintf(`
dockerfile := fmt.Appendf(nil, `
FROM scratch
ADD --checksum=%s %s /tmp/foo
`, digest.FromBytes(resp.Content).String(), server.URL+"/foo"))
`, digest.FromBytes(resp.Content).String(), server.URL+"/foo")
dir := integration.Tmpdir(
t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
@ -59,12 +59,12 @@ ADD --checksum=%s %s /tmp/foo
require.NoError(t, err)
})
t.Run("DigestFromEnv", func(t *testing.T) {
dockerfile := []byte(fmt.Sprintf(`
dockerfile := fmt.Appendf(nil, `
FROM scratch
ENV DIGEST=%s
ENV LINK=%s
ADD --checksum=${DIGEST} ${LINK} /tmp/foo
`, digest.FromBytes(resp.Content).String(), server.URL+"/foo"))
`, digest.FromBytes(resp.Content).String(), server.URL+"/foo")
dir := integration.Tmpdir(
t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
@ -78,10 +78,10 @@ ADD --checksum=${DIGEST} ${LINK} /tmp/foo
require.NoError(t, err)
})
t.Run("DigestMismatch", func(t *testing.T) {
dockerfile := []byte(fmt.Sprintf(`
dockerfile := fmt.Appendf(nil, `
FROM scratch
ADD --checksum=%s %s /tmp/foo
`, digest.FromBytes(nil).String(), server.URL+"/foo"))
`, digest.FromBytes(nil).String(), server.URL+"/foo")
dir := integration.Tmpdir(
t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
@ -95,10 +95,10 @@ ADD --checksum=%s %s /tmp/foo
require.Error(t, err, "digest mismatch")
})
t.Run("DigestWithKnownButUnsupportedAlgoName", func(t *testing.T) {
dockerfile := []byte(fmt.Sprintf(`
dockerfile := fmt.Appendf(nil, `
FROM scratch
ADD --checksum=md5:7e55db001d319a94b0b713529a756623 %s /tmp/foo
`, server.URL+"/foo"))
`, server.URL+"/foo")
dir := integration.Tmpdir(
t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
@ -112,10 +112,10 @@ ADD --checksum=md5:7e55db001d319a94b0b713529a756623 %s /tmp/foo
require.Error(t, err, "unsupported digest algorithm")
})
t.Run("DigestWithUnknownAlgoName", func(t *testing.T) {
dockerfile := []byte(fmt.Sprintf(`
dockerfile := fmt.Appendf(nil, `
FROM scratch
ADD --checksum=unknown:%s %s /tmp/foo
`, digest.FromBytes(resp.Content).Encoded(), server.URL+"/foo"))
`, digest.FromBytes(resp.Content).Encoded(), server.URL+"/foo")
dir := integration.Tmpdir(
t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
@ -129,10 +129,10 @@ ADD --checksum=unknown:%s %s /tmp/foo
require.Error(t, err, "unsupported digest algorithm")
})
t.Run("DigestWithoutAlgoName", func(t *testing.T) {
dockerfile := []byte(fmt.Sprintf(`
dockerfile := fmt.Appendf(nil, `
FROM scratch
ADD --checksum=%s %s /tmp/foo
`, digest.FromBytes(resp.Content).Encoded(), server.URL+"/foo"))
`, digest.FromBytes(resp.Content).Encoded(), server.URL+"/foo")
dir := integration.Tmpdir(
t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
@ -147,10 +147,10 @@ ADD --checksum=%s %s /tmp/foo
})
t.Run("NonHTTPSource", func(t *testing.T) {
foo := []byte("local file")
dockerfile := []byte(fmt.Sprintf(`
dockerfile := fmt.Appendf(nil, `
FROM scratch
ADD --checksum=%s foo /tmp/foo
`, digest.FromBytes(foo).String()))
`, digest.FromBytes(foo).String())
dir := integration.Tmpdir(
t,
fstest.CreateFile("foo", foo, 0600),


@ -99,7 +99,7 @@ RUN cd /buildkit-chowned && \
require.NoError(t, err)
}
func applyTemplate(tmpl string, x interface{}) (string, error) {
func applyTemplate(tmpl string, x any) (string, error) {
var buf bytes.Buffer
parsed, err := template.New("").Parse(tmpl)
if err != nil {


@ -77,7 +77,7 @@ func testChmodNonOctal(t *testing.T, sb integration.Sandbox) {
verifyCommands += "RUN [ \"$(stat -c %A /actual/" + tc.dst + ")\" = \"$(stat -c %A /expected/" + tc.dst + ")\" ]\n"
}
dockerfile := []byte(fmt.Sprintf(`
dockerfile := fmt.Appendf(nil, `
FROM alpine as base
RUN <<eot
set -ex
@ -97,7 +97,7 @@ FROM base
COPY --from=result / /actual/
%s
`, expectedCommands, copyCommands, verifyCommands))
`, expectedCommands, copyCommands, verifyCommands)
dir := integration.Tmpdir(
t,


@ -646,11 +646,11 @@ EOF
}, nil)
require.NoError(t, err)
dockerfile = []byte(fmt.Sprintf(`
dockerfile = fmt.Appendf(nil, `
FROM %s AS base
FROM scratch
COPY --from=base /dest /dest
`, target))
`, target)
dir = integration.Tmpdir(
t,


@ -659,7 +659,7 @@ COPY Dockerfile /bar
`)
checkLinterWarnings(t, sb, &lintTestParams{Dockerfile: dockerfile})
dockerfile = []byte(fmt.Sprintf(
dockerfile = fmt.Appendf(nil,
`
FROM %s
RUN <<'EOT'
@ -668,7 +668,7 @@ EOT
`,
integration.UnixOrWindows("alpine", "nanoserver"),
integration.UnixOrWindows("env", "set"),
))
)
checkLinterWarnings(t, sb, &lintTestParams{Dockerfile: dockerfile})
}
@ -925,14 +925,14 @@ COPY Dockerfile .
osName := integration.UnixOrWindows("linux", "windows")
baseImg := integration.UnixOrWindows("busybox", "nanoserver")
dockerfile = []byte(fmt.Sprintf(
dockerfile = fmt.Appendf(nil,
`
ARG MY_OS=%s
ARG MY_ARCH=amd64
FROM --platform=%s/${MYARCH} %s
COPY Dockerfile .
`,
osName, osName, baseImg))
osName, osName, baseImg)
osStr := integration.UnixOrWindows("linux", "windows")
buildErr := fmt.Sprintf(
@ -954,13 +954,13 @@ COPY Dockerfile .
BuildErrLocation: 4,
})
dockerfile = []byte(fmt.Sprintf(
dockerfile = fmt.Appendf(nil,
`
ARG tag=latest
FROM %s:${tag}${version} AS b
COPY Dockerfile .
`,
baseImg))
baseImg)
checkLinterWarnings(t, sb, &lintTestParams{
Dockerfile: dockerfile,
Warnings: []expectedLintWarning{
@ -1014,7 +1014,7 @@ COPY Dockerfile${foo} .
checkLinterWarnings(t, sb, &lintTestParams{Dockerfile: dockerfile})
baseImg := integration.UnixOrWindows("alpine", "nanoserver")
dockerfile = []byte(fmt.Sprintf(
dockerfile = fmt.Appendf(nil,
`
FROM %s AS base
ARG foo=Dockerfile
@ -1022,25 +1022,25 @@ ARG foo=Dockerfile
FROM base
COPY $foo .
`,
baseImg))
baseImg)
checkLinterWarnings(t, sb, &lintTestParams{Dockerfile: dockerfile})
dockerfile = []byte(fmt.Sprintf(
dockerfile = fmt.Appendf(nil,
`
FROM %s
RUN echo $PATH
`,
baseImg))
baseImg)
checkLinterWarnings(t, sb, &lintTestParams{Dockerfile: dockerfile})
dockerfile = []byte(fmt.Sprintf(
dockerfile = fmt.Appendf(nil,
`
FROM %s
COPY $foo .
ARG foo=bar
RUN echo $foo
`,
baseImg))
baseImg)
checkLinterWarnings(t, sb, &lintTestParams{
Dockerfile: dockerfile,
Warnings: []expectedLintWarning{
@ -1055,7 +1055,7 @@ RUN echo $foo
},
})
dockerfile = []byte(fmt.Sprintf(
dockerfile = fmt.Appendf(nil,
`
FROM %s
ARG DIR_BINARIES=binaries/
@ -1063,7 +1063,7 @@ ARG DIR_ASSETS=assets/
ARG DIR_CONFIG=config/
COPY $DIR_ASSET .
`,
baseImg))
baseImg)
checkLinterWarnings(t, sb, &lintTestParams{
Dockerfile: dockerfile,
Warnings: []expectedLintWarning{
@ -1078,12 +1078,12 @@ COPY $DIR_ASSET .
},
})
dockerfile = []byte(fmt.Sprintf(
dockerfile = fmt.Appendf(nil,
`
FROM %s
ENV PATH=$PAHT:/tmp/bin
`,
baseImg))
baseImg)
checkLinterWarnings(t, sb, &lintTestParams{
Dockerfile: dockerfile,
Warnings: []expectedLintWarning{
@ -1273,12 +1273,12 @@ FROM --platform=${TARGETPLATFORM} scratch
func testInvalidDefaultArgInFrom(t *testing.T, sb integration.Sandbox) {
baseImg := integration.UnixOrWindows("busybox", "nanoserver")
dockerfile := []byte(fmt.Sprintf(
dockerfile := fmt.Appendf(nil,
`
ARG VERSION
FROM %s:$VERSION
`,
baseImg))
baseImg)
checkLinterWarnings(t, sb, &lintTestParams{
Dockerfile: dockerfile,
FrontendAttrs: map[string]string{
@ -1350,12 +1350,12 @@ FROM nano${SFX}
},
})
dockerfile = []byte(fmt.Sprintf(
dockerfile = fmt.Appendf(nil,
`
ARG VERSION="latest"
FROM %s:${VERSION}
`,
baseImg))
baseImg)
checkLinterWarnings(t, sb, &lintTestParams{
Dockerfile: dockerfile,
FrontendAttrs: map[string]string{


@ -684,7 +684,7 @@ COPY --from=b2 /License.txt p2
require.NoError(t, err)
defer c.Close()
for i := 0; i < 20; i++ {
for range 20 {
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
FrontendAttrs: map[string]string{
"no-cache": "",


@ -1283,7 +1283,7 @@ func testFrontendDeduplicateSources(t *testing.T, sb integration.Sandbox) {
require.NoError(t, err)
defer c.Close()
dockerfile := []byte(fmt.Sprintf(
dockerfile := fmt.Appendf(nil,
`
FROM %s as base
COPY foo foo2
@ -1291,7 +1291,7 @@ COPY foo foo2
FROM linked
COPY bar bar2
`,
integration.UnixOrWindows("scratch", "nanoserver")),
integration.UnixOrWindows("scratch", "nanoserver"),
)
dir := integration.Tmpdir(


@ -117,7 +117,7 @@ RUN --mount=type=ssh apk update \
sshAgentCmd.Stderr = sshAgentOutputBuf
require.NoError(t, sshAgentCmd.Start())
var found bool
for i := 0; i < 100; i++ {
for range 100 {
_, err := os.Stat(sockPath)
if err == nil {
found = true


@ -248,7 +248,7 @@ type frontend interface {
}
func init() {
frontends := map[string]interface{}{}
frontends := map[string]any{}
images := integration.UnixOrWindows(
[]string{"busybox:latest", "alpine:latest"},
@ -293,13 +293,13 @@ func TestIntegration(t *testing.T) {
)...)
integration.Run(t, securityTests, append(append(opts, securityOpts...),
integration.WithMatrix("security.insecure", map[string]interface{}{
integration.WithMatrix("security.insecure", map[string]any{
"granted": securityInsecureGranted,
"denied": securityInsecureDenied,
}))...)
integration.Run(t, networkTests, append(opts,
integration.WithMatrix("network.host", map[string]interface{}{
integration.WithMatrix("network.host", map[string]any{
"granted": networkHostGranted,
"denied": networkHostDenied,
}))...)
@ -1284,7 +1284,7 @@ FROM target
`,
)
for _, src := range []string{"/", "/d2"} {
dockerfile := []byte(fmt.Sprintf(dockerfile, src))
dockerfile := fmt.Appendf(nil, dockerfile, src)
dir := integration.Tmpdir(
t,
@ -1652,10 +1652,10 @@ func testGlobalArgErrors(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
imgName := integration.UnixOrWindows("busybox", "nanoserver")
dockerfile := []byte(fmt.Sprintf(`
dockerfile := fmt.Appendf(nil, `
ARG FOO=${FOO:?"custom error"}
FROM %s
`, imgName))
`, imgName)
dir := integration.Tmpdir(
t,
@ -1691,11 +1691,11 @@ FROM %s
func testArgDefaultExpansion(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
dockerfile := []byte(fmt.Sprintf(`
dockerfile := fmt.Appendf(nil, `
FROM %s
ARG FOO
ARG BAR=${FOO:?"foo missing"}
`, integration.UnixOrWindows("scratch", "nanoserver")))
`, integration.UnixOrWindows("scratch", "nanoserver"))
dir := integration.Tmpdir(
t,
@ -1981,11 +1981,11 @@ COPY --from=base /out/ /
// stage name defined in Dockerfile but not passed in request
imgName := integration.UnixOrWindows("scratch", "nanoserver")
dockerfile = append(dockerfile, []byte(fmt.Sprintf(`
dockerfile = append(dockerfile, fmt.Appendf(nil, `
FROM %s AS final
COPY --from=base /out/ /
`, imgName))...)
`, imgName)...)
dir = integration.Tmpdir(
t,
@ -2517,11 +2517,11 @@ CMD ["test"]
}, nil)
require.NoError(t, err)
dockerfile = []byte(fmt.Sprintf(`
dockerfile = fmt.Appendf(nil, `
FROM %s
SHELL ["ls"]
ENTRYPOINT my entrypoint
`, target))
`, target)
dir = integration.Tmpdir(
t,
@ -3002,10 +3002,10 @@ func testDockerfileADDFromURL(t *testing.T, sb integration.Sandbox) {
})
defer server.Close()
dockerfile := []byte(fmt.Sprintf(`
dockerfile := fmt.Appendf(nil, `
FROM scratch
ADD %s /dest/
`, server.URL+"/foo"))
`, server.URL+"/foo")
dir := integration.Tmpdir(
t,
@ -3031,10 +3031,10 @@ ADD %s /dest/
require.NoError(t, err)
// test the default properties
dockerfile = []byte(fmt.Sprintf(`
dockerfile = fmt.Appendf(nil, `
FROM scratch
ADD %s /dest/
`, server.URL+"/"))
`, server.URL+"/")
dir = integration.Tmpdir(
t,
@ -3088,10 +3088,10 @@ func testDockerfileAddArchive(t *testing.T, sb integration.Sandbox) {
baseImage := integration.UnixOrWindows("scratch", "nanoserver")
dockerfile := []byte(fmt.Sprintf(`
dockerfile := fmt.Appendf(nil, `
FROM %s
ADD t.tar /
`, baseImage))
`, baseImage)
dir := integration.Tmpdir(
t,
@ -3119,10 +3119,10 @@ ADD t.tar /
err = gz.Close()
require.NoError(t, err)
dockerfile = []byte(fmt.Sprintf(`
dockerfile = fmt.Appendf(nil, `
FROM %s
ADD t.tar.gz /
`, baseImage))
`, baseImage)
dir = integration.Tmpdir(
t,
@ -3143,10 +3143,10 @@ ADD t.tar.gz /
require.Equal(t, expectedContent, dt)
// COPY doesn't extract
dockerfile = []byte(fmt.Sprintf(`
dockerfile = fmt.Appendf(nil, `
FROM %s
COPY t.tar.gz /
`, baseImage))
`, baseImage)
dir = integration.Tmpdir(
t,
@ -3177,10 +3177,10 @@ COPY t.tar.gz /
})
defer server.Close()
dockerfile = []byte(fmt.Sprintf(`
dockerfile = fmt.Appendf(nil, `
FROM %s
ADD %s /
`, baseImage, server.URL+"/t.tar.gz"))
`, baseImage, server.URL+"/t.tar.gz")
dir = integration.Tmpdir(
t,
@ -3200,10 +3200,10 @@ ADD %s /
require.Equal(t, buf2.Bytes(), dt)
// https://github.com/moby/buildkit/issues/386
dockerfile = []byte(fmt.Sprintf(`
dockerfile = fmt.Appendf(nil, `
FROM %s
ADD %s /newname.tar.gz
`, baseImage, server.URL+"/t.tar.gz"))
`, baseImage, server.URL+"/t.tar.gz")
dir = integration.Tmpdir(
t,
@ -4494,7 +4494,7 @@ func testAddURLChmod(t *testing.T, sb integration.Sandbox) {
})
defer server.Close()
dockerfile := []byte(fmt.Sprintf(`
dockerfile := fmt.Appendf(nil, `
FROM busybox AS build
ADD --chmod=644 %[1]s /tmp/foo1
ADD --chmod=755 %[1]s /tmp/foo2
@ -4510,7 +4510,7 @@ RUN stat -c "%%04a" /tmp/foo1 >> /dest && \
FROM scratch
COPY --from=build /dest /dest
`, server.URL+"/foo"))
`, server.URL+"/foo")
dir := integration.Tmpdir(
t,
@ -4855,7 +4855,7 @@ FROM %s
COPY --from=stage1 baz bax
`
baseImage := integration.UnixOrWindows("scratch", "nanoserver")
dockerfile := []byte(fmt.Sprintf(dockerfileStr, baseImage, baseImage, baseImage))
dockerfile := fmt.Appendf(nil, dockerfileStr, baseImage, baseImage, baseImage)
dir := integration.Tmpdir(
t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
@ -5063,9 +5063,9 @@ ONBUILD RUN mkdir \out && echo 11>> \out\foo
}, nil)
require.NoError(t, err)
dockerfile = []byte(fmt.Sprintf(`
dockerfile = fmt.Appendf(nil, `
FROM %s
`, target))
`, target)
dir = integration.Tmpdir(
t,
@ -5091,11 +5091,11 @@ ONBUILD RUN mkdir \out && echo 11>> \out\foo
}, nil)
require.NoError(t, err)
dockerfile = []byte(fmt.Sprintf(`
dockerfile = fmt.Appendf(nil, `
FROM %s AS base
FROM %s
COPY --from=base /out /
`, target2, integration.UnixOrWindows("scratch", "nanoserver")))
`, target2, integration.UnixOrWindows("scratch", "nanoserver"))
dir = integration.Tmpdir(
t,
@ -5169,7 +5169,7 @@ ONBUILD RUN mkdir -p /out && echo -n yes >> /out/didrun
}, nil)
require.NoError(t, err)
dockerfile = []byte(fmt.Sprintf(`
dockerfile = fmt.Appendf(nil, `
FROM %s AS base
RUN [ -f /out/didrun ] && touch /step1
RUN rm /out/didrun
@ -5180,7 +5180,7 @@ RUN [ ! -f /out/didrun ] && touch /step3
FROM scratch
COPY --from=child /step* /
`, target))
`, target)
dir = integration.Tmpdir(
t,
@ -5460,12 +5460,12 @@ ONBUILD COPY --from=alpine /etc/alpine-release /out/alpine-release2
}, nil)
require.NoError(t, err)
dockerfile = []byte(fmt.Sprintf(`
dockerfile = fmt.Appendf(nil, `
FROM %s AS base
RUN cat /out/alpine-release2 > /out/alpine-release3
FROM scratch
COPY --from=base /out /
`, target))
`, target)
dir = integration.Tmpdir(
t,
@ -5521,7 +5521,7 @@ ONBUILD RUN --mount=type=bind,target=/in,from=inputstage mkdir /out && cat /in/f
}, nil)
require.NoError(t, err)
dockerfile = []byte(fmt.Sprintf(`
dockerfile = fmt.Appendf(nil, `
FROM %s AS inputstage
RUN cat /out/alpine-release2 > /out/alpine-release4
RUN echo -n foo > /foo
@ -5529,7 +5529,7 @@ ONBUILD RUN --mount=type=bind,target=/in,from=inputstage mkdir /out && cat /in/f
RUN echo -n bar3 > /out/bar3
FROM scratch
COPY --from=base /out /
`, target, target2))
`, target, target2)
dir = integration.Tmpdir(
t,
@ -5608,9 +5608,9 @@ ONBUILD RUN --mount=type=cache,target=/cache echo -n 42 >> /cache/foo && echo -n
}, nil)
require.NoError(t, err)
dockerfile = []byte(fmt.Sprintf(`FROM %s
dockerfile = fmt.Appendf(nil, `FROM %s
RUN --mount=type=cache,target=/cache [ "$(cat /cache/foo)" = "42" ] && [ "$(cat /bar)" = "11" ]
`, target))
`, target)
dir = integration.Tmpdir(
t,
@ -5711,7 +5711,7 @@ COPY --from=base arch /
dtarm := imgs.Find("linux/arm/v7").Layers[0]["unique"].Data
require.NotEqual(t, dtamd, dtarm)
for i := 0; i < 2; i++ {
for range 2 {
ensurePruneAll(t, c, sb)
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
@ -6279,7 +6279,7 @@ COPY foo2 bar2
`,
)
dockerfile := []byte(fmt.Sprintf(dockerfileStr, runtime.GOOS))
dockerfile := fmt.Appendf(nil, dockerfileStr, runtime.GOOS)
dir := integration.Tmpdir(
t,
@ -6496,9 +6496,9 @@ func testTarContext(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
imgName := integration.UnixOrWindows("scratch", "nanoserver")
dockerfile := []byte(fmt.Sprintf(`
dockerfile := fmt.Appendf(nil, `
FROM %s
COPY foo /`, imgName))
COPY foo /`, imgName)
foo := []byte("contents")
@ -6561,10 +6561,10 @@ func testTarContextExternalDockerfile(t *testing.T, sb integration.Sandbox) {
require.NoError(t, err)
imgName := integration.UnixOrWindows("scratch", "nanoserver")
dockerfile := []byte(fmt.Sprintf(`
dockerfile := fmt.Appendf(nil, `
FROM %s
COPY foo bar
`, imgName))
`, imgName)
dir := integration.Tmpdir(
t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
@ -6613,7 +6613,7 @@ func testFrontendUseForwardedSolveResults(t *testing.T, sb integration.Sandbox)
FROM %s
COPY foo foo2
`
dockerfile := []byte(fmt.Sprintf(dockerfileStr, integration.UnixOrWindows("scratch", "nanoserver")))
dockerfile := fmt.Appendf(nil, dockerfileStr, integration.UnixOrWindows("scratch", "nanoserver"))
dir := integration.Tmpdir(
t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
@ -7375,7 +7375,7 @@ func testNamedImageContextPlatform(t *testing.T, sb integration.Sandbox) {
"nanoserver",
)
dockerfile := []byte(fmt.Sprintf(`FROM --platform=$BUILDPLATFORM %s:latest`, baseImage))
dockerfile := fmt.Appendf(nil, `FROM --platform=$BUILDPLATFORM %s:latest`, baseImage)
target := registry + "/buildkit/testnamedimagecontextplatform:latest"
@ -7406,10 +7406,10 @@ func testNamedImageContextPlatform(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)
dockerfile = []byte(fmt.Sprintf(`
dockerfile = fmt.Appendf(nil, `
FROM --platform=$BUILDPLATFORM %s AS target
RUN echo hello
`, baseImage))
`, baseImage)
dir = integration.Tmpdir(
t,
@ -7526,14 +7526,14 @@ func testNamedImageContextScratch(t *testing.T, sb integration.Sandbox) {
require.NoError(t, err)
defer c.Close()
dockerfile := []byte(fmt.Sprintf(
dockerfile := fmt.Appendf(nil,
`
FROM %s AS build
COPY <<EOF /out
hello world!
EOF
`,
integration.UnixOrWindows("busybox", "nanoserver")))
integration.UnixOrWindows("busybox", "nanoserver"))
dir := integration.Tmpdir(
t,
@ -8548,7 +8548,7 @@ EOF
require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
require.Equal(t, intoto.PredicateSPDX, attest.PredicateType)
require.Subset(t, attest.Predicate, map[string]interface{}{"name": "sbom-scan"})
require.Subset(t, attest.Predicate, map[string]any{"name": "sbom-scan"})
}
func testSBOMScannerArgs(t *testing.T, sb integration.Sandbox) {
@ -8666,7 +8666,7 @@ FROM base
require.Equal(t, 1, len(att.LayersRaw))
var attest intoto.Statement
require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
require.Subset(t, attest.Predicate, map[string]interface{}{"name": "core"})
require.Subset(t, attest.Predicate, map[string]any{"name": "core"})
dockerfile = []byte(`
ARG BUILDKIT_SBOM_SCAN_CONTEXT=true
@ -8731,7 +8731,7 @@ ARG BUILDKIT_SBOM_SCAN_STAGE=true
for _, l := range att.LayersRaw {
var attest intoto.Statement
require.NoError(t, json.Unmarshal(l, &attest))
att := attest.Predicate.(map[string]interface{})
att := attest.Predicate.(map[string]any)
switch att["name"] {
case "core":
case "extra":
@ -9353,10 +9353,10 @@ COPY foo /foo
}, nil)
require.NoError(t, err)
dockerfile = []byte(fmt.Sprintf(`
dockerfile = fmt.Appendf(nil, `
FROM %s
ENV foo=bar
`, target))
`, target)
checkLinterWarnings(t, sb, &lintTestParams{
Dockerfile: dockerfile,
@ -9789,12 +9789,12 @@ EOF
require.Len(t, info.Images, 1)
require.Equal(t, info.Images[0].Img.Platform.OSVersion, p1.OSVersion)
dockerfile = []byte(fmt.Sprintf(`
dockerfile = fmt.Appendf(nil, `
FROM %s
COPY <<EOF /other
hello
EOF
`, target))
`, target)
dir = integration.Tmpdir(
t,
@ -9855,9 +9855,9 @@ func runShell(dir string, cmds ...string) error {
// cleanup cache because some records still haven't been released.
// This function tries to ensure prune by retrying it.
func ensurePruneAll(t *testing.T, c *client.Client, sb integration.Sandbox) {
for i := 0; i < 2; i++ {
for i := range 2 {
require.NoError(t, c.Prune(sb.Context(), nil, client.PruneAll))
for j := 0; j < 20; j++ {
for range 20 {
du, err := c.DiskUsage(sb.Context())
require.NoError(t, err)
if len(du) == 0 {
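
Note: the loop rewrites in this file use Go 1.22's range-over-int form, which modernize applies when a counted loop runs a fixed number of times. A minimal standalone sketch (not from this repo) of the before/after:

package main

import "fmt"

func main() {
    // Before: for i := 0; i < 3; i++ { ... }
    for i := range 3 { // i takes the values 0, 1, 2
        fmt.Println("attempt", i)
    }

    // When the index is unused, the variable disappears entirely.
    // Before: for j := 0; j < 2; j++ { ... }
    for range 2 {
        fmt.Println("retry")
    }
}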

View File

@ -551,16 +551,16 @@ func HasStage(s []Stage, name string) (int, bool) {
}
type withExternalData struct {
m map[interface{}]interface{}
m map[any]any
}
func (c *withExternalData) getExternalValue(k interface{}) interface{} {
func (c *withExternalData) getExternalValue(k any) any {
return c.m[k]
}
func (c *withExternalData) setExternalValue(k, v interface{}) {
func (c *withExternalData) setExternalValue(k, v any) {
if c.m == nil {
c.m = map[interface{}]interface{}{}
c.m = map[any]any{}
}
c.m[k] = v
}
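
Note: since Go 1.18, any is a predeclared alias for interface{}, so these rewrites are purely cosmetic and change no behavior; the two spellings are interchangeable at compile time. A minimal sketch with hypothetical names:

package main

import "fmt"

// Before: func describe(v interface{}) string
func describe(v any) string {
    return fmt.Sprintf("%T: %v", v, v)
}

func main() {
    m := map[any]any{"answer": 42} // was map[interface{}]interface{}
    for k, v := range m {
        fmt.Println(describe(k), "->", describe(v))
    }
}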

View File

@ -7,6 +7,7 @@ package instructions
import (
"fmt"
"regexp"
"slices"
"sort"
"strconv"
"strings"
@ -66,12 +67,12 @@ func newParseRequestFromNode(node *parser.Node) parseRequest {
}
}
func ParseInstruction(node *parser.Node) (v interface{}, err error) {
func ParseInstruction(node *parser.Node) (v any, err error) {
return ParseInstructionWithLinter(node, nil)
}
// ParseInstruction converts an AST to a typed instruction (either a command or a build stage beginning when encountering a `FROM` statement)
func ParseInstructionWithLinter(node *parser.Node, lint *linter.Linter) (v interface{}, err error) {
func ParseInstructionWithLinter(node *parser.Node, lint *linter.Linter) (v any, err error) {
defer func() {
if err != nil {
err = parser.WithLocation(err, node.Location())
@ -880,10 +881,8 @@ func validateDefinitionDescription(instruction string, argKeys []string, descCom
return
}
descCommentParts := strings.Split(descComments[len(descComments)-1], " ")
for _, key := range argKeys {
if key == descCommentParts[0] {
return
}
if slices.Contains(argKeys, descCommentParts[0]) {
return
}
exampleKey := argKeys[0]
if len(argKeys) > 1 {

View File

@ -281,7 +281,7 @@ func parseJSON(rest string) (*Node, map[string]bool, error) {
return nil, nil, errDockerfileNotJSONArray
}
var myJSON []interface{}
var myJSON []any
if err := json.Unmarshal([]byte(rest), &myJSON); err != nil {
return nil, nil, err
}

View File

@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"slices"
"github.com/containerd/platforms"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
@ -67,7 +68,7 @@ func (bc *Client) Build(ctx context.Context, fn BuildFunc) (*ResultBuilder, erro
p.OSVersion = img.OSVersion
}
if p.OSFeatures == nil && len(img.OSFeatures) > 0 {
p.OSFeatures = append([]string{}, img.OSFeatures...)
p.OSFeatures = slices.Clone(img.OSFeatures)
}
}
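
Note: slices.Clone (Go 1.21) replaces the append([]T{}, src...) copy idiom. One nuance: Clone(nil) returns nil, while append([]T{}, nil...) returns an empty non-nil slice; the nil guard in the hunk above makes the two equivalent here. A small sketch (feature values hypothetical):

package main

import (
    "fmt"
    "slices"
)

func main() {
    src := []string{"win32k", "hyperv"}

    // Before: dst := append([]string{}, src...)
    dst := slices.Clone(src) // shallow copy with its own backing array

    dst[0] = "changed"
    fmt.Println(src, dst) // [win32k hyperv] [changed hyperv]
}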

View File

@ -9,6 +9,7 @@ import (
"net"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"sync"
@ -113,11 +114,9 @@ func (gf *gatewayFrontend) checkSourceIsAllowed(source string) error {
taglessSource := reference.TrimNamed(sourceRef).Name()
for _, allowedRepository := range gf.allowedRepositories {
if taglessSource == allowedRepository {
// Allowed
return nil
}
if slices.Contains(gf.allowedRepositories, taglessSource) {
// Allowed
return nil
}
return errors.Errorf("'%s' is not an allowed gateway source", source)
}
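
Note: slices.Contains (Go 1.21) collapses the linear membership scan into one call with identical semantics. A self-contained sketch with a hypothetical helper:

package main

import (
    "fmt"
    "slices"
)

// isAllowed reports whether source is in the allow list.
// Before:
//    for _, a := range allowed {
//        if a == source {
//            return true
//        }
//    }
//    return false
func isAllowed(allowed []string, source string) bool {
    return slices.Contains(allowed, source)
}

func main() {
    allowed := []string{"docker.io/library/busybox", "docker.io/library/alpine"}
    fmt.Println(isAllowed(allowed, "docker.io/library/alpine")) // true
    fmt.Println(isAllowed(allowed, "docker.io/library/debian")) // false
}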

View File

@ -134,7 +134,7 @@ func (results *LintResults) ToResult(scb SourceInfoMap) (*client.Result, error)
if len(results.Warnings) > 0 || results.Error != nil {
status = 1
}
res.AddMeta("result.statuscode", []byte(fmt.Sprintf("%d", status)))
res.AddMeta("result.statuscode", fmt.Appendf(nil, "%d", status))
res.AddMeta("version", []byte(SubrequestLintDefinition.Version))
return res, nil
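
Note: fmt.Appendf (Go 1.19) formats directly into a byte slice; with a nil first argument it allocates a fresh one, so it replaces the []byte(fmt.Sprintf(...)) round trip through an intermediate string. A small sketch:

package main

import "fmt"

func main() {
    status := 1

    // Before: b := []byte(fmt.Sprintf("%d", status))
    b := fmt.Appendf(nil, "%d", status) // formats straight into a new []byte
    fmt.Println(string(b)) // 1

    // It can also extend an existing buffer in place.
    buf := []byte("result.statuscode=")
    buf = fmt.Appendf(buf, "%d", status)
    fmt.Println(string(buf)) // result.statuscode=1
}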

View File

@ -5,9 +5,10 @@ ARG ALPINE_VERSION=3.21
ARG XX_VERSION=1.6.1
ARG PROTOLINT_VERSION=0.50.5
ARG GOLANGCI_LINT_VERSION=1.61.0
ARG GOPLS_VERSION=v0.26.0
# v0.31 requires go1.24
ARG GOPLS_VERSION=v0.30.0
# GOPLS_ANALYZERS defines gopls analyzers to be run. disabled by default: deprecated unusedvariable simplifyrange
ARG GOPLS_ANALYZERS="embeddirective fillreturns infertypeargs nonewvars norangeoverfunc noresultvalues simplifycompositelit simplifyslice undeclaredname unusedparams useany"
ARG GOPLS_ANALYZERS="embeddirective fillreturns hostport infertypeargs modernize nonewvars noresultvalues simplifycompositelit simplifyslice unusedparams yield"
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS golang-base
FROM --platform=$BUILDPLATFORM yoheimuta/protolint:${PROTOLINT_VERSION} AS protolint-base
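
Note: the modernize analyzer added to GOPLS_ANALYZERS above is what presumably produced most of the mechanical rewrites in this diff. It can also be run standalone against a module; the invocation below follows the x/tools documentation at the time of writing (path and flags may differ by version):

go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix -test ./...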

View File

@ -17,8 +17,8 @@ import (
type Stream interface {
Context() context.Context
SendMsg(m interface{}) error
RecvMsg(m interface{}) error
SendMsg(m any) error
RecvMsg(m any) error
}
func newStreamWriter(stream grpc.ClientStream) io.WriteCloser {

View File

@ -32,8 +32,8 @@ func Dialer(api controlapi.ControlClient) session.Dialer {
type stream interface {
Context() context.Context
SendMsg(m interface{}) error
RecvMsg(m interface{}) error
SendMsg(m any) error
RecvMsg(m any) error
}
func streamToConn(stream stream) (net.Conn, <-chan struct{}) {

View File

@ -9,8 +9,8 @@ import (
)
type Stream interface {
SendMsg(m interface{}) error
RecvMsg(m interface{}) error
SendMsg(m any) error
RecvMsg(m any) error
}
func Copy(ctx context.Context, conn io.ReadWriteCloser, stream Stream, closeStream func() error) error {

View File

@ -802,12 +802,12 @@ const (
)
func isOpaqueXattr(s string) bool {
for _, k := range []string{trustedOpaqueXattr, userOpaqueXattr} {
if s == k {
return true
}
switch s {
case trustedOpaqueXattr, userOpaqueXattr:
return true
default:
return false
}
return false
}
func opaqueXattr(userxattr bool) string {
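
Note: for a membership test against a small fixed set of constants, the loop can become a switch with a multi-value case, as above. A sketch (xattr constant values assumed for illustration):

package main

import "fmt"

const (
    trustedOpaqueXattr = "trusted.overlay.opaque"
    userOpaqueXattr    = "user.overlay.opaque"
)

func isOpaqueXattr(s string) bool {
    switch s {
    case trustedOpaqueXattr, userOpaqueXattr:
        return true
    default:
        return false
    }
}

func main() {
    fmt.Println(isOpaqueXattr(userOpaqueXattr))    // true
    fmt.Println(isOpaqueXattr("user.other.xattr")) // false
}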

View File

@ -6,6 +6,7 @@ import (
"github.com/containerd/containerd/v2/core/mount"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
"slices"
)
func (lm *localMounter) Mount() (string, error) {
@ -22,13 +23,7 @@ func (lm *localMounter) Mount() (string, error) {
}
if len(lm.mounts) == 1 && lm.mounts[0].Type == "bind" {
ro := false
for _, opt := range lm.mounts[0].Options {
if opt == "ro" {
ro = true
break
}
}
ro := slices.Contains(lm.mounts[0].Options, "ro")
if !ro {
return lm.mounts[0].Source, nil
}

View File

@ -2,6 +2,7 @@ package snapshot
import (
"os"
"slices"
"github.com/containerd/containerd/v2/core/mount"
"github.com/pkg/errors"
@ -21,13 +22,7 @@ func (lm *localMounter) Mount() (string, error) {
}
if !lm.forceRemount && len(lm.mounts) == 1 && lm.mounts[0].Type == "nullfs" {
ro := false
for _, opt := range lm.mounts[0].Options {
if opt == "ro" {
ro = true
break
}
}
ro := slices.Contains(lm.mounts[0].Options, "ro")
if !ro {
return lm.mounts[0].Source, nil
}

View File

@ -3,6 +3,7 @@ package snapshot
import (
"os"
"path/filepath"
"slices"
"syscall"
"github.com/containerd/containerd/v2/core/mount"
@ -35,13 +36,7 @@ func (lm *localMounter) Mount() (string, error) {
var isFile bool
if len(lm.mounts) == 1 && (lm.mounts[0].Type == "bind" || lm.mounts[0].Type == "rbind") {
if !lm.forceRemount {
ro := false
for _, opt := range lm.mounts[0].Options {
if opt == "ro" {
ro = true
break
}
}
ro := slices.Contains(lm.mounts[0].Options, "ro")
if !ro {
return lm.mounts[0].Source, nil
}

View File

@ -440,7 +440,7 @@ func (s *Store) WalkBacklinks(id string, fn func(id string, link solver.CacheInf
if err := json.Unmarshal(parts[0], &l); err != nil {
return err
}
l.Digest = digest.FromBytes([]byte(fmt.Sprintf("%s@%d", l.Digest, l.Output)))
l.Digest = digest.FromBytes(fmt.Appendf(nil, "%s@%d", l.Digest, l.Output))
l.Output = 0
outIDs = append(outIDs, string(bid))
outLinks = append(outLinks, l)

View File

@ -1,6 +1,7 @@
package solver
import (
"slices"
"sync"
digest "github.com/opencontainers/go-digest"
@ -79,7 +80,7 @@ func (ck *CacheKey) Deps() [][]CacheKeyWithSelector {
defer ck.mu.RUnlock()
deps := make([][]CacheKeyWithSelector, len(ck.deps))
for i := range ck.deps {
deps[i] = append([]CacheKeyWithSelector(nil), ck.deps[i]...)
deps[i] = slices.Clone(ck.deps[i])
}
return deps
}

View File

@ -449,7 +449,7 @@ func (c *cacheManager) getIDFromDeps(k *CacheKey) string {
func rootKey(dgst digest.Digest, output Index) digest.Digest {
if strings.HasPrefix(dgst.String(), "random:") {
return digest.Digest("random:" + digest.FromBytes([]byte(fmt.Sprintf("%s@%d", dgst, output))).Encoded())
return digest.Digest("random:" + digest.FromBytes(fmt.Appendf(nil, "%s@%d", dgst, output)).Encoded())
}
return digest.FromBytes([]byte(fmt.Sprintf("%s@%d", dgst, output)))
return digest.FromBytes(fmt.Appendf(nil, "%s@%d", dgst, output))
}

View File

@ -9,32 +9,32 @@ import (
digest "github.com/opencontainers/go-digest"
)
type CacheOpts map[interface{}]interface{}
type CacheOpts map[any]any
type progressKey struct{}
type cacheOptGetterKey struct{}
func CacheOptGetterOf(ctx context.Context) func(includeAncestors bool, keys ...interface{}) map[interface{}]interface{} {
func CacheOptGetterOf(ctx context.Context) func(includeAncestors bool, keys ...any) map[any]any {
if v := ctx.Value(cacheOptGetterKey{}); v != nil {
if getter, ok := v.(func(includeAncestors bool, keys ...interface{}) map[interface{}]interface{}); ok {
if getter, ok := v.(func(includeAncestors bool, keys ...any) map[any]any); ok {
return getter
}
}
return nil
}
func WithCacheOptGetter(ctx context.Context, getter func(includeAncestors bool, keys ...interface{}) map[interface{}]interface{}) context.Context {
func WithCacheOptGetter(ctx context.Context, getter func(includeAncestors bool, keys ...any) map[any]any) context.Context {
return context.WithValue(ctx, cacheOptGetterKey{}, getter)
}
func withAncestorCacheOpts(ctx context.Context, start *state) context.Context {
return WithCacheOptGetter(ctx, func(includeAncestors bool, keys ...interface{}) map[interface{}]interface{} {
keySet := make(map[interface{}]struct{})
return WithCacheOptGetter(ctx, func(includeAncestors bool, keys ...any) map[any]any {
keySet := make(map[any]struct{})
for _, k := range keys {
keySet[k] = struct{}{}
}
values := make(map[interface{}]interface{})
values := make(map[any]any)
walkAncestors(ctx, start, func(st *state) bool {
if st.clientVertex.Error != "" {
// don't use values from cancelled or otherwise error'd vertexes

View File

@ -340,7 +340,7 @@ func (e *edge) unpark(incoming []pipeSender, updates, allPipes []pipeReceiver, f
// set up new outgoing requests if needed
if e.cacheMapReq == nil && (e.cacheMap == nil || len(e.cacheRecords) == 0) {
index := e.cacheMapIndex
e.cacheMapReq = f.NewFuncRequest(func(ctx context.Context) (interface{}, error) {
e.cacheMapReq = f.NewFuncRequest(func(ctx context.Context) (any, error) {
cm, err := e.op.CacheMap(ctx, index)
return cm, errors.Wrap(err, "failed to load cache key")
})
@ -495,10 +495,7 @@ func (e *edge) recalcCurrentState() {
isSlowIncomplete := (e.slowCacheFunc(dep) != nil || e.preprocessFunc(dep) != nil) && (dep.state == edgeStatusCacheSlow || (dep.state == edgeStatusComplete && !dep.slowCacheComplete))
if dep.state > stLow && len(dep.keyMap) == 0 && !isSlowIncomplete {
stLow = dep.state
if stLow > edgeStatusCacheSlow {
stLow = edgeStatusCacheSlow
}
stLow = min(dep.state, edgeStatusCacheSlow)
}
effectiveState := dep.state
if dep.state == edgeStatusCacheSlow && isSlowCacheIncomplete {
@ -898,7 +895,7 @@ func (e *edge) computeCacheKeyFromDep(dep *dep, f *pipeFactory) (addedNew bool)
res := dep.result
index := dep.index
dep.slowCacheReq = f.NewFuncRequest(func(ctx context.Context) (interface{}, error) {
dep.slowCacheReq = f.NewFuncRequest(func(ctx context.Context) (any, error) {
v, err := e.op.CalcSlowCache(ctx, index, pfn, fn, res)
return v, errors.Wrap(err, "failed to compute cache key")
})
@ -933,13 +930,13 @@ func (e *edge) execIfPossible(f *pipeFactory) bool {
// postpone delays exec to next unpark invocation if we have unprocessed keys
func (e *edge) postpone(f *pipeFactory) {
f.NewFuncRequest(func(context.Context) (interface{}, error) {
f.NewFuncRequest(func(context.Context) (any, error) {
return nil, nil
})
}
// loadCache creates a request to load edge result from cache
func (e *edge) loadCache(ctx context.Context) (interface{}, error) {
func (e *edge) loadCache(ctx context.Context) (any, error) {
recs := make([]*CacheRecord, 0, len(e.cacheRecords))
for _, r := range e.cacheRecords {
recs = append(recs, r)
@ -959,7 +956,7 @@ func (e *edge) loadCache(ctx context.Context) (interface{}, error) {
}
// execOp creates a request to execute the vertex operation
func (e *edge) execOp(ctx context.Context) (interface{}, error) {
func (e *edge) execOp(ctx context.Context) (any, error) {
cacheKeys, inputs := e.commitOptions()
results, subExporters, err := e.op.Exec(ctx, toResultSlice(inputs))
if err != nil {

View File

@ -12,7 +12,7 @@ func (e *OpError) Unwrap() error {
return e.error
}
func WithOp(err error, anyOp interface{}, opDesc map[string]string) error {
func WithOp(err error, anyOp any, opDesc map[string]string) error {
op, ok := anyOp.(*pb.Op)
if err == nil || !ok {
return err

View File

@ -98,10 +98,7 @@ func (s *Source) Print(w io.Writer) error {
func containsLine(rr []*pb.Range, l int) bool {
for _, r := range rr {
e := r.End.Line
if e < r.Start.Line {
e = r.Start.Line
}
e := max(r.End.Line, r.Start.Line)
if r.Start.Line <= int32(l) && e >= int32(l) {
return true
}
@ -112,10 +109,7 @@ func containsLine(rr []*pb.Range, l int) bool {
func getStartEndLine(rr []*pb.Range) (start int, end int, ok bool) {
first := true
for _, r := range rr {
e := r.End.Line
if e < r.Start.Line {
e = r.Start.Line
}
e := max(r.End.Line, r.Start.Line)
if first || int(r.Start.Line) < start {
start = int(r.Start.Line)
}
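
Note: both hunks above fold a clamp-style if statement into the min/max builtins added in Go 1.21. A small sketch:

package main

import "fmt"

func main() {
    start, end := int32(7), int32(3)

    // Before:
    //    e := end
    //    if e < start {
    //        e = start
    //    }
    e := max(end, start) // builtin since Go 1.21, no import needed
    fmt.Println(e) // 7

    // The symmetric fold caps a value from above:
    // if v > limit { v = limit }  =>  v = min(v, limit)
    fmt.Println(min(10, 4)) // 4
}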

View File

@ -28,7 +28,7 @@ type ResolveOpFunc func(Vertex, Builder) (Op, error)
type Builder interface {
Build(ctx context.Context, e Edge) (CachedResultWithProvenance, error)
InContext(ctx context.Context, f func(ctx context.Context, g session.Group) error) error
EachValue(ctx context.Context, key string, fn func(interface{}) error) error
EachValue(ctx context.Context, key string, fn func(any) error) error
}
// Solver provides a shared graph of all the vertexes currently being
@ -283,7 +283,7 @@ func (sb *subBuilder) InContext(ctx context.Context, f func(context.Context, ses
return f(ctx, sb.state)
}
func (sb *subBuilder) EachValue(ctx context.Context, key string, fn func(interface{}) error) error {
func (sb *subBuilder) EachValue(ctx context.Context, key string, fn func(any) error) error {
sb.mu.Lock()
defer sb.mu.Unlock()
for j := range sb.jobs {
@ -465,7 +465,7 @@ func (jl *Solver) loadUnlocked(ctx context.Context, v, parent Vertex, j *Job, ca
dgst := v.Digest()
dgstWithoutCache := digest.FromBytes([]byte(fmt.Sprintf("%s-ignorecache", dgst)))
dgstWithoutCache := digest.FromBytes(fmt.Appendf(nil, "%s-ignorecache", dgst))
// if same vertex is already loaded without cache just use that
st, ok := jl.actives[dgstWithoutCache]
@ -810,11 +810,11 @@ func (j *Job) InContext(ctx context.Context, f func(context.Context, session.Gro
return f(progress.WithProgress(ctx, j.pw), session.NewGroup(j.SessionID))
}
func (j *Job) SetValue(key string, v interface{}) {
func (j *Job) SetValue(key string, v any) {
j.values.Store(key, v)
}
func (j *Job) EachValue(ctx context.Context, key string, fn func(interface{}) error) error {
func (j *Job) EachValue(ctx context.Context, key string, fn func(any) error) error {
v, ok := j.values.Load(key)
if ok {
return fn(v)
@ -1036,7 +1036,7 @@ func (s *sharedOp) CacheMap(ctx context.Context, index int) (resp *cacheMapResp,
if complete {
if err == nil {
if res.Opts == nil {
res.Opts = CacheOpts(make(map[interface{}]interface{}))
res.Opts = CacheOpts(make(map[any]any))
}
res.Opts[progressKey{}] = &controller.Controller{
WriterFactory: progress.FromContext(ctx),

View File

@ -24,7 +24,7 @@ func TestJobsIntegration(t *testing.T) {
testParallelism,
),
mirrors,
integration.WithMatrix("max-parallelism", map[string]interface{}{
integration.WithMatrix("max-parallelism", map[string]any{
"single": maxParallelismSingle,
"unlimited": maxParallelismUnlimited,
}),

View File

@ -229,7 +229,7 @@ func (rp *resultProxy) Definition() *pb.Definition {
return rp.req.Definition
}
func (rp *resultProxy) Provenance() interface{} {
func (rp *resultProxy) Provenance() any {
if rp.provenance == nil {
return nil
}

View File

@ -8,7 +8,7 @@ import (
"os"
"path"
"runtime"
"sort"
"slices"
"strings"
"github.com/containerd/platforms"
@ -247,9 +247,7 @@ func dedupePaths(inp []string) []string {
paths = append(paths, p1)
}
}
sort.Slice(paths, func(i, j int) bool {
return paths[i] < paths[j]
})
slices.Sort(paths)
return paths
}
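
Note: when the comparator is a plain < on an ordered element type, slices.Sort (Go 1.21) replaces the sort.Slice closure; the standard library documents it as more ergonomic and typically faster, since it is compiled generically rather than driven through a comparison callback. A small sketch:

package main

import (
    "fmt"
    "slices"
)

func main() {
    paths := []string{"/out", "/a/b", "/a"}

    // Before:
    //    sort.Slice(paths, func(i, j int) bool { return paths[i] < paths[j] })
    slices.Sort(paths) // ascending order, no comparator needed

    fmt.Println(paths) // [/a /a/b /out]
}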

View File

@ -7,6 +7,7 @@ import (
"fmt"
"path"
"runtime"
"slices"
"sort"
"sync"
@ -352,7 +353,7 @@ func (s *FileOpSolver) Solve(ctx context.Context, inputs []fileoptypes.Ref, acti
return nil, errors.Errorf("no outputs specified")
}
for i := 0; i < len(s.outs); i++ {
for i := range len(s.outs) {
if _, ok := s.outs[i]; !ok {
return nil, errors.Errorf("missing output index %d", i)
}
@ -398,10 +399,8 @@ func (s *FileOpSolver) Solve(ctx context.Context, inputs []fileoptypes.Ref, acti
}
func (s *FileOpSolver) validate(idx int, inputs []fileoptypes.Ref, actions []*pb.FileAction, loaded []int) error {
for _, check := range loaded {
if idx == check {
return errors.Errorf("loop from index %d", idx)
}
if slices.Contains(loaded, idx) {
return errors.Errorf("loop from index %d", idx)
}
if idx < len(inputs) {
return nil

View File

@ -2,6 +2,7 @@ package ops
import (
"context"
"slices"
"sync"
"sync/atomic"
"testing"
@ -696,7 +697,7 @@ func (b *testFileRefBackend) Prepare(ctx context.Context, ref fileoptypes.Ref, r
b.mounts[m.initID] = m
b.mu.Unlock()
m2 := *m
m2.chain = append([]mod{}, m2.chain...)
m2.chain = slices.Clone(m2.chain)
return &m2, nil
}

View File

@ -483,14 +483,14 @@ type edge struct {
func newCacheExporter() *cacheExporter {
return &cacheExporter{
m: map[interface{}]struct{}{},
m: map[any]struct{}{},
layers: map[edge][][]ocispecs.Descriptor{},
}
}
type cacheExporter struct {
layers map[edge][][]ocispecs.Descriptor
m map[interface{}]struct{}
m map[any]struct{}
}
func (ce *cacheExporter) Add(dgst digest.Digest) solver.CacheExporterRecord {

View File

@ -1012,8 +1012,8 @@ func inlineCache(ctx context.Context, ie inlineCacheExporter, res solver.CachedR
}
func withDescHandlerCacheOpts(ctx context.Context, ref cache.ImmutableRef) context.Context {
return solver.WithCacheOptGetter(ctx, func(includeAncestors bool, keys ...interface{}) map[interface{}]interface{} {
vals := make(map[interface{}]interface{})
return solver.WithCacheOptGetter(ctx, func(includeAncestors bool, keys ...any) map[any]any {
vals := make(map[any]any)
for _, k := range keys {
if key, ok := k.(cache.DescHandlerKey); ok {
if handler := ref.DescHandler(digest.Digest(key)); handler != nil {
@ -1119,7 +1119,7 @@ func supportedEntitlements(ents []string) []entitlements.Entitlement {
func loadEntitlements(b solver.Builder) (entitlements.Set, error) {
var ent entitlements.Set = map[entitlements.Entitlement]entitlements.EntitlementsConfig{}
err := b.EachValue(context.TODO(), keyEntitlements, func(v interface{}) error {
err := b.EachValue(context.TODO(), keyEntitlements, func(v any) error {
set, ok := v.(entitlements.Set)
if !ok {
return errors.Errorf("invalid entitlements %T", v)
@ -1141,7 +1141,7 @@ func loadEntitlements(b solver.Builder) (entitlements.Set, error) {
func loadSourcePolicy(b solver.Builder) (*spb.Policy, error) {
var srcPol spb.Policy
err := b.EachValue(context.TODO(), keySourcePolicy, func(v interface{}) error {
err := b.EachValue(context.TODO(), keySourcePolicy, func(v any) error {
x, ok := v.(*spb.Policy)
if !ok {
return errors.Errorf("invalid source policy %T", v)

View File

@ -3,6 +3,7 @@ package llbsolver
import (
"context"
"fmt"
"slices"
"strings"
"github.com/containerd/platforms"
@ -18,7 +19,7 @@ import (
)
type vertex struct {
sys interface{}
sys any
options solver.VertexOptions
inputs []solver.Edge
digest digest.Digest
@ -29,7 +30,7 @@ func (v *vertex) Digest() digest.Digest {
return v.digest
}
func (v *vertex) Sys() interface{} {
func (v *vertex) Sys() any {
return v.sys
}
@ -103,7 +104,7 @@ func NormalizeRuntimePlatforms() LoadOpt {
OSVersion: normalizedPlatform.OSVersion,
}
if normalizedPlatform.OSFeatures != nil {
op.Platform.OSFeatures = append([]string{}, normalizedPlatform.OSFeatures...)
op.Platform.OSFeatures = slices.Clone(normalizedPlatform.OSFeatures)
}
return nil

View File

@ -1,6 +1,8 @@
package pb
import (
"slices"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)
@ -12,7 +14,7 @@ func (p *Platform) Spec() ocispecs.Platform {
OSVersion: p.OSVersion,
}
if p.OSFeatures != nil {
result.OSFeatures = append([]string{}, p.OSFeatures...)
result.OSFeatures = slices.Clone(p.OSFeatures)
}
return result
}
@ -25,7 +27,7 @@ func PlatformFromSpec(p ocispecs.Platform) *Platform {
OSVersion: p.OSVersion,
}
if p.OSFeatures != nil {
result.OSFeatures = append([]string{}, p.OSFeatures...)
result.OSFeatures = slices.Clone(p.OSFeatures)
}
return result
}

View File

@ -344,7 +344,7 @@ func (pf *pipeFactory) NewInputRequest(ee Edge, req *edgeRequest) pipeReceiver {
target := pf.s.ef.getEdge(ee)
if target == nil {
debugSchedulerInconsistentGraphState(ee)
return pf.NewFuncRequest(func(_ context.Context) (interface{}, error) {
return pf.NewFuncRequest(func(_ context.Context) (any, error) {
return nil, errdefs.Internal(errors.Errorf("failed to get edge: inconsistent graph state in edge %s %s %d", ee.Vertex.Name(), ee.Vertex.Digest(), ee.Index))
})
}
@ -353,7 +353,7 @@ func (pf *pipeFactory) NewInputRequest(ee Edge, req *edgeRequest) pipeReceiver {
return p.Receiver
}
func (pf *pipeFactory) NewFuncRequest(f func(context.Context) (interface{}, error)) pipeReceiver {
func (pf *pipeFactory) NewFuncRequest(f func(context.Context) (any, error)) pipeReceiver {
p := pf.s.newRequestWithFunc(pf.e, f)
debugSchedulerNewFunc(pf.e, p)
return p

View File

@ -3040,7 +3040,7 @@ func TestMergedEdgesLookup(t *testing.T) {
t.Parallel()
// this test requires multiple runs to trigger the race
for i := 0; i < 20; i++ {
for range 20 {
func() {
ctx := context.TODO()
@ -3092,7 +3092,7 @@ func TestMergedEdgesLookup(t *testing.T) {
func TestMergedEdgesCycle(t *testing.T) {
t.Parallel()
for i := 0; i < 20; i++ {
for range 20 {
ctx := context.TODO()
cacheManager := newTrackingCacheManager(NewInMemoryCacheManager())
@ -3147,7 +3147,7 @@ func TestMergedEdgesCycle(t *testing.T) {
func TestMergedEdgesCycleMultipleOwners(t *testing.T) {
t.Parallel()
for i := 0; i < 20; i++ {
for range 20 {
ctx := context.TODO()
cacheManager := newTrackingCacheManager(NewInMemoryCacheManager())
@ -3586,10 +3586,7 @@ func generateSubGraph(nodes int) (Edge, int) {
return Edge{Vertex: vtxConst(value, vtxOpt{})}, value
}
spread := rand.Int()%5 + 2 //nolint:gosec
inc := int(math.Ceil(float64(nodes) / float64(spread)))
if inc > nodes {
inc = nodes
}
inc := min(int(math.Ceil(float64(nodes)/float64(spread))), nodes)
added := 1
value := 0
inputs := []Edge{}
@ -3648,7 +3645,7 @@ type vertex struct {
func (v *vertex) Digest() digest.Digest {
return digest.FromBytes([]byte(v.opt.name))
}
func (v *vertex) Sys() interface{} {
func (v *vertex) Sys() any {
return v
}
func (v *vertex) Inputs() []Edge {
@ -3727,7 +3724,7 @@ func (v *vertex) CacheMap(ctx context.Context, g session.Group, index int) (*Cac
return v.makeCacheMap(), len(v.opt.cacheKeySeeds) == index, nil
}
return &CacheMap{
Digest: digest.FromBytes([]byte(fmt.Sprintf("seed:%s", v.opt.cacheKeySeeds[index-1]()))),
Digest: digest.FromBytes(fmt.Appendf(nil, "seed:%s", v.opt.cacheKeySeeds[index-1]())),
}, len(v.opt.cacheKeySeeds) == index, nil
}
@ -3769,7 +3766,7 @@ func (v *vertex) Acquire(ctx context.Context) (ReleaseFunc, error) {
func (v *vertex) makeCacheMap() *CacheMap {
m := &CacheMap{
Digest: digest.FromBytes([]byte(fmt.Sprintf("seed:%s", v.opt.cacheKeySeed))),
Digest: digest.FromBytes(fmt.Appendf(nil, "seed:%s", v.opt.cacheKeySeed)),
Deps: make([]struct {
Selector digest.Digest
ComputeDigestFunc ResultBasedCacheFunc
@ -3801,7 +3798,7 @@ type vertexConst struct {
value int
}
func (v *vertexConst) Sys() interface{} {
func (v *vertexConst) Sys() any {
return v
}
@ -3832,7 +3829,7 @@ type vertexSum struct {
value int
}
func (v *vertexSum) Sys() interface{} {
func (v *vertexSum) Sys() any {
return v
}
@ -3871,7 +3868,7 @@ type vertexAdd struct {
value int
}
func (v *vertexAdd) Sys() interface{} {
func (v *vertexAdd) Sys() any {
return v
}
@ -3909,7 +3906,7 @@ type vertexSubBuild struct {
b Builder
}
func (v *vertexSubBuild) Sys() interface{} {
func (v *vertexSubBuild) Sys() any {
return v
}
@ -3945,7 +3942,7 @@ type dummyResult struct {
func (r *dummyResult) ID() string { return r.id }
func (r *dummyResult) Release(context.Context) error { return nil }
func (r *dummyResult) Sys() interface{} { return r }
func (r *dummyResult) Sys() any { return r }
func (r *dummyResult) Clone() Result { return r }
func testOpResolver(v Vertex, b Builder) (Op, error) {
@ -4029,12 +4026,12 @@ func testExporterOpts(all bool) CacheExportOpt {
func newTestExporterTarget() *testExporterTarget {
return &testExporterTarget{
visited: map[interface{}]struct{}{},
visited: map[any]struct{}{},
}
}
type testExporterTarget struct {
visited map[interface{}]struct{}
visited map[any]struct{}
records []*testExporterRecord
}
@ -4044,11 +4041,11 @@ func (t *testExporterTarget) Add(dgst digest.Digest) CacheExporterRecord {
return r
}
func (t *testExporterTarget) Visit(v interface{}) {
func (t *testExporterTarget) Visit(v any) {
t.visited[v] = struct{}{}
}
func (t *testExporterTarget) Visited(v interface{}) bool {
func (t *testExporterTarget) Visited(v any) bool {
_, ok := t.visited[v]
return ok
}

View File

@ -382,12 +382,12 @@ func testWalkIDsByResult(t *testing.T, st solver.CacheKeyStorage) {
require.False(t, ok)
}
func getFunctionName(i interface{}) string {
func getFunctionName(i any) string {
fullname := runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
dot := strings.LastIndex(fullname, ".") + 1
return strings.Title(fullname[dot:]) //nolint:staticcheck // ignoring "SA1019: strings.Title is deprecated", as for our use we don't need full unicode support
}
func rootKey(dgst digest.Digest, output solver.Index) digest.Digest {
return digest.FromBytes([]byte(fmt.Sprintf("%s@%d", dgst, output)))
return digest.FromBytes(fmt.Appendf(nil, "%s@%d", dgst, output))
}

View File

@ -21,7 +21,7 @@ type Vertex interface {
// Sys returns an object used to resolve the executor for this vertex.
// In LLB solver, this value would be of type `llb.Op`.
Sys() interface{}
Sys() any
// Options return metadata associated with the vertex that doesn't change the
// definition or equality check of it.
@ -62,7 +62,7 @@ type VertexOptions struct {
type Result interface {
ID() string
Release(context.Context) error
Sys() interface{}
Sys() any
Clone() Result
}
@ -82,7 +82,7 @@ type ResultProxy interface {
Result(context.Context) (CachedResult, error)
Release(context.Context) error
Definition() *pb.Definition
Provenance() interface{}
Provenance() any
}
// CacheExportMode is the type for setting cache exporting modes

View File

@ -186,7 +186,7 @@ func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (cach
return "", "", nil, false, err
}
cacheOpts = solver.CacheOpts(make(map[interface{}]interface{}))
cacheOpts = solver.CacheOpts(make(map[any]any))
for dgst, descHandler := range p.descHandlers {
cacheOpts[cache.DescHandlerKey(dgst)] = descHandler
}

View File

@ -2,6 +2,7 @@ package containerimage
import (
"context"
"slices"
"strconv"
"github.com/containerd/containerd/v2/core/content"
@ -217,7 +218,7 @@ func (is *Source) registryIdentifier(ref string, attrs map[string]string, platfo
OSVersion: platform.OSVersion,
}
if platform.OSFeatures != nil {
id.Platform.OSFeatures = append([]string{}, platform.OSFeatures...)
id.Platform.OSFeatures = slices.Clone(platform.OSFeatures)
}
}
@ -264,7 +265,7 @@ func (is *Source) ociIdentifier(ref string, attrs map[string]string, platform *p
OSVersion: platform.OSVersion,
}
if platform.OSFeatures != nil {
id.Platform.OSFeatures = append([]string{}, platform.OSFeatures...)
id.Platform.OSFeatures = slices.Clone(platform.OSFeatures)
}
}

View File

@ -266,7 +266,7 @@ func (gs *gitSourceHandler) getAuthToken(ctx context.Context, g session.Group) e
return err
}
if s.token {
dt = []byte("basic " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("x-access-token:%s", dt))))
dt = []byte("basic " + base64.StdEncoding.EncodeToString(fmt.Appendf(nil, "x-access-token:%s", dt)))
}
gs.authArgs = []string{"-c", "http." + tokenScope(gs.src.Remote) + ".extraheader=Authorization: " + string(dt)}
break

View File

@ -2,6 +2,7 @@ package contentutil
import (
"net/url"
"slices"
"strings"
"github.com/containerd/containerd/v2/core/content"
@ -24,11 +25,8 @@ func HasSource(info content.Info, refspec reference.Spec) (bool, error) {
return false, nil
}
for _, repo := range strings.Split(repoLabel, ",") {
// the target repo is not a candidate
if repo == target {
return true, nil
}
if slices.Contains(strings.Split(repoLabel, ","), target) {
return true, nil
}
return false, nil
}

View File

@ -69,7 +69,7 @@ type conversion struct {
}
var bufioPool = sync.Pool{
New: func() interface{} {
New: func() any {
return nil
},
}

View File

@ -4,6 +4,7 @@ import (
"context"
"io"
"math/rand"
"slices"
"sort"
"sync"
"time"
@ -211,7 +212,7 @@ func (c *call[T]) Err() error {
}
}
func (c *call[T]) Value(key interface{}) interface{} {
func (c *call[T]) Value(key any) any {
if key == contextKey {
return c.progressState
}
@ -353,7 +354,7 @@ func (ps *progressState) close(pw progress.Writer) {
for i, w := range ps.writers {
if w == rw {
w.Close()
ps.writers = append(ps.writers[:i], ps.writers[i+1:]...)
ps.writers = slices.Delete(ps.writers, i, i+1)
break
}
}
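
Note: slices.Delete (Go 1.21) replaces the append splice for removing elements i through j-1; since Go 1.22 it also zeroes the vacated tail slots, so removed pointer elements become collectable. A small sketch:

package main

import (
    "fmt"
    "slices"
)

func main() {
    writers := []string{"w0", "w1", "w2"}

    // Before: writers = append(writers[:1], writers[2:]...)
    writers = slices.Delete(writers, 1, 2) // drop the element at index 1

    fmt.Println(writers) // [w0 w2]
}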

View File

@ -209,8 +209,8 @@ func TestContention(t *testing.T) {
g := &Group[int]{}
for i := 0; i < threads; i++ {
for j := 0; j < perthread; j++ {
for range threads {
for range perthread {
_, err := g.Do(context.TODO(), "foo", func(ctx context.Context) (int, error) {
time.Sleep(time.Microsecond)
return 0, nil
@ -227,7 +227,7 @@ func TestMassiveParallel(t *testing.T) {
var retryCount int64
g := &Group[string]{}
eg, ctx := errgroup.WithContext(context.Background())
for i := 0; i < 1000; i++ {
for range 1000 {
eg.Go(func() error {
_, err := g.Do(ctx, "key", func(ctx context.Context) (string, error) {
return "", errors.Errorf("always fail")

View File

@ -6,6 +6,7 @@ import (
"io"
"os"
"os/exec"
"slices"
"strings"
"github.com/pkg/errors"
@ -120,7 +121,7 @@ func NewGitCLI(opts ...Option) *GitCLI {
// with the given options applied on top.
func (cli *GitCLI) New(opts ...Option) *GitCLI {
clone := *cli
clone.args = append([]string{}, cli.args...)
clone.args = slices.Clone(cli.args)
for _, opt := range opts {
opt(&clone)

View File

@ -10,7 +10,7 @@ import (
"google.golang.org/grpc"
)
func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
func UnaryServerInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
resp, err = handler(ctx, req)
oldErr := err
if err != nil {
@ -29,7 +29,7 @@ func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.Una
return resp, err
}
func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
func StreamServerInterceptor(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
err := ToGRPC(ss.Context(), handler(srv, ss))
if err != nil {
stack.Helper()
@ -37,7 +37,7 @@ func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.S
return err
}
func UnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
func UnaryClientInterceptor(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
err := FromGRPC(invoker(ctx, method, req, reply, cc, opts...))
if err != nil {
stack.Helper()

View File

@ -86,7 +86,7 @@ func NewBridge(opt Opt) (network.Provider, error) {
firewallBackend = "iptables"
}
cniOptions = append(cniOptions, cni.WithConfListBytes([]byte(fmt.Sprintf(`{
cniOptions = append(cniOptions, cni.WithConfListBytes(fmt.Appendf(nil, `{
"cniVersion": "1.0.0",
"name": "buildkit",
"plugins": [
@ -113,7 +113,7 @@ func NewBridge(opt Opt) (network.Provider, error) {
"ingressPolicy": "same-bridge"
}
]
}`, loopbackBinName, bridgeBinName, opt.BridgeName, hostLocalBinName, opt.BridgeSubnet, firewallBinName, firewallBackend))))
}`, loopbackBinName, bridgeBinName, opt.BridgeName, hostLocalBinName, opt.BridgeSubnet, firewallBinName, firewallBackend)))
unlock, err := initLock()
if err != nil {

View File

@ -65,7 +65,7 @@ func GetUpperdir(lower, upper []mount.Mount) (string, error) {
if len(upperlayers) != len(lowerlayers)+1 {
return "", errors.Errorf("cannot determine diff of more than one upper directories")
}
for i := 0; i < len(lowerlayers); i++ {
for i := range lowerlayers {
if upperlayers[i] != lowerlayers[i] {
return "", errors.Errorf("layer %d must be common between upper and lower snapshots", i)
}
@ -361,7 +361,7 @@ func sameDirent(f1, f2 os.FileInfo, f1fullPath, f2fullPath string) (bool, error)
// Ported from continuity project
// https://github.com/containerd/continuity/blob/v0.1.0/fs/diff_unix.go#L43-L54
// Copyright The containerd Authors.
func compareSysStat(s1, s2 interface{}) (bool, error) {
func compareSysStat(s1, s2 any) (bool, error) {
ls1, ok := s1.(*syscall.Stat_t)
if !ok {
return false, nil
@ -405,7 +405,7 @@ func compareSymlinkTarget(p1, p2 string) (bool, error) {
}
var bufPool = sync.Pool{
New: func() interface{} {
New: func() any {
b := make([]byte, 32*1024)
return &b
},

View File

@ -6,6 +6,7 @@ import (
"io"
"math"
"os"
"slices"
"strconv"
"sync"
"time"
@ -116,13 +117,13 @@ func (sw *streamWriter) Write(dt []byte) (int, error) {
sw.buf.Write(dt)
}
dt = append([]byte{}, dt[:limit]...)
dt = slices.Clone(dt[:limit])
if sw.clipping && oldSize == len(dt) {
sw.clipping = false
}
if !sw.clipping && oldSize != len(dt) {
dt = append(dt, []byte(fmt.Sprintf("\n[output clipped, log limit %s reached]\n", sw.clipLimitMessage()))...)
dt = append(dt, fmt.Appendf(nil, "\n[output clipped, log limit %s reached]\n", sw.clipLimitMessage())...)
sw.clipping = true
}
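
Note: applying slices.Clone to a subexpression like dt[:limit] both truncates and detaches the result from the caller's backing array, matching the old append([]byte{}, dt[:limit]...) copy. A small sketch:

package main

import (
    "fmt"
    "slices"
)

func main() {
    in := []byte("hello world")
    limit := 5

    // Before: dt := append([]byte{}, in[:limit]...)
    dt := slices.Clone(in[:limit]) // copy just the prefix into a fresh array

    dt[0] = 'H'
    fmt.Println(string(in), string(dt)) // hello world Hello
}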

View File

@ -16,7 +16,7 @@ type MultiWriter struct {
mu sync.Mutex
items []*Progress
writers map[rawProgressWriter]struct{}
meta map[string]interface{}
meta map[string]any
}
var _ rawProgressWriter = &MultiWriter{}
@ -24,7 +24,7 @@ var _ rawProgressWriter = &MultiWriter{}
func NewMultiWriter(opts ...WriterOption) *MultiWriter {
mw := &MultiWriter{
writers: map[rawProgressWriter]struct{}{},
meta: map[string]interface{}{},
meta: map[string]any{},
}
for _, o := range opts {
o(mw)
@ -70,7 +70,7 @@ func (ps *MultiWriter) Delete(pw Writer) {
ps.mu.Unlock()
}
func (ps *MultiWriter) Write(id string, v interface{}) error {
func (ps *MultiWriter) Write(id string, v any) error {
p := &Progress{
ID: id,
Timestamp: time.Now(),
@ -83,7 +83,7 @@ func (ps *MultiWriter) Write(id string, v interface{}) error {
func (ps *MultiWriter) WriteRawProgress(p *Progress) error {
meta := p.meta
if len(ps.meta) > 0 {
meta = map[string]interface{}{}
meta = map[string]any{}
maps.Copy(meta, p.meta)
for k, v := range ps.meta {
if _, ok := meta[k]; !ok {

View File

@ -67,7 +67,7 @@ func WithProgress(ctx context.Context, pw Writer) context.Context {
return context.WithValue(ctx, contextKey, pw)
}
func WithMetadata(key string, val interface{}) WriterOption {
func WithMetadata(key string, val any) WriterOption {
return func(w Writer) {
if pw, ok := w.(*progressWriter); ok {
pw.meta[key] = val
@ -84,7 +84,7 @@ type Controller interface {
}
type Writer interface {
Write(id string, value interface{}) error
Write(id string, value any) error
Close() error
}
@ -95,8 +95,8 @@ type Reader interface {
type Progress struct {
ID string
Timestamp time.Time
Sys interface{}
meta map[string]interface{}
Sys any
meta map[string]any
}
type Status struct {
@ -207,7 +207,7 @@ func pipe() (*progressReader, *progressWriter, func(error)) {
}
func newWriter(pw *progressWriter) *progressWriter {
meta := make(map[string]interface{})
meta := make(map[string]any)
maps.Copy(meta, pw.meta)
pw = &progressWriter{
reader: pw.reader,
@ -220,10 +220,10 @@ func newWriter(pw *progressWriter) *progressWriter {
type progressWriter struct {
done bool
reader *progressReader
meta map[string]interface{}
meta map[string]any
}
func (pw *progressWriter) Write(id string, v interface{}) error {
func (pw *progressWriter) Write(id string, v any) error {
if pw.done {
return errors.Errorf("writing %s to closed progress writer", id)
}
@ -238,7 +238,7 @@ func (pw *progressWriter) Write(id string, v interface{}) error {
func (pw *progressWriter) WriteRawProgress(p *Progress) error {
meta := p.meta
if len(pw.meta) > 0 {
meta = map[string]interface{}{}
meta = map[string]any{}
maps.Copy(meta, p.meta)
for k, v := range pw.meta {
if _, ok := meta[k]; !ok {
@ -267,14 +267,14 @@ func (pw *progressWriter) Close() error {
return nil
}
func (p *Progress) Meta(key string) (interface{}, bool) {
func (p *Progress) Meta(key string) (any, bool) {
v, ok := p.meta[key]
return v, ok
}
type noOpWriter struct{}
func (pw *noOpWriter) Write(_ string, _ interface{}) error {
func (pw *noOpWriter) Write(_ string, _ any) error {
return nil
}

View File

@ -104,7 +104,7 @@ func reduceCalc(ctx context.Context, total int) (int, error) {
return 0, err
}
// parallel steps
for i := 0; i < 2; i++ {
for i := range 2 {
func(i int) {
eg.Go(func() error {
_, err := calc(ctx, total, fmt.Sprintf("calc-%d", i))

View File

@ -765,7 +765,7 @@ func (t *trace) update(s *client.SolveStatus, termWidth int) {
} else if sec < 100 {
prec = 2
}
v.logs = append(v.logs, []byte(fmt.Sprintf("%s %s", fmt.Sprintf("%.[2]*[1]f", sec, prec), dt)))
v.logs = append(v.logs, fmt.Appendf(nil, "%s %s", fmt.Sprintf("%.[2]*[1]f", sec, prec), dt))
}
i++
})
@ -787,7 +787,7 @@ func (t *trace) printErrorLogs(f io.Writer) {
}
// printer keeps last logs buffer
if v.logsBuffer != nil {
for i := 0; i < v.logsBuffer.Len(); i++ {
for range v.logsBuffer.Len() {
if v.logsBuffer.Value != nil {
fmt.Fprintln(f, string(v.logsBuffer.Value.([]byte)))
}
@ -1071,7 +1071,7 @@ func (disp *ttyDisplay) print(d displayInfo, width, height int, all bool) {
}
// override previous content
if diff := disp.lineCount - lineCount; diff > 0 {
for i := 0; i < diff; i++ {
for range diff {
fmt.Fprintln(disp.c, strings.Repeat(" ", width))
}
fmt.Fprint(disp.c, aec.EmptyBuilder.Up(uint(diff)).Column(0).ANSI)

View File

@ -33,7 +33,7 @@ func New(f images.HandlerFunc, logger func([]byte)) images.HandlerFunc {
}
}
if logger != nil {
logger([]byte(fmt.Sprintf("error: %v\n", err.Error())))
logger(fmt.Appendf(nil, "error: %v\n", err.Error()))
}
} else {
return descs, nil
@ -43,7 +43,7 @@ func New(f images.HandlerFunc, logger func([]byte)) images.HandlerFunc {
return nil, err
}
if logger != nil {
logger([]byte(fmt.Sprintf("retrying in %v\n", backoff)))
logger(fmt.Appendf(nil, "retrying in %v\n", backoff))
}
time.Sleep(backoff)
backoff *= 2

Some files were not shown because too many files have changed in this diff