1
0
mirror of https://github.com/moby/buildkit.git synced 2025-07-30 15:03:06 +03:00

exporter: support for compression-level

The compression-level option can be set on export to
define the preferred trade-off between speed and compression
ratio. The meaning of the value depends on the compression algorithm.

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
This commit is contained in:
Tonis Tiigi
2022-01-31 21:42:15 -08:00
parent 2633c96bac
commit cab33b1e31
21 changed files with 195 additions and 143 deletions

View File

@ -234,6 +234,7 @@ Keys supported by image output:
* `dangling-name-prefix=[value]`: name image with `prefix@<digest>` , used for anonymous images
* `name-canonical=true`: add additional canonical name `name@<digest>`
* `compression=[uncompressed,gzip,estargz,zstd]`: choose compression type for layers newly created and cached, gzip is default value. estargz should be used with `oci-mediatypes=true`.
* `compression-level=[value]`: compression level for gzip, estargz (0-9) and zstd (0-22)
* `force-compression=true`: forcefully apply `compression` option to all layers (including already existing layers).
* `buildinfo=[all,imageconfig,metadata,none]`: choose [build dependency](docs/build-repro.md#build-dependencies) version to export (default `all`).

81
cache/blobs.go vendored
View File

@ -1,6 +1,7 @@
package cache
import (
"compress/gzip"
"context"
"fmt"
"io"
@ -34,7 +35,7 @@ var ErrNoBlobs = errors.Errorf("no blobs for snapshot")
// a blob is missing and createIfNeeded is true, then the blob will be created, otherwise ErrNoBlobs will
// be returned. Caller must hold a lease when calling this function.
// If forceCompression is specified but the blob of compressionType doesn't exist, this function creates it.
func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded bool, compressionType compression.Type, forceCompression bool, s session.Group) error {
func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded bool, comp compression.Config, s session.Group) error {
if _, ok := leases.FromContext(ctx); !ok {
return errors.Errorf("missing lease requirement for computeBlobChain")
}
@ -52,31 +53,31 @@ func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded boo
// refs rather than every single layer present among their ancestors.
filter := sr.layerSet()
return computeBlobChain(ctx, sr, createIfNeeded, compressionType, forceCompression, s, filter)
return computeBlobChain(ctx, sr, createIfNeeded, comp, s, filter)
}
type compressor func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error)
func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool, compressionType compression.Type, forceCompression bool, s session.Group, filter map[string]struct{}) error {
func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool, comp compression.Config, s session.Group, filter map[string]struct{}) error {
eg, ctx := errgroup.WithContext(ctx)
switch sr.kind() {
case Merge:
for _, parent := range sr.mergeParents {
parent := parent
eg.Go(func() error {
return computeBlobChain(ctx, parent, createIfNeeded, compressionType, forceCompression, s, filter)
return computeBlobChain(ctx, parent, createIfNeeded, comp, s, filter)
})
}
case Diff:
if _, ok := filter[sr.ID()]; !ok && sr.diffParents.upper != nil {
// This diff is just re-using the upper blob, compute that
eg.Go(func() error {
return computeBlobChain(ctx, sr.diffParents.upper, createIfNeeded, compressionType, forceCompression, s, filter)
return computeBlobChain(ctx, sr.diffParents.upper, createIfNeeded, comp, s, filter)
})
}
case Layer:
eg.Go(func() error {
return computeBlobChain(ctx, sr.layerParent, createIfNeeded, compressionType, forceCompression, s, filter)
return computeBlobChain(ctx, sr.layerParent, createIfNeeded, comp, s, filter)
})
}
@ -93,19 +94,24 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
var mediaType string
var compressorFunc compressor
var finalize func(context.Context, content.Store) (map[string]string, error)
switch compressionType {
switch comp.Type {
case compression.Uncompressed:
mediaType = ocispecs.MediaTypeImageLayer
case compression.Gzip:
compressorFunc = func(dest io.Writer, _ string) (io.WriteCloser, error) {
return gzipWriter(comp)(dest)
}
mediaType = ocispecs.MediaTypeImageLayerGzip
case compression.EStargz:
compressorFunc, finalize = compressEStargz()
compressorFunc, finalize = compressEStargz(comp)
mediaType = ocispecs.MediaTypeImageLayerGzip
case compression.Zstd:
compressorFunc = zstdWriter
compressorFunc = func(dest io.Writer, _ string) (io.WriteCloser, error) {
return zstdWriter(comp)(dest)
}
mediaType = ocispecs.MediaTypeImageLayer + "+zstd"
default:
return nil, errors.Errorf("unknown layer compression type: %q", compressionType)
return nil, errors.Errorf("unknown layer compression type: %q", comp.Type)
}
var lowerRef *immutableRef
@ -235,7 +241,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
return nil, errors.Errorf("unknown layer compression type")
}
if err := sr.setBlob(ctx, compressionType, desc); err != nil {
if err := sr.setBlob(ctx, comp.Type, desc); err != nil {
return nil, err
}
return nil, nil
@ -244,9 +250,9 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
return err
}
if forceCompression {
if err := ensureCompression(ctx, sr, compressionType, s); err != nil {
return errors.Wrapf(err, "failed to ensure compression type of %q", compressionType)
if comp.Force {
if err := ensureCompression(ctx, sr, comp, s); err != nil {
return errors.Wrapf(err, "failed to ensure compression type of %q", comp.Type)
}
}
return nil
@ -412,15 +418,15 @@ func isTypeWindows(sr *immutableRef) bool {
}
// ensureCompression ensures the specified ref has the blob of the specified compression Type.
func ensureCompression(ctx context.Context, ref *immutableRef, compressionType compression.Type, s session.Group) error {
_, err := g.Do(ctx, fmt.Sprintf("%s-%d", ref.ID(), compressionType), func(ctx context.Context) (interface{}, error) {
func ensureCompression(ctx context.Context, ref *immutableRef, comp compression.Config, s session.Group) error {
_, err := g.Do(ctx, fmt.Sprintf("%s-%d", ref.ID(), comp.Type), func(ctx context.Context) (interface{}, error) {
desc, err := ref.ociDesc(ctx, ref.descHandlers)
if err != nil {
return nil, err
}
// Resolve converters
layerConvertFunc, err := getConverter(ctx, ref.cm.ContentStore, desc, compressionType)
layerConvertFunc, err := getConverter(ctx, ref.cm.ContentStore, desc, comp)
if err != nil {
return nil, err
} else if layerConvertFunc == nil {
@ -430,11 +436,11 @@ func ensureCompression(ctx context.Context, ref *immutableRef, compressionType c
// This ref can be used as the specified compressionType. Keep it lazy.
return nil, nil
}
return nil, ref.addCompressionBlob(ctx, desc, compressionType)
return nil, ref.addCompressionBlob(ctx, desc, comp.Type)
}
// First, lookup local content store
if _, err := ref.getCompressionBlob(ctx, compressionType); err == nil {
if _, err := ref.getCompressionBlob(ctx, comp.Type); err == nil {
return nil, nil // found the compression variant. no need to convert.
}
@ -453,7 +459,7 @@ func ensureCompression(ctx context.Context, ref *immutableRef, compressionType c
}
// Start to track converted layer
if err := ref.addCompressionBlob(ctx, *newDesc, compressionType); err != nil {
if err := ref.addCompressionBlob(ctx, *newDesc, comp.Type); err != nil {
return nil, errors.Wrapf(err, "failed to add compression blob")
}
return nil, nil
@ -461,6 +467,37 @@ func ensureCompression(ctx context.Context, ref *immutableRef, compressionType c
return err
}
func zstdWriter(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) {
return zstd.NewWriter(dest)
func gzipWriter(comp compression.Config) func(io.Writer) (io.WriteCloser, error) {
return func(dest io.Writer) (io.WriteCloser, error) {
level := gzip.DefaultCompression
if comp.Level != nil {
level = *comp.Level
}
return gzip.NewWriterLevel(dest, level)
}
}
// zstdWriter returns a constructor for zstd writers. An explicit level in
// comp is translated to the discrete go-zstd encoder levels via
// toZstdEncoderLevel; otherwise the encoder's default trade-off is used.
func zstdWriter(comp compression.Config) func(io.Writer) (io.WriteCloser, error) {
	return func(dest io.Writer) (io.WriteCloser, error) {
		lvl := zstd.SpeedDefault
		if l := comp.Level; l != nil {
			lvl = toZstdEncoderLevel(*l)
		}
		return zstd.NewWriter(dest, zstd.WithEncoderLevel(lvl))
	}
}
// toZstdEncoderLevel maps a numeric zstd compression level (0-22, as used
// by the C implementation) onto the discrete speed levels exposed by the
// go-zstd package. Negative values fall back to the default level.
// once we also have c based implementation move this to helper pkg
func toZstdEncoderLevel(level int) zstd.EncoderLevel {
	switch {
	case level < 0:
		return zstd.SpeedDefault
	case level < 3:
		return zstd.SpeedFastest
	case level < 7:
		return zstd.SpeedDefault
	case level < 9:
		return zstd.SpeedBetterCompression
	default:
		return zstd.SpeedBestCompression
	}
}

15
cache/blobs_linux.go vendored
View File

@ -8,7 +8,6 @@ import (
"context"
"io"
ctdcompression "github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/mount"
@ -33,20 +32,6 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper
return emptyDesc, false, nil
}
if compressorFunc == nil {
switch mediaType {
case ocispecs.MediaTypeImageLayer:
case ocispecs.MediaTypeImageLayerGzip:
compressorFunc = func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) {
return ctdcompression.CompressStream(dest, ctdcompression.Gzip)
}
case ocispecs.MediaTypeImageLayer + "+zstd":
compressorFunc = zstdWriter
default:
return emptyDesc, false, errors.Errorf("unsupported diff media type: %v", mediaType)
}
}
cw, err := sr.cm.ContentStore.Writer(ctx,
content.WithRef(ref),
content.WithDescriptor(ocispecs.Descriptor{

28
cache/converter.go vendored
View File

@ -1,7 +1,6 @@
package cache
import (
"compress/gzip"
"context"
"fmt"
"io"
@ -12,7 +11,6 @@ import (
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/images/converter"
"github.com/containerd/containerd/labels"
"github.com/klauspost/compress/zstd"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/util/compression"
digest "github.com/opencontainers/go-digest"
@ -57,15 +55,15 @@ func needsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descri
// getConverter returns converter function according to the specified compression type.
// If no conversion is needed, this returns nil without error.
func getConverter(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, compressionType compression.Type) (converter.ConvertFunc, error) {
if needs, err := needsConversion(ctx, cs, desc, compressionType); err != nil {
func getConverter(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, comp compression.Config) (converter.ConvertFunc, error) {
if needs, err := needsConversion(ctx, cs, desc, comp.Type); err != nil {
return nil, errors.Wrapf(err, "failed to determine conversion needs")
} else if !needs {
// No conversion. No need to return an error here.
return nil, nil
}
c := conversion{target: compressionType}
c := conversion{target: comp}
from := compression.FromMediaType(desc.MediaType)
switch from {
@ -96,31 +94,27 @@ func getConverter(ctx context.Context, cs content.Store, desc ocispecs.Descripto
return nil, errors.Errorf("unsupported source compression type %q from mediatype %q", from, desc.MediaType)
}
switch compressionType {
switch comp.Type {
case compression.Uncompressed:
case compression.Gzip:
c.compress = func(w io.Writer) (io.WriteCloser, error) {
return gzip.NewWriter(w), nil
}
c.compress = gzipWriter(comp)
case compression.Zstd:
c.compress = func(w io.Writer) (io.WriteCloser, error) {
return zstd.NewWriter(w)
}
c.compress = zstdWriter(comp)
case compression.EStargz:
compressorFunc, finalize := compressEStargz()
compressorFunc, finalize := compressEStargz(comp)
c.compress = func(w io.Writer) (io.WriteCloser, error) {
return compressorFunc(w, ocispecs.MediaTypeImageLayerGzip)
}
c.finalize = finalize
default:
return nil, errors.Errorf("unknown target compression type during conversion: %q", compressionType)
return nil, errors.Errorf("unknown target compression type during conversion: %q", comp.Type)
}
return (&c).convert, nil
}
type conversion struct {
target compression.Type
target compression.Config
decompress func(context.Context, ocispecs.Descriptor) (io.ReadCloser, error)
compress func(w io.Writer) (io.WriteCloser, error)
finalize func(context.Context, content.Store) (map[string]string, error)
@ -129,7 +123,7 @@ type conversion struct {
func (c *conversion) convert(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (*ocispecs.Descriptor, error) {
// prepare the source and destination
labelz := make(map[string]string)
ref := fmt.Sprintf("convert-from-%s-to-%s-%s", desc.Digest, c.target.String(), identity.NewID())
ref := fmt.Sprintf("convert-from-%s-to-%s-%s", desc.Digest, c.target.Type.String(), identity.NewID())
w, err := cs.Writer(ctx, content.WithRef(ref))
if err != nil {
return nil, err
@ -188,7 +182,7 @@ func (c *conversion) convert(ctx context.Context, cs content.Store, desc ocispec
}
newDesc := desc
newDesc.MediaType = c.target.DefaultMediaType()
newDesc.MediaType = c.target.Type.DefaultMediaType()
newDesc.Digest = info.Digest
newDesc.Size = info.Size
newDesc.Annotations = map[string]string{labels.LabelUncompressed: diffID.Digest().String()}

9
cache/estargz.go vendored
View File

@ -2,6 +2,7 @@ package cache
import (
"archive/tar"
"compress/gzip"
"context"
"fmt"
"io"
@ -21,7 +22,7 @@ var eStargzAnnotations = []string{estargz.TOCJSONDigestAnnotation, estargz.Store
// compressEStargz writes the passed blobs stream as an eStargz-compressed blob.
// finalize function finalizes the written blob metadata and returns all eStargz annotations.
func compressEStargz() (compressorFunc compressor, finalize func(context.Context, content.Store) (map[string]string, error)) {
func compressEStargz(comp compression.Config) (compressorFunc compressor, finalize func(context.Context, content.Store) (map[string]string, error)) {
var cInfo *compressionInfo
var writeErr error
var mu sync.Mutex
@ -43,7 +44,11 @@ func compressEStargz() (compressorFunc compressor, finalize func(context.Context
blobInfoW, bInfoCh := calculateBlobInfo()
defer blobInfoW.Close()
w := estargz.NewWriter(io.MultiWriter(dest, blobInfoW))
level := gzip.BestCompression
if comp.Level != nil {
level = *comp.Level
}
w := estargz.NewWriterLevel(io.MultiWriter(dest, blobInfoW), level)
// Using lossless API here to make sure that decompressEStargz provides the exact
// same tar as the original.

17
cache/manager_test.go vendored
View File

@ -386,7 +386,7 @@ func TestMergeBlobchainID(t *testing.T) {
mergeRef, err := cm.Merge(ctx, mergeInputs)
require.NoError(t, err)
_, err = mergeRef.GetRemotes(ctx, true, solver.CompressionOpt{Type: compression.Default}, false, nil)
_, err = mergeRef.GetRemotes(ctx, true, compression.New(compression.Default), false, nil)
require.NoError(t, err)
// verify the merge blobchain ID isn't just set to one of the inputs (regression test)
@ -1159,13 +1159,15 @@ func TestConversion(t *testing.T) {
eg, egctx := errgroup.WithContext(ctx)
for _, orgDesc := range []ocispecs.Descriptor{orgDescGo, orgDescSys} {
for _, i := range allCompression {
compSrc := compression.New(i)
for _, j := range allCompression {
i, j, orgDesc := i, j, orgDesc
compDest := compression.New(j)
eg.Go(func() error {
testName := fmt.Sprintf("%s=>%s", i, j)
// Prepare the source compression type
convertFunc, err := getConverter(egctx, store, orgDesc, i)
convertFunc, err := getConverter(egctx, store, orgDesc, compSrc)
require.NoError(t, err, testName)
srcDesc := &orgDesc
if convertFunc != nil {
@ -1174,7 +1176,7 @@ func TestConversion(t *testing.T) {
}
// Convert the blob
convertFunc, err = getConverter(egctx, store, *srcDesc, j)
convertFunc, err = getConverter(egctx, store, *srcDesc, compDest)
require.NoError(t, err, testName)
resDesc := srcDesc
if convertFunc != nil {
@ -1183,7 +1185,7 @@ func TestConversion(t *testing.T) {
}
// Check the uncompressed digest is the same as the original
convertFunc, err = getConverter(egctx, store, *resDesc, compression.Uncompressed)
convertFunc, err = getConverter(egctx, store, *resDesc, compression.New(compression.Uncompressed))
require.NoError(t, err, testName)
recreatedDesc := resDesc
if convertFunc != nil {
@ -1338,10 +1340,7 @@ func TestGetRemotes(t *testing.T) {
ir := ir.(*immutableRef)
for _, compressionType := range []compression.Type{compression.Uncompressed, compression.Gzip, compression.EStargz, compression.Zstd} {
compressionType := compressionType
compressionopt := solver.CompressionOpt{
Type: compressionType,
Force: true,
}
compressionopt := compression.New(compressionType).SetForce(true)
eg.Go(func() error {
remotes, err := ir.GetRemotes(egctx, true, compressionopt, false, nil)
require.NoError(t, err)
@ -1430,7 +1429,7 @@ func TestGetRemotes(t *testing.T) {
require.True(t, ok, ir.ID())
for _, compressionType := range []compression.Type{compression.Uncompressed, compression.Gzip, compression.EStargz, compression.Zstd} {
compressionType := compressionType
compressionopt := solver.CompressionOpt{Type: compressionType}
compressionopt := compression.New(compressionType)
eg.Go(func() error {
remotes, err := ir.GetRemotes(egctx, false, compressionopt, true, nil)
require.NoError(t, err)

2
cache/refs.go vendored
View File

@ -46,7 +46,7 @@ type ImmutableRef interface {
Finalize(context.Context) error
Extract(ctx context.Context, s session.Group) error // +progress
GetRemotes(ctx context.Context, createIfNeeded bool, compressionopt solver.CompressionOpt, all bool, s session.Group) ([]*solver.Remote, error)
GetRemotes(ctx context.Context, createIfNeeded bool, compressionopt compression.Config, all bool, s session.Group) ([]*solver.Remote, error)
LayerChain() RefList
}

17
cache/remote.go vendored
View File

@ -31,7 +31,7 @@ type Unlazier interface {
// layers. If all is true, all available chains that have the specified compression type of topmost blob are
// appended to the result.
// Note: Use WorkerRef.GetRemotes instead as moby integration requires custom GetRemotes implementation.
func (sr *immutableRef) GetRemotes(ctx context.Context, createIfNeeded bool, compressionopt solver.CompressionOpt, all bool, s session.Group) ([]*solver.Remote, error) {
func (sr *immutableRef) GetRemotes(ctx context.Context, createIfNeeded bool, compressionopt compression.Config, all bool, s session.Group) ([]*solver.Remote, error) {
ctx, done, err := leaseutil.WithLease(ctx, sr.cm.LeaseManager, leaseutil.MakeTemporary)
if err != nil {
return nil, err
@ -136,11 +136,8 @@ func getAvailableBlobs(ctx context.Context, cs content.Store, chain *solver.Remo
return res, nil
}
func (sr *immutableRef) getRemote(ctx context.Context, createIfNeeded bool, compressionopt solver.CompressionOpt, s session.Group) (*solver.Remote, error) {
compressionType := compressionopt.Type
forceCompression := compressionopt.Force
err := sr.computeBlobChain(ctx, createIfNeeded, compressionType, forceCompression, s)
func (sr *immutableRef) getRemote(ctx context.Context, createIfNeeded bool, comp compression.Config, s session.Group) (*solver.Remote, error) {
err := sr.computeBlobChain(ctx, createIfNeeded, comp, s)
if err != nil {
return nil, err
}
@ -213,15 +210,15 @@ func (sr *immutableRef) getRemote(ctx context.Context, createIfNeeded bool, comp
}
}
if forceCompression {
if needs, err := needsConversion(ctx, sr.cm.ContentStore, desc, compressionType); err != nil {
if comp.Force {
if needs, err := needsConversion(ctx, sr.cm.ContentStore, desc, comp.Type); err != nil {
return nil, err
} else if needs {
// ensure the compression type.
// compressed blob must be created and stored in the content store.
blobDesc, err := ref.getCompressionBlob(ctx, compressionType)
blobDesc, err := ref.getCompressionBlob(ctx, comp.Type)
if err != nil {
return nil, errors.Wrapf(err, "compression blob for %q not found", compressionType)
return nil, errors.Wrapf(err, "compression blob for %q not found", comp.Type)
}
newDesc := desc
newDesc.MediaType = blobDesc.MediaType

View File

@ -7,6 +7,7 @@ import (
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/worker"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@ -267,7 +268,7 @@ func (cs *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult)
return worker.NewWorkerRefResult(ref, cs.w), nil
}
func (cs *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheResult, compressionopts *solver.CompressionOpt, _ session.Group) ([]*solver.Remote, error) {
func (cs *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheResult, compressionopts *compression.Config, _ session.Group) ([]*solver.Remote, error) {
if r := cs.byResultID(res.ID); r != nil && r.result != nil {
if compressionopts == nil {
return []*solver.Remote{r.result}, nil

View File

@ -19,7 +19,6 @@ import (
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/buildinfo"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/contentutil"
@ -42,6 +41,7 @@ const (
keyNameCanonical = "name-canonical"
keyLayerCompression = "compression"
keyForceCompression = "force-compression"
keyCompressionLevel = "compression-level"
keyBuildInfo = "buildinfo"
ociTypes = "oci-mediatypes"
)
@ -162,9 +162,16 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
}
b, err := strconv.ParseBool(v)
if err != nil {
return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
return nil, errors.Wrapf(err, "non-bool value %s specified for %s", v, k)
}
i.forceCompression = b
case keyCompressionLevel:
ii, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "non-integer value %s specified for %s", v, k)
}
v := int(ii)
i.compressionLevel = &v
case keyBuildInfo:
if v == "" {
continue
@ -200,6 +207,7 @@ type imageExporterInstance struct {
danglingPrefix string
layerCompression compression.Type
forceCompression bool
compressionLevel *int
buildInfoMode buildinfo.ExportMode
meta map[string][]byte
}
@ -210,13 +218,18 @@ func (e *imageExporterInstance) Name() string {
func (e *imageExporterInstance) Config() exporter.Config {
return exporter.Config{
Compression: solver.CompressionOpt{
Type: e.layerCompression,
Force: e.forceCompression,
},
Compression: e.compression(),
}
}
// compression assembles the compression.Config for this export from the
// instance's layer-compression type, force flag, and optional level.
func (e *imageExporterInstance) compression() compression.Config {
	cfg := compression.New(e.layerCompression)
	cfg = cfg.SetForce(e.forceCompression)
	// The level is optional; only apply it when the user supplied one.
	if lvl := e.compressionLevel; lvl != nil {
		cfg = cfg.SetLevel(*lvl)
	}
	return cfg
}
func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source, sessionID string) (map[string]string, error) {
if src.Metadata == nil {
src.Metadata = make(map[string][]byte)
@ -231,7 +244,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source,
}
defer done(context.TODO())
desc, err := e.opt.ImageWriter.Commit(ctx, src, e.ociTypes, e.layerCompression, e.buildInfoMode, e.forceCompression, sessionID)
desc, err := e.opt.ImageWriter.Commit(ctx, src, e.ociTypes, e.compression(), e.buildInfoMode, sessionID)
if err != nil {
return nil, err
}
@ -297,12 +310,8 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source,
if e.push {
annotations := map[digest.Digest]map[string]string{}
mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore())
compressionopt := solver.CompressionOpt{
Type: e.layerCompression,
Force: e.forceCompression,
}
if src.Ref != nil {
remotes, err := src.Ref.GetRemotes(ctx, false, compressionopt, false, session.NewGroup(sessionID))
remotes, err := src.Ref.GetRemotes(ctx, false, e.compression(), false, session.NewGroup(sessionID))
if err != nil {
return nil, err
}
@ -314,7 +323,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source,
}
if len(src.Refs) > 0 {
for _, r := range src.Refs {
remotes, err := r.GetRemotes(ctx, false, compressionopt, false, session.NewGroup(sessionID))
remotes, err := r.GetRemotes(ctx, false, e.compression(), false, session.NewGroup(sessionID))
if err != nil {
return nil, err
}
@ -368,11 +377,7 @@ func (e *imageExporterInstance) unpackImage(ctx context.Context, img images.Imag
}
}
compressionopt := solver.CompressionOpt{
Type: e.layerCompression,
Force: e.forceCompression,
}
remotes, err := topLayerRef.GetRemotes(ctx, true, compressionopt, false, s)
remotes, err := topLayerRef.GetRemotes(ctx, true, e.compression(), false, s)
if err != nil {
return err
}

View File

@ -48,7 +48,7 @@ type ImageWriter struct {
opt WriterOpt
}
func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool, compressionType compression.Type, buildInfoMode buildinfo.ExportMode, forceCompression bool, sessionID string) (*ocispecs.Descriptor, error) {
func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool, comp compression.Config, buildInfoMode buildinfo.ExportMode, sessionID string) (*ocispecs.Descriptor, error) {
platformsBytes, ok := inp.Metadata[exptypes.ExporterPlatformsKey]
if len(inp.Refs) > 0 && !ok {
@ -56,7 +56,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool
}
if len(inp.Refs) == 0 {
remotes, err := ic.exportLayers(ctx, compressionType, forceCompression, session.NewGroup(sessionID), inp.Ref)
remotes, err := ic.exportLayers(ctx, comp, session.NewGroup(sessionID), inp.Ref)
if err != nil {
return nil, err
}
@ -94,7 +94,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool
refs = append(refs, r)
}
remotes, err := ic.exportLayers(ctx, compressionType, forceCompression, session.NewGroup(sessionID), refs...)
remotes, err := ic.exportLayers(ctx, comp, session.NewGroup(sessionID), refs...)
if err != nil {
return nil, err
}
@ -165,20 +165,20 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool
return &idxDesc, nil
}
func (ic *ImageWriter) exportLayers(ctx context.Context, compressionType compression.Type, forceCompression bool, s session.Group, refs ...cache.ImmutableRef) ([]solver.Remote, error) {
span, ctx := tracing.StartSpan(ctx, "export layers", trace.WithAttributes(
attribute.String("exportLayers.compressionType", compressionType.String()),
attribute.Bool("exportLayers.forceCompression", forceCompression),
))
func (ic *ImageWriter) exportLayers(ctx context.Context, comp compression.Config, s session.Group, refs ...cache.ImmutableRef) ([]solver.Remote, error) {
attr := []attribute.KeyValue{
attribute.String("exportLayers.compressionType", comp.Type.String()),
attribute.Bool("exportLayers.forceCompression", comp.Force),
}
if comp.Level != nil {
attr = append(attr, attribute.Int("exportLayers.compressionLevel", *comp.Level))
}
span, ctx := tracing.StartSpan(ctx, "export layers", trace.WithAttributes(attr...))
eg, ctx := errgroup.WithContext(ctx)
layersDone := oneOffProgress(ctx, "exporting layers")
out := make([]solver.Remote, len(refs))
compressionopt := solver.CompressionOpt{
Type: compressionType,
Force: forceCompression,
}
for i, ref := range refs {
func(i int, ref cache.ImmutableRef) {
@ -186,7 +186,7 @@ func (ic *ImageWriter) exportLayers(ctx context.Context, compressionType compres
return
}
eg.Go(func() error {
remotes, err := ref.GetRemotes(ctx, true, compressionopt, false, s)
remotes, err := ref.GetRemotes(ctx, true, comp, false, s)
if err != nil {
return err
}

View File

@ -4,7 +4,7 @@ import (
"context"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/compression"
)
type Exporter interface {
@ -24,5 +24,5 @@ type Source struct {
}
type Config struct {
Compression solver.CompressionOpt
Compression compression.Config
}

View File

@ -15,7 +15,6 @@ import (
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/filesync"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/buildinfo"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/contentutil"
@ -37,6 +36,7 @@ const (
VariantDocker = "docker"
ociTypes = "oci-mediatypes"
keyForceCompression = "force-compression"
keyCompressionLevel = "compression-level"
keyBuildInfo = "buildinfo"
)
@ -89,9 +89,16 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
}
b, err := strconv.ParseBool(v)
if err != nil {
return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
return nil, errors.Wrapf(err, "non-bool value %v specified for %s", v, k)
}
i.forceCompression = b
case keyCompressionLevel:
ii, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "non-int value %s specified for %s", v, k)
}
v := int(ii)
i.compressionLevel = &v
case ociTypes:
ot = new(bool)
if v == "" {
@ -138,6 +145,7 @@ type imageExporterInstance struct {
ociTypes bool
layerCompression compression.Type
forceCompression bool
compressionLevel *int
buildInfoMode buildinfo.ExportMode
}
@ -147,13 +155,18 @@ func (e *imageExporterInstance) Name() string {
func (e *imageExporterInstance) Config() exporter.Config {
return exporter.Config{
Compression: solver.CompressionOpt{
Type: e.layerCompression,
Force: e.forceCompression,
},
Compression: e.compression(),
}
}
// compression builds the compression.Config used by this exporter instance,
// combining the configured type, the force flag, and the optional level.
func (e *imageExporterInstance) compression() compression.Config {
	cfg := compression.New(e.layerCompression)
	cfg = cfg.SetForce(e.forceCompression)
	// compressionLevel is a *int so an unset option stays distinguishable
	// from level 0; only forward it when present.
	if lvl := e.compressionLevel; lvl != nil {
		cfg = cfg.SetLevel(*lvl)
	}
	return cfg
}
func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source, sessionID string) (map[string]string, error) {
if e.opt.Variant == VariantDocker && len(src.Refs) > 0 {
return nil, errors.Errorf("docker exporter does not currently support exporting manifest lists")
@ -172,7 +185,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source,
}
defer done(context.TODO())
desc, err := e.opt.ImageWriter.Commit(ctx, src, e.ociTypes, e.layerCompression, e.buildInfoMode, e.forceCompression, sessionID)
desc, err := e.opt.ImageWriter.Commit(ctx, src, e.ociTypes, e.compression(), e.buildInfoMode, sessionID)
if err != nil {
return nil, err
}
@ -237,12 +250,8 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source,
}
mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore())
compressionopt := solver.CompressionOpt{
Type: e.layerCompression,
Force: e.forceCompression,
}
if src.Ref != nil {
remotes, err := src.Ref.GetRemotes(ctx, false, compressionopt, false, session.NewGroup(sessionID))
remotes, err := src.Ref.GetRemotes(ctx, false, e.compression(), false, session.NewGroup(sessionID))
if err != nil {
return nil, err
}
@ -260,7 +269,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source,
}
if len(src.Refs) > 0 {
for _, r := range src.Refs {
remotes, err := r.GetRemotes(ctx, false, compressionopt, false, session.NewGroup(sessionID))
remotes, err := r.GetRemotes(ctx, false, e.compression(), false, session.NewGroup(sessionID))
if err != nil {
return nil, err
}

View File

@ -5,6 +5,7 @@ import (
"time"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/util/compression"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
@ -47,6 +48,6 @@ type CacheInfoLink struct {
type CacheResultStorage interface {
Save(Result, time.Time) (CacheResult, error)
Load(ctx context.Context, res CacheResult) (Result, error)
LoadRemotes(ctx context.Context, res CacheResult, compression *CompressionOpt, s session.Group) ([]*Remote, error)
LoadRemotes(ctx context.Context, res CacheResult, compression *compression.Config, s session.Group) ([]*Remote, error)
Exists(id string) bool
}

View File

@ -8,6 +8,7 @@ import (
"github.com/moby/buildkit/cache/contenthash"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/worker"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@ -81,7 +82,7 @@ func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc {
}
}
func workerRefResolver(compressionopt solver.CompressionOpt, all bool, g session.Group) func(ctx context.Context, res solver.Result) ([]*solver.Remote, error) {
func workerRefResolver(compressionopt compression.Config, all bool, g session.Group) func(ctx context.Context, res solver.Result) ([]*solver.Remote, error) {
return func(ctx context.Context, res solver.Result) ([]*solver.Remote, error) {
ref, ok := res.Sys().(*worker.WorkerRef)
if !ok {

View File

@ -254,9 +254,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro
// all keys have same export chain so exporting others is not needed
_, err = r.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{
ResolveRemotes: workerRefResolver(solver.CompressionOpt{
Type: compression.Default, // TODO: make configurable
}, false, g),
ResolveRemotes: workerRefResolver(compression.New(compression.Default), false, g), // TODO: make configurable
Mode: exp.CacheExportMode,
Session: g,
})
@ -295,7 +293,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro
}, nil
}
func inlineCache(ctx context.Context, e remotecache.Exporter, res solver.CachedResult, compressionopt solver.CompressionOpt, g session.Group) ([]byte, error) {
func inlineCache(ctx context.Context, e remotecache.Exporter, res solver.CachedResult, compressionopt compression.Config, g session.Group) ([]byte, error) {
if efl, ok := e.(interface {
ExportForLayers(context.Context, []digest.Digest) ([]byte, error)
}); ok {

View File

@ -6,6 +6,7 @@ import (
"time"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/util/compression"
"github.com/pkg/errors"
)
@ -298,7 +299,7 @@ func (s *inMemoryResultStore) Load(ctx context.Context, res CacheResult) (Result
return v.(Result), nil
}
func (s *inMemoryResultStore) LoadRemotes(_ context.Context, _ CacheResult, _ *CompressionOpt, _ session.Group) ([]*Remote, error) {
// LoadRemotes satisfies the CacheResultStorage interface for the in-memory
// result store. In-memory results keep no remote (blob) representation, so
// this is a no-op: it always returns (nil, nil) regardless of the requested
// compression config.
func (s *inMemoryResultStore) LoadRemotes(_ context.Context, _ CacheResult, _ *compression.Config, _ session.Group) ([]*Remote, error) {
	return nil, nil
}

View File

@ -102,13 +102,7 @@ type CacheExportOpt struct {
Session session.Group
// CompressionOpt is an option to specify the compression of the object to load.
// If specified, all objects that meet the option will be cached.
CompressionOpt *CompressionOpt
}
// CompressionOpt is compression information of a blob
type CompressionOpt struct {
Type compression.Type
Force bool
CompressionOpt *compression.Config
}
// CacheExporter can export the artifacts of the build chain

View File

@ -34,6 +34,28 @@ const (
UnknownCompression Type = -1
)
// Config describes how blobs should be compressed: the algorithm to use,
// whether already-existing layers are forcefully re-compressed to that
// algorithm, and an optional algorithm-specific compression level
// (0-9 for gzip and estargz, 0-22 for zstd).
type Config struct {
	Type  Type // compression algorithm
	Force bool // re-compress layers that already exist with another type
	Level *int // optional level; nil means the algorithm's default
}

// New returns a Config for compression type t with the default level
// and without forced re-compression.
func New(t Type) Config {
	return Config{Type: t}
}

// SetForce returns a copy of c with the force-compression flag set to v.
func (c Config) SetForce(v bool) Config {
	c.Force = v
	return c
}

// SetLevel returns a copy of c with the compression level set to l.
func (c Config) SetLevel(l int) Config {
	c.Level = &l
	return c
}
const (
mediaTypeDockerSchema2LayerZstd = images.MediaTypeDockerSchema2Layer + ".zstd"
mediaTypeImageLayerZstd = ocispecs.MediaTypeImageLayer + "+zstd" // unreleased image-spec#790

View File

@ -66,7 +66,7 @@ func (s *cacheResultStorage) load(ctx context.Context, id string, hidden bool) (
return NewWorkerRefResult(ref, w), nil
}
func (s *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheResult, compressionopt *solver.CompressionOpt, g session.Group) ([]*solver.Remote, error) {
func (s *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheResult, compressionopt *compression.Config, g session.Group) ([]*solver.Remote, error) {
w, refID, err := s.getWorkerRef(res.ID)
if err != nil {
return nil, err
@ -81,7 +81,8 @@ func (s *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheRe
wref := WorkerRef{ref, w}
all := true // load as many compression blobs as possible
if compressionopt == nil {
compressionopt = &solver.CompressionOpt{Type: compression.Default}
comp := compression.New(compression.Default)
compressionopt = &comp
all = false
}
remotes, err := wref.GetRemotes(ctx, false, *compressionopt, all, g)

View File

@ -6,6 +6,7 @@ import (
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/compression"
)
func NewWorkerRefResult(ref cache.ImmutableRef, worker Worker) solver.Result {
@ -28,9 +29,9 @@ func (wr *WorkerRef) ID() string {
// GetRemotes method abstracts ImmutableRef's GetRemotes to allow a Worker to override.
// This is needed for moby integration.
// Use this method instead of calling ImmutableRef.GetRemotes() directly.
func (wr *WorkerRef) GetRemotes(ctx context.Context, createIfNeeded bool, compressionopt solver.CompressionOpt, all bool, g session.Group) ([]*solver.Remote, error) {
func (wr *WorkerRef) GetRemotes(ctx context.Context, createIfNeeded bool, compressionopt compression.Config, all bool, g session.Group) ([]*solver.Remote, error) {
if w, ok := wr.Worker.(interface {
GetRemotes(context.Context, cache.ImmutableRef, bool, solver.CompressionOpt, bool, session.Group) ([]*solver.Remote, error)
GetRemotes(context.Context, cache.ImmutableRef, bool, compression.Config, bool, session.Group) ([]*solver.Remote, error)
}); ok {
return w.GetRemotes(ctx, wr.ImmutableRef, createIfNeeded, compressionopt, all, g)
}