1
0
mirror of https://github.com/moby/buildkit.git synced 2025-07-30 15:03:06 +03:00

exporter: support creating blobs with zstd compression

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
This commit is contained in:
Tonis Tiigi
2021-09-05 00:01:13 -07:00
parent 9b010e774d
commit 8b5c4d74ef
12 changed files with 315 additions and 219 deletions

View File

@ -230,7 +230,7 @@ Keys supported by image output:
* `unpack=true`: unpack image after creation (for use with containerd) * `unpack=true`: unpack image after creation (for use with containerd)
* `dangling-name-prefix=[value]`: name image with `prefix@<digest>` , used for anonymous images * `dangling-name-prefix=[value]`: name image with `prefix@<digest>` , used for anonymous images
* `name-canonical=true`: add additional canonical name `name@<digest>` * `name-canonical=true`: add additional canonical name `name@<digest>`
* `compression=[uncompressed,gzip,estargz]`: choose compression type for layers newly created and cached, gzip is default value. estargz should be used with `oci-mediatypes=true`. * `compression=[uncompressed,gzip,estargz,zstd]`: choose compression type for layers newly created and cached, gzip is default value. estargz should be used with `oci-mediatypes=true`.
* `force-compression=true`: forcefully apply `compression` option to all layers (including already existing layers). * `force-compression=true`: forcefully apply `compression` option to all layers (including already existing layers).
If credentials are required, `buildctl` will attempt to read Docker configuration file `$DOCKER_CONFIG/config.json`. If credentials are required, `buildctl` will attempt to read Docker configuration file `$DOCKER_CONFIG/config.json`.

8
cache/blobs.go vendored
View File

@ -11,6 +11,7 @@ import (
"github.com/containerd/containerd/diff" "github.com/containerd/containerd/diff"
"github.com/containerd/containerd/leases" "github.com/containerd/containerd/leases"
"github.com/containerd/containerd/mount" "github.com/containerd/containerd/mount"
"github.com/klauspost/compress/zstd"
"github.com/moby/buildkit/session" "github.com/moby/buildkit/session"
"github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/flightcontrol"
@ -79,6 +80,9 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
case compression.EStargz: case compression.EStargz:
compressorFunc, finalize = writeEStargz() compressorFunc, finalize = writeEStargz()
mediaType = ocispecs.MediaTypeImageLayerGzip mediaType = ocispecs.MediaTypeImageLayerGzip
case compression.Zstd:
compressorFunc = zstdWriter
mediaType = ocispecs.MediaTypeImageLayer + "+zstd"
default: default:
return nil, errors.Errorf("unknown layer compression type: %q", compressionType) return nil, errors.Errorf("unknown layer compression type: %q", compressionType)
} }
@ -350,3 +354,7 @@ func ensureCompression(ctx context.Context, ref *immutableRef, compressionType c
}) })
return err return err
} }
// zstdWriter adapts zstd.NewWriter to the compressor function signature
// used by computeBlobChain. requiredMediaType is accepted only for
// interface compatibility and is ignored: zstd output does not vary by
// media type.
func zstdWriter(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) {
	zw, err := zstd.NewWriter(dest)
	if err != nil {
		// Return a literal nil interface so callers comparing the result
		// against nil are not fooled by a typed-nil *zstd.Encoder.
		return nil, err
	}
	return zw, nil
}

View File

@ -1,3 +1,4 @@
//go:build linux
// +build linux // +build linux
package cache package cache
@ -34,7 +35,6 @@ var emptyDesc = ocispecs.Descriptor{}
// be computed (e.g. because the mounts aren't overlayfs), it returns // be computed (e.g. because the mounts aren't overlayfs), it returns
// an error. // an error.
func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compressor) (_ ocispecs.Descriptor, ok bool, err error) { func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compressor) (_ ocispecs.Descriptor, ok bool, err error) {
// Get upperdir location if mounts are overlayfs that can be processed by this differ. // Get upperdir location if mounts are overlayfs that can be processed by this differ.
upperdir, err := getOverlayUpperdir(lower, upper) upperdir, err := getOverlayUpperdir(lower, upper)
if err != nil { if err != nil {
@ -50,6 +50,8 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper
compressorFunc = func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) { compressorFunc = func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) {
return ctdcompression.CompressStream(dest, ctdcompression.Gzip) return ctdcompression.CompressStream(dest, ctdcompression.Gzip)
} }
case ocispecs.MediaTypeImageLayer + "+zstd":
compressorFunc = zstdWriter
default: default:
return emptyDesc, false, errors.Errorf("unsupported diff media type: %v", mediaType) return emptyDesc, false, errors.Errorf("unsupported diff media type: %v", mediaType)
} }

199
cache/converter.go vendored
View File

@ -6,12 +6,13 @@ import (
"fmt" "fmt"
"io" "io"
cdcompression "github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images" "github.com/containerd/containerd/images"
"github.com/containerd/containerd/images/converter" "github.com/containerd/containerd/images/converter"
"github.com/containerd/containerd/images/converter/uncompress"
"github.com/containerd/containerd/labels" "github.com/containerd/containerd/labels"
"github.com/klauspost/compress/zstd"
"github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/compression"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
@ -23,11 +24,15 @@ import (
func needsConversion(mediaType string, compressionType compression.Type) (bool, error) { func needsConversion(mediaType string, compressionType compression.Type) (bool, error) {
switch compressionType { switch compressionType {
case compression.Uncompressed: case compression.Uncompressed:
if !images.IsLayerType(mediaType) || uncompress.IsUncompressedType(mediaType) { if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Uncompressed {
return false, nil return false, nil
} }
case compression.Gzip: case compression.Gzip:
if !images.IsLayerType(mediaType) || isGzipCompressedType(mediaType) { if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Gzip {
return false, nil
}
case compression.Zstd:
if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Zstd {
return false, nil return false, nil
} }
case compression.EStargz: case compression.EStargz:
@ -49,113 +54,129 @@ func getConverter(desc ocispecs.Descriptor, compressionType compression.Type) (c
// No conversion. No need to return an error here. // No conversion. No need to return an error here.
return nil, nil return nil, nil
} }
c := conversion{target: compressionType}
from := compression.FromMediaType(desc.MediaType)
switch from {
case compression.Uncompressed:
case compression.Gzip, compression.Zstd:
c.decompress = cdcompression.DecompressStream
default:
return nil, errors.Errorf("unsupported source compression type %q from mediatype %q", from, desc.MediaType)
}
switch compressionType { switch compressionType {
case compression.Uncompressed: case compression.Uncompressed:
return uncompress.LayerConvertFunc, nil
case compression.Gzip: case compression.Gzip:
convertFunc := func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriter(w), nil } c.compress = func(w io.Writer) (io.WriteCloser, error) {
return gzipLayerConvertFunc(compressionType, convertFunc, nil), nil return gzip.NewWriter(w), nil
}
case compression.Zstd:
c.compress = func(w io.Writer) (io.WriteCloser, error) {
return zstd.NewWriter(w)
}
case compression.EStargz: case compression.EStargz:
compressorFunc, finalize := writeEStargz() compressorFunc, finalize := writeEStargz()
convertFunc := func(w io.Writer) (io.WriteCloser, error) { return compressorFunc(w, ocispecs.MediaTypeImageLayerGzip) } c.compress = func(w io.Writer) (io.WriteCloser, error) {
return gzipLayerConvertFunc(compressionType, convertFunc, finalize), nil return compressorFunc(w, ocispecs.MediaTypeImageLayerGzip)
}
c.finalize = finalize
default: default:
return nil, fmt.Errorf("unknown compression type during conversion: %q", compressionType) return nil, errors.Errorf("unknown target compression type during conversion: %q", compressionType)
} }
return (&c).convert, nil
} }
func gzipLayerConvertFunc(compressionType compression.Type, convertFunc func(w io.Writer) (io.WriteCloser, error), finalize func(context.Context, content.Store) (map[string]string, error)) converter.ConvertFunc { type conversion struct {
return func(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (*ocispecs.Descriptor, error) { target compression.Type
// prepare the source and destination decompress func(io.Reader) (cdcompression.DecompressReadCloser, error)
info, err := cs.Info(ctx, desc.Digest) compress func(w io.Writer) (io.WriteCloser, error)
if err != nil { finalize func(context.Context, content.Store) (map[string]string, error)
return nil, err }
}
labelz := info.Labels func (c *conversion) convert(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (*ocispecs.Descriptor, error) {
if labelz == nil { // prepare the source and destination
labelz = make(map[string]string) info, err := cs.Info(ctx, desc.Digest)
} if err != nil {
ra, err := cs.ReaderAt(ctx, desc) return nil, err
if err != nil { }
return nil, err labelz := info.Labels
} if labelz == nil {
defer ra.Close() labelz = make(map[string]string)
ref := fmt.Sprintf("convert-from-%s-to-%s", desc.Digest, compressionType.String()) }
w, err := cs.Writer(ctx, content.WithRef(ref)) ra, err := cs.ReaderAt(ctx, desc)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer w.Close() defer ra.Close()
if err := w.Truncate(0); err != nil { // Old written data possibly remains ref := fmt.Sprintf("convert-from-%s-to-%s", desc.Digest, c.target.String())
return nil, err w, err := cs.Writer(ctx, content.WithRef(ref))
} if err != nil {
zw, err := convertFunc(w) return nil, err
}
defer w.Close()
if err := w.Truncate(0); err != nil { // Old written data possibly remains
return nil, err
}
var zw io.WriteCloser = w
var compress io.WriteCloser
if c.compress != nil {
zw, err = c.compress(zw)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer zw.Close() defer zw.Close()
compress = zw
}
// convert this layer // convert this layer
diffID := digest.Canonical.Digester() diffID := digest.Canonical.Digester()
if _, err := io.Copy(zw, io.TeeReader(io.NewSectionReader(ra, 0, ra.Size()), diffID.Hash())); err != nil { var rdr io.Reader = io.NewSectionReader(ra, 0, ra.Size())
return nil, err if c.decompress != nil {
} rc, err := c.decompress(rdr)
if err := zw.Close(); err != nil { // Flush the writer
return nil, err
}
labelz[labels.LabelUncompressed] = diffID.Digest().String() // update diffID label
if err = w.Commit(ctx, 0, "", content.WithLabels(labelz)); err != nil && !errdefs.IsAlreadyExists(err) {
return nil, err
}
if err := w.Close(); err != nil {
return nil, err
}
info, err = cs.Info(ctx, w.Digest())
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer rc.Close()
newDesc := desc rdr = rc
newDesc.MediaType = convertMediaTypeToGzip(desc.MediaType) }
newDesc.Digest = info.Digest if _, err := io.Copy(zw, io.TeeReader(rdr, diffID.Hash())); err != nil {
newDesc.Size = info.Size return nil, err
if finalize != nil { }
a, err := finalize(ctx, cs) if compress != nil {
if err != nil { if err := compress.Close(); err != nil { // Flush the writer
return nil, errors.Wrapf(err, "failed finalize compression") return nil, err
}
for k, v := range a {
if newDesc.Annotations == nil {
newDesc.Annotations = make(map[string]string)
}
newDesc.Annotations[k] = v
}
} }
return &newDesc, nil
} }
} labelz[labels.LabelUncompressed] = diffID.Digest().String() // update diffID label
if err = w.Commit(ctx, 0, "", content.WithLabels(labelz)); err != nil && !errdefs.IsAlreadyExists(err) {
func isGzipCompressedType(mt string) bool { return nil, err
switch mt { }
case if err := w.Close(); err != nil {
images.MediaTypeDockerSchema2LayerGzip, return nil, err
images.MediaTypeDockerSchema2LayerForeignGzip, }
ocispecs.MediaTypeImageLayerGzip, info, err = cs.Info(ctx, w.Digest())
ocispecs.MediaTypeImageLayerNonDistributableGzip: if err != nil {
return true return nil, err
default:
return false
} }
}
func convertMediaTypeToGzip(mt string) string { newDesc := desc
if uncompress.IsUncompressedType(mt) { newDesc.MediaType = c.target.DefaultMediaType()
if images.IsDockerType(mt) { newDesc.Digest = info.Digest
mt += ".gzip" newDesc.Size = info.Size
} else { if c.finalize != nil {
mt += "+gzip" a, err := c.finalize(ctx, cs)
if err != nil {
return nil, errors.Wrapf(err, "failed finalize compression")
}
for k, v := range a {
if newDesc.Annotations == nil {
newDesc.Annotations = make(map[string]string)
}
newDesc.Annotations[k] = v
} }
return mt
} }
return mt return &newDesc, nil
} }

9
cache/estargz.go vendored
View File

@ -6,9 +6,10 @@ import (
"io" "io"
"sync" "sync"
"github.com/containerd/containerd/archive/compression" cdcompression "github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/stargz-snapshotter/estargz" "github.com/containerd/stargz-snapshotter/estargz"
"github.com/moby/buildkit/util/compression"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@ -22,8 +23,8 @@ func writeEStargz() (compressorFunc compressor, finalize func(context.Context, c
var bInfo blobInfo var bInfo blobInfo
var mu sync.Mutex var mu sync.Mutex
return func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) { return func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) {
if !isGzipCompressedType(requiredMediaType) { if compression.FromMediaType(requiredMediaType) != compression.Gzip {
return nil, fmt.Errorf("unsupported media type for estargz compressor %q", requiredMediaType) return nil, errors.Errorf("unsupported media type for estargz compressor %q", requiredMediaType)
} }
done := make(chan struct{}) done := make(chan struct{})
pr, pw := io.Pipe() pr, pw := io.Pipe()
@ -127,7 +128,7 @@ func calculateBlob() (io.WriteCloser, chan blobInfo) {
c := new(counter) c := new(counter)
dgstr := digest.Canonical.Digester() dgstr := digest.Canonical.Digester()
diffID := digest.Canonical.Digester() diffID := digest.Canonical.Digester()
decompressR, err := compression.DecompressStream(io.TeeReader(pr, dgstr.Hash())) decompressR, err := cdcompression.DecompressStream(io.TeeReader(pr, dgstr.Hash()))
if err != nil { if err != nil {
pr.CloseWithError(err) pr.CloseWithError(err)
return return

View File

@ -131,6 +131,8 @@ func TestIntegration(t *testing.T) {
testFileOpInputSwap, testFileOpInputSwap,
testRelativeMountpoint, testRelativeMountpoint,
testLocalSourceDiffer, testLocalSourceDiffer,
testBuildExportZstd,
testPullZstdImage,
}, mirrors) }, mirrors)
integration.Run(t, []integration.Test{ integration.Run(t, []integration.Test{
@ -2165,6 +2167,156 @@ func testBuildExportWithUncompressed(t *testing.T, sb integration.Sandbox) {
require.Equal(t, []byte("gzip"), item.Data) require.Equal(t, []byte("gzip"), item.Data)
} }
// testBuildExportZstd verifies that the OCI exporter can compress newly
// created layers with zstd, both with OCI media types (the default) and
// with Docker media types (oci-mediatypes=false). The layer blob digest
// must be identical in both cases — only the manifest media type differs.
func testBuildExportZstd(t *testing.T, sb integration.Sandbox) {
	c, err := New(sb.Context(), sb.Address())
	require.NoError(t, err)
	defer c.Close()

	busybox := llb.Image("busybox:latest")
	cmd := `sh -e -c "echo -n zstd > data"`

	st := llb.Scratch()
	st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)

	def, err := st.Marshal(sb.Context())
	require.NoError(t, err)

	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)

	out := filepath.Join(destDir, "out.tar")
	outW, err := os.Create(out)
	require.NoError(t, err)

	_, err = c.Solve(sb.Context(), def, SolveOpt{
		Exports: []ExportEntry{
			{
				Type:   ExporterOCI,
				Output: fixedWriteCloser(outW),
				Attrs: map[string]string{
					"compression": "zstd",
				},
			},
		},
	}, nil)
	require.NoError(t, err)

	dt, err := ioutil.ReadFile(out)
	require.NoError(t, err)

	m, err := testutil.ReadTarToMap(dt, false)
	require.NoError(t, err)

	var index ocispecs.Index
	err = json.Unmarshal(m["index.json"].Data, &index)
	require.NoError(t, err)

	var mfst ocispecs.Manifest
	err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst)
	require.NoError(t, err)

	lastLayer := mfst.Layers[len(mfst.Layers)-1]
	require.Equal(t, ocispecs.MediaTypeImageLayer+"+zstd", lastLayer.MediaType)

	zstdLayerDigest := lastLayer.Digest.Hex()
	// A zstd frame starts with the magic number 0x28 0xB5 0x2F 0xFD.
	// require.Equal takes (expected, actual) in that order.
	require.Equal(t, []byte{0x28, 0xb5, 0x2f, 0xfd}, m["blobs/sha256/"+zstdLayerDigest].Data[:4])

	// repeat without oci mediatype
	outW, err = os.Create(out)
	require.NoError(t, err)

	_, err = c.Solve(sb.Context(), def, SolveOpt{
		Exports: []ExportEntry{
			{
				Type:   ExporterOCI,
				Output: fixedWriteCloser(outW),
				Attrs: map[string]string{
					"compression":    "zstd",
					"oci-mediatypes": "false",
				},
			},
		},
	}, nil)
	require.NoError(t, err)

	dt, err = ioutil.ReadFile(out)
	require.NoError(t, err)

	m, err = testutil.ReadTarToMap(dt, false)
	require.NoError(t, err)

	err = json.Unmarshal(m["index.json"].Data, &index)
	require.NoError(t, err)

	err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst)
	require.NoError(t, err)

	// Same compressed bytes, Docker-style media type suffix (".zstd").
	lastLayer = mfst.Layers[len(mfst.Layers)-1]
	require.Equal(t, images.MediaTypeDockerSchema2Layer+".zstd", lastLayer.MediaType)
	require.Equal(t, zstdLayerDigest, lastLayer.Digest.Hex())
}
// testPullZstdImage pushes a zstd-compressed image to a sandbox-local
// registry and then builds from it, verifying that zstd layers can be
// pulled and unpacked: the file written into the layer must round-trip
// intact through push, pull, and copy.
func testPullZstdImage(t *testing.T, sb integration.Sandbox) {
	c, err := New(sb.Context(), sb.Address())
	require.NoError(t, err)
	defer c.Close()

	busybox := llb.Image("busybox:latest")
	cmd := `sh -e -c "echo -n zstd > data"`

	st := llb.Scratch()
	st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)

	def, err := st.Marshal(sb.Context())
	require.NoError(t, err)

	registry, err := sb.NewRegistry()
	if errors.Is(err, integration.ErrorRequirements) {
		// Sandbox cannot provide a registry (e.g. rootless CI); skip
		// rather than fail.
		t.Skip(err.Error())
	}
	require.NoError(t, err)

	target := registry + "/buildkit/build/exporter:zstd"

	_, err = c.Solve(sb.Context(), def, SolveOpt{
		Exports: []ExportEntry{
			{
				Type: ExporterImage,
				Attrs: map[string]string{
					"name":        target,
					"push":        "true",
					"compression": "zstd",
				},
			},
		},
	}, nil)
	require.NoError(t, err)

	// Build FROM the pushed image, forcing a pull of the zstd layers.
	st = llb.Scratch().File(llb.Copy(llb.Image(target), "/data", "/zdata"))

	def, err = st.Marshal(sb.Context())
	require.NoError(t, err)

	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)

	_, err = c.Solve(sb.Context(), def, SolveOpt{
		Exports: []ExportEntry{
			{
				Type:      ExporterLocal,
				OutputDir: destDir,
			},
		},
	}, nil)
	require.NoError(t, err)

	dt, err := ioutil.ReadFile(filepath.Join(destDir, "zdata"))
	require.NoError(t, err)
	// require.Equal takes (expected, actual) in that order.
	require.Equal(t, []byte("zstd"), dt)
}
func testBuildPushAndValidate(t *testing.T, sb integration.Sandbox) { func testBuildPushAndValidate(t *testing.T, sb integration.Sandbox) {
skipDockerd(t, sb) skipDockerd(t, sb)
requiresLinux(t) requiresLinux(t)

View File

@ -144,6 +144,8 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
case "estargz": case "estargz":
i.layerCompression = compression.EStargz i.layerCompression = compression.EStargz
esgz = true esgz = true
case "zstd":
i.layerCompression = compression.Zstd
case "uncompressed": case "uncompressed":
i.layerCompression = compression.Uncompressed i.layerCompression = compression.Uncompressed
default: default:

View File

@ -71,6 +71,8 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
case "estargz": case "estargz":
i.layerCompression = compression.EStargz i.layerCompression = compression.EStargz
esgz = true esgz = true
case "zstd":
i.layerCompression = compression.Zstd
case "uncompressed": case "uncompressed":
i.layerCompression = compression.Uncompressed i.layerCompression = compression.Uncompressed
default: default:

2
go.mod
View File

@ -98,7 +98,7 @@ require (
github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect
github.com/klauspost/compress v1.12.3 // indirect github.com/klauspost/compress v1.12.3
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/moby/sys/mount v0.2.0 // indirect github.com/moby/sys/mount v0.2.0 // indirect
github.com/moby/sys/mountinfo v0.4.1 // indirect github.com/moby/sys/mountinfo v0.4.1 // indirect

View File

@ -27,10 +27,18 @@ const (
// EStargz is used for estargz data. // EStargz is used for estargz data.
EStargz EStargz
// Zstd is used for Zstandard data.
Zstd
// UnknownCompression means not supported yet. // UnknownCompression means not supported yet.
UnknownCompression Type = -1 UnknownCompression Type = -1
) )
const (
mediaTypeDockerSchema2LayerZstd = images.MediaTypeDockerSchema2Layer + ".zstd"
mediaTypeImageLayerZstd = ocispecs.MediaTypeImageLayer + "+zstd" // unreleased image-spec#790
)
var Default = Gzip var Default = Gzip
func (ct Type) String() string { func (ct Type) String() string {
@ -41,17 +49,34 @@ func (ct Type) String() string {
return "gzip" return "gzip"
case EStargz: case EStargz:
return "estargz" return "estargz"
case Zstd:
return "zstd"
default: default:
return "unknown" return "unknown"
} }
} }
// DefaultMediaType returns the OCI layer media type that layers
// (re)compressed with this compression type are labeled with. EStargz
// layers are gzip-compatible and therefore share the gzip media type.
func (ct Type) DefaultMediaType() string {
	switch ct {
	case Zstd:
		return mediaTypeImageLayerZstd
	case Gzip, EStargz:
		return ocispecs.MediaTypeImageLayerGzip
	case Uncompressed:
		return ocispecs.MediaTypeImageLayer
	}
	// Unrecognized types get a sentinel suffix instead of a valid-looking
	// but wrong media type.
	return ocispecs.MediaTypeImageLayer + "+unknown"
}
func FromMediaType(mediaType string) Type { func FromMediaType(mediaType string) Type {
switch toOCILayerType[mediaType] { switch toOCILayerType[mediaType] {
case ocispecs.MediaTypeImageLayer: case ocispecs.MediaTypeImageLayer:
return Uncompressed return Uncompressed
case ocispecs.MediaTypeImageLayerGzip: case ocispecs.MediaTypeImageLayerGzip:
return Gzip return Gzip
case mediaTypeImageLayerZstd:
return Zstd
default: default:
return UnknownCompression return UnknownCompression
} }
@ -81,6 +106,7 @@ func DetectLayerMediaType(ctx context.Context, cs content.Store, id digest.Diges
return ocispecs.MediaTypeImageLayerGzip, nil return ocispecs.MediaTypeImageLayerGzip, nil
} }
return images.MediaTypeDockerSchema2LayerGzip, nil return images.MediaTypeDockerSchema2LayerGzip, nil
default: default:
return "", errors.Errorf("failed to detect layer %v compression type", id) return "", errors.Errorf("failed to detect layer %v compression type", id)
} }
@ -108,6 +134,7 @@ func detectCompressionType(cr *io.SectionReader) (Type, error) {
for c, m := range map[Type][]byte{ for c, m := range map[Type][]byte{
Gzip: {0x1F, 0x8B, 0x08}, Gzip: {0x1F, 0x8B, 0x08},
Zstd: {0x28, 0xB5, 0x2F, 0xFD},
} { } {
if n < len(m) { if n < len(m) {
continue continue
@ -127,6 +154,8 @@ var toDockerLayerType = map[string]string{
images.MediaTypeDockerSchema2LayerGzip: images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2LayerGzip: images.MediaTypeDockerSchema2LayerGzip,
images.MediaTypeDockerSchema2LayerForeign: images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerForeign: images.MediaTypeDockerSchema2Layer,
images.MediaTypeDockerSchema2LayerForeignGzip: images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2LayerForeignGzip: images.MediaTypeDockerSchema2LayerGzip,
mediaTypeImageLayerZstd: mediaTypeDockerSchema2LayerZstd,
mediaTypeDockerSchema2LayerZstd: mediaTypeDockerSchema2LayerZstd,
} }
var toOCILayerType = map[string]string{ var toOCILayerType = map[string]string{
@ -136,6 +165,8 @@ var toOCILayerType = map[string]string{
images.MediaTypeDockerSchema2LayerGzip: ocispecs.MediaTypeImageLayerGzip, images.MediaTypeDockerSchema2LayerGzip: ocispecs.MediaTypeImageLayerGzip,
images.MediaTypeDockerSchema2LayerForeign: ocispecs.MediaTypeImageLayer, images.MediaTypeDockerSchema2LayerForeign: ocispecs.MediaTypeImageLayer,
images.MediaTypeDockerSchema2LayerForeignGzip: ocispecs.MediaTypeImageLayerGzip, images.MediaTypeDockerSchema2LayerForeignGzip: ocispecs.MediaTypeImageLayerGzip,
mediaTypeImageLayerZstd: mediaTypeImageLayerZstd,
mediaTypeDockerSchema2LayerZstd: mediaTypeImageLayerZstd,
} }
func convertLayerMediaType(mediaType string, oci bool) string { func convertLayerMediaType(mediaType string, oci bool) string {

View File

@ -1,122 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package uncompress
import (
"compress/gzip"
"context"
"fmt"
"io"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/images/converter"
"github.com/containerd/containerd/labels"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
var _ converter.ConvertFunc = LayerConvertFunc
// LayerConvertFunc converts tar.gz layers into uncompressed tar layers.
// Media type is changed, e.g., "application/vnd.oci.image.layer.v1.tar+gzip" -> "application/vnd.oci.image.layer.v1.tar"
func LayerConvertFunc(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
if !images.IsLayerType(desc.MediaType) || IsUncompressedType(desc.MediaType) {
// No conversion. No need to return an error here.
return nil, nil
}
info, err := cs.Info(ctx, desc.Digest)
if err != nil {
return nil, err
}
readerAt, err := cs.ReaderAt(ctx, desc)
if err != nil {
return nil, err
}
defer readerAt.Close()
sr := io.NewSectionReader(readerAt, 0, desc.Size)
newR, err := gzip.NewReader(sr)
if err != nil {
return nil, err
}
defer newR.Close()
ref := fmt.Sprintf("convert-uncompress-from-%s", desc.Digest)
w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
if err != nil {
return nil, err
}
defer w.Close()
// Reset the writing position
// Old writer possibly remains without aborted
// (e.g. conversion interrupted by a signal)
if err := w.Truncate(0); err != nil {
return nil, err
}
n, err := io.Copy(w, newR)
if err != nil {
return nil, err
}
if err := newR.Close(); err != nil {
return nil, err
}
// no need to retain "containerd.io/uncompressed" label, but retain other labels ("containerd.io/distribution.source.*")
labelsMap := info.Labels
delete(labelsMap, labels.LabelUncompressed)
if err = w.Commit(ctx, 0, "", content.WithLabels(labelsMap)); err != nil && !errdefs.IsAlreadyExists(err) {
return nil, err
}
if err := w.Close(); err != nil {
return nil, err
}
newDesc := desc
newDesc.Digest = w.Digest()
newDesc.Size = n
newDesc.MediaType = convertMediaType(newDesc.MediaType)
return &newDesc, nil
}
// IsUncompressedType returns whether the provided media type is considered
// an uncompressed layer type
func IsUncompressedType(mt string) bool {
switch mt {
case
images.MediaTypeDockerSchema2Layer,
images.MediaTypeDockerSchema2LayerForeign,
ocispec.MediaTypeImageLayer,
ocispec.MediaTypeImageLayerNonDistributable:
return true
default:
return false
}
}
func convertMediaType(mt string) string {
switch mt {
case images.MediaTypeDockerSchema2LayerGzip:
return images.MediaTypeDockerSchema2Layer
case images.MediaTypeDockerSchema2LayerForeignGzip:
return images.MediaTypeDockerSchema2LayerForeign
case ocispec.MediaTypeImageLayerGzip:
return ocispec.MediaTypeImageLayer
case ocispec.MediaTypeImageLayerNonDistributableGzip:
return ocispec.MediaTypeImageLayerNonDistributable
default:
return mt
}
}

1
vendor/modules.txt vendored
View File

@ -93,7 +93,6 @@ github.com/containerd/containerd/identifiers
github.com/containerd/containerd/images github.com/containerd/containerd/images
github.com/containerd/containerd/images/archive github.com/containerd/containerd/images/archive
github.com/containerd/containerd/images/converter github.com/containerd/containerd/images/converter
github.com/containerd/containerd/images/converter/uncompress
github.com/containerd/containerd/labels github.com/containerd/containerd/labels
github.com/containerd/containerd/leases github.com/containerd/containerd/leases
github.com/containerd/containerd/leases/proxy github.com/containerd/containerd/leases/proxy