diff --git a/README.md b/README.md
index c3a9263af..798e9e029 100644
--- a/README.md
+++ b/README.md
@@ -230,7 +230,7 @@ Keys supported by image output:
 * `unpack=true`: unpack image after creation (for use with containerd)
 * `dangling-name-prefix=[value]`: name image with `prefix@<digest>`, used for anonymous images
 * `name-canonical=true`: add additional canonical name `name@<digest>`
-* `compression=[uncompressed,gzip,estargz]`: choose compression type for layers newly created and cached, gzip is default value. estargz should be used with `oci-mediatypes=true`.
+* `compression=[uncompressed,gzip,estargz,zstd]`: choose compression type for layers newly created and cached, gzip is default value. estargz should be used with `oci-mediatypes=true`.
 * `force-compression=true`: forcefully apply `compression` option to all layers (including already existing layers).
 
 If credentials are required, `buildctl` will attempt to read Docker configuration file `$DOCKER_CONFIG/config.json`.
diff --git a/cache/blobs.go b/cache/blobs.go
index 3345fa6eb..99b82bec0 100644
--- a/cache/blobs.go
+++ b/cache/blobs.go
@@ -11,6 +11,7 @@ import (
 	"github.com/containerd/containerd/diff"
 	"github.com/containerd/containerd/leases"
 	"github.com/containerd/containerd/mount"
+	"github.com/klauspost/compress/zstd"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/util/compression"
 	"github.com/moby/buildkit/util/flightcontrol"
@@ -79,6 +80,9 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 		case compression.EStargz:
 			compressorFunc, finalize = writeEStargz()
 			mediaType = ocispecs.MediaTypeImageLayerGzip
+		case compression.Zstd:
+			compressorFunc = zstdWriter
+			mediaType = ocispecs.MediaTypeImageLayer + "+zstd"
 		default:
 			return nil, errors.Errorf("unknown layer compression type: %q", compressionType)
 		}
@@ -350,3 +354,7 @@ func ensureCompression(ctx context.Context, ref *immutableRef, compressionType c
 	})
 	return err
 }
+
+func zstdWriter(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) {
+	return zstd.NewWriter(dest)
+}
diff --git a/cache/blobs_linux.go b/cache/blobs_linux.go
index 6f5f4ce2b..614895f23 100644
--- a/cache/blobs_linux.go
+++ b/cache/blobs_linux.go
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux
 
 package cache
@@ -34,7 +35,6 @@ var emptyDesc = ocispecs.Descriptor{}
 // be computed (e.g. because the mounts aren't overlayfs), it returns
 // an error.
 func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compressor) (_ ocispecs.Descriptor, ok bool, err error) {
-	// Get upperdir location if mounts are overlayfs that can be processed by this differ.
 	upperdir, err := getOverlayUpperdir(lower, upper)
 	if err != nil {
@@ -50,6 +50,8 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper
 		compressorFunc = func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) {
 			return ctdcompression.CompressStream(dest, ctdcompression.Gzip)
 		}
+	case ocispecs.MediaTypeImageLayer + "+zstd":
+		compressorFunc = zstdWriter
 	default:
 		return emptyDesc, false, errors.Errorf("unsupported diff media type: %v", mediaType)
 	}
diff --git a/cache/converter.go b/cache/converter.go
index caf15cf22..beb557208 100644
--- a/cache/converter.go
+++ b/cache/converter.go
@@ -6,12 +6,13 @@ import (
 	"fmt"
 	"io"
 
+	cdcompression "github.com/containerd/containerd/archive/compression"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/images/converter"
-	"github.com/containerd/containerd/images/converter/uncompress"
 	"github.com/containerd/containerd/labels"
+	"github.com/klauspost/compress/zstd"
 	"github.com/moby/buildkit/util/compression"
 	digest "github.com/opencontainers/go-digest"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
@@ -23,11 +24,15 @@ import (
 func needsConversion(mediaType string, compressionType compression.Type) (bool, error) {
 	switch compressionType {
 	case compression.Uncompressed:
-		if !images.IsLayerType(mediaType) || uncompress.IsUncompressedType(mediaType) {
+		if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Uncompressed {
 			return false, nil
 		}
 	case compression.Gzip:
-		if !images.IsLayerType(mediaType) || isGzipCompressedType(mediaType) {
+		if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Gzip {
+			return false, nil
+		}
+	case compression.Zstd:
+		if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Zstd {
 			return false, nil
 		}
 	case compression.EStargz:
@@ -49,113 +54,129 @@ func getConverter(desc ocispecs.Descriptor, compressionType compression.Type) (c
 		// No conversion. No need to return an error here.
 		return nil, nil
 	}
+
+	c := conversion{target: compressionType}
+
+	from := compression.FromMediaType(desc.MediaType)
+	switch from {
+	case compression.Uncompressed:
+	case compression.Gzip, compression.Zstd:
+		c.decompress = cdcompression.DecompressStream
+	default:
+		return nil, errors.Errorf("unsupported source compression type %q from mediatype %q", from, desc.MediaType)
+	}
+
 	switch compressionType {
 	case compression.Uncompressed:
-		return uncompress.LayerConvertFunc, nil
 	case compression.Gzip:
-		convertFunc := func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriter(w), nil }
-		return gzipLayerConvertFunc(compressionType, convertFunc, nil), nil
+		c.compress = func(w io.Writer) (io.WriteCloser, error) {
+			return gzip.NewWriter(w), nil
+		}
+	case compression.Zstd:
+		c.compress = func(w io.Writer) (io.WriteCloser, error) {
+			return zstd.NewWriter(w)
+		}
 	case compression.EStargz:
 		compressorFunc, finalize := writeEStargz()
-		convertFunc := func(w io.Writer) (io.WriteCloser, error) { return compressorFunc(w, ocispecs.MediaTypeImageLayerGzip) }
-		return gzipLayerConvertFunc(compressionType, convertFunc, finalize), nil
+		c.compress = func(w io.Writer) (io.WriteCloser, error) {
+			return compressorFunc(w, ocispecs.MediaTypeImageLayerGzip)
+		}
+		c.finalize = finalize
 	default:
-		return nil, fmt.Errorf("unknown compression type during conversion: %q", compressionType)
+		return nil, errors.Errorf("unknown target compression type during conversion: %q", compressionType)
 	}
+
+	return (&c).convert, nil
 }
 
-func gzipLayerConvertFunc(compressionType compression.Type, convertFunc func(w io.Writer) (io.WriteCloser, error), finalize func(context.Context, content.Store) (map[string]string, error)) converter.ConvertFunc {
-	return func(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (*ocispecs.Descriptor, error) {
-		// prepare the source and destination
-		info, err := cs.Info(ctx, desc.Digest)
-		if err != nil {
-			return nil, err
-		}
-		labelz := info.Labels
-		if labelz == nil {
-			labelz = make(map[string]string)
-		}
-		ra, err := cs.ReaderAt(ctx, desc)
-		if err != nil {
-			return nil, err
-		}
-		defer ra.Close()
-		ref := fmt.Sprintf("convert-from-%s-to-%s", desc.Digest, compressionType.String())
-		w, err := cs.Writer(ctx, content.WithRef(ref))
-		if err != nil {
-			return nil, err
-		}
-		defer w.Close()
-		if err := w.Truncate(0); err != nil { // Old written data possibly remains
-			return nil, err
-		}
-		zw, err := convertFunc(w)
+type conversion struct {
+	target     compression.Type
+	decompress func(io.Reader) (cdcompression.DecompressReadCloser, error)
+	compress   func(w io.Writer) (io.WriteCloser, error)
+	finalize   func(context.Context, content.Store) (map[string]string, error)
+}
+
+func (c *conversion) convert(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (*ocispecs.Descriptor, error) {
+	// prepare the source and destination
+	info, err := cs.Info(ctx, desc.Digest)
+	if err != nil {
+		return nil, err
+	}
+	labelz := info.Labels
+	if labelz == nil {
+		labelz = make(map[string]string)
+	}
+	ra, err := cs.ReaderAt(ctx, desc)
+	if err != nil {
+		return nil, err
+	}
+	defer ra.Close()
+	ref := fmt.Sprintf("convert-from-%s-to-%s", desc.Digest, c.target.String())
+	w, err := cs.Writer(ctx, content.WithRef(ref))
+	if err != nil {
+		return nil, err
+	}
+	defer w.Close()
+	if err := w.Truncate(0); err != nil { // Old written data possibly remains
+		return nil, err
+	}
+	var zw io.WriteCloser = w
+	var compress io.WriteCloser
+	if c.compress != nil {
+		zw, err = c.compress(zw)
 		if err != nil {
 			return nil, err
 		}
 		defer zw.Close()
+		compress = zw
+	}
 
-		// convert this layer
-		diffID := digest.Canonical.Digester()
-		if _, err := io.Copy(zw, io.TeeReader(io.NewSectionReader(ra, 0, ra.Size()), diffID.Hash())); err != nil {
-			return nil, err
-		}
-		if err := zw.Close(); err != nil { // Flush the writer
-			return nil, err
-		}
-		labelz[labels.LabelUncompressed] = diffID.Digest().String() // update diffID label
-		if err = w.Commit(ctx, 0, "", content.WithLabels(labelz)); err != nil && !errdefs.IsAlreadyExists(err) {
-			return nil, err
-		}
-		if err := w.Close(); err != nil {
-			return nil, err
-		}
-		info, err = cs.Info(ctx, w.Digest())
+	// convert this layer
+	diffID := digest.Canonical.Digester()
+	var rdr io.Reader = io.NewSectionReader(ra, 0, ra.Size())
+	if c.decompress != nil {
+		rc, err := c.decompress(rdr)
 		if err != nil {
 			return nil, err
 		}
-
-		newDesc := desc
-		newDesc.MediaType = convertMediaTypeToGzip(desc.MediaType)
-		newDesc.Digest = info.Digest
-		newDesc.Size = info.Size
-		if finalize != nil {
-			a, err := finalize(ctx, cs)
-			if err != nil {
-				return nil, errors.Wrapf(err, "failed finalize compression")
-			}
-			for k, v := range a {
-				if newDesc.Annotations == nil {
-					newDesc.Annotations = make(map[string]string)
-				}
-				newDesc.Annotations[k] = v
+		defer rc.Close()
+		rdr = rc
+	}
+	if _, err := io.Copy(zw, io.TeeReader(rdr, diffID.Hash())); err != nil {
+		return nil, err
+	}
+	if compress != nil {
+		if err := compress.Close(); err != nil { // Flush the writer
+			return nil, err
 		}
-		return &newDesc, nil
 	}
-}
-
-func isGzipCompressedType(mt string) bool {
-	switch mt {
-	case
-		images.MediaTypeDockerSchema2LayerGzip,
-		images.MediaTypeDockerSchema2LayerForeignGzip,
-		ocispecs.MediaTypeImageLayerGzip,
-		ocispecs.MediaTypeImageLayerNonDistributableGzip:
-		return true
-	default:
-		return false
+	labelz[labels.LabelUncompressed] = diffID.Digest().String() // update diffID label
+	if err = w.Commit(ctx, 0, "", content.WithLabels(labelz)); err != nil && !errdefs.IsAlreadyExists(err) {
+		return nil, err
+	}
+	if err := w.Close(); err != nil {
+		return nil, err
+	}
+	info, err = cs.Info(ctx, w.Digest())
+	if err != nil {
+		return nil, err
 	}
-}
 
-func convertMediaTypeToGzip(mt string) string {
-	if uncompress.IsUncompressedType(mt) {
-		if images.IsDockerType(mt) {
-			mt += ".gzip"
-		} else {
-			mt += "+gzip"
+	newDesc := desc
+	newDesc.MediaType = c.target.DefaultMediaType()
+	newDesc.Digest = info.Digest
+	newDesc.Size = info.Size
+	if c.finalize != nil {
+		a, err := c.finalize(ctx, cs)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed finalize compression")
+		}
+		for k, v := range a {
+			if newDesc.Annotations == nil {
+				newDesc.Annotations = make(map[string]string)
+			}
+			newDesc.Annotations[k] = v
 		}
-		return mt
 	}
-	return mt
+	return &newDesc, nil
 }
diff --git a/cache/estargz.go b/cache/estargz.go
index aafe7f579..2ad1a264b 100644
--- a/cache/estargz.go
+++ b/cache/estargz.go
@@ -6,9 +6,10 @@ import (
 	"io"
 	"sync"
 
-	"github.com/containerd/containerd/archive/compression"
+	cdcompression "github.com/containerd/containerd/archive/compression"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/stargz-snapshotter/estargz"
+	"github.com/moby/buildkit/util/compression"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 )
@@ -22,8 +23,8 @@ func writeEStargz() (compressorFunc compressor, finalize func(context.Context, c
 	var bInfo blobInfo
 	var mu sync.Mutex
 	return func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) {
-		if !isGzipCompressedType(requiredMediaType) {
-			return nil, fmt.Errorf("unsupported media type for estargz compressor %q", requiredMediaType)
+		if compression.FromMediaType(requiredMediaType) != compression.Gzip {
+			return nil, errors.Errorf("unsupported media type for estargz compressor %q", requiredMediaType)
 		}
 		done := make(chan struct{})
 		pr, pw := io.Pipe()
@@ -127,7 +128,7 @@ func calculateBlob() (io.WriteCloser, chan blobInfo) {
 		c := new(counter)
 		dgstr := digest.Canonical.Digester()
 		diffID := digest.Canonical.Digester()
-		decompressR, err := compression.DecompressStream(io.TeeReader(pr, dgstr.Hash()))
+		decompressR, err := cdcompression.DecompressStream(io.TeeReader(pr, dgstr.Hash()))
 		if err != nil {
 			pr.CloseWithError(err)
 			return
diff --git a/client/client_test.go b/client/client_test.go
index 4a52383e3..63a22230b 100644
--- a/client/client_test.go
+++ b/client/client_test.go
@@ -131,6 +131,8 @@ func TestIntegration(t *testing.T) {
 		testFileOpInputSwap,
 		testRelativeMountpoint,
 		testLocalSourceDiffer,
+		testBuildExportZstd,
+		testPullZstdImage,
 	}, mirrors)
 
 	integration.Run(t, []integration.Test{
@@ -2165,6 +2167,156 @@ func testBuildExportWithUncompressed(t *testing.T, sb integration.Sandbox) {
 	require.Equal(t, []byte("gzip"), item.Data)
 }
 
+func testBuildExportZstd(t *testing.T, sb integration.Sandbox) {
+	c, err := New(sb.Context(), sb.Address())
+	require.NoError(t, err)
+	defer c.Close()
+
+	busybox := llb.Image("busybox:latest")
+	cmd := `sh -e -c "echo -n zstd > data"`
+
+	st := llb.Scratch()
+	st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
+
+	def, err := st.Marshal(sb.Context())
+	require.NoError(t, err)
+
+	destDir, err := ioutil.TempDir("", "buildkit")
+	require.NoError(t, err)
+	defer os.RemoveAll(destDir)
+
+	out := filepath.Join(destDir, "out.tar")
+	outW, err := os.Create(out)
+	require.NoError(t, err)
+
+	_, err = c.Solve(sb.Context(), def, SolveOpt{
+		Exports: []ExportEntry{
+			{
+				Type:   ExporterOCI,
+				Output: fixedWriteCloser(outW),
+				Attrs: map[string]string{
+					"compression": "zstd",
+				},
+			},
+		},
+	}, nil)
+	require.NoError(t, err)
+
+	dt, err := ioutil.ReadFile(out)
+	require.NoError(t, err)
+
+	m, err := testutil.ReadTarToMap(dt, false)
+	require.NoError(t, err)
+
+	var index ocispecs.Index
+	err = json.Unmarshal(m["index.json"].Data, &index)
+	require.NoError(t, err)
+
+	var mfst ocispecs.Manifest
+	err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst)
+	require.NoError(t, err)
+
+	lastLayer := mfst.Layers[len(mfst.Layers)-1]
+	require.Equal(t, ocispecs.MediaTypeImageLayer+"+zstd", lastLayer.MediaType)
+
+	zstdLayerDigest := lastLayer.Digest.Hex()
+	require.Equal(t, m["blobs/sha256/"+zstdLayerDigest].Data[:4], []byte{0x28, 0xb5, 0x2f, 0xfd})
+
+	// repeat without oci mediatype
+	outW, err = os.Create(out)
+	require.NoError(t, err)
+
+	_, err = c.Solve(sb.Context(), def, SolveOpt{
+		Exports: []ExportEntry{
+			{
+				Type:   ExporterOCI,
+				Output: fixedWriteCloser(outW),
+				Attrs: map[string]string{
+					"compression":    "zstd",
+					"oci-mediatypes": "false",
+				},
+			},
+		},
+	}, nil)
+	require.NoError(t, err)
+
+	dt, err = ioutil.ReadFile(out)
+	require.NoError(t, err)
+
+	m, err = testutil.ReadTarToMap(dt, false)
+	require.NoError(t, err)
+
+	err = json.Unmarshal(m["index.json"].Data, &index)
+	require.NoError(t, err)
+
+	err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst)
+	require.NoError(t, err)
+
+	lastLayer = mfst.Layers[len(mfst.Layers)-1]
+	require.Equal(t, images.MediaTypeDockerSchema2Layer+".zstd", lastLayer.MediaType)
+
+	require.Equal(t, lastLayer.Digest.Hex(), zstdLayerDigest)
+}
+
+func testPullZstdImage(t *testing.T, sb integration.Sandbox) {
+	c, err := New(sb.Context(), sb.Address())
+	require.NoError(t, err)
+	defer c.Close()
+
+	busybox := llb.Image("busybox:latest")
+	cmd := `sh -e -c "echo -n zstd > data"`
+
+	st := llb.Scratch()
+	st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
+
+	def, err := st.Marshal(sb.Context())
+	require.NoError(t, err)
+
+	registry, err := sb.NewRegistry()
+	if errors.Is(err, integration.ErrorRequirements) {
+		t.Skip(err.Error())
+	}
+	require.NoError(t, err)
+
+	target := registry + "/buildkit/build/exporter:zstd"
+
+	_, err = c.Solve(sb.Context(), def, SolveOpt{
+		Exports: []ExportEntry{
+			{
+				Type: ExporterImage,
+				Attrs: map[string]string{
+					"name":        target,
+					"push":        "true",
+					"compression": "zstd",
+				},
+			},
+		},
+	}, nil)
+	require.NoError(t, err)
+
+	st = llb.Scratch().File(llb.Copy(llb.Image(target), "/data", "/zdata"))
+
+	def, err = st.Marshal(sb.Context())
+	require.NoError(t, err)
+
+	destDir, err := ioutil.TempDir("", "buildkit")
+	require.NoError(t, err)
+	defer os.RemoveAll(destDir)
+
+	_, err = c.Solve(sb.Context(), def, SolveOpt{
+		Exports: []ExportEntry{
+			{
+				Type:      ExporterLocal,
+				OutputDir: destDir,
+			},
+		},
+	}, nil)
+	require.NoError(t, err)
+
+	dt, err := ioutil.ReadFile(filepath.Join(destDir, "zdata"))
+	require.NoError(t, err)
+	require.Equal(t, dt, []byte("zstd"))
+}
 func testBuildPushAndValidate(t *testing.T, sb integration.Sandbox) {
 	skipDockerd(t, sb)
 	requiresLinux(t)
diff --git a/exporter/containerimage/export.go b/exporter/containerimage/export.go
index 57b503266..a96bfd1b5 100644
--- a/exporter/containerimage/export.go
+++ b/exporter/containerimage/export.go
@@ -144,6 +144,8 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
 		case "estargz":
 			i.layerCompression = compression.EStargz
 			esgz = true
+		case "zstd":
+			i.layerCompression = compression.Zstd
 		case "uncompressed":
 			i.layerCompression = compression.Uncompressed
 		default:
diff --git a/exporter/oci/export.go b/exporter/oci/export.go
index fb9a91aa1..d57a37d30 100644
--- a/exporter/oci/export.go
+++ b/exporter/oci/export.go
@@ -71,6 +71,8 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
 		case "estargz":
 			i.layerCompression = compression.EStargz
 			esgz = true
+		case "zstd":
+			i.layerCompression = compression.Zstd
 		case "uncompressed":
 			i.layerCompression = compression.Uncompressed
 		default:
diff --git a/go.mod b/go.mod
index 5e1f3cd29..108b7d53c 100644
--- a/go.mod
+++ b/go.mod
@@ -98,7 +98,7 @@ require (
 	github.com/hashicorp/errwrap v1.0.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect
-	github.com/klauspost/compress v1.12.3 // indirect
+	github.com/klauspost/compress v1.12.3
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
 	github.com/moby/sys/mount v0.2.0 // indirect
 	github.com/moby/sys/mountinfo v0.4.1 // indirect
diff --git a/util/compression/compression.go b/util/compression/compression.go
index 1ea5df7a0..8437cd195 100644
--- a/util/compression/compression.go
+++ b/util/compression/compression.go
@@ -27,10 +27,18 @@ const (
 	// EStargz is used for estargz data.
 	EStargz
 
+	// Zstd is used for Zstandard data.
+	Zstd
+
 	// UnknownCompression means not supported yet.
 	UnknownCompression Type = -1
 )
 
+const (
+	mediaTypeDockerSchema2LayerZstd = images.MediaTypeDockerSchema2Layer + ".zstd"
+	mediaTypeImageLayerZstd         = ocispecs.MediaTypeImageLayer + "+zstd" // unreleased image-spec#790
+)
+
 var Default = Gzip
 
 func (ct Type) String() string {
@@ -41,17 +49,34 @@ func (ct Type) String() string {
 		return "uncompressed"
 	case Gzip:
 		return "gzip"
 	case EStargz:
 		return "estargz"
+	case Zstd:
+		return "zstd"
 	default:
 		return "unknown"
 	}
 }
 
+func (ct Type) DefaultMediaType() string {
+	switch ct {
+	case Uncompressed:
+		return ocispecs.MediaTypeImageLayer
+	case Gzip, EStargz:
+		return ocispecs.MediaTypeImageLayerGzip
+	case Zstd:
+		return mediaTypeImageLayerZstd
+	default:
+		return ocispecs.MediaTypeImageLayer + "+unknown"
+	}
+}
+
 func FromMediaType(mediaType string) Type {
 	switch toOCILayerType[mediaType] {
 	case ocispecs.MediaTypeImageLayer:
 		return Uncompressed
 	case ocispecs.MediaTypeImageLayerGzip:
 		return Gzip
+	case mediaTypeImageLayerZstd:
+		return Zstd
 	default:
 		return UnknownCompression
 	}
@@ -81,6 +106,7 @@ func DetectLayerMediaType(ctx context.Context, cs content.Store, id digest.Diges
 			return ocispecs.MediaTypeImageLayerGzip, nil
 		}
 		return images.MediaTypeDockerSchema2LayerGzip, nil
+
 	default:
 		return "", errors.Errorf("failed to detect layer %v compression type", id)
 	}
@@ -108,6 +134,7 @@ func detectCompressionType(cr *io.SectionReader) (Type, error) {
 
 	for c, m := range map[Type][]byte{
 		Gzip: {0x1F, 0x8B, 0x08},
+		Zstd: {0x28, 0xB5, 0x2F, 0xFD},
 	} {
 		if n < len(m) {
 			continue
@@ -127,6 +154,8 @@ var toDockerLayerType = map[string]string{
 	images.MediaTypeDockerSchema2LayerGzip:        images.MediaTypeDockerSchema2LayerGzip,
 	images.MediaTypeDockerSchema2LayerForeign:     images.MediaTypeDockerSchema2Layer,
 	images.MediaTypeDockerSchema2LayerForeignGzip: images.MediaTypeDockerSchema2LayerGzip,
+	mediaTypeImageLayerZstd:                       mediaTypeDockerSchema2LayerZstd,
+	mediaTypeDockerSchema2LayerZstd:               mediaTypeDockerSchema2LayerZstd,
 }
 
 var toOCILayerType = map[string]string{
@@ -136,6 +165,8 @@ var toOCILayerType = map[string]string{
 	images.MediaTypeDockerSchema2LayerGzip:        ocispecs.MediaTypeImageLayerGzip,
 	images.MediaTypeDockerSchema2LayerForeign:     ocispecs.MediaTypeImageLayer,
 	images.MediaTypeDockerSchema2LayerForeignGzip: ocispecs.MediaTypeImageLayerGzip,
+	mediaTypeImageLayerZstd:                       mediaTypeImageLayerZstd,
+	mediaTypeDockerSchema2LayerZstd:               mediaTypeImageLayerZstd,
 }
 
 func convertLayerMediaType(mediaType string, oci bool) string {
diff --git a/vendor/github.com/containerd/containerd/images/converter/uncompress/uncompress.go b/vendor/github.com/containerd/containerd/images/converter/uncompress/uncompress.go
deleted file mode 100644
index aca003552..000000000
--- a/vendor/github.com/containerd/containerd/images/converter/uncompress/uncompress.go
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package uncompress
-
-import (
-	"compress/gzip"
-	"context"
-	"fmt"
-	"io"
-
-	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/images/converter"
-	"github.com/containerd/containerd/labels"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-var _ converter.ConvertFunc = LayerConvertFunc
-
-// LayerConvertFunc converts tar.gz layers into uncompressed tar layers.
-// Media type is changed, e.g., "application/vnd.oci.image.layer.v1.tar+gzip" -> "application/vnd.oci.image.layer.v1.tar"
-func LayerConvertFunc(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
-	if !images.IsLayerType(desc.MediaType) || IsUncompressedType(desc.MediaType) {
-		// No conversion. No need to return an error here.
-		return nil, nil
-	}
-	info, err := cs.Info(ctx, desc.Digest)
-	if err != nil {
-		return nil, err
-	}
-	readerAt, err := cs.ReaderAt(ctx, desc)
-	if err != nil {
-		return nil, err
-	}
-	defer readerAt.Close()
-	sr := io.NewSectionReader(readerAt, 0, desc.Size)
-	newR, err := gzip.NewReader(sr)
-	if err != nil {
-		return nil, err
-	}
-	defer newR.Close()
-	ref := fmt.Sprintf("convert-uncompress-from-%s", desc.Digest)
-	w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
-	if err != nil {
-		return nil, err
-	}
-	defer w.Close()
-
-	// Reset the writing position
-	// Old writer possibly remains without aborted
-	// (e.g. conversion interrupted by a signal)
-	if err := w.Truncate(0); err != nil {
-		return nil, err
-	}
-
-	n, err := io.Copy(w, newR)
-	if err != nil {
-		return nil, err
-	}
-	if err := newR.Close(); err != nil {
-		return nil, err
-	}
-	// no need to retain "containerd.io/uncompressed" label, but retain other labels ("containerd.io/distribution.source.*")
-	labelsMap := info.Labels
-	delete(labelsMap, labels.LabelUncompressed)
-	if err = w.Commit(ctx, 0, "", content.WithLabels(labelsMap)); err != nil && !errdefs.IsAlreadyExists(err) {
-		return nil, err
-	}
-	if err := w.Close(); err != nil {
-		return nil, err
-	}
-	newDesc := desc
-	newDesc.Digest = w.Digest()
-	newDesc.Size = n
-	newDesc.MediaType = convertMediaType(newDesc.MediaType)
-	return &newDesc, nil
-}
-
-// IsUncompressedType returns whether the provided media type is considered
-// an uncompressed layer type
-func IsUncompressedType(mt string) bool {
-	switch mt {
-	case
-		images.MediaTypeDockerSchema2Layer,
-		images.MediaTypeDockerSchema2LayerForeign,
-		ocispec.MediaTypeImageLayer,
-		ocispec.MediaTypeImageLayerNonDistributable:
-		return true
-	default:
-		return false
-	}
-}
-
-func convertMediaType(mt string) string {
-	switch mt {
-	case images.MediaTypeDockerSchema2LayerGzip:
-		return images.MediaTypeDockerSchema2Layer
-	case images.MediaTypeDockerSchema2LayerForeignGzip:
-		return images.MediaTypeDockerSchema2LayerForeign
-	case ocispec.MediaTypeImageLayerGzip:
-		return ocispec.MediaTypeImageLayer
-	case ocispec.MediaTypeImageLayerNonDistributableGzip:
-		return ocispec.MediaTypeImageLayerNonDistributable
-	default:
-		return mt
-	}
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 15b4ab3bd..bbb476829 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -93,7 +93,6 @@ github.com/containerd/containerd/identifiers
 github.com/containerd/containerd/images
 github.com/containerd/containerd/images/archive
 github.com/containerd/containerd/images/converter
-github.com/containerd/containerd/images/converter/uncompress
 github.com/containerd/containerd/labels
 github.com/containerd/containerd/leases
 github.com/containerd/containerd/leases/proxy
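
Usage sketch (illustrative, not part of the patch): with this change applied, a client can request zstd-compressed layers purely through exporter attributes, the same way the new testPullZstdImage test above does. The helper name solveWithZstd and the image reference are placeholders; client, llb, SolveOpt, ExportEntry, and ExporterImage come from github.com/moby/buildkit/client as already used in client_test.go.

	package example

	import (
		"context"

		"github.com/moby/buildkit/client"
		"github.com/moby/buildkit/client/llb"
	)

	// solveWithZstd asks BuildKit to build the given definition and push an image
	// whose newly created layers are compressed with zstd, mirroring the exporter
	// attributes exercised by the integration tests in this diff.
	func solveWithZstd(ctx context.Context, c *client.Client, def *llb.Definition) error {
		_, err := c.Solve(ctx, def, client.SolveOpt{
			Exports: []client.ExportEntry{{
				Type: client.ExporterImage,
				Attrs: map[string]string{
					"name":        "registry.example.com/app:latest", // placeholder target
					"push":        "true",
					"compression": "zstd", // option added by this patch
				},
			}},
		}, nil)
		return err
	}

The equivalent buildctl invocation would pass compression=zstd among the --output keys, per the README change at the top of this diff.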