1
0
mirror of https://github.com/regclient/regclient.git synced 2025-04-18 22:44:00 +03:00

Feat: Adding zstd support

Breaking: pkg/archive.Compress no longer decompresses the input.
Signed-off-by: Brandon Mitchell <git@bmitch.net>
This commit is contained in:
Brandon Mitchell 2024-05-03 08:11:03 -04:00
parent ab33dd316d
commit 5417b713d0
No known key found for this signature in database
GPG Key ID: 6E0FF28C767A8BEE
14 changed files with 267 additions and 68 deletions

View File

@ -277,6 +277,7 @@ func (s *Sandbox) imageImportTar(ls *lua.LState) int {
if err != nil {
ls.RaiseError("Failed to read from \"%s\": %v", file, err)
}
defer rs.Close()
err = s.rc.ImageImport(s.ctx, tgt.r, rs)
if err != nil {
ls.RaiseError("Failed to import image \"%s\" from \"%s\": %v", tgt.r.CommonName(), file, err)

View File

@ -604,7 +604,7 @@ regctl image ratelimit alpine --format '{{.Remain}}'`,
mod.WithLayerCompression(algo))
return nil
},
}, "layer-compress", "", `change layer compression (gzip, none)`)
}, "layer-compress", "", `change layer compression (gzip, none, zstd)`)
imageModCmd.Flags().VarP(&modFlagFunc{
t: "string",
f: func(val string) error {

1
go.mod
View File

@ -4,6 +4,7 @@ go 1.20
require (
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7
github.com/klauspost/compress v1.17.8
github.com/olareg/olareg v0.1.0
github.com/opencontainers/go-digest v1.0.0
github.com/robfig/cron/v3 v3.0.1

2
go.sum
View File

@ -6,6 +6,8 @@ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/olareg/olareg v0.1.0 h1:1dXBOgPrig5N7zoXyIZVQqU0QBo6sD9pbL6UYjY75CA=
github.com/olareg/olareg v0.1.0/go.mod h1:RBuU7JW7SoIIxZKzLRhq8sVtQeAHzCAtRrXEBx2KlM4=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=

View File

@ -654,7 +654,8 @@ func (rc *RegClient) imageCopyOpt(ctx context.Context, refSrc ref.Ref, refTgt re
// known manifest media type
err = rc.imageCopyOpt(ctx, entrySrc, entryTgt, dEntry, true, parentsNew, opt)
case mediatype.Docker2ImageConfig, mediatype.OCI1ImageConfig,
mediatype.Docker2LayerGzip, mediatype.OCI1Layer, mediatype.OCI1LayerGzip,
mediatype.Docker2Layer, mediatype.Docker2LayerGzip, mediatype.Docker2LayerZstd,
mediatype.OCI1Layer, mediatype.OCI1LayerGzip, mediatype.OCI1LayerZstd,
mediatype.BuildkitCacheConfig:
// known blob media type
err = rc.imageCopyBlob(ctx, entrySrc, entryTgt, dEntry, opt, bOpt...)
@ -1390,11 +1391,16 @@ func (rc *RegClient) imageImportDockerAddLayerHandlers(ctx context.Context, r re
for i, layerFile := range trd.dockerManifestList[index].Layers {
func(i int) {
trd.handlers[filepath.Clean(layerFile)] = func(header *tar.Header, trd *tarReadData) error {
// ensure blob is compressed with gzip to match media type
gzipR, err := archive.Compress(trd.tr, archive.CompressGzip)
// ensure blob is compressed with gzip to match the assigned media type
rdrUC, err := archive.Decompress(trd.tr)
if err != nil {
return err
}
gzipR, err := archive.Compress(rdrUC, archive.CompressGzip)
if err != nil {
return err
}
defer gzipR.Close()
// upload blob, digest and size is unknown
d, err := rc.BlobPut(ctx, r, descriptor.Descriptor{}, gzipR)
if err != nil {
@ -1499,7 +1505,8 @@ func (rc *RegClient) imageImportOCIHandleManifest(ctx context.Context, r ref.Ref
}
return rc.imageImportOCIHandleManifest(ctx, r, md, trd, true, child)
case mediatype.Docker2ImageConfig, mediatype.OCI1ImageConfig,
mediatype.Docker2LayerGzip, mediatype.OCI1Layer, mediatype.OCI1LayerGzip,
mediatype.Docker2Layer, mediatype.Docker2LayerGzip, mediatype.Docker2LayerZstd,
mediatype.OCI1Layer, mediatype.OCI1LayerGzip, mediatype.OCI1LayerZstd,
mediatype.BuildkitCacheConfig:
// known blob media types
return rc.imageImportBlob(ctx, r, d, trd)

View File

@ -491,6 +491,7 @@ func TestExportImport(t *testing.T) {
if err != nil {
t.Fatalf("failed to open tar: %v", err)
}
defer fileIn2.Close()
fileIn2Seeker, ok := fileIn2.(io.ReadSeeker)
if !ok {
t.Fatalf("could not convert fileIn to io.ReadSeeker, type %T", fileIn2)
@ -504,6 +505,7 @@ func TestExportImport(t *testing.T) {
if err != nil {
t.Fatalf("failed to open tar: %v", err)
}
defer fileIn3.Close()
fileIn3Seeker, ok := fileIn3.(io.ReadSeeker)
if !ok {
t.Fatalf("could not convert fileIn to io.ReadSeeker, type %T", fileIn3)

View File

@ -24,8 +24,9 @@ import (
// WithLayerCompression alters the media type and compression algorithm of the layers.
func WithLayerCompression(algo archive.CompressType) Opts {
return func(dc *dagConfig, dm *dagManifest) error {
// TODO: add zstd support here, and in cases below
if algo != archive.CompressNone && algo != archive.CompressGzip {
switch algo {
case archive.CompressNone, archive.CompressGzip, archive.CompressZstd:
default:
return fmt.Errorf("unsupported layer compression: %s", algo.String())
}
dc.stepsLayer = append(dc.stepsLayer, func(ctx context.Context, rc *regclient.RegClient, rSrc, rTgt ref.Ref, dl *dagLayer, rdr io.ReadCloser) (io.ReadCloser, error) {
@ -39,11 +40,11 @@ func WithLayerCompression(algo archive.CompressType) Opts {
switch algo {
case archive.CompressGzip:
switch mt {
case mediatype.Docker2Layer:
case mediatype.Docker2Layer, mediatype.Docker2LayerZstd:
dl.newDesc = descriptor.Descriptor{
MediaType: mediatype.Docker2LayerGzip,
}
case mediatype.OCI1Layer:
case mediatype.OCI1Layer, mediatype.OCI1LayerZstd:
dl.newDesc = descriptor.Descriptor{
MediaType: mediatype.OCI1LayerGzip,
}
@ -57,11 +58,13 @@ func WithLayerCompression(algo archive.CompressType) Opts {
digUC := digest.Canonical.Digester() // uncompressed digest
ucRdr, err := archive.Decompress(rdr)
if err != nil {
_ = rdr.Close()
return nil, err
}
ucDigRdr := io.TeeReader(ucRdr, digUC.Hash())
cRdr, err := archive.Compress(ucDigRdr, algo)
if err != nil {
_ = rdr.Close()
return nil, err
}
digRdr := io.TeeReader(cRdr, digRaw.Hash())
@ -72,7 +75,50 @@ func WithLayerCompression(algo archive.CompressType) Opts {
if err != nil {
return err
}
// TODO: close cRdr when it returns a ReadCloser
_ = cRdr.Close()
dl.newDesc.Digest = digRaw.Digest()
dl.ucDigest = digUC.Digest()
return nil
}}, nil
case archive.CompressZstd:
switch mt {
case mediatype.Docker2Layer, mediatype.Docker2LayerGzip:
dl.newDesc = descriptor.Descriptor{
MediaType: mediatype.Docker2LayerZstd,
}
case mediatype.OCI1Layer, mediatype.OCI1LayerGzip:
dl.newDesc = descriptor.Descriptor{
MediaType: mediatype.OCI1LayerZstd,
}
default:
return rdr, nil
}
if dl.mod == unchanged {
dl.mod = replaced
}
digRaw := digest.Canonical.Digester() // raw/compressed digest
digUC := digest.Canonical.Digester() // uncompressed digest
ucRdr, err := archive.Decompress(rdr)
if err != nil {
_ = rdr.Close()
return nil, err
}
ucDigRdr := io.TeeReader(ucRdr, digUC.Hash())
cRdr, err := archive.Compress(ucDigRdr, algo)
if err != nil {
_ = rdr.Close()
return nil, err
}
digRdr := io.TeeReader(cRdr, digRaw.Hash())
return readCloserFn{
Reader: digRdr,
closeFn: func() error {
err := rdr.Close()
if err != nil {
return err
}
_ = cRdr.Close()
dl.newDesc.Digest = digRaw.Digest()
dl.ucDigest = digUC.Digest()
return nil
@ -80,11 +126,11 @@ func WithLayerCompression(algo archive.CompressType) Opts {
case archive.CompressNone:
switch mt {
case mediatype.Docker2LayerGzip:
case mediatype.Docker2LayerGzip, mediatype.Docker2LayerZstd:
dl.newDesc = descriptor.Descriptor{
MediaType: mediatype.Docker2Layer,
}
case mediatype.OCI1LayerGzip:
case mediatype.OCI1LayerGzip, mediatype.OCI1LayerZstd:
dl.newDesc = descriptor.Descriptor{
MediaType: mediatype.OCI1Layer,
}
@ -97,6 +143,7 @@ func WithLayerCompression(algo archive.CompressType) Opts {
dig := digest.Canonical.Digester()
ucRdr, err := archive.Decompress(rdr)
if err != nil {
_ = rdr.Close()
return nil, err
}
digRdr := io.TeeReader(ucRdr, dig.Hash())

View File

@ -320,13 +320,19 @@ func WithManifestToDocker() Opts {
changed = true
}
for i, l := range ociM.Layers {
if l.MediaType == mediatype.OCI1LayerGzip {
switch l.MediaType {
case mediatype.OCI1Layer:
ociM.Layers[i].MediaType = mediatype.Docker2Layer
case mediatype.OCI1LayerGzip:
ociM.Layers[i].MediaType = mediatype.Docker2LayerGzip
changed = true
} else if l.MediaType == mediatype.OCI1ForeignLayerGzip {
case mediatype.OCI1LayerZstd:
ociM.Layers[i].MediaType = mediatype.Docker2LayerZstd
case mediatype.OCI1ForeignLayerGzip:
ociM.Layers[i].MediaType = mediatype.Docker2ForeignLayer
changed = true
default:
continue
}
changed = true
}
if changed {
dm := schema2.Manifest{}
@ -384,13 +390,19 @@ func WithManifestToOCI() Opts {
changed = true
}
for i, l := range ociM.Layers {
if l.MediaType == mediatype.Docker2LayerGzip {
switch l.MediaType {
case mediatype.Docker2Layer:
ociM.Layers[i].MediaType = mediatype.OCI1Layer
case mediatype.Docker2LayerGzip:
ociM.Layers[i].MediaType = mediatype.OCI1LayerGzip
changed = true
} else if l.MediaType == mediatype.Docker2ForeignLayer {
case mediatype.Docker2LayerZstd:
ociM.Layers[i].MediaType = mediatype.OCI1LayerZstd
case mediatype.Docker2ForeignLayer:
ociM.Layers[i].MediaType = mediatype.OCI1ForeignLayerGzip
changed = true
default:
continue
}
changed = true
}
if changed {
om = ociM

View File

@ -10,6 +10,7 @@ import (
"os"
"time"
"github.com/klauspost/compress/zstd"
"github.com/opencontainers/go-digest"
"github.com/regclient/regclient"
@ -36,8 +37,10 @@ var (
mtKnownTar = []string{
mediatype.Docker2Layer,
mediatype.Docker2LayerGzip,
mediatype.Docker2LayerZstd,
mediatype.OCI1Layer,
mediatype.OCI1LayerGzip,
mediatype.OCI1LayerZstd,
}
// known config media types
mtKnownConfig = []string{
@ -160,6 +163,7 @@ func Apply(ctx context.Context, rc *regclient.RegClient, rSrc ref.Ref, opts ...O
if mt != mediatype.OCI1Layer && mt != mediatype.Docker2Layer {
dr, err := archive.Decompress(rdr)
if err != nil {
_ = rdr.Close()
return nil, err
}
rdr = readCloserFn{Reader: dr, closeFn: rdr.Close}
@ -169,6 +173,7 @@ func Apply(ctx context.Context, rc *regclient.RegClient, rSrc ref.Ref, opts ...O
// create temp file and setup tar writer
fh, err := os.CreateTemp("", "regclient-mod-")
if err != nil {
_ = rdr.Close()
return nil, err
}
defer func() {
@ -177,6 +182,7 @@ func Apply(ctx context.Context, rc *regclient.RegClient, rSrc ref.Ref, opts ...O
}()
var tw *tar.Writer
var gw *gzip.Writer
var zw *zstd.Encoder
digRaw := digest.Canonical.Digester() // raw/compressed digest
digUC := digest.Canonical.Digester() // uncompressed digest
if dl.desc.MediaType == mediatype.Docker2LayerGzip || dl.desc.MediaType == mediatype.OCI1LayerGzip {
@ -185,6 +191,16 @@ func Apply(ctx context.Context, rc *regclient.RegClient, rSrc ref.Ref, opts ...O
defer gw.Close()
ucw := io.MultiWriter(gw, digUC.Hash())
tw = tar.NewWriter(ucw)
} else if dl.desc.MediaType == mediatype.Docker2LayerZstd || dl.desc.MediaType == mediatype.OCI1LayerZstd {
cw := io.MultiWriter(fh, digRaw.Hash())
zw, err = zstd.NewWriter(cw)
if err != nil {
_ = rdr.Close()
return nil, err
}
defer zw.Close()
ucw := io.MultiWriter(zw, digUC.Hash())
tw = tar.NewWriter(ucw)
} else {
dw := io.MultiWriter(fh, digRaw.Hash(), digUC.Hash())
tw = tar.NewWriter(dw)
@ -199,12 +215,13 @@ func Apply(ctx context.Context, rc *regclient.RegClient, rSrc ref.Ref, opts ...O
return nil, err
}
changeFile := unchanged
var rdr io.Reader
rdr = tr
var fileRdr io.Reader
fileRdr = tr
for _, slf := range dc.stepsLayerFile {
var changeCur changes
th, rdr, changeCur, err = slf(ctx, rc, rSrc, rTgt, dl, th, rdr)
th, fileRdr, changeCur, err = slf(ctx, rc, rSrc, rTgt, dl, th, fileRdr)
if err != nil {
_ = rdr.Close()
return nil, err
}
if changeCur != unchanged {
@ -220,11 +237,13 @@ func Apply(ctx context.Context, rc *regclient.RegClient, rSrc ref.Ref, opts ...O
empty = false
err = tw.WriteHeader(th)
if err != nil {
_ = rdr.Close()
return nil, err
}
if th.Typeflag == tar.TypeReg && th.Size > 0 {
_, err := io.CopyN(tw, rdr, th.Size)
_, err := io.CopyN(tw, fileRdr, th.Size)
if err != nil {
_ = rdr.Close()
return nil, err
}
}
@ -238,14 +257,23 @@ func Apply(ctx context.Context, rc *regclient.RegClient, rSrc ref.Ref, opts ...O
// close to flush remaining content
err = tw.Close()
if err != nil {
_ = rdr.Close()
return nil, fmt.Errorf("failed to close temporary tar layer: %w", err)
}
if gw != nil {
err = gw.Close()
if err != nil {
_ = rdr.Close()
return nil, fmt.Errorf("failed to close gzip writer: %w", err)
}
}
if zw != nil {
err = zw.Close()
if err != nil {
_ = rdr.Close()
return nil, fmt.Errorf("failed to close zstd writer: %w", err)
}
}
err = rdr.Close()
if err != nil {
return nil, fmt.Errorf("failed to close layer reader: %w", err)

View File

@ -474,6 +474,13 @@ func TestMod(t *testing.T) {
ref: "ocidir://testrepo:v1",
wantSame: true,
},
{
name: "Layer Compressed zstd",
opts: []Opts{
WithLayerCompression(archive.CompressZstd),
},
ref: "ocidir://testrepo:v1",
},
{
name: "Layer Reproducible",
opts: []Opts{

View File

@ -5,9 +5,11 @@ import (
"bytes"
"compress/bzip2"
"compress/gzip"
"errors"
"fmt"
"io"
"github.com/klauspost/compress/zstd"
"github.com/ulikunitz/xz"
)
@ -15,14 +17,11 @@ import (
type CompressType int
const (
// CompressNone detected no compression
CompressNone CompressType = iota
// CompressBzip2 compression
CompressBzip2
// CompressGzip compression
CompressGzip
// CompressXz compression
CompressXz
CompressNone CompressType = iota // uncompressed or unable to detect compression
CompressBzip2 // bzip2
CompressGzip // gzip
CompressXz // xz
CompressZstd // zstd
)
// compressHeaders are used to detect the compression type
@ -30,43 +29,58 @@ var compressHeaders = map[CompressType][]byte{
CompressBzip2: []byte("\x42\x5A\x68"),
CompressGzip: []byte("\x1F\x8B\x08"),
CompressXz: []byte("\xFD\x37\x7A\x58\x5A\x00"),
CompressZstd: []byte("\x28\xB5\x2F\xFD"),
}
func Compress(r io.Reader, oComp CompressType) (io.Reader, error) {
br := bufio.NewReader(r)
head, err := br.Peek(10)
if err != nil {
return br, err
}
rComp := DetectCompression(head)
if rComp == oComp {
return br, nil
}
func Compress(r io.Reader, oComp CompressType) (io.ReadCloser, error) {
switch oComp {
// note, bzip2 compression is not supported
case CompressGzip:
switch rComp {
case CompressNone:
return compressGzip(br)
case CompressBzip2:
return compressGzip(bzip2.NewReader(br))
case CompressXz:
cbr, _ := xz.NewReader(br)
return compressGzip(cbr)
}
return writeToRead(r, newGzipWriter)
case CompressXz:
return writeToRead(r, xz.NewWriter)
case CompressZstd:
return writeToRead(r, newZstdWriter)
case CompressNone:
return io.NopCloser(r), nil
default:
return nil, ErrUnknownType
}
// No other types currently supported
return nil, ErrUnknownType
}
func compressGzip(src io.Reader) (io.Reader, error) {
pipeR, pipeW := io.Pipe()
// newGzipWriter adapts gzip.NewWriter to the newWriterFn signature used by
// writeToRead; the error result is always nil.
func newGzipWriter(w io.Writer) (io.WriteCloser, error) {
	gw := gzip.NewWriter(w)
	return gw, nil
}
// newZstdWriter adapts zstd.NewWriter (with default options) to the
// newWriterFn signature used by writeToRead.
func newZstdWriter(w io.Writer) (io.WriteCloser, error) {
	zw, err := zstd.NewWriter(w)
	if err != nil {
		// Return a literal nil rather than the failed *zstd.Encoder so the
		// io.WriteCloser interface is nil on error (avoids the typed-nil trap).
		return nil, err
	}
	return zw, nil
}
// writeToRead uses a pipe + goroutine + copy to switch from a writer to a reader.
func writeToRead[wc io.WriteCloser](src io.Reader, newWriterFn func(io.Writer) (wc, error)) (io.ReadCloser, error) {
pr, pw := io.Pipe()
go func() {
defer pipeW.Close()
gzipW := gzip.NewWriter(pipeW)
defer gzipW.Close()
_, _ = io.Copy(gzipW, src)
// buffer output to avoid lots of small reads
bw := bufio.NewWriterSize(pw, 2<<16)
dest, err := newWriterFn(bw)
if err != nil {
_ = pw.CloseWithError(err)
return
}
if _, err := io.Copy(dest, src); err != nil {
_ = pw.CloseWithError(err)
}
if err := dest.Close(); err != nil {
_ = pw.CloseWithError(err)
}
if err := bw.Flush(); err != nil {
_ = pw.CloseWithError(err)
}
_ = pw.Close()
}()
return pipeR, nil
return pr, nil
}
// Decompress detects and extracts bzip2, gzip, xz, and zstd streams
@ -74,8 +88,8 @@ func Decompress(r io.Reader) (io.Reader, error) {
// create bufio to peek on first few bytes
br := bufio.NewReader(r)
head, err := br.Peek(10)
if err != nil {
return br, err
if err != nil && !errors.Is(err, io.EOF) {
return br, fmt.Errorf("failed to detect compression: %w", err)
}
// compare peeked data against known compression types
@ -86,6 +100,8 @@ func Decompress(r io.Reader) (io.Reader, error) {
return gzip.NewReader(br)
case CompressXz:
return xz.NewReader(br)
case CompressZstd:
return zstd.NewReader(br)
default:
return br, nil
}
@ -119,6 +135,8 @@ func (ct CompressType) MarshalText() ([]byte, error) {
return []byte("gzip"), nil
case CompressXz:
return []byte("xz"), nil
case CompressZstd:
return []byte("zstd"), nil
}
return nil, fmt.Errorf("unknown compression type")
}
@ -133,6 +151,8 @@ func (ct *CompressType) UnmarshalText(text []byte) error {
*ct = CompressGzip
case "xz":
*ct = CompressXz
case "zstd":
*ct = CompressZstd
default:
return fmt.Errorf("unknown compression type %s", string(text))
}

View File

@ -0,0 +1,70 @@
package archive
import (
"bytes"
"io"
"testing"
)
// TestMarshal verifies every compression type survives a
// MarshalText / UnmarshalText round trip unchanged.
func TestMarshal(t *testing.T) {
	algos := []CompressType{CompressNone, CompressGzip, CompressBzip2, CompressXz, CompressZstd}
	for _, algo := range algos {
		t.Run(algo.String(), func(t *testing.T) {
			text, err := algo.MarshalText()
			if err != nil {
				t.Fatalf("failed to marshal: %v", err)
			}
			var decoded CompressType
			if err = decoded.UnmarshalText(text); err != nil {
				t.Fatalf("failed to unmarshal: %v", err)
			}
			if decoded != algo {
				t.Errorf("marshaling round trip failed for %s: %v -> %s -> %v", algo.String(), algo, string(text), decoded)
			}
		})
	}
}
// TestRoundtrip compresses and decompresses sample content with each
// supported algorithm and verifies the output matches the input.
// CompressBzip2 is omitted because bzip2 compression is not supported.
func TestRoundtrip(t *testing.T) {
	t.Parallel()
	tt := []struct {
		name    string
		content []byte
	}{
		{
			name:    "empty",
			content: []byte(``),
		},
		{
			name:    "hello-world",
			content: []byte(`hello world`),
		},
	}
	for _, algo := range []CompressType{CompressNone, CompressGzip, CompressXz, CompressZstd} {
		algo := algo // capture loop variable (pre Go 1.22 semantics)
		t.Run(algo.String(), func(t *testing.T) {
			for _, tc := range tt {
				tc := tc // capture loop variable
				t.Run(tc.name, func(t *testing.T) {
					t.Parallel()
					br := bytes.NewReader(tc.content)
					cr, err := Compress(br, algo)
					if err != nil {
						t.Fatalf("failed to compress: %v", err)
					}
					// Compress returns an io.ReadCloser; close it so any
					// pipe/goroutine behind it is released.
					defer cr.Close()
					dr, err := Decompress(cr)
					if err != nil {
						t.Fatalf("failed to decompress: %v", err)
					}
					out, err := io.ReadAll(dr)
					if err != nil {
						t.Fatalf("failed to ReadAll: %v", err)
					}
					if !bytes.Equal(tc.content, out) {
						t.Errorf("output mismatch: expected %s, received %s", tc.content, out)
					}
				})
			}
		})
	}
}

View File

@ -63,11 +63,15 @@ func init() {
mediatype.Docker2ManifestList: mediatype.OCI1ManifestList,
mediatype.Docker2Manifest: mediatype.OCI1Manifest,
mediatype.Docker2ImageConfig: mediatype.OCI1ImageConfig,
mediatype.Docker2Layer: mediatype.OCI1Layer,
mediatype.Docker2LayerGzip: mediatype.OCI1LayerGzip,
mediatype.Docker2LayerZstd: mediatype.OCI1LayerZstd,
mediatype.OCI1ManifestList: mediatype.OCI1ManifestList,
mediatype.OCI1Manifest: mediatype.OCI1Manifest,
mediatype.OCI1ImageConfig: mediatype.OCI1ImageConfig,
mediatype.OCI1Layer: mediatype.OCI1Layer,
mediatype.OCI1LayerGzip: mediatype.OCI1LayerGzip,
mediatype.OCI1LayerZstd: mediatype.OCI1LayerZstd,
}
}
@ -143,12 +147,8 @@ func (d Descriptor) Same(d2 Descriptor) bool {
return false
}
// loosen the check on media type since this can be converted from a build
if d.MediaType != d2.MediaType {
if _, ok := mtToOCI[d.MediaType]; !ok {
return false
} else if mtToOCI[d.MediaType] != mtToOCI[d2.MediaType] {
return false
}
if d.MediaType != d2.MediaType && (mtToOCI[d.MediaType] != mtToOCI[d2.MediaType] || mtToOCI[d.MediaType] == "") {
return false
}
return true
}

View File

@ -29,6 +29,8 @@ const (
Docker2Layer = "application/vnd.docker.image.rootfs.diff.tar"
// Docker2LayerGzip is the default compressed layer for docker schema2.
Docker2LayerGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip"
// Docker2LayerZstd is the zstd compressed layer for docker schema2.
Docker2LayerZstd = "application/vnd.docker.image.rootfs.diff.tar.zstd"
// Docker2ForeignLayer is the default compressed layer for foreign layers in docker schema2.
Docker2ForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
// OCI1Layer is the uncompressed layer for OCIv1.