compress: define some consts for the compression algos
Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
commit d0fb29f2d7 (parent 5b23c4b55a)
@@ -96,7 +96,7 @@ type copier struct {
 	progress          chan types.ProgressProperties
 	blobInfoCache     types.BlobInfoCache
 	copyInParallel    bool
-	compressionFormat string
+	compressionFormat compression.Algorithm
 	compressionLevel  *int
 }
@@ -183,7 +183,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 		// we might want to add a separate CommonCtx — or would that be too confusing?
 		blobInfoCache:     blobinfocache.DefaultCache(options.DestinationCtx),
-		compressionFormat: options.DestinationCtx.CompressionFormat,
+		compressionFormat: *options.DestinationCtx.CompressionFormat,
 		compressionLevel:  options.DestinationCtx.CompressionLevel,
 	}
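With the SystemContext field becoming a *compression.Algorithm (see the types.go hunk below), a caller now resolves the algorithm by name and hands it to the copier. A minimal caller-side sketch; the copy.Options fields and the copy.Image signature are assumed from the surrounding API rather than shown in this diff, and the helper name is illustrative only:

package example

import (
	"context"

	"github.com/containers/image/copy"
	"github.com/containers/image/pkg/compression"
	"github.com/containers/image/signature"
	"github.com/containers/image/types"
)

// copyWithZstd is a hypothetical caller: it resolves the destination
// compression algorithm by name and passes it through DestinationCtx.
func copyWithZstd(ctx context.Context, policy *signature.PolicyContext, destRef, srcRef types.ImageReference) error {
	algo, err := compression.AlgorithmByName("zstd")
	if err != nil {
		return err
	}
	level := 3
	_, err = copy.Image(ctx, policy, destRef, srcRef, &copy.Options{
		DestinationCtx: &types.SystemContext{
			CompressionFormat: &algo, // *compression.Algorithm, per the types.go hunk below
			CompressionLevel:  &level,
		},
	})
	return err
}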
@@ -844,7 +844,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		destStream = pipeReader
 		inputInfo.Digest = ""
 		inputInfo.Size = -1
-	} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && desiredCompressionFormat != compressionFormat {
+	} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && desiredCompressionFormat.Name() != compressionFormat.Name() {
 		// When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
 		// re-compressed using the desired format.
 		logrus.Debugf("Blob will be converted")
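The condition now compares Name() values rather than the Algorithm values themselves: Algorithm carries slice- and function-typed fields, so the values are not comparable with ==. An illustrative helper (not part of this commit) that mirrors the same check in isolation:

package example

import (
	"io"

	"github.com/containers/image/pkg/compression"
)

// needsConversion mirrors the check above: re-compression is needed only
// when the blob is already compressed and its detected format differs, by
// name, from the desired one.
func needsConversion(blob io.Reader, desired compression.Algorithm) (bool, io.Reader, error) {
	detected, decompressor, rest, err := compression.DetectCompressionFormat(blob)
	if err != nil {
		return false, nil, err
	}
	isCompressed := decompressor != nil
	return isCompressed && detected.Name() != desired.Name(), rest, nil
}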
@@ -937,7 +937,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 }
 
 // compressGoroutine reads all input from src and writes its compressed equivalent to dest.
-func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, compressionFormat string) {
+func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, compressionFormat compression.Algorithm) {
 	err := errors.New("Internal error: unexpected panic in compressGoroutine")
 	defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
 		dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
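compressGoroutine now receives the Algorithm directly. For context, a rough, self-contained sketch of the io.Pipe pattern it serves under the new API; the helper name is illustrative and error handling is abbreviated compared with the copier's version:

package example

import (
	"io"

	"github.com/containers/image/pkg/compression"
)

// recompressStream shows the pipe arrangement: a goroutine writes the
// compressed form of src into one end of a pipe while the caller reads
// the result from the other end.
func recompressStream(src io.Reader, algo compression.Algorithm, level *int) io.Reader {
	pipeReader, pipeWriter := io.Pipe()
	go func() {
		compressor, err := compression.CompressStream(pipeWriter, algo, level)
		if err != nil {
			pipeWriter.CloseWithError(err)
			return
		}
		_, err = io.Copy(compressor, src)
		if closeErr := compressor.Close(); err == nil {
			err = closeErr
		}
		pipeWriter.CloseWithError(err) // CloseWithError(nil) behaves like Close()
	}()
	return pipeReader
}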
BIN  copy/fixtures/Hello.std (new file, binary content not shown)
@@ -58,51 +58,60 @@ func xzCompressor(r io.Writer, level *int) (io.WriteCloser, error) {
 	return xz.NewWriter(r)
 }
 
-// compressionAlgos is an internal implementation detail of DetectCompression
-var compressionAlgos = map[string]struct {
-	prefix       []byte
-	decompressor DecompressorFunc
-}{
-	"gzip":  {[]byte{0x1F, 0x8B, 0x08}, GzipDecompressor},                 // gzip (RFC 1952)
-	"bzip2": {[]byte{0x42, 0x5A, 0x68}, Bzip2Decompressor},                // bzip2 (decompress.c:BZ2_decompress)
-	"xz":    {[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt)
-	"zstd":  {[]byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor},           // zstd (http://www.zstd.net)
-}
-
-// compressors maps an algorithm to its compression function
-var compressors = map[string]compressorFunc{
-	"gzip":  gzipCompressor,
-	"bzip2": bzip2Compressor,
-	"xz":    xzCompressor,
-	"zstd":  zstdCompressor,
+// Algorithm is a compression algorithm that can be used for CompressStream.
+type Algorithm struct {
+	name         string
+	prefix       []byte
+	decompressor DecompressorFunc
+	compressor   compressorFunc
+}
+
+// Name returns the name for the compression algorithm.
+func (c Algorithm) Name() string {
+	return c.name
+}
+
+// compressionAlgos is an internal implementation detail of DetectCompression
+var compressionAlgos = []Algorithm{
+	{"gzip", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor},             // gzip (RFC 1952)
+	{"bzip2", []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor},          // bzip2 (decompress.c:BZ2_decompress)
+	{"xz", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt)
+	{"zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor},       // zstd (http://www.zstd.net)
+}
+
+// AlgorithmByName returns the compressor by its name
+func AlgorithmByName(name string) (Algorithm, error) {
+	for _, c := range compressionAlgos {
+		if c.name == name {
+			return c, nil
+		}
+	}
+	return Algorithm{}, fmt.Errorf("cannot find compressor for %q", name)
 }
 
 // CompressStream returns the compressor by its name
-func CompressStream(dest io.Writer, name string, level *int) (io.WriteCloser, error) {
-	c, found := compressors[name]
-	if !found {
-		return nil, fmt.Errorf("cannot find compressor for '%s'", name)
-	}
-	return c(dest, level)
+func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) {
+	return algo.compressor(dest, level)
 }
 
 // DetectCompressionFormat returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise.
 // Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
-func DetectCompressionFormat(input io.Reader) (string, DecompressorFunc, io.Reader, error) {
+func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.Reader, error) {
 	buffer := [8]byte{}
 
 	n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
 	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
 		// This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again.
 		// Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
-		return "", nil, nil, err
+		return Algorithm{}, nil, nil, err
 	}
 
-	name := ""
+	var retAlgo Algorithm
 	var decompressor DecompressorFunc
-	for algoname, algo := range compressionAlgos {
+	for _, algo := range compressionAlgos {
 		if bytes.HasPrefix(buffer[:n], algo.prefix) {
-			logrus.Debugf("Detected compression format %s", algoname)
-			name = algoname
+			logrus.Debugf("Detected compression format %s", algo.name)
+			retAlgo = algo
 			decompressor = algo.decompressor
 			break
 		}
@@ -111,7 +120,7 @@ func DetectCompressionFormat(input io.Reader) (string, DecompressorFunc, io.Read
 		logrus.Debugf("No compression detected")
 	}
 
-	return name, decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
+	return retAlgo, decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
 }
 
 // DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise.
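Taken together, Algorithm, AlgorithmByName, CompressStream, and the new DetectCompressionFormat return value give callers a name-addressable handle for each format. A hypothetical round-trip using this surface; the DecompressorFunc signature (io.Reader in, io.ReadCloser and error out) comes from the existing package rather than this diff, and the function name is illustrative:

package example

import (
	"bytes"
	"io/ioutil"

	"github.com/containers/image/pkg/compression"
)

// roundTrip looks an algorithm up by name, compresses a payload with it,
// then detects the format and decompresses the result again.
func roundTrip(payload []byte) ([]byte, error) {
	algo, err := compression.AlgorithmByName("gzip")
	if err != nil {
		return nil, err
	}

	var compressed bytes.Buffer
	w, err := compression.CompressStream(&compressed, algo, nil) // nil level: compressor default
	if err != nil {
		return nil, err
	}
	if _, err := w.Write(payload); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}

	detected, decompressor, stream, err := compression.DetectCompressionFormat(&compressed)
	if err != nil {
		return nil, err
	}
	if decompressor == nil {
		return ioutil.ReadAll(stream) // input was not in a recognized compressed format
	}
	_ = detected.Name() // "gzip" for this payload
	rc, err := decompressor(stream)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}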
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/pkg/compression"
 	"github.com/opencontainers/go-digest"
 	"github.com/opencontainers/image-spec/specs-go/v1"
 )
@@ -513,7 +514,7 @@ type SystemContext struct {
 	DirForceCompress bool
 
 	// CompressionFormat is the format to use for the compression of the blobs
-	CompressionFormat string
+	CompressionFormat *compression.Algorithm
 	// CompressionLevel specifies what compression level is used
 	CompressionLevel *int
 }