
Move copy.digestingReader and tests to separate files

The concept is independent enough from the rest of the
copy implementation, and copy/copy.go is large enough
that splitting it up a bit should improve orientation.

Only moves the code without changes, should not change behavior.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
Miloslav Trmač 2021-06-24 14:33:33 +02:00
parent 09579fbf65
commit 848f37c136
4 changed files with 136 additions and 117 deletions

copy/copy.go
View File

@@ -36,14 +36,6 @@ import (
	"golang.org/x/term"
)

type digestingReader struct {
	source              io.Reader
	digester            digest.Digester
	expectedDigest      digest.Digest
	validationFailed    bool
	validationSucceeded bool
}

var (
	// ErrDecryptParamsMissing is returned if there is missing decryption parameters
	ErrDecryptParamsMissing = errors.New("Necessary DecryptParameters not present")
@@ -68,49 +60,6 @@ var expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
	manifest.DockerV2Schema2LayerMediaType: &compression.Gzip,
}

// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
// (neither is set if EOF is never reached).
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
	var digester digest.Digester
	if err := expectedDigest.Validate(); err != nil {
		return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
	}
	digestAlgorithm := expectedDigest.Algorithm()
	if !digestAlgorithm.Available() {
		return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
	}
	digester = digestAlgorithm.Digester()
	return &digestingReader{
		source:           source,
		digester:         digester,
		expectedDigest:   expectedDigest,
		validationFailed: false,
	}, nil
}

func (d *digestingReader) Read(p []byte) (int, error) {
	n, err := d.source.Read(p)
	if n > 0 {
		if n2, err := d.digester.Hash().Write(p[:n]); n2 != n || err != nil {
			// Coverage: This should not happen, the hash.Hash interface requires
			// d.digest.Write to never return an error, and the io.Writer interface
			// requires n2 == len(input) if no error is returned.
			return 0, errors.Wrapf(err, "updating digest during verification: %d vs. %d", n2, n)
		}
	}
	if err == io.EOF {
		actualDigest := d.digester.Digest()
		if actualDigest != d.expectedDigest {
			d.validationFailed = true
			return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
		}
		d.validationSucceeded = true
	}
	return n, err
}

// copier allows us to keep track of diffID values for blobs, and other
// data shared across one or more images in a possible manifest list.
type copier struct {

copy/copy_test.go
View File

@@ -15,72 +15,6 @@ import (
	"github.com/stretchr/testify/require"
)

func TestNewDigestingReader(t *testing.T) {
	// Only the failure cases, success is tested in TestDigestingReaderRead below.
	source := bytes.NewReader([]byte("abc"))
	for _, input := range []digest.Digest{
		"abc",             // Not algo:hexvalue
		"crc32:",          // Unknown algorithm, empty value
		"crc32:012345678", // Unknown algorithm
		"sha256:",         // Empty value
		"sha256:0",        // Invalid hex value
		"sha256:01",       // Invalid length of hex value
	} {
		_, err := newDigestingReader(source, input)
		assert.Error(t, err, input.String())
	}
}

func TestDigestingReaderRead(t *testing.T) {
	cases := []struct {
		input  []byte
		digest digest.Digest
	}{
		{[]byte(""), "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
		{[]byte("abc"), "sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"},
		{make([]byte, 65537), "sha256:3266304f31be278d06c3bd3eb9aa3e00c59bedec0a890de466568b0b90b0e01f"},
	}
	// Valid input
	for _, c := range cases {
		source := bytes.NewReader(c.input)
		reader, err := newDigestingReader(source, c.digest)
		require.NoError(t, err, c.digest.String())
		dest := bytes.Buffer{}
		n, err := io.Copy(&dest, reader)
		assert.NoError(t, err, c.digest.String())
		assert.Equal(t, int64(len(c.input)), n, c.digest.String())
		assert.Equal(t, c.input, dest.Bytes(), c.digest.String())
		assert.False(t, reader.validationFailed, c.digest.String())
		assert.True(t, reader.validationSucceeded, c.digest.String())
	}
	// Modified input
	for _, c := range cases {
		source := bytes.NewReader(bytes.Join([][]byte{c.input, []byte("x")}, nil))
		reader, err := newDigestingReader(source, c.digest)
		require.NoError(t, err, c.digest.String())
		dest := bytes.Buffer{}
		_, err = io.Copy(&dest, reader)
		assert.Error(t, err, c.digest.String())
		assert.True(t, reader.validationFailed, c.digest.String())
		assert.False(t, reader.validationSucceeded, c.digest.String())
	}
	// Truncated input
	for _, c := range cases {
		source := bytes.NewReader(c.input)
		reader, err := newDigestingReader(source, c.digest)
		require.NoError(t, err, c.digest.String())
		if len(c.input) != 0 {
			dest := bytes.Buffer{}
			truncatedLen := int64(len(c.input) - 1)
			n, err := io.CopyN(&dest, reader, truncatedLen)
			assert.NoError(t, err, c.digest.String())
			assert.Equal(t, truncatedLen, n, c.digest.String())
		}
		assert.False(t, reader.validationFailed, c.digest.String())
		assert.False(t, reader.validationSucceeded, c.digest.String())
	}
}

func goDiffIDComputationGoroutineWithTimeout(layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) *diffIDResult {
	ch := make(chan diffIDResult)
	go diffIDComputationGoroutine(ch, layerStream, nil)

copy/digesting_reader.go Normal file (59 lines)
View File

@@ -0,0 +1,59 @@
package copy

import (
	"io"

	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

type digestingReader struct {
	source              io.Reader
	digester            digest.Digester
	expectedDigest      digest.Digest
	validationFailed    bool
	validationSucceeded bool
}

// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
// (neither is set if EOF is never reached).
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
	var digester digest.Digester
	if err := expectedDigest.Validate(); err != nil {
		return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
	}
	digestAlgorithm := expectedDigest.Algorithm()
	if !digestAlgorithm.Available() {
		return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
	}
	digester = digestAlgorithm.Digester()
	return &digestingReader{
		source:           source,
		digester:         digester,
		expectedDigest:   expectedDigest,
		validationFailed: false,
	}, nil
}

func (d *digestingReader) Read(p []byte) (int, error) {
	n, err := d.source.Read(p)
	if n > 0 {
		if n2, err := d.digester.Hash().Write(p[:n]); n2 != n || err != nil {
			// Coverage: This should not happen, the hash.Hash interface requires
			// d.digest.Write to never return an error, and the io.Writer interface
			// requires n2 == len(input) if no error is returned.
			return 0, errors.Wrapf(err, "updating digest during verification: %d vs. %d", n2, n)
		}
	}
	if err == io.EOF {
		actualDigest := d.digester.Digest()
		if actualDigest != d.expectedDigest {
			d.validationFailed = true
			return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
		}
		d.validationSucceeded = true
	}
	return n, err
}

copy/digesting_reader_test.go Normal file
View File

@@ -0,0 +1,77 @@
package copy

import (
	"bytes"
	"io"
	"testing"

	digest "github.com/opencontainers/go-digest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestNewDigestingReader(t *testing.T) {
	// Only the failure cases, success is tested in TestDigestingReaderRead below.
	source := bytes.NewReader([]byte("abc"))
	for _, input := range []digest.Digest{
		"abc",             // Not algo:hexvalue
		"crc32:",          // Unknown algorithm, empty value
		"crc32:012345678", // Unknown algorithm
		"sha256:",         // Empty value
		"sha256:0",        // Invalid hex value
		"sha256:01",       // Invalid length of hex value
	} {
		_, err := newDigestingReader(source, input)
		assert.Error(t, err, input.String())
	}
}

func TestDigestingReaderRead(t *testing.T) {
	cases := []struct {
		input  []byte
		digest digest.Digest
	}{
		{[]byte(""), "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
		{[]byte("abc"), "sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"},
		{make([]byte, 65537), "sha256:3266304f31be278d06c3bd3eb9aa3e00c59bedec0a890de466568b0b90b0e01f"},
	}
	// Valid input
	for _, c := range cases {
		source := bytes.NewReader(c.input)
		reader, err := newDigestingReader(source, c.digest)
		require.NoError(t, err, c.digest.String())
		dest := bytes.Buffer{}
		n, err := io.Copy(&dest, reader)
		assert.NoError(t, err, c.digest.String())
		assert.Equal(t, int64(len(c.input)), n, c.digest.String())
		assert.Equal(t, c.input, dest.Bytes(), c.digest.String())
		assert.False(t, reader.validationFailed, c.digest.String())
		assert.True(t, reader.validationSucceeded, c.digest.String())
	}
	// Modified input
	for _, c := range cases {
		source := bytes.NewReader(bytes.Join([][]byte{c.input, []byte("x")}, nil))
		reader, err := newDigestingReader(source, c.digest)
		require.NoError(t, err, c.digest.String())
		dest := bytes.Buffer{}
		_, err = io.Copy(&dest, reader)
		assert.Error(t, err, c.digest.String())
		assert.True(t, reader.validationFailed, c.digest.String())
		assert.False(t, reader.validationSucceeded, c.digest.String())
	}
	// Truncated input
	for _, c := range cases {
		source := bytes.NewReader(c.input)
		reader, err := newDigestingReader(source, c.digest)
		require.NoError(t, err, c.digest.String())
		if len(c.input) != 0 {
			dest := bytes.Buffer{}
			truncatedLen := int64(len(c.input) - 1)
			n, err := io.CopyN(&dest, reader, truncatedLen)
			assert.NoError(t, err, c.digest.String())
			assert.Equal(t, truncatedLen, n, c.digest.String())
		}
		assert.False(t, reader.validationFailed, c.digest.String())
		assert.False(t, reader.validationSucceeded, c.digest.String())
	}
}