Mirror of https://github.com/containers/buildah.git
Update to work with newer image library
Update shallowCopy() to work with the newer version of the image library. Remove things from Push() that we no longer need to do. Preserve digests in image names, make sure we update creation times, and add a test to ensure that we can pull, commit, and push using such names as sources.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
Closes: #187
Approved by: rhatdan
commit 8b2b56d9b8 (parent 544e63de42), committed by Atomic Bot
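The digest-preserving behavior described above comes down to one lookup pattern in Push(): try the argument as a name (which may carry a tag or digest), and if that fails, retry it as a bare image ID by prefixing "@". A minimal standalone sketch of that pattern, using the same containers/image storage-transport call as the diff below (resolveStoreReference is a hypothetical helper name; the error wrapping is simplified to the standard library):

```go
package sketch

import (
	"fmt"

	is "github.com/containers/image/storage"
	"github.com/containers/image/types"
	"github.com/containers/storage"
)

// resolveStoreReference mirrors the lookup in the reworked Push(): first try
// the argument as a name (which may include a tag or a digest), then fall
// back to treating it as a bare image ID by prefixing "@".
func resolveStoreReference(store storage.Store, image string) (types.ImageReference, error) {
	ref, err := is.Transport.ParseStoreReference(store, image)
	if err != nil {
		ref2, err2 := is.Transport.ParseStoreReference(store, "@"+image)
		if err2 != nil {
			return nil, fmt.Errorf("parsing reference to image %q: %v", image, err)
		}
		ref = ref2
	}
	return ref, nil
}
```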
commit.go | 162
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"fmt"
 	"io"
+	"syscall"
 	"time"
 
 	cp "github.com/containers/image/copy"
@@ -19,17 +20,6 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
-var (
-	// gzippedEmptyLayer is a gzip-compressed version of an empty tar file (just 1024 zero bytes). This
-	// comes from github.com/docker/distribution/manifest/schema1/config_builder.go by way of
-	// github.com/containers/image/image/docker_schema2.go; there is a non-zero embedded timestamp; we could
-	// zero that, but that would just waste storage space in registries, so let’s use the same values.
-	gzippedEmptyLayer = []byte{
-		31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
-		0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
-	}
-)
-
 // CommitOptions can be used to alter how an image is committed.
 type CommitOptions struct {
 	// PreferredManifestType is the preferred type of image manifest. The
@@ -90,7 +80,7 @@ type PushOptions struct {
 // almost any other destination has higher expectations.
 // We assume that "dest" is a reference to a local image (specifically, a containers/image/storage.storageReference),
 // and will fail if it isn't.
-func (b *Builder) shallowCopy(dest types.ImageReference, src types.ImageReference, systemContext *types.SystemContext) error {
+func (b *Builder) shallowCopy(dest types.ImageReference, src types.ImageReference, systemContext *types.SystemContext, compression archive.Compression) error {
 	var names []string
 	// Read the target image name.
 	if dest.DockerReference() != nil {
@@ -106,10 +96,34 @@ func (b *Builder) shallowCopy(dest types.ImageReference, src types.ImageReferenc
 	if err != nil {
 		return errors.Wrapf(err, "error opening image %q for writing", transports.ImageName(dest))
 	}
-	// Write an empty filesystem layer, because the image layer requires at least one.
-	_, err = destImage.PutBlob(bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Size: int64(len(gzippedEmptyLayer))})
+	// Look up the container's read-write layer.
+	container, err := b.store.Container(b.ContainerID)
 	if err != nil {
-		return errors.Wrapf(err, "error writing dummy layer for image %q", transports.ImageName(dest))
+		return errors.Wrapf(err, "error reading information about working container %q", b.ContainerID)
+	}
+	// Extract the read-write layer's contents, using whatever compression the container image used to
+	// calculate the blob sum in the manifest.
+	switch compression {
+	case archive.Gzip:
+		logrus.Debugf("extracting layer %q with gzip", container.LayerID)
+	case archive.Bzip2:
+		// Until the image specs define a media type for bzip2-compressed layers, even if we know
+		// how to decompress them, we can't try to compress layers with bzip2.
+		return errors.Wrapf(syscall.ENOTSUP, "media type for bzip2-compressed layers is not defined")
+	default:
+		logrus.Debugf("extracting layer %q with unknown compressor(?)", container.LayerID)
+	}
+	diffOptions := &storage.DiffOptions{
+		Compression: &compression,
+	}
+	layerDiff, err := b.store.Diff("", container.LayerID, diffOptions)
+	if err != nil {
+		return errors.Wrapf(err, "error reading layer %q from source image %q", container.LayerID, transports.ImageName(src))
+	}
+	defer layerDiff.Close()
+	// Write a copy of the layer as a blob, for the new image to reference.
+	if _, err = destImage.PutBlob(layerDiff, types.BlobInfo{Digest: "", Size: -1}); err != nil {
+		return errors.Wrapf(err, "error creating new read-only layer from container %q", b.ContainerID)
 	}
 	// Read the newly-generated configuration blob.
 	config, err := srcImage.ConfigBlob()
@@ -125,11 +139,10 @@ func (b *Builder) shallowCopy(dest types.ImageReference, src types.ImageReferenc
 		Digest: digest.Canonical.FromBytes(config),
 		Size:   int64(len(config)),
 	}
-	_, err = destImage.PutBlob(bytes.NewReader(config), configBlobInfo)
-	if err != nil && len(config) > 0 {
+	if _, err = destImage.PutBlob(bytes.NewReader(config), configBlobInfo); err != nil {
 		return errors.Wrapf(err, "error writing image configuration for temporary copy of %q", transports.ImageName(dest))
 	}
-	// Read the newly-generated, mostly fake, manifest.
+	// Read the newly-generated manifest, which already contains a layer entry for the read-write layer.
 	manifest, _, err := srcImage.Manifest()
 	if err != nil {
 		return errors.Wrapf(err, "error reading new manifest for image %q", transports.ImageName(dest))
@@ -148,79 +161,9 @@ func (b *Builder) shallowCopy(dest types.ImageReference, src types.ImageReferenc
 	if err != nil {
 		return errors.Wrapf(err, "error closing new image %q", transports.ImageName(dest))
 	}
-	// Locate the new image in the lower-level API. Extract its items.
-	destImg, err := is.Transport.GetStoreImage(b.store, dest)
+	image, err := is.Transport.GetStoreImage(b.store, dest)
 	if err != nil {
-		return errors.Wrapf(err, "error locating new image %q", transports.ImageName(dest))
-	}
-	items, err := b.store.ListImageBigData(destImg.ID)
-	if err != nil {
-		return errors.Wrapf(err, "error reading list of named data for image %q", destImg.ID)
-	}
-	bigdata := make(map[string][]byte)
-	for _, itemName := range items {
-		var data []byte
-		data, err = b.store.ImageBigData(destImg.ID, itemName)
-		if err != nil {
-			return errors.Wrapf(err, "error reading named data %q for image %q", itemName, destImg.ID)
-		}
-		bigdata[itemName] = data
-	}
-	// Delete the image so that we can recreate it.
-	_, err = b.store.DeleteImage(destImg.ID, true)
-	if err != nil {
-		return errors.Wrapf(err, "error deleting image %q for rewriting", destImg.ID)
-	}
-	// Look up the container's read-write layer.
-	container, err := b.store.Container(b.ContainerID)
-	if err != nil {
-		return errors.Wrapf(err, "error reading information about working container %q", b.ContainerID)
-	}
-	parentLayer := ""
-	// Look up the container's source image's layer, if there is a source image.
-	if container.ImageID != "" {
-		img, err2 := b.store.Image(container.ImageID)
-		if err2 != nil {
-			return errors.Wrapf(err2, "error reading information about working container %q's source image", b.ContainerID)
-		}
-		parentLayer = img.TopLayer
-	}
-	// Extract the read-write layer's contents.
-	layerDiff, err := b.store.Diff(parentLayer, container.LayerID, nil)
-	if err != nil {
-		return errors.Wrapf(err, "error reading layer %q from source image %q", container.LayerID, transports.ImageName(src))
-	}
-	defer layerDiff.Close()
-	// Write a copy of the layer for the new image to reference.
-	layer, _, err := b.store.PutLayer("", parentLayer, []string{}, "", false, layerDiff)
-	if err != nil {
-		return errors.Wrapf(err, "error creating new read-only layer from container %q", b.ContainerID)
-	}
-	// Create a low-level image record that uses the new layer, discarding the old metadata.
-	image, err := b.store.CreateImage(destImg.ID, []string{}, layer.ID, "{}", nil)
-	if err != nil {
-		err2 := b.store.DeleteLayer(layer.ID)
-		if err2 != nil {
-			logrus.Debugf("error removing layer %q: %v", layer, err2)
-		}
-		return errors.Wrapf(err, "error creating new low-level image %q", transports.ImageName(dest))
-	}
-	logrus.Debugf("(re-)created image ID %q using layer %q", image.ID, layer.ID)
-	defer func() {
-		if err != nil {
-			_, err2 := b.store.DeleteImage(image.ID, true)
-			if err2 != nil {
-				logrus.Debugf("error removing image %q: %v", image.ID, err2)
-			}
-		}
-	}()
-	// Store the configuration and manifest, which are big data items, along with whatever else is there.
-	for itemName, data := range bigdata {
-		err = b.store.SetImageBigData(image.ID, itemName, data)
-		if err != nil {
-			return errors.Wrapf(err, "error saving data item %q", itemName)
-		}
-		logrus.Debugf("saved data item %q to %q", itemName, image.ID)
+		return errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
 	}
 	// Add the target name(s) to the new image.
 	if len(names) > 0 {
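The rewritten shallowCopy() above no longer writes a dummy layer and rebuilds the low-level image record by hand: containers/storage can now hand back the read-write layer's diff already compressed, so the blob it stores matches the blob sum that the manifest records. A minimal sketch of that extract-and-store step, using the same Diff() and PutBlob() calls as the hunk above (putLayerBlob is a hypothetical helper; the store, layer ID, and open image destination are assumed to come from the caller):

```go
package sketch

import (
	"github.com/containers/image/types"
	"github.com/containers/storage"
	"github.com/containers/storage/pkg/archive"
)

// putLayerBlob streams a container's read-write layer into an image
// destination, letting containers/storage apply the requested compression so
// the written blob matches the blob sum recorded in the manifest.
func putLayerBlob(store storage.Store, layerID string, destImage types.ImageDestination, compression archive.Compression) error {
	diffOptions := &storage.DiffOptions{
		// e.g. archive.Gzip, to match a schema2-style compressed blob sum
		Compression: &compression,
	}
	layerDiff, err := store.Diff("", layerID, diffOptions)
	if err != nil {
		return err
	}
	defer layerDiff.Close()
	// Digest and size are unknown up front; the destination hashes and
	// counts the blob as it streams through.
	_, err = destImage.PutBlob(layerDiff, types.BlobInfo{Digest: "", Size: -1})
	return err
}
```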
@@ -253,7 +196,7 @@ func (b *Builder) Commit(dest types.ImageReference, options CommitOptions) error
 	// Check if we're keeping everything in local storage. If so, we can take certain shortcuts.
 	_, destIsStorage := dest.Transport().(is.StoreTransport)
 	exporting := !destIsStorage
-	src, err := b.makeContainerImageRef(options.PreferredManifestType, exporting, options.Compression, options.HistoryTimestamp)
+	src, err := b.makeImageRef(options.PreferredManifestType, exporting, options.Compression, options.HistoryTimestamp)
 	if err != nil {
 		return errors.Wrapf(err, "error computing layer digests and building metadata")
 	}
@@ -265,7 +208,7 @@ func (b *Builder) Commit(dest types.ImageReference, options CommitOptions) error
 		}
 	} else {
 		// Copy only the most recent layer, the configuration, and the manifest.
-		err = b.shallowCopy(dest, src, getSystemContext(options.SignaturePolicyPath))
+		err = b.shallowCopy(dest, src, getSystemContext(options.SignaturePolicyPath), options.Compression)
 		if err != nil {
 			return errors.Wrapf(err, "error copying layer and metadata")
 		}
@@ -300,35 +243,14 @@ func Push(image string, dest types.ImageReference, options PushOptions) error {
 	if err != nil {
 		return errors.Wrapf(err, "error creating new signature policy context")
 	}
-	defer func() {
-		if err2 := policyContext.Destroy(); err2 != nil {
-			logrus.Debugf("error destroying signature polcy context: %v", err2)
+	// Look up the image.
+	src, err := is.Transport.ParseStoreReference(options.Store, image)
+	if err != nil {
+		src2, err2 := is.Transport.ParseStoreReference(options.Store, "@"+image)
+		if err2 != nil {
+			return errors.Wrapf(err, "error parsing reference to image %q", image)
 		}
-	}()
-	importOptions := ImportFromImageOptions{
-		Image:               image,
-		SignaturePolicyPath: options.SignaturePolicyPath,
-	}
-	builder, err := importBuilderFromImage(options.Store, importOptions)
-	if err != nil {
-		return errors.Wrap(err, "error importing builder information from image")
-	}
-	// Look up the image name and its layer.
-	ref, err := is.Transport.ParseStoreReference(options.Store, builder.FromImage)
-	if err != nil {
-		return errors.Wrapf(err, "error parsing reference to image %q", image)
-	}
-	img, err := is.Transport.GetStoreImage(options.Store, ref)
-	if err != nil {
-		return errors.Wrapf(err, "error locating image %q", image)
-	}
-	// Give the image we're producing the same ancestors as its source image.
-	builder.FromImage = builder.Docker.ContainerConfig.Image
-	builder.FromImageID = string(builder.Docker.Parent)
-	// Prep the layers and manifest for export.
-	src, err := builder.makeImageImageRef(options.Compression, img.Names, img.TopLayer, nil)
-	if err != nil {
-		return errors.Wrapf(err, "error recomputing layer digests and building metadata")
+		src = src2
 	}
 	// Copy everything.
 	err = cp.Image(policyContext, dest, src, getCopyOptions(options.ReportWriter, nil, options.SystemContext, options.ManifestType))
image.go | 163
@@ -12,7 +12,6 @@ import (
 
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/image"
-	"github.com/containers/image/manifest"
 	is "github.com/containers/image/storage"
 	"github.com/containers/image/types"
 	"github.com/containers/storage"
@@ -43,7 +42,6 @@ type containerImageRef struct {
 	name       reference.Named
 	names      []string
 	layerID    string
-	addHistory bool
 	oconfig    []byte
 	dconfig    []byte
 	created    time.Time
@@ -59,7 +57,6 @@ type containerImageSource struct {
 	store        storage.Store
 	layerID      string
 	names        []string
-	addHistory   bool
 	compression  archive.Compression
 	config       []byte
 	configDigest digest.Digest
@@ -68,12 +65,12 @@ type containerImageSource struct {
 	exporting    bool
 }
 
-func (i *containerImageRef) NewImage(sc *types.SystemContext) (types.Image, error) {
+func (i *containerImageRef) NewImage(sc *types.SystemContext) (types.ImageCloser, error) {
 	src, err := i.NewImageSource(sc)
 	if err != nil {
 		return nil, err
 	}
-	return image.FromSource(src)
+	return image.FromSource(sc, src)
 }
 
 func (i *containerImageRef) NewImageSource(sc *types.SystemContext) (src types.ImageSource, err error) {
@@ -128,11 +125,14 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext) (src types.I
 	if err != nil {
 		return nil, err
 	}
+	created := i.created
+	oimage.Created = &created
 	dimage := docker.V2Image{}
 	err = json.Unmarshal(i.dconfig, &dimage)
 	if err != nil {
 		return nil, err
 	}
+	dimage.Created = created
 
 	// Start building manifests.
 	omanifest := v1.Manifest{
@@ -164,9 +164,39 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext) (src types.I
 
 	// Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
 	for _, layerID := range layers {
+		// The default layer media type assumes no compression.
 		omediaType := v1.MediaTypeImageLayer
 		dmediaType := docker.V2S2MediaTypeUncompressedLayer
-		// Figure out which media type we want to call this. Assume no compression.
+		// If we're not re-exporting the data, reuse the blobsum and diff IDs.
+		if !i.exporting && layerID != i.layerID {
+			layer, err2 := i.store.Layer(layerID)
+			if err2 != nil {
+				return nil, errors.Wrapf(err, "unable to locate layer %q", layerID)
+			}
+			if layer.UncompressedDigest == "" {
+				return nil, errors.Errorf("unable to look up size of layer %q", layerID)
+			}
+			layerBlobSum := layer.UncompressedDigest
+			layerBlobSize := layer.UncompressedSize
+			// Note this layer in the manifest, using the uncompressed blobsum.
+			olayerDescriptor := v1.Descriptor{
+				MediaType: omediaType,
+				Digest:    layerBlobSum,
+				Size:      layerBlobSize,
+			}
+			omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
+			dlayerDescriptor := docker.V2S2Descriptor{
+				MediaType: dmediaType,
+				Digest:    layerBlobSum,
+				Size:      layerBlobSize,
+			}
+			dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
+			// Note this layer in the list of diffIDs, again using the uncompressed blobsum.
+			oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, layerBlobSum)
+			dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, layerBlobSum)
+			continue
+		}
+		// Figure out if we need to change the media type, in case we're using compression.
 		if i.compression != archive.Uncompressed {
 			switch i.compression {
 			case archive.Gzip:
@@ -181,46 +211,18 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext) (src types.I
 				logrus.Debugf("compressing layer %q with unknown compressor(?)", layerID)
 			}
 		}
-		// If we're not re-exporting the data, just fake up layer and diff IDs for the manifest.
-		if !i.exporting {
-			fakeLayerDigest := digest.NewDigestFromHex(digest.Canonical.String(), layerID)
-			// Add a note in the manifest about the layer. The blobs should be identified by their
-			// possibly-compressed blob digests, but just use the layer IDs here.
-			olayerDescriptor := v1.Descriptor{
-				MediaType: omediaType,
-				Digest:    fakeLayerDigest,
-				Size:      -1,
-			}
-			omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
-			dlayerDescriptor := docker.V2S2Descriptor{
-				MediaType: dmediaType,
-				Digest:    fakeLayerDigest,
-				Size:      -1,
-			}
-			dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
-			// Add a note about the diffID, which should be uncompressed digest of the blob, but
-			// just use the layer ID here.
-			oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, fakeLayerDigest)
-			dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, fakeLayerDigest)
-			continue
-		}
 		// Start reading the layer.
-		rc, err := i.store.Diff("", layerID, nil)
+		noCompression := archive.Uncompressed
+		diffOptions := &storage.DiffOptions{
+			Compression: &noCompression,
+		}
+		rc, err := i.store.Diff("", layerID, diffOptions)
 		if err != nil {
 			return nil, errors.Wrapf(err, "error extracting layer %q", layerID)
 		}
 		defer rc.Close()
-		// Set up to decompress the layer, in case it's coming out compressed. Due to implementation
-		// differences, the result may not match the digest the blob had when it was originally imported,
-		// so we have to recompute all of this anyway if we want to be sure the digests we use will be
-		// correct.
-		uncompressed, err := archive.DecompressStream(rc)
-		if err != nil {
-			return nil, errors.Wrapf(err, "error decompressing layer %q", layerID)
-		}
-		defer uncompressed.Close()
 		srcHasher := digest.Canonical.Digester()
-		reader := io.TeeReader(uncompressed, srcHasher.Hash())
+		reader := io.TeeReader(rc, srcHasher.Hash())
 		// Set up to write the possibly-recompressed blob.
 		layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
 		if err != nil {
@@ -229,7 +231,7 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext) (src types.I
 		destHasher := digest.Canonical.Digester()
 		counter := ioutils.NewWriteCounter(layerFile)
 		multiWriter := io.MultiWriter(counter, destHasher.Hash())
-		// Compress the layer, if we're compressing it.
+		// Compress the layer, if we're recompressing it.
 		writer, err := archive.CompressStream(multiWriter, i.compression)
 		if err != nil {
 			return nil, errors.Wrapf(err, "error compressing layer %q", layerID)
@@ -267,28 +269,26 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext) (src types.I
 			Size:      size,
 		}
 		dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
-		// Add a note about the diffID, which is always an uncompressed value.
+		// Add a note about the diffID, which is always the layer's uncompressed digest.
 		oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest())
 		dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest())
 	}
 
-	if i.addHistory {
-		// Build history notes in the image configurations.
-		onews := v1.History{
-			Created:    &i.created,
-			CreatedBy:  i.createdBy,
-			Author:     oimage.Author,
-			EmptyLayer: false,
-		}
-		oimage.History = append(oimage.History, onews)
-		dnews := docker.V2S2History{
-			Created:    i.created,
-			CreatedBy:  i.createdBy,
-			Author:     dimage.Author,
-			EmptyLayer: false,
-		}
-		dimage.History = append(dimage.History, dnews)
+	// Build history notes in the image configurations.
+	onews := v1.History{
+		Created:    &i.created,
+		CreatedBy:  i.createdBy,
+		Author:     oimage.Author,
+		EmptyLayer: false,
 	}
+	oimage.History = append(oimage.History, onews)
+	dnews := docker.V2S2History{
+		Created:    i.created,
+		CreatedBy:  i.createdBy,
+		Author:     dimage.Author,
+		EmptyLayer: false,
+	}
+	dimage.History = append(dimage.History, dnews)
 
 	// Encode the image configuration blob.
 	oconfig, err := json.Marshal(&oimage)
@@ -347,7 +347,6 @@ func (i *containerImageRef) NewImageSource(sc *types.SystemContext) (src types.I
 		store:        i.store,
 		layerID:      i.layerID,
 		names:        i.names,
-		addHistory:   i.addHistory,
 		compression:  i.compression,
 		config:       config,
 		configDigest: digest.Canonical.FromBytes(config),
@@ -402,16 +401,22 @@ func (i *containerImageSource) Reference() types.ImageReference {
 	return i.ref
 }
 
-func (i *containerImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
+func (i *containerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+	if instanceDigest != nil && *instanceDigest != digest.FromBytes(i.manifest) {
+		return nil, errors.Errorf("TODO")
+	}
 	return nil, nil
 }
 
-func (i *containerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
-	return []byte{}, "", errors.Errorf("TODO")
+func (i *containerImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) {
+	if instanceDigest != nil && *instanceDigest != digest.FromBytes(i.manifest) {
+		return nil, "", errors.Errorf("TODO")
+	}
+	return i.manifest, i.manifestType, nil
 }
 
-func (i *containerImageSource) GetManifest() ([]byte, string, error) {
-	return i.manifest, i.manifestType, nil
+func (i *containerImageSource) LayerInfosForCopy() []types.BlobInfo {
+	return nil
 }
 
 func (i *containerImageSource) GetBlob(blob types.BlobInfo) (reader io.ReadCloser, size int64, err error) {
@@ -445,10 +450,14 @@ func (i *containerImageSource) GetBlob(blob types.BlobInfo) (reader io.ReadClose
 	return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil
 }
 
-func (b *Builder) makeImageRef(manifestType string, exporting, addHistory bool, compress archive.Compression, names []string, layerID string, historyTimestamp *time.Time) (types.ImageReference, error) {
+func (b *Builder) makeImageRef(manifestType string, exporting bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) {
 	var name reference.Named
-	if len(names) > 0 {
-		if parsed, err := reference.ParseNamed(names[0]); err == nil {
+	container, err := b.store.Container(b.ContainerID)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error locating container %q", b.ContainerID)
+	}
+	if len(container.Names) > 0 {
+		if parsed, err2 := reference.ParseNamed(container.Names[0]); err2 == nil {
 			name = parsed
 		}
 	}
@@ -471,9 +480,8 @@ func (b *Builder) makeImageRef(manifestType string, exporting, addHistory bool,
 		store:       b.store,
 		compression: compress,
 		name:        name,
-		names:       names,
-		layerID:     layerID,
-		addHistory:  addHistory,
+		names:       container.Names,
+		layerID:     container.LayerID,
 		oconfig:     oconfig,
 		dconfig:     dconfig,
 		created:     created,
@@ -484,18 +492,3 @@
 	}
 	return ref, nil
 }
-
-func (b *Builder) makeContainerImageRef(manifestType string, exporting bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) {
-	if manifestType == "" {
-		manifestType = OCIv1ImageManifest
-	}
-	container, err := b.store.Container(b.ContainerID)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error locating container %q", b.ContainerID)
-	}
-	return b.makeImageRef(manifestType, exporting, true, compress, container.Names, container.LayerID, historyTimestamp)
-}
-
-func (b *Builder) makeImageImageRef(compress archive.Compression, names []string, layerID string, historyTimestamp *time.Time) (types.ImageReference, error) {
-	return b.makeImageRef(manifest.GuessMIMEType(b.Manifest), true, false, compress, names, layerID, historyTimestamp)
-}
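The central change in image.go is the early-out at the top of the layer loop: when the image is staying in local storage, the manifest reuses the uncompressed digest and size that containers/storage recorded for each layer, instead of the fabricated per-layer digests the old code emitted. A sketch of that lookup under the same assumptions (uncompressedDescriptor is a hypothetical helper name):

```go
package sketch

import (
	"fmt"

	"github.com/containers/storage"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// uncompressedDescriptor builds an OCI layer descriptor from the digest and
// size that containers/storage recorded when the layer was written, avoiding
// a re-read and re-hash of the layer's contents. This only applies when the
// blob is not being re-exported, so the uncompressed media type is used.
func uncompressedDescriptor(store storage.Store, layerID string) (v1.Descriptor, error) {
	layer, err := store.Layer(layerID)
	if err != nil {
		return v1.Descriptor{}, err
	}
	if layer.UncompressedDigest == "" {
		return v1.Descriptor{}, fmt.Errorf("no recorded uncompressed digest for layer %q", layerID)
	}
	return v1.Descriptor{
		MediaType: v1.MediaTypeImageLayer,
		Digest:    layer.UncompressedDigest,
		Size:      layer.UncompressedSize,
	}, nil
}
```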
tests/digest.bats | 40 (new file)
@@ -0,0 +1,40 @@
+#!/usr/bin/env bats
+
+load helpers
+
+fromreftest() {
+	cid=$(buildah from --pull --signature-policy ${TESTSDIR}/policy.json $1)
+	pushdir=${TESTDIR}/fromreftest
+	mkdir -p ${pushdir}/{1,2,3}
+	buildah push --signature-policy ${TESTSDIR}/policy.json $1 dir:${pushdir}/1
+	buildah commit --signature-policy ${TESTSDIR}/policy.json $cid new-image
+	buildah push --signature-policy ${TESTSDIR}/policy.json new-image dir:${pushdir}/2
+	buildah rmi new-image
+	buildah commit --signature-policy ${TESTSDIR}/policy.json $cid dir:${pushdir}/3
+	buildah rm $cid
+	rm -fr ${pushdir}
+}
+
+@test "from-by-digest-s1" {
+	fromreftest kubernetes/pause@sha256:f8cd50c5a287dd8c5f226cf69c60c737d34ed43726c14b8a746d9de2d23eda2b
+}
+
+@test "from-by-tag-s1" {
+	fromreftest kubernetes/pause:go
+}
+
+@test "from-by-repo-only-s1" {
+	fromreftest kubernetes/pause
+}
+
+@test "from-by-digest-s2" {
+	fromreftest alpine@sha256:e9cec9aec697d8b9d450edd32860ecd363f2f3174c8338beb5f809422d182c63
+}
+
+@test "from-by-tag-s2" {
+	fromreftest alpine:2.6
+}
+
+@test "from-by-repo-only-s2" {
+	fromreftest alpine
+}
tests/imgtype.go

@@ -8,6 +8,7 @@ import (
 	"strings"
 
 	is "github.com/containers/image/storage"
+	"github.com/containers/image/transports/alltransports"
 	"github.com/containers/image/types"
 	"github.com/containers/storage"
 	"github.com/opencontainers/image-spec/specs-go/v1"
@@ -33,7 +34,9 @@ func main() {
 	policy := flag.String("signature-policy", "", "signature policy file")
 	mtype := flag.String("expected-manifest-type", buildah.OCIv1ImageManifest, "expected manifest type")
 	showm := flag.Bool("show-manifest", false, "output the manifest JSON")
+	rebuildm := flag.Bool("rebuild-manifest", false, "rebuild the manifest JSON")
 	showc := flag.Bool("show-config", false, "output the configuration JSON")
+	rebuildc := flag.Bool("rebuild-config", false, "rebuild the configuration JSON")
 	flag.Parse()
 	logrus.SetLevel(logrus.ErrorLevel)
 	if debug != nil && *debug {
@@ -79,6 +82,7 @@ func main() {
 		logrus.Errorf("error opening storage: %v", err)
 		return
 	}
+	is.Transport.SetStore(store)
 
 	errors := false
 	defer func() {
@@ -88,6 +92,7 @@ func main() {
 		}
 	}()
 	for _, image := range args {
+		var ref types.ImageReference
 		oImage := v1.Image{}
 		dImage := docker.V2Image{}
 		oManifest := v1.Manifest{}
@@ -97,9 +102,13 @@ func main() {
 
 		ref, err := is.Transport.ParseStoreReference(store, image)
 		if err != nil {
-			logrus.Errorf("error parsing reference %q: %v", image, err)
-			errors = true
-			continue
+			ref2, err2 := alltransports.ParseImageName(image)
+			if err2 != nil {
+				logrus.Errorf("error parsing reference %q: %v", image, err)
+				errors = true
+				continue
+			}
+			ref = ref2
 		}
 
 		img, err := ref.NewImage(systemContext)
@@ -161,6 +170,66 @@ func main() {
 			errors = true
 			continue
 		}
+		switch manifestType {
+		case buildah.OCIv1ImageManifest:
+			if rebuildm != nil && *rebuildm {
+				err = json.Unmarshal(manifest, &oManifest)
+				if err != nil {
+					logrus.Errorf("error parsing manifest from %q: %v", image, err)
+					errors = true
+					continue
+				}
+				manifest, err = json.Marshal(oManifest)
+				if err != nil {
+					logrus.Errorf("error rebuilding manifest from %q: %v", image, err)
+					errors = true
+					continue
+				}
+			}
+			if rebuildc != nil && *rebuildc {
+				err = json.Unmarshal(config, &oImage)
+				if err != nil {
+					logrus.Errorf("error parsing config from %q: %v", image, err)
+					errors = true
+					continue
+				}
+				config, err = json.Marshal(oImage)
+				if err != nil {
+					logrus.Errorf("error rebuilding config from %q: %v", image, err)
+					errors = true
+					continue
+				}
+			}
+		case buildah.Dockerv2ImageManifest:
+			if rebuildm != nil && *rebuildm {
+				err = json.Unmarshal(manifest, &dManifest)
+				if err != nil {
+					logrus.Errorf("error parsing manifest from %q: %v", image, err)
+					errors = true
+					continue
+				}
+				manifest, err = json.Marshal(dManifest)
+				if err != nil {
+					logrus.Errorf("error rebuilding manifest from %q: %v", image, err)
+					errors = true
+					continue
+				}
+			}
+			if rebuildc != nil && *rebuildc {
+				err = json.Unmarshal(config, &dImage)
+				if err != nil {
+					logrus.Errorf("error parsing config from %q: %v", image, err)
+					errors = true
+					continue
+				}
+				config, err = json.Marshal(dImage)
+				if err != nil {
+					logrus.Errorf("error rebuilding config from %q: %v", image, err)
+					errors = true
+					continue
+				}
+			}
+		}
 		if expectedConfigType != "" && configType != expectedConfigType {
 			logrus.Errorf("expected config type %q in %q, got %q", expectedConfigType, image, configType)
 			errors = true
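The new -rebuild-manifest and -rebuild-config flags normalize JSON by round-tripping it through the typed structs, so byte-for-byte comparisons in the tests ignore differences between encoders and struct definitions. The manifest half of that, as a minimal sketch (rebuildManifest is a hypothetical name; only the OCI case is shown):

```go
package sketch

import (
	"encoding/json"
	"fmt"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// rebuildManifest round-trips raw manifest JSON through the OCI manifest
// struct, so two manifests that differ only in encoder details (field order,
// whitespace, fields the struct doesn't define) compare equal afterwards.
func rebuildManifest(raw []byte) ([]byte, error) {
	var m v1.Manifest
	if err := json.Unmarshal(raw, &m); err != nil {
		return nil, fmt.Errorf("parsing manifest: %v", err)
	}
	return json.Marshal(m)
}
```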
tests/commit.bats

@@ -8,11 +8,15 @@ load helpers
 	cid=$(buildah from --pull=false --signature-policy ${TESTSDIR}/policy.json ${source})
 	for format in "" docker oci ; do
 		mkdir -p ${TESTDIR}/committed${format:+.${format}}
-		buildah commit ${format:+--format ${format}} --reference-time ${TESTDIR}/reference-time-file --signature-policy ${TESTSDIR}/policy.json "$cid" scratch-image${format:+-${format}}
-		buildah commit ${format:+--format ${format}} --reference-time ${TESTDIR}/reference-time-file --signature-policy ${TESTSDIR}/policy.json "$cid" dir:${TESTDIR}/committed${format:+.${format}}
+		# Force no compression to generate what we push.
+		buildah commit -D ${format:+--format ${format}} --reference-time ${TESTDIR}/reference-time-file --signature-policy ${TESTSDIR}/policy.json "$cid" scratch-image${format:+-${format}}
+		buildah commit -D ${format:+--format ${format}} --reference-time ${TESTDIR}/reference-time-file --signature-policy ${TESTSDIR}/policy.json "$cid" dir:${TESTDIR}/committed${format:+.${format}}
 		mkdir -p ${TESTDIR}/pushed${format:+.${format}}
 		buildah push --signature-policy ${TESTSDIR}/policy.json scratch-image${format:+-${format}} dir:${TESTDIR}/pushed${format:+.${format}}
-		diff -u ${TESTDIR}/committed${format:+.${format}}/manifest.json ${TESTDIR}/pushed${format:+.${format}}/manifest.json
+		# Reencode the manifest to lose variations due to different encoders or definitions of structures.
+		imgtype -expected-manifest-type "*" -rebuild-manifest -show-manifest dir:${TESTDIR}/committed${format:+.${format}} > ${TESTDIR}/manifest.committed${format:+.${format}}
+		imgtype -expected-manifest-type "*" -rebuild-manifest -show-manifest dir:${TESTDIR}/pushed${format:+.${format}} > ${TESTDIR}/manifest.pushed${format:+.${format}}
+		diff -u ${TESTDIR}/manifest.committed${format:+.${format}} ${TESTDIR}/manifest.pushed${format:+.${format}}
 		[ "$output" = "" ]
 	done
 	buildah rm "$cid"