1
0
mirror of https://github.com/containers/buildah.git synced 2025-07-30 04:23:09 +03:00

Implement --squash for build-using-dockerfile and commit

Handle a Squash option when committing images, and make it available as
a flag for "buildah commit" and "buildah build-using-dockerfile".

Breaks up containerImageRef.NewImageSource to keep the complexity more
manageable.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>

Closes: #714
Approved by: rhatdan
This commit is contained in:
Nalin Dahyabhai
2018-05-21 17:02:50 -04:00
committed by Atomic Bot
parent e59fd7a19d
commit c806e6e065
10 changed files with 301 additions and 89 deletions

View File

@ -167,10 +167,6 @@ func budCmd(c *cli.Context) error {
logrus.Debugf("build caching not enabled so --rm flag has no effect") logrus.Debugf("build caching not enabled so --rm flag has no effect")
} }
if c.IsSet("squash") {
logrus.Debugf("build caching not enabled so --squash flag has no effect")
}
options := imagebuildah.BuildOptions{ options := imagebuildah.BuildOptions{
ContextDirectory: contextDir, ContextDirectory: contextDir,
PullPolicy: pullPolicy, PullPolicy: pullPolicy,
@ -187,6 +183,7 @@ func budCmd(c *cli.Context) error {
CommonBuildOpts: commonOpts, CommonBuildOpts: commonOpts,
DefaultMountsFilePath: c.GlobalString("default-mounts-file"), DefaultMountsFilePath: c.GlobalString("default-mounts-file"),
IIDFile: c.String("iidfile"), IIDFile: c.String("iidfile"),
Squash: c.Bool("squash"),
Labels: c.StringSlice("label"), Labels: c.StringSlice("label"),
} }

View File

@ -62,6 +62,10 @@ var (
Name: "signature-policy", Name: "signature-policy",
Usage: "`pathname` of signature policy file (not usually used)", Usage: "`pathname` of signature policy file (not usually used)",
}, },
cli.BoolFlag{
Name: "squash",
Usage: "produce an image with only one layer",
},
cli.BoolTFlag{ cli.BoolTFlag{
Name: "tls-verify", Name: "tls-verify",
Usage: "Require HTTPS and verify certificates when accessing the registry", Usage: "Require HTTPS and verify certificates when accessing the registry",
@ -156,6 +160,7 @@ func commitCmd(c *cli.Context) error {
HistoryTimestamp: &timestamp, HistoryTimestamp: &timestamp,
SystemContext: systemContext, SystemContext: systemContext,
IIDFile: c.String("iidfile"), IIDFile: c.String("iidfile"),
Squash: c.Bool("squash"),
} }
if !c.Bool("quiet") { if !c.Bool("quiet") {
options.ReportWriter = os.Stderr options.ReportWriter = os.Stderr

View File

@ -50,6 +50,9 @@ type CommitOptions struct {
SystemContext *types.SystemContext SystemContext *types.SystemContext
// IIDFile tells the builder to write the image ID to the specified file // IIDFile tells the builder to write the image ID to the specified file
IIDFile string IIDFile string
// Squash tells the builder to produce an image with a single layer
// instead of with possibly more than one layer.
Squash bool
// Labels metadata for an image // Labels metadata for an image
Labels []string Labels []string
} }
@ -112,7 +115,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
// Check if we're keeping everything in local storage. If so, we can take certain shortcuts. // Check if we're keeping everything in local storage. If so, we can take certain shortcuts.
_, destIsStorage := dest.Transport().(is.StoreTransport) _, destIsStorage := dest.Transport().(is.StoreTransport)
exporting := !destIsStorage exporting := !destIsStorage
src, err := b.makeImageRef(options.PreferredManifestType, exporting, options.Compression, options.HistoryTimestamp) src, err := b.makeImageRef(options.PreferredManifestType, exporting, options.Squash, options.Compression, options.HistoryTimestamp)
if err != nil { if err != nil {
return imgID, errors.Wrapf(err, "error computing layer digests and building metadata") return imgID, errors.Wrapf(err, "error computing layer digests and building metadata")
} }

View File

@ -308,6 +308,7 @@ return 1
--quiet --quiet
-q -q
--rm --rm
--squash
--tls-verify --tls-verify
" "
@ -359,6 +360,7 @@ return 1
--pull-always --pull-always
--quiet --quiet
-q -q
--squash
--tls-verify --tls-verify
" "

View File

@ -250,7 +250,7 @@ option be used, as the default behavior of using the system-wide default policy
**--squash** **--squash**
Squash newly built layers into a single new layer. Buildah does not currently support caching so this is a NOOP. Squash all of the new image's layers (including those inherited from a base image) into a single new layer.
**--tag, -t** *imageName* **--tag, -t** *imageName*

View File

@ -60,6 +60,10 @@ Pathname of a signature policy file to use. It is not recommended that this
option be used, as the default behavior of using the system-wide default policy option be used, as the default behavior of using the system-wide default policy
(frequently */etc/containers/policy.json*) is most often preferred. (frequently */etc/containers/policy.json*) is most often preferred.
**--squash**
Squash all of the new image's layers (including those inherited from a base image) into a single new layer.
**--tls-verify** *bool-value* **--tls-verify** *bool-value*
Require HTTPS and verify certificates when talking to container registries (defaults to true) Require HTTPS and verify certificates when talking to container registries (defaults to true)

241
image.go
View File

@ -4,6 +4,7 @@ import (
"bytes" "bytes"
"context" "context"
"encoding/json" "encoding/json"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
@ -41,6 +42,8 @@ type containerImageRef struct {
compression archive.Compression compression archive.Compression
name reference.Named name reference.Named
names []string names []string
containerID string
mountLabel string
layerID string layerID string
oconfig []byte oconfig []byte
dconfig []byte dconfig []byte
@ -50,12 +53,15 @@ type containerImageRef struct {
annotations map[string]string annotations map[string]string
preferredManifestType string preferredManifestType string
exporting bool exporting bool
squash bool
} }
type containerImageSource struct { type containerImageSource struct {
path string path string
ref *containerImageRef ref *containerImageRef
store storage.Store store storage.Store
containerID string
mountLabel string
layerID string layerID string
names []string names []string
compression archive.Compression compression archive.Compression
@ -94,6 +100,124 @@ func expectedDockerDiffIDs(image docker.V2Image) int {
return expected return expected
} }
// computeLayerMIMEType picks the OCI and Docker media types that we'll attach
// to a layer blob, given the compression that we're going to apply to it.
// The "what" argument is a human-readable description used only in log output.
func (i *containerImageRef) computeLayerMIMEType(what string) (omediaType, dmediaType string, err error) {
	// Default to the uncompressed-layer media types.
	omediaType = v1.MediaTypeImageLayer
	dmediaType = docker.V2S2MediaTypeUncompressedLayer
	if i.compression == archive.Uncompressed {
		return omediaType, dmediaType, nil
	}
	switch i.compression {
	case archive.Gzip:
		logrus.Debugf("compressing %s with gzip", what)
		omediaType = v1.MediaTypeImageLayerGzip
		dmediaType = docker.V2S2MediaTypeLayer
	case archive.Bzip2:
		// The image specs don't define a media type for bzip2-compressed
		// layers, so even though we can decompress them, we refuse to
		// produce them.
		return "", "", errors.New("media type for bzip2-compressed layers is not defined")
	case archive.Xz:
		// Same story for xz: no media type is defined for it, so we can't
		// emit xz-compressed layers.
		return "", "", errors.New("media type for xz-compressed layers is not defined")
	default:
		logrus.Debugf("compressing %s with unknown compressor(?)", what)
	}
	return omediaType, dmediaType, nil
}
// Extract the container's whole filesystem as if it were a single layer.
// The container stays mounted while the returned stream is being read; the
// returned ReadCloser's Close both closes the tar stream and unmounts the
// container.
func (i *containerImageRef) extractRootfs() (io.ReadCloser, error) {
	// Mount the container so that we can archive its root filesystem.
	mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
	if err != nil {
		return nil, errors.Wrapf(err, "error extracting container %q", i.containerID)
	}
	// Archive the tree uncompressed; the caller applies whatever compression
	// it wants while copying the stream.
	tarOptions := &archive.TarOptions{
		Compression: archive.Uncompressed,
	}
	rc, err := archive.TarWithOptions(mountPoint, tarOptions)
	if err != nil {
		// Don't leave the container mounted if we're bailing out here.
		// The unmount is best-effort: the tar error is the one we report.
		if err2 := i.store.Unmount(i.containerID); err2 != nil {
			logrus.Debugf("error unmounting container %q: %v", i.containerID, err2)
		}
		return nil, errors.Wrapf(err, "error extracting container %q", i.containerID)
	}
	return ioutils.NewReadCloserWrapper(rc, func() error {
		// Close the tar stream first, then unmount.  An unmount error is
		// only surfaced when closing the stream succeeded, so that a read
		// failure isn't masked.
		err := rc.Close()
		if err != nil {
			err = errors.Wrapf(err, "error closing tar archive of container %q", i.containerID)
		}
		if err2 := i.store.Unmount(i.containerID); err == nil {
			if err2 != nil {
				err2 = errors.Wrapf(err2, "error unmounting container %q", i.containerID)
			}
			err = err2
		}
		return err
	}), nil
}
// Build fresh copies of the container configuration structures so that we can edit them
// without making unintended changes to the original Builder.
func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, docker.V2Image, docker.V2S2Manifest, error) {
	created := i.created
	// Decode the saved OCI configuration over a zero-valued image.
	var oimage v1.Image
	if err := json.Unmarshal(i.oconfig, &oimage); err != nil {
		return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
	}
	// Decode the saved Docker configuration over a zero-valued image.
	var dimage docker.V2Image
	if err := json.Unmarshal(i.dconfig, &dimage); err != nil {
		return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
	}
	// We're newer than our base image, so the creation timestamps are always
	// replaced.
	oimage.Created = &created
	dimage.Created = created
	// The diffID lists are always repopulated from scratch, so clear them.
	oimage.RootFS.Type = docker.TypeLayers
	oimage.RootFS.DiffIDs = []digest.Digest{}
	dimage.RootFS = &docker.V2S2RootFS{
		Type:    docker.TypeLayers,
		DiffIDs: []digest.Digest{},
	}
	// History is cleared only when squashing; otherwise we keep it so that
	// new entries can be appended to what the base image recorded.
	if i.squash {
		oimage.History = []v1.History{}
		dimage.History = []docker.V2S2History{}
	}
	// Build empty manifests.  The Layers lists get filled in as each layer
	// blob is produced.
	omanifest := v1.Manifest{
		Versioned: specs.Versioned{
			SchemaVersion: 2,
		},
		Config: v1.Descriptor{
			MediaType: v1.MediaTypeImageConfig,
		},
		Layers:      []v1.Descriptor{},
		Annotations: i.annotations,
	}
	dmanifest := docker.V2S2Manifest{
		V2Versioned: docker.V2Versioned{
			SchemaVersion: 2,
			MediaType:     docker.V2S2MediaTypeManifest,
		},
		Config: docker.V2S2Descriptor{
			MediaType: docker.V2S2MediaTypeImageConfig,
		},
		Layers: []docker.V2S2Descriptor{},
	}
	return oimage, omanifest, dimage, dmanifest, nil
}
func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.SystemContext) (src types.ImageSource, err error) { func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.SystemContext) (src types.ImageSource, err error) {
// Decide which type of manifest and configuration output we're going to provide. // Decide which type of manifest and configuration output we're going to provide.
manifestType := i.preferredManifestType manifestType := i.preferredManifestType
@ -109,11 +233,12 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "unable to read layer %q", layerID) return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
} }
// Walk the list of parent layers, prepending each as we go. // Walk the list of parent layers, prepending each as we go. If we're squashing,
// stop at the layer ID of the top layer, which we won't really be using anyway.
for layer != nil { for layer != nil {
layers = append(append([]string{}, layerID), layers...) layers = append(append([]string{}, layerID), layers...)
layerID = layer.Parent layerID = layer.Parent
if layerID == "" { if layerID == "" || i.squash {
err = nil err = nil
break break
} }
@ -139,57 +264,25 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
} }
}() }()
// Build fresh copies of the configurations so that we don't mess with the values in the Builder // Build fresh copies of the configurations and manifest so that we don't mess with any
// object itself. // values in the Builder object itself.
oimage := v1.Image{} oimage, omanifest, dimage, dmanifest, err := i.createConfigsAndManifests()
err = json.Unmarshal(i.oconfig, &oimage)
if err != nil { if err != nil {
return nil, err return nil, err
} }
created := i.created
oimage.Created = &created
dimage := docker.V2Image{}
err = json.Unmarshal(i.dconfig, &dimage)
if err != nil {
return nil, err
}
dimage.Created = created
// Start building manifests.
omanifest := v1.Manifest{
Versioned: specs.Versioned{
SchemaVersion: 2,
},
Config: v1.Descriptor{
MediaType: v1.MediaTypeImageConfig,
},
Layers: []v1.Descriptor{},
Annotations: i.annotations,
}
dmanifest := docker.V2S2Manifest{
V2Versioned: docker.V2Versioned{
SchemaVersion: 2,
MediaType: docker.V2S2MediaTypeManifest,
},
Config: docker.V2S2Descriptor{
MediaType: docker.V2S2MediaTypeImageConfig,
},
Layers: []docker.V2S2Descriptor{},
}
oimage.RootFS.Type = docker.TypeLayers
oimage.RootFS.DiffIDs = []digest.Digest{}
dimage.RootFS = &docker.V2S2RootFS{}
dimage.RootFS.Type = docker.TypeLayers
dimage.RootFS.DiffIDs = []digest.Digest{}
// Extract each layer and compute its digests, both compressed (if requested) and uncompressed. // Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
for _, layerID := range layers { for _, layerID := range layers {
what := fmt.Sprintf("layer %q", layerID)
if i.squash {
what = fmt.Sprintf("container %q", i.containerID)
}
// The default layer media type assumes no compression. // The default layer media type assumes no compression.
omediaType := v1.MediaTypeImageLayer omediaType := v1.MediaTypeImageLayer
dmediaType := docker.V2S2MediaTypeUncompressedLayer dmediaType := docker.V2S2MediaTypeUncompressedLayer
// If we're not re-exporting the data, reuse the blobsum and diff IDs. // If we're not re-exporting the data, and we're reusing layers individually, reuse
if !i.exporting && layerID != i.layerID { // the blobsum and diff IDs.
if !i.exporting && !i.squash && layerID != i.layerID {
layer, err2 := i.store.Layer(layerID) layer, err2 := i.store.Layer(layerID)
if err2 != nil { if err2 != nil {
return nil, errors.Wrapf(err, "unable to locate layer %q", layerID) return nil, errors.Wrapf(err, "unable to locate layer %q", layerID)
@ -218,40 +311,37 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
continue continue
} }
// Figure out if we need to change the media type, in case we're using compression. // Figure out if we need to change the media type, in case we're using compression.
if i.compression != archive.Uncompressed { omediaType, dmediaType, err = i.computeLayerMIMEType(what)
switch i.compression { if err != nil {
case archive.Gzip: return nil, err
omediaType = v1.MediaTypeImageLayerGzip
dmediaType = docker.V2S2MediaTypeLayer
logrus.Debugf("compressing layer %q with gzip", layerID)
case archive.Bzip2:
// Until the image specs define a media type for bzip2-compressed layers, even if we know
// how to decompress them, we can't try to compress layers with bzip2.
return nil, errors.New("media type for bzip2-compressed layers is not defined")
case archive.Xz:
// Until the image specs define a media type for xz-compressed layers, even if we know
// how to decompress them, we can't try to compress layers with xz.
return nil, errors.New("media type for xz-compressed layers is not defined")
default:
logrus.Debugf("compressing layer %q with unknown compressor(?)", layerID)
}
} }
// Start reading the layer. // Start reading either the layer or the whole container rootfs.
noCompression := archive.Uncompressed noCompression := archive.Uncompressed
diffOptions := &storage.DiffOptions{ diffOptions := &storage.DiffOptions{
Compression: &noCompression, Compression: &noCompression,
} }
rc, err := i.store.Diff("", layerID, diffOptions) var rc io.ReadCloser
if err != nil { if i.squash {
return nil, errors.Wrapf(err, "error extracting layer %q", layerID) // Extract the root filesystem as a single layer.
rc, err = i.extractRootfs()
if err != nil {
return nil, err
}
defer rc.Close()
} else {
// Extract this layer, one of possibly many.
rc, err = i.store.Diff("", layerID, diffOptions)
if err != nil {
return nil, errors.Wrapf(err, "error extracting %s", what)
}
defer rc.Close()
} }
defer rc.Close()
srcHasher := digest.Canonical.Digester() srcHasher := digest.Canonical.Digester()
reader := io.TeeReader(rc, srcHasher.Hash()) reader := io.TeeReader(rc, srcHasher.Hash())
// Set up to write the possibly-recompressed blob. // Set up to write the possibly-recompressed blob.
layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600) layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "error opening file for layer %q", layerID) return nil, errors.Wrapf(err, "error opening file for %s", what)
} }
destHasher := digest.Canonical.Digester() destHasher := digest.Canonical.Digester()
counter := ioutils.NewWriteCounter(layerFile) counter := ioutils.NewWriteCounter(layerFile)
@ -259,26 +349,26 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// Compress the layer, if we're recompressing it. // Compress the layer, if we're recompressing it.
writer, err := archive.CompressStream(multiWriter, i.compression) writer, err := archive.CompressStream(multiWriter, i.compression)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "error compressing layer %q", layerID) return nil, errors.Wrapf(err, "error compressing %s", what)
} }
size, err := io.Copy(writer, reader) size, err := io.Copy(writer, reader)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "error storing layer %q to file", layerID) return nil, errors.Wrapf(err, "error storing %s to file", what)
} }
writer.Close() writer.Close()
layerFile.Close() layerFile.Close()
if i.compression == archive.Uncompressed { if i.compression == archive.Uncompressed {
if size != counter.Count { if size != counter.Count {
return nil, errors.Errorf("error storing layer %q to file: inconsistent layer size (copied %d, wrote %d)", layerID, size, counter.Count) return nil, errors.Errorf("error storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count)
} }
} else { } else {
size = counter.Count size = counter.Count
} }
logrus.Debugf("layer %q size is %d bytes", layerID, size) logrus.Debugf("%s size is %d bytes", what, size)
// Rename the layer so that we can more easily find it by digest later. // Rename the layer so that we can more easily find it by digest later.
err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String())) err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String()))
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "error storing layer %q to file", layerID) return nil, errors.Wrapf(err, "error storing %s to file", what)
} }
// Add a note in the manifest about the layer. The blobs are identified by their possibly- // Add a note in the manifest about the layer. The blobs are identified by their possibly-
// compressed blob digests. // compressed blob digests.
@ -383,6 +473,8 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
path: path, path: path,
ref: i, ref: i,
store: i.store, store: i.store,
containerID: i.containerID,
mountLabel: i.mountLabel,
layerID: i.layerID, layerID: i.layerID,
names: i.names, names: i.names,
compression: i.compression, compression: i.compression,
@ -488,7 +580,7 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo)
return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil
} }
func (b *Builder) makeImageRef(manifestType string, exporting bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) { func (b *Builder) makeImageRef(manifestType string, exporting bool, squash bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) {
var name reference.Named var name reference.Named
container, err := b.store.Container(b.ContainerID) container, err := b.store.Container(b.ContainerID)
if err != nil { if err != nil {
@ -519,6 +611,8 @@ func (b *Builder) makeImageRef(manifestType string, exporting bool, compress arc
compression: compress, compression: compress,
name: name, name: name,
names: container.Names, names: container.Names,
containerID: container.ID,
mountLabel: b.MountLabel,
layerID: container.LayerID, layerID: container.LayerID,
oconfig: oconfig, oconfig: oconfig,
dconfig: dconfig, dconfig: dconfig,
@ -528,6 +622,7 @@ func (b *Builder) makeImageRef(manifestType string, exporting bool, compress arc
annotations: b.Annotations(), annotations: b.Annotations(),
preferredManifestType: manifestType, preferredManifestType: manifestType,
exporting: exporting, exporting: exporting,
squash: squash,
} }
return ref, nil return ref, nil
} }

View File

@ -113,6 +113,9 @@ type BuildOptions struct {
DefaultMountsFilePath string DefaultMountsFilePath string
// IIDFile tells the builder to write the image ID to the specified file // IIDFile tells the builder to write the image ID to the specified file
IIDFile string IIDFile string
// Squash tells the builder to produce an image with a single layer
// instead of with possibly more than one layer.
Squash bool
// Labels metadata for an image // Labels metadata for an image
Labels []string Labels []string
} }
@ -152,6 +155,7 @@ type Executor struct {
commonBuildOptions *buildah.CommonBuildOptions commonBuildOptions *buildah.CommonBuildOptions
defaultMountsFilePath string defaultMountsFilePath string
iidfile string iidfile string
squash bool
labels []string labels []string
} }
@ -485,6 +489,7 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
commonBuildOptions: options.CommonBuildOpts, commonBuildOptions: options.CommonBuildOpts,
defaultMountsFilePath: options.DefaultMountsFilePath, defaultMountsFilePath: options.DefaultMountsFilePath,
iidfile: options.IIDFile, iidfile: options.IIDFile,
squash: options.Squash,
labels: options.Labels, labels: options.Labels,
} }
if exec.err == nil { if exec.err == nil {
@ -696,6 +701,7 @@ func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder) (err er
ReportWriter: b.reportWriter, ReportWriter: b.reportWriter,
PreferredManifestType: b.outputFormat, PreferredManifestType: b.outputFormat,
IIDFile: b.iidfile, IIDFile: b.iidfile,
Squash: b.squash,
Labels: b.labels, Labels: b.labels,
} }
imgID, err := b.builder.Commit(ctx, imageRef, options) imgID, err := b.builder.Commit(ctx, imageRef, options)

View File

@ -595,16 +595,6 @@ load helpers
buildah rmi ${target} buildah rmi ${target}
} }
@test "bud with --squash noop flag" {
target=noop-image
run buildah bud --squash --signature-policy ${TESTSDIR}/policy.json -t ${target} -f ${TESTSDIR}/bud/run-scenarios/Dockerfile.noop-flags
echo "$output"
[ "$status" -eq 0 ]
cid=$(buildah from ${target})
buildah rm ${cid}
buildah rmi ${target}
}
@test "bud with --cpu-shares flag, no argument" { @test "bud with --cpu-shares flag, no argument" {
target=bud-flag target=bud-flag
run buildah bud --cpu-shares --signature-policy ${TESTSDIR}/policy.json -t ${target} -f ${TESTSDIR}/bud/from-scratch/Dockerfile run buildah bud --cpu-shares --signature-policy ${TESTSDIR}/policy.json -t ${target} -f ${TESTSDIR}/bud/from-scratch/Dockerfile

110
tests/squash.bats Normal file
View File

@ -0,0 +1,110 @@
#!/usr/bin/env bats
load helpers
@test "squash" {
  createrandom ${TESTDIR}/randomfile
  cid=$(buildah from scratch)
  image=stage0
  # remove[0] is the stage at which we delete a file, remove[1] is the stage
  # whose file gets deleted — this checks that squashing honors whiteouts.
  remove=(8 5)
  # Build a 10-layer image by committing one new layer per stage.
  for stage in $(seq 10) ; do
    buildah copy "$cid" ${TESTDIR}/randomfile /layer${stage}
    image=stage${stage}
    if test $stage -eq ${remove[0]} ; then
      mountpoint=$(buildah mount "$cid")
      rm -f ${mountpoint}/layer${remove[1]}
    fi
    buildah commit --signature-policy ${TESTSDIR}/policy.json --rm "$cid" ${image}
    # Each stage should add exactly one diffID to both config formats.
    run buildah --debug=false inspect -t image -f '{{len .Docker.RootFS.DiffIDs}}' ${image}
    echo "$output"
    [ "$status" -eq 0 ]
    [ "$output" -eq $stage ]
    run buildah --debug=false inspect -t image -f '{{len .OCIv1.RootFS.DiffIDs}}' ${image}
    echo "$output"
    [ "$status" -eq 0 ]
    [ "$output" -eq $stage ]
    cid=$(buildah from ${image})
  done
  # Commit with --squash: the result should have exactly one layer and one
  # history entry in both the Docker and OCI configurations.
  buildah commit --signature-policy ${TESTSDIR}/policy.json --rm --squash "$cid" squashed
  run buildah --debug=false inspect -t image -f '{{len .Docker.RootFS.DiffIDs}}' squashed
  echo "$output"
  [ "$status" -eq 0 ]
  [ "$output" -eq 1 ]
  run buildah --debug=false inspect -t image -f '{{len .OCIv1.RootFS.DiffIDs}}' squashed
  echo "$output"
  [ "$status" -eq 0 ]
  [ "$output" -eq 1 ]
  run buildah --debug=false inspect -t image -f '{{len .Docker.History}}' squashed
  echo "$output"
  [ "$status" -eq 0 ]
  [ "$output" -eq 1 ]
  run buildah --debug=false inspect -t image -f '{{len .OCIv1.History}}' squashed
  echo "$output"
  [ "$status" -eq 0 ]
  [ "$output" -eq 1 ]
  # Verify the squashed contents: every layer file is intact except the one
  # that was deleted mid-build, which must not reappear.
  cid=$(buildah from squashed)
  mountpoint=$(buildah mount $cid)
  for stage in $(seq 10) ; do
    if test $stage -eq ${remove[1]} ; then
      if test -e $mountpoint/layer${remove[1]} ; then
        echo file /layer${remove[1]} should not be there
        exit 1
      fi
      continue
    fi
    cmp $mountpoint/layer${stage} ${TESTDIR}/randomfile
  done
}
@test "squash-using-dockerfile" {
  createrandom ${TESTDIR}/randomfile
  image=stage0
  from=scratch
  # Build a chain of ten single-COPY Dockerfile stages, each FROM the last.
  for stage in $(seq 10) ; do
    mkdir -p ${TESTDIR}/stage${stage}
    echo FROM ${from} > ${TESTDIR}/stage${stage}/Dockerfile
    cp ${TESTDIR}/randomfile ${TESTDIR}/stage${stage}/
    echo COPY randomfile /layer${stage} >> ${TESTDIR}/stage${stage}/Dockerfile
    image=stage${stage}
    from=${image}
    buildah build-using-dockerfile --signature-policy ${TESTSDIR}/policy.json -t ${image} ${TESTDIR}/stage${stage}
    # Each stage should add exactly one diffID to both config formats.
    run buildah --debug=false inspect -t image -f '{{len .Docker.RootFS.DiffIDs}}' ${image}
    echo "$output"
    [ "$status" -eq 0 ]
    [ "$output" -eq $stage ]
    run buildah --debug=false inspect -t image -f '{{len .OCIv1.RootFS.DiffIDs}}' ${image}
    echo "$output"
    [ "$status" -eq 0 ]
    [ "$output" -eq $stage ]
  done
  # Build one more stage with --squash; the result should collapse to a
  # single layer and a single history entry.
  mkdir -p ${TESTDIR}/squashed
  echo FROM ${from} > ${TESTDIR}/squashed/Dockerfile
  cp ${TESTDIR}/randomfile ${TESTDIR}/squashed/
  # BUGFIX: this COPY belongs in the squashed stage's Dockerfile; it was
  # previously appended to the last numbered stage's Dockerfile instead.
  echo COPY randomfile /layer-squashed >> ${TESTDIR}/squashed/Dockerfile
  buildah build-using-dockerfile --signature-policy ${TESTSDIR}/policy.json --squash -t squashed ${TESTDIR}/squashed
  run buildah --debug=false inspect -t image -f '{{len .Docker.RootFS.DiffIDs}}' squashed
  echo "$output"
  [ "$status" -eq 0 ]
  [ "$output" -eq 1 ]
  run buildah --debug=false inspect -t image -f '{{len .OCIv1.RootFS.DiffIDs}}' squashed
  echo "$output"
  [ "$status" -eq 0 ]
  [ "$output" -eq 1 ]
  run buildah --debug=false inspect -t image -f '{{len .Docker.History}}' squashed
  echo "$output"
  [ "$status" -eq 0 ]
  [ "$output" -eq 1 ]
  run buildah --debug=false inspect -t image -f '{{len .OCIv1.History}}' squashed
  echo "$output"
  [ "$status" -eq 0 ]
  [ "$output" -eq 1 ]
  # Verify that every stage's file, plus the squashed stage's own file,
  # survived the squash.
  cid=$(buildah from squashed)
  mountpoint=$(buildah mount $cid)
  for stage in $(seq 10) ; do
    cmp $mountpoint/layer${stage} ${TESTDIR}/randomfile
  done
  cmp $mountpoint/layer-squashed ${TESTDIR}/randomfile
}