diff --git a/cmd/buildah/bud.go b/cmd/buildah/bud.go
index 665481d9e..4cd9043d7 100644
--- a/cmd/buildah/bud.go
+++ b/cmd/buildah/bud.go
@@ -167,10 +167,6 @@ func budCmd(c *cli.Context) error {
 		logrus.Debugf("build caching not enabled so --rm flag has no effect")
 	}
 
-	if c.IsSet("squash") {
-		logrus.Debugf("build caching not enabled so --squash flag has no effect")
-	}
-
 	options := imagebuildah.BuildOptions{
 		ContextDirectory: contextDir,
 		PullPolicy: pullPolicy,
@@ -187,6 +183,7 @@ func budCmd(c *cli.Context) error {
 		CommonBuildOpts: commonOpts,
 		DefaultMountsFilePath: c.GlobalString("default-mounts-file"),
 		IIDFile: c.String("iidfile"),
+		Squash: c.Bool("squash"),
 		Labels: c.StringSlice("label"),
 	}
 
diff --git a/cmd/buildah/commit.go b/cmd/buildah/commit.go
index b91a372be..5fc7ecf3f 100644
--- a/cmd/buildah/commit.go
+++ b/cmd/buildah/commit.go
@@ -62,6 +62,10 @@ var (
 			Name: "signature-policy",
 			Usage: "`pathname` of signature policy file (not usually used)",
 		},
+		cli.BoolFlag{
+			Name: "squash",
+			Usage: "produce an image with only one layer",
+		},
 		cli.BoolTFlag{
 			Name: "tls-verify",
 			Usage: "Require HTTPS and verify certificates when accessing the registry",
@@ -156,6 +160,7 @@ func commitCmd(c *cli.Context) error {
 		HistoryTimestamp: &timestamp,
 		SystemContext: systemContext,
 		IIDFile: c.String("iidfile"),
+		Squash: c.Bool("squash"),
 	}
 	if !c.Bool("quiet") {
 		options.ReportWriter = os.Stderr
diff --git a/commit.go b/commit.go
index c24c00602..ec8c8b6de 100644
--- a/commit.go
+++ b/commit.go
@@ -50,6 +50,9 @@ type CommitOptions struct {
 	SystemContext *types.SystemContext
 	// IIDFile tells the builder to write the image ID to the specified file
 	IIDFile string
+	// Squash tells the builder to produce an image with a single layer
+	// instead of with possibly more than one layer.
+	Squash bool
 	// Labels metadata for an image
 	Labels []string
 }
@@ -112,7 +115,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
 	// Check if we're keeping everything in local storage. If so, we can take certain shortcuts.
 	_, destIsStorage := dest.Transport().(is.StoreTransport)
 	exporting := !destIsStorage
-	src, err := b.makeImageRef(options.PreferredManifestType, exporting, options.Compression, options.HistoryTimestamp)
+	src, err := b.makeImageRef(options.PreferredManifestType, exporting, options.Squash, options.Compression, options.HistoryTimestamp)
 	if err != nil {
 		return imgID, errors.Wrapf(err, "error computing layer digests and building metadata")
 	}
diff --git a/contrib/completions/bash/buildah b/contrib/completions/bash/buildah
index 501b1fdcd..a18150825 100644
--- a/contrib/completions/bash/buildah
+++ b/contrib/completions/bash/buildah
@@ -308,6 +308,7 @@ return 1
   --quiet -q
   --rm
+  --squash
   --tls-verify
   "
 
@@ -359,6 +360,7 @@ return 1
   --pull-always
   --quiet -q
+  --squash
   --tls-verify
   "
 
diff --git a/docs/buildah-bud.md b/docs/buildah-bud.md
index 49b1ed260..c50330000 100644
--- a/docs/buildah-bud.md
+++ b/docs/buildah-bud.md
@@ -250,7 +250,7 @@ option be used, as the default behavior of using the system-wide default policy
 
 **--squash**
 
-Squash newly built layers into a single new layer. Buildah does not currently support caching so this is a NOOP.
+Squash all of the new image's layers (including those inherited from a base image) into a single new layer.
 
 **--tag, -t** *imageName*
 
diff --git a/docs/buildah-commit.md b/docs/buildah-commit.md
index c9ba8b61d..70d43c6ab 100644
--- a/docs/buildah-commit.md
+++ b/docs/buildah-commit.md
@@ -60,6 +60,10 @@ Pathname of a signature policy file to use.
 It is not recommended that this option be used, as the default behavior of using the
 system-wide default policy (frequently */etc/containers/policy.json*) is most often preferred.
 
+**--squash**
+
+Squash all of the new image's layers (including those inherited from a base image) into a single new layer.
+
 **--tls-verify** *bool-value*
 
 Require HTTPS and verify certificates when talking to container registries (defaults to true)
diff --git a/image.go b/image.go
index 678a13040..c66a5cd08 100644
--- a/image.go
+++ b/image.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
@@ -41,6 +42,8 @@ type containerImageRef struct {
 	compression archive.Compression
 	name reference.Named
 	names []string
+	containerID string
+	mountLabel string
 	layerID string
 	oconfig []byte
 	dconfig []byte
@@ -50,12 +53,15 @@ type containerImageRef struct {
 	annotations map[string]string
 	preferredManifestType string
 	exporting bool
+	squash bool
 }
 
 type containerImageSource struct {
 	path string
 	ref *containerImageRef
 	store storage.Store
+	containerID string
+	mountLabel string
 	layerID string
 	names []string
 	compression archive.Compression
@@ -94,6 +100,124 @@ func expectedDockerDiffIDs(image docker.V2Image) int {
 	return expected
 }
 
+// Compute the media types which we need to attach to a layer, given the type of
+// compression that we'll be applying.
+func (i *containerImageRef) computeLayerMIMEType(what string) (omediaType, dmediaType string, err error) {
+	omediaType = v1.MediaTypeImageLayer
+	dmediaType = docker.V2S2MediaTypeUncompressedLayer
+	if i.compression != archive.Uncompressed {
+		switch i.compression {
+		case archive.Gzip:
+			omediaType = v1.MediaTypeImageLayerGzip
+			dmediaType = docker.V2S2MediaTypeLayer
+			logrus.Debugf("compressing %s with gzip", what)
+		case archive.Bzip2:
+			// Until the image specs define a media type for bzip2-compressed layers, even if we know
+			// how to decompress them, we can't try to compress layers with bzip2.
+			return "", "", errors.New("media type for bzip2-compressed layers is not defined")
+		case archive.Xz:
+			// Until the image specs define a media type for xz-compressed layers, even if we know
+			// how to decompress them, we can't try to compress layers with xz.
+			return "", "", errors.New("media type for xz-compressed layers is not defined")
+		default:
+			logrus.Debugf("compressing %s with unknown compressor(?)", what)
+		}
+	}
+	return omediaType, dmediaType, nil
+}
+
+// Extract the container's whole filesystem as if it were a single layer.
+func (i *containerImageRef) extractRootfs() (io.ReadCloser, error) {
+	mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error extracting container %q", i.containerID)
+	}
+	tarOptions := &archive.TarOptions{
+		Compression: archive.Uncompressed,
+	}
+	rc, err := archive.TarWithOptions(mountPoint, tarOptions)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error extracting container %q", i.containerID)
+	}
+	return ioutils.NewReadCloserWrapper(rc, func() error {
+		err := rc.Close()
+		if err != nil {
+			err = errors.Wrapf(err, "error closing tar archive of container %q", i.containerID)
+		}
+		if err2 := i.store.Unmount(i.containerID); err == nil {
+			if err2 != nil {
+				err2 = errors.Wrapf(err2, "error unmounting container %q", i.containerID)
+			}
+			err = err2
+		}
+		return err
+	}), nil
+}
+
+// Build fresh copies of the container configuration structures so that we can edit them
+// without making unintended changes to the original Builder.
+func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, docker.V2Image, docker.V2S2Manifest, error) {
+	created := i.created
+
+	// Build an empty image, and then decode over it.
+	oimage := v1.Image{}
+	if err := json.Unmarshal(i.oconfig, &oimage); err != nil {
+		return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
+	}
+	// Always replace this value, since we're newer than our base image.
+	oimage.Created = &created
+	// Clear the list of diffIDs, since we always repopulate it.
+	oimage.RootFS.Type = docker.TypeLayers
+	oimage.RootFS.DiffIDs = []digest.Digest{}
+	// Only clear the history if we're squashing, otherwise leave it be so that we can append
+	// entries to it.
+	if i.squash {
+		oimage.History = []v1.History{}
+	}
+
+	// Build an empty image, and then decode over it.
+	dimage := docker.V2Image{}
+	if err := json.Unmarshal(i.dconfig, &dimage); err != nil {
+		return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
+	}
+	// Always replace this value, since we're newer than our base image.
+	dimage.Created = created
+	// Clear the list of diffIDs, since we always repopulate it.
+	dimage.RootFS = &docker.V2S2RootFS{}
+	dimage.RootFS.Type = docker.TypeLayers
+	dimage.RootFS.DiffIDs = []digest.Digest{}
+	// Only clear the history if we're squashing, otherwise leave it be so that we can append
+	// entries to it.
+	if i.squash {
+		dimage.History = []docker.V2S2History{}
+	}
+
+	// Build empty manifests. The Layers lists will be populated later.
+	omanifest := v1.Manifest{
+		Versioned: specs.Versioned{
+			SchemaVersion: 2,
+		},
+		Config: v1.Descriptor{
+			MediaType: v1.MediaTypeImageConfig,
+		},
+		Layers: []v1.Descriptor{},
+		Annotations: i.annotations,
+	}
+
+	dmanifest := docker.V2S2Manifest{
+		V2Versioned: docker.V2Versioned{
+			SchemaVersion: 2,
+			MediaType: docker.V2S2MediaTypeManifest,
+		},
+		Config: docker.V2S2Descriptor{
+			MediaType: docker.V2S2MediaTypeImageConfig,
+		},
+		Layers: []docker.V2S2Descriptor{},
+	}
+
+	return oimage, omanifest, dimage, dmanifest, nil
+}
+
 func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.SystemContext) (src types.ImageSource, err error) {
 	// Decide which type of manifest and configuration output we're going to provide.
 	manifestType := i.preferredManifestType
@@ -109,11 +233,12 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
 	if err != nil {
 		return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
 	}
-	// Walk the list of parent layers, prepending each as we go.
+	// Walk the list of parent layers, prepending each as we go. If we're squashing,
+	// stop at the layer ID of the top layer, which we won't really be using anyway.
 	for layer != nil {
 		layers = append(append([]string{}, layerID), layers...)
 		layerID = layer.Parent
-		if layerID == "" {
+		if layerID == "" || i.squash {
 			err = nil
 			break
 		}
@@ -139,57 +264,25 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
 		}
 	}()
 
-	// Build fresh copies of the configurations so that we don't mess with the values in the Builder
-	// object itself.
-	oimage := v1.Image{}
-	err = json.Unmarshal(i.oconfig, &oimage)
+	// Build fresh copies of the configurations and manifest so that we don't mess with any
+	// values in the Builder object itself.
+	oimage, omanifest, dimage, dmanifest, err := i.createConfigsAndManifests()
 	if err != nil {
 		return nil, err
 	}
-	created := i.created
-	oimage.Created = &created
-	dimage := docker.V2Image{}
-	err = json.Unmarshal(i.dconfig, &dimage)
-	if err != nil {
-		return nil, err
-	}
-	dimage.Created = created
-
-	// Start building manifests.
-	omanifest := v1.Manifest{
-		Versioned: specs.Versioned{
-			SchemaVersion: 2,
-		},
-		Config: v1.Descriptor{
-			MediaType: v1.MediaTypeImageConfig,
-		},
-		Layers: []v1.Descriptor{},
-		Annotations: i.annotations,
-	}
-	dmanifest := docker.V2S2Manifest{
-		V2Versioned: docker.V2Versioned{
-			SchemaVersion: 2,
-			MediaType: docker.V2S2MediaTypeManifest,
-		},
-		Config: docker.V2S2Descriptor{
-			MediaType: docker.V2S2MediaTypeImageConfig,
-		},
-		Layers: []docker.V2S2Descriptor{},
-	}
-
-	oimage.RootFS.Type = docker.TypeLayers
-	oimage.RootFS.DiffIDs = []digest.Digest{}
-	dimage.RootFS = &docker.V2S2RootFS{}
-	dimage.RootFS.Type = docker.TypeLayers
-	dimage.RootFS.DiffIDs = []digest.Digest{}
 
 	// Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
 	for _, layerID := range layers {
+		what := fmt.Sprintf("layer %q", layerID)
+		if i.squash {
+			what = fmt.Sprintf("container %q", i.containerID)
+		}
 		// The default layer media type assumes no compression.
 		omediaType := v1.MediaTypeImageLayer
 		dmediaType := docker.V2S2MediaTypeUncompressedLayer
-		// If we're not re-exporting the data, reuse the blobsum and diff IDs.
-		if !i.exporting && layerID != i.layerID {
+		// If we're not re-exporting the data, and we're reusing layers individually, reuse
+		// the blobsum and diff IDs.
+		if !i.exporting && !i.squash && layerID != i.layerID {
 			layer, err2 := i.store.Layer(layerID)
 			if err2 != nil {
 				return nil, errors.Wrapf(err, "unable to locate layer %q", layerID)
@@ -218,40 +311,37 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
 			continue
 		}
 		// Figure out if we need to change the media type, in case we're using compression.
-		if i.compression != archive.Uncompressed {
-			switch i.compression {
-			case archive.Gzip:
-				omediaType = v1.MediaTypeImageLayerGzip
-				dmediaType = docker.V2S2MediaTypeLayer
-				logrus.Debugf("compressing layer %q with gzip", layerID)
-			case archive.Bzip2:
-				// Until the image specs define a media type for bzip2-compressed layers, even if we know
-				// how to decompress them, we can't try to compress layers with bzip2.
-				return nil, errors.New("media type for bzip2-compressed layers is not defined")
-			case archive.Xz:
-				// Until the image specs define a media type for xz-compressed layers, even if we know
-				// how to decompress them, we can't try to compress layers with xz.
-				return nil, errors.New("media type for xz-compressed layers is not defined")
-			default:
-				logrus.Debugf("compressing layer %q with unknown compressor(?)", layerID)
-			}
+		omediaType, dmediaType, err = i.computeLayerMIMEType(what)
+		if err != nil {
+			return nil, err
 		}
-		// Start reading the layer.
+		// Start reading either the layer or the whole container rootfs.
 		noCompression := archive.Uncompressed
 		diffOptions := &storage.DiffOptions{
 			Compression: &noCompression,
 		}
-		rc, err := i.store.Diff("", layerID, diffOptions)
-		if err != nil {
-			return nil, errors.Wrapf(err, "error extracting layer %q", layerID)
+		var rc io.ReadCloser
+		if i.squash {
+			// Extract the root filesystem as a single layer.
+			rc, err = i.extractRootfs()
+			if err != nil {
+				return nil, err
+			}
+			defer rc.Close()
+		} else {
+			// Extract this layer, one of possibly many.
+			rc, err = i.store.Diff("", layerID, diffOptions)
+			if err != nil {
+				return nil, errors.Wrapf(err, "error extracting %s", what)
+			}
+			defer rc.Close()
 		}
-		defer rc.Close()
 		srcHasher := digest.Canonical.Digester()
 		reader := io.TeeReader(rc, srcHasher.Hash())
 		// Set up to write the possibly-recompressed blob.
 		layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
 		if err != nil {
-			return nil, errors.Wrapf(err, "error opening file for layer %q", layerID)
+			return nil, errors.Wrapf(err, "error opening file for %s", what)
 		}
 		destHasher := digest.Canonical.Digester()
 		counter := ioutils.NewWriteCounter(layerFile)
@@ -259,26 +349,26 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
 		// Compress the layer, if we're recompressing it.
 		writer, err := archive.CompressStream(multiWriter, i.compression)
 		if err != nil {
-			return nil, errors.Wrapf(err, "error compressing layer %q", layerID)
+			return nil, errors.Wrapf(err, "error compressing %s", what)
 		}
 		size, err := io.Copy(writer, reader)
 		if err != nil {
-			return nil, errors.Wrapf(err, "error storing layer %q to file", layerID)
+			return nil, errors.Wrapf(err, "error storing %s to file", what)
 		}
 		writer.Close()
 		layerFile.Close()
 		if i.compression == archive.Uncompressed {
 			if size != counter.Count {
-				return nil, errors.Errorf("error storing layer %q to file: inconsistent layer size (copied %d, wrote %d)", layerID, size, counter.Count)
+				return nil, errors.Errorf("error storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count)
 			}
 		} else {
 			size = counter.Count
 		}
-		logrus.Debugf("layer %q size is %d bytes", layerID, size)
+		logrus.Debugf("%s size is %d bytes", what, size)
 		// Rename the layer so that we can more easily find it by digest later.
 		err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String()))
 		if err != nil {
-			return nil, errors.Wrapf(err, "error storing layer %q to file", layerID)
+			return nil, errors.Wrapf(err, "error storing %s to file", what)
 		}
 		// Add a note in the manifest about the layer. The blobs are identified by their possibly-
 		// compressed blob digests.
@@ -383,6 +473,8 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
 		path: path,
 		ref: i,
 		store: i.store,
+		containerID: i.containerID,
+		mountLabel: i.mountLabel,
 		layerID: i.layerID,
 		names: i.names,
 		compression: i.compression,
@@ -488,7 +580,7 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo)
 	return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil
 }
 
-func (b *Builder) makeImageRef(manifestType string, exporting bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) {
+func (b *Builder) makeImageRef(manifestType string, exporting bool, squash bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) {
 	var name reference.Named
 	container, err := b.store.Container(b.ContainerID)
 	if err != nil {
@@ -519,6 +611,8 @@ func (b *Builder) makeImageRef(manifestType string, exporting bool, compress arc
 		compression: compress,
 		name: name,
 		names: container.Names,
+		containerID: container.ID,
+		mountLabel: b.MountLabel,
 		layerID: container.LayerID,
 		oconfig: oconfig,
 		dconfig: dconfig,
@@ -528,6 +622,7 @@ func (b *Builder) makeImageRef(manifestType string, exporting bool, compress arc
 		annotations: b.Annotations(),
 		preferredManifestType: manifestType,
 		exporting: exporting,
+		squash: squash,
 	}
 	return ref, nil
 }
diff --git a/imagebuildah/build.go b/imagebuildah/build.go
index 3558d785d..a18638c8c 100644
--- a/imagebuildah/build.go
+++ b/imagebuildah/build.go
@@ -113,6 +113,9 @@ type BuildOptions struct {
 	DefaultMountsFilePath string
 	// IIDFile tells the builder to write the image ID to the specified file
 	IIDFile string
+	// Squash tells the builder to produce an image with a single layer
+	// instead of with possibly more than one layer.
+	Squash bool
 	// Labels metadata for an image
 	Labels []string
 }
@@ -152,6 +155,7 @@ type Executor struct {
 	commonBuildOptions *buildah.CommonBuildOptions
 	defaultMountsFilePath string
 	iidfile string
+	squash bool
 	labels []string
 }
 
@@ -485,6 +489,7 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
 		commonBuildOptions: options.CommonBuildOpts,
 		defaultMountsFilePath: options.DefaultMountsFilePath,
 		iidfile: options.IIDFile,
+		squash: options.Squash,
 		labels: options.Labels,
 	}
 	if exec.err == nil {
@@ -696,6 +701,7 @@ func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder) (err er
 		ReportWriter: b.reportWriter,
 		PreferredManifestType: b.outputFormat,
 		IIDFile: b.iidfile,
+		Squash: b.squash,
 		Labels: b.labels,
 	}
 	imgID, err := b.builder.Commit(ctx, imageRef, options)
diff --git a/tests/bud.bats b/tests/bud.bats
index 10131ca0d..23442bbf8 100644
--- a/tests/bud.bats
+++ b/tests/bud.bats
@@ -595,16 +595,6 @@ load helpers
 	buildah rmi ${target}
 }
 
-@test "bud with --squash noop flag" {
-  target=noop-image
-  run buildah bud --squash --signature-policy ${TESTSDIR}/policy.json -t ${target} -f ${TESTSDIR}/bud/run-scenarios/Dockerfile.noop-flags
-  echo "$output"
-  [ "$status" -eq 0 ]
-  cid=$(buildah from ${target})
-  buildah rm ${cid}
-  buildah rmi ${target}
-}
-
 @test "bud with --cpu-shares flag, no argument" {
   target=bud-flag
   run buildah bud --cpu-shares --signature-policy ${TESTSDIR}/policy.json -t ${target} -f ${TESTSDIR}/bud/from-scratch/Dockerfile
diff --git a/tests/squash.bats b/tests/squash.bats
new file mode 100644
index 000000000..1681bf45e
--- /dev/null
+++ b/tests/squash.bats
@@ -0,0 +1,110 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "squash" {
+  createrandom ${TESTDIR}/randomfile
+  cid=$(buildah from scratch)
+  image=stage0
+  remove=(8 5)
+  for stage in $(seq 10) ; do
+    buildah copy "$cid" ${TESTDIR}/randomfile /layer${stage}
+    image=stage${stage}
+    if test $stage -eq ${remove[0]} ; then
+      mountpoint=$(buildah mount "$cid")
+      rm -f ${mountpoint}/layer${remove[1]}
+    fi
+    buildah commit --signature-policy ${TESTSDIR}/policy.json --rm "$cid" ${image}
+    run buildah --debug=false inspect -t image -f '{{len .Docker.RootFS.DiffIDs}}' ${image}
+    echo "$output"
+    [ "$status" -eq 0 ]
+    [ "$output" -eq $stage ]
+    run buildah --debug=false inspect -t image -f '{{len .OCIv1.RootFS.DiffIDs}}' ${image}
+    echo "$output"
+    [ "$status" -eq 0 ]
+    [ "$output" -eq $stage ]
+    cid=$(buildah from ${image})
+  done
+  buildah commit --signature-policy ${TESTSDIR}/policy.json --rm --squash "$cid" squashed
+  run buildah --debug=false inspect -t image -f '{{len .Docker.RootFS.DiffIDs}}' squashed
+  echo "$output"
+  [ "$status" -eq 0 ]
+  [ "$output" -eq 1 ]
+  run buildah --debug=false inspect -t image -f '{{len .OCIv1.RootFS.DiffIDs}}' squashed
+  echo "$output"
+  [ "$status" -eq 0 ]
+  [ "$output" -eq 1 ]
+  run buildah --debug=false inspect -t image -f '{{len .Docker.History}}' squashed
+  echo "$output"
+  [ "$status" -eq 0 ]
+  [ "$output" -eq 1 ]
+  run buildah --debug=false inspect -t image -f '{{len .OCIv1.History}}' squashed
+  echo "$output"
+  [ "$status" -eq 0 ]
+  [ "$output" -eq 1 ]
+
+  cid=$(buildah from squashed)
+  mountpoint=$(buildah mount $cid)
+  for stage in $(seq 10) ; do
+    if test $stage -eq ${remove[1]} ; then
+      if test -e $mountpoint/layer${remove[1]} ; then
+        echo file /layer${remove[1]} should not be there
+        exit 1
+      fi
+      continue
+    fi
+    cmp $mountpoint/layer${stage} ${TESTDIR}/randomfile
+  done
+}
+
+@test "squash-using-dockerfile" {
+  createrandom ${TESTDIR}/randomfile
+  image=stage0
+  from=scratch
+  for stage in $(seq 10) ; do
+    mkdir -p ${TESTDIR}/stage${stage}
+    echo FROM ${from} > ${TESTDIR}/stage${stage}/Dockerfile
+    cp ${TESTDIR}/randomfile ${TESTDIR}/stage${stage}/
+    echo COPY randomfile /layer${stage} >> ${TESTDIR}/stage${stage}/Dockerfile
+    image=stage${stage}
+    from=${image}
+    buildah build-using-dockerfile --signature-policy ${TESTSDIR}/policy.json -t ${image} ${TESTDIR}/stage${stage}
+    run buildah --debug=false inspect -t image -f '{{len .Docker.RootFS.DiffIDs}}' ${image}
+    echo "$output"
+    [ "$status" -eq 0 ]
+    [ "$output" -eq $stage ]
+    run buildah --debug=false inspect -t image -f '{{len .OCIv1.RootFS.DiffIDs}}' ${image}
+    echo "$output"
+    [ "$status" -eq 0 ]
+    [ "$output" -eq $stage ]
+  done
+
+  mkdir -p ${TESTDIR}/squashed
+  echo FROM ${from} > ${TESTDIR}/squashed/Dockerfile
+  cp ${TESTDIR}/randomfile ${TESTDIR}/squashed/
+  echo COPY randomfile /layer-squashed >> ${TESTDIR}/squashed/Dockerfile
+  buildah build-using-dockerfile --signature-policy ${TESTSDIR}/policy.json --squash -t squashed ${TESTDIR}/squashed
+
+  run buildah --debug=false inspect -t image -f '{{len .Docker.RootFS.DiffIDs}}' squashed
+  echo "$output"
+  [ "$status" -eq 0 ]
+  [ "$output" -eq 1 ]
+  run buildah --debug=false inspect -t image -f '{{len .OCIv1.RootFS.DiffIDs}}' squashed
+  echo "$output"
+  [ "$status" -eq 0 ]
+  [ "$output" -eq 1 ]
+  run buildah --debug=false inspect -t image -f '{{len .Docker.History}}' squashed
+  echo "$output"
+  [ "$status" -eq 0 ]
+  [ "$output" -eq 1 ]
+  run buildah --debug=false inspect -t image -f '{{len .OCIv1.History}}' squashed
+  echo "$output"
+  [ "$status" -eq 0 ]
+  [ "$output" -eq 1 ]
+
+  cid=$(buildah from squashed)
+  mountpoint=$(buildah mount $cid)
+  for stage in $(seq 10) ; do
+    cmp $mountpoint/layer${stage} ${TESTDIR}/randomfile
+  done
+}
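
With the patch applied, the flag can be exercised from either entry point. A minimal usage sketch follows; the image names, base image, and context directory are illustrative rather than taken from the patch, while the inspect query mirrors the checks in tests/squash.bats above:

    # Build an image whose layers, including those inherited from the base
    # image, are collapsed into a single layer:
    buildah bud --squash -t squashed .

    # Or squash at commit time, starting from a working container
    # (busybox is just an example base image):
    ctr=$(buildah from busybox)
    buildah commit --squash "$ctr" squashed

    # Either way, the committed image should report exactly one layer:
    buildah inspect -t image -f '{{len .OCIv1.RootFS.DiffIDs}}' squashed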