1
0
mirror of https://github.com/containers/buildah.git synced 2025-07-31 15:24:26 +03:00

commit: add a --add-file flag

Add a flag to `buildah commit` which allows adding arbitrary files to
the image while we're committing it.  When not squashing, they'll take
the form of a second new layer.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
This commit is contained in:
Nalin Dahyabhai
2023-12-12 14:54:42 -05:00
parent e089136922
commit 041388f87c
5 changed files with 273 additions and 37 deletions

View File

@ -5,6 +5,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"os" "os"
"strings"
"time" "time"
"github.com/containers/buildah" "github.com/containers/buildah"
@ -49,6 +50,7 @@ type commitInputOptions struct {
encryptionKeys []string encryptionKeys []string
encryptLayers []int encryptLayers []int
unsetenvs []string unsetenvs []string
addFile []string
} }
func init() { func init() {
@ -77,6 +79,7 @@ func commitListFlagSet(cmd *cobra.Command, opts *commitInputOptions) {
flags := cmd.Flags() flags := cmd.Flags()
flags.SetInterspersed(false) flags.SetInterspersed(false)
flags.StringArrayVar(&opts.addFile, "add-file", nil, "add contents of a file to the image at a specified path (`source:destination`)")
flags.StringVar(&opts.authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override") flags.StringVar(&opts.authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
_ = cmd.RegisterFlagCompletionFunc("authfile", completion.AutocompleteDefault) _ = cmd.RegisterFlagCompletionFunc("authfile", completion.AutocompleteDefault)
flags.StringVar(&opts.blobCache, "blob-cache", "", "assume image blobs in the specified directory will be available for pushing") flags.StringVar(&opts.blobCache, "blob-cache", "", "assume image blobs in the specified directory will be available for pushing")
@ -223,6 +226,28 @@ func commitCmd(c *cobra.Command, args []string, iopts commitInputOptions) error
} }
} }
var addFiles map[string]string
if len(iopts.addFile) > 0 {
addFiles = make(map[string]string)
for _, spec := range iopts.addFile {
specSlice := strings.SplitN(spec, ":", 2)
if len(specSlice) == 1 {
specSlice = []string{specSlice[0], specSlice[0]}
}
if len(specSlice) != 2 {
return fmt.Errorf("parsing add-file argument %q: expected 1 or 2 parts, got %d", spec, len(strings.SplitN(spec, ":", 2)))
}
st, err := os.Stat(specSlice[0])
if err != nil {
return fmt.Errorf("parsing add-file argument %q: source %q: %w", spec, specSlice[0], err)
}
if st.IsDir() {
return fmt.Errorf("parsing add-file argument %q: source %q is not a regular file", spec, specSlice[0])
}
addFiles[specSlice[1]] = specSlice[0]
}
}
options := buildah.CommitOptions{ options := buildah.CommitOptions{
PreferredManifestType: format, PreferredManifestType: format,
Manifest: iopts.manifest, Manifest: iopts.manifest,
@ -239,6 +264,7 @@ func commitCmd(c *cobra.Command, args []string, iopts commitInputOptions) error
UnsetEnvs: iopts.unsetenvs, UnsetEnvs: iopts.unsetenvs,
OverrideChanges: iopts.changes, OverrideChanges: iopts.changes,
OverrideConfig: overrideConfig, OverrideConfig: overrideConfig,
ExtraImageContent: addFiles,
} }
exclusiveFlags := 0 exclusiveFlags := 0
if c.Flag("reference-time").Changed { if c.Flag("reference-time").Changed {

View File

@ -118,6 +118,12 @@ type CommitOptions struct {
// to the configuration of the image that is being committed, after // to the configuration of the image that is being committed, after
// OverrideConfig is applied. // OverrideConfig is applied.
OverrideChanges []string OverrideChanges []string
// ExtraImageContent is a map which describes additional content to add
// to the committed image. The map's keys are filesystem paths in the
// image and the corresponding values are the paths of files whose
// contents will be used in their place. The contents will be owned by
// 0:0 and have mode 0644. Currently only accepts regular files.
ExtraImageContent map[string]string
} }
var ( var (

View File

@ -19,6 +19,14 @@ The image ID of the image that was created. On error, 1 is returned and errno i
## OPTIONS ## OPTIONS
**--add-file** *source[:destination]*
Read the contents of the file `source` and add it to the committed image as a
file at `destination`. If `destination` is not specified, the path of `source`
will be used. The new file will be owned by UID 0, GID 0, have 0644
permissions, and be given a current timestamp unless the **--timestamp** option
is also specified. This option can be specified multiple times.
**--authfile** *path* **--authfile** *path*
Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. This file is created using `buildah login`. Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. This file is created using `buildah login`.

208
image.go
View File

@ -45,9 +45,9 @@ const (
Dockerv2ImageManifest = define.Dockerv2ImageManifest Dockerv2ImageManifest = define.Dockerv2ImageManifest
) )
// ExtractRootfsOptions is consumed by ExtractRootfs() which allows // ExtractRootfsOptions is consumed by ExtractRootfs() which allows users to
// users to preserve nature of various modes like setuid, setgid and xattrs // control whether various information like the setuid and setgid bits and
// over the extracted file system objects. // xattrs are preserved when extracting file system objects.
type ExtractRootfsOptions struct { type ExtractRootfsOptions struct {
StripSetuidBit bool // strip the setuid bit off of items being extracted. StripSetuidBit bool // strip the setuid bit off of items being extracted.
StripSetgidBit bool // strip the setgid bit off of items being extracted. StripSetgidBit bool // strip the setgid bit off of items being extracted.
@ -82,6 +82,7 @@ type containerImageRef struct {
postEmptyLayers []v1.History postEmptyLayers []v1.History
overrideChanges []string overrideChanges []string
overrideConfig *manifest.Schema2Config overrideConfig *manifest.Schema2Config
extraImageContent map[string]string
} }
type blobLayerInfo struct { type blobLayerInfo struct {
@ -187,6 +188,9 @@ func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWo
Slop: options.Slop, Slop: options.Slop,
FirmwareLibrary: options.FirmwareLibrary, FirmwareLibrary: options.FirmwareLibrary,
} }
if len(i.extraImageContent) > 0 {
logrus.Warnf("ignoring extra requested content %v, not implemented (yet)", i.extraImageContent)
}
rc, _, err := mkcw.Archive(mountPoint, &image, archiveOptions) rc, _, err := mkcw.Archive(mountPoint, &image, archiveOptions)
if err != nil { if err != nil {
if _, err2 := i.store.Unmount(i.containerID, false); err2 != nil { if _, err2 := i.store.Unmount(i.containerID, false); err2 != nil {
@ -211,9 +215,8 @@ func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWo
} }
// Extract the container's whole filesystem as if it were a single layer. // Extract the container's whole filesystem as if it were a single layer.
// Takes ExtractRootfsOptions as argument which allows caller to configure // The ExtractRootfsOptions control whether or not to preserve setuid and
// preserve nature of setuid,setgid,sticky and extended attributes // setgid bits and extended attributes on contents.
// on extracted rootfs.
func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) { func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) {
var uidMap, gidMap []idtools.IDMap var uidMap, gidMap []idtools.IDMap
mountPoint, err := i.store.Mount(i.containerID, i.mountLabel) mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
@ -224,6 +227,27 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo
errChan := make(chan error, 1) errChan := make(chan error, 1)
go func() { go func() {
defer close(errChan) defer close(errChan)
if len(i.extraImageContent) > 0 {
// Abuse the tar format and _prepend_ the synthesized
// data items to the archive we'll get from
// copier.Get(), in a way that looks right to a reader
// as long as we DON'T Close() the tar Writer.
filename, _, _, err := i.makeExtraImageContentDiff(false)
if err != nil {
errChan <- err
return
}
file, err := os.Open(filename)
if err != nil {
errChan <- err
return
}
defer file.Close()
if _, err = io.Copy(pipeWriter, file); err != nil {
errChan <- err
return
}
}
if i.idMappingOptions != nil { if i.idMappingOptions != nil {
uidMap, gidMap = convertRuntimeIDMaps(i.idMappingOptions.UIDMap, i.idMappingOptions.GIDMap) uidMap, gidMap = convertRuntimeIDMaps(i.idMappingOptions.UIDMap, i.idMappingOptions.GIDMap)
} }
@ -294,8 +318,8 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest,
dimage.RootFS.Type = docker.TypeLayers dimage.RootFS.Type = docker.TypeLayers
dimage.RootFS.DiffIDs = []digest.Digest{} dimage.RootFS.DiffIDs = []digest.Digest{}
// Only clear the history if we're squashing, otherwise leave it be so // Only clear the history if we're squashing, otherwise leave it be so
// that we can append entries to it. Clear the parent, too, we no // that we can append entries to it. Clear the parent, too, to reflect
// longer include its layers and history. // that we no longer include its layers and history.
if i.confidentialWorkload.Convert || i.squash || i.omitHistory { if i.confidentialWorkload.Convert || i.squash || i.omitHistory {
dimage.Parent = "" dimage.Parent = ""
dimage.History = []docker.V2S2History{} dimage.History = []docker.V2S2History{}
@ -368,8 +392,9 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
if err != nil { if err != nil {
return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err) return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err)
} }
// Walk the list of parent layers, prepending each as we go. If we're squashing, // Walk the list of parent layers, prepending each as we go. If we're squashing
// stop at the layer ID of the top layer, which we won't really be using anyway. // or making a confidential workload, we're only producing one layer, so stop at
// the layer ID of the top layer, which we won't really be using anyway.
for layer != nil { for layer != nil {
layers = append(append([]string{}, layerID), layers...) layers = append(append([]string{}, layerID), layers...)
layerID = layer.Parent layerID = layer.Parent
@ -382,6 +407,14 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err) return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err)
} }
} }
layer = nil
// If we're slipping in a synthesized layer, we need to add a placeholder for it
// to the list.
const synthesizedLayerID = "(synthesized layer)"
if len(i.extraImageContent) > 0 && !i.confidentialWorkload.Convert && !i.squash {
layers = append(layers, synthesizedLayerID)
}
logrus.Debugf("layer list: %q", layers) logrus.Debugf("layer list: %q", layers)
// Make a temporary directory to hold blobs. // Make a temporary directory to hold blobs.
@ -407,6 +440,8 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
} }
// Extract each layer and compute its digests, both compressed (if requested) and uncompressed. // Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
var extraImageContentDiff string
var extraImageContentDiffDigest digest.Digest
blobLayers := make(map[digest.Digest]blobLayerInfo) blobLayers := make(map[digest.Digest]blobLayerInfo)
for _, layerID := range layers { for _, layerID := range layers {
what := fmt.Sprintf("layer %q", layerID) what := fmt.Sprintf("layer %q", layerID)
@ -417,16 +452,32 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
omediaType := v1.MediaTypeImageLayer omediaType := v1.MediaTypeImageLayer
dmediaType := docker.V2S2MediaTypeUncompressedLayer dmediaType := docker.V2S2MediaTypeUncompressedLayer
// Look up this layer. // Look up this layer.
layer, err := i.store.Layer(layerID) var layerUncompressedDigest digest.Digest
if err != nil { var layerUncompressedSize int64
return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err) if layerID != synthesizedLayerID {
layer, err := i.store.Layer(layerID)
if err != nil {
return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err)
}
layerID = layer.ID
layerUncompressedDigest = layer.UncompressedDigest
layerUncompressedSize = layer.UncompressedSize
} else {
diffFilename, digest, size, err := i.makeExtraImageContentDiff(true)
if err != nil {
return nil, fmt.Errorf("unable to generate layer for additional content: %w", err)
}
extraImageContentDiff = diffFilename
extraImageContentDiffDigest = digest
layerUncompressedDigest = digest
layerUncompressedSize = size
} }
// If we already know the digest of the contents of parent // If we already know the digest of the contents of parent
// layers, reuse their blobsums, diff IDs, and sizes. // layers, reuse their blobsums, diff IDs, and sizes.
if !i.confidentialWorkload.Convert && !i.squash && layerID != i.layerID && layer.UncompressedDigest != "" { if !i.confidentialWorkload.Convert && !i.squash && layerID != i.layerID && layerID != synthesizedLayerID && layerUncompressedDigest != "" {
layerBlobSum := layer.UncompressedDigest layerBlobSum := layerUncompressedDigest
layerBlobSize := layer.UncompressedSize layerBlobSize := layerUncompressedSize
diffID := layer.UncompressedDigest diffID := layerUncompressedDigest
// Note this layer in the manifest, using the appropriate blobsum. // Note this layer in the manifest, using the appropriate blobsum.
olayerDescriptor := v1.Descriptor{ olayerDescriptor := v1.Descriptor{
MediaType: omediaType, MediaType: omediaType,
@ -444,7 +495,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, diffID) oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, diffID)
dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, diffID) dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, diffID)
blobLayers[diffID] = blobLayerInfo{ blobLayers[diffID] = blobLayerInfo{
ID: layer.ID, ID: layerID,
Size: layerBlobSize, Size: layerBlobSize,
} }
continue continue
@ -474,15 +525,22 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
return nil, err return nil, err
} }
} else { } else {
// If we're up to the final layer, but we don't want to if layerID != synthesizedLayerID {
// include a diff for it, we're done. // If we're up to the final layer, but we don't want to
if i.emptyLayer && layerID == i.layerID { // include a diff for it, we're done.
continue if i.emptyLayer && layerID == i.layerID {
} continue
// Extract this layer, one of possibly many. }
rc, err = i.store.Diff("", layerID, diffOptions) // Extract this layer, one of possibly many.
if err != nil { rc, err = i.store.Diff("", layerID, diffOptions)
return nil, fmt.Errorf("extracting %s: %w", what, err) if err != nil {
return nil, fmt.Errorf("extracting %s: %w", what, err)
}
} else {
// Slip in additional content as an additional layer.
if rc, err = os.Open(extraImageContentDiff); err != nil {
return nil, err
}
} }
} }
srcHasher := digest.Canonical.Digester() srcHasher := digest.Canonical.Digester()
@ -624,20 +682,19 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
} }
} }
// Calculate base image history for special scenarios
// when base layers does not contains any history.
// We will ignore sanity checks if baseImage history is null
// but still add new history for docker parity.
baseImageHistoryLen := len(oimage.History)
// Only attempt to append history if history was not disabled explicitly. // Only attempt to append history if history was not disabled explicitly.
if !i.omitHistory { if !i.omitHistory {
// Keep track of how many entries the base image's history had
// before we started adding to it.
baseImageHistoryLen := len(oimage.History)
appendHistory(i.preEmptyLayers) appendHistory(i.preEmptyLayers)
created := time.Now().UTC() created := time.Now().UTC()
if i.created != nil { if i.created != nil {
created = (*i.created).UTC() created = (*i.created).UTC()
} }
comment := i.historyComment comment := i.historyComment
// Add a comment for which base image is being used // Add a comment indicating which base image was used, if it wasn't
// just an image ID.
if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != i.fromImageID { if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != i.fromImageID {
comment += "FROM " + i.fromImageName comment += "FROM " + i.fromImageName
} }
@ -659,10 +716,24 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
dimage.History = append(dimage.History, dnews) dimage.History = append(dimage.History, dnews)
appendHistory(i.postEmptyLayers) appendHistory(i.postEmptyLayers)
// Sanity check that we didn't just create a mismatch between non-empty layers in the // Add a history entry for the extra image content if we added a layer for it.
// history and the number of diffIDs. Following sanity check is ignored if build history if extraImageContentDiff != "" {
// is disabled explicitly by the user. createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded())
// Disable sanity check when baseImageHistory is null for docker parity onews := v1.History{
Created: &created,
CreatedBy: createdBy,
}
oimage.History = append(oimage.History, onews)
dnews := docker.V2S2History{
Created: created,
CreatedBy: createdBy,
}
dimage.History = append(dimage.History, dnews)
}
// Confidence check that we didn't just create a mismatch between non-empty layers in the
// history and the number of diffIDs. Only applicable if the base image (if there was
// one) provided us at least one entry to use as a starting point.
if baseImageHistoryLen != 0 { if baseImageHistoryLen != 0 {
expectedDiffIDs := expectedOCIDiffIDs(oimage) expectedDiffIDs := expectedOCIDiffIDs(oimage)
if len(oimage.RootFS.DiffIDs) != expectedDiffIDs { if len(oimage.RootFS.DiffIDs) != expectedDiffIDs {
@ -859,6 +930,68 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo,
return ioutils.NewReadCloserWrapper(layerReadCloser, closer), size, nil return ioutils.NewReadCloserWrapper(layerReadCloser, closer), size, nil
} }
// makeExtraImageContentDiff creates an archive file containing the contents of
// files named in i.extraImageContent.  The map's keys are the paths inside the
// image; the values are the paths of local files whose contents are used.
// Entries are written as regular files owned by 0:0 with mode 0644, stamped
// with i.created (or "now" if unset).  If includeFooter is false, the footer
// that marks the end of the archive is omitted so that more tar entries can be
// appended after the returned file's contents; in that case the digest and
// size returns are not meaningful ("" and -1).
//
// Returns the archive's filename (a temporary file in the container's
// directory; the caller is responsible for its eventual removal), the digest
// of the uncompressed archive, and its size in bytes.
func (i *containerImageRef) makeExtraImageContentDiff(includeFooter bool) (string, digest.Digest, int64, error) {
	cdir, err := i.store.ContainerDirectory(i.containerID)
	if err != nil {
		return "", "", -1, err
	}
	diff, err := os.CreateTemp(cdir, "extradiff")
	if err != nil {
		return "", "", -1, err
	}
	defer diff.Close()
	// Tee everything we write through a digester/counter so we can report
	// the archive's digest and size without re-reading the file.
	digester := digest.Canonical.Digester()
	counter := ioutils.NewWriteCounter(digester.Hash())
	tw := tar.NewWriter(io.MultiWriter(diff, counter))
	created := time.Now()
	if i.created != nil {
		created = *i.created
	}
	for path, contents := range i.extraImageContent {
		// Use a closure so that each source file is closed promptly via
		// defer instead of accumulating open files until we return.
		if err := func() error {
			content, err := os.Open(contents)
			if err != nil {
				return err
			}
			defer content.Close()
			st, err := content.Stat()
			if err != nil {
				return err
			}
			if err := tw.WriteHeader(&tar.Header{
				Name:     path,
				Typeflag: tar.TypeReg,
				Mode:     0o644,
				ModTime:  created,
				Size:     st.Size(),
			}); err != nil {
				return err
			}
			if _, err := io.Copy(tw, content); err != nil {
				return err
			}
			// Flush so this entry (header + padded data) is fully
			// written out before we either append the next entry or
			// hand the file off without a footer.
			return tw.Flush()
		}(); err != nil {
			return "", "", -1, err
		}
	}
	if !includeFooter {
		// Deliberately skip tw.Close(): the caller wants to append more
		// entries after this archive's contents.
		return diff.Name(), "", -1, nil
	}
	// Close writes the end-of-archive footer; don't ignore its error, or a
	// short write would yield a truncated diff with a bogus digest/size.
	if err := tw.Close(); err != nil {
		return "", "", -1, err
	}
	return diff.Name(), digester.Digest(), counter.Count, nil
}
// makeContainerImageRef creates a containers/image/v5/types.ImageReference
// which is mainly used for representing the working container as a source
// image that can be copied, which is how we commit container to create the
// image.
func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageRef, error) { func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageRef, error) {
var name reference.Named var name reference.Named
container, err := b.store.Container(b.ContainerID) container, err := b.store.Container(b.ContainerID)
@ -935,6 +1068,7 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
postEmptyLayers: b.AppendedEmptyLayers, postEmptyLayers: b.AppendedEmptyLayers,
overrideChanges: options.OverrideChanges, overrideChanges: options.OverrideChanges,
overrideConfig: options.OverrideConfig, overrideConfig: options.OverrideConfig,
extraImageContent: copyStringStringMap(options.ExtraImageContent),
} }
return ref, nil return ref, nil
} }

View File

@ -326,3 +326,65 @@ load helpers
# instead of name/name because the names are gone # instead of name/name because the names are gone
assert "$output" =~ $(id -u)/$(id -g) assert "$output" =~ $(id -u)/$(id -g)
} }
# Verify that `buildah commit --add-file source:dest` injects extra files into
# the committed image, both when committing layered (--squash=false) and
# squashed (--squash=true), and that injected files are owned by root with
# mode 0644 while the rest of the base image (e.g. /bin/sh) stays intact.
#
# NOTE(review): $cid is reassigned to each newly-created container inside the
# loop, so the second (--squash=true) iteration commits the container built
# from the "with-random-both" image rather than the original busybox container
# — confirm that is intended.
@test "commit-with-extra-files" {
_prefetch busybox
run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox
cid=$output
# Two files with random contents to be injected via --add-file.
createrandom ${BATS_TMPDIR}/randomfile1
createrandom ${BATS_TMPDIR}/randomfile2
for method in --squash=false --squash=true ; do
# Commit three variants: first file only, second file only (into a
# subdirectory that doesn't exist in the base), and both files.
run_buildah commit $method --add-file ${BATS_TMPDIR}/randomfile1:/randomfile1 $cid with-random-1
run_buildah commit $method --add-file ${BATS_TMPDIR}/randomfile2:/in-a-subdir/randomfile2 $cid with-random-2
run_buildah commit $method --add-file ${BATS_TMPDIR}/randomfile1:/randomfile1 --add-file ${BATS_TMPDIR}/randomfile2:/in-a-subdir/randomfile2 $cid with-random-both
# first one should have the first file and not the second, and the shell should be there
run_buildah from --quiet --pull=false $WITH_POLICY_JSON with-random-1
cid=$output
run_buildah mount $cid
mountpoint=$output
test -s $mountpoint/bin/sh || test -L $mountpoint/bin/sh
cmp ${BATS_TMPDIR}/randomfile1 $mountpoint/randomfile1
# Capture the uid:gid that owns the rootfs mountpoint for comparison below.
run stat -c %u:%g $mountpoint
[ $status -eq 0 ]
rootowner=$output
run stat -c %u:%g:%A $mountpoint/randomfile1
[ $status -eq 0 ]
# NOTE(review): one-argument `assert` doesn't compare against $output from
# the stat above — presumably this should be `assert "$output" = ...`;
# verify against the assert() helper's semantics.
assert ${rootowner}:-rw-r--r--
! test -f $mountpoint/randomfile2
# second one should have the second file and not the first, and the shell should be there
run_buildah from --quiet --pull=false $WITH_POLICY_JSON with-random-2
cid=$output
run_buildah mount $cid
mountpoint=$output
test -s $mountpoint/bin/sh || test -L $mountpoint/bin/sh
cmp ${BATS_TMPDIR}/randomfile2 $mountpoint/in-a-subdir/randomfile2
run stat -c %u:%g $mountpoint
[ $status -eq 0 ]
rootowner=$output
run stat -c %u:%g:%A $mountpoint/in-a-subdir/randomfile2
[ $status -eq 0 ]
# NOTE(review): same one-argument `assert` concern as above.
assert ${rootowner}:-rw-r--r--
! test -f $mountpoint/randomfile1
# third one should have both files, and the shell should be there
run_buildah from --quiet --pull=false $WITH_POLICY_JSON with-random-both
cid=$output
run_buildah mount $cid
mountpoint=$output
test -s $mountpoint/bin/sh || test -L $mountpoint/bin/sh
cmp ${BATS_TMPDIR}/randomfile1 $mountpoint/randomfile1
run stat -c %u:%g $mountpoint
[ $status -eq 0 ]
rootowner=$output
run stat -c %u:%g:%A $mountpoint/randomfile1
[ $status -eq 0 ]
# NOTE(review): same one-argument `assert` concern as above.
assert ${rootowner}:-rw-r--r--
cmp ${BATS_TMPDIR}/randomfile2 $mountpoint/in-a-subdir/randomfile2
run stat -c %u:%g:%A $mountpoint/in-a-subdir/randomfile2
[ $status -eq 0 ]
# NOTE(review): same one-argument `assert` concern as above.
assert ${rootowner}:-rw-r--r--
done
}