diff --git a/builder/builder.go b/builder/builder.go index a98ca29f67..f7a4f91f27 100644 --- a/builder/builder.go +++ b/builder/builder.go @@ -146,6 +146,9 @@ type Backend interface { // SquashImage squashes the fs layers from the provided image down to the specified `to` image SquashImage(from string, to string) (string, error) + + // MountImage returns mounted path with rootfs of an image. + MountImage(name string) (string, func() error, error) } // Image represents a Docker image used by the builder. diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go index 3f2a9f67ee..fc0f584407 100644 --- a/builder/dockerfile/builder.go +++ b/builder/dockerfile/builder.go @@ -69,7 +69,8 @@ type Builder struct { runConfig *container.Config // runconfig for cmd, run, entrypoint etc. flags *BFlags tmpContainers map[string]struct{} - image string // imageID + image string // imageID + imageContexts *imageContexts // helper for storing contexts from builds noBaseImage bool maintainer string cmdSet bool @@ -88,12 +89,13 @@ type Builder struct { // BuildManager implements builder.Backend and is shared across all Builder objects. type BuildManager struct { - backend builder.Backend + backend builder.Backend + pathCache *pathCache // TODO: make this persistent } // NewBuildManager creates a BuildManager. func NewBuildManager(b builder.Backend) (bm *BuildManager) { - return &BuildManager{backend: b} + return &BuildManager{backend: b, pathCache: &pathCache{}} } // BuildFromContext builds a new image from a given context. @@ -118,6 +120,7 @@ func (bm *BuildManager) BuildFromContext(ctx context.Context, src io.ReadCloser, if err != nil { return "", err } + b.imageContexts.cache = bm.pathCache return b.build(pg.StdoutFormatter, pg.StderrFormatter, pg.Output) } @@ -147,6 +150,7 @@ func NewBuilder(clientCtx context.Context, config *types.ImageBuildOptions, back LookingForDirectives: true, }, } + b.imageContexts = &imageContexts{b: b} parser.SetEscapeToken(parser.DefaultEscapeToken, &b.directive) // Assume the default token for escape @@ -239,6 +243,8 @@ func (b *Builder) processLabels() error { // * Print a happy message and return the image ID. // func (b *Builder) build(stdout io.Writer, stderr io.Writer, out io.Writer) (string, error) { + defer b.imageContexts.unmount() + b.Stdout = stdout b.Stderr = stderr b.Output = out diff --git a/builder/dockerfile/dispatchers.go b/builder/dockerfile/dispatchers.go index 9dad7e0f00..b63ec65914 100644 --- a/builder/dockerfile/dispatchers.go +++ b/builder/dockerfile/dispatchers.go @@ -8,7 +8,6 @@ package dockerfile // package. 
import ( - "errors" "fmt" "regexp" "runtime" @@ -25,6 +24,7 @@ import ( "github.com/docker/docker/builder" "github.com/docker/docker/pkg/signal" "github.com/docker/go-connections/nat" + "github.com/pkg/errors" ) // ENV foo bar @@ -169,7 +169,7 @@ func add(b *Builder, args []string, attributes map[string]bool, original string) return err } - return b.runContextCommand(args, true, true, "ADD") + return b.runContextCommand(args, true, true, "ADD", nil) } // COPY foo /path @@ -181,11 +181,26 @@ func dispatchCopy(b *Builder, args []string, attributes map[string]bool, origina return errAtLeastTwoArguments("COPY") } + flFrom := b.flags.AddString("from", "") + if err := b.flags.Parse(); err != nil { return err } - return b.runContextCommand(args, false, false, "COPY") + var contextID *int + if flFrom.IsUsed() { + var err error + context, err := strconv.Atoi(flFrom.Value) + if err != nil { + return errors.Wrap(err, "from expects an integer value corresponding to the context number") + } + if err := b.imageContexts.validate(context); err != nil { + return err + } + contextID = &context + } + + return b.runContextCommand(args, false, false, "COPY", contextID) } // FROM imagename @@ -206,6 +221,7 @@ func from(b *Builder, args []string, attributes map[string]bool, original string var image builder.Image b.resetImageCache() + b.imageContexts.new() // Windows cannot support a container with no base image. if name == api.NoBaseImageSpecifier { @@ -227,6 +243,7 @@ func from(b *Builder, args []string, attributes map[string]bool, original string return err } } + b.imageContexts.update(image.ImageID()) } b.from = image diff --git a/builder/dockerfile/dispatchers_test.go b/builder/dockerfile/dispatchers_test.go index 61ef3a753d..6b9b538102 100644 --- a/builder/dockerfile/dispatchers_test.go +++ b/builder/dockerfile/dispatchers_test.go @@ -191,6 +191,7 @@ func TestLabel(t *testing.T) { func TestFrom(t *testing.T) { b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + b.imageContexts = &imageContexts{b: b} err := from(b, []string{"scratch"}, nil, "") diff --git a/builder/dockerfile/imagecontext.go b/builder/dockerfile/imagecontext.go new file mode 100644 index 0000000000..32ce2aacbc --- /dev/null +++ b/builder/dockerfile/imagecontext.go @@ -0,0 +1,121 @@ +package dockerfile + +import ( + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext" + "github.com/pkg/errors" +) + +// imageContexts is a helper for stacking up built image rootfs and reusing +// them as contexts +type imageContexts struct { + b *Builder + list []*imageMount + cache *pathCache +} + +type imageMount struct { + id string + ctx builder.Context + release func() error +} + +func (ic *imageContexts) new() { + ic.list = append(ic.list, &imageMount{}) +} + +func (ic *imageContexts) update(imageID string) { + ic.list[len(ic.list)-1].id = imageID +} + +func (ic *imageContexts) validate(i int) error { + if i < 0 || i >= len(ic.list)-1 { + var extraMsg string + if i == len(ic.list)-1 { + extraMsg = " refers current build block" + } + return errors.Errorf("invalid from flag value %d%s", i, extraMsg) + } + return nil +} + +func (ic *imageContexts) context(i int) (builder.Context, error) { + if err := ic.validate(i); err != nil { + return nil, err + } + im := ic.list[i] + if im.ctx == nil { + if im.id == "" { + return nil, errors.Errorf("could not copy from empty context") + } + p, release, err := ic.b.docker.MountImage(im.id) + if err != nil { + 
return nil, errors.Wrapf(err, "failed to mount %s", im.id) + } + ctx, err := remotecontext.NewLazyContext(p) + if err != nil { + return nil, errors.Wrapf(err, "failed to create lazycontext for %s", p) + } + logrus.Debugf("mounted image: %s %s", im.id, p) + im.release = release + im.ctx = ctx + } + return im.ctx, nil +} + +func (ic *imageContexts) unmount() (retErr error) { + for _, im := range ic.list { + if im.release != nil { + if err := im.release(); err != nil { + logrus.Error(errors.Wrapf(err, "failed to unmount previous build image")) + retErr = err + } + } + } + return +} + +func (ic *imageContexts) getCache(i int, path string) (interface{}, bool) { + if ic.cache != nil { + im := ic.list[i] + if im.id == "" { + return nil, false + } + return ic.cache.get(im.id + path) + } + return nil, false +} + +func (ic *imageContexts) setCache(i int, path string, v interface{}) { + if ic.cache != nil { + ic.cache.set(ic.list[i].id+path, v) + } +} + +type pathCache struct { + mu sync.Mutex + items map[string]interface{} +} + +func (c *pathCache) set(k string, v interface{}) { + c.mu.Lock() + if c.items == nil { + c.items = make(map[string]interface{}) + } + c.items[k] = v + c.mu.Unlock() +} + +func (c *pathCache) get(k string) (interface{}, bool) { + c.mu.Lock() + if c.items == nil { + c.mu.Unlock() + return nil, false + } + v, ok := c.items[k] + c.mu.Unlock() + return v, ok +} diff --git a/builder/dockerfile/internals.go b/builder/dockerfile/internals.go index 6eb6b1dfeb..1a47c5d1ca 100644 --- a/builder/dockerfile/internals.go +++ b/builder/dockerfile/internals.go @@ -6,7 +6,6 @@ package dockerfile import ( "crypto/sha256" "encoding/hex" - "errors" "fmt" "io" "io/ioutil" @@ -14,6 +13,8 @@ import ( "net/url" "os" "path/filepath" + "regexp" + "runtime" "sort" "strings" "time" @@ -36,6 +37,7 @@ import ( "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/pkg/urlutil" "github.com/docker/docker/runconfig/opts" + "github.com/pkg/errors" ) func (b *Builder) commit(id string, autoCmd strslice.StrSlice, comment string) error { @@ -83,6 +85,7 @@ func (b *Builder) commit(id string, autoCmd strslice.StrSlice, comment string) e } b.image = imageID + b.imageContexts.update(imageID) return nil } @@ -91,11 +94,7 @@ type copyInfo struct { decompress bool } -func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error { - if b.context == nil { - return fmt.Errorf("No context given. 
Impossible to use %s", cmdName) - } - +func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string, contextID *int) error { if len(args) < 2 { return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) } @@ -129,7 +128,7 @@ func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalD continue } // not a URL - subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true) + subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true, contextID) if err != nil { return err } @@ -299,20 +298,41 @@ func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) { return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil } -func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool) ([]copyInfo, error) { +func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool, contextID *int) ([]copyInfo, error) { // Work in daemon-specific OS filepath semantics origPath = filepath.FromSlash(origPath) + // validate windows paths from other images + if contextID != nil && runtime.GOOS == "windows" { + forbid := regexp.MustCompile("(?i)^" + string(os.PathSeparator) + "?(windows(" + string(os.PathSeparator) + ".+)?)?$") + if p := filepath.Clean(origPath); p == "." || forbid.MatchString(p) { + return nil, errors.Errorf("copy from %s is not allowed on windows", origPath) + } + } + if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 { origPath = origPath[1:] } origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) + context := b.context + var err error + if contextID != nil { + context, err = b.imageContexts.context(*contextID) + if err != nil { + return nil, err + } + } + + if context == nil { + return nil, errors.Errorf("No context given. 
Impossible to use %s", cmdName) + } + // Deal with wildcards if allowWildcards && containsWildcards(origPath) { var copyInfos []copyInfo - if err := b.context.Walk("", func(path string, info builder.FileInfo, err error) error { + if err := context.Walk("", func(path string, info builder.FileInfo, err error) error { if err != nil { return err } @@ -326,7 +346,7 @@ func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression // Note we set allowWildcards to false in case the name has // a * in it - subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false) + subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false, contextID) if err != nil { return err } @@ -339,8 +359,7 @@ func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression } // Must be a dir or a file - - statPath, fi, err := b.context.Stat(origPath) + statPath, fi, err := context.Stat(origPath) if err != nil { return nil, err } @@ -351,6 +370,13 @@ func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression if !handleHash { return copyInfos, nil } + if contextID != nil { + // fast-cache based on imageID + if h, ok := b.imageContexts.getCache(*contextID, origPath); ok { + hfi.SetHash(h.(string)) + return copyInfos, nil + } + } // Deal with the single file case if !fi.IsDir() { @@ -359,7 +385,7 @@ func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression } // Must be a dir var subfiles []string - err = b.context.Walk(statPath, func(path string, info builder.FileInfo, err error) error { + err = context.Walk(statPath, func(path string, info builder.FileInfo, err error) error { if err != nil { return err } @@ -375,6 +401,9 @@ func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression hasher := sha256.New() hasher.Write([]byte(strings.Join(subfiles, ","))) hfi.SetHash("dir:" + hex.EncodeToString(hasher.Sum(nil))) + if contextID != nil { + b.imageContexts.setCache(*contextID, origPath, hfi.Hash()) + } return copyInfos, nil } @@ -468,6 +497,7 @@ func (b *Builder) probeCache() (bool, error) { fmt.Fprint(b.Stdout, " ---> Using cache\n") logrus.Debugf("[BUILDER] Use cached version: %s", b.runConfig.Cmd) b.image = string(cache) + b.imageContexts.update(b.image) return true, nil } diff --git a/builder/remotecontext/filehash.go b/builder/remotecontext/filehash.go new file mode 100644 index 0000000000..a9b324272b --- /dev/null +++ b/builder/remotecontext/filehash.go @@ -0,0 +1,34 @@ +package remotecontext + +import ( + "archive/tar" + "crypto/sha256" + "hash" + "os" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/tarsum" +) + +// NewFileHash returns new hash that is used for the builder cache keys +func NewFileHash(path, name string, fi os.FileInfo) (hash.Hash, error) { + hdr, err := archive.FileInfoHeader(path, name, fi) + if err != nil { + return nil, err + } + tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()} + tsh.Reset() // initialize header + return tsh, nil +} + +type tarsumHash struct { + hash.Hash + hdr *tar.Header +} + +// Reset resets the Hash to its initial state. 
+func (tsh *tarsumHash) Reset() { + // comply with hash.Hash and reset to the state hash had before any writes + tsh.Hash.Reset() + tarsum.WriteV1Header(tsh.hdr, tsh.Hash) +} diff --git a/builder/remotecontext/lazycontext.go b/builder/remotecontext/lazycontext.go new file mode 100644 index 0000000000..3b7625ed3a --- /dev/null +++ b/builder/remotecontext/lazycontext.go @@ -0,0 +1,149 @@ +package remotecontext + +import ( + "encoding/hex" + "io" + "os" + "path/filepath" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/symlink" + "github.com/pkg/errors" +) + +// NewLazyContext creates a new LazyContext. LazyContext defines a hashed build +// context based on a root directory. Individual files are hashed first time +// they are asked. It is not safe to call methods of LazyContext concurrently. +func NewLazyContext(root string) (builder.Context, error) { + return &lazyContext{ + root: root, + sums: make(map[string]string), + }, nil +} + +type lazyContext struct { + root string + sums map[string]string +} + +func (c *lazyContext) Close() error { + return nil +} + +func (c *lazyContext) Open(path string) (io.ReadCloser, error) { + cleanPath, fullPath, err := c.normalize(path) + if err != nil { + return nil, err + } + + r, err := os.Open(fullPath) + if err != nil { + return nil, errors.WithStack(convertPathError(err, cleanPath)) + } + return r, nil +} + +func (c *lazyContext) Stat(path string) (string, builder.FileInfo, error) { + // TODO: although stat returns builder.FileInfo it builder.Context actually requires Hashed + cleanPath, fullPath, err := c.normalize(path) + if err != nil { + return "", nil, err + } + + st, err := os.Lstat(fullPath) + if err != nil { + return "", nil, errors.WithStack(convertPathError(err, cleanPath)) + } + + relPath, err := filepath.Rel(c.root, fullPath) + if err != nil { + return "", nil, errors.WithStack(convertPathError(err, cleanPath)) + } + + sum, ok := c.sums[relPath] + if !ok { + sum, err = c.prepareHash(relPath, st) + if err != nil { + return "", nil, err + } + } + + fi := &builder.HashedFileInfo{ + builder.PathFileInfo{st, fullPath, filepath.Base(cleanPath)}, + sum, + } + return relPath, fi, nil +} + +func (c *lazyContext) Walk(root string, walkFn builder.WalkFunc) error { + _, fullPath, err := c.normalize(root) + if err != nil { + return err + } + return filepath.Walk(fullPath, func(fullPath string, fi os.FileInfo, err error) error { + relPath, err := filepath.Rel(c.root, fullPath) + if err != nil { + return errors.WithStack(err) + } + if relPath == "." 
{ + return nil + } + + sum, ok := c.sums[relPath] + if !ok { + sum, err = c.prepareHash(relPath, fi) + if err != nil { + return err + } + } + + hfi := &builder.HashedFileInfo{ + builder.PathFileInfo{FileInfo: fi, FilePath: fullPath}, + sum, + } + if err := walkFn(relPath, hfi, nil); err != nil { + return err + } + return nil + }) +} + +func (c *lazyContext) prepareHash(relPath string, fi os.FileInfo) (string, error) { + p := filepath.Join(c.root, relPath) + h, err := NewFileHash(p, relPath, fi) + if err != nil { + return "", errors.Wrapf(err, "failed to create hash for %s", relPath) + } + if fi.Mode().IsRegular() && fi.Size() > 0 { + f, err := os.Open(p) + if err != nil { + return "", errors.Wrapf(err, "failed to open %s", relPath) + } + defer f.Close() + if _, err := pools.Copy(h, f); err != nil { + return "", errors.Wrapf(err, "failed to copy file data for %s", relPath) + } + } + sum := hex.EncodeToString(h.Sum(nil)) + c.sums[relPath] = sum + return sum, nil +} + +func (c *lazyContext) normalize(path string) (cleanPath, fullPath string, err error) { + // todo: combine these helpers with tarsum after they are moved to same package + cleanPath = filepath.Clean(string(os.PathSeparator) + path)[1:] + fullPath, err = symlink.FollowSymlinkInScope(filepath.Join(c.root, path), c.root) + if err != nil { + return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, fullPath) + } + return +} + +func convertPathError(err error, cleanpath string) error { + if err, ok := err.(*os.PathError); ok { + err.Path = cleanpath + return err + } + return err +} diff --git a/daemon/archive.go b/daemon/archive.go index 4dc43344d1..b1401600cc 100644 --- a/daemon/archive.go +++ b/daemon/archive.go @@ -1,7 +1,6 @@ package daemon import ( - "errors" "io" "os" "path/filepath" @@ -10,11 +9,14 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/builder" "github.com/docker/docker/container" + "github.com/docker/docker/layer" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" ) // ErrExtractPointNotDirectory is used to convey that the operation to extract @@ -454,3 +456,34 @@ func (daemon *Daemon) CopyOnBuild(cID string, destPath string, src builder.FileI return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) } + +// MountImage returns mounted path with rootfs of an image. 
+func (daemon *Daemon) MountImage(name string) (string, func() error, error) { + img, err := daemon.GetImage(name) + if err != nil { + return "", nil, errors.Wrapf(err, "no such image: %s", name) + } + + mountID := stringid.GenerateRandomID() + rwLayer, err := daemon.layerStore.CreateRWLayer(mountID, img.RootFS.ChainID(), nil) + if err != nil { + return "", nil, errors.Wrap(err, "failed to create rwlayer") + } + + mountPath, err := rwLayer.Mount("") + if err != nil { + metadata, releaseErr := daemon.layerStore.ReleaseRWLayer(rwLayer) + if releaseErr != nil { + err = errors.Wrapf(err, "failed to release rwlayer: %s", releaseErr.Error()) + } + layer.LogReleaseMetadata(metadata) + return "", nil, errors.Wrap(err, "failed to mount rwlayer") + } + + return mountPath, func() error { + rwLayer.Unmount() + metadata, err := daemon.layerStore.ReleaseRWLayer(rwLayer) + layer.LogReleaseMetadata(metadata) + return err + }, nil +} diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 7a57ade031..1abc984ec3 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -5755,6 +5755,138 @@ func (s *DockerSuite) TestBuildContChar(c *check.C) { c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi \\\\\n") } +func (s *DockerSuite) TestBuildCopyFromPreviousRootFS(c *check.C) { + dockerfile := ` + FROM busybox + COPY foo bar + FROM busybox + %s + COPY baz baz + RUN echo mno > baz/cc + FROM busybox + COPY bar / + COPY --from=1 baz sub/ + COPY --from=0 bar baz + COPY --from=0 bar bay` + ctx := fakeContext(c, fmt.Sprintf(dockerfile, ""), map[string]string{ + "Dockerfile": dockerfile, + "foo": "abc", + "bar": "def", + "baz/aa": "ghi", + "baz/bb": "jkl", + }) + defer ctx.Close() + + result := buildImage("build1", withExternalBuildContext(ctx)) + result.Assert(c, icmd.Success) + + out, _ := dockerCmd(c, "run", "build1", "cat", "bar") + c.Assert(strings.TrimSpace(out), check.Equals, "def") + out, _ = dockerCmd(c, "run", "build1", "cat", "sub/aa") + c.Assert(strings.TrimSpace(out), check.Equals, "ghi") + out, _ = dockerCmd(c, "run", "build1", "cat", "sub/cc") + c.Assert(strings.TrimSpace(out), check.Equals, "mno") + out, _ = dockerCmd(c, "run", "build1", "cat", "baz") + c.Assert(strings.TrimSpace(out), check.Equals, "abc") + out, _ = dockerCmd(c, "run", "build1", "cat", "bay") + c.Assert(strings.TrimSpace(out), check.Equals, "abc") + + result = buildImage("build2", withExternalBuildContext(ctx)) + result.Assert(c, icmd.Success) + + // all commands should be cached + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 7) + + err := ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(fmt.Sprintf(dockerfile, "COPY baz/aa foo")), 0644) + c.Assert(err, checker.IsNil) + + // changing file in parent block should not affect last block + result = buildImage("build3", withExternalBuildContext(ctx)) + result.Assert(c, icmd.Success) + + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 5) + + c.Assert(getIDByName(c, "build1"), checker.Equals, getIDByName(c, "build2")) + + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("pqr"), 0644) + c.Assert(err, checker.IsNil) + + // changing file in parent block should affect both first and last block + result = buildImage("build4", withExternalBuildContext(ctx)) + result.Assert(c, icmd.Success) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 5) + + out, _ = dockerCmd(c, "run", "build4", 
"cat", "bay") + c.Assert(strings.TrimSpace(out), check.Equals, "pqr") + out, _ = dockerCmd(c, "run", "build4", "cat", "baz") + c.Assert(strings.TrimSpace(out), check.Equals, "pqr") +} + +func (s *DockerSuite) TestBuildCopyFromPreviousRootFSErrors(c *check.C) { + dockerfile := ` + FROM busybox + COPY --from=foo foo bar` + + ctx := fakeContext(c, dockerfile, map[string]string{ + "Dockerfile": dockerfile, + "foo": "abc", + }) + defer ctx.Close() + + buildImage("build1", withExternalBuildContext(ctx)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "from expects an integer value corresponding to the context number", + }) + + dockerfile = ` + FROM busybox + COPY --from=0 foo bar` + + ctx = fakeContext(c, dockerfile, map[string]string{ + "Dockerfile": dockerfile, + "foo": "abc", + }) + defer ctx.Close() + + buildImage("build1", withExternalBuildContext(ctx)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "invalid from flag value 0 refers current build block", + }) +} + +func (s *DockerSuite) TestBuildCopyFromPreviousFrom(c *check.C) { + dockerfile := ` + FROM busybox + COPY foo bar` + ctx := fakeContext(c, dockerfile, map[string]string{ + "Dockerfile": dockerfile, + "foo": "abc", + }) + defer ctx.Close() + + result := buildImage("build1", withExternalBuildContext(ctx)) + result.Assert(c, icmd.Success) + + dockerfile = ` + FROM build1:latest + FROM busybox + COPY --from=0 bar / + COPY foo /` + ctx = fakeContext(c, dockerfile, map[string]string{ + "Dockerfile": dockerfile, + "foo": "def", + }) + defer ctx.Close() + + result = buildImage("build2", withExternalBuildContext(ctx)) + result.Assert(c, icmd.Success) + + out, _ := dockerCmd(c, "run", "build2", "cat", "bar") + c.Assert(strings.TrimSpace(out), check.Equals, "abc") + out, _ = dockerCmd(c, "run", "build2", "cat", "foo") + c.Assert(strings.TrimSpace(out), check.Equals, "def") +} + // TestBuildOpaqueDirectory tests that a build succeeds which // creates opaque directories. // See https://github.com/docker/docker/issues/25244 diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 45e3ad8954..f521388d1f 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -240,6 +240,38 @@ func (compression *Compression) Extension() string { return "" } +// FileInfoHeader creates a populated Header from fi. +// Compared to archive pkg this function fills in more information. 
+func FileInfoHeader(path, name string, fi os.FileInfo) (*tar.Header, error) { + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return nil, err + } + } + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return nil, err + } + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { + return nil, err + } + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + return hdr, nil +} + type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) @@ -283,26 +315,7 @@ func (ta *tarAppender) addTarFile(path, name string) error { return err } - link := "" - if fi.Mode()&os.ModeSymlink != 0 { - if link, err = os.Readlink(path); err != nil { - return err - } - } - - hdr, err := tar.FileInfoHeader(fi, link) - if err != nil { - return err - } - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - name, err = canonicalTarName(name, fi.IsDir()) - if err != nil { - return fmt.Errorf("tar: cannot canonicalize path: %v", err) - } - hdr.Name = name - - inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) + hdr, err := FileInfoHeader(path, name, fi) if err != nil { return err } @@ -310,6 +323,10 @@ func (ta *tarAppender) addTarFile(path, name string) error { // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { + inode, err := getInodeFromStat(fi.Sys()) + if err != nil { + return err + } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { @@ -321,12 +338,6 @@ func (ta *tarAppender) addTarFile(path, name string) error { } } - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability) - } - //handle re-mapping container ID mappings back to host ID mappings before //writing tar headers/files. 
We skip whiteout files because they were written //by the kernel and already have proper ownership relative to the host diff --git a/pkg/archive/archive_unix.go b/pkg/archive/archive_unix.go index 7083f2fa53..d94858887d 100644 --- a/pkg/archive/archive_unix.go +++ b/pkg/archive/archive_unix.go @@ -41,7 +41,25 @@ func chmodTarEntry(perm os.FileMode) os.FileMode { return perm // noop for unix as golang APIs provide perm bits correctly } -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + err = errors.New("cannot convert stat value to syscall.Stat_t") + return + } + + // Currently go does not fill in the major/minors + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } + + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { s, ok := stat.(*syscall.Stat_t) if !ok { @@ -51,13 +69,6 @@ func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, st inode = uint64(s.Ino) - // Currently go does not fill in the major/minors - if s.Mode&syscall.S_IFBLK != 0 || - s.Mode&syscall.S_IFCHR != 0 { - hdr.Devmajor = int64(major(uint64(s.Rdev))) - hdr.Devminor = int64(minor(uint64(s.Rdev))) - } - return } diff --git a/pkg/archive/archive_windows.go b/pkg/archive/archive_windows.go index 5c3a1be340..3d0f6277c3 100644 --- a/pkg/archive/archive_windows.go +++ b/pkg/archive/archive_windows.go @@ -49,8 +49,13 @@ func chmodTarEntry(perm os.FileMode) os.FileMode { return perm } -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { - // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + // do nothing. no notion of Rdev, Nlink in stat on Windows + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + // do nothing. no notion of Inode in stat on Windows return } diff --git a/pkg/tarsum/versioning.go b/pkg/tarsum/versioning.go index 2882286854..a62cc3ebc0 100644 --- a/pkg/tarsum/versioning.go +++ b/pkg/tarsum/versioning.go @@ -3,6 +3,7 @@ package tarsum import ( "archive/tar" "errors" + "io" "sort" "strconv" "strings" @@ -21,6 +22,13 @@ const ( VersionDev ) +// WriteV1Header writes a tar header to a writer in V1 tarsum format. +func WriteV1Header(h *tar.Header, w io.Writer) { + for _, elem := range v1TarHeaderSelect(h) { + w.Write([]byte(elem[0] + elem[1])) + } +} + // VersionLabelForChecksum returns the label for the given tarsum // checksum, i.e., everything before the first `+` character in // the string or an empty string if no label separator is found.
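For reference, the new `--from` flag on COPY takes the zero-based index of an earlier FROM block and copies from that block's committed rootfs instead of the local build context. Below is a minimal sketch in the style of the integration tests above; it is not part of the patch, and the test and image names are made up.

// A minimal sketch (not part of this patch) of the new `COPY --from=<N>`
// flag, in the style of the integration tests above. <N> is the zero-based
// index of an earlier FROM block; test/image names are illustrative only.
func (s *DockerSuite) TestBuildCopyFromMinimalSketch(c *check.C) {
	dockerfile := `
	FROM busybox
	RUN echo abc > /foo
	FROM busybox
	COPY --from=0 /foo /bar`

	ctx := fakeContext(c, dockerfile, map[string]string{"Dockerfile": dockerfile})
	defer ctx.Close()

	buildImage("copyfromsketch", withExternalBuildContext(ctx)).Assert(c, icmd.Success)

	// the file created in block 0 is copied into block 1's filesystem
	out, _ := dockerCmd(c, "run", "copyfromsketch", "cat", "/bar")
	c.Assert(strings.TrimSpace(out), check.Equals, "abc")
}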
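The new Backend.MountImage method pairs with remotecontext.NewLazyContext: the builder mounts the source image's rootfs, wraps the returned path in a lazily hashed build context, and calls the release callback once the build finishes (Builder.build does this via imageContexts.unmount). The sketch below restates that call pattern from imageContexts.context; the package and function names are illustrative, and it assumes a builder.Backend implementation such as the daemon is available.

package example

import (
	"github.com/docker/docker/builder"
	"github.com/docker/docker/builder/remotecontext"
)

// mountAsContext mounts an image rootfs and wraps it in a lazily hashed
// build context. Callers must invoke the returned release function when
// they are done with the context.
func mountAsContext(backend builder.Backend, imageID string) (builder.Context, func() error, error) {
	path, release, err := backend.MountImage(imageID)
	if err != nil {
		return nil, nil, err
	}
	ctx, err := remotecontext.NewLazyContext(path)
	if err != nil {
		release() // best effort; the mount is unusable anyway
		return nil, nil, err
	}
	return ctx, release, nil
}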
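The cache hash computed for COPY --from sources comes from remotecontext.NewFileHash: a sha256 seeded with the tarsum V1 header fields of the file (written by tarsum.WriteV1Header in Reset) and then fed the file contents, as lazyContext.prepareHash does above. A standalone sketch of that computation, with a placeholder file path:

package main

import (
	"encoding/hex"
	"fmt"
	"io"
	"os"

	"github.com/docker/docker/builder/remotecontext"
)

// fileCacheSum reproduces the per-file hash used by lazyContext.prepareHash:
// tarsum-v1 header fields first, then the file data for regular files.
func fileCacheSum(path string) (string, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return "", err
	}
	h, err := remotecontext.NewFileHash(path, fi.Name(), fi)
	if err != nil {
		return "", err
	}
	if fi.Mode().IsRegular() && fi.Size() > 0 {
		f, err := os.Open(path)
		if err != nil {
			return "", err
		}
		defer f.Close()
		if _, err := io.Copy(h, f); err != nil {
			return "", err
		}
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	fmt.Println(fileCacheSum("/etc/hostname")) // placeholder path
}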
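The shared pathCache hung off BuildManager is keyed by the source image ID concatenated with the copied path, so directory hashes computed for one --from context can be reused by later builds that go through the same BuildManager (see imageContexts.getCache/setCache). A hypothetical in-package sketch of that key scheme; the IDs and values below are made up.

package dockerfile

// examplePathCacheUse illustrates how setCache/getCache key the shared
// pathCache: image ID plus origPath, with no separator added.
func examplePathCacheUse() {
	c := &pathCache{}
	key := "sha256:0123abcd" + "baz"   // imageID + origPath, as in setCache
	c.set(key, "dir:deadbeef")         // value is the computed content hash
	if v, ok := c.get(key); ok {
		_ = v.(string)
	}
}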