Mirror of https://github.com/moby/moby.git (synced 2025-08-01 05:47:11 +03:00)
Adds ability to squash image after build
Allow built images to be squashed down to scratch. Squashing does not destroy any images or layers, and it preserves the build cache.

Introduce a new CLI argument --squash to docker build.
Introduce a new param to the build API endpoint, `squash`.

Once the build is complete, Docker creates a new image, loading the diffs from each layer into a single new layer that references all of the parent's layers.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
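For reference, a minimal sketch of how a client program could request a squashed build through the Go API after this change. It assumes an experimental daemon, the `github.com/docker/docker/client` package, and an in-memory build context; the image name `hello:squashed` is illustrative only.

package main

import (
	"archive/tar"
	"bytes"
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	// Pack a one-file build context (just a Dockerfile) into an in-memory tar.
	dockerfile := []byte("FROM busybox\nRUN echo hello > /hello\nRUN echo world >> /hello\n")
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0600, Size: int64(len(dockerfile))})
	tw.Write(dockerfile)
	tw.Close()

	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// Squash is the ImageBuildOptions field added by this commit; the daemon
	// must be running in experimental mode for it to be accepted.
	resp, err := cli.ImageBuild(context.Background(), buf, types.ImageBuildOptions{
		Tags:   []string{"hello:squashed"},
		Squash: true,
	})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // stream the JSON build progress
}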
@@ -54,6 +54,7 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
 options.NetworkMode = r.FormValue("networkmode")
 options.Tags = r.Form["t"]
 options.SecurityOpt = r.Form["securityopt"]
+options.Squash = httputils.BoolValue(r, "squash")
 
 if r.Form.Get("shmsize") != "" {
 shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64)
@@ -135,9 +135,15 @@ type Backend interface {
 // TODO: make an Extract method instead of passing `decompress`
 // TODO: do not pass a FileInfo, instead refactor the archive package to export a Walk function that can be used
 // with Context.Walk
-//ContainerCopy(name string, res string) (io.ReadCloser, error)
+// ContainerCopy(name string, res string) (io.ReadCloser, error)
 // TODO: use copyBackend api
 CopyOnBuild(containerID string, destPath string, src FileInfo, decompress bool) error
+
+// HasExperimental checks if the backend supports experimental features
+HasExperimental() bool
+
+// SquashImage squashes the fs layers from the provided image down to the specified `to` image
+SquashImage(from string, to string) (string, error)
 }
 
 // Image represents a Docker image used by the builder.
@@ -10,6 +10,7 @@ import (
 "strings"
 
 "github.com/Sirupsen/logrus"
+apierrors "github.com/docker/docker/api/errors"
 "github.com/docker/docker/api/types"
 "github.com/docker/docker/api/types/backend"
 "github.com/docker/docker/api/types/container"
@@ -18,6 +19,7 @@ import (
 "github.com/docker/docker/image"
 "github.com/docker/docker/pkg/stringid"
 "github.com/docker/docker/reference"
+perrors "github.com/pkg/errors"
 "golang.org/x/net/context"
 )
 
@@ -77,6 +79,7 @@ type Builder struct {
 id string
 
 imageCache builder.ImageCache
+from builder.Image
 }
 
 // BuildManager implements builder.Backend and is shared across all Builder objects.
@@ -91,6 +94,9 @@ func NewBuildManager(b builder.Backend) (bm *BuildManager) {
 
 // BuildFromContext builds a new image from a given context.
 func (bm *BuildManager) BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error) {
+if buildOptions.Squash && !bm.backend.HasExperimental() {
+return "", apierrors.NewBadRequestError(errors.New("squash is only supported with experimental mode"))
+}
 buildContext, dockerfileName, err := builder.DetectContextFromRemoteURL(src, remote, pg.ProgressReaderFunc)
 if err != nil {
 return "", err
@@ -100,6 +106,7 @@ func (bm *BuildManager) BuildFromContext(ctx context.Context, src io.ReadCloser,
 logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err)
 }
 }()
 
 if len(dockerfileName) > 0 {
 buildOptions.Dockerfile = dockerfileName
 }
@@ -286,6 +293,17 @@ func (b *Builder) build(stdout io.Writer, stderr io.Writer, out io.Writer) (stri
 return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
 }
 
+if b.options.Squash {
+var fromID string
+if b.from != nil {
+fromID = b.from.ImageID()
+}
+b.image, err = b.docker.SquashImage(b.image, fromID)
+if err != nil {
+return "", perrors.Wrap(err, "error squashing image")
+}
+}
+
 imageID := image.ID(b.image)
 for _, rt := range repoAndTags {
 if err := b.docker.TagImageWithReference(imageID, rt); err != nil {
@@ -221,6 +221,7 @@ func from(b *Builder, args []string, attributes map[string]bool, original string
 }
 }
 }
+b.from = image
 
 return b.processImageFrom(image)
 }
@@ -59,6 +59,7 @@ type buildOptions struct {
 compress bool
 securityOpt []string
 networkMode string
+squash bool
 }
 
 // NewBuildCommand creates a new `docker build` command
@@ -110,6 +111,10 @@ func NewBuildCommand(dockerCli *command.DockerCli) *cobra.Command {
 
 command.AddTrustedFlags(flags, true)
 
+if dockerCli.HasExperimental() {
+flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer")
+}
+
 return cmd
 }
 
@@ -305,6 +310,7 @@ func runBuild(dockerCli *command.DockerCli, options buildOptions) error {
 CacheFrom: options.cacheFrom,
 SecurityOpt: options.securityOpt,
 NetworkMode: options.networkMode,
+Squash: options.squash,
 }
 
 response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions)
@@ -74,6 +74,7 @@ type Driver struct {
 ctr *graphdriver.RefCounter
 pathCacheLock sync.Mutex
 pathCache map[string]string
+naiveDiff graphdriver.DiffDriver
 }
 
 // Init returns a new AUFS driver.
@@ -137,6 +138,8 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
 return nil, err
 }
 }
+
+a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, uidMaps, gidMaps)
 return a, nil
 }
 
@@ -225,7 +228,7 @@ func (a *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str
 defer f.Close()
 
 if parent != "" {
-ids, err := getParentIds(a.rootPath(), parent)
+ids, err := getParentIDs(a.rootPath(), parent)
 if err != nil {
 return err
 }
@@ -427,9 +430,22 @@ func (a *Driver) Put(id string) error {
 return err
 }
 
+// isParent returns if the passed in parent is the direct parent of the passed in layer
+func (a *Driver) isParent(id, parent string) bool {
+parents, _ := getParentIDs(a.rootPath(), id)
+if parent == "" && len(parents) > 0 {
+return false
+}
+return !(len(parents) > 0 && parent != parents[0])
+}
+
 // Diff produces an archive of the changes between the specified
 // layer and its parent layer which may be "".
 func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) {
+if !a.isParent(id, parent) {
+return a.naiveDiff.Diff(id, parent)
+}
+
 // AUFS doesn't need the parent layer to produce a diff.
 return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
 Compression: archive.Uncompressed,
@@ -465,6 +481,9 @@ func (a *Driver) applyDiff(id string, diff io.Reader) error {
 // and its parent and returns the size in bytes of the changes
 // relative to its base filesystem directory.
 func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
+if !a.isParent(id, parent) {
+return a.naiveDiff.DiffSize(id, parent)
+}
 // AUFS doesn't need the parent layer to calculate the diff size.
 return directory.Size(path.Join(a.rootPath(), "diff", id))
 }
@@ -473,7 +492,11 @@ func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
 // layer with the specified id and parent, returning the size of the
 // new layer in bytes.
 func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
-// AUFS doesn't need the parent id to apply the diff.
+if !a.isParent(id, parent) {
+return a.naiveDiff.ApplyDiff(id, parent, diff)
+}
+
+// AUFS doesn't need the parent id to apply the diff if it is the direct parent.
 if err = a.applyDiff(id, diff); err != nil {
 return
 }
@@ -484,6 +507,10 @@ func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err e
 // Changes produces a list of changes between the specified layer
 // and its parent layer. If parent is "", then all changes will be ADD changes.
 func (a *Driver) Changes(id, parent string) ([]archive.Change, error) {
+if !a.isParent(id, parent) {
+return a.naiveDiff.Changes(id, parent)
+}
+
 // AUFS doesn't have snapshots, so we need to get changes from all parent
 // layers.
 layers, err := a.getParentLayerPaths(id)
@@ -494,7 +521,7 @@ func (a *Driver) Changes(id, parent string) ([]archive.Change, error) {
 }
 
 func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
-parentIds, err := getParentIds(a.rootPath(), id)
+parentIds, err := getParentIDs(a.rootPath(), id)
 if err != nil {
 return nil, err
 }
@@ -424,7 +424,7 @@ func TestChanges(t *testing.T) {
 t.Fatal(err)
 }
 
-changes, err = d.Changes("3", "")
+changes, err = d.Changes("3", "2")
 if err != nil {
 t.Fatal(err)
 }
@@ -530,7 +530,7 @@ func TestChildDiffSize(t *testing.T) {
 t.Fatal(err)
 }
 
-diffSize, err = d.DiffSize("2", "")
+diffSize, err = d.DiffSize("2", "1")
 if err != nil {
 t.Fatal(err)
 }
@@ -29,7 +29,7 @@ func loadIds(root string) ([]string, error) {
 //
 // If there are no lines in the file then the id has no parent
 // and an empty slice is returned.
-func getParentIds(root, id string) ([]string, error) {
+func getParentIDs(root, id string) ([]string, error) {
 f, err := os.Open(path.Join(root, "layers", id))
 if err != nil {
 return nil, err
@@ -78,9 +78,8 @@ type ProtoDriver interface {
 Cleanup() error
 }
 
-// Driver is the interface for layered/snapshot file system drivers.
-type Driver interface {
-ProtoDriver
+// DiffDriver is the interface to use to implement graph diffs
+type DiffDriver interface {
 // Diff produces an archive of the changes between the specified
 // layer and its parent layer which may be "".
 Diff(id, parent string) (io.ReadCloser, error)
@@ -98,6 +97,12 @@ type Driver interface {
 DiffSize(id, parent string) (size int64, err error)
 }
 
+// Driver is the interface for layered/snapshot file system drivers.
+type Driver interface {
+ProtoDriver
+DiffDriver
+}
+
 // DiffGetterDriver is the interface for layered file system drivers that
 // provide a specialized function for getting file contents for tar-split.
 type DiffGetterDriver interface {
@@ -11,6 +11,7 @@ import (
 "os"
 "os/exec"
 "path"
+"path/filepath"
 "strconv"
 "strings"
 "syscall"
@@ -44,7 +45,7 @@ var (
 
 // Each container/image has at least a "diff" directory and "link" file.
 // If there is also a "lower" file when there are diff layers
 // below as well as "merged" and "work" directories. The "diff" directory
 // has the upper layer of the overlay and is used to capture any
 // changes to the layer. The "lower" file contains all the lower layer
 // mounts separated by ":" and ordered from uppermost to lowermost
@@ -86,12 +87,13 @@ type overlayOptions struct {
 
 // Driver contains information about the home directory and the list of active mounts that are created using this driver.
 type Driver struct {
 home string
 uidMaps []idtools.IDMap
 gidMaps []idtools.IDMap
 ctr *graphdriver.RefCounter
 quotaCtl *quota.Control
 options overlayOptions
+naiveDiff graphdriver.DiffDriver
 }
 
 var (
@@ -163,6 +165,8 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
 ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
 }
 
+d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps)
+
 if backingFs == "xfs" {
 // Try to enable project quota support over xfs.
 if d.quotaCtl, err = quota.NewControl(home); err == nil {
@@ -525,7 +529,7 @@ func (d *Driver) Put(id string) error {
 return nil
 }
 if err := syscall.Unmount(mountpoint, 0); err != nil {
-logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
+logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
 }
 return nil
 }
@@ -536,8 +540,33 @@ func (d *Driver) Exists(id string) bool {
 return err == nil
 }
 
+// isParent returns if the passed in parent is the direct parent of the passed in layer
+func (d *Driver) isParent(id, parent string) bool {
+lowers, err := d.getLowerDirs(id)
+if err != nil {
+return false
+}
+if parent == "" && len(lowers) > 0 {
+return false
+}
+
+parentDir := d.dir(parent)
+var ld string
+if len(lowers) > 0 {
+ld = filepath.Dir(lowers[0])
+}
+if ld == "" && parent == "" {
+return true
+}
+return ld == parentDir
+}
+
 // ApplyDiff applies the new layer into a root
 func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) {
+if !d.isParent(id, parent) {
+return d.naiveDiff.ApplyDiff(id, parent, diff)
+}
+
 applyDir := d.getDiffPath(id)
 
 logrus.Debugf("Applying tar in %s", applyDir)
@@ -563,12 +592,19 @@ func (d *Driver) getDiffPath(id string) string {
 // and its parent and returns the size in bytes of the changes
 // relative to its base filesystem directory.
 func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
+if !d.isParent(id, parent) {
+return d.naiveDiff.DiffSize(id, parent)
+}
 return directory.Size(d.getDiffPath(id))
 }
 
 // Diff produces an archive of the changes between the specified
 // layer and its parent layer which may be "".
 func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
+if !d.isParent(id, parent) {
+return d.naiveDiff.Diff(id, parent)
+}
+
 diffPath := d.getDiffPath(id)
 logrus.Debugf("Tar with options on %s", diffPath)
 return archive.TarWithOptions(diffPath, &archive.TarOptions{
@@ -582,6 +618,9 @@ func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
 // Changes produces a list of changes between the specified layer
 // and its parent layer. If parent is "", then all changes will be ADD changes.
 func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
+if !d.isParent(id, parent) {
+return d.naiveDiff.Changes(id, parent)
+}
 // Overlay doesn't have snapshots, so we need to get changes from all parent
 // layers.
 diffPath := d.getDiffPath(id)
@@ -1,9 +1,13 @@
 package daemon
 
 import (
+"encoding/json"
 "fmt"
 "path"
 "sort"
+"time"
 
+"github.com/pkg/errors"
+
 "github.com/docker/docker/api/types"
 "github.com/docker/docker/api/types/filters"
@@ -241,6 +245,89 @@ func (daemon *Daemon) Images(filterArgs, filter string, all bool, withExtraAttrs
 return images, nil
 }
 
+// SquashImage creates a new image with the diff of the specified image and the specified parent.
+// This new image contains only the layers from it's parent + 1 extra layer which contains the diff of all the layers in between.
+// The existing image(s) is not destroyed.
+// If no parent is specified, a new image with the diff of all the specified image's layers merged into a new layer that has no parents.
+func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
+img, err := daemon.imageStore.Get(image.ID(id))
+if err != nil {
+return "", err
+}
+
+var parentImg *image.Image
+var parentChainID layer.ChainID
+if len(parent) != 0 {
+parentImg, err = daemon.imageStore.Get(image.ID(parent))
+if err != nil {
+return "", errors.Wrap(err, "error getting specified parent layer")
+}
+parentChainID = parentImg.RootFS.ChainID()
+} else {
+rootFS := image.NewRootFS()
+parentImg = &image.Image{RootFS: rootFS}
+}
+
+l, err := daemon.layerStore.Get(img.RootFS.ChainID())
+if err != nil {
+return "", errors.Wrap(err, "error getting image layer")
+}
+defer daemon.layerStore.Release(l)
+
+ts, err := l.TarStreamFrom(parentChainID)
+if err != nil {
+return "", errors.Wrapf(err, "error getting tar stream to parent")
+}
+defer ts.Close()
+
+newL, err := daemon.layerStore.Register(ts, parentChainID)
+if err != nil {
+return "", errors.Wrap(err, "error registering layer")
+}
+defer daemon.layerStore.Release(newL)
+
+var newImage image.Image
+newImage = *img
+newImage.RootFS = nil
+
+var rootFS image.RootFS
+rootFS = *parentImg.RootFS
+rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID())
+newImage.RootFS = &rootFS
+
+for i, hi := range newImage.History {
+if i >= len(parentImg.History) {
+hi.EmptyLayer = true
+}
+newImage.History[i] = hi
+}
+
+now := time.Now()
+var historyComment string
+if len(parent) > 0 {
+historyComment = fmt.Sprintf("merge %s to %s", id, parent)
+} else {
+historyComment = fmt.Sprintf("create new from %s", id)
+}
+
+newImage.History = append(newImage.History, image.History{
+Created: now,
+Comment: historyComment,
+})
+newImage.Created = now
+
+b, err := json.Marshal(&newImage)
+if err != nil {
+return "", errors.Wrap(err, "error marshalling image config")
+}
+
+newImgID, err := daemon.imageStore.Create(b)
+if err != nil {
+return "", errors.Wrap(err, "error creating new image after squash")
+}
+return string(newImgID), nil
+}
+
 func newImage(image *image.Image, virtualSize int64) *types.ImageSummary {
 newImage := new(types.ImageSummary)
 newImage.ParentID = image.Parent.String()
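As a rough, standalone restatement of what SquashImage above does to the image metadata (the types and names here are simplified stand-ins, not the daemon's own): the squashed image keeps the parent's diff IDs plus one new diff ID for the combined layer, and every history entry beyond the parent's is marked as producing no layer.

package main

import "fmt"

// history mirrors the relevant bits of a Docker image history entry.
type history struct {
	CreatedBy  string
	EmptyLayer bool
}

// squashRootFS keeps the parent's diff IDs, appends one new diff ID for the
// squashed layer, and marks every history entry past the parent's as empty.
func squashRootFS(parentDiffIDs []string, newDiffID string, hist []history, parentHistLen int) ([]string, []history) {
	diffIDs := append(append([]string{}, parentDiffIDs...), newDiffID)
	out := make([]history, len(hist))
	copy(out, hist)
	for i := range out {
		if i >= parentHistLen {
			out[i].EmptyLayer = true
		}
	}
	// One trailing entry accounts for the single new squashed layer.
	out = append(out, history{CreatedBy: "squash"})
	return diffIDs, out
}

func main() {
	diffIDs, hist := squashRootFS(
		[]string{"sha256:base"}, // parent keeps its single layer
		"sha256:squashed",       // one new layer holds the combined diff
		[]history{{CreatedBy: "FROM"}, {CreatedBy: "RUN a"}, {CreatedBy: "RUN b"}},
		1, // only the FROM entry belongs to the parent
	)
	fmt.Println(diffIDs, hist)
}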
@@ -3,6 +3,7 @@ package xfer
 import (
 "bytes"
 "errors"
+"fmt"
 "io"
 "io/ioutil"
 "runtime"
@@ -31,6 +32,10 @@ func (ml *mockLayer) TarStream() (io.ReadCloser, error) {
 return ioutil.NopCloser(bytes.NewBuffer(ml.layerData.Bytes())), nil
 }
 
+func (ml *mockLayer) TarStreamFrom(layer.ChainID) (io.ReadCloser, error) {
+return nil, fmt.Errorf("not implemented")
+}
+
 func (ml *mockLayer) ChainID() layer.ChainID {
 return ml.chainID
 }
@@ -1800,6 +1800,7 @@ or being killed.
 variable expansion in other Dockerfile instructions. This is not meant for
 passing secret values. [Read more about the buildargs instruction](../../reference/builder.md#arg)
 - **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB.
+- **squash** - squash the resulting images layers into a single layer (boolean) **Experimental Only**
 - **labels** – JSON map of string pairs for labels to set on the image.
 - **networkmode** - Sets the networking mode for the run commands during
 build. Supported standard values are: `bridge`, `host`, `none`, and
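A rough sketch of exercising the documented build endpoint directly with the new `squash` query parameter. The unix-socket path and the pre-built `context.tar` file are assumptions for illustration; in practice the Go client shown earlier is the usual path.

package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"os"
)

func main() {
	// Talk to the daemon over its unix socket (assumed default location).
	httpc := http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/docker.sock")
			},
		},
	}

	ctxTar, err := os.Open("context.tar") // tar containing the Dockerfile
	if err != nil {
		panic(err)
	}
	defer ctxTar.Close()

	// POST the build context to /build with squash=1 (accepted only on an
	// experimental daemon, as the API docs above note).
	resp, err := httpc.Post(
		"http://docker/build?t=hello:squashed&squash=1",
		"application/x-tar",
		ctxTar,
	)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // JSON progress stream
	fmt.Println()
}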
@@ -54,6 +54,7 @@ Options:
 The format is `<number><unit>`. `number` must be greater than `0`.
 Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes),
 or `g` (gigabytes). If you omit the unit, the system uses bytes.
+--squash Squash newly built layers into a single new layer (**Experimental Only**)
 -t, --tag value Name and optionally a tag in the 'name:tag' format (default [])
 --ulimit value Ulimit options (default [])
 ```
@@ -432,3 +433,20 @@ Linux namespaces. On Microsoft Windows, you can specify these values:
 | `hyperv` | Hyper-V hypervisor partition-based isolation. |
 
 Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`.
+
+
+### Squash an image's layers (--squash) **Experimental Only**
+
+Once the image is built, squash the new layers into a new image with a single
+new layer. Squashing does not destroy any existing image, rather it creates a new
+image with the content of the squshed layers. This effectively makes it look
+like all `Dockerfile` commands were created with a single layer. The build
+cache is preserved with this method.
+
+**Note**: using this option means the new image will not be able to take
+advantage of layer sharing with other images and may use significantly more
+space.
+
+**Note**: using this option you may see significantly more space used due to
+storing two copies of the image, one for the build cache with all the cache
+layers in tact, and one for the squashed version.
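The layer count is the quickest way to confirm a squash took effect; the new integration test below checks exactly this via `len .RootFS.Layers`. A small sketch of the same check through the Go client, with an illustrative image name; the exact client signature is an assumption based on current releases.

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// A squashed image should report the parent's layers plus exactly one new one.
	inspect, _, err := cli.ImageInspectWithRaw(context.Background(), "hello:squashed")
	if err != nil {
		panic(err)
	}
	fmt.Println("layers:", len(inspect.RootFS.Layers))
}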
@@ -7195,3 +7195,44 @@ RUN ["cat", "/foo/file"]
 c.Fatal(err)
 }
 }
+
+func (s *DockerSuite) TestBuildSquashParent(c *check.C) {
+testRequires(c, ExperimentalDaemon)
+dockerFile := `
+FROM busybox
+RUN echo hello > /hello
+RUN echo world >> /hello
+RUN echo hello > /remove_me
+ENV HELLO world
+RUN rm /remove_me
+`
+// build and get the ID that we can use later for history comparison
+origID, err := buildImage("test", dockerFile, false)
+c.Assert(err, checker.IsNil)
+
+// build with squash
+id, err := buildImage("test", dockerFile, true, "--squash")
+c.Assert(err, checker.IsNil)
+
+out, _ := dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "cat /hello")
+c.Assert(strings.TrimSpace(out), checker.Equals, "hello\nworld")
+
+dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "[ ! -f /remove_me ]")
+dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", `[ "$(echo $HELLO)" == "world" ]`)
+
+// make sure the ID produced is the ID of the tag we specified
+inspectID, err := inspectImage("test", ".ID")
+c.Assert(err, checker.IsNil)
+c.Assert(inspectID, checker.Equals, id)
+
+origHistory, _ := dockerCmd(c, "history", origID)
+testHistory, _ := dockerCmd(c, "history", "test")
+
+splitOrigHistory := strings.Split(strings.TrimSpace(origHistory), "\n")
+splitTestHistory := strings.Split(strings.TrimSpace(testHistory), "\n")
+c.Assert(len(splitTestHistory), checker.Equals, len(splitOrigHistory)+1)
+
+out, err = inspectImage(id, "len .RootFS.Layers")
+c.Assert(err, checker.IsNil)
+c.Assert(strings.TrimSpace(out), checker.Equals, "3")
+}
@@ -3,6 +3,7 @@ package layer
 import (
 "archive/tar"
 "bytes"
+"fmt"
 "io"
 "io/ioutil"
 )
@@ -23,6 +24,10 @@ func (el *emptyLayer) TarStream() (io.ReadCloser, error) {
 return ioutil.NopCloser(buf), nil
 }
 
+func (el *emptyLayer) TarStreamFrom(ChainID) (io.ReadCloser, error) {
+return nil, fmt.Errorf("can't get parent tar stream of an empty layer")
+}
+
 func (el *emptyLayer) ChainID() ChainID {
 return ChainID(DigestSHA256EmptyTar)
 }
@@ -78,6 +78,9 @@ type TarStreamer interface {
 // TarStream returns a tar archive stream
 // for the contents of a layer.
 TarStream() (io.ReadCloser, error)
+// TarStreamFrom returns a tar archive stream for all the layer chain with
+// arbitrary depth.
+TarStreamFrom(ChainID) (io.ReadCloser, error)
 }
 
 // Layer represents a read-only layer
@@ -1,6 +1,7 @@
 package layer
 
 import (
+"fmt"
 "io"
 
 "github.com/docker/docker/pkg/archive"
@@ -28,11 +29,14 @@ func (ml *mountedLayer) cacheParent() string {
 }
 
 func (ml *mountedLayer) TarStream() (io.ReadCloser, error) {
-archiver, err := ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent())
-if err != nil {
-return nil, err
-}
-return archiver, nil
+return ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent())
+}
+
+func (ml *mountedLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) {
+// Not supported since this will include the init layer as well
+// This can already be acheived with mount + tar.
+// Should probably never reach this point, but error out here.
+return nil, fmt.Errorf("getting a layer diff from an arbitrary parent is not supported on mounted layer")
 }
 
 func (ml *mountedLayer) Name() string {
@@ -21,6 +21,8 @@ type roLayer struct {
 references map[Layer]struct{}
 }
 
+// TarStream for roLayer guarentees that the data that is produced is the exact
+// data that the layer was registered with.
 func (rl *roLayer) TarStream() (io.ReadCloser, error) {
 r, err := rl.layerStore.store.TarSplitReader(rl.chainID)
 if err != nil {
@@ -43,6 +45,24 @@ func (rl *roLayer) TarStream() (io.ReadCloser, error) {
 return rc, nil
 }
 
+// TarStreamFrom does not make any guarentees to the correctness of the produced
+// data. As such it should not be used when the layer content must be verified
+// to be an exact match to the registered layer.
+func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) {
+var parentCacheID string
+for pl := rl.parent; pl != nil; pl = pl.parent {
+if pl.chainID == parent {
+parentCacheID = pl.cacheID
+break
+}
+}
+
+if parent != ChainID("") && parentCacheID == "" {
+return nil, fmt.Errorf("layer ID '%s' is not a parent of the specified layer: cannot provide diff to non-parent", parent)
+}
+return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID)
+}
+
 func (rl *roLayer) ChainID() ChainID {
 return rl.chainID
 }
@@ -11,6 +11,7 @@ docker-build - Build a new image from the source code at PATH
 [**--cgroup-parent**[=*CGROUP-PARENT*]]
 [**--help**]
 [**-f**|**--file**[=*PATH/Dockerfile*]]
+[**-squash**] *Experimental*
 [**--force-rm**]
 [**--isolation**[=*default*]]
 [**--label**[=*[]*]]
@@ -57,6 +58,22 @@ set as the **URL**, the repository is cloned locally and then sent as the contex
 the remote context. In all cases, the file must be within the build context.
 The default is *Dockerfile*.
 
+**--squash**=*true*|*false*
+**Experimental Only**
+Once the image is built, squash the new layers into a new image with a single
+new layer. Squashing does not destroy any existing image, rather it creates a new
+image with the content of the squshed layers. This effectively makes it look
+like all `Dockerfile` commands were created with a single layer. The build
+cache is preserved with this method.
+
+**Note**: using this option means the new image will not be able to take
+advantage of layer sharing with other images and may use significantly more
+space.
+
+**Note**: using this option you may see significantly more space used due to
+storing two copies of the image, one for the build cache with all the cache
+layers in tact, and one for the squashed version.
+
 **--build-arg**=*variable*
 name and value of a **buildarg**.
 
@@ -406,6 +406,9 @@ type mockLayer struct {
 func (l *mockLayer) TarStream() (io.ReadCloser, error) {
 return nil, nil
 }
+func (l *mockLayer) TarStreamFrom(layer.ChainID) (io.ReadCloser, error) {
+return nil, nil
+}
 
 func (l *mockLayer) ChainID() layer.ChainID {
 return layer.CreateChainID(l.diffIDs)