Mirror of https://github.com/docker/cli.git (synced 2026-01-26 15:41:42 +03:00)
vendor: github.com/moby/moby/api master, moby/client master
Signed-off-by: Austin Vazquez <austin.vazquez@docker.com>
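A minimal standalone sketch of the test-helper pattern this bump enables (the type alias below is hypothetical; the diff suggests the new moby/client export, logs, load, and save result types satisfy io.ReadCloser directly, so test fakes no longer need to set unexported fields via reflect and unsafe):

package main

import (
    "fmt"
    "io"
    "strings"
)

// containerExportResult stands in for the new client result type; it is
// assumed here to be an io.ReadCloser purely for illustration.
type containerExportResult = io.ReadCloser

// mockContainerExportResult builds a fake export stream for tests.
// A plain io.NopCloser over an in-memory reader is now sufficient.
func mockContainerExportResult(content string) containerExportResult {
    return io.NopCloser(strings.NewReader(content))
}

func main() {
    rc := mockContainerExportResult("fake tarball bytes")
    defer rc.Close()
    data, _ := io.ReadAll(rc)
    fmt.Printf("read %d bytes from the mock export\n", len(data))
}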
@@ -3,33 +3,18 @@ package container
import (
"context"
"io"
"reflect"
"net/http"
"strings"
"unsafe"

"github.com/moby/moby/client"
)

func mockContainerExportResult(content string) client.ContainerExportResult {
out := client.ContainerExportResult{}

// Set unexported field "rc"
v := reflect.ValueOf(&out).Elem()
f := v.FieldByName("rc")
r := io.NopCloser(strings.NewReader(content))
reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Elem().Set(reflect.ValueOf(r))
return out
return io.NopCloser(strings.NewReader(content))
}

func mockContainerLogsResult(content string) client.ContainerLogsResult {
out := client.ContainerLogsResult{}

// Set unexported field "rc"
v := reflect.ValueOf(&out).Elem()
f := v.FieldByName("rc")
r := io.NopCloser(strings.NewReader(content))
reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Elem().Set(reflect.ValueOf(r))
return out
return io.NopCloser(strings.NewReader(content))
}

type fakeStreamResult struct {
@@ -147,7 +132,7 @@ func (f *fakeClient) ContainerLogs(_ context.Context, containerID string, option
if f.logFunc != nil {
return f.logFunc(containerID, options)
}
return client.ContainerLogsResult{}, nil
return http.NoBody, nil
}

func (f *fakeClient) ClientVersion() string {
@@ -172,7 +157,7 @@ func (f *fakeClient) ContainerExport(_ context.Context, containerID string, _ cl
if f.containerExportFunc != nil {
return f.containerExportFunc(containerID)
}
return client.ContainerExportResult{}, nil
return http.NoBody, nil
}

func (f *fakeClient) ExecResize(_ context.Context, id string, options client.ExecResizeOptions) (client.ExecResizeResult, error) {
@@ -189,7 +174,7 @@ func (f *fakeClient) ContainerKill(ctx context.Context, containerID string, opti
return client.ContainerKillResult{}, nil
}

func (f *fakeClient) ContainersPrune(ctx context.Context, options client.ContainerPruneOptions) (client.ContainerPruneResult, error) {
func (f *fakeClient) ContainerPrune(ctx context.Context, options client.ContainerPruneOptions) (client.ContainerPruneResult, error) {
if f.containerPruneFunc != nil {
return f.containerPruneFunc(ctx, options)
}
@@ -101,7 +101,7 @@ func RunExec(ctx context.Context, dockerCLI command.Cli, containerIDorName strin
return err
}
if !options.Detach {
if err := dockerCLI.In().CheckTty(execOptions.AttachStdin, execOptions.Tty); err != nil {
if err := dockerCLI.In().CheckTty(execOptions.AttachStdin, execOptions.TTY); err != nil {
return err
}
}
@@ -119,17 +119,10 @@ func RunExec(ctx context.Context, dockerCLI command.Cli, containerIDorName strin
}

if options.Detach {
var cs client.ConsoleSize
if execOptions.ConsoleSize != nil {
cs = client.ConsoleSize{
Height: execOptions.ConsoleSize[0],
Width: execOptions.ConsoleSize[1],
}
}
_, err := apiClient.ExecStart(ctx, execID, client.ExecStartOptions{
Detach: options.Detach,
TTY: execOptions.Tty,
ConsoleSize: cs,
TTY: execOptions.TTY,
ConsoleSize: client.ConsoleSize{Height: execOptions.ConsoleSize.Height, Width: execOptions.ConsoleSize.Width},
})
return err
}
@@ -137,9 +130,9 @@ func RunExec(ctx context.Context, dockerCLI command.Cli, containerIDorName strin
}

func fillConsoleSize(execOptions *client.ExecCreateOptions, dockerCli command.Cli) {
if execOptions.Tty {
if execOptions.TTY {
height, width := dockerCli.Out().GetTtySize()
execOptions.ConsoleSize = &[2]uint{height, width}
execOptions.ConsoleSize = client.ConsoleSize{Height: height, Width: width}
}
}

@@ -157,7 +150,7 @@ func interactiveExec(ctx context.Context, dockerCli command.Cli, execOptions *cl
out = dockerCli.Out()
}
if execOptions.AttachStderr {
if execOptions.Tty {
if execOptions.TTY {
stderr = dockerCli.Out()
} else {
stderr = dockerCli.Err()
@@ -166,16 +159,9 @@ func interactiveExec(ctx context.Context, dockerCli command.Cli, execOptions *cl
fillConsoleSize(execOptions, dockerCli)

apiClient := dockerCli.Client()
var cs client.ConsoleSize
if execOptions.ConsoleSize != nil {
cs = client.ConsoleSize{
Height: execOptions.ConsoleSize[0],
Width: execOptions.ConsoleSize[1],
}
}
resp, err := apiClient.ExecAttach(ctx, execID, client.ExecAttachOptions{
TTY: execOptions.Tty,
ConsoleSize: cs,
TTY: execOptions.TTY,
ConsoleSize: client.ConsoleSize{Height: execOptions.ConsoleSize.Height, Width: execOptions.ConsoleSize.Width},
})
if err != nil {
return err
@@ -193,7 +179,7 @@ func interactiveExec(ctx context.Context, dockerCli command.Cli, execOptions *cl
outputStream: out,
errorStream: stderr,
resp: resp.HijackedResponse,
tty: execOptions.Tty,
tty: execOptions.TTY,
detachKeys: execOptions.DetachKeys,
}

@@ -201,7 +187,7 @@ func interactiveExec(ctx context.Context, dockerCli command.Cli, execOptions *cl
}()
}()

if execOptions.Tty && dockerCli.In().IsTerminal() {
if execOptions.TTY && dockerCli.In().IsTerminal() {
if err := MonitorTtySize(ctx, dockerCli, execID, true); err != nil {
_, _ = fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err)
}
@@ -237,7 +223,7 @@ func parseExec(execOpts ExecOptions, configFile *configfile.ConfigFile) (*client
execOptions := &client.ExecCreateOptions{
User: execOpts.User,
Privileged: execOpts.Privileged,
Tty: execOpts.TTY,
TTY: execOpts.TTY,
Cmd: execOpts.Command,
WorkingDir: execOpts.Workdir,
}

@@ -69,7 +69,7 @@ TWO=2
AttachStdin: true,
AttachStdout: true,
AttachStderr: true,
Tty: true,
TTY: true,
Cmd: []string{"command"},
},
},
@@ -86,7 +86,7 @@ TWO=2
Detach: true,
}),
expected: client.ExecCreateOptions{
Tty: true,
TTY: true,
Cmd: []string{"command"},
},
},
@@ -75,7 +75,7 @@ func runPrune(ctx context.Context, dockerCli command.Cli, options pruneOptions)
}
}

res, err := dockerCli.Client().ContainersPrune(ctx, client.ContainerPruneOptions{
res, err := dockerCli.Client().ContainerPrune(ctx, client.ContainerPruneOptions{
Filters: pruneFilters,
})
if err != nil {

@@ -51,7 +51,7 @@ shared: {{.Shared}}
return Format(source)
}

func buildCacheSort(buildCache []*build.CacheRecord) {
func buildCacheSort(buildCache []build.CacheRecord) {
sort.Slice(buildCache, func(i, j int) bool {
lui, luj := buildCache[i].LastUsedAt, buildCache[j].LastUsedAt
switch {
@@ -70,7 +70,7 @@ func buildCacheSort(buildCache []*build.CacheRecord) {
}

// BuildCacheWrite renders the context for a list of containers
func BuildCacheWrite(ctx Context, buildCaches []*build.CacheRecord) error {
func BuildCacheWrite(ctx Context, buildCaches []build.CacheRecord) error {
render := func(format func(subContext SubContext) error) error {
buildCacheSort(buildCaches)
for _, bc := range buildCaches {
@@ -87,7 +87,7 @@ func BuildCacheWrite(ctx Context, buildCaches []*build.CacheRecord) error {
type buildCacheContext struct {
HeaderContext
trunc bool
v *build.CacheRecord
v build.CacheRecord
}

func newBuildCacheContext() *buildCacheContext {

@@ -12,6 +12,7 @@ import (
"github.com/moby/moby/api/types/container"
"github.com/moby/moby/api/types/image"
"github.com/moby/moby/api/types/volume"
"github.com/moby/moby/client"
)

const (
@@ -33,13 +34,12 @@ const (
// DiskUsageContext contains disk usage specific information required by the formatter, encapsulate a Context struct.
type DiskUsageContext struct {
Context
Verbose bool
LayersSize int64
Images []*image.Summary
Containers []*container.Summary
Volumes []*volume.Volume
BuildCache []*build.CacheRecord
BuilderSize int64
Verbose bool

ImageDiskUsage client.ImagesDiskUsage
BuildCacheDiskUsage client.BuildCacheDiskUsage
ContainerDiskUsage client.ContainersDiskUsage
VolumeDiskUsage client.VolumesDiskUsage
}

func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template, error) {
@@ -96,35 +96,49 @@ func (ctx *DiskUsageContext) Write() (err error) {
}

err = ctx.contextFormat(tmpl, &diskUsageImagesContext{
totalSize: ctx.LayersSize,
images: ctx.Images,
totalCount: ctx.ImageDiskUsage.TotalImages,
activeCount: ctx.ImageDiskUsage.ActiveImages,
totalSize: ctx.ImageDiskUsage.TotalSize,
reclaimable: ctx.ImageDiskUsage.Reclaimable,
images: ctx.ImageDiskUsage.Items,
})
if err != nil {
return err
}
err = ctx.contextFormat(tmpl, &diskUsageContainersContext{
containers: ctx.Containers,
totalCount: ctx.ContainerDiskUsage.TotalContainers,
activeCount: ctx.ContainerDiskUsage.ActiveContainers,
totalSize: ctx.ContainerDiskUsage.TotalSize,
reclaimable: ctx.ContainerDiskUsage.Reclaimable,
containers: ctx.ContainerDiskUsage.Items,
})
if err != nil {
return err
}

err = ctx.contextFormat(tmpl, &diskUsageVolumesContext{
volumes: ctx.Volumes,
totalCount: ctx.VolumeDiskUsage.TotalVolumes,
activeCount: ctx.VolumeDiskUsage.ActiveVolumes,
totalSize: ctx.VolumeDiskUsage.TotalSize,
reclaimable: ctx.VolumeDiskUsage.Reclaimable,
volumes: ctx.VolumeDiskUsage.Items,
})
if err != nil {
return err
}

err = ctx.contextFormat(tmpl, &diskUsageBuilderContext{
builderSize: ctx.BuilderSize,
buildCache: ctx.BuildCache,
totalCount: ctx.BuildCacheDiskUsage.TotalBuildCacheRecords,
activeCount: ctx.BuildCacheDiskUsage.ActiveBuildCacheRecords,
builderSize: ctx.BuildCacheDiskUsage.TotalSize,
reclaimable: ctx.BuildCacheDiskUsage.Reclaimable,
buildCache: ctx.BuildCacheDiskUsage.Items,
})
if err != nil {
return err
}

diskUsageContainersCtx := diskUsageContainersContext{containers: []*container.Summary{}}
diskUsageContainersCtx := diskUsageContainersContext{containers: []container.Summary{}}
diskUsageContainersCtx.Header = SubHeaderContext{
"Type": typeHeader,
"TotalCount": totalHeader,
@@ -146,18 +160,18 @@ type diskUsageContext struct {

func (ctx *DiskUsageContext) verboseWrite() error {
duc := &diskUsageContext{
Images: make([]*imageContext, 0, len(ctx.Images)),
Containers: make([]*ContainerContext, 0, len(ctx.Containers)),
Volumes: make([]*volumeContext, 0, len(ctx.Volumes)),
BuildCache: make([]*buildCacheContext, 0, len(ctx.BuildCache)),
Images: make([]*imageContext, 0, len(ctx.ImageDiskUsage.Items)),
Containers: make([]*ContainerContext, 0, len(ctx.ContainerDiskUsage.Items)),
Volumes: make([]*volumeContext, 0, len(ctx.VolumeDiskUsage.Items)),
BuildCache: make([]*buildCacheContext, 0, len(ctx.BuildCacheDiskUsage.Items)),
}
trunc := ctx.Format.IsTable()

// First images
for _, i := range ctx.Images {
for _, i := range ctx.ImageDiskUsage.Items {
repo := "<none>"
tag := "<none>"
if len(i.RepoTags) > 0 && !isDangling(*i) {
if len(i.RepoTags) > 0 && !isDangling(i) {
// Only show the first tag
ref, err := reference.ParseNormalizedNamed(i.RepoTags[0])
if err != nil {
@@ -173,25 +187,25 @@ func (ctx *DiskUsageContext) verboseWrite() error {
repo: repo,
tag: tag,
trunc: trunc,
i: *i,
i: i,
})
}

// Now containers
for _, c := range ctx.Containers {
for _, c := range ctx.ContainerDiskUsage.Items {
// Don't display the virtual size
c.SizeRootFs = 0
duc.Containers = append(duc.Containers, &ContainerContext{trunc: trunc, c: *c})
duc.Containers = append(duc.Containers, &ContainerContext{trunc: trunc, c: c})
}

// And volumes
for _, v := range ctx.Volumes {
duc.Volumes = append(duc.Volumes, &volumeContext{v: *v})
for _, v := range ctx.VolumeDiskUsage.Items {
duc.Volumes = append(duc.Volumes, &volumeContext{v: v})
}

// And build cache
buildCacheSort(ctx.BuildCache)
for _, v := range ctx.BuildCache {
buildCacheSort(ctx.BuildCacheDiskUsage.Items)
for _, v := range ctx.BuildCacheDiskUsage.Items {
duc.BuildCache = append(duc.BuildCache, &buildCacheContext{v: v, trunc: trunc})
}
@@ -248,7 +262,7 @@ func (ctx *DiskUsageContext) verboseWriteTable(duc *diskUsageContext) error {
if err != nil {
return err
}
_, _ = fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuilderSize)))
_, _ = fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuildCacheDiskUsage.TotalSize)))
for _, v := range duc.BuildCache {
if err := ctx.contextFormat(tmpl, v); err != nil {
return err
@@ -261,8 +275,11 @@ func (ctx *DiskUsageContext) verboseWriteTable(duc *diskUsageContext) error {

type diskUsageImagesContext struct {
HeaderContext
totalSize int64
images []*image.Summary
totalSize int64
reclaimable int64
totalCount int64
activeCount int64
images []image.Summary
}

func (c *diskUsageImagesContext) MarshalJSON() ([]byte, error) {
@@ -274,18 +291,11 @@ func (*diskUsageImagesContext) Type() string {
}

func (c *diskUsageImagesContext) TotalCount() string {
return strconv.Itoa(len(c.images))
return strconv.FormatInt(c.totalCount, 10)
}

func (c *diskUsageImagesContext) Active() string {
used := 0
for _, i := range c.images {
if i.Containers > 0 {
used++
}
}

return strconv.Itoa(used)
return strconv.FormatInt(c.activeCount, 10)
}

func (c *diskUsageImagesContext) Size() string {
@@ -293,27 +303,19 @@ func (c *diskUsageImagesContext) Size() string {
}

func (c *diskUsageImagesContext) Reclaimable() string {
var used int64

for _, i := range c.images {
if i.Containers != 0 {
if i.Size == -1 || i.SharedSize == -1 {
continue
}
used += i.Size - i.SharedSize
}
}

reclaimable := c.totalSize - used
if c.totalSize > 0 {
return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/c.totalSize)
return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(c.reclaimable)), (c.reclaimable*100)/c.totalSize)
}
return units.HumanSize(float64(reclaimable))
return units.HumanSize(float64(c.reclaimable))
}

type diskUsageContainersContext struct {
HeaderContext
containers []*container.Summary
totalCount int64
activeCount int64
totalSize int64
reclaimable int64
containers []container.Summary
}

func (c *diskUsageContainersContext) MarshalJSON() ([]byte, error) {
@@ -325,62 +327,32 @@ func (*diskUsageContainersContext) Type() string {
}

func (c *diskUsageContainersContext) TotalCount() string {
return strconv.Itoa(len(c.containers))
}

func (*diskUsageContainersContext) isActive(ctr container.Summary) bool {
switch ctr.State {
case container.StateRunning, container.StatePaused, container.StateRestarting:
return true
case container.StateCreated, container.StateRemoving, container.StateExited, container.StateDead:
return false
default:
// Unknown state (should never happen).
return false
}
return strconv.FormatInt(c.totalCount, 10)
}

func (c *diskUsageContainersContext) Active() string {
used := 0
for _, ctr := range c.containers {
if c.isActive(*ctr) {
used++
}
}

return strconv.Itoa(used)
return strconv.FormatInt(c.activeCount, 10)
}

func (c *diskUsageContainersContext) Size() string {
var size int64

for _, ctr := range c.containers {
size += ctr.SizeRw
}

return units.HumanSize(float64(size))
return units.HumanSize(float64(c.totalSize))
}

func (c *diskUsageContainersContext) Reclaimable() string {
var reclaimable, totalSize int64

for _, ctr := range c.containers {
if !c.isActive(*ctr) {
reclaimable += ctr.SizeRw
}
totalSize += ctr.SizeRw
if c.totalSize > 0 {
return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(c.reclaimable)), (c.reclaimable*100)/c.totalSize)
}

if totalSize > 0 {
return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize)
}

return units.HumanSize(float64(reclaimable))
return units.HumanSize(float64(c.reclaimable))
}

type diskUsageVolumesContext struct {
HeaderContext
volumes []*volume.Volume
totalCount int64
activeCount int64
totalSize int64
reclaimable int64
volumes []volume.Volume
}

func (c *diskUsageVolumesContext) MarshalJSON() ([]byte, error) {
@@ -392,56 +364,32 @@ func (*diskUsageVolumesContext) Type() string {
}

func (c *diskUsageVolumesContext) TotalCount() string {
return strconv.Itoa(len(c.volumes))
return strconv.FormatInt(c.totalCount, 10)
}

func (c *diskUsageVolumesContext) Active() string {
used := 0
for _, v := range c.volumes {
if v.UsageData.RefCount > 0 {
used++
}
}

return strconv.Itoa(used)
return strconv.FormatInt(c.activeCount, 10)
}

func (c *diskUsageVolumesContext) Size() string {
var size int64

for _, v := range c.volumes {
if v.UsageData.Size != -1 {
size += v.UsageData.Size
}
}

return units.HumanSize(float64(size))
return units.HumanSize(float64(c.totalSize))
}

func (c *diskUsageVolumesContext) Reclaimable() string {
var reclaimable int64
var totalSize int64

for _, v := range c.volumes {
if v.UsageData.Size != -1 {
if v.UsageData.RefCount == 0 {
reclaimable += v.UsageData.Size
}
totalSize += v.UsageData.Size
}
if c.totalSize > 0 {
return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(c.reclaimable)), (c.reclaimable*100)/c.totalSize)
}

if totalSize > 0 {
return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize)
}

return units.HumanSize(float64(reclaimable))
return units.HumanSize(float64(c.reclaimable))
}

type diskUsageBuilderContext struct {
HeaderContext
totalCount int64
activeCount int64
builderSize int64
buildCache []*build.CacheRecord
reclaimable int64
buildCache []build.CacheRecord
}

func (c *diskUsageBuilderContext) MarshalJSON() ([]byte, error) {
@@ -453,17 +401,11 @@ func (*diskUsageBuilderContext) Type() string {
}

func (c *diskUsageBuilderContext) TotalCount() string {
return strconv.Itoa(len(c.buildCache))
return strconv.FormatInt(c.totalCount, 10)
}

func (c *diskUsageBuilderContext) Active() string {
numActive := 0
for _, bc := range c.buildCache {
if bc.InUse {
numActive++
}
}
return strconv.Itoa(numActive)
return strconv.FormatInt(c.activeCount, 10)
}

func (c *diskUsageBuilderContext) Size() string {
@@ -471,12 +413,5 @@ func (c *diskUsageBuilderContext) Size() string {
}

func (c *diskUsageBuilderContext) Reclaimable() string {
var inUseBytes int64
for _, bc := range c.buildCache {
if bc.InUse && !bc.Shared {
inUseBytes += bc.Size
}
}

return units.HumanSize(float64(c.builderSize - inUseBytes))
return units.HumanSize(float64(c.reclaimable))
}
@@ -19,7 +19,7 @@ type fakeClient struct {
imagePushFunc func(ref string, options client.ImagePushOptions) (client.ImagePushResponse, error)
infoFunc func() (client.SystemInfoResult, error)
imagePullFunc func(ref string, options client.ImagePullOptions) (client.ImagePullResponse, error)
imagesPruneFunc func(options client.ImagePruneOptions) (client.ImagePruneResult, error)
imagePruneFunc func(options client.ImagePruneOptions) (client.ImagePruneResult, error)
imageLoadFunc func(input io.Reader, options ...client.ImageLoadOption) (client.ImageLoadResult, error)
imageListFunc func(options client.ImageListOptions) (client.ImageListResult, error)
imageInspectFunc func(img string) (client.ImageInspectResult, error)
@@ -47,7 +47,7 @@ func (cli *fakeClient) ImageSave(_ context.Context, images []string, options ...
if cli.imageSaveFunc != nil {
return cli.imageSaveFunc(images, options...)
}
return client.ImageSaveResult{}, nil
return http.NoBody, nil
}

func (cli *fakeClient) ImageRemove(_ context.Context, img string, options client.ImageRemoveOptions) (client.ImageRemoveResult, error) {
@@ -80,9 +80,9 @@ func (cli *fakeClient) ImagePull(_ context.Context, ref string, options client.I
return fakeStreamResult{ReadCloser: http.NoBody}, nil
}

func (cli *fakeClient) ImagesPrune(_ context.Context, opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
if cli.imagesPruneFunc != nil {
return cli.imagesPruneFunc(opts)
func (cli *fakeClient) ImagePrune(_ context.Context, opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
if cli.imagePruneFunc != nil {
return cli.imagePruneFunc(opts)
}
return client.ImagePruneResult{}, nil
}
@@ -91,7 +91,7 @@ func (cli *fakeClient) ImageLoad(_ context.Context, input io.Reader, options ...
if cli.imageLoadFunc != nil {
return cli.imageLoadFunc(input, options...)
}
return client.ImageLoadResult{}, nil
return http.NoBody, nil
}

func (cli *fakeClient) ImageList(_ context.Context, options client.ImageListOptions) (client.ImageListResult, error) {
@@ -112,7 +112,7 @@ func (cli *fakeClient) ImageImport(_ context.Context, source client.ImageImportS
if cli.imageImportFunc != nil {
return cli.imageImportFunc(source, ref, options)
}
return client.ImageImportResult{}, nil
return http.NoBody, nil
}

func (cli *fakeClient) ImageHistory(_ context.Context, img string, options ...client.ImageHistoryOption) (client.ImageHistoryResult, error) {

@@ -3,6 +3,7 @@ package image
import (
"errors"
"io"
"strings"
"testing"

"github.com/docker/cli/internal/test"
@@ -28,7 +29,7 @@ func TestNewImportCommandErrors(t *testing.T) {
args: []string{"testdata/import-command-success.input.txt"},
expectedError: "something went wrong",
imageImportFunc: func(source client.ImageImportSource, ref string, options client.ImageImportOptions) (client.ImageImportResult, error) {
return client.ImageImportResult{}, errors.New("something went wrong")
return nil, errors.New("something went wrong")
},
},
}
@@ -68,7 +69,7 @@ func TestNewImportCommandSuccess(t *testing.T) {
args: []string{"-", "image:local"},
imageImportFunc: func(source client.ImageImportSource, ref string, options client.ImageImportOptions) (client.ImageImportResult, error) {
assert.Check(t, is.Equal("image:local", ref))
return client.ImageImportResult{}, nil
return io.NopCloser(strings.NewReader("")), nil
},
},
{
@@ -76,7 +77,7 @@ func TestNewImportCommandSuccess(t *testing.T) {
args: []string{"--message", "test message", "-"},
imageImportFunc: func(source client.ImageImportSource, ref string, options client.ImageImportOptions) (client.ImageImportResult, error) {
assert.Check(t, is.Equal("test message", options.Message))
return client.ImageImportResult{}, nil
return io.NopCloser(strings.NewReader("")), nil
},
},
{
@@ -84,7 +85,7 @@ func TestNewImportCommandSuccess(t *testing.T) {
args: []string{"--change", "ENV DEBUG=true", "-"},
imageImportFunc: func(source client.ImageImportSource, ref string, options client.ImageImportOptions) (client.ImageImportResult, error) {
assert.Check(t, is.Equal("ENV DEBUG=true", options.Changes[0]))
return client.ImageImportResult{}, nil
return io.NopCloser(strings.NewReader("")), nil
},
},
{
@@ -92,7 +93,7 @@ func TestNewImportCommandSuccess(t *testing.T) {
args: []string{"--change", "ENV DEBUG true", "-"},
imageImportFunc: func(source client.ImageImportSource, ref string, options client.ImageImportOptions) (client.ImageImportResult, error) {
assert.Check(t, is.Equal("ENV DEBUG true", options.Changes[0]))
return client.ImageImportResult{}, nil
return io.NopCloser(strings.NewReader("")), nil
},
},
}
@@ -4,10 +4,8 @@ import (
"errors"
"fmt"
"io"
"reflect"
"strings"
"testing"
"unsafe"

"github.com/docker/cli/internal/test"
"github.com/moby/moby/client"
@@ -39,7 +37,7 @@ func TestNewLoadCommandErrors(t *testing.T) {
args: []string{},
expectedError: "something went wrong",
imageLoadFunc: func(input io.Reader, options ...client.ImageLoadOption) (client.ImageLoadResult, error) {
return client.ImageLoadResult{}, errors.New("something went wrong")
return nil, errors.New("something went wrong")
},
},
{
@@ -47,7 +45,7 @@ func TestNewLoadCommandErrors(t *testing.T) {
args: []string{"--platform", "<invalid>"},
expectedError: `invalid platform`,
imageLoadFunc: func(input io.Reader, options ...client.ImageLoadOption) (client.ImageLoadResult, error) {
return client.ImageLoadResult{}, nil
return io.NopCloser(strings.NewReader("")), nil
},
},
}
@@ -75,14 +73,7 @@ func TestNewLoadCommandInvalidInput(t *testing.T) {
}

func mockImageLoadResult(content string) client.ImageLoadResult {
out := client.ImageLoadResult{}

// Set unexported field "body"
v := reflect.ValueOf(&out).Elem()
f := v.FieldByName("body")
r := io.NopCloser(strings.NewReader(content))
reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Elem().Set(reflect.ValueOf(r))
return out
return io.NopCloser(strings.NewReader(content))
}

func TestNewLoadCommandSuccess(t *testing.T) {

@@ -87,7 +87,7 @@ func runPrune(ctx context.Context, dockerCli command.Cli, options pruneOptions)
}
}

res, err := dockerCli.Client().ImagesPrune(ctx, client.ImagePruneOptions{
res, err := dockerCli.Client().ImagePrune(ctx, client.ImagePruneOptions{
Filters: pruneFilters,
})
if err != nil {
@@ -18,10 +18,10 @@ import (

func TestNewPruneCommandErrors(t *testing.T) {
testCases := []struct {
name string
args []string
expectedError string
imagesPruneFunc func(client.ImagePruneOptions) (client.ImagePruneResult, error)
name string
args []string
expectedError string
imagePruneFunc func(client.ImagePruneOptions) (client.ImagePruneResult, error)
}{
{
name: "wrong-args",
@@ -32,7 +32,7 @@ func TestNewPruneCommandErrors(t *testing.T) {
name: "prune-error",
args: []string{"--force"},
expectedError: "something went wrong",
imagesPruneFunc: func(client.ImagePruneOptions) (client.ImagePruneResult, error) {
imagePruneFunc: func(client.ImagePruneOptions) (client.ImagePruneResult, error) {
return client.ImagePruneResult{}, errors.New("something went wrong")
},
},
@@ -40,7 +40,7 @@ func TestNewPruneCommandErrors(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
cmd := newPruneCommand(test.NewFakeCli(&fakeClient{
imagesPruneFunc: tc.imagesPruneFunc,
imagePruneFunc: tc.imagePruneFunc,
}))
cmd.SetOut(io.Discard)
cmd.SetErr(io.Discard)
@@ -52,14 +52,14 @@ func TestNewPruneCommandErrors(t *testing.T) {

func TestNewPruneCommandSuccess(t *testing.T) {
testCases := []struct {
name string
args []string
imagesPruneFunc func(client.ImagePruneOptions) (client.ImagePruneResult, error)
name string
args []string
imagePruneFunc func(client.ImagePruneOptions) (client.ImagePruneResult, error)
}{
{
name: "all",
args: []string{"--all"},
imagesPruneFunc: func(opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
imagePruneFunc: func(opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
assert.Check(t, opts.Filters["dangling"]["false"])
return client.ImagePruneResult{}, nil
},
@@ -67,7 +67,7 @@ func TestNewPruneCommandSuccess(t *testing.T) {
{
name: "force-deleted",
args: []string{"--force"},
imagesPruneFunc: func(opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
imagePruneFunc: func(opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
assert.Check(t, opts.Filters["dangling"]["true"])
return client.ImagePruneResult{
Report: image.PruneReport{
@@ -80,7 +80,7 @@ func TestNewPruneCommandSuccess(t *testing.T) {
{
name: "label-filter",
args: []string{"--force", "--filter", "label=foobar"},
imagesPruneFunc: func(opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
imagePruneFunc: func(opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
assert.Check(t, opts.Filters["label"]["foobar"])
return client.ImagePruneResult{}, nil
},
@@ -88,7 +88,7 @@ func TestNewPruneCommandSuccess(t *testing.T) {
{
name: "force-untagged",
args: []string{"--force"},
imagesPruneFunc: func(opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
imagePruneFunc: func(opts client.ImagePruneOptions) (client.ImagePruneResult, error) {
assert.Check(t, opts.Filters["dangling"]["true"])
return client.ImagePruneResult{
Report: image.PruneReport{
@@ -101,7 +101,7 @@ func TestNewPruneCommandSuccess(t *testing.T) {
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
cli := test.NewFakeCli(&fakeClient{imagesPruneFunc: tc.imagesPruneFunc})
cli := test.NewFakeCli(&fakeClient{imagePruneFunc: tc.imagePruneFunc})
// when prompted, answer "Y" to confirm the prune.
// will not be prompted if --force is used.
cli.SetIn(streams.NewIn(io.NopCloser(strings.NewReader("Y\n"))))
@@ -120,8 +120,8 @@ func TestPrunePromptTermination(t *testing.T) {
t.Cleanup(cancel)

cli := test.NewFakeCli(&fakeClient{
imagesPruneFunc: func(client.ImagePruneOptions) (client.ImagePruneResult, error) {
return client.ImagePruneResult{}, errors.New("fakeClient imagesPruneFunc should not be called")
imagePruneFunc: func(client.ImagePruneOptions) (client.ImagePruneResult, error) {
return client.ImagePruneResult{}, errors.New("fakeClient imagePruneFunc should not be called")
},
})
cmd := newPruneCommand(cli)

@@ -38,7 +38,7 @@ func TestNewSaveCommandErrors(t *testing.T) {
isTerminal: false,
expectedError: "error saving image",
imageSaveFunc: func(images []string, options ...client.ImageSaveOption) (client.ImageSaveResult, error) {
return client.ImageSaveResult{}, errors.New("error saving image")
return nil, errors.New("error saving image")
},
},
{
@@ -83,7 +83,7 @@ func TestNewSaveCommandSuccess(t *testing.T) {
imageSaveFunc: func(images []string, options ...client.ImageSaveOption) (client.ImageSaveResult, error) {
assert.Assert(t, is.Len(images, 1))
assert.Check(t, is.Equal("arg1", images[0]))
return client.ImageSaveResult{}, nil
return io.NopCloser(strings.NewReader("")), nil
},
deferredFunc: func() {
_ = os.Remove("save_tmp_file")
@@ -96,7 +96,7 @@ func TestNewSaveCommandSuccess(t *testing.T) {
assert.Assert(t, is.Len(images, 2))
assert.Check(t, is.Equal("arg1", images[0]))
assert.Check(t, is.Equal("arg2", images[1]))
return client.ImageSaveResult{}, nil
return io.NopCloser(strings.NewReader("")), nil
},
},
{
@@ -108,7 +108,7 @@ func TestNewSaveCommandSuccess(t *testing.T) {
// FIXME(thaJeztah): need to find appropriate way to test the result of "ImageHistoryWithPlatform" being applied
assert.Check(t, len(options) > 0) // can be 1 or two depending on whether a terminal is attached :/
// assert.Check(t, is.Contains(options, client.ImageHistoryWithPlatform(ocispec.Platform{OS: "linux", Architecture: "amd64"})))
return client.ImageSaveResult{}, nil
return io.NopCloser(strings.NewReader("")), nil
},
},
{
@@ -118,7 +118,7 @@ func TestNewSaveCommandSuccess(t *testing.T) {
assert.Assert(t, is.Len(images, 1))
assert.Check(t, is.Equal("arg1", images[0]))
assert.Check(t, len(options) > 0) // can be 1 or 2 depending on whether a terminal is attached :/
return client.ImageSaveResult{}, nil
return io.NopCloser(strings.NewReader("")), nil
},
},
{
@@ -128,7 +128,7 @@ func TestNewSaveCommandSuccess(t *testing.T) {
assert.Assert(t, is.Len(images, 1))
assert.Check(t, is.Equal("arg1", images[0]))
assert.Check(t, len(options) > 0) // can be 1 or 2 depending on whether a terminal is attached :/
return client.ImageSaveResult{}, nil
return io.NopCloser(strings.NewReader("")), nil
},
},
}
@@ -72,7 +72,7 @@ func runPrune(ctx context.Context, dockerCli command.Cli, options pruneOptions)
}
}

res, err := dockerCli.Client().NetworksPrune(ctx, client.NetworkPruneOptions{
res, err := dockerCli.Client().NetworkPrune(ctx, client.NetworkPruneOptions{
Filters: pruneFilters,
})
if err != nil {

@@ -382,11 +382,11 @@ func (ctx *serviceInspectContext) UpdateDelay() time.Duration {
}

func (ctx *serviceInspectContext) UpdateOnFailure() string {
return ctx.Service.Spec.UpdateConfig.FailureAction
return string(ctx.Service.Spec.UpdateConfig.FailureAction)
}

func (ctx *serviceInspectContext) UpdateOrder() string {
return ctx.Service.Spec.UpdateConfig.Order
return string(ctx.Service.Spec.UpdateConfig.Order)
}

func (ctx *serviceInspectContext) HasUpdateMonitor() bool {
@@ -418,7 +418,7 @@ func (ctx *serviceInspectContext) RollbackDelay() time.Duration {
}

func (ctx *serviceInspectContext) RollbackOnFailure() string {
return ctx.Service.Spec.RollbackConfig.FailureAction
return string(ctx.Service.Spec.RollbackConfig.FailureAction)
}

func (ctx *serviceInspectContext) HasRollbackMonitor() bool {
@@ -434,7 +434,7 @@ func (ctx *serviceInspectContext) RollbackMaxFailureRatio() float32 {
}

func (ctx *serviceInspectContext) RollbackOrder() string {
return ctx.Service.Spec.RollbackConfig.Order
return string(ctx.Service.Spec.RollbackConfig.Order)
}

func (ctx *serviceInspectContext) ContainerImage() string {

@@ -164,9 +164,9 @@ func updateConfigFromDefaults(defaultUpdateConfig *api.UpdateConfig) *swarm.Upda
Parallelism: defaultUpdateConfig.Parallelism,
Delay: defaultUpdateConfig.Delay,
Monitor: defaultMonitor,
FailureAction: defaultFailureAction,
FailureAction: swarm.FailureAction(defaultFailureAction),
MaxFailureRatio: defaultUpdateConfig.MaxFailureRatio,
Order: defaultOrder(defaultUpdateConfig.Order),
Order: swarm.UpdateOrder(defaultOrder(defaultUpdateConfig.Order)),
}
}

@@ -187,13 +187,13 @@ func (o updateOptions) updateConfig(flags *pflag.FlagSet) *swarm.UpdateConfig {
updateConfig.Monitor = o.monitor
}
if flags.Changed(flagUpdateFailureAction) {
updateConfig.FailureAction = o.onFailure
updateConfig.FailureAction = swarm.FailureAction(o.onFailure)
}
if flags.Changed(flagUpdateMaxFailureRatio) {
updateConfig.MaxFailureRatio = o.maxFailureRatio.Value()
}
if flags.Changed(flagUpdateOrder) {
updateConfig.Order = o.order
updateConfig.Order = swarm.UpdateOrder(o.order)
}

return updateConfig
@@ -216,13 +216,13 @@ func (o updateOptions) rollbackConfig(flags *pflag.FlagSet) *swarm.UpdateConfig
updateConfig.Monitor = o.monitor
}
if flags.Changed(flagRollbackFailureAction) {
updateConfig.FailureAction = o.onFailure
updateConfig.FailureAction = swarm.FailureAction(o.onFailure)
}
if flags.Changed(flagRollbackMaxFailureRatio) {
updateConfig.MaxFailureRatio = o.maxFailureRatio.Value()
}
if flags.Changed(flagRollbackOrder) {
updateConfig.Order = o.order
updateConfig.Order = swarm.UpdateOrder(o.order)
}

return updateConfig
@@ -299,9 +299,9 @@ func defaultRestartCondition() swarm.RestartPolicyCondition {
func defaultOrder(order api.UpdateConfig_UpdateOrder) string {
switch order {
case api.UpdateConfig_STOP_FIRST:
return swarm.UpdateOrderStopFirst
return string(swarm.UpdateOrderStopFirst)
case api.UpdateConfig_START_FIRST:
return swarm.UpdateOrderStartFirst
return string(swarm.UpdateOrderStartFirst)
default:
return ""
}
@@ -269,16 +269,16 @@ func TestToServiceUpdateRollback(t *testing.T) {
flags.Set("update-parallelism", "23")
flags.Set("update-delay", "34s")
flags.Set("update-monitor", "54321ns")
flags.Set("update-failure-action", swarm.UpdateFailureActionPause)
flags.Set("update-failure-action", string(swarm.UpdateFailureActionPause))
flags.Set("update-max-failure-ratio", "0.6")
flags.Set("update-order", swarm.UpdateOrderStopFirst)
flags.Set("update-order", string(swarm.UpdateOrderStopFirst))

flags.Set("rollback-parallelism", "12")
flags.Set("rollback-delay", "23s")
flags.Set("rollback-monitor", "12345ns")
flags.Set("rollback-failure-action", swarm.UpdateFailureActionContinue)
flags.Set("rollback-failure-action", string(swarm.UpdateFailureActionContinue))
flags.Set("rollback-max-failure-ratio", "0.5")
flags.Set("rollback-order", swarm.UpdateOrderStartFirst)
flags.Set("rollback-order", string(swarm.UpdateOrderStartFirst))

o := newServiceOptions()
o.mode = "replicated"
@@ -286,17 +286,17 @@ func TestToServiceUpdateRollback(t *testing.T) {
parallelism: 23,
delay: 34 * time.Second,
monitor: 54321 * time.Nanosecond,
onFailure: swarm.UpdateFailureActionPause,
onFailure: string(swarm.UpdateFailureActionPause),
maxFailureRatio: 0.6,
order: swarm.UpdateOrderStopFirst,
order: string(swarm.UpdateOrderStopFirst),
}
o.rollback = updateOptions{
parallelism: 12,
delay: 23 * time.Second,
monitor: 12345 * time.Nanosecond,
onFailure: swarm.UpdateFailureActionContinue,
onFailure: string(swarm.UpdateFailureActionContinue),
maxFailureRatio: 0.5,
order: swarm.UpdateOrderStartFirst,
order: string(swarm.UpdateOrderStartFirst),
}

service, err := o.ToService(context.Background(), &fakeClient{}, flags)
@@ -307,18 +307,18 @@ func TestToServiceUpdateRollback(t *testing.T) {

func TestToServiceUpdateRollbackOrder(t *testing.T) {
flags := newCreateCommand(nil).Flags()
flags.Set("update-order", swarm.UpdateOrderStartFirst)
flags.Set("rollback-order", swarm.UpdateOrderStartFirst)
flags.Set("update-order", string(swarm.UpdateOrderStartFirst))
flags.Set("rollback-order", string(swarm.UpdateOrderStartFirst))

o := newServiceOptions()
o.mode = "replicated"
o.update = updateOptions{order: swarm.UpdateOrderStartFirst}
o.rollback = updateOptions{order: swarm.UpdateOrderStartFirst}
o.update = updateOptions{order: string(swarm.UpdateOrderStartFirst)}
o.rollback = updateOptions{order: string(swarm.UpdateOrderStartFirst)}

service, err := o.ToService(context.Background(), &fakeClient{}, flags)
assert.NilError(t, err)
assert.Check(t, is.Equal(service.UpdateConfig.Order, o.update.order))
assert.Check(t, is.Equal(service.RollbackConfig.Order, o.rollback.order))
assert.Check(t, is.Equal(string(service.UpdateConfig.Order), o.update.order))
assert.Check(t, is.Equal(string(service.RollbackConfig.Order), o.rollback.order))
}

func TestToServiceMaxReplicasGlobalModeConflict(t *testing.T) {
@@ -228,7 +228,7 @@ func runUpdate(ctx context.Context, dockerCLI command.Cli, flags *pflag.FlagSet,
}
updateOpts.EncodedRegistryAuth = encodedAuth
} else {
registryAuthFrom = swarm.RegistryAuthFromSpec
registryAuthFrom = string(swarm.RegistryAuthFromSpec)
}

response, err := apiClient.ServiceUpdate(ctx, res.Service.ID, client.ServiceUpdateOptions{
@@ -236,7 +236,7 @@ func runUpdate(ctx context.Context, dockerCLI command.Cli, flags *pflag.FlagSet,
Spec: *spec,

EncodedRegistryAuth: encodedAuth,
RegistryAuthFrom: registryAuthFrom,
RegistryAuthFrom: swarm.RegistryAuthSource(registryAuthFrom),
Rollback: rollbackAction,
})
if err != nil {
@@ -433,9 +433,15 @@ func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags
updateUint64(flagUpdateParallelism, &spec.UpdateConfig.Parallelism)
updateDuration(flagUpdateDelay, &spec.UpdateConfig.Delay)
updateDuration(flagUpdateMonitor, &spec.UpdateConfig.Monitor)
updateString(flagUpdateFailureAction, &spec.UpdateConfig.FailureAction)
if flags.Changed(flagUpdateFailureAction) {
value, _ := flags.GetString(flagUpdateFailureAction)
spec.UpdateConfig.FailureAction = swarm.FailureAction(value)
}
updateFloatValue(flagUpdateMaxFailureRatio, &spec.UpdateConfig.MaxFailureRatio)
updateString(flagUpdateOrder, &spec.UpdateConfig.Order)
if flags.Changed(flagUpdateOrder) {
value, _ := flags.GetString(flagUpdateOrder)
spec.UpdateConfig.Order = swarm.UpdateOrder(value)
}
}

if anyChanged(flags, flagRollbackParallelism, flagRollbackDelay, flagRollbackMonitor, flagRollbackFailureAction, flagRollbackMaxFailureRatio, flagRollbackOrder) {
@@ -445,9 +451,15 @@ func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags
updateUint64(flagRollbackParallelism, &spec.RollbackConfig.Parallelism)
updateDuration(flagRollbackDelay, &spec.RollbackConfig.Delay)
updateDuration(flagRollbackMonitor, &spec.RollbackConfig.Monitor)
updateString(flagRollbackFailureAction, &spec.RollbackConfig.FailureAction)
if flags.Changed(flagRollbackFailureAction) {
value, _ := flags.GetString(flagRollbackFailureAction)
spec.RollbackConfig.FailureAction = swarm.FailureAction(value)
}
updateFloatValue(flagRollbackMaxFailureRatio, &spec.RollbackConfig.MaxFailureRatio)
updateString(flagRollbackOrder, &spec.RollbackConfig.Order)
if flags.Changed(flagRollbackOrder) {
value, _ := flags.GetString(flagRollbackOrder)
spec.RollbackConfig.Order = swarm.UpdateOrder(value)
}
}

if flags.Changed(flagEndpointMode) {

@@ -38,7 +38,7 @@ func (cli *fakeClient) ContainerList(ctx context.Context, options client.Contain
return client.ContainerListResult{}, nil
}

func (cli *fakeClient) ContainersPrune(ctx context.Context, opts client.ContainerPruneOptions) (client.ContainerPruneResult, error) {
func (cli *fakeClient) ContainerPrune(ctx context.Context, opts client.ContainerPruneOptions) (client.ContainerPruneResult, error) {
if cli.containerPruneFunc != nil {
return cli.containerPruneFunc(ctx, opts)
}
@@ -42,7 +42,9 @@ func newDiskUsageCommand(dockerCLI command.Cli) *cobra.Command {

func runDiskUsage(ctx context.Context, dockerCli command.Cli, opts diskUsageOptions) error {
// TODO expose types.DiskUsageOptions.Types as flag on the command-line and/or as separate commands (docker container df / docker container usage)
du, err := dockerCli.Client().DiskUsage(ctx, client.DiskUsageOptions{})
du, err := dockerCli.Client().DiskUsage(ctx, client.DiskUsageOptions{
Verbose: opts.verbose,
})
if err != nil {
return err
}
@@ -52,25 +54,16 @@ func runDiskUsage(ctx context.Context, dockerCli command.Cli, opts diskUsageOpti
format = formatter.TableFormatKey
}

var bsz int64
for _, bc := range du.BuildCache {
if !bc.Shared {
bsz += bc.Size
}
}

duCtx := formatter.DiskUsageContext{
Context: formatter.Context{
Output: dockerCli.Out(),
Format: formatter.NewDiskUsageFormat(format, opts.verbose),
},
LayersSize: du.LayersSize,
BuilderSize: bsz,
BuildCache: du.BuildCache,
Images: du.Images,
Containers: du.Containers,
Volumes: du.Volumes,
Verbose: opts.verbose,
Verbose: opts.verbose,
ImageDiskUsage: du.Images,
BuildCacheDiskUsage: du.BuildCache,
ContainerDiskUsage: du.Containers,
VolumeDiskUsage: du.Volumes,
}

return duCtx.Write()
@@ -202,9 +202,7 @@ func prettyPrintInfo(streams command.Streams, info dockerInfo) error {
fprintln(streams.Out())
fprintln(streams.Out(), "Server:")
if info.Info != nil {
for _, err := range prettyPrintServerInfo(streams, &info) {
info.ServerErrors = append(info.ServerErrors, err.Error())
}
prettyPrintServerInfo(streams, &info)
}
for _, err := range info.ServerErrors {
fprintln(streams.Err(), "ERROR:", err)
@@ -240,8 +238,7 @@ func prettyPrintClientInfo(streams command.Streams, info clientInfo) {
}

//nolint:gocyclo
func prettyPrintServerInfo(streams command.Streams, info *dockerInfo) []error {
var errs []error
func prettyPrintServerInfo(streams command.Streams, info *dockerInfo) {
output := streams.Out()

fprintln(output, " Containers:", info.Containers)
@@ -306,17 +303,14 @@ func prettyPrintServerInfo(streams command.Streams, info *dockerInfo) []error {
fprintln(output, " containerd version:", info.ContainerdCommit.ID)
fprintln(output, " runc version:", info.RuncCommit.ID)
fprintln(output, " init version:", info.InitCommit.ID)
if len(info.SecurityOptions) != 0 {
if kvs, err := security.DecodeOptions(info.SecurityOptions); err != nil {
errs = append(errs, err)
} else {
fprintln(output, " Security Options:")
for _, so := range kvs {
fprintln(output, " "+so.Name)
for _, o := range so.Options {
if o.Key == "profile" {
fprintln(output, " Profile:", o.Value)
}
secopts := security.DecodeOptions(info.SecurityOptions)
if len(secopts) != 0 {
fprintln(output, " Security Options:")
for _, so := range secopts {
fprintln(output, " "+so.Name)
for _, o := range so.Options {
if o.Key == "profile" {
fprintln(output, " Profile:", o.Value)
}
}
}
@@ -407,8 +401,6 @@ func prettyPrintServerInfo(streams command.Streams, info *dockerInfo) []error {
for _, w := range info.Warnings {
fprintln(streams.Err(), w)
}

return errs
}

//nolint:gocyclo
@@ -1,2 +1 @@
ERROR: a server error occurred
ERROR: invalid empty security option

@@ -36,7 +36,7 @@ func (c *fakeClient) VolumeList(_ context.Context, options client.VolumeListOpti
return client.VolumeListResult{}, nil
}

func (c *fakeClient) VolumesPrune(_ context.Context, opts client.VolumePruneOptions) (client.VolumePruneResult, error) {
func (c *fakeClient) VolumePrune(_ context.Context, opts client.VolumePruneOptions) (client.VolumePruneResult, error) {
if c.volumePruneFunc != nil {
return c.volumePruneFunc(opts)
}

@@ -90,7 +90,7 @@ func runPrune(ctx context.Context, dockerCli command.Cli, options pruneOptions)
}
}

res, err := dockerCli.Client().VolumesPrune(ctx, client.VolumePruneOptions{
res, err := dockerCli.Client().VolumePrune(ctx, client.VolumePruneOptions{
Filters: pruneFilters,
})
if err != nil {
@@ -507,10 +507,10 @@ func convertUpdateConfig(source *composetypes.UpdateConfig) *swarm.UpdateConfig
return &swarm.UpdateConfig{
Parallelism: parallel,
Delay: time.Duration(source.Delay),
FailureAction: source.FailureAction,
FailureAction: swarm.FailureAction(source.FailureAction),
Monitor: time.Duration(source.Monitor),
MaxFailureRatio: source.MaxFailureRatio,
Order: source.Order,
Order: swarm.UpdateOrder(source.Order),
}
}

@@ -427,19 +427,19 @@ func TestConvertCredentialSpec(t *testing.T) {
func TestConvertUpdateConfigOrder(t *testing.T) {
// test default behavior
updateConfig := convertUpdateConfig(&composetypes.UpdateConfig{})
assert.Check(t, is.Equal("", updateConfig.Order))
assert.Check(t, is.Equal("", string(updateConfig.Order)))

// test start-first
updateConfig = convertUpdateConfig(&composetypes.UpdateConfig{
Order: "start-first",
})
assert.Check(t, is.Equal(updateConfig.Order, "start-first"))
assert.Check(t, is.Equal(string(updateConfig.Order), "start-first"))

// test stop-first
updateConfig = convertUpdateConfig(&composetypes.UpdateConfig{
Order: "stop-first",
})
assert.Check(t, is.Equal(updateConfig.Order, "stop-first"))
assert.Check(t, is.Equal(string(updateConfig.Order), "stop-first"))
}

func TestConvertFileObject(t *testing.T) {