1
0
mirror of https://github.com/containers/buildah.git synced 2025-07-31 15:24:26 +03:00

use new containers/common/libimage package

Move all code related handling container image over to the new
`libimage` package in containers/common.  The new package is an
attempt to consolidate the code across the containers tools under the
github.com/containers umbrella.

The new `libimage` package provides functionality to perform all kinds
of operations for managing images such as local lookups, pushing,
pulling, listing, removing, etc.

The following packages have been moved over to containers/common:

`manifests` -> `common/image/manifests`
`pkg/manifests` -> `common/pkg/manifests`
`pkg/supplemented` -> `common/pkg/supplemented`

Signed-off-by: Valentin Rothberg <rothberg@redhat.com>
This commit is contained in:
Valentin Rothberg
2021-04-10 19:44:51 +02:00
parent 85bdc0086c
commit dcd2a92e56
128 changed files with 6740 additions and 4031 deletions

View File

@ -11,7 +11,8 @@ import (
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/util"
"github.com/containers/common/pkg/auth"
"github.com/containers/image/v5/storage"
"github.com/containers/image/v5/pkg/shortnames"
storageTransport "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
@ -150,16 +151,18 @@ func commitCmd(c *cobra.Command, args []string, iopts commitInputOptions) error
return errors.Wrapf(err, "error building system context")
}
// If the user specified an image, we may need to massage it a bit if
// no transport is specified.
if image != "" {
if dest, err = alltransports.ParseImageName(image); err != nil {
candidates, _, _, err := util.ResolveName(image, "", systemContext, store)
candidates, err := shortnames.ResolveLocally(systemContext, image)
if err != nil {
return errors.Wrapf(err, "error parsing target image name %q", image)
return err
}
if len(candidates) == 0 {
return errors.Errorf("error parsing target image name %q", image)
}
dest2, err2 := storage.Transport.ParseStoreReference(store, candidates[0])
dest2, err2 := storageTransport.Transport.ParseStoreReference(store, candidates[0].String())
if err2 != nil {
return errors.Wrapf(err, "error parsing target image name %q", image)
}

View File

@ -16,7 +16,6 @@ import (
enchelpers "github.com/containers/ocicrypt/helpers"
"github.com/containers/storage"
"github.com/containers/storage/pkg/unshare"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@ -224,178 +223,6 @@ func defaultFormat() string {
return buildah.OCI
}
// imageIsParent goes through the layers in the store and checks if image.TopLayer is
// the parent of any other layer in store. Double check that image with that
// layer exists as well.
func imageIsParent(ctx context.Context, sc *types.SystemContext, store storage.Store, image *storage.Image) (bool, error) {
// Ask for at most one child: we only need to know whether any child exists.
children, err := getChildren(ctx, sc, store, image, 1)
if err != nil {
return false, err
}
return len(children) > 0, nil
}
// getImageConfig returns the OCI configuration of the image with the given
// ID, or (nil, nil) when the image exists in the store but its configuration
// cannot be opened (e.g. it has no manifest yet).
func getImageConfig(ctx context.Context, sc *types.SystemContext, store storage.Store, imageID string) (*imgspecv1.Image, error) {
	ref, err := is.Transport.ParseStoreReference(store, imageID)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to parse reference to image %q", imageID)
	}
	image, err := ref.NewImage(ctx, sc)
	if err != nil {
		// If the image record exists but can't be opened, treat it as
		// "no configuration available" rather than a hard error.
		if img, err2 := store.Image(imageID); err2 == nil && img.ID == imageID {
			return nil, nil
		}
		return nil, errors.Wrapf(err, "unable to open image %q", imageID)
	}
	// Register the close immediately after the successful open so the image
	// is released on every subsequent return path.
	defer image.Close()
	config, err := image.OCIConfig(ctx)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to read configuration from image %q", imageID)
	}
	return config, nil
}
// historiesDiffer reports whether the two history slices differ, either in
// length or in any compared field (Created, CreatedBy, Author, Comment,
// EmptyLayer) at any position.
func historiesDiffer(a, b []imgspecv1.History) bool {
	if len(a) != len(b) {
		return true
	}
	for idx := range a {
		createdA, createdB := a[idx].Created, b[idx].Created
		// Created is a pointer: differ if exactly one is nil, or both are
		// set but not equal.
		if (createdA == nil) != (createdB == nil) {
			return true
		}
		if createdA != nil && !createdA.Equal(*createdB) {
			return true
		}
		if a[idx].CreatedBy != b[idx].CreatedBy ||
			a[idx].Author != b[idx].Author ||
			a[idx].Comment != b[idx].Comment ||
			a[idx].EmptyLayer != b[idx].EmptyLayer {
			return true
		}
	}
	return false
}
// getParent returns the image's parent image. Return nil if a parent is not found.
// A candidate qualifies as the parent when its top layer is the child's top
// layer (or that layer's parent), its history is exactly the child's history
// minus the last entry, and its diff IDs form a prefix of the child's.
func getParent(ctx context.Context, sc *types.SystemContext, store storage.Store, child *storage.Image) (*storage.Image, error) {
images, err := store.Images()
if err != nil {
return nil, errors.Wrapf(err, "unable to retrieve image list from store")
}
var childTopLayer *storage.Layer
if child.TopLayer != "" {
childTopLayer, err = store.Layer(child.TopLayer)
if err != nil {
return nil, errors.Wrapf(err, "unable to retrieve information about layer %s from store", child.TopLayer)
}
}
childConfig, err := getImageConfig(ctx, sc, store, child.ID)
if err != nil {
return nil, errors.Wrapf(err, "unable to read configuration from image %q", child.ID)
}
// A nil config (image present but unreadable) means we cannot match anything.
if childConfig == nil {
return nil, nil
}
for _, parent := range images {
if parent.ID == child.ID {
continue
}
// Layer check: the candidate's top layer must be the child's top layer
// or the child's top layer's parent.
if childTopLayer != nil && parent.TopLayer != childTopLayer.Parent && parent.TopLayer != childTopLayer.ID {
continue
}
parentConfig, err := getImageConfig(ctx, sc, store, parent.ID)
if err != nil {
return nil, errors.Wrapf(err, "unable to read configuration from image %q", parent.ID)
}
if parentConfig == nil {
continue
}
// The child must have exactly one more history entry than the parent.
if len(parentConfig.History)+1 != len(childConfig.History) {
continue
}
if len(parentConfig.RootFS.DiffIDs) > 0 {
if len(childConfig.RootFS.DiffIDs) < len(parentConfig.RootFS.DiffIDs) {
continue
}
// Every parent diff ID must appear, in order, at the start of the
// child's diff IDs.
childUsesAllParentLayers := true
for i := range parentConfig.RootFS.DiffIDs {
if childConfig.RootFS.DiffIDs[i] != parentConfig.RootFS.DiffIDs[i] {
childUsesAllParentLayers = false
break
}
}
if !childUsesAllParentLayers {
continue
}
}
if historiesDiffer(parentConfig.History, childConfig.History[:len(parentConfig.History)]) {
continue
}
return &parent, nil
}
return nil, nil
}
// getChildren returns a list of the imageIDs that depend on the image.
// max limits how many children are collected; max <= 0 means no limit.
func getChildren(ctx context.Context, sc *types.SystemContext, store storage.Store, parent *storage.Image, max int) ([]string, error) {
var children []string
images, err := store.Images()
if err != nil {
return nil, errors.Wrapf(err, "unable to retrieve images from store")
}
parentConfig, err := getImageConfig(ctx, sc, store, parent.ID)
if err != nil {
return nil, errors.Wrapf(err, "unable to read configuration from image %q", parent.ID)
}
// A nil config (image present but unreadable) means nothing can match.
if parentConfig == nil {
return nil, nil
}
for _, child := range images {
if child.ID == parent.ID {
continue
}
var childTopLayer *storage.Layer
if child.TopLayer != "" {
childTopLayer, err = store.Layer(child.TopLayer)
if err != nil {
return nil, errors.Wrapf(err, "unable to retrieve information about layer %q from store", child.TopLayer)
}
// The candidate child's top layer must be the parent's top layer or
// a direct descendant of it.
if childTopLayer.Parent != parent.TopLayer && childTopLayer.ID != parent.TopLayer {
continue
}
}
childConfig, err := getImageConfig(ctx, sc, store, child.ID)
if err != nil {
return nil, errors.Wrapf(err, "unable to read configuration from image %q", child.ID)
}
if childConfig == nil {
continue
}
// A child has exactly one more history entry than its parent, and the
// shared prefix of the histories must be identical.
if len(parentConfig.History)+1 != len(childConfig.History) {
continue
}
if historiesDiffer(parentConfig.History, childConfig.History[:len(parentConfig.History)]) {
continue
}
children = append(children, child.ID)
if max > 0 && len(children) >= max {
break
}
}
return children, nil
}
func getFormat(format string) (string, error) {
switch format {
case define.OCI:

View File

@ -1,7 +1,10 @@
package main
import (
"bytes"
"fmt"
"io"
"os"
"strings"
"testing"
)
@ -106,3 +109,37 @@ func TestContainerHeaderOutput(t *testing.T) {
t.Errorf("Error outputting using format string:\n\texpected: %s\n\treceived: %s\n", expectedOutput, output)
}
}
// captureOutputWithError redirects os.Stdout to a pipe while f runs and
// returns everything f wrote to stdout along with f's error.
// os.Stdout is restored on every return path (the original version left
// stdout pointing at the pipe when f returned an error).
func captureOutputWithError(f func() error) (string, error) {
	old := os.Stdout
	r, w, err := os.Pipe()
	if err != nil {
		return "", err
	}
	os.Stdout = w
	defer func() {
		os.Stdout = old
	}()
	if err := f(); err != nil {
		w.Close()
		return "", err
	}
	w.Close()
	var buf bytes.Buffer
	io.Copy(&buf, r) //nolint
	return buf.String(), nil
}
// captureOutput captures everything f writes to os.Stdout so that it can be
// compared to expected values. os.Stdout is restored via defer even if f
// panics; a pipe-creation failure (previously ignored, risking a nil
// dereference) now runs f with stdout untouched and returns "".
func captureOutput(f func()) string {
	old := os.Stdout
	r, w, err := os.Pipe()
	if err != nil {
		f()
		return ""
	}
	os.Stdout = w
	defer func() {
		os.Stdout = old
	}()
	f()
	w.Close()
	var buf bytes.Buffer
	io.Copy(&buf, r) //nolint
	return buf.String()
}

View File

@ -11,12 +11,9 @@ import (
buildahcli "github.com/containers/buildah/pkg/cli"
"github.com/containers/buildah/pkg/formats"
"github.com/containers/buildah/pkg/parse"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
units "github.com/docker/go-units"
"github.com/containers/common/libimage"
"github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -57,18 +54,6 @@ type imageOptions struct {
history bool
}
type filterParams struct {
dangling string
label string
beforeImage string
sinceImage string
beforeDate time.Time
sinceDate time.Time
referencePattern string
readOnly string
history string
}
type imageResults struct {
imageOptions
filter string
@ -119,8 +104,6 @@ func init() {
}
func imagesCmd(c *cobra.Command, args []string, iopts *imageResults) error {
name := ""
if len(args) > 0 {
if iopts.all {
return errors.Errorf("when using the --all switch, you may not pass any images names or IDs")
@ -129,9 +112,7 @@ func imagesCmd(c *cobra.Command, args []string, iopts *imageResults) error {
if err := buildahcli.VerifyFlagsArgsOrder(args); err != nil {
return err
}
if len(args) == 1 {
name = args[0]
} else {
if len(args) > 1 {
return errors.New("'buildah images' requires at most 1 argument")
}
}
@ -140,15 +121,28 @@ func imagesCmd(c *cobra.Command, args []string, iopts *imageResults) error {
if err != nil {
return err
}
systemContext, err := parse.SystemContextFromOptions(c)
if err != nil {
return errors.Wrapf(err, "error building system context")
}
images, err := store.Images()
runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
if err != nil {
return errors.Wrapf(err, "error reading images")
return err
}
ctx := context.Background()
options := &libimage.ListImagesOptions{}
if iopts.filter != "" {
options.Filters = []string{iopts.filter}
}
if !iopts.all {
options.Filters = append(options.Filters, "intermediate=false")
}
images, err := runtime.ListImages(ctx, args, options)
if err != nil {
return err
}
if iopts.quiet && iopts.format != "" {
@ -165,89 +159,12 @@ func imagesCmd(c *cobra.Command, args []string, iopts *imageResults) error {
quiet: iopts.quiet,
history: iopts.history,
}
ctx := getContext()
var params *filterParams
if iopts.filter != "" {
params, err = parseFilter(ctx, store, images, iopts.filter)
if err != nil {
return err
}
if opts.json {
return formatImagesJSON(images, opts)
}
return outputImages(ctx, systemContext, store, images, params, name, opts)
}
// parseFilter parses a comma-separated list of key=value image filters into
// a filterParams struct. Supported keys: dangling, label, before, since,
// reference, readonly. Unknown keys and malformed values are errors.
// NOTE(review): filterParams has a `history` field but no "history" case is
// parsed here — confirm whether that filter should be accepted.
func parseFilter(ctx context.Context, store storage.Store, images []storage.Image, filter string) (*filterParams, error) {
params := new(filterParams)
filterStrings := strings.Split(filter, ",")
for _, param := range filterStrings {
// Split on the first '=' only, so values like "label=a=b" keep "a=b".
pair := strings.SplitN(param, "=", 2)
if len(pair) < 2 {
return nil, errors.Errorf("invalid filter: %q requires value", filter)
}
switch strings.TrimSpace(pair[0]) {
case "dangling":
// Only the literal strings "true"/"false" are accepted.
if pair[1] == "true" || pair[1] == "false" {
params.dangling = pair[1]
} else {
return nil, errors.Errorf("invalid filter: '%s=[%s]'", pair[0], pair[1])
}
case "label":
params.label = pair[1]
case "before":
// Resolve the named image and remember its creation date as the cutoff.
beforeDate, err := setFilterDate(ctx, store, images, pair[1])
if err != nil {
return nil, errors.Wrapf(err, "invalid filter: '%s=[%s]'", pair[0], pair[1])
}
params.beforeDate = beforeDate
params.beforeImage = pair[1]
case "since":
sinceDate, err := setFilterDate(ctx, store, images, pair[1])
if err != nil {
return nil, errors.Wrapf(err, "invalid filter: '%s=[%s]'", pair[0], pair[1])
}
params.sinceDate = sinceDate
params.sinceImage = pair[1]
case "reference":
params.referencePattern = pair[1]
case "readonly":
if pair[1] == "true" || pair[1] == "false" {
params.readOnly = pair[1]
} else {
return nil, errors.Errorf("invalid filter: '%s=[%s]'", pair[0], pair[1])
}
default:
return nil, errors.Errorf("invalid filter: '%s'", pair[0])
}
}
return params, nil
}
func setFilterDate(ctx context.Context, store storage.Store, images []storage.Image, imgName string) (time.Time, error) {
for _, image := range images {
for _, name := range image.Names {
if matchesReference(name, imgName) {
// Set the date to this image
ref, err := is.Transport.ParseStoreReference(store, image.ID)
if err != nil {
return time.Time{}, errors.Wrapf(err, "error parsing reference to image %q", image.ID)
}
img, err := ref.NewImage(ctx, nil)
if err != nil {
return time.Time{}, errors.Wrapf(err, "error reading image %q", image.ID)
}
defer img.Close()
inspect, err := img.Inspect(ctx)
if err != nil {
return time.Time{}, errors.Wrapf(err, "error inspecting image %q", image.ID)
}
date := *inspect.Created
return date, nil
}
}
}
return time.Time{}, errors.Errorf("could not locate image %q", imgName)
return formatImages(images, opts)
}
func outputHeader(opts imageOptions) string {
@ -275,104 +192,91 @@ func outputHeader(opts imageOptions) string {
return format
}
type imagesSorted []imageOutputParams
func outputImages(ctx context.Context, systemContext *types.SystemContext, store storage.Store, images []storage.Image, filters *filterParams, argName string, opts imageOptions) error {
found := false
var imagesParams imagesSorted
func formatImagesJSON(images []*libimage.Image, opts imageOptions) error {
jsonImages := []jsonImage{}
for _, image := range images {
if image.ReadOnly {
opts.readOnly = true
}
createdTime := image.Created
inspectedTime, digest, size, _ := getDateAndDigestAndSize(ctx, systemContext, store, image)
if !inspectedTime.IsZero() {
if createdTime != inspectedTime {
logrus.Debugf("image record and configuration disagree on the image's creation time for %q, using the configuration creation time: %s", image.ID, inspectedTime)
createdTime = inspectedTime
}
}
createdTime = createdTime.Local()
// If "all" is false and this image doesn't have a name, check
// to see if the image is the parent of any other image. If it
// is, then it is an intermediate image, so don't list it if
// the --all flag is not set.
if !opts.all && len(image.Names) == 0 {
isParent, err := imageIsParent(ctx, systemContext, store, &image)
// Copy the base data over to the output param.
size, err := image.Size()
if err != nil {
logrus.Errorf("error checking if image is a parent %q: %v", image.ID, err)
return err
}
if isParent {
continue
}
}
imageID := "sha256:" + image.ID
if opts.truncate {
imageID = shortID(image.ID)
}
outer:
for name, tags := range imageReposToMap(image.Names) {
for _, tag := range tags {
if !matchesReference(name+":"+tag, argName) {
continue
}
found = true
if !matchesFilter(ctx, store, image, name+":"+tag, filters) {
continue
}
if opts.json {
created := image.Created()
jsonImages = append(jsonImages,
jsonImage{ID: image.ID,
Names: image.Names,
Digest: digest,
CreatedAtRaw: createdTime,
CreatedAt: units.HumanDuration(time.Since((createdTime))) + " ago",
jsonImage{
CreatedAtRaw: created,
CreatedAt: units.HumanDuration(time.Since(created)) + " ago",
Digest: image.Digest().String(),
ID: truncateID(image.ID(), opts.truncate),
Names: image.Names(),
ReadOnly: image.IsReadOnly(),
Size: formattedSize(size),
ReadOnly: image.ReadOnly,
History: image.NamesHistory,
})
// We only want to print each id once
break outer
}
params := imageOutputParams{
Tag: tag,
ID: imageID,
Name: name,
Digest: digest,
CreatedAtRaw: createdTime,
CreatedAt: units.HumanDuration(time.Since((createdTime))) + " ago",
Size: formattedSize(size),
ReadOnly: image.ReadOnly,
History: formatHistory(image.NamesHistory, name, tag),
}
imagesParams = append(imagesParams, params)
if opts.quiet {
// We only want to print each id once
break outer
}
}
}
}
if !found && argName != "" {
return errors.Errorf("No such image: %s", argName)
}
if opts.json {
data, err := json.MarshalIndent(jsonImages, "", " ")
if err != nil {
return err
}
fmt.Printf("%s\n", data)
return nil
}
// imagesSorted implements sort.Interface, ordering images newest-first by
// their raw creation timestamp.
type imagesSorted []imageOutputParams
func (a imagesSorted) Less(i, j int) bool {
return a[i].CreatedAtRaw.After(a[j].CreatedAtRaw)
}
func (a imagesSorted) Len() int {
return len(a)
}
func (a imagesSorted) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
func formatImages(images []*libimage.Image, opts imageOptions) error {
var outputData imagesSorted
for _, image := range images {
var outputParam imageOutputParams
size, err := image.Size()
if err != nil {
return err
}
created := image.Created()
outputParam.CreatedAtRaw = created
outputParam.CreatedAt = units.HumanDuration(time.Since(created)) + " ago"
outputParam.Digest = image.Digest().String()
outputParam.ID = truncateID(image.ID(), opts.truncate)
outputParam.Size = formattedSize(size)
outputParam.ReadOnly = image.IsReadOnly()
repoTags, err := image.NamedTaggedRepoTags()
if err != nil {
return err
}
imagesParams = sortImagesOutput(imagesParams)
out := formats.StdoutTemplateArray{Output: imagesToGeneric(imagesParams), Template: outputHeader(opts), Fields: imagesHeader}
nameTagPairs, err := libimage.ToNameTagPairs(repoTags)
if err != nil {
return err
}
for _, pair := range nameTagPairs {
newParam := outputParam
newParam.Name = pair.Name
newParam.Tag = pair.Tag
newParam.History = formatHistory(image.NamesHistory(), pair.Name, pair.Tag)
outputData = append(outputData, newParam)
// `images -q` should a given ID only once.
if opts.quiet {
break
}
}
}
sort.Sort(outputData)
out := formats.StdoutTemplateArray{Output: imagesToGeneric(outputData), Template: outputHeader(opts), Fields: imagesHeader}
return formats.Writer(out).Out()
}
@ -390,7 +294,10 @@ func formatHistory(history []string, name, tag string) string {
return strings.Join(history, ", ")
}
func shortID(id string) string {
func truncateID(id string, truncate bool) string {
if !truncate {
return "sha256:" + id
}
idTruncLength := 12
if len(id) > idTruncLength {
return id[:idTruncLength]
@ -398,17 +305,6 @@ func shortID(id string) string {
return id
}
// sortImagesOutput sorts the slice in place (newest first, per
// imagesSorted.Less) and returns the same slice for convenience.
func sortImagesOutput(imagesOutput imagesSorted) imagesSorted {
sort.Sort(imagesOutput)
return imagesOutput
}
// sort.Interface implementation: newest creation time sorts first.
func (a imagesSorted) Less(i, j int) bool {
return a[i].CreatedAtRaw.After(a[j].CreatedAtRaw)
}
func (a imagesSorted) Len() int { return len(a) }
func (a imagesSorted) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func imagesToGeneric(templParams []imageOutputParams) (genericParams []interface{}) {
if len(templParams) > 0 {
for _, v := range templParams {
@ -418,103 +314,16 @@ func imagesToGeneric(templParams []imageOutputParams) (genericParams []interface
return genericParams
}
// matchesFilter reports whether the image (under the given name) satisfies
// every filter set in params. A nil params matches everything; each
// individual matcher is consulted only when its filter string is non-empty.
func matchesFilter(ctx context.Context, store storage.Store, image storage.Image, name string, params *filterParams) bool {
if params == nil {
return true
}
if params.dangling != "" && !matchesDangling(name, params.dangling) {
return false
}
if params.label != "" && !matchesLabel(ctx, store, image, params.label) {
return false
}
if params.beforeImage != "" && !matchesBeforeImage(image, params) {
return false
}
if params.sinceImage != "" && !matchesSinceImage(image, params) {
return false
}
if params.referencePattern != "" && !matchesReference(name, params.referencePattern) {
return false
}
if params.readOnly != "" && !matchesReadOnly(image, params.readOnly) {
return false
}
if params.history != "" && !matchesHistory(image, params.history) {
return false
}
return true
}
func formattedSize(size int64) string {
suffixes := [5]string{"B", "KB", "MB", "GB", "TB"}
func matchesDangling(name string, dangling string) bool {
if dangling == "false" && !strings.Contains(name, none) {
return true
count := 0
formattedSize := float64(size)
for formattedSize >= 1000 && count < 4 {
formattedSize /= 1000
count++
}
if dangling == "true" && strings.Contains(name, none) {
return true
}
return false
}
// matchesReadOnly reports whether the image's read-only flag agrees with the
// "readonly" filter value. Values other than "true"/"false" match nothing.
func matchesReadOnly(image storage.Image, readOnly string) bool {
	switch readOnly {
	case "true":
		return image.ReadOnly
	case "false":
		return !image.ReadOnly
	}
	return false
}
// matchesHistory reports whether the presence of name history on the image
// agrees with the filter value ("true" = has history, "false" = none).
// Values other than "true"/"false" match nothing.
func matchesHistory(image storage.Image, readOnly string) bool {
	hasHistory := len(image.NamesHistory) > 0
	switch readOnly {
	case "true":
		return hasHistory
	case "false":
		return !hasHistory
	}
	return false
}
// matchesLabel reports whether the image carries the label described by the
// filter value ("key=value" or bare "key"). Any error while opening or
// inspecting the image is treated as a non-match.
// NOTE(review): when the filter is a bare key (no '='), a matching key
// returns false rather than true — presence-only filters can never match;
// confirm whether that is intended.
func matchesLabel(ctx context.Context, store storage.Store, image storage.Image, label string) bool {
storeRef, err := is.Transport.ParseStoreReference(store, image.ID)
if err != nil {
return false
}
img, err := storeRef.NewImage(ctx, nil)
if err != nil {
return false
}
defer img.Close()
info, err := img.Inspect(ctx)
if err != nil {
return false
}
// Split on the first '=' only, so values may themselves contain '='.
pair := strings.SplitN(label, "=", 2)
for key, value := range info.Labels {
if key == pair[0] {
if len(pair) == 2 {
if value == pair[1] {
return true
}
} else {
return false
}
}
}
return false
}
// Returns true if the image was created before the filter image's creation
// date (params.beforeDate), or if its creation time is unset.
func matchesBeforeImage(image storage.Image, params *filterParams) bool {
return image.Created.IsZero() || image.Created.Before(params.beforeDate)
}
// Returns true if the image was created since the filter image. Returns
// false otherwise
func matchesSinceImage(image storage.Image, params *filterParams) bool {
return image.Created.IsZero() || image.Created.After(params.sinceDate)
return fmt.Sprintf("%.3g %s", formattedSize, suffixes[count])
}
func matchesID(imageID, argID string) bool {
@ -533,44 +342,3 @@ func matchesReference(name, argName string) bool {
}
return strings.HasSuffix(splitName[0], argName)
}
// formattedSize renders a byte count as a human-readable string using
// decimal (base-1000) units, per https://en.wikipedia.org/wiki/Binary_prefix:
// sizes are reported in B/KB/MB/GB/TB rather than binary (1024-based) units.
func formattedSize(size int64) string {
	suffixes := [5]string{"B", "KB", "MB", "GB", "TB"}
	value := float64(size)
	unit := 0
	for value >= 1000 && unit < len(suffixes)-1 {
		value /= 1000
		unit++
	}
	return fmt.Sprintf("%.3g %s", value, suffixes[unit])
}
// imageReposToMap parses the specified repotags and returns a map with
// repositories as keys and the corresponding arrays of tags as values.
// An untagged name gets the placeholder tag; an empty input yields a
// single placeholder entry.
func imageReposToMap(repotags []string) map[string][]string {
	repos := make(map[string][]string)
	for _, repoTag := range repotags {
		var repository, tag string
		// Split at the LAST colon so registry ports ("host:5000/img:tag")
		// stay part of the repository.
		switch idx := strings.LastIndex(repoTag, ":"); {
		case idx >= 0:
			repository, tag = repoTag[:idx], repoTag[idx+1:]
		case repoTag != "":
			repository, tag = repoTag, none
		default:
			logrus.Warnf("Found image with empty name")
		}
		repos[repository] = append(repos[repository], tag)
	}
	if len(repos) == 0 {
		repos[none] = []string{none}
	}
	return repos
}

View File

@ -1,17 +1,7 @@
package main
import (
"bytes"
"fmt"
"io"
"os"
"strings"
"testing"
"time"
"github.com/containers/buildah/util"
is "github.com/containers/image/v5/storage"
"github.com/containers/storage"
)
func TestSizeFormatting(t *testing.T) {
@ -78,445 +68,3 @@ func TestNoMatchesReferenceWithoutTag(t *testing.T) {
t.Error("expected no match, got match")
}
}
// TestOutputImagesQuietNotTruncated verifies that quiet mode prints a single
// full (non-truncated) "sha256:<id>" line for an image.
func TestOutputImagesQuietNotTruncated(t *testing.T) {
// Make sure the tests are running as root
failTestIfNotRoot(t)
opts := imageOptions{
quiet: true,
}
store, err := storage.GetStore(storeOptions)
if err != nil {
t.Fatal(err)
} else if store != nil {
is.Transport.SetStore(store)
}
// Pull an image so that we know we have at least one
pullTestImage(t)
images, err := store.Images()
if err != nil {
t.Fatalf("Error reading images: %v", err)
}
// Tests quiet and non-truncated output
output, err := captureOutputWithError(func() error {
return outputImages(getContext(), &testSystemContext, store, images[:1], nil, "", opts)
})
// Quiet output is expected to be the full digest-prefixed ID.
expectedOutput := fmt.Sprintf("sha256:%s\n", images[0].ID)
if err != nil {
t.Error("quiet/non-truncated output produces error")
} else if strings.TrimSpace(output) != strings.TrimSpace(expectedOutput) {
t.Errorf("quiet/non-truncated output does not match expected value\nExpected: %s\nReceived: %s\n", expectedOutput, output)
}
}
func TestOutputImagesFormatString(t *testing.T) {
// Make sure the tests are running as root
failTestIfNotRoot(t)
opts := imageOptions{
format: "{{.ID}}",
truncate: true,
}
store, err := storage.GetStore(storeOptions)
if err != nil {
t.Fatal(err)
} else if store != nil {
is.Transport.SetStore(store)
}
// Pull an image so that we know we have at least one
pullTestImage(t)
images, err := store.Images()
if err != nil {
t.Fatalf("Error reading images: %v", err)
}
// Tests output with format template
output, err := captureOutputWithError(func() error {
return outputImages(getContext(), &testSystemContext, store, images[:1], nil, "", opts)
})
expectedOutput := images[0].ID
if err != nil {
t.Error("format string output produces error")
} else if !strings.Contains(expectedOutput, strings.TrimSpace(output)) {
t.Errorf("format string output does not match expected value\nExpected: %s\nReceived: %s\n", expectedOutput, output)
}
}
func TestOutputImagesArgNoMatch(t *testing.T) {
// Make sure the tests are running as root
failTestIfNotRoot(t)
opts := imageOptions{
truncate: true,
}
store, err := storage.GetStore(storeOptions)
if err != nil {
t.Fatal(err)
} else if store != nil {
is.Transport.SetStore(store)
}
// Pull an image so that we know we have at least one
pullTestImage(t)
images, err := store.Images()
if err != nil {
t.Fatalf("Error reading images: %v", err)
}
// Tests output with an arg name that does not match. Args ending in ":" cannot match
// because all images in the repository must have a tag, and here the tag is an
// empty string
_, err = captureOutputWithError(func() error {
return outputImages(getContext(), &testSystemContext, store, images[:1], nil, "foo:", opts)
})
if err == nil || err.Error() != "No such image: foo:" {
t.Fatalf("expected error arg no match")
}
}
func TestParseFilterAllParams(t *testing.T) {
// Make sure the tests are running as root
failTestIfNotRoot(t)
store, err := storage.GetStore(storeOptions)
if err != nil {
t.Fatal(err)
} else if store != nil {
is.Transport.SetStore(store)
}
images, err := store.Images()
if err != nil {
t.Fatalf("Error reading images: %v", err)
}
// Pull an image so we know we have it
pullTestImage(t)
label := "dangling=true,label=a=b,before=busybox:latest,since=busybox:latest,reference=abcdef"
params, err := parseFilter(getContext(), store, images, label)
if err != nil {
t.Fatalf("error parsing filter: %v", err)
}
ref, _, err := util.FindImage(store, "", &testSystemContext, "busybox:latest")
if err != nil {
t.Fatalf("error finding local copy of image: %v", err)
}
img, err := ref.NewImage(getContext(), nil)
if err != nil {
t.Fatalf("error reading image from store: %v", err)
}
defer img.Close()
inspect, err := img.Inspect(getContext())
if err != nil {
t.Fatalf("error inspecting image in store: %v", err)
}
expectedParams := &filterParams{
dangling: "true",
label: "a=b",
beforeImage: "busybox:latest",
beforeDate: *inspect.Created,
sinceImage: "busybox:latest",
sinceDate: *inspect.Created,
referencePattern: "abcdef",
}
if *params != *expectedParams {
t.Errorf("filter did not return expected result\n\tExpected: %v\n\tReceived: %v", expectedParams, params)
}
}
func TestParseFilterInvalidDangling(t *testing.T) {
// Make sure the tests are running as root
failTestIfNotRoot(t)
store, err := storage.GetStore(storeOptions)
if err != nil {
t.Fatal(err)
} else if store != nil {
is.Transport.SetStore(store)
}
images, err := store.Images()
if err != nil {
t.Fatalf("Error reading images: %v", err)
}
// Pull an image so we know we have it
pullTestImage(t)
label := "dangling=NO,label=a=b,before=busybox:latest,since=busybox:latest,reference=abcdef"
_, err = parseFilter(getContext(), store, images, label)
if err == nil || err.Error() != "invalid filter: 'dangling=[NO]'" {
t.Fatalf("expected error parsing filter")
}
}
func TestParseFilterInvalidBefore(t *testing.T) {
// Make sure the tests are running as root
failTestIfNotRoot(t)
store, err := storage.GetStore(storeOptions)
if err != nil {
t.Fatal(err)
} else if store != nil {
is.Transport.SetStore(store)
}
images, err := store.Images()
if err != nil {
t.Fatalf("Error reading images: %v", err)
}
// Pull an image so we know we have it
pullTestImage(t)
label := "dangling=false,label=a=b,before=:,since=busybox:latest,reference=abcdef"
_, err = parseFilter(getContext(), store, images, label)
if err == nil || !strings.Contains(err.Error(), "invalid filter") {
t.Fatalf("expected error parsing filter")
}
}
func TestParseFilterInvalidSince(t *testing.T) {
// Make sure the tests are running as root
failTestIfNotRoot(t)
store, err := storage.GetStore(storeOptions)
if err != nil {
t.Fatal(err)
} else if store != nil {
is.Transport.SetStore(store)
}
images, err := store.Images()
if err != nil {
t.Fatalf("Error reading images: %v", err)
}
// Pull an image so we know we have it
pullTestImage(t)
label := "dangling=false,label=a=b,before=busybox:latest,since=:,reference=abcdef"
_, err = parseFilter(getContext(), store, images, label)
if err == nil || !strings.Contains(err.Error(), "invalid filter") {
t.Fatalf("expected error parsing filter")
}
}
func TestParseFilterInvalidFilter(t *testing.T) {
// Make sure the tests are running as root
failTestIfNotRoot(t)
store, err := storage.GetStore(storeOptions)
if err != nil {
t.Fatal(err)
} else if store != nil {
is.Transport.SetStore(store)
}
images, err := store.Images()
if err != nil {
t.Fatalf("Error reading images: %v", err)
}
// Pull an image so we know we have it
pullTestImage(t)
label := "foo=bar"
_, err = parseFilter(getContext(), store, images, label)
if err == nil || err.Error() != "invalid filter: 'foo'" {
t.Fatalf("expected error parsing filter")
}
}
// TestMatchesDanglingTrue covers the matching cases: dangling=true with an
// unnamed ("<none>") image and dangling=false with a named image.
func TestMatchesDanglingTrue(t *testing.T) {
if !matchesDangling("<none>", "true") {
t.Error("matchesDangling() should return true with dangling=true and name=<none>")
}
if !matchesDangling("hello", "false") {
t.Error("matchesDangling() should return true with dangling=false and name='hello'")
}
}
// TestMatchesDanglingFalse covers the non-matching cases: dangling=true with
// a named image and dangling=false with an unnamed ("<none>") image.
func TestMatchesDanglingFalse(t *testing.T) {
if matchesDangling("hello", "true") {
t.Error("matchesDangling() should return false with dangling=true and name=hello")
}
if matchesDangling("<none>", "false") {
t.Error("matchesDangling() should return false with dangling=false and name=<none>")
}
}
func TestMatchesLabelTrue(t *testing.T) {
//TODO: How do I implement this?
}
func TestMatchesLabelFalse(t *testing.T) {
// TODO: How do I implement this?
}
func TestMatchesBeforeImageTrue(t *testing.T) {
// Make sure the tests are running as root
failTestIfNotRoot(t)
store, err := storage.GetStore(storeOptions)
if err != nil {
t.Fatal(err)
} else if store != nil {
is.Transport.SetStore(store)
}
// Pull an image so that we know we have at least one
pullTestImage(t)
images, err := store.Images()
if err != nil {
t.Fatalf("Error reading images: %v", err)
}
// by default, params.seenImage is false
params := new(filterParams)
params.beforeDate = time.Now()
params.beforeImage = "foo:bar"
if !matchesBeforeImage(images[0], params) {
t.Error("should have matched beforeImage")
}
}
func TestMatchesBeforeImageFalse(t *testing.T) {
// Make sure the tests are running as root
failTestIfNotRoot(t)
store, err := storage.GetStore(storeOptions)
if err != nil {
t.Fatal(err)
} else if store != nil {
is.Transport.SetStore(store)
}
// Pull an image so that we know we have at least one
pullTestImage(t)
images, err := store.Images()
if err != nil {
t.Fatalf("Error reading images: %v", err)
}
// by default, params.seenImage is false
params := new(filterParams)
params.beforeDate = time.Time{}
params.beforeImage = "foo:bar"
// Should return false because the image has been seen
if matchesBeforeImage(images[0], params) {
t.Error("should not have matched beforeImage")
}
}
// TestMatchesSinceImageTrue checks that matchesSinceImage reports a match
// when the filter's sinceDate is the zero time.
func TestMatchesSinceImageTrue(t *testing.T) {
	// These tests need root privileges.
	failTestIfNotRoot(t)
	store, err := storage.GetStore(storeOptions)
	if err != nil {
		t.Fatal(err)
	}
	if store != nil {
		is.Transport.SetStore(store)
	}
	// Guarantee at least one image is present in the store.
	pullTestImage(t)
	images, err := store.Images()
	if err != nil {
		t.Fatalf("Error reading images: %v", err)
	}
	// params.seenImage starts out false by default.
	params := &filterParams{
		sinceDate:  time.Time{},
		sinceImage: "foo:bar",
	}
	if !matchesSinceImage(images[0], params) {
		t.Error("should have matched SinceImage")
	}
}
// TestMatchesSinceImageFalse checks that matchesSinceImage does not report a
// match when the filter's sinceDate lies in the future relative to the image.
func TestMatchesSinceImageFalse(t *testing.T) {
	// These tests need root privileges.
	failTestIfNotRoot(t)
	store, err := storage.GetStore(storeOptions)
	if err != nil {
		t.Fatal(err)
	}
	if store != nil {
		is.Transport.SetStore(store)
	}
	// Guarantee at least one image is present in the store.
	pullTestImage(t)
	images, err := store.Images()
	if err != nil {
		t.Fatalf("Error reading images: %v", err)
	}
	// params.seenImage starts out false by default; a future sinceDate
	// must not match.
	params := &filterParams{
		sinceDate:  time.Now(),
		sinceImage: "foo:bar",
	}
	if matchesSinceImage(images[0], params) {
		t.Error("should not have matched sinceImage")
	}
}
// captureOutputWithError redirects os.Stdout to a pipe, runs f, and returns
// everything f wrote to standard output together with f's error.
//
// os.Stdout is restored on every exit path; previously a non-nil error from
// f left stdout redirected (and the pipe's write end open) for the rest of
// the process, silently swallowing all later output.
func captureOutputWithError(f func() error) (string, error) {
	old := os.Stdout
	r, w, err := os.Pipe()
	if err != nil {
		return "", err
	}
	os.Stdout = w
	// Restore stdout no matter how we leave this function.
	defer func() { os.Stdout = old }()
	ferr := f()
	// Close the write end so the reader below sees EOF.
	w.Close()
	if ferr != nil {
		return "", ferr
	}
	var buf bytes.Buffer
	io.Copy(&buf, r) //nolint
	return buf.String(), nil
}
// captureOutput runs f while redirecting os.Stdout into an in-memory pipe
// and returns everything f wrote, so output can be compared to expected
// values.
func captureOutput(f func()) string {
	saved := os.Stdout
	// Restore stdout once the capture is complete.
	defer func() { os.Stdout = saved }()
	r, w, _ := os.Pipe()
	os.Stdout = w
	f()
	// Closing the write end lets the copy below terminate at EOF.
	w.Close()
	var sink bytes.Buffer
	io.Copy(&sink, r) //nolint
	return sink.String()
}
// TestFormatImages exercises formatHistory over empty, single-entry, and
// multi-entry history lists, including entries that match the excluded
// name:tag pair.
func TestFormatImages(t *testing.T) {
	cases := []struct {
		history     []string
		name, tag   string
		expectation string
	}{
		{history: nil, name: "", tag: "", expectation: none},
		{history: []string{"image:tag"}, name: "", tag: "", expectation: "image:tag"},
		{history: []string{"image:tag"}, name: "image", tag: "tag", expectation: none},
		{history: []string{"image1:tag", "image2:tag"}, name: "image1", tag: "tag", expectation: "image2:tag"},
		{history: []string{"image1:tag", "image2:tag"}, name: "image3", tag: "tag", expectation: "image1:tag, image2:tag"},
	}
	for i, test := range cases {
		got := formatHistory(test.history, test.name, test.tag)
		if got != test.expectation {
			t.Errorf(
				"error in formatHistory [%d]: expected '%s' got '%s'",
				i, test.expectation, got,
			)
		}
	}
}

View File

@ -9,10 +9,10 @@ import (
"os"
"strings"
"github.com/containers/buildah/manifests"
"github.com/containers/buildah/pkg/cli"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/util"
"github.com/containers/common/libimage/manifests"
"github.com/containers/common/pkg/auth"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/manifest"
@ -225,7 +225,7 @@ func manifestCreateCmd(c *cobra.Command, args []string, opts manifestCreateOpts)
list := manifests.Create()
names, err := util.ExpandNames([]string{listImageSpec}, "", systemContext, store)
names, err := util.ExpandNames([]string{listImageSpec}, systemContext, store)
if err != nil {
return errors.Wrapf(err, "error encountered while expanding image name %q", listImageSpec)
}
@ -235,7 +235,7 @@ func manifestCreateCmd(c *cobra.Command, args []string, opts manifestCreateOpts)
if err != nil {
if ref, err = alltransports.ParseImageName(util.DefaultTransport + imageSpec); err != nil {
// check if the local image exists
if ref, _, err = util.FindImage(store, "", systemContext, imageSpec); err != nil {
if ref, _, err = util.FindImage(store, systemContext, imageSpec); err != nil {
return err
}
}
@ -285,7 +285,7 @@ func manifestAddCmd(c *cobra.Command, args []string, opts manifestAddOpts) error
return errors.Wrapf(err, "error building system context")
}
_, listImage, err := util.FindImage(store, "", systemContext, listImageSpec)
_, listImage, err := util.FindImage(store, systemContext, listImageSpec)
if err != nil {
return err
}
@ -294,7 +294,7 @@ func manifestAddCmd(c *cobra.Command, args []string, opts manifestAddOpts) error
if err != nil {
if ref, err = alltransports.ParseImageName(util.DefaultTransport + imageSpec); err != nil {
// check if the local image exists
if ref, _, err = util.FindImage(store, "", systemContext, imageSpec); err != nil {
if ref, _, err = util.FindImage(store, systemContext, imageSpec); err != nil {
return err
}
}
@ -309,7 +309,7 @@ func manifestAddCmd(c *cobra.Command, args []string, opts manifestAddOpts) error
if err != nil {
var storeErr error
// check if the local image exists
if ref, _, storeErr = util.FindImage(store, "", systemContext, imageSpec); storeErr != nil {
if ref, _, storeErr = util.FindImage(store, systemContext, imageSpec); storeErr != nil {
return err
}
digest, storeErr = list.Add(getContext(), systemContext, ref, opts.all)
@ -404,7 +404,7 @@ func manifestRemoveCmd(c *cobra.Command, args []string, opts manifestRemoveOpts)
return errors.Wrapf(err, "error building system context")
}
_, listImage, err := util.FindImage(store, "", systemContext, listImageSpec)
_, listImage, err := util.FindImage(store, systemContext, listImageSpec)
if err != nil {
return err
}
@ -461,7 +461,7 @@ func manifestAnnotateCmd(c *cobra.Command, args []string, opts manifestAnnotateO
return errors.Wrapf(err, "error building system context")
}
_, listImage, err := util.FindImage(store, "", systemContext, listImageSpec)
_, listImage, err := util.FindImage(store, systemContext, listImageSpec)
if err != nil {
return err
}
@ -474,7 +474,7 @@ func manifestAnnotateCmd(c *cobra.Command, args []string, opts manifestAnnotateO
digest, err := digest.Parse(imageSpec)
if err != nil {
ctx := getContext()
ref, _, err := util.FindImage(store, "", systemContext, imageSpec)
ref, _, err := util.FindImage(store, systemContext, imageSpec)
if err != nil {
return err
}
@ -573,12 +573,15 @@ func manifestInspectCmd(c *cobra.Command, args []string, opts manifestInspectOpt
}
func manifestInspect(ctx context.Context, store storage.Store, systemContext *types.SystemContext, imageSpec string) error {
// TODO: at some point `libimage` should support resolving manifests
// like that. Similar to `libimage.Runtime.LookupImage` we could
// implement a `*.LookupImageIndex`.
refs, err := util.ResolveNameToReferences(store, systemContext, imageSpec)
if err != nil {
logrus.Debugf("error parsing reference to image %q: %v", imageSpec, err)
}
if ref, _, err := util.FindImage(store, "", systemContext, imageSpec); err == nil {
if ref, _, err := util.FindImage(store, systemContext, imageSpec); err == nil {
refs = append(refs, ref)
} else if ref, err := alltransports.ParseImageName(imageSpec); err == nil {
refs = append(refs, ref)
@ -676,7 +679,7 @@ func manifestPushCmd(c *cobra.Command, args []string, opts pushOptions) error {
}
func manifestPush(systemContext *types.SystemContext, store storage.Store, listImageSpec, destSpec string, opts pushOptions) error {
_, listImage, err := util.FindImage(store, "", systemContext, listImageSpec)
_, listImage, err := util.FindImage(store, systemContext, listImageSpec)
if err != nil {
return err
}

View File

@ -1,31 +1,32 @@
package main
import (
"context"
"fmt"
"os"
"runtime"
"github.com/containers/buildah"
"github.com/containers/buildah/define"
"github.com/containers/buildah/pkg/blobcache"
buildahcli "github.com/containers/buildah/pkg/cli"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/common/libimage"
libimageTypes "github.com/containers/common/libimage/types"
"github.com/containers/common/pkg/auth"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
type pullOptions struct {
allTags bool
authfile string
// We can feed many flags directly to the options of libimage.
libimage.PullOptions
// Other flags need some massaging and validation.
blobCache string
certDir string
creds string
signaturePolicy string
quiet bool
removeSignatures bool
tlsVerify bool
decryptionKeys []string
pullPolicy string
decryptionKeys []string
tlsVerify bool
quiet bool
}
func init() {
@ -42,7 +43,7 @@ func init() {
Short: "Pull an image from the specified location",
Long: pullDescription,
RunE: func(cmd *cobra.Command, args []string) error {
return pullCmd(cmd, args, opts)
return pullCmd(cmd, args, &opts)
},
Example: `buildah pull imagename
buildah pull docker-daemon:imagename:imagetag
@ -52,22 +53,22 @@ func init() {
flags := pullCommand.Flags()
flags.SetInterspersed(false)
flags.BoolVarP(&opts.allTags, "all-tags", "a", false, "download all tagged images in the repository")
flags.StringVar(&opts.authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
flags.BoolVarP(&opts.AllTags, "all-tags", "a", false, "download all tagged images in the repository")
flags.StringVar(&opts.AuthFilePath, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
flags.StringVar(&opts.blobCache, "blob-cache", "", "store copies of pulled image blobs in the specified directory")
flags.StringVar(&opts.certDir, "cert-dir", "", "use certificates at the specified path to access the registry")
flags.StringVar(&opts.creds, "creds", "", "use `[username[:password]]` for accessing the registry")
flags.StringVar(&opts.CertDirPath, "cert-dir", "", "use certificates at the specified path to access the registry")
flags.StringVar(&opts.Credentials, "creds", "", "use `[username[:password]]` for accessing the registry")
flags.StringVar(&opts.pullPolicy, "policy", "missing", "missing, always, or never.")
flags.BoolVarP(&opts.removeSignatures, "remove-signatures", "", false, "don't copy signatures when pulling image")
flags.StringVar(&opts.signaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
flags.BoolVarP(&opts.RemoveSignatures, "remove-signatures", "", false, "don't copy signatures when pulling image")
flags.StringVar(&opts.SignaturePolicyPath, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
flags.StringSliceVar(&opts.decryptionKeys, "decryption-key", nil, "key needed to decrypt the image")
if err := flags.MarkHidden("signature-policy"); err != nil {
panic(fmt.Sprintf("error marking signature-policy as hidden: %v", err))
}
flags.BoolVarP(&opts.quiet, "quiet", "q", false, "don't output progress information when pulling images")
flags.String("os", runtime.GOOS, "prefer `OS` instead of the running OS for choosing images")
flags.String("arch", runtime.GOARCH, "prefer `ARCH` instead of the architecture of the machine for choosing images")
flags.String("variant", "", "override the `variant` of the specified image")
flags.StringVar(&opts.OS, "os", runtime.GOOS, "prefer `OS` instead of the running OS for choosing images")
flags.StringVar(&opts.Architecture, "arch", runtime.GOARCH, "prefer `ARCH` instead of the architecture of the machine for choosing images")
flags.StringVar(&opts.Variant, "variant", "", "override the `variant` of the specified image")
flags.BoolVar(&opts.tlsVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry. TLS verification cannot be used when talking to an insecure registry.")
if err := flags.MarkHidden("blob-cache"); err != nil {
panic(fmt.Sprintf("error marking blob-cache as hidden: %v", err))
@ -76,7 +77,8 @@ func init() {
rootCmd.AddCommand(pullCommand)
}
func pullCmd(c *cobra.Command, args []string, iopts pullOptions) error {
func pullCmd(c *cobra.Command, args []string, options *pullOptions) error {
var err error
if len(args) == 0 {
return errors.Errorf("an image name must be specified")
}
@ -86,7 +88,27 @@ func pullCmd(c *cobra.Command, args []string, iopts pullOptions) error {
if len(args) > 1 {
return errors.Errorf("too many arguments specified")
}
if err := auth.CheckAuthFile(iopts.authfile); err != nil {
if err := auth.CheckAuthFile(options.AuthFilePath); err != nil {
return err
}
options.OciDecryptConfig, err = getDecryptConfig(options.decryptionKeys)
if err != nil {
return errors.Wrapf(err, "unable to obtain decrypt config")
}
options.Writer = os.Stderr
if options.quiet {
options.Writer = nil
}
if options.blobCache != "" {
// options.SourceLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.blobCache, types.PreserveOriginal)
options.DestinationLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.blobCache, types.PreserveOriginal)
}
pullPolicy, err := libimageTypes.ParsePullPolicy(options.pullPolicy)
if err != nil {
return err
}
@ -100,37 +122,19 @@ func pullCmd(c *cobra.Command, args []string, iopts pullOptions) error {
return err
}
decConfig, err := getDecryptConfig(iopts.decryptionKeys)
if err != nil {
return errors.Wrapf(err, "unable to obtain decrypt config")
}
policy, ok := define.PolicyMap[iopts.pullPolicy]
if !ok {
return fmt.Errorf("unrecognized pull policy %s", iopts.pullPolicy)
}
options := buildah.PullOptions{
SignaturePolicyPath: iopts.signaturePolicy,
Store: store,
SystemContext: systemContext,
BlobDirectory: iopts.blobCache,
AllTags: iopts.allTags,
ReportWriter: os.Stderr,
RemoveSignatures: iopts.removeSignatures,
MaxRetries: maxPullPushRetries,
RetryDelay: pullPushRetryDelay,
OciDecryptConfig: decConfig,
PullPolicy: policy,
}
if iopts.quiet {
options.ReportWriter = nil // Turns off logging output
}
id, err := buildah.Pull(getContext(), args[0], options)
runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
if err != nil {
return err
}
fmt.Printf("%s\n", id)
pulledImages, err := runtime.Pull(context.Background(), args[0], pullPolicy, &options.PullOptions)
if err != nil {
return err
}
for _, pulledImage := range pulledImages {
fmt.Printf("%s\n", pulledImage.ID())
}
return nil
}

View File

@ -1,20 +1,20 @@
package main
import (
"context"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/containers/buildah"
"github.com/containers/buildah/define"
"github.com/containers/buildah/pkg/blobcache"
buildahcli "github.com/containers/buildah/pkg/cli"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/util"
"github.com/containers/common/libimage"
"github.com/containers/common/pkg/auth"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
@ -41,6 +41,55 @@ type pushOptions struct {
encryptLayers []int
}
// translates the pushOptions into libimage.PushOptions.
func (iopts *pushOptions) toLibimagePushOptions() (*libimage.PushOptions, error) {
pushOptions := &libimage.PushOptions{}
pushOptions.PolicyAllowStorage = true
pushOptions.AuthFilePath = iopts.authfile
pushOptions.CertDirPath = iopts.certDir
pushOptions.Credentials = iopts.creds
pushOptions.RemoveSignatures = iopts.removeSignatures
pushOptions.SignaturePolicyPath = iopts.signaturePolicy
pushOptions.SignBy = iopts.signBy
if iopts.blobCache != "" {
compress := types.Compress
if iopts.disableCompression {
compress = types.PreserveOriginal
}
pushOptions.SourceLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(iopts.blobCache, compress)
}
var manifestType string
if iopts.format != "" {
switch iopts.format {
case "oci":
manifestType = imgspecv1.MediaTypeImageManifest
case "v2s1":
manifestType = manifest.DockerV2Schema1SignedMediaType
case "v2s2", "docker":
manifestType = manifest.DockerV2Schema2MediaType
default:
return nil, errors.Errorf("unknown format %q. Choose on of the supported formats: 'oci', 'v2s1', or 'v2s2'", iopts.format)
}
}
pushOptions.ManifestMIMEType = manifestType
encConfig, encLayers, err := getEncryptConfig(iopts.encryptionKeys, iopts.encryptLayers)
if err != nil {
return nil, errors.Wrapf(err, "unable to obtain encryption config")
}
pushOptions.OciEncryptConfig = encConfig
pushOptions.OciEncryptLayers = encLayers
pushOptions.InsecureSkipTLSVerify = types.NewOptionalBool(!iopts.tlsVerify)
if !iopts.quiet {
pushOptions.Writer = os.Stderr
}
return pushOptions, nil
}
func init() {
var (
opts pushOptions
@ -125,108 +174,52 @@ func pushCmd(c *cobra.Command, args []string, iopts pushOptions) error {
return errors.New("Only two arguments are necessary to push: source and destination")
}
compress := define.Gzip
if iopts.disableCompression {
compress = define.Uncompressed
}
store, err := getStore(c)
if err != nil {
return err
}
dest, err := alltransports.ParseImageName(destSpec)
// add the docker:// transport to see if they neglected it.
if err != nil {
destTransport := strings.Split(destSpec, ":")[0]
if t := transports.Get(destTransport); t != nil {
return err
}
if strings.Contains(destSpec, "://") {
return err
}
destSpec = "docker://" + destSpec
dest2, err2 := alltransports.ParseImageName(destSpec)
if err2 != nil {
return err
}
dest = dest2
logrus.Debugf("Assuming docker:// as the transport method for DESTINATION: %s", destSpec)
}
systemContext, err := parse.SystemContextFromOptions(c)
if err != nil {
return errors.Wrapf(err, "error building system context")
}
var manifestType string
if iopts.format != "" {
switch iopts.format {
case "oci":
manifestType = imgspecv1.MediaTypeImageManifest
case "v2s1":
manifestType = manifest.DockerV2Schema1SignedMediaType
case "v2s2", "docker":
manifestType = manifest.DockerV2Schema2MediaType
default:
return errors.Errorf("unknown format %q. Choose on of the supported formats: 'oci', 'v2s1', or 'v2s2'", iopts.format)
}
}
encConfig, encLayers, err := getEncryptConfig(iopts.encryptionKeys, iopts.encryptLayers)
runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
if err != nil {
return errors.Wrapf(err, "unable to obtain encryption config")
return err
}
options := buildah.PushOptions{
Compression: compress,
ManifestType: manifestType,
SignaturePolicyPath: iopts.signaturePolicy,
Store: store,
SystemContext: systemContext,
BlobDirectory: iopts.blobCache,
RemoveSignatures: iopts.removeSignatures,
SignBy: iopts.signBy,
MaxRetries: maxPullPushRetries,
RetryDelay: pullPushRetryDelay,
OciEncryptConfig: encConfig,
OciEncryptLayers: encLayers,
}
if !iopts.quiet {
options.ReportWriter = os.Stderr
}
ref, digest, err := buildah.Push(getContext(), src, dest, options)
pushOptions, err := iopts.toLibimagePushOptions()
if err != nil {
if errors.Cause(err) != storage.ErrImageUnknown {
return err
}
pushedManifestBytes, pushError := runtime.Push(context.Background(), src, destSpec, pushOptions)
if pushError != nil {
// TODO: maybe we find a way to handle that transparently in libimage?
if errors.Cause(pushError) != storage.ErrImageUnknown {
// Image might be a manifest so attempt a manifest push
if manifestsErr := manifestPush(systemContext, store, src, destSpec, iopts); manifestsErr == nil {
return nil
}
}
return util.GetFailureCause(err, errors.Wrapf(err, "error pushing image %q to %q", src, destSpec))
return pushError
}
if ref != nil {
logrus.Debugf("pushed image %q with digest %s", ref, digest.String())
} else {
logrus.Debugf("pushed image with digest %s", digest.String())
}
logrus.Debugf("Successfully pushed %s with digest %s", transports.ImageName(dest), digest.String())
if iopts.digestfile != "" {
if err = ioutil.WriteFile(iopts.digestfile, []byte(digest.String()), 0644); err != nil {
return util.GetFailureCause(err, errors.Wrapf(err, "failed to write digest to file %q", iopts.digestfile))
manifestDigest, err := manifest.Digest(pushedManifestBytes)
if err != nil {
return err
}
if err := ioutil.WriteFile(iopts.digestfile, []byte(manifestDigest.String()), 0644); err != nil {
return err
}
}
return nil
}
// getListOfTransports gets the transports supported from the image library
// and strips of the "tarball" transport from the string of transports returned
func getListOfTransports() string {
allTransports := strings.Join(transports.ListNames(), ",")
return strings.Replace(allTransports, ",tarball", "", 1)

View File

@ -3,23 +3,15 @@ package main
import (
"context"
"fmt"
"os"
"syscall"
buildahcli "github.com/containers/buildah/pkg/cli"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/util"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/common/libimage"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
type rmiResults struct {
type rmiOptions struct {
all bool
prune bool
force bool
@ -28,7 +20,7 @@ type rmiResults struct {
func init() {
var (
rmiDescription = "\n Removes one or more locally stored images."
opts rmiResults
opts rmiOptions
)
rmiCommand := &cobra.Command{
Use: "rmi",
@ -53,7 +45,7 @@ func init() {
rootCmd.AddCommand(rmiCommand)
}
func rmiCmd(c *cobra.Command, args []string, iopts rmiResults) error {
func rmiCmd(c *cobra.Command, args []string, iopts rmiOptions) error {
if len(args) == 0 && !iopts.all && !iopts.prune {
return errors.Errorf("image name or ID must be specified")
}
@ -76,330 +68,31 @@ func rmiCmd(c *cobra.Command, args []string, iopts rmiResults) error {
return err
}
imagesToDelete := args[:]
if iopts.all {
imagesToDelete, err = findAllRWImages(store)
if err != nil {
return err
}
}
if iopts.prune {
imagesToDelete, err = findDanglingImages(store)
if err != nil {
return err
}
}
ctx := getContext()
systemContext, err := parse.SystemContextFromOptions(c)
if err != nil {
return errors.Wrapf(err, "error building system context")
return err
}
return deleteImages(ctx, systemContext, store, imagesToDelete, iopts.all, iopts.force, iopts.prune)
}
func deleteImages(ctx context.Context, systemContext *types.SystemContext, store storage.Store, imagesToDelete []string, removeAll, force, prune bool) error {
var lastError error
for _, id := range imagesToDelete {
image, err := getImage(ctx, systemContext, store, id)
if err != nil || image == nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
if err == nil {
err = storage.ErrNotAnImage
}
lastError = errors.Wrapf(err, "could not get image %q", id)
continue
}
if image.ReadOnly {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
lastError = errors.Wrapf(syscall.EINVAL, "can not remove readonly image %q", id)
continue
}
ctrIDs, err := runningContainers(store, image)
runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
if err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
lastError = errors.Wrapf(err, "error getting running containers for image %q", id)
continue
}
if len(ctrIDs) > 0 && len(image.Names) <= 1 {
if force {
err = removeContainers(ctrIDs, store)
if err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
lastError = errors.Wrapf(err, "error removing containers %v for image %q", ctrIDs, id)
continue
}
} else {
for _, ctrID := range ctrIDs {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
lastError = errors.Wrapf(storage.ErrImageUsedByContainer, "Could not remove image %q (must force) - container %q is using its reference image", id, ctrID)
}
continue
}
}
// If the user supplied an ID, we cannot delete the image if it is referred to by multiple tags
if matchesID(image.ID, id) {
if len(image.Names) > 1 && !force {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
lastError = errors.Errorf("unable to delete %s (must force) - image is referred to in multiple tags", image.ID)
continue
}
// If it is forced, we have to untag the image so that it can be deleted
image.Names = image.Names[:0]
} else {
name, err2 := untagImage(id, store, image)
if err2 != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
lastError = errors.Wrapf(err2, "error removing tag %q from image %q", id, image.ID)
continue
}
fmt.Printf("untagged: %s\n", name)
// Need to fetch the image state again after making changes to it i.e untag
// because only a copy of the image state is returned
image1, err := getImage(ctx, systemContext, store, image.ID)
if err != nil || image1 == nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
lastError = errors.Wrapf(err, "error getting image after untag %q", image.ID)
} else {
image = image1
}
return err
}
isParent, err := imageIsParent(ctx, systemContext, store, image)
if err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
options := &libimage.RemoveImagesOptions{
Filters: []string{"readonly=false"},
}
lastError = errors.Wrapf(err, "error determining if the image %q is a parent", image.ID)
continue
}
// If the --all flag is not set and the image has named references or is
// a parent, do not delete image.
if len(image.Names) > 0 && !removeAll {
continue
if iopts.prune {
options.Filters = append(options.Filters, "dangling=true")
} else if !iopts.all {
options.Filters = append(options.Filters, "intermediate=false")
}
options.Force = iopts.force
if isParent && len(image.Names) == 0 && !removeAll {
if !prune {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
untagged, removed, err := runtime.RemoveImages(context.Background(), args, options)
for _, u := range untagged {
fmt.Printf("untagged: %s\n", u)
}
lastError = errors.Errorf("unable to delete %q (cannot be forced) - image has dependent child images", image.ID)
for _, r := range removed {
fmt.Printf("%s\n", r)
}
continue
}
id, err := removeImage(ctx, systemContext, store, image)
if err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
lastError = errors.Wrapf(err, "error removing image %q", image.ID)
continue
}
fmt.Printf("%s\n", id)
}
return lastError
}
func getImage(ctx context.Context, systemContext *types.SystemContext, store storage.Store, id string) (*storage.Image, error) {
var ref types.ImageReference
ref, err := properImageRef(ctx, id)
if err != nil {
logrus.Debug(err)
}
if ref == nil {
if ref, err = storageImageRef(systemContext, store, id); err != nil {
logrus.Debug(err)
}
}
if ref == nil {
if ref, err = storageImageID(ctx, store, id); err != nil {
logrus.Debug(err)
}
}
if ref != nil {
image, err2 := is.Transport.GetStoreImage(store, ref)
if err2 != nil {
return nil, errors.Wrapf(err2, "error reading image using reference %q", transports.ImageName(ref))
}
return image, nil
}
return nil, err
}
func untagImage(imgArg string, store storage.Store, image *storage.Image) (string, error) {
newNames := []string{}
removedName := ""
for _, name := range image.Names {
if matchesReference(name, imgArg) {
removedName = name
continue
}
newNames = append(newNames, name)
}
if removedName != "" {
if err := store.SetNames(image.ID, newNames); err != nil {
return "", errors.Wrapf(err, "error removing name %q from image %q", removedName, image.ID)
}
}
return removedName, nil
}
func removeImage(ctx context.Context, systemContext *types.SystemContext, store storage.Store, image *storage.Image) (string, error) {
parent, err := getParent(ctx, systemContext, store, image)
if err != nil {
return "", err
}
if _, err := store.DeleteImage(image.ID, true); err != nil {
return "", errors.Wrapf(err, "could not remove image %q", image.ID)
}
for parent != nil {
nextParent, err := getParent(ctx, systemContext, store, parent)
if err != nil {
return image.ID, errors.Wrapf(err, "unable to get parent from image %q", image.ID)
}
isParent, err := imageIsParent(ctx, systemContext, store, parent)
if err != nil {
return image.ID, errors.Wrapf(err, "unable to get check if image %q is a parent", image.ID)
}
// Do not remove if image is a base image and is not untagged, or if
// the image has more children.
if len(parent.Names) > 0 || isParent {
return image.ID, nil
}
id := parent.ID
if _, err := store.DeleteImage(id, true); err != nil {
logrus.Debugf("unable to remove intermediate image %q: %v", id, err)
} else {
fmt.Println(id)
}
parent = nextParent
}
return image.ID, nil
}
// Returns a list of running containers associated with the given ImageReference
func runningContainers(store storage.Store, image *storage.Image) ([]string, error) {
ctrIDs := []string{}
containers, err := store.Containers()
if err != nil {
return nil, err
}
for _, ctr := range containers {
if ctr.ImageID == image.ID {
ctrIDs = append(ctrIDs, ctr.ID)
}
}
return ctrIDs, nil
}
func removeContainers(ctrIDs []string, store storage.Store) error {
for _, ctrID := range ctrIDs {
if err := store.DeleteContainer(ctrID); err != nil {
return errors.Wrapf(err, "could not remove container %q", ctrID)
}
}
return nil
}
// If it's looks like a proper image reference, parse it and check if it
// corresponds to an image that actually exists.
func properImageRef(ctx context.Context, id string) (types.ImageReference, error) {
var err error
if ref, err := alltransports.ParseImageName(id); err == nil {
if img, err2 := ref.NewImageSource(ctx, nil); err2 == nil {
img.Close()
return ref, nil
}
return nil, errors.Wrapf(err, "error confirming presence of image reference %q", transports.ImageName(ref))
}
return nil, errors.Wrapf(err, "error parsing %q as an image reference", id)
}
// If it's looks like an image reference that's relative to our storage, parse
// it and check if it corresponds to an image that actually exists.
func storageImageRef(systemContext *types.SystemContext, store storage.Store, id string) (types.ImageReference, error) {
ref, _, err := util.FindImage(store, "", systemContext, id)
if err != nil {
if ref != nil {
return nil, errors.Wrapf(err, "error confirming presence of storage image reference %q", transports.ImageName(ref))
}
return nil, errors.Wrapf(err, "error confirming presence of storage image name %q", id)
}
return ref, err
}
// If it might be an ID that's relative to our storage, truncated or not, so
// parse it and check if it corresponds to an image that we have stored
// locally.
func storageImageID(ctx context.Context, store storage.Store, id string) (types.ImageReference, error) {
var err error
imageID := id
if img, err := store.Image(id); err == nil && img != nil {
imageID = img.ID
}
if ref, err := is.Transport.ParseStoreReference(store, imageID); err == nil {
if img, err2 := ref.NewImageSource(ctx, nil); err2 == nil {
img.Close()
return ref, nil
}
return nil, errors.Wrapf(err, "error confirming presence of storage image reference %q", transports.ImageName(ref))
}
return nil, errors.Wrapf(err, "error parsing %q as a storage image reference", id)
}
// Returns a list of all existing images
func findAllRWImages(store storage.Store) ([]string, error) {
imagesToDelete := []string{}
images, err := store.Images()
if err != nil {
return nil, errors.Wrapf(err, "error reading images")
}
for _, image := range images {
if image.ReadOnly {
continue
}
imagesToDelete = append(imagesToDelete, image.ID)
}
return imagesToDelete, nil
}
// Returns a list of all dangling images
func findDanglingImages(store storage.Store) ([]string, error) {
imagesToDelete := []string{}
images, err := store.Images()
if err != nil {
return nil, errors.Wrapf(err, "error reading images")
}
for _, image := range images {
if len(image.Names) == 0 {
imagesToDelete = append(imagesToDelete, image.ID)
}
}
return imagesToDelete, nil
return err
}

View File

@ -1,131 +0,0 @@
package main
import (
"strings"
"testing"
is "github.com/containers/image/v5/storage"
"github.com/containers/storage"
)
// TestProperImageRefTrue expects properImageRef to resolve a fully
// transport-qualified docker:// reference for an image that is present.
func TestProperImageRefTrue(t *testing.T) {
	// Make sure the image is available.
	pullTestImage(t)
	// A transport-qualified name should parse and resolve.
	ref, err := properImageRef(getContext(), "docker://busybox:latest")
	switch {
	case err != nil:
		t.Errorf("could not match image: %v", err)
	case ref == nil:
		t.Error("Returned nil Image Reference")
	}
}
// TestProperImageRefFalse expects properImageRef to reject a malformed
// docker:// reference.
func TestProperImageRefFalse(t *testing.T) {
	// Make sure an image is available.
	pullTestImage(t)
	// "docker://:" is not a valid reference and must not resolve.
	if ref, _ := properImageRef(getContext(), "docker://:"); ref != nil {
		t.Error("should not have found an Image Reference")
	}
}
// TestStorageImageRefTrue verifies that a locally-stored image name
// resolves to a non-nil storage image reference.
func TestStorageImageRefTrue(t *testing.T) {
	// These tests require root privileges to access the storage backend.
	failTestIfNotRoot(t)
	store, err := storage.GetStore(storeOptions)
	if store != nil {
		// Register the store with the storage transport before using it.
		is.Transport.SetStore(store)
	}
	if err != nil {
		t.Fatalf("could not get store: %v", err)
	}
	// Make sure the image is available locally before resolving it.
	pullTestImage(t)
	ref, err := storageImageRef(&testSystemContext, store, "busybox")
	switch {
	case err != nil:
		t.Errorf("could not match image: %v", err)
	case ref == nil:
		t.Error("Returned nil Image Reference")
	}
}
// TestStorageImageRefFalse verifies that an empty name does not resolve
// to a storage image reference.
func TestStorageImageRefFalse(t *testing.T) {
	// These tests require root privileges to access the storage backend.
	failTestIfNotRoot(t)
	store, err := storage.GetStore(storeOptions)
	if store != nil {
		// Register the store with the storage transport before using it.
		is.Transport.SetStore(store)
	}
	if err != nil {
		t.Fatalf("could not get store: %v", err)
	}
	// Make sure the image is available locally before resolving anything.
	pullTestImage(t)
	// An empty name must not match any image.
	if ref, _ := storageImageRef(&testSystemContext, store, ""); ref != nil {
		t.Error("should not have found an Image Reference")
	}
}
// TestStorageImageIDTrue verifies that the ID of a locally-stored image
// resolves to a non-nil storage image reference.
func TestStorageImageIDTrue(t *testing.T) {
	// These tests require root privileges to access the storage backend.
	failTestIfNotRoot(t)
	store, err := storage.GetStore(storeOptions)
	if store != nil {
		// Register the store with the storage transport before using it.
		is.Transport.SetStore(store)
	}
	if err != nil {
		t.Fatalf("could not get store: %v", err)
	}
	// Make sure there is at least one image in the store.
	pullTestImage(t)
	// Look up the ID of the image we just pulled.
	images, err := store.Images()
	if err != nil {
		t.Fatalf("Error reading images: %v", err)
	}
	var id string
	if len(images) > 0 {
		id = strings.TrimSpace(images[0].ID)
	}
	if id == "" {
		t.Fatalf("Error getting image id")
	}
	ref, err := storageImageID(getContext(), store, id)
	switch {
	case err != nil:
		t.Errorf("could not match image: %v", err)
	case ref == nil:
		t.Error("Returned nil Image Reference")
	}
}
// TestStorageImageIDFalse verifies that an empty ID does not resolve to
// a storage image reference.
func TestStorageImageIDFalse(t *testing.T) {
	// These tests require root privileges to access the storage backend.
	failTestIfNotRoot(t)
	store, err := storage.GetStore(storeOptions)
	if store != nil {
		// Register the store with the storage transport before using it.
		is.Transport.SetStore(store)
	}
	if err != nil {
		t.Fatalf("could not get store: %v", err)
	}
	// An empty ID must not resolve to any image reference.
	if ref, _ := storageImageID(getContext(), store, ""); ref != nil {
		t.Error("should not have returned Image Reference")
	}
}

View File

@ -2,7 +2,7 @@ package main
import (
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/util"
"github.com/containers/common/libimage"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
@ -30,12 +30,21 @@ func tagCmd(c *cobra.Command, args []string) error {
if err != nil {
return errors.Wrapf(err, "error building system context")
}
_, img, err := util.FindImage(store, "", systemContext, args[0])
runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
if err != nil {
return errors.Wrapf(err, "error finding local image %q", args[0])
return err
}
lookupOptions := libimage.LookupImageOptions{IgnorePlatform: true}
image, _, err := runtime.LookupImage(args[0], &lookupOptions)
if err != nil {
return err
}
for _, tag := range args[1:] {
if err := image.Tag(tag); err != nil {
return err
}
if err := util.AddImageNames(store, "", systemContext, img, args[1:]); err != nil {
return errors.Wrapf(err, "error adding names %v to image %q", args[1:], args[0])
}
return nil
}

158
commit.go
View File

@ -3,16 +3,15 @@ package buildah
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"time"
"github.com/containers/buildah/manifests"
"github.com/containers/buildah/pkg/blobcache"
"github.com/containers/buildah/util"
"github.com/containers/common/libimage/manifests"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
@ -104,59 +103,6 @@ type CommitOptions struct {
OciEncryptLayers *[]int
}
// PushOptions can be used to alter how an image is copied somewhere.
type PushOptions struct {
	// Compression specifies the type of compression which is applied to
	// layer blobs.  The default is to not use compression, but
	// archive.Gzip is recommended.
	Compression archive.Compression
	// SignaturePolicyPath specifies an override location for the signature
	// policy which should be used for verifying the new image as it is
	// being written.  Except in specific circumstances, no value should be
	// specified, indicating that the shared, system-wide default policy
	// should be used.
	SignaturePolicyPath string
	// ReportWriter is an io.Writer which will be used to log the writing
	// of the new image.
	ReportWriter io.Writer
	// Store is the local storage store which holds the source image.
	Store storage.Store
	// SystemContext is a github.com/containers/image/types SystemContext
	// used to hold credentials and other authentication/authorization
	// information.
	SystemContext *types.SystemContext
	// ManifestType is the format to use when saving the image using the 'dir' transport
	// possible options are oci, v2s1, and v2s2.
	ManifestType string
	// BlobDirectory is the name of a directory in which we'll look for
	// prebuilt copies of layer blobs that we might otherwise need to
	// regenerate from on-disk layers, substituting them in the list of
	// blobs to copy whenever possible.
	BlobDirectory string
	// Quiet is a boolean value that determines if minimal output to
	// the user will be displayed; this is best used for logging.
	// The default is false.
	Quiet bool
	// SignBy is the fingerprint of a GPG key to use for signing the image.
	SignBy string
	// RemoveSignatures causes any existing signatures for the image to be
	// discarded for the pushed copy.
	RemoveSignatures bool
	// MaxRetries is the maximum number of attempts we'll make to push any
	// one image to the external registry if the first attempt fails.
	MaxRetries int
	// RetryDelay is how long to wait before retrying a push attempt.
	RetryDelay time.Duration
	// OciEncryptConfig when non-nil indicates that an image should be encrypted.
	// The encryption options are derived from the construction of the
	// EncryptConfig object.
	OciEncryptConfig *encconfig.EncryptConfig
	// OciEncryptLayers represents the list of layers to encrypt.
	// If nil, don't encrypt any layers.
	// If non-nil and len==0, denotes encrypt all layers.
	// integers in the slice represent 0-indexed layer indices, with support for negative
	// indexing, i.e. 0 is the first layer, -1 is the last (top-most) layer.
	OciEncryptLayers *[]int
}
var (
// storageAllowedPolicyScopes overrides the policy for local storage
// to ensure that we can read images from it.
@ -228,7 +174,7 @@ func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpe
var create bool
systemContext := &types.SystemContext{}
var list manifests.List
_, listImage, err := util.FindImage(b.store, "", systemContext, manifestName)
_, listImage, err := util.FindImage(b.store, systemContext, manifestName)
if err != nil {
create = true
list = manifests.Create()
@ -239,7 +185,7 @@ func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpe
}
}
names, err := util.ExpandNames([]string{manifestName}, "", systemContext, b.store)
names, err := util.ExpandNames([]string{manifestName}, systemContext, b.store)
if err != nil {
return "", errors.Wrapf(err, "error encountered while expanding image name %q", manifestName)
}
@ -248,7 +194,7 @@ func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpe
if err != nil {
if ref, err = alltransports.ParseImageName(util.DefaultTransport + imageSpec); err != nil {
// check if the local image exists
if ref, _, err = util.FindImage(b.store, "", systemContext, imageSpec); err != nil {
if ref, _, err = util.FindImage(b.store, systemContext, imageSpec); err != nil {
return "", err
}
}
@ -409,7 +355,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
if err != nil {
return imgID, nil, "", errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
}
if err = util.AddImageNames(b.store, "", systemContext, img, options.AdditionalTags); err != nil {
if err = util.TagImage(b.store, systemContext, img, options.AdditionalTags); err != nil {
return imgID, nil, "", errors.Wrapf(err, "error setting image names to %v", append(img.Names, options.AdditionalTags...))
}
logrus.Debugf("assigned names %v to image %q", img.Names, img.ID)
@ -471,97 +417,3 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
return imgID, ref, manifestDigest, nil
}
// Push copies the contents of the image to a new location.  It verifies
// that the destination registry is not blocked by configuration, enforces
// the system signing policy, optionally routes layer blobs through a local
// blob cache, and returns the canonical reference (when the destination has
// a docker reference) along with the digest of the pushed manifest.
func Push(ctx context.Context, image string, dest types.ImageReference, options PushOptions) (reference.Canonical, digest.Digest, error) {
	systemContext := getSystemContext(options.Store, options.SystemContext, options.SignaturePolicyPath)
	if options.Quiet {
		options.ReportWriter = nil // Turns off logging output
	}
	// Refuse to push if the destination registry is blocked by configuration.
	blocked, err := isReferenceBlocked(dest, systemContext)
	if err != nil {
		return nil, "", errors.Wrapf(err, "error checking if pushing to registry for %q is blocked", transports.ImageName(dest))
	}
	if blocked {
		return nil, "", errors.Errorf("push access to registry for %q is blocked by configuration", transports.ImageName(dest))
	}
	// Load the system signing policy.
	pushPolicy, err := signature.DefaultPolicy(systemContext)
	if err != nil {
		return nil, "", errors.Wrapf(err, "error obtaining default signature policy")
	}
	// Override the settings for local storage to make sure that we can always read the source "image".
	pushPolicy.Transports[is.Transport.Name()] = storageAllowedPolicyScopes
	policyContext, err := signature.NewPolicyContext(pushPolicy)
	if err != nil {
		return nil, "", errors.Wrapf(err, "error creating new signature policy context")
	}
	// Destroy the policy context on the way out; a failure there is only
	// worth a debug log, not an error return.
	defer func() {
		if err2 := policyContext.Destroy(); err2 != nil {
			logrus.Debugf("error destroying signature policy context: %v", err2)
		}
	}()
	// Look up the image.
	src, _, err := util.FindImage(options.Store, "", systemContext, image)
	if err != nil {
		return nil, "", err
	}
	// Optionally wrap the source in a blob cache so previously-written
	// layer blobs can be reused instead of regenerated.
	maybeCachedSrc := src
	if options.BlobDirectory != "" {
		compress := types.PreserveOriginal
		if options.Compression != archive.Uncompressed {
			compress = types.Compress
		}
		cache, err := blobcache.NewBlobCache(src, options.BlobDirectory, compress)
		if err != nil {
			return nil, "", errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(src), options.BlobDirectory)
		}
		maybeCachedSrc = cache
	}
	// Check if the push is blocked by $BUILDER_REGISTRY_SOURCES.
	insecure, err := checkRegistrySourcesAllows("push to", dest)
	if err != nil {
		return nil, "", err
	}
	if insecure {
		// An explicit request for TLS verification conflicts with a
		// registry marked insecure; anything else gets TLS checks relaxed.
		if systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
			return nil, "", errors.Errorf("can't require tls verification on an insecured registry")
		}
		systemContext.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue
		systemContext.OCIInsecureSkipTLSVerify = true
		systemContext.DockerDaemonInsecureSkipTLSVerify = true
	}
	logrus.Debugf("pushing image to reference %q is allowed by policy", transports.ImageName(dest))
	// Copy everything.
	switch options.Compression {
	case archive.Uncompressed:
		systemContext.OCIAcceptUncompressedLayers = true
	case archive.Gzip:
		systemContext.DirForceCompress = true
	}
	var manifestBytes []byte
	if manifestBytes, err = retryCopyImage(ctx, policyContext, dest, maybeCachedSrc, dest, getCopyOptions(options.Store, options.ReportWriter, nil, systemContext, options.ManifestType, options.RemoveSignatures, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
		return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(maybeCachedSrc), transports.ImageName(dest))
	}
	if options.ReportWriter != nil {
		fmt.Fprintf(options.ReportWriter, "")
	}
	manifestDigest, err := manifest.Digest(manifestBytes)
	if err != nil {
		return nil, "", errors.Wrapf(err, "error computing digest of manifest of new image %q", transports.ImageName(dest))
	}
	// If the destination carries a docker reference, attach the manifest
	// digest to it to produce a canonical reference; a failure here is
	// non-fatal and only logged.
	var ref reference.Canonical
	if name := dest.DockerReference(); name != nil {
		ref, err = reference.WithDigest(name, manifestDigest)
		if err != nil {
			logrus.Warnf("error generating canonical reference with name %q and digest %s: %v", name, manifestDigest.String(), err)
		}
	}
	return ref, manifestDigest, nil
}

View File

@ -22,25 +22,33 @@ Show the image digests.
**--filter**, **-f**=[]
Filter output based on conditions provided (default []). Valid
keywords are 'before', 'dangling', 'label', 'readonly' and 'since' .
Filter output based on conditions provided (default []).
Filters:
**before==TIMESTRING**
Filter on images created before the given time.Time.
**after,since=image**
Filter on images created since the given image.
**before=image**
Filter on images created before the given image.
**dangling=true|false**
Show dangling images. Dangling images are a file system layer that was used in a previous build of an image and is no longer referenced by any active images. They are denoted with the <none> tag, consume disk space and serve no active purpose.
Show dangling images. An image is considered to be dangling if it has no associated names and tags.
**label**
**id=id**
Show image with this specific ID.
**intermediate=true|false**
Show intermediate images. An image is considered to be an intermediate image if it is dangling and has no children.
**label=key[=value]**
Filter by images labels key and/or value.
**readonly=true|false**
Show only read only images or Read/Write images. The default is to show both. Read/Only images can be configured by modifying the "additionalimagestores" in the /etc/containers/storage.conf file.
**since==TIMESTRING**
Filter on images created since the given time.Time.
**reference=reference**
Show images matching the specified reference. Wildcards are supported (e.g., "reference=*fedora:3*").
**--format**="TEMPLATE"

6
go.mod
View File

@ -4,17 +4,17 @@ go 1.12
require (
github.com/containernetworking/cni v0.8.1
github.com/containers/common v0.36.0
github.com/containers/common v0.37.0
github.com/containers/image/v5 v5.11.1
github.com/containers/ocicrypt v1.1.1
github.com/containers/storage v1.29.0
github.com/containers/storage v1.30.0
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/go-units v0.4.0
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
github.com/fsouza/go-dockerclient v1.7.2
github.com/ghodss/yaml v1.0.0
github.com/hashicorp/go-multierror v1.1.1
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect
github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect
github.com/mattn/go-shellwords v1.0.11
github.com/onsi/ginkgo v1.16.1
github.com/onsi/gomega v1.11.0

43
go.sum
View File

@ -99,7 +99,6 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@ -108,7 +107,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s=
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
@ -129,7 +127,6 @@ github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTF
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@ -181,22 +178,19 @@ github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ
github.com/containernetworking/cni v0.8.1 h1:7zpDnQ3T3s4ucOuJ/ZCLrYBxzkg0AELFfII3Epo9TmI=
github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
github.com/containers/common v0.36.0 h1:7/0GM3oi2ROmKAg/8pDWJ8BU2BXdbmy7Gk2/SFCTV38=
github.com/containers/common v0.36.0/go.mod h1:rMzxgD7nMGw++cEbsp+NZv0UJO4rgXbm7F7IbJPTwIE=
github.com/containers/image/v5 v5.10.5/go.mod h1:SgIbWEedCNBbn2FI5cH0/jed1Ecy2s8XK5zTxvJTzII=
github.com/containers/common v0.37.0 h1:RRyR8FITTJXfrF7J9KXKSplywY4zsXoA2kuQXMaUaNo=
github.com/containers/common v0.37.0/go.mod h1:dgbJcccCPTmncqxhma56+XW+6d5VzqGF6jtkMHyu3v0=
github.com/containers/image/v5 v5.11.1 h1:mNybUvU6zXUwcMsQaa3n+Idsru5pV+GE7k4oRuPzYi0=
github.com/containers/image/v5 v5.11.1/go.mod h1:HC9lhJ/Nz5v3w/5Co7H431kLlgzlVlOC+auD/er3OqE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
github.com/containers/ocicrypt v1.1.1 h1:prL8l9w3ntVqXvNH1CiNn5ENjcCnr38JqpSyvKKB4GI=
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/storage v1.24.8/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ=
github.com/containers/storage v1.28.0/go.mod h1:ixAwO7Bj31cigqPEG7aCz+PYmxkDxbIFdUFioYdxbzI=
github.com/containers/storage v1.29.0 h1:l3Vh6+IiMKLGfQZ3rDkF84m+KF1Qb0XEcilWC+pYo2o=
github.com/containers/storage v1.29.0/go.mod h1:u84RU4CCufGeJBNTRNwMB+FoE+AiFeFw4SsMoqAOeCM=
github.com/containers/storage v1.30.0 h1:KS6zmoPyy0Qcx1HCCiseQ0ysSckRvtiuoVpIGh9iwQA=
github.com/containers/storage v1.30.0/go.mod h1:M/xn0pg6ReYFrLtWl5YELI/a4Xjq+Z3e5GJxQrJCcDI=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@ -235,7 +229,6 @@ github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v17.12.0-ce-rc1.0.20201020191947-73dc6a680cdd+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.3-0.20210216175712-646072ed6524+incompatible h1:Yu2uGErhwEoOT/OxAFe+/SiJCqRLs+pgcS5XKrDXnG4=
github.com/docker/docker v20.10.3-0.20210216175712-646072ed6524+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
@ -339,6 +332,8 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@ -393,7 +388,6 @@ github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
@ -418,8 +412,8 @@ github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 h1:rw3IAne6CDuVFlZbPOkA7bhxlqawFh7RJJ+CejfMaxE=
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee h1:PAXLXk1heNZ5yokbMBpVLZQxo43wCZxRwl00mX+dd44=
github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@ -438,11 +432,9 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13 h1:eSvu8Tmq6j2psUJqJrLcWH6K3w5Dwc+qipbaA6eVEN4=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.1 h1:/+xsCsk06wE38cyiqOR/o7U2fSftcH72xD+BQXmja/g=
github.com/klauspost/compress v1.12.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -473,11 +465,9 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg=
github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-shellwords v1.0.11 h1:vCoR9VPpsk/TZFW2JwK5I9S0xdrtUq2bph6/YjEPnaw=
github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@ -486,7 +476,6 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
@ -500,7 +489,6 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM=
github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
@ -514,7 +502,6 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0=
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/mtrmac/gpgme v0.1.2 h1:dNOmvYmsrakgW7LcgiprD0yfRuQQe8/C8F6Z+zogO3s=
github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI=
@ -535,7 +522,6 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.15.2/go.mod h1:Dd6YFfwBW84ETqqtL0CPyPXillHgY6XhQH3uuCCTr/o=
github.com/onsi/ginkgo v1.16.1 h1:foqVmeWDD6yYpK+Yz3fHyNIxFYNxswxqNFjSKe+vI54=
github.com/onsi/ginkgo v1.16.1/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
@ -559,20 +545,17 @@ github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5X
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc91/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8=
github.com/opencontainers/runc v1.0.0-rc93 h1:x2UMpOOVf3kQ8arv/EsDGwim8PTNqzL1/EYDr/+scOM=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d h1:pNa8metDkwZjb9g4T8s+krQ+HRgZAkqnXml+wNir/+s=
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0 h1:+77ba4ar4jsCbL1GLbFL8fFM57w6suPfSS9PDLDY7KM=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
@ -688,7 +671,6 @@ github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmD
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
@ -697,7 +679,6 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
github.com/vbauerster/mpb/v5 v5.4.0/go.mod h1:fi4wVo7BVQ22QcvFObm+VwliQXlV1eBT8JDaKXR4JGI=
github.com/vbauerster/mpb/v6 v6.0.3 h1:j+twHHhSUe8aXWaT/27E98G5cSBeqEuJSVCMjmLg0PI=
github.com/vbauerster/mpb/v6 v6.0.3/go.mod h1:5luBx4rDLWxpA4t6I5sdeeQuZhqDxc+wr5Nqf35+tnM=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
@ -748,7 +729,6 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@ -833,7 +813,6 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
@ -883,7 +862,6 @@ golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -897,7 +875,6 @@ golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210216224549-f992740a1bac/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

View File

@ -16,10 +16,12 @@ import (
"github.com/containers/buildah/define"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/util"
"github.com/containers/common/libimage"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
is "github.com/containers/image/v5/storage"
storageTransport "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
@ -301,22 +303,34 @@ func (b *Executor) startStage(ctx context.Context, stage *imagebuilder.Stage, st
// resolveNameToImageRef creates a types.ImageReference for the output name in local storage
func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, error) {
imageRef, err := alltransports.ParseImageName(output)
if err != nil {
candidates, _, _, err := util.ResolveName(output, "", b.systemContext, b.store)
if err != nil {
return nil, errors.Wrapf(err, "error parsing target image name %q", output)
}
if len(candidates) == 0 {
return nil, errors.Errorf("error parsing target image name %q", output)
}
imageRef2, err2 := is.Transport.ParseStoreReference(b.store, candidates[0])
if err2 != nil {
return nil, errors.Wrapf(err, "error parsing target image name %q", output)
}
return imageRef2, nil
}
if imageRef, err := alltransports.ParseImageName(output); err == nil {
return imageRef, nil
}
runtime, err := libimage.RuntimeFromStore(b.store, &libimage.RuntimeOptions{SystemContext: b.systemContext})
if err != nil {
return nil, err
}
// If we can resolve the image locally, make sure we use the resolved name.
localImage, resolvedName, err := runtime.LookupImage(output, nil)
if err != nil {
return nil, err
}
if localImage != nil {
output = resolvedName
}
// If we cannot find an image, make sure we normalize the name
// according the conventions and rules in libimage (e.g.,
// "localhost/" prefixing).
named, err := libimage.NormalizeName(output)
if err != nil {
return nil, err
}
imageRef, err := storageTransport.Transport.ParseStoreReference(b.store, named.String())
if err == nil {
return imageRef, nil
}
return imageRef, err
}
// waitForStage waits for an entry to be added to terminatedStage indicating
@ -670,7 +684,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
return imageID, ref, errors.Wrapf(err, "error locating just-written image %q", transports.ImageName(dest))
}
if len(b.additionalTags) > 0 {
if err = util.AddImageNames(b.store, "", b.systemContext, img, b.additionalTags); err != nil {
if err = util.TagImage(b.store, b.systemContext, img, b.additionalTags); err != nil {
return imageID, ref, errors.Wrapf(err, "error setting image names to %v", append(img.Names, b.additionalTags...))
}
logrus.Debugf("assigned names %v to image %q", img.Names, img.ID)

View File

@ -153,7 +153,7 @@ func importBuilderFromImage(ctx context.Context, store storage.Store, options Im
systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath)
_, img, err := util.FindImage(store, "", systemContext, options.Image)
_, img, err := util.FindImage(store, systemContext, options.Image)
if err != nil {
return nil, errors.Wrapf(err, "importing settings")
}

View File

@ -1,319 +0,0 @@
package manifests
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/containers/buildah/pkg/manifests"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
"github.com/stretchr/testify/assert"
)
var (
_ List = &list{}
sys = &types.SystemContext{
SystemRegistriesConfPath: "../tests/registries.conf",
SignaturePolicyPath: "../tests/policy.json",
}
amd64sys = &types.SystemContext{ArchitectureChoice: "amd64"}
arm64sys = &types.SystemContext{ArchitectureChoice: "arm64"}
ppc64sys = &types.SystemContext{ArchitectureChoice: "ppc64le"}
)
const (
listImageName = "foo"
otherListImage = "docker://k8s.gcr.io/pause:3.1"
otherListDigest = "sha256:f78411e19d84a252e53bff71a4407a5686c46983a2c2eeed83929b888179acea"
otherListAmd64Digest = "sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610"
otherListArm64Digest = "sha256:f365626a556e58189fc21d099fc64603db0f440bff07f77c740989515c544a39"
otherListPpc64Digest = "sha256:bcf9771c0b505e68c65440474179592ffdfa98790eb54ffbf129969c5e429990"
otherListInstanceDigest = "docker://k8s.gcr.io/pause@sha256:f365626a556e58189fc21d099fc64603db0f440bff07f77c740989515c544a39"
)
// TestSaveLoad verifies that a manifest list can be saved to a local-storage
// image (both as a brand-new image and by reusing an existing image ID) and
// then loaded back by image ID and by image name.
func TestSaveLoad(t *testing.T) {
	// Use a throwaway vfs-backed store rooted in a temporary directory so
	// the test does not touch the host's container storage.
	dir, err := ioutil.TempDir("", "manifests")
	assert.Nilf(t, err, "error creating temporary directory")
	defer os.RemoveAll(dir)
	storeOptions := storage.StoreOptions{
		GraphRoot:       filepath.Join(dir, "root"),
		RunRoot:         filepath.Join(dir, "runroot"),
		GraphDriverName: "vfs",
	}
	store, err := storage.GetStore(storeOptions)
	assert.Nilf(t, err, "error opening store")
	if store == nil {
		return
	}
	defer func() {
		if _, err := store.Shutdown(true); err != nil {
			assert.Nilf(t, err, "error closing store")
		}
	}()
	list := Create()
	assert.NotNil(t, list, "Create() returned nil?")
	// First save creates a new image with the given name; the second save
	// passes the returned image ID to reuse that same image record.
	image, err := list.SaveToImage(store, "", []string{listImageName}, manifest.DockerV2ListMediaType)
	assert.Nilf(t, err, "SaveToImage(1)")
	imageReused, err := list.SaveToImage(store, image, nil, manifest.DockerV2ListMediaType)
	assert.Nilf(t, err, "SaveToImage(2)")
	// The list must be loadable via either image ID and via the name.
	_, list, err = LoadFromImage(store, image)
	assert.Nilf(t, err, "LoadFromImage(1)")
	assert.NotNilf(t, list, "LoadFromImage(1)")
	_, list, err = LoadFromImage(store, imageReused)
	assert.Nilf(t, err, "LoadFromImage(2)")
	assert.NotNilf(t, list, "LoadFromImage(2)")
	_, list, err = LoadFromImage(store, listImageName)
	assert.Nilf(t, err, "LoadFromImage(3)")
	assert.NotNilf(t, list, "LoadFromImage(3)")
}
// TestAddRemove exercises List.Add with and without all=true under several
// architecture-specific system contexts, verifies the expected instance
// digests and counts, and checks that Remove drops every instance again.
// It reads the manifest list behind otherListImage over the network.
func TestAddRemove(t *testing.T) {
	ctx := context.TODO()
	ref, err := alltransports.ParseImageName(otherListImage)
	assert.Nilf(t, err, "ParseImageName(%q)", otherListImage)
	src, err := ref.NewImageSource(ctx, sys)
	assert.Nilf(t, err, "NewImageSource(%q)", otherListImage)
	// BUG FIX: the original `defer assert.Nilf(t, src.Close(), ...)`
	// evaluated src.Close() immediately (defer postpones only the outer
	// call, its arguments are evaluated on the spot), closing the image
	// source before GetManifest() below ever ran — and then closed it a
	// second time with a copy-pasted "GetManifest" message.  Close exactly
	// once, at function exit, inside a deferred closure.
	defer func() {
		assert.Nilf(t, src.Close(), "ImageSource.Close()")
	}()
	m, _, err := src.GetManifest(ctx, nil)
	assert.Nilf(t, err, "ImageSource.GetManifest()")
	listDigest, err := manifest.Digest(m)
	assert.Nilf(t, err, "manifest.Digest()")
	assert.Equalf(t, listDigest.String(), otherListDigest, "digest for image %q changed?", otherListImage)
	l, err := manifests.FromBlob(m)
	assert.Nilf(t, err, "manifests.FromBlob()")
	assert.NotNilf(t, l, "manifests.FromBlob()")
	assert.Equalf(t, len(l.Instances()), 5, "image %q had an arch added?", otherListImage)
	// With all=false, Add picks only the instance matching the
	// architecture choice in the supplied system context.
	list := Create()
	instanceDigest, err := list.Add(ctx, amd64sys, ref, false)
	assert.Nilf(t, err, "list.Add(all=false)")
	assert.Equal(t, instanceDigest.String(), otherListAmd64Digest)
	assert.Equalf(t, len(list.Instances()), 1, "too many instances added")
	list = Create()
	instanceDigest, err = list.Add(ctx, arm64sys, ref, false)
	assert.Nilf(t, err, "list.Add(all=false)")
	assert.Equal(t, instanceDigest.String(), otherListArm64Digest)
	assert.Equalf(t, len(list.Instances()), 1, "too many instances added")
	list = Create()
	instanceDigest, err = list.Add(ctx, ppc64sys, ref, false)
	assert.Nilf(t, err, "list.Add(all=false)")
	assert.Equal(t, instanceDigest.String(), otherListPpc64Digest)
	assert.Equalf(t, len(list.Instances()), 1, "too many instances added")
	// With all=true, every instance of the list is added.
	_, err = list.Add(ctx, sys, ref, true)
	assert.Nilf(t, err, "list.Add(all=true)")
	assert.Equalf(t, len(list.Instances()), 5, "too many instances added")
	list = Create()
	_, err = list.Add(ctx, sys, ref, true)
	assert.Nilf(t, err, "list.Add(all=true)")
	// BUG FIX: the three Equalf calls below originally passed a trailing
	// argument with no matching %-verb in the format string (a go vet
	// printf error); the verbs have been added.
	assert.Equalf(t, len(list.Instances()), 5, "too many instances added from %q", otherListImage)
	for _, instance := range list.Instances() {
		assert.Nilf(t, list.Remove(instance), "error removing instance %q", instance)
	}
	assert.Equalf(t, len(list.Instances()), 0, "should have removed all instances")
	// A reference pinned to a single instance digest adds one instance
	// regardless of the all flag.
	ref, err = alltransports.ParseImageName(otherListInstanceDigest)
	assert.Nilf(t, err, "ParseImageName(%q)", otherListInstanceDigest)
	list = Create()
	_, err = list.Add(ctx, sys, ref, false)
	assert.Nilf(t, err, "list.Add(all=false)")
	assert.Equalf(t, len(list.Instances()), 1, "too many instances added from %q", otherListInstanceDigest)
	list = Create()
	_, err = list.Add(ctx, sys, ref, true)
	assert.Nilf(t, err, "list.Add(all=true)")
	assert.Equalf(t, len(list.Instances()), 1, "too many instances added from %q", otherListInstanceDigest)
}
// TestReference checks that List.Reference fails for every image-list
// selection mode before the list has been saved to a local image, and
// succeeds for every mode once SaveToImage has been called.
func TestReference(t *testing.T) {
	ctx := context.TODO()
	// Throwaway vfs store under a temporary directory.
	dir, err := ioutil.TempDir("", "manifests")
	assert.Nilf(t, err, "error creating temporary directory")
	defer os.RemoveAll(dir)
	storeOptions := storage.StoreOptions{
		GraphRoot:       filepath.Join(dir, "root"),
		RunRoot:         filepath.Join(dir, "runroot"),
		GraphDriverName: "vfs",
	}
	store, err := storage.GetStore(storeOptions)
	assert.Nilf(t, err, "error opening store")
	if store == nil {
		return
	}
	defer func() {
		if _, err := store.Shutdown(true); err != nil {
			assert.Nilf(t, err, "error closing store")
		}
	}()
	ref, err := alltransports.ParseImageName(otherListImage)
	assert.Nilf(t, err, "ParseImageName(%q)", otherListImage)
	list := Create()
	_, err = list.Add(ctx, ppc64sys, ref, false)
	assert.Nilf(t, err, "list.Add(all=false)")
	// Before SaveToImage, Reference must fail for every selection mode.
	listRef, err := list.Reference(store, cp.CopyAllImages, nil)
	assert.NotNilf(t, err, "list.Reference(never saved)")
	assert.Nilf(t, listRef, "list.Reference(never saved)")
	listRef, err = list.Reference(store, cp.CopyAllImages, nil)
	assert.NotNilf(t, err, "list.Reference(never saved)")
	assert.Nilf(t, listRef, "list.Reference(never saved)")
	listRef, err = list.Reference(store, cp.CopySystemImage, nil)
	assert.NotNilf(t, err, "list.Reference(never saved)")
	assert.Nilf(t, listRef, "list.Reference(never saved)")
	listRef, err = list.Reference(store, cp.CopySpecificImages, []digest.Digest{otherListAmd64Digest})
	assert.NotNilf(t, err, "list.Reference(never saved)")
	assert.Nilf(t, listRef, "list.Reference(never saved)")
	listRef, err = list.Reference(store, cp.CopySpecificImages, []digest.Digest{otherListAmd64Digest, otherListArm64Digest})
	assert.NotNilf(t, err, "list.Reference(never saved)")
	assert.Nilf(t, listRef, "list.Reference(never saved)")
	// After saving, Reference must succeed for every selection mode.
	_, err = list.SaveToImage(store, "", []string{listImageName}, "")
	assert.Nilf(t, err, "SaveToImage")
	listRef, err = list.Reference(store, cp.CopyAllImages, nil)
	assert.Nilf(t, err, "list.Reference(saved)")
	assert.NotNilf(t, listRef, "list.Reference(saved)")
	listRef, err = list.Reference(store, cp.CopySystemImage, nil)
	assert.Nilf(t, err, "list.Reference(saved)")
	assert.NotNilf(t, listRef, "list.Reference(saved)")
	listRef, err = list.Reference(store, cp.CopySpecificImages, nil)
	assert.Nilf(t, err, "list.Reference(saved)")
	assert.NotNilf(t, listRef, "list.Reference(saved)")
	listRef, err = list.Reference(store, cp.CopySpecificImages, []digest.Digest{otherListAmd64Digest})
	assert.Nilf(t, err, "list.Reference(saved)")
	assert.NotNilf(t, listRef, "list.Reference(saved)")
	listRef, err = list.Reference(store, cp.CopySpecificImages, []digest.Digest{otherListAmd64Digest, otherListArm64Digest})
	assert.Nilf(t, err, "list.Reference(saved)")
	assert.NotNilf(t, listRef, "list.Reference(saved)")
	// Adding further instances after the save must not break Reference.
	_, err = list.Add(ctx, sys, ref, true)
	assert.Nilf(t, err, "list.Add(all=true)")
	listRef, err = list.Reference(store, cp.CopyAllImages, nil)
	assert.Nilf(t, err, "list.Reference(saved)")
	assert.NotNilf(t, listRef, "list.Reference(saved)")
	listRef, err = list.Reference(store, cp.CopySystemImage, nil)
	assert.Nilf(t, err, "list.Reference(saved)")
	assert.NotNilf(t, listRef, "list.Reference(saved)")
	listRef, err = list.Reference(store, cp.CopySpecificImages, nil)
	assert.Nilf(t, err, "list.Reference(saved)")
	assert.NotNilf(t, listRef, "list.Reference(saved)")
	listRef, err = list.Reference(store, cp.CopySpecificImages, []digest.Digest{otherListAmd64Digest})
	assert.Nilf(t, err, "list.Reference(saved)")
	assert.NotNilf(t, listRef, "list.Reference(saved)")
	listRef, err = list.Reference(store, cp.CopySpecificImages, []digest.Digest{otherListAmd64Digest, otherListArm64Digest})
	assert.Nilf(t, err, "list.Reference(saved)")
	assert.NotNilf(t, listRef, "list.Reference(saved)")
}
// TestPush saves a populated manifest list to a throwaway store and pushes it
// to a dir: destination using every ImageListSelection mode, growing the
// Instances filter one digest at a time.
func TestPush(t *testing.T) {
	ctx := context.TODO()
	// Throwaway vfs store under a temporary directory.
	dir, err := ioutil.TempDir("", "manifests")
	assert.Nilf(t, err, "error creating temporary directory")
	defer os.RemoveAll(dir)
	storeOptions := storage.StoreOptions{
		GraphRoot:       filepath.Join(dir, "root"),
		RunRoot:         filepath.Join(dir, "runroot"),
		GraphDriverName: "vfs",
	}
	store, err := storage.GetStore(storeOptions)
	assert.Nilf(t, err, "error opening store")
	if store == nil {
		return
	}
	defer func() {
		if _, err := store.Shutdown(true); err != nil {
			assert.Nilf(t, err, "error closing store")
		}
	}()
	// Push target: a dir: transport reference in a second temp directory.
	dest, err := ioutil.TempDir("", "manifests")
	assert.Nilf(t, err, "error creating temporary directory")
	defer os.RemoveAll(dest)
	destRef, err := alltransports.ParseImageName(fmt.Sprintf("dir:%s", dest))
	assert.Nilf(t, err, "ParseImageName()")
	ref, err := alltransports.ParseImageName(otherListImage)
	assert.Nilf(t, err, "ParseImageName(%q)", otherListImage)
	list := Create()
	_, err = list.Add(ctx, sys, ref, true)
	assert.Nilf(t, err, "list.Add(all=true)")
	_, err = list.SaveToImage(store, "", []string{listImageName}, "")
	assert.Nilf(t, err, "SaveToImage")
	options := PushOptions{
		Store:              store,
		SystemContext:      sys,
		ImageListSelection: cp.CopyAllImages,
		Instances:          nil,
	}
	// Push with each selection mode, then with an Instances filter that
	// grows from one digest up to all four.
	_, _, err = list.Push(ctx, destRef, options)
	assert.Nilf(t, err, "list.Push(all)")
	options.ImageListSelection = cp.CopySystemImage
	_, _, err = list.Push(ctx, destRef, options)
	assert.Nilf(t, err, "list.Push(local)")
	options.ImageListSelection = cp.CopySpecificImages
	_, _, err = list.Push(ctx, destRef, options)
	assert.Nilf(t, err, "list.Push(none specified)")
	options.Instances = []digest.Digest{otherListAmd64Digest}
	_, _, err = list.Push(ctx, destRef, options)
	assert.Nilf(t, err, "list.Push(one specified)")
	options.Instances = append(options.Instances, otherListArm64Digest)
	_, _, err = list.Push(ctx, destRef, options)
	assert.Nilf(t, err, "list.Push(two specified)")
	options.Instances = append(options.Instances, otherListPpc64Digest)
	_, _, err = list.Push(ctx, destRef, options)
	assert.Nilf(t, err, "list.Push(three specified)")
	options.Instances = append(options.Instances, otherListDigest)
	_, _, err = list.Push(ctx, destRef, options)
	assert.Nilf(t, err, "list.Push(four specified)")
}

255
new.go
View File

@ -4,18 +4,15 @@ import (
"context"
"fmt"
"math/rand"
"runtime"
"strings"
"github.com/containers/buildah/define"
"github.com/containers/buildah/util"
"github.com/containers/image/v5/docker"
"github.com/containers/buildah/pkg/blobcache"
"github.com/containers/common/libimage"
libimageTypes "github.com/containers/common/libimage/types"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/shortnames"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
@ -30,29 +27,6 @@ const (
BaseImageFakeName = imagebuilder.NoBaseImageSpecifier
)
// pullAndFindImage pulls the image referenced by srcRef into the given store,
// honoring the pull-related fields of options, and then looks the pulled
// image up in local storage, returning the storage.Image together with a
// storage reference to it.
func pullAndFindImage(ctx context.Context, store storage.Store, srcRef types.ImageReference, options BuilderOptions, sc *types.SystemContext) (*storage.Image, types.ImageReference, error) {
	pullOptions := PullOptions{
		ReportWriter:     options.ReportWriter,
		Store:            store,
		SystemContext:    options.SystemContext,
		BlobDirectory:    options.BlobDirectory,
		MaxRetries:       options.MaxPullRetries,
		RetryDelay:       options.PullRetryDelay,
		OciDecryptConfig: options.OciDecryptConfig,
	}
	ref, err := pullImage(ctx, store, srcRef, pullOptions, sc)
	if err != nil {
		// Log at debug level only; the caller decides how to report.
		logrus.Debugf("error pulling image %q: %v", transports.ImageName(srcRef), err)
		return nil, nil, err
	}
	// The pull succeeded, so the image is expected to exist locally now;
	// failing to find it here is wrapped as a storage-lookup error.
	img, err := is.Transport.GetStoreImage(store, ref)
	if err != nil {
		logrus.Debugf("error reading pulled image %q: %v", transports.ImageName(srcRef), err)
		return nil, nil, errors.Wrapf(err, "error locating image %q in local storage", transports.ImageName(ref))
	}
	return img, ref, nil
}
func getImageName(name string, img *storage.Image) string {
imageName := name
if len(img.Names) > 0 {
@ -105,191 +79,6 @@ func newContainerIDMappingOptions(idmapOptions *define.IDMappingOptions) storage
return options
}
// resolveLocalImage tries to find options.FromImage in local containers
// storage.  It expands the name into candidates via util.ResolveName and
// returns the first candidate that both exists in the store and matches the
// platform selection in systemContext (see imageMatch).  All return values
// are nil/empty — with a nil error — when no matching local image exists.
func resolveLocalImage(systemContext *types.SystemContext, store storage.Store, options BuilderOptions) (types.ImageReference, string, string, *storage.Image, error) {
	candidates, _, _, err := util.ResolveName(options.FromImage, options.Registry, systemContext, store)
	if err != nil {
		return nil, "", "", nil, errors.Wrapf(err, "error resolving local image %q", options.FromImage)
	}
	for _, imageName := range candidates {
		img, err := store.Image(imageName)
		if err != nil {
			// An unknown image just means this candidate doesn't
			// exist locally; try the next one.
			if errors.Cause(err) == storage.ErrImageUnknown {
				continue
			}
			return nil, "", "", nil, err
		}
		ref, err := is.Transport.ParseStoreReference(store, img.ID)
		if err != nil {
			return nil, "", "", nil, errors.Wrapf(err, "error parsing reference to image %q", img.ID)
		}
		// Skip local images whose OS/arch/variant don't match the
		// requested platform.
		if !imageMatch(context.Background(), ref, systemContext) {
			logrus.Debugf("Found local image %s but it does not match the provided context", imageName)
			continue
		}
		return ref, ref.Transport().Name(), imageName, img, nil
	}
	return nil, "", "", nil, nil
}
// imageMatch reports whether the image behind ref matches the OS,
// architecture, and variant selection in systemContext; when no explicit OS
// or architecture choice was made, the current runtime's values are used.
// Inspection failures are logged as warnings and treated as a non-match.
func imageMatch(ctx context.Context, ref types.ImageReference, systemContext *types.SystemContext) bool {
	src, err := ref.NewImage(ctx, systemContext)
	if err != nil {
		logrus.Warnf("Failed to create newImage in imageMatch: %v", err)
		return false
	}
	defer src.Close()

	info, err := src.Inspect(ctx)
	if err != nil {
		logrus.Warnf("Failed to inspect img %s: %v", ref, err)
		return false
	}

	// Fall back to the build host's platform when no choice was made.
	wantOS := systemContext.OSChoice
	if wantOS == "" {
		wantOS = runtime.GOOS
	}
	wantArch := systemContext.ArchitectureChoice
	if wantArch == "" {
		wantArch = runtime.GOARCH
	}

	if wantOS != info.Os || wantArch != info.Architecture {
		return false
	}
	// An empty variant choice matches any variant.
	return systemContext.VariantChoice == "" || systemContext.VariantChoice == info.Variant
}
// resolveImage turns options.FromImage into an image reference, the name of
// its transport, and — when the image is (or ends up) in local storage — its
// storage.Image record, honoring options.PullPolicy (never / if-missing /
// if-newer) and short-name resolution.
func resolveImage(ctx context.Context, systemContext *types.SystemContext, store storage.Store, options BuilderOptions) (types.ImageReference, string, *storage.Image, error) {
	if systemContext == nil {
		systemContext = &types.SystemContext{}
	}
	fromImage := options.FromImage
	// If the image name includes a transport, we can use it as is.  Special
	// treatment for docker references, which are subject to the pull
	// policies that we're handling below.
	srcRef, err := alltransports.ParseImageName(options.FromImage)
	if err == nil {
		if srcRef.Transport().Name() == docker.Transport.Name() {
			fromImage = srcRef.DockerReference().String()
		} else {
			// Non-docker transports are pulled unconditionally.
			pulledImg, pulledReference, err := pullAndFindImage(ctx, store, srcRef, options, systemContext)
			return pulledReference, srcRef.Transport().Name(), pulledImg, err
		}
	}
	localImageRef, _, localImageName, localImage, err := resolveLocalImage(systemContext, store, options)
	if err != nil {
		return nil, "", nil, err
	}
	// If we could resolve the image locally, check if it was clearly
	// referring to a local image, either by ID or digest.  In that case,
	// we don't need to perform a remote lookup.
	if localImage != nil && (strings.HasPrefix(localImage.ID, options.FromImage) || strings.HasPrefix(options.FromImage, "sha256:")) {
		return localImageRef, localImageRef.Transport().Name(), localImage, nil
	}
	// Pull policies "never" and "if-missing" are satisfied by a local
	// image; "never" without a local image is a hard error.
	if options.PullPolicy == define.PullNever || options.PullPolicy == define.PullIfMissing {
		if localImage != nil {
			return localImageRef, localImageRef.Transport().Name(), localImage, nil
		}
		if options.PullPolicy == define.PullNever {
			return nil, "", nil, errors.Errorf("pull policy is %q but %q could not be found locally", "never", options.FromImage)
		}
	}
	// If we found a local image, we must use its name.
	// See #2904.
	if localImageRef != nil {
		fromImage = localImageName
	}
	resolved, err := shortnames.Resolve(systemContext, fromImage)
	if err != nil {
		return nil, "", nil, err
	}
	// Print the image-resolution description unless we're looking for a
	// new image and already found a local image.  In many cases, the
	// description will be more confusing than helpful (e.g., `buildah from
	// localImage`).
	if desc := resolved.Description(); len(desc) > 0 {
		logrus.Debug(desc)
		if !(options.PullPolicy == define.PullIfNewer && localImage != nil) {
			if options.ReportWriter != nil {
				if _, err := options.ReportWriter.Write([]byte(desc + "\n")); err != nil {
					return nil, "", nil, err
				}
			}
		}
	}
	// Try each short-name pull candidate in order; the first successful
	// pull wins.  Per-candidate failures are collected for the final error.
	var pullErrors []error
	for _, pullCandidate := range resolved.PullCandidates {
		ref, err := docker.NewReference(pullCandidate.Value)
		if err != nil {
			return nil, "", nil, err
		}
		// We're tasked to pull a "newer" image.  If there's no local
		// image, we have no base for comparison, so we'll pull the
		// first available image.
		//
		// If there's a local image, the `pullCandidate` is considered
		// to be newer if its time stamp differs from the local one.
		// Otherwise, we don't pull and skip it.
		if options.PullPolicy == define.PullIfNewer && localImage != nil {
			remoteImage, err := ref.NewImage(ctx, systemContext)
			if err != nil {
				logrus.Debugf("unable to remote-inspect image %q: %v", pullCandidate.Value.String(), err)
				pullErrors = append(pullErrors, err)
				continue
			}
			// NOTE(review): this defer runs at function return, not at
			// the end of the loop iteration, so every inspected remote
			// image stays open until resolveImage returns — likely fine
			// for a short candidate list, but worth confirming.
			defer remoteImage.Close()
			remoteData, err := remoteImage.Inspect(ctx)
			if err != nil {
				logrus.Debugf("unable to remote-inspect image %q: %v", pullCandidate.Value.String(), err)
				pullErrors = append(pullErrors, err)
				continue
			}
			// FIXME: we should compare image digests not time stamps.
			// Comparing time stamps is flawed.  Be aware that fixing
			// it may entail non-trivial changes to the tests.  Please
			// refer to https://github.com/containers/buildah/issues/2779
			// for more.
			if localImage.Created.Equal(*remoteData.Created) {
				continue
			}
		}
		pulledImg, pulledReference, err := pullAndFindImage(ctx, store, ref, options, systemContext)
		if err != nil {
			logrus.Debugf("unable to pull and read image %q: %v", pullCandidate.Value.String(), err)
			pullErrors = append(pullErrors, err)
			continue
		}
		// Make sure to record the short-name alias if necessary.
		if err = pullCandidate.Record(); err != nil {
			return nil, "", nil, err
		}
		return pulledReference, "", pulledImg, nil
	}
	// If we were looking for a newer image but could not find one, return
	// the local image if present.
	if options.PullPolicy == define.PullIfNewer && localImage != nil {
		return localImageRef, localImageRef.Transport().Name(), localImage, nil
	}
	return nil, "", nil, resolved.FormatPullErrors(pullErrors)
}
func containerNameExist(name string, containers []storage.Container) bool {
for _, container := range containers {
for _, cname := range container.Names {
@ -317,6 +106,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
img *storage.Image
err error
)
if options.FromImage == BaseImageFakeName {
options.FromImage = ""
}
@ -324,11 +114,46 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath)
if options.FromImage != "" && options.FromImage != "scratch" {
ref, _, img, err = resolveImage(ctx, systemContext, store, options)
imageRuntime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
if err != nil {
return nil, err
}
pullPolicy, err := libimageTypes.ParsePullPolicy(options.PullPolicy.String())
if err != nil {
return nil, err
}
// Note: options.Format does *not* relate to the image we're
// about to pull (see tests/digests.bats). So we're not
// forcing a MIMEType in the pullOptions below.
pullOptions := libimage.PullOptions{}
pullOptions.RetryDelay = &options.PullRetryDelay
pullOptions.OciDecryptConfig = options.OciDecryptConfig
pullOptions.SignaturePolicyPath = options.SignaturePolicyPath
pullOptions.Writer = options.ReportWriter
maxRetries := uint(options.MaxPullRetries)
pullOptions.MaxRetries = &maxRetries
if options.BlobDirectory != "" {
pullOptions.DestinationLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal)
// pullOptions.SourceLookupReferenceFunc = blobcache.CacheLookupReferenceFunc(options.BlobDirectory, types.PreserveOriginal)
}
pulledImages, err := imageRuntime.Pull(ctx, options.FromImage, pullPolicy, &pullOptions)
if err != nil {
return nil, err
}
if len(pulledImages) > 0 {
img = pulledImages[0].StorageImage()
ref, err = pulledImages[0].StorageReference()
if err != nil {
return nil, err
}
}
}
imageSpec := options.FromImage
imageID := ""
imageDigest := ""

View File

@ -10,6 +10,7 @@ import (
"sync"
"github.com/containers/buildah/docker"
"github.com/containers/common/libimage"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/manifest"
@ -82,6 +83,21 @@ func makeFilename(blobSum digest.Digest, isConfig bool) string {
return blobSum.String()
}
// CacheLookupReferenceFunc returns a libimage.LookupReferenceFunc that wraps
// each looked-up image reference in a BlobCache backed by the specified
// directory, so image-copy operations can store and reuse cached blobs.
func CacheLookupReferenceFunc(directory string, compress types.LayerCompression) libimage.LookupReferenceFunc {
	// NOTE: this prevents us from moving BlobCache around and generalizes
	// the libimage API.
	return func(ref types.ImageReference) (types.ImageReference, error) {
		cached, err := NewBlobCache(ref, directory, compress)
		if err != nil {
			return nil, errors.Wrapf(err, "error using blobcache %q", directory)
		}
		return cached, nil
	}
}
// NewBlobCache creates a new blob cache that wraps an image reference. Any blobs which are
// written to the destination image created from the resulting reference will also be stored
// as-is to the specified directory or a temporary directory. The cache directory's contents

View File

@ -1,369 +0,0 @@
package manifests
import (
"io/ioutil"
"os"
"reflect"
"testing"
"github.com/containers/image/v5/manifest"
"github.com/containers/storage/pkg/reexec"
digest "github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
const (
expectedInstance = digest.Digest("sha256:c829b1810d2dbb456e74a695fd3847530c8319e5a95dca623e9f1b1b89020d8b")
ociFixture = "testdata/fedora.index.json"
dockerFixture = "testdata/fedora.list.json"
)
var (
_ List = &list{}
)
// TestMain gates test execution on reexec.Init(): when containers/storage
// re-executed this process for a helper operation, Init() handles that
// request and returns true, in which case the tests themselves must not run.
func TestMain(m *testing.M) {
	if reexec.Init() {
		return
	}
	os.Exit(m.Run())
}
// TestCreate verifies that Create returns a non-nil, usable empty list.
func TestCreate(t *testing.T) {
	if Create() == nil {
		t.Fatalf("error creating an empty list")
	}
}
// TestFromBlob parses both the OCI index fixture and the Docker manifest-list
// fixture and checks that the two internal representations (Docker and OCIv1)
// stay in sync: same instance count and matching per-instance OS/architecture.
func TestFromBlob(t *testing.T) {
	for _, version := range []string{
		ociFixture,
		dockerFixture,
	} {
		bytes, err := ioutil.ReadFile(version)
		if err != nil {
			t.Fatalf("error loading %s: %v", version, err)
		}
		list, err := FromBlob(bytes)
		if err != nil {
			t.Fatalf("error parsing %s: %v", version, err)
		}
		// Both views of the list must describe the same instances.
		if len(list.Docker().Manifests) != len(list.OCIv1().Manifests) {
			t.Fatalf("%s: expected the same number of manifests, but %d != %d", version, len(list.Docker().Manifests), len(list.OCIv1().Manifests))
		}
		for i := range list.Docker().Manifests {
			d := list.Docker().Manifests[i]
			o := list.OCIv1().Manifests[i]
			if d.Platform.OS != o.Platform.OS {
				t.Fatalf("%s: expected the same OS", version)
			}
			if d.Platform.Architecture != o.Platform.Architecture {
				t.Fatalf("%s: expected the same Architecture", version)
			}
		}
	}
}
// TestAddInstance adds a fedora-minimal image manifest to both fixture
// lists and verifies that the new entry is findable in both the Docker and
// OCI views of each list.
func TestAddInstance(t *testing.T) {
	manifestBytes, err := ioutil.ReadFile("testdata/fedora-minimal.schema2.json")
	if err != nil {
		t.Fatalf("error loading testdata/fedora-minimal.schema2.json: %v", err)
	}
	// Derive the MIME type and digest the same way a real caller would
	// before adding the instance.
	manifestType := manifest.GuessMIMEType(manifestBytes)
	manifestDigest, err := manifest.Digest(manifestBytes)
	if err != nil {
		t.Fatalf("error digesting testdata/fedora-minimal.schema2.json: %v", err)
	}
	for _, version := range []string{
		ociFixture,
		dockerFixture,
	} {
		bytes, err := ioutil.ReadFile(version)
		if err != nil {
			t.Fatalf("error loading %s: %v", version, err)
		}
		list, err := FromBlob(bytes)
		if err != nil {
			t.Fatalf("error parsing %s: %v", version, err)
		}
		if err = list.AddInstance(manifestDigest, int64(len(manifestBytes)), manifestType, "linux", "amd64", "", nil, "", nil, nil); err != nil {
			t.Fatalf("adding an instance failed in %s: %v", version, err)
		}
		// The new instance must be visible in both underlying
		// representations, not just one of them.
		if d, err := list.findDocker(manifestDigest); d == nil || err != nil {
			t.Fatalf("adding an instance failed in %s: %v", version, err)
		}
		if o, err := list.findOCIv1(manifestDigest); o == nil || err != nil {
			t.Fatalf("adding an instance failed in %s: %v", version, err)
		}
	}
}
// TestRemove checks that removing a known instance shrinks the list by one
// and makes the instance unfindable in both the Docker and OCI views.
func TestRemove(t *testing.T) {
	encoded, err := ioutil.ReadFile(ociFixture)
	if err != nil {
		t.Fatalf("error loading blob: %v", err)
	}
	parsed, err := FromBlob(encoded)
	if err != nil {
		t.Fatalf("error parsing blob: %v", err)
	}
	countBefore := len(parsed.OCIv1().Manifests)
	target := expectedInstance
	// The instance must exist in both views before we try to remove it.
	if d, err := parsed.findDocker(target); d == nil || err != nil {
		t.Fatalf("finding expected instance failed: %v", err)
	}
	if o, err := parsed.findOCIv1(target); o == nil || err != nil {
		t.Fatalf("finding expected instance failed: %v", err)
	}
	if err = parsed.Remove(target); err != nil {
		t.Fatalf("error parsing blob: %v", err)
	}
	if countAfter := len(parsed.Docker().Manifests); countAfter != countBefore-1 {
		t.Fatalf("removing instance should have succeeded")
	}
	// After removal, lookups in both views must fail.
	if d, err := parsed.findDocker(target); d != nil || err == nil {
		t.Fatalf("finding instance should have failed")
	}
	if o, err := parsed.findOCIv1(target); o != nil || err == nil {
		t.Fatalf("finding instance should have failed")
	}
}
// testString round-trips each candidate string through set, serialize,
// reparse, and get against the expected fixture instance, and verifies the
// value survives intact.
//
// Fix: the failure message previously printed its arguments in the wrong
// order ("expected value %q, got %q" received the retrieved value first and
// the expected string second), which would mislead anyone debugging a
// failure.
func testString(t *testing.T, values []string, set func(List, digest.Digest, string) error, get func(List, digest.Digest) (string, error)) {
	bytes, err := ioutil.ReadFile(ociFixture)
	if err != nil {
		t.Fatalf("error loading blob: %v", err)
	}
	list, err := FromBlob(bytes)
	if err != nil {
		t.Fatalf("error parsing blob: %v", err)
	}
	for _, testString := range values {
		if err = set(list, expectedInstance, testString); err != nil {
			t.Fatalf("error setting %q: %v", testString, err)
		}
		b, err := list.Serialize("")
		if err != nil {
			t.Fatalf("error serializing list: %v", err)
		}
		// Reparse the serialized bytes so we verify the persisted form,
		// not just the in-memory one (shadowing the outer list inside
		// the loop is intentional).
		list, err := FromBlob(b)
		if err != nil {
			t.Fatalf("error parsing list: %v", err)
		}
		value, err := get(list, expectedInstance)
		if err != nil {
			t.Fatalf("error retrieving value %q: %v", testString, err)
		}
		if value != testString {
			t.Fatalf("expected value %q, got %q: %v", testString, value, err)
		}
	}
}
// testStringSlice round-trips each candidate slice through set, serialize,
// reparse, and get against the expected fixture instance, and verifies the
// slice survives intact.
func testStringSlice(t *testing.T, values [][]string, set func(List, digest.Digest, []string) error, get func(List, digest.Digest) ([]string, error)) {
	encoded, err := ioutil.ReadFile(ociFixture)
	if err != nil {
		t.Fatalf("error loading blob: %v", err)
	}
	parsed, err := FromBlob(encoded)
	if err != nil {
		t.Fatalf("error parsing blob: %v", err)
	}
	for _, testSlice := range values {
		if err = set(parsed, expectedInstance, testSlice); err != nil {
			t.Fatalf("error setting %v: %v", testSlice, err)
		}
		serialized, err := parsed.Serialize("")
		if err != nil {
			t.Fatalf("error serializing list: %v", err)
		}
		// Check the persisted form by reparsing what was serialized.
		reparsed, err := FromBlob(serialized)
		if err != nil {
			t.Fatalf("error parsing list: %v", err)
		}
		roundTripped, err := get(reparsed, expectedInstance)
		if err != nil {
			t.Fatalf("error retrieving value %v: %v", testSlice, err)
		}
		if !reflect.DeepEqual(roundTripped, testSlice) {
			t.Fatalf("expected values %v, got %v: %v", testSlice, roundTripped, err)
		}
	}
}
// testMap exercises a map-valued attribute: for both a nil instance digest
// (list-wide) and a specific instance, it sets each candidate map,
// serializes and reparses the list, and verifies the map survives the
// round trip unchanged.
func testMap(t *testing.T, values []map[string]string, set func(List, *digest.Digest, map[string]string) error, get func(List, *digest.Digest) (map[string]string, error)) {
	bytes, err := ioutil.ReadFile(ociFixture)
	if err != nil {
		t.Fatalf("error loading blob: %v", err)
	}
	list, err := FromBlob(bytes)
	if err != nil {
		t.Fatalf("error parsing blob: %v", err)
	}
	instance := expectedInstance
	// nil addresses the list as a whole; &instance addresses one instance.
	for _, instanceDigest := range []*digest.Digest{nil, &instance} {
		for _, testMap := range values {
			if err = set(list, instanceDigest, testMap); err != nil {
				t.Fatalf("error setting %v: %v", testMap, err)
			}
			b, err := list.Serialize("")
			if err != nil {
				t.Fatalf("error serializing list: %v", err)
			}
			// Reparse the serialized bytes so we check the persisted
			// form; the inner list deliberately shadows the outer one.
			list, err := FromBlob(b)
			if err != nil {
				t.Fatalf("error parsing list: %v", err)
			}
			values, err := get(list, instanceDigest)
			if err != nil {
				t.Fatalf("error retrieving value %v: %v", testMap, err)
			}
			// Compare sizes first, then every key/value pair.
			if len(values) != len(testMap) {
				t.Fatalf("expected %d map entries, got %d", len(testMap), len(values))
			}
			for k, v := range testMap {
				if values[k] != v {
					t.Fatalf("expected map value %q=%q, got %q", k, v, values[k])
				}
			}
		}
	}
}
func TestAnnotations(t *testing.T) {
testMap(t,
[]map[string]string{{"A": "B", "C": "D"}, {"E": "F", "G": "H"}},
func(l List, i *digest.Digest, m map[string]string) error {
return l.SetAnnotations(i, m)
},
func(l List, i *digest.Digest) (map[string]string, error) {
return l.Annotations(i)
},
)
}
func TestArchitecture(t *testing.T) {
testString(t,
[]string{"abacus", "sliderule"},
func(l List, i digest.Digest, s string) error {
return l.SetArchitecture(i, s)
},
func(l List, i digest.Digest) (string, error) {
return l.Architecture(i)
},
)
}
func TestFeatures(t *testing.T) {
testStringSlice(t,
[][]string{{"chrome", "hubcaps"}, {"climate", "control"}},
func(l List, i digest.Digest, s []string) error {
return l.SetFeatures(i, s)
},
func(l List, i digest.Digest) ([]string, error) {
return l.Features(i)
},
)
}
func TestOS(t *testing.T) {
testString(t,
[]string{"linux", "darwin"},
func(l List, i digest.Digest, s string) error {
return l.SetOS(i, s)
},
func(l List, i digest.Digest) (string, error) {
return l.OS(i)
},
)
}
func TestOSFeatures(t *testing.T) {
testStringSlice(t,
[][]string{{"ipv6", "containers"}, {"nested", "virtualization"}},
func(l List, i digest.Digest, s []string) error {
return l.SetOSFeatures(i, s)
},
func(l List, i digest.Digest) ([]string, error) {
return l.OSFeatures(i)
},
)
}
func TestOSVersion(t *testing.T) {
testString(t,
[]string{"el7", "el8"},
func(l List, i digest.Digest, s string) error {
return l.SetOSVersion(i, s)
},
func(l List, i digest.Digest) (string, error) {
return l.OSVersion(i)
},
)
}
func TestURLs(t *testing.T) {
testStringSlice(t,
[][]string{{"https://example.com", "https://example.net"}, {"http://example.com", "http://example.net"}},
func(l List, i digest.Digest, s []string) error {
return l.SetURLs(i, s)
},
func(l List, i digest.Digest) ([]string, error) {
return l.URLs(i)
},
)
}
func TestVariant(t *testing.T) {
testString(t,
[]string{"workstation", "cloud", "server"},
func(l List, i digest.Digest, s string) error {
return l.SetVariant(i, s)
},
func(l List, i digest.Digest) (string, error) {
return l.Variant(i)
},
)
}
// TestSerialize re-encodes each fixture in every supported MIME type
// (default, OCI image index, Docker manifest list), reparses the result,
// and verifies that the instance data survived the round trip.
func TestSerialize(t *testing.T) {
	for _, version := range []string{
		ociFixture,
		dockerFixture,
	} {
		bytes, err := ioutil.ReadFile(version)
		if err != nil {
			t.Fatalf("error loading %s: %v", version, err)
		}
		list, err := FromBlob(bytes)
		if err != nil {
			t.Fatalf("error parsing %s: %v", version, err)
		}
		for _, mimeType := range []string{"", v1.MediaTypeImageIndex, manifest.DockerV2ListMediaType} {
			b, err := list.Serialize(mimeType)
			if err != nil {
				t.Fatalf("error serializing %s with type %q: %v", version, mimeType, err)
			}
			l, err := FromBlob(b)
			if err != nil {
				t.Fatalf("error parsing %s re-encoded as %q: %v\n%s", version, mimeType, err, string(b))
			}
			// The Docker view must survive the round trip exactly.
			if !reflect.DeepEqual(list.Docker().Manifests, l.Docker().Manifests) {
				t.Fatalf("re-encoded %s as %q was different\n%#v\n%#v", version, mimeType, list, l)
			}
			// The OCI view is compared field-by-field rather than with
			// DeepEqual — NOTE(review): presumably so that fields which
			// don't round-trip through every encoding (e.g. annotations)
			// are tolerated; confirm against the Serialize implementation.
			for i := range list.OCIv1().Manifests {
				manifest := list.OCIv1().Manifests[i]
				m := l.OCIv1().Manifests[i]
				if manifest.Digest != m.Digest ||
					manifest.MediaType != m.MediaType ||
					manifest.Size != m.Size ||
					!reflect.DeepEqual(list.OCIv1().Manifests[i].Platform, l.OCIv1().Manifests[i].Platform) {
					t.Fatalf("re-encoded %s OCI %d as %q was different\n%#v\n%#v", version, i, mimeType, list, l)
				}
			}
		}
	}
}

View File

@ -1,16 +0,0 @@
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 1316,
"digest": "sha256:847a6054047619b8908f61e0211e3480ab20ce4b6cf17b03db081322ade301d3"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 42163967,
"digest": "sha256:8fad33c002fa130aceec4fd0cadc8bad0d7561667ab24aee75dea825be933de0"
}
]
}

View File

@ -1,45 +0,0 @@
{
"manifests": [
{
"digest": "sha256:f81f09918379d5442d20dff82a298f29698197035e737f76e511d5af422cabd7",
"mediaType": "application/vnd.oci.image.manifest.v1+json",
"platform": {
"architecture": "amd64",
"os": "linux"
},
"size": 529
},
{
"digest": "sha256:c829b1810d2dbb456e74a695fd3847530c8319e5a95dca623e9f1b1b89020d8b",
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"platform": {
"architecture": "arm64",
"os": "linux",
"variant": "v8"
},
"size": 529
},
{
"digest": "sha256:68b26da78d8790df143479ec2e3174c57cedb1c2e84ce1b2675d942d6848f2da",
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"platform": {
"architecture": "ppc64le",
"os": "linux"
},
"size": 529
},
{
"digest": "sha256:15352d97781ffdf357bf3459c037be3efac4133dc9070c2dce7eca7c05c3e736",
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"platform": {
"architecture": "s390x",
"os": "linux"
},
"size": 529
}
],
"schemaVersion": 2,
"annotations": {
"foo": "bar"
}
}

View File

@ -1,43 +0,0 @@
{
"manifests": [
{
"digest": "sha256:f81f09918379d5442d20dff82a298f29698197035e737f76e511d5af422cabd7",
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"platform": {
"architecture": "amd64",
"os": "linux"
},
"size": 529
},
{
"digest": "sha256:c829b1810d2dbb456e74a695fd3847530c8319e5a95dca623e9f1b1b89020d8b",
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"platform": {
"architecture": "arm64",
"os": "linux",
"variant": "v8"
},
"size": 529
},
{
"digest": "sha256:68b26da78d8790df143479ec2e3174c57cedb1c2e84ce1b2675d942d6848f2da",
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"platform": {
"architecture": "ppc64le",
"os": "linux"
},
"size": 529
},
{
"digest": "sha256:15352d97781ffdf357bf3459c037be3efac4133dc9070c2dce7eca7c05c3e736",
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"platform": {
"architecture": "s390x",
"os": "linux"
},
"size": 529
}
],
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
"schemaVersion": 2
}

View File

@ -1,16 +0,0 @@
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/vnd.docker.container.image.v1+json",
"size": 2037,
"digest": "sha256:e9ed59d2baf72308f3a811ebc49ff3f4e0175abf40bf636bea0160759c637999"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 69532283,
"digest": "sha256:5a915a173fbc36dc8e1410afdd9de2b08f71efb226f8eb1ebcdc00a1acbced62"
}
]
}

View File

@ -587,6 +587,14 @@ func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) {
ctx.OCIInsecureSkipTLSVerify = !tlsVerify
ctx.DockerDaemonInsecureSkipTLSVerify = !tlsVerify
}
disableCompression, err := c.Flags().GetBool("disable-compression")
if err == nil {
if disableCompression {
ctx.OCIAcceptUncompressedLayers = true
} else {
ctx.DirForceCompress = true
}
}
creds, err := c.Flags().GetString("creds")
if err == nil && c.Flag("creds").Changed {
var err error

View File

@ -1,371 +0,0 @@
package supplemented
import (
"archive/tar"
"bytes"
"context"
"crypto/rand"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"runtime"
"testing"
"time"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/assert"
)
var (
_ types.ImageReference = &supplementedImageReference{}
_ types.ImageSource = &supplementedImageSource{}
now = time.Now()
)
// makeLayer builds an uncompressed tar blob containing a single 512-byte
// regular file named "tmpfile". The first 8 bytes of the payload are
// random and the remainder is a deterministic ramp derived from the last
// random byte, so layers differ between calls but stay cheap to generate.
//
// Fixes: the original named a local variable `len`, shadowing the builtin,
// and passed (actual, expected) to assert.Equalf for the write-length
// check; both are corrected here.
func makeLayer(t *testing.T) []byte {
	var b bytes.Buffer
	const size = 512    // payload length of the single file entry
	const randomLen = 8 // number of truly random leading bytes
	tw := tar.NewWriter(&b)
	assert.Nilf(t, tw.WriteHeader(&tar.Header{
		Typeflag: tar.TypeReg,
		Name:     "tmpfile",
		Size:     int64(size),
		Mode:     0644,
		Uname:    "root",
		Gname:    "root",
		ModTime:  time.Now(),
	}), "error writing in-memory layer")
	buf := make([]byte, size)
	n, err := rand.Read(buf[0:randomLen])
	assert.Nilf(t, err, "error reading a random byte")
	assert.Equalf(t, randomLen, n, "error reading random content: wrong length")
	// Fill the rest with a predictable ramp so the content is non-trivial
	// without needing more entropy.
	for i := randomLen; i < size; i++ {
		buf[i] = (buf[i-1] + 1) & 0xff
	}
	n, err = tw.Write(buf)
	assert.Nilf(t, err, "error writing file content")
	assert.Equalf(t, size, n, "error writing file content: wrong length")
	assert.Nilf(t, tw.Close(), "error flushing file content")
	return b.Bytes()
}
// makeConfig returns an image configuration for the given platform whose
// root filesystem records the diff ID of the supplied layer blob.
func makeConfig(arch, os string, layer []byte) v1.Image {
	imageConfig := v1.ImageConfig{
		User:       "root",
		Entrypoint: []string{"/tmpfile"},
		WorkingDir: "/",
	}
	rootFS := v1.RootFS{
		Type:    "layers",
		DiffIDs: []digest.Digest{digest.Canonical.FromBytes(layer)},
	}
	history := []v1.History{{
		Created:   &now,
		CreatedBy: "shenanigans",
	}}
	return v1.Image{
		Created:      &now,
		Architecture: arch,
		OS:           os,
		Config:       imageConfig,
		RootFS:       rootFS,
		History:      history,
	}
}
// makeManifest returns an OCI manifest describing the given raw layer and
// config blobs.
func makeManifest(layer, config []byte) v1.Manifest {
	configDescriptor := v1.Descriptor{
		MediaType: v1.MediaTypeImageConfig,
		Digest:    digest.Canonical.FromBytes(config),
		Size:      int64(len(config)),
	}
	layerDescriptor := v1.Descriptor{
		MediaType: v1.MediaTypeImageLayer,
		Digest:    digest.Canonical.FromBytes(layer),
		Size:      int64(len(layer)),
	}
	return v1.Manifest{
		Versioned: specs.Versioned{SchemaVersion: 2},
		Config:    configDescriptor,
		Layers:    []v1.Descriptor{layerDescriptor},
	}
}
// makeImage writes a single-layer image to a fresh temporary directory and
// returns a "dir:" reference to it along with the directory path and the
// raw layer, config, and manifest blobs. The caller is responsible for
// removing the returned directory.
//
// Fix: the config is now built from layerBytes. The original passed the
// named return value "layer", which is still nil at that point, so the
// config recorded the diff ID of an empty blob instead of the real layer.
func makeImage(t *testing.T, arch, os string) (ref types.ImageReference, dir string, layer, config, manifest []byte) {
	ctx := context.TODO()
	dir, err := ioutil.TempDir("", "supplemented")
	assert.Nilf(t, err, "error creating temporary directory")
	layerBytes := makeLayer(t)
	cb := makeConfig(arch, os, layerBytes)
	configBytes, err := json.Marshal(&cb)
	assert.Nilf(t, err, "error encoding image configuration")
	m := makeManifest(layerBytes, configBytes)
	manifestBytes, err := json.Marshal(&m)
	assert.Nilf(t, err, "error encoding image manifest")
	ref, err = alltransports.ParseImageName(fmt.Sprintf("dir:%s", dir))
	assert.Nilf(t, err, "error parsing reference 'dir:%s'", dir)
	sys := &types.SystemContext{}
	dest, err := ref.NewImageDestination(ctx, sys)
	assert.Nilf(t, err, "error opening 'dir:%s' as an image destination", dir)
	// Store the layer, then the config (config may be cached), then the
	// manifest, and finally commit the whole image.
	bi := types.BlobInfo{
		MediaType: v1.MediaTypeImageLayer,
		Digest:    digest.Canonical.FromBytes(layerBytes),
		Size:      int64(len(layerBytes)),
	}
	_, err = dest.PutBlob(ctx, bytes.NewReader(layerBytes), bi, none.NoCache, false)
	assert.Nilf(t, err, "error storing layer blob to 'dir:%s'", dir)
	bi = types.BlobInfo{
		MediaType: v1.MediaTypeImageConfig,
		Digest:    digest.Canonical.FromBytes(configBytes),
		Size:      int64(len(configBytes)),
	}
	_, err = dest.PutBlob(ctx, bytes.NewReader(configBytes), bi, none.NoCache, true)
	assert.Nilf(t, err, "error storing config blob to 'dir:%s'", dir)
	err = dest.PutManifest(ctx, manifestBytes, nil)
	assert.Nilf(t, err, "error storing manifest to 'dir:%s'", dir)
	err = dest.Commit(ctx, nil)
	assert.Nilf(t, err, "error committing image to 'dir:%s'", dir)
	return ref, dir, layerBytes, configBytes, manifestBytes
}
// TestSupplemented builds three single-platform images plus an image index
// that references all three, stores only the index in one directory, and
// then verifies that a supplemented reference can serve manifests and
// blobs for exactly the instances it was told to supplement, and that
// copying each combination succeeds.
func TestSupplemented(t *testing.T) {
	ctx := context.TODO()
	// Fake architectures for the second and third images so the three
	// instances are distinguishable in the index.
	arch2 := "foo"
	arch3 := "bar"
	sys := &types.SystemContext{
		SignaturePolicyPath: "../../tests/policy.json",
	}
	defaultPolicy, err := signature.DefaultPolicy(sys)
	assert.Nilf(t, err, "error obtaining default policy")
	policyContext, err := signature.NewPolicyContext(defaultPolicy)
	assert.Nilf(t, err, "error obtaining policy context")
	// Three independent images, each in its own "dir:" directory.
	ref1, dir1, layer1, config1, manifest1 := makeImage(t, runtime.GOARCH, runtime.GOOS)
	defer os.RemoveAll(dir1)
	digest1, err := manifest.Digest(manifest1)
	assert.Nilf(t, err, "error digesting manifest")
	ref2, dir2, layer2, config2, manifest2 := makeImage(t, arch2, runtime.GOOS)
	defer os.RemoveAll(dir2)
	digest2, err := manifest.Digest(manifest2)
	assert.Nilf(t, err, "error digesting manifest")
	ref3, dir3, layer3, config3, manifest3 := makeImage(t, arch3, runtime.GOOS)
	defer os.RemoveAll(dir3)
	digest3, err := manifest.Digest(manifest3)
	assert.Nilf(t, err, "error digesting manifest")
	// multidir will hold only the index; destDir is the copy target.
	multidir, err := ioutil.TempDir("", "supplemented")
	assert.Nilf(t, err, "error creating temporary directory")
	defer os.RemoveAll(multidir)
	destDir, err := ioutil.TempDir("", "supplemented")
	assert.Nilf(t, err, "error creating temporary directory")
	defer os.RemoveAll(destDir)
	// An index listing all three images. It is stored WITHOUT the
	// per-instance manifests and blobs, so reads only succeed when the
	// supplemented reference supplies them.
	index := v1.Index{
		Versioned: specs.Versioned{
			SchemaVersion: 2,
		},
		Manifests: []v1.Descriptor{
			{
				MediaType: v1.MediaTypeImageManifest,
				Digest:    digest1,
				Size:      int64(len(manifest1)),
				Platform: &v1.Platform{
					Architecture: runtime.GOARCH,
					OS:           runtime.GOOS,
				},
			},
			{
				MediaType: v1.MediaTypeImageManifest,
				Digest:    digest2,
				Size:      int64(len(manifest2)),
				Platform: &v1.Platform{
					Architecture: arch2,
					OS:           runtime.GOOS,
				},
			},
			{
				MediaType: v1.MediaTypeImageManifest,
				Digest:    digest3,
				Size:      int64(len(manifest3)),
				Platform: &v1.Platform{
					Architecture: arch3,
					OS:           runtime.GOOS,
				},
			},
		},
	}
	indexBytes, err := json.Marshal(&index)
	assert.Nilf(t, err, "error encoding image index")
	indexDigest, err := manifest.Digest(indexBytes)
	assert.Nilf(t, err, "error digesting image index")
	destRef, err := alltransports.ParseImageName(fmt.Sprintf("dir:%s", destDir))
	assert.Nilf(t, err, "error parsing reference 'dir:%s'", destDir)
	multiRef, err := alltransports.ParseImageName(fmt.Sprintf("dir:%s", multidir))
	assert.Nilf(t, err, "error parsing reference 'dir:%s'", multidir)
	// NOTE(review): destImg is never closed; acceptable in a test, but a
	// leak in production code.
	destImg, err := multiRef.NewImageDestination(ctx, sys)
	assert.Nilf(t, err, "error opening 'dir:%s' as an image destination", multidir)
	err = destImg.PutManifest(ctx, indexBytes, nil)
	assert.Nilf(t, err, "error storing index to 'dir:%s'", multidir)
	err = destImg.Commit(ctx, nil)
	assert.Nilf(t, err, "error committing image to 'dir:%s'", multidir)
	t.Logf("list: digest=%q,value=%s", indexDigest, string(indexBytes))
	// The bare index is unreadable as an image: its config is missing.
	_, err = multiRef.NewImage(ctx, sys)
	assert.NotNilf(t, err, "unexpected success opening image 'dir:%s': shouldn't have been able to read config", multidir)
	// Supplementing with only ref1 while asking for all images must fail,
	// but succeeds when restricted to just ref1's instance.
	src, err := Reference(multiRef, []types.ImageReference{ref1}, cp.CopyAllImages, nil).NewImageSource(ctx, sys)
	assert.NotNilf(t, err, "unexpected success opening image 'dir:%s': shouldn't have been able to read all manifests", multidir)
	assert.Nilf(t, src, "unexpected success opening image 'dir:%s': shouldn't have been able to read all manifests", multidir)
	src, err = Reference(multiRef, []types.ImageReference{ref1}, cp.CopySpecificImages, []digest.Digest{digest1}).NewImageSource(ctx, sys)
	assert.Nilf(t, err, "error opening image 'dir:%s' with specific instances", multidir)
	assert.Nilf(t, src.Close(), "error closing image 'dir:%s' with specific instances", multidir)
	// Same pattern for opening the index as a single (system) image.
	img, err := Reference(multiRef, nil, cp.CopySystemImage, nil).NewImage(ctx, sys)
	assert.NotNilf(t, err, "unexpected success opening image 'dir:%s': shouldn't have been able to read config", multidir)
	assert.Nilf(t, img, "unexpected success opening image 'dir:%s': shouldn't have been able to read config", multidir)
	img, err = Reference(multiRef, []types.ImageReference{ref1}, cp.CopySystemImage, []digest.Digest{digest1}).NewImage(ctx, sys)
	assert.Nilf(t, err, "error opening image %q+%q", transports.ImageName(multiRef), transports.ImageName(ref1))
	assert.Nilf(t, img.Close(), "error closing image %q+%q", transports.ImageName(multiRef), transports.ImageName(ref1))
	// Table of supplement combinations and which blobs each should make
	// reachable (or keep unreachable).
	type testCase struct {
		label           string
		supplements     []types.ImageReference
		expectToFind    [][]byte
		expectToNotFind [][]byte
		multiple        cp.ImageListSelection
		instances       []digest.Digest
	}
	for _, test := range []testCase{
		{
			label:           "no supplements, nil instances",
			supplements:     nil,
			expectToFind:    nil,
			expectToNotFind: [][]byte{layer1, config1, layer2, config2, layer3, config3},
			multiple:        cp.CopySpecificImages,
			instances:       nil,
		},
		{
			label:           "no supplements, 0 instances",
			supplements:     nil,
			expectToFind:    nil,
			expectToNotFind: [][]byte{layer1, config1, layer2, config2, layer3, config3},
			multiple:        cp.CopySpecificImages,
			instances:       []digest.Digest{},
		},
		{
			label:           "just ref1 supplementing",
			supplements:     []types.ImageReference{ref1},
			expectToFind:    [][]byte{layer1, config1},
			expectToNotFind: [][]byte{layer2, config2, layer3, config3},
			multiple:        cp.CopySpecificImages,
			instances:       []digest.Digest{digest1},
		},
		{
			label:           "just ref2 supplementing",
			supplements:     []types.ImageReference{ref2},
			expectToFind:    [][]byte{layer2, config2},
			expectToNotFind: [][]byte{layer1, config1, layer3, config3},
			multiple:        cp.CopySpecificImages,
			instances:       []digest.Digest{digest2},
		},
		{
			label:           "just ref3 supplementing",
			supplements:     []types.ImageReference{ref3},
			expectToFind:    [][]byte{layer3, config3},
			expectToNotFind: [][]byte{layer1, config1, layer2, config2},
			multiple:        cp.CopySpecificImages,
			instances:       []digest.Digest{digest3},
		},
		{
			label:           "refs 1 and 2 supplementing",
			supplements:     []types.ImageReference{ref1, ref2},
			expectToFind:    [][]byte{layer1, config1, layer2, config2},
			expectToNotFind: [][]byte{layer3, config3},
			multiple:        cp.CopySpecificImages,
			instances:       []digest.Digest{digest1, digest2},
		},
		{
			label:           "refs 2 and 3 supplementing",
			supplements:     []types.ImageReference{ref2, ref3},
			expectToFind:    [][]byte{layer2, config2, layer3, config3},
			expectToNotFind: [][]byte{layer1, config1},
			multiple:        cp.CopySpecificImages,
			instances:       []digest.Digest{digest2, digest3},
		},
		{
			label:           "refs 1 and 3 supplementing",
			supplements:     []types.ImageReference{ref1, ref3},
			expectToFind:    [][]byte{layer1, config1, layer3, config3},
			expectToNotFind: [][]byte{layer2, config2},
			multiple:        cp.CopySpecificImages,
			instances:       []digest.Digest{digest1, digest3},
		},
		{
			label:           "all refs supplementing, all instances",
			supplements:     []types.ImageReference{ref1, ref2, ref3},
			expectToFind:    [][]byte{layer1, config1, layer2, config2, layer3, config3},
			expectToNotFind: nil,
			multiple:        cp.CopySpecificImages,
			instances:       []digest.Digest{digest1, digest2, digest3},
		},
		{
			label:           "all refs supplementing, all images",
			supplements:     []types.ImageReference{ref1, ref2, ref3},
			expectToFind:    [][]byte{layer1, config1, layer2, config2, layer3, config3},
			expectToNotFind: nil,
			multiple:        cp.CopyAllImages,
		},
	} {
		supplemented := Reference(multiRef, test.supplements, test.multiple, test.instances)
		src, err := supplemented.NewImageSource(ctx, sys)
		assert.Nilf(t, err, "error opening image source 'dir:%s'[%s]", multidir, test.label)
		// NOTE(review): defer inside a loop fires at function exit, so
		// every source stays open until the test returns.
		defer src.Close()
		// Reachable blobs must be readable end-to-end.
		for i, expect := range test.expectToFind {
			bi := types.BlobInfo{
				Digest: digest.Canonical.FromBytes(expect),
				Size:   int64(len(expect)),
			}
			rc, _, err := src.GetBlob(ctx, bi, none.NoCache)
			assert.Nilf(t, err, "error reading blob 'dir:%s'[%s][%d]", multidir, test.label, i)
			_, err = io.Copy(ioutil.Discard, rc)
			assert.Nilf(t, err, "error discarding blob 'dir:%s'[%s][%d]", multidir, test.label, i)
			rc.Close()
		}
		// Unreachable blobs must fail to open.
		for i, expect := range test.expectToNotFind {
			bi := types.BlobInfo{
				Digest: digest.Canonical.FromBytes(expect),
				Size:   int64(len(expect)),
			}
			_, _, err := src.GetBlob(ctx, bi, none.NoCache)
			assert.NotNilf(t, err, "unexpected success reading blob 'dir:%s'[%s][%d]", multidir, test.label, i)
		}
		// Finally, a full copy with the same selection must succeed.
		options := cp.Options{
			ImageListSelection: test.multiple,
			Instances:          test.instances,
		}
		_, err = cp.Image(ctx, policyContext, destRef, supplemented, &options)
		assert.Nilf(t, err, "error copying image 'dir:%s'[%s]", multidir, test.label)
	}
}

322
pull.go
View File

@ -1,322 +0,0 @@
package buildah
import (
"context"
"io"
"strings"
"time"
"github.com/containers/buildah/define"
"github.com/containers/buildah/pkg/blobcache"
"github.com/containers/image/v5/directory"
"github.com/containers/image/v5/docker"
dockerarchive "github.com/containers/image/v5/docker/archive"
"github.com/containers/image/v5/docker/reference"
tarfile "github.com/containers/image/v5/docker/tarfile"
ociarchive "github.com/containers/image/v5/oci/archive"
oci "github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/signature"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
multierror "github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// PullOptions can be used to alter how an image is copied in from somewhere.
type PullOptions struct {
	// SignaturePolicyPath specifies an override location for the signature
	// policy which should be used for verifying the new image as it is
	// being written. Except in specific circumstances, no value should be
	// specified, indicating that the shared, system-wide default policy
	// should be used.
	SignaturePolicyPath string
	// ReportWriter is an io.Writer which will be used to log the writing
	// of the new image.
	ReportWriter io.Writer
	// Store is the local storage store which holds the source image.
	Store storage.Store
	// github.com/containers/image/types SystemContext to hold credentials
	// and other authentication/authorization information.
	SystemContext *types.SystemContext
	// BlobDirectory is the name of a directory in which we'll attempt to
	// store copies of layer blobs that we pull down, if any. It should
	// already exist.
	BlobDirectory string
	// AllTags is a boolean value that determines if all tagged images
	// will be downloaded from the repository. The default is false.
	AllTags bool
	// RemoveSignatures causes any existing signatures for the image to be
	// discarded when pulling it.
	RemoveSignatures bool
	// MaxRetries is the maximum number of attempts we'll make to pull any
	// one image from the external registry if the first attempt fails.
	MaxRetries int
	// RetryDelay is how long to wait before retrying a pull attempt after
	// a failure.
	RetryDelay time.Duration
	// OciDecryptConfig contains the config that can be used to decrypt an image if it is
	// encrypted if non-nil. If nil, it does not attempt to decrypt an image.
	OciDecryptConfig *encconfig.DecryptConfig
	// PullPolicy takes the value PullIfMissing, PullAlways, PullIfNewer, or PullNever
	// (declared in the define package).
	PullPolicy define.PullPolicy
}
// localImageNameForReference computes a name under which the image that
// srcRef points at can be stored in local storage: archive transports are
// named after their embedded repo tags or manifest digest, directory-based
// transports after a mangled path, and everything else after its
// docker-style reference. The computed name is validated against the
// storage transport before being returned.
func localImageNameForReference(ctx context.Context, store storage.Store, srcRef types.ImageReference) (string, error) {
	if srcRef == nil {
		return "", errors.Errorf("reference to image is empty")
	}
	var name string
	switch srcRef.Transport().Name() {
	case dockerarchive.Transport.Name():
		// docker-archive: prefer the first repo tag recorded in the
		// tarball's manifest.json; fall back to the image digest.
		file := srcRef.StringWithinTransport()
		tarSource, err := tarfile.NewSourceFromFile(file)
		if err != nil {
			return "", errors.Wrapf(err, "error opening tarfile %q as a source image", file)
		}
		defer tarSource.Close()
		manifest, err := tarSource.LoadTarManifest()
		if err != nil {
			return "", errors.Errorf("error retrieving manifest.json from tarfile %q: %v", file, err)
		}
		// to pull the first image stored in the tar file
		if len(manifest) == 0 {
			// use the hex of the digest if no manifest is found
			name, err = getImageDigest(ctx, srcRef, nil)
			if err != nil {
				return "", err
			}
		} else {
			if len(manifest[0].RepoTags) > 0 {
				name = manifest[0].RepoTags[0]
			} else {
				// If the input image has no repotags, we need to feed it a dest anyways
				name, err = getImageDigest(ctx, srcRef, nil)
				if err != nil {
					return "", err
				}
			}
		}
	case ociarchive.Transport.Name():
		// retrieve the manifest from index.json to access the image name
		manifest, err := ociarchive.LoadManifestDescriptor(srcRef)
		if err != nil {
			return "", errors.Wrapf(err, "error loading manifest for %q", transports.ImageName(srcRef))
		}
		// if index.json has no reference name, compute the image digest instead
		if manifest.Annotations == nil || manifest.Annotations["org.opencontainers.image.ref.name"] == "" {
			name, err = getImageDigest(ctx, srcRef, nil)
			if err != nil {
				return "", err
			}
		} else {
			name = manifest.Annotations["org.opencontainers.image.ref.name"]
		}
	case directory.Transport.Name():
		// supports pull from a directory
		name = toLocalImageName(srcRef.StringWithinTransport())
	case oci.Transport.Name():
		// supports pull from a directory; strip the ":tag" suffix first
		split := strings.SplitN(srcRef.StringWithinTransport(), ":", 2)
		name = toLocalImageName(split[0])
	default:
		ref := srcRef.DockerReference()
		if ref == nil {
			// No docker-style reference: try the raw transport string,
			// then its last path component, as storage references.
			name = srcRef.StringWithinTransport()
			_, err := is.Transport.ParseStoreReference(store, name)
			if err == nil {
				return name, nil
			}
			logrus.Debugf("error parsing local storage reference %q: %v", name, err)
			if strings.LastIndex(name, "/") != -1 {
				name = name[strings.LastIndex(name, "/")+1:]
				_, err = is.Transport.ParseStoreReference(store, name)
				if err == nil {
					// NOTE(review): err is nil in this branch, so
					// errors.Wrapf returns nil and this is effectively a
					// plain success return — confirm that is the intent.
					return name, errors.Wrapf(err, "error parsing local storage reference %q", name)
				}
			}
			return "", errors.Errorf("reference to image %q is not a named reference", transports.ImageName(srcRef))
		}
		// Rebuild the name from its parts, preserving tag and digest.
		if named, ok := ref.(reference.Named); ok {
			name = named.Name()
			if namedTagged, ok := ref.(reference.NamedTagged); ok {
				name = name + ":" + namedTagged.Tag()
			}
			if canonical, ok := ref.(reference.Canonical); ok {
				name = name + "@" + canonical.Digest().String()
			}
		}
	}
	// Whatever we computed must itself parse as a storage reference.
	if _, err := is.Transport.ParseStoreReference(store, name); err != nil {
		return "", errors.Wrapf(err, "error parsing computed local image name %q", name)
	}
	return name, nil
}
// Pull copies the contents of the image from somewhere else to local storage.  Returns the
// ID of the local image or an error.  When options.AllTags is set, every tag
// of the image's repository is pulled (docker transport only) and the ID of
// the last successfully pulled tag is returned, along with any per-tag
// errors joined into a multierror.
func Pull(ctx context.Context, imageName string, options PullOptions) (imageID string, err error) {
	systemContext := getSystemContext(options.Store, options.SystemContext, options.SignaturePolicyPath)
	boptions := BuilderOptions{
		FromImage:           imageName,
		SignaturePolicyPath: options.SignaturePolicyPath,
		SystemContext:       systemContext,
		BlobDirectory:       options.BlobDirectory,
		ReportWriter:        options.ReportWriter,
		MaxPullRetries:      options.MaxRetries,
		PullRetryDelay:      options.RetryDelay,
		OciDecryptConfig:    options.OciDecryptConfig,
		PullPolicy:          options.PullPolicy,
	}
	// Simple case: a single image; resolveImage handles the actual pull.
	if !options.AllTags {
		_, _, img, err := resolveImage(ctx, systemContext, options.Store, boptions)
		if err != nil {
			return "", err
		}
		return img.ID, nil
	}
	// --all-tags is only meaningful for registry (docker://) sources.
	srcRef, err := alltransports.ParseImageName(imageName)
	if err == nil && srcRef.Transport().Name() != docker.Transport.Name() {
		return "", errors.New("Non-docker transport is not supported, for --all-tags pulling")
	}
	// Resolve once to discover the repository, then enumerate its tags.
	storageRef, _, _, err := resolveImage(ctx, systemContext, options.Store, boptions)
	if err != nil {
		return "", err
	}
	var errs *multierror.Error
	repo := reference.TrimNamed(storageRef.DockerReference())
	dockerRef, err := docker.NewReference(reference.TagNameOnly(storageRef.DockerReference()))
	if err != nil {
		return "", errors.Wrapf(err, "internal error creating docker.Transport reference for %s", storageRef.DockerReference().String())
	}
	tags, err := docker.GetRepositoryTags(ctx, systemContext, dockerRef)
	if err != nil {
		return "", errors.Wrapf(err, "error getting repository tags")
	}
	// Pull each tag individually; failures are collected rather than
	// aborting the whole operation.
	for _, tag := range tags {
		tagged, err := reference.WithTag(repo, tag)
		if err != nil {
			errs = multierror.Append(errs, err)
			continue
		}
		taggedRef, err := docker.NewReference(tagged)
		if err != nil {
			return "", errors.Wrapf(err, "internal error creating docker.Transport reference for %s", tagged.String())
		}
		if options.ReportWriter != nil {
			if _, err := options.ReportWriter.Write([]byte("Pulling " + tagged.String() + "\n")); err != nil {
				return "", errors.Wrapf(err, "error writing pull report")
			}
		}
		ref, err := pullImage(ctx, options.Store, taggedRef, options, systemContext)
		if err != nil {
			errs = multierror.Append(errs, err)
			continue
		}
		taggedImg, err := is.Transport.GetStoreImage(options.Store, ref)
		if err != nil {
			errs = multierror.Append(errs, err)
			continue
		}
		// Last successful pull wins as the returned image ID.
		imageID = taggedImg.ID
	}
	return imageID, errs.ErrorOrNil()
}
// pullImage copies the image referenced by srcRef into local storage and
// returns a storage reference to the copied image.
//
// The pull is refused when the source registry is blocked by the registries
// configuration or denied by the registry-sources policy.  If the policy
// flags the registry as insecure, TLS verification is disabled on sc —
// NOTE(review): this mutates the caller's SystemContext in place; callers
// should not assume sc is left untouched.  When options.BlobDirectory is
// set, the destination is wrapped in a blob cache so layer blobs are also
// written to that directory.
func pullImage(ctx context.Context, store storage.Store, srcRef types.ImageReference, options PullOptions, sc *types.SystemContext) (types.ImageReference, error) {
	// Honor registry blocks from the registries configuration.
	blocked, err := isReferenceBlocked(srcRef, sc)
	if err != nil {
		return nil, errors.Wrapf(err, "error checking if pulling from registry for %q is blocked", transports.ImageName(srcRef))
	}
	if blocked {
		return nil, errors.Errorf("pull access to registry for %q is blocked by configuration", transports.ImageName(srcRef))
	}
	// Honor the registry-sources policy; it may also mark the source as insecure.
	insecure, err := checkRegistrySourcesAllows("pull from", srcRef)
	if err != nil {
		return nil, err
	}
	if insecure {
		// An insecure registry cannot be combined with an explicit
		// requirement for TLS verification.
		if sc.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
			return nil, errors.Errorf("can't require tls verification on an insecured registry")
		}
		// This mutates the caller's SystemContext: disable TLS
		// verification across all transports for this pull.
		sc.DockerInsecureSkipTLSVerify = types.OptionalBoolTrue
		sc.OCIInsecureSkipTLSVerify = true
		sc.DockerDaemonInsecureSkipTLSVerify = true
	}
	// Compute the name the image will carry in local storage.
	destName, err := localImageNameForReference(ctx, store, srcRef)
	if err != nil {
		return nil, errors.Wrapf(err, "error computing local image name for %q", transports.ImageName(srcRef))
	}
	if destName == "" {
		return nil, errors.Errorf("error computing local image name for %q", transports.ImageName(srcRef))
	}
	destRef, err := is.Transport.ParseStoreReference(store, destName)
	if err != nil {
		return nil, errors.Wrapf(err, "error parsing image name %q", destName)
	}
	// Optionally interpose a blob cache between the copy and local storage.
	var maybeCachedDestRef = types.ImageReference(destRef)
	if options.BlobDirectory != "" {
		cachedRef, err := blobcache.NewBlobCache(destRef, options.BlobDirectory, types.PreserveOriginal)
		if err != nil {
			return nil, errors.Wrapf(err, "error wrapping image reference %q in blob cache at %q", transports.ImageName(destRef), options.BlobDirectory)
		}
		maybeCachedDestRef = cachedRef
	}
	// Build a signature-policy context for the copy; it is destroyed when
	// this function returns.
	policy, err := signature.DefaultPolicy(sc)
	if err != nil {
		return nil, errors.Wrapf(err, "error obtaining default signature policy")
	}
	policyContext, err := signature.NewPolicyContext(policy)
	if err != nil {
		return nil, errors.Wrapf(err, "error creating new signature policy context")
	}
	defer func() {
		if err2 := policyContext.Destroy(); err2 != nil {
			logrus.Debugf("error destroying signature policy context: %v", err2)
		}
	}()
	logrus.Debugf("copying %q to %q", transports.ImageName(srcRef), destName)
	// Perform the copy, retrying transient failures per options.MaxRetries
	// and options.RetryDelay.
	if _, err := retryCopyImage(ctx, policyContext, maybeCachedDestRef, srcRef, srcRef, getCopyOptions(store, options.ReportWriter, sc, sc, "", options.RemoveSignatures, "", nil, nil, options.OciDecryptConfig), options.MaxRetries, options.RetryDelay); err != nil {
		logrus.Debugf("error copying src image [%q] to dest image [%q] err: %v", transports.ImageName(srcRef), destName, err)
		return nil, err
	}
	// Return the plain storage reference, not the blob-cache wrapper.
	return destRef, nil
}
// getImageDigest opens the image at src and returns its config blob digest,
// formatted as "@<hex>", suitable for use as an image-ID store reference.
func getImageDigest(ctx context.Context, src types.ImageReference, sc *types.SystemContext) (string, error) {
	img, err := src.NewImage(ctx, sc)
	if err != nil {
		return "", errors.Wrapf(err, "error opening image %q for reading", transports.ImageName(src))
	}
	defer img.Close()

	configDigest := img.ConfigInfo().Digest
	if err := configDigest.Validate(); err != nil {
		return "", errors.Wrapf(err, "error getting config info from image %q", transports.ImageName(src))
	}
	return "@" + configDigest.Hex(), nil
}
// toLocalImageName converts an image name into a 'localhost/' prefixed one,
// stripping any leading slashes from the input first.
func toLocalImageName(imageName string) string {
	trimmed := imageName
	for strings.HasPrefix(trimmed, "/") {
		trimmed = trimmed[1:]
	}
	return "localhost/" + trimmed
}

View File

@ -19,7 +19,7 @@ load helpers
run_buildah 0 login --username testuserfoo --password testpassword docker.io
run_buildah 125 logout --authfile /tmp/nonexistent docker.io
expect_output "error checking authfile path /tmp/nonexistent: stat /tmp/nonexistent: no such file or directory"
expect_output "checking authfile: stat /tmp/nonexistent: no such file or directory"
run_buildah 0 logout docker.io
}

View File

@ -78,7 +78,7 @@ load helpers
# Build, create a container, mount it, and list all files therein
run_buildah bud -t testbud2 --signature-policy ${TESTSDIR}/policy.json ${TESTDIR}/dockerignore2
run_buildah from testbud2
run_buildah from --pull=false testbud2
cid=$output
run_buildah mount $cid
@ -249,7 +249,7 @@ symlink(subdir)"
_prefetch busybox
target=foo
run_buildah bud --signature-policy ${TESTSDIR}/policy.json -t ${target} ${TESTSDIR}/bud/dest-final-slash
run_buildah from --signature-policy ${TESTSDIR}/policy.json ${target}
run_buildah from --pull=false --signature-policy ${TESTSDIR}/policy.json ${target}
cid="$output"
run_buildah run ${cid} /test/ls -lR /test/ls
}
@ -2152,8 +2152,7 @@ _EOF
@test "bud pull never" {
target=pull
run_buildah 125 bud --signature-policy ${TESTSDIR}/policy.json -t ${target} --pull-never ${TESTSDIR}/bud/pull
expect_output --substring "pull policy is \"never\" but \""
expect_output --substring "\" could not be found locally"
expect_output --substring "busybox: image not known"
run_buildah bud --signature-policy ${TESTSDIR}/policy.json -t ${target} --pull ${TESTSDIR}/bud/pull
expect_output --substring "COMMIT pull"

View File

@ -9,8 +9,7 @@ load helpers
target=pull
run_buildah 125 --storage-driver=overlay bud --signature-policy ${TESTSDIR}/policy.json -t ${target} --pull-never ${TESTSDIR}/bud/pull
expect_output --substring "pull policy is \"never\" but \""
expect_output --substring "\" could not be found locally"
expect_output --substring "image not known"
leftover=$(mount | grep $TESTDIR | cat)
if [ -n "$leftover" ]; then

View File

@ -28,7 +28,7 @@ load helpers
run_buildah rm $output
run_buildah 125 from sha256:1111111111111111111111111111111111111111111111111111111111111111
expect_output --substring "error locating image with ID \"1111111111111111111111111111111111111111111111111111111111111111\""
expect_output --substring "sha256:1111111111111111111111111111111111111111111111111111111111111111: image not known"
}
@test "commit-to-from-elsewhere" {
@ -68,8 +68,9 @@ load helpers
run_buildah commit --signature-policy ${TESTSDIR}/policy.json "$cid" scratch2
run_buildah rm $cid
run_buildah tag scratch2 scratch3
run_buildah from --signature-policy ${TESTSDIR}/policy.json scratch3
expect_output "scratch3-working-container"
# Set --pull=false to prevent looking for a newer scratch3 image.
run_buildah from --pull=false --signature-policy ${TESTSDIR}/policy.json scratch3
expect_output --substring "scratch3-working-container"
run_buildah rm $output
run_buildah rmi scratch2 scratch3
@ -137,7 +138,7 @@ load helpers
run_buildah rmi alpine
run_buildah from --quiet --signature-policy ${TESTSDIR}/policy.json docker-archive:${TESTDIR}/docker-alp.tar
expect_output "docker-archive-working-container"
expect_output "alpine-working-container"
run_buildah rm $output
run_buildah rmi -a
@ -276,7 +277,7 @@ load helpers
# Create a container that uses that mapping and U volume flag.
_prefetch alpine
run_buildah from --signature-policy ${TESTSDIR}/policy.json --userns-uid-map 0:$uidbase:$uidsize --userns-gid-map 0:$gidbase:$gidsize --volume ${TESTDIR}/testdata:/mnt:z,U alpine
run_buildah from --pull=false --signature-policy ${TESTSDIR}/policy.json --userns-uid-map 0:$uidbase:$uidsize --userns-gid-map 0:$gidbase:$gidsize --volume ${TESTDIR}/testdata:/mnt:z,U alpine
ctr="$output"
# Test mounted volume has correct UID and GID ownership.
@ -330,8 +331,7 @@ load helpers
@test "from pull never" {
run_buildah 125 from --signature-policy ${TESTSDIR}/policy.json --pull-never busybox
echo "$output"
expect_output --substring "pull policy is \"never\" but \""
expect_output --substring "\" could not be found locally"
expect_output --substring "busybox: image not known"
run_buildah from --signature-policy ${TESTSDIR}/policy.json --pull=false busybox
echo "$output"
@ -352,7 +352,7 @@ load helpers
@test "from with nonexistent authfile: fails" {
run_buildah 125 from --authfile /no/such/file --pull --signature-policy ${TESTSDIR}/policy.json alpine
expect_output "error checking authfile path /no/such/file: stat /no/such/file: no such file or directory"
expect_output "checking authfile: stat /no/such/file: no such file or directory"
}
@test "from --pull-always: emits 'Getting' even if image is cached" {
@ -490,7 +490,7 @@ load helpers
@test "from ulimit test" {
_prefetch alpine
run_buildah from -q --ulimit cpu=300 --signature-policy ${TESTDIR}/policy.json alpine
run_buildah from -q --ulimit cpu=300 --signature-policy ${TESTSDIR}/policy.json alpine
cid=$output
run_buildah run $cid /bin/sh -c "ulimit -t"
expect_output "300" "ulimit -t"
@ -498,7 +498,7 @@ load helpers
@test "from isolation test" {
_prefetch alpine
run_buildah from -q --isolation chroot --signature-policy ${TESTDIR}/policy.json alpine
run_buildah from -q --isolation chroot --signature-policy ${TESTSDIR}/policy.json alpine
cid=$output
run_buildah inspect $cid
expect_output --substring '"Isolation": "chroot"'
@ -517,13 +517,13 @@ load helpers
_prefetch alpine
# with cgroup-parent
run_buildah from -q --cgroup-parent test-cgroup --signature-policy ${TESTDIR}/policy.json alpine
run_buildah from -q --cgroup-parent test-cgroup --signature-policy ${TESTSDIR}/policy.json alpine
cid=$output
run_buildah run $cid /bin/sh -c 'cat /proc/$$/cgroup'
expect_output --substring "test-cgroup"
# without cgroup-parent
run_buildah from -q --signature-policy ${TESTDIR}/policy.json alpine
run_buildah from -q --signature-policy ${TESTSDIR}/policy.json alpine
cid=$output
run_buildah run $cid /bin/sh -c 'cat /proc/$$/cgroup'
if [ -n "$(grep "test-cgroup" <<< "$output")" ]; then
@ -538,7 +538,7 @@ load helpers
cni_plugin_path=${TESTDIR}/no-cni-plugin
mkdir -p ${cni_config_dir}
mkdir -p ${cni_plugin_path}
run_buildah from -q --cni-config-dir=${cni_config_dir} --cni-plugin-path=${cni_plugin_path} --signature-policy ${TESTDIR}/policy.json alpine
run_buildah from -q --cni-config-dir=${cni_config_dir} --cni-plugin-path=${cni_plugin_path} --signature-policy ${TESTSDIR}/policy.json alpine
cid=$output
run_buildah inspect --format '{{.CNIConfigDir}}' $cid

View File

@ -48,7 +48,7 @@ load helpers
cid2=$output
run_buildah 125 images --noheading --filter since k8s.gcr.io/pause
expect_output 'invalid filter: "since" requires value'
expect_output 'invalid image filter "since": must be in the format "filter=value"'
run_buildah images --noheading --filter since=k8s.gcr.io/pause
@ -148,7 +148,7 @@ load helpers
@test "specify a nonexistent image" {
run_buildah 125 images alpine
expect_output --from="${lines[0]}" "No such image: alpine"
expect_output --from="${lines[0]}" "alpine: image not known"
expect_line_count 1
}

View File

@ -105,7 +105,7 @@ func main() {
manifestType := ""
configType := ""
ref, _, err := util.FindImage(store, "", systemContext, image)
ref, _, err := util.FindImage(store, systemContext, image)
if err != nil {
ref2, err2 := alltransports.ParseImageName(image)
if err2 != nil {

View File

@ -30,7 +30,7 @@ load helpers
@test "pull-blocked" {
run_buildah 125 --registries-conf ${TESTSDIR}/registries.conf.block pull --signature-policy ${TESTSDIR}/policy.json docker.io/alpine
expect_output --substring "is blocked by configuration"
expect_output --substring "registry docker.io is blocked in"
run_buildah --retry --registries-conf ${TESTSDIR}/registries.conf pull --signature-policy ${TESTSDIR}/policy.json docker.io/alpine
}
@ -65,7 +65,7 @@ load helpers
run_buildah images --format "{{.Name}}:{{.Tag}}"
expect_output --substring "alpine"
run_buildah 125 pull --all-tags --signature-policy ${TESTSDIR}/policy.json docker-archive:${TESTDIR}/alp.tar
expect_output "Non-docker transport is not supported, for --all-tags pulling"
expect_output --substring "pulling all tags is not supported for docker-archive transport"
}
@test "pull-from-oci-archive" {
@ -76,7 +76,7 @@ load helpers
run_buildah images --format "{{.Name}}:{{.Tag}}"
expect_output --substring "alpine"
run_buildah 125 pull --all-tags --signature-policy ${TESTSDIR}/policy.json oci-archive:${TESTDIR}/alp.tar
expect_output "Non-docker transport is not supported, for --all-tags pulling"
expect_output --substring "pulling all tags is not supported for oci-archive transport"
}
@test "pull-from-local-directory" {
@ -88,7 +88,7 @@ load helpers
run_buildah images --format "{{.Name}}:{{.Tag}}"
expect_output --substring "localhost${TESTDIR}/buildahtest:latest"
run_buildah 125 pull --all-tags --signature-policy ${TESTSDIR}/policy.json dir:${TESTDIR}/buildahtest
expect_output "Non-docker transport is not supported, for --all-tags pulling"
expect_output --substring "pulling all tags is not supported for dir transport"
}
@test "pull-from-docker-daemon" {
@ -102,7 +102,7 @@ load helpers
expect_output --substring "alpine:latest"
run_buildah rmi alpine
run_buildah 125 pull --all-tags --signature-policy ${TESTSDIR}/policy.json docker-daemon:docker.io/library/alpine:latest
expect_output --substring "Non-docker transport is not supported, for --all-tags pulling"
expect_output --substring "pulling all tags is not supported for docker-daemon transport"
}
@test "pull-all-tags" {
@ -127,7 +127,7 @@ load helpers
# Now pull with --all-tags, and confirm that we see all expected tag strings
run_buildah pull $opts --all-tags localhost:5000/myalpine
for tag in "${tags[@]}"; do
expect_output --substring "Pulling localhost:5000/myalpine:$tag"
expect_output --substring "Trying to pull localhost:5000/myalpine:$tag"
done
# Confirm that 'images -a' lists all of them. <Brackets> help confirm
@ -151,25 +151,25 @@ load helpers
run_buildah images --format "{{.Name}}:{{.Tag}}"
expect_output --substring "localhost${TESTDIR}/alpine:latest"
run_buildah 125 pull --all-tags --signature-policy ${TESTSDIR}/policy.json oci:${TESTDIR}/alpine
expect_output "Non-docker transport is not supported, for --all-tags pulling"
expect_output --substring "pulling all tags is not supported for oci transport"
}
@test "pull-denied-by-registry-sources" {
export BUILD_REGISTRY_SOURCES='{"blockedRegistries": ["docker.io"]}'
run_buildah 125 pull --signature-policy ${TESTSDIR}/policy.json --registries-conf ${TESTSDIR}/registries.conf.hub --quiet busybox
expect_output --substring 'pull from registry at "docker.io" denied by policy: it is in the blocked registries list'
expect_output --substring 'registry "docker.io" denied by policy: it is in the blocked registries list'
run_buildah 125 pull --signature-policy ${TESTSDIR}/policy.json --registries-conf ${TESTSDIR}/registries.conf.hub --quiet busybox
expect_output --substring 'pull from registry at "docker.io" denied by policy: it is in the blocked registries list'
expect_output --substring 'registry "docker.io" denied by policy: it is in the blocked registries list'
export BUILD_REGISTRY_SOURCES='{"allowedRegistries": ["some-other-registry.example.com"]}'
run_buildah 125 pull --signature-policy ${TESTSDIR}/policy.json --registries-conf ${TESTSDIR}/registries.conf.hub --quiet busybox
expect_output --substring 'pull from registry at "docker.io" denied by policy: not in allowed registries list'
expect_output --substring 'registry "docker.io" denied by policy: not in allowed registries list'
run_buildah 125 pull --signature-policy ${TESTSDIR}/policy.json --registries-conf ${TESTSDIR}/registries.conf.hub --quiet busybox
expect_output --substring 'pull from registry at "docker.io" denied by policy: not in allowed registries list'
expect_output --substring 'registry "docker.io" denied by policy: not in allowed registries list'
}
@test "pull should fail with nonexistent authfile" {
@ -278,11 +278,11 @@ load helpers
@test "pull-policy" {
mkdir ${TESTDIR}/buildahtest
run_buildah 125 pull --signature-policy ${TESTSDIR}/policy.json --policy bogus alpine
expect_output --substring "unrecognized pull policy bogus"
expect_output --substring "unsupported pull policy \"bogus\""
# If image does not exist the never will fail
run_buildah 125 pull -q --signature-policy ${TESTSDIR}/policy.json --policy never alpine
expect_output --substring "could not be found locally"
expect_output --substring "image not known"
run_buildah 125 inspect --type image alpine
expect_output --substring "image not known"

View File

@ -83,7 +83,7 @@ load helpers
_prefetch busybox
run_buildah pull --signature-policy ${TESTSDIR}/policy.json busybox
run_buildah 125 push --signature-policy ${TESTSDIR}/policy.json busybox
expect_output --substring "docker://busybox"
expect_output --substring "busybox"
}
@test "push should fail with nonexistent authfile" {
@ -107,7 +107,6 @@ load helpers
run_buildah pull --signature-policy ${TESTSDIR}/policy.json --quiet busybox
run_buildah 125 push --signature-policy ${TESTSDIR}/policy.json busybox docker://registry.example.com/evenbusierbox
expect_output --substring 'push to registry at "registry.example.com" denied by policy: it is in the blocked registries list'
export BUILD_REGISTRY_SOURCES='{"allowedRegistries": ["some-other-registry.example.com"]}'
@ -118,7 +117,7 @@ load helpers
run_buildah pull --signature-policy ${TESTSDIR}/policy.json --quiet busybox
run_buildah 125 push --signature-policy ${TESTSDIR}/policy.json busybox docker://registry.example.com/evenbusierbox
expect_output --substring 'push to registry at "registry.example.com" denied by policy: not in allowed registries list'
expect_output --substring 'registry "registry.example.com" denied by policy: not in allowed registries list'
}

View File

@ -25,9 +25,9 @@ load helpers
@test "remove multiple images" {
_prefetch alpine busybox
run_buildah from --quiet --signature-policy ${TESTSDIR}/policy.json alpine
run_buildah from --pull=false --quiet --signature-policy ${TESTSDIR}/policy.json alpine
cid2=$output
run_buildah from --quiet --signature-policy ${TESTSDIR}/policy.json busybox
run_buildah from --pull=false --quiet --signature-policy ${TESTSDIR}/policy.json busybox
cid3=$output
run_buildah 125 rmi alpine busybox
run_buildah images -q
@ -40,10 +40,13 @@ load helpers
@test "remove multiple non-existent images errors" {
run_buildah 125 rmi image1 image2 image3
expect_output --from="${lines[0]}" "could not get image \"image1\": identifier is not an image" "output line 1"
expect_output --from="${lines[1]}" "could not get image \"image2\": identifier is not an image" "output line 2"
expect_output --from="${lines[2]}" "could not get image \"image3\": identifier is not an image" "output line 3"
[ $(wc -l <<< "$output") -gt 2 ]
expect_output --from="${lines[0]}" "image1: image not known"
run_buildah 125 rmi image2 image3
expect_output --from="${lines[0]}" "image2: image not known"
run_buildah 125 rmi image3
expect_output --from="${lines[0]}" "image3: image not known"
}
@test "remove all images" {
@ -80,7 +83,7 @@ load helpers
createrandom ${TESTDIR}/randomfile
createrandom ${TESTDIR}/other-randomfile
run_buildah from --quiet --signature-policy ${TESTSDIR}/policy.json busybox
run_buildah from --pull=false --quiet --signature-policy ${TESTSDIR}/policy.json busybox
cid=$output
run_buildah images -q
@ -180,22 +183,19 @@ load helpers
@test "remove image that is a parent of another image" {
_prefetch alpine
run_buildah from --quiet --pull=true --signature-policy ${TESTSDIR}/policy.json alpine
run_buildah from --quiet --pull=false --signature-policy ${TESTSDIR}/policy.json alpine
cid=$output
run_buildah config --entrypoint '[ "/ENTRYPOINT" ]' $cid
run_buildah commit --signature-policy ${TESTSDIR}/policy.json $cid new-image
run_buildah rm -a
run_buildah 125 rmi alpine
expect_line_count 2
# Since it has children, alpine will only be untagged (Podman compat).
run_buildah rmi alpine
expect_output --substring "untagged: "
run_buildah images -q
expect_line_count 1
run_buildah images -q -a
expect_line_count 2
local try_to_delete=${lines[1]}
run_buildah 125 rmi $try_to_delete
expect_output --substring "unable to delete \"$try_to_delete.*\" \(cannot be forced\) - image has dependent child images"
run_buildah rmi new-image
}
@test "rmi with cached images" {
@ -208,13 +208,12 @@ load helpers
expect_line_count 9
run_buildah rmi test2
run_buildah images -a -q
expect_line_count 7
expect_line_count 6
run_buildah rmi test1
run_buildah images -a -q
expect_line_count 1
run_buildah bud --signature-policy ${TESTSDIR}/policy.json --layers -t test3 -f Dockerfile.2 ${TESTSDIR}/bud/use-layers
run_buildah 125 rmi alpine
expect_line_count 2
run_buildah rmi alpine
run_buildah rmi test3
run_buildah images -a -q
expect_output ""

View File

@ -5,7 +5,6 @@ import (
"io"
"net/url"
"os"
"path"
"path/filepath"
"sort"
"strings"
@ -13,12 +12,12 @@ import (
"syscall"
"github.com/containers/buildah/define"
"github.com/containers/common/libimage"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/pkg/shortnames"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/signature"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
@ -46,7 +45,7 @@ var (
}
)
// ResolveName checks if name is a valid image name, and if that name doesn't
// resolveName checks if name is a valid image name, and if that name doesn't
// include a domain portion, returns a list of the names which it might
// correspond to in the set of configured registries, the transport used to
// pull the image, and a boolean which is true iff
@ -59,7 +58,7 @@ var (
//
// NOTE: The "list of search registries is empty" check does not count blocked registries,
// and neither the implied "localhost" nor a possible firstRegistry are counted
func ResolveName(name string, firstRegistry string, sc *types.SystemContext, store storage.Store) ([]string, string, bool, error) {
func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]string, string, bool, error) {
if name == "" {
return nil, "", false, nil
}
@ -112,16 +111,6 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
searchRegistriesAreEmpty := len(registries) == 0
var candidates []string
// Set the first registry if requested.
if firstRegistry != "" && firstRegistry != "localhost" {
middle := ""
if prefix, ok := RegistryDefaultPathPrefix[firstRegistry]; ok && !strings.ContainsRune(name, '/') {
middle = prefix
}
candidate := path.Join(firstRegistry, middle, name)
candidates = append(candidates, candidate)
}
// Local short-name resolution.
namedCandidates, err := shortnames.ResolveLocally(sc, name)
if err != nil {
@ -144,11 +133,11 @@ func StartsWithValidTransport(name string) bool {
// the fully expanded result, including a tag. Names which don't include a registry
// name will be marked for the most-preferred registry (i.e., the first one in our
// configuration).
func ExpandNames(names []string, firstRegistry string, systemContext *types.SystemContext, store storage.Store) ([]string, error) {
func ExpandNames(names []string, systemContext *types.SystemContext, store storage.Store) ([]string, error) {
expanded := make([]string, 0, len(names))
for _, n := range names {
var name reference.Named
nameList, _, _, err := ResolveName(n, firstRegistry, systemContext, store)
nameList, _, _, err := resolveName(n, systemContext, store)
if err != nil {
return nil, errors.Wrapf(err, "error parsing name %q", n)
}
@ -172,45 +161,35 @@ func ExpandNames(names []string, firstRegistry string, systemContext *types.Syst
}
// FindImage locates the locally-stored image which corresponds to a given name.
func FindImage(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image string) (types.ImageReference, *storage.Image, error) {
var ref types.ImageReference
var img *storage.Image
var err error
names, _, _, err := ResolveName(image, firstRegistry, systemContext, store)
func FindImage(store storage.Store, systemContext *types.SystemContext, image string) (types.ImageReference, *storage.Image, error) {
runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
if err != nil {
return nil, nil, errors.Wrapf(err, "error parsing name %q", image)
return nil, nil, err
}
for _, name := range names {
ref, err = is.Transport.ParseStoreReference(store, name)
localImage, _, err := runtime.LookupImage(image, &libimage.LookupImageOptions{IgnorePlatform: true})
if err != nil {
logrus.Debugf("error parsing reference to image %q: %v", name, err)
continue
return nil, nil, err
}
img, err = is.Transport.GetStoreImage(store, ref)
if localImage == nil {
return nil, nil, errors.Wrap(storage.ErrImageUnknown, image)
}
ref, err := localImage.StorageReference()
if err != nil {
img2, err2 := store.Image(name)
if err2 != nil {
logrus.Debugf("error locating image %q: %v", name, err2)
continue
return nil, nil, err
}
img = img2
}
break
}
if ref == nil || img == nil {
return nil, nil, errors.Wrapf(err, "error locating image with name %q (%v)", image, names)
}
return ref, img, nil
return ref, localImage.StorageImage(), nil
}
// ResolveNameToReferences tries to create a list of possible references
// resolveNameToReferences tries to create a list of possible references
// (including their transports) from the provided image name.
func ResolveNameToReferences(
store storage.Store,
systemContext *types.SystemContext,
image string,
) (refs []types.ImageReference, err error) {
names, transport, _, err := ResolveName(image, "", systemContext, store)
names, transport, _, err := resolveName(image, systemContext, store)
if err != nil {
return nil, errors.Wrapf(err, "error parsing name %q", image)
}
@ -233,16 +212,27 @@ func ResolveNameToReferences(
return refs, nil
}
// AddImageNames adds the specified names to the specified image.
func AddImageNames(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image *storage.Image, addNames []string) error {
names, err := ExpandNames(addNames, firstRegistry, systemContext, store)
// TagImage adds the specified names to the specified image.
func TagImage(store storage.Store, systemContext *types.SystemContext, image *storage.Image, addNames []string) error {
runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
if err != nil {
return err
}
err = store.SetNames(image.ID, append(image.Names, names...))
localImage, _, err := runtime.LookupImage(image.ID, nil)
if err != nil {
return errors.Wrapf(err, "error adding names (%v) to image %q", names, image.ID)
return err
}
if localImage == nil {
return errors.Errorf("could not find libimage for %s", image.ID)
}
for _, tag := range addNames {
if err := localImage.Tag(tag); err != nil {
return errors.Wrapf(err, "error tagging image %s", image.ID)
}
}
return nil
}

403
vendor/github.com/containers/common/libimage/copier.go generated vendored Normal file
View File

@ -0,0 +1,403 @@
package libimage
import (
"context"
"encoding/json"
"io"
"os"
"strings"
"time"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/copy"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/signature"
storageTransport "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/types"
encconfig "github.com/containers/ocicrypt/config"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
	// defaultMaxRetries is the retry count used when
	// CopyOptions.MaxRetries is unset.
	defaultMaxRetries = 3
	// defaultRetryDelay is the exponential-backoff base delay used when
	// CopyOptions.RetryDelay is unset.
	defaultRetryDelay = time.Second
)
// LookupReferenceFunc returns an image reference based on the specified one.
// This can be used to pass custom blob caches to the copy operation.
type LookupReferenceFunc func(ref types.ImageReference) (types.ImageReference, error)
// CopyOptions allow for customizing image-copy operations.
type CopyOptions struct {
	// If set, will be used for copying the image.  Fields below may
	// override certain settings.
	SystemContext *types.SystemContext
	// Allows for customizing the source reference lookup.  This can be
	// used to use custom blob caches.
	SourceLookupReferenceFunc LookupReferenceFunc
	// Allows for customizing the destination reference lookup.  This can
	// be used to use custom blob caches.
	DestinationLookupReferenceFunc LookupReferenceFunc

	// containers-auth.json(5) file to use when authenticating against
	// container registries.
	AuthFilePath string
	// Custom path to a blob-info cache.
	BlobInfoCacheDirPath string
	// Path to the certificates directory.
	CertDirPath string
	// Allow contacting registries over HTTP, or HTTPS with failed TLS
	// verification.  Note that this does not affect other TLS connections.
	InsecureSkipTLSVerify types.OptionalBool
	// Maximum number of retries with exponential backoff when facing
	// transient network errors.  A reasonable default is used if not set.
	// Default 3.
	MaxRetries *uint
	// RetryDelay used for the exponential back off of MaxRetries.
	// Default 1 time.Second.
	RetryDelay *time.Duration
	// ManifestMIMEType is the desired media type the image will be
	// converted to if needed.  Note that it must contain the exact MIME
	// types.  Short forms (e.g., oci, v2s2) used by some tools are not
	// supported.
	ManifestMIMEType string
	// If OciEncryptConfig is non-nil, it indicates that an image should be
	// encrypted.  The encryption options are derived from the construction
	// of EncryptConfig object.  Note: During initial encryption process of
	// a layer, the resultant digest is not known during creation, so
	// newDigestingReader has to be set with validateDigest = false
	OciEncryptConfig *encconfig.EncryptConfig
	// OciEncryptLayers represents the list of layers to encrypt.  If nil,
	// don't encrypt any layers.  If non-nil and len==0, denotes encrypt
	// all layers.  Integers in the slice represent 0-indexed layer
	// indices, with support for negative indexing, i.e., 0 is the first
	// layer and -1 is the last (top-most) layer.
	OciEncryptLayers *[]int
	// OciDecryptConfig contains the config that can be used to decrypt an
	// image if it is encrypted, if non-nil.  If nil, it does not attempt
	// to decrypt an image.
	OciDecryptConfig *encconfig.DecryptConfig
	// Progress is reported to when ProgressInterval has arrived for a
	// single artifact+offset.
	Progress chan types.ProgressProperties
	// If set, allow using the storage transport even if it's disabled by
	// the specified SignaturePolicyPath.
	PolicyAllowStorage bool
	// SignaturePolicyPath to overwrite the default one.
	SignaturePolicyPath string
	// If non-empty, asks for a signature to be added during the copy, and
	// specifies a key ID.
	SignBy string
	// Remove any pre-existing signatures.  SignBy will still add a new
	// signature.
	RemoveSignatures bool
	// Writer is used to display copy information including progress bars.
	Writer io.Writer

	// ----- platform -----------------------------------------------------

	// Architecture to use for choosing images.
	Architecture string
	// OS to use for choosing images.
	OS string
	// Variant to use when choosing images.
	Variant string

	// ----- credentials --------------------------------------------------

	// Username to use when authenticating at a container registry.
	Username string
	// Password to use when authenticating at a container registry.
	Password string
	// Credentials is an alternative way to specify credentials in format
	// "username[:password]".  Cannot be used in combination with
	// Username/Password.
	Credentials string

	// ----- internal -----------------------------------------------------

	// Additional tags when creating or copying a docker-archive.
	dockerArchiveAdditionalTags []reference.NamedTagged
}
// copier is an internal helper to conveniently copy images.
type copier struct {
	// imageCopyOptions are handed to containers/image's copy machinery.
	imageCopyOptions copy.Options
	// retryOptions controls retries of transient copy failures.
	retryOptions retry.RetryOptions
	// systemContext is the (possibly option-customized) context for the copy.
	systemContext *types.SystemContext
	// policyContext holds the signature policy; callers must release it
	// via (*copier).close().
	policyContext *signature.PolicyContext
	// sourceLookup, if set, rewrites the source reference (e.g., for blob caches).
	sourceLookup LookupReferenceFunc
	// destinationLookup, if set, rewrites the destination reference.
	destinationLookup LookupReferenceFunc
}
var (
	// storageAllowedPolicyScopes overrides the policy for local storage
	// to ensure that we can read images from it.
	// NOTE(review): presumably applied when CopyOptions.PolicyAllowStorage
	// is set — confirm at the use site.
	storageAllowedPolicyScopes = signature.PolicyTransportScopes{
		"": []signature.PolicyRequirement{
			signature.NewPRInsecureAcceptAnything(),
		},
	}
)
// getDockerAuthConfig extracts a docker auth config from the CopyOptions.
// It returns nil when no credentials are set at all.
func (options *CopyOptions) getDockerAuthConfig() (*types.DockerAuthConfig, error) {
	if options.Username != "" {
		// Explicit username/password wins, but must not be combined
		// with the Credentials form.
		if options.Credentials != "" {
			return nil, errors.New("username/password cannot be used with credentials")
		}
		return &types.DockerAuthConfig{
			Username: options.Username,
			Password: options.Password,
		}, nil
	}

	if options.Credentials == "" {
		// No credentials were provided at all.
		return nil, nil
	}

	// Credentials come as "username[:password]"; the password part is
	// optional.
	parts := strings.SplitN(options.Credentials, ":", 2)
	auth := &types.DockerAuthConfig{Username: parts[0]}
	if len(parts) == 2 {
		auth.Password = parts[1]
	}
	return auth, nil
}
// newCopier creates a copier. Note that fields in options *may* overwrite the
// counterparts of the specified system context. Please make sure to call
// `(*copier).close()`.
func newCopier(sys *types.SystemContext, options *CopyOptions) (*copier, error) {
	c := copier{}
	// Install the optional reference-rewriting hooks.
	if options.SourceLookupReferenceFunc != nil {
		c.sourceLookup = options.SourceLookupReferenceFunc
	}
	if options.DestinationLookupReferenceFunc != nil {
		c.destinationLookup = options.DestinationLookupReferenceFunc
	}
	// Use the caller's system context if given; otherwise start from an
	// empty one.  Non-empty option fields below overwrite its settings.
	c.systemContext = sys
	if c.systemContext == nil {
		c.systemContext = &types.SystemContext{}
	}
	if options.AuthFilePath != "" {
		c.systemContext.AuthFilePath = options.AuthFilePath
	}
	c.systemContext.DockerArchiveAdditionalTags = options.dockerArchiveAdditionalTags
	// Platform overrides (arch/OS/variant) for image selection.
	if options.Architecture != "" {
		c.systemContext.ArchitectureChoice = options.Architecture
	}
	if options.OS != "" {
		c.systemContext.OSChoice = options.OS
	}
	if options.Variant != "" {
		c.systemContext.VariantChoice = options.Variant
	}
	if options.SignaturePolicyPath != "" {
		c.systemContext.SignaturePolicyPath = options.SignaturePolicyPath
	}
	// Credentials from Username/Password or Credentials (mutually
	// exclusive); nil means "use whatever the auth file provides".
	dockerAuthConfig, err := options.getDockerAuthConfig()
	if err != nil {
		return nil, err
	}
	if dockerAuthConfig != nil {
		c.systemContext.DockerAuthConfig = dockerAuthConfig
	}
	if options.BlobInfoCacheDirPath != "" {
		c.systemContext.BlobInfoCacheDir = options.BlobInfoCacheDirPath
	}
	policy, err := signature.DefaultPolicy(sys)
	if err != nil {
		return nil, err
	}
	// Buildah compatibility: even if the policy denies _all_ transports,
	// Buildah still wants the storage to be accessible.
	if options.PolicyAllowStorage {
		policy.Transports[storageTransport.Transport.Name()] = storageAllowedPolicyScopes
	}
	policyContext, err := signature.NewPolicyContext(policy)
	if err != nil {
		return nil, err
	}
	c.policyContext = policyContext
	// Retry configuration, falling back to package defaults.
	c.retryOptions.MaxRetry = defaultMaxRetries
	if options.MaxRetries != nil {
		c.retryOptions.MaxRetry = int(*options.MaxRetries)
	}
	c.retryOptions.Delay = defaultRetryDelay
	if options.RetryDelay != nil {
		c.retryOptions.Delay = *options.RetryDelay
	}
	// Wire the remaining options straight into c/image's copy.Options.
	c.imageCopyOptions.Progress = options.Progress
	if c.imageCopyOptions.Progress != nil {
		c.imageCopyOptions.ProgressInterval = time.Second
	}
	c.imageCopyOptions.ForceManifestMIMEType = options.ManifestMIMEType
	// Note: source and destination share the same system context.
	c.imageCopyOptions.SourceCtx = c.systemContext
	c.imageCopyOptions.DestinationCtx = c.systemContext
	c.imageCopyOptions.OciEncryptConfig = options.OciEncryptConfig
	c.imageCopyOptions.OciEncryptLayers = options.OciEncryptLayers
	c.imageCopyOptions.OciDecryptConfig = options.OciDecryptConfig
	c.imageCopyOptions.RemoveSignatures = options.RemoveSignatures
	c.imageCopyOptions.SignBy = options.SignBy
	c.imageCopyOptions.ReportWriter = options.Writer
	return &c, nil
}
// close open resources.  Must be called when the copier is no longer needed;
// it destroys the signature policy context created in newCopier.
func (c *copier) close() error {
	return c.policyContext.Destroy()
}
// copy the source to the destination. Returns the bytes of the copied
// manifest which may be used for digest computation.
//
// The optional source/destination lookup hooks are applied first, then
// OpenShift's $BUILD_REGISTRY_SOURCES policy is consulted, and finally the
// copy is performed with retries as configured in newCopier.
func (c *copier) copy(ctx context.Context, source, destination types.ImageReference) ([]byte, error) {
	logrus.Debugf("Copying source image %s to destination image %s", source.StringWithinTransport(), destination.StringWithinTransport())

	var err error

	if c.sourceLookup != nil {
		source, err = c.sourceLookup(source)
		if err != nil {
			return nil, err
		}
	}

	if c.destinationLookup != nil {
		destination, err = c.destinationLookup(destination)
		if err != nil {
			return nil, err
		}
	}

	// Buildah compat: used when running in OpenShift.
	sourceInsecure, err := checkRegistrySourcesAllows(source)
	if err != nil {
		return nil, err
	}
	destinationInsecure, err := checkRegistrySourcesAllows(destination)
	if err != nil {
		return nil, err
	}

	// Sanity checks for Buildah: an explicit "verify TLS" setting is
	// incompatible with a registry the policy marks insecure.
	if sourceInsecure != nil && *sourceInsecure {
		if c.systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
			return nil, errors.Errorf("can't require tls verification on an insecured registry")
		}
	}
	if destinationInsecure != nil && *destinationInsecure {
		if c.systemContext.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse {
			return nil, errors.Errorf("can't require tls verification on an insecured registry")
		}
	}

	var copiedManifest []byte
	f := func() error {
		// opts is a shallow copy of c.imageCopyOptions.
		// NOTE(review): SourceCtx and DestinationCtx still point at
		// c.systemContext (set in newCopier), so the overrides below
		// mutate the shared context across retries and future copies —
		// confirm this is intended.
		opts := c.imageCopyOptions
		if sourceInsecure != nil {
			value := types.NewOptionalBool(*sourceInsecure)
			opts.SourceCtx.DockerInsecureSkipTLSVerify = value
		}
		if destinationInsecure != nil {
			value := types.NewOptionalBool(*destinationInsecure)
			opts.DestinationCtx.DockerInsecureSkipTLSVerify = value
		}
		var err error
		copiedManifest, err = copy.Image(ctx, c.policyContext, destination, source, &opts)
		return err
	}
	return copiedManifest, retry.RetryIfNecessary(ctx, f, &c.retryOptions)
}
// checkRegistrySourcesAllows checks the $BUILD_REGISTRY_SOURCES environment
// variable, if it's set. The contents are expected to be a JSON-encoded
// github.com/openshift/api/config/v1.Image, set by an OpenShift build
// controller that arranged for us to be run in a container.
//
// If set, the insecure return value indicates whether the registry is set to
// be insecure.
//
// NOTE: this functionality is required by Buildah.
func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err error) {
	registrySources, ok := os.LookupEnv("BUILD_REGISTRY_SOURCES")
	if !ok || registrySources == "" {
		return nil, nil
	}

	logrus.Debugf("BUILD_REGISTRY_SOURCES set %q", registrySources)

	// Only references with a registry domain can be policy-checked.
	dref := dest.DockerReference()
	if dref == nil || reference.Domain(dref) == "" {
		return nil, nil
	}
	domain := reference.Domain(dref)

	// Use local struct instead of github.com/openshift/api/config/v1 RegistrySources
	var sources struct {
		InsecureRegistries []string `json:"insecureRegistries,omitempty"`
		BlockedRegistries  []string `json:"blockedRegistries,omitempty"`
		AllowedRegistries  []string `json:"allowedRegistries,omitempty"`
	}
	if err := json.Unmarshal([]byte(registrySources), &sources); err != nil {
		return nil, errors.Wrapf(err, "error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON", registrySources)
	}

	// The block list always wins.
	for _, blockedDomain := range sources.BlockedRegistries {
		if blockedDomain == domain {
			return nil, errors.Errorf("registry %q denied by policy: it is in the blocked registries list (%s)", domain, registrySources)
		}
	}

	// A non-empty allow list must explicitly contain the domain.
	if len(sources.AllowedRegistries) > 0 {
		allowed := false
		for _, allowedDomain := range sources.AllowedRegistries {
			if allowedDomain == domain {
				allowed = true
				break
			}
		}
		if !allowed {
			return nil, errors.Errorf("registry %q denied by policy: not in allowed registries list (%s)", domain, registrySources)
		}
	}

	for _, insecureDomain := range sources.InsecureRegistries {
		if insecureDomain == domain {
			isInsecure := true
			return &isInsecure, nil
		}
	}

	return nil, nil
}

View File

@ -0,0 +1,46 @@
package libimage
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"github.com/pkg/errors"
)
// tmpdir returns a path to a temporary directory: $TMPDIR when set,
// otherwise /var/tmp.
func (r *Runtime) tmpdir() string {
	if tmpdir := os.Getenv("TMPDIR"); tmpdir != "" {
		return tmpdir
	}
	return "/var/tmp"
}
// downloadFromURL downloads an image in the format "https:/example.com/myimage.tar"
// and temporarily saves in it $TMPDIR/importxyz, which is deleted after the image is imported.
// Returns the path of the temporary file; the caller is responsible for
// removing it after use.  On error, the temporary file is cleaned up here.
func (r *Runtime) downloadFromURL(source string) (string, error) {
	fmt.Printf("Downloading from %q\n", source)

	outFile, err := ioutil.TempFile(r.tmpdir(), "import")
	if err != nil {
		return "", errors.Wrap(err, "error creating file")
	}
	defer outFile.Close()

	response, err := http.Get(source) // nolint:noctx
	if err != nil {
		os.Remove(outFile.Name()) // best effort: don't leak the temp file
		return "", errors.Wrapf(err, "error downloading %q", source)
	}
	defer response.Body.Close()

	// http.Get only errors on transport failures; without this check a
	// 404/500 error page would be saved as if it were the image.
	if response.StatusCode != http.StatusOK {
		os.Remove(outFile.Name())
		return "", errors.Errorf("error downloading %q: %s", source, response.Status)
	}

	_, err = io.Copy(outFile, response.Body)
	if err != nil {
		os.Remove(outFile.Name())
		return "", errors.Wrapf(err, "error saving %s to %s", source, outFile.Name())
	}

	return outFile.Name(), nil
}

43
vendor/github.com/containers/common/libimage/events.go generated vendored Normal file
View File

@ -0,0 +1,43 @@
package libimage
import "time"
// EventType indicates the type of an event. Currently, there is only one
// supported type for container image but we may add more (e.g., for manifest
// lists) in the future.
type EventType int

const (
	// EventTypeUnknown is an uninitialized EventType.
	EventTypeUnknown EventType = iota
	// EventTypeImagePull represents an image pull.
	EventTypeImagePull
	// EventTypeImagePush represents an image push.
	EventTypeImagePush
	// EventTypeImageRemove represents an image removal.
	EventTypeImageRemove
	// EventTypeImageLoad represents an image being loaded.
	EventTypeImageLoad
	// EventTypeImageSave represents an image being saved.
	EventTypeImageSave
	// EventTypeImageTag represents an image being tagged.
	EventTypeImageTag
	// EventTypeImageUntag represents an image being untagged.
	EventTypeImageUntag
	// EventTypeImageMount represents an image being mounted.
	EventTypeImageMount
	// EventTypeImageUnmount represents an image being unmounted.
	EventTypeImageUnmount
)

// Event represents an event such an image pull or image tag.
type Event struct {
	// ID of the object (e.g., image ID).
	ID string
	// Name of the object (e.g., image name "quay.io/containers/podman:latest")
	Name string
	// Time of the event.
	Time time.Time
	// Type of the event.
	Type EventType
}

207
vendor/github.com/containers/common/libimage/filters.go generated vendored Normal file
View File

@ -0,0 +1,207 @@
package libimage
import (
"context"
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
filtersPkg "github.com/containers/common/pkg/filters"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// filterFunc is a prototype for a positive image filter. Returning `true`
// indicates that the image matches the criteria.
type filterFunc func(*Image) (bool, error)

// filterImages returns a slice of images which are passing all specified
// filters.  With no filters, the input slice is returned unmodified.
func filterImages(images []*Image, filters []filterFunc) ([]*Image, error) {
	if len(filters) == 0 {
		return images, nil
	}
	result := []*Image{}
	for _, img := range images {
		matchesAll := true
		for _, filter := range filters {
			matches, err := filter(img)
			if err != nil {
				return nil, err
			}
			if !matches {
				matchesAll = false
				break
			}
		}
		if matchesAll {
			result = append(result, img)
		}
	}
	return result, nil
}
// compileImageFilters creates `filterFunc`s for the specified filters. The
// required format is `key=value` with the following supported keys:
// after, since, before, dangling, id, label, readonly, reference, intermediate
//
// Each key may be specified at most once; "after" and "since" are synonyms
// but count as distinct keys here.
func (r *Runtime) compileImageFilters(ctx context.Context, filters []string) ([]filterFunc, error) {
	logrus.Tracef("Parsing image filters %s", filters)

	filterFuncs := []filterFunc{}
	// Tracks keys we have already seen so duplicates can be rejected.
	visitedKeys := make(map[string]bool)

	for _, filter := range filters {
		// First, parse the filter.
		var key, value string
		split := strings.SplitN(filter, "=", 2)
		if len(split) != 2 {
			return nil, errors.Errorf("invalid image filter %q: must be in the format %q", filter, "filter=value")
		}
		key = split[0]
		value = split[1]
		if _, exists := visitedKeys[key]; exists {
			return nil, errors.Errorf("image filter %q specified multiple times", key)
		}
		visitedKeys[key] = true

		// Second, dispatch the filters.
		switch key {

		case "after", "since":
			// Value must resolve to a local image; matches images
			// created after it.
			img, _, err := r.LookupImage(value, nil)
			if err != nil {
				return nil, errors.Wrapf(err, "could not find local image for filter %q", filter)
			}
			filterFuncs = append(filterFuncs, filterAfter(img.Created()))

		case "before":
			img, _, err := r.LookupImage(value, nil)
			if err != nil {
				return nil, errors.Wrapf(err, "could not find local image for filter %q", filter)
			}
			filterFuncs = append(filterFuncs, filterBefore(img.Created()))

		case "dangling":
			dangling, err := strconv.ParseBool(value)
			if err != nil {
				return nil, errors.Wrapf(err, "non-boolean value %q for dangling filter", value)
			}
			filterFuncs = append(filterFuncs, filterDangling(dangling))

		case "id":
			filterFuncs = append(filterFuncs, filterID(value))

		case "intermediate":
			intermediate, err := strconv.ParseBool(value)
			if err != nil {
				return nil, errors.Wrapf(err, "non-boolean value %q for intermediate filter", value)
			}
			filterFuncs = append(filterFuncs, filterIntermediate(ctx, intermediate))

		case "label":
			filterFuncs = append(filterFuncs, filterLabel(ctx, value))

		case "readonly":
			readOnly, err := strconv.ParseBool(value)
			if err != nil {
				return nil, errors.Wrapf(err, "non-boolean value %q for readonly filter", value)
			}
			filterFuncs = append(filterFuncs, filterReadOnly(readOnly))

		case "reference":
			filterFuncs = append(filterFuncs, filterReference(value))

		default:
			return nil, errors.Errorf("unsupported image filter %q", key)
		}
	}

	return filterFuncs, nil
}
// filterReference creates a reference filter for matching the specified value.
// The value is treated as a glob ("*value*") against each of the image's
// names; an empty value matches every image.
func filterReference(value string) filterFunc {
	// Replacing all '/' with '|' so that filepath.Match() can work '|'
	// character is not valid in image name, so this is safe.
	//
	// TODO: this has been copied from Podman and requires some more review
	// and especially tests.
	filter := fmt.Sprintf("*%s*", value)
	filter = strings.ReplaceAll(filter, "/", "|")
	return func(img *Image) (bool, error) {
		if len(value) < 1 {
			return true, nil
		}
		for _, name := range img.Names() {
			// Apply the same '/'-escaping to the candidate name.
			newName := strings.ReplaceAll(name, "/", "|")
			// Match errors are deliberately ignored; a bad pattern
			// simply fails to match.
			match, _ := filepath.Match(filter, newName)
			if match {
				return true, nil
			}
		}
		return false, nil
	}
}
// filterLabel creates a label filter for matching the specified value
// ("key" or "key=value") against the image's labels.
func filterLabel(ctx context.Context, value string) filterFunc {
	return func(img *Image) (bool, error) {
		labels, err := img.Labels(ctx)
		if err != nil {
			return false, err
		}
		return filtersPkg.MatchLabelFilters([]string{value}, labels), nil
	}
}

// filterAfter creates an after filter matching images created strictly
// after the specified time.
func filterAfter(value time.Time) filterFunc {
	return func(img *Image) (bool, error) {
		return img.Created().After(value), nil
	}
}

// filterBefore creates a before filter matching images created strictly
// before the specified time.
func filterBefore(value time.Time) filterFunc {
	return func(img *Image) (bool, error) {
		return img.Created().Before(value), nil
	}
}

// filterReadOnly creates a readonly filter for matching the specified value.
func filterReadOnly(value bool) filterFunc {
	return func(img *Image) (bool, error) {
		return img.IsReadOnly() == value, nil
	}
}

// filterDangling creates a dangling filter for matching the specified value.
func filterDangling(value bool) filterFunc {
	return func(img *Image) (bool, error) {
		return img.IsDangling() == value, nil
	}
}

// filterID creates an image-ID filter requiring an exact (full) ID match.
func filterID(value string) filterFunc {
	return func(img *Image) (bool, error) {
		return img.ID() == value, nil
	}
}

// filterIntermediate creates an intermediate filter for images.  An image is
// considered to be an intermediate image if it is dangling (i.e., no tags) and
// has no children (i.e., no other image depends on it).
func filterIntermediate(ctx context.Context, value bool) filterFunc {
	return func(img *Image) (bool, error) {
		isIntermediate, err := img.IsIntermediate(ctx)
		if err != nil {
			return false, err
		}
		return isIntermediate == value, nil
	}
}

View File

@ -0,0 +1,70 @@
package libimage
import (
"context"
libimageTypes "github.com/containers/common/libimage/types"
"github.com/containers/storage"
)
// History computes the image history of the image including all of its parents.
// Entries are returned newest-first, mirroring the reversed iteration over the
// OCI history below.
func (i *Image) History(ctx context.Context) ([]libimageTypes.ImageHistory, error) {
	ociImage, err := i.toOCI(ctx)
	if err != nil {
		return nil, err
	}

	layerTree, err := i.runtime.layerTree()
	if err != nil {
		return nil, err
	}

	var allHistory []libimageTypes.ImageHistory
	var layer *storage.Layer
	if i.TopLayer() != "" {
		layer, err = i.runtime.store.Layer(i.TopLayer())
		if err != nil {
			return nil, err
		}
	}

	// Iterate in reverse order over the history entries, and lookup the
	// corresponding image ID, size and get the next later if needed.
	numHistories := len(ociImage.History) - 1
	usedIDs := make(map[string]bool) // prevents assigning images IDs more than once
	for x := numHistories; x >= 0; x-- {
		history := libimageTypes.ImageHistory{
			ID:        "<missing>", // may be overridden below
			Created:   ociImage.History[x].Created,
			CreatedBy: ociImage.History[x].CreatedBy,
			Comment:   ociImage.History[x].Comment,
		}

		if layer != nil {
			history.Tags = layer.Names
			// Empty-layer entries contribute no size.
			if !ociImage.History[x].EmptyLayer {
				history.Size = layer.UncompressedSize
			}
			// Query the layer tree if it's the top layer of an
			// image.
			node := layerTree.node(layer.ID)
			if len(node.images) > 0 {
				id := node.images[0].ID() // always use the first one
				if _, used := usedIDs[id]; !used {
					history.ID = id
					usedIDs[id] = true
				}
			}
			// NOTE(review): the walk only advances to the parent
			// layer for non-empty history entries; confirm empty
			// entries are meant to reuse the current layer's data.
			if layer.Parent != "" && !ociImage.History[x].EmptyLayer {
				layer, err = i.runtime.store.Layer(layer.Parent)
				if err != nil {
					return nil, err
				}
			}
		}

		allHistory = append(allHistory, history)
	}

	return allHistory, nil
}

582
vendor/github.com/containers/common/libimage/image.go generated vendored Normal file
View File

@ -0,0 +1,582 @@
package libimage
import (
"context"
"path/filepath"
"sort"
"time"
libimageTypes "github.com/containers/common/libimage/types"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
storageTransport "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/hashicorp/go-multierror"
"github.com/opencontainers/go-digest"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// Image represents an image in the containers storage and allows for further
// operations and data manipulation.
type Image struct {
	// Backwards pointer to the runtime.
	runtime *Runtime

	// Counterpart in the local containers storage.
	storageImage *storage.Image

	// Image reference to the containers storage.  Lazily initialized by
	// StorageReference().
	storageReference types.ImageReference

	// All fields in the below structure are cached.  They may be cleared
	// at any time.  When adding a new field, please make sure to clear
	// it in `(*Image).reload()`.
	cached struct {
		// Image source. Cached for performance reasons.
		imageSource types.ImageSource
		// Inspect data we get from containers/image.
		partialInspectData *types.ImageInspectInfo
		// Fully assembled image data.
		completeInspectData *libimageTypes.ImageData
		// Corresponding OCI image.
		ociv1Image *ociv1.Image
	}
}
// reload the image and pessimistically clear all cached data.  Called after
// operations (e.g., tagging) that change the backing storage.Image.
func (i *Image) reload() error {
	logrus.Tracef("Reloading image %s", i.ID())
	img, err := i.runtime.store.Image(i.ID())
	if err != nil {
		return errors.Wrap(err, "error reloading image")
	}
	i.storageImage = img
	i.cached.imageSource = nil
	i.cached.partialInspectData = nil
	i.cached.completeInspectData = nil
	i.cached.ociv1Image = nil
	return nil
}
// Names returns associated names with the image which may be a mix of tags and
// digests.
func (i *Image) Names() []string {
	return i.storageImage.Names
}

// StorageImage returns the underlying storage.Image.
func (i *Image) StorageImage() *storage.Image {
	return i.storageImage
}

// NamesHistory returns a string array of names previously associated with the
// image, which may be a mixture of tags and digests.
func (i *Image) NamesHistory() []string {
	return i.storageImage.NamesHistory
}

// ID returns the ID of the image.
func (i *Image) ID() string {
	return i.storageImage.ID
}

// Digest is a digest value that we can use to locate the image, if one was
// specified at creation-time.
func (i *Image) Digest() digest.Digest {
	return i.storageImage.Digest
}

// Digests is a list of digest values of the image's manifests, and possibly a
// manually-specified value, that we can use to locate the image.  If Digest is
// set, its value is also in this list.
func (i *Image) Digests() []digest.Digest {
	return i.storageImage.Digests
}

// IsReadOnly returns whether the image is set read only.
func (i *Image) IsReadOnly() bool {
	return i.storageImage.ReadOnly
}

// IsDangling returns true if the image is dangling.  An image is considered
// dangling if no names are associated with it in the containers storage.
func (i *Image) IsDangling() bool {
	return len(i.Names()) == 0
}
// IsIntermediate returns true if the image is an intermediate image, that is
// a dangling image without children.
func (i *Image) IsIntermediate(ctx context.Context) (bool, error) {
	// If the image has tags, it's not an intermediate one.
	if !i.IsDangling() {
		return false, nil
	}
	// getChildren with all=false returns at most the first child, which
	// suffices for an emptiness check.
	children, err := i.getChildren(ctx, false)
	if err != nil {
		return false, err
	}
	// No tags, no children -> intermediate!
	// BUGFIX: this previously returned `len(children) != 0`, which
	// contradicted the documented semantics by reporting dangling images
	// *with* children as intermediate.
	return len(children) == 0, nil
}
// Created returns the time the image was created in the local storage.
func (i *Image) Created() time.Time {
	return i.storageImage.Created
}
// Labels returns the labels of the image.  For manifest lists / image
// indexes, which carry no labels, (nil, nil) is returned and the inspect
// error is logged instead of propagated.
func (i *Image) Labels(ctx context.Context) (map[string]string, error) {
	data, err := i.inspectInfo(ctx)
	if err != nil {
		isManifestList, listErr := i.isManifestList(ctx)
		if listErr != nil {
			// BUGFIX: report the fallback error (listErr) rather
			// than formatting the original error into its own wrap
			// message twice.
			err = errors.Wrapf(err, "fallback error checking whether image is a manifest list: %v", listErr)
		} else if isManifestList {
			logrus.Debugf("Ignoring error: cannot return labels for manifest list or image index %s", i.ID())
			return nil, nil
		}
		return nil, err
	}

	return data.Labels, nil
}
// TopLayer returns the top layer id as a string.
func (i *Image) TopLayer() string {
	return i.storageImage.TopLayer
}

// Parent returns the parent image or nil if there is none.
func (i *Image) Parent(ctx context.Context) (*Image, error) {
	tree, err := i.runtime.layerTree()
	if err != nil {
		return nil, err
	}
	return tree.parent(ctx, i)
}

// HasChildren indicates if the image has children.
func (i *Image) HasChildren(ctx context.Context) (bool, error) {
	children, err := i.getChildren(ctx, false)
	if err != nil {
		return false, err
	}
	return len(children) > 0, nil
}

// Children returns the image's children.
func (i *Image) Children(ctx context.Context) ([]*Image, error) {
	children, err := i.getChildren(ctx, true)
	if err != nil {
		return nil, err
	}
	return children, nil
}

// getChildren returns a list of imageIDs that depend on the image. If all is
// false, only the first child image is returned.
func (i *Image) getChildren(ctx context.Context, all bool) ([]*Image, error) {
	tree, err := i.runtime.layerTree()
	if err != nil {
		return nil, err
	}
	return tree.children(ctx, i, all)
}
// Containers returns the IDs of all containers in the local storage that
// use this image.
func (i *Image) Containers() ([]string, error) {
	allContainers, err := i.runtime.store.Containers()
	if err != nil {
		return nil, err
	}
	// Avoid shadowing the receiver in the loop below.
	imageID := i.ID()
	var containerIDs []string
	for c := range allContainers {
		if allContainers[c].ImageID == imageID {
			containerIDs = append(containerIDs, allContainers[c].ID)
		}
	}
	return containerIDs, nil
}
// removeContainers removes all containers using the image.  If fn is
// non-nil, it is invoked once with the image ID instead of the default
// per-container deletion below.
func (i *Image) removeContainers(fn RemoveContainerFunc) error {
	// Execute the custom removal func if specified.
	if fn != nil {
		logrus.Debugf("Removing containers of image %s with custom removal function", i.ID())
		return fn(i.ID())
	}

	containers, err := i.Containers()
	if err != nil {
		return err
	}

	logrus.Debugf("Removing containers of image %s from the local containers storage", i.ID())
	// Collect errors but keep going so one failure doesn't block the rest.
	var multiE error
	for _, cID := range containers {
		if err := i.runtime.store.DeleteContainer(cID); err != nil {
			// If the container does not exist anymore, we're good.
			if errors.Cause(err) != storage.ErrContainerUnknown {
				multiE = multierror.Append(multiE, err)
			}
		}
	}

	return multiE
}
// RemoveContainerFunc allows for customizing the removal of containers using
// an image specified by imageID.
type RemoveContainerFunc func(imageID string) error

// RemoveImageOptions allow for customizing image removal.
type RemoveImageOptions struct {
	// Force will remove all containers from the local storage that are
	// using a removed image.  Use RemoveContainerFunc for a custom logic.
	// If set, all child images will be removed as well.
	Force bool
	// RemoveContainerFunc allows for a custom logic for removing
	// containers using a specific image.  By default, all containers in
	// the local containers storage will be removed (if Force is set).
	RemoveContainerFunc RemoveContainerFunc
}
// Remove removes the image along with all dangling parent images that no other
// image depends on.  The image must not be set read-only and not be used by
// containers.  Callers must make sure to remove containers before image
// removal and may use `(*Image).Containers()` to get a list of containers
// using the image.
//
// If the image is used by containers return storage.ErrImageUsedByContainer.
// Use force to remove these containers.
func (i *Image) Remove(ctx context.Context, options *RemoveImageOptions) error {
	logrus.Debugf("Removing image %s", i.ID())

	if i.IsReadOnly() {
		return errors.Errorf("cannot remove read-only image %q", i.ID())
	}

	if options == nil {
		options = &RemoveImageOptions{}
	}

	if options.Force {
		if err := i.removeContainers(options.RemoveContainerFunc); err != nil {
			return err
		}
	}

	// If there's a dangling parent that no other image depends on, remove
	// it recursively.
	parent, err := i.Parent(ctx)
	if err != nil {
		return err
	}

	if _, err := i.runtime.store.DeleteImage(i.ID(), true); err != nil {
		return err
	}
	// Drop the runtime's bookkeeping entry for the removed ID.
	delete(i.runtime.imageIDmap, i.ID())

	if parent == nil || !parent.IsDangling() {
		return nil
	}
	// Recurse: the (now orphaned) dangling parent is removed with the
	// same options.
	return parent.Remove(ctx, options)
}
// Tag the image with the specified name and store it in the local containers
// storage.  The name is normalized according to the rules of NormalizeName.
func (i *Image) Tag(name string) error {
	ref, err := NormalizeName(name)
	if err != nil {
		return errors.Wrapf(err, "error normalizing name %q", name)
	}

	logrus.Debugf("Tagging image %s with %q", i.ID(), ref.String())

	// NOTE(review): append may write into the backing array of
	// i.storageImage.Names if it has spare capacity — confirm the
	// subsequent SetNames+reload makes that harmless.
	newNames := append(i.Names(), ref.String())
	if err := i.runtime.store.SetNames(i.ID(), newNames); err != nil {
		return err
	}

	return i.reload()
}

// Untag the image with the specified name and make the change persistent in
// the local containers storage.  The name is normalized according to the rules
// of NormalizeName.  Untagging a name the image does not carry is a no-op.
func (i *Image) Untag(name string) error {
	ref, err := NormalizeName(name)
	if err != nil {
		return errors.Wrapf(err, "error normalizing name %q", name)
	}
	name = ref.String()

	// Rebuild the name list without the requested name.
	removedName := false
	newNames := []string{}
	for _, n := range i.Names() {
		if n == name {
			removedName = true
			continue
		}
		newNames = append(newNames, n)
	}

	if !removedName {
		return nil
	}

	logrus.Debugf("Untagging %q from image %s", ref.String(), i.ID())

	if err := i.runtime.store.SetNames(i.ID(), newNames); err != nil {
		return err
	}

	return i.reload()
}
// RepoTags returns a string slice of repotags associated with the image.
func (i *Image) RepoTags() ([]string, error) {
	namedTagged, err := i.NamedTaggedRepoTags()
	if err != nil {
		return nil, err
	}
	repoTags := make([]string, len(namedTagged))
	for i := range namedTagged {
		repoTags[i] = namedTagged[i].String()
	}

	return repoTags, nil
}

// NamedTaggedRepoTags returns the repotags associated with the image as a
// slice of reference.NamedTagged.  Names that are digests (not tags) are
// skipped.
func (i *Image) NamedTaggedRepoTags() ([]reference.NamedTagged, error) {
	var repoTags []reference.NamedTagged
	for _, name := range i.Names() {
		named, err := reference.ParseNormalizedNamed(name)
		if err != nil {
			return nil, err
		}
		if tagged, isTagged := named.(reference.NamedTagged); isTagged {
			repoTags = append(repoTags, tagged)
		}
	}
	return repoTags, nil
}
// RepoDigests returns a string array of repodigests associated with the image.
// Every name is paired with every known digest; the result is deduplicated
// and sorted.
func (i *Image) RepoDigests() ([]string, error) {
	var repoDigests []string
	added := make(map[string]struct{})

	for _, name := range i.Names() {
		for _, imageDigest := range append(i.Digests(), i.Digest()) {
			if imageDigest == "" {
				continue
			}

			named, err := reference.ParseNormalizedNamed(name)
			if err != nil {
				return nil, err
			}

			// Drop any tag before attaching the digest.
			canonical, err := reference.WithDigest(reference.TrimNamed(named), imageDigest)
			if err != nil {
				return nil, err
			}

			if _, alreadyInList := added[canonical.String()]; !alreadyInList {
				repoDigests = append(repoDigests, canonical.String())
				added[canonical.String()] = struct{}{}
			}
		}
	}
	sort.Strings(repoDigests)
	return repoDigests, nil
}
// Mount the image with the specified mount options and label, both of which
// are directly passed down to the containers storage.  Returns the fully
// evaluated path to the mount point.  Note: ctx is currently unused.
func (i *Image) Mount(ctx context.Context, mountOptions []string, mountLabel string) (string, error) {
	mountPoint, err := i.runtime.store.MountImage(i.ID(), mountOptions, mountLabel)
	if err != nil {
		return "", err
	}
	// Resolve symlinks so callers get the real path.
	mountPoint, err = filepath.EvalSymlinks(mountPoint)
	if err != nil {
		return "", err
	}
	logrus.Debugf("Mounted image %s at %q", i.ID(), mountPoint)
	return mountPoint, nil
}

// Unmount the image.  Use force to ignore the reference counter and forcefully
// unmount.  Note: the debug message is emitted before the unmount is
// attempted.
func (i *Image) Unmount(force bool) error {
	logrus.Debugf("Unmounted image %s", i.ID())
	_, err := i.runtime.store.UnmountImage(i.ID(), force)
	return err
}

// MountPoint returns the fully-evaluated mount point of the image.  If the
// image isn't mounted (mount counter is zero), an empty string is returned
// with no error.
func (i *Image) MountPoint() (string, error) {
	counter, err := i.runtime.store.Mounted(i.TopLayer())
	if err != nil {
		return "", err
	}

	if counter == 0 {
		return "", nil
	}

	layer, err := i.runtime.store.Layer(i.TopLayer())
	if err != nil {
		return "", err
	}
	return filepath.EvalSymlinks(layer.MountPoint)
}

// Size computes the size of the image layers and associated data.
func (i *Image) Size() (int64, error) {
	return i.runtime.store.ImageSize(i.ID())
}
// HasDifferentDigest returns true if the image specified by `remoteRef` has a
// different digest than the local one.  This check can be useful to check for
// updates on remote registries.
func (i *Image) HasDifferentDigest(ctx context.Context, remoteRef types.ImageReference) (bool, error) {
	// We need to account for the arch that the image uses.  It seems
	// common on ARM to tweak this option to pull the correct image.  See
	// github.com/containers/podman/issues/6613.
	inspectInfo, err := i.inspectInfo(ctx)
	if err != nil {
		return false, err
	}

	// Work on a copy of the runtime's system context so the platform
	// overrides below do not leak into other operations.
	sys := i.runtime.systemContext
	sys.ArchitectureChoice = inspectInfo.Architecture
	// OS and variant may not be set, so let's check to avoid accidental
	// overrides of the runtime settings.
	if inspectInfo.Os != "" {
		sys.OSChoice = inspectInfo.Os
	}
	if inspectInfo.Variant != "" {
		sys.VariantChoice = inspectInfo.Variant
	}

	remoteImg, err := remoteRef.NewImage(ctx, &sys) // ImageCloser
	if err != nil {
		return false, err
	}
	// BUGFIX: NewImage returns a types.ImageCloser that must be closed to
	// release the underlying image source; it was previously leaked.
	defer remoteImg.Close()

	rawManifest, _, err := remoteImg.Manifest(ctx)
	if err != nil {
		return false, err
	}

	remoteDigest, err := manifest.Digest(rawManifest)
	if err != nil {
		return false, err
	}

	return i.Digest().String() != remoteDigest.String(), nil
}
// driverData gets the driver data from the store on a layer.
func (i *Image) driverData() (*libimageTypes.DriverData, error) {
	store := i.runtime.store
	layerID := i.TopLayer()
	driver, err := store.GraphDriver()
	if err != nil {
		return nil, err
	}
	metaData, err := driver.Metadata(layerID)
	if err != nil {
		return nil, err
	}
	// MergedDir is only meaningful while the layer is mounted; drop it
	// when the layer is unmounted or the mount query fails.
	if mountTimes, err := store.Mounted(layerID); mountTimes == 0 || err != nil {
		delete(metaData, "MergedDir")
	}

	return &libimageTypes.DriverData{
		Name: driver.String(),
		Data: metaData,
	}, nil
}
// StorageReference returns the image's reference to the containers storage
// using the image ID.  The reference is computed once and cached on the
// Image.
func (i *Image) StorageReference() (types.ImageReference, error) {
	if i.storageReference != nil {
		return i.storageReference, nil
	}
	// "@ID" addresses the image by ID rather than by name.
	ref, err := storageTransport.Transport.ParseStoreReference(i.runtime.store, "@"+i.ID())
	if err != nil {
		return nil, err
	}
	i.storageReference = ref
	return ref, nil
}
// isManifestList returns true if the image is a manifest list (Docker) or an
// image index (OCI).  This information may be useful to make certain execution
// paths more robust.
// NOTE: please use this function only to optimize specific execution paths.
// In general, errors should only be suppressed when necessary.
func (i *Image) isManifestList(ctx context.Context) (bool, error) {
	ref, err := i.StorageReference()
	if err != nil {
		return false, err
	}
	imgRef, err := ref.NewImageSource(ctx, &i.runtime.systemContext)
	if err != nil {
		return false, err
	}
	// BUGFIX: the ImageSource must be closed; it was previously leaked on
	// every call.  (This is a fresh source, distinct from the one cached
	// in i.cached.imageSource.)
	defer imgRef.Close()

	_, manifestType, err := imgRef.GetManifest(ctx, nil)
	if err != nil {
		return false, err
	}
	return manifest.MIMETypeIsMultiImage(manifestType), nil
}
// source returns the possibly cached image reference.
// NOTE: the returned ImageSource is deliberately kept open and cached for the
// lifetime of the image object; callers must not close it.
func (i *Image) source(ctx context.Context) (types.ImageSource, error) {
	if i.cached.imageSource != nil {
		return i.cached.imageSource, nil
	}
	ref, err := i.StorageReference()
	if err != nil {
		return nil, err
	}
	src, err := ref.NewImageSource(ctx, &i.runtime.systemContext)
	if err != nil {
		return nil, err
	}
	// Memoize for subsequent calls.
	i.cached.imageSource = src
	return src, nil
}
// getImageDigest creates an image object and uses the hex value of the config
// digest as the image ID for parsing the store reference.
func getImageDigest(ctx context.Context, src types.ImageReference, sys *types.SystemContext) (string, error) {
	img, err := src.NewImage(ctx, sys)
	if err != nil {
		return "", err
	}
	defer func() {
		if closeErr := img.Close(); closeErr != nil {
			logrus.Errorf("failed to close image: %q", closeErr)
		}
	}()
	digest := img.ConfigInfo().Digest
	if err := digest.Validate(); err != nil {
		return "", errors.Wrapf(err, "error getting config info")
	}
	// The leading "@" marks the value as an ID when parsed as a store
	// reference.
	return "@" + digest.Hex(), nil
}

View File

@ -0,0 +1,242 @@
package libimage
import (
"encoding/json"
"fmt"
"path/filepath"
"strconv"
"strings"
"github.com/containers/common/pkg/signal"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// ImageConfig is a wrapper around the OCIv1 Image Configuration struct exported
// by containers/image, but containing additional fields that are not supported
// by OCIv1 (but are by Docker v2) - notably OnBuild.
type ImageConfig struct {
	ociv1.ImageConfig
	// OnBuild holds ONBUILD trigger instructions (Docker v2 extension).
	OnBuild []string
}
// ImageConfigFromChanges produces a v1.ImageConfig from the --change flag that
// is accepted by several Podman commands. It accepts a (limited subset) of
// Dockerfile instructions.
// Valid changes are:
// * USER
// * EXPOSE
// * ENV
// * ENTRYPOINT
// * CMD
// * VOLUME
// * WORKDIR
// * LABEL
// * STOPSIGNAL
// * ONBUILD
//
// Returns an error on the first malformed change encountered.
func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint:gocyclo
	config := &ImageConfig{}
	for _, change := range changes {
		// First, let's assume proper Dockerfile format - space
		// separator between instruction and value
		split := strings.SplitN(change, " ", 2)
		if len(split) != 2 {
			// Fall back to "INSTRUCTION=VALUE" syntax.
			split = strings.SplitN(change, "=", 2)
			if len(split) != 2 {
				return nil, errors.Errorf("invalid change %q - must be formatted as KEY VALUE", change)
			}
		}
		outerKey := strings.ToUpper(strings.TrimSpace(split[0]))
		value := strings.TrimSpace(split[1])
		switch outerKey {
		case "USER":
			// Assume literal contents are the user.
			if value == "" {
				return nil, errors.Errorf("invalid change %q - must provide a value to USER", change)
			}
			config.User = value
		case "EXPOSE":
			// EXPOSE is either [portnum] or
			// [portnum]/[proto]
			// Protocol must be "tcp" or "udp"
			splitPort := strings.Split(value, "/")
			if len(splitPort) > 2 {
				return nil, errors.Errorf("invalid change %q - EXPOSE port must be formatted as PORT[/PROTO]", change)
			}
			portNum, err := strconv.Atoi(splitPort[0])
			if err != nil {
				return nil, errors.Wrapf(err, "invalid change %q - EXPOSE port must be an integer", change)
			}
			if portNum > 65535 || portNum <= 0 {
				return nil, errors.Errorf("invalid change %q - EXPOSE port must be a valid port number", change)
			}
			// Default to TCP when no protocol is given.
			proto := "tcp"
			if len(splitPort) > 1 {
				testProto := strings.ToLower(splitPort[1])
				switch testProto {
				case "tcp", "udp":
					proto = testProto
				default:
					return nil, errors.Errorf("invalid change %q - EXPOSE protocol must be TCP or UDP", change)
				}
			}
			if config.ExposedPorts == nil {
				config.ExposedPorts = make(map[string]struct{})
			}
			config.ExposedPorts[fmt.Sprintf("%d/%s", portNum, proto)] = struct{}{}
		case "ENV":
			// Format is either:
			// ENV key=value
			// ENV key=value key=value ...
			// ENV key value
			// Both keys and values can be surrounded by quotes to group them.
			// For now: we only support key=value
			// We will attempt to strip quotation marks if present.
			var (
				key, val string
			)
			splitEnv := strings.SplitN(value, "=", 2)
			key = splitEnv[0]
			// We do need a key
			if key == "" {
				return nil, errors.Errorf("invalid change %q - ENV must have at least one argument", change)
			}
			// Perfectly valid to not have a value
			if len(splitEnv) == 2 {
				val = splitEnv[1]
			}
			// Strip paired quotation marks around key and value.
			if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) {
				key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`)
			}
			if strings.HasPrefix(val, `"`) && strings.HasSuffix(val, `"`) {
				val = strings.TrimPrefix(strings.TrimSuffix(val, `"`), `"`)
			}
			config.Env = append(config.Env, fmt.Sprintf("%s=%s", key, val))
		case "ENTRYPOINT":
			// Two valid forms.
			// First, JSON array.
			// Second, not a JSON array - we interpret this as an
			// argument to `sh -c`, unless empty, in which case we
			// just use a blank entrypoint.
			testUnmarshal := []string{}
			if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil {
				// It ain't valid JSON, so assume it's an
				// argument to sh -c if not empty.
				if value != "" {
					config.Entrypoint = []string{"/bin/sh", "-c", value}
				} else {
					config.Entrypoint = []string{}
				}
			} else {
				// Valid JSON
				config.Entrypoint = testUnmarshal
			}
		case "CMD":
			// Same valid forms as entrypoint.
			// However, where ENTRYPOINT assumes that 'ENTRYPOINT '
			// means no entrypoint, CMD assumes it is 'sh -c' with
			// no third argument.
			testUnmarshal := []string{}
			if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil {
				// It ain't valid JSON, so assume it's an
				// argument to sh -c.
				// Only include volume if it's not ""
				config.Cmd = []string{"/bin/sh", "-c"}
				if value != "" {
					config.Cmd = append(config.Cmd, value)
				}
			} else {
				// Valid JSON
				config.Cmd = testUnmarshal
			}
		case "VOLUME":
			// Either a JSON array or a set of space-separated
			// paths.
			// Acts rather similar to ENTRYPOINT and CMD, but always
			// appends rather than replacing, and no sh -c prepend.
			testUnmarshal := []string{}
			if err := json.Unmarshal([]byte(value), &testUnmarshal); err != nil {
				// Not valid JSON, so split on spaces
				testUnmarshal = strings.Split(value, " ")
			}
			if len(testUnmarshal) == 0 {
				return nil, errors.Errorf("invalid change %q - must provide at least one argument to VOLUME", change)
			}
			for _, vol := range testUnmarshal {
				if vol == "" {
					return nil, errors.Errorf("invalid change %q - VOLUME paths must not be empty", change)
				}
				if config.Volumes == nil {
					config.Volumes = make(map[string]struct{})
				}
				config.Volumes[vol] = struct{}{}
			}
		case "WORKDIR":
			// This can be passed multiple times.
			// Each successive invocation is treated as relative to
			// the previous one - so WORKDIR /A, WORKDIR b,
			// WORKDIR c results in /A/b/c
			// Just need to check it's not empty...
			if value == "" {
				return nil, errors.Errorf("invalid change %q - must provide a non-empty WORKDIR", change)
			}
			config.WorkingDir = filepath.Join(config.WorkingDir, value)
		case "LABEL":
			// Same general idea as ENV, but we no longer allow " "
			// as a separator.
			// We didn't do that for ENV either, so nice and easy.
			// Potentially problematic: LABEL might theoretically
			// allow an = in the key? If people really do this, we
			// may need to investigate more advanced parsing.
			var (
				key, val string
			)
			splitLabel := strings.SplitN(value, "=", 2)
			// Unlike ENV, LABEL must have a value
			if len(splitLabel) != 2 {
				return nil, errors.Errorf("invalid change %q - LABEL must be formatted key=value", change)
			}
			key = splitLabel[0]
			val = splitLabel[1]
			// Strip paired quotation marks around key and value.
			if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) {
				key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`)
			}
			if strings.HasPrefix(val, `"`) && strings.HasSuffix(val, `"`) {
				val = strings.TrimPrefix(strings.TrimSuffix(val, `"`), `"`)
			}
			// Check key after we strip quotations
			if key == "" {
				return nil, errors.Errorf("invalid change %q - LABEL must have a non-empty key", change)
			}
			if config.Labels == nil {
				config.Labels = make(map[string]string)
			}
			config.Labels[key] = val
		case "STOPSIGNAL":
			// Check the provided signal for validity.
			killSignal, err := signal.ParseSignal(value)
			if err != nil {
				return nil, errors.Wrapf(err, "invalid change %q - KILLSIGNAL must be given a valid signal", change)
			}
			config.StopSignal = fmt.Sprintf("%d", killSignal)
		case "ONBUILD":
			// Onbuild always appends.
			if value == "" {
				return nil, errors.Errorf("invalid change %q - ONBUILD must be given an argument", change)
			}
			config.OnBuild = append(config.OnBuild, value)
		default:
			return nil, errors.Errorf("invalid change %q - invalid instruction %s", change, outerKey)
		}
	}
	return config, nil
}

View File

@ -0,0 +1,108 @@
package libimage
import (
"fmt"
"strings"
"github.com/docker/go-units"
)
// Box-drawing glyphs used when rendering the image layer tree.
const (
	imageTreeMiddleItem   = "├── " // an intermediate sibling entry
	imageTreeContinueItem = "│ "   // continuation of a parent branch
	imageTreeLastItem     = "└── " // the final sibling entry
)
// Tree generates a tree for the specified image and its layers. Use
// `traverseChildren` to traverse the layers of all children. By default, only
// layers of the image are printed.
func (i *Image) Tree(traverseChildren bool) (*strings.Builder, error) {
	// NOTE: a string builder prevents us from copying too much data around
	// and compiles the string when and where needed.
	sb := &strings.Builder{}
	// First print the pretty header for the target image.
	size, err := i.Size()
	if err != nil {
		return nil, err
	}
	repoTags, err := i.RepoTags()
	if err != nil {
		return nil, err
	}
	// NOTE(review): assumes image IDs are at least 12 characters long —
	// i.ID()[:12] would panic otherwise.
	fmt.Fprintf(sb, "Image ID: %s\n", i.ID()[:12])
	fmt.Fprintf(sb, "Tags: %s\n", repoTags)
	fmt.Fprintf(sb, "Size: %v\n", units.HumanSizeWithPrecision(float64(size), 4))
	if i.TopLayer() != "" {
		fmt.Fprintf(sb, "Image Layers\n")
	} else {
		fmt.Fprintf(sb, "No Image Layers\n")
	}
	layerTree, err := i.runtime.layerTree()
	if err != nil {
		return nil, err
	}
	imageNode := layerTree.node(i.TopLayer())
	// Traverse the entire tree down to all children.
	if traverseChildren {
		return imageTreeTraverseChildren(sb, imageNode, "", true)
	}
	// Walk all layers of the image and assemble their data.
	for parentNode := imageNode.parent; parentNode != nil; parentNode = parentNode.parent {
		indent := imageTreeMiddleItem
		if parentNode.parent == nil {
			// The root layer is the last entry in the listing.
			indent = imageTreeLastItem
		}
		var tags string
		repoTags, err := parentNode.repoTags()
		if err != nil {
			return nil, err
		}
		if len(repoTags) > 0 {
			tags = fmt.Sprintf(" Top Layer of: %s", repoTags)
		}
		fmt.Fprintf(sb, "%s ID: %s Size: %7v%s\n", indent, parentNode.layer.ID[:12], units.HumanSizeWithPrecision(float64(parentNode.layer.UncompressedSize), 4), tags)
	}
	return sb, nil
}
// imageTreeTraverseChildren recursively renders node's children into sb.
// prefix is the indentation accumulated so far; last reports whether node is
// the final child of its own parent, which selects the branch glyph.
func imageTreeTraverseChildren(sb *strings.Builder, node *layerNode, prefix string, last bool) (*strings.Builder, error) {
	if len(node.children) == 0 {
		return sb, nil
	}
	sb.WriteString(prefix)
	glyph := imageTreeMiddleItem
	if last {
		glyph = imageTreeLastItem
		prefix += " "
	} else {
		prefix += imageTreeContinueItem
	}
	for childIdx, child := range node.children {
		var tags string
		repoTags, err := child.repoTags()
		if err != nil {
			return nil, err
		}
		if len(repoTags) > 0 {
			tags = fmt.Sprintf(" Top Layer of: %s", repoTags)
		}
		fmt.Fprintf(sb, "%sID: %s Size: %7v%s\n", glyph, child.layer.ID[:12], units.HumanSizeWithPrecision(float64(child.layer.UncompressedSize), 4), tags)
		if sb, err = imageTreeTraverseChildren(sb, child, prefix, childIdx == len(node.children)-1); err != nil {
			return nil, err
		}
	}
	return sb, nil
}

107
vendor/github.com/containers/common/libimage/import.go generated vendored Normal file
View File

@ -0,0 +1,107 @@
package libimage
import (
"context"
"net/url"
"os"
storageTransport "github.com/containers/image/v5/storage"
tarballTransport "github.com/containers/image/v5/tarball"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// ImportOptions allow for customizing image imports.
type ImportOptions struct {
	CopyOptions
	// Apply the specified changes to the created image. Please refer to
	// `ImageConfigFromChanges` for supported change instructions.
	Changes []string
	// Set the commit message as a comment to created image's history.
	CommitMessage string
	// Tag the imported image with this value.  If empty, the config
	// digest of the imported tarball is used as the name instead.
	Tag string
}
// Import imports a custom tarball at the specified path. Returns the name of
// the imported image.  The path may also be a URL (in which case the payload
// is downloaded first) or "-" to read from stdin.
func (r *Runtime) Import(ctx context.Context, path string, options *ImportOptions) (string, error) {
	logrus.Debugf("Importing image from %q", path)
	if options == nil {
		options = &ImportOptions{}
	}
	// Apply the requested --change instructions to the image config.
	ic := v1.ImageConfig{}
	if len(options.Changes) > 0 {
		config, err := ImageConfigFromChanges(options.Changes)
		if err != nil {
			return "", err
		}
		ic = config.ImageConfig
	}
	// Record the commit message as the single history entry.
	hist := []v1.History{
		{Comment: options.CommitMessage},
	}
	config := v1.Image{
		Config:  ic,
		History: hist,
	}
	u, err := url.ParseRequestURI(path)
	if err == nil && u.Scheme != "" {
		// If source is a URL, download the file.
		file, err := r.downloadFromURL(path)
		if err != nil {
			return "", err
		}
		defer os.Remove(file)
		path = file
	} else if path == "-" {
		// "-" special cases stdin
		path = os.Stdin.Name()
	}
	srcRef, err := tarballTransport.Transport.ParseReference(path)
	if err != nil {
		return "", err
	}
	// Inject the assembled config and annotations into the tarball source.
	updater, ok := srcRef.(tarballTransport.ConfigUpdater)
	if !ok {
		return "", errors.New("unexpected type, a tarball reference should implement tarball.ConfigUpdater")
	}
	annotations := make(map[string]string)
	if err := updater.ConfigUpdate(config, annotations); err != nil {
		return "", err
	}
	// Unless the user requested a tag, name the image after its config
	// digest ("@<hex>").
	name := options.Tag
	if name == "" {
		name, err = getImageDigest(ctx, srcRef, &r.systemContext)
		if err != nil {
			return "", err
		}
	}
	destRef, err := storageTransport.Transport.ParseStoreReference(r.store, name)
	if err != nil {
		return "", err
	}
	c, err := newCopier(&r.systemContext, &options.CopyOptions)
	if err != nil {
		return "", err
	}
	defer c.close()
	if _, err := c.copy(ctx, srcRef, destRef); err != nil {
		return "", err
	}
	return name, nil
}

150
vendor/github.com/containers/common/libimage/inspect.go generated vendored Normal file
View File

@ -0,0 +1,150 @@
package libimage
import (
"context"
"encoding/json"
libimageTypes "github.com/containers/common/libimage/types"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
// Inspect inspects the image. Use `withSize` to also perform the
// comparatively expensive size computation of the image.  The fully assembled
// data is cached on the image object, so repeated calls are cheap.
func (i *Image) Inspect(ctx context.Context, withSize bool) (*libimageTypes.ImageData, error) {
	logrus.Debugf("Inspecting image %s", i.ID())
	if i.cached.completeInspectData != nil {
		return i.cached.completeInspectData, nil
	}
	// First assemble data that does not depend on the format of the image.
	info, err := i.inspectInfo(ctx)
	if err != nil {
		return nil, err
	}
	ociImage, err := i.toOCI(ctx)
	if err != nil {
		return nil, err
	}
	parentImage, err := i.Parent(ctx)
	if err != nil {
		return nil, err
	}
	repoTags, err := i.RepoTags()
	if err != nil {
		return nil, err
	}
	repoDigests, err := i.RepoDigests()
	if err != nil {
		return nil, err
	}
	driverData, err := i.driverData()
	if err != nil {
		return nil, err
	}
	// Size defaults to -1 unless the caller asked for the computation.
	size := int64(-1)
	if withSize {
		size, err = i.Size()
		if err != nil {
			return nil, err
		}
	}
	data := &libimageTypes.ImageData{
		ID:           i.ID(),
		RepoTags:     repoTags,
		RepoDigests:  repoDigests,
		Created:      ociImage.Created,
		Author:       ociImage.Author,
		Architecture: ociImage.Architecture,
		Os:           ociImage.OS,
		Config:       &ociImage.Config,
		Version:      info.DockerVersion,
		Size:         size,
		VirtualSize:  size, // TODO: they should be different (inherited from Podman)
		Digest:       i.Digest(),
		Labels:       info.Labels,
		RootFS: &libimageTypes.RootFS{
			Type:   ociImage.RootFS.Type,
			Layers: ociImage.RootFS.DiffIDs,
		},
		GraphDriver:  driverData,
		User:         ociImage.Config.User,
		History:      ociImage.History,
		NamesHistory: i.NamesHistory(),
	}
	if parentImage != nil {
		data.Parent = parentImage.ID()
	}
	// Determine the format of the image. How we determine certain data
	// depends on the format (e.g., Docker v2s2, OCI v1).
	src, err := i.source(ctx)
	if err != nil {
		return nil, err
	}
	manifestRaw, manifestType, err := src.GetManifest(ctx, nil)
	if err != nil {
		return nil, err
	}
	data.ManifestType = manifestType
	switch manifestType {
	// OCI image
	case ociv1.MediaTypeImageManifest:
		var ociManifest ociv1.Manifest
		if err := json.Unmarshal(manifestRaw, &ociManifest); err != nil {
			return nil, err
		}
		data.Annotations = ociManifest.Annotations
		// OCI has no top-level comment; use the first history entry's.
		if len(ociImage.History) > 0 {
			data.Comment = ociImage.History[0].Comment
		}
	// Docker image
	case manifest.DockerV2Schema2MediaType:
		var dockerManifest manifest.Schema2Image
		if err := json.Unmarshal(manifestRaw, &dockerManifest); err != nil {
			return nil, err
		}
		data.Comment = dockerManifest.Comment
		data.HealthCheck = dockerManifest.ContainerConfig.Healthcheck
	}
	// Cache the fully assembled data for subsequent calls.
	i.cached.completeInspectData = data
	return data, nil
}
// inspectInfo returns the image inspect info, memoizing the result on the
// image object so subsequent calls skip the storage round trip.
func (i *Image) inspectInfo(ctx context.Context) (*types.ImageInspectInfo, error) {
	if cached := i.cached.partialInspectData; cached != nil {
		return cached, nil
	}
	storageRef, err := i.StorageReference()
	if err != nil {
		return nil, err
	}
	img, err := storageRef.NewImage(ctx, &i.runtime.systemContext)
	if err != nil {
		return nil, err
	}
	defer img.Close()
	info, err := img.Inspect(ctx)
	if err != nil {
		return nil, err
	}
	i.cached.partialInspectData = info
	return info, nil
}

View File

@ -0,0 +1,249 @@
package libimage
import (
"context"
"github.com/containers/storage"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
// layerTree is an internal representation of local layers.
type layerTree struct {
	// nodes is the actual layer tree with layer IDs being keys.
	nodes map[string]*layerNode
	// ociCache is a cache for Image.ID -> OCI Image. Translations are done
	// on-demand.
	ociCache map[string]*ociv1.Image
}
// node returns the layerNode for the specified layerID, registering an empty
// node on first access so callers can link parents and children lazily.
func (t *layerTree) node(layerID string) *layerNode {
	if existing, ok := t.nodes[layerID]; ok {
		return existing
	}
	fresh := &layerNode{}
	t.nodes[layerID] = fresh
	return fresh
}
// toOCI returns an OCI image for the specified image, memoizing the
// conversion in the tree's cache.
func (t *layerTree) toOCI(ctx context.Context, i *Image) (*ociv1.Image, error) {
	if cached, ok := t.ociCache[i.ID()]; ok {
		return cached, nil
	}
	oci, err := i.toOCI(ctx)
	if err != nil {
		return nil, err
	}
	t.ociCache[i.ID()] = oci
	return oci, nil
}
// layerNode is a node in a layerTree.  Its ID is the key in a layerTree.
type layerNode struct {
	// children are nodes whose layer has this node's layer as parent.
	children []*layerNode
	// images are the images whose top layer is this node's layer.
	images []*Image
	// parent is the node of the parent layer, nil for base layers.
	parent *layerNode
	// layer is the underlying storage layer.
	layer *storage.Layer
}
// repoTags assembles the repo tags of all images attached to the layer node,
// preserving first-seen order and dropping duplicates.
func (l *layerNode) repoTags() ([]string, error) {
	ordered := []string{}
	seen := make(map[string]bool)
	for _, img := range l.images {
		tags, err := img.RepoTags()
		if err != nil {
			return nil, err
		}
		for _, tag := range tags {
			if seen[tag] {
				continue
			}
			seen[tag] = true
			ordered = append(ordered, tag)
		}
	}
	return ordered, nil
}
// layerTree extracts a layerTree from the layers in the local storage and
// relates them to the specified images.
func (r *Runtime) layerTree() (*layerTree, error) {
	layers, err := r.store.Layers()
	if err != nil {
		return nil, err
	}
	images, err := r.ListImages(context.Background(), nil, nil)
	if err != nil {
		return nil, err
	}
	tree := layerTree{
		nodes:    make(map[string]*layerNode),
		ociCache: make(map[string]*ociv1.Image),
	}
	// First build a tree purely based on layer information.
	for i := range layers {
		node := tree.node(layers[i].ID)
		node.layer = &layers[i]
		if layers[i].Parent == "" {
			// Base layer: no parent to link.
			continue
		}
		parent := tree.node(layers[i].Parent)
		node.parent = parent
		parent.children = append(parent.children, node)
	}
	// Now assign the images to each (top) layer.
	for i := range images {
		img := images[i] // do not leak loop variable outside the scope
		topLayer := img.TopLayer()
		if topLayer == "" {
			continue
		}
		node, exists := tree.nodes[topLayer]
		if !exists {
			// Note: erroring out in this case has turned out having been a
			// mistake. Users may not be able to recover, so we're now
			// throwing a warning to guide them to resolve the issue and
			// turn the errors non-fatal.
			logrus.Warnf("Top layer %s of image %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", topLayer, img.ID())
			continue
		}
		node.images = append(node.images, img)
	}
	return &tree, nil
}
// children returns the child images of parent. Child images are images with
// either the same top layer as parent or parent being the true parent layer.
// Furthermore, the history of the parent and child images must match with the
// parent having one history item less. If all is true, all images are
// returned. Otherwise, the first image is returned.
func (t *layerTree) children(ctx context.Context, parent *Image, all bool) ([]*Image, error) {
	if parent.TopLayer() == "" {
		return nil, nil
	}
	var children []*Image
	parentNode, exists := t.nodes[parent.TopLayer()]
	if !exists {
		// Note: erroring out in this case has turned out having been a
		// mistake. Users may not be able to recover, so we're now
		// throwing a warning to guide them to resolve the issue and
		// turn the errors non-fatal.
		logrus.Warnf("Layer %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", parent.TopLayer())
		return children, nil
	}
	parentID := parent.ID()
	parentOCI, err := t.toOCI(ctx, parent)
	if err != nil {
		return nil, err
	}
	// checkParent returns true if child and parent are in such a relation.
	checkParent := func(child *Image) (bool, error) {
		if parentID == child.ID() {
			return false, nil
		}
		childOCI, err := t.toOCI(ctx, child)
		if err != nil {
			return false, err
		}
		// History check.
		return areParentAndChild(parentOCI, childOCI), nil
	}
	// addChildrenFromNode adds child images of parent to children. Returns
	// true if any image is a child of parent.
	addChildrenFromNode := func(node *layerNode) (bool, error) {
		foundChildren := false
		for i, childImage := range node.images {
			isChild, err := checkParent(childImage)
			if err != nil {
				return foundChildren, err
			}
			if isChild {
				foundChildren = true
				children = append(children, node.images[i])
				// BUGFIX: stop after the first match only when the
				// caller did NOT request all children.  The previous
				// condition (`if all`) was inverted and contradicted
				// the documented contract of this function.
				if !all {
					return foundChildren, nil
				}
			}
		}
		return foundChildren, nil
	}
	// First check images where parent's top layer is also the parent
	// layer.
	for _, childNode := range parentNode.children {
		found, err := addChildrenFromNode(childNode)
		if err != nil {
			return nil, err
		}
		// BUGFIX (see above): return early only when a single child
		// was requested.
		if found && !all {
			return children, nil
		}
	}
	// Now check images with the same top layer.
	if _, err := addChildrenFromNode(parentNode); err != nil {
		return nil, err
	}
	return children, nil
}
// parent returns the parent image or nil if no parent image could be found.
// Candidates are images on the parent layer (true parent) and images sharing
// this image's top layer; the history check decides the actual relation.
func (t *layerTree) parent(ctx context.Context, child *Image) (*Image, error) {
	if child.TopLayer() == "" {
		return nil, nil
	}
	node, exists := t.nodes[child.TopLayer()]
	if !exists {
		// Note: erroring out in this case has turned out having been a
		// mistake. Users may not be able to recover, so we're now
		// throwing a warning to guide them to resolve the issue and
		// turn the errors non-fatal.
		logrus.Warnf("Layer %s not found in layer tree. The storage may be corrupted, consider running `podman system reset`.", child.TopLayer())
		return nil, nil
	}
	childOCI, err := t.toOCI(ctx, child)
	if err != nil {
		return nil, err
	}
	// Check images from the parent node (i.e., parent layer) and images
	// with the same layer (i.e., same top layer).
	childID := child.ID()
	// BUGFIX: collect candidates in a freshly allocated slice.  Appending
	// to a copy of node.images' slice header could write into its shared
	// backing array when spare capacity exists, corrupting the tree.
	candidates := make([]*Image, 0, len(node.images))
	candidates = append(candidates, node.images...)
	if node.parent != nil {
		candidates = append(candidates, node.parent.images...)
	}
	for _, parent := range candidates {
		if parent.ID() == childID {
			continue
		}
		parentOCI, err := t.toOCI(ctx, parent)
		if err != nil {
			return nil, err
		}
		// History check.
		if areParentAndChild(parentOCI, childOCI) {
			return parent, nil
		}
	}
	return nil, nil
}

83
vendor/github.com/containers/common/libimage/load.go generated vendored Normal file
View File

@ -0,0 +1,83 @@
package libimage
import (
"context"
"errors"
dirTransport "github.com/containers/image/v5/directory"
dockerArchiveTransport "github.com/containers/image/v5/docker/archive"
ociArchiveTransport "github.com/containers/image/v5/oci/archive"
ociTransport "github.com/containers/image/v5/oci/layout"
"github.com/sirupsen/logrus"
)
// LoadOptions allow for customizing image loads.
type LoadOptions struct {
	CopyOptions
}
// Load loads one or more images (depending on the transport) from the
// specified path.  The path may point to an image using one of the following
// transports: oci, oci-archive, dir, docker-archive.  The transports are
// probed in that order; the first one that succeeds wins.
func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ([]string, error) {
	logrus.Debugf("Loading image from %q", path)
	var (
		loadedImages []string
		loadError    error
	)
	if options == nil {
		options = &LoadOptions{}
	}
	for _, f := range []func() ([]string, error){
		// OCI
		func() ([]string, error) {
			ref, err := ociTransport.NewReference(path, "")
			if err != nil {
				return nil, err
			}
			return r.copyFromDefault(ctx, ref, &options.CopyOptions)
		},
		// OCI-ARCHIVE
		func() ([]string, error) {
			ref, err := ociArchiveTransport.NewReference(path, "")
			if err != nil {
				return nil, err
			}
			return r.copyFromDefault(ctx, ref, &options.CopyOptions)
		},
		// DIR
		func() ([]string, error) {
			ref, err := dirTransport.NewReference(path)
			if err != nil {
				return nil, err
			}
			return r.copyFromDefault(ctx, ref, &options.CopyOptions)
		},
		// DOCKER-ARCHIVE
		func() ([]string, error) {
			ref, err := dockerArchiveTransport.ParseReference(path)
			if err != nil {
				return nil, err
			}
			return r.copyFromDockerArchive(ctx, ref, &options.CopyOptions)
		},
		// Give a decent error message if nothing above worked.
		func() ([]string, error) {
			return nil, errors.New("payload does not match any of the supported image formats (oci, oci-archive, dir, docker-archive)")
		},
	} {
		loadedImages, loadError = f()
		if loadError == nil {
			return loadedImages, loadError
		}
		logrus.Debugf("Error loading %s: %v", path, loadError)
	}
	// All attempts failed; report the last error (the generic message).
	return nil, loadError
}

View File

@ -6,8 +6,8 @@ import (
stderrors "errors"
"io"
"github.com/containers/buildah/pkg/manifests"
"github.com/containers/buildah/pkg/supplemented"
"github.com/containers/common/pkg/manifests"
"github.com/containers/common/pkg/supplemented"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"

View File

@ -0,0 +1,84 @@
package libimage
import (
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/pkg/errors"
)
// NormalizeName normalizes the provided name according to the conventions by
// Podman and Buildah. If tag and digest are missing, the "latest" tag will be
// used. If it's a short name, it will be prefixed with "localhost/".
//
// References to docker.io are normalized according to the Docker conventions.
// For instance, "docker.io/foo" turns into "docker.io/library/foo".
func NormalizeName(name string) (reference.Named, error) {
	// NOTE: this code is in symmetry with containers/image/pkg/shortnames.
	ref, err := reference.Parse(name)
	if err != nil {
		return nil, err
	}
	named, ok := ref.(reference.Named)
	if !ok {
		return nil, errors.Errorf("%q is not a named reference", name)
	}
	// Enforce "localhost" if needed: a domain without a dot or port is a
	// short name and must not silently resolve to a remote registry.
	registry := reference.Domain(named)
	if !(strings.ContainsAny(registry, ".:") || registry == "localhost") {
		name = toLocalImageName(ref.String())
	}
	// Another parse which also makes sure that docker.io references are
	// correctly normalized (e.g., docker.io/alpine to
	// docker.io/library/alpine).
	named, err = reference.ParseNormalizedNamed(name)
	if err != nil {
		return nil, err
	}
	// A tag or digest already pins the reference; return as-is.
	if _, hasTag := named.(reference.NamedTagged); hasTag {
		return named, nil
	}
	if _, hasDigest := named.(reference.Digested); hasDigest {
		return named, nil
	}
	// Make sure to tag "latest".
	return reference.TagNameOnly(named), nil
}
// toLocalImageName prefixes the specified name with "localhost/" after
// stripping any leading slashes.
func toLocalImageName(name string) string {
	trimmed := strings.TrimLeft(name, "/")
	return "localhost/" + trimmed
}
// NameTagPair represents a RepoTag of an image.
type NameTagPair struct {
	// Name of the RepoTag. May be "<none>".
	Name string
	// Tag of the RepoTag. May be "<none>".
	Tag string
}
// ToNameTagPairs splits repoTags into name&tag pairs.
// Guaranteed to return at least one pair.
func ToNameTagPairs(repoTags []reference.NamedTagged) ([]NameTagPair, error) {
	const none = "<none>"
	pairs := make([]NameTagPair, 0, len(repoTags))
	for _, named := range repoTags {
		pair := NameTagPair{Name: named.Name(), Tag: none}
		// The assertion guards against a nil dynamic value inside the
		// interface; in that case the tag stays "<none>".
		if tagged, isTagged := named.(reference.NamedTagged); isTagged {
			pair.Tag = tagged.Tag()
		}
		pairs = append(pairs, pair)
	}
	if len(pairs) == 0 {
		pairs = append(pairs, NameTagPair{Name: none, Tag: none})
	}
	return pairs, nil
}

97
vendor/github.com/containers/common/libimage/oci.go generated vendored Normal file
View File

@ -0,0 +1,97 @@
package libimage
import (
"context"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// toOCI returns the image as OCI v1 image, preferring the cached conversion
// when one is available.
func (i *Image) toOCI(ctx context.Context) (*ociv1.Image, error) {
	if cached := i.cached.ociv1Image; cached != nil {
		return cached, nil
	}
	storageRef, err := i.StorageReference()
	if err != nil {
		return nil, err
	}
	img, err := storageRef.NewImage(ctx, &i.runtime.systemContext)
	if err != nil {
		return nil, err
	}
	defer img.Close()
	return img.OCIConfig(ctx)
}
// historiesMatch returns the number of leading entries in the two histories
// that have identical contents.
func historiesMatch(a, b []ociv1.History) int {
	for i := 0; i < len(a) && i < len(b); i++ {
		entryA, entryB := a[i], b[i]
		// A timestamp present on one side but not the other is a
		// mismatch.
		if (entryA.Created == nil) != (entryB.Created == nil) {
			return i
		}
		if entryA.Created != nil && !entryA.Created.Equal(*entryB.Created) {
			return i
		}
		if entryA.CreatedBy != entryB.CreatedBy ||
			entryA.Author != entryB.Author ||
			entryA.Comment != entryB.Comment ||
			entryA.EmptyLayer != entryB.EmptyLayer {
			return i
		}
	}
	// All compared entries matched; the match length is the shorter
	// history's length.
	if len(a) < len(b) {
		return len(a)
	}
	return len(b)
}
// areParentAndChild checks diff IDs and history in the two images and returns
// true if the second should be considered to be directly based on the first.
func areParentAndChild(parent, child *ociv1.Image) bool {
	// Both, child and parent, may be nil when the storage is left in an
	// incoherent state. Issue #7444 describes such a case when a build
	// has been killed.
	if parent == nil || child == nil {
		return false
	}
	// The child and candidate parent should share all of the candidate
	// parent's diff IDs, which together would have controlled which layers
	// were used.
	if len(parent.RootFS.DiffIDs) > len(child.RootFS.DiffIDs) {
		return false
	}
	for idx, diffID := range parent.RootFS.DiffIDs {
		if child.RootFS.DiffIDs[idx] != diffID {
			return false
		}
	}
	// The child should have the same history as the parent, plus one more
	// entry.
	if len(child.History) != len(parent.History)+1 {
		return false
	}
	return historiesMatch(parent.History, child.History) == len(parent.History)
}

451
vendor/github.com/containers/common/libimage/pull.go generated vendored Normal file
View File

@ -0,0 +1,451 @@
package libimage
import (
"context"
"fmt"
"io"
"strings"
libimageTypes "github.com/containers/common/libimage/types"
dirTransport "github.com/containers/image/v5/directory"
dockerTransport "github.com/containers/image/v5/docker"
dockerArchiveTransport "github.com/containers/image/v5/docker/archive"
"github.com/containers/image/v5/docker/reference"
ociArchiveTransport "github.com/containers/image/v5/oci/archive"
ociTransport "github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/pkg/shortnames"
storageTransport "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// PullOptions allows for customizing image pulls.
type PullOptions struct {
	CopyOptions
	// If true, all tags of the image will be pulled from the container
	// registry. Only supported for the docker transport.
	AllTags bool
}
// Pull pulls the specified name.  Name may refer to any of the supported
// transports from github.com/containers/image.  If no transport is encoded,
// name will be treated as a reference to a registry (i.e., docker transport).
//
// Note that pullPolicy is only used when pulling from a container registry but
// it *must* be different than the default value `PullPolicyUnsupported`.  This
// way, callers are forced to decide on the pull behaviour.  The reasoning
// behind is that some (commands of some) tools have different default pull
// policies (e.g., buildah-bud versus podman-build).  Making the pull-policy
// choice explicit is an attempt to prevent silent regressions.
//
// The error is storage.ErrImageUnknown iff the pull policy is set to "never"
// and no local image has been found.  This allows for an easier integration
// into some users of this package (e.g., Buildah).
func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy libimageTypes.PullPolicy, options *PullOptions) ([]*Image, error) {
	logrus.Debugf("Pulling image %s", name)

	if options == nil {
		options = &PullOptions{}
	}

	ref, err := alltransports.ParseImageName(name)
	if err != nil {
		// If the image clearly refers to a local one, we can look it up directly.
		// In fact, we need to since they are not parseable.
		// BUGFIX: use strings.ContainsAny (any of the characters) rather
		// than strings.Contains (the literal substring "/.:@"), so that a
		// 64-character hex ID is correctly distinguished from a name that
		// merely has length 64.
		if strings.HasPrefix(name, "sha256:") || (len(name) == 64 && !strings.ContainsAny(name, "/.:@")) {
			if pullPolicy == libimageTypes.PullPolicyAlways {
				return nil, errors.Errorf("pull policy is always but image has been referred to by ID (%s)", name)
			}
			local, _, err := r.LookupImage(name, nil)
			if err != nil {
				return nil, err
			}
			if local == nil {
				return nil, errors.Wrap(storage.ErrImageUnknown, name)
			}
			return []*Image{local}, err
		}

		// If the input does not include a transport assume it refers
		// to a registry.
		dockerRef, dockerErr := alltransports.ParseImageName("docker://" + name)
		if dockerErr != nil {
			return nil, err
		}
		ref = dockerRef
	}

	if options.AllTags && ref.Transport().Name() != dockerTransport.Transport.Name() {
		return nil, errors.Errorf("pulling all tags is not supported for %s transport", ref.Transport().Name())
	}

	var (
		pulledImages []string
		pullError    error
	)

	// Dispatch the copy operation.
	switch ref.Transport().Name() {
	// DOCKER/REGISTRY
	case dockerTransport.Transport.Name():
		pulledImages, pullError = r.copyFromRegistry(ctx, ref, strings.TrimPrefix(name, "docker://"), pullPolicy, options)
	// DOCKER ARCHIVE
	case dockerArchiveTransport.Transport.Name():
		pulledImages, pullError = r.copyFromDockerArchive(ctx, ref, &options.CopyOptions)
	// OCI
	case ociTransport.Transport.Name():
		pulledImages, pullError = r.copyFromDefault(ctx, ref, &options.CopyOptions)
	// OCI ARCHIVE
	case ociArchiveTransport.Transport.Name():
		pulledImages, pullError = r.copyFromDefault(ctx, ref, &options.CopyOptions)
	// DIR
	case dirTransport.Transport.Name():
		pulledImages, pullError = r.copyFromDefault(ctx, ref, &options.CopyOptions)
	// UNSUPPORTED
	default:
		return nil, errors.Errorf("unsupported transport %q for pulling", ref.Transport().Name())
	}

	if pullError != nil {
		return nil, pullError
	}

	// Resolve every pulled name back to an Image in the local storage.
	localImages := []*Image{}
	for _, name := range pulledImages {
		local, _, err := r.LookupImage(name, nil)
		if err != nil {
			return nil, errors.Wrapf(err, "error locating pulled image %q name in containers storage", name)
		}
		if local == nil {
			return nil, errors.Wrap(storage.ErrImageUnknown, name)
		}
		localImages = append(localImages, local)
	}

	return localImages, pullError
}
// copyFromDefault is the default copier for a number of transports.  Other
// transports require some specific dancing, sometimes Yoga.
//
// It copies ref into the local containers storage and returns the single
// image name the copy is stored under.
func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, options *CopyOptions) ([]string, error) {
	c, err := newCopier(&r.systemContext, options)
	if err != nil {
		return nil, err
	}
	defer c.close()

	// Figure out a name for the storage destination.
	var storageName, imageName string
	switch ref.Transport().Name() {

	case ociTransport.Transport.Name():
		// OCI layout: the part before the first ":" is the path on disk.
		split := strings.SplitN(ref.StringWithinTransport(), ":", 2)
		storageName = toLocalImageName(split[0])
		imageName = storageName

	case ociArchiveTransport.Transport.Name():
		manifest, err := ociArchiveTransport.LoadManifestDescriptor(ref)
		if err != nil {
			return nil, err
		}
		// if index.json has no reference name, compute the image digest instead
		if manifest.Annotations == nil || manifest.Annotations["org.opencontainers.image.ref.name"] == "" {
			storageName, err = getImageDigest(ctx, ref, nil)
			if err != nil {
				return nil, err
			}
			// NOTE(review): assumes getImageDigest returns "@sha256..."
			// so dropping the first byte yields the bare digest — confirm
			// against getImageDigest's implementation.
			imageName = "sha256:" + storageName[1:]
		} else {
			storageName = manifest.Annotations["org.opencontainers.image.ref.name"]
			imageName = storageName
		}

	default:
		storageName = toLocalImageName(ref.StringWithinTransport())
		imageName = storageName
	}

	// Create a storage reference.
	destRef, err := storageTransport.Transport.ParseStoreReference(r.store, storageName)
	if err != nil {
		return nil, err
	}

	_, err = c.copy(ctx, ref, destRef)
	return []string{imageName}, err
}
// storageReferencesFromArchiveReader returns a slice of image references inside the
// archive reader.  A docker archive may include more than one image and this
// method allows for extracting them into containers storage references which
// can later be used from copying.
//
// NOTE(review): the function name doubles "References"; renaming would touch
// the call sites, so it is left unchanged here.
//
// Returns the storage references and, in parallel, the names under which the
// images will be addressable after copying.
func (r *Runtime) storageReferencesReferencesFromArchiveReader(ctx context.Context, readerRef types.ImageReference, reader *dockerArchiveTransport.Reader) ([]types.ImageReference, []string, error) {
	destNames, err := reader.ManifestTagsForReference(readerRef)
	if err != nil {
		return nil, nil, err
	}

	var imageNames []string
	if len(destNames) == 0 {
		// Untagged image: fall back to the image digest as the name.
		destName, err := getImageDigest(ctx, readerRef, &r.systemContext)
		if err != nil {
			return nil, nil, err
		}
		destNames = append(destNames, destName)
		// Make sure the image can be loaded after the pull by
		// replacing the @ with sha256:.
		imageNames = append(imageNames, "sha256:"+destName[1:])
	} else {
		// Normalize each tag so lookups find them later.
		for i := range destNames {
			ref, err := NormalizeName(destNames[i])
			if err != nil {
				return nil, nil, err
			}
			destNames[i] = ref.String()
		}
		imageNames = destNames
	}

	// Turn each name into a containers-storage reference.
	references := []types.ImageReference{}
	for _, destName := range destNames {
		destRef, err := storageTransport.Transport.ParseStoreReference(r.store, destName)
		if err != nil {
			return nil, nil, errors.Wrapf(err, "error parsing dest reference name %#v", destName)
		}
		references = append(references, destRef)
	}

	return references, imageNames, nil
}
// copyFromDockerArchive copies one or more images from the specified
// reference into the local containers storage and returns the names the
// images can be looked up with afterwards.
func (r *Runtime) copyFromDockerArchive(ctx context.Context, ref types.ImageReference, options *CopyOptions) ([]string, error) {
	imageCopier, err := newCopier(&r.systemContext, options)
	if err != nil {
		return nil, err
	}
	defer imageCopier.close()

	// A docker archive may bundle more than one image, so take a quick
	// glimpse inside before copying.
	reader, readerRef, err := dockerArchiveTransport.NewReaderForReference(&r.systemContext, ref)
	if err != nil {
		return nil, err
	}

	// Translate the archive's contents into storage references we can
	// copy to.
	destRefs, imageNames, err := r.storageReferencesReferencesFromArchiveReader(ctx, readerRef, reader)
	if err != nil {
		return nil, err
	}

	// Copy every image; readerRef is used as the source for performance.
	for _, destRef := range destRefs {
		if _, err := imageCopier.copy(ctx, readerRef, destRef); err != nil {
			return nil, err
		}
	}

	return imageNames, nil
}
// copyFromRegistry pulls the specified, possibly unqualified, name from a
// registry.  On successful pull it returns the used fully-qualified name that
// can later be used to look up the image in the local containers storage.
//
// If options.AllTags is set, every tag of the repository is pulled.
func (r *Runtime) copyFromRegistry(ctx context.Context, ref types.ImageReference, inputName string, pullPolicy libimageTypes.PullPolicy, options *PullOptions) ([]string, error) {
	// Sanity check the policy before doing any work.
	if err := pullPolicy.Validate(); err != nil {
		return nil, err
	}

	// The common case: a single image.
	if !options.AllTags {
		return r.copySingleImageFromRegistry(ctx, inputName, pullPolicy, options)
	}

	// All-tags pull: enumerate the repository and pull each tag in turn.
	repo := reference.TrimNamed(ref.DockerReference())
	allTags, err := dockerTransport.GetRepositoryTags(ctx, &r.systemContext, ref)
	if err != nil {
		return nil, err
	}

	pulled := []string{}
	for _, tag := range allTags {
		taggedRef, err := reference.WithTag(repo, tag)
		if err != nil {
			return nil, errors.Wrapf(err, "error creating tagged reference (name %s, tag %s)", repo.String(), tag)
		}
		names, err := r.copySingleImageFromRegistry(ctx, taggedRef.String(), pullPolicy, options)
		if err != nil {
			return nil, err
		}
		pulled = append(pulled, names...)
	}

	return pulled, nil
}
// copySingleImageFromRegistry pulls the specified, possibly unqualified, name
// from a registry.  On successful pull it returns the used fully-qualified
// name that can later be used to look up the image in the local containers
// storage.
func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName string, pullPolicy libimageTypes.PullPolicy, options *PullOptions) ([]string, error) {
	// Sanity check.
	if err := pullPolicy.Validate(); err != nil {
		return nil, err
	}

	var (
		localImage        *Image
		resolvedImageName string
		err               error
	)

	// Always check if there's a local image.  If so, we should use its
	// resolved name for pulling.  Assume we're doing a `pull foo`.
	// If there's already a local image "localhost/foo", then we should
	// attempt pulling that instead of doing the full short-name dance.
	localImage, resolvedImageName, err = r.LookupImage(imageName, nil)
	if err != nil {
		return nil, errors.Wrap(err, "error looking up local image")
	}

	if pullPolicy == libimageTypes.PullPolicyNever {
		// BUGFIX: the two debug messages below were swapped; each now
		// matches the branch it is logged in.
		if localImage != nil {
			logrus.Debugf("Pull policy %q and %s resolved to local image %s", pullPolicy, imageName, resolvedImageName)
			return []string{resolvedImageName}, nil
		}
		logrus.Debugf("Pull policy %q but no local image has been found for %s", pullPolicy, imageName)
		return nil, errors.Wrap(storage.ErrImageUnknown, imageName)
	}

	if pullPolicy == libimageTypes.PullPolicyMissing && localImage != nil {
		return []string{resolvedImageName}, nil
	}

	// If we looked up the image by ID, we cannot really pull from anywhere.
	if localImage != nil && strings.HasPrefix(localImage.ID(), imageName) {
		switch pullPolicy {
		case libimageTypes.PullPolicyAlways:
			return nil, errors.Errorf("pull policy is always but image has been referred to by ID (%s)", imageName)
		default:
			return []string{resolvedImageName}, nil
		}
	}

	// If we found a local image, we should use its locally resolved name
	// (see containers/buildah #2904).
	if localImage != nil {
		if imageName != resolvedImageName {
			logrus.Debugf("Image %s resolved to local image %s which will be used for pulling", imageName, resolvedImageName)
		}
		imageName = resolvedImageName
	}

	resolved, err := shortnames.Resolve(&r.systemContext, imageName)
	if err != nil {
		return nil, err
	}

	// NOTE: Below we print the description from the short-name resolution.
	// In theory we could print it here.  In practice, however, this is
	// causing a hard time for Buildah users who are doing a `buildah from
	// image` and expect just the container name to be printed if the image
	// is present locally.
	// The pragmatic solution is to only print the description when we found
	// a _newer_ image that we're about to pull.
	wroteDesc := false
	writeDesc := func() error {
		if wroteDesc {
			return nil
		}
		wroteDesc = true
		if desc := resolved.Description(); len(desc) > 0 {
			logrus.Debug(desc)
			if options.Writer != nil {
				if _, err := options.Writer.Write([]byte(desc + "\n")); err != nil {
					return err
				}
			}
		}
		return nil
	}

	c, err := newCopier(&r.systemContext, &options.CopyOptions)
	if err != nil {
		return nil, err
	}
	defer c.close()

	// Try each pull candidate in order; the first successful copy wins.
	var pullErrors []error
	for _, candidate := range resolved.PullCandidates {
		candidateString := candidate.Value.String()
		logrus.Debugf("Attempting to pull candidate %s for %s", candidateString, imageName)
		srcRef, err := dockerTransport.NewReference(candidate.Value)
		if err != nil {
			return nil, err
		}

		if pullPolicy == libimageTypes.PullPolicyNewer && localImage != nil {
			isNewer, err := localImage.HasDifferentDigest(ctx, srcRef)
			if err != nil {
				pullErrors = append(pullErrors, err)
				continue
			}

			if !isNewer {
				logrus.Debugf("Skipping pull candidate %s as the image is not newer (pull policy %s)", candidateString, pullPolicy)
				continue
			}
		}

		destRef, err := storageTransport.Transport.ParseStoreReference(r.store, candidate.Value.String())
		if err != nil {
			return nil, err
		}

		if err := writeDesc(); err != nil {
			return nil, err
		}
		if options.Writer != nil {
			if _, err := io.WriteString(options.Writer, fmt.Sprintf("Trying to pull %s...\n", candidateString)); err != nil {
				return nil, err
			}
		}
		if _, err := c.copy(ctx, srcRef, destRef); err != nil {
			logrus.Debugf("Error pulling candidate %s: %v", candidateString, err)
			pullErrors = append(pullErrors, err)
			continue
		}
		if err := candidate.Record(); err != nil {
			// Only log the recording errors.  Podman has seen
			// reports where users set most of the system to
			// read-only which can cause issues.
			logrus.Errorf("Error recording short-name alias %q: %v", candidateString, err)
		}

		logrus.Debugf("Pulled candidate %s successfully", candidateString)
		return []string{candidate.Value.String()}, nil
	}

	// With "newer", keeping the existing local image is a success.
	if localImage != nil && pullPolicy == libimageTypes.PullPolicyNewer {
		return []string{resolvedImageName}, nil
	}

	if len(pullErrors) == 0 {
		return nil, errors.Errorf("internal error: no image pulled (pull policy %s)", pullPolicy)
	}

	return nil, resolved.FormatPullErrors(pullErrors)
}

View File

@ -0,0 +1,90 @@
package libimage
import (
"fmt"
"github.com/pkg/errors"
)
// PullPolicy determines how and which images are being pulled from a container
// registry (i.e., docker transport only).
//
// Supported string values are:
// * "always"  <-> PullPolicyAlways
// * "missing" <-> PullPolicyMissing
// * "newer"   <-> PullPolicyNewer
// * "never"   <-> PullPolicyNever
type PullPolicy int

const (
	// This default value forces callers to setup a custom default policy.
	// Some tools use different policies (e.g., buildah-bud versus
	// podman-build).
	PullPolicyUnsupported PullPolicy = iota
	// Always pull the image.
	PullPolicyAlways
	// Pull the image only if it could not be found in the local containers
	// storage.
	PullPolicyMissing
	// Pull if the image on the registry is newer than the one in the local
	// containers storage.  An image is considered to be newer when the
	// digests are different.  Comparing the time stamps is prone to
	// errors.
	PullPolicyNewer
	// Never pull the image but use the one from the local containers
	// storage.
	PullPolicyNever
)
// String converts a PullPolicy into a string.
//
// Supported string values are:
// * "always"  <-> PullPolicyAlways
// * "missing" <-> PullPolicyMissing
// * "newer"   <-> PullPolicyNewer
// * "never"   <-> PullPolicyNever
func (p PullPolicy) String() string {
	switch p {
	case PullPolicyNever:
		return "never"
	case PullPolicyNewer:
		return "newer"
	case PullPolicyMissing:
		return "missing"
	case PullPolicyAlways:
		return "always"
	default:
		return fmt.Sprintf("unrecognized policy %d", p)
	}
}
// Validate returns an error if the pull policy is not one of the supported
// values; nil otherwise.
func (p PullPolicy) Validate() error {
	if p == PullPolicyAlways || p == PullPolicyMissing || p == PullPolicyNewer || p == PullPolicyNever {
		return nil
	}
	return errors.Errorf("unsupported pull policy %d", p)
}
// ParsePullPolicy parses the string into a pull policy.
//
// Supported string values are:
// * "always"  <-> PullPolicyAlways
// * "missing" <-> PullPolicyMissing
// * "newer"   <-> PullPolicyNewer
// * "never"   <-> PullPolicyNever
func ParsePullPolicy(s string) (PullPolicy, error) {
	switch s {
	case "always":
		return PullPolicyAlways, nil
	case "missing":
		return PullPolicyMissing, nil
	case "newer":
		return PullPolicyNewer, nil
	case "never":
		// BUGFIX: previously returned PullPolicyMissing, silently turning
		// an explicit "never" into "pull if missing".
		return PullPolicyNever, nil
	default:
		return PullPolicyUnsupported, errors.Errorf("unsupported pull policy %q", s)
	}
}

88
vendor/github.com/containers/common/libimage/push.go generated vendored Normal file
View File

@ -0,0 +1,88 @@
package libimage
import (
"context"
dockerArchiveTransport "github.com/containers/image/v5/docker/archive"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/storage"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// PushOptions allows for customizing image pushes.
type PushOptions struct {
	CopyOptions
}
// Push pushes the specified source which must refer to an image in the local
// containers storage.  It may or may not have the `containers-storage:`
// prefix.  Use destination to push to a custom destination.  The destination
// can refer to any supported transport.  If no transport is specified, the
// docker transport (i.e., a registry) is implied.  If destination is left
// empty, the docker destination will be extrapolated from the source.
//
// Returns storage.ErrImageUnknown if source could not be found in the local
// containers storage.
func (r *Runtime) Push(ctx context.Context, source, destination string, options *PushOptions) ([]byte, error) {
	if options == nil {
		options = &PushOptions{}
	}

	// Look up the local image.
	image, resolvedSource, err := r.LookupImage(source, nil)
	if err != nil {
		return nil, err
	}
	if image == nil {
		return nil, errors.Wrap(storage.ErrImageUnknown, source)
	}

	srcRef, err := image.StorageReference()
	if err != nil {
		return nil, err
	}

	// Make sure we have a proper destination, and parse it into an image
	// reference for copying.
	if destination == "" {
		// Doing an ID check here is tempting but false positives (due
		// to a short partial IDs) are more painful than false
		// negatives.
		destination = resolvedSource
	}

	logrus.Debugf("Pushing image %s to %s", source, destination)

	destRef, err := alltransports.ParseImageName(destination)
	if err != nil {
		// If the input does not include a transport assume it refers
		// to a registry.
		dockerRef, dockerErr := alltransports.ParseImageName("docker://" + destination)
		if dockerErr != nil {
			// Surface the original parse error, not the docker one.
			return nil, err
		}
		destRef = dockerRef
	}

	// Buildah compat: Make sure to tag the destination image if it's a
	// Docker archive.  This way, we preserve the image name.
	if destRef.Transport().Name() == dockerArchiveTransport.Transport.Name() {
		if named, err := reference.ParseNamed(resolvedSource); err == nil {
			tagged, isTagged := named.(reference.NamedTagged)
			if isTagged {
				options.dockerArchiveAdditionalTags = []reference.NamedTagged{tagged}
			}
		}
	}

	c, err := newCopier(&r.systemContext, &options.CopyOptions)
	if err != nil {
		return nil, err
	}
	defer c.close()

	return c.copy(ctx, srcRef, destRef)
}

448
vendor/github.com/containers/common/libimage/runtime.go generated vendored Normal file
View File

@ -0,0 +1,448 @@
package libimage
import (
"context"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/pkg/shortnames"
storageTransport "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// RuntimeOptions allow for creating a customized Runtime.
type RuntimeOptions struct {
	// SystemContext is copied into the Runtime; a nil value yields an
	// empty context.
	SystemContext *types.SystemContext
}
// setRegistriesConfPath sets the registries.conf path for the specified
// context unless one is already set.  The CONTAINERS_REGISTRIES_CONF
// environment variable takes precedence over REGISTRIES_CONFIG_PATH.
func setRegistriesConfPath(systemContext *types.SystemContext) {
	if systemContext.SystemRegistriesConfPath != "" {
		return
	}
	// Check the environment overrides in order of precedence.
	for _, envVar := range []string{"CONTAINERS_REGISTRIES_CONF", "REGISTRIES_CONFIG_PATH"} {
		if value, ok := os.LookupEnv(envVar); ok {
			systemContext.SystemRegistriesConfPath = value
			return
		}
	}
}
// Runtime is responsible for image management and storing them in a containers
// storage.
type Runtime struct {
	// Underlying storage store.
	store storage.Store
	// Global system context.  No pointer to simplify copying and modifying
	// it.
	systemContext types.SystemContext
	// maps an image ID to an Image pointer.  Allows for aggressive
	// caching.  Not guarded by a lock; NOTE(review): concurrent use of a
	// Runtime does not appear to be synchronized here — confirm callers.
	imageIDmap map[string]*Image
}
// RuntimeFromStore returns a Runtime for the specified store.  The system
// context from options (if any) is copied; the registries.conf path and the
// blob-info cache directory are defaulted when unset.
func RuntimeFromStore(store storage.Store, options *RuntimeOptions) (*Runtime, error) {
	if options == nil {
		options = &RuntimeOptions{}
	}

	// Copy the caller's system context (or start from the zero value).
	var sys types.SystemContext
	if sc := options.SystemContext; sc != nil {
		sys = *sc
	}

	setRegistriesConfPath(&sys)

	if sys.BlobInfoCacheDir == "" {
		sys.BlobInfoCacheDir = filepath.Join(store.GraphRoot(), "cache")
	}

	runtime := &Runtime{
		store:         store,
		systemContext: sys,
		imageIDmap:    make(map[string]*Image),
	}
	return runtime, nil
}
// RuntimeFromStoreOptions returns a Runtime for the specified store options.
// A nil storeOptions is treated as the default storage.StoreOptions.
func RuntimeFromStoreOptions(runtimeOptions *RuntimeOptions, storeOptions *storage.StoreOptions) (*Runtime, error) {
	if storeOptions == nil {
		storeOptions = &storage.StoreOptions{}
	}
	store, err := storage.GetStore(*storeOptions)
	if err != nil {
		return nil, err
	}
	// Register the store with the storage transport so that
	// containers-storage references resolve against it.
	storageTransport.Transport.SetStore(store)
	return RuntimeFromStore(store, runtimeOptions)
}
// Shutdown attempts to free any kernel resources which are being used by the
// underlying driver.  If "force" is true, any mounted (i.e., in use) layers
// are unmounted beforehand.  If "force" is not true, then layers being in use
// is considered to be an error condition.
func (r *Runtime) Shutdown(force bool) error {
	if _, err := r.store.Shutdown(force); err != nil {
		return err
	}
	return nil
}
// storageToImage transforms a storage.Image into an Image, reusing a cached
// Image for the same ID when one exists.
func (r *Runtime) storageToImage(storageImage *storage.Image, ref types.ImageReference) *Image {
	if cached, ok := r.imageIDmap[storageImage.ID]; ok {
		return cached
	}
	img := &Image{
		runtime:          r,
		storageImage:     storageImage,
		storageReference: ref,
	}
	r.imageIDmap[storageImage.ID] = img
	return img
}
// Exists returns true if the specified image exists in the local containers
// storage.
func (r *Runtime) Exists(name string) (bool, error) {
	img, _, err := r.LookupImage(name, nil)
	// Note: the lookup error (if any) is propagated alongside the result.
	return img != nil, err
}
// LookupImageOptions allow for customizing local image lookups.
type LookupImageOptions struct {
	// If set, the image will be purely looked up by name.  No matching to
	// the current platform will be performed.  This can be helpful when
	// the platform does not matter, for instance, for image removal.
	IgnorePlatform bool
}
// LookupImage looks up `name` in the local container storage matching the
// specified SystemContext.  Returns the image and the name it has been found
// with.  Returns nil if no image has been found.  Note that name may also use
// the `containers-storage:` prefix used to refer to the containers-storage
// transport.
//
// If the specified name uses the `containers-storage` transport, the resolved
// name is empty.
func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, string, error) {
	logrus.Debugf("Looking up image %q in local containers storage", name)

	if options == nil {
		options = &LookupImageOptions{}
	}

	// If needed extract the name sans transport.
	storageRef, err := alltransports.ParseImageName(name)
	if err == nil {
		// Fully parseable: only the containers-storage transport is
		// supported for local lookups.
		if storageRef.Transport().Name() != storageTransport.Transport.Name() {
			return nil, "", errors.Errorf("unsupported transport %q for looking up local images", storageRef.Transport().Name())
		}
		img, err := storageTransport.Transport.GetStoreImage(r.store, storageRef)
		if err != nil {
			return nil, "", err
		}
		logrus.Debugf("Found image %q in local containers storage (%s)", name, storageRef.StringWithinTransport())
		return r.storageToImage(img, storageRef), "", nil
	}

	byDigest := false
	if strings.HasPrefix(name, "sha256:") {
		byDigest = true
		name = strings.TrimPrefix(name, "sha256:")
	}

	// Anonymous function to lookup the provided image in the storage and
	// check whether it's matching the system context.
	findImage := func(input string) (*Image, error) {
		img, err := r.store.Image(input)
		if err != nil && errors.Cause(err) != storage.ErrImageUnknown {
			return nil, err
		}
		if img == nil {
			return nil, nil
		}
		ref, err := storageTransport.Transport.ParseStoreReference(r.store, img.ID)
		if err != nil {
			return nil, err
		}
		if options.IgnorePlatform {
			logrus.Debugf("Found image %q as %q in local containers storage", name, input)
			return r.storageToImage(img, ref), nil
		}
		matches, err := imageReferenceMatchesContext(context.Background(), ref, &r.systemContext)
		if err != nil {
			return nil, err
		}
		if !matches {
			return nil, nil
		}
		// Also print the string within the storage transport.  That
		// may aid in debugging when using additional stores since we
		// see explicitly where the store is and which driver (options)
		// are used.
		logrus.Debugf("Found image %q as %q in local containers storage (%s)", name, input, ref.StringWithinTransport())
		return r.storageToImage(img, ref), nil
	}

	// First, check if we have an exact match in the storage.  Maybe an ID
	// or a fully-qualified image name.
	img, err := findImage(name)
	if err != nil {
		return nil, "", err
	}
	if img != nil {
		return img, name, nil
	}

	// If the name clearly referred to a local image, there's nothing we can
	// do anymore.
	// NOTE(review): at this point ParseImageName failed, so storageRef is
	// presumably nil and only byDigest can trigger this return — confirm.
	if storageRef != nil || byDigest {
		return nil, "", nil
	}

	// Second, try out the candidates as resolved by shortnames.  This takes
	// "localhost/" prefixed images into account as well.
	candidates, err := shortnames.ResolveLocally(&r.systemContext, name)
	if err != nil {
		return nil, "", err
	}
	// Backwards compat: normalize to docker.io as some users may very well
	// rely on that.
	dockerNamed, err := reference.ParseDockerRef(name)
	if err != nil {
		return nil, "", errors.Wrap(err, "error normalizing to docker.io")
	}
	candidates = append(candidates, dockerNamed)

	for _, candidate := range candidates {
		img, err := findImage(candidate.String())
		if err != nil {
			return nil, "", err
		}
		if img != nil {
			return img, candidate.String(), err
		}
	}

	return nil, "", nil
}
// imageReferenceMatchesContext returns true if the specified reference
// matches the platform (os, arch, variant) as specified by the system
// context.  A nil context matches everything.
func imageReferenceMatchesContext(ctx context.Context, ref types.ImageReference, sys *types.SystemContext) (bool, error) {
	if sys == nil {
		return true, nil
	}

	img, err := ref.NewImage(ctx, sys)
	if err != nil {
		return false, err
	}
	defer img.Close()

	inspectData, err := img.Inspect(ctx)
	if err != nil {
		return false, err
	}

	// Fall back to the host platform when the context leaves a choice
	// unset.
	wantOS := sys.OSChoice
	if wantOS == "" {
		wantOS = runtime.GOOS
	}
	wantArch := sys.ArchitectureChoice
	if wantArch == "" {
		wantArch = runtime.GOARCH
	}

	if wantOS != inspectData.Os || wantArch != inspectData.Architecture {
		return false, nil
	}
	return sys.VariantChoice == "" || sys.VariantChoice == inspectData.Variant, nil
}
// ListImagesOptions allow for customizing listing images.
type ListImagesOptions struct {
	// Filters to filter the listed images.  Supported filters are
	// * after,before,since=image
	// * dangling=true,false
	// * intermediate=true,false (useful for pruning images)
	// * id=id
	// * label=key[=value]
	// * readonly=true,false
	// * reference=name[:tag] (wildcards allowed)
	Filters []string
}
// ListImages lists images in the local container storage.  If names are
// specified, only images with the specified names are looked up and filtered.
func (r *Runtime) ListImages(ctx context.Context, names []string, options *ListImagesOptions) ([]*Image, error) {
	if options == nil {
		options = &ListImagesOptions{}
	}

	var images []*Image
	if len(names) == 0 {
		// No names: enumerate everything in the store.
		storageImages, err := r.store.Images()
		if err != nil {
			return nil, err
		}
		for i := range storageImages {
			images = append(images, r.storageToImage(&storageImages[i], nil))
		}
	} else {
		// Look up each requested name; a miss is an error.
		lookupOpts := LookupImageOptions{IgnorePlatform: true}
		for _, name := range names {
			img, _, err := r.LookupImage(name, &lookupOpts)
			if err != nil {
				return nil, err
			}
			if img == nil {
				return nil, errors.Wrap(storage.ErrImageUnknown, name)
			}
			images = append(images, img)
		}
	}

	var filters []filterFunc
	if len(options.Filters) > 0 {
		compiled, err := r.compileImageFilters(ctx, options.Filters)
		if err != nil {
			return nil, err
		}
		filters = append(filters, compiled...)
	}

	return filterImages(images, filters)
}
// RemoveImagesOptions allow for customizing image removal.
type RemoveImagesOptions struct {
	RemoveImageOptions
	// Filters to filter the removed images.  Supported filters are
	// * after,before,since=image
	// * dangling=true,false
	// * intermediate=true,false (useful for pruning images)
	// * id=id
	// * label=key[=value]
	// * readonly=true,false
	// * reference=name[:tag] (wildcards allowed)
	Filters []string
}
// RemoveImages removes images specified by names.  All images are expected to
// exist in the local containers storage.
//
// If an image has more names than one name, the image will be untagged with
// the specified name.  RemoveImages returns a slice of untagged and removed
// images.
func (r *Runtime) RemoveImages(ctx context.Context, names []string, options *RemoveImagesOptions) (untagged, removed []string, rmError error) {
	if options == nil {
		options = &RemoveImagesOptions{}
	}

	// deleteMe bundles an image with a possibly empty string value it has
	// been looked up with.  The string value is required to implement the
	// untagging logic.
	type deleteMe struct {
		image *Image
		name  string
	}

	var images []*deleteMe
	switch {
	case len(names) > 0:
		lookupOptions := LookupImageOptions{IgnorePlatform: true}
		for _, name := range names {
			img, resolvedName, err := r.LookupImage(name, &lookupOptions)
			if err != nil {
				return nil, nil, err
			}
			if img == nil {
				return nil, nil, errors.Wrap(storage.ErrImageUnknown, name)
			}
			images = append(images, &deleteMe{image: img, name: resolvedName})
		}
		if len(images) == 0 {
			return nil, nil, errors.New("no images found")
		}

	case len(options.Filters) > 0:
		filteredImages, err := r.ListImages(ctx, nil, &ListImagesOptions{Filters: options.Filters})
		if err != nil {
			return nil, nil, err
		}
		for _, img := range filteredImages {
			images = append(images, &deleteMe{image: img})
		}
	}

	// Now remove the images.
	// CLEANUP: the loop variable was named `delete`, shadowing Go's
	// built-in delete(); renamed to `d` with no behavioral change.
	for _, d := range images {
		numNames := len(d.image.Names())
		skipRemove := false
		if len(names) > 0 {
			// Explicitly requested images with children are only
			// untagged, not removed.
			hasChildren, err := d.image.HasChildren(ctx)
			if err != nil {
				rmError = multierror.Append(rmError, err)
				continue
			}
			skipRemove = hasChildren
		}
		if d.name != "" {
			untagged = append(untagged, d.name)
		}
		mustUntag := !options.Force && d.name != "" && (numNames > 1 || skipRemove)
		if mustUntag {
			if err := d.image.Untag(d.name); err != nil {
				rmError = multierror.Append(rmError, err)
				continue
			}
			// If the untag did not reduce the image names, name
			// must have been an ID in which case we should throw
			// an error.  UNLESS there is only one tag left.
			newNumNames := len(d.image.Names())
			if newNumNames == numNames && newNumNames != 1 {
				err := errors.Errorf("unable to delete image %q by ID with more than one tag (%s): use force removal", d.image.ID(), d.image.Names())
				rmError = multierror.Append(rmError, err)
				continue
			}

			// If we deleted the last tag/name, we can continue
			// removing the image.  Otherwise, we mark it as
			// untagged and need to continue.
			if newNumNames >= 1 || skipRemove {
				continue
			}
		}

		if err := d.image.Remove(ctx, &options.RemoveImageOptions); err != nil {
			// If the image does not exist (anymore) we are good.
			// We already performed a presence check in the image
			// look up when `names` are specified.
			if errors.Cause(err) != storage.ErrImageUnknown {
				rmError = multierror.Append(rmError, err)
				continue
			}
		}
		removed = append(removed, d.image.ID())
	}

	return untagged, removed, rmError
}

204
vendor/github.com/containers/common/libimage/save.go generated vendored Normal file
View File

@ -0,0 +1,204 @@
package libimage
import (
"context"
"strings"
dirTransport "github.com/containers/image/v5/directory"
dockerArchiveTransport "github.com/containers/image/v5/docker/archive"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
ociArchiveTransport "github.com/containers/image/v5/oci/archive"
ociTransport "github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/types"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// SaveOptions allow for customizing saving images.
type SaveOptions struct {
	// Options for the underlying copy operation (embedded).
	CopyOptions
	// AdditionalTags for the saved image. Incompatible when saving
	// multiple images.
	AdditionalTags []string
}
// Save saves one or more images indicated by `names` in the specified `format`
// to `path`. Supported formats are oci-archive, docker-archive, oci-dir and
// docker-dir. The latter two adhere to the dir transport in the corresponding
// oci or docker v2s2 format. Please note that only docker-archive supports
// saving more than one image. Other formats will yield an error attempting
// to save more than one.
func (r *Runtime) Save(ctx context.Context, names []string, format, path string, options *SaveOptions) error {
	logrus.Debugf("Saving one or more images (%s) to %q", names, path)

	if options == nil {
		options = &SaveOptions{}
	}

	// First some sanity checks to simplify subsequent code.
	switch len(names) {
	case 0:
		return errors.New("no image specified for saving images")
	case 1:
		// All formats support saving 1.
	default:
		// Only docker-archive can hold more than one image.
		if format != "docker-archive" {
			return errors.Errorf("unsupported format %q for saving multiple images (only docker-archive)", format)
		}
		// Additional tags would be ambiguous across multiple images.
		if len(options.AdditionalTags) > 0 {
			return errors.Errorf("cannot save multiple images with multiple tags")
		}
	}

	// Dispatch the save operations.
	switch format {
	case "oci-archive", "oci-dir", "docker-dir":
		return r.saveSingleImage(ctx, names[0], format, path, options)
	case "docker-archive":
		return r.saveDockerArchive(ctx, names, path, options)
	}

	return errors.Errorf("unsupported format %q for saving images", format)
}
// saveSingleImage saves the specified image name to the specified path.
// Supported formats are "oci-archive", "oci-dir" and "docker-dir".
func (r *Runtime) saveSingleImage(ctx context.Context, name, format, path string, options *SaveOptions) error {
	image, imageName, err := r.LookupImage(name, nil)
	if err != nil {
		return err
	}

	// Unless the image was referenced by ID, use the resolved name as a
	// tag.  When the resolved name is a prefix of the image ID, the lookup
	// happened by (a prefix of) the ID, so no tag must be used.
	var tag string
	if !strings.HasPrefix(image.ID(), imageName) {
		tag = imageName
	}

	srcRef, err := image.StorageReference()
	if err != nil {
		return err
	}

	// Prepare the destination reference.
	var destRef types.ImageReference
	switch format {
	case "oci-archive":
		destRef, err = ociArchiveTransport.NewReference(path, tag)

	case "oci-dir":
		destRef, err = ociTransport.NewReference(path, tag)
		options.ManifestMIMEType = ociv1.MediaTypeImageManifest

	case "docker-dir":
		destRef, err = dirTransport.NewReference(path)
		options.ManifestMIMEType = manifest.DockerV2Schema2MediaType

	default:
		return errors.Errorf("unsupported format %q for saving images", format)
	}
	if err != nil {
		return err
	}

	// Copy on a local copy of the system context so the copier cannot
	// modify the runtime's context.
	sys := r.systemContext
	c, err := newCopier(&sys, &options.CopyOptions)
	if err != nil {
		return err
	}
	defer c.close()

	_, err = c.copy(ctx, srcRef, destRef)
	return err
}
// saveDockerArchive saves the specified images indicated by names to the path.
// It loads all images from the local containers storage and assembles the meta
// data needed to properly save images. Since multiple names could refer to
// the *same* image, we need to dance a bit and store additional "names".
// Those can then be used as additional tags when copying.
func (r *Runtime) saveDockerArchive(ctx context.Context, names []string, path string, options *SaveOptions) error {
	// localImage associates a storage image with the tags it should be
	// written out with.
	type localImage struct {
		image *Image
		tags  []reference.NamedTagged
	}

	orderedIDs := []string{}                    // to preserve the relative order
	localImages := make(map[string]*localImage) // to assemble tags
	visitedNames := make(map[string]bool)       // filters duplicate names

	for _, name := range names {
		// Look up local images.
		image, imageName, err := r.LookupImage(name, nil)
		if err != nil {
			return err
		}
		// Make sure to filter duplicates purely based on the resolved
		// name.
		if _, exists := visitedNames[imageName]; exists {
			continue
		}
		visitedNames[imageName] = true
		// Extract and assemble the data.
		local, exists := localImages[image.ID()]
		if !exists {
			local = &localImage{image: image}
			orderedIDs = append(orderedIDs, image.ID())
		}
		// Add the tag if the locally resolved name is properly tagged
		// (which it should unless we looked it up by ID).
		named, err := reference.ParseNamed(imageName)
		if err == nil {
			if tagged, withTag := named.(reference.NamedTagged); withTag {
				local.tags = append(local.tags, tagged)
			}
		}
		localImages[image.ID()] = local
	}

	writer, err := dockerArchiveTransport.NewWriter(&r.systemContext, path)
	if err != nil {
		return err
	}
	defer writer.Close()

	// saveImage copies a single image into the archive.  It is a separate
	// closure so that each copier's deferred close runs at the end of the
	// iteration instead of piling up until the whole function returns
	// (defer-in-loop would leak copiers while saving many images).
	saveImage := func(local *localImage) error {
		copyOpts := options.CopyOptions
		copyOpts.dockerArchiveAdditionalTags = local.tags

		sys := r.systemContext // prevent copier from modifying the runtime's context
		c, err := newCopier(&sys, &copyOpts)
		if err != nil {
			return err
		}
		defer c.close()

		destRef, err := writer.NewReference(nil)
		if err != nil {
			return err
		}

		srcRef, err := local.image.StorageReference()
		if err != nil {
			return err
		}

		_, err = c.copy(ctx, srcRef, destRef)
		return err
	}

	for _, id := range orderedIDs {
		local, exists := localImages[id]
		if !exists {
			return errors.Errorf("internal error: saveDockerArchive: ID %s not found in local map", id)
		}
		if err := saveImage(local); err != nil {
			return err
		}
	}

	return nil
}

264
vendor/github.com/containers/common/libimage/search.go generated vendored Normal file
View File

@ -0,0 +1,264 @@
package libimage
import (
"context"
"fmt"
"strings"
"sync"
dockerTransport "github.com/containers/image/v5/docker"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
"github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/semaphore"
)
const (
	// searchTruncLength is the length at which image descriptions are
	// truncated (an ellipsis is appended) unless NoTrunc is requested.
	searchTruncLength = 44
	// searchMaxQueries is the default maximum number of results returned
	// per registry.
	searchMaxQueries = 25
	// Let's follow Firefox by limiting parallel downloads to 6. We do the
	// same when pulling images in c/image.
	searchMaxParallel = int64(6)
)
// SearchResult is holding image-search related data.
type SearchResult struct {
	// Index is the image index (e.g., "docker.io" or "quay.io").
	Index string
	// Name is the canonical name of the image (e.g., "docker.io/library/alpine").
	Name string
	// Description of the image.
	Description string
	// Stars is the number of stars of the image.
	Stars int
	// Official indicates if it's an official image; it is a display string
	// ("[OK]" or empty), not a boolean.
	Official string
	// Automated indicates if the image was created by an automated build;
	// it is a display string ("[OK]" or empty), not a boolean.
	Automated string
	// Tag is the image tag (only set when listing tags).
	Tag string
}
// SearchOptions customize searching images.
type SearchOptions struct {
	// Filter allows to filter the results.
	Filter SearchFilter
	// Limit limits the number of queries per index (default: 25). Must be
	// greater than 0 to overwrite the default value.
	Limit int
	// NoTrunc avoids the output to be truncated.
	NoTrunc bool
	// Authfile is the path to the authentication file.
	Authfile string
	// InsecureSkipTLSVerify allows to skip TLS verification.
	InsecureSkipTLSVerify types.OptionalBool
	// ListTags returns the search result with available tags.
	ListTags bool
}
// SearchFilter allows filtering images while searching.
type SearchFilter struct {
	// Stars describes the minimum number of stars an image must have.
	Stars int
	// IsAutomated decides if only images from automated builds are displayed.
	IsAutomated types.OptionalBool
	// IsOfficial decides if only official images are displayed.
	IsOfficial types.OptionalBool
}
// Search queries the configured unqualified-search registries for images
// matching term and returns the aggregated results.  If term contains a
// slash, everything before the first slash is additionally tried as a
// registry.  Registries are queried in parallel (bounded by
// searchMaxParallel); per-registry errors are collected and only returned
// when no registry produced any result.
func (r *Runtime) Search(ctx context.Context, term string, options SearchOptions) ([]SearchResult, error) {
	searchRegistries, err := sysregistriesv2.UnqualifiedSearchRegistries(&r.systemContext)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("Searching images matching term %s at the following registries %s", term, searchRegistries)

	// Try to extract a registry from the specified search term. We
	// consider everything before the first slash to be the registry. Note
	// that we cannot use the reference parser from the containers/image
	// library as the search term may contain arbitrary input such as
	// wildcards. See bugzilla.redhat.com/show_bug.cgi?id=1846629.
	if spl := strings.SplitN(term, "/", 2); len(spl) > 1 {
		searchRegistries = append(searchRegistries, spl[0])
		term = spl[1]
	}

	// searchOutputData is used as a return value for searching in parallel.
	type searchOutputData struct {
		data []SearchResult
		err  error
	}

	sem := semaphore.NewWeighted(searchMaxParallel)
	wg := sync.WaitGroup{}
	wg.Add(len(searchRegistries))
	data := make([]searchOutputData, len(searchRegistries))

	for i := range searchRegistries {
		if err := sem.Acquire(ctx, 1); err != nil {
			// NOTE(review): returning here leaves already-launched
			// goroutines running and wg un-waited; they only write
			// into data, which escapes, so no crash — confirm this
			// is acceptable.
			return nil, err
		}
		index := i
		go func() {
			defer sem.Release(1)
			defer wg.Done()
			searchOutput, err := r.searchImageInRegistry(ctx, term, searchRegistries[index], options)
			data[index] = searchOutputData{data: searchOutput, err: err}
		}()
	}
	wg.Wait()

	results := []SearchResult{}
	var multiErr error
	for _, d := range data {
		if d.err != nil {
			multiErr = multierror.Append(multiErr, d.err)
			continue
		}
		results = append(results, d.data...)
	}

	// Optimistically assume that one successfully searched registry
	// includes what the user is looking for.
	if len(results) > 0 {
		return results, nil
	}
	return results, multiErr
}
// searchImageInRegistry queries a single registry for images matching term
// and converts the raw transport results into SearchResults.  If
// options.ListTags is set, the available tags of the matching repository are
// returned instead.
func (r *Runtime) searchImageInRegistry(ctx context.Context, term, registry string, options SearchOptions) ([]SearchResult, error) {
	// Max number of queries by default is 25
	limit := searchMaxQueries
	if options.Limit > 0 {
		limit = options.Limit
	}

	// Work on a local copy so TLS tweaks do not leak into the runtime's
	// system context.
	sys := r.systemContext
	if options.InsecureSkipTLSVerify != types.OptionalBoolUndefined {
		sys.DockerInsecureSkipTLSVerify = options.InsecureSkipTLSVerify
	}

	if options.ListTags {
		results, err := searchRepositoryTags(ctx, &sys, registry, term, options)
		if err != nil {
			return []SearchResult{}, err
		}
		return results, nil
	}

	results, err := dockerTransport.SearchRegistry(ctx, &sys, registry, term, limit)
	if err != nil {
		return []SearchResult{}, err
	}

	// Shorten the index to the last two domain components when possible
	// (e.g., "registry.access.example.com" -> "example.com").
	index := registry
	arr := strings.Split(registry, ".")
	if len(arr) > 2 {
		index = strings.Join(arr[len(arr)-2:], ".")
	}

	// limit is the number of results to output
	// if the total number of results is less than the limit, output all
	// if the limit has been set by the user, output those number of queries
	limit = searchMaxQueries
	if len(results) < limit {
		limit = len(results)
	}
	if options.Limit != 0 {
		limit = len(results)
		if options.Limit < len(results) {
			limit = options.Limit
		}
	}

	paramsArr := []SearchResult{}
	for i := 0; i < limit; i++ {
		// Check whether query matches filters
		if !(options.Filter.matchesAutomatedFilter(results[i]) && options.Filter.matchesOfficialFilter(results[i]) && options.Filter.matchesStarFilter(results[i])) {
			continue
		}
		official := ""
		if results[i].IsOfficial {
			official = "[OK]"
		}
		automated := ""
		if results[i].IsAutomated {
			automated = "[OK]"
		}
		description := strings.ReplaceAll(results[i].Description, "\n", " ")
		// Use the named constant instead of the magic number 44 so the
		// threshold and the truncation length cannot drift apart.
		if len(description) > searchTruncLength && !options.NoTrunc {
			description = description[:searchTruncLength] + "..."
		}
		name := registry + "/" + results[i].Name
		if index == "docker.io" && !strings.Contains(results[i].Name, "/") {
			name = index + "/library/" + results[i].Name
		}
		params := SearchResult{
			Index:       index,
			Name:        name,
			Description: description,
			Official:    official,
			Automated:   automated,
			Stars:       results[i].StarCount,
		}
		paramsArr = append(paramsArr, params)
	}
	return paramsArr, nil
}
// searchRepositoryTags lists the tags of the repository "registry/term" and
// wraps them into SearchResults.  The reference may be given fully qualified
// with the docker:// transport; a bare reference is qualified automatically.
func searchRepositoryTags(ctx context.Context, sys *types.SystemContext, registry, term string, options SearchOptions) ([]SearchResult, error) {
	repo := fmt.Sprintf("%s/%s", registry, term)
	imageRef, err := alltransports.ParseImageName(repo)
	switch {
	case err == nil && imageRef.Transport().Name() != dockerTransport.Transport.Name():
		// Parsed fine but points at a different transport.
		return nil, errors.Errorf("reference %q must be a docker reference", term)
	case err != nil:
		// Not parseable as-is: qualify with the docker:// transport.
		imageRef, err = alltransports.ParseImageName(fmt.Sprintf("docker://%s", repo))
		if err != nil {
			return nil, errors.Errorf("reference %q must be a docker reference", term)
		}
	}

	tags, err := dockerTransport.GetRepositoryTags(ctx, sys, imageRef)
	if err != nil {
		return nil, errors.Errorf("error getting repository tags: %v", err)
	}

	// Cap the number of returned tags at searchMaxQueries unless the user
	// requested an explicit limit.
	numResults := searchMaxQueries
	if len(tags) < numResults {
		numResults = len(tags)
	}
	if options.Limit != 0 {
		numResults = len(tags)
		if options.Limit < numResults {
			numResults = options.Limit
		}
	}

	repoName := imageRef.DockerReference().Name()
	searchResults := make([]SearchResult, 0, numResults)
	for _, tag := range tags[:numResults] {
		searchResults = append(searchResults, SearchResult{Name: repoName, Tag: tag})
	}
	return searchResults, nil
}
// matchesStarFilter reports whether res has at least f.Stars stars.
func (f *SearchFilter) matchesStarFilter(res dockerTransport.SearchResult) bool {
	return res.StarCount >= f.Stars
}
// matchesAutomatedFilter reports whether res satisfies the automated-build
// filter; an undefined filter matches everything.
func (f *SearchFilter) matchesAutomatedFilter(res dockerTransport.SearchResult) bool {
	if f.IsAutomated == types.OptionalBoolUndefined {
		return true
	}
	wantAutomated := f.IsAutomated == types.OptionalBoolTrue
	return res.IsAutomated == wantAutomated
}
// matchesOfficialFilter reports whether res satisfies the official-image
// filter; an undefined filter matches everything.
func (f *SearchFilter) matchesOfficialFilter(res dockerTransport.SearchResult) bool {
	if f.IsOfficial == types.OptionalBoolUndefined {
		return true
	}
	wantOfficial := f.IsOfficial == types.OptionalBoolTrue
	return res.IsOfficial == wantOfficial
}

View File

@ -0,0 +1,90 @@
package types
import (
"fmt"
"github.com/pkg/errors"
)
// PullPolicy determines how and which images are being pulled from a container
// registry (i.e., docker transport only).
//
// Supported string values are:
// * "always"  <-> PullPolicyAlways
// * "missing" <-> PullPolicyMissing
// * "newer"   <-> PullPolicyNewer
// * "never"   <-> PullPolicyNever
type PullPolicy int

const (
	// PullPolicyUnsupported is the zero value and deliberately invalid:
	// it forces callers to set an explicit default policy, since tools
	// differ (e.g., buildah-bud versus podman-build).
	PullPolicyUnsupported PullPolicy = iota
	// PullPolicyAlways: always pull the image.
	PullPolicyAlways
	// PullPolicyMissing: pull only if the image could not be found in the
	// local containers storage.
	PullPolicyMissing
	// PullPolicyNewer: pull if the image on the registry is newer than the
	// one in the local containers storage.  An image is considered newer
	// when the digests differ; comparing time stamps is prone to errors.
	PullPolicyNewer
	// PullPolicyNever: never pull; only use the image from the local
	// containers storage.
	PullPolicyNever
)

// String converts a PullPolicy into its canonical string representation.
//
// Supported string values are:
// * "always"  <-> PullPolicyAlways
// * "missing" <-> PullPolicyMissing
// * "newer"   <-> PullPolicyNewer
// * "never"   <-> PullPolicyNever
func (p PullPolicy) String() string {
	names := map[PullPolicy]string{
		PullPolicyAlways:  "always",
		PullPolicyMissing: "missing",
		PullPolicyNewer:   "newer",
		PullPolicyNever:   "never",
	}
	if name, ok := names[p]; ok {
		return name
	}
	return fmt.Sprintf("unrecognized policy %d", p)
}
// Validate returns an error if the pull policy is not one of the supported
// values; it returns nil otherwise.
func (p PullPolicy) Validate() error {
	if p == PullPolicyAlways || p == PullPolicyMissing || p == PullPolicyNewer || p == PullPolicyNever {
		return nil
	}
	return errors.Errorf("unsupported pull policy %d", p)
}
// ParsePullPolicy parses the string into a pull policy.
//
// Supported string values are:
// * "always"  <-> PullPolicyAlways
// * "missing" <-> PullPolicyMissing
// * "newer"   <-> PullPolicyNewer (also "ifnewer")
// * "never"   <-> PullPolicyNever
func ParsePullPolicy(s string) (PullPolicy, error) {
	policies := map[string]PullPolicy{
		"always":  PullPolicyAlways,
		"missing": PullPolicyMissing,
		"newer":   PullPolicyNewer,
		"ifnewer": PullPolicyNewer,
		"never":   PullPolicyNever,
	}
	if policy, ok := policies[s]; ok {
		return policy, nil
	}
	return PullPolicyUnsupported, errors.Errorf("unsupported pull policy %q", s)
}

View File

@ -0,0 +1,58 @@
package types
import (
"time"
"github.com/containers/image/v5/manifest"
"github.com/opencontainers/go-digest"
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// ImageData contains the inspected data of an image, serialized with
// Docker-compatible JSON field names.
type ImageData struct {
	// ID is the image ID; Digest is the manifest digest.
	ID     string        `json:"Id"`
	Digest digest.Digest `json:"Digest"`
	// RepoTags and RepoDigests list the repository tags and digests
	// associated with the image.
	RepoTags    []string `json:"RepoTags"`
	RepoDigests []string `json:"RepoDigests"`
	Parent      string   `json:"Parent"`
	Comment     string   `json:"Comment"`
	Created     *time.Time `json:"Created"`
	// Config is the OCI image configuration.
	Config       *ociv1.ImageConfig `json:"Config"`
	Version      string             `json:"Version"`
	Author       string             `json:"Author"`
	Architecture string             `json:"Architecture"`
	Os           string             `json:"Os"`
	// Size and VirtualSize are byte sizes; presumably VirtualSize includes
	// shared parent layers — confirm against the inspect implementation.
	Size        int64       `json:"Size"`
	VirtualSize int64       `json:"VirtualSize"`
	GraphDriver *DriverData `json:"GraphDriver"`
	RootFS      *RootFS     `json:"RootFS"`
	Labels      map[string]string `json:"Labels"`
	Annotations map[string]string `json:"Annotations"`
	// ManifestType is the MIME type of the image manifest.
	ManifestType string          `json:"ManifestType"`
	User         string          `json:"User"`
	History      []ociv1.History `json:"History"`
	// NamesHistory records names previously attached to the image.
	NamesHistory []string `json:"NamesHistory"`
	// HealthCheck is the Docker-style health-check config, if any.
	HealthCheck *manifest.Schema2HealthConfig `json:"Healthcheck,omitempty"`
}
// DriverData includes data on the storage driver of the image.
type DriverData struct {
	// Name of the storage driver.
	Name string `json:"Name"`
	// Data holds driver-specific key/value metadata.
	Data map[string]string `json:"Data"`
}
// RootFS includes data on the root filesystem of the image.
type RootFS struct {
	// Type of the root filesystem (e.g., "layers").
	Type string `json:"Type"`
	// Layers are the digests of the individual layers.
	Layers []digest.Digest `json:"Layers"`
}
// ImageHistory contains the history information of an image, one entry per
// layer/history step.
type ImageHistory struct {
	ID        string     `json:"id"`
	Created   *time.Time `json:"created"`
	// CreatedBy records the command that created this step.
	CreatedBy string   `json:"createdBy"`
	Size      int64    `json:"size"`
	Comment   string   `json:"comment"`
	// Tags attached to the image at this point in history.
	Tags []string `json:"tags"`
}

View File

@ -5,6 +5,7 @@ import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/containers/image/v5/docker"
@ -13,18 +14,20 @@ import (
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh/terminal"
terminal "golang.org/x/term"
)
// GetDefaultAuthFile returns env value REGISTRY_AUTH_FILE as default
// --authfile path used in multiple --authfile flag definitions
// Will fail over to DOCKER_CONFIG if REGISTRY_AUTH_FILE environment is not set
func GetDefaultAuthFile() string {
authfile := os.Getenv("REGISTRY_AUTH_FILE")
if authfile == "" {
authfile = os.Getenv("DOCKER_CONFIG")
}
if authfile := os.Getenv("REGISTRY_AUTH_FILE"); authfile != "" {
return authfile
}
if auth_env := os.Getenv("DOCKER_CONFIG"); auth_env != "" {
return filepath.Join(auth_env, "config.json")
}
return ""
}
// CheckAuthFile validates filepath given by --authfile
@ -34,7 +37,7 @@ func CheckAuthFile(authfile string) error {
return nil
}
if _, err := os.Stat(authfile); err != nil {
return errors.Wrapf(err, "error checking authfile path %s", authfile)
return errors.Wrap(err, "checking authfile")
}
return nil
}
@ -70,11 +73,11 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
err error
)
if len(args) > 1 {
return errors.Errorf("login accepts only one registry to login to")
return errors.New("login accepts only one registry to login to")
}
if len(args) == 0 {
if !opts.AcceptUnspecifiedRegistry {
return errors.Errorf("please provide a registry to login to")
return errors.New("please provide a registry to login to")
}
if server, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
return err
@ -85,7 +88,7 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
}
authConfig, err := config.GetCredentials(systemContext, server)
if err != nil {
return errors.Wrapf(err, "error reading auth file")
return errors.Wrap(err, "reading auth file")
}
if opts.GetLoginSet {
if authConfig.Username == "" {
@ -95,17 +98,17 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
return nil
}
if authConfig.IdentityToken != "" {
return errors.Errorf("currently logged in, auth file contains an Identity token")
return errors.New("currently logged in, auth file contains an Identity token")
}
password := opts.Password
if opts.StdinPassword {
var stdinPasswordStrBuilder strings.Builder
if opts.Password != "" {
return errors.Errorf("Can't specify both --password-stdin and --password")
return errors.New("Can't specify both --password-stdin and --password")
}
if opts.Username == "" {
return errors.Errorf("Must provide --username with --password-stdin")
return errors.New("Must provide --username with --password-stdin")
}
scanner := bufio.NewScanner(opts.Stdin)
for scanner.Scan() {
@ -126,7 +129,7 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
username, password, err := getUserAndPass(opts, password, authConfig.Username)
if err != nil {
return errors.Wrapf(err, "error getting username and password")
return errors.Wrap(err, "getting username and password")
}
if err = docker.CheckAuth(ctx, systemContext, username, password, server); err == nil {
@ -143,7 +146,7 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
logrus.Debugf("error logging into %q: %v", server, unauthorized)
return errors.Errorf("error logging into %q: invalid username/password", server)
}
return errors.Wrapf(err, "error authenticating creds for %q", server)
return errors.Wrapf(err, "authenticating creds for %q", server)
}
// getRegistryName scrubs and parses the input to get the server name
@ -172,7 +175,7 @@ func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user
}
username, err = reader.ReadString('\n')
if err != nil {
return "", "", errors.Wrapf(err, "error reading username")
return "", "", errors.Wrap(err, "reading username")
}
// If the user just hit enter, use the displayed user from the
// the authentication file. This allows to do a lazy
@ -186,7 +189,7 @@ func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user
fmt.Fprint(opts.Stdout, "Password: ")
pass, err := terminal.ReadPassword(0)
if err != nil {
return "", "", errors.Wrapf(err, "error reading password")
return "", "", errors.Wrap(err, "reading password")
}
password = string(pass)
fmt.Fprintln(opts.Stdout)
@ -206,11 +209,11 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
err error
)
if len(args) > 1 {
return errors.Errorf("logout accepts only one registry to logout from")
return errors.New("logout accepts only one registry to logout from")
}
if len(args) == 0 && !opts.All {
if !opts.AcceptUnspecifiedRegistry {
return errors.Errorf("please provide a registry to logout from")
return errors.New("please provide a registry to logout from")
}
if server, err = defaultRegistryWhenUnspecified(systemContext); err != nil {
return err
@ -219,7 +222,7 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
}
if len(args) != 0 {
if opts.All {
return errors.Errorf("--all takes no arguments")
return errors.New("--all takes no arguments")
}
server = getRegistryName(args[0])
}
@ -240,7 +243,7 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
case config.ErrNotLoggedIn:
authConfig, err := config.GetCredentials(systemContext, server)
if err != nil {
return errors.Wrapf(err, "error reading auth file")
return errors.Wrap(err, "reading auth file")
}
authInvalid := docker.CheckAuth(context.Background(), systemContext, authConfig.Username, authConfig.Password, server)
if authConfig.Username != "" && authConfig.Password != "" && authInvalid == nil {
@ -249,7 +252,7 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
}
return errors.Errorf("Not logged into %s\n", server)
default:
return errors.Wrapf(err, "error logging out of %q", server)
return errors.Wrapf(err, "logging out of %q", server)
}
}
@ -258,10 +261,10 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
func defaultRegistryWhenUnspecified(systemContext *types.SystemContext) (string, error) {
registriesFromFile, err := sysregistriesv2.UnqualifiedSearchRegistries(systemContext)
if err != nil {
return "", errors.Wrapf(err, "error getting registry from registry.conf, please specify a registry")
return "", errors.Wrap(err, "getting registry from registry.conf, please specify a registry")
}
if len(registriesFromFile) == 0 {
return "", errors.Errorf("no registries found in registries.conf, a registry must be provided")
return "", errors.New("no registries found in registries.conf, a registry must be provided")
}
return registriesFromFile[0], nil
}

View File

@ -16,7 +16,7 @@ func ChangeHostPathOwnership(path string, recursive bool, uid, gid int) error {
// Validate if host path can be chowned
isDangerous, err := DangerousHostPath(path)
if err != nil {
return errors.Wrapf(err, "failed to validate if host path is dangerous")
return errors.Wrap(err, "failed to validate if host path is dangerous")
}
if isDangerous {
@ -42,13 +42,13 @@ func ChangeHostPathOwnership(path string, recursive bool, uid, gid int) error {
})
if err != nil {
return errors.Wrapf(err, "failed to chown recursively host path")
return errors.Wrap(err, "failed to chown recursively host path")
}
} else {
// Get host path info
f, err := os.Lstat(path)
if err != nil {
return errors.Wrapf(err, "failed to get host path information")
return errors.Wrap(err, "failed to get host path information")
}
// Get current ownership
@ -57,7 +57,7 @@ func ChangeHostPathOwnership(path string, recursive bool, uid, gid int) error {
if uid != currentUID || gid != currentGID {
if err := os.Lchown(path, uid, gid); err != nil {
return errors.Wrapf(err, "failed to chown host path")
return errors.Wrap(err, "failed to chown host path")
}
}
}

View File

@ -7,5 +7,5 @@ import (
// ChangeHostPathOwnership changes the uid and gid ownership of a directory or file within the host.
// This is used by the volume U flag to change source volumes ownership
func ChangeHostPathOwnership(path string, recursive bool, uid, gid int) error {
return errors.Errorf("windows not supported")
return errors.New("windows not supported")
}

View File

@ -465,16 +465,17 @@ func NewConfig(userConfigPath string) (*Config, error) {
// Now, gather the system configs and merge them as needed.
configs, err := systemConfigs()
if err != nil {
return nil, errors.Wrapf(err, "error finding config on system")
return nil, errors.Wrap(err, "finding config on system")
}
for _, path := range configs {
// Merge changes in later configs with the previous configs.
// Each config file that specified fields, will override the
// previous fields.
if err = readConfigFromFile(path, config); err != nil {
return nil, errors.Wrapf(err, "error reading system config %q", path)
return nil, errors.Wrapf(err, "reading system config %q", path)
}
logrus.Debugf("Merged system config %q: %+v", path, config)
logrus.Debugf("Merged system config %q", path)
logrus.Tracef("%+v", config)
}
// If the caller specified a config path to use, then we read it to
@ -484,9 +485,10 @@ func NewConfig(userConfigPath string) (*Config, error) {
// readConfigFromFile reads in container config in the specified
// file and then merge changes with the current default.
if err = readConfigFromFile(userConfigPath, config); err != nil {
return nil, errors.Wrapf(err, "error reading user config %q", userConfigPath)
return nil, errors.Wrapf(err, "reading user config %q", userConfigPath)
}
logrus.Debugf("Merged user config %q: %+v", userConfigPath, config)
logrus.Debugf("Merged user config %q", userConfigPath)
logrus.Tracef("%+v", config)
}
config.addCAPPrefix()
@ -502,9 +504,9 @@ func NewConfig(userConfigPath string) (*Config, error) {
// default config. If the path, only specifies a few fields in the Toml file
// the defaults from the config parameter will be used for all other fields.
func readConfigFromFile(path string, config *Config) error {
logrus.Debugf("Reading configuration file %q", path)
logrus.Tracef("Reading configuration file %q", path)
if _, err := toml.DecodeFile(path, config); err != nil {
return errors.Wrapf(err, "unable to decode configuration %v", path)
return errors.Wrapf(err, "decode configuration %v", path)
}
return nil
}
@ -517,7 +519,7 @@ func systemConfigs() ([]string, error) {
path := os.Getenv("CONTAINERS_CONF")
if path != "" {
if _, err := os.Stat(path); err != nil {
return nil, errors.Wrapf(err, "failed to stat of %s from CONTAINERS_CONF environment variable", path)
return nil, errors.Wrap(err, "CONTAINERS_CONF file")
}
return append(configs, path), nil
}
@ -554,7 +556,7 @@ func (c *Config) CheckCgroupsAndAdjustConfig() {
hasSession = err == nil
}
if !hasSession {
if !hasSession && unshare.GetRootlessUID() != 0 {
logrus.Warningf("The cgroupv2 manager is set to systemd but there is no systemd user session available")
logrus.Warningf("For using systemd, you may need to login using an user session")
logrus.Warningf("Alternatively, you can enable lingering with: `loginctl enable-linger %d` (possibly as root)", unshare.GetRootlessUID())
@ -579,7 +581,7 @@ func (c *Config) addCAPPrefix() {
func (c *Config) Validate() error {
if err := c.Containers.Validate(); err != nil {
return errors.Wrapf(err, " error validating containers config")
return errors.Wrap(err, "validating containers config")
}
if !c.Containers.EnableLabeling {
@ -587,11 +589,11 @@ func (c *Config) Validate() error {
}
if err := c.Engine.Validate(); err != nil {
return errors.Wrapf(err, "error validating engine configs")
return errors.Wrap(err, "validating engine configs")
}
if err := c.Network.Validate(); err != nil {
return errors.Wrapf(err, "error validating network configs")
return errors.Wrap(err, "validating network configs")
}
return nil
@ -606,7 +608,7 @@ func (c *EngineConfig) findRuntime() string {
}
}
if path, err := exec.LookPath(name); err == nil {
logrus.Warningf("Found default OCIruntime %s path which is missing from [engine.runtimes] in containers.conf", path)
logrus.Debugf("Found default OCI runtime %s path via PATH environment variable", path)
return name
}
}
@ -1001,7 +1003,7 @@ func (c *Config) Write() error {
}
configFile, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0600)
if err != nil {
return errors.Wrapf(err, "cannot open %s", path)
return err
}
defer configFile.Close()
enc := toml.NewEncoder(configFile)

View File

@ -331,10 +331,10 @@ func defaultTmpDir() (string, error) {
if err := os.Mkdir(libpodRuntimeDir, 0700|os.ModeSticky); err != nil {
if !os.IsExist(err) {
return "", errors.Wrapf(err, "cannot mkdir %s", libpodRuntimeDir)
return "", err
} else if err := os.Chmod(libpodRuntimeDir, 0700|os.ModeSticky); err != nil {
// The directory already exist, just set the sticky bit
return "", errors.Wrapf(err, "could not set sticky bit on %s", libpodRuntimeDir)
return "", errors.Wrap(err, "set sticky bit on")
}
}
return filepath.Join(libpodRuntimeDir, "tmp"), nil

View File

@ -40,7 +40,7 @@ func getRuntimeDir() (string, error) {
if runtimeDir == "" {
tmpDir := filepath.Join("/run", "user", uid)
if err := os.MkdirAll(tmpDir, 0700); err != nil {
logrus.Debugf("unable to make temp dir %s", tmpDir)
logrus.Debugf("unable to make temp dir: %v", err)
}
st, err := os.Stat(tmpDir)
if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 {
@ -50,7 +50,7 @@ func getRuntimeDir() (string, error) {
if runtimeDir == "" {
tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("run-%s", uid))
if err := os.MkdirAll(tmpDir, 0700); err != nil {
logrus.Debugf("unable to make temp dir %s", tmpDir)
logrus.Debugf("unable to make temp dir %v", err)
}
st, err := os.Stat(tmpDir)
if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 {
@ -65,7 +65,7 @@ func getRuntimeDir() (string, error) {
}
resolvedHome, err := filepath.EvalSymlinks(home)
if err != nil {
rootlessRuntimeDirError = errors.Wrapf(err, "cannot resolve %s", home)
rootlessRuntimeDirError = errors.Wrap(err, "cannot resolve home")
return
}
runtimeDir = filepath.Join(resolvedHome, "rundir")

View File

@ -0,0 +1,116 @@
package filters
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
"github.com/containers/common/pkg/timetype"
"github.com/pkg/errors"
)
// ComputeUntilTimestamp parses the single "until" filter value into a
// time.Time.  Exactly one value must be provided.
func ComputeUntilTimestamp(filterValues []string) (time.Time, error) {
	if len(filterValues) != 1 {
		return time.Time{}, errors.Errorf("specify exactly one timestamp for until")
	}
	ts, err := timetype.GetTimestamp(filterValues[0], time.Now())
	if err != nil {
		return time.Time{}, err
	}
	seconds, nanoseconds, err := timetype.ParseTimestamps(ts, 0)
	if err != nil {
		return time.Time{}, err
	}
	return time.Unix(seconds, nanoseconds), nil
}
// FiltersFromRequest extracts the "filters" parameter from the specified
// http.Request. The parameter can either be a `map[string][]string` as done
// in new versions of Docker and libpod, or a `map[string]map[string]bool` as
// done in older versions of Docker. We have to do a bit of Yoga to support
// both - just as Docker does as well.
//
// The result is a flat list of "key=value" strings.
//
// Please refer to https://github.com/containers/podman/issues/6899 for some
// background.
func FiltersFromRequest(r *http.Request) ([]string, error) {
	var (
		compatFilters map[string]map[string]bool
		filters       map[string][]string
		libpodFilters []string
		raw           []byte
	)

	// NOTE(review): presence is detected via r.URL.Query() but the value is
	// read from r.Form — this assumes ParseForm has been called by the
	// caller; confirm against the API handlers.
	if _, found := r.URL.Query()["filters"]; found {
		raw = []byte(r.Form.Get("filters"))
	} else if _, found := r.URL.Query()["Filters"]; found {
		raw = []byte(r.Form.Get("Filters"))
	} else {
		return []string{}, nil
	}

	// Backwards compat with older versions of Docker.
	if err := json.Unmarshal(raw, &compatFilters); err == nil {
		for filterKey, filterMap := range compatFilters {
			for filterValue, toAdd := range filterMap {
				if toAdd {
					libpodFilters = append(libpodFilters, fmt.Sprintf("%s=%s", filterKey, filterValue))
				}
			}
		}
		return libpodFilters, nil
	}

	// Fall back to the modern map[string][]string encoding.
	if err := json.Unmarshal(raw, &filters); err != nil {
		return nil, err
	}
	for filterKey, filterSlice := range filters {
		for _, filterValue := range filterSlice {
			libpodFilters = append(libpodFilters, fmt.Sprintf("%s=%s", filterKey, filterValue))
		}
	}

	return libpodFilters, nil
}
// PrepareFilters turns the request's "filters" parameter into a
// map[string][]string suitable for searching in the libpod and compat
// APIs. Entries without a "=" separator are silently dropped.
func PrepareFilters(r *http.Request) (map[string][]string, error) {
	list, err := FiltersFromRequest(r)
	if err != nil {
		return nil, err
	}
	parsed := map[string][]string{}
	for _, f := range list {
		kv := strings.SplitN(f, "=", 2)
		if len(kv) == 2 {
			parsed[kv[0]] = append(parsed[kv[0]], kv[1])
		}
	}
	return parsed, nil
}
// MatchLabelFilters returns true when every entry of filterValues is
// satisfied by the given label set. A filter of the form "key=value"
// requires a label with exactly that key and value; a bare "key" (or
// "key=") matches any label with that key, regardless of value.
func MatchLabelFilters(filterValues []string, labels map[string]string) bool {
	for _, filterValue := range filterValues {
		kv := strings.SplitN(filterValue, "=", 2)
		filterKey := kv[0]
		if len(kv) > 1 {
			filterValue = kv[1]
		} else {
			filterValue = ""
		}
		matched := false
		for labelKey, labelValue := range labels {
			if labelKey == filterKey && (filterValue == "" || labelValue == filterValue) {
				matched = true
				break
			}
		}
		if !matched {
			return false
		}
	}
	return true
}

View File

@ -299,6 +299,7 @@ func DefaultProfile() *Seccomp {
"sendmmsg",
"sendmsg",
"sendto",
"setns",
"set_robust_list",
"set_thread_area",
"set_tid_address",

View File

@ -303,6 +303,7 @@
"sendmmsg",
"sendmsg",
"sendto",
"setns",
"set_robust_list",
"set_thread_area",
"set_tid_address",

View File

@ -0,0 +1,41 @@
package signal
import (
"fmt"
"strconv"
"strings"
"syscall"
)
// ParseSignal translates a string to a valid syscall signal.
// Numeric input is converted directly (0 is rejected); otherwise the
// name is upper-cased, an optional "SIG" prefix is stripped, and the
// result is looked up in signalMap.
// It returns an error if the signal map doesn't include the given signal.
func ParseSignal(rawSignal string) (syscall.Signal, error) {
	if num, err := strconv.Atoi(rawSignal); err == nil {
		if num == 0 {
			return -1, fmt.Errorf("invalid signal: %s", rawSignal)
		}
		return syscall.Signal(num), nil
	}
	name := strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")
	if sig, found := signalMap[name]; found {
		return sig, nil
	}
	return -1, fmt.Errorf("invalid signal: %s", rawSignal)
}
// ParseSignalNameOrNumber translates a string to a valid syscall signal.
// Input can be a name or number representation, i.e. "KILL" or "9", with
// an optional leading "-" (as in "-9" or "-KILL"); a case-insensitive
// scan of signalMap is used as a fallback when ParseSignal rejects it.
func ParseSignalNameOrNumber(rawSignal string) (syscall.Signal, error) {
	trimmed := strings.TrimPrefix(rawSignal, "-")
	if sig, err := ParseSignal(trimmed); err == nil {
		return sig, nil
	}
	for name, sig := range signalMap {
		if strings.EqualFold(name, trimmed) {
			return sig, nil
		}
	}
	return -1, fmt.Errorf("invalid signal: %s", trimmed)
}

View File

@ -0,0 +1,108 @@
// +build linux
// +build !mips,!mipsle,!mips64,!mips64le
// Signal handling for Linux only.
package signal
// Copyright 2013-2018 Docker, Inc.
// NOTE: this package has originally been copied from github.com/docker/docker.
import (
"os"
"os/signal"
"syscall"
"golang.org/x/sys/unix"
)
const (
	// Bounds of the real-time signal range exposed through signalMap.
	// NOTE(review): 34 rather than the kernel's SIGRTMIN(32) — glibc
	// reserves the first two real-time signals for its internal thread
	// implementation; confirm against the target libc.
	sigrtmin = 34
	sigrtmax = 64
	SIGWINCH = syscall.SIGWINCH // For cross-compilation with Windows
)

// signalMap is a map of Linux signals.
// Keys are signal names without the "SIG" prefix, matching the lookup
// done in ParseSignal; RTMIN+n / RTMAX-n entries cover the real-time range.
var signalMap = map[string]syscall.Signal{
	"ABRT":     unix.SIGABRT,
	"ALRM":     unix.SIGALRM,
	"BUS":      unix.SIGBUS,
	"CHLD":     unix.SIGCHLD,
	"CLD":      unix.SIGCLD,
	"CONT":     unix.SIGCONT,
	"FPE":      unix.SIGFPE,
	"HUP":      unix.SIGHUP,
	"ILL":      unix.SIGILL,
	"INT":      unix.SIGINT,
	"IO":       unix.SIGIO,
	"IOT":      unix.SIGIOT,
	"KILL":     unix.SIGKILL,
	"PIPE":     unix.SIGPIPE,
	"POLL":     unix.SIGPOLL,
	"PROF":     unix.SIGPROF,
	"PWR":      unix.SIGPWR,
	"QUIT":     unix.SIGQUIT,
	"SEGV":     unix.SIGSEGV,
	"STKFLT":   unix.SIGSTKFLT,
	"STOP":     unix.SIGSTOP,
	"SYS":      unix.SIGSYS,
	"TERM":     unix.SIGTERM,
	"TRAP":     unix.SIGTRAP,
	"TSTP":     unix.SIGTSTP,
	"TTIN":     unix.SIGTTIN,
	"TTOU":     unix.SIGTTOU,
	"URG":      unix.SIGURG,
	"USR1":     unix.SIGUSR1,
	"USR2":     unix.SIGUSR2,
	"VTALRM":   unix.SIGVTALRM,
	"WINCH":    unix.SIGWINCH,
	"XCPU":     unix.SIGXCPU,
	"XFSZ":     unix.SIGXFSZ,
	"RTMIN":    sigrtmin,
	"RTMIN+1":  sigrtmin + 1,
	"RTMIN+2":  sigrtmin + 2,
	"RTMIN+3":  sigrtmin + 3,
	"RTMIN+4":  sigrtmin + 4,
	"RTMIN+5":  sigrtmin + 5,
	"RTMIN+6":  sigrtmin + 6,
	"RTMIN+7":  sigrtmin + 7,
	"RTMIN+8":  sigrtmin + 8,
	"RTMIN+9":  sigrtmin + 9,
	"RTMIN+10": sigrtmin + 10,
	"RTMIN+11": sigrtmin + 11,
	"RTMIN+12": sigrtmin + 12,
	"RTMIN+13": sigrtmin + 13,
	"RTMIN+14": sigrtmin + 14,
	"RTMIN+15": sigrtmin + 15,
	"RTMAX-14": sigrtmax - 14,
	"RTMAX-13": sigrtmax - 13,
	"RTMAX-12": sigrtmax - 12,
	"RTMAX-11": sigrtmax - 11,
	"RTMAX-10": sigrtmax - 10,
	"RTMAX-9":  sigrtmax - 9,
	"RTMAX-8":  sigrtmax - 8,
	"RTMAX-7":  sigrtmax - 7,
	"RTMAX-6":  sigrtmax - 6,
	"RTMAX-5":  sigrtmax - 5,
	"RTMAX-4":  sigrtmax - 4,
	"RTMAX-3":  sigrtmax - 3,
	"RTMAX-2":  sigrtmax - 2,
	"RTMAX-1":  sigrtmax - 1,
	"RTMAX":    sigrtmax,
}
// CatchAll catches all signals in signalMap and relays them to the
// specified channel. Per os/signal semantics, delivery does not block:
// the channel should be buffered, or signals arriving while it is full
// are dropped.
func CatchAll(sigc chan os.Signal) {
	sigs := make([]os.Signal, 0, len(signalMap))
	for _, sig := range signalMap {
		sigs = append(sigs, sig)
	}
	signal.Notify(sigc, sigs...)
}
// StopCatch stops catching the signals and closes the specified channel.
// The channel is unregistered first so nothing can send on it after the
// close; receivers still draining sigc observe a closed channel rather
// than blocking forever.
func StopCatch(sigc chan os.Signal) {
	signal.Stop(sigc)
	close(sigc)
}

View File

@ -0,0 +1,108 @@
// +build linux
// +build mips mipsle mips64 mips64le
// Special signal handling for mips architecture
package signal
// Copyright 2013-2018 Docker, Inc.
// NOTE: this package has originally been copied from github.com/docker/docker.
import (
"os"
"os/signal"
"syscall"
"golang.org/x/sys/unix"
)
const (
	sigrtmin = 34
	// MIPS exposes a larger real-time range than the other Linux
	// architectures (which cap at 64); see the non-mips variant of
	// this file.
	sigrtmax = 127
	SIGWINCH = syscall.SIGWINCH
)

// signalMap is a map of Linux signals.
// MIPS variant: unlike the generic Linux table it defines EMT and has
// no STKFLT entry; keys are signal names without the "SIG" prefix.
var signalMap = map[string]syscall.Signal{
	"ABRT":     unix.SIGABRT,
	"ALRM":     unix.SIGALRM,
	"BUS":      unix.SIGBUS,
	"CHLD":     unix.SIGCHLD,
	"CLD":      unix.SIGCLD,
	"CONT":     unix.SIGCONT,
	"FPE":      unix.SIGFPE,
	"HUP":      unix.SIGHUP,
	"ILL":      unix.SIGILL,
	"INT":      unix.SIGINT,
	"IO":       unix.SIGIO,
	"IOT":      unix.SIGIOT,
	"KILL":     unix.SIGKILL,
	"PIPE":     unix.SIGPIPE,
	"POLL":     unix.SIGPOLL,
	"PROF":     unix.SIGPROF,
	"PWR":      unix.SIGPWR,
	"QUIT":     unix.SIGQUIT,
	"SEGV":     unix.SIGSEGV,
	"EMT":      unix.SIGEMT,
	"STOP":     unix.SIGSTOP,
	"SYS":      unix.SIGSYS,
	"TERM":     unix.SIGTERM,
	"TRAP":     unix.SIGTRAP,
	"TSTP":     unix.SIGTSTP,
	"TTIN":     unix.SIGTTIN,
	"TTOU":     unix.SIGTTOU,
	"URG":      unix.SIGURG,
	"USR1":     unix.SIGUSR1,
	"USR2":     unix.SIGUSR2,
	"VTALRM":   unix.SIGVTALRM,
	"WINCH":    unix.SIGWINCH,
	"XCPU":     unix.SIGXCPU,
	"XFSZ":     unix.SIGXFSZ,
	"RTMIN":    sigrtmin,
	"RTMIN+1":  sigrtmin + 1,
	"RTMIN+2":  sigrtmin + 2,
	"RTMIN+3":  sigrtmin + 3,
	"RTMIN+4":  sigrtmin + 4,
	"RTMIN+5":  sigrtmin + 5,
	"RTMIN+6":  sigrtmin + 6,
	"RTMIN+7":  sigrtmin + 7,
	"RTMIN+8":  sigrtmin + 8,
	"RTMIN+9":  sigrtmin + 9,
	"RTMIN+10": sigrtmin + 10,
	"RTMIN+11": sigrtmin + 11,
	"RTMIN+12": sigrtmin + 12,
	"RTMIN+13": sigrtmin + 13,
	"RTMIN+14": sigrtmin + 14,
	"RTMIN+15": sigrtmin + 15,
	"RTMAX-14": sigrtmax - 14,
	"RTMAX-13": sigrtmax - 13,
	"RTMAX-12": sigrtmax - 12,
	"RTMAX-11": sigrtmax - 11,
	"RTMAX-10": sigrtmax - 10,
	"RTMAX-9":  sigrtmax - 9,
	"RTMAX-8":  sigrtmax - 8,
	"RTMAX-7":  sigrtmax - 7,
	"RTMAX-6":  sigrtmax - 6,
	"RTMAX-5":  sigrtmax - 5,
	"RTMAX-4":  sigrtmax - 4,
	"RTMAX-3":  sigrtmax - 3,
	"RTMAX-2":  sigrtmax - 2,
	"RTMAX-1":  sigrtmax - 1,
	"RTMAX":    sigrtmax,
}

// CatchAll catches all signals and relays them to the specified channel.
// The channel should be buffered; signal.Notify does not block on delivery.
func CatchAll(sigc chan os.Signal) {
	handledSigs := make([]os.Signal, 0, len(signalMap))
	for _, s := range signalMap {
		handledSigs = append(handledSigs, s)
	}
	signal.Notify(sigc, handledSigs...)
}

// StopCatch stops catching the signals and closes the specified channel.
// Stop is called before close so nothing can send on the channel afterwards.
func StopCatch(sigc chan os.Signal) {
	signal.Stop(sigc)
	close(sigc)
}

View File

@ -0,0 +1,99 @@
// +build !linux
// Signal handling for Linux only.
package signal
import (
"os"
"syscall"
)
const (
	sigrtmin = 34
	sigrtmax = 64
	// Placeholder value: there is no portable SIGWINCH off Linux, so a
	// sentinel is used to keep cross-platform builds compiling.
	SIGWINCH = syscall.Signal(0xff)
)

// signalMap is a map of Linux signals.
// These constants are sourced from the Linux version of golang.org/x/sys/unix
// (I don't see much risk of this changing).
// This should work as long as Podman only runs containers on Linux, which seems
// a safe assumption for now.
var signalMap = map[string]syscall.Signal{
	"ABRT":     syscall.Signal(0x6),
	"ALRM":     syscall.Signal(0xe),
	"BUS":      syscall.Signal(0x7),
	"CHLD":     syscall.Signal(0x11),
	"CLD":      syscall.Signal(0x11),
	"CONT":     syscall.Signal(0x12),
	"FPE":      syscall.Signal(0x8),
	"HUP":      syscall.Signal(0x1),
	"ILL":      syscall.Signal(0x4),
	"INT":      syscall.Signal(0x2),
	"IO":       syscall.Signal(0x1d),
	"IOT":      syscall.Signal(0x6),
	"KILL":     syscall.Signal(0x9),
	"PIPE":     syscall.Signal(0xd),
	"POLL":     syscall.Signal(0x1d),
	"PROF":     syscall.Signal(0x1b),
	"PWR":      syscall.Signal(0x1e),
	"QUIT":     syscall.Signal(0x3),
	"SEGV":     syscall.Signal(0xb),
	"STKFLT":   syscall.Signal(0x10),
	"STOP":     syscall.Signal(0x13),
	"SYS":      syscall.Signal(0x1f),
	"TERM":     syscall.Signal(0xf),
	"TRAP":     syscall.Signal(0x5),
	"TSTP":     syscall.Signal(0x14),
	"TTIN":     syscall.Signal(0x15),
	"TTOU":     syscall.Signal(0x16),
	"URG":      syscall.Signal(0x17),
	"USR1":     syscall.Signal(0xa),
	"USR2":     syscall.Signal(0xc),
	"VTALRM":   syscall.Signal(0x1a),
	"WINCH":    syscall.Signal(0x1c),
	"XCPU":     syscall.Signal(0x18),
	"XFSZ":     syscall.Signal(0x19),
	"RTMIN":    sigrtmin,
	"RTMIN+1":  sigrtmin + 1,
	"RTMIN+2":  sigrtmin + 2,
	"RTMIN+3":  sigrtmin + 3,
	"RTMIN+4":  sigrtmin + 4,
	"RTMIN+5":  sigrtmin + 5,
	"RTMIN+6":  sigrtmin + 6,
	"RTMIN+7":  sigrtmin + 7,
	"RTMIN+8":  sigrtmin + 8,
	"RTMIN+9":  sigrtmin + 9,
	"RTMIN+10": sigrtmin + 10,
	"RTMIN+11": sigrtmin + 11,
	"RTMIN+12": sigrtmin + 12,
	"RTMIN+13": sigrtmin + 13,
	"RTMIN+14": sigrtmin + 14,
	"RTMIN+15": sigrtmin + 15,
	"RTMAX-14": sigrtmax - 14,
	"RTMAX-13": sigrtmax - 13,
	"RTMAX-12": sigrtmax - 12,
	"RTMAX-11": sigrtmax - 11,
	"RTMAX-10": sigrtmax - 10,
	"RTMAX-9":  sigrtmax - 9,
	"RTMAX-8":  sigrtmax - 8,
	"RTMAX-7":  sigrtmax - 7,
	"RTMAX-6":  sigrtmax - 6,
	"RTMAX-5":  sigrtmax - 5,
	"RTMAX-4":  sigrtmax - 4,
	"RTMAX-3":  sigrtmax - 3,
	"RTMAX-2":  sigrtmax - 2,
	"RTMAX-1":  sigrtmax - 1,
	"RTMAX":    sigrtmax,
}

// CatchAll catches all signals and relays them to the specified channel.
// Unsupported off Linux: this stub exists only so the package compiles.
func CatchAll(sigc chan os.Signal) {
	panic("Unsupported on non-linux platforms")
}

// StopCatch stops catching the signals and closes the specified channel.
// Unsupported off Linux: this stub exists only so the package compiles.
func StopCatch(sigc chan os.Signal) {
	panic("Unsupported on non-linux platforms")
}

View File

@ -225,7 +225,7 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerWorkingDir st
logrus.Warnf("Path %q from %q doesn't exist, skipping", hostDirOrFile, filePath)
continue
}
return nil, errors.Wrapf(err, "failed to stat %q", hostDirOrFile)
return nil, err
}
ctrDirOrFileOnHost := filepath.Join(containerWorkingDir, ctrDirOrFile)
@ -246,11 +246,11 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerWorkingDir st
switch mode := fileInfo.Mode(); {
case mode.IsDir():
if err = os.MkdirAll(ctrDirOrFileOnHost, mode.Perm()); err != nil {
return nil, errors.Wrapf(err, "making container directory %q failed", ctrDirOrFileOnHost)
return nil, errors.Wrap(err, "making container directory")
}
data, err := getHostSubscriptionData(hostDirOrFile, mode.Perm())
if err != nil {
return nil, errors.Wrapf(err, "getting host subscription data failed")
return nil, errors.Wrap(err, "getting host subscription data")
}
for _, s := range data {
if err := s.saveTo(ctrDirOrFileOnHost); err != nil {
@ -260,7 +260,7 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerWorkingDir st
case mode.IsRegular():
data, err := readFileOrDir("", hostDirOrFile, mode.Perm())
if err != nil {
return nil, errors.Wrapf(err, "error reading file %q", hostDirOrFile)
return nil, err
}
for _, s := range data {
@ -268,7 +268,7 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerWorkingDir st
return nil, err
}
if err := ioutil.WriteFile(ctrDirOrFileOnHost, s.data, s.mode); err != nil {
return nil, errors.Wrapf(err, "error saving data to container filesystem on host %q", ctrDirOrFileOnHost)
return nil, errors.Wrap(err, "saving data to container filesystem")
}
}
default:
@ -285,7 +285,7 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerWorkingDir st
}
}
} else if err != nil {
return nil, errors.Wrapf(err, "error getting status of %q", ctrDirOrFileOnHost)
return nil, err
}
m := rspec.Mount{
@ -309,10 +309,10 @@ func addFIPSModeSubscription(mounts *[]rspec.Mount, containerWorkingDir, mountPo
ctrDirOnHost := filepath.Join(containerWorkingDir, subscriptionsDir)
if _, err := os.Stat(ctrDirOnHost); os.IsNotExist(err) {
if err = idtools.MkdirAllAs(ctrDirOnHost, 0755, uid, gid); err != nil { //nolint
return errors.Wrapf(err, "making container directory %q on host failed", ctrDirOnHost)
return err
}
if err = label.Relabel(ctrDirOnHost, mountLabel, false); err != nil {
return errors.Wrapf(err, "error applying correct labels on %q", ctrDirOnHost)
return errors.Wrapf(err, "applying correct labels on %q", ctrDirOnHost)
}
}
fipsFile := filepath.Join(ctrDirOnHost, "system-fips")
@ -320,7 +320,7 @@ func addFIPSModeSubscription(mounts *[]rspec.Mount, containerWorkingDir, mountPo
if _, err := os.Stat(fipsFile); os.IsNotExist(err) {
file, err := os.Create(fipsFile)
if err != nil {
return errors.Wrapf(err, "error creating system-fips file in container for FIPS mode")
return errors.Wrap(err, "creating system-fips file in container for FIPS mode")
}
defer file.Close()
}
@ -342,7 +342,7 @@ func addFIPSModeSubscription(mounts *[]rspec.Mount, containerWorkingDir, mountPo
if os.IsNotExist(err) {
return nil
}
return errors.Wrapf(err, "failed to stat FIPS Backend directory %q", ctrDirOnHost)
return errors.Wrap(err, "FIPS Backend directory")
}
if !mountExists(*mounts, destDir) {

View File

@ -3,7 +3,7 @@ package supplemented
import (
"errors"
"github.com/containers/buildah/pkg/manifests"
"github.com/containers/common/pkg/manifests"
)
var (

View File

@ -0,0 +1,131 @@
package timetype
// code adapted from https://github.com/moby/moby/blob/master/api/types/time/timestamp.go
import (
"fmt"
"math"
"strconv"
"strings"
"time"
)
// These are additional predefined layouts for use in Time.Format and Time.Parse
// with --since and --until parameters for `docker logs` and `docker events`.
// "Local" variants omit the zone suffix and are parsed in the reference
// time's location (see GetTimestamp).
const (
	rFC3339Local     = "2006-01-02T15:04:05"           // RFC3339 with local timezone
	rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
	dateWithZone     = "2006-01-02Z07:00"              // RFC3339 with time at 00:00:00
	dateLocal        = "2006-01-02"                    // RFC3339 with local timezone and time at 00:00:00
)
// GetTimestamp tries to parse given string as golang duration,
// then RFC3339 time and finally as a Unix timestamp. If
// any of these were successful, it returns a Unix timestamp
// as string otherwise returns the given value back.
// In case of duration input, the returned timestamp is computed
// as the given reference time minus the amount of the duration.
func GetTimestamp(value string, reference time.Time) (string, error) {
	// Duration first: e.g. "10m" means "reference minus ten minutes".
	// "0" is excluded explicitly because ParseDuration accepts it, but it
	// should be treated as a Unix timestamp instead.
	if d, err := time.ParseDuration(value); value != "0" && err == nil {
		return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
	}

	var format string
	// if the string has a Z or a + or three dashes use parse otherwise use parseinlocation
	parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)

	// Pick the narrowest layout matching the input's shape: fractional
	// seconds, a "T" time portion (with 0-2 colons), or date only.
	if strings.Contains(value, ".") { // nolint:gocritic
		if parseInLocation {
			format = rFC3339NanoLocal
		} else {
			format = time.RFC3339Nano
		}
	} else if strings.Contains(value, "T") {
		// we want the number of colons in the T portion of the timestamp
		tcolons := strings.Count(value, ":")
		// if parseInLocation is off and we have a +/- zone offset (not Z) then
		// there will be an extra colon in the input for the tz offset subtract that
		// colon from the tcolons count
		if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
			tcolons--
		}
		if parseInLocation {
			switch tcolons {
			case 0:
				format = "2006-01-02T15"
			case 1:
				format = "2006-01-02T15:04"
			default:
				format = rFC3339Local
			}
		} else {
			switch tcolons {
			case 0:
				format = "2006-01-02T15Z07:00"
			case 1:
				format = "2006-01-02T15:04Z07:00"
			default:
				format = time.RFC3339
			}
		}
	} else if parseInLocation {
		format = dateLocal
	} else {
		format = dateWithZone
	}

	var t time.Time
	var err error

	if parseInLocation {
		// Zone-less input is interpreted in the reference time's zone.
		t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
	} else {
		t, err = time.Parse(format, value)
	}

	if err != nil {
		// if there is a `-` then it's an RFC3339 like timestamp
		if strings.Contains(value, "-") {
			return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
		}
		if _, _, err := parseTimestamp(value); err != nil {
			return "", fmt.Errorf("failed to parse value as time or duration: %q", value)
		}
		return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
	}

	return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
}
// ParseTimestamps returns seconds and nanoseconds from a timestamp of the
// form "%d.%09d" (seconds, then a nanosecond fraction). A fraction longer
// or shorter than 9 digits is scaled to nanoseconds. The two values are
// meant to feed time.Unix, for example:
//   seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0)
//   if err == nil { since := time.Unix(seconds, nanoseconds) }
// When value is empty, def is returned as the seconds with zero nanoseconds.
func ParseTimestamps(value string, def int64) (secs, nanoSecs int64, err error) {
	if len(value) == 0 {
		return def, 0, nil
	}
	return parseTimestamp(value)
}
// parseTimestamp splits "seconds[.fraction]" into (seconds, nanoseconds).
// The fraction is scaled to nanoseconds based on its digit count; note the
// scaling goes through float64, so extremely long fractions may round.
func parseTimestamp(value string) (int64, int64, error) { // nolint:gocritic
	parts := strings.SplitN(value, ".", 2)
	secs, err := strconv.ParseInt(parts[0], 10, 64)
	if err != nil {
		return secs, 0, err
	}
	if len(parts) == 1 {
		// No fractional part at all.
		return secs, 0, nil
	}
	nanos, err := strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		return secs, nanos, err
	}
	// Scale the fraction to nanoseconds (9 digits), e.g. ".5" -> 5e8.
	nanos = int64(float64(nanos) * math.Pow(float64(10), float64(9-len(parts[1]))))
	return secs, nanos, nil
}

View File

@ -1,4 +1,4 @@
package version
// Version is the version of the build.
const Version = "0.36.0"
const Version = "0.37.0"

View File

@ -1,119 +0,0 @@
package tarfile
import (
"context"
"io"
internal "github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
)
// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
type Destination struct {
internal *internal.Destination
archive *internal.Writer
}
// NewDestination returns a tarfile.Destination for the specified io.Writer.
// Deprecated: please use NewDestinationWithContext instead
func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination {
return NewDestinationWithContext(nil, dest, ref)
}
// NewDestinationWithContext returns a tarfile.Destination for the specified io.Writer.
func NewDestinationWithContext(sys *types.SystemContext, dest io.Writer, ref reference.NamedTagged) *Destination {
archive := internal.NewWriter(dest)
return &Destination{
internal: internal.NewDestination(sys, archive, ref),
archive: archive,
}
}
// AddRepoTags adds the specified tags to the destination's repoTags.
func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
d.internal.AddRepoTags(tags)
}
// SupportedManifestMIMETypes tells which manifest mime types the destination supports
// If an empty slice or nil it's returned, then any mime type can be tried to upload
func (d *Destination) SupportedManifestMIMETypes() []string {
return d.internal.SupportedManifestMIMETypes()
}
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *Destination) SupportsSignatures(ctx context.Context) error {
return d.internal.SupportsSignatures(ctx)
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *Destination) AcceptsForeignLayerURLs() bool {
return d.internal.AcceptsForeignLayerURLs()
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *Destination) MustMatchRuntimeOS() bool {
return d.internal.MustMatchRuntimeOS()
}
// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *Destination) IgnoresEmbeddedDockerReference() bool {
return d.internal.IgnoresEmbeddedDockerReference()
}
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *Destination) HasThreadSafePutBlob() bool {
return d.internal.HasThreadSafePutBlob()
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
return d.internal.PutBlob(ctx, stream, inputInfo, cache, isConfig)
}
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
return d.internal.TryReusingBlob(ctx, info, cache, canSubstitute)
}
// PutManifest writes manifest to the destination.
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
// there can be no secondary manifests.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
return d.internal.PutManifest(ctx, m, instanceDigest)
}
// PutSignatures would add the given signatures to the docker tarfile (currently not supported).
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
return d.internal.PutSignatures(ctx, signatures, instanceDigest)
}
// Commit finishes writing data to the underlying io.Writer.
// It is the caller's responsibility to close it, if necessary.
func (d *Destination) Commit(ctx context.Context) error {
return d.archive.Close()
}

View File

@ -1,3 +0,0 @@
// Package tarfile is an internal implementation detail of some transports.
// Do not use outside of the github.com/containers/image repo!
package tarfile

View File

@ -1,104 +0,0 @@
package tarfile
import (
"context"
"io"
internal "github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
)
// Source is a partial implementation of types.ImageSource for reading from tarPath.
// Most users should use this via implementations of ImageReference from docker/archive or docker/daemon.
type Source struct {
internal *internal.Source
}
// NewSourceFromFile returns a tarfile.Source for the specified path.
// Deprecated: Please use NewSourceFromFileWithContext which will allows you to configure temp directory
// for big files through SystemContext.BigFilesTemporaryDir
func NewSourceFromFile(path string) (*Source, error) {
return NewSourceFromFileWithContext(nil, path)
}
// NewSourceFromFileWithContext returns a tarfile.Source for the specified path.
func NewSourceFromFileWithContext(sys *types.SystemContext, path string) (*Source, error) {
archive, err := internal.NewReaderFromFile(sys, path)
if err != nil {
return nil, err
}
src := internal.NewSource(archive, true, nil, -1)
return &Source{internal: src}, nil
}
// NewSourceFromStream returns a tarfile.Source for the specified inputStream,
// which can be either compressed or uncompressed. The caller can close the
// inputStream immediately after NewSourceFromFile returns.
// Deprecated: Please use NewSourceFromStreamWithSystemContext which will allows you to configure
// temp directory for big files through SystemContext.BigFilesTemporaryDir
func NewSourceFromStream(inputStream io.Reader) (*Source, error) {
return NewSourceFromStreamWithSystemContext(nil, inputStream)
}
// NewSourceFromStreamWithSystemContext returns a tarfile.Source for the specified inputStream,
// which can be either compressed or uncompressed. The caller can close the
// inputStream immediately after NewSourceFromFile returns.
func NewSourceFromStreamWithSystemContext(sys *types.SystemContext, inputStream io.Reader) (*Source, error) {
archive, err := internal.NewReaderFromStream(sys, inputStream)
if err != nil {
return nil, err
}
src := internal.NewSource(archive, true, nil, -1)
return &Source{internal: src}, nil
}
// Close removes resources associated with an initialized Source, if any.
func (s *Source) Close() error {
return s.internal.Close()
}
// LoadTarManifest loads and decodes the manifest.json
func (s *Source) LoadTarManifest() ([]ManifestItem, error) {
return s.internal.TarManifest(), nil
}
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
// as the primary manifest can not be a list, so there can be no secondary instances.
func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
return s.internal.GetManifest(ctx, instanceDigest)
}
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *Source) HasThreadSafeGetBlob() bool {
return s.internal.HasThreadSafeGetBlob()
}
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
return s.internal.GetBlob(ctx, info, cache)
}
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
// as there can be no secondary manifests.
func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
return s.internal.GetSignatures(ctx, instanceDigest)
}
// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
// to read the image's layers.
// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
// as the primary manifest can not be a list, so there can be no secondary manifests.
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (s *Source) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
return s.internal.LayerInfosForCopy(ctx, instanceDigest)
}

View File

@ -1,8 +0,0 @@
package tarfile
import (
internal "github.com/containers/image/v5/docker/internal/tarfile"
)
// ManifestItem is an element of the array stored in the top-level manifest.json file.
type ManifestItem = internal.ManifestItem // All public members from the internal package remain accessible.

View File

@ -1 +1 @@
1.29.0
1.30.0

View File

@ -88,7 +88,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
if userDiskQuota {
if err := driver.subvolEnableQuota(); err != nil {
if err := driver.enableQuota(); err != nil {
return nil, err
}
}
@ -159,10 +159,6 @@ func (d *Driver) Metadata(id string) (map[string]string, error) {
// Cleanup unmounts the home directory.
func (d *Driver) Cleanup() error {
if err := d.subvolDisableQuota(); err != nil {
return err
}
return mount.Unmount(d.home)
}
@ -320,7 +316,7 @@ func (d *Driver) updateQuotaStatus() {
d.once.Do(func() {
if !d.quotaEnabled {
// In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed
if err := subvolQgroupStatus(d.home); err != nil {
if err := qgroupStatus(d.home); err != nil {
// quota is still not enabled
return
}
@ -329,7 +325,7 @@ func (d *Driver) updateQuotaStatus() {
})
}
func (d *Driver) subvolEnableQuota() error {
func (d *Driver) enableQuota() error {
d.updateQuotaStatus()
if d.quotaEnabled {
@ -355,32 +351,6 @@ func (d *Driver) subvolEnableQuota() error {
return nil
}
func (d *Driver) subvolDisableQuota() error {
d.updateQuotaStatus()
if !d.quotaEnabled {
return nil
}
dir, err := openDir(d.home)
if err != nil {
return err
}
defer closeDir(dir)
var args C.struct_btrfs_ioctl_quota_ctl_args
args.cmd = C.BTRFS_QUOTA_CTL_DISABLE
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error())
}
d.quotaEnabled = false
return nil
}
func (d *Driver) subvolRescanQuota() error {
d.updateQuotaStatus()
@ -423,11 +393,11 @@ func subvolLimitQgroup(path string, size uint64) error {
return nil
}
// subvolQgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path
// qgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path
// with search key of BTRFS_QGROUP_STATUS_KEY.
// In case qgroup is enabled, the returned key type will match BTRFS_QGROUP_STATUS_KEY.
// For more details please see https://github.com/kdave/btrfs-progs/blob/v4.9/qgroup.c#L1035
func subvolQgroupStatus(path string) error {
func qgroupStatus(path string) error {
dir, err := openDir(path)
if err != nil {
return err
@ -603,7 +573,7 @@ func (d *Driver) setStorageSize(dir string, driver *Driver) error {
return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace)))
}
if err := d.subvolEnableQuota(); err != nil {
if err := d.enableQuota(); err != nil {
return err
}
@ -674,7 +644,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil {
if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace {
if err := d.subvolEnableQuota(); err != nil {
if err := d.enableQuota(); err != nil {
return "", err
}
if err := subvolLimitQgroup(dir, size); err != nil {

View File

@ -9,7 +9,7 @@ require (
github.com/docker/go-units v0.4.0
github.com/google/go-intervals v0.0.2
github.com/hashicorp/go-multierror v1.1.1
github.com/klauspost/compress v1.11.13
github.com/klauspost/compress v1.12.1
github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-shellwords v1.0.11
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible

View File

@ -267,6 +267,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@ -337,8 +339,8 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13 h1:eSvu8Tmq6j2psUJqJrLcWH6K3w5Dwc+qipbaA6eVEN4=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.1 h1:/+xsCsk06wE38cyiqOR/o7U2fSftcH72xD+BQXmja/g=
github.com/klauspost/compress v1.12.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=

View File

@ -7,12 +7,17 @@ import (
"sync"
"github.com/pkg/errors"
"github.com/syndtr/gocapability/capability"
)
var (
homeDirOnce sync.Once
homeDirErr error
homeDir string
hasCapSysAdminOnce sync.Once
hasCapSysAdminRet bool
hasCapSysAdminErr error
)
// HomeDir returns the home directory for the current user.
@ -32,3 +37,20 @@ func HomeDir() (string, error) {
})
return homeDir, homeDirErr
}
// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN.
func HasCapSysAdmin() (bool, error) {
hasCapSysAdminOnce.Do(func() {
currentCaps, err := capability.NewPid2(0)
if err != nil {
hasCapSysAdminErr = err
return
}
if err = currentCaps.Load(); err != nil {
hasCapSysAdminErr = err
return
}
hasCapSysAdminRet = currentCaps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN)
})
return hasCapSysAdminRet, hasCapSysAdminErr
}

View File

@ -8,8 +8,10 @@
# Please keep the list sorted.
Amazon.com, Inc
Damian Gryski <dgryski@gmail.com>
Google Inc.
Jan Mercl <0xjnml@gmail.com>
Klaus Post <klauspost@gmail.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Sebastien Binet <seb.binet@gmail.com>

View File

@ -28,7 +28,9 @@
Damian Gryski <dgryski@gmail.com>
Jan Mercl <0xjnml@gmail.com>
Jonathan Swinney <jswinney@amazon.com>
Kai Backman <kaib@golang.org>
Klaus Post <klauspost@gmail.com>
Marc-Antoine Ruel <maruel@chromium.org>
Nigel Tao <nigeltao@golang.org>
Rob Pike <r@golang.org>

View File

@ -52,6 +52,8 @@ const (
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
//
// Decode handles the Snappy block format, not the Snappy stream format.
func Decode(dst, src []byte) ([]byte, error) {
dLen, s, err := decodedLen(src)
if err != nil {
@ -83,6 +85,8 @@ func NewReader(r io.Reader) *Reader {
}
// Reader is an io.Reader that can read Snappy-compressed bytes.
//
// Reader handles the Snappy stream format, not the Snappy block format.
type Reader struct {
r io.Reader
err error

View File

@ -184,7 +184,9 @@ tagLit60Plus:
// checks. In the asm version, we code it once instead of once per switch case.
ADDQ CX, SI
SUBQ $58, SI
CMPQ SI, R13
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// case x == 60:
@ -230,7 +232,9 @@ tagCopy4:
ADDQ $5, SI
// if uint(s) > uint(len(src)) { etc }
CMPQ SI, R13
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// length = 1 + int(src[s-5])>>2
@ -247,7 +251,9 @@ tagCopy2:
ADDQ $3, SI
// if uint(s) > uint(len(src)) { etc }
CMPQ SI, R13
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// length = 1 + int(src[s-3])>>2
@ -271,7 +277,9 @@ tagCopy:
ADDQ $2, SI
// if uint(s) > uint(len(src)) { etc }
CMPQ SI, R13
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))

494
vendor/github.com/golang/snappy/decode_arm64.s generated vendored Normal file
View File

@ -0,0 +1,494 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The asm code generally follows the pure Go code in decode_other.go, except
// where marked with a "!!!".
// func decode(dst, src []byte) int
//
// All local variables fit into registers. The non-zero stack size is only to
// spill registers and push args when issuing a CALL. The register allocation:
// - R2 scratch
// - R3 scratch
// - R4 length or x
// - R5 offset
// - R6 &src[s]
// - R7 &dst[d]
// + R8 dst_base
// + R9 dst_len
// + R10 dst_base + dst_len
// + R11 src_base
// + R12 src_len
// + R13 src_base + src_len
// - R14 used by doCopy
// - R15 used by doCopy
//
// The registers R8-R13 (marked with a "+") are set at the start of the
// function, and after a CALL returns, and are not otherwise modified.
//
// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7.
// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6.
TEXT ·decode(SB), NOSPLIT, $56-56
// Initialize R6, R7 and R8-R13.
MOVD dst_base+0(FP), R8
MOVD dst_len+8(FP), R9
MOVD R8, R7
MOVD R8, R10
ADD R9, R10, R10
MOVD src_base+24(FP), R11
MOVD src_len+32(FP), R12
MOVD R11, R6
MOVD R11, R13
ADD R12, R13, R13
loop:
// for s < len(src)
CMP R13, R6
BEQ end
// R4 = uint32(src[s])
//
// switch src[s] & 0x03
MOVBU (R6), R4
MOVW R4, R3
ANDW $3, R3
MOVW $1, R1
CMPW R1, R3
BGE tagCopy
// ----------------------------------------
// The code below handles literal tags.
// case tagLiteral:
// x := uint32(src[s] >> 2)
// switch
MOVW $60, R1
LSRW $2, R4, R4
CMPW R4, R1
BLS tagLit60Plus
// case x < 60:
// s++
ADD $1, R6, R6
doLit:
// This is the end of the inner "switch", when we have a literal tag.
//
// We assume that R4 == x and x fits in a uint32, where x is the variable
// used in the pure Go decode_other.go code.
// length = int(x) + 1
//
// Unlike the pure Go code, we don't need to check if length <= 0 because
// R4 can hold 64 bits, so the increment cannot overflow.
ADD $1, R4, R4
// Prepare to check if copying length bytes will run past the end of dst or
// src.
//
// R2 = len(dst) - d
// R3 = len(src) - s
MOVD R10, R2
SUB R7, R2, R2
MOVD R13, R3
SUB R6, R3, R3
// !!! Try a faster technique for short (16 or fewer bytes) copies.
//
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
// goto callMemmove // Fall back on calling runtime·memmove.
// }
//
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
// against 21 instead of 16, because it cannot assume that all of its input
// is contiguous in memory and so it needs to leave enough source bytes to
// read the next tag without refilling buffers, but Go's Decode assumes
// contiguousness (the src argument is a []byte).
CMP $16, R4
BGT callMemmove
CMP $16, R2
BLT callMemmove
CMP $16, R3
BLT callMemmove
// !!! Implement the copy from src to dst as a 16-byte load and store.
// (Decode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only length bytes, but that's
// OK. If the input is a valid Snappy encoding then subsequent iterations
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
// non-nil error), so the overrun will be ignored.
//
// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
LDP 0(R6), (R14, R15)
STP (R14, R15), 0(R7)
// d += length
// s += length
ADD R4, R7, R7
ADD R4, R6, R6
B loop
callMemmove:
// if length > len(dst)-d || length > len(src)-s { etc }
CMP R2, R4
BGT errCorrupt
CMP R3, R4
BGT errCorrupt
// copy(dst[d:], src[s:s+length])
//
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
// R7, R6 and R4 as arguments. Coincidentally, we also need to spill those
// three registers to the stack, to save local variables across the CALL.
MOVD R7, 8(RSP)
MOVD R6, 16(RSP)
MOVD R4, 24(RSP)
MOVD R7, 32(RSP)
MOVD R6, 40(RSP)
MOVD R4, 48(RSP)
CALL runtime·memmove(SB)
// Restore local variables: unspill registers from the stack and
// re-calculate R8-R13.
MOVD 32(RSP), R7
MOVD 40(RSP), R6
MOVD 48(RSP), R4
MOVD dst_base+0(FP), R8
MOVD dst_len+8(FP), R9
MOVD R8, R10
ADD R9, R10, R10
MOVD src_base+24(FP), R11
MOVD src_len+32(FP), R12
MOVD R11, R13
ADD R12, R13, R13
// d += length
// s += length
ADD R4, R7, R7
ADD R4, R6, R6
B loop
tagLit60Plus:
// !!! This fragment does the
//
// s += x - 58; if uint(s) > uint(len(src)) { etc }
//
// checks. In the asm version, we code it once instead of once per switch case.
ADD R4, R6, R6
SUB $58, R6, R6
MOVD R6, R3
SUB R11, R3, R3
CMP R12, R3
BGT errCorrupt
// case x == 60:
MOVW $61, R1
CMPW R1, R4
BEQ tagLit61
BGT tagLit62Plus
// x = uint32(src[s-1])
MOVBU -1(R6), R4
B doLit
tagLit61:
// case x == 61:
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
MOVHU -2(R6), R4
B doLit
tagLit62Plus:
CMPW $62, R4
BHI tagLit63
// case x == 62:
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
MOVHU -3(R6), R4
MOVBU -1(R6), R3
ORR R3<<16, R4
B doLit
tagLit63:
// case x == 63:
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
MOVWU -4(R6), R4
B doLit
// The code above handles literal tags.
// ----------------------------------------
// The code below handles copy tags.
tagCopy4:
// case tagCopy4:
// s += 5
ADD $5, R6, R6
// if uint(s) > uint(len(src)) { etc }
MOVD R6, R3
SUB R11, R3, R3
CMP R12, R3
BGT errCorrupt
// length = 1 + int(src[s-5])>>2
MOVD $1, R1
ADD R4>>2, R1, R4
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
MOVWU -4(R6), R5
B doCopy
tagCopy2:
// case tagCopy2:
// s += 3
ADD $3, R6, R6
// if uint(s) > uint(len(src)) { etc }
MOVD R6, R3
SUB R11, R3, R3
CMP R12, R3
BGT errCorrupt
// length = 1 + int(src[s-3])>>2
MOVD $1, R1
ADD R4>>2, R1, R4
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
MOVHU -2(R6), R5
B doCopy
tagCopy:
// We have a copy tag. We assume that:
// - R3 == src[s] & 0x03
// - R4 == src[s]
CMP $2, R3
BEQ tagCopy2
BGT tagCopy4
// case tagCopy1:
// s += 2
ADD $2, R6, R6
// if uint(s) > uint(len(src)) { etc }
MOVD R6, R3
SUB R11, R3, R3
CMP R12, R3
BGT errCorrupt
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
MOVD R4, R5
AND $0xe0, R5
MOVBU -1(R6), R3
ORR R5<<3, R3, R5
// length = 4 + int(src[s-2])>>2&0x7
MOVD $7, R1
AND R4>>2, R1, R4
ADD $4, R4, R4
doCopy:
// This is the end of the outer "switch", when we have a copy tag.
//
// We assume that:
// - R4 == length && R4 > 0
// - R5 == offset
// if offset <= 0 { etc }
MOVD $0, R1
CMP R1, R5
BLE errCorrupt
// if d < offset { etc }
MOVD R7, R3
SUB R8, R3, R3
CMP R5, R3
BLT errCorrupt
// if length > len(dst)-d { etc }
MOVD R10, R3
SUB R7, R3, R3
CMP R3, R4
BGT errCorrupt
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
//
// Set:
// - R14 = len(dst)-d
// - R15 = &dst[d-offset]
MOVD R10, R14
SUB R7, R14, R14
MOVD R7, R15
SUB R5, R15, R15
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
//
// First, try using two 8-byte load/stores, similar to the doLit technique
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
// and not one 16-byte load/store, and the first store has to be before the
// second load, due to the overlap if offset is in the range [8, 16).
//
// if length > 16 || offset < 8 || len(dst)-d < 16 {
// goto slowForwardCopy
// }
// copy 16 bytes
// d += length
CMP $16, R4
BGT slowForwardCopy
CMP $8, R5
BLT slowForwardCopy
CMP $16, R14
BLT slowForwardCopy
MOVD 0(R15), R2
MOVD R2, 0(R7)
MOVD 8(R15), R3
MOVD R3, 8(R7)
ADD R4, R7, R7
B loop
slowForwardCopy:
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
// can still try 8-byte load stores, provided we can overrun up to 10 extra
// bytes. As above, the overrun will be fixed up by subsequent iterations
// of the outermost loop.
//
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
// commentary says:
//
// ----
//
// The main part of this loop is a simple copy of eight bytes at a time
// until we've copied (at least) the requested amount of bytes. However,
// if d and d-offset are less than eight bytes apart (indicating a
// repeating pattern of length < 8), we first need to expand the pattern in
// order to get the correct results. For instance, if the buffer looks like
// this, with the eight-byte <d-offset> and <d> patterns marked as
// intervals:
//
// abxxxxxxxxxxxx
// [------] d-offset
// [------] d
//
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
// once, after which we can move <d> two bytes without moving <d-offset>:
//
// ababxxxxxxxxxx
// [------] d-offset
// [------] d
//
// and repeat the exercise until the two no longer overlap.
//
// This allows us to do very well in the special case of one single byte
// repeated many times, without taking a big hit for more general cases.
//
// The worst case of extra writing past the end of the match occurs when
// offset == 1 and length == 1; the last copy will read from byte positions
// [0..7] and write to [4..11], whereas it was only supposed to write to
// position 1. Thus, ten excess bytes.
//
// ----
//
// That "10 byte overrun" worst case is confirmed by Go's
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
// and finishSlowForwardCopy algorithm.
//
// if length > len(dst)-d-10 {
// goto verySlowForwardCopy
// }
SUB $10, R14, R14
CMP R14, R4
BGT verySlowForwardCopy
makeOffsetAtLeast8:
// !!! As above, expand the pattern so that offset >= 8 and we can use
// 8-byte load/stores.
//
// for offset < 8 {
// copy 8 bytes from dst[d-offset:] to dst[d:]
// length -= offset
// d += offset
// offset += offset
// // The two previous lines together means that d-offset, and therefore
// // R15, is unchanged.
// }
CMP $8, R5
BGE fixUpSlowForwardCopy
MOVD (R15), R3
MOVD R3, (R7)
SUB R5, R4, R4
ADD R5, R7, R7
ADD R5, R5, R5
B makeOffsetAtLeast8
fixUpSlowForwardCopy:
// !!! Add length (which might be negative now) to d (implied by R7 being
// &dst[d]) so that d ends up at the right place when we jump back to the
// top of the loop. Before we do that, though, we save R7 to R2 so that, if
// length is positive, copying the remaining length bytes will write to the
// right place.
MOVD R7, R2
ADD R4, R7, R7
finishSlowForwardCopy:
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
// length means that we overrun, but as above, that will be fixed up by
// subsequent iterations of the outermost loop.
MOVD $0, R1
CMP R1, R4
BLE loop
MOVD (R15), R3
MOVD R3, (R2)
ADD $8, R15, R15
ADD $8, R2, R2
SUB $8, R4, R4
B finishSlowForwardCopy
verySlowForwardCopy:
// verySlowForwardCopy is a simple implementation of forward copy. In C
// parlance, this is a do/while loop instead of a while loop, since we know
// that length > 0. In Go syntax:
//
// for {
// dst[d] = dst[d - offset]
// d++
// length--
// if length == 0 {
// break
// }
// }
MOVB (R15), R3
MOVB R3, (R7)
ADD $1, R15, R15
ADD $1, R7, R7
SUB $1, R4, R4
CBNZ R4, verySlowForwardCopy
B loop
// The code above handles copy tags.
// ----------------------------------------
end:
// This is the end of the "for s < len(src)".
//
// if d != len(dst) { etc }
CMP R10, R7
BNE errCorrupt
// return 0
MOVD $0, ret+48(FP)
RET
errCorrupt:
// return decodeErrCodeCorrupt
MOVD $1, R2
MOVD R2, ret+48(FP)
RET

View File

@ -5,6 +5,7 @@
// +build !appengine
// +build gc
// +build !noasm
// +build amd64 arm64
package snappy

Some files were not shown because too many files have changed in this diff Show More