Mirror of https://github.com/containers/buildah.git
Vendor in changes to support sirupsen/logrus
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
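
Most of the diff below is mechanical: each file switches the logger import from the old mixed-case vendor path to the canonical lowercase one. A minimal illustration of the rename (not taken verbatim from any single file in this commit):

import (
    // removed throughout this commit:
    // "github.com/Sirupsen/logrus"

    // added in its place:
    "github.com/sirupsen/logrus"
)

The non-mechanical parts come from re-vendoring github.com/containers/image and github.com/containers/storage at newer commits (see vendor.conf below), which brings along context.Context parameters on several ImageSource methods and support for Docker credential helpers.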
add.go (9 changed lines)

@@ -11,10 +11,9 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/storage/pkg/archive"
-	"github.com/containers/storage/pkg/chrootarchive"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 // addURL copies the contents of the source URL to the destination. This is
@@ -144,7 +143,7 @@ func (b *Builder) Add(destination string, extract bool, source ...string) error
 			return errors.Wrapf(err, "error ensuring directory %q exists", d)
 		}
 		logrus.Debugf("copying %q to %q", gsrc+string(os.PathSeparator)+"*", d+string(os.PathSeparator)+"*")
-		if err := chrootarchive.CopyWithTar(gsrc, d); err != nil {
+		if err := copyWithTar(gsrc, d); err != nil {
 			return errors.Wrapf(err, "error copying %q to %q", gsrc, d)
 		}
 		continue
@@ -159,14 +158,14 @@ func (b *Builder) Add(destination string, extract bool, source ...string) error
 		}
 		// Copy the file, preserving attributes.
 		logrus.Debugf("copying %q to %q", gsrc, d)
-		if err := chrootarchive.CopyFileWithTar(gsrc, d); err != nil {
+		if err := copyFileWithTar(gsrc, d); err != nil {
 			return errors.Wrapf(err, "error copying %q to %q", gsrc, d)
 		}
 		continue
 	}
 	// We're extracting an archive into the destination directory.
 	logrus.Debugf("extracting contents of %q into %q", gsrc, dest)
-	if err := chrootarchive.UntarPath(gsrc, dest); err != nil {
+	if err := untarPath(gsrc, dest); err != nil {
 		return errors.Wrapf(err, "error extracting %q into %q", gsrc, dest)
 	}
 }
@@ -5,9 +5,9 @@ import (
 	"path/filepath"
 	"strings"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/pkg/errors"
 	"github.com/projectatomic/buildah/imagebuildah"
+	"github.com/sirupsen/logrus"
 	"github.com/urfave/cli"
 )
 
@@ -6,10 +6,10 @@ import (
 	"os/user"
 	"testing"
 
-	"github.com/Sirupsen/logrus"
 	is "github.com/containers/image/storage"
 	"github.com/containers/storage"
 	"github.com/projectatomic/buildah"
+	"github.com/sirupsen/logrus"
 	"github.com/urfave/cli"
 )
 
@@ -3,10 +3,10 @@ package main
 import (
 	"strings"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/mattn/go-shellwords"
 	"github.com/pkg/errors"
 	"github.com/projectatomic/buildah"
+	"github.com/sirupsen/logrus"
 	"github.com/urfave/cli"
 )
 
@@ -9,10 +9,10 @@ import (
 
 	"encoding/json"
 
-	"github.com/Sirupsen/logrus"
 	is "github.com/containers/image/storage"
 	"github.com/containers/storage"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 	"github.com/urfave/cli"
 )
 
@@ -4,11 +4,11 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/storage"
 	ispecs "github.com/opencontainers/image-spec/specs-go"
 	rspecs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/projectatomic/buildah"
+	"github.com/sirupsen/logrus"
 	"github.com/urfave/cli"
 )
 
@@ -3,13 +3,13 @@ package main
 import (
 	"fmt"
 
-	"github.com/Sirupsen/logrus"
 	is "github.com/containers/image/storage"
 	"github.com/containers/image/transports"
 	"github.com/containers/image/transports/alltransports"
 	"github.com/containers/image/types"
 	"github.com/containers/storage"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 	"github.com/urfave/cli"
 )
 
@@ -6,10 +6,10 @@ import (
 	"strings"
 	"syscall"
 
-	"github.com/Sirupsen/logrus"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"github.com/projectatomic/buildah"
+	"github.com/sirupsen/logrus"
 	"github.com/urfave/cli"
 )
 
@@ -6,7 +6,6 @@ import (
 	"io"
 	"time"
 
-	"github.com/Sirupsen/logrus"
 	cp "github.com/containers/image/copy"
 	"github.com/containers/image/signature"
 	is "github.com/containers/image/storage"
@@ -17,6 +16,7 @@ import (
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"github.com/projectatomic/buildah/util"
+	"github.com/sirupsen/logrus"
 )
 
 var (
image.go (27 changed lines)

@@ -2,6 +2,7 @@ package buildah
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"io"
 	"io/ioutil"
@@ -9,7 +10,6 @@ import (
 	"path/filepath"
 	"time"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/image"
 	"github.com/containers/image/manifest"
@@ -23,6 +23,7 @@ import (
 	"github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/projectatomic/buildah/docker"
+	"github.com/sirupsen/logrus"
 )
 
 const (
@@ -68,32 +69,16 @@ type containerImageSource struct {
 }
 
 func (i *containerImageRef) NewImage(sc *types.SystemContext) (types.Image, error) {
-	src, err := i.NewImageSource(sc, nil)
+	src, err := i.NewImageSource(sc)
 	if err != nil {
 		return nil, err
 	}
 	return image.FromSource(src)
 }
 
-func selectManifestType(preferred string, acceptable, supported []string) string {
-	selected := preferred
-	for _, accept := range acceptable {
-		if preferred == accept {
-			return preferred
-		}
-		for _, support := range supported {
-			if accept == support {
-				selected = accept
-			}
-		}
-	}
-	return selected
-}
-
-func (i *containerImageRef) NewImageSource(sc *types.SystemContext, manifestTypes []string) (src types.ImageSource, err error) {
+func (i *containerImageRef) NewImageSource(sc *types.SystemContext) (src types.ImageSource, err error) {
 	// Decide which type of manifest and configuration output we're going to provide.
-	supportedManifestTypes := []string{v1.MediaTypeImageManifest, docker.V2S2MediaTypeManifest}
-	manifestType := selectManifestType(i.preferredManifestType, manifestTypes, supportedManifestTypes)
+	manifestType := i.preferredManifestType
 	// If it's not a format we support, return an error.
 	if manifestType != v1.MediaTypeImageManifest && manifestType != docker.V2S2MediaTypeManifest {
 		return nil, errors.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
@@ -417,7 +402,7 @@ func (i *containerImageSource) Reference() types.ImageReference {
 	return i.ref
 }
 
-func (i *containerImageSource) GetSignatures() ([][]byte, error) {
+func (i *containerImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
 	return nil, nil
 }
 
@@ -8,7 +8,6 @@ import (
 	"path/filepath"
 	"strings"
 
-	"github.com/Sirupsen/logrus"
 	is "github.com/containers/image/storage"
 	"github.com/containers/image/transports"
 	"github.com/containers/image/transports/alltransports"
@@ -22,6 +21,7 @@ import (
 	"github.com/openshift/imagebuilder"
 	"github.com/pkg/errors"
 	"github.com/projectatomic/buildah"
+	"github.com/sirupsen/logrus"
 )
 
 const (
@@ -9,10 +9,10 @@ import (
 	"path"
 	"strings"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/storage/pkg/chrootarchive"
 	"github.com/pkg/errors"
 	"github.com/projectatomic/buildah"
+	"github.com/sirupsen/logrus"
 )
 
 func cloneToDirectory(url, dir string) error {
new.go (2 changed lines)

@@ -4,7 +4,6 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/Sirupsen/logrus"
 	is "github.com/containers/image/storage"
 	"github.com/containers/image/transports"
 	"github.com/containers/image/transports/alltransports"
@@ -12,6 +11,7 @@ import (
 	"github.com/containers/storage"
 	"github.com/openshift/imagebuilder"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 const (
pull.go (2 changed lines)

@@ -3,7 +3,6 @@ package buildah
 import (
 	"strings"
 
-	"github.com/Sirupsen/logrus"
 	cp "github.com/containers/image/copy"
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/signature"
@@ -13,6 +12,7 @@ import (
 	"github.com/containers/image/types"
 	"github.com/containers/storage"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 func localImageNameForReference(store storage.Store, srcRef types.ImageReference) (string, error) {
run.go (8 changed lines)

@@ -8,13 +8,13 @@ import (
 	"path/filepath"
 	"strings"
 
-	"github.com/Sirupsen/logrus"
-	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/ioutils"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/opencontainers/runtime-tools/generate"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/crypto/ssh/terminal"
 )
 
 const (
@@ -120,7 +120,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, optionMounts
 			return errors.Wrapf(err, "error creating directory %q for volume %q in container %q", volumePath, volume, b.ContainerID)
 		}
 		srcPath := filepath.Join(mountPoint, volume)
-		if err = archive.CopyWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(err) {
+		if err = copyFileWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(err) {
 			return errors.Wrapf(err, "error populating directory %q for volume %q in container %q using contents of %q", volumePath, volume, b.ContainerID, srcPath)
 		}
 
@@ -194,7 +194,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
 	g.SetRootPath(mountPoint)
 	switch options.Terminal {
 	case DefaultTerminal:
-		g.SetProcessTerminal(logrus.IsTerminal(os.Stdout))
+		g.SetProcessTerminal(terminal.IsTerminal(int(os.Stdout.Fd())))
 	case WithTerminal:
 		g.SetProcessTerminal(true)
 	case WithoutTerminal:
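
The DefaultTerminal case above now detects a TTY with golang.org/x/crypto/ssh/terminal rather than the IsTerminal helper from the vendored Sirupsen/logrus copy, whose terminal_*.go files are deleted later in this commit. A minimal, self-contained sketch of the replacement call (assumes the x/crypto package is available):

package main

import (
    "fmt"
    "os"

    "golang.org/x/crypto/ssh/terminal"
)

func main() {
    // IsTerminal takes a raw file descriptor, hence the int(os.Stdout.Fd()) conversion.
    fmt.Println("stdout is a terminal:", terminal.IsTerminal(int(os.Stdout.Fd())))
}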
@@ -7,13 +7,13 @@ import (
 	"os"
 	"strings"
 
-	"github.com/Sirupsen/logrus"
 	is "github.com/containers/image/storage"
 	"github.com/containers/image/types"
 	"github.com/containers/storage"
 	"github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/projectatomic/buildah"
 	"github.com/projectatomic/buildah/docker"
+	"github.com/sirupsen/logrus"
 )
 
 func main() {
util.go (8 changed lines)

@@ -1,9 +1,17 @@
 package buildah
 
 import (
+	"github.com/containers/storage/pkg/chrootarchive"
 	"github.com/containers/storage/pkg/reexec"
 )
 
+var (
+	// CopyWithTar defines the copy method to use.
+	copyWithTar     = chrootarchive.NewArchiver(nil).CopyWithTar
+	copyFileWithTar = chrootarchive.NewArchiver(nil).CopyFileWithTar
+	untarPath       = chrootarchive.NewArchiver(nil).UntarPath
+)
+
 // InitReexec is a wrapper for reexec.Init(). It should be called at
 // the start of main(), and if it returns true, main() should return
 // immediately.
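
The three new package-level variables wrap a single chrootarchive.Archiver so that add.go and run.go (changed above) call copyWithTar, copyFileWithTar, and untarPath instead of reaching into the archive and chrootarchive packages directly. An illustrative wrapper showing how a caller inside the package uses them after this commit (the function itself is hypothetical; the real call sites are in add.go and run.go):

// exampleCopy is illustrative only, not part of the diff.
func exampleCopy(srcDir, destDir, srcFile, destFile, archivePath, extractDir string) error {
    // Recursively copy a directory tree, preserving attributes.
    if err := copyWithTar(srcDir, destDir); err != nil {
        return err
    }
    // Copy a single file, preserving attributes.
    if err := copyFileWithTar(srcFile, destFile); err != nil {
        return err
    }
    // Extract a tar archive into a directory.
    return untarPath(archivePath, extractDir)
}

Because these are variables rather than functions, they could in principle be swapped out (for example in tests), though this commit does not do so.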
vendor.conf (28 changed lines)

@@ -1,14 +1,15 @@
 github.com/BurntSushi/toml master
 github.com/Nvveen/Gotty master
 github.com/blang/semver master
-github.com/containers/image 106607808da3cff168be56821e994611c919d283
+github.com/containers/image 063852766c3e82ec8359ce5f6612e056f3efaa76
-github.com/containers/storage 5d8c2f87387fa5be9fa526ae39fbd79b8bdf27be
+github.com/containers/storage 43c477703fe73129e2186cf730601c52d309c3ef
-github.com/docker/distribution master
+github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
-github.com/docker/docker 0f9ec7e47072b0c2e954b5b821bde5c1fe81bfa7
+github.com/docker/docker 30eb4d8cdc422b023d5f11f29a82ecb73554183b
 github.com/docker/engine-api master
-github.com/docker/go-connections e15c02316c12de00874640cd76311849de2aeed5
+github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
-github.com/docker/go-units master
+github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
-github.com/docker/libtrust master
+github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
+github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
 github.com/fsouza/go-dockerclient master
 github.com/ghodss/yaml master
 github.com/golang/glog master
@@ -19,19 +20,19 @@ github.com/imdario/mergo master
 github.com/mattn/go-runewidth master
 github.com/mattn/go-shellwords master
 github.com/mistifyio/go-zfs master
-github.com/moby/moby 0f9ec7e47072b0c2e954b5b821bde5c1fe81bfa7
+github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c
 github.com/mtrmac/gpgme master
 github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc
 github.com/opencontainers/image-spec v1.0.0
 github.com/opencontainers/runc master
 github.com/opencontainers/runtime-spec v1.0.0
-github.com/opencontainers/runtime-tools 2d270b8764c02228eeb13e36f076f5ce6f2e3591
+github.com/opencontainers/runtime-tools master
 github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
 github.com/openshift/imagebuilder master
 github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
 github.com/pborman/uuid master
 github.com/pkg/errors master
-github.com/Sirupsen/logrus master
+github.com/sirupsen/logrus master
 github.com/syndtr/gocapability master
 github.com/tchap/go-patricia master
 github.com/urfave/cli master
@@ -45,3 +46,10 @@ gopkg.in/yaml.v2 cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
 k8s.io/apimachinery master
 k8s.io/client-go master
 k8s.io/kubernetes master
+github.com/hashicorp/go-multierror master
+github.com/hashicorp/errwrap master
+github.com/xeipuuv/gojsonschema master
+github.com/xeipuuv/gojsonreference master
+github.com/containerd/continuity master
+github.com/gogo/protobuf master
+github.com/xeipuuv/gojsonpointer master
vendor/github.com/Sirupsen/logrus/terminal_appengine.go (generated, vendored; 10 lines deleted)

@@ -1,10 +0,0 @@
-// +build appengine
-
-package logrus
-
-import "io"
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal(f io.Writer) bool {
-	return true
-}

vendor/github.com/Sirupsen/logrus/terminal_bsd.go (generated, vendored; 10 lines deleted)

@@ -1,10 +0,0 @@
-// +build darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
-
-package logrus
-
-import "syscall"
-
-const ioctlReadTermios = syscall.TIOCGETA
-
-type Termios syscall.Termios

vendor/github.com/Sirupsen/logrus/terminal_notwindows.go (generated, vendored; 28 lines deleted)

@@ -1,28 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
-
-package logrus
-
-import (
-	"io"
-	"os"
-	"syscall"
-	"unsafe"
-)
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal(f io.Writer) bool {
-	var termios Termios
-	switch v := f.(type) {
-	case *os.File:
-		_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
-		return err == 0
-	default:
-		return false
-	}
-}

vendor/github.com/Sirupsen/logrus/terminal_solaris.go (generated, vendored; 21 lines deleted)

@@ -1,21 +0,0 @@
-// +build solaris,!appengine
-
-package logrus
-
-import (
-	"io"
-	"os"
-
-	"golang.org/x/sys/unix"
-)
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(f io.Writer) bool {
-	switch v := f.(type) {
-	case *os.File:
-		_, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA)
-		return err == nil
-	default:
-		return false
-	}
-}

vendor/github.com/Sirupsen/logrus/terminal_windows.go (generated, vendored; 33 lines deleted)

@@ -1,33 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows,!appengine
-
-package logrus
-
-import (
-	"io"
-	"os"
-	"syscall"
-	"unsafe"
-)
-
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-
-var (
-	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
-)
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal(f io.Writer) bool {
-	switch v := f.(type) {
-	case *os.File:
-		var st uint32
-		r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0)
-		return r != 0 && e == 0
-	default:
-		return false
-	}
-}
vendor/github.com/containers/image/README.md (generated, vendored; 5 changed lines)

@@ -62,9 +62,8 @@ or use the build tags described below to avoid the dependencies (e.g. using `go
 
 - `containers_image_openpgp`: Use a Golang-only OpenPGP implementation for signature verification instead of the default cgo/gpgme-based implementation;
 the primary downside is that creating new signatures with the Golang-only implementation is not supported.
-- `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries.
-(Note that explicitly importing `github.com/containers/image/ostree` will still depend on the `libostree` library, this build tag only affects generic users of …`/alltransports`.)
+- `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries. The `github.com/containers/image/ostree` package is completely disabled
+and impossible to import when this build tag is in use.
 
 ## Contributing
 
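
For context, a consumer of the library enables these tags on the Go toolchain command line, roughly as follows (illustrative invocation, not part of the diff):

go build -tags "containers_image_openpgp containers_image_ostree_stub" ./...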
vendor/github.com/containers/image/copy/copy.go (generated, vendored; 13 changed lines)

@@ -3,6 +3,7 @@ package copy
 import (
 	"bytes"
 	"compress/gzip"
+	"context"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -13,7 +14,6 @@ import (
 
 	pb "gopkg.in/cheggaaa/pb.v1"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/image"
 	"github.com/containers/image/pkg/compression"
 	"github.com/containers/image/signature"
@@ -21,6 +21,7 @@ import (
 	"github.com/containers/image/types"
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 type digestingReader struct {
@@ -128,9 +129,7 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
 		}
 	}()
 
-	destSupportedManifestMIMETypes := dest.SupportedManifestMIMETypes()
-
-	rawSource, err := srcRef.NewImageSource(options.SourceCtx, destSupportedManifestMIMETypes)
+	rawSource, err := srcRef.NewImageSource(options.SourceCtx)
 	if err != nil {
 		return errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
 	}
@@ -171,7 +170,7 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
 		sigs = [][]byte{}
 	} else {
 		writeReport("Getting image source signatures\n")
-		s, err := src.Signatures()
+		s, err := src.Signatures(context.TODO())
 		if err != nil {
 			return errors.Wrap(err, "Error reading signatures")
 		}
@@ -194,7 +193,7 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
 
 	// We compute preferredManifestMIMEType only to show it in error messages.
 	// Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
-	preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := determineManifestConversion(&manifestUpdates, src, destSupportedManifestMIMETypes, canModifyManifest)
+	preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := determineManifestConversion(&manifestUpdates, src, dest.SupportedManifestMIMETypes(), canModifyManifest)
 	if err != nil {
 		return err
 	}
@@ -582,7 +581,7 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo
 	bar.ShowPercent = false
 	bar.Start()
 	destStream = bar.NewProxyReader(destStream)
-	defer fmt.Fprint(ic.reportWriter, "\n")
+	defer bar.Finish()
 
 	// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
 	var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
vendor/github.com/containers/image/copy/manifest.go (generated, vendored; 2 changed lines)

@@ -3,10 +3,10 @@ package copy
 import (
 	"strings"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 // preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
vendor/github.com/containers/image/directory/directory_src.go (generated, vendored; 3 changed lines)

@@ -1,6 +1,7 @@
 package directory
 
 import (
+	"context"
 	"io"
 	"io/ioutil"
 	"os"
@@ -59,7 +60,7 @@ func (s *dirImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, err
 	return r, fi.Size(), nil
 }
 
-func (s *dirImageSource) GetSignatures() ([][]byte, error) {
+func (s *dirImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
 	signatures := [][]byte{}
 	for i := 0; ; i++ {
 		signature, err := ioutil.ReadFile(s.ref.signaturePath(i))
vendor/github.com/containers/image/directory/directory_transport.go (generated, vendored; 6 changed lines)

@@ -143,11 +143,9 @@ func (ref dirReference) NewImage(ctx *types.SystemContext) (types.Image, error)
 	return image.FromSource(src)
 }
 
-// NewImageSource returns a types.ImageSource for this reference,
-// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
-// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
+// NewImageSource returns a types.ImageSource for this reference.
 // The caller must call .Close() on the returned ImageSource.
-func (ref dirReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
+func (ref dirReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
 	return newImageSource(ref), nil
 }
 
vendor/github.com/containers/image/docker/archive/src.go (generated, vendored; 2 changed lines)

@@ -1,9 +1,9 @@
 package archive
 
 import (
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/docker/tarfile"
 	"github.com/containers/image/types"
+	"github.com/sirupsen/logrus"
 )
 
 type archiveImageSource struct {
vendor/github.com/containers/image/docker/archive/transport.go (generated, vendored; 6 changed lines)

@@ -134,11 +134,9 @@ func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.Image, err
 	return ctrImage.FromSource(src)
 }
 
-// NewImageSource returns a types.ImageSource for this reference,
-// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
-// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
+// NewImageSource returns a types.ImageSource for this reference.
 // The caller must call .Close() on the returned ImageSource.
-func (ref archiveReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
+func (ref archiveReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
 	return newImageSource(ctx, ref), nil
 }
 
vendor/github.com/containers/image/docker/daemon/daemon_dest.go (generated, vendored; 2 changed lines)

@@ -3,12 +3,12 @@ package daemon
 import (
 	"io"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/docker/tarfile"
 	"github.com/containers/image/types"
 	"github.com/docker/docker/client"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 	"golang.org/x/net/context"
 )
 
vendor/github.com/containers/image/docker/daemon/daemon_transport.go (generated, vendored; 6 changed lines)

@@ -161,11 +161,9 @@ func (ref daemonReference) NewImage(ctx *types.SystemContext) (types.Image, erro
 	return image.FromSource(src)
 }
 
-// NewImageSource returns a types.ImageSource for this reference,
-// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
-// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
+// NewImageSource returns a types.ImageSource for this reference.
 // The caller must call .Close() on the returned ImageSource.
-func (ref daemonReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
+func (ref daemonReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
 	return newImageSource(ctx, ref)
 }
 
137
vendor/github.com/containers/image/docker/docker_client.go
generated
vendored
137
vendor/github.com/containers/image/docker/docker_client.go
generated
vendored
@ -1,28 +1,29 @@
|
|||||||
package docker
|
package docker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
"github.com/containers/image/docker/reference"
|
"github.com/containers/image/docker/reference"
|
||||||
|
"github.com/containers/image/pkg/tlsclientconfig"
|
||||||
"github.com/containers/image/types"
|
"github.com/containers/image/types"
|
||||||
"github.com/containers/storage/pkg/homedir"
|
"github.com/containers/storage/pkg/homedir"
|
||||||
"github.com/docker/distribution/registry/client"
|
"github.com/docker/distribution/registry/client"
|
||||||
"github.com/docker/go-connections/sockets"
|
helperclient "github.com/docker/docker-credential-helpers/client"
|
||||||
"github.com/docker/go-connections/tlsconfig"
|
"github.com/docker/go-connections/tlsconfig"
|
||||||
"github.com/opencontainers/go-digest"
|
"github.com/opencontainers/go-digest"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -111,27 +112,7 @@ func serverDefault() *tls.Config {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTransport() *http.Transport {
|
// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort.
|
||||||
direct := &net.Dialer{
|
|
||||||
Timeout: 30 * time.Second,
|
|
||||||
KeepAlive: 30 * time.Second,
|
|
||||||
DualStack: true,
|
|
||||||
}
|
|
||||||
tr := &http.Transport{
|
|
||||||
Proxy: http.ProxyFromEnvironment,
|
|
||||||
Dial: direct.Dial,
|
|
||||||
TLSHandshakeTimeout: 10 * time.Second,
|
|
||||||
// TODO(dmcgowan): Call close idle connections when complete and use keep alive
|
|
||||||
DisableKeepAlives: true,
|
|
||||||
}
|
|
||||||
proxyDialer, err := sockets.DialerFromEnvironment(direct)
|
|
||||||
if err == nil {
|
|
||||||
tr.Dial = proxyDialer.Dial
|
|
||||||
}
|
|
||||||
return tr
|
|
||||||
}
|
|
||||||
|
|
||||||
// dockerCertDir returns a path to a directory to be consumed by setupCertificates() depending on ctx and hostPort.
|
|
||||||
func dockerCertDir(ctx *types.SystemContext, hostPort string) string {
|
func dockerCertDir(ctx *types.SystemContext, hostPort string) string {
|
||||||
if ctx != nil && ctx.DockerCertPath != "" {
|
if ctx != nil && ctx.DockerCertPath != "" {
|
||||||
return ctx.DockerCertPath
|
return ctx.DockerCertPath
|
||||||
@ -147,65 +128,6 @@ func dockerCertDir(ctx *types.SystemContext, hostPort string) string {
|
|||||||
return filepath.Join(hostCertDir, hostPort)
|
return filepath.Join(hostCertDir, hostPort)
|
||||||
}
|
}
|
||||||
|
|
||||||
func setupCertificates(dir string, tlsc *tls.Config) error {
|
|
||||||
logrus.Debugf("Looking for TLS certificates and private keys in %s", dir)
|
|
||||||
fs, err := ioutil.ReadDir(dir)
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, f := range fs {
|
|
||||||
fullPath := filepath.Join(dir, f.Name())
|
|
||||||
if strings.HasSuffix(f.Name(), ".crt") {
|
|
||||||
systemPool, err := tlsconfig.SystemCertPool()
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "unable to get system cert pool")
|
|
||||||
}
|
|
||||||
tlsc.RootCAs = systemPool
|
|
||||||
logrus.Debugf(" crt: %s", fullPath)
|
|
||||||
data, err := ioutil.ReadFile(fullPath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
tlsc.RootCAs.AppendCertsFromPEM(data)
|
|
||||||
}
|
|
||||||
if strings.HasSuffix(f.Name(), ".cert") {
|
|
||||||
certName := f.Name()
|
|
||||||
keyName := certName[:len(certName)-5] + ".key"
|
|
||||||
logrus.Debugf(" cert: %s", fullPath)
|
|
||||||
if !hasFile(fs, keyName) {
|
|
||||||
return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
|
|
||||||
}
|
|
||||||
cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
tlsc.Certificates = append(tlsc.Certificates, cert)
|
|
||||||
}
|
|
||||||
if strings.HasSuffix(f.Name(), ".key") {
|
|
||||||
keyName := f.Name()
|
|
||||||
certName := keyName[:len(keyName)-4] + ".cert"
|
|
||||||
logrus.Debugf(" key: %s", fullPath)
|
|
||||||
if !hasFile(fs, certName) {
|
|
||||||
return errors.Errorf("missing client certificate %s for key %s", certName, keyName)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func hasFile(files []os.FileInfo, name string) bool {
|
|
||||||
for _, f := range files {
|
|
||||||
if f.Name() == name {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// newDockerClient returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry)
|
// newDockerClient returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry)
|
||||||
// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
|
// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
|
||||||
func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
|
func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
|
||||||
@ -217,7 +139,7 @@ func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool,
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
tr := newTransport()
|
tr := tlsclientconfig.NewTransport()
|
||||||
tr.TLSClientConfig = serverDefault()
|
tr.TLSClientConfig = serverDefault()
|
||||||
// It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
|
// It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
|
||||||
// because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
|
// because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
|
||||||
@ -225,7 +147,7 @@ func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool,
|
|||||||
// generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
|
// generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
|
||||||
// undocumented and may change if docker/docker changes.
|
// undocumented and may change if docker/docker changes.
|
||||||
certDir := dockerCertDir(ctx, reference.Domain(ref.ref))
|
certDir := dockerCertDir(ctx, reference.Domain(ref.ref))
|
||||||
if err := setupCertificates(certDir, tr.TLSClientConfig); err != nil {
|
if err := tlsclientconfig.SetupCertificates(certDir, tr.TLSClientConfig); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if ctx != nil && ctx.DockerInsecureSkipTLSVerify {
|
if ctx != nil && ctx.DockerInsecureSkipTLSVerify {
|
||||||
@ -254,24 +176,25 @@ func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool,
|
|||||||
|
|
||||||
// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
|
// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
|
||||||
// The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
|
// The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
|
||||||
func (c *dockerClient) makeRequest(method, path string, headers map[string][]string, stream io.Reader) (*http.Response, error) {
|
func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader) (*http.Response, error) {
|
||||||
if err := c.detectProperties(); err != nil {
|
if err := c.detectProperties(ctx); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
|
url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
|
||||||
return c.makeRequestToResolvedURL(method, url, headers, stream, -1, true)
|
return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
|
// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
|
||||||
// streamLen, if not -1, specifies the length of the data expected on stream.
|
// streamLen, if not -1, specifies the length of the data expected on stream.
|
||||||
// makeRequest should generally be preferred.
|
// makeRequest should generally be preferred.
|
||||||
// TODO(runcom): too many arguments here, use a struct
|
// TODO(runcom): too many arguments here, use a struct
|
||||||
func (c *dockerClient) makeRequestToResolvedURL(method, url string, headers map[string][]string, stream io.Reader, streamLen int64, sendAuth bool) (*http.Response, error) {
|
func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, sendAuth bool) (*http.Response, error) {
|
||||||
req, err := http.NewRequest(method, url, stream)
|
req, err := http.NewRequest(method, url, stream)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
req = req.WithContext(ctx)
|
||||||
if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
|
if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
|
||||||
req.ContentLength = streamLen
|
req.ContentLength = streamLen
|
||||||
}
|
}
|
||||||
@ -323,7 +246,7 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
|
|||||||
}
|
}
|
||||||
service, _ := challenge.Parameters["service"] // Will be "" if not present
|
service, _ := challenge.Parameters["service"] // Will be "" if not present
|
||||||
scope := fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
|
scope := fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
|
||||||
token, err := c.getBearerToken(realm, service, scope)
|
token, err := c.getBearerToken(req.Context(), realm, service, scope)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -340,11 +263,12 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *dockerClient) getBearerToken(realm, service, scope string) (*bearerToken, error) {
|
func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) {
|
||||||
authReq, err := http.NewRequest("GET", realm, nil)
|
authReq, err := http.NewRequest("GET", realm, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
authReq = authReq.WithContext(ctx)
|
||||||
getParams := authReq.URL.Query()
|
getParams := authReq.URL.Query()
|
||||||
if service != "" {
|
if service != "" {
|
||||||
getParams.Add("service", service)
|
getParams.Add("service", service)
|
||||||
@ -356,7 +280,7 @@ func (c *dockerClient) getBearerToken(realm, service, scope string) (*bearerToke
|
|||||||
if c.username != "" && c.password != "" {
|
if c.username != "" && c.password != "" {
|
||||||
authReq.SetBasicAuth(c.username, c.password)
|
authReq.SetBasicAuth(c.username, c.password)
|
||||||
}
|
}
|
||||||
tr := newTransport()
|
tr := tlsclientconfig.NewTransport()
|
||||||
// TODO(runcom): insecure for now to contact the external token service
|
// TODO(runcom): insecure for now to contact the external token service
|
||||||
tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
|
tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
|
||||||
client := &http.Client{Transport: tr}
|
client := &http.Client{Transport: tr}
|
||||||
@@ -428,7 +352,12 @@ func getAuth(ctx *types.SystemContext, registry string) (string, string, error)
 		return "", "", errors.Wrap(err, dockerCfgPath)
 	}
 
-	// I'm feeling lucky
+	// First try cred helpers. They should always be normalized.
+	if ch, exists := dockerAuth.CredHelpers[registry]; exists {
+		return getAuthFromCredHelper(ch, registry)
+	}
+
+	// I'm feeling lucky.
 	if c, exists := dockerAuth.AuthConfigs[registry]; exists {
 		return decodeDockerAuth(c.Auth)
 	}
@@ -447,14 +376,14 @@ func getAuth(ctx *types.SystemContext, registry string) (string, string, error)
 
 // detectProperties detects various properties of the registry.
 // See the dockerClient documentation for members which are affected by this.
-func (c *dockerClient) detectProperties() error {
+func (c *dockerClient) detectProperties(ctx context.Context) error {
 	if c.scheme != "" {
 		return nil
 	}
 
 	ping := func(scheme string) error {
 		url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
-		resp, err := c.makeRequestToResolvedURL("GET", url, nil, nil, -1, true)
+		resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, true)
 		logrus.Debugf("Ping %s err %#v", url, err)
 		if err != nil {
 			return err
@@ -481,7 +410,7 @@ func (c *dockerClient) detectProperties() error {
 	// best effort to understand if we're talking to a V1 registry
 	pingV1 := func(scheme string) bool {
 		url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
-		resp, err := c.makeRequestToResolvedURL("GET", url, nil, nil, -1, true)
+		resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, true)
 		logrus.Debugf("Ping %s err %#v", url, err)
 		if err != nil {
 			return false
@@ -506,9 +435,9 @@ func (c *dockerClient) detectProperties() error {
 
 // getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
 // using the original data structures.
-func (c *dockerClient) getExtensionsSignatures(ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
+func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
 	path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
-	res, err := c.makeRequest("GET", path, nil, nil)
+	res, err := c.makeRequest(ctx, "GET", path, nil, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -538,6 +467,18 @@ type dockerAuthConfig struct {
 
 type dockerConfigFile struct {
 	AuthConfigs map[string]dockerAuthConfig `json:"auths"`
+	CredHelpers map[string]string           `json:"credHelpers,omitempty"`
+}
+
+func getAuthFromCredHelper(credHelper, registry string) (string, string, error) {
+	helperName := fmt.Sprintf("docker-credential-%s", credHelper)
+	p := helperclient.NewShellProgramFunc(helperName)
+	creds, err := helperclient.Get(p, registry)
+	if err != nil {
+		return "", "", err
+	}
+
+	return creds.Username, creds.Secret, nil
 }
 
 func decodeDockerAuth(s string) (string, string, error) {
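For context on the credential-helper path added above: a docker-credential-<name> helper is a standalone binary that speaks a small get/store/erase protocol over stdin/stdout, and the vendored code drives it through the docker-credential-helpers client package. A minimal caller-side sketch of the same lookup follows; the helper name, import path, and registry URL are illustrative assumptions, not part of this commit.

package main

import (
	"fmt"

	helperclient "github.com/docker/docker-credential-helpers/client"
)

func main() {
	// Assumes a "docker-credential-secretservice" binary is on $PATH; substitute
	// whichever helper the credHelpers map names for your registry.
	p := helperclient.NewShellProgramFunc("docker-credential-secretservice")

	// Get runs the helper with the "get" action and the server URL on stdin.
	creds, err := helperclient.Get(p, "https://index.docker.io/v1/")
	if err != nil {
		fmt.Println("no stored credentials:", err)
		return
	}
	fmt.Println("user:", creds.Username) // creds.Secret holds the password or token
}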
vendor/github.com/containers/image/docker/docker_image.go: 6 changes (generated, vendored)
@@ -1,6 +1,7 @@
 package docker
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"net/http"
@@ -22,7 +23,7 @@ type Image struct {
 // a client to the registry hosting the given image.
 // The caller must call .Close() on the returned Image.
 func newImage(ctx *types.SystemContext, ref dockerReference) (types.Image, error) {
-	s, err := newImageSource(ctx, ref, nil)
+	s, err := newImageSource(ctx, ref)
 	if err != nil {
 		return nil, err
 	}
@@ -41,7 +42,8 @@ func (i *Image) SourceRefFullName() string {
 // GetRepositoryTags list all tags available in the repository. Note that this has no connection with the tag(s) used for this specific image, if any.
 func (i *Image) GetRepositoryTags() ([]string, error) {
 	path := fmt.Sprintf(tagsPath, reference.Path(i.src.ref.ref))
-	res, err := i.src.c.makeRequest("GET", path, nil, nil)
+	// FIXME: Pass the context.Context
+	res, err := i.src.c.makeRequest(context.TODO(), "GET", path, nil, nil)
 	if err != nil {
 		return nil, err
 	}
vendor/github.com/containers/image/docker/docker_image_dest.go: 44 changes (generated, vendored)
@@ -2,6 +2,7 @@ package docker
 
 import (
 	"bytes"
+	"context"
 	"crypto/rand"
 	"encoding/json"
 	"fmt"
@@ -12,7 +13,6 @@ import (
 	"os"
 	"path/filepath"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
@@ -20,24 +20,11 @@ import (
 	"github.com/docker/distribution/registry/api/v2"
 	"github.com/docker/distribution/registry/client"
 	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
-var manifestMIMETypes = []string{
-	// TODO(runcom): we'll add OCI as part of another PR here
-	manifest.DockerV2Schema2MediaType,
-	manifest.DockerV2Schema1SignedMediaType,
-	manifest.DockerV2Schema1MediaType,
-}
-
-func supportedManifestMIMETypesMap() map[string]bool {
-	m := make(map[string]bool, len(manifestMIMETypes))
-	for _, mt := range manifestMIMETypes {
-		m[mt] = true
-	}
-	return m
-}
-
 type dockerImageDestination struct {
 	ref dockerReference
 	c   *dockerClient
@@ -69,13 +56,18 @@ func (d *dockerImageDestination) Close() error {
 }
 
 func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
-	return manifestMIMETypes
+	return []string{
+		imgspecv1.MediaTypeImageManifest,
+		manifest.DockerV2Schema2MediaType,
+		manifest.DockerV2Schema1SignedMediaType,
+		manifest.DockerV2Schema1MediaType,
+	}
 }
 
 // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
 // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
 func (d *dockerImageDestination) SupportsSignatures() error {
-	if err := d.c.detectProperties(); err != nil {
+	if err := d.c.detectProperties(context.TODO()); err != nil {
 		return err
 	}
 	switch {
@@ -132,7 +124,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
 	// FIXME? Chunked upload, progress reporting, etc.
 	uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
 	logrus.Debugf("Uploading %s", uploadPath)
-	res, err := d.c.makeRequest("POST", uploadPath, nil, nil)
+	res, err := d.c.makeRequest(context.TODO(), "POST", uploadPath, nil, nil)
 	if err != nil {
 		return types.BlobInfo{}, err
 	}
@@ -149,7 +141,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
 	digester := digest.Canonical.Digester()
 	sizeCounter := &sizeCounter{}
 	tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
-	res, err = d.c.makeRequestToResolvedURL("PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, true)
+	res, err = d.c.makeRequestToResolvedURL(context.TODO(), "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, true)
 	if err != nil {
 		logrus.Debugf("Error uploading layer chunked, response %#v", res)
 		return types.BlobInfo{}, err
@@ -168,7 +160,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
 	// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
 	locationQuery.Set("digest", computedDigest.String())
 	uploadLocation.RawQuery = locationQuery.Encode()
-	res, err = d.c.makeRequestToResolvedURL("PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, true)
+	res, err = d.c.makeRequestToResolvedURL(context.TODO(), "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, true)
 	if err != nil {
 		return types.BlobInfo{}, err
 	}
@@ -193,7 +185,7 @@ func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, erro
 	checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())
 
 	logrus.Debugf("Checking %s", checkPath)
-	res, err := d.c.makeRequest("HEAD", checkPath, nil, nil)
+	res, err := d.c.makeRequest(context.TODO(), "HEAD", checkPath, nil, nil)
 	if err != nil {
 		return false, -1, err
 	}
@@ -239,7 +231,7 @@ func (d *dockerImageDestination) PutManifest(m []byte) error {
 	if mimeType != "" {
 		headers["Content-Type"] = []string{mimeType}
 	}
-	res, err := d.c.makeRequest("PUT", path, headers, bytes.NewReader(m))
+	res, err := d.c.makeRequest(context.TODO(), "PUT", path, headers, bytes.NewReader(m))
 	if err != nil {
 		return err
 	}
@@ -275,7 +267,7 @@ func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
 	if len(signatures) == 0 {
 		return nil
 	}
-	if err := d.c.detectProperties(); err != nil {
+	if err := d.c.detectProperties(context.TODO()); err != nil {
 		return err
 	}
 	switch {
@@ -396,7 +388,7 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(signatures [][]byte
 	// always adds signatures. Eventually we should also allow removing signatures,
 	// but the X-Registry-Supports-Signatures API extension does not support that yet.
 
-	existingSignatures, err := d.c.getExtensionsSignatures(d.ref, d.manifestDigest)
+	existingSignatures, err := d.c.getExtensionsSignatures(context.TODO(), d.ref, d.manifestDigest)
 	if err != nil {
 		return err
 	}
@@ -438,7 +430,7 @@ sigExists:
 	}
 
 	path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
-	res, err := d.c.makeRequest("PUT", path, nil, bytes.NewReader(body))
+	res, err := d.c.makeRequest(context.TODO(), "PUT", path, nil, bytes.NewReader(body))
 	if err != nil {
 		return err
 	}
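The PutBlob hunks above keep the existing pattern of digesting the layer while it streams to the registry (a digester fed through io.TeeReader), only adding the context argument. A standalone sketch of that streaming-digest pattern, using only go-digest and the standard library; the input bytes are just an illustration, and the size here comes from io.Copy rather than the sizeCounter helper used in the vendored code.

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/opencontainers/go-digest"
)

func main() {
	stream := bytes.NewReader([]byte("example layer contents"))

	// Hash the bytes as they flow past, without buffering the whole blob.
	digester := digest.Canonical.Digester()
	tee := io.TeeReader(stream, digester.Hash())

	// Stand-in for the HTTP upload body; PutBlob hands tee to makeRequestToResolvedURL.
	size, err := io.Copy(ioutil.Discard, tee)
	if err != nil {
		panic(err)
	}

	fmt.Println("digest:", digester.Digest(), "size:", size)
}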
vendor/github.com/containers/image/docker/docker_image_src.go: 103 changes (generated, vendored)
@@ -1,6 +1,7 @@
 package docker
 
 import (
+	"context"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -10,51 +11,33 @@ import (
 	"os"
 	"strconv"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
 	"github.com/docker/distribution/registry/client"
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 type dockerImageSource struct {
 	ref dockerReference
-	requestedManifestMIMETypes []string
-	c                          *dockerClient
+	c   *dockerClient
 	// State
 	cachedManifest         []byte // nil if not loaded yet
 	cachedManifestMIMEType string // Only valid if cachedManifest != nil
 }
 
-// newImageSource creates a new ImageSource for the specified image reference,
-// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
-// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
+// newImageSource creates a new ImageSource for the specified image reference.
 // The caller must call .Close() on the returned ImageSource.
-func newImageSource(ctx *types.SystemContext, ref dockerReference, requestedManifestMIMETypes []string) (*dockerImageSource, error) {
+func newImageSource(ctx *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
 	c, err := newDockerClient(ctx, ref, false, "pull")
 	if err != nil {
 		return nil, err
 	}
-	if requestedManifestMIMETypes == nil {
-		requestedManifestMIMETypes = manifest.DefaultRequestedManifestMIMETypes
-	}
-	supportedMIMEs := supportedManifestMIMETypesMap()
-	acceptableRequestedMIMEs := false
-	for _, mtrequested := range requestedManifestMIMETypes {
-		if supportedMIMEs[mtrequested] {
-			acceptableRequestedMIMEs = true
-			break
-		}
-	}
-	if !acceptableRequestedMIMEs {
-		requestedManifestMIMETypes = manifest.DefaultRequestedManifestMIMETypes
-	}
 	return &dockerImageSource{
 		ref: ref,
-		requestedManifestMIMETypes: requestedManifestMIMETypes,
-		c: c,
+		c:   c,
 	}, nil
 }
 
@@ -85,18 +68,18 @@ func simplifyContentType(contentType string) string {
 // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
 // It may use a remote (= slow) service.
 func (s *dockerImageSource) GetManifest() ([]byte, string, error) {
-	err := s.ensureManifestIsLoaded()
+	err := s.ensureManifestIsLoaded(context.TODO())
 	if err != nil {
 		return nil, "", err
 	}
 	return s.cachedManifest, s.cachedManifestMIMEType, nil
 }
 
-func (s *dockerImageSource) fetchManifest(tagOrDigest string) ([]byte, string, error) {
+func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
 	path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest)
 	headers := make(map[string][]string)
-	headers["Accept"] = s.requestedManifestMIMETypes
+	headers["Accept"] = manifest.DefaultRequestedManifestMIMETypes
-	res, err := s.c.makeRequest("GET", path, headers, nil)
+	res, err := s.c.makeRequest(ctx, "GET", path, headers, nil)
 	if err != nil {
 		return nil, "", err
 	}
@@ -114,7 +97,7 @@ func (s *dockerImageSource) fetchManifest(tagOrDigest string) ([]byte, string, e
 // GetTargetManifest returns an image's manifest given a digest.
 // This is mainly used to retrieve a single image's manifest out of a manifest list.
 func (s *dockerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
-	return s.fetchManifest(digest.String())
+	return s.fetchManifest(context.TODO(), digest.String())
 }
 
 // ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType
@@ -124,7 +107,7 @@ func (s *dockerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, str
 // we need to ensure that the digest of the manifest returned by GetManifest
 // and used by GetSignatures are consistent, otherwise we would get spurious
 // signature verification failures when pulling while a tag is being updated.
-func (s *dockerImageSource) ensureManifestIsLoaded() error {
+func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error {
 	if s.cachedManifest != nil {
 		return nil
 	}
@@ -134,7 +117,7 @@ func (s *dockerImageSource) ensureManifestIsLoaded() error {
 		return err
 	}
 
-	manblob, mt, err := s.fetchManifest(reference)
+	manblob, mt, err := s.fetchManifest(ctx, reference)
 	if err != nil {
 		return err
 	}
@@ -150,13 +133,14 @@ func (s *dockerImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64
 		err  error
 	)
 	for _, url := range urls {
-		resp, err = s.c.makeRequestToResolvedURL("GET", url, nil, nil, -1, false)
+		resp, err = s.c.makeRequestToResolvedURL(context.TODO(), "GET", url, nil, nil, -1, false)
 		if err == nil {
 			if resp.StatusCode != http.StatusOK {
 				err = errors.Errorf("error fetching external blob from %q: %d", url, resp.StatusCode)
 				logrus.Debug(err)
 				continue
 			}
+			break
 		}
 	}
 	if resp.Body != nil && err == nil {
@@ -181,7 +165,7 @@ func (s *dockerImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64,
 
 	path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String())
 	logrus.Debugf("Downloading %s", path)
-	res, err := s.c.makeRequest("GET", path, nil, nil)
+	res, err := s.c.makeRequest(context.TODO(), "GET", path, nil, nil)
 	if err != nil {
 		return nil, 0, err
 	}
@@ -192,27 +176,38 @@ func (s *dockerImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64,
 	return res.Body, getBlobSize(res), nil
 }
 
-func (s *dockerImageSource) GetSignatures() ([][]byte, error) {
-	if err := s.c.detectProperties(); err != nil {
+func (s *dockerImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
+	if err := s.c.detectProperties(ctx); err != nil {
 		return nil, err
 	}
 	switch {
 	case s.c.signatureBase != nil:
-		return s.getSignaturesFromLookaside()
+		return s.getSignaturesFromLookaside(ctx)
 	case s.c.supportsSignatures:
-		return s.getSignaturesFromAPIExtension()
+		return s.getSignaturesFromAPIExtension(ctx)
 	default:
 		return [][]byte{}, nil
 	}
 }
 
+// manifestDigest returns a digest of the manifest, either from the supplied reference or from a fetched manifest.
+func (s *dockerImageSource) manifestDigest(ctx context.Context) (digest.Digest, error) {
+	if digested, ok := s.ref.ref.(reference.Digested); ok {
+		d := digested.Digest()
+		if d.Algorithm() == digest.Canonical {
+			return d, nil
+		}
+	}
+	if err := s.ensureManifestIsLoaded(ctx); err != nil {
+		return "", err
+	}
+	return manifest.Digest(s.cachedManifest)
+}
+
 // getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase,
 // which is not nil.
-func (s *dockerImageSource) getSignaturesFromLookaside() ([][]byte, error) {
-	if err := s.ensureManifestIsLoaded(); err != nil {
-		return nil, err
-	}
-	manifestDigest, err := manifest.Digest(s.cachedManifest)
+func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context) ([][]byte, error) {
+	manifestDigest, err := s.manifestDigest(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -224,7 +219,7 @@ func (s *dockerImageSource) getSignaturesFromLookaside() ([][]byte, error) {
 		if url == nil {
 			return nil, errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
 		}
-		signature, missing, err := s.getOneSignature(url)
+		signature, missing, err := s.getOneSignature(ctx, url)
 		if err != nil {
 			return nil, err
 		}
@@ -239,7 +234,7 @@ func (s *dockerImageSource) getSignaturesFromLookaside() ([][]byte, error) {
 // getOneSignature downloads one signature from url.
 // If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil.
 // NOTE: Keep this in sync with docs/signature-protocols.md!
-func (s *dockerImageSource) getOneSignature(url *url.URL) (signature []byte, missing bool, err error) {
+func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) {
 	switch url.Scheme {
 	case "file":
 		logrus.Debugf("Reading %s", url.Path)
@@ -254,7 +249,12 @@ func (s *dockerImageSource) getOneSignature(url *url.URL) (signature []byte, mis
 
 	case "http", "https":
 		logrus.Debugf("GET %s", url)
-		res, err := s.c.client.Get(url.String())
+		req, err := http.NewRequest("GET", url.String(), nil)
+		if err != nil {
+			return nil, false, err
+		}
+		req = req.WithContext(ctx)
+		res, err := s.c.client.Do(req)
 		if err != nil {
 			return nil, false, err
 		}
@@ -276,16 +276,13 @@ func (s *dockerImageSource) getOneSignature(url *url.URL) (signature []byte, mis
 	}
 
 // getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension.
-func (s *dockerImageSource) getSignaturesFromAPIExtension() ([][]byte, error) {
-	if err := s.ensureManifestIsLoaded(); err != nil {
-		return nil, err
-	}
-	manifestDigest, err := manifest.Digest(s.cachedManifest)
+func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context) ([][]byte, error) {
+	manifestDigest, err := s.manifestDigest(ctx)
 	if err != nil {
 		return nil, err
 	}
 
-	parsedBody, err := s.c.getExtensionsSignatures(s.ref, manifestDigest)
+	parsedBody, err := s.c.getExtensionsSignatures(ctx, s.ref, manifestDigest)
 	if err != nil {
 		return nil, err
 	}
@@ -316,7 +313,7 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
 		return err
 	}
 	getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
-	get, err := c.makeRequest("GET", getPath, headers, nil)
+	get, err := c.makeRequest(context.TODO(), "GET", getPath, headers, nil)
 	if err != nil {
 		return err
 	}
@@ -338,7 +335,7 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
 
 	// When retrieving the digest from a registry >= 2.3 use the following header:
 	//   "Accept": "application/vnd.docker.distribution.manifest.v2+json"
-	delete, err := c.makeRequest("DELETE", deletePath, headers, nil)
+	delete, err := c.makeRequest(context.TODO(), "DELETE", deletePath, headers, nil)
 	if err != nil {
 		return err
 	}
vendor/github.com/containers/image/docker/docker_transport.go: 8 changes (generated, vendored)
@@ -130,12 +130,10 @@ func (ref dockerReference) NewImage(ctx *types.SystemContext) (types.Image, erro
 	return newImage(ctx, ref)
 }
 
-// NewImageSource returns a types.ImageSource for this reference,
-// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
-// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
+// NewImageSource returns a types.ImageSource for this reference.
 // The caller must call .Close() on the returned ImageSource.
-func (ref dockerReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
-	return newImageSource(ctx, ref, requestedManifestMIMETypes)
+func (ref dockerReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(ctx, ref)
 }
 
 // NewImageDestination returns a types.ImageDestination for this reference.
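The net effect of the two files above is that image sources are now created without a requested-MIME-type list and that signature fetching takes a context.Context (several internal call sites still pass context.TODO()). A rough caller-side sketch of the new surface, assuming the alltransports helper and a reachable registry; not part of this commit.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/containers/image/transports/alltransports"
	"github.com/containers/image/types"
)

func main() {
	ref, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
	if err != nil {
		panic(err)
	}

	// NewImageSource no longer takes requestedManifestMIMETypes.
	src, err := ref.NewImageSource(&types.SystemContext{})
	if err != nil {
		panic(err)
	}
	defer src.Close()

	// GetSignatures is cancellable through the caller's context.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	sigs, err := src.GetSignatures(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d signatures\n", len(sigs))
}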
vendor/github.com/containers/image/docker/lookaside.go: 2 changes (generated, vendored)
@@ -9,12 +9,12 @@ import (
 	"path/filepath"
 	"strings"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/types"
 	"github.com/ghodss/yaml"
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 // systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.
vendor/github.com/containers/image/docker/tarfile/dest.go: 2 changes (generated, vendored)
@@ -10,12 +10,12 @@ import (
 	"os"
 	"time"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
vendor/github.com/containers/image/docker/tarfile/src.go: 3 changes (generated, vendored)
@@ -3,6 +3,7 @@ package tarfile
 import (
 	"archive/tar"
 	"bytes"
+	"context"
 	"encoding/json"
 	"io"
 	"io/ioutil"
@@ -354,6 +355,6 @@ func (s *Source) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
 }
 
 // GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-func (s *Source) GetSignatures() ([][]byte, error) {
+func (s *Source) GetSignatures(ctx context.Context) ([][]byte, error) {
 	return [][]byte{}, nil
 }
vendor/github.com/containers/image/image/docker_schema2.go: 2 changes (generated, vendored)
@@ -8,13 +8,13 @@ import (
 	"io/ioutil"
 	"strings"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
 	"github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 // gzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
vendor/github.com/containers/image/image/memory.go: 4 changes (generated, vendored)
@@ -1,6 +1,8 @@
 package image
 
 import (
+	"context"
+
 	"github.com/pkg/errors"
 
 	"github.com/containers/image/types"
@@ -54,7 +56,7 @@ func (i *memoryImage) Manifest() ([]byte, string, error) {
 }
 
 // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
-func (i *memoryImage) Signatures() ([][]byte, error) {
+func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) {
 	// Modifying an image invalidates signatures; a caller asking the updated image for signatures
 	// is probably confused.
 	return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory")
vendor/github.com/containers/image/image/oci.go: 6 changes (generated, vendored)
@@ -56,7 +56,7 @@ func (m *manifestOCI1) manifestMIMEType() string {
 // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
 // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
 func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
-	return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size}
+	return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size, Annotations: m.ConfigDescriptor.Annotations}
 }
 
 // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
@@ -109,7 +109,7 @@ func (m *manifestOCI1) OCIConfig() (*imgspecv1.Image, error) {
 func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
 	blobs := []types.BlobInfo{}
 	for _, layer := range m.LayersDescriptors {
-		blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size})
+		blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs})
 	}
 	return blobs
 }
@@ -159,6 +159,8 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.
 			copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType
 			copy.LayersDescriptors[i].Digest = info.Digest
 			copy.LayersDescriptors[i].Size = info.Size
+			copy.LayersDescriptors[i].Annotations = info.Annotations
+			copy.LayersDescriptors[i].URLs = info.URLs
 		}
 	}
 	// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
vendor/github.com/containers/image/image/unparsed.go: 6 changes (generated, vendored)
@@ -1,6 +1,8 @@
 package image
 
 import (
+	"context"
+
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
@@ -71,9 +73,9 @@ func (i *UnparsedImage) Manifest() ([]byte, string, error) {
 }
 
 // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
-func (i *UnparsedImage) Signatures() ([][]byte, error) {
+func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
 	if i.cachedSignatures == nil {
-		sigs, err := i.src.GetSignatures()
+		sigs, err := i.src.GetSignatures(ctx)
 		if err != nil {
 			return nil, err
 		}
vendor/github.com/containers/image/manifest/manifest.go: 2 changes (generated, vendored)
@@ -35,7 +35,7 @@ var DefaultRequestedManifestMIMETypes = []string{
 	DockerV2Schema2MediaType,
 	DockerV2Schema1SignedMediaType,
 	DockerV2Schema1MediaType,
-	DockerV2ListMediaType,
+	// DockerV2ListMediaType, // FIXME: Restore this ASAP
 }
 
 // GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
vendor/github.com/containers/image/oci/archive/oci_dest.go: 132 changes (generated, vendored, new file)
@@ -0,0 +1,132 @@
+package archive
+
+import (
+	"io"
+	"os"
+
+	"github.com/containers/image/types"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/pkg/errors"
+)
+
+type ociArchiveImageDestination struct {
+	ref          ociArchiveReference
+	unpackedDest types.ImageDestination
+	tempDirRef   tempDirOCIRef
+}
+
+// newImageDestination returns an ImageDestination for writing to an existing directory.
+func newImageDestination(ctx *types.SystemContext, ref ociArchiveReference) (types.ImageDestination, error) {
+	tempDirRef, err := createOCIRef(ref.image)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error creating oci reference")
+	}
+	unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx)
+	if err != nil {
+		if err := tempDirRef.deleteTempDir(); err != nil {
+			return nil, errors.Wrapf(err, "error deleting temp directory", tempDirRef.tempDirectory)
+		}
+		return nil, err
+	}
+	return &ociArchiveImageDestination{ref: ref,
+		unpackedDest: unpackedDest,
+		tempDirRef:   tempDirRef}, nil
+}
+
+// Reference returns the reference used to set up this destination.
+func (d *ociArchiveImageDestination) Reference() types.ImageReference {
+	return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any
+// Close deletes the temp directory of the oci-archive image
+func (d *ociArchiveImageDestination) Close() error {
+	defer d.tempDirRef.deleteTempDir()
+	return d.unpackedDest.Close()
+}
+
+func (d *ociArchiveImageDestination) SupportedManifestMIMETypes() []string {
+	return d.unpackedDest.SupportedManifestMIMETypes()
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures
+func (d *ociArchiveImageDestination) SupportsSignatures() error {
+	return d.unpackedDest.SupportsSignatures()
+}
+
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination
+func (d *ociArchiveImageDestination) ShouldCompressLayers() bool {
+	return d.unpackedDest.ShouldCompressLayers()
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *ociArchiveImageDestination) AcceptsForeignLayerURLs() bool {
+	return d.unpackedDest.AcceptsForeignLayerURLs()
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise
+func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool {
+	return d.unpackedDest.MustMatchRuntimeOS()
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+func (d *ociArchiveImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
+	return d.unpackedDest.PutBlob(stream, inputInfo)
+}
+
+// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob
+func (d *ociArchiveImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+	return d.unpackedDest.HasBlob(info)
+}
+
+func (d *ociArchiveImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+	return d.unpackedDest.ReapplyBlob(info)
+}
+
+// PutManifest writes manifest to the destination
+func (d *ociArchiveImageDestination) PutManifest(m []byte) error {
+	return d.unpackedDest.PutManifest(m)
+}
+
+func (d *ociArchiveImageDestination) PutSignatures(signatures [][]byte) error {
+	return d.unpackedDest.PutSignatures(signatures)
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted
+// after the directory is made, it is tarred up into a file and the directory is deleted
+func (d *ociArchiveImageDestination) Commit() error {
+	if err := d.unpackedDest.Commit(); err != nil {
+		return errors.Wrapf(err, "error storing image %q", d.ref.image)
+	}
+
+	// path of directory to tar up
+	src := d.tempDirRef.tempDirectory
+	// path to save tarred up file
+	dst := d.ref.resolvedFile
+
+	return tarDirectory(src, dst)
+}
+
+// tar converts the directory at src and saves it to dst
+func tarDirectory(src, dst string) error {
+	// input is a stream of bytes from the archive of the directory at path
+	input, err := archive.Tar(src, archive.Uncompressed)
+	if err != nil {
+		return errors.Wrapf(err, "error retrieving stream of bytes from %q", src)
+	}
+
+	// creates the tar file
+	outFile, err := os.Create(dst)
+	if err != nil {
+		return errors.Wrapf(err, "error creating tar file %q", dst)
+	}
+	defer outFile.Close()
+
+	// copies the contents of the directory to the tar file
+	_, err = io.Copy(outFile, input)
+
+	return err
+}
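Commit above writes the image into a temporary OCI layout directory and then flattens that directory into the archive file with archive.Tar. A self-contained sketch of that tar-up step under the same assumption (containers/storage's pkg/archive); the src and dst paths are hypothetical stand-ins for tempDirRef.tempDirectory and ref.resolvedFile.

package main

import (
	"io"
	"os"

	"github.com/containers/storage/pkg/archive"
)

// tarUp streams an uncompressed tar of dir into outPath, mirroring tarDirectory above.
func tarUp(dir, outPath string) error {
	input, err := archive.Tar(dir, archive.Uncompressed)
	if err != nil {
		return err
	}
	defer input.Close()

	out, err := os.Create(outPath)
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, input)
	return err
}

func main() {
	// Hypothetical paths, for illustration only.
	if err := tarUp("/var/tmp/oci123", "/tmp/image-oci.tar"); err != nil {
		panic(err)
	}
}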
vendor/github.com/containers/image/oci/archive/oci_src.go: 88 changes (generated, vendored, new file)
@@ -0,0 +1,88 @@
+package archive
+
+import (
+	"context"
+	"io"
+
+	ocilayout "github.com/containers/image/oci/layout"
+	"github.com/containers/image/types"
+	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+type ociArchiveImageSource struct {
+	ref         ociArchiveReference
+	unpackedSrc types.ImageSource
+	tempDirRef  tempDirOCIRef
+}
+
+// newImageSource returns an ImageSource for reading from an existing directory.
+// newImageSource untars the file and saves it in a temp directory
+func newImageSource(ctx *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) {
+	tempDirRef, err := createUntarTempDir(ref)
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating temp directory")
+	}
+
+	unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx)
+	if err != nil {
+		if err := tempDirRef.deleteTempDir(); err != nil {
+			return nil, errors.Wrapf(err, "error deleting temp directory", tempDirRef.tempDirectory)
+		}
+		return nil, err
+	}
+	return &ociArchiveImageSource{ref: ref,
+		unpackedSrc: unpackedSrc,
+		tempDirRef:  tempDirRef}, nil
+}
+
+// LoadManifestDescriptor loads the manifest
+func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
+	ociArchRef, ok := imgRef.(ociArchiveReference)
+	if !ok {
+		return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociArchiveReference")
+	}
+	tempDirRef, err := createUntarTempDir(ociArchRef)
+	if err != nil {
+		return imgspecv1.Descriptor{}, errors.Wrap(err, "error creating temp directory")
+	}
+	defer tempDirRef.deleteTempDir()
+
+	descriptor, err := ocilayout.LoadManifestDescriptor(tempDirRef.ociRefExtracted)
+	if err != nil {
+		return imgspecv1.Descriptor{}, errors.Wrap(err, "error loading index")
+	}
+	return descriptor, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *ociArchiveImageSource) Reference() types.ImageReference {
+	return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+// Close deletes the temporary directory at dst
+func (s *ociArchiveImageSource) Close() error {
+	defer s.tempDirRef.deleteTempDir()
+	return s.unpackedSrc.Close()
+}
+
+// GetManifest returns the image's manifest along with its MIME type
+// (which may be empty when it can't be determined but the manifest is available).
+func (s *ociArchiveImageSource) GetManifest() ([]byte, string, error) {
+	return s.unpackedSrc.GetManifest()
+}
+
+func (s *ociArchiveImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
+	return s.unpackedSrc.GetTargetManifest(digest)
+}
+
+// GetBlob returns a stream for the specified blob, and the blob's size.
+func (s *ociArchiveImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
+	return s.unpackedSrc.GetBlob(info)
+}
+
+func (s *ociArchiveImageSource) GetSignatures(c context.Context) ([][]byte, error) {
+	return s.unpackedSrc.GetSignatures(c)
+}
vendor/github.com/containers/image/oci/archive/oci_transport.go: 225 changes (generated, vendored, new file)
@@ -0,0 +1,225 @@
+package archive
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	"github.com/containers/image/directory/explicitfilepath"
+	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/image"
+	ocilayout "github.com/containers/image/oci/layout"
+	"github.com/containers/image/transports"
+	"github.com/containers/image/types"
+	"github.com/containers/storage/pkg/archive"
+	"github.com/pkg/errors"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for OCI archive
+// it creates an oci-archive tar file by calling into the OCI transport
+// tarring the directory created by oci and deleting the directory
+var Transport = ociArchiveTransport{}
+
+type ociArchiveTransport struct{}
+
+// ociArchiveReference is an ImageReference for OCI Archive paths
+type ociArchiveReference struct {
+	file         string
+	resolvedFile string
+	image        string
+}
+
+func (t ociArchiveTransport) Name() string {
+	return "oci-archive"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix
+// into an ImageReference.
+func (t ociArchiveTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) error {
+	var file string
+	sep := strings.SplitN(scope, ":", 2)
+	file = sep[0]
+
+	if len(sep) == 2 {
+		image := sep[1]
+		if !refRegexp.MatchString(image) {
+			return errors.Errorf("Invalid image %s", image)
+		}
+	}
+
+	if !strings.HasPrefix(file, "/") {
+		return errors.Errorf("Invalid scope %s: must be an absolute path", scope)
+	}
+	// Refuse also "/", otherwise "/" and "" would have the same semantics,
+	// and "" could be unexpectedly shadowed by the "/" entry.
+	// (Note: we do allow "/:someimage", a bit ridiculous but why refuse it?)
+	if scope == "/" {
+		return errors.New(`Invalid scope "/": Use the generic default scope ""`)
+	}
+	cleaned := filepath.Clean(file)
+	if cleaned != file {
+		return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+	}
+	return nil
+}
+
+// annotation spex from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
+const (
+	separator = `(?:[-._:@+]|--)`
+	alphanum  = `(?:[A-Za-z0-9]+)`
+	component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
+)
+
+var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
+func ParseReference(reference string) (types.ImageReference, error) {
+	var file, image string
+	sep := strings.SplitN(reference, ":", 2)
+	file = sep[0]
+
+	if len(sep) == 2 {
+		image = sep[1]
+	}
+	return NewReference(file, image)
+}
+
+// NewReference returns an OCI reference for a file and a image.
+func NewReference(file, image string) (types.ImageReference, error) {
+	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file)
+	if err != nil {
+		return nil, err
+	}
+	// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
+	// from being ambiguous with values of PolicyConfigurationIdentity.
+	if strings.Contains(resolved, ":") {
+		return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", file, image, resolved)
+	}
+	if len(image) > 0 && !refRegexp.MatchString(image) {
+		return nil, errors.Errorf("Invalid image %s", image)
+	}
+	return ociArchiveReference{file: file, resolvedFile: resolved, image: image}, nil
+}
+
+func (ref ociArchiveReference) Transport() types.ImageTransport {
+	return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+func (ref ociArchiveReference) StringWithinTransport() string {
+	return fmt.Sprintf("%s:%s", ref.file, ref.image)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+func (ref ociArchiveReference) DockerReference() reference.Named {
+	return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+func (ref ociArchiveReference) PolicyConfigurationIdentity() string {
+	// NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
+	// same image and the two can’t be statically disambiguated. Using at least the repository directory is
+	// less granular but hopefully still useful.
+	return fmt.Sprintf("%s", ref.resolvedFile)
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set
+func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string {
+	res := []string{}
+	path := ref.resolvedFile
+	for {
+		lastSlash := strings.LastIndex(path, "/")
+		// Note that we do not include "/"; it is redundant with the default "" global default,
+		// and rejected by ociTransport.ValidatePolicyConfigurationScope above.
+		if lastSlash == -1 || path == "/" {
+			break
+		}
+		res = append(res, path)
+		path = path[:lastSlash]
+	}
+	return res
+}
+
+// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned Image.
+func (ref ociArchiveReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+	src, err := newImageSource(ctx, ref)
+	if err != nil {
+		return nil, err
+	}
+	return image.FromSource(src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref ociArchiveReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(ctx, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref ociArchiveReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(ctx, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref ociArchiveReference) DeleteImage(ctx *types.SystemContext) error {
+	return errors.Errorf("Deleting images not implemented for oci: images")
+}
+
+// struct to store the ociReference and temporary directory returned by createOCIRef
+type tempDirOCIRef struct {
+	tempDirectory   string
+	ociRefExtracted types.ImageReference
+}
+
+// deletes the temporary directory created
+func (t *tempDirOCIRef) deleteTempDir() error {
+	return os.RemoveAll(t.tempDirectory)
+}
+
+// createOCIRef creates the oci reference of the image
+func createOCIRef(image string) (tempDirOCIRef, error) {
+	dir, err := ioutil.TempDir("/var/tmp", "oci")
+	if err != nil {
+		return tempDirOCIRef{}, errors.Wrapf(err, "error creating temp directory")
+	}
+	ociRef, err := ocilayout.NewReference(dir, image)
+	if err != nil {
+		return tempDirOCIRef{}, err
+	}
+
+	tempDirRef := tempDirOCIRef{tempDirectory: dir, ociRefExtracted: ociRef}
+	return tempDirRef, nil
+}
+
+// creates the temporary directory and copies the tarred content to it
+func createUntarTempDir(ref ociArchiveReference) (tempDirOCIRef, error) {
+	tempDirRef, err := createOCIRef(ref.image)
+	if err != nil {
+		return tempDirOCIRef{}, errors.Wrap(err, "error creating oci reference")
|
||||||
|
}
|
||||||
|
src := ref.resolvedFile
|
||||||
|
dst := tempDirRef.tempDirectory
|
||||||
|
if err := archive.UntarPath(src, dst); err != nil {
|
||||||
|
if err := tempDirRef.deleteTempDir(); err != nil {
|
||||||
|
return tempDirOCIRef{}, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
|
||||||
|
}
|
||||||
|
return tempDirOCIRef{}, errors.Wrapf(err, "error untarring file %q", tempDirRef.tempDirectory)
|
||||||
|
}
|
||||||
|
return tempDirRef, nil
|
||||||
|
}
|
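As a reading aid (not part of the vendored commit; names and paths are illustrative), the file/image split performed by ParseReference above can be reproduced with a few lines of standalone Go. An unqualified reference such as "/tmp/busybox.tar" leaves the image name empty, which the transport later interprets as "use the only image in index.json".

package main

import (
	"fmt"
	"strings"
)

// splitRef mirrors the strings.SplitN(reference, ":", 2) behaviour used by ParseReference.
func splitRef(reference string) (file, image string) {
	sep := strings.SplitN(reference, ":", 2)
	file = sep[0]
	if len(sep) == 2 {
		image = sep[1]
	}
	return file, image
}

func main() {
	for _, ref := range []string{"/tmp/busybox.tar:latest", "/tmp/busybox.tar"} {
		f, i := splitRef(ref)
		fmt.Printf("file=%q image=%q\n", f, i)
	}
}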
15 vendor/github.com/containers/image/oci/layout/oci_dest.go generated vendored

@@ -23,13 +23,16 @@ type ociImageDestination struct {
}

// newImageDestination returns an ImageDestination for writing to an existing directory.
-func newImageDestination(ref ociReference) types.ImageDestination {
+func newImageDestination(ref ociReference) (types.ImageDestination, error) {
+	if ref.image == "" {
+		return nil, errors.Errorf("cannot save image with empty image.ref.name")
+	}
	index := imgspecv1.Index{
		Versioned: imgspec.Versioned{
			SchemaVersion: 2,
		},
	}
-	return &ociImageDestination{ref: ref, index: index}
+	return &ociImageDestination{ref: ref, index: index}, nil
}

// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
@@ -63,7 +66,7 @@ func (d *ociImageDestination) ShouldCompressLayers() bool {
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *ociImageDestination) AcceptsForeignLayerURLs() bool {
-	return false
+	return true
}

// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
@@ -177,8 +180,12 @@ func (d *ociImageDestination) PutManifest(m []byte) error {
		return err
	}

+	if d.ref.image == "" {
+		return errors.Errorf("cannot save image with empyt image.ref.name")
+	}
+
	annotations := make(map[string]string)
-	annotations["org.opencontainers.image.ref.name"] = d.ref.tag
+	annotations["org.opencontainers.image.ref.name"] = d.ref.image
	desc.Annotations = annotations
	desc.Platform = &imgspecv1.Platform{
		Architecture: runtime.GOARCH,
58 vendor/github.com/containers/image/oci/layout/oci_src.go generated vendored

@@ -1,27 +1,46 @@
package layout

import (
+	"context"
	"io"
	"io/ioutil"
+	"net/http"
	"os"
+	"strconv"

+	"github.com/containers/image/pkg/tlsclientconfig"
	"github.com/containers/image/types"
+	"github.com/docker/go-connections/tlsconfig"
	"github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
)

type ociImageSource struct {
	ref        ociReference
	descriptor imgspecv1.Descriptor
+	client     *http.Client
}

// newImageSource returns an ImageSource for reading from an existing directory.
-func newImageSource(ref ociReference) (types.ImageSource, error) {
+func newImageSource(ctx *types.SystemContext, ref ociReference) (types.ImageSource, error) {
+	tr := tlsclientconfig.NewTransport()
+	tr.TLSClientConfig = tlsconfig.ServerDefault()
+
+	if ctx != nil && ctx.OCICertPath != "" {
+		if err := tlsclientconfig.SetupCertificates(ctx.OCICertPath, tr.TLSClientConfig); err != nil {
+			return nil, err
+		}
+		tr.TLSClientConfig.InsecureSkipVerify = ctx.OCIInsecureSkipTLSVerify
+	}
+
+	client := &http.Client{}
+	client.Transport = tr
	descriptor, err := ref.getManifestDescriptor()
	if err != nil {
		return nil, err
	}
-	return &ociImageSource{ref: ref, descriptor: descriptor}, nil
+	return &ociImageSource{ref: ref, descriptor: descriptor, client: client}, nil
}

// Reference returns the reference used to set up this source.
@@ -69,6 +88,10 @@ func (s *ociImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string

// GetBlob returns a stream for the specified blob, and the blob's size.
func (s *ociImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
+	if len(info.URLs) != 0 {
+		return s.getExternalBlob(info.URLs)
+	}
+
	path, err := s.ref.blobPath(info.Digest)
	if err != nil {
		return nil, 0, err
@@ -85,6 +108,35 @@ func (s *ociImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, err
	return r, fi.Size(), nil
}

-func (s *ociImageSource) GetSignatures() ([][]byte, error) {
+func (s *ociImageSource) GetSignatures(context.Context) ([][]byte, error) {
	return [][]byte{}, nil
}

+func (s *ociImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64, error) {
+	errWrap := errors.New("failed fetching external blob from all urls")
+	for _, url := range urls {
+		resp, err := s.client.Get(url)
+		if err != nil {
+			errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error())
+			continue
+		}
+
+		if resp.StatusCode != http.StatusOK {
+			resp.Body.Close()
+			errWrap = errors.Wrapf(errWrap, "fetching %s failed, response code not 200", url)
+			continue
+		}
+
+		return resp.Body, getBlobSize(resp), nil
+	}
+
+	return nil, 0, errWrap
+}
+
+func getBlobSize(resp *http.Response) int64 {
+	size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+	if err != nil {
+		size = -1
+	}
+	return size
+}
vendor/github.com/containers/image/oci/layout/oci_transport.go
generated
vendored
123
vendor/github.com/containers/image/oci/layout/oci_transport.go
generated
vendored
@ -36,7 +36,14 @@ func (t ociTransport) ParseReference(reference string) (types.ImageReference, er
|
|||||||
return ParseReference(reference)
|
return ParseReference(reference)
|
||||||
}
|
}
|
||||||
|
|
||||||
var refRegexp = regexp.MustCompile(`^([A-Za-z0-9._-]+)+$`)
|
// annotation spex from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
|
||||||
|
const (
|
||||||
|
separator = `(?:[-._:@+]|--)`
|
||||||
|
alphanum = `(?:[A-Za-z0-9]+)`
|
||||||
|
component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
|
||||||
|
)
|
||||||
|
|
||||||
|
var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
|
||||||
|
|
||||||
// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
|
// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
|
||||||
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
|
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
|
||||||
@ -44,19 +51,14 @@ var refRegexp = regexp.MustCompile(`^([A-Za-z0-9._-]+)+$`)
|
|||||||
// scope passed to this function will not be "", that value is always allowed.
|
// scope passed to this function will not be "", that value is always allowed.
|
||||||
func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
|
func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
|
||||||
var dir string
|
var dir string
|
||||||
sep := strings.LastIndex(scope, ":")
|
sep := strings.SplitN(scope, ":", 2)
|
||||||
if sep == -1 {
|
dir = sep[0]
|
||||||
dir = scope
|
|
||||||
} else {
|
|
||||||
dir = scope[:sep]
|
|
||||||
tag := scope[sep+1:]
|
|
||||||
if !refRegexp.MatchString(tag) {
|
|
||||||
return errors.Errorf("Invalid tag %s", tag)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.Contains(dir, ":") {
|
if len(sep) == 2 {
|
||||||
return errors.Errorf("Invalid OCI reference %s: path contains a colon", scope)
|
image := sep[1]
|
||||||
|
if !refRegexp.MatchString(image) {
|
||||||
|
return errors.Errorf("Invalid image %s", image)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if !strings.HasPrefix(dir, "/") {
|
if !strings.HasPrefix(dir, "/") {
|
||||||
@ -64,7 +66,7 @@ func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
|
|||||||
}
|
}
|
||||||
// Refuse also "/", otherwise "/" and "" would have the same semantics,
|
// Refuse also "/", otherwise "/" and "" would have the same semantics,
|
||||||
// and "" could be unexpectedly shadowed by the "/" entry.
|
// and "" could be unexpectedly shadowed by the "/" entry.
|
||||||
// (Note: we do allow "/:sometag", a bit ridiculous but why refuse it?)
|
// (Note: we do allow "/:someimage", a bit ridiculous but why refuse it?)
|
||||||
if scope == "/" {
|
if scope == "/" {
|
||||||
return errors.New(`Invalid scope "/": Use the generic default scope ""`)
|
return errors.New(`Invalid scope "/": Use the generic default scope ""`)
|
||||||
}
|
}
|
||||||
@ -85,28 +87,26 @@ type ociReference struct {
|
|||||||
// (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
|
// (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
|
||||||
dir string // As specified by the user. May be relative, contain symlinks, etc.
|
dir string // As specified by the user. May be relative, contain symlinks, etc.
|
||||||
resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
|
resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
|
||||||
tag string
|
image string // If image=="", it means the only image in the index.json is used
|
||||||
}
|
}
|
||||||
|
|
||||||
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
|
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
|
||||||
func ParseReference(reference string) (types.ImageReference, error) {
|
func ParseReference(reference string) (types.ImageReference, error) {
|
||||||
var dir, tag string
|
var dir, image string
|
||||||
sep := strings.LastIndex(reference, ":")
|
sep := strings.SplitN(reference, ":", 2)
|
||||||
if sep == -1 {
|
dir = sep[0]
|
||||||
dir = reference
|
|
||||||
tag = "latest"
|
if len(sep) == 2 {
|
||||||
} else {
|
image = sep[1]
|
||||||
dir = reference[:sep]
|
|
||||||
tag = reference[sep+1:]
|
|
||||||
}
|
}
|
||||||
return NewReference(dir, tag)
|
return NewReference(dir, image)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewReference returns an OCI reference for a directory and a tag.
|
// NewReference returns an OCI reference for a directory and a image.
|
||||||
//
|
//
|
||||||
// We do not expose an API supplying the resolvedDir; we could, but recomputing it
|
// We do not expose an API supplying the resolvedDir; we could, but recomputing it
|
||||||
// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
|
// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
|
||||||
func NewReference(dir, tag string) (types.ImageReference, error) {
|
func NewReference(dir, image string) (types.ImageReference, error) {
|
||||||
resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir)
|
resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -114,12 +114,12 @@ func NewReference(dir, tag string) (types.ImageReference, error) {
|
|||||||
// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
|
// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
|
||||||
// from being ambiguous with values of PolicyConfigurationIdentity.
|
// from being ambiguous with values of PolicyConfigurationIdentity.
|
||||||
if strings.Contains(resolved, ":") {
|
if strings.Contains(resolved, ":") {
|
||||||
return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", dir, tag, resolved)
|
return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", dir, image, resolved)
|
||||||
}
|
}
|
||||||
if !refRegexp.MatchString(tag) {
|
if len(image) > 0 && !refRegexp.MatchString(image) {
|
||||||
return nil, errors.Errorf("Invalid tag %s", tag)
|
return nil, errors.Errorf("Invalid image %s", image)
|
||||||
}
|
}
|
||||||
return ociReference{dir: dir, resolvedDir: resolved, tag: tag}, nil
|
return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ref ociReference) Transport() types.ImageTransport {
|
func (ref ociReference) Transport() types.ImageTransport {
|
||||||
@ -132,7 +132,7 @@ func (ref ociReference) Transport() types.ImageTransport {
|
|||||||
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
|
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
|
||||||
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
|
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
|
||||||
func (ref ociReference) StringWithinTransport() string {
|
func (ref ociReference) StringWithinTransport() string {
|
||||||
return fmt.Sprintf("%s:%s", ref.dir, ref.tag)
|
return fmt.Sprintf("%s:%s", ref.dir, ref.image)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DockerReference returns a Docker reference associated with this reference
|
// DockerReference returns a Docker reference associated with this reference
|
||||||
@ -150,7 +150,10 @@ func (ref ociReference) DockerReference() reference.Named {
|
|||||||
// not required/guaranteed that it will be a valid input to Transport().ParseReference().
|
// not required/guaranteed that it will be a valid input to Transport().ParseReference().
|
||||||
// Returns "" if configuration identities for these references are not supported.
|
// Returns "" if configuration identities for these references are not supported.
|
||||||
func (ref ociReference) PolicyConfigurationIdentity() string {
|
func (ref ociReference) PolicyConfigurationIdentity() string {
|
||||||
return fmt.Sprintf("%s:%s", ref.resolvedDir, ref.tag)
|
// NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
|
||||||
|
// same image and the two can’t be statically disambiguated. Using at least the repository directory is
|
||||||
|
// less granular but hopefully still useful.
|
||||||
|
return fmt.Sprintf("%s", ref.resolvedDir)
|
||||||
}
|
}
|
||||||
|
|
||||||
// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
|
// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
|
||||||
@ -179,7 +182,7 @@ func (ref ociReference) PolicyConfigurationNamespaces() []string {
|
|||||||
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
|
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
|
||||||
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
|
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
|
||||||
func (ref ociReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
|
func (ref ociReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
|
||||||
src, err := newImageSource(ref)
|
src, err := newImageSource(ctx, ref)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -196,38 +199,58 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
|
|||||||
if err := json.NewDecoder(indexJSON).Decode(&index); err != nil {
|
if err := json.NewDecoder(indexJSON).Decode(&index); err != nil {
|
||||||
return imgspecv1.Descriptor{}, err
|
return imgspecv1.Descriptor{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var d *imgspecv1.Descriptor
|
var d *imgspecv1.Descriptor
|
||||||
for _, md := range index.Manifests {
|
if ref.image == "" {
|
||||||
if md.MediaType != imgspecv1.MediaTypeImageManifest {
|
// return manifest if only one image is in the oci directory
|
||||||
continue
|
if len(index.Manifests) == 1 {
|
||||||
|
d = &index.Manifests[0]
|
||||||
|
} else {
|
||||||
|
// ask user to choose image when more than one image in the oci directory
|
||||||
|
return imgspecv1.Descriptor{}, errors.Wrapf(err, "more than one image in oci, choose an image")
|
||||||
}
|
}
|
||||||
refName, ok := md.Annotations["org.opencontainers.image.ref.name"]
|
} else {
|
||||||
if !ok {
|
// if image specified, look through all manifests for a match
|
||||||
continue
|
for _, md := range index.Manifests {
|
||||||
}
|
if md.MediaType != imgspecv1.MediaTypeImageManifest {
|
||||||
if refName == ref.tag {
|
continue
|
||||||
d = &md
|
}
|
||||||
break
|
refName, ok := md.Annotations["org.opencontainers.image.ref.name"]
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if refName == ref.image {
|
||||||
|
d = &md
|
||||||
|
break
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if d == nil {
|
if d == nil {
|
||||||
return imgspecv1.Descriptor{}, fmt.Errorf("no descriptor found for reference %q", ref.tag)
|
return imgspecv1.Descriptor{}, fmt.Errorf("no descriptor found for reference %q", ref.image)
|
||||||
}
|
}
|
||||||
return *d, nil
|
return *d, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewImageSource returns a types.ImageSource for this reference,
|
// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name
|
||||||
// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
|
// when pulling an image
|
||||||
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
|
func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
|
||||||
|
ociRef, ok := imgRef.(ociReference)
|
||||||
|
if !ok {
|
||||||
|
return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociRef")
|
||||||
|
}
|
||||||
|
return ociRef.getManifestDescriptor()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewImageSource returns a types.ImageSource for this reference.
|
||||||
// The caller must call .Close() on the returned ImageSource.
|
// The caller must call .Close() on the returned ImageSource.
|
||||||
func (ref ociReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
|
func (ref ociReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
|
||||||
return newImageSource(ref)
|
return newImageSource(ctx, ref)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewImageDestination returns a types.ImageDestination for this reference.
|
// NewImageDestination returns a types.ImageDestination for this reference.
|
||||||
// The caller must call .Close() on the returned ImageDestination.
|
// The caller must call .Close() on the returned ImageDestination.
|
||||||
func (ref ociReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
|
func (ref ociReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
|
||||||
return newImageDestination(ref), nil
|
return newImageDestination(ref)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteImage deletes the named image from the registry, if supported.
|
// DeleteImage deletes the named image from the registry, if supported.
|
||||||
|
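The tightened refRegexp above replaces the old `^([A-Za-z0-9._-]+)+$` pattern with the component-based grammar from the OCI annotations spec. A throwaway program (not from the commit; sample names are illustrative) shows what it accepts and rejects:

package main

import (
	"fmt"
	"regexp"
)

// Same building blocks as the vendored oci_transport.go.
const (
	separator = `(?:[-._:@+]|--)`
	alphanum  = `(?:[A-Za-z0-9]+)`
	component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
)

var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)

func main() {
	for _, name := range []string{"latest", "v1.0.0", "library/busybox:1.28", "bad..name", ""} {
		fmt.Printf("%-24q -> %v\n", name, refRegexp.MatchString(name))
	}
}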
42 vendor/github.com/containers/image/openshift/openshift.go generated vendored

@@ -2,6 +2,7 @@ package openshift

import (
	"bytes"
+	"context"
	"crypto/rand"
	"encoding/json"
	"fmt"
@@ -11,7 +12,6 @@ import (
	"net/url"
	"strings"

-	"github.com/Sirupsen/logrus"
	"github.com/containers/image/docker"
	"github.com/containers/image/docker/reference"
	"github.com/containers/image/manifest"
@@ -19,6 +19,7 @@ import (
	"github.com/containers/image/version"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
)

// openshiftClient is configuration for dealing with a single image stream, for reading or writing.
@@ -70,7 +71,7 @@ func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) {
}

// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object.
-func (c *openshiftClient) doRequest(method, path string, requestBody []byte) ([]byte, error) {
+func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) {
	url := *c.baseURL
	url.Path = path
	var requestBodyReader io.Reader
@@ -82,6 +83,7 @@ func (c *openshiftClient) doRequest(method, path string, requestBody []byte) ([]
	if err != nil {
		return nil, err
	}
+	req = req.WithContext(ctx)

	if len(c.bearerToken) != 0 {
		req.Header.Set("Authorization", "Bearer "+c.bearerToken)
@@ -132,10 +134,10 @@ func (c *openshiftClient) doRequest(method, path string, requestBody []byte) ([]
}

// getImage loads the specified image object.
-func (c *openshiftClient) getImage(imageStreamImageName string) (*image, error) {
+func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) {
	// FIXME: validate components per validation.IsValidPathSegmentName?
	path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName)
-	body, err := c.doRequest("GET", path, nil)
+	body, err := c.doRequest(ctx, "GET", path, nil)
	if err != nil {
		return nil, err
	}
@@ -160,18 +162,15 @@ func (c *openshiftClient) convertDockerImageReference(ref string) (string, error
type openshiftImageSource struct {
	client *openshiftClient
	// Values specific to this image
	ctx *types.SystemContext
-	requestedManifestMIMETypes []string
	// State
	docker               types.ImageSource // The Docker Registry endpoint, or nil if not resolved yet
	imageStreamImageName string            // Resolved image identifier, or "" if not known yet
}

-// newImageSource creates a new ImageSource for the specified reference,
-// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
-// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
+// newImageSource creates a new ImageSource for the specified reference.
// The caller must call .Close() on the returned ImageSource.
-func newImageSource(ctx *types.SystemContext, ref openshiftReference, requestedManifestMIMETypes []string) (types.ImageSource, error) {
+func newImageSource(ctx *types.SystemContext, ref openshiftReference) (types.ImageSource, error) {
	client, err := newOpenshiftClient(ref)
	if err != nil {
		return nil, err
@@ -180,7 +179,6 @@ func newImageSource(ctx *types.SystemContext, ref openshiftReference, requestedM
	return &openshiftImageSource{
		client: client,
		ctx:    ctx,
-		requestedManifestMIMETypes: requestedManifestMIMETypes,
	}, nil
}

@@ -203,7 +201,7 @@ func (s *openshiftImageSource) Close() error {
}

func (s *openshiftImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
-	if err := s.ensureImageIsResolved(); err != nil {
+	if err := s.ensureImageIsResolved(context.TODO()); err != nil {
		return nil, "", err
	}
	return s.docker.GetTargetManifest(digest)
@@ -212,7 +210,7 @@ func (s *openshiftImageSource) GetTargetManifest(digest digest.Digest) ([]byte,
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
func (s *openshiftImageSource) GetManifest() ([]byte, string, error) {
-	if err := s.ensureImageIsResolved(); err != nil {
+	if err := s.ensureImageIsResolved(context.TODO()); err != nil {
		return nil, "", err
	}
	return s.docker.GetManifest()
@@ -220,18 +218,18 @@ func (s *openshiftImageSource) GetManifest() ([]byte, string, error) {

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
func (s *openshiftImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
-	if err := s.ensureImageIsResolved(); err != nil {
+	if err := s.ensureImageIsResolved(context.TODO()); err != nil {
		return nil, 0, err
	}
	return s.docker.GetBlob(info)
}

-func (s *openshiftImageSource) GetSignatures() ([][]byte, error) {
-	if err := s.ensureImageIsResolved(); err != nil {
+func (s *openshiftImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
+	if err := s.ensureImageIsResolved(ctx); err != nil {
		return nil, err
	}

-	image, err := s.client.getImage(s.imageStreamImageName)
+	image, err := s.client.getImage(ctx, s.imageStreamImageName)
	if err != nil {
		return nil, err
	}
@@ -245,14 +243,14 @@ func (s *openshiftImageSource) GetSignatures() ([][]byte, error) {
}

// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
-func (s *openshiftImageSource) ensureImageIsResolved() error {
+func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error {
	if s.docker != nil {
		return nil
	}

	// FIXME: validate components per validation.IsValidPathSegmentName?
	path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream)
-	body, err := s.client.doRequest("GET", path, nil)
+	body, err := s.client.doRequest(ctx, "GET", path, nil)
	if err != nil {
		return err
	}
@@ -284,7 +282,7 @@ func (s *openshiftImageSource) ensureImageIsResolved() error {
	if err != nil {
		return err
	}
-	d, err := dockerRef.NewImageSource(s.ctx, s.requestedManifestMIMETypes)
+	d, err := dockerRef.NewImageSource(s.ctx)
	if err != nil {
		return err
	}
@@ -410,7 +408,7 @@ func (d *openshiftImageDestination) PutSignatures(signatures [][]byte) error {
		return nil // No need to even read the old state.
	}

-	image, err := d.client.getImage(d.imageStreamImageName)
+	image, err := d.client.getImage(context.TODO(), d.imageStreamImageName)
	if err != nil {
		return err
	}
@@ -451,7 +449,7 @@ sigExists:
		Content: newSig,
	}
	body, err := json.Marshal(sig)
-	_, err = d.client.doRequest("POST", "/oapi/v1/imagesignatures", body)
+	_, err = d.client.doRequest(context.TODO(), "POST", "/oapi/v1/imagesignatures", body)
	if err != nil {
		return err
	}
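A minimal, generic sketch (placeholders only, not the vendored client) of the context plumbing this hunk introduces: the caller's ctx is attached to the outgoing request with req.WithContext, so cancelling the context aborts the HTTP round trip.

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// doRequest shows the same WithContext pattern the vendored doRequest gains.
func doRequest(ctx context.Context, client *http.Client, url string) (int, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return 0, err
	}
	req = req.WithContext(ctx) // cancellation/timeouts now propagate to the request
	resp, err := client.Do(req)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	return resp.StatusCode, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	code, err := doRequest(ctx, http.DefaultClient, "https://example.com")
	fmt.Println(code, err)
}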
10 vendor/github.com/containers/image/openshift/openshift_transport.go generated vendored

@@ -130,19 +130,17 @@ func (ref openshiftReference) PolicyConfigurationNamespaces() []string {
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
func (ref openshiftReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
-	src, err := newImageSource(ctx, ref, nil)
+	src, err := newImageSource(ctx, ref)
	if err != nil {
		return nil, err
	}
	return genericImage.FromSource(src)
}

-// NewImageSource returns a types.ImageSource for this reference,
-// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
-// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
+// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
-func (ref openshiftReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
-	return newImageSource(ctx, ref, requestedManifestMIMETypes)
+func (ref openshiftReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
+	return newImageSource(ctx, ref)
}

// NewImageDestination returns a types.ImageDestination for this reference.
23 vendor/github.com/containers/image/ostree/ostree_dest.go generated vendored

@@ -1,3 +1,5 @@
+// +build !containers_image_ostree_stub
+
package ostree

import (
@@ -44,6 +46,7 @@ type ostreeImageDestination struct {
	schema     manifestSchema
	tmpDirPath string
	blobs      map[string]*blobToImport
+	digest     digest.Digest
}

// newImageDestination returns an ImageDestination for writing to an existing ostree.
@@ -52,7 +55,7 @@ func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDes
	if err := ensureDirectoryExists(tmpDirPath); err != nil {
		return nil, err
	}
-	return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}}, nil
+	return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, ""}, nil
}

// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
@@ -151,7 +154,7 @@ func fixFiles(dir string, usermode bool) error {
		if err != nil {
			return err
		}
-	} else if usermode && (info.Mode().IsRegular() || (info.Mode()&os.ModeSymlink) != 0) {
+	} else if usermode && (info.Mode().IsRegular()) {
		if err := os.Chmod(fullpath, info.Mode()|0600); err != nil {
			return err
		}
@@ -236,10 +239,10 @@ func (d *ostreeImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInf
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
-func (d *ostreeImageDestination) PutManifest(manifest []byte) error {
-	d.manifest = string(manifest)
+func (d *ostreeImageDestination) PutManifest(manifestBlob []byte) error {
+	d.manifest = string(manifestBlob)

-	if err := json.Unmarshal(manifest, &d.schema); err != nil {
+	if err := json.Unmarshal(manifestBlob, &d.schema); err != nil {
		return err
	}

@@ -248,7 +251,13 @@ func (d *ostreeImageDestination) PutManifest(manifest []byte) error {
		return err
	}

-	return ioutil.WriteFile(manifestPath, manifest, 0644)
+	digest, err := manifest.Digest(manifestBlob)
+	if err != nil {
+		return err
+	}
+	d.digest = digest
+
+	return ioutil.WriteFile(manifestPath, manifestBlob, 0644)
}

func (d *ostreeImageDestination) PutSignatures(signatures [][]byte) error {
@@ -302,7 +311,7 @@ func (d *ostreeImageDestination) Commit() error {

	manifestPath := filepath.Join(d.tmpDirPath, "manifest")

-	metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest))}
+	metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)), fmt.Sprintf("docker.digest=%s", string(d.digest))}
	err = d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata)

	_, err = repo.CommitTransaction()
vendor/github.com/containers/image/ostree/ostree_transport.go
generated
vendored
31
vendor/github.com/containers/image/ostree/ostree_transport.go
generated
vendored
@ -1,3 +1,5 @@
|
|||||||
|
// +build !containers_image_ostree_stub
|
||||||
|
|
||||||
package ostree
|
package ostree
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@ -82,24 +84,15 @@ func NewReference(image string, repo string) (types.ImageReference, error) {
|
|||||||
// image is not _really_ in a containers/image/docker/reference format;
|
// image is not _really_ in a containers/image/docker/reference format;
|
||||||
// as far as the libOSTree ociimage/* namespace is concerned, it is more or
|
// as far as the libOSTree ociimage/* namespace is concerned, it is more or
|
||||||
// less an arbitrary string with an implied tag.
|
// less an arbitrary string with an implied tag.
|
||||||
// We use the reference.* parsers basically for the default tag name in
|
// Parse the image using reference.ParseNormalizedNamed so that we can
|
||||||
// reference.TagNameOnly, and incidentally for some character set and length
|
// check whether the images has a tag specified and we can add ":latest" if needed
|
||||||
// restrictions.
|
ostreeImage, err := reference.ParseNormalizedNamed(image)
|
||||||
var ostreeImage reference.Named
|
|
||||||
s := strings.SplitN(image, ":", 2)
|
|
||||||
|
|
||||||
named, err := reference.WithName(s[0])
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(s) == 1 {
|
if reference.IsNameOnly(ostreeImage) {
|
||||||
ostreeImage = reference.TagNameOnly(named)
|
image = image + ":latest"
|
||||||
} else {
|
|
||||||
ostreeImage, err = reference.WithTag(named, s[1])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo)
|
resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo)
|
||||||
@ -121,8 +114,8 @@ func NewReference(image string, repo string) (types.ImageReference, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return ostreeReference{
|
return ostreeReference{
|
||||||
image: ostreeImage.String(),
|
image: image,
|
||||||
branchName: encodeOStreeRef(ostreeImage.String()),
|
branchName: encodeOStreeRef(image),
|
||||||
repo: resolved,
|
repo: resolved,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
@ -183,11 +176,9 @@ func (ref ostreeReference) NewImage(ctx *types.SystemContext) (types.Image, erro
|
|||||||
return nil, errors.New("Reading ostree: images is currently not supported")
|
return nil, errors.New("Reading ostree: images is currently not supported")
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewImageSource returns a types.ImageSource for this reference,
|
// NewImageSource returns a types.ImageSource for this reference.
|
||||||
// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
|
|
||||||
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
|
|
||||||
// The caller must call .Close() on the returned ImageSource.
|
// The caller must call .Close() on the returned ImageSource.
|
||||||
func (ref ostreeReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
|
func (ref ostreeReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
|
||||||
return nil, errors.New("Reading ostree: images is currently not supported")
|
return nil, errors.New("Reading ostree: images is currently not supported")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
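A hedged sketch of the new tag-defaulting behaviour in ostree's NewReference: reference.ParseNormalizedNamed plus reference.IsNameOnly decides whether ":latest" must be appended. The import path below is the upstream reference library the vendored docker/reference package mirrors; treat it as an assumption for this illustration.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

// normalize appends ":latest" only when the image name carries no tag or digest,
// the same defaulting the vendored NewReference now performs.
func normalize(image string) (string, error) {
	ref, err := reference.ParseNormalizedNamed(image)
	if err != nil {
		return "", err
	}
	if reference.IsNameOnly(ref) {
		image = image + ":latest"
	}
	return image, nil
}

func main() {
	for _, img := range []string{"busybox", "busybox:1.28"} {
		out, err := normalize(img)
		fmt.Println(out, err)
	}
}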
2 vendor/github.com/containers/image/pkg/compression/compression.go generated vendored

@@ -8,7 +8,7 @@ import (

	"github.com/pkg/errors"

-	"github.com/Sirupsen/logrus"
+	"github.com/sirupsen/logrus"
)

// DecompressorFunc returns the decompressed stream, given a compressed stream.
102 vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go generated vendored Normal file

@@ -0,0 +1,102 @@
package tlsclientconfig

import (
	"crypto/tls"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/docker/go-connections/sockets"
	"github.com/docker/go-connections/tlsconfig"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc
func SetupCertificates(dir string, tlsc *tls.Config) error {
	logrus.Debugf("Looking for TLS certificates and private keys in %s", dir)
	fs, err := ioutil.ReadDir(dir)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		if os.IsPermission(err) {
			logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err)
			return nil
		}
		return err
	}

	for _, f := range fs {
		fullPath := filepath.Join(dir, f.Name())
		if strings.HasSuffix(f.Name(), ".crt") {
			systemPool, err := tlsconfig.SystemCertPool()
			if err != nil {
				return errors.Wrap(err, "unable to get system cert pool")
			}
			tlsc.RootCAs = systemPool
			logrus.Debugf(" crt: %s", fullPath)
			data, err := ioutil.ReadFile(fullPath)
			if err != nil {
				return err
			}
			tlsc.RootCAs.AppendCertsFromPEM(data)
		}
		if strings.HasSuffix(f.Name(), ".cert") {
			certName := f.Name()
			keyName := certName[:len(certName)-5] + ".key"
			logrus.Debugf(" cert: %s", fullPath)
			if !hasFile(fs, keyName) {
				return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
			}
			cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
			if err != nil {
				return err
			}
			tlsc.Certificates = append(tlsc.Certificates, cert)
		}
		if strings.HasSuffix(f.Name(), ".key") {
			keyName := f.Name()
			certName := keyName[:len(keyName)-4] + ".cert"
			logrus.Debugf(" key: %s", fullPath)
			if !hasFile(fs, certName) {
				return errors.Errorf("missing client certificate %s for key %s", certName, keyName)
			}
		}
	}
	return nil
}

func hasFile(files []os.FileInfo, name string) bool {
	for _, f := range files {
		if f.Name() == name {
			return true
		}
	}
	return false
}

// NewTransport Creates a default transport
func NewTransport() *http.Transport {
	direct := &net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
		DualStack: true,
	}
	tr := &http.Transport{
		Proxy:               http.ProxyFromEnvironment,
		Dial:                direct.Dial,
		TLSHandshakeTimeout: 10 * time.Second,
		// TODO(dmcgowan): Call close idle connections when complete and use keep alive
		DisableKeepAlives: true,
	}
	proxyDialer, err := sockets.DialerFromEnvironment(direct)
	if err == nil {
		tr.Dial = proxyDialer.Dial
	}
	return tr
}
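For orientation, a short usage sketch of the new tlsclientconfig helpers (the certificate directory path is a placeholder; this is how the vendored oci_src.go wires them together, not additional code from the commit):

package main

import (
	"log"
	"net/http"

	"github.com/containers/image/pkg/tlsclientconfig"
	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// Build the default transport, apply server-default TLS settings,
	// then load any .crt/.cert/.key material found in the directory.
	tr := tlsclientconfig.NewTransport()
	tr.TLSClientConfig = tlsconfig.ServerDefault()
	if err := tlsclientconfig.SetupCertificates("/etc/containers/certs.d/example", tr.TLSClientConfig); err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Transport: tr}
	_ = client // use the client for registry or external-blob requests
}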
14 vendor/github.com/containers/image/signature/mechanism_openpgp.go generated vendored

@@ -132,11 +132,17 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [
	if md.SignedBy == nil {
		return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)}
	}
-	if md.Signature.SigLifetimeSecs != nil {
-		expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second)
-		if time.Now().After(expiry) {
-			return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)}
+	if md.Signature != nil {
+		if md.Signature.SigLifetimeSecs != nil {
+			expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second)
+			if time.Now().After(expiry) {
+				return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)}
+			}
		}
+	} else if md.SignatureV3 == nil {
+		// Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3,
+		// or sets md.SignatureError.
+		return nil, "", InvalidSignatureError{msg: "Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set"}
	}

	// Uppercase the fingerprint to be compatible with gpgme
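The expiry arithmetic guarded by the new md.Signature != nil check is just CreationTime plus SigLifetimeSecs; a self-contained illustration (placeholder values, not from the commit):

package main

import (
	"fmt"
	"time"
)

// expired reports whether a signature created at `creation` with an optional
// lifetime (in seconds) has expired at `now`; a nil lifetime never expires.
func expired(creation time.Time, lifetimeSecs *uint32, now time.Time) bool {
	if lifetimeSecs == nil {
		return false
	}
	expiry := creation.Add(time.Duration(*lifetimeSecs) * time.Second)
	return now.After(expiry)
}

func main() {
	life := uint32(3600) // one hour
	created := time.Now().Add(-2 * time.Hour)
	fmt.Println(expired(created, &life, time.Now())) // true: older than one hour
	fmt.Println(expired(created, nil, time.Now()))   // false: no lifetime recorded
}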
7 vendor/github.com/containers/image/signature/policy_eval.go generated vendored

@@ -6,9 +6,11 @@
package signature

import (
-	"github.com/Sirupsen/logrus"
+	"context"
+
	"github.com/containers/image/types"
	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
)

// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
@@ -188,7 +190,8 @@ func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(image types.UnparsedIma
	reqs := pc.requirementsForImageRef(image.Reference())

	// FIXME: rename Signatures to UnverifiedSignatures
-	unverifiedSignatures, err := image.Signatures()
+	// FIXME: pass context.Context
+	unverifiedSignatures, err := image.Signatures(context.TODO())
	if err != nil {
		return nil, err
	}
2 vendor/github.com/containers/image/signature/policy_eval_baselayer.go generated vendored

@@ -3,8 +3,8 @@
package signature

import (
-	"github.com/Sirupsen/logrus"
	"github.com/containers/image/types"
+	"github.com/sirupsen/logrus"
)

func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
4 vendor/github.com/containers/image/signature/policy_eval_signedby.go generated vendored

@@ -3,6 +3,7 @@
package signature

import (
+	"context"
	"fmt"
	"io/ioutil"
	"strings"
@@ -90,7 +91,8 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(image types.UnparsedImage, sig [
}

func (pr *prSignedBy) isRunningImageAllowed(image types.UnparsedImage) (bool, error) {
-	sigs, err := image.Signatures()
+	// FIXME: pass context.Context
+	sigs, err := image.Signatures(context.TODO())
	if err != nil {
		return false, err
	}
8 vendor/github.com/containers/image/signature/signature.go (generated, vendored)
@@ -180,13 +180,9 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
 	}
 	s.UntrustedDockerManifestDigest = digest.Digest(digestString)
 
-	if err := paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
+	return paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
 		"docker-reference": &s.UntrustedDockerReference,
-	}); err != nil {
-		return err
-	}
-
-	return nil
+	})
 }
 
 // Sign formats the signature and returns a blob signed using mech and keyIdentity
5 vendor/github.com/containers/image/storage/storage_image.go (generated, vendored)
@@ -2,6 +2,7 @@ package storage
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"io"
 	"io/ioutil"
@@ -9,7 +10,6 @@
 
 	"github.com/pkg/errors"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/image"
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
@@ -17,6 +17,7 @@
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/ioutils"
 	ddigest "github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
 )
 
 var (
@@ -537,7 +538,7 @@ func (s *storageImageSource) GetTargetManifest(digest ddigest.Digest) (manifestB
 	return nil, "", ErrNoManifestLists
 }
 
-func (s *storageImageSource) GetSignatures() (signatures [][]byte, err error) {
+func (s *storageImageSource) GetSignatures(ctx context.Context) (signatures [][]byte, err error) {
 	var offset int
 	signature, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures")
 	if err != nil {
4 vendor/github.com/containers/image/storage/storage_reference.go (generated, vendored)
@@ -3,11 +3,11 @@ package storage
 import (
 	"strings"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/types"
 	"github.com/containers/storage"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 // A storageReference holds an arbitrary name and/or an ID, which is a 32-byte
@@ -154,7 +154,7 @@ func (s storageReference) DeleteImage(ctx *types.SystemContext) error {
 	return err
 }
 
-func (s storageReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
+func (s storageReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
 	return newImageSource(s)
 }
 
2 vendor/github.com/containers/image/storage/storage_transport.go (generated, vendored)
@@ -6,7 +6,6 @@ import (
 
 	"github.com/pkg/errors"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/transports"
 	"github.com/containers/image/types"
@@ -14,6 +13,7 @@
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/opencontainers/go-digest"
 	ddigest "github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
 )
 
 func init() {
1 vendor/github.com/containers/image/transports/alltransports/alltransports.go (generated, vendored)
@@ -10,6 +10,7 @@ import (
 	_ "github.com/containers/image/docker"
 	_ "github.com/containers/image/docker/archive"
 	_ "github.com/containers/image/docker/daemon"
+	_ "github.com/containers/image/oci/archive"
 	_ "github.com/containers/image/oci/layout"
 	_ "github.com/containers/image/openshift"
 	// The ostree transport is registered by ostree*.go
10 vendor/github.com/containers/image/transports/transports.go (generated, vendored)
@@ -71,13 +71,19 @@ func ImageName(ref types.ImageReference) string {
 	return ref.Transport().Name() + ":" + ref.StringWithinTransport()
 }
 
-// ListNames returns a list of transport names
+// ListNames returns a list of non deprecated transport names.
+// Deprecated transports can be used, but are not presented to users.
 func ListNames() []string {
 	kt.mu.Lock()
 	defer kt.mu.Unlock()
+	deprecated := map[string]bool{
+		"atomic": true,
+	}
 	var names []string
 	for _, transport := range kt.transports {
-		names = append(names, transport.Name())
+		if !deprecated[transport.Name()] {
+			names = append(names, transport.Name())
+		}
 	}
 	sort.Strings(names)
 	return names
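ListNames now hides deprecated transports behind a lookup map instead of unregistering them, so references such as "atomic:" keep resolving while no longer being advertised to users. A standalone sketch of the same filter; the transport names here are just examples:

package main

import (
	"fmt"
	"sort"
)

// listNames filters out deprecated names while leaving them usable elsewhere,
// mirroring the map-based filter added to transports.ListNames above.
func listNames(all []string, deprecated map[string]bool) []string {
	var names []string
	for _, name := range all {
		if !deprecated[name] {
			names = append(names, name)
		}
	}
	sort.Strings(names)
	return names
}

func main() {
	all := []string{"docker", "oci", "atomic", "containers-storage"}
	deprecated := map[string]bool{"atomic": true}
	fmt.Println(listNames(all, deprecated)) // [containers-storage docker oci]
}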
28 vendor/github.com/containers/image/types/types.go (generated, vendored)
@@ -1,6 +1,7 @@
 package types
 
 import (
+	"context"
 	"io"
 	"time"
 
@@ -77,11 +78,9 @@
 	// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
 	// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
 	NewImage(ctx *SystemContext) (Image, error)
-	// NewImageSource returns a types.ImageSource for this reference,
-	// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
-	// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
+	// NewImageSource returns a types.ImageSource for this reference.
 	// The caller must call .Close() on the returned ImageSource.
-	NewImageSource(ctx *SystemContext, requestedManifestMIMETypes []string) (ImageSource, error)
+	NewImageSource(ctx *SystemContext) (ImageSource, error)
 	// NewImageDestination returns a types.ImageDestination for this reference.
 	// The caller must call .Close() on the returned ImageDestination.
 	NewImageDestination(ctx *SystemContext) (ImageDestination, error)
@@ -93,9 +92,10 @@
 // BlobInfo collects known information about a blob (layer/config).
 // In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that.
 type BlobInfo struct {
 	Digest digest.Digest // "" if unknown.
 	Size   int64         // -1 if unknown
 	URLs   []string
+	Annotations map[string]string
 }
 
 // ImageSource is a service, possibly remote (= slow), to download components of a single image.
@@ -121,7 +121,7 @@
 	// The Digest field in BlobInfo is guaranteed to be provided; Size may be -1.
 	GetBlob(BlobInfo) (io.ReadCloser, int64, error)
 	// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-	GetSignatures() ([][]byte, error)
+	GetSignatures(context.Context) ([][]byte, error)
 }
 
 // ImageDestination is a service, possibly remote (= slow), to store components of a single image.
@@ -204,7 +204,7 @@
 	// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
 	Manifest() ([]byte, string, error)
 	// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
-	Signatures() ([][]byte, error)
+	Signatures(ctx context.Context) ([][]byte, error)
 }
 
 // Image is the primary API for inspecting properties of images.
@@ -302,6 +302,16 @@
 	SignaturePolicyPath string
 	// If not "", overrides the system's default path for registries.d (Docker signature storage configuration)
 	RegistriesDirPath string
+	// Path to the system-wide registries configuration file
+	SystemRegistriesConfPath string
+
+	// === OCI.Transport overrides ===
+	// If not "", a directory containing a CA certificate (ending with ".crt"),
+	// a client certificate (ending with ".cert") and a client ceritificate key
+	// (ending with ".key") used when downloading OCI image layers.
+	OCICertPath string
+	// Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+	OCIInsecureSkipTLSVerify bool
 
 	// === docker.Transport overrides ===
 	// If not "", a directory containing a CA certificate (ending with ".crt"),
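With GetSignatures and Signatures now taking a context.Context, callers can bound the possibly remote (= slow) signature fetch with a deadline. A hedged sketch of how a caller might wire that up; the interface is cut down to the one method this example needs and does not claim to match the full vendored types.ImageSource:

package main

import (
	"context"
	"fmt"
	"time"
)

// signaturesGetter is a reduced stand-in for types.ImageSource, keeping only
// the context-aware GetSignatures method introduced in the diff above.
type signaturesGetter interface {
	GetSignatures(ctx context.Context) ([][]byte, error)
}

// fetchWithTimeout bounds the signature fetch with a deadline.
func fetchWithTimeout(src signaturesGetter, timeout time.Duration) ([][]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return src.GetSignatures(ctx)
}

type slowSource struct{}

func (slowSource) GetSignatures(ctx context.Context) ([][]byte, error) {
	select {
	case <-time.After(50 * time.Millisecond): // pretend network latency
		return [][]byte{[]byte("sig")}, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func main() {
	sigs, err := fetchWithTimeout(slowSource{}, time.Second)
	fmt.Println(len(sigs), err)
}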
14 vendor/github.com/containers/image/vendor.conf (generated, vendored)
@@ -1,9 +1,10 @@
-github.com/Sirupsen/logrus 7f4b1adc791766938c29457bed0703fb9134421a
-github.com/containers/storage 105f7c77aef0c797429e41552743bf5b03b63263
+github.com/sirupsen/logrus v1.0.0
+github.com/containers/storage 47536c89fcc545a87745e1a1573addc439409165
 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
-github.com/docker/distribution df5327f76fb6468b84a87771e361762b8be23fdb
-github.com/docker/docker 75843d36aa5c3eaade50da005f9e0ff2602f3d5e
-github.com/docker/go-connections 7da10c8c50cad14494ec818dcdfb6506265c0086
+github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
+github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
+github.com/docker/docker 30eb4d8cdc422b023d5f11f29a82ecb73554183b
+github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
 github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
 github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
 github.com/ghodss/yaml 04f313413ffd65ce25f2541bfd2b2ceec5c0908c
@@ -24,7 +25,7 @@ github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
 github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721
 golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8
 golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe
-golang.org/x/sys 075e574b89e4c2d22f2286a7e2b919519c6f3547
+golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08
 gopkg.in/cheggaaa/pb.v1 d7e6ca3010b6f084d8056847f55d7f572f180678
 gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a
 k8s.io/client-go bcde30fb7eaed76fd98a36b4120321b94995ffb6
@@ -35,3 +36,4 @@ github.com/tchap/go-patricia v2.2.6
 github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
 github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0
 github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
+github.com/gogo/protobuf/proto fcdc5011193ff531a548e9b0301828d5a5b97fd8
109 vendor/github.com/containers/storage/containers.go (generated, vendored)
@@ -2,7 +2,6 @@ package storage
 
 import (
 	"encoding/json"
-	"errors"
 	"io/ioutil"
 	"os"
 	"path/filepath"
@@ -11,11 +10,8 @@
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/containers/storage/pkg/stringid"
 	"github.com/containers/storage/pkg/truncindex"
-)
-
-var (
-	// ErrContainerUnknown indicates that there was no container with the specified name or ID
-	ErrContainerUnknown = errors.New("container not known")
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
 )
 
 // A Container is a reference to a read-write layer with metadata.
@@ -50,6 +46,10 @@ type Container struct {
 	// that has been stored, if they're known.
 	BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"`
 
+	// BigDataDigests maps the names in BigDataNames to the digests of the
+	// data that has been stored, if they're known.
+	BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"`
+
 	// Created is the datestamp for when this container was created. Older
 	// versions of the library did not track this information, so callers
 	// will likely want to use the IsZero() method to verify that a value
@@ -139,6 +139,7 @@ func (r *containerStore) Load() error {
 	ids := make(map[string]*Container)
 	names := make(map[string]*Container)
 	if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil {
+		idlist = make([]string, 0, len(containers))
 		for n, container := range containers {
 			idlist = append(idlist, container.ID)
 			ids[container.ID] = containers[n]
@@ -229,6 +230,9 @@ func (r *containerStore) SetFlag(id string, flag string, value interface{}) erro
 	if !ok {
 		return ErrContainerUnknown
 	}
+	if container.Flags == nil {
+		container.Flags = make(map[string]interface{})
+	}
 	container.Flags[flag] = value
 	return r.Save()
 }
@@ -245,6 +249,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
 	if _, idInUse := r.byid[id]; idInUse {
 		return nil, ErrDuplicateID
 	}
+	names = dedupeNames(names)
 	for _, name := range names {
 		if _, nameInUse := r.byname[name]; nameInUse {
 			return nil, ErrDuplicateName
@@ -252,15 +257,16 @@
 	}
 	if err == nil {
 		container = &Container{
 			ID:             id,
 			Names:          names,
 			ImageID:        image,
 			LayerID:        layer,
 			Metadata:       metadata,
 			BigDataNames:   []string{},
 			BigDataSizes:   make(map[string]int64),
+			BigDataDigests: make(map[string]digest.Digest),
 			Created:        time.Now().UTC(),
 			Flags:          make(map[string]interface{}),
 		}
 		r.containers = append(r.containers, container)
 		r.byid[id] = container
@@ -294,6 +300,7 @@ func (r *containerStore) removeName(container *Container, name string) {
 }
 
 func (r *containerStore) SetNames(id string, names []string) error {
+	names = dedupeNames(names)
 	if container, ok := r.lookup(id); ok {
 		for _, name := range container.Names {
 			delete(r.byname, name)
@@ -366,6 +373,9 @@ func (r *containerStore) Exists(id string) bool {
 }
 
 func (r *containerStore) BigData(id, key string) ([]byte, error) {
+	if key == "" {
+		return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve container big data value for empty name")
+	}
 	c, ok := r.lookup(id)
 	if !ok {
 		return nil, ErrContainerUnknown
@@ -374,16 +384,61 @@ func (r *containerStore) BigData(id, key string) ([]byte, error) {
 }
 
 func (r *containerStore) BigDataSize(id, key string) (int64, error) {
+	if key == "" {
+		return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of container big data with empty name")
+	}
 	c, ok := r.lookup(id)
 	if !ok {
 		return -1, ErrContainerUnknown
 	}
+	if c.BigDataSizes == nil {
+		c.BigDataSizes = make(map[string]int64)
+	}
 	if size, ok := c.BigDataSizes[key]; ok {
 		return size, nil
 	}
+	if data, err := r.BigData(id, key); err == nil && data != nil {
+		if r.SetBigData(id, key, data) == nil {
+			c, ok := r.lookup(id)
+			if !ok {
+				return -1, ErrContainerUnknown
+			}
+			if size, ok := c.BigDataSizes[key]; ok {
+				return size, nil
+			}
+		}
+	}
 	return -1, ErrSizeUnknown
 }
 
+func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
+	if key == "" {
+		return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of container big data value with empty name")
+	}
+	c, ok := r.lookup(id)
+	if !ok {
+		return "", ErrContainerUnknown
+	}
+	if c.BigDataDigests == nil {
+		c.BigDataDigests = make(map[string]digest.Digest)
+	}
+	if d, ok := c.BigDataDigests[key]; ok {
+		return d, nil
+	}
+	if data, err := r.BigData(id, key); err == nil && data != nil {
+		if r.SetBigData(id, key, data) == nil {
+			c, ok := r.lookup(id)
+			if !ok {
+				return "", ErrContainerUnknown
+			}
+			if d, ok := c.BigDataDigests[key]; ok {
+				return d, nil
+			}
+		}
+	}
+	return "", ErrDigestUnknown
+}
+
 func (r *containerStore) BigDataNames(id string) ([]string, error) {
 	c, ok := r.lookup(id)
 	if !ok {
@@ -393,6 +448,9 @@ func (r *containerStore) BigDataNames(id string) ([]string, error) {
 }
 
 func (r *containerStore) SetBigData(id, key string, data []byte) error {
+	if key == "" {
+		return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for container big data item")
+	}
 	c, ok := r.lookup(id)
 	if !ok {
 		return ErrContainerUnknown
@@ -403,19 +461,28 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
 	err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0600)
 	if err == nil {
 		save := false
-		oldSize, ok := c.BigDataSizes[key]
+		if c.BigDataSizes == nil {
+			c.BigDataSizes = make(map[string]int64)
+		}
+		oldSize, sizeOk := c.BigDataSizes[key]
 		c.BigDataSizes[key] = int64(len(data))
-		if !ok || oldSize != c.BigDataSizes[key] {
+		if c.BigDataDigests == nil {
+			c.BigDataDigests = make(map[string]digest.Digest)
+		}
+		oldDigest, digestOk := c.BigDataDigests[key]
+		newDigest := digest.Canonical.FromBytes(data)
+		c.BigDataDigests[key] = newDigest
+		if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
 			save = true
 		}
-		add := true
+		addName := true
 		for _, name := range c.BigDataNames {
 			if name == key {
-				add = false
+				addName = false
 				break
 			}
 		}
-		if add {
+		if addName {
 			c.BigDataNames = append(c.BigDataNames, key)
 			save = true
 		}
@@ -427,7 +494,7 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
 }
 
 func (r *containerStore) Wipe() error {
-	ids := []string{}
+	ids := make([]string, 0, len(r.byid))
 	for id := range r.byid {
 		ids = append(ids, id)
 	}
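containerStore.SetBigData now records a digest next to the size of each big-data item, using digest.Canonical.FromBytes, and only re-saves when either value changed. A small sketch of that bookkeeping, assuming only the go-digest package; the store type below is invented for illustration and is not the vendored containerStore:

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// bigDataStore keeps the same per-key size and digest maps that the vendored
// containerStore now maintains; the type itself is illustrative.
type bigDataStore struct {
	sizes   map[string]int64
	digests map[string]digest.Digest
}

// setBigData records both the size and the canonical (sha256) digest, and
// reports whether either value changed, which is when the store would re-save.
func (s *bigDataStore) setBigData(key string, data []byte) (changed bool) {
	if s.sizes == nil {
		s.sizes = make(map[string]int64)
	}
	if s.digests == nil {
		s.digests = make(map[string]digest.Digest)
	}
	oldSize, sizeOk := s.sizes[key]
	oldDigest, digestOk := s.digests[key]
	newDigest := digest.Canonical.FromBytes(data)
	s.sizes[key] = int64(len(data))
	s.digests[key] = newDigest
	return !sizeOk || oldSize != int64(len(data)) || !digestOk || oldDigest != newDigest
}

func main() {
	var s bigDataStore
	fmt.Println(s.setBigData("config", []byte(`{"a":1}`))) // true: first write
	fmt.Println(s.setBigData("config", []byte(`{"a":1}`))) // false: unchanged
}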
250 vendor/github.com/containers/storage/drivers/aufs/aufs.go (generated, vendored)
@@ -25,6 +25,7 @@ package aufs
 import (
 	"bufio"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"os"
 	"os/exec"
@@ -32,22 +33,22 @@
 	"path/filepath"
 	"strings"
 	"sync"
-	"syscall"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/vbatts/tar-split/tar/storage"
+	"time"
 
 	"github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/chrootarchive"
 	"github.com/containers/storage/pkg/directory"
 	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/locker"
 	mountpk "github.com/containers/storage/pkg/mount"
-	"github.com/containers/storage/pkg/stringid"
-
+	"github.com/containers/storage/pkg/system"
 	rsystem "github.com/opencontainers/runc/libcontainer/system"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/vbatts/tar-split/tar/storage"
+	"golang.org/x/sys/unix"
 )
 
 var (
@@ -74,6 +75,8 @@ type Driver struct {
 	ctr           *graphdriver.RefCounter
 	pathCacheLock sync.Mutex
 	pathCache     map[string]string
+	naiveDiff     graphdriver.DiffDriver
+	locker        *locker.Locker
 }
 
 // Init returns a new AUFS driver.
@@ -83,6 +86,7 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
 	// Try to load the aufs kernel module
 	if err := supportsAufs(); err != nil {
 		return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support aufs")
+
 	}
 
 	fsMagic, err := graphdriver.GetFSMagic(root)
@@ -111,6 +115,7 @@
 		gidMaps:   gidMaps,
 		pathCache: make(map[string]string),
 		ctr:       graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)),
+		locker:    locker.New(),
 	}
 
 	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
@@ -137,6 +142,32 @@
 			return nil, err
 		}
 	}
+	logger := logrus.WithFields(logrus.Fields{
+		"module": "graphdriver",
+		"driver": "aufs",
+	})
+
+	for _, path := range []string{"mnt", "diff"} {
+		p := filepath.Join(root, path)
+		entries, err := ioutil.ReadDir(p)
+		if err != nil {
+			logger.WithError(err).WithField("dir", p).Error("error reading dir entries")
+			continue
+		}
+		for _, entry := range entries {
+			if !entry.IsDir() {
+				continue
+			}
+			if strings.HasSuffix(entry.Name(), "-removing") {
+				logger.WithField("dir", entry.Name()).Debug("Cleaning up stale layer dir")
+				if err := system.EnsureRemoveAll(filepath.Join(p, entry.Name())); err != nil {
+					logger.WithField("dir", entry.Name()).WithError(err).Error("Error removing stale layer dir")
+				}
+			}
+		}
+	}
+
+	a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, uidMaps, gidMaps)
 	return a, nil
 }
 
@@ -200,17 +231,22 @@ func (a *Driver) Exists(id string) bool {
 	return true
 }
 
+// AdditionalImageStores returns additional image stores supported by the driver
+func (a *Driver) AdditionalImageStores() []string {
+	return nil
+}
+
 // CreateReadWrite creates a layer that is writable for use as a container
 // file system.
-func (a *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
-	return a.Create(id, parent, mountLabel, storageOpt)
+func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+	return a.Create(id, parent, opts)
 }
 
 // Create three folders for each id
 // mnt, layers, and diff
-func (a *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
-
-	if len(storageOpt) != 0 {
+func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
+	if opts != nil && len(opts.StorageOpt) != 0 {
 		return fmt.Errorf("--storage-opt is not supported for aufs")
 	}
 
@@ -225,7 +261,7 @@ func (a *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str
 	defer f.Close()
 
 	if parent != "" {
-		ids, err := getParentIds(a.rootPath(), parent)
+		ids, err := getParentIDs(a.rootPath(), parent)
 		if err != nil {
 			return err
 		}
@@ -268,35 +304,68 @@ func (a *Driver) createDirsFor(id string) error {
 
 // Remove will unmount and remove the given id.
 func (a *Driver) Remove(id string) error {
+	a.locker.Lock(id)
+	defer a.locker.Unlock(id)
 	a.pathCacheLock.Lock()
 	mountpoint, exists := a.pathCache[id]
 	a.pathCacheLock.Unlock()
 	if !exists {
 		mountpoint = a.getMountpoint(id)
 	}
-	if err := a.unmount(mountpoint); err != nil {
-		// no need to return here, we can still try to remove since the `Rename` will fail below if still mounted
-		logrus.Debugf("aufs: error while unmounting %s: %v", mountpoint, err)
-	}
 
-	// Atomically remove each directory in turn by first moving it out of the
-	// way (so that container runtimes don't find it anymore) before doing removal of
-	// the whole tree.
-	tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id))
-	if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) {
-		return err
-	}
-	defer os.RemoveAll(tmpMntPath)
+	logger := logrus.WithFields(logrus.Fields{
+		"module": "graphdriver",
+		"driver": "aufs",
+		"layer":  id,
+	})
 
-	tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id))
-	if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) {
-		return err
+	var retries int
+	for {
+		mounted, err := a.mounted(mountpoint)
+		if err != nil {
+			if os.IsNotExist(err) {
+				break
+			}
+			return err
+		}
+		if !mounted {
+			break
+		}
+
+		err = a.unmount(mountpoint)
+		if err == nil {
+			break
+		}
+
+		if err != unix.EBUSY {
+			return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint)
+		}
+		if retries >= 5 {
+			return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint)
+		}
+		// If unmount returns EBUSY, it could be a transient error. Sleep and retry.
+		retries++
+		logger.Warnf("unmount failed due to EBUSY: retry count: %d", retries)
+		time.Sleep(100 * time.Millisecond)
 	}
-	defer os.RemoveAll(tmpDiffpath)
 
 	// Remove the layers file for the id
 	if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) {
-		return err
+		return errors.Wrapf(err, "error removing layers dir for %s", id)
+	}
+
+	if err := atomicRemove(a.getDiffPath(id)); err != nil {
+		return errors.Wrapf(err, "could not remove diff path for id %s", id)
+	}
+
+	// Atomically remove each directory in turn by first moving it out of the
+	// way (so that container runtime doesn't find it anymore) before doing removal of
+	// the whole tree.
+	if err := atomicRemove(mountpoint); err != nil {
+		if errors.Cause(err) == unix.EBUSY {
+			logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY")
+		}
+		return errors.Wrapf(err, "could not remove mountpoint for id %s", id)
 	}
 
 	a.pathCacheLock.Lock()
@@ -305,9 +374,29 @@ func (a *Driver) Remove(id string) error {
 	return nil
 }
 
+func atomicRemove(source string) error {
+	target := source + "-removing"
+
+	err := os.Rename(source, target)
+	switch {
+	case err == nil, os.IsNotExist(err):
+	case os.IsExist(err):
+		// Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove
+		if _, e := os.Stat(source); !os.IsNotExist(e) {
+			return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up")
+		}
+	default:
+		return errors.Wrapf(err, "error preparing atomic delete")
+	}
+
+	return system.EnsureRemoveAll(target)
+}
+
 // Get returns the rootfs path for the id.
-// This will mount the dir at it's given path
+// This will mount the dir at its given path
 func (a *Driver) Get(id, mountLabel string) (string, error) {
+	a.locker.Lock(id)
+	defer a.locker.Unlock(id)
 	parents, err := a.getParentLayerPaths(id)
 	if err != nil && !os.IsNotExist(err) {
 		return "", err
@@ -343,6 +432,8 @@ func (a *Driver) Get(id, mountLabel string) (string, error) {
 
 // Put unmounts and updates list of active mounts.
 func (a *Driver) Put(id string) error {
+	a.locker.Lock(id)
+	defer a.locker.Unlock(id)
 	a.pathCacheLock.Lock()
 	m, exists := a.pathCache[id]
 	if !exists {
@@ -361,9 +452,22 @@ func (a *Driver) Put(id string) error {
 	return err
 }
 
+// isParent returns if the passed in parent is the direct parent of the passed in layer
+func (a *Driver) isParent(id, parent string) bool {
+	parents, _ := getParentIDs(a.rootPath(), id)
+	if parent == "" && len(parents) > 0 {
+		return false
+	}
+	return !(len(parents) > 0 && parent != parents[0])
+}
+
 // Diff produces an archive of the changes between the specified
 // layer and its parent layer which may be "".
-func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
+func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) {
+	if !a.isParent(id, parent) {
+		return a.naiveDiff.Diff(id, parent)
+	}
+
 	// AUFS doesn't need the parent layer to produce a diff.
 	return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
 		Compression: archive.Uncompressed,
@@ -373,12 +477,6 @@ func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
 	})
 }
 
-// AdditionalImageStores returns additional image stores supported by the driver
-func (a *Driver) AdditionalImageStores() []string {
-	var imageStores []string
-	return imageStores
-}
-
 type fileGetNilCloser struct {
 	storage.FileGetter
 }
@@ -394,7 +492,7 @@ func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
 	return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil
 }
 
-func (a *Driver) applyDiff(id string, diff archive.Reader) error {
+func (a *Driver) applyDiff(id string, diff io.Reader) error {
 	return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
 		UIDMaps: a.uidMaps,
 		GIDMaps: a.gidMaps,
@@ -405,6 +503,9 @@ func (a *Driver) applyDiff(id string, diff archive.Reader) error {
 // and its parent and returns the size in bytes of the changes
 // relative to its base filesystem directory.
 func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
+	if !a.isParent(id, parent) {
+		return a.naiveDiff.DiffSize(id, parent)
+	}
 	// AUFS doesn't need the parent layer to calculate the diff size.
 	return directory.Size(path.Join(a.rootPath(), "diff", id))
 }
@@ -412,8 +513,12 @@
 // ApplyDiff extracts the changeset from the given diff into the
 // layer with the specified id and parent, returning the size of the
 // new layer in bytes.
-func (a *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
-	// AUFS doesn't need the parent id to apply the diff.
+func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
+	if !a.isParent(id, parent) {
+		return a.naiveDiff.ApplyDiff(id, parent, diff)
+	}
+
+	// AUFS doesn't need the parent id to apply the diff if it is the direct parent.
 	if err = a.applyDiff(id, diff); err != nil {
 		return
 	}
@@ -424,6 +529,10 @@
 // Changes produces a list of changes between the specified layer
 // and its parent layer. If parent is "", then all changes will be ADD changes.
 func (a *Driver) Changes(id, parent string) ([]archive.Change, error) {
+	if !a.isParent(id, parent) {
+		return a.naiveDiff.Changes(id, parent)
+	}
+
 	// AUFS doesn't have snapshots, so we need to get changes from all parent
 	// layers.
 	layers, err := a.getParentLayerPaths(id)
@@ -434,7 +543,7 @@ func (a *Driver) Changes(id, parent string) ([]archive.Change, error) {
 }
 
 func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
-	parentIds, err := getParentIds(a.rootPath(), id)
+	parentIds, err := getParentIDs(a.rootPath(), id)
 	if err != nil {
 		return nil, err
 	}
@@ -499,7 +608,7 @@ func (a *Driver) Cleanup() error {
 
 	for _, m := range dirs {
 		if err := a.unmount(m); err != nil {
-			logrus.Debugf("aufs error unmounting %s: %s", stringid.TruncateID(m), err)
+			logrus.Debugf("aufs error unmounting %s: %s", m, err)
 		}
 	}
 	return mountpk.Unmount(a.root)
@@ -517,46 +626,35 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
 
 	offset := 54
 	if useDirperm() {
-		offset += len("dirperm1")
+		offset += len(",dirperm1")
 	}
-	b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel
+	b := make([]byte, unix.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel
 	bp := copy(b, fmt.Sprintf("br:%s=rw", rw))
 
-	firstMount := true
-	i := 0
-
-	for {
-		for ; i < len(ro); i++ {
-			layer := fmt.Sprintf(":%s=ro+wh", ro[i])
-
-			if firstMount {
-				if bp+len(layer) > len(b) {
-					break
-				}
-				bp += copy(b[bp:], layer)
-			} else {
-				data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel)
-				if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil {
-					return
-				}
-			}
-		}
-
-		if firstMount {
-			opts := "dio,xino=/dev/shm/aufs.xino"
-			if useDirperm() {
-				opts += ",dirperm1"
-			}
-			data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel)
-			if err = mount("none", target, "aufs", 0, data); err != nil {
-				return
-			}
-			firstMount = false
-		}
-
-		if i == len(ro) {
+	index := 0
+	for ; index < len(ro); index++ {
+		layer := fmt.Sprintf(":%s=ro+wh", ro[index])
+		if bp+len(layer) > len(b) {
 			break
 		}
+		bp += copy(b[bp:], layer)
+	}
+
+	opts := "dio,xino=/dev/shm/aufs.xino"
+	if useDirperm() {
+		opts += ",dirperm1"
+	}
+	data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel)
+	if err = mount("none", target, "aufs", 0, data); err != nil {
+		return
+	}
+
+	for ; index < len(ro); index++ {
+		layer := fmt.Sprintf(":%s=ro+wh", ro[index])
+		data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel)
+		if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil {
+			return
+		}
 	}
 
 	return
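The rewritten aufs Remove retries the unmount a few times when it fails with EBUSY, and then deletes each directory by first renaming it to a "-removing" name so other code stops seeing it before the tree is removed. A standalone sketch of those two patterns, assuming golang.org/x/sys/unix on Linux; the paths, retry count, and use of os.RemoveAll (instead of the vendored system.EnsureRemoveAll) are illustrative choices:

package main

import (
	"fmt"
	"os"
	"time"

	"golang.org/x/sys/unix"
)

// unmountWithRetry treats EBUSY as transient and retries a few times,
// mirroring the retry loop added to the aufs driver's Remove.
func unmountWithRetry(mountpoint string, maxRetries int) error {
	for retries := 0; ; retries++ {
		err := unix.Unmount(mountpoint, 0)
		if err == nil || os.IsNotExist(err) {
			return nil
		}
		if err != unix.EBUSY || retries >= maxRetries {
			return fmt.Errorf("unmount %s: %w", mountpoint, err)
		}
		time.Sleep(100 * time.Millisecond)
	}
}

// atomicRemove renames the directory out of the way first, so other code no
// longer finds it, then removes the renamed tree.
func atomicRemove(source string) error {
	target := source + "-removing"
	if err := os.Rename(source, target); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("preparing atomic delete of %s: %w", source, err)
	}
	return os.RemoveAll(target)
}

func main() {
	// Usage sketch only; both calls need real mounts/directories to do anything.
	_ = unmountWithRetry("/var/lib/example/aufs/mnt/layer", 5)
	_ = atomicRemove("/var/lib/example/aufs/diff/layer")
}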
2 vendor/github.com/containers/storage/drivers/aufs/dirs.go (generated, vendored)
@@ -29,7 +29,7 @@ func loadIds(root string) ([]string, error) {
 //
 // If there are no lines in the file then the id has no parent
 // and an empty slice is returned.
-func getParentIds(root, id string) ([]string, error) {
+func getParentIDs(root, id string) ([]string, error) {
 	f, err := os.Open(path.Join(root, "layers", id))
 	if err != nil {
 		return nil, err
6 vendor/github.com/containers/storage/drivers/aufs/mount.go (generated, vendored)
@@ -4,9 +4,9 @@ package aufs
 
 import (
 	"os/exec"
-	"syscall"
 
-	"github.com/Sirupsen/logrus"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
 )
 
 // Unmount the target specified.
@@ -14,7 +14,7 @@ func Unmount(target string) error {
 	if err := exec.Command("auplink", target, "flush").Run(); err != nil {
 		logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err)
 	}
-	if err := syscall.Unmount(target, 0); err != nil {
+	if err := unix.Unmount(target, 0); err != nil {
 		return err
 	}
 	return nil
4 vendor/github.com/containers/storage/drivers/aufs/mount_linux.go (generated, vendored)
@@ -1,7 +1,7 @@
 package aufs
 
-import "syscall"
+import "golang.org/x/sys/unix"
 
 func mount(source string, target string, fstype string, flags uintptr, data string) error {
-	return syscall.Mount(source, target, fstype, flags, data)
+	return unix.Mount(source, target, fstype, flags, data)
 }
2 vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go (generated, vendored)
@@ -2,7 +2,7 @@
 
 package aufs
 
-import "github.com/pkg/errors"
+import "errors"
 
 // MsRemount declared to specify a non-linux system mount.
 const MsRemount = 0
264 vendor/github.com/containers/storage/drivers/btrfs/btrfs.go (generated, vendored)
@@ -16,31 +16,32 @@ import "C"
 
 import (
 	"fmt"
+	"io/ioutil"
+	"math"
 	"os"
 	"path"
 	"path/filepath"
+	"strconv"
 	"strings"
-	"syscall"
+	"sync"
 	"unsafe"
 
 	"github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/mount"
 	"github.com/containers/storage/pkg/parsers"
+	"github.com/containers/storage/pkg/system"
 	"github.com/docker/go-units"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
 )
 
 func init() {
 	graphdriver.Register("btrfs", Init)
 }
 
-var (
-	quotaEnabled  = false
-	userDiskQuota = false
-)
-
 type btrfsOptions struct {
 	minSpace uint64
 	size     uint64
@@ -71,18 +72,11 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
 		return nil, err
 	}
 
-	opt, err := parseOptions(options)
+	opt, userDiskQuota, err := parseOptions(options)
 	if err != nil {
 		return nil, err
 	}
 
-	if userDiskQuota {
-		if err := subvolEnableQuota(home); err != nil {
-			return nil, err
-		}
-		quotaEnabled = true
-	}
-
 	driver := &Driver{
 		home:    home,
 		uidMaps: uidMaps,
@@ -90,39 +84,48 @@
 		options: opt,
 	}
 
+	if userDiskQuota {
+		if err := driver.subvolEnableQuota(); err != nil {
+			return nil, err
+		}
+	}
+
 	return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil
 }
 
-func parseOptions(opt []string) (btrfsOptions, error) {
+func parseOptions(opt []string) (btrfsOptions, bool, error) {
 	var options btrfsOptions
+	userDiskQuota := false
 	for _, option := range opt {
 		key, val, err := parsers.ParseKeyValueOpt(option)
 		if err != nil {
-			return options, err
+			return options, userDiskQuota, err
 		}
 		key = strings.ToLower(key)
 		switch key {
 		case "btrfs.min_space":
 			minSpace, err := units.RAMInBytes(val)
 			if err != nil {
-				return options, err
+				return options, userDiskQuota, err
 			}
 			userDiskQuota = true
 			options.minSpace = uint64(minSpace)
 		default:
-			return options, fmt.Errorf("Unknown option %s", key)
+			return options, userDiskQuota, fmt.Errorf("Unknown option %s", key)
 		}
 	}
-	return options, nil
+	return options, userDiskQuota, nil
 }
 
 // Driver contains information about the filesystem mounted.
 type Driver struct {
 	//root of the file system
 	home    string
 	uidMaps []idtools.IDMap
 	gidMaps []idtools.IDMap
 	options btrfsOptions
+	quotaEnabled bool
+	once         sync.Once
 }
 
 // String prints the name of the driver (btrfs).
@@ -151,10 +154,8 @@ func (d *Driver) Metadata(id string) (map[string]string, error) {
 
 // Cleanup unmounts the home directory.
 func (d *Driver) Cleanup() error {
-	if quotaEnabled {
-		if err := subvolDisableQuota(d.home); err != nil {
-			return err
-		}
+	if err := d.subvolDisableQuota(); err != nil {
+		return err
 	}
 
 	return mount.Unmount(d.home)
@@ -197,7 +198,7 @@ func subvolCreate(path, name string) error {
 		args.name[i] = C.char(c)
 	}
 
-	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE,
+	_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE,
 		uintptr(unsafe.Pointer(&args)))
 	if errno != 0 {
 		return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error())
@@ -225,7 +226,7 @@ func subvolSnapshot(src, dest, name string) error {
 	C.set_name_btrfs_ioctl_vol_args_v2(&args, cs)
 	C.free(unsafe.Pointer(cs))
 
-	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2,
+	_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2,
 		uintptr(unsafe.Pointer(&args)))
 	if errno != 0 {
 		return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error())
@@ -234,8 +235,8 @@ func subvolSnapshot(src, dest, name string) error {
 }
 
 func isSubvolume(p string) (bool, error) {
-	var bufStat syscall.Stat_t
-	if err := syscall.Lstat(p, &bufStat); err != nil {
+	var bufStat unix.Stat_t
+	if err := unix.Lstat(p, &bufStat); err != nil {
 		return false, err
 	}
 
@@ -243,7 +244,7 @@ func isSubvolume(p string) (bool, error) {
 	return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil
 }
 
-func subvolDelete(dirpath, name string) error {
+func subvolDelete(dirpath, name string, quotaEnabled bool) error {
 	dir, err := openDir(dirpath)
 	if err != nil {
 		return err
@@ -271,7 +272,7 @@ func subvolDelete(dirpath, name string) error {
 			return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err)
|
||||||
}
|
}
|
||||||
if sv {
|
if sv {
|
||||||
if err := subvolDelete(path.Dir(p), f.Name()); err != nil {
|
if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil {
|
||||||
return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err)
|
return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -282,12 +283,27 @@ func subvolDelete(dirpath, name string) error {
|
|||||||
return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err)
|
return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if quotaEnabled {
|
||||||
|
if qgroupid, err := subvolLookupQgroup(fullPath); err == nil {
|
||||||
|
var args C.struct_btrfs_ioctl_qgroup_create_args
|
||||||
|
args.qgroupid = C.__u64(qgroupid)
|
||||||
|
|
||||||
|
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE,
|
||||||
|
uintptr(unsafe.Pointer(&args)))
|
||||||
|
if errno != 0 {
|
||||||
|
logrus.Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error())
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
logrus.Errorf("Failed to lookup btrfs qgroup for %s: %v", fullPath, err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// all subvolumes have been removed
|
// all subvolumes have been removed
|
||||||
// now remove the one originally passed in
|
// now remove the one originally passed in
|
||||||
for i, c := range []byte(name) {
|
for i, c := range []byte(name) {
|
||||||
args.name[i] = C.char(c)
|
args.name[i] = C.char(c)
|
||||||
}
|
}
|
||||||
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY,
|
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY,
|
||||||
uintptr(unsafe.Pointer(&args)))
|
uintptr(unsafe.Pointer(&args)))
|
||||||
if errno != 0 {
|
if errno != 0 {
|
||||||
return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error())
|
return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error())
|
||||||
@ -295,8 +311,27 @@ func subvolDelete(dirpath, name string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func subvolEnableQuota(path string) error {
|
func (d *Driver) updateQuotaStatus() {
|
||||||
dir, err := openDir(path)
|
d.once.Do(func() {
|
||||||
|
if !d.quotaEnabled {
|
||||||
|
// In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed
|
||||||
|
if err := subvolQgroupStatus(d.home); err != nil {
|
||||||
|
// quota is still not enabled
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.quotaEnabled = true
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Driver) subvolEnableQuota() error {
|
||||||
|
d.updateQuotaStatus()
|
||||||
|
|
||||||
|
if d.quotaEnabled {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
dir, err := openDir(d.home)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -304,17 +339,25 @@ func subvolEnableQuota(path string) error {
|
|||||||
|
|
||||||
var args C.struct_btrfs_ioctl_quota_ctl_args
|
var args C.struct_btrfs_ioctl_quota_ctl_args
|
||||||
args.cmd = C.BTRFS_QUOTA_CTL_ENABLE
|
args.cmd = C.BTRFS_QUOTA_CTL_ENABLE
|
||||||
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
|
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
|
||||||
uintptr(unsafe.Pointer(&args)))
|
uintptr(unsafe.Pointer(&args)))
|
||||||
if errno != 0 {
|
if errno != 0 {
|
||||||
return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error())
|
return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
d.quotaEnabled = true
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func subvolDisableQuota(path string) error {
|
func (d *Driver) subvolDisableQuota() error {
|
||||||
dir, err := openDir(path)
|
d.updateQuotaStatus()
|
||||||
|
|
||||||
|
if !d.quotaEnabled {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
dir, err := openDir(d.home)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -322,24 +365,32 @@ func subvolDisableQuota(path string) error {
|
|||||||
|
|
||||||
var args C.struct_btrfs_ioctl_quota_ctl_args
|
var args C.struct_btrfs_ioctl_quota_ctl_args
|
||||||
args.cmd = C.BTRFS_QUOTA_CTL_DISABLE
|
args.cmd = C.BTRFS_QUOTA_CTL_DISABLE
|
||||||
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
|
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
|
||||||
uintptr(unsafe.Pointer(&args)))
|
uintptr(unsafe.Pointer(&args)))
|
||||||
if errno != 0 {
|
if errno != 0 {
|
||||||
return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error())
|
return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
d.quotaEnabled = false
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func subvolRescanQuota(path string) error {
|
func (d *Driver) subvolRescanQuota() error {
|
||||||
dir, err := openDir(path)
|
d.updateQuotaStatus()
|
||||||
|
|
||||||
|
if !d.quotaEnabled {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
dir, err := openDir(d.home)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer closeDir(dir)
|
defer closeDir(dir)
|
||||||
|
|
||||||
var args C.struct_btrfs_ioctl_quota_rescan_args
|
var args C.struct_btrfs_ioctl_quota_rescan_args
|
||||||
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT,
|
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT,
|
||||||
uintptr(unsafe.Pointer(&args)))
|
uintptr(unsafe.Pointer(&args)))
|
||||||
if errno != 0 {
|
if errno != 0 {
|
||||||
return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error())
|
return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error())
|
||||||
@ -358,7 +409,7 @@ func subvolLimitQgroup(path string, size uint64) error {
|
|||||||
var args C.struct_btrfs_ioctl_qgroup_limit_args
|
var args C.struct_btrfs_ioctl_qgroup_limit_args
|
||||||
args.lim.max_referenced = C.__u64(size)
|
args.lim.max_referenced = C.__u64(size)
|
||||||
args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER
|
args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER
|
||||||
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT,
|
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT,
|
||||||
uintptr(unsafe.Pointer(&args)))
|
uintptr(unsafe.Pointer(&args)))
|
||||||
if errno != 0 {
|
if errno != 0 {
|
||||||
return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error())
|
return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error())
|
||||||
@ -367,6 +418,60 @@ func subvolLimitQgroup(path string, size uint64) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// subvolQgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path
|
||||||
|
// with search key of BTRFS_QGROUP_STATUS_KEY.
|
||||||
|
// In case qgroup is enabled, the retuned key type will match BTRFS_QGROUP_STATUS_KEY.
|
||||||
|
// For more details please see https://github.com/kdave/btrfs-progs/blob/v4.9/qgroup.c#L1035
|
||||||
|
func subvolQgroupStatus(path string) error {
|
||||||
|
dir, err := openDir(path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer closeDir(dir)
|
||||||
|
|
||||||
|
var args C.struct_btrfs_ioctl_search_args
|
||||||
|
args.key.tree_id = C.BTRFS_QUOTA_TREE_OBJECTID
|
||||||
|
args.key.min_type = C.BTRFS_QGROUP_STATUS_KEY
|
||||||
|
args.key.max_type = C.BTRFS_QGROUP_STATUS_KEY
|
||||||
|
args.key.max_objectid = C.__u64(math.MaxUint64)
|
||||||
|
args.key.max_offset = C.__u64(math.MaxUint64)
|
||||||
|
args.key.max_transid = C.__u64(math.MaxUint64)
|
||||||
|
args.key.nr_items = 4096
|
||||||
|
|
||||||
|
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH,
|
||||||
|
uintptr(unsafe.Pointer(&args)))
|
||||||
|
if errno != 0 {
|
||||||
|
return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error())
|
||||||
|
}
|
||||||
|
sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf))
|
||||||
|
if sh._type != C.BTRFS_QGROUP_STATUS_KEY {
|
||||||
|
return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func subvolLookupQgroup(path string) (uint64, error) {
|
||||||
|
dir, err := openDir(path)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
defer closeDir(dir)
|
||||||
|
|
||||||
|
var args C.struct_btrfs_ioctl_ino_lookup_args
|
||||||
|
args.objectid = C.BTRFS_FIRST_FREE_OBJECTID
|
||||||
|
|
||||||
|
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP,
|
||||||
|
uintptr(unsafe.Pointer(&args)))
|
||||||
|
if errno != 0 {
|
||||||
|
return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error())
|
||||||
|
}
|
||||||
|
if args.treeid == 0 {
|
||||||
|
return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
return uint64(args.treeid), nil
|
||||||
|
}
|
||||||
|
|
||||||
func (d *Driver) subvolumesDir() string {
|
func (d *Driver) subvolumesDir() string {
|
||||||
return path.Join(d.home, "subvolumes")
|
return path.Join(d.home, "subvolumes")
|
||||||
}
|
}
|
||||||
@ -375,14 +480,23 @@ func (d *Driver) subvolumesDirID(id string) string {
|
|||||||
return path.Join(d.subvolumesDir(), id)
|
return path.Join(d.subvolumesDir(), id)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Driver) quotasDir() string {
|
||||||
|
return path.Join(d.home, "quotas")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Driver) quotasDirID(id string) string {
|
||||||
|
return path.Join(d.quotasDir(), id)
|
||||||
|
}
|
||||||
|
|
||||||
// CreateReadWrite creates a layer that is writable for use as a container
|
// CreateReadWrite creates a layer that is writable for use as a container
|
||||||
// file system.
|
// file system.
|
||||||
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
|
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
|
||||||
return d.Create(id, parent, mountLabel, storageOpt)
|
return d.Create(id, parent, opts)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create the filesystem with given id.
|
// Create the filesystem with given id.
|
||||||
func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
|
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
|
||||||
|
quotas := path.Join(d.home, "quotas")
|
||||||
subvolumes := path.Join(d.home, "subvolumes")
|
subvolumes := path.Join(d.home, "subvolumes")
|
||||||
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
|
rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -409,14 +523,26 @@ func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var storageOpt map[string]string
|
||||||
|
if opts != nil {
|
||||||
|
storageOpt = opts.StorageOpt
|
||||||
|
}
|
||||||
|
|
||||||
if _, ok := storageOpt["size"]; ok {
|
if _, ok := storageOpt["size"]; ok {
|
||||||
driver := &Driver{}
|
driver := &Driver{}
|
||||||
if err := d.parseStorageOpt(storageOpt, driver); err != nil {
|
if err := d.parseStorageOpt(storageOpt, driver); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil {
|
if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// if we have a remapped root (user namespaces enabled), change the created snapshot
|
// if we have a remapped root (user namespaces enabled), change the created snapshot
|
||||||
@ -427,6 +553,11 @@ func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
mountLabel := ""
|
||||||
|
if opts != nil {
|
||||||
|
mountLabel = opts.MountLabel
|
||||||
|
}
|
||||||
|
|
||||||
return label.Relabel(path.Join(subvolumes, id), mountLabel, false)
|
return label.Relabel(path.Join(subvolumes, id), mountLabel, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -459,11 +590,8 @@ func (d *Driver) setStorageSize(dir string, driver *Driver) error {
|
|||||||
return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace)))
|
return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace)))
|
||||||
}
|
}
|
||||||
|
|
||||||
if !quotaEnabled {
|
if err := d.subvolEnableQuota(); err != nil {
|
||||||
if err := subvolEnableQuota(d.home); err != nil {
|
return err
|
||||||
return err
|
|
||||||
}
|
|
||||||
quotaEnabled = true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := subvolLimitQgroup(dir, driver.options.size); err != nil {
|
if err := subvolLimitQgroup(dir, driver.options.size); err != nil {
|
||||||
@ -479,13 +607,25 @@ func (d *Driver) Remove(id string) error {
|
|||||||
if _, err := os.Stat(dir); err != nil {
|
if _, err := os.Stat(dir); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := subvolDelete(d.subvolumesDir(), id); err != nil {
|
quotasDir := d.quotasDirID(id)
|
||||||
|
if _, err := os.Stat(quotasDir); err == nil {
|
||||||
|
if err := os.Remove(quotasDir); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else if !os.IsNotExist(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) {
|
|
||||||
|
// Call updateQuotaStatus() to invoke status update
|
||||||
|
d.updateQuotaStatus()
|
||||||
|
|
||||||
|
if err := subvolDelete(d.subvolumesDir(), id, d.quotaEnabled); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := subvolRescanQuota(d.home); err != nil {
|
if err := system.EnsureRemoveAll(dir); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := d.subvolRescanQuota(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
@ -503,6 +643,17 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
|
|||||||
return "", fmt.Errorf("%s: not a directory", dir)
|
return "", fmt.Errorf("%s: not a directory", dir)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil {
|
||||||
|
if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace {
|
||||||
|
if err := d.subvolEnableQuota(); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if err := subvolLimitQgroup(dir, size); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return dir, nil
|
return dir, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -522,6 +673,5 @@ func (d *Driver) Exists(id string) bool {
|
|||||||
|
|
||||||
// AdditionalImageStores returns additional image stores supported by the driver
|
// AdditionalImageStores returns additional image stores supported by the driver
|
||||||
func (d *Driver) AdditionalImageStores() []string {
|
func (d *Driver) AdditionalImageStores() []string {
|
||||||
var imageStores []string
|
return nil
|
||||||
return imageStores
|
|
||||||
}
|
}
|
||||||
|
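
Note on the btrfs changes above: the package-level quotaEnabled/userDiskQuota globals are replaced by per-Driver state that is probed lazily, at most once, through sync.Once before any quota ioctl. A minimal, self-contained sketch of that pattern follows; it is not the vendored code, and probe is a hypothetical stand-in for subvolQgroupStatus:

package main

import (
	"fmt"
	"sync"
)

// quotaDriver keeps quota state on the driver value instead of in globals.
// probe stands in for the real qgroup status check (an assumption here).
type quotaDriver struct {
	quotaEnabled bool
	once         sync.Once
	probe        func() error
}

// updateQuotaStatus runs the probe exactly once for the driver's lifetime.
func (d *quotaDriver) updateQuotaStatus() {
	d.once.Do(func() {
		if !d.quotaEnabled {
			if err := d.probe(); err != nil {
				return // quota still not enabled
			}
			d.quotaEnabled = true
		}
	})
}

func main() {
	d := &quotaDriver{probe: func() error { return nil }}
	d.updateQuotaStatus()
	fmt.Println("quotaEnabled:", d.quotaEnabled)
}
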
34 vendor/github.com/containers/storage/drivers/counter.go generated vendored
@ -22,30 +22,21 @@ func NewRefCounter(c Checker) *RefCounter {
 	}
 }
 
-// Increment increaes the ref count for the given id and returns the current count
+// Increment increases the ref count for the given id and returns the current count
 func (c *RefCounter) Increment(path string) int {
-	c.mu.Lock()
-	m := c.counts[path]
-	if m == nil {
-		m = &minfo{}
-		c.counts[path] = m
-	}
-	// if we are checking this path for the first time check to make sure
-	// if it was already mounted on the system and make sure we have a correct ref
-	// count if it is mounted as it is in use.
-	if !m.check {
-		m.check = true
-		if c.checker.IsMounted(path) {
-			m.count++
-		}
-	}
-	m.count++
-	c.mu.Unlock()
-	return m.count
+	return c.incdec(path, func(minfo *minfo) {
+		minfo.count++
+	})
 }
 
 // Decrement decreases the ref count for the given id and returns the current count
 func (c *RefCounter) Decrement(path string) int {
+	return c.incdec(path, func(minfo *minfo) {
+		minfo.count--
+	})
+}
+
+func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int {
 	c.mu.Lock()
 	m := c.counts[path]
 	if m == nil {
@ -61,7 +52,8 @@ func (c *RefCounter) Decrement(path string) int {
 			m.count++
 		}
 	}
-	m.count--
+	infoOp(m)
+	count := m.count
 	c.mu.Unlock()
-	return m.count
+	return count
 }
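
The counter.go hunk above folds Increment and Decrement into one locked helper that differs only in the closure it applies. A stripped-down illustration of that refactor (assumptions: the IsMounted bookkeeping from the vendored code is omitted, and the type names here are invented for the example):

package main

import (
	"fmt"
	"sync"
)

// refCounter funnels both operations through one mutex-protected helper.
type refCounter struct {
	mu     sync.Mutex
	counts map[string]*int
}

// incdec looks up (or creates) the counter for path, applies op, and
// returns the resulting count while still holding the lock.
func (c *refCounter) incdec(path string, op func(count *int)) int {
	c.mu.Lock()
	defer c.mu.Unlock()
	n := c.counts[path]
	if n == nil {
		n = new(int)
		c.counts[path] = n
	}
	op(n)
	return *n
}

func (c *refCounter) Increment(path string) int { return c.incdec(path, func(n *int) { *n++ }) }
func (c *refCounter) Decrement(path string) int { return c.incdec(path, func(n *int) { *n-- }) }

func main() {
	c := &refCounter{counts: map[string]*int{}}
	fmt.Println(c.Increment("/var/lib/containers"), c.Decrement("/var/lib/containers"))
}
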
236 vendor/github.com/containers/storage/drivers/devmapper/device_setup.go generated vendored Normal file
@ -0,0 +1,236 @@
package devmapper

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"reflect"
	"strings"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

type directLVMConfig struct {
	Device string
	ThinpPercent uint64
	ThinpMetaPercent uint64
	AutoExtendPercent uint64
	AutoExtendThreshold uint64
}

var (
	errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified")
	errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100")
	errMissingSetupDevice = errors.New("must provide device path in `dm.setup_device` in order to configure direct-lvm")
)

func validateLVMConfig(cfg directLVMConfig) error {
	if reflect.DeepEqual(cfg, directLVMConfig{}) {
		return nil
	}
	if cfg.Device == "" {
		return errMissingSetupDevice
	}
	if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 {
		return errThinpPercentMissing
	}

	if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 {
		return errThinpPercentTooBig
	}
	return nil
}

func checkDevAvailable(dev string) error {
	lvmScan, err := exec.LookPath("lvmdiskscan")
	if err != nil {
		logrus.Debug("could not find lvmdiskscan")
		return nil
	}

	out, err := exec.Command(lvmScan).CombinedOutput()
	if err != nil {
		logrus.WithError(err).Error(string(out))
		return nil
	}

	if !bytes.Contains(out, []byte(dev)) {
		return errors.Errorf("%s is not available for use with devicemapper", dev)
	}
	return nil
}

func checkDevInVG(dev string) error {
	pvDisplay, err := exec.LookPath("pvdisplay")
	if err != nil {
		logrus.Debug("could not find pvdisplay")
		return nil
	}

	out, err := exec.Command(pvDisplay, dev).CombinedOutput()
	if err != nil {
		logrus.WithError(err).Error(string(out))
		return nil
	}

	scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out)))
	for scanner.Scan() {
		fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name")
		if len(fields) > 1 {
			// got "VG Name" line"
			vg := strings.TrimSpace(fields[1])
			if len(vg) > 0 {
				return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg)
			}
			logrus.Error(fields)
			break
		}
	}
	return nil
}

func checkDevHasFS(dev string) error {
	blkid, err := exec.LookPath("blkid")
	if err != nil {
		logrus.Debug("could not find blkid")
		return nil
	}

	out, err := exec.Command(blkid, dev).CombinedOutput()
	if err != nil {
		logrus.WithError(err).Error(string(out))
		return nil
	}

	fields := bytes.Fields(out)
	for _, f := range fields {
		kv := bytes.Split(f, []byte{'='})
		if bytes.Equal(kv[0], []byte("TYPE")) {
			v := bytes.Trim(kv[1], "\"")
			if len(v) > 0 {
				return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev)
			}
			return nil
		}
	}
	return nil
}

func verifyBlockDevice(dev string, force bool) error {
	if err := checkDevAvailable(dev); err != nil {
		return err
	}
	if err := checkDevInVG(dev); err != nil {
		return err
	}

	if force {
		return nil
	}

	if err := checkDevHasFS(dev); err != nil {
		return err
	}
	return nil
}

func readLVMConfig(root string) (directLVMConfig, error) {
	var cfg directLVMConfig

	p := filepath.Join(root, "setup-config.json")
	b, err := ioutil.ReadFile(p)
	if err != nil {
		if os.IsNotExist(err) {
			return cfg, nil
		}
		return cfg, errors.Wrap(err, "error reading existing setup config")
	}

	// check if this is just an empty file, no need to produce a json error later if so
	if len(b) == 0 {
		return cfg, nil
	}

	err = json.Unmarshal(b, &cfg)
	return cfg, errors.Wrap(err, "error unmarshaling previous device setup config")
}

func writeLVMConfig(root string, cfg directLVMConfig) error {
	p := filepath.Join(root, "setup-config.json")
	b, err := json.Marshal(cfg)
	if err != nil {
		return errors.Wrap(err, "error marshalling direct lvm config")
	}
	err = ioutil.WriteFile(p, b, 0600)
	return errors.Wrap(err, "error writing direct lvm config to file")
}

func setupDirectLVM(cfg directLVMConfig) error {
	lvmProfileDir := "/etc/lvm/profile"
	binaries := []string{"pvcreate", "vgcreate", "lvcreate", "lvconvert", "lvchange", "thin_check"}

	for _, bin := range binaries {
		if _, err := exec.LookPath(bin); err != nil {
			return errors.Wrap(err, "error looking up command `"+bin+"` while setting up direct lvm")
		}
	}

	err := os.MkdirAll(lvmProfileDir, 0755)
	if err != nil {
		return errors.Wrap(err, "error creating lvm profile directory")
	}

	if cfg.AutoExtendPercent == 0 {
		cfg.AutoExtendPercent = 20
	}

	if cfg.AutoExtendThreshold == 0 {
		cfg.AutoExtendThreshold = 80
	}

	if cfg.ThinpPercent == 0 {
		cfg.ThinpPercent = 95
	}
	if cfg.ThinpMetaPercent == 0 {
		cfg.ThinpMetaPercent = 1
	}

	out, err := exec.Command("pvcreate", "-f", cfg.Device).CombinedOutput()
	if err != nil {
		return errors.Wrap(err, string(out))
	}

	out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput()
	if err != nil {
		return errors.Wrap(err, string(out))
	}

	out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput()
	if err != nil {
		return errors.Wrap(err, string(out))
	}
	out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput()
	if err != nil {
		return errors.Wrap(err, string(out))
	}

	out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput()
	if err != nil {
		return errors.Wrap(err, string(out))
	}

	profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent)
	err = ioutil.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600)
	if err != nil {
		return errors.Wrap(err, "error writing storage thinp autoextend profile")
	}

	out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput()
	return errors.Wrap(err, string(out))
}
vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
generated
vendored
574
vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
generated
vendored
@ -12,44 +12,41 @@ import (
|
|||||||
"os/exec"
|
"os/exec"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"syscall"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
|
|
||||||
"github.com/containers/storage/drivers"
|
"github.com/containers/storage/drivers"
|
||||||
"github.com/containers/storage/pkg/devicemapper"
|
"github.com/containers/storage/pkg/devicemapper"
|
||||||
|
"github.com/containers/storage/pkg/dmesg"
|
||||||
"github.com/containers/storage/pkg/idtools"
|
"github.com/containers/storage/pkg/idtools"
|
||||||
"github.com/containers/storage/pkg/ioutils"
|
|
||||||
"github.com/containers/storage/pkg/loopback"
|
"github.com/containers/storage/pkg/loopback"
|
||||||
"github.com/containers/storage/pkg/mount"
|
"github.com/containers/storage/pkg/mount"
|
||||||
"github.com/containers/storage/pkg/parsers"
|
"github.com/containers/storage/pkg/parsers"
|
||||||
"github.com/docker/go-units"
|
"github.com/containers/storage/pkg/parsers/kernel"
|
||||||
|
units "github.com/docker/go-units"
|
||||||
"github.com/opencontainers/selinux/go-selinux/label"
|
"github.com/opencontainers/selinux/go-selinux/label"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024
|
defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024
|
||||||
defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024
|
defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024
|
||||||
defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024
|
defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024
|
||||||
defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors
|
defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors
|
||||||
defaultUdevSyncOverride = false
|
defaultUdevSyncOverride = false
|
||||||
maxDeviceID = 0xffffff // 24 bit, pool limit
|
maxDeviceID = 0xffffff // 24 bit, pool limit
|
||||||
deviceIDMapSz = (maxDeviceID + 1) / 8
|
deviceIDMapSz = (maxDeviceID + 1) / 8
|
||||||
// We retry device removal so many a times that even error messages
|
|
||||||
// will fill up console during normal operation. So only log Fatal
|
|
||||||
// messages by default.
|
|
||||||
logLevel = devicemapper.LogLevelFatal
|
|
||||||
driverDeferredRemovalSupport = false
|
driverDeferredRemovalSupport = false
|
||||||
enableDeferredRemoval = false
|
enableDeferredRemoval = false
|
||||||
enableDeferredDeletion = false
|
enableDeferredDeletion = false
|
||||||
userBaseSize = false
|
userBaseSize = false
|
||||||
defaultMinFreeSpacePercent uint32 = 10
|
defaultMinFreeSpacePercent uint32 = 10
|
||||||
|
lvmSetupConfigForce bool
|
||||||
)
|
)
|
||||||
|
|
||||||
const deviceSetMetaFile string = "deviceset-metadata"
|
const deviceSetMetaFile string = "deviceset-metadata"
|
||||||
@ -122,6 +119,8 @@ type DeviceSet struct {
|
|||||||
uidMaps []idtools.IDMap
|
uidMaps []idtools.IDMap
|
||||||
gidMaps []idtools.IDMap
|
gidMaps []idtools.IDMap
|
||||||
minFreeSpacePercent uint32 //min free space percentage in thinpool
|
minFreeSpacePercent uint32 //min free space percentage in thinpool
|
||||||
|
xfsNospaceRetries string // max retries when xfs receives ENOSPC
|
||||||
|
lvmSetupConfig directLVMConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
// DiskUsage contains information about disk usage and is used when reporting Status of a device.
|
// DiskUsage contains information about disk usage and is used when reporting Status of a device.
|
||||||
@ -170,7 +169,7 @@ type Status struct {
|
|||||||
MinFreeSpace uint64
|
MinFreeSpace uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
// Structure used to export image/container metadata in docker inspect.
|
// Structure used to export image/container metadata in inspect.
|
||||||
type deviceMetadata struct {
|
type deviceMetadata struct {
|
||||||
deviceID int
|
deviceID int
|
||||||
deviceSize uint64 // size in bytes
|
deviceSize uint64 // size in bytes
|
||||||
@ -379,10 +378,7 @@ func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool {
|
|||||||
var mask byte
|
var mask byte
|
||||||
i := deviceID % 8
|
i := deviceID % 8
|
||||||
mask = (1 << uint(i))
|
mask = (1 << uint(i))
|
||||||
if (devices.deviceIDMap[deviceID/8] & mask) != 0 {
|
return (devices.deviceIDMap[deviceID/8] & mask) == 0
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Should be called with devices.Lock() held.
|
// Should be called with devices.Lock() held.
|
||||||
@ -409,8 +405,8 @@ func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) {
|
|||||||
// This function relies on that device hash map has been loaded in advance.
|
// This function relies on that device hash map has been loaded in advance.
|
||||||
// Should be called with devices.Lock() held.
|
// Should be called with devices.Lock() held.
|
||||||
func (devices *DeviceSet) constructDeviceIDMap() {
|
func (devices *DeviceSet) constructDeviceIDMap() {
|
||||||
logrus.Debugf("devmapper: constructDeviceIDMap()")
|
logrus.Debug("devmapper: constructDeviceIDMap()")
|
||||||
defer logrus.Debugf("devmapper: constructDeviceIDMap() END")
|
defer logrus.Debug("devmapper: constructDeviceIDMap() END")
|
||||||
|
|
||||||
for _, info := range devices.Devices {
|
for _, info := range devices.Devices {
|
||||||
devices.markDeviceIDUsed(info.DeviceID)
|
devices.markDeviceIDUsed(info.DeviceID)
|
||||||
@ -458,8 +454,8 @@ func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo)
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (devices *DeviceSet) loadDeviceFilesOnStart() error {
|
func (devices *DeviceSet) loadDeviceFilesOnStart() error {
|
||||||
logrus.Debugf("devmapper: loadDeviceFilesOnStart()")
|
logrus.Debug("devmapper: loadDeviceFilesOnStart()")
|
||||||
defer logrus.Debugf("devmapper: loadDeviceFilesOnStart() END")
|
defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END")
|
||||||
|
|
||||||
var scan = func(path string, info os.FileInfo, err error) error {
|
var scan = func(path string, info os.FileInfo, err error) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -479,11 +475,10 @@ func (devices *DeviceSet) loadDeviceFilesOnStart() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Should be called with devices.Lock() held.
|
// Should be called with devices.Lock() held.
|
||||||
func (devices *DeviceSet) unregisterDevice(id int, hash string) error {
|
func (devices *DeviceSet) unregisterDevice(hash string) error {
|
||||||
logrus.Debugf("devmapper: unregisterDevice(%v, %v)", id, hash)
|
logrus.Debugf("devmapper: unregisterDevice(%v)", hash)
|
||||||
info := &devInfo{
|
info := &devInfo{
|
||||||
Hash: hash,
|
Hash: hash,
|
||||||
DeviceID: id,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
delete(devices.Devices, hash)
|
delete(devices.Devices, hash)
|
||||||
@ -528,7 +523,7 @@ func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bo
|
|||||||
|
|
||||||
// Make sure deferred removal on device is canceled, if one was
|
// Make sure deferred removal on device is canceled, if one was
|
||||||
// scheduled.
|
// scheduled.
|
||||||
if err := devices.cancelDeferredRemoval(info); err != nil {
|
if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil {
|
||||||
return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err)
|
return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -539,11 +534,11 @@ func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bo
|
|||||||
return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size)
|
return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return true only if kernel supports xfs and mkfs.xfs is available
|
// xfsSupported checks if xfs is supported, returns nil if it is, otherwise an error
|
||||||
func xfsSupported() bool {
|
func xfsSupported() error {
|
||||||
// Make sure mkfs.xfs is available
|
// Make sure mkfs.xfs is available
|
||||||
if _, err := exec.LookPath("mkfs.xfs"); err != nil {
|
if _, err := exec.LookPath("mkfs.xfs"); err != nil {
|
||||||
return false
|
return err // error text is descriptive enough
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if kernel supports xfs filesystem or not.
|
// Check if kernel supports xfs filesystem or not.
|
||||||
@ -551,43 +546,48 @@ func xfsSupported() bool {
|
|||||||
|
|
||||||
f, err := os.Open("/proc/filesystems")
|
f, err := os.Open("/proc/filesystems")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err)
|
return errors.Wrapf(err, "error checking for xfs support")
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
defer f.Close()
|
defer f.Close()
|
||||||
|
|
||||||
s := bufio.NewScanner(f)
|
s := bufio.NewScanner(f)
|
||||||
for s.Scan() {
|
for s.Scan() {
|
||||||
if strings.HasSuffix(s.Text(), "\txfs") {
|
if strings.HasSuffix(s.Text(), "\txfs") {
|
||||||
return true
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.Err(); err != nil {
|
if err := s.Err(); err != nil {
|
||||||
logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err)
|
return errors.Wrapf(err, "error checking for xfs support")
|
||||||
}
|
}
|
||||||
return false
|
|
||||||
|
return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`)
|
||||||
}
|
}
|
||||||
|
|
||||||
func determineDefaultFS() string {
|
func determineDefaultFS() string {
|
||||||
if xfsSupported() {
|
err := xfsSupported()
|
||||||
|
if err == nil {
|
||||||
return "xfs"
|
return "xfs"
|
||||||
}
|
}
|
||||||
|
|
||||||
logrus.Warn("devmapper: XFS is not supported in your system. Either the kernel doesn't support it or mkfs.xfs is not in your PATH. Defaulting to ext4 filesystem")
|
logrus.Warnf("devmapper: XFS is not supported in your system (%v). Defaulting to ext4 filesystem", err)
|
||||||
return "ext4"
|
return "ext4"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// mkfsOptions tries to figure out whether some additional mkfs options are required
|
||||||
|
func mkfsOptions(fs string) []string {
|
||||||
|
if fs == "xfs" && !kernel.CheckKernelVersion(3, 16, 0) {
|
||||||
|
// For kernels earlier than 3.16 (and newer xfsutils),
|
||||||
|
// some xfs features need to be explicitly disabled.
|
||||||
|
return []string{"-m", "crc=0,finobt=0"}
|
||||||
|
}
|
||||||
|
|
||||||
|
return []string{}
|
||||||
|
}
|
||||||
|
|
||||||
func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) {
|
func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) {
|
||||||
devname := info.DevName()
|
devname := info.DevName()
|
||||||
|
|
||||||
args := []string{}
|
|
||||||
for _, arg := range devices.mkfsArgs {
|
|
||||||
args = append(args, arg)
|
|
||||||
}
|
|
||||||
|
|
||||||
args = append(args, devname)
|
|
||||||
|
|
||||||
if devices.filesystem == "" {
|
if devices.filesystem == "" {
|
||||||
devices.filesystem = determineDefaultFS()
|
devices.filesystem = determineDefaultFS()
|
||||||
}
|
}
|
||||||
@ -595,7 +595,11 @@ func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name())
|
args := mkfsOptions(devices.filesystem)
|
||||||
|
args = append(args, devices.mkfsArgs...)
|
||||||
|
args = append(args, devname)
|
||||||
|
|
||||||
|
logrus.Infof("devmapper: Creating filesystem %s on device %s, mkfs args: %v", devices.filesystem, info.Name(), args)
|
||||||
defer func() {
|
defer func() {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err)
|
logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err)
|
||||||
@ -833,7 +837,7 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := devices.closeTransaction(); err != nil {
|
if err := devices.closeTransaction(); err != nil {
|
||||||
devices.unregisterDevice(deviceID, hash)
|
devices.unregisterDevice(hash)
|
||||||
devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID)
|
devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID)
|
||||||
devices.markDeviceIDFree(deviceID)
|
devices.markDeviceIDFree(deviceID)
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -841,11 +845,57 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) {
|
|||||||
return info, nil
|
return info, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error {
|
func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error {
|
||||||
if err := devices.poolHasFreeSpace(); err != nil {
|
var (
|
||||||
|
devinfo *devicemapper.Info
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
|
||||||
|
if err = devices.poolHasFreeSpace(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if devices.deferredRemove {
|
||||||
|
devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if devinfo != nil && devinfo.DeferredRemove != 0 {
|
||||||
|
err = devices.cancelDeferredRemoval(baseInfo)
|
||||||
|
if err != nil {
|
||||||
|
// If Error is ErrEnxio. Device is probably already gone. Continue.
|
||||||
|
if errors.Cause(err) != devicemapper.ErrEnxio {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
devinfo = nil
|
||||||
|
} else {
|
||||||
|
defer devices.deactivateDevice(baseInfo)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
devinfo, err = devicemapper.GetInfo(baseInfo.Name())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
doSuspend := devinfo != nil && devinfo.Exists != 0
|
||||||
|
|
||||||
|
if doSuspend {
|
||||||
|
if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer devicemapper.ResumeDevice(baseInfo.Name())
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error {
|
||||||
deviceID, err := devices.getNextFreeDeviceID()
|
deviceID, err := devices.getNextFreeDeviceID()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -858,7 +908,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf
|
|||||||
}
|
}
|
||||||
|
|
||||||
for {
|
for {
|
||||||
if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceID, baseInfo.Name(), baseInfo.DeviceID); err != nil {
|
if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil {
|
||||||
if devicemapper.DeviceIDExists(err) {
|
if devicemapper.DeviceIDExists(err) {
|
||||||
// Device ID already exists. This should not
|
// Device ID already exists. This should not
|
||||||
// happen. Now we have a mechanism to find
|
// happen. Now we have a mechanism to find
|
||||||
@ -888,7 +938,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := devices.closeTransaction(); err != nil {
|
if err := devices.closeTransaction(); err != nil {
|
||||||
devices.unregisterDevice(deviceID, hash)
|
devices.unregisterDevice(hash)
|
||||||
devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID)
|
devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID)
|
||||||
devices.markDeviceIDFree(deviceID)
|
devices.markDeviceIDFree(deviceID)
|
||||||
return err
|
return err
|
||||||
@ -1134,7 +1184,7 @@ func (devices *DeviceSet) growFS(info *devInfo) error {
|
|||||||
|
|
||||||
defer devices.deactivateDevice(info)
|
defer devices.deactivateDevice(info)
|
||||||
|
|
||||||
fsMountPoint := "/run/containers/mnt"
|
fsMountPoint := "/run/containers/storage/mnt"
|
||||||
if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) {
|
if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) {
|
||||||
if err := os.MkdirAll(fsMountPoint, 0700); err != nil {
|
if err := os.MkdirAll(fsMountPoint, 0700); err != nil {
|
||||||
return err
|
return err
|
||||||
@ -1150,10 +1200,10 @@ func (devices *DeviceSet) growFS(info *devInfo) error {
|
|||||||
options = joinMountOptions(options, devices.mountOptions)
|
options = joinMountOptions(options, devices.mountOptions)
|
||||||
|
|
||||||
if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil {
|
if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil {
|
||||||
return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err)
|
return fmt.Errorf("Error mounting '%s' on '%s': %s\n%v", info.DevName(), fsMountPoint, err, string(dmesg.Dmesg(256)))
|
||||||
}
|
}
|
||||||
|
|
||||||
defer syscall.Unmount(fsMountPoint, syscall.MNT_DETACH)
|
defer unix.Unmount(fsMountPoint, unix.MNT_DETACH)
|
||||||
|
|
||||||
switch devices.BaseDeviceFilesystem {
|
switch devices.BaseDeviceFilesystem {
|
||||||
case "ext4":
|
case "ext4":
|
||||||
@ -1216,39 +1266,18 @@ func (devices *DeviceSet) setupBaseImage() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func setCloseOnExec(name string) {
|
func setCloseOnExec(name string) {
|
||||||
if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil {
|
fileInfos, _ := ioutil.ReadDir("/proc/self/fd")
|
||||||
for _, i := range fileInfos {
|
for _, i := range fileInfos {
|
||||||
link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
|
link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
|
||||||
if link == name {
|
if link == name {
|
||||||
fd, err := strconv.Atoi(i.Name())
|
fd, err := strconv.Atoi(i.Name())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
syscall.CloseOnExec(fd)
|
unix.CloseOnExec(fd)
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// DMLog implements logging using DevMapperLogger interface.
|
|
||||||
func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) {
|
|
||||||
// By default libdm sends us all the messages including debug ones.
|
|
||||||
// We need to filter out messages here and figure out which one
|
|
||||||
// should be printed.
|
|
||||||
if level > logLevel {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// FIXME(vbatts) push this back into ./pkg/devicemapper/
|
|
||||||
if level <= devicemapper.LogLevelErr {
|
|
||||||
logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
|
|
||||||
} else if level <= devicemapper.LogLevelInfo {
|
|
||||||
logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
|
|
||||||
} else {
|
|
||||||
// FIXME(vbatts) push this back into ./pkg/devicemapper/
|
|
||||||
logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func major(device uint64) uint64 {
|
func major(device uint64) uint64 {
|
||||||
return (device >> 8) & 0xfff
|
return (device >> 8) & 0xfff
|
||||||
}
|
}
|
||||||
@ -1356,10 +1385,7 @@ func (devices *DeviceSet) saveTransactionMetaData() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (devices *DeviceSet) removeTransactionMetaData() error {
|
func (devices *DeviceSet) removeTransactionMetaData() error {
|
||||||
if err := os.RemoveAll(devices.transactionMetaFile()); err != nil {
|
return os.RemoveAll(devices.transactionMetaFile())
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (devices *DeviceSet) rollbackTransaction() error {
|
func (devices *DeviceSet) rollbackTransaction() error {
|
||||||
@ -1464,12 +1490,9 @@ func (devices *DeviceSet) closeTransaction() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func determineDriverCapabilities(version string) error {
|
func determineDriverCapabilities(version string) error {
|
||||||
/*
|
// Kernel driver version >= 4.27.0 support deferred removal
|
||||||
* Driver version 4.27.0 and greater support deferred activation
|
|
||||||
* feature.
|
|
||||||
*/
|
|
||||||
|
|
||||||
logrus.Debugf("devicemapper: driver version is %s", version)
|
logrus.Debugf("devicemapper: kernel dm driver version is %s", version)
|
||||||
|
|
||||||
versionSplit := strings.Split(version, ".")
|
versionSplit := strings.Split(version, ".")
|
||||||
major, err := strconv.Atoi(versionSplit[0])
|
major, err := strconv.Atoi(versionSplit[0])
|
||||||
@ -1505,12 +1528,13 @@ func determineDriverCapabilities(version string) error {
|
|||||||
|
|
||||||
// Determine the major and minor number of loopback device
|
// Determine the major and minor number of loopback device
|
||||||
func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) {
|
func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) {
|
||||||
stat, err := file.Stat()
|
var stat unix.Stat_t
|
||||||
|
err := unix.Stat(file.Name(), &stat)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, 0, err
|
return 0, 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
dev := stat.Sys().(*syscall.Stat_t).Rdev
|
dev := stat.Rdev
|
||||||
majorNum := major(dev)
|
majorNum := major(dev)
|
||||||
minorNum := minor(dev)
|
minorNum := minor(dev)
|
||||||
|
|
||||||
@ -1648,36 +1672,19 @@ func (devices *DeviceSet) enableDeferredRemovalDeletion() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
|
||||||
// give ourselves to libdm as a log handler
|
|
||||||
devicemapper.LogInit(devices)
|
|
||||||
|
|
||||||
version, err := devicemapper.GetDriverVersion()
|
|
||||||
if err != nil {
|
|
||||||
// Can't even get driver version, assume not supported
|
|
||||||
return errors.Wrap(graphdriver.ErrNotSupported, "unable to determine version of device mapper")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := determineDriverCapabilities(version); err != nil {
|
|
||||||
return errors.Wrap(graphdriver.ErrNotSupported, "unable to determine device mapper driver capabilities")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := devices.enableDeferredRemovalDeletion(); err != nil {
|
if err := devices.enableDeferredRemovalDeletion(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// https://github.com/docker/docker/issues/4036
|
// https://github.com/docker/docker/issues/4036
|
||||||
// if supported := devicemapper.UdevSetSyncSupport(true); !supported {
|
if supported := devicemapper.UdevSetSyncSupport(true); !supported {
|
||||||
// if storageversion.IAmStatic == "true" {
|
logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options")
|
||||||
// logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
|
|
||||||
// } else {
|
if !devices.overrideUdevSyncCheck {
|
||||||
// logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
|
return graphdriver.ErrNotSupported
|
||||||
// }
|
}
|
||||||
//
|
}
|
||||||
// if !devices.overrideUdevSyncCheck {
|
|
||||||
// return graphdriver.ErrNotSupported
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
|
|
||||||
//create the root dir of the devmapper driver ownership to match this
|
//create the root dir of the devmapper driver ownership to match this
|
||||||
//daemon's remapped root uid/gid so containers can start properly
|
//daemon's remapped root uid/gid so containers can start properly
|
||||||
@ -1692,20 +1699,47 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set the device prefix from the device id and inode of the container root dir
|
prevSetupConfig, err := readLVMConfig(devices.root)
|
||||||
|
|
||||||
st, err := os.Stat(devices.root)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) {
|
||||||
|
if devices.thinPoolDevice != "" {
|
||||||
|
return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(prevSetupConfig, devices.lvmSetupConfig) {
|
||||||
|
if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) {
|
||||||
|
return errors.New("changing direct-lvm config is not supported")
|
||||||
|
}
|
||||||
|
logrus.WithField("storage-driver", "devicemapper").WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode")
|
||||||
|
if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := setupDirectLVM(devices.lvmSetupConfig); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
devices.thinPoolDevice = "storage-thinpool"
|
||||||
|
logrus.WithField("storage-driver", "devicemapper").Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the device prefix from the device id and inode of the storage root dir
|
||||||
|
var st unix.Stat_t
|
||||||
|
if err := unix.Stat(devices.root, &st); err != nil {
|
||||||
return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err)
|
return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err)
|
||||||
}
|
}
|
||||||
sysSt := st.Sys().(*syscall.Stat_t)
|
|
||||||
// "reg-" stands for "regular file".
|
// "reg-" stands for "regular file".
|
||||||
// In the future we might use "dev-" for "device file", etc.
|
// In the future we might use "dev-" for "device file", etc.
|
||||||
// container-maj,min[-inode] stands for:
|
// container-maj,min[-inode] stands for:
|
||||||
// - Managed by container storage
|
// - Managed by container storage
|
||||||
// - The target of this device is at major <maj> and minor <min>
|
// - The target of this device is at major <maj> and minor <min>
|
||||||
// - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
|
// - If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
|
||||||
devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)
|
devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(st.Dev), minor(st.Dev), st.Ino)
|
||||||
logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix)
|
logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix)
|
||||||
|
|
||||||
// Check for the existence of the thin-pool device
|
// Check for the existence of the thin-pool device
|
||||||
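The prefix generation above now reads the storage root with `unix.Stat_t` instead of `os.Stat` plus a `syscall.Stat_t` assertion. A self-contained sketch of the same idea, assuming `unix.Major`/`unix.Minor` are available in the vendored x/sys/unix (the in-tree code uses its own `major`/`minor` helpers):

```go
package sketch

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// devicePrefix derives the device-name prefix the way the new code does:
// from the major/minor of the device holding the storage root plus its inode.
func devicePrefix(root string) (string, error) {
	var st unix.Stat_t
	if err := unix.Stat(root, &st); err != nil {
		return "", fmt.Errorf("devmapper: error looking up dir %s: %s", root, err)
	}
	dev := uint64(st.Dev)
	return fmt.Sprintf("container-%d:%d-%d", unix.Major(dev), unix.Minor(dev), st.Ino), nil
}
```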
@ -1748,7 +1782,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
|||||||
hasData := devices.hasImage("data")
|
hasData := devices.hasImage("data")
|
||||||
|
|
||||||
if !doInit && !hasData {
|
if !doInit && !hasData {
|
||||||
return errors.New("Loopback data file not found")
|
return errors.New("loopback data file not found")
|
||||||
}
|
}
|
||||||
|
|
||||||
if !hasData {
|
if !hasData {
|
||||||
@ -1781,7 +1815,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
|||||||
hasMetadata := devices.hasImage("metadata")
|
hasMetadata := devices.hasImage("metadata")
|
||||||
|
|
||||||
if !doInit && !hasMetadata {
|
if !doInit && !hasMetadata {
|
||||||
return errors.New("Loopback metadata file not found")
|
return errors.New("loopback metadata file not found")
|
||||||
}
|
}
|
||||||
|
|
||||||
if !hasMetadata {
|
if !hasMetadata {
|
||||||
@ -1811,6 +1845,14 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
|||||||
if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil {
|
if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
defer func() {
|
||||||
|
if retErr != nil {
|
||||||
|
err = devices.deactivatePool()
|
||||||
|
if err != nil {
|
||||||
|
logrus.Warnf("devmapper: Failed to deactivatePool: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Pool already exists and caller did not pass us a pool. That means
|
// Pool already exists and caller did not pass us a pool. That means
|
||||||
@ -1857,8 +1899,8 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
|||||||
|
|
||||||
// AddDevice adds a device and registers in the hash.
|
// AddDevice adds a device and registers in the hash.
|
||||||
func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error {
|
func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error {
|
||||||
logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s)", hash, baseHash)
|
logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash)
|
||||||
defer logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s) END", hash, baseHash)
|
defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash)
|
||||||
|
|
||||||
// If a deleted device exists, return error.
|
// If a deleted device exists, return error.
|
||||||
baseInfo, err := devices.lookupDeviceWithLock(baseHash)
|
baseInfo, err := devices.lookupDeviceWithLock(baseHash)
|
||||||
@ -1895,7 +1937,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string
|
|||||||
return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size)))
|
return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size)))
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil {
|
if err := devices.takeSnapshot(hash, baseInfo, size); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1975,7 +2017,7 @@ func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) erro
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if err := devices.unregisterDevice(info.DeviceID, info.Hash); err != nil {
|
if err := devices.unregisterDevice(info.Hash); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// If device was already in deferred delete state that means
|
// If device was already in deferred delete state that means
|
||||||
@ -1996,8 +2038,8 @@ func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) erro
|
|||||||
|
|
||||||
// Issue discard only if device open count is zero.
|
// Issue discard only if device open count is zero.
|
||||||
func (devices *DeviceSet) issueDiscard(info *devInfo) error {
|
func (devices *DeviceSet) issueDiscard(info *devInfo) error {
|
||||||
logrus.Debugf("devmapper: issueDiscard(device: %s). START", info.Hash)
|
logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash)
|
||||||
defer logrus.Debugf("devmapper: issueDiscard(device: %s). END", info.Hash)
|
defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash)
|
||||||
// This is a workaround for the kernel not discarding block so
|
// This is a workaround for the kernel not discarding block so
|
||||||
// on the thin pool when we remove a thinp device, so we do it
|
// on the thin pool when we remove a thinp device, so we do it
|
||||||
// manually.
|
// manually.
|
||||||
@ -2030,7 +2072,16 @@ func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Try to deactivate device in case it is active.
|
// Try to deactivate device in case it is active.
|
||||||
if err := devices.deactivateDevice(info); err != nil {
|
// If deferred removal is enabled and deferred deletion is disabled
|
||||||
|
// then make sure device is removed synchronously. There have been
|
||||||
|
// some cases of device being busy for short duration and we would
|
||||||
|
// rather busy wait for device removal to take care of these cases.
|
||||||
|
deferredRemove := devices.deferredRemove
|
||||||
|
if !devices.deferredDelete {
|
||||||
|
deferredRemove = false
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := devices.deactivateDeviceMode(info, deferredRemove); err != nil {
|
||||||
logrus.Debugf("devmapper: Error deactivating device: %s", err)
|
logrus.Debugf("devmapper: Error deactivating device: %s", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -2046,8 +2097,8 @@ func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
|
|||||||
// removal. If one wants to override that and want DeleteDevice() to fail if
|
// removal. If one wants to override that and want DeleteDevice() to fail if
|
||||||
// device was busy and could not be deleted, set syncDelete=true.
|
// device was busy and could not be deleted, set syncDelete=true.
|
||||||
func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
|
func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
|
||||||
logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) START", hash, syncDelete)
|
logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete)
|
||||||
defer logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) END", hash, syncDelete)
|
defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete)
|
||||||
info, err := devices.lookupDeviceWithLock(hash)
|
info, err := devices.lookupDeviceWithLock(hash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -2063,8 +2114,8 @@ func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (devices *DeviceSet) deactivatePool() error {
|
func (devices *DeviceSet) deactivatePool() error {
|
||||||
logrus.Debug("devmapper: deactivatePool()")
|
logrus.Debug("devmapper: deactivatePool() START")
|
||||||
defer logrus.Debug("devmapper: deactivatePool END")
|
defer logrus.Debug("devmapper: deactivatePool() END")
|
||||||
devname := devices.getPoolDevName()
|
devname := devices.getPoolDevName()
|
||||||
|
|
||||||
devinfo, err := devicemapper.GetInfo(devname)
|
devinfo, err := devicemapper.GetInfo(devname)
|
||||||
@ -2087,7 +2138,12 @@ func (devices *DeviceSet) deactivatePool() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
|
func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
|
||||||
logrus.Debugf("devmapper: deactivateDevice(%s)", info.Hash)
|
return devices.deactivateDeviceMode(info, devices.deferredRemove)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove bool) error {
|
||||||
|
var err error
|
||||||
|
logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash)
|
||||||
defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash)
|
defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash)
|
||||||
|
|
||||||
devinfo, err := devicemapper.GetInfo(info.Name())
|
devinfo, err := devicemapper.GetInfo(info.Name())
|
||||||
@ -2099,14 +2155,17 @@ func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if devices.deferredRemove {
|
if deferredRemove {
|
||||||
if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil {
|
err = devicemapper.RemoveDeviceDeferred(info.Name())
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
if err := devices.removeDevice(info.Name()); err != nil {
|
err = devices.removeDevice(info.Name())
|
||||||
return err
|
}
|
||||||
}
|
|
||||||
|
// This function's semantics is such that it does not return an
|
||||||
|
// error if device does not exist. So if device went away by
|
||||||
|
// the time we actually tried to remove it, do not return error.
|
||||||
|
if errors.Cause(err) != devicemapper.ErrEnxio {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
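The new `deactivateDeviceMode` above treats ENXIO from the removal path as success, since the device may already be gone. A sketch of that pattern, with `removeQuietly` as an illustrative helper and `devicemapper.RemoveDevice` standing in for the DeviceSet's own `removeDevice` wrapper:

```go
package sketch

import (
	"github.com/containers/storage/pkg/devicemapper"
	"github.com/pkg/errors"
)

// removeQuietly removes a device-mapper device, deferred or synchronously,
// and swallows ENXIO ("no such device"): a device that already disappeared
// is not an error for this caller.
func removeQuietly(name string, deferred bool) error {
	var err error
	if deferred {
		err = devicemapper.RemoveDeviceDeferred(name)
	} else {
		err = devicemapper.RemoveDevice(name)
	}
	if err != nil && errors.Cause(err) != devicemapper.ErrEnxio {
		return err
	}
	return nil
}
```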
@ -2137,41 +2196,53 @@ func (devices *DeviceSet) removeDevice(devname string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
|
func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error {
|
||||||
if !devices.deferredRemove {
|
if !devices.deferredRemove {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name())
|
logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name())
|
||||||
defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name())
|
defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name())
|
||||||
|
|
||||||
devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())
|
devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
if devinfo != nil && devinfo.DeferredRemove == 0 {
|
if devinfo != nil && devinfo.DeferredRemove == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Cancel deferred remove
|
// Cancel deferred remove
|
||||||
for i := 0; i < 100; i++ {
|
if err := devices.cancelDeferredRemoval(info); err != nil {
|
||||||
err = devicemapper.CancelDeferredRemove(info.Name())
|
// If Error is ErrEnxio. Device is probably already gone. Continue.
|
||||||
if err == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if errors.Cause(err) == devicemapper.ErrEnxio {
|
|
||||||
// Device is probably already gone. Return success.
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if errors.Cause(err) != devicemapper.ErrBusy {
|
if errors.Cause(err) != devicemapper.ErrBusy {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// If we see EBUSY it may be a transient error,
|
func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
|
||||||
// sleep a bit a retry a few times.
|
logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name())
|
||||||
devices.Unlock()
|
defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name())
|
||||||
time.Sleep(100 * time.Millisecond)
|
|
||||||
devices.Lock()
|
var err error
|
||||||
|
|
||||||
|
// Cancel deferred remove
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
err = devicemapper.CancelDeferredRemove(info.Name())
|
||||||
|
if err != nil {
|
||||||
|
if errors.Cause(err) != devicemapper.ErrBusy {
|
||||||
|
// If we see EBUSY it may be a transient error,
|
||||||
|
// sleep a bit a retry a few times.
|
||||||
|
devices.Unlock()
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
devices.Lock()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
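The split of `cancelDeferredRemoval` above keeps the EBUSY retry loop: it drops the DeviceSet lock while sleeping so other operations can make progress. A generic sketch of that loop, with `retryOnBusy`, the mutex parameter, and the attempt count all illustrative rather than part of the vendored API:

```go
package sketch

import (
	"sync"
	"time"

	"github.com/containers/storage/pkg/devicemapper"
	"github.com/pkg/errors"
)

// retryOnBusy retries op while it fails with ErrBusy, releasing the lock
// (standing in for the DeviceSet lock) around each 100ms back-off.
func retryOnBusy(mu *sync.Mutex, attempts int, op func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		err = op()
		if err == nil || errors.Cause(err) != devicemapper.ErrBusy {
			break
		}
		// EBUSY is often transient; back off briefly without holding the lock.
		mu.Unlock()
		time.Sleep(100 * time.Millisecond)
		mu.Lock()
	}
	return err
}
```

In the vendored code the retried operation is, in effect, a closure around `devicemapper.CancelDeferredRemove(info.Name())`.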
@ -2209,9 +2280,6 @@ func (devices *DeviceSet) Shutdown(home string) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if p == path.Join(home, "mnt") {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if !info.IsDir() {
|
if !info.IsDir() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -2220,7 +2288,7 @@ func (devices *DeviceSet) Shutdown(home string) error {
|
|||||||
// We use MNT_DETACH here in case it is still busy in some running
|
// We use MNT_DETACH here in case it is still busy in some running
|
||||||
// container. This means it'll go away from the global scope directly,
|
// container. This means it'll go away from the global scope directly,
|
||||||
// and the device will be released when that container dies.
|
// and the device will be released when that container dies.
|
||||||
if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil {
|
if err := unix.Unmount(p, unix.MNT_DETACH); err != nil {
|
||||||
logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
|
logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -2263,6 +2331,34 @@ func (devices *DeviceSet) Shutdown(home string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Recent XFS changes allow changing behavior of filesystem in case of errors.
|
||||||
|
// When thin pool gets full and XFS gets ENOSPC error, currently it tries
|
||||||
|
// IO infinitely and sometimes it can block the container process
|
||||||
|
// and process can't be killed. With 0 value, XFS will not retry upon error
|
||||||
|
// and instead will shutdown filesystem.
|
||||||
|
|
||||||
|
func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error {
|
||||||
|
dmDevicePath, err := os.Readlink(info.DevName())
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
dmDeviceName := path.Base(dmDevicePath)
|
||||||
|
filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries"
|
||||||
|
maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%v", err)
|
||||||
|
}
|
||||||
|
defer maxRetriesFile.Close()
|
||||||
|
|
||||||
|
// Set max retries to 0
|
||||||
|
_, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
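`xfsSetNospaceRetries` above writes the retry count into the XFS sysfs tree for the resolved dm-N device. A small sketch of just the path resolution it performs, using only the standard library; the device name in the comment is an example only:

```go
package sketch

import (
	"fmt"
	"os"
	"path"
)

// xfsMaxRetriesPath resolves a /dev/mapper symlink to its dm-N name and
// builds the sysfs knob that xfsSetNospaceRetries writes to.
func xfsMaxRetriesPath(devName string) (string, error) {
	// e.g. /dev/mapper/container-8:1-1234-abcd -> ../dm-3
	target, err := os.Readlink(devName)
	if err != nil {
		return "", fmt.Errorf("devmapper: readlink failed for device %v: %v", devName, err)
	}
	return path.Join("/sys/fs/xfs", path.Base(target), "error/metadata/ENOSPC/max_retries"), nil
}
```

Writing "0" to that file tells XFS to shut the filesystem down on ENOSPC instead of retrying forever.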
// MountDevice mounts the device if not already mounted.
|
// MountDevice mounts the device if not already mounted.
|
||||||
func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
|
func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
|
||||||
info, err := devices.lookupDeviceWithLock(hash)
|
info, err := devices.lookupDeviceWithLock(hash)
|
||||||
@ -2300,7 +2396,15 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
|
|||||||
options = joinMountOptions(options, label.FormatMountLabel("", mountLabel))
|
options = joinMountOptions(options, label.FormatMountLabel("", mountLabel))
|
||||||
|
|
||||||
if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
|
if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
|
||||||
return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err)
|
return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s\n%v", info.DevName(), path, err, string(dmesg.Dmesg(256)))
|
||||||
|
}
|
||||||
|
|
||||||
|
if fstype == "xfs" && devices.xfsNospaceRetries != "" {
|
||||||
|
if err := devices.xfsSetNospaceRetries(info); err != nil {
|
||||||
|
unix.Unmount(path, unix.MNT_DETACH)
|
||||||
|
devices.deactivateDevice(info)
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@ -2308,8 +2412,8 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
|
|||||||
|
|
||||||
// UnmountDevice unmounts the device and removes it from hash.
|
// UnmountDevice unmounts the device and removes it from hash.
|
||||||
func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
|
func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
|
||||||
logrus.Debugf("devmapper: UnmountDevice(hash=%s)", hash)
|
logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash)
|
||||||
defer logrus.Debugf("devmapper: UnmountDevice(hash=%s) END", hash)
|
defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash)
|
||||||
|
|
||||||
info, err := devices.lookupDeviceWithLock(hash)
|
info, err := devices.lookupDeviceWithLock(hash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -2323,16 +2427,12 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
|
|||||||
defer devices.Unlock()
|
defer devices.Unlock()
|
||||||
|
|
||||||
logrus.Debugf("devmapper: Unmount(%s)", mountPath)
|
logrus.Debugf("devmapper: Unmount(%s)", mountPath)
|
||||||
if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil {
|
if err := unix.Unmount(mountPath, unix.MNT_DETACH); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
logrus.Debug("devmapper: Unmount done")
|
logrus.Debug("devmapper: Unmount done")
|
||||||
|
|
||||||
if err := devices.deactivateDevice(info); err != nil {
|
return devices.deactivateDevice(info)
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// HasDevice returns true if the device metadata exists.
|
// HasDevice returns true if the device metadata exists.
|
||||||
@ -2424,8 +2524,8 @@ func (devices *DeviceSet) MetadataDevicePath() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) {
|
func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) {
|
||||||
buf := new(syscall.Statfs_t)
|
buf := new(unix.Statfs_t)
|
||||||
if err := syscall.Statfs(loopFile, buf); err != nil {
|
if err := unix.Statfs(loopFile, buf); err != nil {
|
||||||
logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err)
|
logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err)
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
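`getUnderlyingAvailableSpace` above switches from `syscall.Statfs` to `unix.Statfs`. A sketch of the calculation it performs; whether to use `Bavail` (the unprivileged view, shown here) or `Bfree` (including reserved blocks) is a caller's choice in this sketch:

```go
package sketch

import (
	"golang.org/x/sys/unix"
)

// availableSpace returns the free bytes of the filesystem backing loopFile:
// free blocks times the filesystem block size.
func availableSpace(loopFile string) (uint64, error) {
	var buf unix.Statfs_t
	if err := unix.Statfs(loopFile, &buf); err != nil {
		return 0, err
	}
	return buf.Bavail * uint64(buf.Bsize), nil
}
```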
@ -2534,22 +2634,25 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
|
|||||||
minFreeSpacePercent: defaultMinFreeSpacePercent,
|
minFreeSpacePercent: defaultMinFreeSpacePercent,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Pick up initialization settings, if any were saved before
|
version, err := devicemapper.GetDriverVersion()
|
||||||
defaultsFile := path.Join(root, "defaults")
|
if err != nil {
|
||||||
defaultsBytes, err := ioutil.ReadFile(defaultsFile)
|
// Can't even get driver version, assume not supported
|
||||||
defaults := []string{}
|
return nil, graphdriver.ErrNotSupported
|
||||||
settings := map[string]string{}
|
}
|
||||||
if err == nil && len(defaultsBytes) > 0 {
|
|
||||||
defaults = strings.Split(string(defaultsBytes), "\n")
|
if err := determineDriverCapabilities(version); err != nil {
|
||||||
|
return nil, graphdriver.ErrNotSupported
|
||||||
|
}
|
||||||
|
|
||||||
|
if driverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport {
|
||||||
|
// enable deferred stuff by default
|
||||||
|
enableDeferredDeletion = true
|
||||||
|
enableDeferredRemoval = true
|
||||||
}
|
}
|
||||||
|
|
||||||
foundBlkDiscard := false
|
foundBlkDiscard := false
|
||||||
nthOption := 0
|
var lvmSetupConfig directLVMConfig
|
||||||
for _, option := range append(defaults, options...) {
|
for _, option := range options {
|
||||||
nthOption = nthOption + 1
|
|
||||||
if len(option) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
key, val, err := parsers.ParseKeyValueOpt(option)
|
key, val, err := parsers.ParseKeyValueOpt(option)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -2637,15 +2740,78 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
|
|||||||
}
|
}
|
||||||
|
|
||||||
devices.minFreeSpacePercent = uint32(minFreeSpacePercent)
|
devices.minFreeSpacePercent = uint32(minFreeSpacePercent)
|
||||||
default:
|
case "dm.xfs_nospace_max_retries":
|
||||||
if nthOption > len(defaults) {
|
_, err := strconv.ParseUint(val, 10, 64)
|
||||||
return nil, fmt.Errorf("devmapper: Unknown option %s", key)
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
logrus.Errorf("devmapper: Unknown option %s, ignoring", key)
|
devices.xfsNospaceRetries = val
|
||||||
|
case "dm.directlvm_device":
|
||||||
|
lvmSetupConfig.Device = val
|
||||||
|
case "dm.directlvm_device_force":
|
||||||
|
lvmSetupConfigForce, err = strconv.ParseBool(val)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
case "dm.thinp_percent":
|
||||||
|
per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val)
|
||||||
|
}
|
||||||
|
if per >= 100 {
|
||||||
|
return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100")
|
||||||
|
}
|
||||||
|
lvmSetupConfig.ThinpPercent = per
|
||||||
|
case "dm.thinp_metapercent":
|
||||||
|
per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val)
|
||||||
|
}
|
||||||
|
if per >= 100 {
|
||||||
|
return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100")
|
||||||
|
}
|
||||||
|
lvmSetupConfig.ThinpMetaPercent = per
|
||||||
|
case "dm.thinp_autoextend_percent":
|
||||||
|
per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val)
|
||||||
|
}
|
||||||
|
if per > 100 {
|
||||||
|
return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100")
|
||||||
|
}
|
||||||
|
lvmSetupConfig.AutoExtendPercent = per
|
||||||
|
case "dm.thinp_autoextend_threshold":
|
||||||
|
per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val)
|
||||||
|
}
|
||||||
|
if per > 100 {
|
||||||
|
return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100")
|
||||||
|
}
|
||||||
|
lvmSetupConfig.AutoExtendThreshold = per
|
||||||
|
case "dm.libdm_log_level":
|
||||||
|
level, err := strconv.ParseInt(val, 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not parse `dm.libdm_log_level=%s`", val)
|
||||||
|
}
|
||||||
|
if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug {
|
||||||
|
return nil, errors.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug)
|
||||||
|
}
|
||||||
|
// Register a new logging callback with the specified level.
|
||||||
|
devicemapper.LogInit(devicemapper.DefaultLogger{
|
||||||
|
Level: int(level),
|
||||||
|
})
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("devmapper: Unknown option %s", key)
|
||||||
}
|
}
|
||||||
settings[key] = val
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := validateLVMConfig(lvmSetupConfig); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
devices.lvmSetupConfig = lvmSetupConfig
|
||||||
|
|
||||||
// By default, don't do blk discard hack on raw devices, its rarely useful and is expensive
|
// By default, don't do blk discard hack on raw devices, its rarely useful and is expensive
|
||||||
if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") {
|
if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") {
|
||||||
devices.doBlkDiscard = false
|
devices.doBlkDiscard = false
|
||||||
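The constructor changes above add a family of `dm.*` options (direct-lvm device, thin-pool percentages, XFS retries) parsed with `parsers.ParseKeyValueOpt`. A sketch of that key=value handling; the option values are examples only, not recommendations:

```go
package sketch

import (
	"fmt"

	"github.com/containers/storage/pkg/parsers"
)

// parseDMOptions splits driver options the same way NewDeviceSet does before
// switching on the key.
func parseDMOptions() (map[string]string, error) {
	options := []string{
		"dm.directlvm_device=/dev/xvdf", // hypothetical spare block device
		"dm.thinp_percent=95",
		"dm.thinp_autoextend_threshold=80",
		"dm.xfs_nospace_max_retries=0",
	}
	parsed := map[string]string{}
	for _, o := range options {
		key, val, err := parsers.ParseKeyValueOpt(o)
		if err != nil {
			return nil, fmt.Errorf("bad option %q: %v", o, err)
		}
		parsed[key] = val
	}
	return parsed, nil
}
```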
@ -2655,15 +2821,5 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Save these settings along with the other metadata
|
|
||||||
defaults = []string{}
|
|
||||||
for key, val := range settings {
|
|
||||||
defaults = append(defaults, key+"="+val)
|
|
||||||
}
|
|
||||||
defaultsBytes = []byte(strings.Join(defaults, "\n") + "\n")
|
|
||||||
if err := ioutils.AtomicWriteFile(defaultsFile, defaultsBytes, 0600); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return devices, nil
|
return devices, nil
|
||||||
}
|
}
|
||||||
|
56
vendor/github.com/containers/storage/drivers/devmapper/driver.go
generated
vendored
@ -9,13 +9,15 @@ import (
|
|||||||
"path"
|
"path"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
|
|
||||||
"github.com/containers/storage/drivers"
|
"github.com/containers/storage/drivers"
|
||||||
"github.com/containers/storage/pkg/devicemapper"
|
"github.com/containers/storage/pkg/devicemapper"
|
||||||
"github.com/containers/storage/pkg/idtools"
|
"github.com/containers/storage/pkg/idtools"
|
||||||
|
"github.com/containers/storage/pkg/locker"
|
||||||
"github.com/containers/storage/pkg/mount"
|
"github.com/containers/storage/pkg/mount"
|
||||||
"github.com/docker/go-units"
|
"github.com/containers/storage/pkg/system"
|
||||||
|
units "github.com/docker/go-units"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@ -29,6 +31,7 @@ type Driver struct {
|
|||||||
uidMaps []idtools.IDMap
|
uidMaps []idtools.IDMap
|
||||||
gidMaps []idtools.IDMap
|
gidMaps []idtools.IDMap
|
||||||
ctr *graphdriver.RefCounter
|
ctr *graphdriver.RefCounter
|
||||||
|
locker *locker.Locker
|
||||||
}
|
}
|
||||||
|
|
||||||
// Init creates a driver with the given home and the set of options.
|
// Init creates a driver with the given home and the set of options.
|
||||||
@ -48,6 +51,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
|
|||||||
uidMaps: uidMaps,
|
uidMaps: uidMaps,
|
||||||
gidMaps: gidMaps,
|
gidMaps: gidMaps,
|
||||||
ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
|
ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()),
|
||||||
|
locker: locker.New(),
|
||||||
}
|
}
|
||||||
|
|
||||||
return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
|
return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
|
||||||
@ -65,18 +69,18 @@ func (d *Driver) Status() [][2]string {
|
|||||||
|
|
||||||
status := [][2]string{
|
status := [][2]string{
|
||||||
{"Pool Name", s.PoolName},
|
{"Pool Name", s.PoolName},
|
||||||
{"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))},
|
{"Pool Blocksize", units.HumanSize(float64(s.SectorSize))},
|
||||||
{"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))},
|
{"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))},
|
||||||
{"Backing Filesystem", s.BaseDeviceFS},
|
{"Backing Filesystem", s.BaseDeviceFS},
|
||||||
{"Data file", s.DataFile},
|
{"Data file", s.DataFile},
|
||||||
{"Metadata file", s.MetadataFile},
|
{"Metadata file", s.MetadataFile},
|
||||||
{"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))},
|
{"Data Space Used", units.HumanSize(float64(s.Data.Used))},
|
||||||
{"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))},
|
{"Data Space Total", units.HumanSize(float64(s.Data.Total))},
|
||||||
{"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))},
|
{"Data Space Available", units.HumanSize(float64(s.Data.Available))},
|
||||||
{"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))},
|
{"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))},
|
||||||
{"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))},
|
{"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))},
|
||||||
{"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))},
|
{"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))},
|
||||||
{"Thin Pool Minimum Free Space", fmt.Sprintf("%s", units.HumanSize(float64(s.MinFreeSpace)))},
|
{"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))},
|
||||||
{"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
|
{"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
|
||||||
{"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)},
|
{"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)},
|
||||||
{"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)},
|
{"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)},
|
||||||
@ -122,12 +126,17 @@ func (d *Driver) Cleanup() error {
|
|||||||
|
|
||||||
// CreateReadWrite creates a layer that is writable for use as a container
|
// CreateReadWrite creates a layer that is writable for use as a container
|
||||||
// file system.
|
// file system.
|
||||||
func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
|
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
|
||||||
return d.Create(id, parent, mountLabel, storageOpt)
|
return d.Create(id, parent, opts)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create adds a device with a given id and the parent.
|
// Create adds a device with a given id and the parent.
|
||||||
func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
|
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
|
||||||
|
var storageOpt map[string]string
|
||||||
|
if opts != nil {
|
||||||
|
storageOpt = opts.StorageOpt
|
||||||
|
}
|
||||||
|
|
||||||
if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil {
|
if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -137,6 +146,8 @@ func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str
|
|||||||
|
|
||||||
// Remove removes a device with a given id, unmounts the filesystem.
|
// Remove removes a device with a given id, unmounts the filesystem.
|
||||||
func (d *Driver) Remove(id string) error {
|
func (d *Driver) Remove(id string) error {
|
||||||
|
d.locker.Lock(id)
|
||||||
|
defer d.locker.Unlock(id)
|
||||||
if !d.DeviceSet.HasDevice(id) {
|
if !d.DeviceSet.HasDevice(id) {
|
||||||
// Consider removing a non-existing device a no-op
|
// Consider removing a non-existing device a no-op
|
||||||
// This is useful to be able to progress on container removal
|
// This is useful to be able to progress on container removal
|
||||||
@ -146,19 +157,15 @@ func (d *Driver) Remove(id string) error {
|
|||||||
|
|
||||||
// This assumes the device has been properly Get/Put:ed and thus is unmounted
|
// This assumes the device has been properly Get/Put:ed and thus is unmounted
|
||||||
if err := d.DeviceSet.DeleteDevice(id, false); err != nil {
|
if err := d.DeviceSet.DeleteDevice(id, false); err != nil {
|
||||||
return err
|
return fmt.Errorf("failed to remove device %s: %v", id, err)
|
||||||
}
|
}
|
||||||
|
return system.EnsureRemoveAll(path.Join(d.home, "mnt", id))
|
||||||
mp := path.Join(d.home, "mnt", id)
|
|
||||||
if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
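The driver now wraps Remove/Get/Put in a per-id lock from `pkg/locker`, so concurrent operations on the same layer are serialized while different layers proceed in parallel. A sketch of that pattern, with `perIDLock`/`withLock` as illustrative names:

```go
package sketch

import (
	"github.com/containers/storage/pkg/locker"
)

// perIDLock serializes work keyed by layer id, mirroring d.locker in the
// devmapper driver.
type perIDLock struct {
	locker *locker.Locker
}

func newPerIDLock() *perIDLock {
	return &perIDLock{locker: locker.New()}
}

func (p *perIDLock) withLock(id string, fn func() error) error {
	p.locker.Lock(id)
	defer p.locker.Unlock(id)
	return fn()
}
```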
// Get mounts a device with given id into the root filesystem
|
// Get mounts a device with given id into the root filesystem
|
||||||
func (d *Driver) Get(id, mountLabel string) (string, error) {
|
func (d *Driver) Get(id, mountLabel string) (string, error) {
|
||||||
|
d.locker.Lock(id)
|
||||||
|
defer d.locker.Unlock(id)
|
||||||
mp := path.Join(d.home, "mnt", id)
|
mp := path.Join(d.home, "mnt", id)
|
||||||
rootFs := path.Join(mp, "rootfs")
|
rootFs := path.Join(mp, "rootfs")
|
||||||
if count := d.ctr.Increment(mp); count > 1 {
|
if count := d.ctr.Increment(mp); count > 1 {
|
||||||
@ -209,6 +216,8 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
|
|||||||
|
|
||||||
// Put unmounts a device and removes it.
|
// Put unmounts a device and removes it.
|
||||||
func (d *Driver) Put(id string) error {
|
func (d *Driver) Put(id string) error {
|
||||||
|
d.locker.Lock(id)
|
||||||
|
defer d.locker.Unlock(id)
|
||||||
mp := path.Join(d.home, "mnt", id)
|
mp := path.Join(d.home, "mnt", id)
|
||||||
if count := d.ctr.Decrement(mp); count > 0 {
|
if count := d.ctr.Decrement(mp); count > 0 {
|
||||||
return nil
|
return nil
|
||||||
@ -227,6 +236,5 @@ func (d *Driver) Exists(id string) bool {
|
|||||||
|
|
||||||
// AdditionalImageStores returns additional image stores supported by the driver
|
// AdditionalImageStores returns additional image stores supported by the driver
|
||||||
func (d *Driver) AdditionalImageStores() []string {
|
func (d *Driver) AdditionalImageStores() []string {
|
||||||
var imageStores []string
|
return nil
|
||||||
return imageStores
|
|
||||||
}
|
}
|
||||||
|
13
vendor/github.com/containers/storage/drivers/devmapper/mount.go
generated
vendored
@ -7,7 +7,8 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"syscall"
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
// FIXME: this is copy-pasted from the aufs driver.
|
// FIXME: this is copy-pasted from the aufs driver.
|
||||||
@ -15,19 +16,17 @@ import (
|
|||||||
|
|
||||||
// Mounted returns true if a mount point exists.
|
// Mounted returns true if a mount point exists.
|
||||||
func Mounted(mountpoint string) (bool, error) {
|
func Mounted(mountpoint string) (bool, error) {
|
||||||
mntpoint, err := os.Stat(mountpoint)
|
var mntpointSt unix.Stat_t
|
||||||
if err != nil {
|
if err := unix.Stat(mountpoint, &mntpointSt); err != nil {
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
parent, err := os.Stat(filepath.Join(mountpoint, ".."))
|
var parentSt unix.Stat_t
|
||||||
if err != nil {
|
if err := unix.Stat(filepath.Join(mountpoint, ".."), &parentSt); err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
mntpointSt := mntpoint.Sys().(*syscall.Stat_t)
|
|
||||||
parentSt := parent.Sys().(*syscall.Stat_t)
|
|
||||||
return mntpointSt.Dev != parentSt.Dev, nil
|
return mntpointSt.Dev != parentSt.Dev, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
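The `Mounted` rewrite above replaces `os.Stat` plus a `syscall.Stat_t` assertion with direct `unix.Stat_t` calls; the mountpoint test itself is unchanged. A consolidated sketch of the resulting check, for readers who want it in one piece:

```go
package sketch

import (
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

// mounted reports whether mountpoint is a mountpoint: it is if it lives on a
// different device than its parent directory.
func mounted(mountpoint string) (bool, error) {
	var self, parent unix.Stat_t
	if err := unix.Stat(mountpoint, &self); err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}
	if err := unix.Stat(filepath.Join(mountpoint, ".."), &parent); err != nil {
		return false, err
	}
	return self.Dev != parent.Dev, nil
}
```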
|
95
vendor/github.com/containers/storage/drivers/driver.go
generated
vendored
@ -2,12 +2,13 @@ package graphdriver
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/vbatts/tar-split/tar/storage"
|
"github.com/vbatts/tar-split/tar/storage"
|
||||||
|
|
||||||
"github.com/containers/storage/pkg/archive"
|
"github.com/containers/storage/pkg/archive"
|
||||||
@ -28,12 +29,19 @@ var (
|
|||||||
|
|
||||||
// ErrNotSupported returned when driver is not supported.
|
// ErrNotSupported returned when driver is not supported.
|
||||||
ErrNotSupported = errors.New("driver not supported")
|
ErrNotSupported = errors.New("driver not supported")
|
||||||
// ErrPrerequisites retuned when driver does not meet prerequisites.
|
// ErrPrerequisites returned when driver does not meet prerequisites.
|
||||||
ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
|
ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
|
||||||
// ErrIncompatibleFS returned when file system is not supported.
|
// ErrIncompatibleFS returned when file system is not supported.
|
||||||
ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
|
ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
//CreateOpts contains optional arguments for Create() and CreateReadWrite()
|
||||||
|
// methods.
|
||||||
|
type CreateOpts struct {
|
||||||
|
MountLabel string
|
||||||
|
StorageOpt map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
// InitFunc initializes the storage driver.
|
// InitFunc initializes the storage driver.
|
||||||
type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error)
|
type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error)
|
||||||
|
|
||||||
@ -47,11 +55,13 @@ type ProtoDriver interface {
|
|||||||
// String returns a string representation of this driver.
|
// String returns a string representation of this driver.
|
||||||
String() string
|
String() string
|
||||||
// CreateReadWrite creates a new, empty filesystem layer that is ready
|
// CreateReadWrite creates a new, empty filesystem layer that is ready
|
||||||
// to be used as the storage for a container.
|
// to be used as the storage for a container. Additional options can
|
||||||
CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error
|
// be passed in opts. parent may be "" and opts may be nil.
|
||||||
|
CreateReadWrite(id, parent string, opts *CreateOpts) error
|
||||||
// Create creates a new, empty, filesystem layer with the
|
// Create creates a new, empty, filesystem layer with the
|
||||||
// specified id and parent and mountLabel. Parent and mountLabel may be "".
|
// specified id and parent and options passed in opts. Parent
|
||||||
Create(id, parent, mountLabel string, storageOpt map[string]string) error
|
// may be "" and opts may be nil.
|
||||||
|
Create(id, parent string, opts *CreateOpts) error
|
||||||
// Remove attempts to remove the filesystem layer with this id.
|
// Remove attempts to remove the filesystem layer with this id.
|
||||||
Remove(id string) error
|
Remove(id string) error
|
||||||
// Get returns the mountpoint for the layered filesystem referred
|
// Get returns the mountpoint for the layered filesystem referred
|
||||||
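With the new `CreateOpts`, mount label and storage options travel in one struct and `opts` may be nil. A sketch of the call shape from a driver consumer's point of view; the ids, the empty label, and the `"size"` option are placeholders:

```go
package sketch

import (
	graphdriver "github.com/containers/storage/drivers"
)

// createLayer creates a writable layer using the post-CreateOpts signature.
func createLayer(d graphdriver.Driver, id, parent string) error {
	opts := &graphdriver.CreateOpts{
		MountLabel: "",                               // may be empty
		StorageOpt: map[string]string{"size": "10G"}, // driver-specific
	}
	return d.CreateReadWrite(id, parent, opts)
}
```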
@ -78,26 +88,48 @@ type ProtoDriver interface {
|
|||||||
AdditionalImageStores() []string
|
AdditionalImageStores() []string
|
||||||
}
|
}
|
||||||
|
|
||||||
// Driver is the interface for layered/snapshot file system drivers.
|
// DiffDriver is the interface to use to implement graph diffs
|
||||||
type Driver interface {
|
type DiffDriver interface {
|
||||||
ProtoDriver
|
|
||||||
// Diff produces an archive of the changes between the specified
|
// Diff produces an archive of the changes between the specified
|
||||||
// layer and its parent layer which may be "".
|
// layer and its parent layer which may be "".
|
||||||
Diff(id, parent string) (archive.Archive, error)
|
Diff(id, parent string) (io.ReadCloser, error)
|
||||||
// Changes produces a list of changes between the specified layer
|
// Changes produces a list of changes between the specified layer
|
||||||
// and its parent layer. If parent is "", then all changes will be ADD changes.
|
// and its parent layer. If parent is "", then all changes will be ADD changes.
|
||||||
Changes(id, parent string) ([]archive.Change, error)
|
Changes(id, parent string) ([]archive.Change, error)
|
||||||
// ApplyDiff extracts the changeset from the given diff into the
|
// ApplyDiff extracts the changeset from the given diff into the
|
||||||
// layer with the specified id and parent, returning the size of the
|
// layer with the specified id and parent, returning the size of the
|
||||||
// new layer in bytes.
|
// new layer in bytes.
|
||||||
// The archive.Reader must be an uncompressed stream.
|
// The io.Reader must be an uncompressed stream.
|
||||||
ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
|
ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
|
||||||
// DiffSize calculates the changes between the specified id
|
// DiffSize calculates the changes between the specified id
|
||||||
// and its parent and returns the size in bytes of the changes
|
// and its parent and returns the size in bytes of the changes
|
||||||
// relative to its base filesystem directory.
|
// relative to its base filesystem directory.
|
||||||
DiffSize(id, parent string) (size int64, err error)
|
DiffSize(id, parent string) (size int64, err error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Driver is the interface for layered/snapshot file system drivers.
|
||||||
|
type Driver interface {
|
||||||
|
ProtoDriver
|
||||||
|
DiffDriver
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capabilities defines a list of capabilities a driver may implement.
|
||||||
|
// These capabilities are not required; however, they do determine how a
|
||||||
|
// graphdriver can be used.
|
||||||
|
type Capabilities struct {
|
||||||
|
// Flags that this driver is capable of reproducing exactly equivalent
|
||||||
|
// diffs for read-only layers. If set, clients can rely on the driver
|
||||||
|
// for consistent tar streams, and avoid extra processing to account
|
||||||
|
// for potential differences (eg: the layer store's use of tar-split).
|
||||||
|
ReproducesExactDiffs bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// CapabilityDriver is the interface for layered file system drivers that
|
||||||
|
// can report on their Capabilities.
|
||||||
|
type CapabilityDriver interface {
|
||||||
|
Capabilities() Capabilities
|
||||||
|
}
|
||||||
|
|
||||||
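`CapabilityDriver` above is optional, so callers probe for it with a type assertion. A sketch of that probe; `capabilitiesOf` is an illustrative helper, not part of the vendored API:

```go
package sketch

import (
	graphdriver "github.com/containers/storage/drivers"
)

// capabilitiesOf returns a driver's advertised Capabilities, or the zero
// value (no exact-diff guarantee) when the driver does not implement
// CapabilityDriver.
func capabilitiesOf(d graphdriver.ProtoDriver) graphdriver.Capabilities {
	if c, ok := d.(graphdriver.CapabilityDriver); ok {
		return c.Capabilities()
	}
	return graphdriver.Capabilities{}
}
```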
// DiffGetterDriver is the interface for layered file system drivers that
|
// DiffGetterDriver is the interface for layered file system drivers that
|
||||||
// provide a specialized function for getting file contents for tar-split.
|
// provide a specialized function for getting file contents for tar-split.
|
||||||
type DiffGetterDriver interface {
|
type DiffGetterDriver interface {
|
||||||
@ -136,15 +168,13 @@ func Register(name string, initFunc InitFunc) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetDriver initializes and returns the registered driver
|
// GetDriver initializes and returns the registered driver
|
||||||
func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
|
func GetDriver(name string, config Options) (Driver, error) {
|
||||||
if initFunc, exists := drivers[name]; exists {
|
if initFunc, exists := drivers[name]; exists {
|
||||||
return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)
|
return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
|
||||||
}
|
}
|
||||||
if pluginDriver, err := lookupPlugin(name, home, options); err == nil {
|
|
||||||
return pluginDriver, nil
|
logrus.Errorf("Failed to GetDriver graph %s %s", name, config.Root)
|
||||||
}
|
return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, config.Root)
|
||||||
logrus.Errorf("Failed to GetDriver graph %s %s", name, home)
|
|
||||||
return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, home)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
|
// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
|
||||||
@ -156,15 +186,24 @@ func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []id
|
|||||||
return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home)
|
return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Options is used to initialize a graphdriver
|
||||||
|
type Options struct {
|
||||||
|
Root string
|
||||||
|
DriverOptions []string
|
||||||
|
UIDMaps []idtools.IDMap
|
||||||
|
GIDMaps []idtools.IDMap
|
||||||
|
ExperimentalEnabled bool
|
||||||
|
}
|
||||||
|
|
||||||
// New creates the driver and initializes it at the specified root.
|
// New creates the driver and initializes it at the specified root.
|
||||||
func New(root string, name string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
|
func New(name string, config Options) (Driver, error) {
|
||||||
if name != "" {
|
if name != "" {
|
||||||
logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver
|
logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver
|
||||||
return GetDriver(name, root, options, uidMaps, gidMaps)
|
return GetDriver(name, config)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Guess for prior driver
|
// Guess for prior driver
|
||||||
driversMap := scanPriorDrivers(root)
|
driversMap := scanPriorDrivers(config.Root)
|
||||||
for _, name := range priority {
|
for _, name := range priority {
|
||||||
if name == "vfs" {
|
if name == "vfs" {
|
||||||
// don't use vfs even if there is state present.
|
// don't use vfs even if there is state present.
|
||||||
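`New` and `GetDriver` now take a single `Options` struct instead of separate root/options/idmap arguments. A sketch of the new entry point; the root path and the `dm.fs=xfs` option are placeholders:

```go
package sketch

import (
	graphdriver "github.com/containers/storage/drivers"
)

// newDriver initializes a named driver through the Options-based API.
func newDriver() (graphdriver.Driver, error) {
	config := graphdriver.Options{
		Root:          "/var/lib/containers/storage",
		DriverOptions: []string{"dm.fs=xfs"},
	}
	// Passing "" as the name instead lets New fall back to prior state and
	// the built-in priority list.
	return graphdriver.New("devicemapper", config)
}
```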
@ -173,13 +212,13 @@ func New(root string, name string, options []string, uidMaps, gidMaps []idtools.
|
|||||||
if _, prior := driversMap[name]; prior {
|
if _, prior := driversMap[name]; prior {
|
||||||
// of the state found from prior drivers, check in order of our priority
|
// of the state found from prior drivers, check in order of our priority
|
||||||
// which we would prefer
|
// which we would prefer
|
||||||
driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps)
|
driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// unlike below, we will return error here, because there is prior
|
// unlike below, we will return error here, because there is prior
|
||||||
// state, and now it is no longer supported/prereq/compatible, so
|
// state, and now it is no longer supported/prereq/compatible, so
|
||||||
// something changed and needs attention. Otherwise the daemon's
|
// something changed and needs attention. Otherwise the daemon's
|
||||||
// images would just "disappear".
|
// images would just "disappear".
|
||||||
logrus.Errorf("[graphdriver] prior storage driver %q failed: %s", name, err)
|
logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -191,17 +230,17 @@ func New(root string, name string, options []string, uidMaps, gidMaps []idtools.
|
|||||||
driversSlice = append(driversSlice, name)
|
driversSlice = append(driversSlice, name)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, fmt.Errorf("%q contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", root, strings.Join(driversSlice, ", "))
|
return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", config.Root, strings.Join(driversSlice, ", "))
|
||||||
}
|
}
|
||||||
|
|
||||||
logrus.Infof("[graphdriver] using prior storage driver %q", name)
|
logrus.Infof("[graphdriver] using prior storage driver: %s", name)
|
||||||
return driver, nil
|
return driver, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check for priority drivers first
|
// Check for priority drivers first
|
||||||
for _, name := range priority {
|
for _, name := range priority {
|
||||||
driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps)
|
driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if isDriverNotSupported(err) {
|
if isDriverNotSupported(err) {
|
||||||
continue
|
continue
|
||||||
@ -213,7 +252,7 @@ func New(root string, name string, options []string, uidMaps, gidMaps []idtools.
|
|||||||
|
|
||||||
// Check all registered drivers if no priority driver is found
|
// Check all registered drivers if no priority driver is found
|
||||||
for name, initFunc := range drivers {
|
for name, initFunc := range drivers {
|
||||||
driver, err := initFunc(filepath.Join(root, name), options, uidMaps, gidMaps)
|
driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if isDriverNotSupported(err) {
|
if isDriverNotSupported(err) {
|
||||||
continue
|
continue
|
||||||
|
8
vendor/github.com/containers/storage/drivers/driver_freebsd.go
generated
vendored
@ -1,6 +1,10 @@
|
|||||||
package graphdriver
|
package graphdriver
|
||||||
|
|
||||||
import "syscall"
|
import (
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// Slice of drivers that should be used in an order
|
// Slice of drivers that should be used in an order
|
||||||
@ -11,7 +15,7 @@ var (
|
|||||||
|
|
||||||
// Mounted checks if the given path is mounted as the fs type
|
// Mounted checks if the given path is mounted as the fs type
|
||||||
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
|
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
|
||||||
var buf syscall.Statfs_t
|
var buf unix.Statfs_t
|
||||||
if err := syscall.Statfs(mountPath, &buf); err != nil {
|
if err := syscall.Statfs(mountPath, &buf); err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
15
vendor/github.com/containers/storage/drivers/driver_linux.go
generated
vendored
@ -4,9 +4,9 @@ package graphdriver
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"github.com/containers/storage/pkg/mount"
|
"github.com/containers/storage/pkg/mount"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -66,13 +66,14 @@ var (
|
|||||||
FsMagicAufs: "aufs",
|
FsMagicAufs: "aufs",
|
||||||
FsMagicBtrfs: "btrfs",
|
FsMagicBtrfs: "btrfs",
|
||||||
FsMagicCramfs: "cramfs",
|
FsMagicCramfs: "cramfs",
|
||||||
|
FsMagicEcryptfs: "ecryptfs",
|
||||||
FsMagicExtfs: "extfs",
|
FsMagicExtfs: "extfs",
|
||||||
FsMagicF2fs: "f2fs",
|
FsMagicF2fs: "f2fs",
|
||||||
FsMagicGPFS: "gpfs",
|
FsMagicGPFS: "gpfs",
|
||||||
FsMagicJffs2Fs: "jffs2",
|
FsMagicJffs2Fs: "jffs2",
|
||||||
FsMagicJfs: "jfs",
|
FsMagicJfs: "jfs",
|
||||||
FsMagicNfsFs: "nfs",
|
FsMagicNfsFs: "nfs",
|
||||||
FsMagicOverlay: "overlay",
|
FsMagicOverlay: "overlayfs",
|
||||||
FsMagicRAMFs: "ramfs",
|
FsMagicRAMFs: "ramfs",
|
||||||
FsMagicReiserFs: "reiserfs",
|
FsMagicReiserFs: "reiserfs",
|
||||||
FsMagicSmbFs: "smb",
|
FsMagicSmbFs: "smb",
|
||||||
@ -87,14 +88,14 @@ var (
|
|||||||
|
|
||||||
// GetFSMagic returns the filesystem id given the path.
|
// GetFSMagic returns the filesystem id given the path.
|
||||||
func GetFSMagic(rootpath string) (FsMagic, error) {
|
func GetFSMagic(rootpath string) (FsMagic, error) {
|
||||||
var buf syscall.Statfs_t
|
var buf unix.Statfs_t
|
||||||
if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil {
|
if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
return FsMagic(buf.Type), nil
|
return FsMagic(buf.Type), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFsChecker returns a checker configured for the provied FsMagic
|
// NewFsChecker returns a checker configured for the provided FsMagic
|
||||||
func NewFsChecker(t FsMagic) Checker {
|
func NewFsChecker(t FsMagic) Checker {
|
||||||
return &fsChecker{
|
return &fsChecker{
|
||||||
t: t,
|
t: t,
|
||||||
@ -126,8 +127,8 @@ func (c *defaultChecker) IsMounted(path string) bool {
|
|||||||
|
|
||||||
// Mounted checks if the given path is mounted as the fs type
|
// Mounted checks if the given path is mounted as the fs type
|
||||||
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
|
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
|
||||||
var buf syscall.Statfs_t
|
var buf unix.Statfs_t
|
||||||
if err := syscall.Statfs(mountPath, &buf); err != nil {
|
if err := unix.Statfs(mountPath, &buf); err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
return FsMagic(buf.Type) == fsType, nil
|
return FsMagic(buf.Type) == fsType, nil
|
||||||
|
44
vendor/github.com/containers/storage/drivers/driver_solaris.go
generated
vendored
@@ -19,8 +19,8 @@ import (
     "path/filepath"
     "unsafe"

-    log "github.com/Sirupsen/logrus"
-    "github.com/pkg/errors"
+    "github.com/containers/storage/pkg/mount"
+    "github.com/sirupsen/logrus"
 )

 const (
@@ -45,22 +45,52 @@ func GetFSMagic(rootpath string) (FsMagic, error) {
     return 0, nil
 }

+type fsChecker struct {
+    t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+    m, _ := Mounted(c.t, path)
+    return m
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+    return &fsChecker{
+        t: t,
+    }
+}
+
+// NewDefaultChecker returns a check that parses /proc/mountinfo to check
+// if the specified path is mounted.
+// No-op on Solaris.
+func NewDefaultChecker() Checker {
+    return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+    m, _ := mount.Mounted(path)
+    return m
+}
+
 // Mounted checks if the given path is mounted as the fs type
 //Solaris supports only ZFS for now
 func Mounted(fsType FsMagic, mountPath string) (bool, error) {

     cs := C.CString(filepath.Dir(mountPath))
+    defer C.free(unsafe.Pointer(cs))
     buf := C.getstatfs(cs)
+    defer C.free(unsafe.Pointer(buf))

     // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
     if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
         (buf.f_basetype[3] != 0) {
-        log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
-        C.free(unsafe.Pointer(buf))
-        return false, errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", mountPath)
+        logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
+        return false, ErrPrerequisites
     }

-    C.free(unsafe.Pointer(buf))
-    C.free(unsafe.Pointer(cs))
     return true, nil
 }
vendor/github.com/containers/storage/drivers/fsdiff.go (generated, vendored): 64 changes
@@ -1,14 +1,14 @@
 package graphdriver

 import (
+    "io"
     "time"

-    "github.com/Sirupsen/logrus"
-
     "github.com/containers/storage/pkg/archive"
     "github.com/containers/storage/pkg/chrootarchive"
     "github.com/containers/storage/pkg/idtools"
     "github.com/containers/storage/pkg/ioutils"
+    "github.com/sirupsen/logrus"
 )

 var (
@@ -31,30 +31,30 @@ type NaiveDiffDriver struct {
 // NewNaiveDiffDriver returns a fully functional driver that wraps the
 // given ProtoDriver and adds the capability of the following methods which
 // it may or may not support on its own:
-// Diff(id, parent string) (archive.Archive, error)
+// Diff(id, parent string) (io.ReadCloser, error)
 // Changes(id, parent string) ([]archive.Change, error)
-// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
+// ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
 // DiffSize(id, parent string) (size int64, err error)
 func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver {
-    gdw := &NaiveDiffDriver{
-        ProtoDriver: driver,
-        uidMaps: uidMaps,
-        gidMaps: gidMaps,
-    }
-    return gdw
+    return &NaiveDiffDriver{ProtoDriver: driver,
+        uidMaps: uidMaps,
+        gidMaps: gidMaps}
 }

 // Diff produces an archive of the changes between the specified
 // layer and its parent layer which may be "".
-func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) {
-    layerFs, err := gdw.Get(id, "")
+func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) {
+    startTime := time.Now()
+    driver := gdw.ProtoDriver
+
+    layerFs, err := driver.Get(id, "")
     if err != nil {
         return nil, err
     }

     defer func() {
         if err != nil {
-            gdw.Put(id)
+            driver.Put(id)
         }
     }()
@@ -65,16 +65,16 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err e
         }
         return ioutils.NewReadCloserWrapper(archive, func() error {
             err := archive.Close()
-            gdw.Put(id)
+            driver.Put(id)
             return err
         }), nil
     }

-    parentFs, err := gdw.Get(parent, "")
+    parentFs, err := driver.Get(parent, "")
     if err != nil {
         return nil, err
     }
-    defer gdw.Put(parent)
+    defer driver.Put(parent)

     changes, err := archive.ChangesDirs(layerFs, parentFs)
     if err != nil {
@@ -88,7 +88,13 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err e

     return ioutils.NewReadCloserWrapper(archive, func() error {
         err := archive.Close()
-        gdw.Put(id)
+        driver.Put(id)
+
+        // NaiveDiffDriver compares file metadata with parent layers. Parent layers
+        // are extracted from tar's with full second precision on modified time.
+        // We need this hack here to make sure calls within same second receive
+        // correct result.
+        time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now()))
         return err
     }), nil
 }
@@ -96,20 +102,22 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err e
 // Changes produces a list of changes between the specified layer
 // and its parent layer. If parent is "", then all changes will be ADD changes.
 func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) {
-    layerFs, err := gdw.Get(id, "")
+    driver := gdw.ProtoDriver
+
+    layerFs, err := driver.Get(id, "")
     if err != nil {
         return nil, err
     }
-    defer gdw.Put(id)
+    defer driver.Put(id)

     parentFs := ""

     if parent != "" {
-        parentFs, err = gdw.Get(parent, "")
+        parentFs, err = driver.Get(parent, "")
         if err != nil {
             return nil, err
         }
-        defer gdw.Put(parent)
+        defer driver.Put(parent)
     }

     return archive.ChangesDirs(layerFs, parentFs)
@@ -118,13 +126,15 @@ func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error)
 // ApplyDiff extracts the changeset from the given diff into the
 // layer with the specified id and parent, returning the size of the
 // new layer in bytes.
-func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
+func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
+    driver := gdw.ProtoDriver
+
     // Mount the root filesystem so we can apply the diff/layer.
-    layerFs, err := gdw.Get(id, "")
+    layerFs, err := driver.Get(id, "")
     if err != nil {
         return
     }
-    defer gdw.Put(id)
+    defer driver.Put(id)

     options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
         GIDMaps: gdw.gidMaps}
@@ -142,16 +152,18 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (s
 // and its parent and returns the size in bytes of the changes
 // relative to its base filesystem directory.
 func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) {
+    driver := gdw.ProtoDriver
+
     changes, err := gdw.Changes(id, parent)
     if err != nil {
         return
     }

-    layerFs, err := gdw.Get(id, "")
+    layerFs, err := driver.Get(id, "")
     if err != nil {
         return
     }
-    defer gdw.Put(id)
+    defer driver.Put(id)

     return archive.ChangesSize(layerFs, changes), nil
 }
vendor/github.com/containers/storage/drivers/overlay/check.go (generated, vendored, new file): 102 changes
@@ -0,0 +1,102 @@
+// +build linux
+
+package overlay
+
+import (
+    "fmt"
+    "io/ioutil"
+    "os"
+    "path"
+    "path/filepath"
+    "syscall"
+
+    "github.com/containers/storage/pkg/system"
+    "github.com/pkg/errors"
+    "github.com/sirupsen/logrus"
+    "golang.org/x/sys/unix"
+)
+
+// doesSupportNativeDiff checks whether the filesystem has a bug
+// which copies up the opaque flag when copying up an opaque
+// directory or the kernel enable CONFIG_OVERLAY_FS_REDIRECT_DIR.
+// When these exist naive diff should be used.
+func doesSupportNativeDiff(d string) error {
+    td, err := ioutil.TempDir(d, "opaque-bug-check")
+    if err != nil {
+        return err
+    }
+    defer func() {
+        if err := os.RemoveAll(td); err != nil {
+            logrus.Warnf("Failed to remove check directory %v: %v", td, err)
+        }
+    }()
+
+    // Make directories l1/d, l1/d1, l2/d, l3, work, merged
+    if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil {
+        return err
+    }
+    if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil {
+        return err
+    }
+    if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil {
+        return err
+    }
+    if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil {
+        return err
+    }
+    if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil {
+        return err
+    }
+    if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil {
+        return err
+    }
+
+    // Mark l2/d as opaque
+    if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), "trusted.overlay.opaque", []byte("y"), 0); err != nil {
+        return errors.Wrap(err, "failed to set opaque flag on middle layer")
+    }
+
+    opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work"))
+    if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil {
+        return errors.Wrap(err, "failed to mount overlay")
+    }
+    defer func() {
+        if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
+            logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err)
+        }
+    }()
+
+    // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3"
+    if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil {
+        return errors.Wrap(err, "failed to write to merged directory")
+    }
+
+    // Check l3/d does not have opaque flag
+    xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), "trusted.overlay.opaque")
+    if err != nil {
+        return errors.Wrap(err, "failed to read opaque flag on upper layer")
+    }
+    if string(xattrOpaque) == "y" {
+        return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix")
+    }
+
+    // rename "d1" to "d2"
+    if err := os.Rename(filepath.Join(td, "merged", "d1"), filepath.Join(td, "merged", "d2")); err != nil {
+        // if rename failed with syscall.EXDEV, the kernel doesn't have CONFIG_OVERLAY_FS_REDIRECT_DIR enabled
+        if err.(*os.LinkError).Err == syscall.EXDEV {
+            return nil
+        }
+        return errors.Wrap(err, "failed to rename dir in merged directory")
+    }
+    // get the xattr of "d2"
+    xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), "trusted.overlay.redirect")
+    if err != nil {
+        return errors.Wrap(err, "failed to read redirect flag on upper layer")
+    }
+
+    if string(xattrRedirect) == "d1" {
+        return errors.New("kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled")
+    }
+
+    return nil
+}
vendor/github.com/containers/storage/drivers/overlay/mount.go (generated, vendored): 13 changes
@@ -9,9 +9,9 @@ import (
     "fmt"
     "os"
     "runtime"
-    "syscall"

     "github.com/containers/storage/pkg/reexec"
+    "golang.org/x/sys/unix"
 )

 func init() {
@@ -31,12 +31,12 @@ type mountOptions struct {
     Flag uint32
 }

-func mountFrom(dir, device, target, mType, label string) error {
+func mountFrom(dir, device, target, mType string, flags uintptr, label string) error {
     options := &mountOptions{
         Device: device,
         Target: target,
         Type: mType,
-        Flag: 0,
+        Flag: uint32(flags),
         Label: label,
     }
@@ -49,18 +49,19 @@ func mountFrom(dir, device, target, mType, label string) error {
     output := bytes.NewBuffer(nil)
     cmd.Stdout = output
     cmd.Stderr = output
-
     if err := cmd.Start(); err != nil {
+        w.Close()
         return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)
     }
     //write the options to the pipe for the untar exec to read
     if err := json.NewEncoder(w).Encode(options); err != nil {
+        w.Close()
         return fmt.Errorf("mountfrom json encode to pipe failed: %v", err)
     }
     w.Close()

     if err := cmd.Wait(); err != nil {
-        return fmt.Errorf("mountfrom re-exec error: %v: output: %s", err, output)
+        return fmt.Errorf("mountfrom re-exec error: %v: output: %v", err, output)
     }
     return nil
 }
@@ -80,7 +81,7 @@ func mountFromMain() {
         fatal(err)
     }

-    if err := syscall.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
+    if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
         fatal(err)
     }

vendor/github.com/containers/storage/drivers/overlay/overlay.go (generated, vendored): 334 changes
@@ -5,6 +5,7 @@ package overlay
 import (
     "bufio"
     "fmt"
+    "io"
     "io/ioutil"
     "os"
     "os/exec"
@@ -12,21 +13,26 @@ import (
     "path/filepath"
     "strconv"
     "strings"
-    "syscall"
+    "sync"

-    "github.com/Sirupsen/logrus"
-
     "github.com/containers/storage/drivers"
+    "github.com/containers/storage/drivers/overlayutils"
+    "github.com/containers/storage/drivers/quota"
     "github.com/containers/storage/pkg/archive"
     "github.com/containers/storage/pkg/chrootarchive"
     "github.com/containers/storage/pkg/directory"
+    "github.com/containers/storage/pkg/fsutils"
     "github.com/containers/storage/pkg/idtools"
+    "github.com/containers/storage/pkg/locker"
     "github.com/containers/storage/pkg/mount"
     "github.com/containers/storage/pkg/parsers"
     "github.com/containers/storage/pkg/parsers/kernel"
+    "github.com/containers/storage/pkg/system"
+    units "github.com/docker/go-units"
     "github.com/opencontainers/selinux/go-selinux/label"
     "github.com/pkg/errors"
+    "github.com/sirupsen/logrus"
+    "golang.org/x/sys/unix"
 )

 var (
@@ -42,7 +48,7 @@ var (

 // Each container/image has at least a "diff" directory and "link" file.
 // If there is also a "lower" file when there are diff layers
 // below as well as "merged" and "work" directories. The "diff" directory
 // has the upper layer of the overlay and is used to capture any
 // changes to the layer. The "lower" file contains all the lower layer
 // mounts separated by ":" and ordered from uppermost to lowermost
@@ -76,26 +82,43 @@ const (
     idLength = 26
 )

+type overlayOptions struct {
+    overrideKernelCheck bool
+    imageStores []string
+    quota quota.Quota
+}
+
 // Driver contains information about the home directory and the list of active mounts that are created using this driver.
 type Driver struct {
     name string
     home string
     uidMaps []idtools.IDMap
     gidMaps []idtools.IDMap
     ctr *graphdriver.RefCounter
-    opts *overlayOptions
+    quotaCtl *quota.Control
+    options overlayOptions
+    naiveDiff graphdriver.DiffDriver
+    supportsDType bool
+    locker *locker.Locker
 }

-var backingFs = "<unknown>"
+var (
+    backingFs = "<unknown>"
+    projectQuotaSupported = false
+
+    useNaiveDiffLock sync.Once
+    useNaiveDiffOnly bool
+)

 func init() {
-    graphdriver.Register("overlay", InitAsOverlay)
-    graphdriver.Register("overlay2", InitAsOverlay2)
+    graphdriver.Register("overlay", Init)
+    graphdriver.Register("overlay2", Init)
 }

-// InitWithName returns the a naive diff driver for the overlay filesystem,
-// which returns the passed-in name when asked which driver it is.
-func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
+// Init returns the a native diff driver for overlay filesystem.
+// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error.
+// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned.
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
     opts, err := parseOptions(options)
     if err != nil {
         return nil, err
@@ -114,7 +137,7 @@ func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtool
         if !opts.overrideKernelCheck {
             return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay")
         }
-        logrus.Warnf("Using pre-4.0.0 kernel for overlay, mount failures may require kernel update")
+        logrus.Warn("Using pre-4.0.0 kernel for overlay, mount failures may require kernel update")
     }

     fsMagic, err := graphdriver.GetFSMagic(home)
@@ -127,9 +150,19 @@ func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtool

     // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
     switch fsMagic {
-    case graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
+    case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
         logrus.Errorf("'overlay' is not supported over %s", backingFs)
         return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s", backingFs)
+    case graphdriver.FsMagicBtrfs:
+        // Support for OverlayFS on BTRFS was added in kernel 4.7
+        // See https://btrfs.wiki.kernel.org/index.php/Changelog
+        if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 7, Minor: 0}) < 0 {
+            if !opts.overrideKernelCheck {
+                logrus.Errorf("'overlay' requires kernel 4.7 to use on %s", backingFs)
+                return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' requires kernel 4.7 to use on %s", backingFs)
+            }
+            logrus.Warn("Using pre-4.7.0 kernel for overlay on btrfs, may require kernel update")
+        }
     }

     rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
@@ -145,37 +178,46 @@ func InitWithName(name, home string, options []string, uidMaps, gidMaps []idtool
         return nil, err
     }

-    d := &Driver{
-        name: name,
-        home: home,
-        uidMaps: uidMaps,
-        gidMaps: gidMaps,
-        ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
-        opts: opts,
+    supportsDType, err := fsutils.SupportsDType(home)
+    if err != nil {
+        return nil, err
+    }
+    if !supportsDType {
+        logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs))
+        // TODO: Will make fatal when CRI-O Has AMI built on RHEL7.4
+        // return nil, overlayutils.ErrDTypeNotSupported("overlay", backingFs)
     }

+    d := &Driver{
+        name: "overlay",
+        home: home,
+        uidMaps: uidMaps,
+        gidMaps: gidMaps,
+        ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
+        supportsDType: supportsDType,
+        locker: locker.New(),
+        options: *opts,
+    }
+
+    d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps)
+
+    if backingFs == "xfs" {
+        // Try to enable project quota support over xfs.
+        if d.quotaCtl, err = quota.NewControl(home); err == nil {
+            projectQuotaSupported = true
+        } else if opts.quota.Size > 0 {
+            return nil, fmt.Errorf("Storage option overlay.size not supported. Filesystem does not support Project Quota: %v", err)
+        }
+    } else if opts.quota.Size > 0 {
+        // if xfs is not the backing fs then error out if the storage-opt overlay.size is used.
+        return nil, fmt.Errorf("Storage Option overlay.size only supported for backingFS XFS. Found %v", backingFs)
+    }
+
+    logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported)
+
     return d, nil
 }

-// InitAsOverlay returns the a naive diff driver for overlay filesystem.
-// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error.
-// If a overlay filesystem is not supported over a existing filesystem then error graphdriver.ErrIncompatibleFS is returned.
-func InitAsOverlay(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
-    return InitWithName("overlay", home, options, uidMaps, gidMaps)
-}
-
-// InitAsOverlay2 returns the a naive diff driver for overlay filesystem.
-// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error.
-// If a overlay filesystem is not supported over a existing filesystem then error graphdriver.ErrIncompatibleFS is returned.
-func InitAsOverlay2(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
-    return InitWithName("overlay2", home, options, uidMaps, gidMaps)
-}
-
-type overlayOptions struct {
-    overrideKernelCheck bool
-    imageStores []string
-}
-
 func parseOptions(options []string) (*overlayOptions, error) {
     o := &overlayOptions{}
     for _, option := range options {
@@ -186,11 +228,20 @@ func parseOptions(options []string) (*overlayOptions, error) {
         key = strings.ToLower(key)
         switch key {
         case "overlay.override_kernel_check", "overlay2.override_kernel_check":
+            logrus.Debugf("overlay: override_kernelcheck=%s", val)
             o.overrideKernelCheck, err = strconv.ParseBool(val)
             if err != nil {
                 return nil, err
             }
-        case "overlay.imagestore":
+        case "overlay.size", "overlay2.size":
+            logrus.Debugf("overlay: size=%s", val)
+            size, err := units.RAMInBytes(val)
+            if err != nil {
+                return nil, err
+            }
+            o.quota.Size = uint64(size)
+        case "overlay.imagestore", "overlay2.imagestore":
+            logrus.Debugf("overlay: imagestore=%s", val)
             // Additional read only image stores to use for lower paths
             for _, store := range strings.Split(val, ",") {
                 store = filepath.Clean(store)
@@ -199,7 +250,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
                 }
                 st, err := os.Stat(store)
                 if err != nil {
-                    return nil, fmt.Errorf("overlay: Can't stat imageStore dir %s: %v", store, err)
+                    return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %v", store, err)
                 }
                 if !st.IsDir() {
                     return nil, fmt.Errorf("overlay: image path %q must be a directory", store)
@@ -234,6 +285,16 @@ func supportsOverlay() error {
     return errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
 }

+func useNaiveDiff(home string) bool {
+    useNaiveDiffLock.Do(func() {
+        if err := doesSupportNativeDiff(home); err != nil {
+            logrus.Warnf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err)
+            useNaiveDiffOnly = true
+        }
+    })
+    return useNaiveDiffOnly
+}
+
 func (d *Driver) String() string {
     return d.name
 }
@@ -243,6 +304,8 @@ func (d *Driver) String() string {
 func (d *Driver) Status() [][2]string {
     return [][2]string{
         {"Backing Filesystem", backingFs},
+        {"Supports d_type", strconv.FormatBool(d.supportsDType)},
+        {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))},
     }
 }

@@ -280,18 +343,39 @@ func (d *Driver) Cleanup() error {

 // CreateReadWrite creates a layer that is writable for use as a container
 // file system.
-func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
-    return d.Create(id, parent, mountLabel, storageOpt)
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+    if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported {
+        return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option")
+    }
+
+    if opts == nil {
+        opts = &graphdriver.CreateOpts{
+            StorageOpt: map[string]string{},
+        }
+    }
+
+    if _, ok := opts.StorageOpt["size"]; !ok {
+        if opts.StorageOpt == nil {
+            opts.StorageOpt = map[string]string{}
+        }
+        opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10)
+    }
+
+    return d.create(id, parent, opts)
 }

 // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id.
 // The parent filesystem is used to configure these directories for the overlay.
-func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) (retErr error) {
-    if len(storageOpt) != 0 {
-        return fmt.Errorf("--storage-opt is not supported for overlay")
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) {
+    if opts != nil && len(opts.StorageOpt) != 0 {
+        if _, ok := opts.StorageOpt["size"]; ok {
+            return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers")
+        }
     }
+    return d.create(id, parent, opts)
+}
+
+func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) {
     dir := d.dir(id)

     rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
@@ -312,6 +396,20 @@ func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str
         }
     }()

+    if opts != nil && len(opts.StorageOpt) > 0 {
+        driver := &Driver{}
+        if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil {
+            return err
+        }
+
+        if driver.options.quota.Size > 0 {
+            // Set container disk quota limit
+            if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil {
+                return err
+            }
+        }
+    }
+
     if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil {
         return err
     }
@@ -351,6 +449,26 @@ func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str
     return nil
 }

+// Parse overlay storage options
+func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error {
+    // Read size to set the disk project quota per container
+    for key, val := range storageOpt {
+        key := strings.ToLower(key)
+        switch key {
+        case "size":
+            size, err := units.RAMInBytes(val)
+            if err != nil {
+                return err
+            }
+            driver.options.quota.Size = uint64(size)
+        default:
+            return fmt.Errorf("Unknown option %s", key)
+        }
+    }
+
+    return nil
+}
+
 func (d *Driver) getLower(parent string) (string, error) {
     parentDir := d.dir(parent)

@@ -377,11 +495,11 @@ func (d *Driver) getLower(parent string) (string, error) {
     return strings.Join(lowers, ":"), nil
 }

-func (d *Driver) dir(val string) string {
-    newpath := path.Join(d.home, val)
+func (d *Driver) dir(id string) string {
+    newpath := path.Join(d.home, id)
     if _, err := os.Stat(newpath); err != nil {
         for _, p := range d.AdditionalImageStores() {
-            l := path.Join(p, d.name, val)
+            l := path.Join(p, d.name, id)
             _, err = os.Stat(l)
             if err == nil {
                 return l
@@ -411,6 +529,8 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) {

 // Remove cleans the directories that are created for this id.
 func (d *Driver) Remove(id string) error {
+    d.locker.Lock(id)
+    defer d.locker.Unlock(id)
     dir := d.dir(id)
     lid, err := ioutil.ReadFile(path.Join(dir, "link"))
     if err == nil {
@@ -419,14 +539,16 @@ func (d *Driver) Remove(id string) error {
         }
     }

-    if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) {
+    if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) {
         return err
     }
     return nil
 }

 // Get creates and mounts the required file system for the given id and returns the mount path.
-func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
+func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) {
+    d.locker.Lock(id)
+    defer d.locker.Unlock(id)
     dir := d.dir(id)
     if _, err := os.Stat(dir); err != nil {
         return "", err
@@ -458,7 +580,7 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
             return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err)
         }
     } else {
-        lower = l
+        lower = newpath
     }
     if newlowers == "" {
         newlowers = lower
@@ -472,22 +594,42 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
         return mergedDir, nil
     }
     defer func() {
-        if err != nil {
+        if retErr != nil {
             if c := d.ctr.Decrement(mergedDir); c <= 0 {
-                syscall.Unmount(mergedDir, 0)
+                if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
+                    logrus.Errorf("error unmounting %v: %v", mergedDir, mntErr)
+                }
             }
         }
     }()

     workDir := path.Join(dir, "work")
-    opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", newlowers, path.Join(id, "diff"), path.Join(id, "work"))
-    mountLabel = label.FormatMountLabel(opts, mountLabel)
-    if len(mountLabel) > syscall.Getpagesize() {
-        return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountLabel))
-    }
+    opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", newlowers, diffDir, workDir)
+    mountData := label.FormatMountLabel(opts, mountLabel)
+    mount := unix.Mount
+    mountTarget := mergedDir

-    if err := mountFrom(d.home, "overlay", path.Join(id, "merged"), "overlay", mountLabel); err != nil {
-        return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
+    pageSize := unix.Getpagesize()
+    // Use relative paths and mountFrom when the mount data has exceeded
+    // the page size. The mount syscall fails if the mount data cannot
+    // fit within a page and relative links make the mount data much
+    // smaller at the expense of requiring a fork exec to chroot.
+    if len(mountData) > pageSize {
+        //FIXME: We need to figure out to get this to work with additional stores
+        opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work"))
+        mountData = label.FormatMountLabel(opts, mountLabel)
+        if len(mountData) > pageSize {
+            return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
+        }
+
+        mount = func(source string, target string, mType string, flags uintptr, label string) error {
+            return mountFrom(d.home, source, target, mType, flags, label)
+        }
+        mountTarget = path.Join(id, "merged")
+    }
+    if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil {
+        return "", fmt.Errorf("error creating overlay mount to %s: %v", mountTarget, err)
     }

     // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
@@ -506,19 +648,16 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {

 // Put unmounts the mount path created for the give id.
 func (d *Driver) Put(id string) error {
+    d.locker.Lock(id)
+    defer d.locker.Unlock(id)
     mountpoint := path.Join(d.dir(id), "merged")
     if count := d.ctr.Decrement(mountpoint); count > 0 {
         return nil
     }
-    err := syscall.Unmount(mountpoint, 0)
-    if err != nil {
-        if _, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)); err != nil {
-            // We didn't have a "lower" directory, so we weren't mounting a "merged" directory anyway
-            return nil
-        }
-        logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
+    if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
+        logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
     }
-    return err
+    return nil
 }

 // Exists checks to see if the id is already mounted.
@@ -527,8 +666,33 @@ func (d *Driver) Exists(id string) bool {
     return err == nil
 }

+// isParent returns if the passed in parent is the direct parent of the passed in layer
+func (d *Driver) isParent(id, parent string) bool {
+    lowers, err := d.getLowerDirs(id)
+    if err != nil {
+        return false
+    }
+    if parent == "" && len(lowers) > 0 {
+        return false
+    }
+
+    parentDir := d.dir(parent)
+    var ld string
+    if len(lowers) > 0 {
+        ld = filepath.Dir(lowers[0])
+    }
+    if ld == "" && parent == "" {
+        return true
+    }
+    return ld == parentDir
+}
+
 // ApplyDiff applies the new layer into a root
-func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) {
+func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) {
+    if !d.isParent(id, parent) {
+        return d.naiveDiff.ApplyDiff(id, parent, diff)
+    }
+
     applyDir := d.getDiffPath(id)

     logrus.Debugf("Applying tar in %s", applyDir)
@@ -541,7 +705,7 @@ func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size
         return 0, err
     }

-    return d.DiffSize(id, parent)
+    return directory.Size(applyDir)
 }

 func (d *Driver) getDiffPath(id string) string {
@@ -554,12 +718,19 @@ func (d *Driver) getDiffPath(id string) string {
 // and its parent and returns the size in bytes of the changes
 // relative to its base filesystem directory.
 func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
+    if useNaiveDiff(d.home) || !d.isParent(id, parent) {
+        return d.naiveDiff.DiffSize(id, parent)
+    }
     return directory.Size(d.getDiffPath(id))
 }

 // Diff produces an archive of the changes between the specified
 // layer and its parent layer which may be "".
-func (d *Driver) Diff(id, parent string) (archive.Archive, error) {
+func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
+    if useNaiveDiff(d.home) || !d.isParent(id, parent) {
+        return d.naiveDiff.Diff(id, parent)
+    }
+
     diffPath := d.getDiffPath(id)
     logrus.Debugf("Tar with options on %s", diffPath)
     return archive.TarWithOptions(diffPath, &archive.TarOptions{
@@ -573,6 +744,9 @@ func (d *Driver) Diff(id, parent string) (archive.Archive, error) {
 // Changes produces a list of changes between the specified layer
 // and its parent layer. If parent is "", then all changes will be ADD changes.
 func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
+    if useNaiveDiff(d.home) || !d.isParent(id, parent) {
+        return d.naiveDiff.Changes(id, parent)
+    }
     // Overlay doesn't have snapshots, so we need to get changes from all parent
     // layers.
     diffPath := d.getDiffPath(id)
@@ -586,5 +760,5 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {

 // AdditionalImageStores returns additional image stores supported by the driver
 func (d *Driver) AdditionalImageStores() []string {
-    return d.opts.imageStores
+    return d.options.imageStores
 }
vendor/github.com/containers/storage/drivers/overlay/randomid.go (generated, vendored): 5 changes
@@ -11,7 +11,8 @@ import (
     "syscall"
     "time"

-    "github.com/Sirupsen/logrus"
+    "github.com/sirupsen/logrus"
+    "golang.org/x/sys/unix"
 )

 // generateID creates a new random string identifier with the given length
@@ -69,7 +70,7 @@ func retryOnError(err error) bool {
     case *os.PathError:
         return retryOnError(err.Err) // unpack the target error
     case syscall.Errno:
-        if err == syscall.EPERM {
+        if err == unix.EPERM {
             // EPERM represents an entropy pool exhaustion, a condition under
             // which we backoff and retry.
             return true
vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go (generated, vendored, new file): 18 changes
@@ -0,0 +1,18 @@
+// +build linux
+
+package overlayutils
+
+import (
+    "errors"
+    "fmt"
+)
+
+// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type.
+func ErrDTypeNotSupported(driver, backingFs string) error {
+    msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs)
+    if backingFs == "xfs" {
+        msg += " Reformat the filesystem with ftype=1 to enable d_type support."
+    }
+    msg += " Running without d_type is not supported."
+    return errors.New(msg)
+}
vendor/github.com/containers/storage/drivers/plugin.go (generated, vendored): 32 changes
@@ -1,32 +0,0 @@
-// +build experimental
-
-package graphdriver
-
-import (
-    "fmt"
-    "io"
-
-    "github.com/containers/storage/pkg/plugins"
-)
-
-type pluginClient interface {
-    // Call calls the specified method with the specified arguments for the plugin.
-    Call(string, interface{}, interface{}) error
-    // Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream
-    Stream(string, interface{}) (io.ReadCloser, error)
-    // SendFile calls the specified method, and passes through the IO stream
-    SendFile(string, io.Reader, interface{}) error
-}
-
-func lookupPlugin(name, home string, opts []string) (Driver, error) {
-    pl, err := plugins.Get(name, "GraphDriver")
-    if err != nil {
-        return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err)
-    }
-    return newPluginDriver(name, home, opts, pl.Client())
-}
-
-func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) {
-    proxy := &graphDriverProxy{name, c}
-    return proxy, proxy.Init(home, opts)
-}
vendor/github.com/containers/storage/drivers/plugin_unsupported.go (generated, vendored): 7 changes
@@ -1,7 +0,0 @@
-// +build !experimental
-
-package graphdriver
-
-func lookupPlugin(name, home string, opts []string) (Driver, error) {
-    return nil, ErrNotSupported
-}
vendor/github.com/containers/storage/drivers/proxy.go (generated, vendored): 225 changes
@@ -1,225 +0,0 @@
-// +build experimental
-
-package graphdriver
-
-import (
-    "fmt"
-
-    "github.com/containers/storage/pkg/archive"
-    "github.com/pkg/errors"
-)
-
-type graphDriverProxy struct {
-    name string
-    client pluginClient
-}
-
-type graphDriverRequest struct {
-    ID string `json:",omitempty"`
-    Parent string `json:",omitempty"`
-    MountLabel string `json:",omitempty"`
-}
-
-type graphDriverResponse struct {
-    Err string `json:",omitempty"`
-    Dir string `json:",omitempty"`
-    Exists bool `json:",omitempty"`
-    Status [][2]string `json:",omitempty"`
-    Changes []archive.Change `json:",omitempty"`
-    Size int64 `json:",omitempty"`
-    Metadata map[string]string `json:",omitempty"`
-}
-
-type graphDriverInitRequest struct {
-    Home string
-    Opts []string
-}
-
-func (d *graphDriverProxy) Init(home string, opts []string) error {
-    args := &graphDriverInitRequest{
-        Home: home,
-        Opts: opts,
-    }
-    var ret graphDriverResponse
-    if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil {
-        return err
-    }
-    if ret.Err != "" {
-        return errors.New(ret.Err)
-    }
-    return nil
-}
-
-func (d *graphDriverProxy) String() string {
-    return d.name
-}
-
-func (d *graphDriverProxy) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
-    args := &graphDriverRequest{
-        ID: id,
-        Parent: parent,
-        MountLabel: mountLabel,
-    }
-    var ret graphDriverResponse
-    if err := d.client.Call("GraphDriver.CreateReadWrite", args, &ret); err != nil {
-        return err
-    }
-    if ret.Err != "" {
-        return errors.New(ret.Err)
-    }
-    return nil
-}
-
-func (d *graphDriverProxy) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
-    args := &graphDriverRequest{
-        ID: id,
-        Parent: parent,
-        MountLabel: mountLabel,
-    }
-    var ret graphDriverResponse
-    if err := d.client.Call("GraphDriver.Create", args, &ret); err != nil {
-        return err
-    }
-    if ret.Err != "" {
-        return errors.New(ret.Err)
-    }
-    return nil
-}
-
-func (d *graphDriverProxy) Remove(id string) error {
-    args := &graphDriverRequest{ID: id}
-    var ret graphDriverResponse
-    if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil {
-        return err
-    }
-    if ret.Err != "" {
-        return errors.New(ret.Err)
-    }
-    return nil
-}
-
-func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) {
-    args := &graphDriverRequest{
-        ID: id,
-        MountLabel: mountLabel,
-    }
-    var ret graphDriverResponse
-    if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil {
-        return "", err
-    }
-    var err error
-    if ret.Err != "" {
-        err = errors.New(ret.Err)
-    }
-    return ret.Dir, err
-}
-
-func (d *graphDriverProxy) Put(id string) error {
-    args := &graphDriverRequest{ID: id}
-    var ret graphDriverResponse
-    if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil {
-        return err
-    }
-    if ret.Err != "" {
-        return errors.New(ret.Err)
-    }
-    return nil
-}
-
-func (d *graphDriverProxy) Exists(id string) bool {
-    args := &graphDriverRequest{ID: id}
-    var ret graphDriverResponse
-    if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil {
-        return false
-    }
-    return ret.Exists
-}
-
-func (d *graphDriverProxy) Status() [][2]string {
-    args := &graphDriverRequest{}
-    var ret graphDriverResponse
-    if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil {
-        return nil
-    }
-    return ret.Status
-}
-
-func (d *graphDriverProxy) Metadata(id string) (map[string]string, error) {
-    args := &graphDriverRequest{
-        ID: id,
-    }
-    var ret graphDriverResponse
-    if err := d.client.Call("GraphDriver.Metadata", args, &ret); err != nil {
-        return nil, err
-    }
-    if ret.Err != "" {
-        return nil, errors.New(ret.Err)
-    }
-    return ret.Metadata, nil
-}
-
-func (d *graphDriverProxy) Cleanup() error {
-    args := &graphDriverRequest{}
-    var ret graphDriverResponse
-    if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil {
-        return nil
-    }
-    if ret.Err != "" {
-        return errors.New(ret.Err)
-    }
-    return nil
-}
-
-func (d *graphDriverProxy) Diff(id, parent string) (archive.Archive, error) {
-    args := &graphDriverRequest{
-        ID: id,
-        Parent: parent,
-    }
-    body, err := d.client.Stream("GraphDriver.Diff", args)
-    if err != nil {
-        return nil, err
-    }
-    return archive.Archive(body), nil
-}
-
-func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) {
-    args := &graphDriverRequest{
-        ID: id,
-        Parent: parent,
-    }
-    var ret graphDriverResponse
-    if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil {
-        return nil, err
|
|
||||||
}
|
|
||||||
if ret.Err != "" {
|
|
||||||
return nil, errors.New(ret.Err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret.Changes, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *graphDriverProxy) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) {
|
|
||||||
var ret graphDriverResponse
|
|
||||||
if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
if ret.Err != "" {
|
|
||||||
return -1, errors.New(ret.Err)
|
|
||||||
}
|
|
||||||
return ret.Size, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) {
|
|
||||||
args := &graphDriverRequest{
|
|
||||||
ID: id,
|
|
||||||
Parent: parent,
|
|
||||||
}
|
|
||||||
var ret graphDriverResponse
|
|
||||||
if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
if ret.Err != "" {
|
|
||||||
return -1, errors.New(ret.Err)
|
|
||||||
}
|
|
||||||
return ret.Size, nil
|
|
||||||
}
|
|
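The proxy deleted above follows one pattern throughout: each graph-driver method packs its arguments into a graphDriverRequest, calls d.client.Call("GraphDriver.<Method>", args, &ret), and converts a non-empty ret.Err in the reply into a Go error. The sketch below illustrates that round-trip with a hypothetical in-memory pluginClient; the real interface was declared in the plugin support code that is removed in this same commit, so its exact signature here is an assumption rather than the upstream definition.

package main

import (
	"errors"
	"fmt"
)

// pluginClient is assumed to look roughly like the interface the proxy used;
// the real definition lived in the removed experimental plugin code.
type pluginClient interface {
	Call(serviceMethod string, args interface{}, ret interface{}) error
}

type graphDriverRequest struct {
	ID         string `json:",omitempty"`
	Parent     string `json:",omitempty"`
	MountLabel string `json:",omitempty"`
}

type graphDriverResponse struct {
	Err    string `json:",omitempty"`
	Exists bool   `json:",omitempty"`
}

// fakeClient is a stand-in that fills in the response instead of talking to a plugin socket.
type fakeClient struct{ failRemove bool }

func (c *fakeClient) Call(method string, args interface{}, ret interface{}) error {
	resp := ret.(*graphDriverResponse)
	if method == "GraphDriver.Remove" && c.failRemove {
		resp.Err = "layer is busy" // a plugin-side failure travels back inside the payload
	}
	if method == "GraphDriver.Exists" {
		resp.Exists = true
	}
	return nil // a transport-level error would be returned here instead
}

// remove mirrors the proxy's Remove: transport errors first, then payload errors.
func remove(c pluginClient, id string) error {
	args := &graphDriverRequest{ID: id}
	var ret graphDriverResponse
	if err := c.Call("GraphDriver.Remove", args, &ret); err != nil {
		return err
	}
	if ret.Err != "" {
		return errors.New(ret.Err)
	}
	return nil
}

func main() {
	fmt.Println(remove(&fakeClient{failRemove: true}, "abc")) // prints: layer is busy
}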
337  vendor/github.com/containers/storage/drivers/quota/projectquota.go  generated vendored  Normal file
@@ -0,0 +1,337 @@
// +build linux

//
// projectquota.go - implements XFS project quota controls
// for setting quota limits on a newly created directory.
// It currently supports the legacy XFS specific ioctls.
//
// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR
//       for both xfs/ext4 for kernel version >= v4.5
//

package quota

/*
#include <stdlib.h>
#include <dirent.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>

#ifndef FS_XFLAG_PROJINHERIT
struct fsxattr {
	__u32         fsx_xflags;
	__u32         fsx_extsize;
	__u32         fsx_nextents;
	__u32         fsx_projid;
	unsigned char fsx_pad[12];
};
#define FS_XFLAG_PROJINHERIT 0x00000200
#endif
#ifndef FS_IOC_FSGETXATTR
#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
#endif
#ifndef FS_IOC_FSSETXATTR
#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr)
#endif

#ifndef PRJQUOTA
#define PRJQUOTA 2
#endif
#ifndef XFS_PROJ_QUOTA
#define XFS_PROJ_QUOTA 2
#endif
#ifndef Q_XSETPQLIM
#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
#endif
#ifndef Q_XGETPQUOTA
#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA)
#endif
*/
import "C"
import (
	"fmt"
	"io/ioutil"
	"path"
	"path/filepath"
	"unsafe"

	"github.com/sirupsen/logrus"
	"golang.org/x/sys/unix"
)

// Quota limit params - currently we only control blocks hard limit
type Quota struct {
	Size uint64
}

// Control - Context to be used by storage driver (e.g. overlay)
// who wants to apply project quotas to container dirs
type Control struct {
	backingFsBlockDev string
	nextProjectID     uint32
	quotas            map[string]uint32
}

// NewControl - initialize project quota support.
// Test to make sure that quota can be set on a test dir and find
// the first project id to be used for the next container create.
//
// Returns nil (and error) if project quota is not supported.
//
// First get the project id of the home directory.
// This test will fail if the backing fs is not xfs.
//
// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.:
//    echo 999:/var/lib/containers/storage/overlay >> /etc/projects
//    echo storage:999 >> /etc/projid
//    xfs_quota -x -c 'project -s storage' /<xfs mount point>
//
// In that case, the home directory project id will be used as a "start offset"
// and all containers will be assigned larger project ids (e.g. >= 1000).
// This is a way to prevent xfs_quota management from conflicting with containers/storage.
//
// Then try to create a test directory with the next project id and set a quota
// on it. If that works, continue to scan existing containers to map allocated
// project ids.
//
func NewControl(basePath string) (*Control, error) {
	//
	// Get project id of parent dir as minimal id to be used by driver
	//
	minProjectID, err := getProjectID(basePath)
	if err != nil {
		return nil, err
	}
	minProjectID++

	//
	// create backing filesystem device node
	//
	backingFsBlockDev, err := makeBackingFsDev(basePath)
	if err != nil {
		return nil, err
	}

	//
	// Test if filesystem supports project quotas by trying to set
	// a quota on the first available project id
	//
	quota := Quota{
		Size: 0,
	}
	if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil {
		return nil, err
	}

	q := Control{
		backingFsBlockDev: backingFsBlockDev,
		nextProjectID:     minProjectID + 1,
		quotas:            make(map[string]uint32),
	}

	//
	// get first project id to be used for next container
	//
	err = q.findNextProjectID(basePath)
	if err != nil {
		return nil, err
	}

	logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID)
	return &q, nil
}

// SetQuota - assign a unique project id to directory and set the quota limits
// for that project id
func (q *Control) SetQuota(targetPath string, quota Quota) error {

	projectID, ok := q.quotas[targetPath]
	if !ok {
		projectID = q.nextProjectID

		//
		// assign project id to new container directory
		//
		err := setProjectID(targetPath, projectID)
		if err != nil {
			return err
		}

		q.quotas[targetPath] = projectID
		q.nextProjectID++
	}

	//
	// set the quota limit for the container's project id
	//
	logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID)
	return setProjectQuota(q.backingFsBlockDev, projectID, quota)
}

// setProjectQuota - set the quota for project id on xfs block device
func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error {
	var d C.fs_disk_quota_t
	d.d_version = C.FS_DQUOT_VERSION
	d.d_id = C.__u32(projectID)
	d.d_flags = C.XFS_PROJ_QUOTA

	d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT
	d.d_blk_hardlimit = C.__u64(quota.Size / 512)
	d.d_blk_softlimit = d.d_blk_hardlimit

	var cs = C.CString(backingFsBlockDev)
	defer C.free(unsafe.Pointer(cs))

	_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
		uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
		uintptr(unsafe.Pointer(&d)), 0, 0)
	if errno != 0 {
		return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v",
			projectID, backingFsBlockDev, errno.Error())
	}

	return nil
}

// GetQuota - get the quota limits of a directory that was configured with SetQuota
func (q *Control) GetQuota(targetPath string, quota *Quota) error {

	projectID, ok := q.quotas[targetPath]
	if !ok {
		return fmt.Errorf("quota not found for path : %s", targetPath)
	}

	//
	// get the quota limit for the container's project id
	//
	var d C.fs_disk_quota_t

	var cs = C.CString(q.backingFsBlockDev)
	defer C.free(unsafe.Pointer(cs))

	_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA,
		uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)),
		uintptr(unsafe.Pointer(&d)), 0, 0)
	if errno != 0 {
		return fmt.Errorf("Failed to get quota limit for projid %d on %s: %v",
			projectID, q.backingFsBlockDev, errno.Error())
	}
	quota.Size = uint64(d.d_blk_hardlimit) * 512

	return nil
}

// getProjectID - get the project id of path on xfs
func getProjectID(targetPath string) (uint32, error) {
	dir, err := openDir(targetPath)
	if err != nil {
		return 0, err
	}
	defer closeDir(dir)

	var fsx C.struct_fsxattr
	_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
		uintptr(unsafe.Pointer(&fsx)))
	if errno != 0 {
		return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error())
	}

	return uint32(fsx.fsx_projid), nil
}

// setProjectID - set the project id of path on xfs
func setProjectID(targetPath string, projectID uint32) error {
	dir, err := openDir(targetPath)
	if err != nil {
		return err
	}
	defer closeDir(dir)

	var fsx C.struct_fsxattr
	_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
		uintptr(unsafe.Pointer(&fsx)))
	if errno != 0 {
		return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error())
	}
	fsx.fsx_projid = C.__u32(projectID)
	fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
	_, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
		uintptr(unsafe.Pointer(&fsx)))
	if errno != 0 {
		return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error())
	}

	return nil
}

// findNextProjectID - find the next project id to be used for containers
// by scanning driver home directory to find used project ids
func (q *Control) findNextProjectID(home string) error {
	files, err := ioutil.ReadDir(home)
	if err != nil {
		return fmt.Errorf("read directory failed : %s", home)
	}
	for _, file := range files {
		if !file.IsDir() {
			continue
		}
		path := filepath.Join(home, file.Name())
		projid, err := getProjectID(path)
		if err != nil {
			return err
		}
		if projid > 0 {
			q.quotas[path] = projid
		}
		if q.nextProjectID <= projid {
			q.nextProjectID = projid + 1
		}
	}

	return nil
}

func free(p *C.char) {
	C.free(unsafe.Pointer(p))
}

func openDir(path string) (*C.DIR, error) {
	Cpath := C.CString(path)
	defer free(Cpath)

	dir := C.opendir(Cpath)
	if dir == nil {
		return nil, fmt.Errorf("Can't open dir")
	}
	return dir, nil
}

func closeDir(dir *C.DIR) {
	if dir != nil {
		C.closedir(dir)
	}
}

func getDirFd(dir *C.DIR) uintptr {
	return uintptr(C.dirfd(dir))
}

// Get the backing block device of the driver home directory
// and create a block device node under the home directory
// to be used by quotactl commands
func makeBackingFsDev(home string) (string, error) {
	var stat unix.Stat_t
	if err := unix.Stat(home, &stat); err != nil {
		return "", err
	}

	backingFsBlockDev := path.Join(home, "backingFsBlockDev")
	// Re-create just in case someone copied the home directory over to a new device
	unix.Unlink(backingFsBlockDev)
	if err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)); err != nil {
		return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err)
	}

	return backingFsBlockDev, nil
}
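The new quota package above exposes a small API: NewControl(basePath) probes whether the backing filesystem accepts XFS project quotas, SetQuota(dir, Quota{Size: n}) assigns the next free project ID to a directory and sets its block hard limit (the byte size is converted to 512-byte blocks internally), and GetQuota reads the limit back. A minimal usage sketch follows; the home path is an assumption, and it only works on a Linux host whose storage home sits on XFS mounted with project quotas (pquota) and with enough privilege to issue quotactl.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/containers/storage/drivers/quota"
)

func main() {
	// Driver home on an XFS pquota mount (assumed path for illustration).
	home := "/var/lib/containers/storage/overlay"

	ctl, err := quota.NewControl(home)
	if err != nil {
		log.Fatalf("project quotas unavailable: %v", err) // e.g. not XFS, or pquota not enabled
	}

	// SetQuota tags an existing directory with a project ID, so create it first.
	dir := home + "/example-container"
	if err := os.MkdirAll(dir, 0o700); err != nil {
		log.Fatal(err)
	}

	// Cap the directory at 1 GiB; internally this becomes a hard limit of
	// (1<<30)/512 filesystem blocks on that project ID.
	if err := ctl.SetQuota(dir, quota.Quota{Size: 1 << 30}); err != nil {
		log.Fatalf("SetQuota: %v", err)
	}

	var q quota.Quota
	if err := ctl.GetQuota(dir, &q); err != nil {
		log.Fatalf("GetQuota: %v", err)
	}
	fmt.Printf("limit for %s: %d bytes\n", dir, q.Size)
}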
74  vendor/github.com/containers/storage/drivers/vfs/driver.go  generated vendored
@@ -4,17 +4,18 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"strings"
 
 	"github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/chrootarchive"
 	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/pkg/system"
 	"github.com/opencontainers/selinux/go-selinux/label"
 )
 
 var (
 	// CopyWithTar defines the copy method to use.
-	CopyWithTar = chrootarchive.CopyWithTar
+	CopyWithTar = chrootarchive.NewArchiver(nil).CopyWithTar
 )
 
 func init() {
@@ -25,16 +26,17 @@ func init() {
 // This sets the home directory for the driver and returns NaiveDiffDriver.
 func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
 	d := &Driver{
-		home:    home,
-		uidMaps: uidMaps,
-		gidMaps: gidMaps,
+		homes:      []string{home},
+		idMappings: idtools.NewIDMappingsFromMaps(uidMaps, gidMaps),
 	}
-	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
-	if err != nil {
+	rootIDs := d.idMappings.RootPair()
+	if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil {
 		return nil, err
 	}
-	if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil {
-		return nil, err
+	for _, option := range options {
+		if strings.HasPrefix(option, "vfs.imagestore=") {
+			d.homes = append(d.homes, strings.Split(option[15:], ",")...)
+		}
 	}
 	return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
 }
@@ -44,9 +46,8 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
 // In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support.
 // Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver
 type Driver struct {
-	home    string
-	uidMaps []idtools.IDMap
-	gidMaps []idtools.IDMap
+	homes      []string
+	idMappings *idtools.IDMappings
 }
 
 func (d *Driver) String() string {
@@ -70,29 +71,26 @@ func (d *Driver) Cleanup() error {
 
 // CreateReadWrite creates a layer that is writable for use as a container
 // file system.
-func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
-	return d.Create(id, parent, mountLabel, storageOpt)
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+	return d.Create(id, parent, opts)
 }
 
 // Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent.
-func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
-	if len(storageOpt) != 0 {
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
+	if opts != nil && len(opts.StorageOpt) != 0 {
 		return fmt.Errorf("--storage-opt is not supported for vfs")
 	}
 
 	dir := d.dir(id)
-	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
-	if err != nil {
+	rootIDs := d.idMappings.RootPair()
+	if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil {
 		return err
 	}
-	if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil {
+	if err := idtools.MkdirAndChown(dir, 0755, rootIDs); err != nil {
 		return err
 	}
-	if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil {
-		return err
-	}
-	opts := []string{"level:s0"}
-	if _, mountLabel, err := label.InitLabels(opts); err == nil {
+	labelOpts := []string{"level:s0"}
+	if _, mountLabel, err := label.InitLabels(labelOpts); err == nil {
 		label.SetFileLabel(dir, mountLabel)
 	}
 	if parent == "" {
@@ -102,22 +100,26 @@ func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]str
 	if err != nil {
 		return fmt.Errorf("%s: %s", parent, err)
 	}
-	if err := CopyWithTar(parentDir, dir); err != nil {
-		return err
-	}
-	return nil
+	return CopyWithTar(parentDir, dir)
 }
 
 func (d *Driver) dir(id string) string {
-	return filepath.Join(d.home, "dir", filepath.Base(id))
+	for i, home := range d.homes {
+		if i > 0 {
+			home = filepath.Join(home, d.String())
+		}
+		candidate := filepath.Join(home, "dir", filepath.Base(id))
+		fi, err := os.Stat(candidate)
+		if err == nil && fi.IsDir() {
+			return candidate
+		}
+	}
+	return filepath.Join(d.homes[0], "dir", filepath.Base(id))
}
 
 // Remove deletes the content from the directory for a given id.
 func (d *Driver) Remove(id string) error {
-	if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) {
-		return err
-	}
-	return nil
+	return system.EnsureRemoveAll(d.dir(id))
 }
 
 // Get returns the directory for the given id.
@@ -146,6 +148,8 @@ func (d *Driver) Exists(id string) bool {
 
 // AdditionalImageStores returns additional image stores supported by the driver
 func (d *Driver) AdditionalImageStores() []string {
-	var imageStores []string
-	return imageStores
+	if len(d.homes) > 1 {
+		return d.homes[1:]
+	}
+	return nil
 }
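The vfs changes above replace the single home field with a list of homes: extra read-only image stores are supplied through a "vfs.imagestore=" driver option whose value is a comma-separated list of paths, dir(id) probes each store in order (appending the driver name for the secondary ones), and AdditionalImageStores reports everything after the primary home. A small standalone sketch of just the option parsing, with made-up paths:

package main

import (
	"fmt"
	"strings"
)

// parseImageStores mirrors the option handling in vfs.Init: the primary home
// comes first, and every "vfs.imagestore=" option appends comma-separated paths.
func parseImageStores(home string, options []string) []string {
	homes := []string{home}
	for _, option := range options {
		if strings.HasPrefix(option, "vfs.imagestore=") {
			homes = append(homes, strings.Split(option[len("vfs.imagestore="):], ",")...)
		}
	}
	return homes
}

func main() {
	homes := parseImageStores("/var/lib/containers/storage/vfs",
		[]string{"vfs.imagestore=/mnt/store1,/mnt/store2"}) // hypothetical stores
	fmt.Println(homes[0])  // primary, read-write home
	fmt.Println(homes[1:]) // what AdditionalImageStores would return
}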
556  vendor/github.com/containers/storage/drivers/windows/windows.go  generated vendored
@@ -6,6 +6,7 @@ import (
 	"bufio"
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -16,13 +17,13 @@ import (
 	"strings"
 	"sync"
 	"syscall"
+	"time"
 	"unsafe"
 
 	"github.com/Microsoft/go-winio"
 	"github.com/Microsoft/go-winio/archive/tar"
 	"github.com/Microsoft/go-winio/backuptar"
 	"github.com/Microsoft/hcsshim"
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/idtools"
@@ -30,16 +31,37 @@ import (
 	"github.com/containers/storage/pkg/longpath"
 	"github.com/containers/storage/pkg/reexec"
 	"github.com/containers/storage/pkg/system"
-	"github.com/vbatts/tar-split/tar/storage"
+	units "github.com/docker/go-units"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/windows"
 )
 
 // filterDriver is an HCSShim driver type for the Windows Filter driver.
 const filterDriver = 1
 
+var (
+	// mutatedFiles is a list of files that are mutated by the import process
+	// and must be backed up and restored.
+	mutatedFiles = map[string]string{
+		"UtilityVM/Files/EFI/Microsoft/Boot/BCD":      "bcd.bak",
+		"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG":  "bcd.log.bak",
+		"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak",
+		"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak",
+	}
+	noreexec = false
+)
+
 // init registers the windows graph drivers to the register.
 func init() {
 	graphdriver.Register("windowsfilter", InitFilter)
-	reexec.Register("storage-windows-write-layer", writeLayer)
+	// DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes
+	// debugging issues in the re-exec codepath significantly easier.
+	if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" {
+		logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.")
+		noreexec = true
+	} else {
+		reexec.Register("docker-windows-write-layer", writeLayerReexec)
+	}
 }
 
 type checker struct {
@@ -60,13 +82,22 @@ type Driver struct {
 	cache map[string]string
 }
 
-func isTP5OrOlder() bool {
-	return system.GetOSVersion().Build <= 14300
-}
-
 // InitFilter returns a new Windows storage filter driver.
 func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
 	logrus.Debugf("WindowsGraphDriver InitFilter at %s", home)
+
+	fsType, err := getFileSystemType(string(home[0]))
+	if err != nil {
+		return nil, err
+	}
+	if strings.ToLower(fsType) == "refs" {
+		return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home)
+	}
+
+	if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil {
+		return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err)
+	}
+
 	d := &Driver{
 		info: hcsshim.DriverInfo{
 			HomeDir: home,
@@ -78,6 +109,37 @@ func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap)
 	return d, nil
 }
 
+// win32FromHresult is a helper function to get the win32 error code from an HRESULT
+func win32FromHresult(hr uintptr) uintptr {
+	if hr&0x1fff0000 == 0x00070000 {
+		return hr & 0xffff
+	}
+	return hr
+}
+
+// getFileSystemType obtains the type of a file system through GetVolumeInformation
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx
+func getFileSystemType(drive string) (fsType string, hr error) {
+	var (
+		modkernel32              = windows.NewLazySystemDLL("kernel32.dll")
+		procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW")
+		buf                      = make([]uint16, 255)
+		size                     = windows.MAX_PATH + 1
+	)
+	if len(drive) != 1 {
+		hr = errors.New("getFileSystemType must be called with a drive letter")
+		return
+	}
+	drive += `:\`
+	n := uintptr(unsafe.Pointer(nil))
+	r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0)
+	if int32(r0) < 0 {
+		hr = syscall.Errno(win32FromHresult(r0))
+	}
+	fsType = windows.UTF16ToString(buf)
+	return
+}
+
 // String returns the string representation of a driver. This should match
 // the name the graph driver has been registered with.
 func (d *Driver) String() string {
@@ -91,8 +153,19 @@ func (d *Driver) Status() [][2]string {
 	}
 }
 
+// panicIfUsedByLcow does exactly what it says.
+// TODO @jhowardmsft - this is a temporary measure for the bring-up of
+// Linux containers on Windows. It is a failsafe to ensure that the right
+// graphdriver is used.
+func panicIfUsedByLcow() {
+	if system.LCOWSupported() {
+		panic("inconsistency - windowsfilter graphdriver should not be used when in LCOW mode")
+	}
+}
+
 // Exists returns true if the given id is registered with this driver.
 func (d *Driver) Exists(id string) bool {
+	panicIfUsedByLcow()
 	rID, err := d.resolveID(id)
 	if err != nil {
 		return false
@@ -106,20 +179,24 @@ func (d *Driver) Exists(id string) bool {
 
 // CreateReadWrite creates a layer that is writable for use as a container
 // file system.
-func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
-	return d.create(id, parent, mountLabel, false, storageOpt)
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+	panicIfUsedByLcow()
+	if opts != nil {
+		return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt)
+	}
+	return d.create(id, parent, "", false, nil)
 }
 
 // Create creates a new read-only layer with the given id.
-func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
-	return d.create(id, parent, mountLabel, true, storageOpt)
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
+	panicIfUsedByLcow()
+	if opts != nil {
+		return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt)
+	}
+	return d.create(id, parent, "", true, nil)
 }
 
 func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error {
-	if len(storageOpt) != 0 {
-		return fmt.Errorf("--storage-opt is not supported for windows")
-	}
-
 	rPId, err := d.resolveID(parent)
 	if err != nil {
 		return err
@@ -133,7 +210,7 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt
 	var layerChain []string
 
 	if rPId != "" {
-		parentPath, err := hcsshim.LayerMountPath(d.info, rPId)
+		parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId)
 		if err != nil {
 			return err
 		}
@@ -156,32 +233,20 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt
 			parentPath = layerChain[0]
 		}
 
-		if isTP5OrOlder() {
-			// Pre-create the layer directory, providing an ACL to give the Hyper-V Virtual Machines
-			// group access. This is necessary to ensure that Hyper-V containers can access the
-			// virtual machine data. This is not necessary post-TP5.
-			path, err := syscall.UTF16FromString(filepath.Join(d.info.HomeDir, id))
-			if err != nil {
-				return err
-			}
-			// Give system and administrators full control, and VMs read, write, and execute.
-			// Mark these ACEs as inherited.
-			sd, err := winio.SddlToSecurityDescriptor("D:(A;OICI;FA;;;SY)(A;OICI;FA;;;BA)(A;OICI;FRFWFX;;;S-1-5-83-0)")
-			if err != nil {
-				return err
-			}
-			err = syscall.CreateDirectory(&path[0], &syscall.SecurityAttributes{
-				Length:             uint32(unsafe.Sizeof(syscall.SecurityAttributes{})),
-				SecurityDescriptor: uintptr(unsafe.Pointer(&sd[0])),
-			})
-			if err != nil {
-				return err
-			}
-		}
-
 		if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil {
 			return err
 		}
+
+		storageOptions, err := parseStorageOpt(storageOpt)
+		if err != nil {
+			return fmt.Errorf("Failed to parse storage options - %s", err)
+		}
+
+		if storageOptions.size != 0 {
+			if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil {
+				return err
+			}
+		}
 	}
 
 	if _, err := os.Lstat(d.dir(parent)); err != nil {
@@ -208,16 +273,89 @@ func (d *Driver) dir(id string) string {
 
 // Remove unmounts and removes the dir information.
 func (d *Driver) Remove(id string) error {
+	panicIfUsedByLcow()
 	rID, err := d.resolveID(id)
 	if err != nil {
 		return err
 	}
-	os.RemoveAll(filepath.Join(d.info.HomeDir, "sysfile-backups", rID)) // ok to fail
-	return hcsshim.DestroyLayer(d.info, rID)
+
+	// This retry loop is due to a bug in Windows (Internal bug #9432268)
+	// if GetContainers fails with ErrVmcomputeOperationInvalidState
+	// it is a transient error. Retry until it succeeds.
+	var computeSystems []hcsshim.ContainerProperties
+	retryCount := 0
+	osv := system.GetOSVersion()
+	for {
+		// Get and terminate any template VMs that are currently using the layer.
+		// Note: It is unfortunate that we end up in the graphdrivers Remove() call
+		// for both containers and images, but the logic for template VMs is only
+		// needed for images - specifically we are looking to see if a base layer
+		// is in use by a template VM as a result of having started a Hyper-V
+		// container at some point.
+		//
+		// We have a retry loop for ErrVmcomputeOperationInvalidState and
+		// ErrVmcomputeOperationAccessIsDenied as there is a race condition
+		// in RS1 and RS2 building during enumeration when a silo is going away
+		// for example under it, in HCS. AccessIsDenied added to fix 30278.
+		//
+		// TODO @jhowardmsft - For RS3, we can remove the retries. Also consider
+		// using platform APIs (if available) to get this more succinctly. Also
+		// consider enhancing the Remove() interface to have context of why
+		// the remove is being called - that could improve efficiency by not
+		// enumerating compute systems during a remove of a container as it's
+		// not required.
+		computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{})
+		if err != nil {
+			if (osv.Build < 15139) &&
+				((err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied)) {
+				if retryCount >= 500 {
+					break
+				}
+				retryCount++
+				time.Sleep(10 * time.Millisecond)
+				continue
+			}
+			return err
+		}
+		break
+	}
+
+	for _, computeSystem := range computeSystems {
+		if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate {
+			container, err := hcsshim.OpenContainer(computeSystem.ID)
+			if err != nil {
+				return err
+			}
+			defer container.Close()
+			err = container.Terminate()
+			if hcsshim.IsPending(err) {
+				err = container.Wait()
+			} else if hcsshim.IsAlreadyStopped(err) {
+				err = nil
+			}
+
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	layerPath := filepath.Join(d.info.HomeDir, rID)
+	tmpID := fmt.Sprintf("%s-removing", rID)
+	tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID)
+	if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil {
+		logrus.Errorf("Failed to DestroyLayer %s: %s", id, err)
+	}
+
+	return nil
 }
 
-// Get returns the rootfs path for the id. This will mount the dir at it's given path.
+// Get returns the rootfs path for the id. This will mount the dir at its given path.
 func (d *Driver) Get(id, mountLabel string) (string, error) {
+	panicIfUsedByLcow()
 	logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
 	var dir string
 
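The rewritten Remove in the hunk above no longer destroys the layer directory under its real name: it renames <id> to <id>-removing first, asks HCS to destroy the renamed copy, and tolerates a failed destroy because the new Cleanup sweeps any leftover *-removing directories later. A platform-neutral sketch of that rename-then-delete-then-sweep pattern, using only the standard library (the real code calls into hcsshim rather than os.RemoveAll):

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"
)

// remove renames the target out of the way before deleting it, so a crash
// mid-delete leaves an obviously stale "-removing" directory instead of a
// half-removed layer still sitting under its real name.
func remove(home, id string) error {
	layerPath := filepath.Join(home, id)
	tmpPath := filepath.Join(home, id+"-removing")
	if err := os.Rename(layerPath, tmpPath); err != nil && !os.IsNotExist(err) {
		return err
	}
	if err := os.RemoveAll(tmpPath); err != nil {
		log.Printf("failed to destroy %s: %v", tmpPath, err) // best effort; cleanup retries later
	}
	return nil
}

// cleanup sweeps whatever a crashed or failed removal left behind.
func cleanup(home string) error {
	items, err := os.ReadDir(home)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}
	for _, item := range items {
		if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") {
			if err := os.RemoveAll(filepath.Join(home, item.Name())); err != nil {
				log.Printf("failed to cleanup %s: %v", item.Name(), err)
			}
		}
	}
	return nil
}

func main() {
	home, _ := os.MkdirTemp("", "layers")
	defer os.RemoveAll(home)
	os.MkdirAll(filepath.Join(home, "layer1"), 0o755)
	fmt.Println(remove(home, "layer1"), cleanup(home))
}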
@ -248,9 +386,12 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
|
|||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
mountPath, err := hcsshim.LayerMountPath(d.info, rID)
|
mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
d.ctr.Decrement(rID)
|
d.ctr.Decrement(rID)
|
||||||
|
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
|
||||||
|
logrus.Warnf("Failed to Unprepare %s: %s", id, err)
|
||||||
|
}
|
||||||
if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
|
if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
|
||||||
logrus.Warnf("Failed to Deactivate %s: %s", id, err)
|
logrus.Warnf("Failed to Deactivate %s: %s", id, err)
|
||||||
}
|
}
|
||||||
@ -273,6 +414,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) {
|
|||||||
|
|
||||||
// Put adds a new layer to the driver.
|
// Put adds a new layer to the driver.
|
||||||
func (d *Driver) Put(id string) error {
|
func (d *Driver) Put(id string) error {
|
||||||
|
panicIfUsedByLcow()
|
||||||
logrus.Debugf("WindowsGraphDriver Put() id %s", id)
|
logrus.Debugf("WindowsGraphDriver Put() id %s", id)
|
||||||
|
|
||||||
rID, err := d.resolveID(id)
|
rID, err := d.resolveID(id)
|
||||||
@ -283,9 +425,15 @@ func (d *Driver) Put(id string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
d.cacheMu.Lock()
|
d.cacheMu.Lock()
|
||||||
|
_, exists := d.cache[rID]
|
||||||
delete(d.cache, rID)
|
delete(d.cache, rID)
|
||||||
d.cacheMu.Unlock()
|
d.cacheMu.Unlock()
|
||||||
|
|
||||||
|
// If the cache was not populated, then the layer was left unprepared and deactivated
|
||||||
|
if !exists {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
|
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -293,14 +441,39 @@ func (d *Driver) Put(id string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Cleanup ensures the information the driver stores is properly removed.
|
// Cleanup ensures the information the driver stores is properly removed.
|
||||||
|
// We use this opportunity to cleanup any -removing folders which may be
|
||||||
|
// still left if the daemon was killed while it was removing a layer.
|
||||||
func (d *Driver) Cleanup() error {
|
func (d *Driver) Cleanup() error {
|
||||||
|
items, err := ioutil.ReadDir(d.info.HomeDir)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note we don't return an error below - it's possible the files
|
||||||
|
// are locked. However, next time around after the daemon exits,
|
||||||
|
// we likely will be able to to cleanup successfully. Instead we log
|
||||||
|
// warnings if there are errors.
|
||||||
|
for _, item := range items {
|
||||||
|
if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") {
|
||||||
|
if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil {
|
||||||
|
logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err)
|
||||||
|
} else {
|
||||||
|
logrus.Infof("Cleaned up %s", item.Name())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Diff produces an archive of the changes between the specified
|
// Diff produces an archive of the changes between the specified
|
||||||
// layer and its parent layer which may be "".
|
// layer and its parent layer which may be "".
|
||||||
// The layer should be mounted when calling this function
|
// The layer should be mounted when calling this function
|
||||||
func (d *Driver) Diff(id, parent string) (_ archive.Archive, err error) {
|
func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) {
|
||||||
|
panicIfUsedByLcow()
|
||||||
rID, err := d.resolveID(id)
|
rID, err := d.resolveID(id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
@ -335,8 +508,9 @@ func (d *Driver) Diff(id, parent string) (_ archive.Archive, err error) {
|
|||||||
|
|
||||||
// Changes produces a list of changes between the specified layer
|
// Changes produces a list of changes between the specified layer
|
||||||
// and its parent layer. If parent is "", then all changes will be ADD changes.
|
// and its parent layer. If parent is "", then all changes will be ADD changes.
|
||||||
// The layer should be mounted when calling this function
|
// The layer should not be mounted when calling this function.
|
||||||
func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
|
func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
|
||||||
|
panicIfUsedByLcow()
|
||||||
rID, err := d.resolveID(id)
|
rID, err := d.resolveID(id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -346,13 +520,12 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// this is assuming that the layer is unmounted
|
if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
|
||||||
if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
if err := hcsshim.PrepareLayer(d.info, rID, parentChain); err != nil {
|
if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
|
||||||
logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
|
logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
@ -392,7 +565,8 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
|
|||||||
// layer with the specified id and parent, returning the size of the
|
// layer with the specified id and parent, returning the size of the
|
||||||
// new layer in bytes.
|
// new layer in bytes.
|
||||||
// The layer should not be mounted when calling this function
|
// The layer should not be mounted when calling this function
|
||||||
func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) {
|
func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
|
||||||
|
panicIfUsedByLcow()
|
||||||
var layerChain []string
|
var layerChain []string
|
||||||
if parent != "" {
|
if parent != "" {
|
||||||
rPId, err := d.resolveID(parent)
|
rPId, err := d.resolveID(parent)
|
||||||
@ -403,7 +577,7 @@ func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (int64, error
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
parentPath, err := hcsshim.LayerMountPath(d.info, rPId)
|
parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
@ -427,6 +601,7 @@ func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (int64, error
|
|||||||
// and its parent and returns the size in bytes of the changes
|
// and its parent and returns the size in bytes of the changes
|
||||||
// relative to its base filesystem directory.
|
// relative to its base filesystem directory.
|
||||||
func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
|
func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
|
||||||
|
panicIfUsedByLcow()
|
||||||
rPId, err := d.resolveID(parent)
|
rPId, err := d.resolveID(parent)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
@ -448,6 +623,7 @@ func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
|
|||||||
|
|
||||||
// Metadata returns custom driver information.
|
// Metadata returns custom driver information.
|
||||||
func (d *Driver) Metadata(id string) (map[string]string, error) {
|
func (d *Driver) Metadata(id string) (map[string]string, error) {
|
||||||
|
panicIfUsedByLcow()
|
||||||
m := make(map[string]string)
|
m := make(map[string]string)
|
||||||
m["dir"] = d.dir(id)
|
m["dir"] = d.dir(id)
|
||||||
return m, nil
|
return m, nil
|
||||||
@ -483,7 +659,7 @@ func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// exportLayer generates an archive from a layer based on the given ID.
|
// exportLayer generates an archive from a layer based on the given ID.
|
||||||
func (d *Driver) exportLayer(id string, parentLayerPaths []string) (archive.Archive, error) {
|
func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) {
|
||||||
archive, w := io.Pipe()
|
archive, w := io.Pipe()
|
||||||
go func() {
|
go func() {
|
||||||
err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
|
err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
|
||||||
@ -505,7 +681,48 @@ func (d *Driver) exportLayer(id string, parentLayerPaths []string) (archive.Arch
|
|||||||
return archive, nil
|
return archive, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeLayerFromTar(r archive.Reader, w hcsshim.LayerWriter) (int64, error) {
|
// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and
|
||||||
|
// writes it to a backup stream, and also saves any files that will be mutated
|
||||||
|
// by the import layer process to a backup location.
|
||||||
|
func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) {
|
||||||
|
var bcdBackup *os.File
|
||||||
|
var bcdBackupWriter *winio.BackupFileWriter
|
||||||
|
if backupPath, ok := mutatedFiles[hdr.Name]; ok {
|
||||||
|
bcdBackup, err = os.Create(filepath.Join(root, backupPath))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
cerr := bcdBackup.Close()
|
||||||
|
if err == nil {
|
||||||
|
err = cerr
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false)
|
||||||
|
defer func() {
|
||||||
|
cerr := bcdBackupWriter.Close()
|
||||||
|
if err == nil {
|
||||||
|
err = cerr
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
buf.Reset(io.MultiWriter(w, bcdBackupWriter))
|
||||||
|
} else {
|
||||||
|
buf.Reset(w)
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
ferr := buf.Flush()
|
||||||
|
if err == nil {
|
||||||
|
err = ferr
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) {
|
||||||
t := tar.NewReader(r)
|
t := tar.NewReader(r)
|
||||||
hdr, err := t.Next()
|
hdr, err := t.Next()
|
||||||
totalSize := int64(0)
|
totalSize := int64(0)
|
||||||
@ -539,30 +756,7 @@ func writeLayerFromTar(r archive.Reader, w hcsshim.LayerWriter) (int64, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
buf.Reset(w)
|
hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root)
|
||||||
|
|
||||||
// Add the Hyper-V Virtual Machine group ACE to the security descriptor
|
|
||||||
// for TP5 so that Xenons can access all files. This is not necessary
|
|
||||||
// for post-TP5 builds.
|
|
||||||
if isTP5OrOlder() {
|
|
||||||
if sddl, ok := hdr.Winheaders["sd"]; ok {
|
|
||||||
var ace string
|
|
||||||
if hdr.Typeflag == tar.TypeDir {
|
|
||||||
ace = "(A;OICI;0x1200a9;;;S-1-5-83-0)"
|
|
||||||
} else {
|
|
||||||
ace = "(A;;0x1200a9;;;S-1-5-83-0)"
|
|
||||||
}
|
|
||||||
if hdr.Winheaders["sd"], ok = addAceToSddlDacl(sddl, ace); !ok {
|
|
||||||
logrus.Debugf("failed to add VM ACE to %s", sddl)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
hdr, err = backuptar.WriteBackupStreamFromTarFile(buf, t, hdr)
|
|
||||||
ferr := buf.Flush()
|
|
||||||
if ferr != nil {
|
|
||||||
err = ferr
|
|
||||||
}
|
|
||||||
totalSize += size
|
totalSize += size
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -572,105 +766,75 @@ func writeLayerFromTar(r archive.Reader, w hcsshim.LayerWriter) (int64, error) {
|
|||||||
return totalSize, nil
|
return totalSize, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func addAceToSddlDacl(sddl, ace string) (string, bool) {
|
|
||||||
daclStart := strings.Index(sddl, "D:")
|
|
||||||
if daclStart < 0 {
|
|
||||||
return sddl, false
|
|
||||||
}
|
|
||||||
|
|
||||||
dacl := sddl[daclStart:]
|
|
||||||
daclEnd := strings.Index(dacl, "S:")
|
|
||||||
if daclEnd < 0 {
|
|
||||||
daclEnd = len(dacl)
|
|
||||||
}
|
|
||||||
dacl = dacl[:daclEnd]
|
|
||||||
|
|
||||||
if strings.Contains(dacl, ace) {
|
|
||||||
return sddl, true
|
|
||||||
}
|
|
||||||
|
|
||||||
i := 2
|
|
||||||
for i+1 < len(dacl) {
|
|
||||||
if dacl[i] != '(' {
|
|
||||||
return sddl, false
|
|
||||||
}
|
|
||||||
|
|
||||||
if dacl[i+1] == 'A' {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
i += 2
|
|
||||||
for p := 1; i < len(dacl) && p > 0; i++ {
|
|
||||||
if dacl[i] == '(' {
|
|
||||||
p++
|
|
||||||
} else if dacl[i] == ')' {
|
|
||||||
p--
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return sddl[:daclStart+i] + ace + sddl[daclStart+i:], true
|
|
||||||
}
|
|
||||||
|
|
||||||
// importLayer adds a new layer to the tag and graph store based on the given data.
|
// importLayer adds a new layer to the tag and graph store based on the given data.
|
||||||
func (d *Driver) importLayer(id string, layerData archive.Reader, parentLayerPaths []string) (size int64, err error) {
|
func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) {
|
||||||
cmd := reexec.Command(append([]string{"storage-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...)
|
if !noreexec {
|
||||||
output := bytes.NewBuffer(nil)
|
cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...)
|
||||||
cmd.Stdin = layerData
|
output := bytes.NewBuffer(nil)
|
||||||
cmd.Stdout = output
|
cmd.Stdin = layerData
|
||||||
cmd.Stderr = output
|
cmd.Stdout = output
|
||||||
|
cmd.Stderr = output
|
||||||
|
|
||||||
if err = cmd.Start(); err != nil {
|
if err = cmd.Start(); err != nil {
|
||||||
return
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = cmd.Wait(); err != nil {
|
||||||
|
return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output)
|
||||||
|
}
|
||||||
|
|
||||||
|
return strconv.ParseInt(output.String(), 10, 64)
|
||||||
}
|
}
|
||||||
|
return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...)
|
||||||
if err = cmd.Wait(); err != nil {
|
|
||||||
return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output)
|
|
||||||
}
|
|
||||||
|
|
||||||
return strconv.ParseInt(output.String(), 10, 64)
|
|
||||||
}
|
}
|
||||||
 
-// writeLayer is the re-exec entry point for writing a layer from a tar file
-func writeLayer() {
-	home := os.Args[1]
-	id := os.Args[2]
-	parentLayerPaths := os.Args[3:]
-
-	err := func() error {
-		err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege})
-		if err != nil {
-			return err
-		}
-
-		info := hcsshim.DriverInfo{
-			Flavour: filterDriver,
-			HomeDir: home,
-		}
-
-		w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths)
-		if err != nil {
-			return err
-		}
-
-		size, err := writeLayerFromTar(os.Stdin, w)
-		if err != nil {
-			return err
-		}
-
-		err = w.Close()
-		if err != nil {
-			return err
-		}
-
-		fmt.Fprint(os.Stdout, size)
-		return nil
-	}()
-
+// writeLayerReexec is the re-exec entry point for writing a layer from a tar file
+func writeLayerReexec() {
+	size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...)
 	if err != nil {
 		fmt.Fprint(os.Stderr, err)
 		os.Exit(1)
 	}
+	fmt.Fprint(os.Stdout, size)
+}
+
+// writeLayer writes a layer from a tar file.
+func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (int64, error) {
+	err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege})
+	if err != nil {
+		return 0, err
+	}
+	if noreexec {
+		defer func() {
+			if err := winio.DisableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil {
+				// This should never happen, but just in case when in debugging mode.
+				// See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale.
+				panic("Failed to disabled process privileges while in non re-exec mode")
+			}
+		}()
+	}
+
+	info := hcsshim.DriverInfo{
+		Flavour: filterDriver,
+		HomeDir: home,
+	}
+
+	w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths)
+	if err != nil {
+		return 0, err
+	}
+
+	size, err := writeLayerFromTar(layerData, w, filepath.Join(home, id))
+	if err != nil {
+		return 0, err
+	}
+
+	err = w.Close()
+	if err != nil {
+		return 0, err
+	}
+
+	return size, nil
 }
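Note: the noreexec branch and the renamed writeLayerReexec entry point rely on a reexec registration that is not part of this hunk (presumably under the same "docker-windows-write-layer" name used by reexec.Command above). A minimal, self-contained sketch of that registration pattern with containers/storage's reexec package, using an invented command name:

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

// writeLayerReexec stands in for the real entry point shown in the diff above;
// here it only proves the re-invoked child process ran.
func writeLayerReexec() {
	fmt.Fprint(os.Stdout, 0)
}

func init() {
	// Hypothetical name for this sketch; the driver would register the same
	// name it later passes to reexec.Command.
	reexec.Register("example-write-layer", writeLayerReexec)
}

func main() {
	// reexec.Init returns true when this binary was re-invoked under a
	// registered name, in which case the registered function already ran.
	if reexec.Init() {
		return
	}

	// Parent path: re-invoke ourselves as the registered command.
	cmd := reexec.Command("example-write-layer")
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "re-exec error:", err)
		os.Exit(1)
	}
}

Running the binary normally takes the parent path; when it re-invokes itself, reexec.Init dispatches to the registered function in the child.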
 
 // resolveID computes the layerID information based on the given id.
@@ -686,11 +850,7 @@ func (d *Driver) resolveID(id string) (string, error) {
 
 // setID stores the layerId in disk.
 func (d *Driver) setID(id, altID string) error {
-	err := ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
-	if err != nil {
-		return err
-	}
-	return nil
+	return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
 }
 
 // getLayerChain returns the layer chain information.
@@ -733,17 +893,23 @@ type fileGetCloserWithBackupPrivileges struct {
 }
 
 func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) {
+	if backupPath, ok := mutatedFiles[filename]; ok {
+		return os.Open(filepath.Join(fg.path, backupPath))
+	}
+
 	var f *os.File
 	// Open the file while holding the Windows backup privilege. This ensures that the
 	// file can be opened even if the caller does not actually have access to it according
-	// to the security descriptor.
+	// to the security descriptor. Also use sequential file access to avoid depleting the
+	// standby list - Microsoft VSO Bug Tracker #9900466
 	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
 		path := longpath.AddPrefix(filepath.Join(fg.path, filename))
-		p, err := syscall.UTF16FromString(path)
+		p, err := windows.UTF16FromString(path)
 		if err != nil {
 			return err
 		}
-		h, err := syscall.CreateFile(&p[0], syscall.GENERIC_READ, syscall.FILE_SHARE_READ, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
+		const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
+		h, err := windows.CreateFile(&p[0], windows.GENERIC_READ, windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0)
 		if err != nil {
 			return &os.PathError{Op: "open", Path: path, Err: err}
 		}
@@ -757,19 +923,10 @@ func (fg *fileGetCloserWithBackupPrivileges) Close() error {
 	return nil
 }
 
-type fileGetDestroyCloser struct {
-	storage.FileGetter
-	path string
-}
-
-func (f *fileGetDestroyCloser) Close() error {
-	// TODO: activate layers and release here?
-	return os.RemoveAll(f.path)
-}
-
 // DiffGetter returns a FileGetCloser that can read files from the directory that
 // contains files for the layer differences. Used for direct access for tar-split.
 func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+	panicIfUsedByLcow()
 	id, err := d.resolveID(id)
 	if err != nil {
 		return nil, err
@@ -777,3 +934,32 @@ func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
 
 	return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil
 }
 
+// AdditionalImageStores returns additional image stores supported by the driver
+func (d *Driver) AdditionalImageStores() []string {
+	return nil
+}
+
+type storageOptions struct {
+	size uint64
+}
+
+func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) {
+	options := storageOptions{}
+
+	// Read size to change the block device size per container.
+	for key, val := range storageOpt {
+		key := strings.ToLower(key)
+		switch key {
+		case "size":
+			size, err := units.RAMInBytes(val)
+			if err != nil {
+				return nil, err
+			}
+			options.size = uint64(size)
+		default:
+			return nil, fmt.Errorf("Unknown storage option: %s", key)
+		}
+	}
+	return &options, nil
+}
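Note: the new parseStorageOpt leans on units.RAMInBytes from the vendored go-units package to accept human-readable sizes. A small sketch of the conversion it performs (the sample values are invented):

package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// Values a caller might pass as a "size" storage option; samples are invented.
	for _, val := range []string{"20GB", "512m", "1.5g"} {
		n, err := units.RAMInBytes(val)
		if err != nil {
			fmt.Println(val, "->", err)
			continue
		}
		fmt.Printf("%s -> %d bytes\n", val, n)
	}
}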
44	vendor/github.com/containers/storage/drivers/zfs/zfs.go (generated, vendored)

@@ -10,10 +10,8 @@ import (
 	"strconv"
 	"strings"
 	"sync"
-	"syscall"
 	"time"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/mount"
@@ -21,6 +19,8 @@ import (
 	zfs "github.com/mistifyio/go-zfs"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
 )
 
 type zfsOptions struct {
@@ -100,6 +100,14 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
 		return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName)
 	}
 
+	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to get root uid/guid: %v", err)
+	}
+	if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil {
+		return nil, fmt.Errorf("Failed to create '%s': %v", base, err)
+	}
+
 	if err := mount.MakePrivate(base); err != nil {
 		return nil, err
 	}
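Note: the added Init code asks idtools for the remapped root owner before creating the driver's base directory. A rough sketch of that pattern with an invented user-namespace mapping (the 100000/65536 range is only an example):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// Example mapping: container root (0) is backed by host UID/GID 100000.
	uidMaps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	gidMaps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println(rootUID, rootGID) // 100000 100000

	// Create a directory owned by the remapped root, as Init does for its base dir
	// (chowning to another UID generally requires privileges).
	if err := idtools.MkdirAllAs("/tmp/zfs-example", 0700, rootUID, rootGID); err != nil {
		fmt.Println("mkdir failed:", err)
	}
}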
@@ -134,8 +142,8 @@ func parseOptions(opt []string) (zfsOptions, error) {
 }
 
 func lookupZfsDataset(rootdir string) (string, error) {
-	var stat syscall.Stat_t
-	if err := syscall.Stat(rootdir, &stat); err != nil {
+	var stat unix.Stat_t
+	if err := unix.Stat(rootdir, &stat); err != nil {
 		return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err)
 	}
 	wantedDev := stat.Dev
@@ -145,7 +153,7 @@ func lookupZfsDataset(rootdir string) (string, error) {
 		return "", err
 	}
 	for _, m := range mounts {
-		if err := syscall.Stat(m.Mountpoint, &stat); err != nil {
+		if err := unix.Stat(m.Mountpoint, &stat); err != nil {
 			logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err)
 			continue // may fail on fuse file systems
 		}
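Note: these hunks are part of the commit-wide move from the deprecated syscall package to golang.org/x/sys/unix. A minimal sketch of the same device-comparison idea outside the driver (Unix-only; the paths are arbitrary):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var rootStat, pathStat unix.Stat_t
	if err := unix.Stat("/", &rootStat); err != nil {
		fmt.Println("stat / failed:", err)
		return
	}
	if err := unix.Stat("/tmp", &pathStat); err != nil {
		fmt.Println("stat /tmp failed:", err)
		return
	}
	// Same trick lookupZfsDataset uses: compare st_dev to see whether two
	// paths live on the same mounted filesystem.
	fmt.Println("same filesystem:", rootStat.Dev == pathStat.Dev)
}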
@@ -213,7 +221,10 @@ func (d *Driver) Status() [][2]string {
 
 // Metadata returns image/container metadata related to graph driver
 func (d *Driver) Metadata(id string) (map[string]string, error) {
-	return nil, nil
+	return map[string]string{
+		"Mountpoint": d.mountPath(id),
+		"Dataset":    d.zfsPath(id),
+	}, nil
 }
 
 func (d *Driver) cloneFilesystem(name, parentName string) error {
@@ -248,12 +259,17 @@ func (d *Driver) mountPath(id string) string {
 
 // CreateReadWrite creates a layer that is writable for use as a container
 // file system.
-func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
-	return d.Create(id, parent, mountLabel, storageOpt)
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
+	return d.Create(id, parent, opts)
 }
 
 // Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent.
-func (d *Driver) Create(id string, parent string, mountLabel string, storageOpt map[string]string) error {
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
+	var storageOpt map[string]string
+	if opts != nil {
+		storageOpt = opts.StorageOpt
+	}
+
 	err := d.create(id, parent, storageOpt)
 	if err == nil {
 		return nil
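Note: the signature change folds the old mountLabel and storageOpt parameters into a single *graphdriver.CreateOpts. A hedged sketch of what a caller now passes, assuming a driver obtained elsewhere and invented option values:

package main

import (
	"fmt"

	graphdriver "github.com/containers/storage/drivers"
)

// newLayer sketches a call against the updated Create/CreateReadWrite API.
// drv is assumed to come from graphdriver.New elsewhere; all values are invented.
func newLayer(drv graphdriver.Driver) error {
	opts := &graphdriver.CreateOpts{
		MountLabel: "system_u:object_r:container_file_t:s0",
		StorageOpt: map[string]string{"size": "20GB"},
	}
	return drv.CreateReadWrite("container-layer-id", "parent-layer-id", opts)
}

func main() {
	fmt.Println("see newLayer for the CreateOpts usage sketch")
}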
@@ -391,22 +407,20 @@ func (d *Driver) Put(id string) error {
 
 	logrus.Debugf(`[zfs] unmount("%s")`, mountpoint)
 
-	err = mount.Unmount(mountpoint)
-	if err != nil {
+	if err := mount.Unmount(mountpoint); err != nil {
 		return fmt.Errorf("error unmounting to %s: %v", mountpoint, err)
 	}
-	return err
+	return nil
 }
 
 // Exists checks to see if the cache entry exists for the given id.
 func (d *Driver) Exists(id string) bool {
 	d.Lock()
 	defer d.Unlock()
-	return d.filesystemsCache[d.zfsPath(id)] == true
+	return d.filesystemsCache[d.zfsPath(id)]
 }
 
 // AdditionalImageStores returns additional image stores supported by the driver
 func (d *Driver) AdditionalImageStores() []string {
-	var imageStores []string
-	return imageStores
+	return nil
 }
8	vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go (generated, vendored)

@@ -3,16 +3,16 @@ package zfs
 import (
 	"fmt"
 	"strings"
-	"syscall"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/storage/drivers"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
 )
 
 func checkRootdirFs(rootdir string) error {
-	var buf syscall.Statfs_t
-	if err := syscall.Statfs(rootdir, &buf); err != nil {
+	var buf unix.Statfs_t
+	if err := unix.Statfs(rootdir, &buf); err != nil {
 		return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
 	}
 
8	vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go (generated, vendored)

@@ -2,16 +2,16 @@ package zfs
 
 import (
 	"fmt"
-	"syscall"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/containers/storage/drivers"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
 )
 
 func checkRootdirFs(rootdir string) error {
-	var buf syscall.Statfs_t
-	if err := syscall.Statfs(rootdir, &buf); err != nil {
+	var buf unix.Statfs_t
+	if err := unix.Statfs(rootdir, &buf); err != nil {
 		return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
 	}
 
Some files were not shown because too many files have changed in this diff.