
Support SSE without keys (#2626)

Author:    Harshavardhana
Date:      2019-01-04 11:56:43 -08:00
Committer: kannappanr
Parent:    9ddc45162a
Commit:    59ef9fe468
29 changed files with 364 additions and 333 deletions
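At a high level, the change replaces the raw SSE-C key string that used to be threaded through the client layer with minio-go's `encrypt.ServerSide` value, so callers can express SSE-C (customer key), server-managed SSE, or no encryption (`nil`) with a single parameter. A minimal sketch of how such a value is built; `encrypt.NewSSEC` appears in the diffs below, while `encrypt.NewSSE` is assumed here to be the server-managed-key constructor from the same minio-go package, and the key bytes are placeholders:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	// SSE-C: a customer-provided 32-byte key, the case the old string parameter covered.
	ssec, err := encrypt.NewSSEC([]byte("32byteslongsecretkeymustbegiven1"))
	if err != nil {
		fmt.Println("invalid SSE-C key:", err)
		return
	}

	// SSE with server-managed keys, the "without keys" case this commit adds.
	sses3 := encrypt.NewSSE()

	// Either value (or nil for "no encryption") is what Get/Put/Stat/Copy/Select now accept.
	fmt.Println(ssec.Type(), sses3.Type())
}
```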

View File

@@ -34,12 +34,10 @@ import (
 )
 var (
-catFlags = []cli.Flag{
-cli.StringFlag{
-Name: "encrypt-key",
-Usage: "decrypt object (using server-side encryption)",
-},
-}
+// This is kept dummy for future purposes
+// and also to add ioFlags and globalFlags
+// in CLI registration.
+catFlags = []cli.Flag{}
 )
 // Display contents of a file.
@@ -48,7 +46,7 @@ var catCmd = cli.Command{
 Usage: "display object contents",
 Action: mainCat,
 Before: setGlobalsFromContext,
-Flags: append(catFlags, globalFlags...),
+Flags: append(append(catFlags, ioFlags...), globalFlags...),
 CustomHelpTemplate: `NAME:
 {{.HelpName}} - {{.Usage}}
@@ -58,12 +56,12 @@ USAGE:
 FLAGS:
 {{range .VisibleFlags}}{{.}}
 {{end}}
 ENVIRONMENT VARIABLES:
 MC_ENCRYPT_KEY: List of comma delimited prefix=secret values
 EXAMPLES:
 1. Stream an object from Amazon S3 cloud storage to mplayer standard input.
-$ {{.HelpName}} s3/ferenginar/klingon_opera_aktuh_maylotah.ogg | mplayer -
+$ {{.HelpName}} s3/mysql-backups/kubecon-mysql-operator.mpv | mplayer -
 2. Concatenate contents of file1.txt and stdin to standard output.
 $ {{.HelpName}} file1.txt - > file.txt
@@ -71,8 +69,8 @@ EXAMPLES:
 3. Concatenate multiple files to one.
 $ {{.HelpName}} part.* > complete.img
-4. Stream a server encrypted object from Amazon S3 cloud storage to standard output.
-$ {{.HelpName}} --encrypt-key 's3/ferenginar=32byteslongsecretkeymustbegiven1' s3/ferenginar/klingon_opera_aktuh_maylotah.ogg
+4. Save an encrypted object from Amazon S3 cloud storage to a local file.
+$ {{.HelpName}} --encrypt-key 's3/mysql-backups=32byteslongsecretkeymustbegiven1' s3/mysql-backups/backups-201810.gz > /mnt/data/recent.gz
 `,
 }

View File

@@ -33,6 +33,7 @@ import (
 "github.com/minio/mc/pkg/hookreader"
 "github.com/minio/mc/pkg/ioutils"
 "github.com/minio/mc/pkg/probe"
+"github.com/minio/minio-go/pkg/encrypt"
 )
 // filesystem client
@@ -96,7 +97,7 @@ func (f *fsClient) GetURL() clientURL {
 }
 // Select replies a stream of query results.
-func (f *fsClient) Select(expression, sseKey string) (io.ReadCloser, *probe.Error) {
+func (f *fsClient) Select(expression string, sse encrypt.ServerSide) (io.ReadCloser, *probe.Error) {
 return nil, probe.NewError(APINotImplemented{})
 }
@@ -326,7 +327,7 @@ func (f *fsClient) put(reader io.Reader, size int64, metadata map[string][]strin
 }
 // Put - create a new file with metadata.
-func (f *fsClient) Put(ctx context.Context, reader io.Reader, size int64, metadata map[string]string, progress io.Reader, sseKey string) (int64, *probe.Error) {
+func (f *fsClient) Put(ctx context.Context, reader io.Reader, size int64, metadata map[string]string, progress io.Reader, sse encrypt.ServerSide) (int64, *probe.Error) {
 return f.put(reader, size, nil, progress)
 }
@@ -366,7 +367,7 @@ func readFile(fpath string) (io.ReadCloser, error) {
 }
 // Copy - copy data from source to destination
-func (f *fsClient) Copy(source string, size int64, progress io.Reader, srcSSEKey, tgtSSEKey string) *probe.Error {
+func (f *fsClient) Copy(source string, size int64, progress io.Reader, srcSSE, tgtSSE encrypt.ServerSide) *probe.Error {
 destination := f.PathURL.Path
 rc, e := readFile(source)
 if e != nil {
@@ -406,7 +407,7 @@ func (f *fsClient) get() (io.ReadCloser, *probe.Error) {
 }
 // Get returns reader and any additional metadata.
-func (f *fsClient) Get(sseKey string) (io.ReadCloser, *probe.Error) {
+func (f *fsClient) Get(sse encrypt.ServerSide) (io.ReadCloser, *probe.Error) {
 return f.get()
 }
@@ -948,7 +949,7 @@ func (f *fsClient) SetAccess(access string) *probe.Error {
 }
 // Stat - get metadata from path.
-func (f *fsClient) Stat(isIncomplete, isFetchMeta bool, sseKey string) (content *clientContent, err *probe.Error) {
+func (f *fsClient) Stat(isIncomplete, isFetchMeta bool, sse encrypt.ServerSide) (content *clientContent, err *probe.Error) {
 st, err := f.fsStat(isIncomplete)
 if err != nil {
 return nil, err.Trace(f.PathURL.String())

View File

@@ -45,7 +45,7 @@ func (s *TestSuite) TestList(c *C) {
 var n int64
 n, err = fsClient.Put(context.Background(), reader, int64(len(data)), map[string]string{
 "Content-Type": "application/octet-stream",
-}, nil, "")
+}, nil, nil)
 c.Assert(err, IsNil)
 c.Assert(n, Equals, int64(len(data)))
@@ -56,7 +56,7 @@ func (s *TestSuite) TestList(c *C) {
 reader = bytes.NewReader([]byte(data))
 n, err = fsClient.Put(context.Background(), reader, int64(len(data)), map[string]string{
 "Content-Type": "application/octet-stream",
-}, nil, "")
+}, nil, nil)
 c.Assert(err, IsNil)
 c.Assert(n, Equals, int64(len(data)))
@@ -84,7 +84,7 @@ func (s *TestSuite) TestList(c *C) {
 reader = bytes.NewReader([]byte(data))
 n, err = fsClient.Put(context.Background(), reader, int64(len(data)), map[string]string{
 "Content-Type": "application/octet-stream",
-}, nil, "")
+}, nil, nil)
 c.Assert(err, IsNil)
 c.Assert(n, Equals, int64(len(data)))
@@ -144,7 +144,7 @@ func (s *TestSuite) TestList(c *C) {
 reader = bytes.NewReader([]byte(data))
 n, err = fsClient.Put(context.Background(), reader, int64(len(data)), map[string]string{
 "Content-Type": "application/octet-stream",
-}, nil, "")
+}, nil, nil)
 c.Assert(err, IsNil)
 c.Assert(n, Equals, int64(len(data)))
@@ -210,7 +210,7 @@ func (s *TestSuite) TestStatBucket(c *C) {
 c.Assert(err, IsNil)
 err = fsClient.MakeBucket("us-east-1", true)
 c.Assert(err, IsNil)
-_, err = fsClient.Stat(false, false, "")
+_, err = fsClient.Stat(false, false, nil)
 c.Assert(err, IsNil)
 }
@@ -251,7 +251,7 @@ func (s *TestSuite) TestPut(c *C) {
 var n int64
 n, err = fsClient.Put(context.Background(), reader, int64(len(data)), map[string]string{
 "Content-Type": "application/octet-stream",
-}, nil, "")
+}, nil, nil)
 c.Assert(err, IsNil)
 c.Assert(n, Equals, int64(len(data)))
 }
@@ -271,11 +271,11 @@ func (s *TestSuite) TestGet(c *C) {
 reader = bytes.NewReader([]byte(data))
 n, err := fsClient.Put(context.Background(), reader, int64(len(data)), map[string]string{
 "Content-Type": "application/octet-stream",
-}, nil, "")
+}, nil, nil)
 c.Assert(err, IsNil)
 c.Assert(n, Equals, int64(len(data)))
-reader, err = fsClient.Get("")
+reader, err = fsClient.Get(nil)
 c.Assert(err, IsNil)
 var results bytes.Buffer
 _, e = io.Copy(&results, reader)
@@ -299,11 +299,11 @@ func (s *TestSuite) TestGetRange(c *C) {
 reader = bytes.NewReader([]byte(data))
 n, err := fsClient.Put(context.Background(), reader, int64(len(data)), map[string]string{
 "Content-Type": "application/octet-stream",
-}, nil, "")
+}, nil, nil)
 c.Assert(err, IsNil)
 c.Assert(n, Equals, int64(len(data)))
-reader, err = fsClient.Get("")
+reader, err = fsClient.Get(nil)
 c.Assert(err, IsNil)
 var results bytes.Buffer
 buf := make([]byte, 5)
@@ -330,11 +330,11 @@ func (s *TestSuite) TestStatObject(c *C) {
 reader := bytes.NewReader([]byte(data))
 n, err := fsClient.Put(context.Background(), reader, int64(dataLen), map[string]string{
 "Content-Type": "application/octet-stream",
-}, nil, "")
+}, nil, nil)
 c.Assert(err, IsNil)
 c.Assert(n, Equals, int64(len(data)))
-content, err := fsClient.Stat(false, false, "")
+content, err := fsClient.Stat(false, false, nil)
 c.Assert(err, IsNil)
 c.Assert(content.Size, Equals, int64(dataLen))
 }
@@ -356,10 +356,10 @@ func (s *TestSuite) TestCopy(c *C) {
 reader = bytes.NewReader([]byte(data))
 n, err := fsClientSource.Put(context.Background(), reader, int64(len(data)), map[string]string{
 "Content-Type": "application/octet-stream",
-}, nil, "")
+}, nil, nil)
 c.Assert(err, IsNil)
 c.Assert(n, Equals, int64(len(data)))
-err = fsClientTarget.Copy(sourcePath, int64(len(data)), nil, "", "")
+err = fsClientTarget.Copy(sourcePath, int64(len(data)), nil, nil, nil)
 c.Assert(err, IsNil)
 }

View File

@@ -394,7 +394,7 @@ var supportedContentTypes = []string{
 "bzip2",
 }
-func (c *s3Client) Select(expression, sseKey string) (io.ReadCloser, *probe.Error) {
+func (c *s3Client) Select(expression string, sse encrypt.ServerSide) (io.ReadCloser, *probe.Error) {
 bucket, object := c.url2BucketAndObject()
 origContentType := mimedb.TypeByExtension(filepath.Ext(strings.TrimSuffix(strings.TrimSuffix(object, ".gz"), ".bz2")))
 contentType := mimedb.TypeByExtension(filepath.Ext(object))
@@ -412,9 +412,8 @@ func (c *s3Client) Select(expression string, sse encrypt.ServerSide) (io.ReadCloser, *probe.Error) {
 },
 }
 opts.OutputSerialization = minio.SelectObjectOutputSerialization{
-CSV: &minio.CSVOutputOptions{
+JSON: &minio.JSONOutputOptions{
 RecordDelimiter: "\n",
-FieldDelimiter: ",",
 },
 }
 } else if strings.Contains(origContentType, "json") {
@@ -425,17 +424,14 @@ func (c *s3Client) Select(expression string, sse encrypt.ServerSide) (io.ReadCloser, *probe.Error) {
 },
 }
 opts.OutputSerialization = minio.SelectObjectOutputSerialization{
-JSON: &minio.JSONOutputOptions{
+CSV: &minio.CSVOutputOptions{
 RecordDelimiter: "\n",
+FieldDelimiter: ",",
 },
 }
 }
-if sseKey != "" {
-key, err := encrypt.NewSSEC([]byte(sseKey))
-if err == nil {
-opts.ServerSideEncryption = key
-}
-}
+// Set any encryption headers
+opts.ServerSideEncryption = sse
 if strings.Contains(contentType, "gzip") {
 opts.InputSerialization.CompressionType = minio.SelectCompressionGZIP
 } else if strings.Contains(contentType, "bzip") {
@@ -572,15 +568,10 @@ func (c *s3Client) Watch(params watchParams) (*watchObject, *probe.Error) {
 }
 // Get - get object with metadata.
-func (c *s3Client) Get(sseKey string) (io.ReadCloser, *probe.Error) {
+func (c *s3Client) Get(sse encrypt.ServerSide) (io.ReadCloser, *probe.Error) {
 bucket, object := c.url2BucketAndObject()
-var opts minio.GetObjectOptions
-if sseKey != "" {
-key, err := encrypt.NewSSEC([]byte(sseKey))
-if err == nil {
-opts.ServerSideEncryption = key
-}
-}
+opts := minio.GetObjectOptions{}
+opts.ServerSideEncryption = sse
 reader, e := c.api.GetObject(bucket, object, opts)
 if e != nil {
 errResponse := minio.ToErrorResponse(e)
@@ -605,26 +596,19 @@ func (c *s3Client) Get(sse encrypt.ServerSide) (io.ReadCloser, *probe.Error) {
 // Copy - copy object, uses server side copy API. Also uses an abstracted API
 // such that large file sizes will be copied in multipart manner on server
 // side.
-func (c *s3Client) Copy(source string, size int64, progress io.Reader, srcSSEKey, tgtSSEKey string) *probe.Error {
+func (c *s3Client) Copy(source string, size int64, progress io.Reader, srcSSE, tgtSSE encrypt.ServerSide) *probe.Error {
 dstBucket, dstObject := c.url2BucketAndObject()
 if dstBucket == "" {
 return probe.NewError(BucketNameEmpty{})
 }
 tokens := splitStr(source, string(c.targetURL.Separator), 3)
-var srcKey, tgtKey encrypt.ServerSide
-if srcSSEKey != "" {
-srcKey, _ = encrypt.NewSSEC([]byte(srcSSEKey))
-}
-if tgtSSEKey != "" {
-tgtKey, _ = encrypt.NewSSEC([]byte(tgtSSEKey))
-}
 // Source object
-src := minio.NewSourceInfo(tokens[1], tokens[2], srcKey)
+src := minio.NewSourceInfo(tokens[1], tokens[2], srcSSE)
 // Destination object
-dst, e := minio.NewDestinationInfo(dstBucket, dstObject, tgtKey, nil)
+dst, e := minio.NewDestinationInfo(dstBucket, dstObject, tgtSSE, nil)
 if e != nil {
 return probe.NewError(e)
 }
@@ -655,7 +639,7 @@ func (c *s3Client) Copy(source string, size int64, progress io.Reader, srcSSE, tgtSSE encrypt.ServerSide) *probe.Error {
 }
 // Put - upload an object with custom metadata.
-func (c *s3Client) Put(ctx context.Context, reader io.Reader, size int64, metadata map[string]string, progress io.Reader, sseKey string) (int64, *probe.Error) {
+func (c *s3Client) Put(ctx context.Context, reader io.Reader, size int64, metadata map[string]string, progress io.Reader, sse encrypt.ServerSide) (int64, *probe.Error) {
 bucket, object := c.url2BucketAndObject()
 contentType, ok := metadata["Content-Type"]
 if ok {
@@ -689,10 +673,6 @@ func (c *s3Client) Put(ctx context.Context, reader io.Reader, size int64, metada
 if ok {
 delete(metadata, "X-Amz-Storage-Class")
 }
-var encryption encrypt.ServerSide
-if sseKey != "" {
-encryption, _ = encrypt.NewSSEC([]byte(sseKey))
-}
 if bucket == "" {
 return 0, probe.NewError(BucketNameEmpty{})
 }
@@ -706,7 +686,7 @@ func (c *s3Client) Put(ctx context.Context, reader io.Reader, size int64, metada
 ContentEncoding: contentEncoding,
 ContentLanguage: contentLanguage,
 StorageClass: strings.ToUpper(storageClass),
-ServerSideEncryption: encryption,
+ServerSideEncryption: sse,
 }
 n, e := c.api.PutObjectWithContext(ctx, bucket, object, reader, size, opts)
 if e != nil {
@@ -990,7 +970,7 @@ func (c *s3Client) listObjectWrapper(bucket, object string, isRecursive bool, do
 }
 // Stat - send a 'HEAD' on a bucket or object to fetch its metadata.
-func (c *s3Client) Stat(isIncomplete, isFetchMeta bool, sseKey string) (*clientContent, *probe.Error) {
+func (c *s3Client) Stat(isIncomplete, isFetchMeta bool, sse encrypt.ServerSide) (*clientContent, *probe.Error) {
 c.mutex.Lock()
 defer c.mutex.Unlock()
 bucket, object := c.url2BucketAndObject()
@@ -1055,10 +1035,7 @@ func (c *s3Client) Stat(isIncomplete, isFetchMeta bool, sse encrypt.ServerSide) (*clientContent, *probe.Error) {
 }
 opts := minio.StatObjectOptions{}
-if sseKey != "" {
-key, _ := encrypt.NewSSEC([]byte(sseKey))
-opts.ServerSideEncryption = key
-}
+opts.ServerSideEncryption = sse
 for objectStat := range c.listObjectWrapper(bucket, prefix, nonRecursive, nil) {
 if objectStat.Err != nil {
@@ -1483,7 +1460,7 @@ func (c *s3Client) listIncompleteRecursiveInRoutineDirOpt(contentCh chan *client
 } else if strings.HasSuffix(object, string(c.targetURL.Separator)) {
 // Get stat of given object is a directory.
 isIncomplete := true
-content, perr := c.Stat(isIncomplete, false, "")
+content, perr := c.Stat(isIncomplete, false, nil)
 cContent = content
 if perr != nil {
 contentCh <- &clientContent{URL: *c.targetURL, Err: perr}
@@ -1628,7 +1605,7 @@ func (c *s3Client) listRecursiveInRoutineDirOpt(contentCh chan *clientContent, d
 // Get stat of given object is a directory.
 isIncomplete := false
 isFetchMeta := false
-content, perr := c.Stat(isIncomplete, isFetchMeta, "")
+content, perr := c.Stat(isIncomplete, isFetchMeta, nil)
 cContent = content
 if perr != nil {
 contentCh <- &clientContent{URL: *c.targetURL, Err: perr}
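With the signatures above, the S3 client no longer rebuilds an SSE-C key from a string at each call site; it simply attaches whatever `encrypt.ServerSide` it was handed to the minio-go options structs (`GetObjectOptions`, `PutObjectOptions` and `StatObjectOptions` all carry a `ServerSideEncryption` field, as the diff shows). A standalone sketch of the same pattern; endpoint, credentials and object names are placeholders:

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	api, err := minio.New("localhost:9000", "ACCESS-KEY", "SECRET-KEY", false)
	if err != nil {
		log.Fatal(err)
	}

	// sse may be SSE-C, server-managed SSE, or nil; a nil value simply means
	// no encryption headers are sent.
	sse, err := encrypt.NewSSEC([]byte("32byteslongsecretkeymustbegiven1"))
	if err != nil {
		log.Fatal(err)
	}

	opts := minio.GetObjectOptions{}
	opts.ServerSideEncryption = sse

	obj, err := api.GetObject("mybucket", "myobject", opts)
	if err != nil {
		log.Fatal(err)
	}
	defer obj.Close()
	log.Println("object opened with SSE-C options attached")
}
```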

View File

@@ -222,11 +222,11 @@ func (s *TestSuite) TestObjectOperations(c *C) {
 reader = bytes.NewReader(object.data)
 n, err := s3c.Put(context.Background(), reader, int64(len(object.data)), map[string]string{
 "Content-Type": "application/octet-stream",
-}, nil, "")
+}, nil, nil)
 c.Assert(err, IsNil)
 c.Assert(n, Equals, int64(len(object.data)))
-reader, err = s3c.Get("")
+reader, err = s3c.Get(nil)
 c.Assert(err, IsNil)
 var buffer bytes.Buffer
 {

View File

@@ -177,9 +177,9 @@ func url2Stat(urlStr string, isFetchMeta bool, encKeyDB map[string][]prefixSSEPa
 return nil, nil, err.Trace(urlStr)
 }
 alias, _ := url2Alias(urlStr)
-sseKey := getSSEKey(urlStr, encKeyDB[alias])
-content, err = client.Stat(false, isFetchMeta, sseKey)
+sse := getSSE(urlStr, encKeyDB[alias])
+content, err = client.Stat(false, isFetchMeta, sse)
 if err != nil {
 return nil, nil, err.Trace(urlStr)
 }

View File

@@ -24,6 +24,7 @@ import (
 "github.com/minio/mc/pkg/probe"
 minio "github.com/minio/minio-go"
+"github.com/minio/minio-go/pkg/encrypt"
 )
 // DirOpt - list directory option.
@@ -44,7 +45,7 @@ const defaultMultipartThreadsNum = 4
 // Client - client interface
 type Client interface {
 // Common operations
-Stat(isIncomplete, isFetchMeta bool, sseKey string) (content *clientContent, err *probe.Error)
+Stat(isIncomplete, isFetchMeta bool, sse encrypt.ServerSide) (content *clientContent, err *probe.Error)
 List(isRecursive, isIncomplete bool, showDir DirOpt) <-chan *clientContent
 // Bucket operations
@@ -56,14 +57,14 @@ type Client interface {
 SetAccess(access string) *probe.Error
 // I/O operations
-Copy(source string, size int64, progress io.Reader, srcSSEKey, tgtSSEKey string) *probe.Error
+Copy(source string, size int64, progress io.Reader, srcSSE, tgtSSE encrypt.ServerSide) *probe.Error
 // Runs select expression on object storage on specific files.
-Select(expression string, sseKey string) (io.ReadCloser, *probe.Error)
+Select(expression string, sse encrypt.ServerSide) (io.ReadCloser, *probe.Error)
 // I/O operations with metadata.
-Get(sseKey string) (reader io.ReadCloser, err *probe.Error)
-Put(ctx context.Context, reader io.Reader, size int64, metadata map[string]string, progress io.Reader, sseKey string) (n int64, err *probe.Error)
+Get(sse encrypt.ServerSide) (reader io.ReadCloser, err *probe.Error)
+Put(ctx context.Context, reader io.Reader, size int64, metadata map[string]string, progress io.Reader, sse encrypt.ServerSide) (n int64, err *probe.Error)
 // I/O operations with expiration
 ShareDownload(expires time.Duration) (string, *probe.Error)
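For callers of this interface, the practical difference is that "no encryption" is now a `nil` `encrypt.ServerSide` rather than an empty string, which is why the updated tests pass `nil` throughout. A trimmed, illustrative stand-in (not the real mc `Client`) showing both call styles:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/encrypt"
)

// objectGetter is a cut-down stand-in for the Get method of the interface above.
type objectGetter interface {
	Get(sse encrypt.ServerSide) (string, error)
}

type fakeClient struct{}

func (fakeClient) Get(sse encrypt.ServerSide) (string, error) {
	if sse == nil {
		return "plain read, no SSE headers sent", nil
	}
	return fmt.Sprintf("read with %s encryption headers", sse.Type()), nil
}

func main() {
	var c objectGetter = fakeClient{}

	plain, _ := c.Get(nil) // unencrypted read, as in the updated tests
	fmt.Println(plain)

	key, err := encrypt.NewSSEC([]byte("32byteslongsecretkeymustbegiven1"))
	if err != nil {
		panic(err)
	}
	enc, _ := c.Get(key) // SSE-C read
	fmt.Println(enc)
}
```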

View File

@@ -29,19 +29,29 @@ import (
 "github.com/minio/cli"
 "github.com/minio/mc/pkg/probe"
+"github.com/minio/minio-go/pkg/encrypt"
 )
 // parse and return encryption key pairs per alias.
 func getEncKeys(ctx *cli.Context) (map[string][]prefixSSEPair, *probe.Error) {
-sseKeys := os.Getenv("MC_ENCRYPT_KEY")
-if key := ctx.String("encrypt-key"); key != "" {
-sseKeys = key
+sseServer := os.Getenv("MC_ENCRYPT")
+if prefix := ctx.String("encrypt"); prefix != "" {
+sseServer = prefix
 }
-encKeyDB, err := parseAndValidateEncryptionKeys(sseKeys)
+sseKeys := os.Getenv("MC_ENCRYPT_KEY")
+if keyPrefix := ctx.String("encrypt-key"); keyPrefix != "" {
+if sseServer != "" && strings.Contains(keyPrefix, sseServer) {
+return nil, errConflictSSE(sseServer, keyPrefix).Trace(ctx.Args()...)
+}
+sseKeys = keyPrefix
+}
+encKeyDB, err := parseAndValidateEncryptionKeys(sseKeys, sseServer)
 if err != nil {
 return nil, err.Trace(sseKeys)
 }
 return encKeyDB, nil
 }
@@ -91,7 +101,7 @@ func getSourceStreamMetadataFromURL(urlStr string, encKeyDB map[string][]prefixS
 if err != nil {
 return nil, nil, err.Trace(urlStr)
 }
-sseKey := getSSEKey(urlStr, encKeyDB[alias])
+sseKey := getSSE(urlStr, encKeyDB[alias])
 return getSourceStream(alias, urlStrFull, true, sseKey)
 }
@@ -101,24 +111,24 @@ func getSourceStreamFromURL(urlStr string, encKeyDB map[string][]prefixSSEPair)
 if err != nil {
 return nil, err.Trace(urlStr)
 }
-sseKey := getSSEKey(urlStr, encKeyDB[alias])
-reader, _, err = getSourceStream(alias, urlStrFull, false, sseKey)
+sse := getSSE(urlStr, encKeyDB[alias])
+reader, _, err = getSourceStream(alias, urlStrFull, false, sse)
 return reader, err
 }
 // getSourceStream gets a reader from URL.
-func getSourceStream(alias string, urlStr string, fetchStat bool, sseKey string) (reader io.ReadCloser, metadata map[string]string, err *probe.Error) {
+func getSourceStream(alias string, urlStr string, fetchStat bool, sse encrypt.ServerSide) (reader io.ReadCloser, metadata map[string]string, err *probe.Error) {
 sourceClnt, err := newClientFromAlias(alias, urlStr)
 if err != nil {
 return nil, nil, err.Trace(alias, urlStr)
 }
-reader, err = sourceClnt.Get(sseKey)
+reader, err = sourceClnt.Get(sse)
 if err != nil {
 return nil, nil, err.Trace(alias, urlStr)
 }
 metadata = make(map[string]string)
 if fetchStat {
-st, err := sourceClnt.Stat(false, true, sseKey)
+st, err := sourceClnt.Stat(false, true, sse)
 if err != nil {
 return nil, nil, err.Trace(alias, urlStr)
 }
@@ -158,12 +168,12 @@ func getSourceStream(alias string, urlStr string, fetchStat bool, sse encrypt.ServerSide) (reader io.ReadCloser, metadata map[string]string, err *probe.Error) {
 }
 // putTargetStream writes to URL from Reader.
-func putTargetStream(ctx context.Context, alias string, urlStr string, reader io.Reader, size int64, metadata map[string]string, progress io.Reader, sseKey string) (int64, *probe.Error) {
+func putTargetStream(ctx context.Context, alias string, urlStr string, reader io.Reader, size int64, metadata map[string]string, progress io.Reader, sse encrypt.ServerSide) (int64, *probe.Error) {
 targetClnt, err := newClientFromAlias(alias, urlStr)
 if err != nil {
 return 0, err.Trace(alias, urlStr)
 }
-n, err := targetClnt.Put(ctx, reader, size, metadata, progress, sseKey)
+n, err := targetClnt.Put(ctx, reader, size, metadata, progress, sse)
 if err != nil {
 return n, err.Trace(alias, urlStr)
 }
@@ -171,7 +181,7 @@ func putTargetStream(ctx context.Context, alias string, urlStr string, reader io
 }
 // putTargetStreamWithURL writes to URL from reader. If length=-1, read until EOF.
-func putTargetStreamWithURL(urlStr string, reader io.Reader, size int64, sseKey string) (int64, *probe.Error) {
+func putTargetStreamWithURL(urlStr string, reader io.Reader, size int64, sse encrypt.ServerSide) (int64, *probe.Error) {
 alias, urlStrFull, _, err := expandAlias(urlStr)
 if err != nil {
 return 0, err.Trace(alias, urlStr)
@@ -180,16 +190,16 @@ func putTargetStreamWithURL(urlStr string, reader io.Reader, size int64, sse encrypt.ServerSide) (int64, *probe.Error) {
 metadata := map[string]string{
 "Content-Type": contentType,
 }
-return putTargetStream(context.Background(), alias, urlStrFull, reader, size, metadata, nil, sseKey)
+return putTargetStream(context.Background(), alias, urlStrFull, reader, size, metadata, nil, sse)
 }
 // copySourceToTargetURL copies to targetURL from source.
-func copySourceToTargetURL(alias string, urlStr string, source string, size int64, progress io.Reader, srcSSEKey, tgtSSEKey string) *probe.Error {
+func copySourceToTargetURL(alias string, urlStr string, source string, size int64, progress io.Reader, srcSSE, tgtSSE encrypt.ServerSide) *probe.Error {
 targetClnt, err := newClientFromAlias(alias, urlStr)
 if err != nil {
 return err.Trace(alias, urlStr)
 }
-err = targetClnt.Copy(source, size, progress, srcSSEKey, tgtSSEKey)
+err = targetClnt.Copy(source, size, progress, srcSSE, tgtSSE)
 if err != nil {
 return err.Trace(alias, urlStr)
 }
@@ -199,22 +209,30 @@ func copySourceToTargetURL(alias string, urlStr string, source string, size int6
 // uploadSourceToTargetURL - uploads to targetURL from source.
 // optionally optimizes copy for object sizes <= 5GiB by using
 // server side copy operation.
-func uploadSourceToTargetURL(ctx context.Context, urls URLs, progress io.Reader) URLs {
+func uploadSourceToTargetURL(ctx context.Context, urls URLs, progress io.Reader, encKeyDB map[string][]prefixSSEPair) URLs {
 sourceAlias := urls.SourceAlias
 sourceURL := urls.SourceContent.URL
 targetAlias := urls.TargetAlias
 targetURL := urls.TargetContent.URL
 length := urls.SourceContent.Size
+sourcePath := filepath.ToSlash(filepath.Join(sourceAlias, urls.SourceContent.URL.Path))
+targetPath := filepath.ToSlash(filepath.Join(targetAlias, urls.TargetContent.URL.Path))
+srcSSE := getSSE(sourcePath, encKeyDB[sourceAlias])
+tgtSSE := getSSE(targetPath, encKeyDB[targetAlias])
 // Optimize for server side copy if the host is same.
 if sourceAlias == targetAlias {
 sourcePath := filepath.ToSlash(sourceURL.Path)
-err := copySourceToTargetURL(targetAlias, targetURL.String(), sourcePath, length, progress, urls.SrcSSEKey, urls.TgtSSEKey)
+err := copySourceToTargetURL(targetAlias, targetURL.String(), sourcePath, length, progress, srcSSE, tgtSSE)
 if err != nil {
 return urls.WithError(err.Trace(sourceURL.String()))
 }
 } else {
 // Proceed with regular stream copy.
-reader, metadata, err := getSourceStream(sourceAlias, sourceURL.String(), true, urls.SrcSSEKey)
+reader, metadata, err := getSourceStream(sourceAlias, sourceURL.String(), true, srcSSE)
 if err != nil {
 return urls.WithError(err.Trace(sourceURL.String()))
 }
@@ -225,12 +243,11 @@ func uploadSourceToTargetURL(ctx context.Context, urls URLs, progress io.Reader)
 metadata[k] = v
 }
 }
-if urls.SrcSSEKey != "" {
+if srcSSE != nil {
 delete(metadata, "X-Amz-Server-Side-Encryption-Customer-Algorithm")
 delete(metadata, "X-Amz-Server-Side-Encryption-Customer-Key-Md5")
 }
-_, err = putTargetStream(ctx, targetAlias, targetURL.String(), reader, length, metadata, progress, urls.TgtSSEKey)
+_, err = putTargetStream(ctx, targetAlias, targetURL.String(), reader, length, metadata, progress, tgtSSE)
 if err != nil {
 return urls.WithError(err.Trace(targetURL.String()))
 }
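The renamed helper `getSSE` (formerly `getSSEKey`) is not part of this diff; presumably it scans the per-alias `prefixSSEPair` list and returns the `encrypt.ServerSide` registered for a matching path prefix, or `nil` when nothing matches. A purely hypothetical sketch of that lookup — the `Prefix` and `SSE` field names and the first-match rule are guesses, not the actual mc implementation:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/minio/minio-go/pkg/encrypt"
)

// prefixSSEPairSketch is a guessed shape for mc's prefixSSEPair: a path prefix
// mapped to a ready-made ServerSide value (SSE-C from --encrypt-key, or
// server-managed SSE from --encrypt).
type prefixSSEPairSketch struct {
	Prefix string
	SSE    encrypt.ServerSide
}

// getSSESketch mimics what getSSE(resource, pairs) plausibly does: return the
// ServerSide configured for the first prefix that matches, or nil.
func getSSESketch(resource string, pairs []prefixSSEPairSketch) encrypt.ServerSide {
	for _, p := range pairs {
		if strings.HasPrefix(resource, p.Prefix) {
			return p.SSE
		}
	}
	return nil
}

func main() {
	key, _ := encrypt.NewSSEC([]byte("32byteslongsecretkeymustbegiven1")) // 32-byte key, error elided
	pairs := []prefixSSEPairSketch{{Prefix: "myminio/secret/", SSE: key}}

	fmt.Println(getSSESketch("myminio/secret/report.csv", pairs) != nil) // true: SSE applied
	fmt.Println(getSSESketch("myminio/public/readme.md", pairs) != nil)  // false: no SSE for this path
}
```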

View File

@@ -168,7 +168,7 @@ func probeS3Signature(accessKey, secretKey, url string) (string, *probe.Error) {
 return "", err
 }
-if _, err = s3Client.Stat(false, false, ""); err != nil {
+if _, err = s3Client.Stat(false, false, nil); err != nil {
 switch err.ToGoError().(type) {
 case BucketDoesNotExist:
 // Bucket doesn't exist, means signature probing worked V4.
@@ -179,7 +179,7 @@ func probeS3Signature(accessKey, secretKey, url string) (string, *probe.Error) {
 if err != nil {
 return "", err
 }
-if _, err = s3Client.Stat(false, false, ""); err != nil {
+if _, err = s3Client.Stat(false, false, nil); err != nil {
 switch err.ToGoError().(type) {
 case BucketDoesNotExist:
 // Bucket doesn't exist, means signature probing worked with V2.

View File

@@ -43,19 +43,19 @@ var (
 },
 cli.IntFlag{
 Name: "older-than",
-Usage: "copy objects older than N days",
+Usage: "copy object(s) older than N days",
 },
 cli.IntFlag{
 Name: "newer-than",
-Usage: "copy objects newer than N days",
+Usage: "copy object(s) newer than N days",
 },
 cli.StringFlag{
 Name: "storage-class, sc",
-Usage: "set storage class for object",
+Usage: "set storage class for new object(s) on target",
 },
 cli.StringFlag{
-Name: "encrypt-key",
-Usage: "encrypt/decrypt objects (using server-side encryption)",
+Name: "encrypt",
+Usage: "encrypt/decrypt objects (using server-side encryption with server managed keys)",
 },
 }
 )
@@ -66,7 +66,7 @@ var cpCmd = cli.Command{
 Usage: "copy objects",
 Action: mainCopy,
 Before: setGlobalsFromContext,
-Flags: append(cpFlags, globalFlags...),
+Flags: append(append(cpFlags, ioFlags...), globalFlags...),
 CustomHelpTemplate: `NAME:
 {{.HelpName}} - {{.Usage}}
@@ -76,9 +76,9 @@ USAGE:
 FLAGS:
 {{range .VisibleFlags}}{{.}}
 {{end}}
 ENVIRONMENT VARIABLES:
-MC_ENCRYPT_KEY: List of comma delimited prefix=secret values
+MC_ENCRYPT: list of comma delimited prefixes
+MC_ENCRYPT_KEY: list of comma delimited prefix=secret values
 EXAMPLES:
 1. Copy a list of objects from local file system to Amazon S3 cloud storage.
@@ -168,7 +168,7 @@ type ProgressReader interface {
 }
 // doCopy - Copy a singe file from source to destination
-func doCopy(ctx context.Context, cpURLs URLs, pg ProgressReader) URLs {
+func doCopy(ctx context.Context, cpURLs URLs, pg ProgressReader, encKeyDB map[string][]prefixSSEPair) URLs {
 if cpURLs.Error != nil {
 cpURLs.Error = cpURLs.Error.Trace()
 return cpURLs
@@ -193,7 +193,7 @@ func doCopy(ctx context.Context, cpURLs URLs, pg ProgressReader, encKeyDB map[string][]prefixSSEPair) URLs {
 TotalSize: cpURLs.TotalSize,
 })
 }
-return uploadSourceToTargetURL(ctx, cpURLs, pg)
+return uploadSourceToTargetURL(ctx, cpURLs, pg, encKeyDB)
 }
 // doCopyFake - Perform a fake copy to update the progress bar appropriately.
@@ -220,7 +220,8 @@ func doPrepareCopyURLs(session *sessionV8, trapCh <-chan bool, cancelCopy contex
 olderThan := session.Header.CommandIntFlags["older-than"]
 newerThan := session.Header.CommandIntFlags["newer-than"]
 encryptKeys := session.Header.CommandStringFlags["encrypt-key"]
-encKeyDB, err := parseAndValidateEncryptionKeys(encryptKeys)
+encrypt := session.Header.CommandStringFlags["encrypt"]
+encKeyDB, err := parseAndValidateEncryptionKeys(encryptKeys, encrypt)
 fatalIf(err, "Unable to parse encryption keys.")
 // Create a session data file to store the processed URLs.
@@ -290,7 +291,7 @@ func doPrepareCopyURLs(session *sessionV8, trapCh <-chan bool, cancelCopy contex
 session.Save()
 }
-func doCopySession(session *sessionV8) error {
+func doCopySession(session *sessionV8, encKeyDB map[string][]prefixSSEPair) error {
 trapCh := signalTrap(os.Interrupt, syscall.SIGTERM, syscall.SIGKILL)
 ctx, cancelCopy := context.WithCancel(context.Background())
@@ -373,7 +374,7 @@ func doCopySession(session *sessionV8, encKeyDB map[string][]prefixSSEPair) error {
 }
 } else {
 queueCh <- func() URLs {
-return doCopy(ctx, cpURLs, pg)
+return doCopy(ctx, cpURLs, pg, encKeyDB)
 }
 }
 }
@@ -457,6 +458,7 @@ func mainCopy(ctx *cli.Context) error {
 if key := ctx.String("encrypt-key"); key != "" {
 sseKeys = key
 }
+sse := ctx.String("encrypt")
 session := newSessionV8()
 session.Header.CommandType = "cp"
@@ -465,6 +467,7 @@ func mainCopy(ctx *cli.Context) error {
 session.Header.CommandIntFlags["newer-than"] = newerThan
 session.Header.CommandStringFlags["storage-class"] = storageClass
 session.Header.CommandStringFlags["encrypt-key"] = sseKeys
+session.Header.CommandStringFlags["encrypt"] = sse
 var e error
 if session.Header.RootPath, e = os.Getwd(); e != nil {
@@ -474,7 +477,7 @@ func mainCopy(ctx *cli.Context) error {
 // extract URLs.
 session.Header.CommandArgs = ctx.Args()
-e = doCopySession(session)
+e = doCopySession(session, encKeyDB)
 session.Delete()
 return e

View File

@@ -105,20 +105,11 @@ func prepareCopyURLsTypeA(sourceURL string, targetURL string, encKeyDB map[strin
 // prepareCopyContentTypeA - makes CopyURLs content for copying.
 func makeCopyContentTypeA(sourceAlias string, sourceContent *clientContent, targetAlias string, targetURL string, encKeyDB map[string][]prefixSSEPair) URLs {
 targetContent := clientContent{URL: *newClientURL(targetURL)}
-sourcePath := filepath.ToSlash(filepath.Join(sourceAlias, sourceContent.URL.Path))
-targetPath := filepath.ToSlash(filepath.Join(targetAlias, targetContent.URL.Path))
-srcSSEKey := getSSEKey(sourcePath, encKeyDB[sourceAlias])
-tgtSSEKey := getSSEKey(targetPath, encKeyDB[targetAlias])
 return URLs{
 SourceAlias: sourceAlias,
 SourceContent: sourceContent,
 TargetAlias: targetAlias,
 TargetContent: &targetContent,
-SrcSSEKey: srcSSEKey,
-TgtSSEKey: tgtSSEKey,
 }
 }

View File

@@ -460,7 +460,7 @@ func getShareURL(path string) string {
 clnt, err := newClientFromAlias(targetAlias, targetURLFull)
 fatalIf(err.Trace(targetAlias, targetURLFull), "Unable to initialize client instance from alias.")
-content, err := clnt.Stat(false, false, "")
+content, err := clnt.Stat(false, false, nil)
 fatalIf(err.Trace(targetURLFull, targetAlias), "Unable to lookup file/object.")
 // Skip if its a directory.

View File

@@ -56,6 +56,14 @@ var globalFlags = []cli.Flag{
 },
 }
+// Flags common across all I/O commands such as cp, mirror, stat, pipe etc.
+var ioFlags = []cli.Flag{
+cli.StringFlag{
+Name: "encrypt-key",
+Usage: "encrypt/decrypt objects (using server-side encryption with customer provided keys)",
+},
+}
 // registerCmd registers a cli command
 func registerCmd(cmd cli.Command) {
 commands = append(commands, cmd)
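With `--encrypt-key` centralized in `ioFlags`, each I/O command only appends the shared slice when it registers, which is what the repeated `Flags: append(append(..., ioFlags...), globalFlags...)` edits in this commit do. A minimal sketch of that composition; the command name and the extra flags are placeholders:

```go
package main

import (
	"fmt"

	"github.com/minio/cli"
)

// Shared I/O flags, as introduced in flags.go above.
var ioFlags = []cli.Flag{
	cli.StringFlag{
		Name:  "encrypt-key",
		Usage: "encrypt/decrypt objects (using server-side encryption with customer provided keys)",
	},
}

// Stand-in for mc's global flags.
var globalFlags = []cli.Flag{
	cli.BoolFlag{Name: "debug", Usage: "enable verbose output"},
}

func main() {
	// Placeholder command illustrating Flags: append(append(cmdFlags, ioFlags...), globalFlags...).
	demoFlags := []cli.Flag{
		cli.BoolFlag{Name: "recursive, r", Usage: "operate recursively"},
	}
	cmd := cli.Command{
		Name:  "demo",
		Usage: "placeholder I/O command",
		Flags: append(append(demoFlags, ioFlags...), globalFlags...),
	}
	fmt.Println(cmd.Name, "registers", len(cmd.Flags), "flags") // demo registers 3 flags
}
```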

View File

@@ -34,10 +34,6 @@ import (
 var (
 headFlags = []cli.Flag{
-cli.StringFlag{
-Name: "encrypt-key",
-Usage: "decrypt object (using server-side encryption)",
-},
 cli.Int64Flag{
 Name: "n,lines",
 Usage: "print the first 'n' lines",
@@ -52,7 +48,7 @@ var headCmd = cli.Command{
 Usage: "display first 'n' lines of an object",
 Action: mainHead,
 Before: setGlobalsFromContext,
-Flags: append(headFlags, globalFlags...),
+Flags: append(append(headFlags, ioFlags...), globalFlags...),
 CustomHelpTemplate: `NAME:
 {{.HelpName}} - {{.Usage}}

View File

@@ -35,10 +35,6 @@ var (
 Name: "incomplete, I",
 Usage: "list incomplete uploads",
 },
-cli.StringFlag{
-Name: "encrypt-key",
-Usage: "encrypt/decrypt (using server-side encryption)",
-},
 }
 )
@@ -95,12 +91,8 @@ func checkListSyntax(ctx *cli.Context) {
 URLs := ctx.Args()
 isIncomplete := ctx.Bool("incomplete")
-// Parse encryption keys per command.
-encKeyDB, err := getEncKeys(ctx)
-fatalIf(err, "Unable to parse encryption keys.")
 for _, url := range URLs {
-_, _, err := url2Stat(url, false, encKeyDB)
+_, _, err := url2Stat(url, false, nil)
 if err != nil && !isURLPrefixExists(url, isIncomplete) {
 // Bucket name empty is a valid error for 'ls myminio',
 // treat it as such.
@@ -140,7 +132,7 @@ func mainList(ctx *cli.Context) error {
 fatalIf(err.Trace(targetURL), "Unable to initialize target `"+targetURL+"`.")
 var st *clientContent
-if st, err = clnt.Stat(isIncomplete, false, ""); err != nil {
+if st, err = clnt.Stat(isIncomplete, false, nil); err != nil {
 switch err.ToGoError().(type) {
 case BucketNameEmpty:
 // For aliases like ``mc ls s3`` it's acceptable to receive BucketNameEmpty error.

View File

@@ -84,8 +84,8 @@ var (
 Usage: "specify storage class for new object(s) on target",
 },
 cli.StringFlag{
-Name: "encrypt-key",
-Usage: "encrypt/decrypt object(s) using specified encryption key(s) for source and/or target aliases",
+Name: "encrypt",
+Usage: "encrypt/decrypt objects (using server-side encryption with server managed keys)",
 },
 }
 )
@@ -96,7 +96,7 @@ var mirrorCmd = cli.Command{
 Usage: "synchronize object(s) to a remote site",
 Action: mainMirror,
 Before: setGlobalsFromContext,
-Flags: append(mirrorFlags, globalFlags...),
+Flags: append(append(mirrorFlags, ioFlags...), globalFlags...),
 CustomHelpTemplate: `NAME:
 {{.HelpName}} - {{.Usage}}
@@ -106,9 +106,9 @@ USAGE:
 FLAGS:
 {{range .VisibleFlags}}{{.}}
 {{end}}
 ENVIRONMENT VARIABLES:
-MC_ENCRYPT_KEY: List of comma delimited prefix=secret values
+MC_ENCRYPT: list of comma delimited prefixes
+MC_ENCRYPT_KEY: list of comma delimited prefix=secret values
 EXAMPLES:
 1. Mirror a bucket recursively from Minio cloud storage to a bucket on Amazon S3 cloud storage.
@@ -146,7 +146,6 @@ EXAMPLES:
 11. Mirror server encrypted objects from Minio cloud storage to a bucket on Amazon S3 cloud storage
 $ {{.HelpName}} --encrypt-key "minio/photos=32byteslongsecretkeymustbegiven1,s3/archive=32byteslongsecretkeymustbegiven2" minio/photos/ s3/archive/
 `,
 }
@@ -272,7 +271,7 @@ func (mj *mirrorJob) doMirror(ctx context.Context, cancelMirror context.CancelFu
 TotalCount: sURLs.TotalCount,
 TotalSize: sURLs.TotalSize,
 })
-return uploadSourceToTargetURL(ctx, sURLs, mj.status)
+return uploadSourceToTargetURL(ctx, sURLs, mj.status, mj.encKeyDB)
 }
 // Go routine to update progress status
@@ -366,8 +365,8 @@ func (mj *mirrorJob) watchMirror(ctx context.Context, cancelMirror context.Cance
 targetAlias, expandedTargetPath, _ := mustExpandAlias(targetPath)
 targetURL := newClientURL(expandedTargetPath)
 sourcePath := filepath.ToSlash(filepath.Join(sourceAlias, sourceURL.Path))
-srcSSEKey := getSSEKey(sourcePath, mj.encKeyDB[sourceAlias])
-tgtSSEKey := getSSEKey(targetPath, mj.encKeyDB[targetAlias])
+srcSSE := getSSE(sourcePath, mj.encKeyDB[sourceAlias])
+tgtSSE := getSSE(targetPath, mj.encKeyDB[targetAlias])
 if event.Type == EventCreate {
 // we are checking if a destination file exists now, and if we only
@@ -378,8 +377,6 @@ func (mj *mirrorJob) watchMirror(ctx context.Context, cancelMirror context.Cance
 TargetAlias: targetAlias,
 TargetContent: &clientContent{URL: *targetURL},
 encKeyDB: mj.encKeyDB,
-SrcSSEKey: srcSSEKey,
-TgtSSEKey: tgtSSEKey,
 }
 if event.Size == 0 {
 sourceClient, err := newClient(aliasedPath)
@@ -388,7 +385,7 @@ func (mj *mirrorJob) watchMirror(ctx context.Context, cancelMirror context.Cance
 mj.statusCh <- mirrorURL.WithError(err)
 continue
 }
-sourceContent, err := sourceClient.Stat(false, false, srcSSEKey)
+sourceContent, err := sourceClient.Stat(false, false, srcSSE)
 if err != nil {
 // source doesn't exist anymore
 mj.statusCh <- mirrorURL.WithError(err)
@@ -402,7 +399,7 @@ func (mj *mirrorJob) watchMirror(ctx context.Context, cancelMirror context.Cance
 }
 shouldQueue := false
 if !mj.isOverwrite {
-_, err = targetClient.Stat(false, false, tgtSSEKey)
+_, err = targetClient.Stat(false, false, tgtSSE)
 if err == nil {
 continue
 } // doesn't exist
@@ -425,7 +422,7 @@ func (mj *mirrorJob) watchMirror(ctx context.Context, cancelMirror context.Cance
 mj.statusCh <- mirrorURL.WithError(err)
 return
 }
-_, err = targetClient.Stat(false, false, tgtSSEKey)
+_, err = targetClient.Stat(false, false, tgtSSE)
 if err == nil {
 continue
 } // doesn't exist
@@ -445,8 +442,6 @@ func (mj *mirrorJob) watchMirror(ctx context.Context, cancelMirror context.Cance
 SourceContent: nil,
 TargetAlias: targetAlias,
 TargetContent: &clientContent{URL: *targetURL},
-SrcSSEKey: srcSSEKey,
-TgtSSEKey: tgtSSEKey,
 encKeyDB: mj.encKeyDB,
 }
 mirrorURL.TotalCount = mj.TotalObjects

View File

@@ -18,7 +18,6 @@ package cmd
 import (
 "fmt"
-"path/filepath"
 "strings"
 "github.com/minio/cli"
@@ -136,11 +135,6 @@ func deltaSourceTarget(sourceURL, targetURL string, isFake, isOverwrite, isRemov
 continue
 }
-sourcePath := filepath.ToSlash(filepath.Join(sourceAlias, sourceClnt.GetURL().Path))
-srcSSEKey := getSSEKey(sourcePath, encKeyDB[sourceAlias])
-targetPath := filepath.ToSlash(filepath.Join(targetAlias, targetClnt.GetURL().Path))
-tgtSSEKey := getSSEKey(targetPath, encKeyDB[targetAlias])
 switch diffMsg.Diff {
 case differInNone:
 // No difference, continue.
@@ -163,8 +157,6 @@ func deltaSourceTarget(sourceURL, targetURL string, isFake, isOverwrite, isRemov
 SourceContent: sourceContent,
 TargetAlias: targetAlias,
 TargetContent: targetContent,
-SrcSSEKey: srcSSEKey,
-TgtSSEKey: tgtSSEKey,
 }
 case differInFirst:
 // Only in first, always copy.
@@ -177,8 +169,6 @@ func deltaSourceTarget(sourceURL, targetURL string, isFake, isOverwrite, isRemov
 SourceContent: sourceContent,
 TargetAlias: targetAlias,
 TargetContent: targetContent,
-SrcSSEKey: srcSSEKey,
-TgtSSEKey: tgtSSEKey,
 }
 case differInSecond:
 if !isRemove && !isFake {
@@ -187,7 +177,6 @@ func deltaSourceTarget(sourceURL, targetURL string, isFake, isOverwrite, isRemov
 URLsCh <- URLs{
 TargetAlias: targetAlias,
 TargetContent: diffMsg.secondContent,
-TgtSSEKey: tgtSSEKey,
 }
 default:
 URLsCh <- URLs{

View File

@@ -27,8 +27,8 @@ import (
 var (
 pipeFlags = []cli.Flag{
 cli.StringFlag{
-Name: "encrypt-key",
-Usage: "encrypt object (using server-side encryption)",
+Name: "encrypt",
+Usage: "encrypt objects (using server-side encryption with server managed keys)",
 },
 }
 )
@@ -39,7 +39,7 @@ var pipeCmd = cli.Command{
 Usage: "stream STDIN to an object",
 Action: mainPipe,
 Before: setGlobalsFromContext,
-Flags: append(pipeFlags, globalFlags...),
+Flags: append(append(pipeFlags, ioFlags...), globalFlags...),
 CustomHelpTemplate: `NAME:
 {{.HelpName}} - {{.Usage}}
@@ -49,9 +49,9 @@ USAGE:
 FLAGS:
 {{range .VisibleFlags}}{{.}}
 {{end}}
 ENVIRONMENT VARIABLES:
-MC_ENCRYPT_KEY: List of comma delimited prefix=secret values
+MC_ENCRYPT: list of comma delimited prefix values
+MC_ENCRYPT_KEY: list of comma delimited prefix=secret values
 EXAMPLES:
 1. Write contents of stdin to a file on local filesystem.
@@ -61,14 +61,10 @@ EXAMPLES:
 $ {{.HelpName}} s3/personalbuck/meeting-notes.txt
 3. Copy an ISO image to an object on Amazon S3 cloud storage.
-$ cat debian-8.2.iso | {{.HelpName}} s3/ferenginar/gnuos.iso
+$ cat debian-8.2.iso | {{.HelpName}} s3/opensource-isos/gnuos.iso
 4. Stream MySQL database dump to Amazon S3 directly.
-$ mysqldump -u root -p ******* accountsdb | {{.HelpName}} s3/ferenginar/backups/accountsdb-oct-9-2015.sql
+$ mysqldump -u root -p ******* accountsdb | {{.HelpName}} s3/sql-backups/backups/accountsdb-oct-9-2015.sql
-5. Stream an object to Amazon S3 cloud storage and encrypt on server.
-$ {{.HelpName}} --encrypt-key "s3/ferenginar/=32byteslongsecretkeymustbegiven1" s3/ferenginar/klingon_opera_aktuh_maylotah.ogg
 `,
 }
@@ -78,7 +74,7 @@ func pipe(targetURL string, encKeyDB map[string][]prefixSSEPair) *probe.Error {
 return catOut(os.Stdin, -1).Trace()
 }
 alias, _ := url2Alias(targetURL)
-sseKey := getSSEKey(targetURL, encKeyDB[alias])
+sseKey := getSSE(targetURL, encKeyDB[alias])
 // Stream from stdin to multiple objects until EOF.
 // Ignore size, since os.Stat() would not return proper size all the time

View File

@@ -69,10 +69,6 @@ var (
 Name: "newer-than",
 Usage: "remove objects newer than N days",
 },
-cli.StringFlag{
-Name: "encrypt-key",
-Usage: "remove encrypted object (using server-side encryption)",
-},
 }
 )
@@ -82,7 +78,7 @@ var rmCmd = cli.Command{
 Usage: "remove objects",
 Action: mainRm,
 Before: setGlobalsFromContext,
-Flags: append(rmFlags, globalFlags...),
+Flags: append(append(rmFlags, ioFlags...), globalFlags...),
 CustomHelpTemplate: `NAME:
 {{.HelpName}} - {{.Usage}}
@@ -92,7 +88,6 @@ USAGE:
 FLAGS:
 {{range .VisibleFlags}}{{.}}
 {{end}}
 ENVIRONMENT VARIABLES:
 MC_ENCRYPT_KEY: List of comma delimited prefix=secret values
@@ -121,9 +116,8 @@ EXAMPLES:
 8. Drop all incomplete uploads on 'jazz-songs' bucket.
 $ {{.HelpName}} --incomplete --recursive s3/jazz-songs/
-9. Remove an encrypted object from s3.
-$ {{.HelpName}} --encrypt-key "s3/ferenginar/=32byteslongsecretkeymustbegiven1" s3/ferenginar/1999/old-backup.tgz
+9. Remove an encrypted object from Amazon S3 cloud storage.
+$ {{.HelpName}} --encrypt-key "s3/sql-backups/=32byteslongsecretkeymustbegiven1" s3/sql-backups/1999/old-backup.tgz
 `,
 }
@@ -194,7 +188,7 @@ func removeSingle(url string, isIncomplete bool, isFake bool, olderThan int, new
 }
 isFetchMeta := true
 alias, _ := url2Alias(url)
-sseKey := getSSEKey(url, encKeyDB[alias])
+sseKey := getSSE(url, encKeyDB[alias])
 content, pErr := clnt.Stat(isIncomplete, isFetchMeta, sseKey)
 if pErr != nil {
 errorIf(pErr.Trace(url), "Failed to remove `"+url+"`.")

View File

@@ -63,7 +63,10 @@ func (b bySessionWhen) Less(i, j int) bool { return b[i].Header.When.Before(b[j]
 func sessionExecute(s *sessionV8) {
 switch s.Header.CommandType {
 case "cp":
-doCopySession(s)
+sseKeys := s.Header.CommandStringFlags["encrypt-key"]
+sseServer := s.Header.CommandStringFlags["encrypt"]
+encKeyDB, _ := parseAndValidateEncryptionKeys(sseKeys, sseServer)
+doCopySession(s, encKeyDB)
 }
 }

View File

@@ -127,7 +127,7 @@ func doShareDownloadURL(targetURL string, isRecursive bool, expiry time.Duration
 // Channel which will receive objects whose URLs need to be shared
 objectsCh := make(chan *clientContent)
-content, err := clnt.Stat(isIncomplete, isFetchMeta, "")
+content, err := clnt.Stat(isIncomplete, isFetchMeta, nil)
 if err != nil {
 return err.Trace(clnt.GetURL().String())
 }

View File

@ -37,10 +37,6 @@ var (
Name: "recursive, r", Name: "recursive, r",
Usage: "sql query recursively", Usage: "sql query recursively",
}, },
cli.StringFlag{
Name: "encrypt-key",
Usage: "encrypt/decrypt objects (using server-side encryption)",
},
} }
) )
@ -50,7 +46,7 @@ var sqlCmd = cli.Command{
Usage: "run sql queries on objects", Usage: "run sql queries on objects",
Action: mainSQL, Action: mainSQL,
Before: setGlobalsFromContext, Before: setGlobalsFromContext,
Flags: append(sqlFlags, globalFlags...), Flags: append(append(sqlFlags, ioFlags...), globalFlags...),
CustomHelpTemplate: `NAME: CustomHelpTemplate: `NAME:
{{.HelpName}} - {{.Usage}} {{.HelpName}} - {{.Usage}}
@ -67,7 +63,7 @@ EXAMPLES:
2. Run a query on an object on minio account. 2. Run a query on an object on minio account.
$ {{.HelpName}} --query "select count(s.power) from S3Object" myminio/iot-devices/power-ratio.csv $ {{.HelpName}} --query "select count(s.power) from S3Object" myminio/iot-devices/power-ratio.csv
3. Run a query on an encrypted object with client provided keys. 3. Run a query on an encrypted object with customer provided keys.
$ {{.HelpName}} --encrypt-key "myminio/iot-devices=32byteslongsecretkeymustbegiven1" \ $ {{.HelpName}} --encrypt-key "myminio/iot-devices=32byteslongsecretkeymustbegiven1" \
--query "select count(s.power) from S3Object" myminio/iot-devices/power-ratio-encrypted.csv --query "select count(s.power) from S3Object" myminio/iot-devices/power-ratio-encrypted.csv
`, `,
@ -84,7 +80,7 @@ func sqlSelect(targetURL, expression string, encKeyDB map[string][]prefixSSEPair
return err.Trace(targetURL) return err.Trace(targetURL)
} }
sseKey := getSSEKey(targetURL, encKeyDB[alias]) sseKey := getSSE(targetURL, encKeyDB[alias])
outputer, err := targetClnt.Select(expression, sseKey) outputer, err := targetClnt.Select(expression, sseKey)
if err != nil { if err != nil {
return err.Trace(targetURL, expression) return err.Trace(targetURL, expression)

View File

@ -31,10 +31,6 @@ var (
Name: "recursive, r", Name: "recursive, r",
Usage: "stat all objects recursively", Usage: "stat all objects recursively",
}, },
cli.StringFlag{
Name: "encrypt-key",
Usage: "encrypt/decrypt (using server-side encryption)",
},
} }
) )
@ -44,7 +40,7 @@ var statCmd = cli.Command{
Usage: "show object metadata", Usage: "show object metadata",
Action: mainStat, Action: mainStat,
Before: setGlobalsFromContext, Before: setGlobalsFromContext,
Flags: append(statFlags, globalFlags...), Flags: append(append(statFlags, ioFlags...), globalFlags...),
CustomHelpTemplate: `NAME: CustomHelpTemplate: `NAME:
{{.HelpName}} - {{.Usage}} {{.HelpName}} - {{.Usage}}
@ -54,7 +50,6 @@ USAGE:
FLAGS: FLAGS:
{{range .VisibleFlags}}{{.}} {{range .VisibleFlags}}{{.}}
{{end}} {{end}}
ENVIRONMENT VARIABLES: ENVIRONMENT VARIABLES:
MC_ENCRYPT_KEY: List of comma delimited prefix=secret values MC_ENCRYPT_KEY: List of comma delimited prefix=secret values
@ -68,8 +63,8 @@ EXAMPLES:
3. Stat files recursively on a local filesystem on Microsoft Windows. 3. Stat files recursively on a local filesystem on Microsoft Windows.
$ {{.HelpName}} --recursive C:\Users\Worf\ $ {{.HelpName}} --recursive C:\Users\Worf\
4. Stat files which are encrypted on the server side 4. Stat encrypted files on Amazon S3 cloud storage.
$ {{.HelpName}} --encrypt-key "s3/ferenginar=32byteslongsecretkeymustbegiven1" s3/ferenginar/klingon_opera_aktuh_maylotah.ogg $ {{.HelpName}} --encrypt-key "s3/personal-docs/=32byteslongsecretkeymustbegiven1" s3/personal-docs/2018-account_report.docx
`, `,
} }

View File

@ -111,3 +111,10 @@ var errSourceIsDir = func(URL string) *probe.Error {
msg := "Source `" + URL + "` is a folder." msg := "Source `" + URL + "` is a folder."
return probe.NewError(sourceIsDirErr(errors.New(msg))).Untrace() return probe.NewError(sourceIsDirErr(errors.New(msg))).Untrace()
} }
type conflictSSEErr error
var errConflictSSE = func(sseServer, sseKeys string) *probe.Error {
err := fmt.Errorf("SSE alias '%s' overlaps with SSE-C aliases '%s'", sseServer, sseKeys)
return probe.NewError(conflictSSEErr(err)).Untrace()
}
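
The new `errConflictSSE` error reports an overlap between a prefix passed to `--encrypt` (server managed keys) and one passed to `--encrypt-key` (customer provided keys). The check that raises it is outside this hunk; the sketch below is only a hypothetical illustration of how such a test could invoke it (`detectSSEConflict` is an assumed helper, not code from this commit).

```go
// Hypothetical sketch, not part of this commit: one way an overlap check
// between an --encrypt prefix and the already parsed --encrypt-key (SSE-C)
// prefixes of the same alias could surface errConflictSSE.
func detectSSEConflict(encMap map[string][]prefixSSEPair, ssePrefix string) *probe.Error {
	alias, _ := url2Alias(ssePrefix)
	for _, p := range encMap[alias] {
		// Either prefix containing the other means the same objects would be
		// targeted by both SSE and SSE-C, which is ambiguous.
		if strings.HasPrefix(p.Prefix, ssePrefix) || strings.HasPrefix(ssePrefix, p.Prefix) {
			return errConflictSSE(ssePrefix, p.Prefix)
		}
	}
	return nil
}
```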

View File

@ -16,7 +16,9 @@
package cmd package cmd
import "github.com/minio/mc/pkg/probe" import (
"github.com/minio/mc/pkg/probe"
)
// URLs contains source and target urls // URLs contains source and target urls
type URLs struct { type URLs struct {
@ -27,8 +29,6 @@ type URLs struct {
TotalCount int64 TotalCount int64
TotalSize int64 TotalSize int64
encKeyDB map[string][]prefixSSEPair encKeyDB map[string][]prefixSSEPair
SrcSSEKey string
TgtSSEKey string
Error *probe.Error `json:"-"` Error *probe.Error `json:"-"`
} }

View File

@ -28,6 +28,7 @@ import (
"time" "time"
"github.com/minio/minio-go" "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/mc/pkg/console" "github.com/minio/mc/pkg/console"
"github.com/minio/mc/pkg/probe" "github.com/minio/mc/pkg/probe"
@ -187,23 +188,29 @@ func getLookupType(l string) minio.BucketLookupType {
// struct representing object prefix and sse keys association. // struct representing object prefix and sse keys association.
type prefixSSEPair struct { type prefixSSEPair struct {
prefix string Prefix string
sseKey string SSE encrypt.ServerSide
} }
// parse and validate encryption keys entered on command line // parse and validate encryption keys entered on command line
func parseAndValidateEncryptionKeys(sseKeys string) (encMap map[string][]prefixSSEPair, err *probe.Error) { func parseAndValidateEncryptionKeys(sseKeys string, sse string) (encMap map[string][]prefixSSEPair, err *probe.Error) {
if sseKeys == "" {
return
}
encMap, err = parseEncryptionKeys(sseKeys) encMap, err = parseEncryptionKeys(sseKeys)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if sse != "" {
for _, prefix := range strings.Split(sse, ",") {
alias, _ := url2Alias(prefix)
encMap[alias] = append(encMap[alias], prefixSSEPair{
Prefix: prefix,
SSE: encrypt.NewSSE(),
})
}
}
for alias, ps := range encMap { for alias, ps := range encMap {
if hostCfg := mustGetHostConfig(alias); hostCfg == nil { if hostCfg := mustGetHostConfig(alias); hostCfg == nil {
for _, p := range ps { for _, p := range ps {
return nil, probe.NewError(errors.New("sse-c prefix " + p.prefix + " has invalid alias")) return nil, probe.NewError(errors.New("SSE prefix " + p.Prefix + " has invalid alias"))
} }
} }
} }
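
With this change `parseAndValidateEncryptionKeys` merges both encryption flags into one per-alias map: `--encrypt-key` entries carry SSE-C keys, while each `--encrypt` prefix is registered with `encrypt.NewSSE()`, i.e. server managed keys that need no secret on the client. A minimal illustrative call follows; the flag values are made-up examples, and the real commands obtain them from `--encrypt-key` / `--encrypt` (or `MC_ENCRYPT_KEY` / `MC_ENCRYPT`).

```go
// Illustrative only: merge SSE-C and server-managed SSE prefixes
// into a single encryption key database.
encKeyDB, err := parseAndValidateEncryptionKeys(
	"myminio/photos=32byteslongsecretkeymustbegiven1", // SSE-C: prefix=32-byte key
	"myminio/docs",                                    // SSE with server managed keys
)
if err != nil {
	fatalIf(err.Trace(), "Unable to parse encryption keys.")
}
// encKeyDB["myminio"] now holds one prefixSSEPair carrying an SSE-C key for
// "myminio/photos" and one carrying encrypt.NewSSE() for "myminio/docs".
```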
@ -218,39 +225,47 @@ func parseEncryptionKeys(sseKeys string) (encMap map[string][]prefixSSEPair, err
return return
} }
prefix := "" prefix := ""
ssekey := ""
index := 0 // start index of prefix index := 0 // start index of prefix
vs := 0 // start index of sse-c key vs := 0 // start index of sse-c key
sseKeyLen := 32 sseKeyLen := 32
delim := 1 delim := 1
k := len(sseKeys) k := len(sseKeys)
for index < k { for index < k {
e := strings.Index(sseKeys[index:], "=") i := strings.Index(sseKeys[index:], "=")
if e == -1 { if i == -1 {
return nil, probe.NewError(errors.New("sse-c prefix should be of the form prefix1=key1,... ")) return nil, probe.NewError(errors.New("SSE-C prefix should be of the form prefix1=key1,... "))
} }
prefix = sseKeys[index : index+e] prefix = sseKeys[index : index+i]
alias, _ := url2Alias(prefix) alias, _ := url2Alias(prefix)
vs = e + 1 + index vs = i + 1 + index
if vs+32 > k { if vs+32 > k {
return nil, probe.NewError(errors.New("sse-c key should be 32 bytes long")) return nil, probe.NewError(errors.New("SSE-C key should be 32 bytes long"))
} }
ssekey = sseKeys[vs : vs+sseKeyLen]
if (vs+sseKeyLen < k) && sseKeys[vs+sseKeyLen] != ',' { if (vs+sseKeyLen < k) && sseKeys[vs+sseKeyLen] != ',' {
return nil, probe.NewError(errors.New("sse-c prefix=secret should be delimited by , and secret should be 32 bytes long")) return nil, probe.NewError(errors.New("SSE-C prefix=secret should be delimited by , and secret should be 32 bytes long"))
} }
sseKey := sseKeys[vs : vs+sseKeyLen]
if _, ok := encMap[alias]; !ok { if _, ok := encMap[alias]; !ok {
encMap[alias] = make([]prefixSSEPair, 0) encMap[alias] = make([]prefixSSEPair, 0)
} }
ps := prefixSSEPair{prefix: prefix, sseKey: ssekey} sse, e := encrypt.NewSSEC([]byte(sseKey))
encMap[alias] = append(encMap[alias], ps) if e != nil {
return nil, probe.NewError(e)
}
encMap[alias] = append(encMap[alias], prefixSSEPair{
Prefix: prefix,
SSE: sse,
})
// advance index sseKeyLen + delim bytes for the next key start // advance index sseKeyLen + delim bytes for the next key start
index = vs + sseKeyLen + delim index = vs + sseKeyLen + delim
} }
// sort encryption keys in descending order of prefix length
// Sort encryption keys in descending order of prefix length
for _, encKeys := range encMap { for _, encKeys := range encMap {
sort.Sort(byPrefixLength(encKeys)) sort.Sort(byPrefixLength(encKeys))
} }
// Success.
return encMap, nil return encMap, nil
} }
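
`parseEncryptionKeys` locates each `=` and then consumes exactly 32 bytes of secret, checking for the comma delimiter only after that fixed-length key; this is why secrets containing commas or spaces (exercised by the tests further down) still parse. A small illustrative call, with made-up prefixes and keys:

```go
// Made-up input showing the accepted format: comma separated prefix=key
// pairs where every secret is exactly 32 bytes. The parser advances by the
// fixed key length, so a comma inside a key does not break parsing.
encMap, err := parseEncryptionKeys(
	"s3/backups=32byteslongsecretkeymustbegiven1," +
		"s3/photos/2018=32byteslongsecretkey,ustbegiven2")
if err != nil {
	fatalIf(err.Trace(), "Unable to parse SSE-C keys.")
}
// encMap["s3"] ends up with both prefixes; "s3/photos/2018" sorts first
// because byPrefixLength orders entries by descending prefix length.
```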
@ -259,18 +274,18 @@ type byPrefixLength []prefixSSEPair
func (p byPrefixLength) Len() int { return len(p) } func (p byPrefixLength) Len() int { return len(p) }
func (p byPrefixLength) Less(i, j int) bool { func (p byPrefixLength) Less(i, j int) bool {
return len(p[i].prefix) > len(p[j].prefix) return len(p[i].Prefix) > len(p[j].Prefix)
} }
func (p byPrefixLength) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p byPrefixLength) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// get SSE Key if object prefix matches with given resource. // get SSE Key if object prefix matches with given resource.
func getSSEKey(resource string, encKeys []prefixSSEPair) string { func getSSE(resource string, encKeys []prefixSSEPair) encrypt.ServerSide {
for _, k := range encKeys { for _, k := range encKeys {
if strings.HasPrefix(resource, k.prefix) { if strings.HasPrefix(resource, k.Prefix) {
return k.sseKey return k.SSE
} }
} }
return "" return nil
} }
// Return true if target url is a part of a source url such as: // Return true if target url is a part of a source url such as:
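
Because each alias's entries are sorted by descending prefix length, `getSSE` above returns the most specific matching `encrypt.ServerSide`, or `nil` when nothing matches, replacing the old empty-string sentinel. A short usage sketch with made-up alias and object path:

```go
// Sketch of the per-object lookup a command performs; the URL here is
// a made-up example.
alias, _ := url2Alias("myminio/docs/2018/report.pdf")
sse := getSSE("myminio/docs/2018/report.pdf", encKeyDB[alias])
// sse is the most specific match: an SSE-C key from --encrypt-key, a server
// managed SSE from --encrypt, or nil, in which case the request is sent
// without any encryption headers.
```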

View File

@ -19,6 +19,8 @@ package cmd
import ( import (
"reflect" "reflect"
"testing" "testing"
"github.com/minio/minio-go/pkg/encrypt"
) )
func TestParseURLEnv(t *testing.T) { func TestParseURLEnv(t *testing.T) {
@ -82,15 +84,34 @@ func TestParseURLEnv(t *testing.T) {
} }
func TestParseEncryptionKeys(t *testing.T) { func TestParseEncryptionKeys(t *testing.T) {
sseKey1, err := encrypt.NewSSEC([]byte("32byteslongsecretkeymustbegiven2"))
if err != nil {
t.Fatal(err)
}
sseKey2, err := encrypt.NewSSEC([]byte("32byteslongsecretkeymustbegiven1"))
if err != nil {
t.Fatal(err)
}
sseSpaceKey1, err := encrypt.NewSSEC([]byte("32byteslongsecret mustbegiven1"))
if err != nil {
t.Fatal(err)
}
sseCommaKey1, err := encrypt.NewSSEC([]byte("32byteslongsecretkey,ustbegiven1"))
if err != nil {
t.Fatal(err)
}
testCases := []struct { testCases := []struct {
encryptionKey string encryptionKey string
expectedEncMap map[string][]prefixSSEPair expectedEncMap map[string][]prefixSSEPair
success bool success bool
}{ }{
{ {
encryptionKey: "myminio1/test2=32byteslongsecretkeymustbegiven2", encryptionKey: "myminio1/test2=32byteslongsecretkeymustbegiven2",
expectedEncMap: map[string][]prefixSSEPair{"myminio1": []prefixSSEPair{prefixSSEPair{prefix: "myminio1/test2", sseKey: "32byteslongsecretkeymustbegiven2"}}}, expectedEncMap: map[string][]prefixSSEPair{"myminio1": []prefixSSEPair{prefixSSEPair{
success: true, Prefix: "myminio1/test2",
SSE: sseKey1,
}}},
success: true,
}, },
{ {
encryptionKey: "myminio1/test2=32byteslongsecretkeymustbegiven", encryptionKey: "myminio1/test2=32byteslongsecretkeymustbegiven",
@ -98,19 +119,31 @@ func TestParseEncryptionKeys(t *testing.T) {
success: false, success: false,
}, },
{ {
encryptionKey: "myminio1/test2=32byteslongsecretkey,ustbegiven1", encryptionKey: "myminio1/test2=32byteslongsecretkey,ustbegiven1",
expectedEncMap: map[string][]prefixSSEPair{"myminio1": []prefixSSEPair{prefixSSEPair{prefix: "myminio1/test2", sseKey: "32byteslongsecretkey,ustbegiven1"}}}, expectedEncMap: map[string][]prefixSSEPair{"myminio1": []prefixSSEPair{prefixSSEPair{
success: true, Prefix: "myminio1/test2",
SSE: sseCommaKey1,
}}},
success: true,
}, },
{ {
encryptionKey: "myminio1/test2=32byteslongsecret mustbegiven1", encryptionKey: "myminio1/test2=32byteslongsecret mustbegiven1",
expectedEncMap: map[string][]prefixSSEPair{"myminio1": []prefixSSEPair{prefixSSEPair{prefix: "myminio1/test2", sseKey: "32byteslongsecret mustbegiven1"}}}, expectedEncMap: map[string][]prefixSSEPair{"myminio1": []prefixSSEPair{prefixSSEPair{
success: true, Prefix: "myminio1/test2",
SSE: sseSpaceKey1,
}}},
success: true,
}, },
{ {
encryptionKey: "myminio1/test2=32byteslongsecretkeymustbegiven2,myminio1/test1/a=32byteslongsecretkeymustbegiven1", encryptionKey: "myminio1/test2=32byteslongsecretkeymustbegiven2,myminio1/test1/a=32byteslongsecretkeymustbegiven1",
expectedEncMap: map[string][]prefixSSEPair{"myminio1": []prefixSSEPair{prefixSSEPair{prefix: "myminio1/test1/a", sseKey: "32byteslongsecretkeymustbegiven1"}, prefixSSEPair{prefix: "myminio1/test2", sseKey: "32byteslongsecretkeymustbegiven2"}}}, expectedEncMap: map[string][]prefixSSEPair{"myminio1": []prefixSSEPair{prefixSSEPair{
success: true, Prefix: "myminio1/test1/a",
SSE: sseKey2,
}, prefixSSEPair{
Prefix: "myminio1/test2",
SSE: sseKey1,
}}},
success: true,
}, },
} }
for i, testCase := range testCases { for i, testCase := range testCases {

View File

@ -266,9 +266,9 @@ USAGE:
mc ls [FLAGS] TARGET [TARGET ...] mc ls [FLAGS] TARGET [TARGET ...]
FLAGS: FLAGS:
--help, -h Show help. --recursive, -r list recursively
--recursive, -r List recursively. --incomplete, -I list incomplete uploads
--incomplete, -I List incomplete uploads. --help, -h show help
``` ```
*Example: List all buckets on https://play.minio.io:9000.* *Example: List all buckets on https://play.minio.io:9000.*
@ -292,8 +292,9 @@ USAGE:
mc mb [FLAGS] TARGET [TARGET...] mc mb [FLAGS] TARGET [TARGET...]
FLAGS: FLAGS:
--help, -h Show help. --region value specify bucket region; defaults to 'us-east-1' (default: "us-east-1")
--region "us-east-1" Specify bucket region. Defaults to us-east-1. --ignore-existing, -p ignore if bucket/directory already exists
--help, -h show help
``` ```
@ -322,11 +323,11 @@ USAGE:
mc cat [FLAGS] SOURCE [SOURCE...] mc cat [FLAGS] SOURCE [SOURCE...]
FLAGS: FLAGS:
--help, -h Show help. --encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys)
--encrypt-key value Decrypt object (using server-side encryption) --help, -h show help
ENVIRONMENT VARIABLES: ENVIRONMENT VARIABLES:
MC_ENCRYPT_KEY: List of comma delimited prefix=secret values MC_ENCRYPT_KEY: list of comma delimited prefix=secret values
``` ```
*Example: Display the contents of a text file `myobject.txt`* *Example: Display the contents of a text file `myobject.txt`*
@ -352,9 +353,10 @@ USAGE:
mc head [FLAGS] SOURCE [SOURCE...] mc head [FLAGS] SOURCE [SOURCE...]
FLAGS: FLAGS:
--help, -h Show help. -n value, --lines value print the first 'n' lines (default: 10)
--encrypt-key value decrypt object (using server-side encryption) --encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys)
-n value, --lines value print the first 'n' lines (default: 10) --help, -h show help
ENVIRONMENT VARIABLES: ENVIRONMENT VARIABLES:
MC_ENCRYPT_KEY: List of comma delimited prefix=secret values MC_ENCRYPT_KEY: List of comma delimited prefix=secret values
``` ```
@ -382,17 +384,19 @@ USAGE:
mc pipe [FLAGS] [TARGET] mc pipe [FLAGS] [TARGET]
FLAGS: FLAGS:
--help, -h Help of pipe. --encrypt value encrypt objects (using server-side encryption with server managed keys)
--encrypt-key value Encrypt object (using server-side encryption) --encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys)
--help, -h show help
ENVIRONMENT VARIABLES: ENVIRONMENT VARIABLES:
MC_ENCRYPT_KEY: List of comma delimited prefix=secret values MC_ENCRYPT: list of comma delimited prefix values
MC_ENCRYPT_KEY: list of comma delimited prefix=secret values
``` ```
*Example: Stream MySQL database dump to Amazon S3 directly.* *Example: Stream MySQL database dump to Amazon S3 directly.*
```sh ```sh
mysqldump -u root -p ******* accountsdb | mc pipe s3/ferenginar/backups/accountsdb-oct-9-2015.sql mysqldump -u root -p ******* accountsdb | mc pipe s3/sql-backups/backups/accountsdb-oct-9-2015.sql
``` ```
<a name="cp"></a> <a name="cp"></a>
@ -404,13 +408,17 @@ USAGE:
mc cp [FLAGS] SOURCE [SOURCE...] TARGET mc cp [FLAGS] SOURCE [SOURCE...] TARGET
FLAGS: FLAGS:
--recursive, -r Copy recursively. --recursive, -r copy recursively
--storage-class value, -sc value Set storage class for object. --older-than value copy object(s) older than N days (default: 0)
--help, -h Show help. --newer-than value copy object(s) newer than N days (default: 0)
--encrypt-key value Encrypt/Decrypt objects (using server-side encryption) --storage-class value, --sc value set storage class for new object(s) on target
--encrypt value encrypt/decrypt objects (using server-side encryption with server managed keys)
--encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys)
--help, -h show help
ENVIRONMENT VARIABLES: ENVIRONMENT VARIABLES:
MC_ENCRYPT_KEY: List of comma delimited prefix=secret values MC_ENCRYPT: list of comma delimited prefixes
MC_ENCRYPT_KEY: list of comma delimited prefix=secret values
``` ```
*Example: Copy a text file to an object storage.* *Example: Copy a text file to an object storage.*
@ -451,20 +459,19 @@ USAGE:
mc rm [FLAGS] TARGET [TARGET ...] mc rm [FLAGS] TARGET [TARGET ...]
FLAGS: FLAGS:
--help, -h Show help. --recursive, -r remove recursively
--recursive, -r Remove recursively. --force allow a recursive remove operation
--force Force a dangerous remove operation. --dangerous allow site-wide removal of buckets and objects
--dangerous Allow site-wide removal of buckets and objects. --incomplete, -I remove incomplete uploads
--incomplete, -I Remove an incomplete upload(s). --fake perform a fake remove operation
--fake Perform a fake remove operation. --stdin read object names from STDIN
--stdin Read object list from STDIN. --older-than value remove objects older than N days (default: 0)
--older-than value Remove objects older than N days. (default: 0) --newer-than value remove objects newer than N days (default: 0)
--newer-than value Remove objects newer than N days. (default: 0) --encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys)
--encrypt-key value Encrypt/Decrypt objects (using server-side encryption) --help, -h show help
ENVIRONMENT VARIABLES: ENVIRONMENT VARIABLES:
MC_ENCRYPT_KEY: List of comma delimited prefix=secret values MC_ENCRYPT_KEY: List of comma delimited prefix=secret values
``` ```
*Example: Remove a single object.* *Example: Remove a single object.*
@ -515,12 +522,12 @@ USAGE:
mc share [FLAGS] COMMAND mc share [FLAGS] COMMAND
FLAGS: FLAGS:
--help, -h Show help. --help, -h show help
COMMANDS: COMMANDS:
download Generate URLs for download access. download generate URLs for download access
upload Generate curl command to upload objects without requiring access/secret keys. upload generate curl command to upload objects without requiring access/secret keys
list List previously shared objects and folders. list list previously shared objects and folders
``` ```
### Sub-command `share download` - Share Download ### Sub-command `share download` - Share Download
@ -531,9 +538,9 @@ USAGE:
mc share download [FLAGS] TARGET [TARGET...] mc share download [FLAGS] TARGET [TARGET...]
FLAGS: FLAGS:
--help, -h Show help. --recursive, -r share all objects recursively
--recursive, -r Share all objects recursively. --expire value, -E value set expiry in NN[h|m|s] (default: "168h")
--expire, -E "168h" Set expiry in NN[h|m|s]. --help, -h show help
``` ```
*Example: Grant temporary access to an object with 4 hours expiry limit.* *Example: Grant temporary access to an object with 4 hours expiry limit.*
@ -555,9 +562,10 @@ USAGE:
mc share upload [FLAGS] TARGET [TARGET...] mc share upload [FLAGS] TARGET [TARGET...]
FLAGS: FLAGS:
--help, -h Show help. --recursive, -r recursively upload any object matching the prefix
--recursive, -r Recursively upload any object matching the prefix. --expire value, -E value set expiry in NN[h|m|s] (default: "168h")
--expire, -E "168h" Set expiry in NN[h|m|s]. --content-type value, -T value specify a content-type to allow
--help, -h show help
``` ```
*Example: Generate a `curl` command to enable upload access to `play/mybucket/myotherobject.txt`. User replaces `<FILE>` with the actual filename to upload* *Example: Generate a `curl` command to enable upload access to `play/mybucket/myotherobject.txt`. User replaces `<FILE>` with the actual filename to upload*
@ -590,16 +598,23 @@ USAGE:
mc mirror [FLAGS] SOURCE TARGET mc mirror [FLAGS] SOURCE TARGET
FLAGS: FLAGS:
--help, -h Show help. --overwrite overwrite object(s) on target
--force Force overwrite of an existing target(s). --fake perform a fake mirror operation
--fake Perform a fake mirror operation. --watch, -w watch and synchronize changes
--watch, -w Watch and mirror for changes. --remove remove extraneous object(s) on target
--remove Remove extraneous file(s) on target. --region value specify region when creating new bucket(s) on target (default: "us-east-1")
--storage-class value, --sc value Set storage class for object. -a preserve bucket policy rules on target bucket(s)
--encrypt-key value Encrypt/Decrypt objects (using server-side encryption) --exclude value exclude object(s) that match specified object name pattern
--older-than value filter object(s) older than N days (default: 0)
--newer-than value filter object(s) newer than N days (default: 0)
--storage-class value, --sc value specify storage class for new object(s) on target
--encrypt value encrypt/decrypt objects (using server-side encryption with server managed keys)
--encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys)
--help, -h show help
ENVIRONMENT VARIABLES: ENVIRONMENT VARIABLES:
MC_ENCRYPT_KEY: List of comma delimited prefix=secret values MC_ENCRYPT: list of comma delimited prefixes
MC_ENCRYPT_KEY: list of comma delimited prefix=secret values
``` ```
*Example: Mirror a local directory to 'mybucket' on https://play.minio.io:9000.* *Example: Mirror a local directory to 'mybucket' on https://play.minio.io:9000.*
@ -625,11 +640,21 @@ USAGE:
mc find PATH [FLAGS] mc find PATH [FLAGS]
FLAGS: FLAGS:
--help, -h Show help. --exec value spawn an external process for each matching object (see FORMAT)
--exec value Spawn an external process for each matching object (see FORMAT) --ignore value exclude objects matching the wildcard pattern
--name value Find object names matching wildcard pattern. --name value find object names matching wildcard pattern
--newer value match all objects newer than specified time in units (see UNITS)
--older value match all objects older than specified time in units (see UNITS)
--path value match directory names matching wildcard pattern
--print value print in custom format to STDOUT (see FORMAT)
--regex value match directory and object name with PCRE regex pattern
--larger value match all objects larger than specified size in units (see UNITS)
--smaller value match all objects smaller than specified size in units (see UNITS)
--maxdepth value limit directory navigation to specified depth (default: 0)
--watch monitor a specified path for newly created object(s)
... ...
... ...
--help, -h show help
``` ```
*Example: Find all jpeg images from s3 bucket and copy to minio "play/bucket" bucket continuously.* *Example: Find all jpeg images from s3 bucket and copy to minio "play/bucket" bucket continuously.*
@ -648,7 +673,7 @@ USAGE:
mc diff [FLAGS] FIRST SECOND mc diff [FLAGS] FIRST SECOND
FLAGS: FLAGS:
--help, -h Show help. --help, -h show help
``` ```
*Example: Compare a local directory and a remote object storage.* *Example: Compare a local directory and a remote object storage.*
@ -668,11 +693,11 @@ USAGE:
mc watch [FLAGS] PATH mc watch [FLAGS] PATH
FLAGS: FLAGS:
--events value Filter specific types of events. Defaults to all events by default. (default: "put,delete,get") --events value filter specific types of events, defaults to all events (default: "put,delete,get")
--prefix value Filter events for a prefix. --prefix value filter events for a prefix
--suffix value Filter events for a suffix. --suffix value filter events for a suffix
--recursive Recursively watch for events. --recursive recursively watch for events
--help, -h Show help. --help, -h show help
``` ```
*Example: Watch for all events on object storage* *Example: Watch for all events on object storage*
@ -703,12 +728,12 @@ USAGE:
mc event COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...] mc event COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...]
COMMANDS: COMMANDS:
add Add a new bucket notification. add add a new bucket notification
remove Remove a bucket notification. With '--force' can remove all bucket notifications. remove remove a bucket notification. With '--force' can remove all bucket notifications
list List bucket notifications. list list bucket notifications
FLAGS: FLAGS:
--help, -h Show help. --help, -h show help
``` ```
*Example: List all configured bucket notifications* *Example: List all configured bucket notifications*
@ -752,7 +777,7 @@ PERMISSION:
Allowed policies are: [none, download, upload, public]. Allowed policies are: [none, download, upload, public].
FLAGS: FLAGS:
--help, -h Show help. --help, -h show help
``` ```
*Example: Show current anonymous bucket policy* *Example: Show current anonymous bucket policy*
@ -795,12 +820,12 @@ USAGE:
mc session COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...] mc session COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...]
COMMANDS: COMMANDS:
list List all previously saved sessions. list list all previously saved sessions
clear Clear a previously saved session. clear clear a previously saved session
resume Resume a previously saved session. resume resume a previously saved session
FLAGS: FLAGS:
--help, -h Show help. --help, -h show help
``` ```
@ -835,12 +860,12 @@ USAGE:
mc config host COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...] mc config host COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...]
COMMANDS: COMMANDS:
add, a Add a new host to configuration file. add, a add a new host to configuration file
remove, rm Remove a host from configuration file. remove, rm remove a host from configuration file
list, ls Lists hosts in configuration file. list, ls lists hosts in configuration file
FLAGS: FLAGS:
--help, -h Show help. --help, -h show help
``` ```
*Example: Manage Config File* *Example: Manage Config File*
@ -862,9 +887,9 @@ USAGE:
mc update [FLAGS] mc update [FLAGS]
FLAGS: FLAGS:
--quiet, -q Suppress chatty console output. --quiet, -q suppress chatty console output
--json Enable JSON formatted output. --json enable JSON formatted output
--help, -h Show help. --help, -h show help
``` ```
*Example: Check for an update.* *Example: Check for an update.*
@ -883,9 +908,9 @@ USAGE:
mc version [FLAGS] mc version [FLAGS]
FLAGS: FLAGS:
--quiet, -q Suppress chatty console output. --quiet, -q suppress chatty console output
--json Enable JSON formatted output. --json enable JSON formatted output
--help, -h Show help. --help, -h show help
``` ```
*Example: Print version of mc.* *Example: Print version of mc.*
@ -905,12 +930,12 @@ USAGE:
mc stat [FLAGS] TARGET mc stat [FLAGS] TARGET
FLAGS: FLAGS:
--help, -h Show help. --recursive, -r stat all objects recursively
--recursive, -r Stat recursively. --encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys)
--encrypt-key value Encrypt/Decrypt (using server-side encryption) --help, -h show help
ENVIRONMENT VARIABLES: ENVIRONMENT VARIABLES:
MC_ENCRYPT_KEY: List of comma delimited prefix=secret values MC_ENCRYPT_KEY: List of comma delimited prefix=secret values
``` ```
*Example: Display information on a bucket named "mybucket" on https://play.minio.io:9000.* *Example: Display information on a bucket named "mybucket" on https://play.minio.io:9000.*
@ -942,7 +967,6 @@ Metadata :
*Example: Display information on objects contained in the bucket named "mybucket" on https://play.minio.io:9000.* *Example: Display information on objects contained in the bucket named "mybucket" on https://play.minio.io:9000.*
```sh ```sh
mc stat -r play/mybucket mc stat -r play/mybucket
Name : mybucket/META/textfile Name : mybucket/META/textfile

View File

@ -322,7 +322,7 @@ FLAGS:
*Example: Stream a MySQL database dump file to Amazon S3.* *Example: Stream a MySQL database dump file to Amazon S3.*
```sh ```sh
mysqldump -u root -p ******* accountsdb | mc pipe s3/ferenginar/backups/accountsdb-oct-9-2015.sql mysqldump -u root -p ******* accountsdb | mc pipe s3/sql-backups/backups/accountsdb-oct-9-2015.sql
``` ```
<a name="cp"></a> <a name="cp"></a>