From d5e5ea37fe54dc9a811545cdb714348678b87763 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Onur=20S=C3=B6nmez?= Date: Thu, 12 Aug 2021 10:36:01 +0300 Subject: [PATCH] command/mv: fix option usage (#338) * Some of the command options were not working in the move command because we forgot to pass cli.Context flags to the Copy struct. Use the generic NewCopy constructor for both copy/move. --- command/cp.go | 82 +++++++++++++++++++++++++++----------------------- command/mv.go | 32 +++++--------------- e2e/mv_test.go | 65 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 117 insertions(+), 62 deletions(-) diff --git a/command/cp.go b/command/cp.go index 914c34228..8d5ab0d41 100644 --- a/command/cp.go +++ b/command/cp.go @@ -72,28 +72,28 @@ Examples: > s5cmd {{.HelpName}} -n -s -u s3://bucket/source-prefix/* s3://bucket/target-prefix/ 12. Perform KMS Server Side Encryption of the object(s) at the destination - > s5cmd {{.HelpName}} --sse aws:kms s3://bucket/object s3://target-bucket/prefix/object + > s5cmd {{.HelpName}} --sse aws:kms s3://bucket/object s3://target-bucket/prefix/object 13. Perform KMS-SSE of the object(s) at the destination using customer managed Customer Master Key (CMK) key id - > s5cmd {{.HelpName}} --sse aws:kms --sse-kms-key-id s3://bucket/object s3://target-bucket/prefix/object + > s5cmd {{.HelpName}} --sse aws:kms --sse-kms-key-id s3://bucket/object s3://target-bucket/prefix/object 14. Force transfer of GLACIER objects with a prefix whether they are restored or not - > s5cmd {{.HelpName}} --force-glacier-transfer s3://bucket/prefix/* target-directory/ + > s5cmd {{.HelpName}} --force-glacier-transfer s3://bucket/prefix/* target-directory/ 15. Upload a file to S3 bucket with public read s3 acl - > s5cmd {{.HelpName}} --acl "public-read" myfile.gz s3://bucket/ + > s5cmd {{.HelpName}} --acl "public-read" myfile.gz s3://bucket/ 16. 
Upload a file to S3 bucket with expires header - > s5cmd {{.HelpName}} --expires "2024-10-01T20:30:00Z" myfile.gz s3://bucket/ + > s5cmd {{.HelpName}} --expires "2024-10-01T20:30:00Z" myfile.gz s3://bucket/ 17. Upload a file to S3 bucket with cache-control header - > s5cmd {{.HelpName}} --cache-control "public, max-age=345600" myfile.gz s3://bucket/ + > s5cmd {{.HelpName}} --cache-control "public, max-age=345600" myfile.gz s3://bucket/ - 18. Copy all files to S3 bucket but exclude the ones with txt and gz extension - > s5cmd cp --exclude "*.txt" --exclude "*.gz" dir/ s3://bucket + 18. Copy all files to S3 bucket but exclude the ones with txt and gz extension + > s5cmd {{.HelpName}} --exclude "*.txt" --exclude "*.gz" dir/ s3://bucket 19. Copy all files from S3 bucket to another S3 bucket but exclude the ones starts with log - > s5cmd cp --exclude "log*" s3://bucket/* s3://destbucket + > s5cmd {{.HelpName}} --exclude "log*" s3://bucket/* s3://destbucket ` func NewCopyCommandFlags() []cli.Flag { @@ -198,35 +198,8 @@ func NewCopyCommand() *cli.Command { Action: func(c *cli.Context) (err error) { defer stat.Collect(c.Command.FullName(), &err)() - return Copy{ - src: c.Args().Get(0), - dst: c.Args().Get(1), - op: c.Command.Name, - fullCommand: givenCommand(c), - deleteSource: false, // don't delete source - // flags - noClobber: c.Bool("no-clobber"), - ifSizeDiffer: c.Bool("if-size-differ"), - ifSourceNewer: c.Bool("if-source-newer"), - flatten: c.Bool("flatten"), - followSymlinks: !c.Bool("no-follow-symlinks"), - storageClass: storage.StorageClass(c.String("storage-class")), - concurrency: c.Int("concurrency"), - partSize: c.Int64("part-size") * megabytes, - encryptionMethod: c.String("sse"), - encryptionKeyID: c.String("sse-kms-key-id"), - acl: c.String("acl"), - forceGlacierTransfer: c.Bool("force-glacier-transfer"), - exclude: c.StringSlice("exclude"), - raw: c.Bool("raw"), - cacheControl: c.String("cache-control"), - expires: c.String("expires"), - // region 
settings - srcRegion: c.String("source-region"), - dstRegion: c.String("destination-region"), - - storageOpts: NewStorageOpts(c), - }.Run(c.Context) + // don't delete source + return NewCopy(c, false).Run(c.Context) }, } } @@ -266,6 +239,39 @@ type Copy struct { storageOpts storage.Options } +// NewCopy creates Copy from cli.Context. +func NewCopy(c *cli.Context, deleteSource bool) Copy { + return Copy{ + src: c.Args().Get(0), + dst: c.Args().Get(1), + op: c.Command.Name, + fullCommand: givenCommand(c), + deleteSource: deleteSource, + // flags + noClobber: c.Bool("no-clobber"), + ifSizeDiffer: c.Bool("if-size-differ"), + ifSourceNewer: c.Bool("if-source-newer"), + flatten: c.Bool("flatten"), + followSymlinks: !c.Bool("no-follow-symlinks"), + storageClass: storage.StorageClass(c.String("storage-class")), + concurrency: c.Int("concurrency"), + partSize: c.Int64("part-size") * megabytes, + encryptionMethod: c.String("sse"), + encryptionKeyID: c.String("sse-kms-key-id"), + acl: c.String("acl"), + forceGlacierTransfer: c.Bool("force-glacier-transfer"), + exclude: c.StringSlice("exclude"), + raw: c.Bool("raw"), + cacheControl: c.String("cache-control"), + expires: c.String("expires"), + // region settings + srcRegion: c.String("source-region"), + dstRegion: c.String("destination-region"), + + storageOpts: NewStorageOpts(c), + } +} + const fdlimitWarning = ` WARNING: s5cmd is hitting the max open file limit allowed by your OS. Either increase the open file limit or try to decrease the number of workers with diff --git a/command/mv.go b/command/mv.go index 2184a549b..fbc4e359d 100644 --- a/command/mv.go +++ b/command/mv.go @@ -2,7 +2,6 @@ package command import ( "github.com/peak/s5cmd/log/stat" - "github.com/peak/s5cmd/storage" "github.com/urfave/cli/v2" ) @@ -31,6 +30,12 @@ Examples: 5. Move a directory to S3 bucket recursively > s5cmd {{.HelpName}} dir/ s3://bucket/ + + 6. 
Move all files to S3 bucket but exclude the ones with txt and gz extension + > s5cmd {{.HelpName}} --exclude "*.txt" --exclude "*.gz" dir/ s3://bucket + + 7. Move all files from S3 bucket to another S3 bucket but exclude the ones starts with log + > s5cmd {{.HelpName}} --exclude "log*" s3://bucket/* s3://destbucket ` func NewMoveCommand() *cli.Command { @@ -46,29 +51,8 @@ func NewMoveCommand() *cli.Command { Action: func(c *cli.Context) (err error) { defer stat.Collect(c.Command.FullName(), &err)() - copyCommand := Copy{ - src: c.Args().Get(0), - dst: c.Args().Get(1), - op: c.Command.Name, - fullCommand: givenCommand(c), - deleteSource: true, // delete source - // flags - noClobber: c.Bool("no-clobber"), - ifSizeDiffer: c.Bool("if-size-differ"), - ifSourceNewer: c.Bool("if-source-newer"), - flatten: c.Bool("flatten"), - followSymlinks: !c.Bool("no-follow-symlinks"), - storageClass: storage.StorageClass(c.String("storage-class")), - encryptionMethod: c.String("sse"), - encryptionKeyID: c.String("sse-kms-key-id"), - acl: c.String("acl"), - cacheControl: c.String("cache-control"), - expires: c.String("expires"), - - storageOpts: NewStorageOpts(c), - } - - return copyCommand.Run(c.Context) + // delete source + return NewCopy(c, true).Run(c.Context) }, } } diff --git a/e2e/mv_test.go b/e2e/mv_test.go index 100703268..96589e372 100644 --- a/e2e/mv_test.go +++ b/e2e/mv_test.go @@ -3,6 +3,7 @@ package e2e import ( "fmt" "path/filepath" + "runtime" "testing" "gotest.tools/v3/assert" @@ -360,3 +361,67 @@ func TestMoveMultipleS3ObjectsToS3DryRun(t *testing.T) { assertError(t, err, errS3NoSuchKey) } } + +// mv --raw file s3://bucket/ +func TestMoveLocalObjectToS3WithRawFlag(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip() + } + + t.Parallel() + + bucket := s3BucketFromTestName(t) + + s3client, s5cmd, cleanup := setup(t) + defer cleanup() + + createBucket(t, s3client, bucket) + + objectsToMove := []fs.PathOp{ + fs.WithFile("a*.txt", "content"), + } + + otherObjects 
:= []fs.PathOp{ + fs.WithDir( + "a*b", + fs.WithFile("file.txt", "content"), + ), + + fs.WithFile("abc.txt", "content"), + } + + folderLayout := append(objectsToMove, otherObjects...) + + workdir := fs.NewDir(t, t.Name(), folderLayout...) + defer workdir.Remove() + + srcpath := filepath.ToSlash(workdir.Join("a*.txt")) + dstpath := fmt.Sprintf("s3://%v", bucket) + + cmd := s5cmd("mv", "--raw", srcpath, dstpath) + result := icmd.RunCmd(cmd) + + result.Assert(t, icmd.Success) + + assertLines(t, result.Stdout(), map[int]compareFunc{ + 0: equals("mv %v %v/a*.txt", srcpath, dstpath), + }, sortInput(true)) + + expectedObjects := []string{"a*.txt"} + for _, obj := range expectedObjects { + err := ensureS3Object(s3client, bucket, obj, "content") + if err != nil { + t.Fatalf("Object %s is not in S3\n", obj) + } + } + + nonExpectedObjects := []string{"a*b/file.txt", "abc.txt"} + for _, obj := range nonExpectedObjects { + err := ensureS3Object(s3client, bucket, obj, "content") + assertError(t, err, errS3NoSuchKey) + } + + // assert local filesystem + expected := fs.Expected(t, otherObjects...) + assert.Assert(t, fs.Equal(workdir.Path(), expected)) +}