diff --git a/ChangeLog.md b/ChangeLog.md index 27ac630c..7d7464fe 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -1,3 +1,7 @@ +# v2.4.19 +BUG FIXES +- use a single s3:CopyObject call instead of s3:CreateMultipartUpload+s3:UploadPartCopy+s3:CompleteMultipartUpload for files smaller than 5GB + # v2.4.18 BUG FIXES - remove HeadObject request to calculate source key size in CopyObject, to allow cross region S3 disks backup, fix [813](https://github.com/Altinity/clickhouse-backup/issues/813) diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go index 7c7b678b..4f476a17 100644 --- a/pkg/storage/s3.go +++ b/pkg/storage/s3.go @@ -449,31 +449,8 @@ func (s *S3) remotePager(ctx context.Context, s3Path string, recursive bool, pro func (s *S3) CopyObject(ctx context.Context, srcSize int64, srcBucket, srcKey, dstKey string) (int64, error) { dstKey = path.Join(s.Config.ObjectDiskPath, dstKey) s.Log.Debugf("S3->CopyObject %s/%s -> %s/%s", srcBucket, srcKey, s.Config.Bucket, dstKey) - if strings.Contains(s.Config.Endpoint, "storage.googleapis.com") { - params := &s3.CopyObjectInput{ - Bucket: aws.String(s.Config.Bucket), - Key: aws.String(dstKey), - CopySource: aws.String(path.Join(srcBucket, srcKey)), - StorageClass: s3types.StorageClass(strings.ToUpper(s.Config.StorageClass)), - } - s.enrichCopyObjectParams(params) - _, err := s.client.CopyObject(ctx, params) - if err != nil { - return 0, err - } - dstHeadParams := &s3.HeadObjectInput{ - Bucket: aws.String(s.Config.Bucket), - Key: aws.String(dstKey), - } - s.enrichHeadParams(dstHeadParams) - dstObjResp, err := s.client.HeadObject(ctx, dstHeadParams) - if err != nil { - return 0, err - } - return *dstObjResp.ContentLength, nil - } // just copy object without multipart - if srcSize == 0 { + if srcSize < 5*1024*1024*1024 || strings.Contains(s.Config.Endpoint, "storage.googleapis.com") { params := &s3.CopyObjectInput{ Bucket: aws.String(s.Config.Bucket), Key: aws.String(dstKey),