diff --git a/ChangeLog.md b/ChangeLog.md
index 24693a79..6f03945c 100644
--- a/ChangeLog.md
+++ b/ChangeLog.md
@@ -1,6 +1,7 @@
 # v2.4.10
 IMPROVEMENTS
 - update go modules to latest versions
+- add `S3_REQUEST_PAYER` config parameter, see https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html for details, fix [795](https://github.com/Altinity/clickhouse-backup/issues/795)
 
 # v2.4.9
 BUG FIXES
diff --git a/ReadMe.md b/ReadMe.md
index 0e4cb99d..bf4d0efa 100644
--- a/ReadMe.md
+++ b/ReadMe.md
@@ -504,6 +504,8 @@ s3:
   object_labels: {}
   # S3_CUSTOM_STORAGE_CLASS_MAP, allow setup storage class depending on the backup name regexp pattern, format nameRegexp > className
   custom_storage_class_map: {}
+  # S3_REQUEST_PAYER, defines who pays for S3 requests, see https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html for details; the only allowed value is requester, if empty then the bucket owner pays
+  request_payer: ""
   debug: false # S3_DEBUG
 gcs:
   credentials_file: "" # GCS_CREDENTIALS_FILE
diff --git a/pkg/config/config.go b/pkg/config/config.go
index 2d6a2bc8..2f906b1f 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -135,6 +135,7 @@ type S3Config struct {
 	MaxPartsCount          int64             `yaml:"max_parts_count" envconfig:"S3_MAX_PARTS_COUNT"`
 	AllowMultipartDownload bool              `yaml:"allow_multipart_download" envconfig:"S3_ALLOW_MULTIPART_DOWNLOAD"`
 	ObjectLabels           map[string]string `yaml:"object_labels" envconfig:"S3_OBJECT_LABELS"`
+	RequestPayer           string            `yaml:"request_payer" envconfig:"S3_REQUEST_PAYER"`
 	Debug                  bool              `yaml:"debug" envconfig:"S3_DEBUG"`
 }
 
diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go
index ab8bfcda..75d11d14 100644
--- a/pkg/storage/s3.go
+++ b/pkg/storage/s3.go
@@ -203,15 +203,7 @@ func (s *S3) GetFileReader(ctx context.Context, key string) (io.ReadCloser, erro
 		Bucket: aws.String(s.Config.Bucket),
 		Key:    aws.String(path.Join(s.Config.Path, key)),
 	}
-	if s.Config.SSECustomerAlgorithm != "" {
-		params.SSECustomerAlgorithm = aws.String(s.Config.SSECustomerAlgorithm)
-	}
-	if s.Config.SSECustomerKey != "" {
-		params.SSECustomerKey = aws.String(s.Config.SSECustomerKey)
-	}
-	if s.Config.SSECustomerKeyMD5 != "" {
-		params.SSECustomerKeyMD5 = aws.String(s.Config.SSECustomerKeyMD5)
-	}
+	s.enrichGetObjectParams(params)
 	resp, err := s.client.GetObject(ctx, params)
 	if err != nil {
 		var opError *smithy.OperationError
@@ -241,6 +233,21 @@ func (s *S3) GetFileReader(ctx context.Context, key string) (io.ReadCloser, erro
 	return resp.Body, nil
 }
 
+func (s *S3) enrichGetObjectParams(params *s3.GetObjectInput) {
+	if s.Config.SSECustomerAlgorithm != "" {
+		params.SSECustomerAlgorithm = aws.String(s.Config.SSECustomerAlgorithm)
+	}
+	if s.Config.SSECustomerKey != "" {
+		params.SSECustomerKey = aws.String(s.Config.SSECustomerKey)
+	}
+	if s.Config.SSECustomerKeyMD5 != "" {
+		params.SSECustomerKeyMD5 = aws.String(s.Config.SSECustomerKeyMD5)
+	}
+	if s.Config.RequestPayer != "" {
+		params.RequestPayer = s3types.RequestPayer(s.Config.RequestPayer)
+	}
+}
+
 func (s *S3) GetFileReaderWithLocalPath(ctx context.Context, key, localPath string) (io.ReadCloser, error) {
 	/* unfortunately, multipart download require allocate additional disk space
 	and don't allow us to decompress data directly from stream */
@@ -311,6 +318,9 @@ func (s *S3) deleteKey(ctx context.Context, key string) error {
 		Bucket: aws.String(s.Config.Bucket),
 		Key:    aws.String(key),
 	}
+	if s.Config.RequestPayer != "" {
+		params.RequestPayer = s3types.RequestPayer(s.Config.RequestPayer)
+	}
 	if s.versioning {
 		objVersion, err := s.getObjectVersion(ctx, key)
 		if err != nil {
@@ -349,6 +359,9 @@ func (s *S3) getObjectVersion(ctx context.Context, key string) (*string, error)
 		Bucket: aws.String(s.Config.Bucket),
 		Key:    aws.String(key),
 	}
+	if s.Config.RequestPayer != "" {
+		params.RequestPayer = s3types.RequestPayer(s.Config.RequestPayer)
+	}
 	object, err := s.client.HeadObject(ctx, params)
 	if err != nil {
 		return nil, err
 	}
@@ -361,7 +374,7 @@ func (s *S3) StatFile(ctx context.Context, key string) (RemoteFile, error) {
 		Bucket: aws.String(s.Config.Bucket),
 		Key:    aws.String(path.Join(s.Config.Path, key)),
 	}
-	s.enrichHeadParamsWithSSE(params)
+	s.enrichHeadParams(params)
 	head, err := s.client.HeadObject(ctx, params)
 	if err != nil {
 		var opError *smithy.OperationError
@@ -440,42 +453,14 @@ func (s *S3) remotePager(ctx context.Context, s3Path string, recursive bool, pro
 func (s *S3) CopyObject(ctx context.Context, srcBucket, srcKey, dstKey string) (int64, error) {
 	dstKey = path.Join(s.Config.ObjectDiskPath, dstKey)
 	if strings.Contains(s.Config.Endpoint, "storage.googleapis.com") {
-		params := s3.CopyObjectInput{
+		params := &s3.CopyObjectInput{
 			Bucket:       aws.String(s.Config.Bucket),
 			Key:          aws.String(dstKey),
 			CopySource:   aws.String(path.Join(srcBucket, srcKey)),
 			StorageClass: s3types.StorageClass(strings.ToUpper(s.Config.StorageClass)),
 		}
-		// https://github.com/Altinity/clickhouse-backup/issues/588
-		if len(s.Config.ObjectLabels) > 0 {
-			tags := ""
-			for k, v := range s.Config.ObjectLabels {
-				if tags != "" {
-					tags += "&"
-				}
-				tags += k + "=" + v
-			}
-			params.Tagging = aws.String(tags)
-		}
-		if s.Config.SSE != "" {
-			params.ServerSideEncryption = s3types.ServerSideEncryption(s.Config.SSE)
-		}
-		if s.Config.SSEKMSKeyId != "" {
-			params.SSEKMSKeyId = aws.String(s.Config.SSEKMSKeyId)
-		}
-		if s.Config.SSECustomerAlgorithm != "" {
-			params.SSECustomerAlgorithm = aws.String(s.Config.SSECustomerAlgorithm)
-		}
-		if s.Config.SSECustomerKey != "" {
-			params.SSECustomerKey = aws.String(s.Config.SSECustomerKey)
-		}
-		if s.Config.SSECustomerKeyMD5 != "" {
-			params.SSECustomerKeyMD5 = aws.String(s.Config.SSECustomerKeyMD5)
-		}
-		if s.Config.SSEKMSEncryptionContext != "" {
-			params.SSEKMSEncryptionContext = aws.String(s.Config.SSEKMSEncryptionContext)
-		}
-		_, err := s.client.CopyObject(ctx, &params)
+		s.enrichCopyObjectParams(params)
+		_, err := s.client.CopyObject(ctx, params)
 		if err != nil {
 			return 0, err
 		}
@@ -483,7 +468,7 @@ func (s *S3) CopyObject(ctx context.Context, srcBucket, srcKey, dstKey string) (
 			Bucket: aws.String(s.Config.Bucket),
 			Key:    aws.String(dstKey),
 		}
-		s.enrichHeadParamsWithSSE(dstHeadParams)
+		s.enrichHeadParams(dstHeadParams)
 		dstObjResp, err := s.client.HeadObject(ctx, dstHeadParams)
 		if err != nil {
 			return 0, err
@@ -491,51 +476,24 @@ func (s *S3) CopyObject(ctx context.Context, srcBucket, srcKey, dstKey string) (
 		return *dstObjResp.ContentLength, nil
 	}
 	// Get the size of the source object
-	sourceObjResp, err := s.client.HeadObject(ctx, &s3.HeadObjectInput{
+	headParams := &s3.HeadObjectInput{
 		Bucket: aws.String(srcBucket),
 		Key:    aws.String(srcKey),
-	})
+	}
+	s.enrichHeadParams(headParams)
+	sourceObjResp, err := s.client.HeadObject(ctx, headParams)
 	if err != nil {
 		return 0, err
 	}
 	srcSize := *sourceObjResp.ContentLength
 	// Initiate a multipart upload
-	params := s3.CreateMultipartUploadInput{
+	createMultipartUploadParams := &s3.CreateMultipartUploadInput{
 		Bucket:       aws.String(s.Config.Bucket),
 		Key:          aws.String(dstKey),
 		StorageClass: s3types.StorageClass(strings.ToUpper(s.Config.StorageClass)),
 	}
-	// https://github.com/Altinity/clickhouse-backup/issues/588
-	if len(s.Config.ObjectLabels) > 0 {
-		tags := ""
-		for k, v := range s.Config.ObjectLabels {
-			if tags != "" {
-				tags += "&"
-			}
-			tags += k + "=" + v
-		}
-		params.Tagging = aws.String(tags)
-	}
-	if s.Config.SSE != "" {
-		params.ServerSideEncryption = s3types.ServerSideEncryption(s.Config.SSE)
-	}
-	if s.Config.SSEKMSKeyId != "" {
-		params.SSEKMSKeyId = aws.String(s.Config.SSEKMSKeyId)
-	}
-	if s.Config.SSECustomerAlgorithm != "" {
-		params.SSECustomerAlgorithm = aws.String(s.Config.SSECustomerAlgorithm)
-	}
-	if s.Config.SSECustomerKey != "" {
-		params.SSECustomerKey = aws.String(s.Config.SSECustomerKey)
-	}
-	if s.Config.SSECustomerKeyMD5 != "" {
-		params.SSECustomerKeyMD5 = aws.String(s.Config.SSECustomerKeyMD5)
-	}
-	if s.Config.SSEKMSEncryptionContext != "" {
-		params.SSEKMSEncryptionContext = aws.String(s.Config.SSEKMSEncryptionContext)
-	}
-
-	initResp, err := s.client.CreateMultipartUpload(ctx, &params)
+	s.enrichCreateMultipartUploadParams(createMultipartUploadParams)
+	initResp, err := s.client.CreateMultipartUpload(ctx, createMultipartUploadParams)
 	if err != nil {
 		return 0, err
 	}
@@ -578,14 +536,18 @@ func (s *S3) CopyObject(ctx context.Context, srcBucket, srcKey, dstKey string) (
 		copyPartErrGroup.Go(func() error {
 			defer copyPartSemaphore.Release(1)
 			// Copy the part
-			partResp, err := s.client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
+			uploadPartParams := &s3.UploadPartCopyInput{
 				Bucket:          aws.String(s.Config.Bucket),
 				Key:             aws.String(dstKey),
 				CopySource:      aws.String(srcBucket + "/" + srcKey),
 				CopySourceRange: aws.String(fmt.Sprintf("bytes=%d-%d", start, end-1)),
 				UploadId:        uploadID,
 				PartNumber:      aws.Int32(currentPartNumber),
-			})
+			}
+			if s.Config.RequestPayer != "" {
+				uploadPartParams.RequestPayer = s3types.RequestPayer(s.Config.RequestPayer)
+			}
+			partResp, err := s.client.UploadPartCopy(ctx, uploadPartParams)
 			if err != nil {
 				return err
 			}
@@ -599,11 +561,15 @@ func (s *S3) CopyObject(ctx context.Context, srcBucket, srcKey, dstKey string) (
 		})
 	}
 	if err := copyPartErrGroup.Wait(); err != nil {
-		_, abortErr := s.client.AbortMultipartUpload(context.Background(), &s3.AbortMultipartUploadInput{
+		abortParams := &s3.AbortMultipartUploadInput{
 			Bucket:   aws.String(s.Config.Bucket),
 			Key:      aws.String(dstKey),
 			UploadId: uploadID,
-		})
+		}
+		if s.Config.RequestPayer != "" {
+			abortParams.RequestPayer = s3types.RequestPayer(s.Config.RequestPayer)
+		}
+		_, abortErr := s.client.AbortMultipartUpload(context.Background(), abortParams)
 		if abortErr != nil {
 			return 0, fmt.Errorf("aborting CopyObject multipart upload: %v, original error was: %v", abortErr, err)
 		}
@@ -611,12 +577,16 @@ func (s *S3) CopyObject(ctx context.Context, srcBucket, srcKey, dstKey string) (
 	}
 
 	// Complete the multipart upload
-	_, err = s.client.CompleteMultipartUpload(context.Background(), &s3.CompleteMultipartUploadInput{
+	completeMultipartUploadParams := &s3.CompleteMultipartUploadInput{
 		Bucket:          aws.String(s.Config.Bucket),
 		Key:             aws.String(dstKey),
 		UploadId:        uploadID,
 		MultipartUpload: &s3types.CompletedMultipartUpload{Parts: parts},
-	})
+	}
+	if s.Config.RequestPayer != "" {
+		completeMultipartUploadParams.RequestPayer = s3types.RequestPayer(s.Config.RequestPayer)
+	}
+	_, err = s.client.CompleteMultipartUpload(context.Background(), completeMultipartUploadParams)
 	if err != nil {
 		return 0, fmt.Errorf("complete CopyObject multipart upload: %v", err)
 	}
@@ -624,8 +594,78 @@ func (s *S3) CopyObject(ctx context.Context, srcBucket, srcKey, dstKey string) (
 	return srcSize, nil
 }
 
+func (s *S3) enrichCreateMultipartUploadParams(params *s3.CreateMultipartUploadInput) {
+	if s.Config.RequestPayer != "" {
+		params.RequestPayer = s3types.RequestPayer(s.Config.RequestPayer)
+	}
+	// https://github.com/Altinity/clickhouse-backup/issues/588
+	if len(s.Config.ObjectLabels) > 0 {
+		tags := ""
+		for k, v := range s.Config.ObjectLabels {
+			if tags != "" {
+				tags += "&"
+			}
+			tags += k + "=" + v
+		}
+		params.Tagging = aws.String(tags)
+	}
+	if s.Config.SSE != "" {
+		params.ServerSideEncryption = s3types.ServerSideEncryption(s.Config.SSE)
+	}
+	if s.Config.SSEKMSKeyId != "" {
+		params.SSEKMSKeyId = aws.String(s.Config.SSEKMSKeyId)
+	}
+	if s.Config.SSECustomerAlgorithm != "" {
+		params.SSECustomerAlgorithm = aws.String(s.Config.SSECustomerAlgorithm)
+	}
+	if s.Config.SSECustomerKey != "" {
+		params.SSECustomerKey = aws.String(s.Config.SSECustomerKey)
+	}
+	if s.Config.SSECustomerKeyMD5 != "" {
+		params.SSECustomerKeyMD5 = aws.String(s.Config.SSECustomerKeyMD5)
+	}
+	if s.Config.SSEKMSEncryptionContext != "" {
+		params.SSEKMSEncryptionContext = aws.String(s.Config.SSEKMSEncryptionContext)
+	}
+}
+
+func (s *S3) enrichCopyObjectParams(params *s3.CopyObjectInput) {
+	// https://github.com/Altinity/clickhouse-backup/issues/588
+	if len(s.Config.ObjectLabels) > 0 {
+		tags := ""
+		for k, v := range s.Config.ObjectLabels {
+			if tags != "" {
+				tags += "&"
+			}
+			tags += k + "=" + v
+		}
+		params.Tagging = aws.String(tags)
+	}
+	if s.Config.SSE != "" {
+		params.ServerSideEncryption = s3types.ServerSideEncryption(s.Config.SSE)
+	}
+	if s.Config.SSEKMSKeyId != "" {
+		params.SSEKMSKeyId = aws.String(s.Config.SSEKMSKeyId)
+	}
+	if s.Config.SSECustomerAlgorithm != "" {
+		params.SSECustomerAlgorithm = aws.String(s.Config.SSECustomerAlgorithm)
+	}
+	if s.Config.SSECustomerKey != "" {
+		params.SSECustomerKey = aws.String(s.Config.SSECustomerKey)
+	}
+	if s.Config.SSECustomerKeyMD5 != "" {
+		params.SSECustomerKeyMD5 = aws.String(s.Config.SSECustomerKeyMD5)
+	}
+	if s.Config.SSEKMSEncryptionContext != "" {
+		params.SSEKMSEncryptionContext = aws.String(s.Config.SSEKMSEncryptionContext)
+	}
+	if s.Config.RequestPayer != "" {
+		params.RequestPayer = s3types.RequestPayer(s.Config.RequestPayer)
+	}
+}
+
 func (s *S3) restoreObject(ctx context.Context, key string) error {
-	restoreRequest := s3.RestoreObjectInput{
+	restoreRequest := &s3.RestoreObjectInput{
 		Bucket: aws.String(s.Config.Bucket),
 		Key:    aws.String(path.Join(s.Config.Path, key)),
 		RestoreRequest: &s3types.RestoreRequest{
@@ -635,7 +675,10 @@ func (s *S3) restoreObject(ctx context.Context, key string) error {
 			},
 		},
 	}
-	_, err := s.client.RestoreObject(ctx, &restoreRequest)
+	if s.Config.RequestPayer != "" {
+		restoreRequest.RequestPayer = s3types.RequestPayer(s.Config.RequestPayer)
+	}
+	_, err := s.client.RestoreObject(ctx, restoreRequest)
 	if err != nil {
 		return err
 	}
@@ -645,7 +688,7 @@ func (s *S3) restoreObject(ctx context.Context, key string) error {
 		Bucket: aws.String(s.Config.Bucket),
 		Key:    aws.String(path.Join(s.Config.Path, key)),
 	}
-	s.enrichHeadParamsWithSSE(restoreHeadParams)
+	s.enrichHeadParams(restoreHeadParams)
 	res, err := s.client.HeadObject(ctx, restoreHeadParams)
 	if err != nil {
 		return fmt.Errorf("restoreObject: failed to head %s object metadata, %v", path.Join(s.Config.Path, key), err)
@@ -661,7 +704,10 @@ func (s *S3) restoreObject(ctx context.Context, key string) error {
 	}
 }
 
-func (s *S3) enrichHeadParamsWithSSE(headParams *s3.HeadObjectInput) {
+func (s *S3) enrichHeadParams(headParams *s3.HeadObjectInput) {
+	if s.Config.RequestPayer != "" {
+		headParams.RequestPayer = s3types.RequestPayer(s.Config.RequestPayer)
+	}
 	if s.Config.SSECustomerAlgorithm != "" {
 		headParams.SSECustomerAlgorithm = aws.String(s.Config.SSECustomerAlgorithm)
 	}
diff --git a/test/integration/config-s3.yml b/test/integration/config-s3.yml
index 8e20fbd3..3a2878dc 100644
--- a/test/integration/config-s3.yml
+++ b/test/integration/config-s3.yml
@@ -36,6 +36,7 @@ s3:
   compression_format: tar
   allow_multipart_download: true
   concurrency: 3
+  request_payer: requester
 api:
   listen: :7171
   create_integration_tables: true
diff --git a/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot b/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot
deleted file mode 100644
index e1b8b9c5..00000000
--- a/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot
+++ /dev/null
@@ -1,6 +0,0 @@
-default_config = r"""'[\'general:\', \' remote_storage: none\', \' disable_progress_bar: true\', \' backups_to_keep_local: 0\', \' backups_to_keep_remote: 0\', \' log_level: info\', \' allow_empty_backups: false\', \' use_resumable_state: true\', \' restore_schema_on_cluster: ""\', \' upload_by_part: true\', \' download_by_part: true\', \' restore_database_mapping: {}\', \' retries_on_failure: 3\', \' retries_pause: 30s\', \' watch_interval: 1h\', \' full_interval: 24h\', \' watch_backup_name_template: shard{shard}-{type}-{time:20060102150405}\', \' sharded_operation_mode: ""\', \' cpu_nice_priority: 15\', \' io_nice_priority: idle\', \' retriesduration: 100ms\', \' watchduration: 1h0m0s\', \' fullduration: 24h0m0s\', \'clickhouse:\', \' username: default\', \' password: ""\', \' host: localhost\', \' port: 9000\', \' disk_mapping: {}\', \' skip_tables:\', \' - system.*\', \' - INFORMATION_SCHEMA.*\', \' - information_schema.*\', \' - _temporary_and_external_tables.*\', \' skip_table_engines: []\', \' timeout: 5m\', \' freeze_by_part: false\', \' freeze_by_part_where: ""\', \' use_embedded_backup_restore: false\', \' embedded_backup_disk: ""\', \' backup_mutations: true\', \' restore_as_attach: false\', \' check_parts_columns: true\', \' secure: false\', \' skip_verify: false\', \' sync_replicated_tables: false\', \' log_sql_queries: true\', \' config_dir: /etc/clickhouse-server/\', \' restart_command: exec:systemctl restart clickhouse-server\', \' ignore_not_exists_error_during_freeze: true\', \' check_replicas_before_attach: true\', \' tls_key: ""\', \' tls_cert: ""\', \' tls_ca: ""\', \' debug: false\', \'s3:\', \' access_key: ""\', \' secret_key: ""\', \' bucket: ""\', \' endpoint: ""\', \' region: us-east-1\', \' acl: private\', \' assume_role_arn: ""\', \' force_path_style: false\', \' path: ""\', \' object_disk_path: ""\', \' disable_ssl: false\', \' compression_level: 1\', \' compression_format: tar\', \' sse: ""\', \' sse_kms_key_id: ""\', \' sse_customer_algorithm: ""\', \' sse_customer_key: ""\', \' sse_customer_key_md5: ""\', \' sse_kms_encryption_context: ""\', \' disable_cert_verification: false\', \' use_custom_storage_class: false\', \' storage_class: STANDARD\', \' custom_storage_class_map: {}\', \' part_size: 0\', \' allow_multipart_download: false\', \' object_labels: {}\', \' debug: false\', \'gcs:\', \' credentials_file: ""\', \' credentials_json: ""\', \' credentials_json_encoded: ""\', \' bucket: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' debug: false\', \' force_http: false\', \' endpoint: ""\', \' storage_class: STANDARD\', \' object_labels: {}\', \' custom_storage_class_map: {}\', \'cos:\', \' url: ""\', \' timeout: 2m\', \' secret_id: ""\', \' secret_key: ""\', \' path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'api:\', \' listen: localhost:7171\', \' enable_metrics: true\', \' enable_pprof: false\', \' username: ""\', \' password: ""\', \' secure: false\', \' certificate_file: ""\', \' private_key_file: ""\', \' ca_cert_file: ""\', \' ca_key_file: ""\', \' create_integration_tables: false\', \' integration_tables_host: ""\', \' allow_parallel: false\', \' complete_resumable_after_restart: true\', \'ftp:\', \' address: ""\', \' timeout: 2m\', \' username: ""\', \' password: ""\', \' tls: false\', \' skip_tls_verify: false\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'sftp:\', \' address: ""\', \' port: 22\', \' username: ""\', \' password: ""\', \' key: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'azblob:\', \' endpoint_schema: https\', \' endpoint_suffix: core.windows.net\', \' account_name: ""\', \' account_key: ""\', \' sas: ""\', \' use_managed_identity: false\', \' container: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' sse_key: ""\', \' buffer_size: 0\', \' buffer_count: 3\', \' timeout: 15m\', \'custom:\', \' upload_command: ""\', \' download_command: ""\', \' list_command: ""\', \' delete_command: ""\', \' command_timeout: 4h\', \' commandtimeoutduration: 4h0m0s\']'"""
-
-help_flag = r"""'NAME:\n clickhouse-backup - Tool for easy backup of ClickHouse with cloud supportUSAGE:\n clickhouse-backup [-t, --tables=.] DESCRIPTION:\n Run as \'root\' or \'clickhouse\' userCOMMANDS:\n tables List of tables, exclude skip_tables\n create Create new backup\n create_remote Create and upload new backup\n upload Upload backup to remote storage\n list List of backups\n download Download backup from remote storage\n restore Create schema and restore data from backup\n restore_remote Download and restore\n delete Delete specific backup\n default-config Print default config\n print-config Print current config merged with environment variables\n clean Remove data in \'shadow\' folder from all \'path\' folders available from \'system.disks\'\n clean_remote_broken Remove all broken remote backups\n watch Run infinite loop which create full + incremental backup sequence to allow efficient backup sequences\n server Run API server\n help, h Shows a list of commands or help for one commandGLOBAL OPTIONS:\n --config value, -c value Config \'FILE\' name. (default: "/etc/clickhouse-backup/config.yml") [$CLICKHOUSE_BACKUP_CONFIG]\n --help, -h show help\n --version, -v print the version'"""
-
-cli_usage = r"""'NAME:\n clickhouse-backup - Tool for easy backup of ClickHouse with cloud supportUSAGE:\n clickhouse-backup [-t, --tables=.] DESCRIPTION:\n Run as \'root\' or \'clickhouse\' userCOMMANDS:\n tables List of tables, exclude skip_tables\n create Create new backup\n create_remote Create and upload new backup\n upload Upload backup to remote storage\n list List of backups\n download Download backup from remote storage\n restore Create schema and restore data from backup\n restore_remote Download and restore\n delete Delete specific backup\n default-config Print default config\n print-config Print current config merged with environment variables\n clean Remove data in \'shadow\' folder from all \'path\' folders available from \'system.disks\'\n clean_remote_broken Remove all broken remote backups\n watch Run infinite loop which create full + incremental backup sequence to allow efficient backup sequences\n server Run API server\n help, h Shows a list of commands or help for one commandGLOBAL OPTIONS:\n --config value, -c value Config \'FILE\' name. (default: "/etc/clickhouse-backup/config.yml") [$CLICKHOUSE_BACKUP_CONFIG]\n --help, -h show help\n --version, -v print the version'"""
-
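
A minimal sketch of how the new option would be enabled, assuming a bucket that already has Requester Pays turned on. The `request_payer` key, the `requester` value, and the `S3_REQUEST_PAYER` environment variable come from the config field and `envconfig` tag added above; the bucket name below is a hypothetical placeholder:

```yaml
# config.yml sketch: bill S3 API requests to the requester instead of the bucket owner
s3:
  bucket: "example-requester-pays-bucket"  # hypothetical bucket name
  region: "us-east-1"
  request_payer: "requester"               # leave empty ("") to keep billing the bucket owner
```

The same value can also be supplied through the environment as `S3_REQUEST_PAYER=requester`; when it is non-empty, every S3 request touched in this diff (get, head, delete, restore, copy, and the multipart upload calls) passes it through to the AWS SDK as `RequestPayer`.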