From 45665d90b955fec7a52ef1416ee21db90ce6ed40 Mon Sep 17 00:00:00 2001
From: Slach
Date: Thu, 13 Jul 2023 20:18:41 +0500
Subject: [PATCH] - add tests for `azure_blob_storage` backup disk for
 `use_embedded_backup_restore: true`; it works, but slowly, see
 https://github.com/ClickHouse/ClickHouse/issues/52088 and
 https://github.com/Azure/Azurite/issues/2053 for details

- clean up Dockerfile for the build-race-docker and build-race-fips-docker
  parameters
---
 ChangeLog.md                                  |   8 ++
 Dockerfile                                    |   4 +-
 pkg/backup/delete.go                          |   3 +
 pkg/backup/upload.go                          |  22 ++++
 pkg/storage/azblob.go                         |   8 +-
 pkg/storage/object_disk/object_disk.go        |  18 +++-
 test/integration/config-azblob-embedded.yml   |   2 +-
 test/integration/docker-compose.yml           |  34 +++++-
 test/integration/docker-compose_advanced.yml  |  34 +++++-
 test/integration/dynamic_settings.sh          | 106 +++++++++++--------
 test/integration/integration_test.go          |  51 +++++----
 11 files changed, 205 insertions(+), 85 deletions(-)

diff --git a/ChangeLog.md b/ChangeLog.md
index f63505ba..c60d2609 100644
--- a/ChangeLog.md
+++ b/ChangeLog.md
@@ -1,3 +1,11 @@
+# v2.3.1
+IMPROVEMENTS
+- add but skip tests for `azure_blob_storage` backup disk for `use_embedded_backup_restore: true`; it works, but slowly, see https://github.com/ClickHouse/ClickHouse/issues/52088 for details
+
+BUG FIXES
+- complete success/failure server callback notification even when the main context is canceled, fix [680](https://github.com/Altinity/clickhouse-backup/pull/680)
+- `clean` command will not return an error when the shadow directory does not exist, fix [686](https://github.com/Altinity/clickhouse-backup/issues/686)
+
 # v2.3.0
 IMPROVEMENTS
 - add FIPS compatible builds and examples, fix [656](https://github.com/Altinity/clickhouse-backup/issues/656), fix [674](https://github.com/Altinity/clickhouse-backup/issues/674)
diff --git a/Dockerfile b/Dockerfile
index 5f604558..4711a1ac 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -32,9 +32,9 @@ FROM builder-base AS builder-race
 ARG TARGETPLATFORM
 COPY ./ /src/
 RUN mkdir -p ./clickhouse-backup/
-RUN --mount=type=cache,id=clickhouse-backup-gobuild,target=/root/.cache/go GOOS=$( echo ${TARGETPLATFORM} | cut -d "/" -f 1) GOARCH=$( echo ${TARGETPLATFORM} | cut -d "/" -f 2) CGO_ENABLED=1 go build -cover -buildvcs=false -ldflags "-X 'main.version=race' -extldflags '-static'" -gcflags "all=-N -l" -race -o ./clickhouse-backup/clickhouse-backup-race ./cmd/clickhouse-backup
+RUN --mount=type=cache,id=clickhouse-backup-gobuild,target=/root/.cache/go GOOS=$( echo ${TARGETPLATFORM} | cut -d "/" -f 1) GOARCH=$( echo ${TARGETPLATFORM} | cut -d "/" -f 2) CGO_ENABLED=1 go build -a -cover -buildvcs=false -ldflags "-X 'main.version=race' -extldflags '-static'" -gcflags "all=-N -l" -race -o ./clickhouse-backup/clickhouse-backup-race ./cmd/clickhouse-backup
 RUN cp -l ./clickhouse-backup/clickhouse-backup-race /bin/clickhouse-backup && ldd ./clickhouse-backup/clickhouse-backup-race
-RUN --mount=type=cache,id=clickhouse-backup-gobuild,target=/root/.cache/go GOEXPERIMENT=boringcrypto CGO_ENABLED=1 GOOS=$( echo ${TARGETPLATFORM} | cut -d "/" -f 1) GOARCH=$( echo ${TARGETPLATFORM} | cut -d "/" -f 2) CGO_ENABLED=1 go build -cover -buildvcs=false -ldflags "-X 'main.version=race-fips' -extldflags '-static'" -gcflags "all=-N -l" -race -o ./clickhouse-backup/clickhouse-backup-race-fips ./cmd/clickhouse-backup
+RUN --mount=type=cache,id=clickhouse-backup-gobuild,target=/root/.cache/go GOOS=$( echo ${TARGETPLATFORM} | cut -d "/" -f 1) GOARCH=$( echo ${TARGETPLATFORM} | cut -d "/" -f 2) GOEXPERIMENT=boringcrypto CGO_ENABLED=1 go build -cover -buildvcs=false -ldflags "-X 'main.version=race-fips' -extldflags '-static'" -gcflags "all=-N -l" -race -o ./clickhouse-backup/clickhouse-backup-race-fips ./cmd/clickhouse-backup
 RUN cp -l ./clickhouse-backup/clickhouse-backup-race-fips /bin/clickhouse-backup-fips && ldd ./clickhouse-backup/clickhouse-backup-race-fips

 COPY entrypoint.sh /entrypoint.sh
diff --git a/pkg/backup/delete.go b/pkg/backup/delete.go
index 6381dba6..e3af2029 100644
--- a/pkg/backup/delete.go
+++ b/pkg/backup/delete.go
@@ -277,6 +277,9 @@ func (b *Backuper) cleanRemoteEmbedded(ctx context.Context, backup storage.Backu
 		}
 		apexLog.Debugf("object_disk.ReadMetadataFromReader(%s)", f.Name())
 		meta, err := object_disk.ReadMetadataFromReader(r, f.Name())
+		if err != nil {
+			return err
+		}
 		for _, o := range meta.StorageObjects {
 			if err = object_disk.DeleteFile(ctx, b.cfg.ClickHouse.EmbeddedBackupDisk, o.ObjectRelativePath); err != nil {
 				return err
diff --git a/pkg/backup/upload.go b/pkg/backup/upload.go
index 8c8a0178..98ad6b19 100644
--- a/pkg/backup/upload.go
+++ b/pkg/backup/upload.go
@@ -369,6 +369,28 @@ func (b *Backuper) validateUploadParams(ctx context.Context, backupName string,
 	if (diffFrom != "" || diffFromRemote != "") && b.cfg.ClickHouse.UseEmbeddedBackupRestore {
 		log.Warnf("--diff-from and --diff-from-remote not compatible with backups created with `use_embedded_backup_restore: true`")
 	}
+
+	if b.cfg.ClickHouse.UseEmbeddedBackupRestore {
+		fatalMsg := fmt.Sprintf("`general->remote_storage: %s` `clickhouse->use_embedded_backup_restore: %v` require %s->compression_format: none, actual %%s", b.cfg.General.RemoteStorage, b.cfg.ClickHouse.UseEmbeddedBackupRestore, b.cfg.General.RemoteStorage)
+		if b.cfg.General.RemoteStorage == "s3" && b.cfg.S3.CompressionFormat != "none" {
+			log.Fatalf(fatalMsg, b.cfg.S3.CompressionFormat)
+		}
+		if b.cfg.General.RemoteStorage == "gcs" && b.cfg.GCS.CompressionFormat != "none" {
+			log.Fatalf(fatalMsg, b.cfg.GCS.CompressionFormat)
+		}
+		if b.cfg.General.RemoteStorage == "azblob" && b.cfg.AzureBlob.CompressionFormat != "none" {
+			log.Fatalf(fatalMsg, b.cfg.AzureBlob.CompressionFormat)
+		}
+		if b.cfg.General.RemoteStorage == "sftp" && b.cfg.SFTP.CompressionFormat != "none" {
+			log.Fatalf(fatalMsg, b.cfg.SFTP.CompressionFormat)
+		}
+		if b.cfg.General.RemoteStorage == "ftp" && b.cfg.FTP.CompressionFormat != "none" {
+			log.Fatalf(fatalMsg, b.cfg.FTP.CompressionFormat)
+		}
+		if b.cfg.General.RemoteStorage == "cos" && b.cfg.COS.CompressionFormat != "none" {
+			log.Fatalf(fatalMsg, b.cfg.COS.CompressionFormat)
+		}
+	}
 	if b.cfg.General.RemoteStorage == "custom" && b.resume {
 		return fmt.Errorf("can't resume for `remote_storage: custom`")
 	}
diff --git a/pkg/storage/azblob.go b/pkg/storage/azblob.go
index b46ffab3..921e3556 100644
--- a/pkg/storage/azblob.go
+++ b/pkg/storage/azblob.go
@@ -36,16 +36,16 @@ func (s *AzureBlob) Kind() string {
 // Connect - connect to Azure
 func (s *AzureBlob) Connect(ctx context.Context) error {
 	if s.Config.EndpointSuffix == "" {
-		return fmt.Errorf("endpoint suffix not set")
+		return fmt.Errorf("azblob endpoint suffix not set")
 	}
 	if s.Config.Container == "" {
-		return fmt.Errorf("container name not set")
+		return fmt.Errorf("azblob container name not set")
 	}
 	if s.Config.AccountName == "" {
-		return fmt.Errorf("account name not set")
+		return fmt.Errorf("azblob account name not set")
 	}
 	if s.Config.AccountKey == "" && s.Config.SharedAccessSignature == "" && !s.Config.UseManagedIdentity {
-		return fmt.Errorf("account key or SAS or use_managed_identity must be set")
+		return fmt.Errorf("azblob account key or SAS or use_managed_identity must be set")
 	}
 	var (
 		err error
diff --git a/pkg/storage/object_disk/object_disk.go b/pkg/storage/object_disk/object_disk.go
index 5cf2051e..c43dc9d8 100644
--- a/pkg/storage/object_disk/object_disk.go
+++ b/pkg/storage/object_disk/object_disk.go
@@ -346,7 +346,7 @@ func getObjectDisksCredentials(ctx context.Context, ch *clickhouse.ClickHouse) (
 			if containerNameNode == nil {
 				return nil, fmt.Errorf("%s -> /%s/storage_configuration/disks/%s doesn't contains <container_name>", configFile, root.Data, diskName)
 			}
-			creds.AzureAccountName = strings.Trim(accountKeyNode.InnerText(), "\r\n \t")
+			creds.AzureAccountKey = strings.Trim(accountKeyNode.InnerText(), "\r\n \t")
 			credentials[diskName] = creds
 			break
 		}
@@ -425,7 +425,12 @@ func makeObjectDiskConnection(ctx context.Context, ch *clickhouse.ClickHouse, cf
 		break
 	case "azblob":
 		connection.Type = "azure_blob_storage"
-		azureCfg := config.AzureBlobConfig{}
+		azureCfg := config.AzureBlobConfig{
+			Timeout:       "15m",
+			BufferSize:    2 * 1024 * 1024,
+			MaxBuffers:    3,
+			MaxPartsCount: 5000,
+		}
 		azureURL, err := url.Parse(creds.EndPoint)
 		if err != nil {
 			return nil, err
@@ -435,11 +440,14 @@ func makeObjectDiskConnection(ctx context.Context, ch *clickhouse.ClickHouse, cf
 			azureCfg.EndpointSchema = azureURL.Scheme
 		}
 		azureCfg.EndpointSuffix = azureURL.Host
+		if creds.AzureAccountName != "" {
+			azureCfg.AccountName = creds.AzureAccountName
+		}
 		if azureURL.Path != "" {
 			azureCfg.Path = azureURL.Path
-		}
-		if creds.AzureAccountKey != "" {
-			azureCfg.AccountName = creds.AzureAccountName
+			if azureCfg.AccountName != "" && strings.HasPrefix(azureCfg.Path, "/"+creds.AzureAccountName) {
+				azureCfg.Path = strings.TrimPrefix(azureURL.Path, "/"+creds.AzureAccountName)
+			}
 		}
 		if creds.AzureAccountKey != "" {
 			azureCfg.AccountKey = creds.AzureAccountKey
diff --git a/test/integration/config-azblob-embedded.yml b/test/integration/config-azblob-embedded.yml
index 77c632d1..7e46d38e 100644
--- a/test/integration/config-azblob-embedded.yml
+++ b/test/integration/config-azblob-embedded.yml
@@ -28,7 +28,7 @@ azblob:
   endpoint_schema: http
   container: container1
   path: backup
-  compression_format: tar
+  compression_format: none
 api:
   listen: :7171
   create_integration_tables: true
diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml
index 2a25c442..0d884f0f 100644
--- a/test/integration/docker-compose.yml
+++ b/test/integration/docker-compose.yml
@@ -1,4 +1,4 @@
-services:
+ services:
   sshd:
     image: docker.io/panubo/sshd:latest
     container_name: sshd
@@ -32,6 +32,10 @@ services:
       MINIO_DEFAULT_BUCKETS: 'clickhouse'
       MINIO_ROOT_USER: access-key
       MINIO_ROOT_PASSWORD: it-is-my-super-secret-key
+    healthcheck:
+      test: curl -sL http://localhost:9000/
+      interval: 10s
+      retries: 30
     volumes:
       - ./minio_nodelete.sh:/bin/minio_nodelete.sh
     networks:
@@ -54,9 +58,31 @@ services:
     image: mcr.microsoft.com/azure-storage/azurite:latest
     container_name: azure
     hostname: devstoreaccount1.blob.azure
+    healthcheck:
+      test: nc 127.0.0.1 10000 -z
+      interval: 1s
+      retries: 30
+    command: ["azurite", "--debug", "/data/debug.log" , "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0"]
     networks:
       - clickhouse-backup

+  # azure_init:
+  #   image: mcr.microsoft.com/azure-cli:latest
+  #   command:
+  #     - /bin/sh
+  #     - -xc
+  #     - |
+  #       az storage container create --debug --name azure-backup-disk &&
+  #       az storage container create --debug --name azure-disk
+  #   depends_on:
+  #     azure:
+  #       condition: service_healthy
+  #   environment:
+  #     # https://github.com/Azure/Azurite/blob/main/README.md#usage-with-azure-storage-sdks-or-tools
+  #     AZURE_STORAGE_CONNECTION_STRING: DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azure:10000/devstoreaccount1;
+  #   networks:
+  #     - clickhouse-backup
+
   zookeeper:
     image: docker.io/zookeeper:${ZOOKEEPER_VERSION:-latest}
     container_name: zookeeper
@@ -134,6 +160,12 @@ services:
     depends_on:
       zookeeper:
         condition: service_healthy
+      minio:
+        condition: service_healthy
+      azure:
+        condition: service_healthy
+#      azure_init:
+#        condition: service_completed_successfully

   all_services_ready:
     image: hello-world
diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml
index 42dc0ed6..16994948 100644
--- a/test/integration/docker-compose_advanced.yml
+++ b/test/integration/docker-compose_advanced.yml
@@ -43,6 +43,10 @@ services:
       MINIO_DEFAULT_BUCKETS: 'clickhouse'
       MINIO_ROOT_USER: access-key
       MINIO_ROOT_PASSWORD: it-is-my-super-secret-key
+    healthcheck:
+      test: curl -sL http://localhost:9000/
+      interval: 10s
+      retries: 30
     volumes:
       - ./minio_nodelete.sh:/bin/minio_nodelete.sh
     ports:
@@ -58,7 +62,7 @@ services:
 #      - /bin/sh
 #    command:
 #      - -c
-#      - "mkdir -p /storage/altinity-qa-test && fake-gcs-server -scheme http -port 8080 -public-host gsc:8080"
+#      - "mkdir -p /data/clickhouse-backup-test-gcs && fake-gcs-server -data /data -scheme http -port 8080 -public-host gsc:8080"
 #    networks:
 #      - clickhouse-backup

@@ -67,9 +71,31 @@ services:
     image: mcr.microsoft.com/azure-storage/azurite:latest
     container_name: azure
     hostname: devstoreaccount1.blob.azure
+    healthcheck:
+      test: nc 127.0.0.1 10000 -z
+      interval: 1s
+      retries: 30
+    command: ["azurite", "--debug", "/data/debug.log" , "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0"]
     networks:
       - clickhouse-backup

+#  azure_init:
+#    image: mcr.microsoft.com/azure-cli:latest
+#    command:
+#      - /bin/sh
+#      - -xc
+#      - |
+#        az storage container create --debug --name azure-backup-disk &&
+#        az storage container create --debug --name azure-disk
+#    depends_on:
+#      azure:
+#        condition: service_healthy
+#    environment:
+#      # https://github.com/Azure/Azurite/blob/main/README.md#usage-with-azure-storage-sdks-or-tools
+#      AZURE_STORAGE_CONNECTION_STRING: DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azure:10000/devstoreaccount1;
+#    networks:
+#      - clickhouse-backup
+
   mysql:
     image: docker.io/mysql:${MYSQL_VERSION:-latest}
     command: --default-authentication-plugin=mysql_native_password --gtid_mode=on --enforce_gtid_consistency=ON
@@ -189,6 +215,12 @@ services:
         condition: service_healthy
       zookeeper:
         condition: service_healthy
+      minio:
+        condition: service_healthy
+      azure:
+        condition: service_healthy
+#      azure_init:
+#        condition: service_completed_successfully

   all_services_ready:
     image: hello-world
diff --git a/test/integration/dynamic_settings.sh b/test/integration/dynamic_settings.sh
index 861a21ab..072b999b 100644
--- a/test/integration/dynamic_settings.sh
+++ b/test/integration/dynamic_settings.sh
@@ -81,7 +81,7 @@ EOT

 fi

-if [[ "${CLICKHOUSE_VERSION}" =~ ^21\.[8-9]|^21\.[0-9]{2} || "${CLICKHOUSE_VERSION}" =~ ^2[2-9]\.[0-9]+ ]]; then
+if [[ "${CLICKHOUSE_VERSION}" == "head" || "${CLICKHOUSE_VERSION}" =~ ^21\.[8-9]|^21\.[0-9]{2} || "${CLICKHOUSE_VERSION}" =~ ^2[2-9]\.[0-9]+ ]]; then

 cat <<EOT > /etc/clickhouse-server/config.d/storage_configuration_s3.xml
@@ -110,7 +110,7 @@ EOT

 fi

-if [[ "${CLICKHOUSE_VERSION}" =~ ^21\.12 || "${CLICKHOUSE_VERSION}" =~ ^2[2-9]\.[0-9]+ ]]; then
+if [[ "${CLICKHOUSE_VERSION}" == "head" || "${CLICKHOUSE_VERSION}" =~ ^21\.12 || "${CLICKHOUSE_VERSION}" =~ ^2[2-9]\.[0-9]+ ]]; then

 cat <<EOT > /etc/clickhouse-server/config.d/storage_configuration_encrypted_s3.xml
@@ -182,52 +182,68 @@ EOT

 fi

-# embedded s3_plain and azure backup configuration
-if [[ "${CLICKHOUSE_VERSION}" == "head" || "${CLICKHOUSE_VERSION}" =~ ^23\.3 || "${CLICKHOUSE_VERSION}" =~ ^23\.1[0-9]+ || "${CLICKHOUSE_VERSION}" =~ ^2[4-9]\.[1-9]+ ]]; then
+# s3_plain and azure backup configuration
+if [[ "${CLICKHOUSE_VERSION}" == "head" || "${CLICKHOUSE_VERSION}" =~ ^23\.3 || "${CLICKHOUSE_VERSION}" =~ ^23\.[4-9] || "${CLICKHOUSE_VERSION}" =~ ^23\.1[0-9]+ || "${CLICKHOUSE_VERSION}" =~ ^2[4-9]\.[1-9]+ ]]; then

-mkdir -p /var/lib/clickhouse/disks/backups_azure/ /var/lib/clickhouse/disks/backups_s3_plain/
+mkdir -p /var/lib/clickhouse/disks/backups_s3_plain/
 chown -R clickhouse /var/lib/clickhouse/disks/

-#cat <<EOT > /etc/clickhouse-server/config.d/backup_storage_configuration_s3_plain.xml
-#<clickhouse>
-#  <storage_configuration>
-#    <disks>
-#      <backups_s3_plain>
-#        <send_metadata>true</send_metadata>
-#        <type>s3_plain</type>
-#        <endpoint>http://minio:9000/clickhouse/backups_plain/</endpoint>
-#        <access_key_id>access-key</access_key_id>
-#        <secret_access_key>it-is-my-super-secret-key</secret_access_key>
-#        <skip_access_check>false</skip_access_check>
-#      </backups_s3_plain>
-#    </disks>
-#  </storage_configuration>
-#  <backups>
-#    <allowed_disk>backups_azure</allowed_disk>
-#  </backups>
-#</clickhouse>
-#EOT
-
-#cat <<EOT > /etc/clickhouse-server/config.d/backup_storage_configuration_azure.xml
-#<clickhouse>
-#  <storage_configuration>
-#    <disks>
-#      <backups_azure>
-#        <type>azure_blob_storage</type>
-#        <storage_account_url>http://azure:10000</storage_account_url>
-#        <container_name>container-embedded</container_name>
-#        <account_name>devstoreaccount1</account_name>
-#        <account_key>Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==</account_key>
-#      </backups_azure>
-#    </disks>
-#  </storage_configuration>
-#  <backups>
-#    <allowed_disk>backups_azure</allowed_disk>
-#  </backups>
-#</clickhouse>
-#EOT
+cat <<EOT > /etc/clickhouse-server/config.d/backup_storage_configuration_s3_plain.xml
+<clickhouse>
+  <storage_configuration>
+    <disks>
+      <backups_s3_plain>
+        <type>s3_plain</type>
+        <endpoint>http://minio:9000/clickhouse/backups_s3_plain/</endpoint>
+        <access_key_id>access-key</access_key_id>
+        <secret_access_key>it-is-my-super-secret-key</secret_access_key>
+        <skip_access_check>false</skip_access_check>
+      </backups_s3_plain>
+    </disks>
+  </storage_configuration>
+  <backups>
+    <allowed_disk>backups_s3</allowed_disk>
+    <allowed_disk>backups_s3_plain</allowed_disk>
+  </backups>
+</clickhouse>
+EOT
+
+mkdir -p /var/lib/clickhouse/disks/backups_azure/
+chown -R clickhouse /var/lib/clickhouse/disks/
+
+cat <<EOT > /etc/clickhouse-server/config.d/backup_storage_configuration_azure.xml
+<clickhouse>
+  <storage_configuration>
+    <disks>
+      <azure>
+        <type>azure_blob_storage</type>
+        <storage_account_url>http://azure:10000/devstoreaccount1</storage_account_url>
+        <container_name>azure-disk</container_name>
+        <!-- https://github.com/Azure/Azurite/blob/main/README.md#usage-with-azure-storage-sdks-or-tools -->
+        <account_name>devstoreaccount1</account_name>
+        <account_key>Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==</account_key>
+        <skip_access_check>false</skip_access_check>
+      </azure>
+      <backups_azure>
+        <type>azure_blob_storage</type>
+        <storage_account_url>http://azure:10000/devstoreaccount1</storage_account_url>
+        <container_name>azure-backup-disk</container_name>
+        <!-- https://github.com/Azure/Azurite/blob/main/README.md#usage-with-azure-storage-sdks-or-tools -->
+        <account_name>devstoreaccount1</account_name>
+        <account_key>Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==</account_key>
+        <skip_access_check>false</skip_access_check>
+      </backups_azure>
+    </disks>
+  </storage_configuration>
+  <backups>
+    <allowed_disk>backups_s3</allowed_disk>
+    <allowed_disk>backups_s3_plain</allowed_disk>
+    <allowed_disk>backups_azure</allowed_disk>
+  </backups>
+</clickhouse>
+EOT

 fi
diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go
index 2ee7efc9..15e87396 100644
--- a/test/integration/integration_test.go
+++ b/test/integration/integration_test.go
@@ -754,13 +754,15 @@ func TestIntegrationEmbedded(t *testing.T) {
 	//CUSTOM backup create folder in each disk
 	r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_s3/backup/"))
 	r.NoError(dockerCP("config-s3-embedded.yml", "clickhouse:/etc/clickhouse-backup/config.yml"))
-	runMainIntegrationScenario(t, "EMBEDDED")
-	//r.NoError(dockerExec("clickhouse","rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup", ))
+	runMainIntegrationScenario(t, "EMBEDDED_S3")
+	//@TODO uncomment when slow azure BACKUP/RESTORE is resolved https://github.com/ClickHouse/ClickHouse/issues/52088
+	//r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/"))
+	//r.NoError(dockerCP("config-azblob-embedded.yml", "clickhouse:/etc/clickhouse-backup/config.yml"))
+	//runMainIntegrationScenario(t, "EMBEDDED_AZURE")
+	//@TODO think about how to implement embedded backup for s3_plain disks
+	//r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/"))
 	//r.NoError(dockerCP("config-s3-plain-embedded.yml", "clickhouse:/etc/clickhouse-backup/config.yml"))
-	runMainIntegrationScenario(t, "EMBEDDED")
-	//r.NoError(dockerExec("clickhouse","rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup", ))
-	//r.NoError(dockerCP("config-azure-embedded.yml", "clickhouse:/etc/clickhouse-backup/config.yml"))
-	//runMainIntegrationScenario(t, "EMBEDDED")
+	//runMainIntegrationScenario(t, "EMBEDDED_S3_PLAIN")
 }

 func TestLongListRemote(t *testing.T) {
@@ -993,8 +995,8 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType string) {
 	checkResumeAlreadyProcessed(uploadCmd, incrementBackupName, "upload", r, remoteStorageType)

 	backupDir := "/var/lib/clickhouse/backup"
-	if remoteStorageType == "EMBEDDED" {
-		backupDir = "/var/lib/clickhouse/disks/backups_s3"
+	if strings.HasPrefix(remoteStorageType, "EMBEDDED") {
+		backupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED"))
 	}
 	out, err = dockerExecOut("clickhouse", "ls", "-lha", backupDir)
 	r.NoError(err)
@@ -1066,12 +1068,9 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType string) {
 	// test end
 	log.Info("Clean after finish")
-	// why CUSTOM delete only local database?
-	if remoteStorageType == "CUSTOM" {
-		fullCleanup(r, ch, []string{}, []string{}, databaseList, false, true)
-	} else if remoteStorageType == "EMBEDDED" {
+	if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") {
 		fullCleanup(r, ch, []string{testBackupName, incrementBackupName}, []string{"remote"}, databaseList, true, true)
-		fullCleanup(r, ch, []string{testBackupName, incrementBackupName}, []string{"local"}, nil, false, false)
+		fullCleanup(r, ch, []string{incrementBackupName}, []string{"local"}, nil, true, false)
 	} else {
 		fullCleanup(r, ch, []string{testBackupName, incrementBackupName}, []string{"remote", "local"}, databaseList, true, true)
 	}
 }
@@ -1079,7 +1078,7 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType string) {

 func checkResumeAlreadyProcessed(backupCmd, testBackupName, resumeKind string, r *require.Assertions, remoteStorageType string) {
 	// backupCmd = fmt.Sprintf("%s & PID=$!; sleep 0.7; kill -9 $PID; cat /var/lib/clickhouse/backup/%s/upload.state; sleep 0.3; %s", backupCmd, testBackupName, backupCmd)
-	if remoteStorageType == "CUSTOM" || remoteStorageType == "EMBEDDED" {
+	if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") {
 		backupCmd = strings.Replace(backupCmd, "--resume", "", 1)
 	} else {
 		backupCmd = fmt.Sprintf("%s; cat /var/lib/clickhouse/backup/%s/%s.state; %s", backupCmd, testBackupName, resumeKind, backupCmd)
@@ -2113,7 +2112,7 @@ func (ch *TestClickHouse) createTestSchema(data TestDataStruct, remoteStorageTyp
 		return nil
 	}
 	// @TODO remove it when resolve https://github.com/ClickHouse/ClickHouse/issues/43971
-	if strings.Contains(createSQL, "8192)") && remoteStorageType == "EMBEDDED" {
+	if strings.Contains(createSQL, "8192)") && strings.HasPrefix(remoteStorageType, "EMBEDDED") {
 		matches := mergeTreeOldSyntax.FindStringSubmatch(createSQL)
 		if len(matches) >= 3 {
 			substitution := "MergeTree() PARTITION BY toYYYYMMDD($1) ORDER BY $2 SETTINGS index_granularity=$3"
@@ -2356,15 +2355,15 @@ func testBackupSpecifiedPartitions(r *require.Assertions, ch *TestClickHouse, re
 	r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", fullBackupName))
 	r.NoError(dockerExec("clickhouse", "clickhouse-backup", "download", "--partitions=('2022-01-02'),('2022-01-03')", fullBackupName))
 	fullBackupDir := "/var/lib/clickhouse/backup/" + fullBackupName + "/shadow/default/t?/default/"
-	if remoteStorageType == "EMBEDDED" {
-		fullBackupDir = "/var/lib/clickhouse/disks/backups_s3/" + fullBackupName + "/data/default/t?"
+	if strings.HasPrefix(remoteStorageType, "EMBEDDED") {
+		fullBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + fullBackupName + "/data/default/t?"
 	}
 	out, err = dockerExecOut("clickhouse", "bash", "-c", "ls -la "+fullBackupDir+" | wc -l")
 	r.NoError(err)
 	expectedLines := "13"
 	// custom storage doesn't support --partitions for upload / download now
 	// embedded storage contain hardLink files and will download additional data parts
-	if remoteStorageType == "CUSTOM" || remoteStorageType == "EMBEDDED" {
+	if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") {
 		expectedLines = "17"
 	}
 	r.Equal(expectedLines, strings.Trim(out, "\r\n\t "))
@@ -2372,8 +2371,8 @@ func testBackupSpecifiedPartitions(r *require.Assertions, ch *TestClickHouse, re
 	r.NoError(dockerExec("clickhouse", "clickhouse-backup", "download", fullBackupName))
 	fullBackupDir = "/var/lib/clickhouse/backup/" + fullBackupName + "/shadow/default/t?/default/"
-	if remoteStorageType == "EMBEDDED" {
-		fullBackupDir = "/var/lib/clickhouse/disks/backups_s3/" + fullBackupName + "/data/default/t?"
+	if strings.HasPrefix(remoteStorageType, "EMBEDDED") {
+		fullBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + fullBackupName + "/data/default/t?"
 	}
 	out, err = dockerExecOut("clickhouse", "bash", "-c", "ls -la "+fullBackupDir+"| wc -l")
 	r.NoError(err)
@@ -2393,8 +2392,8 @@ func testBackupSpecifiedPartitions(r *require.Assertions, ch *TestClickHouse, re
 	// check create + partitions
 	r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", "--tables=default.t1", "--partitions=20220102,20220103", partitionBackupName))
 	partitionBackupDir := "/var/lib/clickhouse/backup/" + partitionBackupName + "/shadow/default/t1/default/"
-	if remoteStorageType == "EMBEDDED" {
-		partitionBackupDir = "/var/lib/clickhouse/disks/backups_s3/" + partitionBackupName + "/data/default/t1"
+	if strings.HasPrefix(remoteStorageType, "EMBEDDED") {
+		partitionBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + partitionBackupName + "/data/default/t1"
 	}
 	out, err = dockerExecOut("clickhouse", "bash", "-c", "ls -la "+partitionBackupDir+"| wc -l")
 	r.NoError(err)
@@ -2404,8 +2403,8 @@ func testBackupSpecifiedPartitions(r *require.Assertions, ch *TestClickHouse, re
 	// check create > upload + partitions
 	r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", "--tables=default.t1", partitionBackupName))
 	partitionBackupDir = "/var/lib/clickhouse/backup/" + partitionBackupName + "/shadow/default/t1/default/"
-	if remoteStorageType == "EMBEDDED" {
-		partitionBackupDir = "/var/lib/clickhouse/disks/backups_s3/" + partitionBackupName + "/data/default/t1"
+	if strings.HasPrefix(remoteStorageType, "EMBEDDED") {
+		partitionBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + partitionBackupName + "/data/default/t1"
 	}
 	out, err = dockerExecOut("clickhouse", "bash", "-c", "ls -la "+partitionBackupDir+" | wc -l")
 	r.NoError(err)
@@ -2422,7 +2421,7 @@ func testBackupSpecifiedPartitions(r *require.Assertions, ch *TestClickHouse, re

 	expectedCount = 20
 	// custom and embedded doesn't support --partitions in upload and download
-	if remoteStorageType == "CUSTOM" || remoteStorageType == "EMBEDDED" {
+	if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") {
 		expectedCount = 40
 	}
 	r.Equal(expectedCount, result, fmt.Sprintf("expect count=%d", expectedCount))
@@ -2432,7 +2431,7 @@ func testBackupSpecifiedPartitions(r *require.Assertions, ch *TestClickHouse, re
 	r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM default.t1 WHERE dt NOT IN ('2022-01-02','2022-01-03')"))
 	expectedCount = 0
 	// custom and embedded doesn't support --partitions in upload and download
-	if remoteStorageType == "CUSTOM" || remoteStorageType == "EMBEDDED" {
+	if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") {
 		expectedCount = 20
 	}
 	r.Equal(expectedCount, result, "expect count=0")
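
Reviewer note on the pkg/backup/upload.go change: once `use_embedded_backup_restore: true` is set, validateUploadParams now hard-fails unless the active remote storage is configured with `compression_format: none`. A minimal sketch of a matching azblob configuration, mirroring test/integration/config-azblob-embedded.yml from this patch; the credentials are the well-known Azurite defaults, and the `embedded_backup_disk` value is an assumption that must name a disk from the ClickHouse storage_configuration:

    general:
      remote_storage: azblob
    clickhouse:
      use_embedded_backup_restore: true
      embedded_backup_disk: backups_azure  # assumed disk name, see backup_storage_configuration_azure.xml above
    azblob:
      account_name: devstoreaccount1
      account_key: Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
      endpoint_schema: http
      endpoint_suffix: azure:10000  # assumed; azblob.Connect() rejects an empty endpoint suffix
      container: container1
      path: backup
      compression_format: none  # anything else now aborts the upload via log.Fatalf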
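The pkg/storage/object_disk/object_disk.go change is easiest to see with Azurite, whose blob endpoint embeds the account name in the URL path. A standalone sketch of the same parsing, assuming a hypothetical helper name (the real logic is inlined in makeObjectDiskConnection):

    package main

    import (
    	"fmt"
    	"net/url"
    	"strings"
    )

    // splitAzureEndpoint mirrors the new behavior: the account name comes from
    // the disk credentials, and when the endpoint path starts with
    // "/<account_name>" (as with Azurite's http://azure:10000/devstoreaccount1),
    // that prefix is trimmed so it is not duplicated when blob URLs are built.
    func splitAzureEndpoint(endpoint, accountName string) (host, path string, err error) {
    	u, err := url.Parse(endpoint)
    	if err != nil {
    		return "", "", err
    	}
    	host = u.Host
    	path = u.Path
    	if accountName != "" && strings.HasPrefix(path, "/"+accountName) {
    		path = strings.TrimPrefix(path, "/"+accountName)
    	}
    	return host, path, nil
    }

    func main() {
    	host, path, err := splitAzureEndpoint("http://azure:10000/devstoreaccount1", "devstoreaccount1")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("%s %q\n", host, path) // azure:10000 ""
    }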
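Finally, the test refactor replaces the single EMBEDDED scenario with EMBEDDED_S3 / EMBEDDED_AZURE / EMBEDDED_S3_PLAIN and derives the on-disk backup directory from the scenario name in several places. A standalone sketch of that mapping, with a hypothetical helper name (the tests inline the expression):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // embeddedBackupDir lowercases everything after the "EMBEDDED" prefix and
    // appends it to the disks root, so one scenario string selects the matching
    // ClickHouse backup disk directory.
    func embeddedBackupDir(remoteStorageType string) string {
    	return "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED"))
    }

    func main() {
    	fmt.Println(embeddedBackupDir("EMBEDDED_S3"))    // /var/lib/clickhouse/disks/backups_s3
    	fmt.Println(embeddedBackupDir("EMBEDDED_AZURE")) // /var/lib/clickhouse/disks/backups_azure
    }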