From e314805f03321d38c416d51d8f4a5c803d0f79eb Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 11 Aug 2023 15:02:51 +0400 Subject: [PATCH 1/7] fix `--rbac` behavior when /var/lib/clickhouse/access not exists, restore functions via `CREATE OR REPLACE` fix behavior for GetDatabases when `--table=*pattern.*` present tried Make ./tests/integration/ test parallel, FAILED, fix https://github.com/Altinity/clickhouse-backup/issues/721 --- ChangeLog.md | 5 +- Dockerfile | 2 +- pkg/backup/create.go | 31 +- pkg/clickhouse/clickhouse.go | 12 +- test/integration/config-custom-kopia.yml | 2 +- test/integration/config-custom-restic.yml | 2 +- test/integration/config-custom-rsync.yml | 2 +- test/integration/config-s3-fips.yml | 2 +- test/integration/config-s3.yml | 2 +- test/integration/docker-compose.yml | 15 + test/integration/docker-compose_advanced.yml | 15 + test/integration/integration_test.go | 2987 +++++++++--------- test/integration/kopia/init.sh | 6 +- test/integration/restic/init.sh | 3 +- test/integration/rsync/init.sh | 3 +- test/integration/run.sh | 2 +- 16 files changed, 1580 insertions(+), 1511 deletions(-) diff --git a/ChangeLog.md b/ChangeLog.md index 207908f3..01281245 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -6,10 +6,13 @@ IMPROVEMENTS - Backup/Restore RBAC related objects from Zookeeper via direct connection to zookeeper/keeper, fix [604](https://github.com/Altinity/clickhouse-backup/issues/604) - Add `SHARDED_OPERATION_MODE` option, to easy create backup for sharded cluster, available values `none` (no sharding), `table` (table granularity), `database` (database granularity), `first-replica` (on the lexicographically sorted first active replica), thanks @mskwon, fix [639](https://github.com/Altinity/clickhouse-backup/issues/639), fix [648](https://github.com/Altinity/clickhouse-backup/pull/648) - Add support for `compression_format: none` for upload and download backups created with `--rbac` / `--rbac-only` or `--configs` / `--configs-only` options, fix 
[713](https://github.com/Altinity/clickhouse-backup/issues/713) - +- Make ./tests/integration/ test parallel fix [721](https://github.com/Altinity/clickhouse-backup/issues/721) + BUG FIXES - fix possible create backup failures during UNFREEZE not exists tables, affected 2.2.7+ version, fix [704](https://github.com/Altinity/clickhouse-backup/issues/704) - fix too strict `system.parts_columns` check when backup create, exclude Enum and Tuple (JSON) and Nullable(Type) vs Type corner cases, fix [685](https://github.com/Altinity/clickhouse-backup/issues/685), fix [699](https://github.com/Altinity/clickhouse-backup/issues/699) +- fix `--rbac` behavior when /var/lib/clickhouse/access not exists +- restore functions via `CREATE OR REPLACE` # v2.3.2 BUG FIXES diff --git a/Dockerfile b/Dockerfile index 3229029f..16b9ada0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,7 +6,7 @@ FROM ${CLICKHOUSE_IMAGE}:${CLICKHOUSE_VERSION} AS builder-base USER root # TODO remove ugly workaround for musl, https://www.perplexity.ai/search/2ead4c04-060a-4d78-a75f-f26835238438 RUN rm -fv /etc/apt/sources.list.d/clickhouse.list && \ - find /etc/apt/ -type f -exec sed -i 's/ru.archive.ubuntu.com/archive.ubuntu.com/g' {} + && \ + find /etc/apt/ -type f -name *.list -exec sed -i 's/ru.archive.ubuntu.com/archive.ubuntu.com/g' {} + && \ ( apt-get update || true ) && \ apt-get install -y --no-install-recommends gnupg ca-certificates wget && apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 52B59B1571A79DBC054901C0F6BC817356A3D45E && \ DISTRIB_CODENAME=$(cat /etc/lsb-release | grep DISTRIB_CODENAME | cut -d "=" -f 2) && \ diff --git a/pkg/backup/create.go b/pkg/backup/create.go index 8870a7b9..3b4cc73c 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -450,15 +450,28 @@ func (b *Backuper) createBackupRBAC(ctx context.Context, backupPath string, disk if err != nil { return 0, err } - log.Debugf("copy %s -> %s", accessPath, rbacBackup) - copyErr := recursiveCopy.Copy(accessPath, 
rbacBackup, recursiveCopy.Options{ - Skip: func(srcinfo os.FileInfo, src, dest string) (bool, error) { - rbacDataSize += uint64(srcinfo.Size()) - return false, nil - }, - }) - if copyErr != nil { - return 0, copyErr + accessPathInfo, err := os.Stat(accessPath) + if err != nil && !os.IsNotExist(err) { + return 0, err + } + if err == nil && !accessPathInfo.IsDir() { + return 0, fmt.Errorf("%s is not directory", accessPath) + } + if err == nil { + log.Debugf("copy %s -> %s", accessPath, rbacBackup) + copyErr := recursiveCopy.Copy(accessPath, rbacBackup, recursiveCopy.Options{ + Skip: func(srcinfo os.FileInfo, src, dest string) (bool, error) { + rbacDataSize += uint64(srcinfo.Size()) + return false, nil + }, + }) + if copyErr != nil { + return 0, copyErr + } + } else { + if err = os.MkdirAll(rbacBackup, 0755); err != nil { + return 0, err + } } replicatedRBACDataSize, err := b.createBackupRBACReplicated(ctx, rbacBackup) if err != nil { diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index 3ffb561d..788cf0ba 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -508,18 +508,19 @@ func (ch *ClickHouse) GetDatabases(ctx context.Context, cfg *config.Config, tabl case <-ctx.Done(): return nil, ctx.Err() default: + fileMatchToRE := strings.NewReplacer("*", ".*", "?", ".", "(", "\\(", ")", "\\)", "[", "\\[", "]", "\\]", "$", "\\$", "^", "\\^") if len(bypassDatabases) > 0 { allDatabasesSQL := fmt.Sprintf( - "SELECT name, engine FROM system.databases WHERE name NOT IN ('%s') AND name IN ('%s')", - strings.Join(skipDatabases, "','"), strings.Join(bypassDatabases, "','"), + "SELECT name, engine FROM system.databases WHERE NOT match(name,'^(%s)$') AND match(name,'^(%s)$')", + fileMatchToRE.Replace(strings.Join(skipDatabases, "|")), fileMatchToRE.Replace(strings.Join(bypassDatabases, "|")), ) if err := ch.StructSelect(&allDatabases, allDatabasesSQL); err != nil { return nil, err } } else { allDatabasesSQL := fmt.Sprintf( - "SELECT 
name, engine FROM system.databases WHERE name NOT IN ('%s')", - strings.Join(skipDatabases, "','"), + "SELECT name, engine FROM system.databases WHERE NOT match(name,'^(%s)$')", + fileMatchToRE.Replace(strings.Join(skipDatabases, "|")), ) if err := ch.StructSelect(&allDatabases, allDatabasesSQL); err != nil { return nil, err @@ -1055,6 +1056,9 @@ func (ch *ClickHouse) GetUserDefinedFunctions(ctx context.Context) ([]Function, if err := ch.SelectContext(ctx, &allFunctions, allFunctionsSQL); err != nil { return nil, err } + for i := range allFunctions { + allFunctions[i].CreateQuery = strings.Replace(allFunctions[i].CreateQuery, "CREATE FUNCTION", "CREATE OR REPLACE FUNCTION", 1) + } return allFunctions, nil } diff --git a/test/integration/config-custom-kopia.yml b/test/integration/config-custom-kopia.yml index f2742a0a..26b70025 100644 --- a/test/integration/config-custom-kopia.yml +++ b/test/integration/config-custom-kopia.yml @@ -15,7 +15,7 @@ clickhouse: username: backup password: meow=& 123?*%# МЯУ sync_replicated_tables: true - timeout: 2s + timeout: 5s restart_command: "sql:SYSTEM RELOAD USERS; sql:SYSTEM RELOAD CONFIG; sql:SYSTEM SHUTDOWN" custom: # all `kopia` uploads are incremental diff --git a/test/integration/config-custom-restic.yml b/test/integration/config-custom-restic.yml index 053cf073..88372f07 100644 --- a/test/integration/config-custom-restic.yml +++ b/test/integration/config-custom-restic.yml @@ -15,7 +15,7 @@ clickhouse: username: backup password: meow=& 123?*%# МЯУ sync_replicated_tables: true - timeout: 2s + timeout: 5s restart_command: "sql:SYSTEM RELOAD USERS; sql:SYSTEM RELOAD CONFIG; sql:SYSTEM SHUTDOWN" custom: upload_command: /custom/restic/upload.sh {{ .backupName }} {{ .diffFromRemote }} diff --git a/test/integration/config-custom-rsync.yml b/test/integration/config-custom-rsync.yml index 93813de5..74965d84 100644 --- a/test/integration/config-custom-rsync.yml +++ b/test/integration/config-custom-rsync.yml @@ -15,7 +15,7 @@ 
clickhouse: username: backup password: meow=& 123?*%# МЯУ sync_replicated_tables: true - timeout: 2s + timeout: 5s restart_command: "sql:SYSTEM RELOAD USERS; sql:SYSTEM RELOAD CONFIG; sql:SYSTEM SHUTDOWN" custom: upload_command: /custom/rsync/upload.sh {{ .backupName }} {{ .diffFromRemote }} diff --git a/test/integration/config-s3-fips.yml b/test/integration/config-s3-fips.yml index 724835f8..7fe65ff0 100644 --- a/test/integration/config-s3-fips.yml +++ b/test/integration/config-s3-fips.yml @@ -36,7 +36,7 @@ s3: allow_multipart_download: true concurrency: 3 api: - listen: :7171 + listen: :7172 create_integration_tables: true integration_tables_host: "localhost" allow_parallel: false diff --git a/test/integration/config-s3.yml b/test/integration/config-s3.yml index 29bd141d..8e20fbd3 100644 --- a/test/integration/config-s3.yml +++ b/test/integration/config-s3.yml @@ -20,7 +20,7 @@ clickhouse: secure: true skip_verify: true sync_replicated_tables: true - timeout: 2s + timeout: 5s restart_command: "sql:SYSTEM RELOAD USERS; sql:SYSTEM RELOAD CONFIG; sql:SYSTEM SHUTDOWN" backup_mutations: true s3: diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml index 7640b12d..a5f9d012 100644 --- a/test/integration/docker-compose.yml +++ b/test/integration/docker-compose.yml @@ -183,6 +183,21 @@ services: - ${CLICKHOUSE_BACKUP_BIN:-../../clickhouse-backup/clickhouse-backup-race}:/usr/bin/clickhouse-backup - ${CLICKHOUSE_BACKUP_BIN_FIPS:-../../clickhouse-backup/clickhouse-backup-race-fips}:/usr/bin/clickhouse-backup-fips - ./credentials.json:/etc/clickhouse-backup/credentials.json + - ./config-azblob.yml:/etc/clickhouse-backup/config-azblob.yml + - ./config-azblob-embedded.yml:/etc/clickhouse-backup/config-azblob-embedded.yml + - ./config-custom-kopia.yml:/etc/clickhouse-backup/config-custom-kopia.yml + - ./config-custom-restic.yml:/etc/clickhouse-backup/config-custom-restic.yml + - 
./config-custom-rsync.yml:/etc/clickhouse-backup/config-custom-rsync.yml + - ./config-database-mapping.yml:/etc/clickhouse-backup/config-database-mapping.yml + - ./config-ftp.yaml:/etc/clickhouse-backup/config-ftp.yaml + - ./config-gcs.yml:/etc/clickhouse-backup/config-gcs.yml + - ./config-s3.yml:/etc/clickhouse-backup/config-s3.yml + - ./config-s3-embedded.yml:/etc/clickhouse-backup/config-s3-embedded.yml + - ./config-s3-fips.yml:/etc/clickhouse-backup/config-s3-fips.yml.template + - ./config-s3-nodelete.yml:/etc/clickhouse-backup/config-s3-nodelete.yml + - ./config-s3-plain-embedded.yml:/etc/clickhouse-backup/config-s3-plain-embedded.yml + - ./config-sftp-auth-key.yaml:/etc/clickhouse-backup/config-sftp-auth-key.yaml + - ./config-sftp-auth-password.yaml:/etc/clickhouse-backup/config-sftp-auth-password.yaml - ./_coverage_/:/tmp/_coverage_/ # for local debug - ./install_delve.sh:/tmp/install_delve.sh diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index f9d7c1ac..33b2efdc 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -230,6 +230,21 @@ services: - ${CLICKHOUSE_BACKUP_BIN:-../../clickhouse-backup/clickhouse-backup-race}:/usr/bin/clickhouse-backup - ${CLICKHOUSE_BACKUP_BIN_FIPS:-../../clickhouse-backup/clickhouse-backup-race-fips}:/usr/bin/clickhouse-backup-fips - ./credentials.json:/etc/clickhouse-backup/credentials.json + - ./config-azblob.yml:/etc/clickhouse-backup/config-azblob.yml + - ./config-azblob-embedded.yml:/etc/clickhouse-backup/config-azblob-embedded.yml + - ./config-custom-kopia.yml:/etc/clickhouse-backup/config-custom-kopia.yml + - ./config-custom-restic.yml:/etc/clickhouse-backup/config-custom-restic.yml + - ./config-custom-rsync.yml:/etc/clickhouse-backup/config-custom-rsync.yml + - ./config-database-mapping.yml:/etc/clickhouse-backup/config-database-mapping.yml + - ./config-ftp.yaml:/etc/clickhouse-backup/config-ftp.yaml + - 
./config-gcs.yml:/etc/clickhouse-backup/config-gcs.yml + - ./config-s3.yml:/etc/clickhouse-backup/config-s3.yml + - ./config-s3-embedded.yml:/etc/clickhouse-backup/config-s3-embedded.yml + - ./config-s3-fips.yml:/etc/clickhouse-backup/config-s3-fips.yml.template + - ./config-s3-nodelete.yml:/etc/clickhouse-backup/config-s3-nodelete.yml + - ./config-s3-plain-embedded.yml:/etc/clickhouse-backup/config-s3-plain-embedded.yml + - ./config-sftp-auth-key.yaml:/etc/clickhouse-backup/config-sftp-auth-key.yaml + - ./config-sftp-auth-password.yaml:/etc/clickhouse-backup/config-sftp-auth-password.yaml - ./_coverage_/:/tmp/_coverage_/ # for local debug - ./install_delve.sh:/tmp/install_delve.sh diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 371e7f4a..bfe016ae 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -109,7 +109,7 @@ var defaultTestData = []TestDataStruct{ return result }(), Fields: []string{Issue331Atomic, "Col1", "Col2", "Col3", "Col4", "Col5"}, - OrderBy: Issue331Atomic, + OrderBy: Issue331Atomic + "_{test}", }, { Database: Issue331Ordinary, DatabaseEngine: "Ordinary", Name: Issue331Ordinary, // need cover fix https://github.com/Altinity/clickhouse-backup/issues/331 @@ -119,7 +119,7 @@ var defaultTestData = []TestDataStruct{ {Issue331Ordinary: "2", "order_time": toTS("2010-02-01 00:00:00"), "amount": 2.0}, }, Fields: []string{Issue331Ordinary, "order_time", "amount"}, - OrderBy: Issue331Ordinary, + OrderBy: Issue331Ordinary + "_{test}", }, { Database: dbNameOrdinary, DatabaseEngine: "Ordinary", Name: "yuzhichang_table3", @@ -142,7 +142,7 @@ var defaultTestData = []TestDataStruct{ OrderBy: "order_id", }, { Database: dbNameOrdinary, DatabaseEngine: "Ordinary", - Name: "jbod", + Name: "jbod_table", Schema: "(id UInt64) Engine=MergeTree ORDER BY id SETTINGS storage_policy = 'jbod'", Rows: func() []map[string]interface{} { var result []map[string]interface{} @@ -155,7 +155,7 @@ 
var defaultTestData = []TestDataStruct{ OrderBy: "id", }, { Database: dbNameAtomic, DatabaseEngine: "Atomic", - Name: "jbod", + Name: "jbod_table", Schema: "(t DateTime, id UInt64) Engine=MergeTree PARTITION BY (toYYYYMM(t), id % 4) ORDER BY id SETTINGS storage_policy = 'jbod'", Rows: func() []map[string]interface{} { var result []map[string]interface{} @@ -200,7 +200,7 @@ var defaultTestData = []TestDataStruct{ DatabaseEngine: "Atomic", IsMaterializedView: true, Name: "mv_max_with_inner", - Schema: fmt.Sprintf("(id UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/{table}/{uuid}','replica1') ORDER BY id AS SELECT max(id) AS id FROM `%s`.`mv_src_table`", dbNameAtomic), + Schema: fmt.Sprintf("(id UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/{table}/{uuid}','replica1') ORDER BY id AS SELECT max(id) AS id FROM `%s`.`mv_src_table_{test}`", dbNameAtomic), SkipInsert: true, Rows: func() []map[string]interface{} { return []map[string]interface{}{ @@ -215,7 +215,7 @@ var defaultTestData = []TestDataStruct{ DatabaseEngine: "Atomic", IsView: true, Name: "test_view", - Schema: fmt.Sprintf(" AS SELECT count() AS cnt FROM `%s`.`mv_src_table`", dbNameAtomic), + Schema: fmt.Sprintf(" AS SELECT count() AS cnt FROM `%s`.`mv_src_table_{test}`", dbNameAtomic), SkipInsert: true, Rows: func() []map[string]interface{} { return []map[string]interface{}{ @@ -230,7 +230,7 @@ var defaultTestData = []TestDataStruct{ DatabaseEngine: "Atomic", IsMaterializedView: true, Name: "mv_max_with_dst", - Schema: fmt.Sprintf(" TO `%s`.`mv_dst_table` AS SELECT max(id) AS id FROM `%s`.mv_src_table", dbNameAtomic, dbNameAtomic), + Schema: fmt.Sprintf(" TO `%s`.`mv_dst_table_{test}` AS SELECT max(id) AS id FROM `%s`.mv_src_table_{test}", dbNameAtomic, dbNameAtomic), SkipInsert: true, Rows: func() []map[string]interface{} { return []map[string]interface{}{ @@ -246,7 +246,7 @@ var defaultTestData = []TestDataStruct{ DatabaseEngine: "Atomic", IsMaterializedView: true, Name: 
"mv_min_with_nested_depencency", - Schema: fmt.Sprintf(" TO `%s`.`mv_dst_table` AS SELECT min(id) * 2 AS id FROM `%s`.mv_src_table", dbNameAtomic, dbNameAtomic), + Schema: fmt.Sprintf(" TO `%s`.`mv_dst_table_{test}` AS SELECT min(id) * 2 AS id FROM `%s`.mv_src_table_{test}", dbNameAtomic, dbNameAtomic), SkipInsert: true, Rows: func() []map[string]interface{} { return []map[string]interface{}{ @@ -276,7 +276,7 @@ var defaultTestData = []TestDataStruct{ return result }(), Fields: []string{}, - OrderBy: Issue331Atomic, + OrderBy: Issue331Atomic + "_{test}", }, { Database: dbNameMySQL, DatabaseEngine: "MySQL('mysql:3306','mysql','root','root')", @@ -301,7 +301,6 @@ var defaultTestData = []TestDataStruct{ }, } -var testData = defaultTestData var defaultIncrementData = []TestDataStruct{ { Database: dbNameOrdinary, DatabaseEngine: "Ordinary", @@ -346,7 +345,7 @@ var defaultIncrementData = []TestDataStruct{ return result }(), Fields: []string{Issue331Atomic, "Col1", "Col2", "Col3", "Col4", "Col5"}, - OrderBy: Issue331Atomic, + OrderBy: Issue331Atomic + "_{test}", }, { Database: Issue331Ordinary, DatabaseEngine: "Ordinary", Name: Issue331Ordinary, // need cover fix https://github.com/Altinity/clickhouse-backup/issues/331 @@ -356,7 +355,7 @@ var defaultIncrementData = []TestDataStruct{ {Issue331Ordinary: "4", "order_time": toTS("2010-04-01 00:00:00"), "amount": 4.0}, }, Fields: []string{Issue331Ordinary, "order_time", "amount"}, - OrderBy: Issue331Ordinary, + OrderBy: Issue331Ordinary + "_{test}", }, { Database: dbNameOrdinary, DatabaseEngine: "Ordinary", Name: "yuzhichang_table3", @@ -379,7 +378,7 @@ var defaultIncrementData = []TestDataStruct{ OrderBy: "order_id", }, { Database: dbNameAtomic, DatabaseEngine: "Atomic", - Name: "jbod", + Name: "jbod_table", Schema: "(t DateTime, id UInt64) Engine=MergeTree PARTITION BY (toYYYYMM(t), id % 4) ORDER BY id SETTINGS storage_policy = 'jbod'", Rows: func() []map[string]interface{} { var result []map[string]interface{} @@ -392,7 
+391,6 @@ var defaultIncrementData = []TestDataStruct{ OrderBy: "id", }, } -var incrementData = defaultIncrementData func init() { log.SetHandler(logcli.New(os.Stdout)) @@ -401,241 +399,22 @@ func init() { logLevel = os.Getenv("LOG_LEVEL") } log.SetLevelFromString(logLevel) -} - -func TestSkipNotExistsTable(t *testing.T) { - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "19.1") < 0 { - t.Skip("TestSkipNotExistsTable too small time between `SELECT DISTINCT partition_id` and `ALTER TABLE ... FREEZE PARTITION`") - } - ch := &TestClickHouse{} - r := require.New(t) - ch.connectWithWait(r, 0*time.Second, 1*time.Second) - defer ch.chbackend.Close() - - log.Info("Check skip not exist errors") - r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - ifNotExistsCreateSQL := "CREATE TABLE IF NOT EXISTS default.if_not_exists (id UInt64) ENGINE=MergeTree() ORDER BY id" - ifNotExistsInsertSQL := "INSERT INTO default.if_not_exists SELECT number FROM numbers(1000)" - chVersion, err := ch.chbackend.GetVersion(context.Background()) - r.NoError(err) - - freezeErrorHandled := false - pauseChannel := make(chan int64) - resumeChannel := make(chan int64) - ch.chbackend.Config.LogSQLQueries = true - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - defer func() { - close(pauseChannel) - wg.Done() - }() - pause := int64(0) - // pausePercent := int64(90) - for i := int64(0); i < 100; i++ { - testBackupName := fmt.Sprintf("not_exists_%d", i) - err = ch.chbackend.Query(ifNotExistsCreateSQL) - r.NoError(err) - err = ch.chbackend.Query(ifNotExistsInsertSQL) - r.NoError(err) - if i < 5 { - log.Infof("pauseChannel <- %d", 0) - pauseChannel <- 0 - } else { - log.Infof("pauseChannel <- %d", pause/i) - pauseChannel <- pause / i - } - startTime := time.Now() - out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "LOG_LEVEL=debug clickhouse-backup create --table default.if_not_exists "+testBackupName) - log.Info(out) - if (err != nil && 
(strings.Contains(out, "can't freeze") || strings.Contains(out, "no tables for backup"))) || - (err == nil && !strings.Contains(out, "can't freeze")) { - parseTime := func(line string) time.Time { - parsedTime, err := time.Parse("2006/01/02 15:04:05.999999", line[:26]) - if err != nil { - r.Failf("Error parsing time", "%s, : %v", line, err) - } - return parsedTime - } - lines := strings.Split(out, "\n") - firstTime := parseTime(lines[0]) - var freezeTime time.Time - for _, line := range lines { - if strings.Contains(line, "create_table_query") { - freezeTime = parseTime(line) - break - } - if strings.Contains(line, "SELECT DISTINCT partition_id") { - freezeTime = parseTime(line) - break - } - } - pause += (firstTime.Sub(startTime) + freezeTime.Sub(firstTime)).Nanoseconds() - } - if err != nil { - if !strings.Contains(out, "no tables for backup") { - assert.NoError(t, err) - } - } - - if strings.Contains(out, "code: 60") && err == nil { - freezeErrorHandled = true - <-resumeChannel - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", testBackupName)) - break - } - if err == nil { - err = dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", testBackupName) - assert.NoError(t, err) - } - <-resumeChannel - } - }() - wg.Add(1) - go func() { - defer func() { - close(resumeChannel) - wg.Done() - }() - for pause := range pauseChannel { - log.Infof("%d <- pauseChannel", pause) - if pause > 0 { - pauseStart := time.Now() - time.Sleep(time.Duration(pause) * time.Nanosecond) - log.Infof("pause=%s pauseStart=%s", time.Duration(pause).String(), pauseStart.String()) - err = ch.chbackend.DropTable(clickhouse.Table{Database: "default", Name: "if_not_exists"}, ifNotExistsCreateSQL, "", false, chVersion) - r.NoError(err) - } - resumeChannel <- 1 - } - }() - wg.Wait() - r.True(freezeErrorHandled) -} - -func TestTablePatterns(t *testing.T) { - ch := &TestClickHouse{} - r := require.New(t) - ch.connectWithWait(r, 500*time.Millisecond, 
2*time.Second) - defer ch.chbackend.Close() - - testBackupName := "test_backup_patterns" - databaseList := []string{dbNameOrdinary, dbNameAtomic} - r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - - for _, createPattern := range []bool{true, false} { - for _, restorePattern := range []bool{true, false} { - fullCleanup(r, ch, []string{testBackupName}, []string{"remote", "local"}, databaseList, false, false) - generateTestData(ch, r, "S3") - if createPattern { - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", "--tables", " "+dbNameOrdinary+".*", testBackupName)) - } else { - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", testBackupName)) - } - - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", testBackupName)) - dropDatabasesFromTestDataDataSet(r, ch, databaseList) - if restorePattern { - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", "--tables", " "+dbNameOrdinary+".*", testBackupName)) - } else { - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", testBackupName)) - } - - restored := uint64(0) - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&restored, fmt.Sprintf("SELECT count() FROM system.tables WHERE database='%s'", dbNameOrdinary))) - r.NotZero(restored) - - if createPattern || restorePattern { - restored = 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&restored, fmt.Sprintf("SELECT count() FROM system.tables WHERE database='%s'", dbNameAtomic))) - // todo, old versions of clickhouse will return empty recordset - r.Zero(restored) - - restored = 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&restored, fmt.Sprintf("SELECT count() FROM system.databases WHERE name='%s'", dbNameAtomic))) - // todo, old versions of clickhouse will return empty recordset - r.Zero(restored) - } else { - restored = 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&restored, 
fmt.Sprintf("SELECT count() FROM system.tables WHERE database='%s'", dbNameAtomic))) - r.NotZero(restored) - } - - fullCleanup(r, ch, []string{testBackupName}, []string{"remote", "local"}, databaseList, true, true) - - } - } -} - -func TestProjections(t *testing.T) { - var err error - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.8") == -1 { - t.Skipf("Test skipped, PROJECTION available only 21.8+, current version %s", os.Getenv("CLICKHOUSE_VERSION")) - } - - ch := &TestClickHouse{} - r := require.New(t) - ch.connectWithWait(r, 0*time.Second, 1*time.Second) - defer ch.chbackend.Close() - - r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - err = ch.chbackend.Query("CREATE TABLE default.table_with_projection(dt DateTime, v UInt64, PROJECTION x (SELECT toStartOfMonth(dt) m, sum(v) GROUP BY m)) ENGINE=MergeTree() ORDER BY dt") - r.NoError(err) - - err = ch.chbackend.Query("INSERT INTO default.table_with_projection SELECT today() - INTERVAL number DAY, number FROM numbers(10)") - r.NoError(err) - - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_backup_projection")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--rm", "test_backup_projection")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection")) - var counts uint64 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&counts, "SELECT count() FROM default.table_with_projection")) - r.Equal(uint64(10), counts) - err = ch.chbackend.Query("DROP TABLE default.table_with_projection NO DELAY") - r.NoError(err) - -} - -func TestKeepBackupRemoteAndDiffFromRemote(t *testing.T) { - if isTestShouldSkip("RUN_ADVANCED_TESTS") { - t.Skip("Skipping Advanced integration tests...") - return - } - r := require.New(t) - ch := &TestClickHouse{} - ch.connectWithWait(r, 500*time.Millisecond, 2*time.Second) - backupNames := make([]string, 5) - for i := 0; i < 5; i++ { - 
backupNames[i] = fmt.Sprintf("keep_remote_backup_%d", i) - } - databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Atomic, Issue331Ordinary} - r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - fullCleanup(r, ch, backupNames, []string{"remote", "local"}, databaseList, false, false) - generateTestData(ch, r, "S3") - for i, backupName := range backupNames { - generateIncrementTestData(ch, r) - if i == 0 { - r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", fmt.Sprintf("BACKUPS_TO_KEEP_REMOTE=3 clickhouse-backup create_remote %s", backupName))) - } else { - r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", fmt.Sprintf("BACKUPS_TO_KEEP_REMOTE=3 clickhouse-backup create_remote --diff-from-remote=%s %s", backupNames[i-1], backupName))) - } - } - out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup list local") - r.NoError(err) - // shall not delete any backup, cause all deleted backup have links as required in other backups - for _, backupName := range backupNames { - r.Contains(out, backupName) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", backupName)) - } - latestIncrementBackup := fmt.Sprintf("keep_remote_backup_%d", len(backupNames)-1) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "download", latestIncrementBackup)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--rm", latestIncrementBackup)) - var res uint64 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&res, fmt.Sprintf("SELECT count() FROM `%s`.`%s`", Issue331Atomic, Issue331Atomic))) - r.Equal(uint64(200), res) - fullCleanup(r, ch, backupNames, []string{"remote", "local"}, databaseList, true, true) -} - + r := require.New(&testing.T{}) + installDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") + r.NoError(dockerExec("clickhouse-backup", "update-ca-certificates")) + 
r.NoError(dockerExec("clickhouse-backup", "bash", "-xce", "curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq")) + installDebIfNotExists(r, "clickhouse-backup", "jq", "bzip2", "pgp", "git") + // rsync + installDebIfNotExists(r, "clickhouse-backup", "openssh-client", "rsync") + // kopia + r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", "curl -sfL https://kopia.io/signing-key | gpg --dearmor -o /usr/share/keyrings/kopia-keyring.gpg")) + r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", "echo 'deb [signed-by=/usr/share/keyrings/kopia-keyring.gpg] https://packages.kopia.io/apt/ stable main' > /etc/apt/sources.list.d/kopia.list")) + installDebIfNotExists(r, "clickhouse-backup", "kopia") + // restic + r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "RELEASE_TAG=$(curl -H 'Accept: application/json' -sL https://github.com/restic/restic/releases/latest | jq -c -r -M '.tag_name'); RELEASE=$(echo ${RELEASE_TAG} | sed -e 's/v//'); curl -sfL \"https://github.com/restic/restic/releases/download/${RELEASE_TAG}/restic_${RELEASE}_linux_amd64.bz2\" | bzip2 -d > /bin/restic; chmod +x /bin/restic")) +} + +// TestS3NoDeletePermission - no parallel func TestS3NoDeletePermission(t *testing.T) { if isTestShouldSkip("RUN_ADVANCED_TESTS") { t.Skip("Skipping Advanced integration tests...") @@ -648,1511 +427,1733 @@ func TestS3NoDeletePermission(t *testing.T) { ch := &TestClickHouse{} ch.connectWithWait(r, 500*time.Millisecond, 2*time.Second) defer ch.chbackend.Close() - generateTestData(ch, r, "S3") + generateTestData(t, r, ch, "S3", defaultTestData) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", "no_delete_backup")) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "no_delete_backup")) r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", "no_delete_backup")) 
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "no_delete_backup")) r.Error(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "no_delete_backup")) databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Atomic, Issue331Ordinary} - dropDatabasesFromTestDataDataSet(r, ch, databaseList) + dropDatabasesFromTestDataDataSet(t, r, ch, databaseList) r.NoError(dockerExec("minio", "bash", "-ce", "rm -rf /data/clickhouse/*")) } -func TestSyncReplicaTimeout(t *testing.T) { - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "19.11") == -1 { - t.Skipf("Test skipped, SYNC REPLICA ignore receive_timeout for %s version", os.Getenv("CLICKHOUSE_VERSION")) +// TestDoRestoreRBAC need clickhouse-server restart, no parallel +func TestDoRestoreRBAC(t *testing.T) { + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.4") == -1 { + t.Skipf("Test skipped, RBAC not available for %s version", os.Getenv("CLICKHOUSE_VERSION")) } ch := &TestClickHouse{} r := require.New(t) - ch.connectWithWait(r, 0*time.Millisecond, 2*time.Second) - r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - dropReplTables := func() { - for _, table := range []string{"repl1", "repl2"} { - query := "DROP TABLE IF EXISTS default." 
+ table - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.3") == 1 { - query += " NO DELAY" - } - ch.queryWithNoError(r, query) - } - } - dropReplTables() - ch.queryWithNoError(r, "CREATE TABLE default.repl1 (v UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/default/repl','repl1') ORDER BY tuple()") - ch.queryWithNoError(r, "CREATE TABLE default.repl2 (v UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/default/repl','repl2') ORDER BY tuple()") + ch.connectWithWait(r, 1*time.Second, 1*time.Second) + + ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_rbac") + ch.queryWithNoError(r, "CREATE TABLE default.test_rbac (v UInt64) ENGINE=MergeTree() ORDER BY tuple()") - ch.queryWithNoError(r, "INSERT INTO default.repl1 SELECT number FROM numbers(10)") + ch.queryWithNoError(r, "DROP SETTINGS PROFILE IF EXISTS test_rbac") + ch.queryWithNoError(r, "DROP QUOTA IF EXISTS test_rbac") + ch.queryWithNoError(r, "DROP ROW POLICY IF EXISTS test_rbac ON default.test_rbac") + ch.queryWithNoError(r, "DROP ROLE IF EXISTS test_rbac") + ch.queryWithNoError(r, "DROP USER IF EXISTS test_rbac") - ch.queryWithNoError(r, "SYSTEM STOP REPLICATED SENDS default.repl1") - ch.queryWithNoError(r, "SYSTEM STOP FETCHES default.repl2") + log.Info("create RBAC related objects") + ch.queryWithNoError(r, "CREATE SETTINGS PROFILE test_rbac SETTINGS max_execution_time=60") + ch.queryWithNoError(r, "CREATE ROLE test_rbac SETTINGS PROFILE 'test_rbac'") + ch.queryWithNoError(r, "CREATE USER test_rbac IDENTIFIED BY 'test_rbac' DEFAULT ROLE test_rbac") + ch.queryWithNoError(r, "CREATE QUOTA test_rbac KEYED BY user_name FOR INTERVAL 1 hour NO LIMITS TO test_rbac") + ch.queryWithNoError(r, "CREATE ROW POLICY test_rbac ON default.test_rbac USING 1=1 AS RESTRICTIVE TO test_rbac") - ch.queryWithNoError(r, "INSERT INTO default.repl1 SELECT number FROM numbers(100)") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", 
"--rbac", "--rbac-only", "test_rbac_backup")) + r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup upload test_rbac_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_rbac_backup")) + r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "--tables=default.repl*", "test_not_synced_backup")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "upload", "test_not_synced_backup")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_not_synced_backup")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_not_synced_backup")) + log.Info("drop all RBAC related objects after backup") + ch.queryWithNoError(r, "DROP SETTINGS PROFILE test_rbac") + ch.queryWithNoError(r, "DROP QUOTA test_rbac") + ch.queryWithNoError(r, "DROP ROW POLICY test_rbac ON default.test_rbac") + ch.queryWithNoError(r, "DROP ROLE test_rbac") + ch.queryWithNoError(r, "DROP USER test_rbac") - ch.queryWithNoError(r, "SYSTEM START REPLICATED SENDS default.repl1") - ch.queryWithNoError(r, "SYSTEM START FETCHES default.repl2") + log.Info("download+restore RBAC") + r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) + r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup download test_rbac_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "--rm", "--rbac", "--rbac-only", "test_rbac_backup")) + r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) - dropReplTables() 
ch.chbackend.Close() + ch.connectWithWait(r, 2*time.Second, 8*time.Second) -} - -func TestGetPartitionId(t *testing.T) { - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "19.17") == -1 { - t.Skipf("Test skipped, is_in_partition_key not available for %s version", os.Getenv("CLICKHOUSE_VERSION")) - } - r := require.New(t) - ch := &TestClickHouse{} - ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second) - defer ch.chbackend.Close() + r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) - type testData struct { - CreateTableSQL string - Database string - Table string - Partition string - ExpectedId string - ExpectedName string - } - testCases := []testData{ - { - "CREATE TABLE default.test_part_id_1 UUID 'b45e751f-6c06-42a3-ab4a-f5bb9ac3716e' (dt Date, version DateTime, category String, name String) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{shard}/{database}/{table}','{replica}',version) ORDER BY dt PARTITION BY (toYYYYMM(dt),category)", - "default", - "test_part_id_1", - "('2023-01-01','category1')", - "cc1ad6ede2e7f708f147e132cac7a590", - "(202301,'category1')", - }, - { - "CREATE TABLE default.test_part_id_2 (dt Date, version DateTime, name String) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{shard}/{database}/{table}','{replica}',version) ORDER BY dt PARTITION BY toYYYYMM(dt)", - "default", - "test_part_id_2", - "'2023-01-01'", - "202301", - "202301", - }, - { - "CREATE TABLE default.test_part_id_3 ON CLUSTER '{cluster}' (i UInt32, name String) ENGINE = ReplicatedMergeTree() ORDER BY i PARTITION BY i", - "default", - "test_part_id_3", - "202301", - "202301", - "202301", - }, - { - "CREATE TABLE default.test_part_id_4 (dt String, name String) ENGINE = MergeTree ORDER BY dt PARTITION BY dt", - "default", - "test_part_id_4", - "'2023-01-01'", - "c487903ebbb25a533634d6ec3485e3a9", - "2023-01-01", - }, - { - "CREATE TABLE default.test_part_id_5 (dt String, name String) ENGINE = Memory", - "default", - 
"test_part_id_5", - "'2023-01-01'", - "", - "", - }, - } - if isAtomic, _ := ch.chbackend.IsAtomic("default"); !isAtomic { - testCases[0].CreateTableSQL = strings.Replace(testCases[0].CreateTableSQL, "UUID 'b45e751f-6c06-42a3-ab4a-f5bb9ac3716e'", "", 1) + rbacTypes := map[string]string{ + "PROFILES": "test_rbac", + "QUOTAS": "test_rbac", + "POLICIES": "test_rbac ON default.test_rbac", + "ROLES": "test_rbac", + "USERS": "test_rbac", } - for _, tc := range testCases { - partitionId, partitionName, err := partition.GetPartitionIdAndName(context.Background(), ch.chbackend, tc.Database, tc.Table, tc.CreateTableSQL, tc.Partition) - assert.NoError(t, err) - assert.Equal(t, tc.ExpectedId, partitionId) - assert.Equal(t, tc.ExpectedName, partitionName) + for rbacType, expectedValue := range rbacTypes { + var rbacRows []struct { + Name string `ch:"name"` + } + err := ch.chbackend.Select(&rbacRows, fmt.Sprintf("SHOW %s", rbacType)) + r.NoError(err) + found := false + for _, row := range rbacRows { + if expectedValue == row.Name { + found = true + break + } + } + if !found { + //r.NoError(dockerExec("clickhouse", "cat", "/var/log/clickhouse-server/clickhouse-server.log")) + r.Failf("wrong RBAC", "SHOW %s, %#v doesn't contain %#v", rbacType, rbacRows, expectedValue) + } } + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_rbac_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "remote", "test_rbac_backup")) + + ch.queryWithNoError(r, "DROP SETTINGS PROFILE test_rbac") + ch.queryWithNoError(r, "DROP QUOTA test_rbac") + ch.queryWithNoError(r, "DROP ROW POLICY test_rbac ON default.test_rbac") + ch.queryWithNoError(r, "DROP ROLE test_rbac") + ch.queryWithNoError(r, "DROP USER test_rbac") + ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_rbac") + ch.chbackend.Close() + } -func TestRestoreMutationInProgress(t 
*testing.T) { - r := require.New(t) - ch := &TestClickHouse{} - ch.connectWithWait(r, 0*time.Second, 5*time.Second) - defer ch.chbackend.Close() - version, err := ch.chbackend.GetVersion(context.Background()) - r.NoError(err) - zkPath := "/clickhouse/tables/{shard}/default/test_restore_mutation_in_progress" - onCluster := "" - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") >= 0 { - zkPath = "/clickhouse/tables/{shard}/{database}/{table}" - } - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.3") >= 0 { - zkPath = "/clickhouse/tables/{shard}/{database}/{table}/{uuid}" - onCluster = " ON CLUSTER '{cluster}'" - } - dropSQL := fmt.Sprintf("DROP TABLE IF EXISTS default.test_restore_mutation_in_progress %s", onCluster) - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.3") > 0 { - dropSQL += " NO DELAY" +// TestDoRestoreConfigs - require direct access to `/etc/clickhouse-backup/`, so executed inside `clickhouse` container +// need clickhouse-server restart, no parallel +func TestDoRestoreConfigs(t *testing.T) { + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "1.1.54391") < 0 { + t.Skipf("Test skipped, users.d is not available for %s version", os.Getenv("CLICKHOUSE_VERSION")) } - ch.queryWithNoError(r, dropSQL) + ch := &TestClickHouse{} + r := require.New(t) + ch.connectWithWait(r, 0*time.Millisecond, 1*time.Second) + ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_configs") + ch.queryWithNoError(r, "CREATE TABLE default.test_configs (v UInt64) ENGINE=MergeTree() ORDER BY tuple()") - createSQL := fmt.Sprintf("CREATE TABLE default.test_restore_mutation_in_progress %s (id UInt64, attr String) ENGINE=ReplicatedMergeTree('%s','{replica}') PARTITION BY id ORDER BY id", onCluster, zkPath) - ch.queryWithNoError(r, createSQL) - ch.queryWithNoError(r, "INSERT INTO default.test_restore_mutation_in_progress SELECT number, if(number>0,'a',toString(number)) FROM numbers(2)") + r.NoError(dockerExec("clickhouse", "bash", "-ce", "echo '1' > 
/etc/clickhouse-server/users.d/test_config.xml")) - mutationSQL := "ALTER TABLE default.test_restore_mutation_in_progress MODIFY COLUMN attr UInt64" - err = ch.chbackend.QueryContext(context.Background(), mutationSQL) - if err != nil { - errStr := strings.ToLower(err.Error()) - r.True(strings.Contains(errStr, "code: 341") || strings.Contains(errStr, "code: 517") || strings.Contains(errStr, "timeout"), "UNKNOWN ERROR: %s", err.Error()) - t.Logf("%s RETURN EXPECTED ERROR=%#v", mutationSQL, err) - } + r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "--configs", "--configs-only", "test_configs_backup")) + ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_configs") + r.NoError(dockerExec("clickhouse", "bash", "-xec", "CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml S3_COMPRESSION_FORMAT=none ALLOW_EMPTY_BACKUPS=1 clickhouse-backup upload test_configs_backup")) + r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_configs_backup")) - attrs := make([]struct { - Attr uint64 `ch:"attr"` - }, 0) - err = ch.chbackend.Select(&attrs, "SELECT attr FROM default.test_restore_mutation_in_progress ORDER BY id") - r.NotEqual(nil, err) - errStr := strings.ToLower(err.Error()) - r.True(strings.Contains(errStr, "code: 53") || strings.Contains(errStr, "code: 6")) - r.Zero(len(attrs)) + ch.chbackend.Close() + ch.connectWithWait(r, 1*time.Second, 1*time.Second) + ch.queryWithNoError(r, "SYSTEM RELOAD CONFIG") + selectEmptyResultForAggQuery := "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'" + var settings string + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, selectEmptyResultForAggQuery)) + r.Equal("1", settings, "expect empty_result_for_aggregation_by_empty_set=1") - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") >= 0 { - mutationSQL = "ALTER TABLE 
default.test_restore_mutation_in_progress RENAME COLUMN attr TO attr_1" - err = ch.chbackend.QueryContext(context.Background(), mutationSQL) - r.NotEqual(nil, err) - errStr = strings.ToLower(err.Error()) - r.True(strings.Contains(errStr, "code: 517") || strings.Contains(errStr, "timeout")) - t.Logf("%s RETURN EXPECTED ERROR=%#v", mutationSQL, err) - } - r.NoError(dockerExec("clickhouse", "clickhouse", "client", "-q", "SELECT * FROM system.mutations WHERE is_done=0 FORMAT Vertical")) + r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml")) + r.NoError(dockerExec("clickhouse", "bash", "-xec", "CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml ALLOW_EMPTY_BACKUPS=1 clickhouse-backup download test_configs_backup")) - r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - // backup with check consistency - out, createErr := dockerExecOut("clickhouse-backup", "clickhouse-backup", "create", "--tables=default.test_restore_mutation_in_progress", "test_restore_mutation_in_progress") - r.NotEqual(createErr, nil) - r.Contains(out, "have inconsistent data types") - t.Log(out) + r.NoError(ch.chbackend.Query("SYSTEM RELOAD CONFIG")) + ch.chbackend.Close() + ch.connectWithWait(r, 1*time.Second, 1*time.Second) - // backup without check consistency - out, createErr = dockerExecOut("clickhouse-backup", "clickhouse-backup", "create", "--skip-check-parts-columns", "--tables=default.test_restore_mutation_in_progress", "test_restore_mutation_in_progress") - t.Log(out) - r.NoError(createErr) - r.NotContains(out, "have inconsistent data types") + settings = "" + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'")) + r.Equal("0", settings, "expect empty_result_for_aggregation_by_empty_set=0") - r.NoError(ch.chbackend.DropTable(clickhouse.Table{Database: "default", Name: 
"test_restore_mutation_in_progress"}, "", "", false, version)) - var restoreErr error - restoreErr = dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--rm", "--tables=default.test_restore_mutation_in_progress", "test_restore_mutation_in_progress") - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") >= 0 && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.8") < 0 { - r.NotEqual(restoreErr, nil) - } else { - r.NoError(restoreErr) - } + r.NoError(dockerExec("clickhouse", "bash", "-xec", "CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml CLICKHOUSE_RESTART_COMMAND='sql:SYSTEM RELOAD CONFIG' clickhouse-backup restore --rm --configs --configs-only test_configs_backup")) - attrs = make([]struct { - Attr uint64 `ch:"attr"` - }, 0) - checkRestoredData := "attr" - if restoreErr == nil { - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") >= 0 { - checkRestoredData = "attr_1 AS attr" - } - } - selectSQL := fmt.Sprintf("SELECT %s FROM default.test_restore_mutation_in_progress ORDER BY id", checkRestoredData) - selectErr := ch.chbackend.Select(&attrs, selectSQL) - expectedSelectResults := make([]struct { - Attr uint64 `ch:"attr"` - }, 1) - expectedSelectResults[0].Attr = 0 + ch.chbackend.Close() + ch.connectWithWait(r, 1*time.Second, 1*time.Second) - expectedSelectError := "code: 517" + settings = "" + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'")) + r.Equal("1", settings, "expect empty_result_for_aggregation_by_empty_set=1") - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") < 0 { - expectedSelectResults = make([]struct { - Attr uint64 `ch:"attr"` - }, 2) - expectedSelectError = "" - } - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") >= 0 && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.8") < 0 { - expectedSelectError = "" - } - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.8") >= 0 { - 
expectedSelectError = "code: 6" - expectedSelectResults = make([]struct { - Attr uint64 `ch:"attr"` - }, 0) - } - r.Equal(expectedSelectResults, attrs) - if expectedSelectError != "" { - r.Error(selectErr) - r.Contains(strings.ToLower(selectErr.Error()), expectedSelectError) - t.Logf("%s RETURN EXPECTED ERROR=%#v", selectSQL, selectErr) - } else { - r.NoError(selectErr) - } + isTestConfigsTablePresent := 0 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&isTestConfigsTablePresent, "SELECT count() FROM system.tables WHERE database='default' AND name='test_configs' SETTINGS empty_result_for_aggregation_by_empty_set=1")) + r.Equal(0, isTestConfigsTablePresent, "expect default.test_configs is not present") - r.NoError(dockerExec("clickhouse", "clickhouse", "client", "-q", "SELECT * FROM system.mutations FORMAT Vertical")) + r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_configs_backup")) + r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "remote", "test_configs_backup")) + r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml")) - r.NoError(ch.chbackend.DropTable(clickhouse.Table{Database: "default", Name: "test_restore_mutation_in_progress"}, "", "", false, version)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_restore_mutation_in_progress")) + ch.chbackend.Close() } -func TestInnerTablesMaterializedView(t *testing.T) { +// TestLongListRemote - no parallel, because it needs to restart minio +func TestLongListRemote(t *testing.T) { ch := &TestClickHouse{} r := require.New(t) - ch.connectWithWait(r, 1*time.Second, 10*time.Second) + ch.connectWithWait(r, 0*time.Second, 1*time.Second) defer ch.chbackend.Close() + totalCacheCount := 20 + testBackupName := "test_list_remote" - ch.queryWithNoError(r, "CREATE DATABASE test_mv") - ch.queryWithNoError(r, 
"CREATE TABLE test_mv.src_table (v UInt64) ENGINE=MergeTree() ORDER BY v") - ch.queryWithNoError(r, "CREATE TABLE test_mv.dst_table (v UInt64) ENGINE=MergeTree() ORDER BY v") - ch.queryWithNoError(r, "CREATE MATERIALIZED VIEW test_mv.mv_with_inner (v UInt64) ENGINE=MergeTree() ORDER BY v AS SELECT v FROM test_mv.src_table") - ch.queryWithNoError(r, "CREATE MATERIALIZED VIEW test_mv.mv_with_dst TO test_mv.dst_table AS SELECT v FROM test_mv.src_table") - ch.queryWithNoError(r, "INSERT INTO test_mv.src_table SELECT number FROM numbers(100)") - r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) - dropSQL := "DROP DATABASE test_mv" - isAtomic, err := ch.chbackend.IsAtomic("test_mv") - r.NoError(err) - if isAtomic { - dropSQL += " NO DELAY" - } - ch.queryWithNoError(r, dropSQL) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) - var rowCnt uint64 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_inner")) - r.Equal(uint64(100), rowCnt) - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_dst")) - r.Equal(uint64(100), rowCnt) - ch.queryWithNoError(r, "DROP DATABASE test_mv") -} -func TestFIPS(t *testing.T) { - if os.Getenv("QA_AWS_ACCESS_KEY") == "" { - t.Skip("QA_AWS_ACCESS_KEY is empty, TestFIPS will skip") + for i := 0; i < totalCacheCount; i++ { + r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", fmt.Sprintf("CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml ALLOW_EMPTY_BACKUPS=true clickhouse-backup create_remote %s_%d", testBackupName, i))) } - ch := &TestClickHouse{} - r := require.New(t) - ch.connectWithWait(r, 1*time.Second, 10*time.Second) - defer ch.chbackend.Close() - fipsBackupName := fmt.Sprintf("fips_backup_%d", 
rand.Int()) - r.NoError(dockerExec("clickhouse", "rm", "-fv", "/etc/apt/sources.list.d/clickhouse.list")) - installDebIfNotExists(r, "clickhouse", "curl", "gettext-base", "bsdmainutils", "dnsutils", "git", "ca-certificates") - r.NoError(dockerCP("config-s3-fips.yml", "clickhouse:/etc/clickhouse-backup/config.yml.fips-template")) - r.NoError(dockerExec("clickhouse", "update-ca-certificates")) - r.NoError(dockerExec("clickhouse", "git", "clone", "--depth", "1", "https://github.com/drwetter/testssl.sh.git", "/opt/testssl")) - r.NoError(dockerExec("clickhouse", "chmod", "+x", "/opt/testssl/testssl.sh")) - generateCerts := func(certType, keyLength, curveType string) { - r.NoError(dockerExec("clickhouse", "bash", "-xce", "openssl rand -out /root/.rnd 2048")) - switch certType { - case "rsa": - r.NoError(dockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl genrsa -out /etc/clickhouse-backup/ca-key.pem %s", keyLength))) - r.NoError(dockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl genrsa -out /etc/clickhouse-backup/server-key.pem %s", keyLength))) - case "ecdsa": - r.NoError(dockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl ecparam -name %s -genkey -out /etc/clickhouse-backup/ca-key.pem", curveType))) - r.NoError(dockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl ecparam -name %s -genkey -out /etc/clickhouse-backup/server-key.pem", curveType))) - } - r.NoError(dockerExec("clickhouse", "bash", "-xce", "openssl req -subj \"/O=altinity\" -x509 -new -nodes -key /etc/clickhouse-backup/ca-key.pem -sha256 -days 365000 -out /etc/clickhouse-backup/ca-cert.pem")) - r.NoError(dockerExec("clickhouse", "bash", "-xce", "openssl req -subj \"/CN=localhost\" -addext \"subjectAltName = DNS:localhost,DNS:*.cluster.local\" -new -key /etc/clickhouse-backup/server-key.pem -out /etc/clickhouse-backup/server-req.csr")) - r.NoError(dockerExec("clickhouse", "bash", "-xce", "openssl x509 -req -days 365000 -extensions SAN -extfile <(printf 
\"\\n[SAN]\\nsubjectAltName=DNS:localhost,DNS:*.cluster.local\") -in /etc/clickhouse-backup/server-req.csr -out /etc/clickhouse-backup/server-cert.pem -CA /etc/clickhouse-backup/ca-cert.pem -CAkey /etc/clickhouse-backup/ca-key.pem -CAcreateserial")) - } - r.NoError(dockerExec("clickhouse", "bash", "-c", "cat /etc/clickhouse-backup/config.yml.fips-template | envsubst > /etc/clickhouse-backup/config.yml")) + r.NoError(dockerExec("clickhouse-backup", "rm", "-rfv", "/tmp/.clickhouse-backup-metadata.cache.S3")) + r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "minio")) + time.Sleep(2 * time.Second) - generateCerts("rsa", "4096", "") - createSQL := "CREATE TABLE default.fips_table (v UInt64) ENGINE=MergeTree() ORDER BY tuple()" - ch.queryWithNoError(r, createSQL) - r.NoError(dockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips create_remote --tables=default.fips_table "+fipsBackupName)) - r.NoError(dockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips delete local "+fipsBackupName)) - r.NoError(dockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips restore_remote --tables=default.fips_table "+fipsBackupName)) - r.NoError(dockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips delete local "+fipsBackupName)) - r.NoError(dockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips delete remote "+fipsBackupName)) + startFirst := time.Now() + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote")) + noCacheDuration := time.Since(startFirst) - log.Info("Run `clickhouse-backup-fips server` in background") - r.NoError(dockerExec("-d", "clickhouse", "bash", "-ce", "AWS_USE_FIPS_ENDPOINT=true clickhouse-backup-fips server &>>/tmp/clickhouse-backup-server-fips.log")) - time.Sleep(1 * time.Second) + r.NoError(dockerExec("clickhouse-backup", "chmod", "-Rv", "+r", 
"/tmp/.clickhouse-backup-metadata.cache.S3")) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("create_remote --tables=default.fips_table %s", fipsBackupName)}, true) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("delete local %s", fipsBackupName)}, false) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("restore_remote --tables=default.fips_table %s", fipsBackupName)}, true) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("delete local %s", fipsBackupName)}, false) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("delete remote %s", fipsBackupName)}, false) + startCashed := time.Now() + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote")) + cashedDuration := time.Since(startCashed) - inProgressActions := make([]struct { - Command string `ch:"command"` - Status string `ch:"status"` - }, 0) - r.NoError(ch.chbackend.StructSelect(&inProgressActions, - "SELECT command, status FROM system.backup_actions WHERE command LIKE ? 
AND status IN (?,?)", - fmt.Sprintf("%%%s%%", fipsBackupName), status.InProgressStatus, status.ErrorStatus, - )) - r.Equal(0, len(inProgressActions), "inProgressActions=%+v", inProgressActions) - r.NoError(dockerExec("clickhouse", "pkill", "-n", "-f", "clickhouse-backup-fips")) + r.Greater(noCacheDuration, cashedDuration) - testTLSCerts := func(certType, keyLength, curveName string, cipherList ...string) { - generateCerts(certType, keyLength, curveName) - log.Infof("Run `clickhouse-backup-fips server` in background for %s %s %s", certType, keyLength, curveName) - r.NoError(dockerExec("-d", "clickhouse", "bash", "-ce", "AWS_USE_FIPS_ENDPOINT=true clickhouse-backup-fips server &>>/tmp/clickhouse-backup-server-fips.log")) - time.Sleep(1 * time.Second) + r.NoError(dockerExec("clickhouse-backup", "rm", "-Rfv", "/tmp/.clickhouse-backup-metadata.cache.S3")) + r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "minio")) + time.Sleep(2 * time.Second) - r.NoError(dockerExec("clickhouse", "bash", "-ce", "rm -rf /tmp/testssl* && /opt/testssl/testssl.sh -e -s -oC /tmp/testssl.csv --color 0 --disable-rating --quiet -n min --mode parallel --add-ca /etc/clickhouse-backup/ca-cert.pem localhost:7171")) - out, err := dockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("grep -c -E '%s' /tmp/testssl.csv", strings.Join(cipherList, "|"))) - r.NoError(err) - r.Equal(strconv.Itoa(len(cipherList)), strings.Trim(out, " \t\r\n")) + startCacheClear := time.Now() + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote")) + cacheClearDuration := time.Since(startCacheClear) - inProgressActions := make([]struct { - Command string `ch:"command"` - Status string `ch:"status"` - }, 0) - r.NoError(ch.chbackend.StructSelect(&inProgressActions, - "SELECT command, status FROM system.backup_actions WHERE command LIKE ? 
AND status IN (?,?)", - fmt.Sprintf("%%%s%%", fipsBackupName), status.InProgressStatus, status.ErrorStatus, - )) - r.Equal(0, len(inProgressActions), "inProgressActions=%+v", inProgressActions) - r.NoError(dockerExec("clickhouse", "pkill", "-n", "-f", "clickhouse-backup-fips")) - } - // https://www.perplexity.ai/search/0920f1e8-59ec-4e14-b779-ba7b2e037196 - testTLSCerts("rsa", "4096", "", "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES256-GCM-SHA384", "AES128-GCM-SHA256", "AES256-GCM-SHA384") - testTLSCerts("ecdsa", "", "prime256v1", "ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-ECDSA-AES256-GCM-SHA384") - r.NoError(ch.chbackend.DropTable(clickhouse.Table{Database: "default", Name: "fips_table"}, createSQL, "", false, 0)) + r.Greater(cacheClearDuration, cashedDuration) + log.Infof("noCacheDuration=%s cachedDuration=%s cacheClearDuration=%s", noCacheDuration.String(), cashedDuration.String(), cacheClearDuration.String()) -} -func TestDoRestoreRBAC(t *testing.T) { - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.4") == -1 { - t.Skipf("Test skipped, RBAC not available for %s version", os.Getenv("CLICKHOUSE_VERSION")) + testListRemoteAllBackups := make([]string, totalCacheCount) + for i := 0; i < totalCacheCount; i++ { + testListRemoteAllBackups[i] = fmt.Sprintf("%s_%d", testBackupName, i) } + fullCleanup(t, r, ch, testListRemoteAllBackups, []string{"remote", "local"}, []string{}, true, true, "config-s3.yml") +} + +func TestServerAPI(t *testing.T) { ch := &TestClickHouse{} r := require.New(t) + ch.connectWithWait(r, 0*time.Second, 10*time.Second) + defer func() { + ch.chbackend.Close() + }() + r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) + fieldTypes := []string{"UInt64", "String", "Int"} + installDebIfNotExists(r, "clickhouse-backup", "curl") + maxTables := 10 + minFields := 10 + randFields := 10 + fillDatabaseForAPIServer(maxTables, minFields, randFields, ch, r, fieldTypes) - ch.connectWithWait(r, 1*time.Second, 
1*time.Second) + log.Info("Run `clickhouse-backup server --watch` in background") + r.NoError(dockerExec("-d", "clickhouse-backup", "bash", "-ce", "clickhouse-backup server --watch &>>/tmp/clickhouse-backup-server.log")) + time.Sleep(1 * time.Second) - r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_rbac") - ch.queryWithNoError(r, "CREATE TABLE default.test_rbac (v UInt64) ENGINE=MergeTree() ORDER BY tuple()") + testAPIBackupCreate(r) - ch.queryWithNoError(r, "DROP SETTINGS PROFILE IF EXISTS test_rbac") - ch.queryWithNoError(r, "DROP QUOTA IF EXISTS test_rbac") - ch.queryWithNoError(r, "DROP ROW POLICY IF EXISTS test_rbac ON default.test_rbac") - ch.queryWithNoError(r, "DROP ROLE IF EXISTS test_rbac") - ch.queryWithNoError(r, "DROP USER IF EXISTS test_rbac") + testAPIBackupTables(r) - log.Info("create RBAC related objects") - ch.queryWithNoError(r, "CREATE SETTINGS PROFILE test_rbac SETTINGS max_execution_time=60") - ch.queryWithNoError(r, "CREATE ROLE test_rbac SETTINGS PROFILE 'test_rbac'") - ch.queryWithNoError(r, "CREATE USER test_rbac IDENTIFIED BY 'test_rbac' DEFAULT ROLE test_rbac") - ch.queryWithNoError(r, "CREATE QUOTA test_rbac KEYED BY user_name FOR INTERVAL 1 hour NO LIMITS TO test_rbac") - ch.queryWithNoError(r, "CREATE ROW POLICY test_rbac ON default.test_rbac USING 1=1 AS RESTRICTIVE TO test_rbac") + log.Info("Check /backup/actions") + ch.queryWithNoError(r, "SELECT count() FROM system.backup_actions") + + testAPIBackupUpload(r) + + testAPIBackupList(t, r) + + testAPIDeleteLocalDownloadRestore(r) + + testAPIMetrics(r, ch) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "--rbac", "--rbac-only", "test_rbac_backup")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup upload test_rbac_backup")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", 
"test_rbac_backup")) - r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) + testAPIWatchAndKill(r, ch) - log.Info("drop all RBAC related objects after backup") - ch.queryWithNoError(r, "DROP SETTINGS PROFILE test_rbac") - ch.queryWithNoError(r, "DROP QUOTA test_rbac") - ch.queryWithNoError(r, "DROP ROW POLICY test_rbac ON default.test_rbac") - ch.queryWithNoError(r, "DROP ROLE test_rbac") - ch.queryWithNoError(r, "DROP USER test_rbac") + testAPIBackupActions(r, ch) - log.Info("download+restore RBAC") - r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup download test_rbac_backup")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--rm", "--rbac", "--rbac-only", "test_rbac_backup")) - r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) + testAPIRestart(r, ch) - ch.chbackend.Close() - ch.connectWithWait(r, 2*time.Second, 8*time.Second) + testAPIBackupDelete(r) - r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) + r.NoError(dockerExec("clickhouse-backup", "pkill", "-n", "-f", "clickhouse-backup")) + r.NoError(ch.dropDatabase("long_schema")) +} - rbacTypes := map[string]string{ - "PROFILES": "test_rbac", - "QUOTAS": "test_rbac", - "POLICIES": "test_rbac ON default.test_rbac", - "ROLES": "test_rbac", - "USERS": "test_rbac", - } - for rbacType, expectedValue := range rbacTypes { - var rbacRows []struct { - Name string `ch:"name"` - } - err := ch.chbackend.Select(&rbacRows, fmt.Sprintf("SHOW %s", rbacType)) - r.NoError(err) - found := false - for _, row := range rbacRows { - if expectedValue == row.Name { - found = true - break +func testAPIRestart(r *require.Assertions, ch *TestClickHouse) { + out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL -XPOST 'http://localhost:7171/restart'") + log.Debug(out) + 
r.NoError(err) + r.Contains(out, "acknowledged") + + //some actions need time for restart + time.Sleep(6 * time.Second) + + var inProgressActions uint64 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&inProgressActions, "SELECT count() FROM system.backup_actions WHERE status!=?", status.CancelStatus)) + r.Equal(uint64(0), inProgressActions) +} + +func runClickHouseClientInsertSystemBackupActions(r *require.Assertions, ch *TestClickHouse, commands []string, needWait bool) { + sql := "INSERT INTO system.backup_actions(command) " + "VALUES ('" + strings.Join(commands, "'),('") + "')" + out, err := dockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("clickhouse client --echo -mn -q \"%s\"", sql)) + log.Debug(out) + r.NoError(err) + if needWait { + for _, command := range commands { + for { + time.Sleep(500 * time.Millisecond) + var commandStatus string + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&commandStatus, "SELECT status FROM system.backup_actions WHERE command=?", command)) + if commandStatus != status.InProgressStatus { + break + } } } - if !found { - //r.NoError(dockerExec("clickhouse", "cat", "/var/log/clickhouse-server/clickhouse-server.log")) - r.Failf("wrong RBAC", "SHOW %s, %#v doesn't contain %#v", rbacType, rbacRows, expectedValue) - } } - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_rbac_backup")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_rbac_backup")) +} +func testAPIBackupActions(r *require.Assertions, ch *TestClickHouse) { + runClickHouseClientInsertSystemBackupActions(r, ch, []string{"create_remote actions_backup1"}, true) + runClickHouseClientInsertSystemBackupActions(r, ch, []string{"delete local actions_backup1", "restore_remote --rm actions_backup1"}, true) + runClickHouseClientInsertSystemBackupActions(r, ch, []string{"delete local actions_backup1", "delete remote actions_backup1"}, false) - ch.queryWithNoError(r, "DROP SETTINGS PROFILE test_rbac") 
- ch.queryWithNoError(r, "DROP QUOTA test_rbac") - ch.queryWithNoError(r, "DROP ROW POLICY test_rbac ON default.test_rbac") - ch.queryWithNoError(r, "DROP ROLE test_rbac") - ch.queryWithNoError(r, "DROP USER test_rbac") - ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_rbac") - ch.chbackend.Close() + runClickHouseClientInsertSystemBackupActions(r, ch, []string{"create actions_backup2"}, true) + runClickHouseClientInsertSystemBackupActions(r, ch, []string{"upload actions_backup2"}, true) + runClickHouseClientInsertSystemBackupActions(r, ch, []string{"delete local actions_backup2"}, false) + runClickHouseClientInsertSystemBackupActions(r, ch, []string{"download actions_backup2"}, true) + runClickHouseClientInsertSystemBackupActions(r, ch, []string{"restore --rm actions_backup2"}, true) + runClickHouseClientInsertSystemBackupActions(r, ch, []string{"delete local actions_backup2", "delete remote actions_backup2"}, false) + + inProgressActions := make([]struct { + Command string `ch:"command"` + Status string `ch:"status"` + }, 0) + r.NoError(ch.chbackend.StructSelect(&inProgressActions, "SELECT command, status FROM system.backup_actions WHERE command LIKE '%actions%' AND status IN (?,?)", status.InProgressStatus, status.ErrorStatus)) + r.Equal(0, len(inProgressActions), "inProgressActions=%+v", inProgressActions) + + var actionsBackups uint64 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&actionsBackups, "SELECT count() FROM system.backup_list WHERE name LIKE 'backup_action%'")) + r.Equal(uint64(0), actionsBackups) + out, err := dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") + r.NoError(err) + r.Contains(out, "clickhouse_backup_last_create_remote_status 1") + r.Contains(out, "clickhouse_backup_last_create_status 1") + r.Contains(out, "clickhouse_backup_last_upload_status 1") + r.Contains(out, "clickhouse_backup_last_delete_status 1") + r.Contains(out, "clickhouse_backup_last_download_status 1") + r.Contains(out, 
"clickhouse_backup_last_restore_status 1") } -// TestDoRestoreConfigs - require direct access to `/etc/clickhouse-backup/`, so executed inside `clickhouse` container -func TestDoRestoreConfigs(t *testing.T) { - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "1.1.54391") < 0 { - t.Skipf("Test skipped, users.d is not available for %s version", os.Getenv("CLICKHOUSE_VERSION")) +func testAPIWatchAndKill(r *require.Assertions, ch *TestClickHouse) { + log.Info("Check /backup/watch + /backup/kill") + runKillCommand := func(command string) { + out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL 'http://localhost:7171/backup/kill?command=%s'", command)) + log.Debug(out) + r.NoError(err) + } + checkWatchBackup := func(expectedCount uint64) { + var watchBackups uint64 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&watchBackups, "SELECT count() FROM system.backup_list WHERE name LIKE 'shard%'")) + r.Equal(expectedCount, watchBackups) } - ch := &TestClickHouse{} - r := require.New(t) - ch.connectWithWait(r, 0*time.Millisecond, 1*time.Second) - ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_configs") - ch.queryWithNoError(r, "CREATE TABLE default.test_rbac (v UInt64) ENGINE=MergeTree() ORDER BY tuple()") - - r.NoError(dockerCP("config-s3.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) - r.NoError(dockerExec("clickhouse", "bash", "-ce", "echo '1' > /etc/clickhouse-server/users.d/test_config.xml")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", "--configs", "--configs-only", "test_configs_backup")) - ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_configs") - r.NoError(dockerExec("clickhouse", "bash", "-xec", "S3_COMPRESSION_FORMAT=none ALLOW_EMPTY_BACKUPS=1 clickhouse-backup upload test_configs_backup")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", "test_configs_backup")) + checkCanceledCommand := func(expectedCount int) { + canceledCommands := make([]struct { + 
Status string `ch:"status"` + Command string `ch:"command"` + }, 0) + r.NoError(ch.chbackend.StructSelect(&canceledCommands, "SELECT status, command FROM system.backup_actions WHERE command LIKE 'watch%'")) + r.Equal(expectedCount, len(canceledCommands)) + for i := range canceledCommands { + r.Equal("watch", canceledCommands[i].Command) + r.Equal(status.CancelStatus, canceledCommands[i].Status) + } + } - ch.chbackend.Close() - ch.connectWithWait(r, 1*time.Second, 1*time.Second) - ch.queryWithNoError(r, "SYSTEM RELOAD CONFIG") - selectEmptyResultForAggQuery := "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'" - var settings string - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, selectEmptyResultForAggQuery)) - r.Equal("1", settings, "expect empty_result_for_aggregation_by_empty_set=1") + checkWatchBackup(1) + runKillCommand("watch") + checkCanceledCommand(1) - r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml")) - r.NoError(dockerExec("clickhouse", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup download test_configs_backup")) + out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/watch'") + log.Debug(out) + r.NoError(err) + time.Sleep(7 * time.Second) - r.NoError(ch.chbackend.Query("SYSTEM RELOAD CONFIG")) - ch.chbackend.Close() - ch.connectWithWait(r, 1*time.Second, 1*time.Second) + checkWatchBackup(2) + runKillCommand("watch") + checkCanceledCommand(2) +} - settings = "" - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'")) - r.Equal("0", settings, "expect empty_result_for_aggregation_by_empty_set=0") +func testAPIBackupDelete(r *require.Assertions) { + log.Info("Check /backup/delete/{where}/{name}") + for i := 1; i <= apiBackupNumber; i++ { + out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", 
fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/local/z_backup_%d'", i)) + log.Infof(out) + r.NoError(err) + r.NotContains(out, "another operation is currently running") + r.NotContains(out, "\"status\":\"error\"") + out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/remote/z_backup_%d'", i)) + log.Infof(out) + r.NoError(err) + r.NotContains(out, "another operation is currently running") + r.NotContains(out, "\"status\":\"error\"") + } + out, err := dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") + r.NoError(err) + r.Contains(out, "clickhouse_backup_last_delete_status 1") +} - r.NoError(dockerExec("clickhouse", "bash", "-xec", "CLICKHOUSE_RESTART_COMMAND='sql:SYSTEM RELOAD CONFIG' clickhouse-backup restore --rm --configs --configs-only test_configs_backup")) +func testAPIMetrics(r *require.Assertions, ch *TestClickHouse) { + log.Info("Check /metrics clickhouse_backup_last_backup_size_remote") + var lastRemoteSize int64 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&lastRemoteSize, "SELECT size FROM system.backup_list WHERE name='z_backup_5' AND location='remote'")) - ch.chbackend.Close() - ch.connectWithWait(r, 1*time.Second, 1*time.Second) + var realTotalBytes uint64 + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") >= 0 { + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&realTotalBytes, "SELECT sum(total_bytes) FROM system.tables WHERE database='long_schema'")) + } else { + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&realTotalBytes, "SELECT sum(bytes_on_disk) FROM system.parts WHERE database='long_schema'")) + } + r.Greater(realTotalBytes, uint64(0)) + r.Greater(uint64(lastRemoteSize), realTotalBytes) - settings = "" - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'")) - r.Equal("1", settings, "expect 
empty_result_for_aggregation_by_empty_set=1") + out, err := dockerExecOut("clickhouse-backup", "curl", "-sL", "http://localhost:7171/metrics") + log.Debug(out) + r.NoError(err) + r.Contains(out, fmt.Sprintf("clickhouse_backup_last_backup_size_remote %d", lastRemoteSize)) - isTestConfigsTablePresent := 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&isTestConfigsTablePresent, "SELECT count() FROM system.tables WHERE database='default' AND name='test_configs' SETTINGS empty_result_for_aggregation_by_empty_set=1")) - r.Equal(0, isTestConfigsTablePresent, "expect default.test_configs is not present") + log.Info("Check /metrics clickhouse_backup_number_backups_*") + r.Contains(out, fmt.Sprintf("clickhouse_backup_number_backups_local %d", apiBackupNumber)) + // +1 watch backup + r.Contains(out, fmt.Sprintf("clickhouse_backup_number_backups_remote %d", apiBackupNumber+1)) + r.Contains(out, "clickhouse_backup_number_backups_local_expected 0") + r.Contains(out, "clickhouse_backup_number_backups_remote_expected 0") +} - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", "test_configs_backup")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "remote", "test_configs_backup")) - r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml")) +func testAPIDeleteLocalDownloadRestore(r *require.Assertions) { + log.Info("Check /backup/delete/local/{name} + /backup/download/{name} + /backup/restore/{name}?rm=1") + out, err := dockerExecOut( + "clickhouse-backup", + "bash", "-xe", "-c", + fmt.Sprintf("for i in {1..%d}; do date; curl -sfL -XPOST \"http://localhost:7171/backup/delete/local/z_backup_$i\"; curl -sfL -XPOST \"http://localhost:7171/backup/download/z_backup_$i\"; sleep 2; curl -sfL -XPOST \"http://localhost:7171/backup/restore/z_backup_$i?rm=1\"; sleep 8; done", apiBackupNumber), + ) + log.Debug(out) + r.NoError(err) + r.NotContains(out, "another operation is currently running") + 
r.NotContains(out, "\"status\":\"error\"") - ch.chbackend.Close() + out, err = dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") + r.NoError(err) + r.Contains(out, "clickhouse_backup_last_delete_status 1") + r.Contains(out, "clickhouse_backup_last_download_status 1") + r.Contains(out, "clickhouse_backup_last_restore_status 1") } -func TestIntegrationS3(t *testing.T) { - r := require.New(t) - r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - runMainIntegrationScenario(t, "S3") -} +func testAPIBackupList(t *testing.T, r *require.Assertions) { + log.Info("Check /backup/list") + out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list'") + log.Debug(out) + r.NoError(err) + for i := 1; i <= apiBackupNumber; i++ { + r.True(assert.Regexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"local\",\"required\":\"\",\"desc\":\"regular\"}", i)), out)) + r.True(assert.Regexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"remote\",\"required\":\"\",\"desc\":\"tar, regular\"}", i)), out)) + } -func TestIntegrationGCS(t *testing.T) { - if isTestShouldSkip("GCS_TESTS") { - t.Skip("Skipping GCS integration tests...") - return + log.Info("Check /backup/list/local") + out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list/local'") + log.Debug(out) + r.NoError(err) + for i := 1; i <= apiBackupNumber; i++ { + r.True(assert.Regexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"local\",\"required\":\"\",\"desc\":\"regular\"}", i)), out)) + r.True(assert.NotRegexp(t, 
regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"remote\",\"required\":\"\",\"desc\":\"tar, regular\"}", i)), out)) } - r := require.New(t) - r.NoError(dockerCP("config-gcs.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - installDebIfNotExists(r, "clickhouse-backup", "ca-certificates") - r.NoError(dockerExec("clickhouse-backup", "update-ca-certificates")) - runMainIntegrationScenario(t, "GCS") -} -func TestIntegrationAzure(t *testing.T) { - if isTestShouldSkip("AZURE_TESTS") { - t.Skip("Skipping Azure integration tests...") - return + log.Info("Check /backup/list/remote") + out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list/remote'") + log.Debug(out) + r.NoError(err) + for i := 1; i <= apiBackupNumber; i++ { + r.True(assert.NotRegexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"local\",\"required\":\"\",\"desc\":\"regular\"}", i)), out)) + r.True(assert.Regexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"remote\",\"required\":\"\",\"desc\":\"tar, regular\"}", i)), out)) } - r := require.New(t) - r.NoError(dockerCP("config-azblob.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - installDebIfNotExists(r, "clickhouse-backup", "ca-certificates") - r.NoError(dockerExec("clickhouse-backup", "update-ca-certificates")) - runMainIntegrationScenario(t, "AZBLOB") } -func TestIntegrationSFTPAuthPassword(t *testing.T) { - r := require.New(t) - r.NoError(dockerCP("config-sftp-auth-password.yaml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - runMainIntegrationScenario(t, "SFTP") +func testAPIBackupUpload(r *require.Assertions) { + log.Info("Check /backup/upload") + out, err := 
dockerExecOut( + "clickhouse-backup", + "bash", "-xe", "-c", + fmt.Sprintf("for i in {1..%d}; do date; curl -sfL -XPOST \"http://localhost:7171/backup/upload/z_backup_$i\"; sleep 2; done", apiBackupNumber), + ) + log.Debug(out) + r.NoError(err) + r.NotContains(out, "\"status\":\"error\"") + r.NotContains(out, "another operation is currently running") + out, err = dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") + r.NoError(err) + r.Contains(out, "clickhouse_backup_last_upload_status 1") } -func TestIntegrationFTP(t *testing.T) { - r := require.New(t) - r.NoError(dockerCP("config-ftp.yaml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - runMainIntegrationScenario(t, "FTP") -} +func testAPIBackupTables(r *require.Assertions) { + log.Info("Check /backup/tables") + out, err := dockerExecOut( + "clickhouse-backup", + "bash", "-xe", "-c", "curl -sfL \"http://localhost:7171/backup/tables\"", + ) + log.Debug(out) + r.NoError(err) + r.Contains(out, "long_schema") + r.NotContains(out, "Connection refused") + r.NotContains(out, "another operation is currently running") + r.NotContains(out, "\"status\":\"error\"") + r.NotContains(out, "system") + r.NotContains(out, "INFORMATION_SCHEMA") + r.NotContains(out, "information_schema") -func TestIntegrationSFTPAuthKey(t *testing.T) { - r := require.New(t) - r.NoError(dockerCP("config-sftp-auth-key.yaml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) + log.Info("Check /backup/tables/all") + out, err = dockerExecOut( + "clickhouse-backup", + "bash", "-xe", "-c", "curl -sfL \"http://localhost:7171/backup/tables/all\"", + ) + log.Debug(out) + r.NoError(err) + r.Contains(out, "long_schema") + r.Contains(out, "system") + r.NotContains(out, "Connection refused") + r.NotContains(out, "another operation is currently running") + r.NotContains(out, "\"status\":\"error\"") + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.3") >= 0 { + r.Contains(out, "INFORMATION_SCHEMA") + r.Contains(out, 
"information_schema") + } +} - uploadSSHKeys(r, "clickhouse-backup") +func testAPIBackupCreate(r *require.Assertions) { + log.Info("Check /backup/create") + out, err := dockerExecOut( + "clickhouse-backup", + "bash", "-xe", "-c", + fmt.Sprintf("sleep 3; for i in {1..%d}; do date; curl -sfL -XPOST \"http://localhost:7171/backup/create?table=long_schema.*&name=z_backup_$i\"; sleep 1.5; done", apiBackupNumber), + ) + log.Debug(out) + r.NoError(err) + r.NotContains(out, "Connection refused") + r.NotContains(out, "another operation is currently running") + r.NotContains(out, "\"status\":\"error\"") + out, err = dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") + r.NoError(err) + r.Contains(out, "clickhouse_backup_last_create_status 1") - runMainIntegrationScenario(t, "SFTP") } -func TestIntegrationCustom(t *testing.T) { - r := require.New(t) - installDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") - r.NoError(dockerExec("clickhouse-backup", "update-ca-certificates")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xce", "curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq")) - for _, customType := range []string{"restic", "kopia", "rsync"} { - if customType == "rsync" { - uploadSSHKeys(r, "clickhouse-backup") - installDebIfNotExists(r, "clickhouse-backup", "openssh-client", "rsync", "jq") - } - if customType == "restic" { - r.NoError(dockerExec("minio", "rm", "-rf", "/data/clickhouse/*")) - installDebIfNotExists(r, "clickhouse-backup", "curl", "jq", "bzip2") - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "RELEASE_TAG=$(curl -H 'Accept: application/json' -sL https://github.com/restic/restic/releases/latest | jq -c -r -M '.tag_name'); RELEASE=$(echo ${RELEASE_TAG} | sed -e 's/v//'); curl -sfL \"https://github.com/restic/restic/releases/download/${RELEASE_TAG}/restic_${RELEASE}_linux_amd64.bz2\" | bzip2 -d > 
/bin/restic; chmod +x /bin/restic")) - } - if customType == "kopia" { - r.NoError(dockerExec("minio", "bash", "-ce", "rm -rfv /data/clickhouse/*")) - installDebIfNotExists(r, "clickhouse-backup", "pgp", "curl") - r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", "curl -sfL https://kopia.io/signing-key | gpg --dearmor -o /usr/share/keyrings/kopia-keyring.gpg")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", "echo 'deb [signed-by=/usr/share/keyrings/kopia-keyring.gpg] https://packages.kopia.io/apt/ stable main' > /etc/apt/sources.list.d/kopia.list")) - installDebIfNotExists(r, "clickhouse-backup", "kopia", "jq") +func fillDatabaseForAPIServer(maxTables int, minFields int, randFields int, ch *TestClickHouse, r *require.Assertions, fieldTypes []string) { + log.Infof("Create %d `long_schema`.`t%%d` tables with with %d..%d fields...", maxTables, minFields, minFields+randFields) + ch.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS long_schema") + for i := 0; i < maxTables; i++ { + sql := fmt.Sprintf("CREATE TABLE long_schema.t%d (id UInt64", i) + fieldsCount := minFields + rand.Intn(randFields) + for j := 0; j < fieldsCount; j++ { + fieldType := fieldTypes[rand.Intn(len(fieldTypes))] + sql += fmt.Sprintf(", f%d %s", j, fieldType) } - r.NoError(dockerCP("config-custom-"+customType+".yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - r.NoError(dockerExec("clickhouse-backup", "mkdir", "-pv", "/custom/"+customType)) - r.NoError(dockerCP("./"+customType+"/", "clickhouse-backup:/custom/")) - runMainIntegrationScenario(t, "CUSTOM") + sql += ") ENGINE=MergeTree() ORDER BY id" + ch.queryWithNoError(r, sql) + sql = fmt.Sprintf("INSERT INTO long_schema.t%d(id) SELECT number FROM numbers(100)", i) + ch.queryWithNoError(r, sql) } + log.Info("...DONE") } -func TestIntegrationEmbedded(t *testing.T) { - //t.Skipf("Test skipped, wait 23.8, RESTORE Ordinary table and RESTORE MATERIALIZED VIEW and {uuid} not works for %s version, look 
https://github.com/ClickHouse/ClickHouse/issues/43971 and https://github.com/ClickHouse/ClickHouse/issues/42709", os.Getenv("CLICKHOUSE_VERSION")) - //dependencies restore https://github.com/ClickHouse/ClickHouse/issues/39416, fixed in 23.3 - version := os.Getenv("CLICKHOUSE_VERSION") - if version != "head" && compareVersion(version, "23.3") < 0 { - t.Skipf("Test skipped, BACKUP/RESTORE not production ready for %s version", version) +func TestSkipNotExistsTable(t *testing.T) { + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "19.1") < 0 { + t.Skip("TestSkipNotExistsTable too small time between `SELECT DISTINCT partition_id` and `ALTER TABLE ... FREEZE PARTITION`") } - r := require.New(t) - //CUSTOM backup create folder in each disk - r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_s3/backup/")) - r.NoError(dockerCP("config-s3-embedded.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - runMainIntegrationScenario(t, "EMBEDDED_S3") - //@TODO uncomment when resolve slow azure BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/52088 - //r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/")) - //r.NoError(dockerCP("config-azblob-embedded.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - //runMainIntegrationScenario(t, "EMBEDDED_AZURE") - //@TODO think about how to implements embedded backup for s3_plain disks - //r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/")) - //r.NoError(dockerCP("config-s3-plain-embedded.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - //runMainIntegrationScenario(t, "EMBEDDED_S3_PLAIN") -} - -func TestLongListRemote(t *testing.T) { + //t.Parallel() ch := &TestClickHouse{} r := require.New(t) ch.connectWithWait(r, 0*time.Second, 1*time.Second) defer ch.chbackend.Close() - totalCacheCount := 20 - testBackupName := "test_list_remote" - err := 
dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml") + + log.Info("Check skip not exist errors") + ch.queryWithNoError(r, "CREATE DATABASE freeze_not_exists") + ifNotExistsCreateSQL := "CREATE TABLE IF NOT EXISTS freeze_not_exists.freeze_not_exists (id UInt64) ENGINE=MergeTree() ORDER BY id" + ifNotExistsInsertSQL := "INSERT INTO freeze_not_exists.freeze_not_exists SELECT number FROM numbers(1000)" + chVersion, err := ch.chbackend.GetVersion(context.Background()) r.NoError(err) - for i := 0; i < totalCacheCount; i++ { - r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", fmt.Sprintf("ALLOW_EMPTY_BACKUPS=true clickhouse-backup create_remote %s_%d", testBackupName, i))) + freezeErrorHandled := false + pauseChannel := make(chan int64) + resumeChannel := make(chan int64) + ch.chbackend.Config.LogSQLQueries = true + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer func() { + close(pauseChannel) + wg.Done() + }() + pause := int64(0) + // pausePercent := int64(90) + for i := int64(0); i < 100; i++ { + testBackupName := fmt.Sprintf("not_exists_%d", i) + err = ch.chbackend.Query(ifNotExistsCreateSQL) + r.NoError(err) + err = ch.chbackend.Query(ifNotExistsInsertSQL) + r.NoError(err) + if i < 5 { + log.Infof("pauseChannel <- %d", 0) + pauseChannel <- 0 + } else { + log.Infof("pauseChannel <- %d", pause/i) + pauseChannel <- pause / i + } + startTime := time.Now() + out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "LOG_LEVEL=debug CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup create --table freeze_not_exists.freeze_not_exists "+testBackupName) + log.Info(out) + if (err != nil && (strings.Contains(out, "can't freeze") || strings.Contains(out, "no tables for backup"))) || + (err == nil && !strings.Contains(out, "can't freeze")) { + parseTime := func(line string) time.Time { + parsedTime, err := time.Parse("2006/01/02 15:04:05.999999", line[:26]) + if err != nil { + r.Failf("Error parsing 
time", "%s, : %v", line, err) + } + return parsedTime + } + lines := strings.Split(out, "\n") + firstTime := parseTime(lines[0]) + var freezeTime time.Time + for _, line := range lines { + if strings.Contains(line, "create_table_query") { + freezeTime = parseTime(line) + break + } + if strings.Contains(line, "SELECT DISTINCT partition_id") { + freezeTime = parseTime(line) + break + } + } + pause += (firstTime.Sub(startTime) + freezeTime.Sub(firstTime)).Nanoseconds() + } + if err != nil { + if !strings.Contains(out, "no tables for backup") { + assert.NoError(t, err) + } + } + + if strings.Contains(out, "code: 60") && err == nil { + freezeErrorHandled = true + <-resumeChannel + r.NoError(dockerExec("clickhouse-backup", "bash", "-ec", "CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup delete local "+testBackupName)) + break + } + if err == nil { + err = dockerExec("clickhouse-backup", "bash", "-ec", "CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup delete local "+testBackupName) + assert.NoError(t, err) + } + <-resumeChannel + } + }() + wg.Add(1) + go func() { + defer func() { + close(resumeChannel) + wg.Done() + }() + for pause := range pauseChannel { + log.Infof("%d <- pauseChannel", pause) + if pause > 0 { + pauseStart := time.Now() + time.Sleep(time.Duration(pause) * time.Nanosecond) + log.Infof("pause=%s pauseStart=%s", time.Duration(pause).String(), pauseStart.String()) + err = ch.chbackend.DropTable(clickhouse.Table{Database: "freeze_not_exists", Name: "freeze_not_exists"}, ifNotExistsCreateSQL, "", false, chVersion) + r.NoError(err) + } + resumeChannel <- 1 + } + }() + wg.Wait() + r.True(freezeErrorHandled) + dropDbSQL := "DROP DATABASE freeze_not_exists" + if isAtomic, err := ch.chbackend.IsAtomic("freeze_not_exists"); err == nil && isAtomic { + dropDbSQL += " SYNC" } + ch.queryWithNoError(r, dropDbSQL) +} - r.NoError(dockerExec("clickhouse-backup", "rm", "-rfv", 
"/tmp/.clickhouse-backup-metadata.cache.S3")) - r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "minio")) - time.Sleep(2 * time.Second) +func TestTablePatterns(t *testing.T) { + //t.Parallel() + ch := &TestClickHouse{} + r := require.New(t) + ch.connectWithWait(r, 500*time.Millisecond, 5*time.Second) + defer ch.chbackend.Close() - startFirst := time.Now() - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "list", "remote")) - noCacheDuration := time.Since(startFirst) + testBackupName := "test_backup_patterns" + databaseList := []string{dbNameOrdinary, dbNameAtomic} + var dbNameOrdinaryTest = dbNameOrdinary + "_" + t.Name() + var dbNameAtomicTest = dbNameAtomic + "_" + t.Name() - r.NoError(dockerExec("clickhouse-backup", "chmod", "-Rv", "+r", "/tmp/.clickhouse-backup-metadata.cache.S3")) + for _, createPattern := range []bool{true, false} { + for _, restorePattern := range []bool{true, false} { + fullCleanup(t, r, ch, []string{testBackupName}, []string{"remote", "local"}, databaseList, false, false, "config-s3.yml") + generateTestData(t, r, ch, "S3", defaultTestData) + if createPattern { + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName)) + } else { + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", testBackupName)) + } - startCashed := time.Now() - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "list", "remote")) - cashedDuration := time.Since(startCashed) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", testBackupName)) + dropDatabasesFromTestDataDataSet(t, r, ch, databaseList) + if restorePattern { + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName)) + } else { + 
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", testBackupName)) + } - r.Greater(noCacheDuration, cashedDuration) + restored := uint64(0) + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&restored, fmt.Sprintf("SELECT count() FROM system.tables WHERE database='%s'", dbNameOrdinaryTest))) + r.NotZero(restored) - r.NoError(dockerExec("clickhouse-backup", "rm", "-Rfv", "/tmp/.clickhouse-backup-metadata.cache.S3")) - r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "minio")) - time.Sleep(2 * time.Second) + if createPattern || restorePattern { + restored = 0 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&restored, fmt.Sprintf("SELECT count() FROM system.tables WHERE database='%s'", dbNameAtomicTest))) + // todo, old versions of clickhouse will return empty recordset + r.Zero(restored) - startCacheClear := time.Now() - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "list", "remote")) - cacheClearDuration := time.Since(startCacheClear) + restored = 0 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&restored, fmt.Sprintf("SELECT count() FROM system.databases WHERE name='%s'", dbNameAtomicTest))) + // todo, old versions of clickhouse will return empty recordset + r.Zero(restored) + } else { + restored = 0 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&restored, fmt.Sprintf("SELECT count() FROM system.tables WHERE database='%s'", dbNameAtomicTest))) + r.NotZero(restored) + } - r.Greater(cacheClearDuration, cashedDuration) - log.Infof("noCacheDuration=%s cachedDuration=%s cacheClearDuration=%s", noCacheDuration.String(), cashedDuration.String(), cacheClearDuration.String()) + fullCleanup(t, r, ch, []string{testBackupName}, []string{"remote", "local"}, databaseList, true, true, "config-s3.yml") - testListRemoteAllBackups := make([]string, totalCacheCount) - for i := 0; i < totalCacheCount; i++ { - testListRemoteAllBackups[i] = fmt.Sprintf("%s_%d", 
testBackupName, i) + } } - fullCleanup(r, ch, testListRemoteAllBackups, []string{"remote", "local"}, []string{}, true, true) } -func TestRestoreDatabaseMapping(t *testing.T) { - r := require.New(t) - r.NoError(dockerCP("config-database-mapping.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) +func TestProjections(t *testing.T) { + var err error + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.8") == -1 { + t.Skipf("Test skipped, PROJECTION available only 21.8+, current version %s", os.Getenv("CLICKHOUSE_VERSION")) + } + //t.Parallel() ch := &TestClickHouse{} - ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second) + r := require.New(t) + ch.connectWithWait(r, 0*time.Second, 1*time.Second) defer ch.chbackend.Close() - checkRecordset := func(expectedRows int, expectedCount uint64, query string) { - result := make([]struct { - Count uint64 `ch:"count()"` - }, 0) - r.NoError(ch.chbackend.Select(&result, query)) - r.Equal(expectedRows, len(result), "expect %d row", expectedRows) - r.Equal(expectedCount, result[0].Count, "expect count=%d", expectedCount) - } - - testBackupName := "test_restore_database_mapping" - databaseList := []string{"database1", "database2"} - fullCleanup(r, ch, []string{testBackupName}, []string{"local"}, databaseList, false, false) - - ch.queryWithNoError(r, "CREATE DATABASE database1") - ch.queryWithNoError(r, "CREATE TABLE database1.t1 (dt DateTime, v UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/database1/t1','{replica}') PARTITION BY toYYYYMM(dt) ORDER BY dt") - ch.queryWithNoError(r, "CREATE TABLE database1.d1 AS database1.t1 ENGINE=Distributed('{cluster}',database1, t1)") - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.3") < 0 { - ch.queryWithNoError(r, "CREATE TABLE database1.t2 AS database1.t1 ENGINE=ReplicatedMergeTree('/clickhouse/tables/database1/t2','{replica}') PARTITION BY toYYYYMM(dt) ORDER BY dt") - } else { - ch.queryWithNoError(r, "CREATE TABLE database1.t2 AS database1.t1 
ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/{table}','{replica}') PARTITION BY toYYYYMM(dt) ORDER BY dt") - } - ch.queryWithNoError(r, "CREATE MATERIALIZED VIEW database1.mv1 TO database1.t2 AS SELECT * FROM database1.t1") - ch.queryWithNoError(r, "CREATE VIEW database1.v1 AS SELECT * FROM database1.t1") - ch.queryWithNoError(r, "INSERT INTO database1.t1 SELECT '2022-01-01 00:00:00', number FROM numbers(10)") - log.Info("Create backup") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", testBackupName)) - - log.Info("Restore schema") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--schema", "--rm", "--restore-database-mapping", "database1:database2", "--tables", "database1.*", testBackupName)) - - log.Info("Check result database1") - ch.queryWithNoError(r, "INSERT INTO database1.t1 SELECT '2023-01-01 00:00:00', number FROM numbers(10)") - checkRecordset(1, 20, "SELECT count() FROM database1.t1") - checkRecordset(1, 20, "SELECT count() FROM database1.d1") - checkRecordset(1, 20, "SELECT count() FROM database1.mv1") - checkRecordset(1, 20, "SELECT count() FROM database1.v1") - - log.Info("Drop database1") - isAtomic, err := ch.chbackend.IsAtomic("database1") + r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) + err = ch.chbackend.Query("CREATE TABLE default.table_with_projection(dt DateTime, v UInt64, PROJECTION x (SELECT toStartOfMonth(dt) m, sum(v) GROUP BY m)) ENGINE=MergeTree() ORDER BY dt") r.NoError(err) - if isAtomic { - ch.queryWithNoError(r, "DROP DATABASE database1 SYNC") - } else { - ch.queryWithNoError(r, "DROP DATABASE database1") - } - - log.Info("Restore data") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--data", "--restore-database-mapping", "database1:database2", "--tables", "database1.*", testBackupName)) - log.Info("Check result database2") - checkRecordset(1, 10, "SELECT count() FROM database2.t1") - 
checkRecordset(1, 10, "SELECT count() FROM database2.d1") - checkRecordset(1, 10, "SELECT count() FROM database2.mv1") - checkRecordset(1, 10, "SELECT count() FROM database2.v1") + err = ch.chbackend.Query("INSERT INTO default.table_with_projection SELECT today() - INTERVAL number DAY, number FROM numbers(10)") + r.NoError(err) - log.Info("Check database1 not exists") - checkRecordset(1, 0, "SELECT count() FROM system.databases WHERE name='database1'") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_backup_projection")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--rm", "test_backup_projection")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection")) + var counts uint64 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&counts, "SELECT count() FROM default.table_with_projection")) + r.Equal(uint64(10), counts) + err = ch.chbackend.Query("DROP TABLE default.table_with_projection NO DELAY") + r.NoError(err) - fullCleanup(r, ch, []string{testBackupName}, []string{"local"}, databaseList, true, true) } -func TestMySQLMaterialized(t *testing.T) { - t.Skipf("Wait when fix DROP TABLE not supported by MaterializedMySQL, just attach will not help") - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.12") == -1 { - t.Skipf("MaterializedMySQL doens't support for clickhouse version %s", os.Getenv("CLICKHOUSE_VERSION")) +func TestKeepBackupRemoteAndDiffFromRemote(t *testing.T) { + if isTestShouldSkip("RUN_ADVANCED_TESTS") { + t.Skip("Skipping Advanced integration tests...") + return } + //t.Parallel() r := require.New(t) - r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - r.NoError(dockerExec("mysql", "mysql", "-u", "root", "--password=root", "-v", "-e", "CREATE DATABASE ch_mysql_repl")) ch := &TestClickHouse{} - ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second) - defer ch.chbackend.Close() - engine := 
"MaterializedMySQL" - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.9") == -1 { - engine = "MaterializeMySQL" + ch.connectWithWait(r, 500*time.Millisecond, 2*time.Second) + backupNames := make([]string, 5) + for i := 0; i < 5; i++ { + backupNames[i] = fmt.Sprintf("keep_remote_backup_%d", i) } - ch.queryWithNoError(r, fmt.Sprintf("CREATE DATABASE ch_mysql_repl ENGINE=%s('mysql:3306','ch_mysql_repl','root','root')", engine)) - r.NoError(dockerExec("mysql", "mysql", "-u", "root", "--password=root", "-v", "-e", "CREATE TABLE ch_mysql_repl.t1 (id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, s VARCHAR(255)); INSERT INTO ch_mysql_repl.t1(s) VALUES('s1'),('s2'),('s3')")) - time.Sleep(1 * time.Second) - - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_mysql_materialized")) - ch.queryWithNoError(r, "DROP DATABASE ch_mysql_repl") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "test_mysql_materialized")) - - result := 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM ch_mysql_repl.t1")) - r.Equal(3, result, "expect count=3") - - ch.queryWithNoError(r, "DROP DATABASE ch_mysql_repl") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_mysql_materialized")) + databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Atomic, Issue331Ordinary} + fullCleanup(t, r, ch, backupNames, []string{"remote", "local"}, databaseList, false, false, "config-s3.yml") + generateTestData(t, r, ch, "S3", defaultTestData) + for i, backupName := range backupNames { + generateIncrementTestData(t, ch, r, defaultIncrementData) + if i == 0 { + r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", fmt.Sprintf("BACKUPS_TO_KEEP_REMOTE=3 CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup create_remote %s", backupName))) + } else { + r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", 
fmt.Sprintf("BACKUPS_TO_KEEP_REMOTE=3 CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup create_remote --diff-from-remote=%s %s", backupNames[i-1], backupName))) + } + } + out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml list local") + r.NoError(err) + // shall not delete any backup, cause all deleted backup have links as required in other backups + for _, backupName := range backupNames { + r.Contains(out, backupName) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", backupName)) + } + latestIncrementBackup := fmt.Sprintf("keep_remote_backup_%d", len(backupNames)-1) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "download", latestIncrementBackup)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "--rm", latestIncrementBackup)) + var res uint64 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&res, fmt.Sprintf("SELECT count() FROM `%s_%s`.`%s_%s`", Issue331Atomic, t.Name(), Issue331Atomic, t.Name()))) + r.Equal(uint64(200), res) + fullCleanup(t, r, ch, backupNames, []string{"remote", "local"}, databaseList, true, true, "config-s3.yml") } -func TestPostgreSQLMaterialized(t *testing.T) { - t.Skipf("Wait when fix https://github.com/ClickHouse/ClickHouse/issues/44250") - - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.11") == -1 { - t.Skipf("MaterializedPostgreSQL doens't support for clickhouse version %s", os.Getenv("CLICKHOUSE_VERSION")) +func TestSyncReplicaTimeout(t *testing.T) { + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "19.11") == -1 { + t.Skipf("Test skipped, SYNC REPLICA ignore receive_timeout for %s version", os.Getenv("CLICKHOUSE_VERSION")) } + //t.Parallel() r := require.New(t) - r.NoError(dockerCP("config-s3.yml", 
"clickhouse-backup:/etc/clickhouse-backup/config.yml")) - r.NoError(dockerExec("pgsql", "bash", "-ce", "echo 'CREATE DATABASE ch_pgsql_repl' | PGPASSWORD=root psql -v ON_ERROR_STOP=1 -U root")) - r.NoError(dockerExec("pgsql", "bash", "-ce", "echo \"CREATE TABLE t1 (id BIGINT PRIMARY KEY, s VARCHAR(255)); INSERT INTO t1(id, s) VALUES(1,'s1'),(2,'s2'),(3,'s3')\" | PGPASSWORD=root psql -v ON_ERROR_STOP=1 -U root -d ch_pgsql_repl")) ch := &TestClickHouse{} - ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second) + ch.connectWithWait(r, 0*time.Millisecond, 2*time.Second) defer ch.chbackend.Close() - ch.queryWithNoError(r, - "CREATE DATABASE ch_pgsql_repl ENGINE=MaterializedPostgreSQL('pgsql:5432','ch_pgsql_repl','root','root') "+ - "SETTINGS materialized_postgresql_allow_automatic_update = 1, materialized_postgresql_schema = 'public'", - ) - time.Sleep(1 * time.Second) + createDbSQL := "CREATE DATABASE IF NOT EXISTS " + t.Name() + ch.queryWithNoError(r, createDbSQL) + dropReplTables := func() { + for _, table := range []string{"repl1", "repl2"} { + query := "DROP TABLE IF EXISTS " + t.Name() + "." 
+ table + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.3") == 1 { + query += " NO DELAY" + } + ch.queryWithNoError(r, query) + } + } + dropReplTables() + ch.queryWithNoError(r, "CREATE TABLE "+t.Name()+".repl1 (v UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/"+t.Name()+"/repl','repl1') ORDER BY tuple()") + ch.queryWithNoError(r, "CREATE TABLE "+t.Name()+".repl2 (v UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/"+t.Name()+"/repl','repl2') ORDER BY tuple()") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_pgsql_materialized")) - ch.queryWithNoError(r, "DROP DATABASE ch_pgsql_repl") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "test_pgsql_materialized")) + ch.queryWithNoError(r, "INSERT INTO "+t.Name()+".repl1 SELECT number FROM numbers(10)") - result := 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM ch_pgsql_repl.t1")) - r.Equal(3, result, "expect count=3") + ch.queryWithNoError(r, "SYSTEM STOP REPLICATED SENDS "+t.Name()+".repl1") + ch.queryWithNoError(r, "SYSTEM STOP FETCHES "+t.Name()+".repl2") - ch.queryWithNoError(r, "DROP DATABASE ch_pgsql_repl") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_pgsql_materialized")) -} + ch.queryWithNoError(r, "INSERT INTO "+t.Name()+".repl1 SELECT number FROM numbers(100)") -func uploadSSHKeys(r *require.Assertions, container string) { - r.NoError(dockerCP("sftp/clickhouse-backup_rsa", container+":/id_rsa")) - r.NoError(dockerExec(container, "cp", "-vf", "/id_rsa", "/tmp/id_rsa")) - r.NoError(dockerExec(container, "chmod", "-v", "0600", "/tmp/id_rsa")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "--tables="+t.Name()+".repl*", "test_not_synced_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "upload", 
"test_not_synced_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_not_synced_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "remote", "test_not_synced_backup")) - r.NoError(dockerCP("sftp/clickhouse-backup_rsa.pub", "sshd:/root/.ssh/authorized_keys")) - r.NoError(dockerExec("sshd", "chown", "-v", "root:root", "/root/.ssh/authorized_keys")) - r.NoError(dockerExec("sshd", "chmod", "-v", "0600", "/root/.ssh/authorized_keys")) -} + ch.queryWithNoError(r, "SYSTEM START REPLICATED SENDS "+t.Name()+".repl1") + ch.queryWithNoError(r, "SYSTEM START FETCHES "+t.Name()+".repl2") -func runMainIntegrationScenario(t *testing.T, remoteStorageType string) { - var out string - var err error + dropReplTables() + r.NoError(ch.dropDatabase(t.Name())) +} +func TestGetPartitionId(t *testing.T) { + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "19.17") == -1 { + t.Skipf("Test skipped, is_in_partition_key not available for %s version", os.Getenv("CLICKHOUSE_VERSION")) + } + //t.Parallel() r := require.New(t) ch := &TestClickHouse{} - ch.connectWithWait(r, 500*time.Millisecond, 1*time.Minute) + ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second) defer ch.chbackend.Close() - // test for specified partitions backup - testBackupSpecifiedPartitions(r, ch, remoteStorageType) + type testData struct { + CreateTableSQL string + Database string + Table string + Partition string + ExpectedId string + ExpectedName string + } + testCases := []testData{ + { + "CREATE TABLE default.test_part_id_1 UUID 'b45e751f-6c06-42a3-ab4a-f5bb9ac3716e' (dt Date, version DateTime, category String, name String) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{shard}/{database}/{table}','{replica}',version) ORDER BY dt PARTITION BY (toYYYYMM(dt),category)", + "default", + "test_part_id_1", + 
"('2023-01-01','category1')", + "cc1ad6ede2e7f708f147e132cac7a590", + "(202301,'category1')", + }, + { + "CREATE TABLE default.test_part_id_2 (dt Date, version DateTime, name String) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{shard}/{database}/{table}','{replica}',version) ORDER BY dt PARTITION BY toYYYYMM(dt)", + "default", + "test_part_id_2", + "'2023-01-01'", + "202301", + "202301", + }, + { + "CREATE TABLE default.test_part_id_3 ON CLUSTER '{cluster}' (i UInt32, name String) ENGINE = ReplicatedMergeTree() ORDER BY i PARTITION BY i", + "default", + "test_part_id_3", + "202301", + "202301", + "202301", + }, + { + "CREATE TABLE default.test_part_id_4 (dt String, name String) ENGINE = MergeTree ORDER BY dt PARTITION BY dt", + "default", + "test_part_id_4", + "'2023-01-01'", + "c487903ebbb25a533634d6ec3485e3a9", + "2023-01-01", + }, + { + "CREATE TABLE default.test_part_id_5 (dt String, name String) ENGINE = Memory", + "default", + "test_part_id_5", + "'2023-01-01'", + "", + "", + }, + } + if isAtomic, _ := ch.chbackend.IsAtomic("default"); !isAtomic { + testCases[0].CreateTableSQL = strings.Replace(testCases[0].CreateTableSQL, "UUID 'b45e751f-6c06-42a3-ab4a-f5bb9ac3716e'", "", 1) + } + for _, tc := range testCases { + partitionId, partitionName, err := partition.GetPartitionIdAndName(context.Background(), ch.chbackend, tc.Database, tc.Table, tc.CreateTableSQL, tc.Partition) + assert.NoError(t, err) + assert.Equal(t, tc.ExpectedId, partitionId) + assert.Equal(t, tc.ExpectedName, partitionName) + } +} - // main test scenario - testBackupName := fmt.Sprintf("test_backup_%d", rand.Int()) - incrementBackupName := fmt.Sprintf("increment_%d", rand.Int()) - databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Atomic, Issue331Ordinary} +func TestRestoreMutationInProgress(t *testing.T) { + //t.Parallel() + r := require.New(t) + ch := &TestClickHouse{} + ch.connectWithWait(r, 0*time.Second, 5*time.Second) + defer 
ch.chbackend.Close() + version, err := ch.chbackend.GetVersion(context.Background()) + r.NoError(err) + zkPath := "/clickhouse/tables/{shard}/" + t.Name() + "/test_restore_mutation_in_progress" + onCluster := "" + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") >= 0 { + zkPath = "/clickhouse/tables/{shard}/{database}/{table}" + } + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.3") >= 0 { + zkPath = "/clickhouse/tables/{shard}/{database}/{table}/{uuid}" + onCluster = " ON CLUSTER '{cluster}'" + } + createDbSQL := "CREATE DATABASE IF NOT EXISTS " + t.Name() + ch.queryWithNoError(r, createDbSQL) - log.Info("Clean before start") - fullCleanup(r, ch, []string{testBackupName, incrementBackupName}, []string{"remote", "local"}, databaseList, false, false) + createSQL := fmt.Sprintf("CREATE TABLE %s.test_restore_mutation_in_progress %s (id UInt64, attr String) ENGINE=ReplicatedMergeTree('%s','{replica}') PARTITION BY id ORDER BY id", t.Name(), onCluster, zkPath) + ch.queryWithNoError(r, createSQL) + ch.queryWithNoError(r, "INSERT INTO "+t.Name()+".test_restore_mutation_in_progress SELECT number, if(number>0,'a',toString(number)) FROM numbers(2)") - r.NoError(dockerExec("minio", "mc", "ls", "local/clickhouse/disk_s3")) - generateTestData(ch, r, remoteStorageType) + mutationSQL := "ALTER TABLE " + t.Name() + ".test_restore_mutation_in_progress MODIFY COLUMN attr UInt64" + err = ch.chbackend.QueryContext(context.Background(), mutationSQL) + if err != nil { + errStr := strings.ToLower(err.Error()) + r.True(strings.Contains(errStr, "code: 341") || strings.Contains(errStr, "code: 517") || strings.Contains(errStr, "timeout"), "UNKNOWN ERROR: %s", err.Error()) + t.Logf("%s RETURN EXPECTED ERROR=%#v", mutationSQL, err) + } - r.NoError(dockerExec("minio", "mc", "ls", "local/clickhouse/disk_s3")) - log.Info("Create backup") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", testBackupName)) + attrs := make([]struct { + Attr uint64 
`ch:"attr"` + }, 0) + err = ch.chbackend.Select(&attrs, "SELECT attr FROM "+t.Name()+".test_restore_mutation_in_progress ORDER BY id") + r.NotEqual(nil, err) + errStr := strings.ToLower(err.Error()) + r.True(strings.Contains(errStr, "code: 53") || strings.Contains(errStr, "code: 6")) + r.Zero(len(attrs)) - generateIncrementTestData(ch, r) + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") >= 0 { + mutationSQL = "ALTER TABLE " + t.Name() + ".test_restore_mutation_in_progress RENAME COLUMN attr TO attr_1" + err = ch.chbackend.QueryContext(context.Background(), mutationSQL) + r.NotEqual(nil, err) + errStr = strings.ToLower(err.Error()) + r.True(strings.Contains(errStr, "code: 517") || strings.Contains(errStr, "timeout")) + t.Logf("%s RETURN EXPECTED ERROR=%#v", mutationSQL, err) + } + r.NoError(dockerExec("clickhouse", "clickhouse", "client", "-q", "SELECT * FROM system.mutations WHERE is_done=0 FORMAT Vertical")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", incrementBackupName)) + // backup with check consistency + out, createErr := dockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "--tables="+t.Name()+".test_restore_mutation_in_progress", "test_restore_mutation_in_progress") + r.NotEqual(createErr, nil) + r.Contains(out, "have inconsistent data types") + t.Log(out) - log.Info("Upload") - uploadCmd := fmt.Sprintf("%s_COMPRESSION_FORMAT=zstd clickhouse-backup upload --resume %s", remoteStorageType, testBackupName) - checkResumeAlreadyProcessed(uploadCmd, testBackupName, "upload", r, remoteStorageType) + // backup without check consistency + out, createErr = dockerExecOut("clickhouse-backup", "clickhouse-backup", "create", "-c", "/etc/clickhouse-backup/config-s3.yml", "--skip-check-parts-columns", "--tables="+t.Name()+".test_restore_mutation_in_progress", "test_restore_mutation_in_progress") + t.Log(out) + r.NoError(createErr) + r.NotContains(out, "have inconsistent 
data types") - //diffFrom := []string{"--diff-from", "--diff-from-remote"}[rand.Intn(2)] - diffFrom := "--diff-from-remote" - uploadCmd = fmt.Sprintf("clickhouse-backup upload %s %s %s --resume", incrementBackupName, diffFrom, testBackupName) - checkResumeAlreadyProcessed(uploadCmd, incrementBackupName, "upload", r, remoteStorageType) + r.NoError(ch.chbackend.DropTable(clickhouse.Table{Database: t.Name(), Name: "test_restore_mutation_in_progress"}, "", "", false, version)) + var restoreErr error + restoreErr = dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "--rm", "--tables="+t.Name()+".test_restore_mutation_in_progress", "test_restore_mutation_in_progress") + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") >= 0 && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.8") < 0 { + r.NotEqual(restoreErr, nil) + } else { + r.NoError(restoreErr) + } - backupDir := "/var/lib/clickhouse/backup" - if strings.HasPrefix(remoteStorageType, "EMBEDDED") { - backupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + attrs = make([]struct { + Attr uint64 `ch:"attr"` + }, 0) + checkRestoredData := "attr" + if restoreErr == nil { + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") >= 0 { + checkRestoredData = "attr_1 AS attr" + } } - out, err = dockerExecOut("clickhouse-backup", "ls", "-lha", backupDir) - r.NoError(err) - r.Equal(5, len(strings.Split(strings.Trim(out, " \t\r\n"), "\n")), "expect '2' backups exists in backup directory") - log.Info("Delete backup") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", testBackupName)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", incrementBackupName)) - out, err = dockerExecOut("clickhouse-backup", "ls", "-lha", backupDir) - r.NoError(err) - r.Equal(3, len(strings.Split(strings.Trim(out, " \t\r\n"), "\n")), "expect '0' backup 
exists in backup directory") + selectSQL := fmt.Sprintf("SELECT %s FROM "+t.Name()+".test_restore_mutation_in_progress ORDER BY id", checkRestoredData) + selectErr := ch.chbackend.Select(&attrs, selectSQL) + expectedSelectResults := make([]struct { + Attr uint64 `ch:"attr"` + }, 1) + expectedSelectResults[0].Attr = 0 - dropDatabasesFromTestDataDataSet(r, ch, databaseList) + expectedSelectError := "code: 517" - log.Info("Download") - downloadCmd := fmt.Sprintf("clickhouse-backup download --resume %s", testBackupName) - checkResumeAlreadyProcessed(downloadCmd, testBackupName, "download", r, remoteStorageType) + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") < 0 { + expectedSelectResults = make([]struct { + Attr uint64 `ch:"attr"` + }, 2) + expectedSelectError = "" + } + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") >= 0 && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.8") < 0 { + expectedSelectError = "" + } + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.8") >= 0 { + expectedSelectError = "code: 6" + expectedSelectResults = make([]struct { + Attr uint64 `ch:"attr"` + }, 0) + } + r.Equal(expectedSelectResults, attrs) + if expectedSelectError != "" { + r.Error(selectErr) + r.Contains(strings.ToLower(selectErr.Error()), expectedSelectError) + t.Logf("%s RETURN EXPECTED ERROR=%#v", selectSQL, selectErr) + } else { + r.NoError(selectErr) + } - log.Info("Restore schema") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--schema", testBackupName)) + r.NoError(dockerExec("clickhouse", "clickhouse", "client", "-q", "SELECT * FROM system.mutations FORMAT Vertical")) - log.Info("Restore data") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--data", testBackupName)) + r.NoError(ch.chbackend.DropTable(clickhouse.Table{Database: t.Name(), Name: "test_restore_mutation_in_progress"}, "", "", false, version)) + r.NoError(ch.dropDatabase(t.Name())) + 
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_restore_mutation_in_progress")) +} - log.Info("Full restore with rm") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--rm", testBackupName)) +func TestInnerTablesMaterializedView(t *testing.T) { + //t.Parallel() + ch := &TestClickHouse{} + r := require.New(t) + ch.connectWithWait(r, 1*time.Second, 10*time.Second) + defer ch.chbackend.Close() - log.Info("Check data") - for i := range testData { - if testData[i].CheckDatabaseOnly { - r.NoError(ch.checkDatabaseEngine(t, testData[i])) - } else { - if isTableSkip(ch, testData[i], true) { - continue - } - r.NoError(ch.checkData(t, testData[i], r)) - } + ch.queryWithNoError(r, "CREATE DATABASE test_mv") + ch.queryWithNoError(r, "CREATE TABLE test_mv.src_table (v UInt64) ENGINE=MergeTree() ORDER BY v") + ch.queryWithNoError(r, "CREATE TABLE test_mv.dst_table (v UInt64) ENGINE=MergeTree() ORDER BY v") + ch.queryWithNoError(r, "CREATE MATERIALIZED VIEW test_mv.mv_with_inner (v UInt64) ENGINE=MergeTree() ORDER BY v AS SELECT v FROM test_mv.src_table") + ch.queryWithNoError(r, "CREATE MATERIALIZED VIEW test_mv.mv_with_dst TO test_mv.dst_table AS SELECT v FROM test_mv.src_table") + ch.queryWithNoError(r, "INSERT INTO test_mv.src_table SELECT number FROM numbers(100)") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) + dropSQL := "DROP DATABASE test_mv" + isAtomic, err := ch.chbackend.IsAtomic("test_mv") + r.NoError(err) + if isAtomic { + dropSQL += " NO DELAY" } - // test increment - dropDatabasesFromTestDataDataSet(r, ch, databaseList) - - log.Info("Delete backup") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", testBackupName)) - - log.Info("Download increment") - downloadCmd = fmt.Sprintf("clickhouse-backup download --resume %s", incrementBackupName) 
- checkResumeAlreadyProcessed(downloadCmd, incrementBackupName, "download", r, remoteStorageType) + ch.queryWithNoError(r, dropSQL) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) + var rowCnt uint64 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_inner")) + r.Equal(uint64(100), rowCnt) + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_dst")) + r.Equal(uint64(100), rowCnt) + r.NoError(ch.dropDatabase("test_mv")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_mv")) +} - log.Info("Restore") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--schema", "--data", incrementBackupName)) +func TestFIPS(t *testing.T) { + if os.Getenv("QA_AWS_ACCESS_KEY") == "" { + t.Skip("QA_AWS_ACCESS_KEY is empty, TestFIPS will skip") + } + //t.Parallel() + ch := &TestClickHouse{} + r := require.New(t) + ch.connectWithWait(r, 1*time.Second, 10*time.Second) + defer ch.chbackend.Close() + fipsBackupName := fmt.Sprintf("fips_backup_%d", rand.Int()) + r.NoError(dockerExec("clickhouse", "rm", "-fv", "/etc/apt/sources.list.d/clickhouse.list")) + installDebIfNotExists(r, "clickhouse", "curl", "gettext-base", "bsdmainutils", "dnsutils", "git") + r.NoError(dockerCP("config-s3-fips.yml", "clickhouse:/etc/clickhouse-backup/config.yml.fips-template")) + r.NoError(dockerExec("clickhouse", "git", "clone", "--depth", "1", "https://github.com/drwetter/testssl.sh.git", "/opt/testssl")) + r.NoError(dockerExec("clickhouse", "chmod", "+x", "/opt/testssl/testssl.sh")) - log.Info("Check increment data") - for i := range testData { - testDataItem := testData[i] - if isTableSkip(ch, testDataItem, true) || testDataItem.IsDictionary { - continue - } - for _, incrementDataItem := range 
incrementData { - if testDataItem.Database == incrementDataItem.Database && testDataItem.Name == incrementDataItem.Name { - testDataItem.Rows = append(testDataItem.Rows, incrementDataItem.Rows...) - } - } - if testDataItem.CheckDatabaseOnly { - r.NoError(ch.checkDatabaseEngine(t, testDataItem)) - } else { - r.NoError(ch.checkData(t, testDataItem, r)) + generateCerts := func(certType, keyLength, curveType string) { + r.NoError(dockerExec("clickhouse", "bash", "-xce", "openssl rand -out /root/.rnd 2048")) + switch certType { + case "rsa": + r.NoError(dockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl genrsa -out /etc/clickhouse-backup/ca-key.pem %s", keyLength))) + r.NoError(dockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl genrsa -out /etc/clickhouse-backup/server-key.pem %s", keyLength))) + case "ecdsa": + r.NoError(dockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl ecparam -name %s -genkey -out /etc/clickhouse-backup/ca-key.pem", curveType))) + r.NoError(dockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl ecparam -name %s -genkey -out /etc/clickhouse-backup/server-key.pem", curveType))) } - + r.NoError(dockerExec("clickhouse", "bash", "-xce", "openssl req -subj \"/O=altinity\" -x509 -new -nodes -key /etc/clickhouse-backup/ca-key.pem -sha256 -days 365000 -out /etc/clickhouse-backup/ca-cert.pem")) + r.NoError(dockerExec("clickhouse", "bash", "-xce", "openssl req -subj \"/CN=localhost\" -addext \"subjectAltName = DNS:localhost,DNS:*.cluster.local\" -new -key /etc/clickhouse-backup/server-key.pem -out /etc/clickhouse-backup/server-req.csr")) + r.NoError(dockerExec("clickhouse", "bash", "-xce", "openssl x509 -req -days 365000 -extensions SAN -extfile <(printf \"\\n[SAN]\\nsubjectAltName=DNS:localhost,DNS:*.cluster.local\") -in /etc/clickhouse-backup/server-req.csr -out /etc/clickhouse-backup/server-cert.pem -CA /etc/clickhouse-backup/ca-cert.pem -CAkey /etc/clickhouse-backup/ca-key.pem -CAcreateserial")) } + 
r.NoError(dockerExec("clickhouse", "bash", "-c", "cat /etc/clickhouse-backup/config-s3-fips.yml.template | envsubst > /etc/clickhouse-backup/config-s3-fips.yml")) - // test end - log.Info("Clean after finish") - // CUSTOM and EMBEDDED download increment doesn't download full - if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") { - fullCleanup(r, ch, []string{incrementBackupName}, []string{"local"}, nil, true, false) - fullCleanup(r, ch, []string{testBackupName, incrementBackupName}, []string{"remote"}, databaseList, true, true) - } else { - fullCleanup(r, ch, []string{testBackupName, incrementBackupName}, []string{"remote", "local"}, databaseList, true, true) - } -} + generateCerts("rsa", "4096", "") + ch.queryWithNoError(r, "CREATE DATABASE "+t.Name()) + createSQL := "CREATE TABLE " + t.Name() + ".fips_table (v UInt64) ENGINE=MergeTree() ORDER BY tuple()" + ch.queryWithNoError(r, createSQL) + r.NoError(dockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml create_remote --tables="+t.Name()+".fips_table "+fipsBackupName)) + r.NoError(dockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml delete local "+fipsBackupName)) + r.NoError(dockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml restore_remote --tables="+t.Name()+".fips_table "+fipsBackupName)) + r.NoError(dockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml delete local "+fipsBackupName)) + r.NoError(dockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml delete remote "+fipsBackupName)) -func testBackupSpecifiedPartitions(r *require.Assertions, ch *TestClickHouse, remoteStorageType string) { - log.Info("testBackupSpecifiedPartitions started") - var err error - var out string - var result, expectedCount 
uint64 + log.Info("Run `clickhouse-backup-fips server` in background") + r.NoError(dockerExec("-d", "clickhouse", "bash", "-ce", "AWS_USE_FIPS_ENDPOINT=true clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml server &>>/tmp/clickhouse-backup-server-fips.log")) + time.Sleep(1 * time.Second) - partitionBackupName := fmt.Sprintf("partition_backup_%d", rand.Int()) - fullBackupName := fmt.Sprintf("full_backup_%d", rand.Int()) - // Create and fill tables - ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.t1") - ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.t2") - ch.queryWithNoError(r, "CREATE TABLE default.t1 (dt Date, v UInt64) ENGINE=MergeTree() PARTITION BY toYYYYMMDD(dt) ORDER BY dt") - ch.queryWithNoError(r, "CREATE TABLE default.t2 (dt String, v UInt64) ENGINE=MergeTree() PARTITION BY dt ORDER BY dt") - for _, dt := range []string{"2022-01-01", "2022-01-02", "2022-01-03", "2022-01-04"} { - ch.queryWithNoError(r, fmt.Sprintf("INSERT INTO default.t1 SELECT '%s', number FROM numbers(10)", dt)) - ch.queryWithNoError(r, fmt.Sprintf("INSERT INTO default.t2 SELECT '%s', number FROM numbers(10)", dt)) - } + runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("create_remote --tables="+t.Name()+".fips_table %s", fipsBackupName)}, true) + runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("delete local %s", fipsBackupName)}, false) + runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("restore_remote --tables="+t.Name()+".fips_table %s", fipsBackupName)}, true) + runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("delete local %s", fipsBackupName)}, false) + runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("delete remote %s", fipsBackupName)}, false) - // check create_remote full > download + partitions > delete local > download > restore --partitions > restore - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", 
"create_remote", "--tables=default.t*", fullBackupName)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", fullBackupName)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "download", "--partitions=('2022-01-02'),('2022-01-03')", fullBackupName)) - fullBackupDir := "/var/lib/clickhouse/backup/" + fullBackupName + "/shadow/default/t?/default/" - if strings.HasPrefix(remoteStorageType, "EMBEDDED") { - fullBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + fullBackupName + "/data/default/t?" - } - out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+fullBackupDir+" | wc -l") - r.NoError(err) - expectedLines := "13" - // custom storage doesn't support --partitions for upload / download now - // embedded storage contain hardLink files and will download additional data parts - if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") { - expectedLines = "17" - } - r.Equal(expectedLines, strings.Trim(out, "\r\n\t ")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", fullBackupName)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "download", fullBackupName)) + inProgressActions := make([]struct { + Command string `ch:"command"` + Status string `ch:"status"` + }, 0) + r.NoError(ch.chbackend.StructSelect(&inProgressActions, + "SELECT command, status FROM system.backup_actions WHERE command LIKE ? 
AND status IN (?,?)", + fmt.Sprintf("%%%s%%", fipsBackupName), status.InProgressStatus, status.ErrorStatus, + )) + r.Equal(0, len(inProgressActions), "inProgressActions=%+v", inProgressActions) + r.NoError(dockerExec("clickhouse", "pkill", "-n", "-f", "clickhouse-backup-fips")) - fullBackupDir = "/var/lib/clickhouse/backup/" + fullBackupName + "/shadow/default/t?/default/" - if strings.HasPrefix(remoteStorageType, "EMBEDDED") { - fullBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + fullBackupName + "/data/default/t?" - } - out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+fullBackupDir+"| wc -l") - r.NoError(err) - r.Equal("17", strings.Trim(out, "\r\n\t ")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--partitions=('2022-01-02'),('2022-01-03')", fullBackupName)) - result = 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT sum(c) FROM (SELECT count() AS c FROM default.t1 UNION ALL SELECT count() AS c FROM default.t2)")) - expectedCount = 40 - r.Equal(expectedCount, result, fmt.Sprintf("expect count=%d", expectedCount)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", fullBackupName)) - result = 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT sum(c) FROM (SELECT count() AS c FROM default.t1 UNION ALL SELECT count() AS c FROM default.t2)")) - r.Equal(uint64(80), result, "expect count=80") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", fullBackupName)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", fullBackupName)) + testTLSCerts := func(certType, keyLength, curveName string, cipherList ...string) { + generateCerts(certType, keyLength, curveName) + log.Infof("Run `clickhouse-backup-fips server` in background for %s %s %s", certType, keyLength, curveName) + r.NoError(dockerExec("-d", "clickhouse", "bash", "-ce", 
"AWS_USE_FIPS_ENDPOINT=true clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml server &>>/tmp/clickhouse-backup-server-fips.log")) + time.Sleep(1 * time.Second) - // check create + partitions - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "--tables=default.t1", "--partitions=20220102,20220103", partitionBackupName)) - partitionBackupDir := "/var/lib/clickhouse/backup/" + partitionBackupName + "/shadow/default/t1/default/" - if strings.HasPrefix(remoteStorageType, "EMBEDDED") { - partitionBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + partitionBackupName + "/data/default/t1" - } - out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+partitionBackupDir+"| wc -l") - r.NoError(err) - r.Equal("5", strings.Trim(out, "\r\n\t ")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", partitionBackupName)) + r.NoError(dockerExec("clickhouse", "bash", "-ce", "rm -rf /tmp/testssl* && /opt/testssl/testssl.sh -e -s -oC /tmp/testssl.csv --color 0 --disable-rating --quiet -n min --mode parallel --add-ca /etc/clickhouse-backup/ca-cert.pem localhost:7172")) + out, err := dockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("grep -c -E '%s' /tmp/testssl.csv", strings.Join(cipherList, "|"))) + r.NoError(err) + r.Equal(strconv.Itoa(len(cipherList)), strings.Trim(out, " \t\r\n")) - // check create > upload + partitions - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "--tables=default.t1", partitionBackupName)) - partitionBackupDir = "/var/lib/clickhouse/backup/" + partitionBackupName + "/shadow/default/t1/default/" - if strings.HasPrefix(remoteStorageType, "EMBEDDED") { - partitionBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + partitionBackupName + "/data/default/t1" + inProgressActions := make([]struct { + Command 
string `ch:"command"` + Status string `ch:"status"` + }, 0) + r.NoError(ch.chbackend.StructSelect(&inProgressActions, + "SELECT command, status FROM system.backup_actions WHERE command LIKE ? AND status IN (?,?)", + fmt.Sprintf("%%%s%%", fipsBackupName), status.InProgressStatus, status.ErrorStatus, + )) + r.Equal(0, len(inProgressActions), "inProgressActions=%+v", inProgressActions) + r.NoError(dockerExec("clickhouse", "pkill", "-n", "-f", "clickhouse-backup-fips")) } - out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+partitionBackupDir+" | wc -l") - r.NoError(err) - r.Equal("7", strings.Trim(out, "\r\n\t ")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "upload", "--tables=default.t1", "--partitions=20220102,20220103", partitionBackupName)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", partitionBackupName)) + // https://www.perplexity.ai/search/0920f1e8-59ec-4e14-b779-ba7b2e037196 + testTLSCerts("rsa", "4096", "", "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES256-GCM-SHA384", "AES128-GCM-SHA256", "AES256-GCM-SHA384") + testTLSCerts("ecdsa", "", "prime256v1", "ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-ECDSA-AES256-GCM-SHA384") + r.NoError(ch.chbackend.DropTable(clickhouse.Table{Database: t.Name(), Name: "fips_table"}, createSQL, "", false, 0)) + r.NoError(ch.dropDatabase(t.Name())) - // restore partial uploaded - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", partitionBackupName)) +} - // Check partial restored t1 - result = 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM default.t1")) +func TestIntegrationS3(t *testing.T) { + //t.Parallel() + runMainIntegrationScenario(t, "S3", "config-s3.yml") +} - expectedCount = 20 - // custom and embedded doesn't support --partitions in upload and download - if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") { - expectedCount = 40 +func TestIntegrationGCS(t 
*testing.T) { + if isTestShouldSkip("GCS_TESTS") { + t.Skip("Skipping GCS integration tests...") + return } - r.Equal(expectedCount, result, fmt.Sprintf("expect count=%d", expectedCount)) + //t.Parallel() + runMainIntegrationScenario(t, "GCS", "config-gcs.yml") +} - // Check only selected partitions restored - result = 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM default.t1 WHERE dt NOT IN ('2022-01-02','2022-01-03')")) - expectedCount = 0 - // custom and embedded doesn't support --partitions in upload and download - if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") { - expectedCount = 20 +func TestIntegrationAzure(t *testing.T) { + if isTestShouldSkip("AZURE_TESTS") { + t.Skip("Skipping Azure integration tests...") + return } - r.Equal(expectedCount, result, "expect count=0") + //t.Parallel() + runMainIntegrationScenario(t, "AZBLOB", "config-azblob.yml") +} - // DELETE backup. - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", partitionBackupName)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", partitionBackupName)) +func TestIntegrationSFTPAuthPassword(t *testing.T) { + //t.Parallel() + runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-password.yaml") +} - ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.t1") - ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.t2") +func TestIntegrationFTP(t *testing.T) { + //t.Parallel() + runMainIntegrationScenario(t, "FTP", "config-ftp.yaml") +} - log.Info("testBackupSpecifiedPartitions finish") +func TestIntegrationSFTPAuthKey(t *testing.T) { + uploadSSHKeys(require.New(t), "clickhouse-backup") + //t.Parallel() + runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-key.yaml") } -func checkResumeAlreadyProcessed(backupCmd, testBackupName, resumeKind string, r *require.Assertions, remoteStorageType string) { - // backupCmd = fmt.Sprintf("%s & PID=$!; sleep 0.7; kill -9 $PID; 
cat /var/lib/clickhouse/backup/%s/upload.state; sleep 0.3; %s", backupCmd, testBackupName, backupCmd) - if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") { - backupCmd = strings.Replace(backupCmd, "--resume", "", 1) - } else { - backupCmd = fmt.Sprintf("%s; cat /var/lib/clickhouse/backup/%s/%s.state; %s", backupCmd, testBackupName, resumeKind, backupCmd) - } - out, err := dockerExecOut("clickhouse-backup", "bash", "-xce", backupCmd) - log.Info(out) - r.NoError(err) - if strings.Contains(backupCmd, "--resume") { - r.Contains(out, "already processed") - } +func TestIntegrationCustomKopia(t *testing.T) { + //t.Parallel() + r := require.New(t) + runIntegrationCustom(t, r, "kopia") +} +func TestIntegrationCustomRestic(t *testing.T) { + //t.Parallel() + r := require.New(t) + runIntegrationCustom(t, r, "restic") } -func fullCleanup(r *require.Assertions, ch *TestClickHouse, backupNames, backupTypes, databaseList []string, checkDeleteErr, checkDropErr bool) { - for _, backupName := range backupNames { - for _, backupType := range backupTypes { - err := dockerExec("clickhouse-backup", "clickhouse-backup", "delete", backupType, backupName) - if checkDeleteErr { - r.NoError(err) - } - } - } - otherBackupList, err := dockerExecOut("clickhouse", "ls", "-1", "/var/lib/clickhouse/backup") - if err == nil { - for _, backupName := range strings.Split(otherBackupList, "\n") { - if backupName != "" { - err := dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", backupName) - if checkDropErr { - r.NoError(err) - } - } - } - } +func TestIntegrationCustomRsync(t *testing.T) { + r := require.New(t) + uploadSSHKeys(r, "clickhouse-backup") + //t.Parallel() + runIntegrationCustom(t, r, "rsync") +} - dropDatabasesFromTestDataDataSet(r, ch, databaseList) +func runIntegrationCustom(t *testing.T, r *require.Assertions, customType string) { + r.NoError(dockerExec("clickhouse-backup", "mkdir", "-pv", "/custom/"+customType)) + 
r.NoError(dockerCP("./"+customType+"/", "clickhouse-backup:/custom/")) + runMainIntegrationScenario(t, "CUSTOM", "config-custom-"+customType+".yml") } -func generateTestData(ch *TestClickHouse, r *require.Assertions, remoteStorageType string) { - log.Infof("Generate test data %s", remoteStorageType) - testData = defaultTestData - generateTestDataWithDifferentStoragePolicy(remoteStorageType) - for _, data := range testData { - if isTableSkip(ch, data, false) { - continue - } - r.NoError(ch.createTestSchema(data, remoteStorageType)) - } - for _, data := range testData { - if isTableSkip(ch, data, false) { - continue - } - r.NoError(ch.createTestData(data)) +func TestIntegrationEmbedded(t *testing.T) { + //t.Skipf("Test skipped, wait 23.8, RESTORE Ordinary table and RESTORE MATERIALIZED VIEW and {uuid} not works for %s version, look https://github.com/ClickHouse/ClickHouse/issues/43971 and https://github.com/ClickHouse/ClickHouse/issues/42709", os.Getenv("CLICKHOUSE_VERSION")) + //dependencies restore https://github.com/ClickHouse/ClickHouse/issues/39416, fixed in 23.3 + version := os.Getenv("CLICKHOUSE_VERSION") + if version != "head" && compareVersion(version, "23.3") < 0 { + t.Skipf("Test skipped, BACKUP/RESTORE not production ready for %s version", version) } + //t.Parallel() + r := require.New(t) + //CUSTOM backup create folder in each disk + r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_s3/backup/")) + runMainIntegrationScenario(t, "EMBEDDED_S3", "config-s3-embedded.yml") + //@TODO uncomment when resolve slow azure BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/52088 + //r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/")) + //runMainIntegrationScenario(t, "EMBEDDED_AZURE", "config-azblob-embedded.yml") + //@TODO think about how to implements embedded backup for s3_plain disks + //r.NoError(dockerExec("clickhouse", "rm", "-rf", 
"/var/lib/clickhouse/disks/backups_s3_plain/backup/")) + //runMainIntegrationScenario(t, "EMBEDDED_S3_PLAIN", "config-s3-plain-embedded.yml") } -func generateTestDataWithDifferentStoragePolicy(remoteStorageType string) { - for databaseName, databaseEngine := range map[string]string{dbNameOrdinary: "Ordinary", dbNameAtomic: "Atomic"} { - testDataWithStoragePolicy := TestDataStruct{ - Database: databaseName, DatabaseEngine: databaseEngine, - Rows: func() []map[string]interface{} { - result := make([]map[string]interface{}, 100) - for i := 0; i < 100; i++ { - result[i] = map[string]interface{}{"id": uint64(i)} - } - return result - }(), - Fields: []string{"id"}, - OrderBy: "id", - } - addTestDataIfNotExists := func() { - found := false - for _, data := range testData { - if data.Name == testDataWithStoragePolicy.Name && data.Database == testDataWithStoragePolicy.Database { - found = true - break - } - } - if !found { - testData = append(testData, testDataWithStoragePolicy) - } - } - //s3 disks support after 21.8 - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.8") >= 0 && remoteStorageType == "S3" { - testDataWithStoragePolicy.Name = "test_s3" - testDataWithStoragePolicy.Schema = "(id UInt64) Engine=MergeTree ORDER BY id SETTINGS storage_policy = 's3_only'" - addTestDataIfNotExists() - } - //encrypted disks support after 21.10 - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.10") >= 0 { - testDataWithStoragePolicy.Name = "test_hdd3_encrypted" - testDataWithStoragePolicy.Schema = "(id UInt64) Engine=MergeTree ORDER BY id SETTINGS storage_policy = 'hdd3_only_encrypted'" - addTestDataIfNotExists() - } - //encrypted s3 disks support after 21.12 - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 && remoteStorageType == "S3" { - testDataWithStoragePolicy.Name = "test_s3_encrypted" - testDataWithStoragePolicy.Schema = "(id UInt64) Engine=MergeTree ORDER BY id SETTINGS storage_policy = 's3_only_encrypted'" - addTestDataIfNotExists() - } - //gcs 
over s3 support added in 22.6 - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.6") >= 0 && remoteStorageType == "GCS" && os.Getenv("QA_GCS_OVER_S3_BUCKET") != "" { - testDataWithStoragePolicy.Name = "test_gcs" - testDataWithStoragePolicy.Schema = "(id UInt64) Engine=MergeTree ORDER BY id SETTINGS storage_policy = 'gcs_only'" - addTestDataIfNotExists() - } - //check azure_blob_storage only in 23.3+ (added in 22.1) - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "23.3") >= 0 && remoteStorageType == "AZBLOB" { - testDataWithStoragePolicy.Name = "test_azure" - testDataWithStoragePolicy.Schema = "(id UInt64) Engine=MergeTree ORDER BY id SETTINGS storage_policy = 'azure_only'" - addTestDataIfNotExists() - } +func TestRestoreDatabaseMapping(t *testing.T) { + //t.Parallel() + r := require.New(t) + ch := &TestClickHouse{} + ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second) + defer ch.chbackend.Close() + checkRecordset := func(expectedRows int, expectedCount uint64, query string) { + result := make([]struct { + Count uint64 `ch:"count()"` + }, 0) + r.NoError(ch.chbackend.Select(&result, query)) + r.Equal(expectedRows, len(result), "expect %d row", expectedRows) + r.Equal(expectedCount, result[0].Count, "expect count=%d", expectedCount) } -} -func generateIncrementTestData(ch *TestClickHouse, r *require.Assertions) { - log.Info("Generate increment test data") - incrementData = defaultIncrementData - for _, data := range incrementData { - if isTableSkip(ch, data, false) { - continue - } - r.NoError(ch.createTestData(data)) + testBackupName := "test_restore_database_mapping" + databaseList := []string{"database1", "database2"} + fullCleanup(t, r, ch, []string{testBackupName}, []string{"local"}, databaseList, false, false, "config-database-mapping.yml") + + ch.queryWithNoError(r, "CREATE DATABASE database1") + ch.queryWithNoError(r, "CREATE TABLE database1.t1 (dt DateTime, v UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/database1/t1','{replica}') 
PARTITION BY toYYYYMM(dt) ORDER BY dt") + ch.queryWithNoError(r, "CREATE TABLE database1.d1 AS database1.t1 ENGINE=Distributed('{cluster}',database1, t1)") + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.3") < 0 { + ch.queryWithNoError(r, "CREATE TABLE database1.t2 AS database1.t1 ENGINE=ReplicatedMergeTree('/clickhouse/tables/database1/t2','{replica}') PARTITION BY toYYYYMM(dt) ORDER BY dt") + } else { + ch.queryWithNoError(r, "CREATE TABLE database1.t2 AS database1.t1 ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/{table}','{replica}') PARTITION BY toYYYYMM(dt) ORDER BY dt") } + ch.queryWithNoError(r, "CREATE MATERIALIZED VIEW database1.mv1 TO database1.t2 AS SELECT * FROM database1.t1") + ch.queryWithNoError(r, "CREATE VIEW database1.v1 AS SELECT * FROM database1.t1") + ch.queryWithNoError(r, "INSERT INTO database1.t1 SELECT '2022-01-01 00:00:00', number FROM numbers(10)") + + log.Info("Create backup") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "create", testBackupName)) + + log.Info("Restore schema") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", "--schema", "--rm", "--restore-database-mapping", "database1:database2", "--tables", "database1.*", testBackupName)) + + log.Info("Check result database1") + ch.queryWithNoError(r, "INSERT INTO database1.t1 SELECT '2023-01-01 00:00:00', number FROM numbers(10)") + checkRecordset(1, 20, "SELECT count() FROM database1.t1") + checkRecordset(1, 20, "SELECT count() FROM database1.d1") + checkRecordset(1, 20, "SELECT count() FROM database1.mv1") + checkRecordset(1, 20, "SELECT count() FROM database1.v1") + + log.Info("Drop database1") + r.NoError(ch.dropDatabase("database1")) + + log.Info("Restore data") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", 
"--data", "--restore-database-mapping", "database1:database2", "--tables", "database1.*", testBackupName)) + + log.Info("Check result database2") + checkRecordset(1, 10, "SELECT count() FROM database2.t1") + checkRecordset(1, 10, "SELECT count() FROM database2.d1") + checkRecordset(1, 10, "SELECT count() FROM database2.mv1") + checkRecordset(1, 10, "SELECT count() FROM database2.v1") + + log.Info("Check database1 not exists") + checkRecordset(1, 0, "SELECT count() FROM system.databases WHERE name='database1'") + + fullCleanup(t, r, ch, []string{testBackupName}, []string{"local"}, databaseList, true, true, "config-database-mapping.yml") } -func dropDatabasesFromTestDataDataSet(r *require.Assertions, ch *TestClickHouse, databaseList []string) { - log.Info("Drop all databases") - for _, db := range databaseList { - r.NoError(ch.dropDatabase(db)) +func TestMySQLMaterialized(t *testing.T) { + t.Skipf("Wait when fix DROP TABLE not supported by MaterializedMySQL, just attach will not help") + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.12") == -1 { + t.Skipf("MaterializedMySQL doens't support for clickhouse version %s", os.Getenv("CLICKHOUSE_VERSION")) + } + //t.Parallel() + r := require.New(t) + r.NoError(dockerExec("mysql", "mysql", "-u", "root", "--password=root", "-v", "-e", "CREATE DATABASE ch_mysql_repl")) + ch := &TestClickHouse{} + ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second) + defer ch.chbackend.Close() + engine := "MaterializedMySQL" + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.9") == -1 { + engine = "MaterializeMySQL" } + ch.queryWithNoError(r, fmt.Sprintf("CREATE DATABASE ch_mysql_repl ENGINE=%s('mysql:3306','ch_mysql_repl','root','root')", engine)) + r.NoError(dockerExec("mysql", "mysql", "-u", "root", "--password=root", "-v", "-e", "CREATE TABLE ch_mysql_repl.t1 (id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, s VARCHAR(255)); INSERT INTO ch_mysql_repl.t1(s) VALUES('s1'),('s2'),('s3')")) + time.Sleep(1 * 
time.Second) + + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "test_mysql_materialized")) + ch.queryWithNoError(r, "DROP DATABASE ch_mysql_repl") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_mysql_materialized")) + + result := 0 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM ch_mysql_repl.t1")) + r.Equal(3, result, "expect count=3") + + ch.queryWithNoError(r, "DROP DATABASE ch_mysql_repl") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_mysql_materialized")) } -const apiBackupNumber = 5 +func TestPostgreSQLMaterialized(t *testing.T) { + t.Skipf("Wait when fix https://github.com/ClickHouse/ClickHouse/issues/44250") -func TestServerAPI(t *testing.T) { - ch := &TestClickHouse{} - r := require.New(t) - ch.connectWithWait(r, 0*time.Second, 10*time.Second) - defer func() { - ch.chbackend.Close() - }() - r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - fieldTypes := []string{"UInt64", "String", "Int"} - installDebIfNotExists(r, "clickhouse-backup", "curl") - maxTables := 10 - minFields := 10 - randFields := 10 - fillDatabaseForAPIServer(maxTables, minFields, randFields, ch, r, fieldTypes) + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.11") == -1 { + t.Skipf("MaterializedPostgreSQL doens't support for clickhouse version %s", os.Getenv("CLICKHOUSE_VERSION")) + } + //t.Parallel() + r := require.New(t) + r.NoError(dockerExec("pgsql", "bash", "-ce", "echo 'CREATE DATABASE ch_pgsql_repl' | PGPASSWORD=root psql -v ON_ERROR_STOP=1 -U root")) + r.NoError(dockerExec("pgsql", "bash", "-ce", "echo \"CREATE TABLE t1 (id BIGINT PRIMARY KEY, s VARCHAR(255)); INSERT INTO t1(id, s) VALUES(1,'s1'),(2,'s2'),(3,'s3')\" | PGPASSWORD=root psql -v ON_ERROR_STOP=1 -U root 
-d ch_pgsql_repl")) + ch := &TestClickHouse{} + ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second) + defer ch.chbackend.Close() - log.Info("Run `clickhouse-backup server --watch` in background") - r.NoError(dockerExec("-d", "clickhouse-backup", "bash", "-ce", "clickhouse-backup server --watch &>>/tmp/clickhouse-backup-server.log")) + ch.queryWithNoError(r, + "CREATE DATABASE ch_pgsql_repl ENGINE=MaterializedPostgreSQL('pgsql:5432','ch_pgsql_repl','root','root') "+ + "SETTINGS materialized_postgresql_allow_automatic_update = 1, materialized_postgresql_schema = 'public'", + ) time.Sleep(1 * time.Second) - testAPIBackupCreate(r) - - testAPIBackupTables(r) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "test_pgsql_materialized")) + ch.queryWithNoError(r, "DROP DATABASE ch_pgsql_repl") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_pgsql_materialized")) - log.Info("Check /backup/actions") - ch.queryWithNoError(r, "SELECT count() FROM system.backup_actions") + result := 0 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM ch_pgsql_repl.t1")) + r.Equal(3, result, "expect count=3") - testAPIBackupUpload(r) + ch.queryWithNoError(r, "DROP DATABASE ch_pgsql_repl") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_pgsql_materialized")) +} - testAPIBackupList(t, r) +func uploadSSHKeys(r *require.Assertions, container string) { + r.NoError(dockerCP("sftp/clickhouse-backup_rsa", container+":/id_rsa")) + r.NoError(dockerExec(container, "cp", "-vf", "/id_rsa", "/tmp/id_rsa")) + r.NoError(dockerExec(container, "chmod", "-v", "0600", "/tmp/id_rsa")) - testAPIDeleteLocalDownloadRestore(r) + r.NoError(dockerCP("sftp/clickhouse-backup_rsa.pub", "sshd:/root/.ssh/authorized_keys")) + r.NoError(dockerExec("sshd", 
"chown", "-v", "root:root", "/root/.ssh/authorized_keys")) + r.NoError(dockerExec("sshd", "chmod", "-v", "0600", "/root/.ssh/authorized_keys")) +} - testAPIMetrics(r, ch) +func runMainIntegrationScenario(t *testing.T, remoteStorageType, backupConfig string) { + var out string + var err error - testAPIWatchAndKill(r, ch) + r := require.New(t) + ch := &TestClickHouse{} + ch.connectWithWait(r, 500*time.Millisecond, 1*time.Minute) + defer ch.chbackend.Close() - testAPIBackupActions(r, ch) + // test for specified partitions backup + testBackupSpecifiedPartitions(t, r, ch, remoteStorageType, backupConfig) - testAPIRestart(r, ch) + // main test scenario + testBackupName := fmt.Sprintf("%s_full_%d", t.Name(), rand.Int()) + incrementBackupName := fmt.Sprintf("%s_increment_%d", t.Name(), rand.Int()) + databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Atomic, Issue331Ordinary} + tablesPattern := fmt.Sprintf("*_%s.*", t.Name()) + log.Info("Clean before start") + fullCleanup(t, r, ch, []string{testBackupName, incrementBackupName}, []string{"remote", "local"}, databaseList, false, false, backupConfig) - testAPIBackupDelete(r) + r.NoError(dockerExec("minio", "mc", "ls", "local/clickhouse/disk_s3")) + testData := generateTestData(t, r, ch, remoteStorageType, defaultTestData) - r.NoError(dockerExec("clickhouse-backup", "pkill", "-n", "-f", "clickhouse-backup")) - r.NoError(ch.dropDatabase("long_schema")) -} + r.NoError(dockerExec("minio", "mc", "ls", "local/clickhouse/disk_s3")) + log.Info("Create backup") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables", tablesPattern, testBackupName)) + generateIncrementTestData(t, ch, r, defaultIncrementData) -func testAPIRestart(r *require.Assertions, ch *TestClickHouse) { - out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL -XPOST 'http://localhost:7171/restart'") - log.Debug(out) - r.NoError(err) 
- r.Contains(out, "acknowledged") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables", tablesPattern, incrementBackupName)) - //some actions need time for restart - time.Sleep(6 * time.Second) + log.Info("Upload") + uploadCmd := fmt.Sprintf("%s_COMPRESSION_FORMAT=zstd CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/%s clickhouse-backup upload --resume %s", remoteStorageType, backupConfig, testBackupName) + checkResumeAlreadyProcessed(uploadCmd, testBackupName, "upload", r, remoteStorageType) - var inProgressActions uint64 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&inProgressActions, "SELECT count() FROM system.backup_actions WHERE status!=?", status.CancelStatus)) - r.Equal(uint64(0), inProgressActions) -} + //diffFrom := []string{"--diff-from", "--diff-from-remote"}[rand.Intn(2)] + diffFrom := "--diff-from-remote" + uploadCmd = fmt.Sprintf("clickhouse-backup -c /etc/clickhouse-backup/%s upload %s %s %s --resume", backupConfig, incrementBackupName, diffFrom, testBackupName) + checkResumeAlreadyProcessed(uploadCmd, incrementBackupName, "upload", r, remoteStorageType) -func runClickHouseClientInsertSystemBackupActions(r *require.Assertions, ch *TestClickHouse, commands []string, needWait bool) { - sql := "INSERT INTO system.backup_actions(command) " + "VALUES ('" + strings.Join(commands, "'),('") + "')" - out, err := dockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("clickhouse client --echo -mn -q \"%s\"", sql)) - log.Debug(out) - r.NoError(err) - if needWait { - for _, command := range commands { - for { - time.Sleep(500 * time.Millisecond) - var commandStatus string - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&commandStatus, "SELECT status FROM system.backup_actions WHERE command=?", command)) - if commandStatus != status.InProgressStatus { - break - } - } - } + backupDir := "/var/lib/clickhouse/backup" + if strings.HasPrefix(remoteStorageType, "EMBEDDED") { + backupDir = 
"/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) } -} -func testAPIBackupActions(r *require.Assertions, ch *TestClickHouse) { - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"create_remote actions_backup1"}, true) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"delete local actions_backup1", "restore_remote --rm actions_backup1"}, true) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"delete local actions_backup1", "delete remote actions_backup1"}, false) + out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "ls -lha "+backupDir+" | grep "+t.Name()) + r.NoError(err) + r.Equal(2, len(strings.Split(strings.Trim(out, " \t\r\n"), "\n")), "expect '2' backups exists in backup directory") + log.Info("Delete backup") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", testBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", incrementBackupName)) + out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "ls -lha "+backupDir+" | grep "+t.Name()) + r.NotNil(err) + r.Equal("", strings.Trim(out, " \t\r\n"), "expect '0' backup exists in backup directory") - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"create actions_backup2"}, true) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"upload actions_backup2"}, true) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"delete local actions_backup2"}, false) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"download actions_backup2"}, true) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"restore --rm actions_backup2"}, true) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"delete local actions_backup2", "delete remote actions_backup2"}, false) + 
dropDatabasesFromTestDataDataSet(t, r, ch, databaseList) - inProgressActions := make([]struct { - Command string `ch:"command"` - Status string `ch:"status"` - }, 0) - r.NoError(ch.chbackend.StructSelect(&inProgressActions, "SELECT command, status FROM system.backup_actions WHERE command LIKE '%actions%' AND status IN (?,?)", status.InProgressStatus, status.ErrorStatus)) - r.Equal(0, len(inProgressActions), "inProgressActions=%+v", inProgressActions) + log.Info("Download") + downloadCmd := fmt.Sprintf("clickhouse-backup -c /etc/clickhouse-backup/%s download --resume %s", backupConfig, testBackupName) + checkResumeAlreadyProcessed(downloadCmd, testBackupName, "download", r, remoteStorageType) - var actionsBackups uint64 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&actionsBackups, "SELECT count() FROM system.backup_list WHERE name LIKE 'backup_action%'")) - r.Equal(uint64(0), actionsBackups) + log.Info("Restore schema") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--schema", testBackupName)) - out, err := dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") - r.NoError(err) - r.Contains(out, "clickhouse_backup_last_create_remote_status 1") - r.Contains(out, "clickhouse_backup_last_create_status 1") - r.Contains(out, "clickhouse_backup_last_upload_status 1") - r.Contains(out, "clickhouse_backup_last_delete_status 1") - r.Contains(out, "clickhouse_backup_last_download_status 1") - r.Contains(out, "clickhouse_backup_last_restore_status 1") -} + log.Info("Restore data") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--data", testBackupName)) -func testAPIWatchAndKill(r *require.Assertions, ch *TestClickHouse) { - log.Info("Check /backup/watch + /backup/kill") - runKillCommand := func(command string) { - out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL 
'http://localhost:7171/backup/kill?command=%s'", command)) - log.Debug(out) - r.NoError(err) - } - checkWatchBackup := func(expectedCount uint64) { - var watchBackups uint64 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&watchBackups, "SELECT count() FROM system.backup_list WHERE name LIKE 'shard%'")) - r.Equal(expectedCount, watchBackups) - } + log.Info("Full restore with rm") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--rm", testBackupName)) - checkCanceledCommand := func(expectedCount int) { - canceledCommands := make([]struct { - Status string `ch:"status"` - Command string `ch:"command"` - }, 0) - r.NoError(ch.chbackend.StructSelect(&canceledCommands, "SELECT status, command FROM system.backup_actions WHERE command LIKE 'watch%'")) - r.Equal(expectedCount, len(canceledCommands)) - for i := range canceledCommands { - r.Equal("watch", canceledCommands[i].Command) - r.Equal(status.CancelStatus, canceledCommands[i].Status) + log.Info("Check data") + for i := range testData { + if testData[i].CheckDatabaseOnly { + r.NoError(ch.checkDatabaseEngine(t, testData[i])) + } else { + if isTableSkip(ch, testData[i], true) { + continue + } + r.NoError(ch.checkData(t, testData[i], r)) } } + // test increment + dropDatabasesFromTestDataDataSet(t, r, ch, databaseList) - checkWatchBackup(1) - runKillCommand("watch") - checkCanceledCommand(1) + log.Info("Delete backup") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", testBackupName)) - out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/watch'") - log.Debug(out) - r.NoError(err) - time.Sleep(7 * time.Second) + log.Info("Download increment") + downloadCmd = fmt.Sprintf("clickhouse-backup -c /etc/clickhouse-backup/%s download --resume %s", backupConfig, incrementBackupName) + checkResumeAlreadyProcessed(downloadCmd, 
incrementBackupName, "download", r, remoteStorageType) - checkWatchBackup(2) - runKillCommand("watch") - checkCanceledCommand(2) -} + log.Info("Restore") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--schema", "--data", incrementBackupName)) -func testAPIBackupDelete(r *require.Assertions) { - log.Info("Check /backup/delete/{where}/{name}") - for i := 1; i <= apiBackupNumber; i++ { - out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/local/z_backup_%d'", i)) - log.Infof(out) - r.NoError(err) - r.NotContains(out, "another operation is currently running") - r.NotContains(out, "\"status\":\"error\"") - out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/remote/z_backup_%d'", i)) - log.Infof(out) - r.NoError(err) - r.NotContains(out, "another operation is currently running") - r.NotContains(out, "\"status\":\"error\"") - } - out, err := dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") - r.NoError(err) - r.Contains(out, "clickhouse_backup_last_delete_status 1") -} + log.Info("Check increment data") + for i := range testData { + testDataItem := testData[i] + if isTableSkip(ch, testDataItem, true) || testDataItem.IsDictionary { + continue + } + for _, incrementDataItem := range defaultIncrementData { + if testDataItem.Database == incrementDataItem.Database && testDataItem.Name == incrementDataItem.Name { + testDataItem.Rows = append(testDataItem.Rows, incrementDataItem.Rows...) 
+ } + } + if testDataItem.CheckDatabaseOnly { + r.NoError(ch.checkDatabaseEngine(t, testDataItem)) + } else { + r.NoError(ch.checkData(t, testDataItem, r)) + } -func testAPIMetrics(r *require.Assertions, ch *TestClickHouse) { - log.Info("Check /metrics clickhouse_backup_last_backup_size_remote") - var lastRemoteSize int64 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&lastRemoteSize, "SELECT size FROM system.backup_list WHERE name='z_backup_5' AND location='remote'")) + } - var realTotalBytes uint64 - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") >= 0 { - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&realTotalBytes, "SELECT sum(total_bytes) FROM system.tables WHERE database='long_schema'")) + // test end + log.Info("Clean after finish") + // CUSTOM and EMBEDDED download increment doesn't download full + if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") { + fullCleanup(t, r, ch, []string{incrementBackupName}, []string{"local"}, nil, true, false, backupConfig) + fullCleanup(t, r, ch, []string{testBackupName, incrementBackupName}, []string{"remote"}, databaseList, true, true, backupConfig) } else { - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&realTotalBytes, "SELECT sum(bytes_on_disk) FROM system.parts WHERE database='long_schema'")) + fullCleanup(t, r, ch, []string{testBackupName, incrementBackupName}, []string{"remote", "local"}, databaseList, true, true, backupConfig) } - r.Greater(realTotalBytes, uint64(0)) - r.Greater(uint64(lastRemoteSize), realTotalBytes) +} - out, err := dockerExecOut("clickhouse-backup", "curl", "-sL", "http://localhost:7171/metrics") - log.Debug(out) - r.NoError(err) - r.Contains(out, fmt.Sprintf("clickhouse_backup_last_backup_size_remote %d", lastRemoteSize)) +func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *TestClickHouse, remoteStorageType string, backupConfig string) { + log.Info("testBackupSpecifiedPartitions started") + var err error + var out string + var 
result, expectedCount uint64 - log.Info("Check /metrics clickhouse_backup_number_backups_*") - r.Contains(out, fmt.Sprintf("clickhouse_backup_number_backups_local %d", apiBackupNumber)) - // +1 watch backup - r.Contains(out, fmt.Sprintf("clickhouse_backup_number_backups_remote %d", apiBackupNumber+1)) - r.Contains(out, "clickhouse_backup_number_backups_local_expected 0") - r.Contains(out, "clickhouse_backup_number_backups_remote_expected 0") -} + partitionBackupName := fmt.Sprintf("partition_backup_%d", rand.Int()) + fullBackupName := fmt.Sprintf("full_backup_%d", rand.Int()) + dbName := "test_partitions_" + t.Name() + // Create and fill tables + ch.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS "+dbName) + ch.queryWithNoError(r, "DROP TABLE IF EXISTS "+dbName+".t1") + ch.queryWithNoError(r, "DROP TABLE IF EXISTS "+dbName+".t2") + ch.queryWithNoError(r, "CREATE TABLE "+dbName+".t1 (dt Date, v UInt64) ENGINE=MergeTree() PARTITION BY toYYYYMMDD(dt) ORDER BY dt") + ch.queryWithNoError(r, "CREATE TABLE "+dbName+".t2 (dt String, v UInt64) ENGINE=MergeTree() PARTITION BY dt ORDER BY dt") + for _, dt := range []string{"2022-01-01", "2022-01-02", "2022-01-03", "2022-01-04"} { + ch.queryWithNoError(r, fmt.Sprintf("INSERT INTO "+dbName+".t1 SELECT '%s', number FROM numbers(10)", dt)) + ch.queryWithNoError(r, fmt.Sprintf("INSERT INTO "+dbName+".t2 SELECT '%s', number FROM numbers(10)", dt)) + } -func testAPIDeleteLocalDownloadRestore(r *require.Assertions) { - log.Info("Check /backup/delete/local/{name} + /backup/download/{name} + /backup/restore/{name}?rm=1") - out, err := dockerExecOut( - "clickhouse-backup", - "bash", "-xe", "-c", - fmt.Sprintf("for i in {1..%d}; do date; curl -sfL -XPOST \"http://localhost:7171/backup/delete/local/z_backup_$i\"; curl -sfL -XPOST \"http://localhost:7171/backup/download/z_backup_$i\"; sleep 2; curl -sfL -XPOST \"http://localhost:7171/backup/restore/z_backup_$i?rm=1\"; sleep 8; done", apiBackupNumber), - ) - log.Debug(out) + // check 
create_remote full > download + partitions > delete local > download > restore --partitions > restore + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create_remote", "--tables="+dbName+".t*", fullBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "download", "--partitions=('2022-01-02'),('2022-01-03')", fullBackupName)) + fullBackupDir := "/var/lib/clickhouse/backup/" + fullBackupName + "/shadow/" + dbName + "/t?/default/" + if strings.HasPrefix(remoteStorageType, "EMBEDDED") { + fullBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + fullBackupName + "/data/" + dbName + "/t?" + } + out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+fullBackupDir+" | wc -l") + r.NoError(err) + expectedLines := "13" + // custom storage doesn't support --partitions for upload / download now + // embedded storage contain hardLink files and will download additional data parts + if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") { + expectedLines = "17" + } + r.Equal(expectedLines, strings.Trim(out, "\r\n\t ")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "download", fullBackupName)) + + fullBackupDir = "/var/lib/clickhouse/backup/" + fullBackupName + "/shadow/" + dbName + "/t?/default/" + if strings.HasPrefix(remoteStorageType, "EMBEDDED") { + fullBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + 
fullBackupName + "/data/" + dbName + "/t?" + } + out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+fullBackupDir+"| wc -l") r.NoError(err) - r.NotContains(out, "another operation is currently running") - r.NotContains(out, "\"status\":\"error\"") + r.Equal("17", strings.Trim(out, "\r\n\t ")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--partitions=('2022-01-02'),('2022-01-03')", fullBackupName)) + result = 0 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT sum(c) FROM (SELECT count() AS c FROM "+dbName+".t1 UNION ALL SELECT count() AS c FROM "+dbName+".t2)")) + expectedCount = 40 + r.Equal(expectedCount, result, fmt.Sprintf("expect count=%d", expectedCount)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", fullBackupName)) + result = 0 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT sum(c) FROM (SELECT count() AS c FROM "+dbName+".t1 UNION ALL SELECT count() AS c FROM "+dbName+".t2)")) + r.Equal(uint64(80), result, "expect count=80") + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", fullBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) - out, err = dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") + // check create + partitions + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables="+dbName+".t1", "--partitions=20220102,20220103", partitionBackupName)) + partitionBackupDir := "/var/lib/clickhouse/backup/" + partitionBackupName + "/shadow/" + dbName + "/t1/default/" + if strings.HasPrefix(remoteStorageType, "EMBEDDED") { + partitionBackupDir = "/var/lib/clickhouse/disks/backups" + 
strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + partitionBackupName + "/data/" + dbName + "/t1" + } + out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+partitionBackupDir+"| wc -l") r.NoError(err) - r.Contains(out, "clickhouse_backup_last_delete_status 1") - r.Contains(out, "clickhouse_backup_last_download_status 1") - r.Contains(out, "clickhouse_backup_last_restore_status 1") -} + r.Equal("5", strings.Trim(out, "\r\n\t ")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", partitionBackupName)) -func testAPIBackupList(t *testing.T, r *require.Assertions) { - log.Info("Check /backup/list") - out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list'") - log.Debug(out) + // check create > upload + partitions + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables="+dbName+".t1", partitionBackupName)) + partitionBackupDir = "/var/lib/clickhouse/backup/" + partitionBackupName + "/shadow/" + dbName + "/t1/default/" + if strings.HasPrefix(remoteStorageType, "EMBEDDED") { + partitionBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + partitionBackupName + "/data/" + dbName + "/t1" + } + out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+partitionBackupDir+" | wc -l") r.NoError(err) - for i := 1; i <= apiBackupNumber; i++ { - r.True(assert.Regexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"local\",\"required\":\"\",\"desc\":\"regular\"}", i)), out)) - r.True(assert.Regexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} 
\\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"remote\",\"required\":\"\",\"desc\":\"tar, regular\"}", i)), out)) + r.Equal("7", strings.Trim(out, "\r\n\t ")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "upload", "--tables="+dbName+".t1", "--partitions=20220102,20220103", partitionBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", partitionBackupName)) + + // restore partial uploaded + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore_remote", partitionBackupName)) + + // Check partial restored t1 + result = 0 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM "+dbName+".t1")) + + expectedCount = 20 + // custom and embedded doesn't support --partitions in upload and download + if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") { + expectedCount = 40 } + r.Equal(expectedCount, result, fmt.Sprintf("expect count=%d", expectedCount)) - log.Info("Check /backup/list/local") - out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list/local'") - log.Debug(out) - r.NoError(err) - for i := 1; i <= apiBackupNumber; i++ { - r.True(assert.Regexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"local\",\"required\":\"\",\"desc\":\"regular\"}", i)), out)) - r.True(assert.NotRegexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"remote\",\"required\":\"\",\"desc\":\"tar, regular\"}", i)), out)) + // Check only selected partitions restored + result = 0 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM "+dbName+".t1 WHERE dt NOT 
IN ('2022-01-02','2022-01-03')")) + expectedCount = 0 + // custom and embedded doesn't support --partitions in upload and download + if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") { + expectedCount = 20 } + r.Equal(expectedCount, result, "expect count=0") - log.Info("Check /backup/list/remote") - out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list/remote'") - log.Debug(out) - r.NoError(err) - for i := 1; i <= apiBackupNumber; i++ { - r.True(assert.NotRegexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"local\",\"required\":\"\",\"desc\":\"regular\"}", i)), out)) - r.True(assert.Regexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"remote\",\"required\":\"\",\"desc\":\"tar, regular\"}", i)), out)) + // DELETE backup. 
+ r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", partitionBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", partitionBackupName)) + + if err = ch.dropDatabase(dbName); err != nil { + t.Fatal(err) } + log.Info("testBackupSpecifiedPartitions finish") } -func testAPIBackupUpload(r *require.Assertions) { - log.Info("Check /backup/upload") - out, err := dockerExecOut( - "clickhouse-backup", - "bash", "-xe", "-c", - fmt.Sprintf("for i in {1..%d}; do date; curl -sfL -XPOST \"http://localhost:7171/backup/upload/z_backup_$i\"; sleep 2; done", apiBackupNumber), - ) - log.Debug(out) - r.NoError(err) - r.NotContains(out, "\"status\":\"error\"") - r.NotContains(out, "another operation is currently running") - out, err = dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") +func checkResumeAlreadyProcessed(backupCmd, testBackupName, resumeKind string, r *require.Assertions, remoteStorageType string) { + // backupCmd = fmt.Sprintf("%s & PID=$!; sleep 0.7; kill -9 $PID; cat /var/lib/clickhouse/backup/%s/upload.state; sleep 0.3; %s", backupCmd, testBackupName, backupCmd) + if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") { + backupCmd = strings.Replace(backupCmd, "--resume", "", 1) + } else { + backupCmd = fmt.Sprintf("%s; cat /var/lib/clickhouse/backup/%s/%s.state; %s", backupCmd, testBackupName, resumeKind, backupCmd) + } + out, err := dockerExecOut("clickhouse-backup", "bash", "-xce", backupCmd) + log.Info(out) r.NoError(err) - r.Contains(out, "clickhouse_backup_last_upload_status 1") + if strings.Contains(backupCmd, "--resume") { + r.Contains(out, "already processed") + } } -func testAPIBackupTables(r *require.Assertions) { - log.Info("Check /backup/tables") - out, err := dockerExecOut( - "clickhouse-backup", - "bash", "-xe", "-c", "curl -sfL 
\"http://localhost:7171/backup/tables\"", - ) - log.Debug(out) - r.NoError(err) - r.Contains(out, "long_schema") - r.NotContains(out, "Connection refused") - r.NotContains(out, "another operation is currently running") - r.NotContains(out, "\"status\":\"error\"") - r.NotContains(out, "system") - r.NotContains(out, "INFORMATION_SCHEMA") - r.NotContains(out, "information_schema") - - log.Info("Check /backup/tables/all") - out, err = dockerExecOut( - "clickhouse-backup", - "bash", "-xe", "-c", "curl -sfL \"http://localhost:7171/backup/tables/all\"", - ) - log.Debug(out) - r.NoError(err) - r.Contains(out, "long_schema") - r.Contains(out, "system") - r.NotContains(out, "Connection refused") - r.NotContains(out, "another operation is currently running") - r.NotContains(out, "\"status\":\"error\"") - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.3") >= 0 { - r.Contains(out, "INFORMATION_SCHEMA") - r.Contains(out, "information_schema") +func fullCleanup(t *testing.T, r *require.Assertions, ch *TestClickHouse, backupNames, backupTypes, databaseList []string, checkDeleteErr, checkDropErr bool, backupConfig string) { + for _, backupName := range backupNames { + for _, backupType := range backupTypes { + err := dockerExec("clickhouse-backup", "bash", "-xce", "clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" delete "+backupType+" "+backupName) + if checkDeleteErr { + r.NoError(err) + } + } + } + otherBackupList, err := dockerExecOut("clickhouse", "ls", "-1", "/var/lib/clickhouse/backup/*"+t.Name()+"*") + if err == nil { + for _, backupName := range strings.Split(otherBackupList, "\n") { + if backupName != "" { + err := dockerExec("clickhouse-backup", "bash", "-xce", "clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" delete local "+backupName) + if checkDropErr { + r.NoError(err) + } + } + } } + + dropDatabasesFromTestDataDataSet(t, r, ch, databaseList) } -func testAPIBackupCreate(r *require.Assertions) { - log.Info("Check /backup/create") - out, 
err := dockerExecOut( - "clickhouse-backup", - "bash", "-xe", "-c", - fmt.Sprintf("sleep 3; for i in {1..%d}; do date; curl -sfL -XPOST \"http://localhost:7171/backup/create?table=long_schema.*&name=z_backup_$i\"; sleep 1.5; done", apiBackupNumber), - ) - log.Debug(out) - r.NoError(err) - r.NotContains(out, "Connection refused") - r.NotContains(out, "another operation is currently running") - r.NotContains(out, "\"status\":\"error\"") - out, err = dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") - r.NoError(err) - r.Contains(out, "clickhouse_backup_last_create_status 1") +func generateTestData(t *testing.T, r *require.Assertions, ch *TestClickHouse, remoteStorageType string, testData []TestDataStruct) []TestDataStruct { + log.Infof("Generate test data %s with _%s suffix", remoteStorageType, t.Name()) + testData = generateTestDataWithDifferentStoragePolicy(remoteStorageType, testData) + for _, data := range testData { + if isTableSkip(ch, data, false) { + continue + } + r.NoError(ch.createTestSchema(t, data, remoteStorageType)) + } + for _, data := range testData { + if isTableSkip(ch, data, false) { + continue + } + r.NoError(ch.createTestData(t, data)) + } + return testData +} +func generateTestDataWithDifferentStoragePolicy(remoteStorageType string, testData []TestDataStruct) []TestDataStruct { + for databaseName, databaseEngine := range map[string]string{dbNameOrdinary: "Ordinary", dbNameAtomic: "Atomic"} { + testDataWithStoragePolicy := TestDataStruct{ + Database: databaseName, DatabaseEngine: databaseEngine, + Rows: func() []map[string]interface{} { + result := make([]map[string]interface{}, 100) + for i := 0; i < 100; i++ { + result[i] = map[string]interface{}{"id": uint64(i)} + } + return result + }(), + Fields: []string{"id"}, + OrderBy: "id", + } + addTestDataIfNotExists := func() { + found := false + for _, data := range testData { + if data.Name == testDataWithStoragePolicy.Name && data.Database == 
testDataWithStoragePolicy.Database { + found = true + break + } + } + if !found { + testData = append(testData, testDataWithStoragePolicy) + } + } + //s3 disks support after 21.8 + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.8") >= 0 && remoteStorageType == "S3" { + testDataWithStoragePolicy.Name = "test_s3" + testDataWithStoragePolicy.Schema = "(id UInt64) Engine=MergeTree ORDER BY id SETTINGS storage_policy = 's3_only'" + addTestDataIfNotExists() + } + //encrypted disks support after 21.10 + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.10") >= 0 { + testDataWithStoragePolicy.Name = "test_hdd3_encrypted" + testDataWithStoragePolicy.Schema = "(id UInt64) Engine=MergeTree ORDER BY id SETTINGS storage_policy = 'hdd3_only_encrypted'" + addTestDataIfNotExists() + } + //encrypted s3 disks support after 21.12 + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 && remoteStorageType == "S3" { + testDataWithStoragePolicy.Name = "test_s3_encrypted" + testDataWithStoragePolicy.Schema = "(id UInt64) Engine=MergeTree ORDER BY id SETTINGS storage_policy = 's3_only_encrypted'" + addTestDataIfNotExists() + } + //gcs over s3 support added in 22.6 + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.6") >= 0 && remoteStorageType == "GCS" && os.Getenv("QA_GCS_OVER_S3_BUCKET") != "" { + testDataWithStoragePolicy.Name = "test_gcs" + testDataWithStoragePolicy.Schema = "(id UInt64) Engine=MergeTree ORDER BY id SETTINGS storage_policy = 'gcs_only'" + addTestDataIfNotExists() + } + //check azure_blob_storage only in 23.3+ (added in 22.1) + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "23.3") >= 0 && remoteStorageType == "AZBLOB" { + testDataWithStoragePolicy.Name = "test_azure" + testDataWithStoragePolicy.Schema = "(id UInt64) Engine=MergeTree ORDER BY id SETTINGS storage_policy = 'azure_only'" + addTestDataIfNotExists() + } + } + return testData } -func fillDatabaseForAPIServer(maxTables int, minFields int, randFields int, ch *TestClickHouse, 
r *require.Assertions, fieldTypes []string) { - log.Infof("Create %d `long_schema`.`t%%d` tables with with %d..%d fields...", maxTables, minFields, minFields+randFields) - ch.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS long_schema") - for i := 0; i < maxTables; i++ { - sql := fmt.Sprintf("CREATE TABLE long_schema.t%d (id UInt64", i) - fieldsCount := minFields + rand.Intn(randFields) - for j := 0; j < fieldsCount; j++ { - fieldType := fieldTypes[rand.Intn(len(fieldTypes))] - sql += fmt.Sprintf(", f%d %s", j, fieldType) +func generateIncrementTestData(t *testing.T, ch *TestClickHouse, r *require.Assertions, incrementData []TestDataStruct) { + log.Info("Generate increment test data") + for _, data := range incrementData { + if isTableSkip(ch, data, false) { + continue } - sql += ") ENGINE=MergeTree() ORDER BY id" - ch.queryWithNoError(r, sql) - sql = fmt.Sprintf("INSERT INTO long_schema.t%d(id) SELECT number FROM numbers(100)", i) - ch.queryWithNoError(r, sql) + r.NoError(ch.createTestData(t, data)) + } +} + +func dropDatabasesFromTestDataDataSet(t *testing.T, r *require.Assertions, ch *TestClickHouse, databaseList []string) { + log.Info("Drop all databases") + for _, db := range databaseList { + r.NoError(ch.dropDatabase(db + "_" + t.Name())) } - log.Info("...DONE") } +const apiBackupNumber = 5 + type TestClickHouse struct { chbackend *clickhouse.ClickHouse } @@ -2217,8 +2218,12 @@ func (ch *TestClickHouse) connect(timeOut string) error { var mergeTreeOldSyntax = regexp.MustCompile(`(?m)MergeTree\(([^,]+),([\w\s,)(]+),(\s*\d+\s*)\)`) -func (ch *TestClickHouse) createTestSchema(data TestDataStruct, remoteStorageType string) error { +func (ch *TestClickHouse) createTestSchema(t *testing.T, data TestDataStruct, remoteStorageType string) error { + origDatabase := data.Database + origName := data.Name if !data.IsFunction { + data.Database = data.Database + "_" + t.Name() + data.Name = data.Name + "_" + t.Name() // 20.8 doesn't respect DROP TABLE ... 
NO DELAY, so Atomic works but --rm is not applicable if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") > 0 { if err := ch.chbackend.CreateDatabaseWithEngine(data.Database, data.DatabaseEngine, "cluster"); err != nil { @@ -2247,7 +2252,7 @@ func (ch *TestClickHouse) createTestSchema(data TestDataStruct, remoteStorageTyp } if data.IsFunction { - createSQL += fmt.Sprintf(" IF NOT EXISTS `%s` ", data.Name) + createSQL += fmt.Sprintf(" IF NOT EXISTS `%s_%s` ", data.Name, t.Name()) } else { createSQL += fmt.Sprintf(" IF NOT EXISTS `%s`.`%s` ", data.Database, data.Name) } @@ -2285,6 +2290,11 @@ func (ch *TestClickHouse) createTestSchema(data TestDataStruct, remoteStorageTyp log.Fatalf("Wrong %s, matches=%#v", createSQL, matches) } } + if !data.IsFunction { + createSQL = strings.NewReplacer("`"+origDatabase+"`", "`"+data.Database+"`", "'"+origDatabase+"'", "'"+data.Database+"'").Replace(createSQL) + createSQL = strings.NewReplacer("."+origName, "."+data.Name, "`"+origName+"`", "`"+data.Name+"`", "'"+origName+"'", "'"+data.Name+"'").Replace(createSQL) + } + createSQL = strings.Replace(createSQL, "{test}", t.Name(), -1) err := ch.chbackend.CreateTable( clickhouse.Table{ Database: data.Database, @@ -2296,7 +2306,9 @@ func (ch *TestClickHouse) createTestSchema(data TestDataStruct, remoteStorageTyp return err } -func (ch *TestClickHouse) createTestData(data TestDataStruct) error { +func (ch *TestClickHouse) createTestData(t *testing.T, data TestDataStruct) error { + data.Database = data.Database + "_" + t.Name() + data.Name = data.Name + "_" + t.Name() if data.SkipInsert || data.CheckDatabaseOnly { return nil } @@ -2333,17 +2345,19 @@ func (ch *TestClickHouse) dropDatabase(database string) (err error) { func (ch *TestClickHouse) checkData(t *testing.T, data TestDataStruct, r *require.Assertions) error { assert.NotNil(t, data.Rows) + data.Database += "_" + t.Name() + data.Name += "_" + t.Name() log.Infof("Check '%d' rows in '%s.%s'\n", len(data.Rows), data.Database, 
data.Name) - selectSQL := fmt.Sprintf("SELECT * FROM `%s`.`%s` ORDER BY `%s`", data.Database, data.Name, data.OrderBy) + selectSQL := fmt.Sprintf("SELECT * FROM `%s`.`%s` ORDER BY `%s`", data.Database, data.Name, strings.Replace(data.OrderBy, "{test}", t.Name(), -1)) if data.IsFunction && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") == -1 { return nil } if data.IsFunction { - selectSQL = fmt.Sprintf("SELECT %s(number, number+1) AS test_result FROM numbers(3)", data.Name) + selectSQL = fmt.Sprintf("SELECT %s(number, number+1) AS test_result FROM numbers(%d)", data.Name, len(data.Rows)) } log.Debug(selectSQL) - rows, err := ch.chbackend.GetConn().Query(context.TODO(), selectSQL) + rows, err := ch.chbackend.GetConn().Query(context.Background(), selectSQL) if err != nil { return err } @@ -2392,6 +2406,7 @@ func (ch *TestClickHouse) checkData(t *testing.T, data TestDataStruct, r *requir } func (ch *TestClickHouse) checkDatabaseEngine(t *testing.T, data TestDataStruct) error { + data.Database += "_" + t.Name() if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") <= 0 { return nil } @@ -2459,7 +2474,7 @@ func isTableSkip(ch *TestClickHouse, data TestDataStruct, dataExists bool) bool _ = ch.chbackend.Select(&dictEngines, dictSQL) return len(dictEngines) == 0 } - return os.Getenv("COMPOSE_FILE") == "docker-compose.yml" && (data.Name == "jbod" || data.IsDictionary) + return os.Getenv("COMPOSE_FILE") == "docker-compose.yml" && (strings.Contains(data.Name, "jbod_table") || data.IsDictionary) } func compareVersion(v1, v2 string) int { @@ -2490,7 +2505,7 @@ func installDebIfNotExists(r *require.Assertions, container string, pkgs ...stri container, "bash", "-xec", fmt.Sprintf( - "export DEBIAN_FRONTEND=noniteractive; if [[ '%d' != $(dpkg -l | grep -c -E \"%s\" ) ]]; then rm -fv /etc/apt/sources.list.d/clickhouse.list; find /etc/apt/ -type f -exec sed -i 's/ru.archive.ubuntu.com/archive.ubuntu.com/g' {} +; apt-get -y update; apt-get install 
--no-install-recommends -y %s; fi", + "export DEBIAN_FRONTEND=noniteractive; if [[ '%d' != $(dpkg -l | grep -c -E \"%s\" ) ]]; then rm -fv /etc/apt/sources.list.d/clickhouse.list; find /etc/apt/ -type f -name *.list -exec sed -i 's/ru.archive.ubuntu.com/archive.ubuntu.com/g' {} +; apt-get -y update; apt-get install --no-install-recommends -y %s; fi", len(pkgs), "^ii\\s+"+strings.Join(pkgs, "|^ii\\s+"), strings.Join(pkgs, " "), ), )) diff --git a/test/integration/kopia/init.sh b/test/integration/kopia/init.sh index b2fc2ecb..7af33791 100755 --- a/test/integration/kopia/init.sh +++ b/test/integration/kopia/init.sh @@ -1,12 +1,14 @@ CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" export KOPIA_PASSWORD_FILE="${CUR_DIR}/password" export KOPIA_S3_BUCKET=clickhouse +export KOPIA_S3_PATH=/clickhouse/kopia/cluster_name/shard_number/ export KOPIA_S3_ENDPOINT=minio:9000 export AWS_ACCESS_KEY_ID=access-key export AWS_SECRET_ACCESS_KEY=it-is-my-super-secret-key export KOPIA_KEEP_LAST=7 export KOPIA_PASSWORD=kopia-repo-password export KOPIA_CHECK_FOR_UPDATES=false -export CLICKHOUSE_PARAMS="--host '$(yq '.clickhouse.host' /etc/clickhouse-backup/config.yml)' --port '$(yq '.clickhouse.port' /etc/clickhouse-backup/config.yml)' --user '$(yq '.clickhouse.username' /etc/clickhouse-backup/config.yml)' --password '$(yq '.clickhouse.password' /etc/clickhouse-backup/config.yml)'" -kopia repository connect s3 --endpoint=${KOPIA_S3_ENDPOINT} --disable-tls --bucket=${KOPIA_S3_BUCKET} --access-key=${AWS_ACCESS_KEY_ID} --secret-access-key=${AWS_SECRET_ACCESS_KEY} || kopia repository create s3 --endpoint=${KOPIA_S3_ENDPOINT} --disable-tls --bucket=${KOPIA_S3_BUCKET} --access-key=${AWS_ACCESS_KEY_ID} --secret-access-key=${AWS_SECRET_ACCESS_KEY} +export CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-custom-kopia.yml +export CLICKHOUSE_PARAMS="--host '$(yq '.clickhouse.host' ${CLICKHOUSE_BACKUP_CONFIG})' --port '$(yq '.clickhouse.port' ${CLICKHOUSE_BACKUP_CONFIG})' 
--user '$(yq '.clickhouse.username' ${CLICKHOUSE_BACKUP_CONFIG})' --password '$(yq '.clickhouse.password' ${CLICKHOUSE_BACKUP_CONFIG})'" +kopia repository connect s3 --endpoint=${KOPIA_S3_ENDPOINT} --disable-tls --bucket=${KOPIA_S3_BUCKET} --prefix=${KOPIA_S3_PATH} --access-key=${AWS_ACCESS_KEY_ID} --secret-access-key=${AWS_SECRET_ACCESS_KEY} || kopia repository create s3 --endpoint=${KOPIA_S3_ENDPOINT} --disable-tls --bucket=${KOPIA_S3_BUCKET} --prefix=${KOPIA_S3_PATH} --access-key=${AWS_ACCESS_KEY_ID} --secret-access-key=${AWS_SECRET_ACCESS_KEY} kopia policy set --global --keep-latest=${KOPIA_KEEP_LAST} \ No newline at end of file diff --git a/test/integration/restic/init.sh b/test/integration/restic/init.sh index ce540059..7859fbf7 100755 --- a/test/integration/restic/init.sh +++ b/test/integration/restic/init.sh @@ -4,5 +4,6 @@ export RESTIC_REPOSITORY=s3:http://minio:9000/clickhouse/restic/cluster_name/sha export AWS_ACCESS_KEY_ID=access-key export AWS_SECRET_ACCESS_KEY=it-is-my-super-secret-key export RESTIC_KEEP_LAST=7 -export CLICKHOUSE_PARAMS="--host '$(yq '.clickhouse.host' /etc/clickhouse-backup/config.yml)' --port '$(yq '.clickhouse.port' /etc/clickhouse-backup/config.yml)' --user '$(yq '.clickhouse.username' /etc/clickhouse-backup/config.yml)' --password '$(yq '.clickhouse.password' /etc/clickhouse-backup/config.yml)'" +export CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-custom-restic.yml +export CLICKHOUSE_PARAMS="--host '$(yq '.clickhouse.host' ${CLICKHOUSE_BACKUP_CONFIG})' --port '$(yq '.clickhouse.port' ${CLICKHOUSE_BACKUP_CONFIG})' --user '$(yq '.clickhouse.username' ${CLICKHOUSE_BACKUP_CONFIG})' --password '$(yq '.clickhouse.password' ${CLICKHOUSE_BACKUP_CONFIG})'" restic cat config > /dev/null || restic init \ No newline at end of file diff --git a/test/integration/rsync/init.sh b/test/integration/rsync/init.sh index 150b610b..c70dc3fe 100644 --- a/test/integration/rsync/init.sh +++ b/test/integration/rsync/init.sh @@ -3,4 +3,5 @@ 
export BACKUP_REMOTE_DIR="/root/rsync_backups/cluster/shard0" export BACKUP_REMOTE_SERVER="root@sshd" export BACKUP_SSH_KEY="/tmp/id_rsa" export BACKUP_KEEP_TO_REMOTE=7 -export CLICKHOUSE_PARAMS="--host '$(yq '.clickhouse.host' /etc/clickhouse-backup/config.yml)' --port '$(yq '.clickhouse.port' /etc/clickhouse-backup/config.yml)' --user '$(yq '.clickhouse.username' /etc/clickhouse-backup/config.yml)' --password '$(yq '.clickhouse.password' /etc/clickhouse-backup/config.yml)'" \ No newline at end of file +export CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-custom-rsync.yml +export CLICKHOUSE_PARAMS="--host '$(yq '.clickhouse.host' ${CLICKHOUSE_BACKUP_CONFIG})' --port '$(yq '.clickhouse.port' ${CLICKHOUSE_BACKUP_CONFIG})' --user '$(yq '.clickhouse.username' ${CLICKHOUSE_BACKUP_CONFIG})' --password '$(yq '.clickhouse.password' ${CLICKHOUSE_BACKUP_CONFIG})'" \ No newline at end of file diff --git a/test/integration/run.sh b/test/integration/run.sh index 19a97479..29e9652e 100755 --- a/test/integration/run.sh +++ b/test/integration/run.sh @@ -44,5 +44,5 @@ make clean build-race-docker build-race-fips-docker docker-compose -f ${CUR_DIR}/${COMPOSE_FILE} up -d docker-compose -f ${CUR_DIR}/${COMPOSE_FILE} exec minio mc alias list -go test -timeout 30m -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v ${CUR_DIR}/integration_test.go +go test -parallel ${RUN_PARALLEL:-$(nproc)} -timeout 30m -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v ${CUR_DIR}/integration_test.go go tool covdata textfmt -i "${CUR_DIR}/_coverage_/" -o "${CUR_DIR}/_coverage_/coverage.out" \ No newline at end of file From d4880ae7d8b47ea5945485b6f53eebd707110349 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 11 Aug 2023 16:11:06 +0400 Subject: [PATCH 2/7] fix TestIntegrationS3Glacier after t.Parallel() refactoring --- test/integration/integration_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/integration_test.go
b/test/integration/integration_test.go index 7fb56183..59b0a092 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -1571,9 +1571,9 @@ func TestIntegrationS3Glacier(t *testing.T) { r := require.New(t) r.NoError(dockerCP("config-s3-glacier.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml.s3glacier-template")) installDebIfNotExists(r, "clickhouse-backup", "curl", "gettext-base", "bsdmainutils", "dnsutils", "git", "ca-certificates") - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config.yml.s3glacier-template | envsubst > /etc/clickhouse-backup/config.yml")) + r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config.yml.s3glacier-template | envsubst > /etc/clickhouse-backup/config-s3-glacier.yml")) dockerExecTimeout = 60 * time.Minute - runMainIntegrationScenario(t, "GLACIER") + runMainIntegrationScenario(t, "GLACIER", "config-s3-glacier.yml") dockerExecTimeout = 3 * time.Minute } From 7e8ed58e4a562cfcf2b8e909b3ab5b87f4d8def4 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 11 Aug 2023 17:26:04 +0400 Subject: [PATCH 3/7] disable GLACIER_TESTS --- test/integration/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/run.sh b/test/integration/run.sh index 729a30b9..36d3ab68 100755 --- a/test/integration/run.sh +++ b/test/integration/run.sh @@ -23,7 +23,7 @@ else export GCS_TESTS=${GCS_TESTS:-} fi -export GLACIER_TESTS=${GLACIER_TESTS:-} +export GLACIER_TESTS=${GLACIER_TESTS:-0} export AZURE_TESTS=${AZURE_TESTS:-1} export RUN_ADVANCED_TESTS=${RUN_ADVANCED_TESTS:-1} From 26ddc112b3f247e1c5ab56e0fb065b88d0b4a648 Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 12 Aug 2023 16:04:09 +0400 Subject: [PATCH 4/7] fix behavior in CLICKHOUSE_SKIP_TABLE_ENGINES --- ChangeLog.md | 4 +++- pkg/backup/table_pattern.go | 15 +++++++++++++-- pkg/clickhouse/clickhouse.go | 2 +- test/integration/integration_test.go | 3 ++- 4 
files changed, 19 insertions(+), 5 deletions(-) diff --git a/ChangeLog.md b/ChangeLog.md index 68b99338..5a6ebba0 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -14,7 +14,9 @@ BUG FIXES - fix too strict `system.parts_columns` check when backup create, exclude Enum and Tuple (JSON) and Nullable(Type) vs Type corner cases, fix [685](https://github.com/Altinity/clickhouse-backup/issues/685), fix [699](https://github.com/Altinity/clickhouse-backup/issues/699) - fix `--rbac` behavior when /var/lib/clickhouse/access not exists - restore functions via `CREATE OR REPLACE` -- fix apply skip_databases and get tables for corner case --tables="*pattern.*" +- fix `skip_databases` behavior for corner case `--tables="*pattern.*"` +- fix `skip_table_engines` behavior + # v2.3.2 BUG FIXES - fix error when `backups_to_keep_local: -1`, fix [698](https://github.com/Altinity/clickhouse-backup/issues/698) diff --git a/pkg/backup/table_pattern.go b/pkg/backup/table_pattern.go index eaa79165..2763bb37 100644 --- a/pkg/backup/table_pattern.go +++ b/pkg/backup/table_pattern.go @@ -126,7 +126,7 @@ func (b *Backuper) getTableListByPatternLocal(ctx context.Context, metadataPath return nil, nil, err } result.Sort(dropTable) - for i := 1; i < len(result); i++ { + for i := 0; i < len(result); i++ { if b.shouldSkipByTableEngine(result[i]) { t := result[i] delete(resultPartitionNames, metadata.TableTitle{Database: t.Database, Table: t.Table}) @@ -138,9 +138,20 @@ func (b *Backuper) shouldSkipByTableEngine(t metadata.TableMetadata) bool { for _, engine := range b.cfg.ClickHouse.SkipTableEngines { - if strings.Contains(strings.ToLower(t.Query), fmt.Sprintf("engine=%s(", engine)) { + if engine == "MaterializedView" && (strings.HasPrefix(t.Query, "ATTACH MATERIALIZED VIEW") || strings.HasPrefix(t.Query, "CREATE MATERIALIZED VIEW")) { + b.log.Warnf("shouldSkipByTableEngine engine=%s found in : %s", engine, t.Query)
return true } + if engine == "View" && strings.HasPrefix(t.Query, "CREATE VIEW") { + b.log.Warnf("shouldSkipByTableEngine engine=%s found in : %s", engine, t.Query) + return true + } + if shouldSkip, err := regexp.MatchString(fmt.Sprintf("(?mi)ENGINE\\s*=\\s*%s\\(", engine), t.Query); err == nil && shouldSkip { + b.log.Warnf("shouldSkipByTableEngine engine=%s found in : %s", engine, t.Query) + return true + } else if err != nil { + b.log.Warnf("shouldSkipByTableEngine engine=%s return error: %v", engine, err) + } } return false } diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index 788cf0ba..f1b809ca 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -436,7 +436,7 @@ func (ch *ClickHouse) prepareGetTablesSQL(tablePattern string, skipDatabases, sk allTablesSQL += fmt.Sprintf(" AND database NOT IN ('%s')", strings.Join(skipDatabases, "','")) } if len(skipTableEngines) > 0 { - allTablesSQL += fmt.Sprintf("AND engine NOT IN ('%s')", strings.Join(skipTableEngines, "','")) + allTablesSQL += fmt.Sprintf(" AND engine NOT IN ('%s')", strings.Join(skipTableEngines, "','")) } // try to upload big tables first if len(isSystemTablesFieldPresent) > 0 && isSystemTablesFieldPresent[0].IsTotalBytesPresent > 0 { diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 59b0a092..bf8803d5 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -1480,7 +1480,8 @@ func TestFIPS(t *testing.T) { defer ch.chbackend.Close() fipsBackupName := fmt.Sprintf("fips_backup_%d", rand.Int()) r.NoError(dockerExec("clickhouse", "rm", "-fv", "/etc/apt/sources.list.d/clickhouse.list")) - installDebIfNotExists(r, "clickhouse", "curl", "gettext-base", "bsdmainutils", "dnsutils", "git") + installDebIfNotExists(r, "clickhouse", "ca-certificates", "curl", "gettext-base", "bsdmainutils", "dnsutils", "git") + r.NoError(dockerExec("clickhouse-backup", "update-ca-certificates")) 
r.NoError(dockerCP("config-s3-fips.yml", "clickhouse:/etc/clickhouse-backup/config.yml.fips-template")) r.NoError(dockerExec("clickhouse", "git", "clone", "--depth", "1", "https://github.com/drwetter/testssl.sh.git", "/opt/testssl")) r.NoError(dockerExec("clickhouse", "chmod", "+x", "/opt/testssl/testssl.sh")) From 4ce964b14739d221e89ae1d6900e1737394c8654 Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 12 Aug 2023 16:53:09 +0400 Subject: [PATCH 5/7] try debug TestDoRestoreConfigs --- test/integration/integration_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index bf8803d5..121514d0 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -550,6 +550,9 @@ func TestDoRestoreConfigs(t *testing.T) { selectEmptyResultForAggQuery := "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'" var settings string r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, selectEmptyResultForAggQuery)) + if settings != "1" { + r.NoError(dockerExec("clickhouse", "grep", "empty_result_for_aggregation_by_empty_set", "-r", "/var/lib/clickhouse/preprocessed_configs/")) + } r.Equal("1", settings, "expect empty_result_for_aggregation_by_empty_set=1") r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml")) From 9d2b343b9bc7743103f32b1b4be6425511cbf0c5 Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 12 Aug 2023 17:14:25 +0400 Subject: [PATCH 6/7] fix TestDoRestoreConfigs --- test/integration/integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 121514d0..dd4a3d9c 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -544,9 +544,9 @@ func TestDoRestoreConfigs(t *testing.T) { r.NoError(dockerExec("clickhouse", "bash", "-xec", 
"CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml S3_COMPRESSION_FORMAT=none ALLOW_EMPTY_BACKUPS=1 clickhouse-backup upload test_configs_backup")) r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_configs_backup")) + ch.queryWithNoError(r, "SYSTEM RELOAD CONFIG") ch.chbackend.Close() ch.connectWithWait(r, 1*time.Second, 1*time.Second) - ch.queryWithNoError(r, "SYSTEM RELOAD CONFIG") selectEmptyResultForAggQuery := "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'" var settings string r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, selectEmptyResultForAggQuery)) From 10a600fd47049b9e8bf2ff3f8092667b038d1c96 Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 12 Aug 2023 17:31:17 +0400 Subject: [PATCH 7/7] fix TestFIPS --- test/integration/integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index dd4a3d9c..54c03ca2 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -1484,7 +1484,7 @@ func TestFIPS(t *testing.T) { fipsBackupName := fmt.Sprintf("fips_backup_%d", rand.Int()) r.NoError(dockerExec("clickhouse", "rm", "-fv", "/etc/apt/sources.list.d/clickhouse.list")) installDebIfNotExists(r, "clickhouse", "ca-certificates", "curl", "gettext-base", "bsdmainutils", "dnsutils", "git") - r.NoError(dockerExec("clickhouse-backup", "update-ca-certificates")) + r.NoError(dockerExec("clickhouse", "update-ca-certificates")) r.NoError(dockerCP("config-s3-fips.yml", "clickhouse:/etc/clickhouse-backup/config.yml.fips-template")) r.NoError(dockerExec("clickhouse", "git", "clone", "--depth", "1", "https://github.com/drwetter/testssl.sh.git", "/opt/testssl")) r.NoError(dockerExec("clickhouse", "chmod", "+x", "/opt/testssl/testssl.sh"))