Test 051_1 for 0.23.x -> 0.24 CHK volume migration
alex-zaitsev committed Oct 4, 2024
1 parent e6dbc15 commit 9d0fc9c
Showing 5 changed files with 158 additions and 17 deletions.
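In 0.24 the operator provisions CHK storage through its own PVC (default-chk-test-051-chk-single-0-0-0 here), so the data volume of a 0.23.x CHK has to be handed over explicitly: retain the PV behind the old PVC, delete the old CHK and its PVC, clear the PV's claimRef, then re-create the CHK with a PVC pinned to that PV. A minimal sketch of that sequence, assuming the e2e helpers touched in this diff (kubectl.get_pv_name, kubectl.launch, kubectl.delete_kind) and the resource names used by test_051_1 below:

# Sketch of the PV hand-off that test_051_1 automates (names from the test below).
pv = kubectl.get_pv_name("both-paths-test-051-chk-0")  # PV behind the 0.23.x PVC

# 1. Keep the volume when its claim is deleted.
kubectl.launch(f"patch pv {pv}" + """ -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'""")

# 2. Remove the 0.23.x CHK and its PVC.
kubectl.delete_kind("chk", "test-051-chk")
kubectl.delete_kind("pvc", "both-paths-test-051-chk-0")

# 3. Release the PV so the new PVC (which pins it via spec.volumeName) can bind it.
kubectl.launch(f"patch pv {pv}" + """ -p '{"spec":{"claimRef":null}}'""")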
6 changes: 3 additions & 3 deletions tests/e2e/kubectl.py
@@ -151,6 +151,8 @@ def create_and_check(manifest, check, kind="chi", ns=None, shell=None, timeout=1
 
     if "chi_status" in check:
         wait_chi_status(chi_name, check["chi_status"], ns=ns, shell=shell)
+    elif "chk_status" in check:
+        wait_chk_status(chi_name, check["chk_status"], ns=ns, shell=shell)
     else:
         # Wait for reconcile to start before performing other checks. In some cases it does not start, so we can pass
         # wait_field_changed("chi", chi_name, state_field, prev_state, ns)
@@ -431,9 +433,7 @@ def wait_jsonpath(kind, name, field, value, ns=None, retries=max_retries):


 def get_field(kind, name, field, ns=None, shell=None):
-    out = ""
-    if get_count(kind, name=name, ns=ns, shell=shell) > 0:
-        out = launch(f"get {kind} {name} -o=custom-columns=field:{field}", ns=ns, shell=shell).splitlines()
+    out = launch(f"get {kind} {name} -o=custom-columns=field:{field}", ns=ns, ok_to_fail=True, shell=shell).splitlines()
     if len(out) > 1:
         return out[1]
     else:
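For reference, a usage sketch of the extended helpers, mirroring how test_051_1 below installs the Keeper CR: kind="chk" (with an optional "chk_status" check) routes the status wait to wait_chk_status, and get_field now issues a single kubectl call with ok_to_fail=True instead of counting the resource first, so it degrades gracefully when the object does not exist. The ".status.status" field path here is an illustrative assumption, not taken from this diff:

# Usage sketch; manifest and check arguments are copied from test_051_1 below.
kubectl.create_and_check(
    manifest="manifests/chk/test-051-chk-chop-upgrade.yaml",
    kind="chk",
    check={"do_not_delete": 1},
)
# Returns the column value, or falls through to the else branch if the chk is absent.
status = kubectl.get_field("chk", "test-051-chk", ".status.status")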
5 changes: 2 additions & 3 deletions tests/e2e/manifests/chk/test-051-chk-chop-upgrade-2.yaml
@@ -37,7 +37,6 @@ spec:
     clusters:
       - name: single
     settings:
-      # logger/console: "true"
-      # listen_host: "0.0.0.0"
+      # can be skipped, this is default
+      logger/console: "true"
       keeper_server/tcp_port: "2181"

47 changes: 47 additions & 0 deletions tests/e2e/manifests/chk/test-051-chk-chop-upgrade-3.yaml
@@ -0,0 +1,47 @@
+apiVersion: "clickhouse-keeper.altinity.com/v1"
+kind: "ClickHouseKeeperInstallation"
+metadata:
+  name: test-051-chk
+spec:
+  defaults:
+    templates:
+      podTemplate: default
+      volumeClaimTemplate: default
+      serviceTemplate: backwards-compatible
+  templates:
+    podTemplates:
+      - name: default
+        spec:
+          containers:
+            - name: clickhouse-keeper
+              imagePullPolicy: IfNotPresent
+              image: "clickhouse/clickhouse-keeper:24.3.5.46"
+    volumeClaimTemplates:
+      - name: default
+        spec:
+          accessModes:
+            - ReadWriteOnce
+          resources:
+            requests:
+              storage: 1Gi
+          volumeName: {volumeNamePlaceHolder}
+    serviceTemplates:
+      - name: backwards-compatible # operator 0.24 default service name is keeper-test-051-chk
+        generateName: "test-051-chk"
+        spec:
+          ports:
+            - name: zk
+              port: 2181
+          type: ClusterIP
+          clusterIP: None
+  configuration:
+    clusters:
+      - name: single
+    settings:
+      # can be skipped, this is default
+      logger/console: "true"
+      keeper_server/tcp_port: "2181"
+      # Required for backwards compatibility with operator 0.23.x
+      keeper_server/log_storage_path: /var/lib/clickhouse-keeper/logs
+      keeper_server/snapshot_storage_path: /var/lib/clickhouse-keeper/snapshots
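The line that makes the migration work is volumeName: the re-created PVC binds directly to the retained PV instead of provisioning fresh storage, while the two *_storage_path settings keep the 0.23.x on-disk layout. {volumeNamePlaceHolder} is substituted with the real PV name at apply time; a sketch of that step, mirroring the sed pipeline in test_051_1 below (pv and namespace come from the test context):

# Pin the new PVC to the retained PV and apply the manifest (sketch; see test_051_1).
manifest = util.get_full_path("manifests/chk/test-051-chk-chop-upgrade-3.yaml")
cmd = f'cat {manifest} | sed "s/{{volumeNamePlaceHolder}}/{pv}/g" | kubectl apply -n {namespace} -f -'
kubectl.run_shell(cmd, 300)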

1 change: 0 additions & 1 deletion tests/e2e/manifests/chk/test-051-chk-chop-upgrade.yaml
@@ -26,6 +26,5 @@ spec:
       - name: single
     settings:
       logger/console: "true"
-      # listen_host: "0.0.0.0"
       keeper_server/tcp_port: "2181"

116 changes: 106 additions & 10 deletions tests/e2e/test_operator.py
@@ -4721,6 +4721,9 @@ def test_labels(chi, label, value):
 @Name("test_051. Test CHK upgrade from 0.23.x operator version")
 @Tags("NO_PARALLEL")
 def test_051(self):
+    with Then("Skip it. test_051_1 does a better job"):
+        return
+
     version_from = "0.23.7"
     version_to = current().context.operator_version  # "0.24.0"
     current().context.operator_version = version_from
@@ -4751,16 +4754,7 @@ def test_051(self):
     )
 
     with When("I create replicated table"):
-        create_table = """
-        CREATE TABLE test_local_051 ON CLUSTER 'default' (a UInt32)
-        Engine = ReplicatedMergeTree('/clickhouse/{installation}/tables/{shard}/{database}/{table}', '{replica}')
-        PARTITION BY tuple()
-        ORDER BY a
-        """.replace(
-            "\r", ""
-        ).replace(
-            "\n", ""
-        )
+        create_table = "CREATE TABLE test_local_051 ON CLUSTER 'default' (a UInt32) Engine = ReplicatedMergeTree ORDER BY a"
         clickhouse.query(chi, create_table)
 
     with And("I insert data in the replicated table"):
@@ -4825,6 +4819,108 @@ def test_051(self):
     with Finally("I clean up"):
         delete_test_namespace()
 
+
+@TestScenario
+@Name("test_051_1. Test CHK upgrade from 0.23.x operator version")
+@Tags("NO_PARALLEL")
+def test_051_1(self):
+    version_from = "0.23.7"
+    version_to = current().context.operator_version  # "0.24.0"
+    current().context.operator_version = version_from
+    create_shell_namespace_clickhouse_template()
+
+    chi_manifest = "manifests/chi/test-051-chk-chop-upgrade.yaml"
+    chk_manifest = "manifests/chk/test-051-chk-chop-upgrade.yaml"
+    chi = yaml_manifest.get_name(util.get_full_path(chi_manifest))
+    chk = yaml_manifest.get_name(util.get_full_path(chk_manifest))
+    cluster = "default"
+
+    with Given("Install CHK"):
+        kubectl.create_and_check(
+            manifest=chk_manifest, kind="chk",
+            check={
+                # "pod_count": 1,  # does not work in 0.23.7
+                "do_not_delete": 1,
+            },
+        )
+
+    with Given("CHI with 2 replicas"):
+        kubectl.create_and_check(
+            manifest=chi_manifest,
+            check={
+                "pod_count": 2,
+                "do_not_delete": 1,
+            },
+        )
+
+    with When("I create replicated table"):
+        create_table = "CREATE TABLE test_local_051 ON CLUSTER 'default' (a UInt32) Engine = ReplicatedMergeTree ORDER BY a"
+        clickhouse.query(chi, create_table)
+
+    with And("I insert data in the replicated table"):
+        clickhouse.query(chi, "INSERT INTO test_local_051 SELECT 1")
+
+    with Then("Check replicated table has data on both nodes"):
+        for replica in {0, 1}:
+            out = clickhouse.query(chi, "SELECT count(*) from test_local_051", host=f"chi-{chi}-{cluster}-0-{replica}-0")
+            assert out == "1", error()
+
+    old_pvc = "both-paths-test-051-chk-0"
+    pv = kubectl.get_pv_name(old_pvc)
+    new_pvc = "default-chk-test-051-chk-single-0-0-0"
+
+    with Then("Set PV persistentVolumeReclaimPolicy to Retain"):
+        kubectl.launch(f"patch pv {pv}" + """ -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'""")
+
+    with Then("Delete old Keeper resources"):
+        kubectl.delete_kind("chk", chk)
+        kubectl.delete_kind("pvc", old_pvc)
+
+    with Then("Unmount PV from old PVC"):
+        kubectl.launch(f"patch pv {pv}" + """ -p '{"spec":{"claimRef":null}}'""")
+
+    with When(f"upgrade operator to {version_to}"):
+        util.install_operator_version(version_to)
+        time.sleep(30)
+
+        kubectl.wait_chi_status(chi, "Completed")
+
+    with Given("Re-deploy CHK, substituting PV in PVC template"):
+        volumeNamePlaceHolder = "{volumeNamePlaceHolder}"
+        manifest = util.get_full_path("manifests/chk/test-051-chk-chop-upgrade-3.yaml")
+        cmd = f"""cat {manifest} | sed "s/{volumeNamePlaceHolder}/{pv}/g" | kubectl apply -n {current().context.test_namespace} -f -"""
+        kubectl.run_shell(cmd, 300)
+
+        kubectl.wait_chk_status(chk, "Completed")
+
+    with Then("CLICKHOUSE_DATA_DIR should be properly set"):
+        pod = kubectl.get_pod_spec("", "chk-test-051-chk-single-0-0-0")
+        env = pod["containers"][0]["env"][0]
+        assert env["name"] == "CLICKHOUSE_DATA_DIR"
+        assert env["value"] == "/var/lib/clickhouse-keeper"
with Then("Wiat until Keeper connection is established"):
out = 0
for i in range(1, 10):
out = clickhouse.query_with_error(chi, "SELECT count(*) from system.zookeeper_connection")
if out == "1":
break
with Then("Waiting 10 seconds"):
time.sleep(10)
assert out == "1", error()

+    with And("I insert data in the replicated table"):
+        clickhouse.query(chi, "INSERT INTO test_local_051 SELECT 2")
+
+    with Then("Check replicated table has data on both nodes"):
+        for replica in {0, 1}:
+            out = clickhouse.query(chi, "SELECT count(*) from test_local_051", host=f"chi-{chi}-{cluster}-0-{replica}-0")
+            assert out == "2", error()
+
+    with Finally("I clean up"):
+        delete_test_namespace()

 @TestModule
 @Name("e2e.test_operator")
 @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_APIVersion("1.0"),
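Outside the test harness, the CLICKHOUSE_DATA_DIR check from test_051_1 can be reproduced with plain kubectl; a standalone sketch (pod name follows the test above, the namespace is a placeholder, and the jsonpath filter is standard kubectl syntax):

import subprocess

# Read the env var the 0.24 operator injects into the migrated Keeper pod.
value = subprocess.check_output(
    [
        "kubectl", "get", "pod", "chk-test-051-chk-single-0-0-0",
        "-n", "test-namespace",  # placeholder; the test uses its own namespace
        "-o", 'jsonpath={.spec.containers[0].env[?(@.name=="CLICKHOUSE_DATA_DIR")].value}',
    ],
    text=True,
)
assert value == "/var/lib/clickhouse-keeper"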
