Skip to content

Commit

Permalink
Merge pull request #4173 from julpark-rh/oct14
Browse files Browse the repository at this point in the history
adding kernel enhancement
  • Loading branch information
mergify[bot] authored Oct 18, 2024
2 parents 60c9a35 + bdd5c9b commit 1d95cbb
Show file tree
Hide file tree
Showing 5 changed files with 180 additions and 4 deletions.
8 changes: 7 additions & 1 deletion suites/pacific/cephfs/tier-0_fs_kernel.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -133,10 +133,16 @@ tests:
abort-on-fail: false
- test:
name: Run fsstress on kernel and fuse mounts
module: cephfs_bugs.test_fsstress_on_kernel_and_fuse.py
module: cephfs_bugs.fsstress_kernel_verification.py
polarion-id: CEPH-83575623
desc: Run fsstress on kernel and fuse mounts
abort-on-fail: false
- test:
name: Run xfs test on kernel
module: xfs_test.py
polarion-id: CEPH-83575623
desc: Run xfs test on kernel
abort-on-fail: false
-
test:
desc: "generate sosreport"
Expand Down
8 changes: 7 additions & 1 deletion suites/quincy/cephfs/tier-0_fs_kernel.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -133,10 +133,16 @@ tests:
abort-on-fail: false
- test:
name: Run fsstress on kernel and fuse mounts
module: cephfs_bugs.test_fsstress_on_kernel_and_fuse.py
module: cephfs_bugs.fsstress_kernel_verification.py
polarion-id: CEPH-83575623
desc: Run fsstress on kernel and fuse mounts
abort-on-fail: false
- test:
name: Run xfs test on kernel
module: xfs_test.py
polarion-id: CEPH-83575623
desc: Run xfs test on kernel
abort-on-fail: false
-
test:
desc: "generate sosreport"
Expand Down
8 changes: 7 additions & 1 deletion suites/reef/cephfs/tier-0_fs_kernel.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -134,10 +134,16 @@ tests:
abort-on-fail: false
- test:
name: Run fsstress on kernel and fuse mounts
module: cephfs_bugs.test_fsstress_on_kernel_and_fuse.py
module: cephfs_bugs.fsstress_kernel_verification.py
polarion-id: CEPH-83575623
desc: Run fsstress on kernel and fuse mounts
abort-on-fail: false
- test:
name: Run xfs test on kernel
module: xfs_test.py
polarion-id: CEPH-83575623
desc: Run xfs test on kernel
abort-on-fail: false
-
test:
desc: "generate sosreport"
Expand Down
157 changes: 157 additions & 0 deletions tests/cephfs/cephfs_bugs/fsstress_kernel_verification.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,157 @@
import random
import string
import time
import traceback

from tests.cephfs.cephfs_utilsV1 import FsUtils as FsUtilsV1
from utility.log import Log

log = Log(__name__)


def run(ceph_cluster, **kw):
"""
CEPH-83575623 - Run fsstress.sh repeatedly on fuse and kernel clients for a while and validate no crash is seen.
Procedure :
1. Create 2 Subvolumes - each mounted on kernel and fuse.
2. Run fsstress for few iterations(10).
3. Check for Ceph Health Status to validate if there is no Health Warn or Health Error status.
Clean-up:
1. Remove files from mountpoint, Unmount subvolumes.
3. ceph fs subvolume rm <vol_name> <subvol_name> [--group_name <subvol_group_name>]
4. ceph fs subvolumegroup rm <vol_name> <group_name>
"""

try:
tc = "CEPH-83575623"
log.info(f"Running cephfs {tc} test case")
test_data = kw.get("test_data")
fs_util_v1 = FsUtilsV1(ceph_cluster, test_data=test_data)
erasure = (
FsUtilsV1.get_custom_config_value(test_data, "erasure")
if test_data
else False
)
config = kw.get("config")
build = config.get("build", config.get("rhbuild"))
clients = ceph_cluster.get_ceph_objects("client")
log.info("checking Pre-requisites")
if len(clients) < 1:
log.error(
f"This test requires minimum 1 client nodes.This has only {len(clients)} clients"
)
return 1
log.info("Installing required packages for make")
clients[0].exec_command(
sudo=True, cmd='dnf groupinstall "Development Tools" -y'
)
fs_util_v1.prepare_clients(clients, build)
fs_util_v1.auth_list(clients)
default_fs = "cephfs" if not erasure else "cephfs-ec"
# if "cephfs" does not exsit, create it
fs_details = fs_util_v1.get_fs_info(clients[0], default_fs)
if not fs_details:
fs_util_v1.create_fs(clients[0], default_fs)
subvolume_group_name = "subvol_group1"
subvolume_name = "subvol"
subvolumegroup = {
"vol_name": default_fs,
"group_name": subvolume_group_name,
}
fs_util_v1.create_subvolumegroup(clients[0], **subvolumegroup)
subvolume_list = [
{
"vol_name": default_fs,
"subvol_name": f"{subvolume_name}_1",
"group_name": subvolume_group_name,
},
{
"vol_name": default_fs,
"subvol_name": f"{subvolume_name}_2",
"group_name": subvolume_group_name,
},
]
for subvolume in subvolume_list:
fs_util_v1.create_subvolume(clients[0], **subvolume)

mounting_dir = "".join(
random.choice(string.ascii_lowercase + string.digits)
for _ in list(range(10))
)
log.info("Mount 1 subvolume on kernel and 1 subvolume on Fuse → Client1")

kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}_1/"
mon_node_ips = fs_util_v1.get_mon_node_ips()
subvol_path_kernel, rc = clients[0].exec_command(
sudo=True,
cmd=f"ceph fs subvolume getpath {default_fs} {subvolume_name}_1 {subvolume_group_name}",
)
fs_util_v1.kernel_mount(
[clients[0]],
kernel_mounting_dir_1,
",".join(mon_node_ips),
sub_dir=f"{subvol_path_kernel.strip()}",
extra_params=f",fs={default_fs}",
)

fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}_1/"
subvol_path_fuse, rc = clients[0].exec_command(
sudo=True,
cmd=f"ceph fs subvolume getpath {default_fs} {subvolume_name}_2 {subvolume_group_name}",
)
fs_util_v1.fuse_mount(
[clients[0]],
fuse_mounting_dir_1,
extra_params=f" -r {subvol_path_fuse.strip()} --client_fs {default_fs}",
)

log.info("Run fsstress for few iterations on fuse and kernel mounts.")
fsstress_url = "https://raw.githubusercontent.com/ceph/ceph/main/qa/workunits/suites/fsstress.sh"

def run_commands(client, commands):
for command in commands:
client.exec_command(sudo=True, cmd=command)

directories = [kernel_mounting_dir_1, fuse_mounting_dir_1]
for directory in directories:
commands = [
f"mkdir -p {directory}fsstress/",
f"cd {directory}fsstress/ && wget {fsstress_url}",
f"chmod 777 {directory}fsstress/fsstress.sh",
]
run_commands(clients[0], commands)
iterations = 50
log.info(
f"run fsstress on kernel and fuse in parallel for {iterations} iterations"
)
for _ in range(iterations):
for directory in [kernel_mounting_dir_1, fuse_mounting_dir_1]:
clients[0].exec_command(
sudo=True, cmd=f"sh {directory}fsstress/fsstress.sh"
)
time.sleep(10)
clients[0].exec_command(
sudo=True, cmd=f"rm -rf {directory}fsstress/fsstress/"
)
time.sleep(10)
return 0
except Exception as e:
log.error(e)
log.error(traceback.format_exc())
return 1
finally:
log.error("Clean up the system")
fs_util_v1.client_clean_up(
"umount", kernel_clients=[clients[0]], mounting_dir=kernel_mounting_dir_1
)

fs_util_v1.client_clean_up(
"umount", fuse_clients=[clients[0]], mounting_dir=fuse_mounting_dir_1
)

for subvolume in subvolume_list:
fs_util_v1.remove_subvolume(clients[0], **subvolume)

fs_util_v1.remove_subvolumegroup(
clients[0], default_fs, subvolume_group_name, force=True
)
3 changes: 2 additions & 1 deletion tests/cephfs/xfs_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ def run(ceph_cluster, **kw):
cmd=f"echo '{xfs_config_context}' > /root/xfstests-dev/local.config",
)
# create exclude file
exclude_file = "generic/003"
exclude_file = "generic/003 generic/538 generic/397 generic/379"
client1.exec_command(
sudo=True, cmd=f"echo '{exclude_file}' > /root/xfstests-dev/ceph.exclude"
)
Expand All @@ -124,6 +124,7 @@ def run(ceph_cluster, **kw):
cmd="cd /root/xfstests-dev && ./check -d -T -g quick -e ceph.exclude",
check_ec=False,
long_running=True,
timeout=1800,
)
log.info("XFS tests completed successfully")
return 0
Expand Down

0 comments on commit 1d95cbb

Please sign in to comment.