From 3d246a21f06b0bf8d2791f49a118082d1a0e41a9 Mon Sep 17 00:00:00 2001
From: Slancaster1
Date: Thu, 29 Aug 2024 14:57:51 -0400
Subject: [PATCH] vm_start_destroy_repeatedly: Add Hugepage Variant

Add a variant to vm_start_destroy_repeatedly.py that runs the VM with
hugepage-backed memory and verifies the hugepage pool state after the
final power cycle.

Signed-off-by: Slancaster1
---
 .../tests/cfg/vm_start_destroy_repeatedly.cfg |  8 ++
 .../tests/src/vm_start_destroy_repeatedly.py  | 80 ++++++++++++++++++-
 2 files changed, 84 insertions(+), 4 deletions(-)

diff --git a/libvirt/tests/cfg/vm_start_destroy_repeatedly.cfg b/libvirt/tests/cfg/vm_start_destroy_repeatedly.cfg
index 909c7a4a10..c101021786 100644
--- a/libvirt/tests/cfg/vm_start_destroy_repeatedly.cfg
+++ b/libvirt/tests/cfg/vm_start_destroy_repeatedly.cfg
@@ -3,3 +3,11 @@
     num_cycles = 3000
     start_vm = no
     test_timeout = 288000
+    check_hugepage_status = False
+    variants:
+        - hugepage:
+            num_cycles = 100
+            check_hugepage_status = True
+            mb_params = {'hugepages': {}, 'source_type': 'memfd', 'access_mode': 'shared'}
+            vm_attrs = {'memory': 8388608, 'memory_unit': 'KiB'}
+        - @default:
diff --git a/libvirt/tests/src/vm_start_destroy_repeatedly.py b/libvirt/tests/src/vm_start_destroy_repeatedly.py
index 4aae9f1f11..d2ec8dacb7 100644
--- a/libvirt/tests/src/vm_start_destroy_repeatedly.py
+++ b/libvirt/tests/src/vm_start_destroy_repeatedly.py
@@ -1,6 +1,9 @@
 import logging
 from virttest import virsh
 from virttest import utils_misc
+from virttest import xml_utils
+from virttest.staging import utils_memory
+from virttest.libvirt_xml import vm_xml
 
 
 def power_cycle_vm(test, vm, vm_name, login_timeout, startup_wait, resume_wait):
@@ -37,6 +40,61 @@ def power_cycle_vm(test, vm, vm_name, login_timeout, startup_wait, resume_wait):
         test.fail("Failed to shutdown VM")
 
 
+def setup_hugepage(vm, params):
+    """
+    Reserve a hugepage pool sized for the VM and rewrite the VM XML
+    to back its memory with hugepages.
+
+    :return: tuple of (number of hugepages reserved, backup of the
+             original inactive VM XML for restoration in teardown)
+    """
+    vm_attrs = eval(params.get("vm_attrs", "{}"))
+    memory_amount = int(vm_attrs["memory"])
+
+    # Reserve memory for hugepages; both values are in KiB, and the pool
+    # size must be an integer, hence floor division
+    hugepage_size = utils_memory.get_huge_page_size()
+    hugepage_nr = memory_amount // hugepage_size
+    utils_memory.set_num_huge_pages(hugepage_nr)
+    logging.info("Set number of hugepages to {}".format(hugepage_nr))
+
+    # Prepare VM XML
+    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
+    backup_xml = vmxml.copy()
+
+    # Remove old memory tags
+    vmxml.xmltreefile.remove_by_xpath("/memory")
+    vmxml.xmltreefile.remove_by_xpath("/currentMemory")
+
+    # Add in new memory tags
+    xmltreefile = vmxml.__dict_get__('xml')
+    xml_utils.ElementTree.SubElement(xmltreefile.getroot(), "memory").text = str(memory_amount)
+    xml_utils.ElementTree.SubElement(xmltreefile.getroot(), "currentMemory").text = str(memory_amount)
+
+    # Include memory backing
+    mb_xml = vm_xml.VMMemBackingXML()
+    mb_params = eval(params.get("mb_params", "{}"))
+    mb_xml.setup_attrs(**mb_params)
+    vmxml.setup_attrs(**vm_attrs)
+    vmxml.mb = mb_xml
+
+    # The relevant bug only appears with disk cache='none'
+    disk_nodes = xmltreefile.find("devices").findall("disk")
+    qcow_disk = [disk for disk in disk_nodes
+                 if disk.find("driver").get("type") == "qcow2"][0]
+    if qcow_disk.find("driver").get("cache") != "none":
+        qcow_disk.find("driver").set("cache", "none")
+
+    vmxml.xmltreefile.write()
+    vmxml.sync()
+    logging.info("New XML for hugepage testing: {}".format(vmxml))
+
+    return hugepage_nr, backup_xml
+
+
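+# Sanity checks against the host hugepage pool counters (total, free and
+# reserved pages) as reported by utils_memory. Once the final power cycle
+# has destroyed the VM, the pool should still contain every allocated page,
+# all of them should be free, and none should be held in reserve.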
+def check_hugepage_status(test, hugepage_nr):
+    if hugepage_nr != utils_memory.get_num_huge_pages():
+        test.fail("Total number of hugepages does not match. Expected: {}. Actual: {}"
+                  .format(hugepage_nr, utils_memory.get_num_huge_pages()))
+    if hugepage_nr != utils_memory.get_num_huge_pages_free():
+        test.fail("Number of free hugepages does not match. Expected: {}. Actual: {}"
+                  .format(hugepage_nr, utils_memory.get_num_huge_pages_free()))
+    if utils_memory.get_num_huge_pages_rsvd() != 0:
+        test.fail("Hugepages still reserved. Expected: 0. Actual: {}"
+                  .format(utils_memory.get_num_huge_pages_rsvd()))
+
+
 def run(test, params, env):
     """
     Test qemu-kvm startup reliability
@@ -52,10 +110,24 @@ def run(test, params, env):
     login_timeout = float(params.get("login_timeout", 240))  # Controls vm.wait_for_login() timeout
     startup_wait = float(params.get("startup_wait", 240))  # Controls wait time for virsh.start()
     resume_wait = float(params.get("resume_wait", 240))  # Controls wait for virsh.resume()
+    # params.get() returns the string "True"/"False"; bool() on any
+    # non-empty string is True, so compare against the literal instead
+    hugepage_check = params.get("check_hugepage_status", "False") == "True"
+
+    backup_xml = None
+    hugepage_nr = None
+    if hugepage_check:
+        hugepage_nr, backup_xml = setup_hugepage(vm, params)
+        logging.info("hugepage_nr: {}".format(hugepage_nr))
+
+    try:
+        for i in range(num_cycles):
+            logging.info("Starting vm '%s' -- attempt #%d", vm_name, i+1)
 
-    for i in range(num_cycles):
-        logging.info("Starting vm '%s' -- attempt #%d", vm_name, i+1)
+            power_cycle_vm(test, vm, vm_name, login_timeout, startup_wait, resume_wait)
 
-        power_cycle_vm(test, vm, vm_name, login_timeout, startup_wait, resume_wait)
+            logging.info("Completed vm '%s' power cycle #%d", vm_name, i+1)
+    finally:
+        # Restore the original XML and confirm all hugepages were released,
+        # even if a power cycle failed mid-run
+        if hugepage_check:
+            backup_xml.sync()
 
-        logging.info("Completed vm '%s' power cycle #%d", vm_name, i+1)
+            check_hugepage_status(test, hugepage_nr)
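
For reference, a minimal standalone sketch of the pool-size arithmetic in
setup_hugepage(). The 2048 KiB page size below is an assumption (the common
x86_64 default); the test itself reads the real value through
utils_memory.get_huge_page_size():

    # Sketch only: mirrors hugepage_nr = memory_amount // hugepage_size.
    # HUGEPAGE_KIB is assumed here rather than read from the host.
    VM_MEMORY_KIB = 8388608   # 8 GiB, matching vm_attrs in the hugepage variant
    HUGEPAGE_KIB = 2048       # typical x86_64 default hugepage size
    hugepage_nr = VM_MEMORY_KIB // HUGEPAGE_KIB
    print(hugepage_nr)        # 4096, the count check_hugepage_status() expects

With these values the variant reserves 4096 hugepages, all of which should be
allocated, free, and unreserved once the last power cycle completes.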