diff --git a/libvirt/tests/cfg/vm_start_destroy_repeatedly.cfg b/libvirt/tests/cfg/vm_start_destroy_repeatedly.cfg
index 909c7a4a10..6d464ff426 100644
--- a/libvirt/tests/cfg/vm_start_destroy_repeatedly.cfg
+++ b/libvirt/tests/cfg/vm_start_destroy_repeatedly.cfg
@@ -3,3 +3,8 @@
     num_cycles = 3000
     start_vm = no
     test_timeout = 288000
+    check_hugepage_status = False
+    variants:
+        - hugepage:
+            num_cycles = 100
+            check_hugepage_status = True
diff --git a/libvirt/tests/src/vm_start_destroy_repeatedly.py b/libvirt/tests/src/vm_start_destroy_repeatedly.py
index 4aae9f1f11..dcbb92e828 100644
--- a/libvirt/tests/src/vm_start_destroy_repeatedly.py
+++ b/libvirt/tests/src/vm_start_destroy_repeatedly.py
@@ -1,6 +1,9 @@
 import logging
 from virttest import virsh
 from virttest import utils_misc
+from virttest import xml_utils
+from virttest.staging import utils_memory
+from virttest.libvirt_xml import vm_xml
 
 
 def power_cycle_vm(test, vm, vm_name, login_timeout, startup_wait, resume_wait):
@@ -37,6 +40,55 @@ def power_cycle_vm(test, vm, vm_name, login_timeout, startup_wait, resume_wait):
         test.fail("Failed to shutdown VM")
 
 
+def setup_hugepage(vm, memory_amount='8388608'):
+
+    # Reserve memory for hugepages
+    hugepage_size = utils_memory.get_huge_page_size()
+    hugepage_nr = int(memory_amount) // hugepage_size
+    utils_memory.set_num_huge_pages(hugepage_nr)
+    logging.info("Set number of hugepages to {}".format(hugepage_nr))
+
+    # Prepare VM XML
+    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
+    backup_xml = vmxml.copy()
+
+    # Remove old memory tags
+    vmxml.xmltreefile.remove_by_xpath("/memory")
+    vmxml.xmltreefile.remove_by_xpath("/currentMemory")
+
+    # Add in new memory tags
+    xmltreefile = vmxml.__dict_get__('xml')
+    xml_utils.ElementTree.SubElement(xmltreefile.getroot(), "memory").text = memory_amount
+    xml_utils.ElementTree.SubElement(xmltreefile.getroot(), "currentMemory").text = memory_amount
+
+    # Include memory backing
+    mb_xml = vm_xml.VMMemBackingXML()
+    mb_xml.hugepages = vm_xml.VMHugepagesXML()
+    mb_xml.source_type = "file"
+    mb_xml.access_mode = "shared"
+    vmxml.mb = mb_xml
+
+    # The relevant bug only appears if disk cache='none'
+    disk_nodes = xmltreefile.find("devices").findall("disk")
+    qcow_disk = [disk for disk in disk_nodes if disk.find("driver").get("type") == "qcow2"][0]
+    qcow_disk.find("driver").set("cache", "none")
+
+    vmxml.xmltreefile.write()
+    vmxml.sync()
+    logging.info("New XML for Hugepage testing: {}".format(vmxml))
+
+    return hugepage_nr, backup_xml
+
+
+def check_hugepage_status(test, hugepage_nr):
+    if hugepage_nr != utils_memory.get_num_huge_pages():
+        test.fail("Total number of hugepages does not match")
+    if hugepage_nr != utils_memory.get_num_huge_pages_free():
+        test.fail("Number of free huge pages does not match")
+    if utils_memory.get_num_huge_pages_rsvd() != 0:
+        test.fail("Huge pages still reserved")
+
+
 def run(test, params, env):
     """
     Test qemu-kvm startup reliability
@@ -52,6 +104,12 @@ def run(test, params, env):
     login_timeout = float(params.get("login_timeout", 240))  # Controls vm.wait_for_login() timeout
     startup_wait = float(params.get("startup_wait", 240))  # Controls wait time for virsh.start()
     resume_wait = float(params.get("resume_wait", 240))  # Controls wait for virsh.resume()
+    hugepage_check = params.get("check_hugepage_status", "False") == "True"
+
+    backup_xml = None
+    hugepage_nr = None
+    if hugepage_check:
+        hugepage_nr, backup_xml = setup_hugepage(vm)
 
     for i in range(num_cycles):
         logging.info("Starting vm '%s' -- attempt #%d", vm_name, i+1)
@@ -59,3 +117,7 @@ def run(test, params, env):
 
         power_cycle_vm(test, vm, vm_name, login_timeout, startup_wait, resume_wait)
         logging.info("Completed vm '%s' power cycle #%d", vm_name, i+1)
+
+    if hugepage_check:
+        backup_xml.sync()
+        check_hugepage_status(test, hugepage_nr)