vm_start_destroy_repeatedly: Add Hugepage Variant
Add a variant to vm_start_destroy_repeatedly.py that exercises
a VM backed by hugepages.

Signed-off-by: Slancaster1 <[email protected]>
Slancaster1 committed Aug 29, 2024
1 parent 595840c commit dc9ed63
Showing 2 changed files with 67 additions and 0 deletions.
5 changes: 5 additions & 0 deletions libvirt/tests/cfg/vm_start_destroy_repeatedly.cfg
@@ -3,3 +3,8 @@
    num_cycles = 3000
    start_vm = no
    test_timeout = 288000
    check_hugepage_status = False
    variants:
        - hugepage:
            num_cycles = 100
            check_hugepage_status = True
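The Cartesian config delivers every value to the test as a string, and selecting the hugepage variant overrides num_cycles and check_hugepage_status. A minimal sketch of the params the test would receive (the dict shape is illustrative only; the keys come from the cfg above):

# Illustrative sketch: params as seen by run() when the hugepage variant
# is selected. All Cartesian config values arrive as strings.
params = {
    "num_cycles": "100",              # overridden by the hugepage variant
    "start_vm": "no",
    "test_timeout": "288000",
    "check_hugepage_status": "True",  # a string, not a bool
}
# bool("False") evaluates to True, so the test must compare strings:
assert params.get("check_hugepage_status", "False") == "True"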
62 changes: 62 additions & 0 deletions libvirt/tests/src/vm_start_destroy_repeatedly.py
@@ -1,6 +1,9 @@
import logging
from virttest import virsh
from virttest import utils_misc
from virttest import xml_utils
from virttest.staging import utils_memory
from virttest.libvirt_xml import vm_xml


def power_cycle_vm(test, vm, vm_name, login_timeout, startup_wait, resume_wait):
@@ -37,6 +40,55 @@ def power_cycle_vm(test, vm, vm_name, login_timeout, startup_wait, resume_wait):
        test.fail("Failed to shutdown VM")


def setup_hugepage(vm, memory_amount='8388608'):
    """
    Back the VM's memory with hugepages and reserve them on the host.

    :param vm: VM object to reconfigure
    :param memory_amount: guest memory in KiB
    :return: tuple of (number of hugepages reserved, backup of the original XML)
    """
    # Reserve memory for hugepages
    hugepage_size = utils_memory.get_huge_page_size()
    hugepage_nr = int(memory_amount) // hugepage_size
    utils_memory.set_num_huge_pages(hugepage_nr)
    logging.info("Set number of hugepages to {}".format(hugepage_nr))

    # Prepare VM XML
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
    backup_xml = vmxml.copy()

    # Remove old memory tags
    vmxml.xmltreefile.remove_by_xpath("/memory")
    vmxml.xmltreefile.remove_by_xpath("/currentMemory")

    # Add in new memory tags
    xmltreefile = vmxml.__dict_get__('xml')
    xml_utils.ElementTree.SubElement(xmltreefile.getroot(), "memory").text = memory_amount
    xml_utils.ElementTree.SubElement(xmltreefile.getroot(), "currentMemory").text = memory_amount

    # Include memory backing
    mb_xml = vm_xml.VMMemBackingXML()
    mb_xml.hugepages = vm_xml.VMHugepagesXML()
    mb_xml.source_type = "file"
    mb_xml.access_mode = "shared"
    vmxml.mb = mb_xml

    # The relevant bug only appears if disk cache='none'
    disk_nodes = xmltreefile.find("devices").findall("disk")
    qcow_disk = [disk for disk in disk_nodes if disk.find("driver").get("type") == "qcow2"][0]
    qcow_disk.find("driver").set("cache", "none")

    vmxml.xmltreefile.write()
    vmxml.sync()
    logging.info("New XML for hugepage testing: {}".format(vmxml))

    return hugepage_nr, backup_xml
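As a worked example of the sizing arithmetic in setup_hugepage (assuming the common x86_64 default hugepage size of 2048 KiB, which utils_memory.get_huge_page_size() reports in KiB):

# Worked example of the hugepage sizing above, assuming a 2048 KiB
# hugepage size (the common x86_64 default; all sizes are in KiB).
memory_amount = 8388608                       # 8 GiB of guest memory, in KiB
hugepage_size = 2048                          # hugepage size, in KiB
hugepage_nr = memory_amount // hugepage_size
print(hugepage_nr)                            # 4096 pages must be reserved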


def check_hugepage_status(test, hugepage_nr):
    """
    Verify that all reserved hugepages are accounted for and free again.

    :param test: test object, used to report failures
    :param hugepage_nr: number of hugepages originally reserved
    """
    if hugepage_nr != utils_memory.get_num_huge_pages():
        test.fail("Total number of hugepages does not match the reserved amount")
    if hugepage_nr != utils_memory.get_num_huge_pages_free():
        test.fail("Number of free hugepages does not match the reserved amount")
    if utils_memory.get_num_huge_pages_rsvd() != 0:
        test.fail("Hugepages are still reserved after the test")


def run(test, params, env):
    """
    Test qemu-kvm startup reliability
@@ -52,10 +104,20 @@ def run(test, params, env):
    login_timeout = float(params.get("login_timeout", 240))  # Controls vm.wait_for_login() timeout
    startup_wait = float(params.get("startup_wait", 240))    # Controls wait time for virsh.start()
    resume_wait = float(params.get("resume_wait", 240))      # Controls wait for virsh.resume()
    # Cartesian params are strings; bool("False") is True, so compare instead of casting
    hugepage_check = params.get("check_hugepage_status", "False") == "True"

    backup_xml = None
    hugepage_nr = None
    if hugepage_check:
        hugepage_nr, backup_xml = setup_hugepage(vm)

    for i in range(num_cycles):
        logging.info("Starting vm '%s' -- attempt #%d", vm_name, i + 1)

        power_cycle_vm(test, vm, vm_name, login_timeout, startup_wait, resume_wait)

        logging.info("Completed vm '%s' power cycle #%d", vm_name, i + 1)

    # Restore the original XML only after all cycles, then verify the hugepages are released
    if hugepage_check:
        backup_xml.sync()
        check_hugepage_status(test, hugepage_nr)
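With the cfg variant in place, the hugepage flavour of the test is selected by its Cartesian name; under a standard avocado-vt setup the invocation would look roughly like the following (the exact test id depends on how the tp-libvirt provider is registered locally):

avocado run --vt-type libvirt vm_start_destroy_repeatedly.hugepage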
