diff --git a/kernel/src/boot_stage2.rs b/kernel/src/boot_stage2.rs
index 12335b3b2..4edbee8d0 100644
--- a/kernel/src/boot_stage2.rs
+++ b/kernel/src/boot_stage2.rs
@@ -81,6 +81,16 @@ global_asm!(
         decl %ecx
         jnz 1b
 
+        /* Insert a self-map entry */
+        movl $pgtable, %edi
+        movl %edi, %eax
+        orl $0x63, %eax
+        /* The value 0xF68 is equivalent to 8 * PGTABLE_LVL3_IDX_PTE_SELFMAP */
+        movl %eax, 0xF68(%edi)
+        movl $0x80000000, %eax
+        orl %edx, %eax
+        movl %eax, 0xF6C(%edi)
+
         /* Signal APs */
         movl $setup_flag, %edi
         movl $1, (%edi)
diff --git a/kernel/src/mm/address_space.rs b/kernel/src/mm/address_space.rs
index 005df6e45..a3005669e 100644
--- a/kernel/src/mm/address_space.rs
+++ b/kernel/src/mm/address_space.rs
@@ -7,6 +7,9 @@
 use crate::address::{PhysAddr, VirtAddr};
 use crate::utils::immut_after_init::ImmutAfterInitCell;
 
+#[cfg(target_os = "none")]
+use crate::mm::pagetable::PageTable;
+
 #[derive(Debug, Copy, Clone)]
 #[allow(dead_code)]
 pub struct FixedAddressMappingRange {
@@ -38,16 +41,6 @@ impl FixedAddressMappingRange {
             }
         }
     }
-
-    #[cfg(target_os = "none")]
-    fn virt_to_phys(&self, vaddr: VirtAddr) -> Option<PhysAddr> {
-        if (vaddr < self.virt_start) || (vaddr >= self.virt_end) {
-            None
-        } else {
-            let offset: usize = vaddr - self.virt_start;
-            Some(self.phys_start + offset)
-        }
-    }
 }
 
 #[derive(Debug, Copy, Clone)]
@@ -74,16 +67,12 @@ pub fn init_kernel_mapping_info(
 
 #[cfg(target_os = "none")]
 pub fn virt_to_phys(vaddr: VirtAddr) -> PhysAddr {
-    if let Some(addr) = FIXED_MAPPING.kernel_mapping.virt_to_phys(vaddr) {
-        return addr;
-    }
-    if let Some(ref mapping) = FIXED_MAPPING.heap_mapping {
-        if let Some(addr) = mapping.virt_to_phys(vaddr) {
-            return addr;
+    match PageTable::virt_to_phys(vaddr) {
+        Some(paddr) => paddr,
+        None => {
+            panic!("Invalid virtual address {:#018x}", vaddr);
         }
     }
-
-    panic!("Invalid virtual address {:#018x}", vaddr);
 }
 
@@ -203,6 +192,11 @@ pub const SVSM_PERTASK_END: VirtAddr = SVSM_PERTASK_BASE.const_add(SIZE_LEVEL3);
 /// Kernel stack for a task
 pub const SVSM_PERTASK_STACK_BASE: VirtAddr = SVSM_PERTASK_BASE;
 
+/// Page table self-map level 3 index
+pub const PGTABLE_LVL3_IDX_PTE_SELFMAP: usize = 493;
+
+pub const SVSM_PTE_BASE: VirtAddr = virt_from_idx(PGTABLE_LVL3_IDX_PTE_SELFMAP);
+
 //
 // User-space mapping constants
 //
diff --git a/kernel/src/mm/pagetable.rs b/kernel/src/mm/pagetable.rs
index b9246dc9b..c70a97911 100644
--- a/kernel/src/mm/pagetable.rs
+++ b/kernel/src/mm/pagetable.rs
@@ -11,8 +11,10 @@ use crate::cpu::flush_tlb_global_sync;
 use crate::cpu::idt::common::PageFaultError;
 use crate::cpu::registers::RFlags;
 use crate::error::SvsmError;
-use crate::mm::PageBox;
-use crate::mm::{phys_to_virt, virt_to_phys, PGTABLE_LVL3_IDX_SHARED};
+use crate::mm::{
+    phys_to_virt, virt_to_phys, PageBox, PGTABLE_LVL3_IDX_PTE_SELFMAP, PGTABLE_LVL3_IDX_SHARED,
+    SVSM_PTE_BASE,
+};
 use crate::platform::SvsmPlatform;
 use crate::types::{PageSize, PAGE_SIZE, PAGE_SIZE_2M};
 use crate::utils::immut_after_init::{ImmutAfterInitCell, ImmutAfterInitResult};
@@ -360,6 +362,17 @@ impl PTEntry {
         let addr = PhysAddr::from(self.0.bits() & 0x000f_ffff_ffff_f000);
         strip_confidentiality_bits(addr)
     }
+
+    /// Read a page table entry from the specified virtual address.
+    ///
+    /// # Safety
+    ///
+    /// Reads from an arbitrary virtual address, making this essentially a
+    /// raw pointer read. The caller must be certain to calculate the correct
+    /// address.
+    pub unsafe fn read_pte(vaddr: VirtAddr) -> Self {
+        *vaddr.as_ptr::<Self>()
+    }
 }
 
 /// A pagetable page with multiple entries.
@@ -456,13 +469,33 @@ impl PageTable {
         virt_to_phys(pgtable)
     }
 
+    /// Allocate a new page table root.
+    ///
+    /// # Errors
+    /// Returns [`SvsmError`] if the page cannot be allocated.
+    pub fn allocate_new() -> Result<PageBox<PageTable>, SvsmError> {
+        let mut pgtable = PageBox::try_new(PageTable::default())?;
+        let paddr = virt_to_phys(pgtable.vaddr());
+
+        // Set the self-map entry.
+        let entry = &mut pgtable.root[PGTABLE_LVL3_IDX_PTE_SELFMAP];
+        let flags = PTEntryFlags::PRESENT
+            | PTEntryFlags::WRITABLE
+            | PTEntryFlags::ACCESSED
+            | PTEntryFlags::DIRTY
+            | PTEntryFlags::NX;
+        entry.set(make_private_address(paddr), flags);
+
+        Ok(pgtable)
+    }
+
     /// Clone the shared part of the page table; excluding the private
     /// parts.
     ///
     /// # Errors
     /// Returns [`SvsmError`] if the page cannot be allocated.
     pub fn clone_shared(&self) -> Result<PageBox<PageTable>, SvsmError> {
-        let mut pgtable = PageBox::try_new(PageTable::default())?;
+        let mut pgtable = Self::allocate_new()?;
         pgtable.root.entries[PGTABLE_LVL3_IDX_SHARED] = self.root.entries[PGTABLE_LVL3_IDX_SHARED];
         Ok(pgtable)
     }
@@ -560,6 +593,72 @@ impl PageTable {
         Self::walk_addr_lvl3(&mut self.root, vaddr)
     }
 
+    /// Calculate the virtual address of a PTE in the self-map, which maps a
+    /// specified virtual address.
+    ///
+    /// # Parameters
+    /// - `vaddr`: The virtual address whose PTE should be located.
+    ///
+    /// # Returns
+    /// The virtual address of the PTE.
+    fn get_pte_address(vaddr: VirtAddr) -> VirtAddr {
+        SVSM_PTE_BASE + ((usize::from(vaddr) & 0x0000_FFFF_FFFF_F000) >> 9)
+    }
+
+    /// Perform a virtual to physical translation using the self-map.
+    ///
+    /// # Parameters
+    /// - `vaddr`: The virtual address to translate.
+    ///
+    /// # Returns
+    /// Some(PhysAddr) if the virtual address is valid.
+    /// None if the virtual address is not valid.
+    pub fn virt_to_phys(vaddr: VirtAddr) -> Option<PhysAddr> {
+        // Calculate the virtual addresses of each level of the paging
+        // hierarchy in the self-map.
+        let pte_addr = Self::get_pte_address(vaddr);
+        let pde_addr = Self::get_pte_address(pte_addr);
+        let pdpe_addr = Self::get_pte_address(pde_addr);
+        let pml4e_addr = Self::get_pte_address(pdpe_addr);
+
+        // Check each entry in the paging hierarchy to determine whether this
+        // address is mapped. Because the hierarchy is read from the top
+        // down using self-map addresses that were calculated correctly,
+        // the reads are safe to perform.
+        let pml4e = unsafe { PTEntry::read_pte(pml4e_addr) };
+        if !pml4e.present() {
+            return None;
+        }
+
+        // There is no need to check for a large page in the PML4E because
+        // the architecture does not support the large bit at the top-level
+        // entry. If a large page is detected at a lower level of the
+        // hierarchy, the low bits from the virtual address must be combined
+        // with the physical address from the PDE/PDPE.
+        let pdpe = unsafe { PTEntry::read_pte(pdpe_addr) };
+        if !pdpe.present() {
+            return None;
+        }
+        if pdpe.huge() {
+            return Some(pdpe.address() + (usize::from(vaddr) & 0x3FFF_FFFF));
+        }
+
+        let pde = unsafe { PTEntry::read_pte(pde_addr) };
+        if !pde.present() {
+            return None;
+        }
+        if pde.huge() {
+            return Some(pde.address() + (usize::from(vaddr) & 0x001F_FFFF));
+        }
+
+        let pte = unsafe { PTEntry::read_pte(pte_addr) };
+        if pte.present() {
+            Some(pte.address() + (usize::from(vaddr) & 0xFFF))
+        } else {
+            None
+        }
+    }
+
     fn alloc_pte_lvl3(entry: &mut PTEntry, vaddr: VirtAddr, size: PageSize) -> Mapping<'_> {
         let flags = entry.flags();
 
@@ -575,7 +674,7 @@
             | PTEntryFlags::WRITABLE
             | PTEntryFlags::USER
             | PTEntryFlags::ACCESSED;
-        entry.set(paddr, flags);
+        entry.set(make_private_address(paddr), flags);
 
         let idx = Self::index::<2>(vaddr);
         Self::alloc_pte_lvl2(&mut page[idx], vaddr, size)
@@ -596,7 +695,7 @@
             | PTEntryFlags::WRITABLE
             | PTEntryFlags::USER
             | PTEntryFlags::ACCESSED;
-        entry.set(paddr, flags);
+        entry.set(make_private_address(paddr), flags);
 
         let idx = Self::index::<1>(vaddr);
         Self::alloc_pte_lvl1(&mut page[idx], vaddr, size)
@@ -617,7 +716,7 @@
             | PTEntryFlags::WRITABLE
            | PTEntryFlags::USER
             | PTEntryFlags::ACCESSED;
-        entry.set(paddr, flags);
+        entry.set(make_private_address(paddr), flags);
 
         let idx = Self::index::<0>(vaddr);
         Mapping::Level0(&mut page[idx])
@@ -1030,9 +1129,7 @@
                 | PTEntryFlags::USER
                 | PTEntryFlags::ACCESSED;
             let entry = &mut self.root[idx];
-            // The C bit is not required here because all page table fetches are
-            // made as C=1.
-            entry.set(paddr, flags);
+            entry.set(make_private_address(paddr), flags);
         }
     }
 }
diff --git a/kernel/src/svsm_paging.rs b/kernel/src/svsm_paging.rs
index 94551ee93..8c7772ed6 100644
--- a/kernel/src/svsm_paging.rs
+++ b/kernel/src/svsm_paging.rs
@@ -24,7 +24,8 @@ pub fn init_page_table(
     launch_info: &KernelLaunchInfo,
     kernel_elf: &elf::Elf64File<'_>,
 ) -> Result<PageBox<PageTable>, SvsmError> {
-    let mut pgtable = PageBox::try_new(PageTable::default())?;
+    let mut pgtable = PageTable::allocate_new()?;
+
     let igvm_param_info = if launch_info.igvm_params_virt_addr != 0 {
         let addr = VirtAddr::from(launch_info.igvm_params_virt_addr);
         IgvmParamInfo {