mm/pagetable: use self map to implement virt_to_phys
It is possible to insert a reference to the current page table into the
page table itself, creating a recursive "self-map".  This permits access
to any PTE in the address space by taking advantage of the hierarchical
property of page tables, using the CPU's own page table walker to obtain
the PTE contents.  This change inserts a self-map entry into every page
table, and reimplements `virt_to_phys()` to use the self map so that it
is possible to translate any VA to a PA without requiring either the VA
or the PA to be within the bounds of any known address range.

Signed-off-by: Jon Lange <[email protected]>
msft-jlange committed Oct 2, 2024
1 parent c47c34d commit 2e7e744
Showing 4 changed files with 130 additions and 28 deletions.
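The core of the change is the classic recursive page-table trick: if PML4 slot S points back at the PML4 page itself, then any virtual address whose top-level index is S makes the CPU's walker terminate one level early, so the "data" page it reaches is a page-table page. A minimal sketch of the resulting address arithmetic, using the constants this commit introduces (the helper name `pte_va` is illustrative, not part of the diff):

    // Self-map slot chosen by this commit (PGTABLE_LVL3_IDX_PTE_SELFMAP).
    const SELFMAP: usize = 493;

    // Placing 493 in the PML4 index bits (47:39) and sign-extending gives
    // the canonical base 0xFFFF_F680_0000_0000 (SVSM_PTE_BASE below).
    const SVSM_PTE_BASE: usize = 0xFFFF_F680_0000_0000;

    // Virtual address of the PTE that maps `va`: the three lower 9-bit
    // indexes shift up one level (>> 9 of the page-frame bits), with the
    // self-map index prepended at the top.
    fn pte_va(va: usize) -> usize {
        SVSM_PTE_BASE + ((va & 0x0000_FFFF_FFFF_F000) >> 9)
    }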
10 changes: 10 additions & 0 deletions kernel/src/boot_stage2.rs
@@ -81,6 +81,16 @@ global_asm!(
decl %ecx
jnz 1b
/* Insert a self-map entry */
movl $pgtable, %edi
movl %edi, %eax
orl $0x63, %eax
/* The value 0xF68 is equivalent to 8 * PGTABLE_LVL3_IDX_PTE_SELFMAP */
movl %eax, 0xF68(%edi)
movl $0x80000000, %eax
orl %edx, %eax
movl %eax, 0xF6C(%edi)
/* Signal APs */
movl $setup_flag, %edi
movl $1, (%edi)
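For reference, the two 32-bit stores above assemble one 64-bit PTE at pgtable + 0xF68 (slot 493, low dword at 0xF68, high dword at 0xF6C). A sketch of the same encoding in Rust; here `c_bit_high` stands for whatever mask the boot code keeps in %edx, presumably the upper half of the encryption-bit mask, which is an inference rather than something the diff states:

    // Low dword: page-table physical address plus 0x63
    // (PRESENT | WRITABLE | ACCESSED | DIRTY).
    // High dword: 0x8000_0000 sets bit 63 of the entry, the NX bit.
    fn selfmap_pte(pgtable_pa: u32, c_bit_high: u32) -> u64 {
        let low = (pgtable_pa | 0x63) as u64;
        let high = (0x8000_0000u32 | c_bit_high) as u64;
        (high << 32) | low
    }

This mirrors `allocate_new()` in kernel/src/mm/pagetable.rs below, where the same flags plus `make_private_address()` produce the Rust-side entry.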
30 changes: 12 additions & 18 deletions kernel/src/mm/address_space.rs
@@ -7,6 +7,9 @@
use crate::address::{PhysAddr, VirtAddr};
use crate::utils::immut_after_init::ImmutAfterInitCell;

#[cfg(target_os = "none")]
use crate::mm::pagetable::PageTable;

#[derive(Debug, Copy, Clone)]
#[allow(dead_code)]
pub struct FixedAddressMappingRange {
@@ -38,16 +41,6 @@ impl FixedAddressMappingRange {
}
}
}

-#[cfg(target_os = "none")]
-fn virt_to_phys(&self, vaddr: VirtAddr) -> Option<PhysAddr> {
-    if (vaddr < self.virt_start) || (vaddr >= self.virt_end) {
-        None
-    } else {
-        let offset: usize = vaddr - self.virt_start;
-        Some(self.phys_start + offset)
-    }
-}
}

#[derive(Debug, Copy, Clone)]
@@ -74,16 +67,12 @@ pub fn init_kernel_mapping_info(

#[cfg(target_os = "none")]
pub fn virt_to_phys(vaddr: VirtAddr) -> PhysAddr {
-    if let Some(addr) = FIXED_MAPPING.kernel_mapping.virt_to_phys(vaddr) {
-        return addr;
-    }
-    if let Some(ref mapping) = FIXED_MAPPING.heap_mapping {
-        if let Some(addr) = mapping.virt_to_phys(vaddr) {
-            return addr;
-        }
-    }
-
-    panic!("Invalid virtual address {:#018x}", vaddr);
+    match PageTable::virt_to_phys(vaddr) {
+        Some(paddr) => paddr,
+        None => {
+            panic!("Invalid virtual address {:#018x}", vaddr);
+        }
+    }
}

#[cfg(target_os = "none")]
Expand Down Expand Up @@ -203,6 +192,11 @@ pub const SVSM_PERTASK_END: VirtAddr = SVSM_PERTASK_BASE.const_add(SIZE_LEVEL3);
/// Kernel stack for a task
pub const SVSM_PERTASK_STACK_BASE: VirtAddr = SVSM_PERTASK_BASE;

/// Page table self-map level 3 index
pub const PGTABLE_LVL3_IDX_PTE_SELFMAP: usize = 493;

pub const SVSM_PTE_BASE: VirtAddr = virt_from_idx(PGTABLE_LVL3_IDX_PTE_SELFMAP);

//
// User-space mapping constants
//
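As a quick consistency check tying these constants to the stage-2 assembly earlier in the commit (a standalone sketch; `virt_from_idx` is assumed to sign-extend the level-3 index, which the 493 → 0xFFFF_F680_0000_0000 mapping implies):

    fn check_selfmap_constants() {
        let idx: u64 = 493; // PGTABLE_LVL3_IDX_PTE_SELFMAP
        // Byte offset of the self-map entry, as written in boot_stage2.rs.
        assert_eq!(8 * idx, 0xF68);
        // Sign-extend bits 47:39 into a canonical address: SVSM_PTE_BASE.
        let base = ((((idx << 39) as i64) << 16) >> 16) as u64;
        assert_eq!(base, 0xFFFF_F680_0000_0000);
    }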
115 changes: 106 additions & 9 deletions kernel/src/mm/pagetable.rs
@@ -11,8 +11,10 @@ use crate::cpu::flush_tlb_global_sync;
use crate::cpu::idt::common::PageFaultError;
use crate::cpu::registers::RFlags;
use crate::error::SvsmError;
-use crate::mm::PageBox;
-use crate::mm::{phys_to_virt, virt_to_phys, PGTABLE_LVL3_IDX_SHARED};
+use crate::mm::{
+    phys_to_virt, virt_to_phys, PageBox, PGTABLE_LVL3_IDX_PTE_SELFMAP, PGTABLE_LVL3_IDX_SHARED,
+    SVSM_PTE_BASE,
+};
use crate::platform::SvsmPlatform;
use crate::types::{PageSize, PAGE_SIZE, PAGE_SIZE_2M};
use crate::utils::immut_after_init::{ImmutAfterInitCell, ImmutAfterInitResult};
@@ -360,6 +362,17 @@ impl PTEntry {
let addr = PhysAddr::from(self.0.bits() & 0x000f_ffff_ffff_f000);
strip_confidentiality_bits(addr)
}

/// Read a page table entry from the specified virtual address.
///
/// # Safety
///
/// Reads from an arbitrary virtual address, making this essentially a
/// raw pointer read. The caller must be certain to calculate the correct
/// address.
pub unsafe fn read_pte(vaddr: VirtAddr) -> Self {
*vaddr.as_ptr::<Self>()
}
}

/// A pagetable page with multiple entries.
@@ -456,13 +469,33 @@ impl PageTable {
virt_to_phys(pgtable)
}

/// Allocate a new page table root.
///
/// # Errors
/// Returns [`SvsmError`] if the page cannot be allocated.
pub fn allocate_new() -> Result<PageBox<Self>, SvsmError> {
let mut pgtable = PageBox::try_new(PageTable::default())?;
let paddr = virt_to_phys(pgtable.vaddr());

// Set the self-map entry.
let entry = &mut pgtable.root[PGTABLE_LVL3_IDX_PTE_SELFMAP];
let flags = PTEntryFlags::PRESENT
| PTEntryFlags::WRITABLE
| PTEntryFlags::ACCESSED
| PTEntryFlags::DIRTY
| PTEntryFlags::NX;
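// NX on the self-map entry keeps everything reachable through the
// self-map non-executable; the region is only ever read and written
// as data.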
entry.set(make_private_address(paddr), flags);

Ok(pgtable)
}

/// Clone the shared part of the page table, excluding the private
/// parts.
///
/// # Errors
/// Returns [`SvsmError`] if the page cannot be allocated.
pub fn clone_shared(&self) -> Result<PageBox<PageTable>, SvsmError> {
-        let mut pgtable = PageBox::try_new(PageTable::default())?;
+        let mut pgtable = Self::allocate_new()?;
pgtable.root.entries[PGTABLE_LVL3_IDX_SHARED] = self.root.entries[PGTABLE_LVL3_IDX_SHARED];
Ok(pgtable)
}
@@ -560,6 +593,72 @@ impl PageTable {
Self::walk_addr_lvl3(&mut self.root, vaddr)
}

/// Calculate the virtual address of the self-map PTE that maps a
/// specified virtual address.
///
/// # Parameters
/// - `vaddr`: The virtual address whose PTE should be located.
///
/// # Returns
/// The virtual address of the PTE.
fn get_pte_address(vaddr: VirtAddr) -> VirtAddr {
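// Worked example (illustrative): for vaddr 0xFFFF_8000_0123_4567, the
// masked page-frame bits are 0x0000_8000_0123_4000; shifting right by
// 9 gives 0x40_0000_91A0, so the PTE lives at
// SVSM_PTE_BASE + 0x40_0000_91A0 = 0xFFFF_F6C0_0000_91A0.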
SVSM_PTE_BASE + ((usize::from(vaddr) & 0x0000_FFFF_FFFF_F000) >> 9)
}

/// Perform a virtual to physical translation using the self-map.
///
/// # Parameters
/// - `vaddr`: The virtual address to translate.
///
/// # Returns
/// `Some(PhysAddr)` if the virtual address is mapped, or `None` if it
/// is not.
pub fn virt_to_phys(vaddr: VirtAddr) -> Option<PhysAddr> {
// Calculate the virtual addresses of each level of the paging
// hierarchy in the self-map.
let pte_addr = Self::get_pte_address(vaddr);
let pde_addr = Self::get_pte_address(pte_addr);
let pdpe_addr = Self::get_pte_address(pde_addr);
let pml4e_addr = Self::get_pte_address(pdpe_addr);
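// Applying get_pte_address() to its own output climbs one level per
// application: the PTE address yields the PDE address, then the PDPE
// and PML4E addresses, because each pass substitutes the self-map
// index at the top once more.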

// Check each entry in the paging hierarchy to determine whether this
// address is mapped. Because the hierarchy is read from the top
// down using self-map addresses that were calculated correctly,
// the reads are safe to perform.
let pml4e = unsafe { PTEntry::read_pte(pml4e_addr) };
if !pml4e.present() {
return None;
}

// There is no need to check for a large page in the PML4E because
// the architecture does not support the large bit at the top-level
// entry. If a large page is detected at a lower level of the
// hierarchy, the low bits from the virtual address must be combined
// with the physical address from the PDE/PDPE.
let pdpe = unsafe { PTEntry::read_pte(pdpe_addr) };
if !pdpe.present() {
return None;
}
if pdpe.huge() {
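// 1 GiB page: the low 30 bits of the virtual address are the offset.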
return Some(pdpe.address() + (usize::from(vaddr) & 0x3FFF_FFFF));
}

let pde = unsafe { PTEntry::read_pte(pde_addr) };
if !pde.present() {
return None;
}
if pde.huge() {
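// 2 MiB page: the low 21 bits of the virtual address are the offset.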
return Some(pde.address() + (usize::from(vaddr) & 0x001F_FFFF));
}

let pte = unsafe { PTEntry::read_pte(pte_addr) };
if pte.present() {
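// 4 KiB page: the low 12 bits of the virtual address are the offset.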
Some(pte.address() + (usize::from(vaddr) & 0xFFF))
} else {
None
}
}

fn alloc_pte_lvl3(entry: &mut PTEntry, vaddr: VirtAddr, size: PageSize) -> Mapping<'_> {
let flags = entry.flags();

@@ -575,7 +674,7 @@ impl PageTable {
| PTEntryFlags::WRITABLE
| PTEntryFlags::USER
| PTEntryFlags::ACCESSED;
-        entry.set(paddr, flags);
+        entry.set(make_private_address(paddr), flags);

let idx = Self::index::<2>(vaddr);
Self::alloc_pte_lvl2(&mut page[idx], vaddr, size)
@@ -596,7 +695,7 @@ impl PageTable {
| PTEntryFlags::WRITABLE
| PTEntryFlags::USER
| PTEntryFlags::ACCESSED;
-        entry.set(paddr, flags);
+        entry.set(make_private_address(paddr), flags);

let idx = Self::index::<1>(vaddr);
Self::alloc_pte_lvl1(&mut page[idx], vaddr, size)
@@ -617,7 +716,7 @@ impl PageTable {
| PTEntryFlags::WRITABLE
| PTEntryFlags::USER
| PTEntryFlags::ACCESSED;
-        entry.set(paddr, flags);
+        entry.set(make_private_address(paddr), flags);

let idx = Self::index::<0>(vaddr);
Mapping::Level0(&mut page[idx])
@@ -1030,9 +1129,7 @@ impl PageTable {
| PTEntryFlags::USER
| PTEntryFlags::ACCESSED;
let entry = &mut self.root[idx];
-        // The C bit is not required here because all page table fetches are
-        // made as C=1.
-        entry.set(paddr, flags);
+        entry.set(make_private_address(paddr), flags);
}
}
}
3 changes: 2 additions & 1 deletion kernel/src/svsm_paging.rs
@@ -24,7 +24,8 @@ pub fn init_page_table(
launch_info: &KernelLaunchInfo,
kernel_elf: &elf::Elf64File<'_>,
) -> Result<PageBox<PageTable>, SvsmError> {
-    let mut pgtable = PageBox::try_new(PageTable::default())?;
+    let mut pgtable = PageTable::allocate_new()?;

let igvm_param_info = if launch_info.igvm_params_virt_addr != 0 {
let addr = VirtAddr::from(launch_info.igvm_params_virt_addr);
IgvmParamInfo {
