merge(i386: memory): Add a physical memory manager
In this MR, we added both a page frame allocator AND a way
to interface with the MMU.

For the PMM, we use a simple bitmap allocator for now. It could be replaced
with a buddy allocator later on for efficiency (and to have more fun
learning). We also use multiple allocator structs to be able to differentiate
between memory regions. We only use the kernel region as of now, but this
will prove useful later on when dealing with I/O memory.
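
The bitmap allocator itself lives in files that are not expanded in this view.
As a rough, hypothetical sketch of the approach (all names and sizes below are
placeholders, not the ones used in this commit), a bitmap page-frame allocator
tracks one bit per 4KiB frame, which costs 128KiB of bitmap to cover 4GiB of
physical memory:

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE    4096
#define TOTAL_FRAMES (0x100000000ULL / PAGE_SIZE) /* frames in 4GiB of RAM */

/* One bit per page frame: 1 = used, 0 = free */
static uint32_t pmm_bitmap[TOTAL_FRAMES / 32];

static void frame_set_used(size_t frame) { pmm_bitmap[frame / 32] |=  (1U << (frame % 32)); }
static void frame_set_free(size_t frame) { pmm_bitmap[frame / 32] &= ~(1U << (frame % 32)); }
static int  frame_is_used(size_t frame)  { return pmm_bitmap[frame / 32] & (1U << (frame % 32)); }

/* Return the physical address of a free frame, or (uint32_t)-1 if none is left. */
uint32_t bitmap_allocate_frame(void)
{
    for (size_t frame = 0; frame < TOTAL_FRAMES; frame++) {
        if (!frame_is_used(frame)) {
            frame_set_used(frame);
            return frame * PAGE_SIZE;
        }
    }
    return (uint32_t)-1;
}

void bitmap_free_frame(uint32_t address)
{
    frame_set_free(address / PAGE_SIZE);
}
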

For the MMU interface, we implemented recursive page tables to make it easier
to allocate and write to new page tables.
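
As a refresher on the recursive trick used in mmu.c below: the last page
directory entry (index 1023) is made to point back at the page directory
itself, so the page table covering any 4MiB region becomes visible at a fixed
virtual address. The helper below is illustrative only and simply restates
what the MMU_RECURSIVE_PAGE_TABLE_ADDRESS macro computes:

#include <stdint.h>

/* Illustrative only: with the recursive slot installed in entry 1023, the
 * page table that maps a given virtual address is itself mapped at
 * 0xFFC00000 + pde_index * 0x1000 (same as ((0xFFC00 + pde_index) << 12)). */
static inline uint32_t recursive_page_table_address(uint32_t virt)
{
    uint32_t pde_index = virt >> 22; /* top 10 bits select the page directory entry */
    return 0xFFC00000u + pde_index * 0x1000u;
}

/* Worked example: for virt = 0xC0100000, pde_index = 768 and the page table
 * is reachable at 0xFFC00000 + 768 * 0x1000 = 0xFFF00000. */
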
d4ilyrun committed Feb 15, 2024
2 parents 8691c35 + 3e40992 commit 9495a8f
Showing 14 changed files with 745 additions and 1 deletion.
1 change: 1 addition & 0 deletions .github/workflows/build.yml
@@ -1,6 +1,7 @@
name: Build

on:
pull_request:
push:
branches:
- master
26 changes: 26 additions & 0 deletions kernel/arch/i686/include/kernel/i686/interrupts.h
@@ -16,6 +16,32 @@
#define IDT_SIZE (IDT_LENGTH * sizeof(idt_descriptor))
#define IDT_BASE_ADDRESS 0x00000000UL

typedef enum {
DIVISION_ERROR = 0x0,
DEBUG,
NON_MASKABLE,
BREAKPOINT,
OVERFLOW,
BOUND_RANGE_EXCEEDED,
INVALID_OPCODE,
DEVICE_NOT_AVAILABLE,
DOUBLE_FAULT,
COPROCESSOR_SEGMENT_OVERRUN,
INVALID_TSS,
SEGMENT_NOT_PRESENT,
STACK_SEGMENT_FAULT,
GENERAL_PROTECTION_FAULT,
PAGE_FAULT,
X87_FPE = 0x10,
ALIGNMENT_CHECK,
MACHINE_CHECK,
SIMD_FPE,
VIRTUALIZATION_EXCEPTION,
CONTROL_PROTECTION_EXCEPTION,
HYPERVISOR_INJECTION_EXCEPTION = 0x1C,
VMM_COMMUNICATION_EXCEPTION,
SECURITY_EXCEPTION,
} x86_exceptions;

typedef enum idt_gate_type {
TASK_GATE = 0x5,
INTERRUPT_GATE = 0x6,
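
The snippet above only shows part of interrupts.h. As a purely hypothetical
illustration of how the new x86_exceptions values can be consumed (idt_set_gate
and page_fault_isr are assumed names, not the API defined by this repository),
installing a page-fault gate could look like this:

#include <stdint.h>
#include <kernel/i686/interrupts.h>

/* Hypothetical sketch: only PAGE_FAULT, INTERRUPT_GATE and the idt_gate_type
 * enum come from the header shown above; everything else is assumed. */
extern void page_fault_isr(void); /* assembly stub that saves state and irets */
extern void idt_set_gate(uint8_t vector, void (*isr)(void), enum idt_gate_type type);

static void install_exception_handlers(void)
{
    idt_set_gate(PAGE_FAULT, page_fault_isr, INTERRUPT_GATE);
}
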
4 changes: 4 additions & 0 deletions kernel/arch/i686/linker.ld
@@ -9,6 +9,8 @@ SECTIONS
/* We place our kernel at 1MiB, a conventional location for it. */
. = 1M;

kernel_code_start_address = .;

.text BLOCK(4K) : ALIGN(4K)
{
*(.multiboot)
@@ -33,4 +35,6 @@
*(COMMON)
*(.bss)
}

kernel_code_end_address = .;
}
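
These two new linker symbols are what the MMU code below consumes through the
KERNEL_CODE_START and KERNEL_CODE_END macros. Their definitions live in a
header that is not expanded in this view; a common way to expose linker
symbols to C looks like the following sketch (the exact mapping here is an
assumption):

#include <utils/types.h>

/* Sketch (assumed, not shown in this diff): linker-script symbols only carry
 * an address, so they are declared as arrays and their address is taken. */
extern u8 kernel_code_start_address[];
extern u8 kernel_code_end_address[];

#define KERNEL_CODE_START ((u32)kernel_code_start_address)
#define KERNEL_CODE_END   ((u32)kernel_code_end_address)
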
193 changes: 193 additions & 0 deletions kernel/arch/i686/memory/mmu.c
@@ -0,0 +1,193 @@
#include <kernel/logger.h>
#include <kernel/mmu.h>
#include <kernel/pmm.h>

#include <stddef.h>
#include <utils/align.h>
#include <utils/compiler.h>
#include <utils/macro.h>
#include <utils/types.h>

// Number of entries inside the page directory
#define MMU_PDE_COUNT (1024)
// Number of entries inside a single page table
#define MMU_PTE_COUNT (1024)

// Compute the virtual address of a page table when using recursive paging
// @link https://medium.com/@connorstack/recursive-page-tables-ad1e03b20a85
#define MMU_RECURSIVE_PAGE_TABLE_ADDRESS(_index) ((0xFFC00 + (_index)) << 12)

/// Page Directory entry
/// @see Table 4-5
typedef struct PACKED {
u8 present : 1; ///< Whether this entry is present
u8 writable : 1; ///< Read/Write
u8 user : 1; ///< User/Supervisor
u8 pwt : 1; ///< Page-level write-through
u8 pcd : 1; ///< Page-level cache disabled
u8 accessed : 1; ///< Whether this entry has been used for translation
u8 _ignored : 1;
u8 ps : 1;
u8 _ignored2 : 4;
/// @brief Physical address of the referenced page table.
/// These are the 20 higher bits of the address (i.e. address / PAGE_SIZE)
u32 page_table : 20;
} mmu_pde_entry;

/// Page Table entry
/// @info we use 32-bit page tables that map 4-KiB pages
/// @see Table 4-6
typedef struct PACKED {
u8 present : 1; ///< Whether this entry is present
u8 writable : 1; ///< Read/Write
u8 user : 1; ///< User/Supervisor
u8 pwt : 1; ///< Page-level write-through
u8 pcd : 1; ///< Page-level cache disabled
u8 accessed : 1; ///< Whether the software has accessed this page
u8 dirty : 1; ///< Whether software has written to this page
u8 pat : 1;
u8 global : 1;
u8 _ignored : 3;
/// @brief Physical address of the referenced page frame.
/// These are the 20 higher bits of the address (i.e. address / PAGE_SIZE)
u32 page_frame : 20;
} mmu_pte_entry;

/// Compute the 20 higher bits of a page's address (i.e. address / PAGE_SIZE)
#define MMU_PAGE_ADDRESS(_page) (((u32)_page) >> 12)

// The kernel's very own page directory
static __attribute__((__aligned__(PAGE_SIZE)))
mmu_pde_entry kernel_page_directory[MMU_PDE_COUNT];

static inline void mmu_flush_tlb(u32 tlb_entry)
{
ASM("invlpg (%0)" ::"r"(tlb_entry) : "memory");
}

// TODO: Do not hard-code the content of CR3 to kernel's PD
//
// We set CR3 to be the kernel's page directory, AT ALL TIMES.
//
// This works for now since we only have ONE active task. But once
// we are able to execute multiple tasks at once, we should find another
// way to set its content to be that of the currently running process.
bool mmu_start_paging(void)
{
static void *page_directory = kernel_page_directory;

// Set CR3 to point to our page directory
ASM("movl %0, %%cr3" : : "r"(page_directory));

// According to 4.3, to activate 32-bit mode paging we must:
// 1. set CR4.PAE to 0 (de-activate PAE)
u32 cr4;
ASM("movl %%cr4, %0" : "=r"(cr4));
cr4 = BIT_MASK(cr4, 5); // PAE = bit 5
ASM("movl %0, %%cr4" : : "r"(cr4));
// 2. set CR0.PG to 1 (activate paging)
u32 cr0;
ASM("movl %%cr0, %0" : "=r"(cr0));
cr0 = BIT_SET(cr0, 31); // PG = bit 31
ASM("movl %0, %%cr0" : : "r"(cr0));

return true;
}

bool mmu_init(void)
{
log_info("MMU", "Initializing MMU");

// Mark all PDEs as "absent" (present = 0), and writable
for (size_t entry = 0; entry < MMU_PDE_COUNT; entry++) {
kernel_page_directory[entry] = (mmu_pde_entry){.writable = 1};
}

// Setup recursive page tables
// @link https://medium.com/@connorstack/recursive-page-tables-ad1e03b20a85
kernel_page_directory[MMU_PDE_COUNT - 1].present = 1;
kernel_page_directory[MMU_PDE_COUNT - 1].page_table =
MMU_PAGE_ADDRESS(kernel_page_directory);

// For more simplicity, we identity map the content of our kernel.
mmu_identity_map(align(KERNEL_CODE_START, PAGE_SIZE), KERNEL_CODE_END);
// We also map the first 1M of physical memory, it will be reserved for
// hardware structs.
mmu_identity_map(0x0, 0x100000);

return true;
}

bool mmu_map(u32 virtual, u32 pageframe)
{
u16 pde_index = virtual >> 22; // bits 31-22
u16 pte_index = (virtual >> 12) & ((1 << 10) - 1); // bits 21-12

// TODO: Hard-coded to work with kernel page tables only
// This should take the pagedir/pagetables as input later on
//
// We also hardcode the pde/pte to be inaccessible when in user mode.
// This will also cause an issue when reaching userspace later.

if (!kernel_page_directory[pde_index].present) {
u32 page_table = pmm_allocate(PMM_MAP_KERNEL);
kernel_page_directory[pde_index].page_table =
MMU_PAGE_ADDRESS(page_table);
kernel_page_directory[pde_index].present = 1;
}

u32 cr0;
ASM("movl %%cr0, %0" : "=r"(cr0));
bool paging_enabled = BIT(cr0, 31);

// Virtual address of the corresponding page table (physical if CR0.PG=0)
mmu_pte_entry *page_table =
(mmu_pte_entry *)(paging_enabled
? MMU_RECURSIVE_PAGE_TABLE_ADDRESS(pde_index)
: (kernel_page_directory[pde_index].page_table
<< 12));

if (page_table[pte_index].present) {
log_err("MMU",
"Allocating already allocated virtual address: " LOG_FMT_32,
virtual);
return false;
}

// Link the page table entry (virtual address) to the pageframe
page_table[pte_index] = (mmu_pte_entry){
.present = 1,
.page_frame = MMU_PAGE_ADDRESS(pageframe),
// TODO: hard-coded values
.writable = 1,
.user = 0,
};

return true;
}

void mmu_unmap(u32 virtual)
{
u16 pde_index = virtual >> 22; // bits 31-22
u16 pte_index = (virtual >> 12) & ((1 << 10) - 1); // bits 21-12

// TODO: Hard-coded to work with kernel pages only
// c.f. todo inside mmu_map

if (!kernel_page_directory[pde_index].present)
return;

// Erase the content of the page table entry
mmu_pte_entry *page_table =
(mmu_pte_entry *)MMU_RECURSIVE_PAGE_TABLE_ADDRESS(pde_index);
*((volatile u32 *)&page_table[pte_index]) = 0x0;

mmu_flush_tlb(virtual);
}

void mmu_identity_map(u32 start, u32 end)
{
for (; start < end; start += PAGE_SIZE) {
mmu_map(start, start);
}
}
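
To tie the new pieces together, here is a hedged sketch of the intended call
order from kernel setup, following the warning in mmu.h that everything must
be mapped before paging is switched on. Only the mmu_ functions, pmm_allocate
and PMM_MAP_KERNEL come from this commit; kernel_memory_setup, the commented
pmm_init step and the example address are illustrative:

#include <kernel/mmu.h>
#include <kernel/pmm.h>
#include <utils/types.h>

void kernel_memory_setup(void)
{
    /* 1. Initialize the physical allocator first (exact signature assumed,
     *    the PMM sources are not expanded in this view). */
    /* pmm_init(...); */

    /* 2. Build the kernel page directory: mmu_init() identity maps the
     *    kernel image and the first 1MiB so they stay reachable. */
    mmu_init();

    /* 3. Enable translation; every address now goes through paging. */
    mmu_start_paging();

    /* 4. Later mappings pair a fresh physical frame from the PMM with the
     *    virtual address it should be visible at (0xD0000000 is arbitrary). */
    u32 frame = pmm_allocate(PMM_MAP_KERNEL);
    mmu_map(0xD0000000, frame);
}
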
14 changes: 14 additions & 0 deletions kernel/include/kernel/logger.h
@@ -42,6 +42,20 @@
*/
void log(const char *type, const char *domain, const char *msg, ...);

/**
* \brief Completely stop the kernel's execution
*
* This function writes a BOLD RED message to the screen, and completely
* halts the kernel's execution.
*
* This should only be called in case of unrecoverable errors, or asserts
* that should never be false and would prevent the kernel from functioning
* as expected.
*
* TODO: Dump the kernel's internal state (registers, ...)
*/
void panic(const char *msg, ...) __attribute__((__noreturn__));

#define log_err(domain, ...) \
log(ANSI_ERR "ERROR" ANSI_RESET " ", domain, __VA_ARGS__)
#define log_warn(domain, ...) \
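
The panic() helper documented above is the natural escape hatch for the memory
code when an unrecoverable condition is hit. A short illustrative call site
(made up for this example; the error value returned by pmm_allocate is an
assumption):

#include <kernel/logger.h>
#include <kernel/pmm.h>
#include <utils/types.h>

void example_reserve_frame(void)
{
    u32 frame = pmm_allocate(PMM_MAP_KERNEL);
    if (frame == (u32)-1) /* assumed error sentinel; the real one may differ */
        panic("PMM: out of physical memory during early kernel setup");
}
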
73 changes: 73 additions & 0 deletions kernel/include/kernel/mmu.h
@@ -0,0 +1,73 @@
/**
* @brief Memory Management Unit
*
* Interact with the CPU's hardware MMU.
*
* The MMU is used to translate virtual addresses into physical ones.
* These physical addresses should be pages, allocated using the PMM.
* This whole translation process is called paging.
*
* All the functions defined inside this file are arch-dependent, and only
* serve as a common interface for different architectures.
*
* This includes:
* * Enabling/Disabling paging
* * Updating the underlying structures
*
* @file mmu.h
*/

#ifndef KERNEL_MMU_H
#define KERNEL_MMU_H

#include <stdbool.h>
#include <utils/types.h>

/**
* @brief Initialize the MMU's underlying structures
*/
bool mmu_init(void);

/**
* @brief Enable paging and automatic virtual address translation.
*
* @warning After calling this function, each and every address will
* automatically be translated into its physical equivalent using the paging
* mechanism. Be sure to remap known addresses to avoid raising exceptions.
*/
bool mmu_start_paging(void);

/**
* @brief Map a virtual address to a physical one
*
* @param virt The virtual address
* @param physical Its physical equivalent
*
* @return False if the address was already mapped before
*/
bool mmu_map(u32 virt, u32 physical);

/**
* @brief Unmap a virtual address
*
* @warning After calling this, referencing the given virtual address may cause
* the CPU to raise an exception.
*
* @param virt The virtual address
*/
void mmu_unmap(u32 virt);

/**
* @brief Perform identity mapping inside a given virtual address range
*
* Identity mapping is the process of mapping a virtual address to the same
* physical address.
*
* The start address is included in the range; the end address is excluded.
*
* @param start the starting page of the address range
* @param end the ending address of the address range
*/
void mmu_identity_map(u32 start, u32 end);

#endif /* KERNEL_MMU_H */