From b666b3bab7ed9367093d5c7c62668657786ffb57 Mon Sep 17 00:00:00 2001 From: corigan01 Date: Sun, 5 Jan 2025 18:07:17 -0600 Subject: [PATCH] Bootloader: Building kernel page tables and loading elf --- bootloader/stage-64bit/Cargo.toml | 1 + bootloader/stage-64bit/src/main.rs | 48 ++++++-- bootloader/stage-64bit/src/paging.rs | 163 +++++++++++++++++++++++++++ crates/arch/Cargo.toml | 1 + crates/arch/src/paging64.rs | 75 ++++++++++++ crates/util/src/lib.rs | 5 + kernel/src/main.rs | 2 +- 7 files changed, 284 insertions(+), 11 deletions(-) diff --git a/bootloader/stage-64bit/Cargo.toml b/bootloader/stage-64bit/Cargo.toml index 392caa9c..81b7e6fd 100644 --- a/bootloader/stage-64bit/Cargo.toml +++ b/bootloader/stage-64bit/Cargo.toml @@ -13,3 +13,4 @@ lldebug = {workspace = true} elf = {workspace = true} mem = {workspace = true} util = {workspace = true} +arch = {workspace = true} diff --git a/bootloader/stage-64bit/src/main.rs b/bootloader/stage-64bit/src/main.rs index fd22858f..765f4d02 100644 --- a/bootloader/stage-64bit/src/main.rs +++ b/bootloader/stage-64bit/src/main.rs @@ -33,7 +33,7 @@ use elf::{ Elf, tables::{ArchKind, SegmentKind}, }; -use lldebug::{debug_ready, logln, make_debug}; +use lldebug::{debug_ready, hexdump::HexPrint, log, logln, make_debug}; use mem::phys::{PhysMemoryEntry, PhysMemoryKind, PhysMemoryMap}; use serial::{Serial, baud::SerialBaud}; use util::{ @@ -71,25 +71,44 @@ fn main(stage_to_stage: &Stage32toStage64) { .exe_size() .expect("Unable to determine the size of the Kernel's exe!"); - logln!("Kernel Size: {}", HumanBytes::from(kernel_exe_len)); - build_memory_map(stage_to_stage, kernel_exe_len); + logln!("Kernel Size : {}", HumanBytes::from(kernel_exe_len)); + let page_info = build_memory_map(stage_to_stage, kernel_exe_len); + let mut virt_info = paging::build_page_tables(page_info); - let elf_header = match elf.header() { + log!("Loading new page tables..."); + unsafe { paging::load_page_tables() }; + logln!("OK"); + + let _elf_header = match elf.header() { Ok(elf::tables::ElfHeader::Header64(h)) if h.arch() == ArchKind::X64 && h.is_le() => h, _ => panic!("Kernel's elf is not valid!"), }; + log!("Loading ELF ("); elf.load_into(|h| { if h.segment_kind() != SegmentKind::Load { return None; } + log!("."); + + let vaddr = h.expected_vaddr(); + let len = h.in_mem_size() as u64; + + assert!( + vaddr >= virt_info.exe_start_virt && (vaddr + len) <= virt_info.exe_end_virt, + "Cannot fit section into mapped area" + ); - None + Some(unsafe { core::slice::from_raw_parts_mut(vaddr as *mut u8, len as usize) }) }) .unwrap(); + logln!(") -- OK"); + + let kernel_exe_slice = virt_info.exe_slice(); + logln!("{}", (&kernel_exe_slice[..1024]).hexdump()); } -fn build_memory_map(s2s: &Stage32toStage64, kernel_exe_len: usize) { +fn build_memory_map(s2s: &Stage32toStage64, kernel_exe_len: usize) -> paging::PageTableConfig { unsafe { let mm = &mut *MEMORY_MAP.get(); @@ -99,7 +118,7 @@ fn build_memory_map(s2s: &Stage32toStage64, kernel_exe_len: usize) { } logln!( - "Free Memory : {} Mib", + "Free Memory : {} Mib", mm.bytes_of(PhysMemoryKind::Free) / MIB ); logln!( @@ -134,8 +153,8 @@ fn build_memory_map(s2s: &Stage32toStage64, kernel_exe_len: usize) { let kernels_pages = mm .find_continuous_of( PhysMemoryKind::Free, - align_to(kernel_exe_len as u64, PAGE_4K) as usize, - PAGE_4K, + align_to(kernel_exe_len as u64, PAGE_2M) as usize, + PAGE_2M, 1 * MIB as u64, ) .map(|p| PhysMemoryEntry { @@ -146,7 +165,7 @@ fn build_memory_map(s2s: &Stage32toStage64, kernel_exe_len: usize) { 
mm.add_region(kernels_pages).unwrap(); let kernels_stack_pages = mm - .find_continuous_of(PhysMemoryKind::Free, PAGE_2M, PAGE_4K, 1 * MIB as u64) + .find_continuous_of(PhysMemoryKind::Free, PAGE_2M, PAGE_2M, 1 * MIB as u64) .map(|p| PhysMemoryEntry { kind: PhysMemoryKind::Kernel, ..p @@ -155,5 +174,14 @@ fn build_memory_map(s2s: &Stage32toStage64, kernel_exe_len: usize) { mm.add_region(kernels_stack_pages).unwrap(); logln!("{}", mm); + + paging::PageTableConfig { + kernel_exe_phys: (kernels_pages.start, kernels_pages.len() as usize), + kernel_stack_phys: ( + kernels_stack_pages.start, + kernels_stack_pages.len() as usize, + ), + kernel_virt: 0x100000000000, + } } } diff --git a/bootloader/stage-64bit/src/paging.rs b/bootloader/stage-64bit/src/paging.rs index 2dfead2e..c9e91102 100644 --- a/bootloader/stage-64bit/src/paging.rs +++ b/bootloader/stage-64bit/src/paging.rs @@ -22,3 +22,166 @@ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FO DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +use core::cell::SyncUnsafeCell; + +use arch::{ + paging64::{PageEntry2M, PageEntryLvl3, PageEntryLvl4, PageMapLvl2, PageMapLvl3, PageMapLvl4}, + registers::cr3, +}; +use util::{ + consts::{GIB, MIB, PAGE_2M}, + is_align_to, +}; + +/// Amount of Gib to identity map +const IDMAP_GIG_AMOUNT: usize = 1; + +// Main Table +static TABLE_LVL4: SyncUnsafeCell = SyncUnsafeCell::new(PageMapLvl4::new()); + +// Tables for lower memory id-mapping +static TABLE_LVL3_ID: SyncUnsafeCell = SyncUnsafeCell::new(PageMapLvl3::new()); +static TABLE_LVL2_ID: SyncUnsafeCell<[PageMapLvl2; IDMAP_GIG_AMOUNT]> = + SyncUnsafeCell::new([PageMapLvl2::new(); IDMAP_GIG_AMOUNT]); + +// Tables for higher-half kernel +static TABLE_LVL3_KERN: SyncUnsafeCell = SyncUnsafeCell::new(PageMapLvl3::new()); +static TABLE_LVL2_KERN: SyncUnsafeCell = SyncUnsafeCell::new(PageMapLvl2::new()); + +#[derive(Debug)] +pub struct PageTableConfig { + pub kernel_exe_phys: (u64, usize), + pub kernel_stack_phys: (u64, usize), + pub kernel_virt: u64, +} + +#[derive(Debug)] +pub struct KernelVirtInfo { + pub exe_start_virt: u64, + pub exe_end_virt: u64, + pub stack_start_virt: u64, + pub stack_end_virt: u64, +} + +impl KernelVirtInfo { + pub fn exe_slice(&mut self) -> &'static mut [u8] { + unsafe { + core::slice::from_raw_parts_mut( + self.exe_start_virt as *mut u8, + (self.exe_end_virt - self.exe_start_virt) as usize, + ) + } + } + + pub fn stack_slice(&mut self) -> &'static mut [u8] { + unsafe { + core::slice::from_raw_parts_mut( + self.stack_start_virt as *mut u8, + (self.stack_end_virt - self.stack_start_virt) as usize, + ) + } + } +} + +pub fn build_page_tables(c: PageTableConfig) -> KernelVirtInfo { + assert!( + c.kernel_exe_phys.1 <= GIB, + "TODO: Currently do not support kernel's size above 1Gib" + ); + assert!( + c.kernel_stack_phys.1 <= GIB, + "TODO: Currently do not support kernel's stack size above 1Gib" + ); + assert!(is_align_to(c.kernel_exe_phys.0, PAGE_2M)); + assert!(is_align_to(c.kernel_stack_phys.0, PAGE_2M)); + assert!(is_align_to(c.kernel_virt, PAGE_2M)); + + // ID MAP + for gig in 0..IDMAP_GIG_AMOUNT { + let table_ptr = unsafe { &raw mut (*TABLE_LVL2_ID.get())[gig] }; + + for mb2 in 0..512 { + let phy_addr = (mb2 as u64 * 2 * (MIB as u64)) + (gig as u64 * (GIB as u64)); + + let lvl2_entry = PageEntry2M::new() + .set_present_flag(true) + .set_read_write_flag(true) + 
.set_phy_address(phy_addr); + + unsafe { (*table_ptr).store(lvl2_entry, mb2) }; + } + + let lvl3_entry = PageEntryLvl3::new() + .set_present_flag(true) + .set_read_write_flag(true) + .set_next_entry_phy_address(unsafe { (*table_ptr).table_ptr() }); + + unsafe { (*TABLE_LVL3_ID.get()).store(lvl3_entry, gig) }; + } + + let lvl4_entry = PageEntryLvl4::new() + .set_present_flag(true) + .set_read_write_flag(true) + .set_next_entry_phy_address(unsafe { (*TABLE_LVL3_ID.get()).table_ptr() }); + + unsafe { (*TABLE_LVL4.get()).store(lvl4_entry, 0) }; + + // KERNEL MAP (EXE) + let tbl2_offset = PageMapLvl2::addr2index(c.kernel_virt % PageMapLvl2::SIZE_FOR_TABLE).unwrap(); + let tbl3_offset = PageMapLvl4::addr2index(c.kernel_virt % PageMapLvl3::SIZE_FOR_TABLE).unwrap(); + let tbl4_offset = PageMapLvl4::addr2index(c.kernel_virt % PageMapLvl4::SIZE_FOR_TABLE).unwrap(); + + let exe_pages = ((c.kernel_exe_phys.1 - 1) / PAGE_2M) + 1; + let stack_pages = ((c.kernel_stack_phys.1 - 1) / PAGE_2M) + 1; + + for mb2 in 0..exe_pages { + let phy_addr = c.kernel_exe_phys.0 + (mb2 * PAGE_2M) as u64; + + let lvl2_entry = PageEntry2M::new() + .set_present_flag(true) + .set_read_write_flag(true) + .set_phy_address(phy_addr); + + unsafe { (*TABLE_LVL2_KERN.get()).store(lvl2_entry, mb2 + tbl2_offset) }; + } + + // KERNEL MAP (STACK) + for mb2 in 0..stack_pages { + let phy_addr = c.kernel_stack_phys.0 + (mb2 * PAGE_2M) as u64; + + let lvl2_entry = PageEntry2M::new() + .set_present_flag(true) + .set_read_write_flag(true) + .set_phy_address(phy_addr); + + unsafe { (*TABLE_LVL2_KERN.get()).store(lvl2_entry, mb2 + exe_pages + 1 + tbl2_offset) }; + } + + let lvl3_kernel_entry = PageEntryLvl3::new() + .set_present_flag(true) + .set_read_write_flag(true) + .set_next_entry_phy_address(unsafe { (*TABLE_LVL2_KERN.get()).table_ptr() }); + + unsafe { (*TABLE_LVL3_KERN.get()).store(lvl3_kernel_entry, tbl3_offset) }; + + let lvl4_entry = PageEntryLvl4::new() + .set_present_flag(true) + .set_read_write_flag(true) + .set_next_entry_phy_address(unsafe { (*TABLE_LVL3_ID.get()).table_ptr() }); + + unsafe { (*TABLE_LVL4.get()).store(lvl4_entry, tbl4_offset) }; + + KernelVirtInfo { + exe_start_virt: c.kernel_virt, + exe_end_virt: c.kernel_virt + (exe_pages * PAGE_2M) as u64, + stack_start_virt: c.kernel_virt + ((exe_pages + 1) * PAGE_2M) as u64, + stack_end_virt: c.kernel_virt + ((exe_pages + stack_pages + 1) * PAGE_2M) as u64, + } +} + +pub unsafe fn load_page_tables() { + let phy_addr = unsafe { (*TABLE_LVL4.get()).table_ptr() }; + + unsafe { cr3::set_page_directory_base_register(phy_addr) }; +} diff --git a/crates/arch/Cargo.toml b/crates/arch/Cargo.toml index b4b171f4..dd2efd37 100644 --- a/crates/arch/Cargo.toml +++ b/crates/arch/Cargo.toml @@ -9,3 +9,4 @@ documentation.workspace = true [dependencies] bits = {workspace = true} hw = {workspace = true} +util = {workspace = true} diff --git a/crates/arch/src/paging64.rs b/crates/arch/src/paging64.rs index ec8180f8..10721234 100644 --- a/crates/arch/src/paging64.rs +++ b/crates/arch/src/paging64.rs @@ -362,10 +362,25 @@ impl Lvl1Entry for PageEntry4K { } impl PageMapLvl1 { + pub const SIZE_PER_INDEX: u64 = util::consts::PAGE_4K as u64; + pub const SIZE_FOR_TABLE: u64 = util::consts::PAGE_4K as u64 * 512; + pub const fn new() -> Self { Self([0; 512]) } + /// Convert an address to a table offset. + /// + /// If the given `addr` is larger than the page table, + /// it will return `None`. 
+ pub const fn addr2index(addr: u64) -> Option { + if addr > Self::SIZE_FOR_TABLE { + None + } else { + Some(((addr.saturating_sub(1)) / Self::SIZE_PER_INDEX) as usize + 1) + } + } + pub fn store(&mut self, entry: impl Lvl1Entry, index: usize) { self.0[index] = entry.into_raw(); } @@ -380,10 +395,25 @@ impl PageMapLvl1 { } impl PageMapLvl2 { + pub const SIZE_PER_INDEX: u64 = util::consts::PAGE_2M as u64; + pub const SIZE_FOR_TABLE: u64 = util::consts::PAGE_2M as u64 * 512; + pub const fn new() -> Self { Self([0; 512]) } + /// Convert an address to a table offset. + /// + /// If the given `addr` is larger than the page table, + /// it will return `None`. + pub const fn addr2index(addr: u64) -> Option { + if addr > Self::SIZE_FOR_TABLE { + None + } else { + Some(((addr.saturating_sub(1)) / Self::SIZE_PER_INDEX) as usize + 1) + } + } + pub fn store(&mut self, entry: impl Lvl2Entry, index: usize) { self.0[index] = entry.into_raw(); } @@ -398,9 +428,24 @@ impl PageMapLvl2 { } impl PageMapLvl3 { + pub const SIZE_PER_INDEX: u64 = util::consts::PAGE_1G as u64 ; + pub const SIZE_FOR_TABLE: u64 = util::consts::PAGE_1G as u64 * 512 ; + pub const fn new() -> Self { Self([0; 512]) } + + /// Convert an address to a table offset. + /// + /// If the given `addr` is larger than the page table, + /// it will return `None`. + pub const fn addr2index(addr: u64) -> Option { + if addr > Self::SIZE_FOR_TABLE { + None + } else { + Some(((addr.saturating_sub(1)) / Self::SIZE_PER_INDEX) as usize + 1) + } + } pub fn store(&mut self, entry: impl Lvl3Entry, index: usize) { self.0[index] = entry.into_raw(); @@ -416,10 +461,25 @@ impl PageMapLvl3 { } impl PageMapLvl4 { + pub const SIZE_PER_INDEX: u64 = util::consts::PAGE_1G as u64 * 512; + pub const SIZE_FOR_TABLE: u64 = util::consts::PAGE_1G as u64 * 512 * 512; + pub const fn new() -> Self { Self([0; 512]) } + /// Convert an address to a table offset. + /// + /// If the given `addr` is larger than the page table, + /// it will return `None`. + pub const fn addr2index(addr: u64) -> Option { + if addr > Self::SIZE_FOR_TABLE { + None + } else { + Some(((addr.saturating_sub(1)) / Self::SIZE_PER_INDEX) as usize + 1) + } + } + pub fn store(&mut self, entry: impl Lvl4Entry, index: usize) { self.0[index] = entry.into_raw(); } @@ -434,6 +494,21 @@ impl PageMapLvl4 { } impl PageMapLvl5 { + pub const SIZE_PER_INDEX: u64 = util::consts::PAGE_1G as u64 * 512 * 512; + pub const SIZE_FOR_TABLE: u64 = util::consts::PAGE_1G as u64 * 512 * 512 * 512; + + /// Convert an address to a table offset. + /// + /// If the given `addr` is larger than the page table, + /// it will return `None`. 
+ pub const fn addr2index(addr: u64) -> Option { + if addr > Self::SIZE_FOR_TABLE { + None + } else { + Some(((addr.saturating_sub(1)) / Self::SIZE_PER_INDEX) as usize + 1) + } + } + pub const fn new() -> Self { Self([0; 512]) } diff --git a/crates/util/src/lib.rs b/crates/util/src/lib.rs index 5a0bd046..d070cb77 100644 --- a/crates/util/src/lib.rs +++ b/crates/util/src/lib.rs @@ -41,6 +41,11 @@ pub const fn align_to(addr: u64, alignment: usize) -> u64 { } } +/// Check the alignment of `addr` and `alignment` +pub const fn is_align_to(addr: u64, alignment: usize) -> bool { + addr % (alignment as u64) == 0 +} + #[cfg(test)] mod test { use super::*; diff --git a/kernel/src/main.rs b/kernel/src/main.rs index ba1029df..3b69dbba 100644 --- a/kernel/src/main.rs +++ b/kernel/src/main.rs @@ -44,6 +44,6 @@ extern "C" fn _start(stage_to_stage: u64) { } #[debug_ready] -fn main(stage_to_stage: &Stage32toStage64) { +fn main(_stage_to_stage: &Stage32toStage64) { logln!("Kernel!"); }
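Notes on the mapping scheme above (illustrative sketches, not part of the patch).

The tbl2/tbl3/tbl4 offsets computed in build_page_tables correspond to the standard x86_64 4-level walk that terminates at a 2 MiB page: 9 bits of the virtual address per table level, 21 bits of page offset. A minimal sketch of that decomposition (the helper name split_virt_2m is hypothetical, for illustration only):

    /// Split a canonical x86_64 virtual address into the indices used by a
    /// 4-level walk that ends at a 2 MiB page.
    const fn split_virt_2m(vaddr: u64) -> (usize, usize, usize, u64) {
        let pml4 = ((vaddr >> 39) & 0x1FF) as usize; // bits 47..=39: lvl4 index
        let pdpt = ((vaddr >> 30) & 0x1FF) as usize; // bits 38..=30: lvl3 index
        let pd   = ((vaddr >> 21) & 0x1FF) as usize; // bits 29..=21: lvl2 index
        let off  = vaddr & 0x1F_FFFF;                // bits 20..=0 : offset inside the 2 MiB page
        (pml4, pdpt, pd, off)
    }

For the kernel_virt chosen above, split_virt_2m(0x1000_0000_0000) gives (32, 0, 0, 0): PML4 slot 32, the first PDPT slot, and the first PD slot, which is why one level-3 table and one level-2 table are enough for the kernel mapping as long as the image and stack stay under 1 GiB.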
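The PageEntry2M builder presumably encodes the bits the Intel SDM defines for a page-directory entry that maps a 2 MiB page: present (bit 0), read/write (bit 1), and the PS bit (bit 7), with the 2 MiB-aligned physical address occupying the high bits. A minimal sketch under that assumption (raw_2m_entry is hypothetical, not the crate's API):

    const PRESENT: u64    = 1 << 0;
    const READ_WRITE: u64 = 1 << 1;
    const PAGE_SIZE: u64  = 1 << 7; // PS: this PD entry maps a 2 MiB page directly

    /// Build the raw u64 for a writable, present 2 MiB page-directory entry.
    const fn raw_2m_entry(phys: u64) -> u64 {
        // The physical address must be 2 MiB aligned; its low 21 bits are
        // reused for flags and reserved bits.
        assert!(phys % (2 * 1024 * 1024) == 0);
        phys | PRESENT | READ_WRITE | PAGE_SIZE
    }

Without the PS bit a PD entry would instead point at a level-1 table of 4 KiB pages, which is why both the identity map and the kernel map above can stop at level 2.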
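The KernelVirtInfo returned by build_page_tables describes the following virtual layout, with entry exe_pages of the kernel's level-2 table left unused so that a 2 MiB unmapped gap separates the image from the stack:

    exe:   [kernel_virt, kernel_virt + exe_pages * 2 MiB)
    gap:   one unmapped 2 MiB entry
    stack: [kernel_virt + (exe_pages + 1) * 2 MiB,
            kernel_virt + (exe_pages + 1 + stack_pages) * 2 MiB)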
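load_page_tables only has to hand the physical address of the top-level table to CR3. A sketch of roughly what cr3::set_page_directory_base_register is expected to boil down to (write_cr3 is a hypothetical stand-in, not the arch crate's implementation):

    /// Install a new PML4 and flush non-global TLB entries.
    #[inline]
    unsafe fn write_cr3(pml4_phys: u64) {
        unsafe {
            core::arch::asm!(
                "mov cr3, {0}",
                in(reg) pml4_phys,
                options(nostack, preserves_flags),
            );
        }
    }

The switch is safe to perform from the bootloader because the first GiB (IDMAP_GIG_AMOUNT = 1) is identity-mapped in the new tables, so the currently executing code, its stack, and the static page tables themselves stay reachable at the same addresses after CR3 is written.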