Commit b666b3b
Bootloader: Building kernel page tables and loading elf
corigan01 committed Jan 6, 2025
1 parent 3b8756d
Showing 7 changed files with 284 additions and 11 deletions.
1 change: 1 addition & 0 deletions bootloader/stage-64bit/Cargo.toml
@@ -13,3 +13,4 @@ lldebug = {workspace = true}
elf = {workspace = true}
mem = {workspace = true}
util = {workspace = true}
arch = {workspace = true}
48 changes: 38 additions & 10 deletions bootloader/stage-64bit/src/main.rs
@@ -33,7 +33,7 @@ use elf::{
Elf,
tables::{ArchKind, SegmentKind},
};
use lldebug::{debug_ready, logln, make_debug};
use lldebug::{debug_ready, hexdump::HexPrint, log, logln, make_debug};
use mem::phys::{PhysMemoryEntry, PhysMemoryKind, PhysMemoryMap};
use serial::{Serial, baud::SerialBaud};
use util::{
@@ -71,25 +71,44 @@ fn main(stage_to_stage: &Stage32toStage64) {
.exe_size()
.expect("Unable to determine the size of the Kernel's exe!");

logln!("Kernel Size: {}", HumanBytes::from(kernel_exe_len));
build_memory_map(stage_to_stage, kernel_exe_len);
logln!("Kernel Size : {}", HumanBytes::from(kernel_exe_len));
let page_info = build_memory_map(stage_to_stage, kernel_exe_len);
let mut virt_info = paging::build_page_tables(page_info);

let elf_header = match elf.header() {
log!("Loading new page tables...");
unsafe { paging::load_page_tables() };
logln!("OK");

let _elf_header = match elf.header() {
Ok(elf::tables::ElfHeader::Header64(h)) if h.arch() == ArchKind::X64 && h.is_le() => h,
_ => panic!("Kernel's elf is not valid!"),
};

log!("Loading ELF (");
elf.load_into(|h| {
if h.segment_kind() != SegmentKind::Load {
return None;
}
log!(".");

let vaddr = h.expected_vaddr();
let len = h.in_mem_size() as u64;

assert!(
vaddr >= virt_info.exe_start_virt && (vaddr + len) <= virt_info.exe_end_virt,
"Cannot fit section into mapped area"
);

None
Some(unsafe { core::slice::from_raw_parts_mut(vaddr as *mut u8, len as usize) })
})
.unwrap();
logln!(") -- OK");

let kernel_exe_slice = virt_info.exe_slice();
logln!("{}", (&kernel_exe_slice[..1024]).hexdump());
}

fn build_memory_map(s2s: &Stage32toStage64, kernel_exe_len: usize) {
fn build_memory_map(s2s: &Stage32toStage64, kernel_exe_len: usize) -> paging::PageTableConfig {
unsafe {
let mm = &mut *MEMORY_MAP.get();

@@ -99,7 +118,7 @@ fn build_memory_map(s2s: &Stage32toStage64, kernel_exe_len: usize) {
}

logln!(
"Free Memory : {} Mib",
"Free Memory : {} Mib",
mm.bytes_of(PhysMemoryKind::Free) / MIB
);
logln!(
@@ -134,8 +153,8 @@ fn build_memory_map(s2s: &Stage32toStage64, kernel_exe_len: usize) {
let kernels_pages = mm
.find_continuous_of(
PhysMemoryKind::Free,
align_to(kernel_exe_len as u64, PAGE_4K) as usize,
PAGE_4K,
align_to(kernel_exe_len as u64, PAGE_2M) as usize,
PAGE_2M,
1 * MIB as u64,
)
.map(|p| PhysMemoryEntry {
@@ -146,7 +165,7 @@ fn build_memory_map(s2s: &Stage32toStage64, kernel_exe_len: usize) {
mm.add_region(kernels_pages).unwrap();

let kernels_stack_pages = mm
.find_continuous_of(PhysMemoryKind::Free, PAGE_2M, PAGE_4K, 1 * MIB as u64)
.find_continuous_of(PhysMemoryKind::Free, PAGE_2M, PAGE_2M, 1 * MIB as u64)
.map(|p| PhysMemoryEntry {
kind: PhysMemoryKind::Kernel,
..p
@@ -155,5 +174,14 @@ fn build_memory_map(s2s: &Stage32toStage64, kernel_exe_len: usize) {
mm.add_region(kernels_stack_pages).unwrap();

logln!("{}", mm);

paging::PageTableConfig {
kernel_exe_phys: (kernels_pages.start, kernels_pages.len() as usize),
kernel_stack_phys: (
kernels_stack_pages.start,
kernels_stack_pages.len() as usize,
),
kernel_virt: 0x100000000000,
}
}
}
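
For reference, here is a minimal standalone sketch of the virtual layout that build_memory_map and paging::build_page_tables set up. The helper names (align_up_2m, layout_for) and the example sizes are illustrative only and not from this repository; the arithmetic mirrors the KernelVirtInfo values returned by build_page_tables: exe and stack are each rounded up to whole 2 MiB pages, mapped at kernel_virt = 0x100000000000, with one unmapped 2 MiB page left between them.

// Standalone sketch (not part of this commit) of the virtual-memory layout.
const PAGE_2M: u64 = 2 * 1024 * 1024;

fn align_up_2m(len: u64) -> u64 {
    (len + PAGE_2M - 1) / PAGE_2M * PAGE_2M
}

/// Returns (exe_start, exe_end, stack_start, stack_end) virtual addresses,
/// assuming the exe is mapped at `kernel_virt`, then one unmapped 2 MiB page,
/// then the stack.
fn layout_for(kernel_virt: u64, exe_len: u64, stack_len: u64) -> (u64, u64, u64, u64) {
    let exe_pages = align_up_2m(exe_len) / PAGE_2M;
    let stack_pages = align_up_2m(stack_len) / PAGE_2M;
    let exe_start = kernel_virt;
    let exe_end = exe_start + exe_pages * PAGE_2M;
    let stack_start = exe_end + PAGE_2M; // the skipped 2 MiB page between exe and stack
    let stack_end = stack_start + stack_pages * PAGE_2M;
    (exe_start, exe_end, stack_start, stack_end)
}

fn main() {
    // Example: a 3 MiB kernel exe and a 2 MiB stack.
    let (es, ee, ss, se) = layout_for(0x1000_0000_0000, 3 * 1024 * 1024, PAGE_2M);
    println!("exe   : {es:#x}..{ee:#x}"); // 0x100000000000..0x100000400000
    println!("stack : {ss:#x}..{se:#x}"); // 0x100000600000..0x100000800000
}
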
163 changes: 163 additions & 0 deletions bootloader/stage-64bit/src/paging.rs
@@ -22,3 +22,166 @@ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FO
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

use core::cell::SyncUnsafeCell;

use arch::{
paging64::{PageEntry2M, PageEntryLvl3, PageEntryLvl4, PageMapLvl2, PageMapLvl3, PageMapLvl4},
registers::cr3,
};
use util::{
consts::{GIB, MIB, PAGE_2M},
is_align_to,
};

/// Amount of Gib to identity map
const IDMAP_GIG_AMOUNT: usize = 1;

// Main Table
static TABLE_LVL4: SyncUnsafeCell<PageMapLvl4> = SyncUnsafeCell::new(PageMapLvl4::new());

// Tables for lower memory id-mapping
static TABLE_LVL3_ID: SyncUnsafeCell<PageMapLvl3> = SyncUnsafeCell::new(PageMapLvl3::new());
static TABLE_LVL2_ID: SyncUnsafeCell<[PageMapLvl2; IDMAP_GIG_AMOUNT]> =
SyncUnsafeCell::new([PageMapLvl2::new(); IDMAP_GIG_AMOUNT]);

// Tables for higher-half kernel
static TABLE_LVL3_KERN: SyncUnsafeCell<PageMapLvl3> = SyncUnsafeCell::new(PageMapLvl3::new());
static TABLE_LVL2_KERN: SyncUnsafeCell<PageMapLvl2> = SyncUnsafeCell::new(PageMapLvl2::new());

#[derive(Debug)]
pub struct PageTableConfig {
pub kernel_exe_phys: (u64, usize),
pub kernel_stack_phys: (u64, usize),
pub kernel_virt: u64,
}

#[derive(Debug)]
pub struct KernelVirtInfo {
pub exe_start_virt: u64,
pub exe_end_virt: u64,
pub stack_start_virt: u64,

[GitHub Actions (Build OS) warning on line 63: fields `stack_start_virt` and `stack_end_virt` are never read]
pub stack_end_virt: u64,
}

impl KernelVirtInfo {
pub fn exe_slice(&mut self) -> &'static mut [u8] {
unsafe {
core::slice::from_raw_parts_mut(
self.exe_start_virt as *mut u8,
(self.exe_end_virt - self.exe_start_virt) as usize,
)
}
}

pub fn stack_slice(&mut self) -> &'static mut [u8] {

[GitHub Actions (Build OS) warning on line 77: method `stack_slice` is never used]
unsafe {
core::slice::from_raw_parts_mut(
self.stack_start_virt as *mut u8,
(self.stack_end_virt - self.stack_start_virt) as usize,
)
}
}
}

pub fn build_page_tables(c: PageTableConfig) -> KernelVirtInfo {
assert!(
c.kernel_exe_phys.1 <= GIB,
"TODO: Currently do not support kernel's size above 1Gib"
);
assert!(
c.kernel_stack_phys.1 <= GIB,
"TODO: Currently do not support kernel's stack size above 1Gib"
);
assert!(is_align_to(c.kernel_exe_phys.0, PAGE_2M));
assert!(is_align_to(c.kernel_stack_phys.0, PAGE_2M));
assert!(is_align_to(c.kernel_virt, PAGE_2M));

// ID MAP
for gig in 0..IDMAP_GIG_AMOUNT {
let table_ptr = unsafe { &raw mut (*TABLE_LVL2_ID.get())[gig] };

for mb2 in 0..512 {
let phy_addr = (mb2 as u64 * 2 * (MIB as u64)) + (gig as u64 * (GIB as u64));

let lvl2_entry = PageEntry2M::new()
.set_present_flag(true)
.set_read_write_flag(true)
.set_phy_address(phy_addr);

unsafe { (*table_ptr).store(lvl2_entry, mb2) };
}

let lvl3_entry = PageEntryLvl3::new()
.set_present_flag(true)
.set_read_write_flag(true)
.set_next_entry_phy_address(unsafe { (*table_ptr).table_ptr() });

unsafe { (*TABLE_LVL3_ID.get()).store(lvl3_entry, gig) };
}

let lvl4_entry = PageEntryLvl4::new()
.set_present_flag(true)
.set_read_write_flag(true)
.set_next_entry_phy_address(unsafe { (*TABLE_LVL3_ID.get()).table_ptr() });

unsafe { (*TABLE_LVL4.get()).store(lvl4_entry, 0) };

// KERNEL MAP (EXE)
let tbl2_offset = PageMapLvl2::addr2index(c.kernel_virt % PageMapLvl2::SIZE_FOR_TABLE).unwrap();
let tbl3_offset = PageMapLvl3::addr2index(c.kernel_virt % PageMapLvl3::SIZE_FOR_TABLE).unwrap();
let tbl4_offset = PageMapLvl4::addr2index(c.kernel_virt % PageMapLvl4::SIZE_FOR_TABLE).unwrap();

let exe_pages = ((c.kernel_exe_phys.1 - 1) / PAGE_2M) + 1;
let stack_pages = ((c.kernel_stack_phys.1 - 1) / PAGE_2M) + 1;

for mb2 in 0..exe_pages {
let phy_addr = c.kernel_exe_phys.0 + (mb2 * PAGE_2M) as u64;

let lvl2_entry = PageEntry2M::new()
.set_present_flag(true)
.set_read_write_flag(true)
.set_phy_address(phy_addr);

unsafe { (*TABLE_LVL2_KERN.get()).store(lvl2_entry, mb2 + tbl2_offset) };
}

// KERNEL MAP (STACK)
for mb2 in 0..stack_pages {
let phy_addr = c.kernel_stack_phys.0 + (mb2 * PAGE_2M) as u64;

let lvl2_entry = PageEntry2M::new()
.set_present_flag(true)
.set_read_write_flag(true)
.set_phy_address(phy_addr);

unsafe { (*TABLE_LVL2_KERN.get()).store(lvl2_entry, mb2 + exe_pages + 1 + tbl2_offset) };
}

let lvl3_kernel_entry = PageEntryLvl3::new()
.set_present_flag(true)
.set_read_write_flag(true)
.set_next_entry_phy_address(unsafe { (*TABLE_LVL2_KERN.get()).table_ptr() });

unsafe { (*TABLE_LVL3_KERN.get()).store(lvl3_kernel_entry, tbl3_offset) };

let lvl4_entry = PageEntryLvl4::new()
.set_present_flag(true)
.set_read_write_flag(true)
.set_next_entry_phy_address(unsafe { (*TABLE_LVL3_KERN.get()).table_ptr() });

unsafe { (*TABLE_LVL4.get()).store(lvl4_entry, tbl4_offset) };

KernelVirtInfo {
exe_start_virt: c.kernel_virt,
exe_end_virt: c.kernel_virt + (exe_pages * PAGE_2M) as u64,
stack_start_virt: c.kernel_virt + ((exe_pages + 1) * PAGE_2M) as u64,
stack_end_virt: c.kernel_virt + ((exe_pages + stack_pages + 1) * PAGE_2M) as u64,
}
}

pub unsafe fn load_page_tables() {
let phy_addr = unsafe { (*TABLE_LVL4.get()).table_ptr() };

unsafe { cr3::set_page_directory_base_register(phy_addr) };
}
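
A side note on the offset arithmetic in build_page_tables: the sketch below uses the standard x86-64 4-level address decomposition (it is not code from this repository) to show how a virtual address splits into the level-4/3/2 indices that tbl4_offset, tbl3_offset and tbl2_offset represent when 2 MiB pages are used. For the kernel_virt of 0x100000000000 chosen above, the kernel lands in LVL4 slot 32, with the LVL3 and LVL2 slots starting at 0.

// Illustrative sketch (not from this repository): split a canonical virtual
// address into PML4 / PDPT / PD indices for 2 MiB pages.
fn split_2m(vaddr: u64) -> (usize, usize, usize, u64) {
    let lvl4 = ((vaddr >> 39) & 0x1ff) as usize; // each LVL4 entry spans 512 GiB
    let lvl3 = ((vaddr >> 30) & 0x1ff) as usize; // each LVL3 entry spans 1 GiB
    let lvl2 = ((vaddr >> 21) & 0x1ff) as usize; // each LVL2 entry spans 2 MiB
    let offset = vaddr & 0x1f_ffff;              // byte offset inside the 2 MiB page
    (lvl4, lvl3, lvl2, offset)
}

fn main() {
    let (l4, l3, l2, off) = split_2m(0x1000_0000_0000);
    assert_eq!((l4, l3, l2, off), (32, 0, 0, 0));
    println!("lvl4={l4} lvl3={l3} lvl2={l2} offset={off:#x}");
}
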
1 change: 1 addition & 0 deletions crates/arch/Cargo.toml
@@ -9,3 +9,4 @@ documentation.workspace = true
[dependencies]
bits = {workspace = true}
hw = {workspace = true}
util = {workspace = true}