diff --git a/.cargo/config.toml b/.cargo/config.toml
index f62e735..6d40238 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -1,5 +1,5 @@
 [unstable]
-build-std = ["core"]
+build-std = ["core", "compiler_builtins", "alloc"]
 build-std-features = ["compiler-builtins-mem"]

 [build]
diff --git a/Cargo.toml b/Cargo.toml
index b8b5dba..706f146 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -4,6 +4,7 @@ name = "lateral"
 version = "0.1.0"

 [dependencies]
+linked_list_allocator = "0.9.0"
 pc-keyboard = "0.5.0"
 pic8259 = "0.10.1"
 spin = "0.9.1"
diff --git a/src/alloc/block.rs b/src/alloc/block.rs
new file mode 100644
index 0000000..e256051
--- /dev/null
+++ b/src/alloc/block.rs
@@ -0,0 +1,96 @@
+use core::alloc::GlobalAlloc;
+use core::alloc::Layout;
+use core::mem;
+use core::ptr;
+use core::ptr::NonNull;
+
+use super::lock::Locked;
+
+const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048];
+
+struct ListNode {
+    next: Option<&'static mut ListNode>,
+}
+
+pub struct FixedSizeBlockAllocator {
+    list_heads: [Option<&'static mut ListNode>; BLOCK_SIZES.len()],
+    fallback_allocator: linked_list_allocator::Heap,
+}
+
+impl FixedSizeBlockAllocator {
+    /// Creates an empty FixedSizeBlockAllocator.
+    pub const fn new() -> Self {
+        const EMPTY: Option<&'static mut ListNode> = None;
+        FixedSizeBlockAllocator {
+            list_heads: [EMPTY; BLOCK_SIZES.len()],
+            fallback_allocator: linked_list_allocator::Heap::empty(),
+        }
+    }
+
+    /// Initialize the allocator with the given heap bounds.
+    ///
+    /// This function is unsafe because the caller must guarantee that the given
+    /// heap bounds are valid and that the heap is unused. This method must be
+    /// called only once.
+    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
+        self.fallback_allocator.init(heap_start, heap_size);
+    }
+
+    fn fallback_alloc(&mut self, layout: Layout) -> *mut u8 {
+        match self.fallback_allocator.allocate_first_fit(layout) {
+            Ok(ptr) => ptr.as_ptr(),
+            Err(_) => ptr::null_mut(),
+        }
+    }
+}
+
+fn list_index(layout: &Layout) -> Option<usize> {
+    let required_block_size = layout.size().max(layout.align());
+    BLOCK_SIZES.iter().position(|&s| s >= required_block_size)
+}
+
+unsafe impl GlobalAlloc for Locked<FixedSizeBlockAllocator> {
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        let mut allocator = self.lock();
+        match list_index(&layout) {
+            Some(index) => {
+                match allocator.list_heads[index].take() {
+                    Some(node) => {
+                        allocator.list_heads[index] = node.next.take();
+                        node as *mut ListNode as *mut u8
+                    }
+                    None => {
+                        // no block exists in list => allocate new block
+                        let block_size = BLOCK_SIZES[index];
+                        // only works if all block sizes are a power of 2
+                        let block_align = block_size;
+                        let layout = Layout::from_size_align(block_size, block_align).unwrap();
+                        allocator.fallback_alloc(layout)
+                    }
+                }
+            }
+            None => allocator.fallback_alloc(layout),
+        }
+    }
+
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        let mut allocator = self.lock();
+        match list_index(&layout) {
+            Some(index) => {
+                let new_node = ListNode {
+                    next: allocator.list_heads[index].take(),
+                };
+                // verify that block has size and alignment required for storing node
+                assert!(mem::size_of::<ListNode>() <= BLOCK_SIZES[index]);
+                assert!(mem::align_of::<ListNode>() <= BLOCK_SIZES[index]);
+                let new_node_ptr = ptr as *mut ListNode;
+                new_node_ptr.write(new_node);
+                allocator.list_heads[index] = Some(&mut *new_node_ptr);
+            }
+            None => {
+                let ptr = NonNull::new(ptr).unwrap();
+                allocator.fallback_allocator.deallocate(ptr, layout);
+            }
+        }
+    }
+}
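Note on src/alloc/block.rs: `list_index` rounds each request up to the first size class that covers both the size and the alignment of the `Layout`; anything larger than 2048 bytes falls through to the `linked_list_allocator` fallback. A minimal standalone sketch of that lookup (illustrative only, not part of the patch; runnable on a host target):

use core::alloc::Layout;

const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048];

// Same rule as in block.rs: the chosen class must satisfy both the size
// and the alignment, which works because every class is a power of two
// and each block is aligned to its own size.
fn list_index(layout: &Layout) -> Option<usize> {
    let required_block_size = layout.size().max(layout.align());
    BLOCK_SIZES.iter().position(|&s| s >= required_block_size)
}

fn main() {
    // A 24-byte, 8-aligned request is served from the 32-byte class (index 2).
    assert_eq!(list_index(&Layout::from_size_align(24, 8).unwrap()), Some(2));
    // A 1-byte request with 64-byte alignment still needs the 64-byte class.
    assert_eq!(list_index(&Layout::from_size_align(1, 64).unwrap()), Some(3));
    // 4096 bytes exceeds every class, so alloc uses the first-fit fallback.
    assert_eq!(list_index(&Layout::from_size_align(4096, 8).unwrap()), None);
}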
diff --git a/src/alloc/global.rs b/src/alloc/global.rs
new file mode 100644
index 0000000..0d2cc6d
--- /dev/null
+++ b/src/alloc/global.rs
@@ -0,0 +1,23 @@
+extern crate alloc as rust_alloc;
+
+use core::ptr::null_mut;
+use rust_alloc::alloc::{GlobalAlloc, Layout};
+
+use super::block::FixedSizeBlockAllocator;
+use super::lock::Locked;
+
+#[global_allocator]
+pub(super) static ALLOCATOR: Locked<FixedSizeBlockAllocator> =
+    Locked::new(FixedSizeBlockAllocator::new());
+
+pub struct Dummy;
+
+unsafe impl GlobalAlloc for Dummy {
+    unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
+        null_mut()
+    }
+
+    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
+        panic!("dealloc should never be called")
+    }
+}
diff --git a/src/alloc/heap.rs b/src/alloc/heap.rs
new file mode 100644
index 0000000..d9a1a8d
--- /dev/null
+++ b/src/alloc/heap.rs
@@ -0,0 +1,38 @@
+use crate::alloc::global::ALLOCATOR;
+
+const KIB: usize = 0x400;
+pub const HEAP_START: usize = 0x_4444_4444_0000;
+pub const HEAP_SIZE: usize = 100 * KIB;
+
+use x86_64::{
+    structures::paging::{
+        mapper::MapToError, FrameAllocator, Mapper, Page, PageTableFlags, Size4KiB,
+    },
+    VirtAddr,
+};
+pub fn init_heap(
+    mapper: &mut impl Mapper<Size4KiB>,
+    frame_allocator: &mut impl FrameAllocator<Size4KiB>,
+) -> Result<(), MapToError<Size4KiB>> {
+    let page_range = {
+        let heap_start = VirtAddr::new(HEAP_START as u64);
+        let heap_end = heap_start + HEAP_SIZE - 1u64;
+        let heap_start_page = Page::containing_address(heap_start);
+        let heap_end_page = Page::containing_address(heap_end);
+        Page::range_inclusive(heap_start_page, heap_end_page)
+    };
+
+    for page in page_range {
+        let frame = frame_allocator
+            .allocate_frame()
+            .ok_or(MapToError::FrameAllocationFailed)?;
+        let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
+        unsafe { mapper.map_to(page, frame, flags, frame_allocator)?.flush() };
+    }
+
+    unsafe {
+        ALLOCATOR.lock().init(HEAP_START, HEAP_SIZE);
+    }
+
+    Ok(())
+}
diff --git a/src/alloc/lock.rs b/src/alloc/lock.rs
new file mode 100644
index 0000000..b3499d8
--- /dev/null
+++ b/src/alloc/lock.rs
@@ -0,0 +1,15 @@
+pub struct Locked<A> {
+    inner: spin::Mutex<A>,
+}
+
+impl<A> Locked<A> {
+    pub const fn new(inner: A) -> Self {
+        Locked {
+            inner: spin::Mutex::new(inner),
+        }
+    }
+
+    pub fn lock(&self) -> spin::MutexGuard<A> {
+        self.inner.lock()
+    }
+}
diff --git a/src/alloc/mod.rs b/src/alloc/mod.rs
new file mode 100644
index 0000000..1134b61
--- /dev/null
+++ b/src/alloc/mod.rs
@@ -0,0 +1,4 @@
+pub mod block;
+pub mod global;
+pub mod heap;
+pub mod lock;
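Note on src/alloc/global.rs and src/alloc/lock.rs: `GlobalAlloc::alloc` takes `&self`, so the allocator needs interior mutability, and the orphan rule forbids implementing the foreign `GlobalAlloc` trait directly on the foreign `spin::Mutex` type; the local `Locked<A>` newtype solves both at once. With the `#[global_allocator]` static in place, every `alloc`-crate container routes through the fixed-size block allocator. A minimal sketch of what that enables, assuming `init_heap` has already run (the function name here is illustrative, not part of the patch):

// Illustrative only: exercises both allocation paths of the new allocator.
extern crate alloc as rust_alloc;

use rust_alloc::{boxed::Box, vec::Vec};

fn exercise_allocator() {
    // 4 bytes with 4-byte alignment: served from the 8-byte block list.
    let small = Box::new(42u32);
    // A 4 KiB buffer exceeds the largest (2048-byte) class, so it is
    // served by the linked_list_allocator fallback instead.
    let mut large = Vec::<u8>::with_capacity(4096);
    large.push(1);
    // Dropping a block-sized allocation pushes the freed block onto the
    // matching free list, so the next allocation of that class is O(1).
    drop(small);
}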
diff --git a/src/lib.rs b/src/lib.rs
index 9573b99..de4edbd 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -5,7 +5,12 @@
 #![feature(custom_test_frameworks)]
 #![test_runner(crate::test::runner)]
 #![reexport_test_harness_main = "test_harness"]
+#![feature(alloc_error_handler)]
+#![feature(const_mut_refs)]

+extern crate alloc as rust_alloc;
+
+pub mod alloc;
 pub mod cpu;
 pub mod io;
 pub mod mem;
@@ -40,3 +45,8 @@ pub fn init() {
     unsafe { cpu::interrupt::PICS.lock().initialize() };
     x86_64::instructions::interrupts::enable();
 }
+
+#[alloc_error_handler]
+fn alloc_error_handler(layout: rust_alloc::alloc::Layout) -> ! {
+    panic!("allocation error: {:?}", layout)
+}
diff --git a/src/main.rs b/src/main.rs
index a90029b..a10e2e0 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -11,9 +11,11 @@ bootloader::entry_point!(tests::main);

 #[cfg(not(test))]
 mod kernel {
+    extern crate alloc as rust_alloc;
     use lateral::mem::frame::BootInfoFrameAllocator;
     use lateral::mem::paging;
     use lateral::println;
+    use rust_alloc::boxed::Box;
     use x86_64::structures::paging::Page;
     use x86_64::VirtAddr;

@@ -32,6 +34,12 @@ mod kernel {
         let page_ptr: *mut u64 = page.start_address().as_mut_ptr();
         unsafe { page_ptr.offset(400).write_volatile(0x_f021_f077_f065_f04e) };

+        lateral::alloc::heap::init_heap(&mut mapper, &mut frame_allocator)
+            .expect("heap initialization failed");
+
+        let x = Box::new(100);
+        println!("{}", x);
+
         println!("Hello World!");
         lateral::halt_loop();
     }
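Note on src/main.rs: the `Box::new(100)` is a smoke test by hand. Since src/lib.rs already wires up `custom_test_frameworks`, the same check could live in the test harness; a hypothetical test case (name and placement assumed, not part of the patch):

// Hypothetical #[test_case] for the custom test runner declared in src/lib.rs.
extern crate alloc as rust_alloc;

use rust_alloc::{boxed::Box, vec::Vec};

#[test_case]
fn heap_allocations_work() {
    // Round-trips a value through the global allocator.
    let boxed = Box::new(100u64);
    assert_eq!(*boxed, 100);

    // Many short-lived allocations: each freed block should be reused
    // from its free list rather than exhausting the 100 KiB heap.
    for i in 0..10_000u64 {
        let v = Box::new(i);
        assert_eq!(*v, i);
    }

    // A large allocation exercises the fallback path.
    let big = Vec::<u8>::with_capacity(4096);
    assert!(big.capacity() >= 4096);
}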