Revert "page allocator: mutable references are disallowed in statics"
This reverts commit 58315b0.
The compiler bug mentioned there (rust-lang/rust#120450) is fixed now, so we can
continue to use mutable references in statics that don't point to any memory.

Update rust-toolchain to the latest nightly.
hieronymusma committed Feb 19, 2024
1 parent 61b5b07 commit a6ebf09
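
For context, below is a minimal, self-contained sketch of the pattern this revert restores: a const constructor whose mutable slice reference points to no memory (&mut []), so the allocator can sit directly in a static Mutex, with the real backing memory wired up later through init(). The names (SketchAllocator, ALLOC, BACKING) are made up for illustration, std's Mutex stands in for the kernel's common::mutex::Mutex, and the &mut [] in a static may still need a recent or nightly toolchain such as the one pinned in rust-toolchain.

use std::ops::Range;
use std::ptr::{addr_of_mut, null_mut};
use std::sync::Mutex;

// Simplified stand-in for MetadataPageAllocator (hypothetical names).
struct SketchAllocator<'a> {
    metadata: &'a mut [u8],   // one status byte per page, simplified to u8
    pages: Range<*mut u8>,    // raw pointer range over the backing heap
}

// SAFETY: sketch only; the raw pointers are never dereferenced here and the
// allocator is only reached through the Mutex.
unsafe impl<'a> Send for SketchAllocator<'a> {}

impl<'a> SketchAllocator<'a> {
    // `const fn`, so the value can initialize a static: the mutable reference
    // is to an empty slice, i.e. it does not point to any memory.
    const fn new() -> Self {
        Self {
            metadata: &mut [],
            pages: null_mut()..null_mut(),
        }
    }

    // Hook the allocator up to real backing memory at runtime.
    fn init(&mut self, memory: &'a mut [u8]) {
        let mid = memory.len() / 2;
        let (metadata, heap) = memory.split_at_mut(mid);
        self.metadata = metadata;
        self.pages = heap.as_mut_ptr_range();
    }
}

// No Option/OnceCell wrapper needed: the const constructor is the initializer.
static ALLOC: Mutex<SketchAllocator<'static>> = Mutex::new(SketchAllocator::new());

fn main() {
    static mut BACKING: [u8; 4096] = [0; 4096];

    // SAFETY: BACKING is handed out exactly once, here.
    unsafe { ALLOC.lock().unwrap().init(&mut *addr_of_mut!(BACKING)) };

    let alloc = ALLOC.lock().unwrap();
    println!(
        "metadata bytes: {}, heap start: {:p}",
        alloc.metadata.len(),
        alloc.pages.start
    );
}

This mirrors the MetadataPageAllocator::new()/init() split in the diff below: because the constructor is const, the static needs no Option or OnceCell wrapper, and the call sites lose their as_mut()/unwrap()/expect() plumbing.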
Showing 4 changed files with 86 additions and 69 deletions.
rust-toolchain (2 changes: 1 addition & 1 deletion)
@@ -1,3 +1,3 @@
[toolchain]
channel = "nightly-2024-02-06"
channel = "nightly-2024-02-17"
targets = ["riscv64gc-unknown-none-elf"]
src/kernel/src/memory/heap.rs (16 changes: 6 additions & 10 deletions)
@@ -269,28 +269,24 @@ mod test {
const HEAP_SIZE: usize = (HEAP_PAGES - 1) * PAGE_SIZE;

static mut PAGE_ALLOC_MEMORY: [u8; PAGE_SIZE * HEAP_PAGES] = [0; PAGE_SIZE * HEAP_PAGES];
- static PAGE_ALLOC: Mutex<Option<MetadataPageAllocator>> = Mutex::new(None);
+ static PAGE_ALLOC: Mutex<MetadataPageAllocator> = Mutex::new(MetadataPageAllocator::new());

struct TestAllocator;
impl PageAllocator for TestAllocator {
fn alloc(number_of_pages_requested: usize) -> Option<Range<NonNull<Page>>> {
- PAGE_ALLOC
-     .lock()
-     .as_mut()
-     .unwrap()
-     .alloc(number_of_pages_requested)
+ PAGE_ALLOC.lock().alloc(number_of_pages_requested)
}

fn dealloc(page: NonNull<Page>) {
- PAGE_ALLOC.lock().as_mut().unwrap().dealloc(page)
+ PAGE_ALLOC.lock().dealloc(page)
}
}

fn init_allocator() {
unsafe {
- *PAGE_ALLOC.lock() = Some(MetadataPageAllocator::new(&mut *addr_of_mut!(
-     PAGE_ALLOC_MEMORY
- )));
+ PAGE_ALLOC
+     .lock()
+     .init(&mut *addr_of_mut!(PAGE_ALLOC_MEMORY));
}
}

src/kernel/src/memory/mod.rs (20 changes: 4 additions & 16 deletions)
@@ -1,5 +1,4 @@
use core::{
- cell::OnceCell,
mem::{transmute, MaybeUninit},
ops::Range,
ptr::NonNull,
@@ -20,25 +19,17 @@ pub mod page_tables;

pub use page::PAGE_SIZE;

- static PAGE_ALLOCATOR: Mutex<OnceCell<MetadataPageAllocator>> = Mutex::new(OnceCell::new());
+ static PAGE_ALLOCATOR: Mutex<MetadataPageAllocator> = Mutex::new(MetadataPageAllocator::new());

pub struct StaticPageAllocator;

impl PageAllocator for StaticPageAllocator {
fn alloc(number_of_pages_requested: usize) -> Option<Range<NonNull<Page>>> {
- PAGE_ALLOCATOR
-     .lock()
-     .get_mut()
-     .expect("PAGE_ALLOCATOR has to be initialized")
-     .alloc(number_of_pages_requested)
+ PAGE_ALLOCATOR.lock().alloc(number_of_pages_requested)
}

fn dealloc(page: NonNull<Page>) {
- PAGE_ALLOCATOR
-     .lock()
-     .get_mut()
-     .expect("PAGE_ALLOCATOR has to be initialized")
-     .dealloc(page)
+ PAGE_ALLOCATOR.lock().dealloc(page)
}
}

@@ -48,8 +39,5 @@ pub fn init_page_allocator(heap_start: usize, heap_size: usize) {
elem.write(0);
}
let initialized_memory = unsafe { transmute::<&mut [MaybeUninit<u8>], &mut [u8]>(memory) };
- PAGE_ALLOCATOR
-     .lock()
-     .set(MetadataPageAllocator::new(initialized_memory))
-     .expect("PAGE_ALLOCATOR has to be uninitialized");
+ PAGE_ALLOCATOR.lock().init(initialized_memory);
}
src/kernel/src/memory/page_allocator.rs (117 changes: 75 additions & 42 deletions)
@@ -1,5 +1,9 @@
use crate::{debug, memory::PAGE_SIZE};
- use core::{fmt::Debug, ops::Range, ptr::NonNull};
+ use core::{
+     fmt::Debug,
+     ops::Range,
+     ptr::{null_mut, NonNull},
+ };

use super::page::Page;

@@ -26,7 +30,14 @@ impl<'a> Debug for MetadataPageAllocator<'a> {
}

impl<'a> MetadataPageAllocator<'a> {
- pub(super) fn new(memory: &'a mut [u8]) -> Self {
+ pub(super) const fn new() -> Self {
+     Self {
+         metadata: &mut [],
+         pages: null_mut()..null_mut(),
+     }
+ }
+
+ pub(super) fn init(&mut self, memory: &'a mut [u8]) {
let heap_size = memory.len();
let number_of_heap_pages = heap_size / (PAGE_SIZE + 1); // We need one byte per page as metadata

@@ -46,14 +57,14 @@ impl<'a> MetadataPageAllocator<'a> {

metadata.iter_mut().for_each(|x| *x = PageStatus::Free);

- let pages = heap.as_mut_ptr_range();
+ self.metadata = metadata;

debug!("Page allocator initalized");
debug!("Metadata start:\t\t{:p}", metadata);
debug!("Heap start:\t\t{:p}", pages.start);
debug!("Number of pages:\t{}\n", metadata.len());
self.pages = heap.as_mut_ptr_range();

- Self { metadata, pages }
+ debug!("Page allocator initalized");
+ debug!("Metadata start:\t\t{:p}", self.metadata);
+ debug!("Heap start:\t\t{:p}", self.pages.start);
+ debug!("Number of pages:\t{}\n", self.total_heap_pages());
}

fn total_heap_pages(&self) -> usize {
@@ -123,31 +134,53 @@ pub trait PageAllocator {

#[cfg(test)]
mod tests {
- use core::ptr::addr_of_mut;
+ use core::{
+     ops::Range,
+     ptr::{addr_of_mut, NonNull},
+ };

+ use common::mutex::Mutex;

use crate::memory::page_allocator::PageStatus;

- use super::{MetadataPageAllocator, PAGE_SIZE};
+ use super::{MetadataPageAllocator, Page, PAGE_SIZE};

static mut PAGE_ALLOC_MEMORY: [u8; PAGE_SIZE * 8] = [0; PAGE_SIZE * 8];
+ static PAGE_ALLOC: Mutex<MetadataPageAllocator> = Mutex::new(MetadataPageAllocator::new());

- fn create_allocator() -> MetadataPageAllocator<'static> {
-     unsafe { MetadataPageAllocator::new(&mut *addr_of_mut!(PAGE_ALLOC_MEMORY)) }
+ fn init_allocator() {
+     unsafe {
+         PAGE_ALLOC
+             .lock()
+             .init(&mut *addr_of_mut!(PAGE_ALLOC_MEMORY));
+     }
}
+
+ fn alloc(number_of_pages: usize) -> Option<Range<NonNull<Page>>> {
+     PAGE_ALLOC.lock().alloc(number_of_pages)
+ }
+
+ fn dealloc(pages: Range<NonNull<Page>>) {
+     PAGE_ALLOC.lock().dealloc(pages.start)
+ }

#[test_case]
fn clean_start() {
- let allocator = create_allocator();
- assert!(allocator.metadata.iter().all(|s| *s == PageStatus::Free));
+ init_allocator();
+ assert!(PAGE_ALLOC
+     .lock()
+     .metadata
+     .iter()
+     .all(|s| *s == PageStatus::Free));
}

#[test_case]
fn exhaustion_allocation() {
- let mut allocator = create_allocator();
- let number_of_pages = allocator.total_heap_pages();
- let _pages = allocator.alloc(number_of_pages).unwrap();
- assert!(allocator.alloc(1).is_none());
- let allocator = allocator;
+ init_allocator();
+ let number_of_pages = PAGE_ALLOC.lock().total_heap_pages();
+ let _pages = alloc(number_of_pages).unwrap();
+ assert!(alloc(1).is_none());
+ let allocator = PAGE_ALLOC.lock();
let (last, all_metadata_except_last) = allocator.metadata.split_last().unwrap();
assert!(all_metadata_except_last
.iter()
@@ -157,41 +190,41 @@ mod tests {

#[test_case]
fn beyond_capacity() {
- let mut allocator = create_allocator();
- let number_of_pages = allocator.total_heap_pages();
- let pages = allocator.alloc(number_of_pages + 1);
+ init_allocator();
+ let number_of_pages = PAGE_ALLOC.lock().total_heap_pages();
+ let pages = alloc(number_of_pages + 1);
assert!(pages.is_none());
}

#[test_case]
fn all_single_allocations() {
- let mut allocator = create_allocator();
- let number_of_pages = allocator.total_heap_pages();
+ init_allocator();
+ let number_of_pages = PAGE_ALLOC.lock().total_heap_pages();
for _ in 0..number_of_pages {
- assert!(allocator.alloc(1).is_some());
+ assert!(alloc(1).is_some());
}
- assert!(allocator.alloc(1).is_none());
+ assert!(alloc(1).is_none());
}

#[test_case]
fn metadata_integrity() {
- let mut allocator = create_allocator();
- let page1 = allocator.alloc(1).unwrap();
- assert_eq!(allocator.metadata[0], PageStatus::Last);
- assert!(allocator.metadata[1..]
+ init_allocator();
+ let page1 = alloc(1).unwrap();
+ assert_eq!(PAGE_ALLOC.lock().metadata[0], PageStatus::Last);
+ assert!(PAGE_ALLOC.lock().metadata[1..]
.iter()
.all(|s| *s == PageStatus::Free));
- let page2 = allocator.alloc(2).unwrap();
+ let page2 = alloc(2).unwrap();
assert_eq!(
- allocator.metadata[..3],
+ PAGE_ALLOC.lock().metadata[..3],
[PageStatus::Last, PageStatus::Used, PageStatus::Last]
);
- assert!(allocator.metadata[3..]
+ assert!(PAGE_ALLOC.lock().metadata[3..]
.iter()
.all(|s| *s == PageStatus::Free));
- let page3 = allocator.alloc(3).unwrap();
+ let page3 = alloc(3).unwrap();
assert_eq!(
- allocator.metadata[..6],
+ PAGE_ALLOC.lock().metadata[..6],
[
PageStatus::Last,
PageStatus::Used,
@@ -201,12 +234,12 @@
PageStatus::Last
]
);
- assert!(allocator.metadata[6..]
+ assert!(PAGE_ALLOC.lock().metadata[6..]
.iter()
.all(|s| *s == PageStatus::Free),);
- allocator.dealloc(page2.start);
+ dealloc(page2);
assert_eq!(
- allocator.metadata[..6],
+ PAGE_ALLOC.lock().metadata[..6],
[
PageStatus::Last,
PageStatus::Free,
@@ -216,9 +249,9 @@
PageStatus::Last
]
);
- allocator.dealloc(page1.start);
+ dealloc(page1);
assert_eq!(
- allocator.metadata[..6],
+ PAGE_ALLOC.lock().metadata[..6],
[
PageStatus::Free,
PageStatus::Free,
@@ -228,9 +261,9 @@
PageStatus::Last
]
);
- allocator.dealloc(page3.start);
+ dealloc(page3);
assert_eq!(
- allocator.metadata[..6],
+ PAGE_ALLOC.lock().metadata[..6],
[
PageStatus::Free,
PageStatus::Free,
