From f7f1f088bb80c395f8002ae764abd4c87264d46f Mon Sep 17 00:00:00 2001
From: Joshua Liebow-Feeser
Date: Fri, 16 Feb 2018 13:24:16 -0800
Subject: [PATCH] object-alloc: Switch from *mut to NonNull

- Switch from *mut u8 and *mut T to NonNull<u8> and NonNull<T> in trait methods
- Upgrade slab-alloc, mmap-alloc, and object-alloc-test accordingly
- Closes #140
---
 mmap-alloc/CHANGELOG.md              |   4 +
 mmap-alloc/Cargo.toml                |   2 +-
 mmap-alloc/src/lib.rs                |  14 +--
 object-alloc-test/CHANGELOG.md       |   4 +
 object-alloc-test/Cargo.toml         |   8 +-
 object-alloc-test/src/corruption.rs  | 159 ++++++++++++++----
 object-alloc-test/src/leaky_alloc.rs |  44 +++----
 object-alloc-test/src/lib.rs         |   1 +
 object-alloc/CHANGELOG.md            |   3 +
 object-alloc/src/lib.rs              |  18 +--
 slab-alloc/CHANGELOG.md              |   8 ++
 slab-alloc/Cargo.toml                |   4 +-
 slab-alloc/src/aligned.rs            |  16 +--
 slab-alloc/src/backing.rs            |   9 +-
 slab-alloc/src/init.rs               |  61 +++++-----
 slab-alloc/src/large.rs              |  69 ++++++-----
 slab-alloc/src/lib.rs                | 107 ++++++++--------
 slab-alloc/src/ptr_map.rs            |  56 +++++----
 slab-alloc/src/stack.rs              | 111 ++++++++-------
 slab-alloc/src/tests.rs              |  70 ++++++-----
 slab-alloc/src/util.rs               | 175 ++++++++++++++-------------
 slab-alloc/travis.sh                 |   2 +
 22 files changed, 524 insertions(+), 421 deletions(-)

diff --git a/mmap-alloc/CHANGELOG.md b/mmap-alloc/CHANGELOG.md
index a04b671..8559599 100644
--- a/mmap-alloc/CHANGELOG.md
+++ b/mmap-alloc/CHANGELOG.md
@@ -13,6 +13,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ## [Unreleased]
 
+### Changed
+- Upgraded to new `UntypedObjectAlloc` trait that uses `NonNull<u8>` instead
+  of `*mut u8`
+
 ## 0.2.0
 
 ### Added
diff --git a/mmap-alloc/Cargo.toml b/mmap-alloc/Cargo.toml
index fc2f01a..23425d8 100644
--- a/mmap-alloc/Cargo.toml
+++ b/mmap-alloc/Cargo.toml
@@ -26,6 +26,6 @@ errno = "0.2"
 kernel32-sys = "0.2"
 # use no_std libc
 libc = { version = "0.2", default-features = false }
-object-alloc = "0.1.0"
+object-alloc = { path = "../object-alloc" }
 sysconf = "0.3.1"
 winapi = "0.2"
diff --git a/mmap-alloc/src/lib.rs b/mmap-alloc/src/lib.rs
index 2bb86dc..9e85553 100644
--- a/mmap-alloc/src/lib.rs
+++ b/mmap-alloc/src/lib.rs
@@ -39,7 +39,7 @@ extern crate winapi;
 use self::alloc::allocator::{Alloc, AllocErr, CannotReallocInPlace, Excess, Layout};
 use self::object_alloc::{Exhausted, UntypedObjectAlloc};
-use core::ptr;
+use core::ptr::{self, NonNull};
 
 #[cfg(any(target_os = "linux", target_os = "macos"))]
 use errno::errno;
@@ -533,17 +533,17 @@ unsafe impl<'a> UntypedObjectAlloc for &'a MapAlloc {
         }
     }
 
-    unsafe fn alloc(&mut self) -> Result<*mut u8, Exhausted> {
+    unsafe fn alloc(&mut self) -> Result<NonNull<u8>, Exhausted> {
         // TODO: There's probably a method that does this more cleanly.
         match self.alloc_excess(self.layout()) {
-            Ok(Excess(ptr, _)) => Ok(ptr),
+            Ok(Excess(ptr, _)) => Ok(NonNull::new_unchecked(ptr)),
             Err(AllocErr::Exhausted { .. }) => Err(Exhausted),
             Err(AllocErr::Unsupported { ..
}) => unreachable!(), } } - unsafe fn dealloc(&mut self, ptr: *mut u8) { - unmap(ptr, self.obj_size); + unsafe fn dealloc(&mut self, ptr: NonNull) { + unmap(ptr.as_ptr(), self.obj_size); } } @@ -601,11 +601,11 @@ unsafe impl UntypedObjectAlloc for MapAlloc { <&MapAlloc as UntypedObjectAlloc>::layout(&(&*self)) } - unsafe fn alloc(&mut self) -> Result<*mut u8, Exhausted> { + unsafe fn alloc(&mut self) -> Result, Exhausted> { <&MapAlloc as UntypedObjectAlloc>::alloc(&mut (&*self)) } - unsafe fn dealloc(&mut self, ptr: *mut u8) { + unsafe fn dealloc(&mut self, ptr: NonNull) { <&MapAlloc as UntypedObjectAlloc>::dealloc(&mut (&*self), ptr); } } diff --git a/object-alloc-test/CHANGELOG.md b/object-alloc-test/CHANGELOG.md index 22d4873..2a5b9fd 100644 --- a/object-alloc-test/CHANGELOG.md +++ b/object-alloc-test/CHANGELOG.md @@ -15,3 +15,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added - Added this changelog + +### Changed +- Upgraded to new `ObjectAlloc` trait that uses `NonNull` instead + of `*mut u8` \ No newline at end of file diff --git a/object-alloc-test/Cargo.toml b/object-alloc-test/Cargo.toml index 7d0bc74..cde2adb 100644 --- a/object-alloc-test/Cargo.toml +++ b/object-alloc-test/Cargo.toml @@ -23,10 +23,14 @@ exclude = ["appveyor.sh", "travis.sh"] [dependencies] interpolate_idents = "0.2.2" +kernel32-sys = "0.2" lazy_static = "1.0.0" libc = "0.2" -object-alloc = "0.1.0" +object-alloc = { path = "../object-alloc" } quickcheck = "0.4" rand = "0.3" twox-hash = "1.1" -winapi = "0.2" + +[dependencies.winapi] +version = "0.3" +features = ["basetsd", "psapi"] \ No newline at end of file diff --git a/object-alloc-test/src/corruption.rs b/object-alloc-test/src/corruption.rs index e45a27b..e98a449 100644 --- a/object-alloc-test/src/corruption.rs +++ b/object-alloc-test/src/corruption.rs @@ -9,15 +9,15 @@ //! //! See `TestBuilder` for documentation on running tests. -extern crate core; extern crate libc; extern crate object_alloc; extern crate rand; extern crate test; extern crate twox_hash; -use self::core::fmt; -use self::core::fmt::{Debug, Formatter}; +use std::fmt; +use std::fmt::{Debug, Formatter}; +use std::ptr::NonNull; use self::test::black_box; use self::object_alloc::ObjectAlloc; @@ -188,7 +188,7 @@ mod wrapper { } // MIN_SIZE is large enough to hold a Header and a one-byte random nonce. -const MIN_SIZE: usize = core::mem::size_of::
<Header>() + 1; +const MIN_SIZE: usize = ::std::mem::size_of::<Header>
() + 1; #[derive(Debug)] struct Header { @@ -209,7 +209,7 @@ const NONCE_DROPPED: u32 = 2_620_515_550; impl Default for CorruptionTesterDefault { fn default() -> CorruptionTesterDefault { - use self::core::mem::uninitialized; + use std::mem::uninitialized; let mut tester = unsafe { uninitialized::>() }; CorruptionTester::init(&mut tester.0, Initializer::Default); assert_eq!(tester.0.state(Initializer::Default), State::New); @@ -225,7 +225,7 @@ pub unsafe fn unsafe_default(ptr: *mut CorruptionTesterUnsafe) { // The memory we're being given should either be invalid (i.e., not a CorruptionTester) or // should be an already-dropped CorruptionTester. let state = (*ptr).0.state(Initializer::Unsafe); - assert!(state == State::Invalid || state == State::Dropped); + assert!(state == State::Invalid || state == State::Dropped, "state: {:?}", state); CorruptionTester::init(&mut (*ptr).0, Initializer::Unsafe); assert_eq!((*ptr).0.state(Initializer::Unsafe), State::New); @@ -280,7 +280,7 @@ impl CorruptionTester { fn init(ptr: *mut CorruptionTester, init: Initializer) { unsafe { { - use self::core::ptr::write; + use std::ptr::write; let (hdr, slc) = (*ptr).to_header_and_slice_mut(); for elem in slc.iter_mut() { *elem = self::rand::random(); @@ -383,8 +383,8 @@ impl CorruptionTester { /// Split into a header and byte slice. fn to_header_and_slice(&self) -> (&Header, &[u8]) { - use self::core::mem::{size_of, transmute}; - use self::core::slice::from_raw_parts; + use std::mem::{size_of, transmute}; + use std::slice::from_raw_parts; let size = size_of::>(); let header_size = size_of::
(); @@ -397,8 +397,8 @@ impl CorruptionTester { /// Split into a mutable header and byte slice. #[cfg_attr(feature="cargo-clippy", allow(wrong_self_convention))] fn to_header_and_slice_mut(&mut self) -> (&mut Header, &mut [u8]) { - use self::core::mem::{size_of, transmute}; - use self::core::slice::from_raw_parts_mut; + use std::mem::{size_of, transmute}; + use std::slice::from_raw_parts_mut; let size = size_of::>(); let header_size = size_of::
(); @@ -415,7 +415,7 @@ impl CorruptionTester { /// `random_bytes` (in that order). fn hash(ptr: usize, state_nonce: u32, random_bytes: &[u8]) -> u32 { use self::twox_hash::XxHash; - use self::core::hash::Hasher; + use std::hash::Hasher; // we could do with_seed(0) and then write_usize(ptr), but this is (marginally) faster let mut hasher = XxHash::with_seed(ptr as u64); @@ -434,6 +434,8 @@ mod mapped { #[cfg(windows)] extern crate winapi; + use std::ptr::NonNull; + #[cfg(unix)] lazy_static!{ static ref RANDOM_FD: i32 = unsafe { @@ -450,65 +452,87 @@ mod mapped { /// /// Returns true if the range `[ptr, ptr + len)` represents mapped memory. #[cfg(unix)] - pub fn is_mapped_range(ptr: *mut u8, len: usize) -> bool { + pub fn is_mapped_range(ptr: NonNull, len: usize) -> bool { // Credit to http://stackoverflow.com/a/24081504/836390 unsafe { - let ret = libc::write(*RANDOM_FD, ptr as *const libc::c_void, len); - assert!(ret < 0 || (ret as usize) == len); + let ret = libc::write(*RANDOM_FD, ptr.cast().as_ptr(), len); + assert!(ret < 0 || (ret as usize) == len, "ret: {}", ret); (ret as usize) == len } } #[cfg(windows)] - pub fn is_mapped_range(ptr: *mut u8, len: usize) -> bool { - use core::mem::{uninitialized, size_of}; + pub fn is_mapped_range(ptr: NonNull, len: usize) -> bool { + use std::mem::{uninitialized, size_of}; use self::kernel32::{K32QueryWorkingSet, GetCurrentProcess}; - use self::winapi::c_void; - use self::winapi::basetsd::ULONG_PTR; + use self::winapi::shared::basetsd::ULONG_PTR; + use std::os::raw::c_void; + use self::winapi::um::psapi::PSAPI_WORKING_SET_BLOCK; + + // NOTE: winapi 0.3.4 has winapi::um::psapi::PSAPI_WORKING_SET_INFORMATION, + // but it's defined as having a single entry (just like the equivalent type + // defined in the Windows docs: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684910(v=vs.85).aspx). + // In order to support more than a single entry, we'd need to think about how + // Rust would deal with laying out entries after the end of a struct> Logically, + // the memory layout would be something like: + // (PSAPI_WORKING_SET_INFORMATION, [PSAPI_WORKING_SET_BLOCK]) + // It's easier to just define it here ourselves as having many entries, and + // thus avoiding having to worry about memory layout issues. + + // NOTE: It's possible that there will be more than 64 * 1024 entries, + // in which case K32QueryWorkingSet will return ERROR_BAD_LENGTH. + // If this becomes a problem, we may have to either bump the length + // of the entries field (while watching out for stack overflow; we + // could alternatively heap-allocate) or write the logic to dynamically + // allocate the right number of entries (on error, K32QueryWorkingSet + // will write the required number of entries into the num_entries field). #[repr(C)] struct WorkingSets { num_entries: ULONG_PTR, - entries: [PSAPI_WORKING_SET_BLOCK; 1024], + entries: [PSAPI_WORKING_SET_BLOCK; 64 * 1024], } - unsafe { + let entries = unsafe { let mut entries = uninitialized::(); let ptr = &mut entries as *mut _ as *mut c_void; let size = size_of::() as u32; assert_eq!(K32QueryWorkingSet(GetCurrentProcess(), ptr, size), 1); - } + entries + }; + + let entries = &entries.entries[..entries.num_entries]; // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684902(v=vs.85).aspx // for layout of entries. 
- // TODO: Once winapi 3.0 lands, there will be support for accessing the bitfields of + // TODO: Now that winapi 0.3 has landed, there is support for accessing the bitfields of // PSAPI_WORKING_SET_BLOCK programmatically (see https://github.com/retep998/winapi-rs/issues/482). + // Iterate over the entries to ensure the entire range is mapped. false } #[cfg(test)] mod tests { - extern crate core; + use std::ptr::NonNull; #[test] fn test_is_mapped_range() { use super::is_mapped_range; - use self::core::ptr::null_mut; - use self::core::mem::size_of; + use std::mem::size_of; - let arr = Box::new([0 as usize; 100]); + let arr = Box::new([0usize; 100]); unsafe { - let ptr = Box::into_raw(arr); - assert!(is_mapped_range(ptr as *mut u8, size_of::<[usize; 100]>())); - assert!(!is_mapped_range(null_mut(), 1)); - Box::from_raw(ptr); // make sure it gets dropped properly + let ptr = NonNull::new(Box::into_raw(arr)).unwrap(); + assert!(is_mapped_range(ptr.cast(), size_of::<[usize; 100]>())); + assert!(!is_mapped_range(NonNull::new(1usize as *mut u8).unwrap(), 1)); + Box::from_raw(ptr.as_ptr()); // make sure it gets dropped properly } } } } -use self::core::marker::PhantomData; +use std::marker::PhantomData; /// A builder for tests. /// @@ -618,12 +642,11 @@ impl, F: Fn() -> O> TestBuilder(new: F, tests: Option) where C: CorruptionTesterWrapper + Send + 'static, @@ -695,9 +718,9 @@ use std::collections::HashSet; struct Tester> { alloc: O, - alloced: Vec<*mut C>, - alloced_set: HashSet<*mut C>, - freed: HashSet<*mut C>, + alloced: Vec>, + alloced_set: HashSet>, + freed: HashSet>, } impl> Tester { @@ -720,10 +743,10 @@ impl> Tester { // either be still constructed (Valid) because it hadn't been dropped yet or New // because it was dropped and then re-initialized using default() or unsafe_default(). // Any other state is a bug. 
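// Illustrative aside (not part of this patch): the lifecycle these checks rely on,
// written as the assertions the tests in this module make on a fresh tester:
//
//     let mut t = CorruptionTesterDefault::default();
//     assert_eq!(t.state(), State::New);
//     t.update_new(true);
//     assert_eq!(t.state(), State::Valid);
//     // Dropping `t` moves it to State::Dropped; a recycled slot is brought back to
//     // State::New by default()/unsafe_default(), and garbage memory reads as Invalid.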
- match unsafe { (*obj).state() } { + match unsafe { (*obj.as_ptr()).state() } { State::New => unsafe { // we just verified the state, so no sense in wasting time checking again - (*obj).update_new(false); + (*obj.as_ptr()).update_new(false); }, State::Valid => {} state => { @@ -734,10 +757,10 @@ impl> Tester { } } else { // this is memory we've never seen before, so it should be New - match unsafe { (*obj).state() } { + match unsafe { (*obj.as_ptr()).state() } { State::New => unsafe { // we just verified the state, so no sense in wasting time checking again - (*obj).update_new(false); + (*obj.as_ptr()).update_new(false); }, state => { panic!("newly-allocated object at {:?} in unexpected state {:?}", @@ -752,9 +775,9 @@ impl> Tester { fn dealloc(&mut self, idx: usize) { let len = self.alloced.len(); - let obj: *mut C = self.alloced.swap_remove(idx % len); + let obj: NonNull = self.alloced.swap_remove(idx % len); // make sure it's still valid - assert_eq!(unsafe { (*obj).state() }, State::Valid); + assert_eq!(unsafe { (*obj.as_ptr()).state() }, State::Valid); self.alloced_set.remove(&obj); self.freed.insert(obj); unsafe { @@ -763,7 +786,7 @@ impl> Tester { } fn drop_and_check(mut self) { - use self::core::mem::drop; + use std::mem::drop; while !self.alloced.is_empty() { let idx = self.alloced.len() - 1; @@ -774,12 +797,12 @@ impl> Tester { drop(alloc); for obj in freed { - use self::core::mem; - if !mapped::is_mapped_range(obj as *mut u8, mem::size_of::()) { + use std::mem; + if !mapped::is_mapped_range(obj.cast(), mem::size_of::()) { // the underlying memory already got freed back to the kernel continue; } - match unsafe { (*obj).state() } { + match unsafe { (*obj.as_ptr()).state() } { State::Invalid | State::Dropped => {} state => { panic!("freed object at {:?} in unexpected state: {:?}", obj, state); @@ -791,12 +814,11 @@ impl> Tester { #[cfg(test)] pub mod tests { - extern crate core; extern crate rand; extern crate test; use super::*; - use self::core::{mem, ptr}; + use std::{mem, ptr}; fn make_default_boxed() -> Box { Box::new(CorruptionTesterDefault::default()) @@ -806,21 +828,34 @@ pub mod tests { unsafe { let obj = mem::uninitialized::(); let mut bx = Box::new(obj); - use self::core::ops::DerefMut; + use std::ops::DerefMut; unsafe_default(bx.deref_mut()); bx } } fn test_corruption_tester_helper(mut obj: Box) { - assert!(obj.state() == State::New); + assert_eq!(obj.state(), State::New); obj.update_new(true); - assert!(obj.state() == State::Valid); + assert_eq!(obj.state(), State::Valid); let ptr = Box::into_raw(obj); unsafe { - mem::drop(Box::from_raw(ptr)); - assert!((*ptr).state() == State::Dropped); + // Save the bytes (ptr::read doesn't drop) so we can write them + // back after dropping in place. If we didn't do this, when we + // put ptr back into a Box and dropped that Box, the CorruptionTesterWrapper + // drop implementation would discover it had already been dropped + // and would panic. + let bytes = ptr::read(ptr); + // drop in place (rather than just doing mem::drop(Box::from_raw(ptr))) + // so that we're guaranteed that the underlying space hasn't been + // unmapped or overwritten. A previous version of this function just + // freed and then hoped that the memory would still be valid, and it + // turned out not to be a valid assumption on Windows. 
+ ptr::drop_in_place(ptr); + assert_eq!((*ptr).state(), State::Dropped); + ptr::write(ptr, bytes); + Box::from_raw(ptr); } } @@ -832,17 +867,17 @@ pub mod tests { fn test_corruption_tester_corruption_helper(mut a: Box, mut b: Box) { - assert!(a.state() == State::New); - assert!(b.state() == State::New); + assert_eq!(a.state(), State::New); + assert_eq!(b.state(), State::New); a.update_new(true); b.update_new(true); - assert!(a.state() == State::Valid); - assert!(b.state() == State::Valid); + assert_eq!(a.state(), State::Valid); + assert_eq!(b.state(), State::Valid); - use self::core::ops::DerefMut; + use std::ops::DerefMut; unsafe { ptr::copy(a.deref_mut(), b.deref_mut(), 1) }; - assert!(a.state() == State::Valid); - assert!(b.state() == State::Invalid); + assert_eq!(a.state(), State::Valid); + assert_eq!(b.state(), State::Invalid); mem::forget(b); // so it doesn't panic when being dropped } @@ -854,7 +889,7 @@ pub mod tests { } fn test_corruption_tester_corruption_panic_on_drop_helper() { - use self::core::mem::zeroed; + use std::mem::zeroed; let _tester: C = unsafe { zeroed() }; } diff --git a/object-alloc-test/src/leaky_alloc.rs b/object-alloc-test/src/leaky_alloc.rs index 0d63262..883081c 100644 --- a/object-alloc-test/src/leaky_alloc.rs +++ b/object-alloc-test/src/leaky_alloc.rs @@ -31,13 +31,6 @@ impl Drop for Ptr { } } -impl LeakyAlloc { - /// Creates a new `LeakyAlloc`. - pub fn new() -> LeakyAlloc { - LeakyAlloc { allocs: Vec::new() } - } -} - unsafe impl Alloc for LeakyAlloc { unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { let ptr = Alloc::alloc(&mut Heap, layout.clone())?; @@ -56,52 +49,51 @@ unsafe impl Alloc for LeakyAlloc { #[cfg(test)] mod tests { extern crate alloc; - extern crate core; extern crate object_alloc; use self::alloc::heap::{Alloc, AllocErr, Layout}; - use self::core::marker::PhantomData; use self::object_alloc::{Exhausted, ObjectAlloc}; + use std::marker::PhantomData; + use std::ptr::NonNull; + #[derive(Default)] struct LeakyObjectAlloc { alloc: super::LeakyAlloc, _marker: PhantomData, } - impl LeakyObjectAlloc { - fn new() -> LeakyObjectAlloc { - LeakyObjectAlloc { - alloc: super::LeakyAlloc::new(), - _marker: PhantomData, - } - } - } + // impl LeakyObjectAlloc { + // fn new() -> LeakyObjectAlloc { + // LeakyObjectAlloc { + // alloc: super::LeakyAlloc::new(), + // _marker: PhantomData, + // } + // } + // } unsafe impl ObjectAlloc for LeakyObjectAlloc { - unsafe fn alloc(&mut self) -> Result<*mut T, Exhausted> { + unsafe fn alloc(&mut self) -> Result, Exhausted> { let ptr = match Alloc::alloc(&mut self.alloc, Layout::new::()) { - Ok(ptr) => ptr as *mut T, + Ok(ptr) => NonNull::new(ptr).unwrap().cast(), Err(AllocErr::Exhausted { .. 
}) => return Err(Exhausted), Err(AllocErr::Unsupported { details }) => { unreachable!("unexpected unsupported alloc: {}", details) } }; - use self::core::ptr::write; - write(ptr, T::default()); + ::std::ptr::write(ptr.as_ptr(), T::default()); Ok(ptr) } - unsafe fn dealloc(&mut self, ptr: *mut T) { - use self::core::ptr::drop_in_place; - drop_in_place(ptr); - Alloc::dealloc(&mut self.alloc, ptr as *mut u8, Layout::new::()); + unsafe fn dealloc(&mut self, ptr: NonNull) { + ::std::ptr::drop_in_place(ptr.as_ptr()); + Alloc::dealloc(&mut self.alloc, ptr.cast().as_ptr(), Layout::new::()); } } #[test] fn test_memory_corruption() { use corruption::{CorruptionTesterDefault, TestBuilder}; - TestBuilder::new(|| LeakyObjectAlloc::::new()).test(); + TestBuilder::new(|| LeakyObjectAlloc::::default()).test(); } } diff --git a/object-alloc-test/src/lib.rs b/object-alloc-test/src/lib.rs index 1d3a694..c7032e1 100644 --- a/object-alloc-test/src/lib.rs +++ b/object-alloc-test/src/lib.rs @@ -10,6 +10,7 @@ #![feature(test)] #![feature(const_fn)] #![feature(const_size_of)] +#![feature(nonnull_cast)] #![feature(plugin)] #![plugin(interpolate_idents)] diff --git a/object-alloc/CHANGELOG.md b/object-alloc/CHANGELOG.md index 22d4873..150b27e 100644 --- a/object-alloc/CHANGELOG.md +++ b/object-alloc/CHANGELOG.md @@ -15,3 +15,6 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added - Added this changelog + +### Changed +- Switched from `*mut u8` to `NonNull` for pointer values diff --git a/object-alloc/src/lib.rs b/object-alloc/src/lib.rs index b776427..8c45067 100644 --- a/object-alloc/src/lib.rs +++ b/object-alloc/src/lib.rs @@ -8,10 +8,12 @@ #![no_std] #![feature(alloc, allocator_api)] #![feature(core_intrinsics)] +#![feature(nonnull_cast)] extern crate alloc; use alloc::allocator::Layout; use core::intrinsics::abort; +use core::ptr::NonNull; /// An error indicating that no memory is available. /// @@ -72,7 +74,7 @@ pub unsafe trait ObjectAlloc { /// /// The memory returned by `alloc` is guaranteed to be aligned according to the requirements of /// `T` (that is, according to `core::mem::align_of::()`). - unsafe fn alloc(&mut self) -> Result<*mut T, Exhausted>; + unsafe fn alloc(&mut self) -> Result, Exhausted>; /// Deallocates an object previously returned by `alloc`. /// @@ -85,7 +87,7 @@ pub unsafe trait ObjectAlloc { /// objects), then `x` will be dropped at some point during the `ObjectAlloc`'s lifetime. This /// may happen during this call to `dealloc`, when the `ObjectAlloc` itself is dropped, or some /// time in between. - unsafe fn dealloc(&mut self, x: *mut T); + unsafe fn dealloc(&mut self, x: NonNull); /// Allocator-specific method for signalling an out-of-memory condition. /// @@ -128,13 +130,13 @@ pub unsafe trait UntypedObjectAlloc { /// /// The memory returned by `alloc` is guaranteed to abide by the `Layout` returned from /// `layout`. - unsafe fn alloc(&mut self) -> Result<*mut u8, Exhausted>; + unsafe fn alloc(&mut self) -> Result, Exhausted>; /// Deallocates an object previously returned by `alloc`. /// /// If `x` was not obtained through a call to `alloc`, or if `x` has already been `dealloc`'d, /// the behavior of `dealloc` is undefined. - unsafe fn dealloc(&mut self, x: *mut u8); + unsafe fn dealloc(&mut self, x: NonNull); /// Allocator-specific method for signalling an out-of-memory condition. 
/// @@ -167,11 +169,11 @@ unsafe impl UntypedObjectAlloc for ObjectAlloc { Layout::new::() } - unsafe fn alloc(&mut self) -> Result<*mut u8, Exhausted> { - ObjectAlloc::alloc(self).map(|x| x as *mut u8) + unsafe fn alloc(&mut self) -> Result, Exhausted> { + ObjectAlloc::alloc(self).map(|x| x.cast()) } - unsafe fn dealloc(&mut self, x: *mut u8) { - ObjectAlloc::dealloc(self, x as *mut T); + unsafe fn dealloc(&mut self, x: NonNull) { + ObjectAlloc::dealloc(self, x.cast()); } } diff --git a/slab-alloc/CHANGELOG.md b/slab-alloc/CHANGELOG.md index eade1ad..9902660 100644 --- a/slab-alloc/CHANGELOG.md +++ b/slab-alloc/CHANGELOG.md @@ -16,6 +16,14 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). ### Added - Added this changelog +### Changed +- Upgraded to new `ObjectAlloc` and `UntypedObjectAlloc` traits that use + `NonNull` instead of `*mut u8` +- Switch from `*mut T` to `NonNull` or `Option>` for various + internal pointers +- Make `PtrHashMap` support any value type that is `Copy`, not just raw + pointers + ### Fixed - Fixed a bug that prevented compilation on 32-bit Windows - Fixed a bug caused by `sysconf` 0.3.0 that prevented compilation on Windows diff --git a/slab-alloc/Cargo.toml b/slab-alloc/Cargo.toml index d649fd9..052c4dc 100644 --- a/slab-alloc/Cargo.toml +++ b/slab-alloc/Cargo.toml @@ -41,8 +41,8 @@ hashmap-no-coalesce = [] [dependencies] interpolate_idents = "0.2.2" lazy_static = { version = "1.0.0", features = ["spin_no_std"] } -mmap-alloc = "0.1.0" -object-alloc = "0.1.0" +mmap-alloc = { path = "../mmap-alloc" } +object-alloc = { path = "../object-alloc" } object-alloc-test = { path = "../object-alloc-test" } rand = "0.3" sysconf = "0.3.1" diff --git a/slab-alloc/src/aligned.rs b/slab-alloc/src/aligned.rs index d7ff967..1a75f39 100644 --- a/slab-alloc/src/aligned.rs +++ b/slab-alloc/src/aligned.rs @@ -26,19 +26,20 @@ extern crate alloc; extern crate object_alloc; -use {OBJECTS_PER_SLAB, PAGE_SIZE, stack}; -use stack::{SlabHeader, Layout}; +use {stack, OBJECTS_PER_SLAB, PAGE_SIZE}; +use stack::{Layout, SlabHeader}; use init::InitSystem; use self::alloc::allocator; use self::object_alloc::UntypedObjectAlloc; +use core::ptr::NonNull; pub struct ConfigData; impl stack::ConfigData for ConfigData { - fn ptr_to_slab(&self, slab_size: usize, ptr: *mut u8) -> *mut SlabHeader { - let slab = ptr as usize & !(slab_size - 1); + fn ptr_to_slab(&self, slab_size: usize, ptr: NonNull) -> NonNull { + let slab = ptr.as_ptr() as usize & !(slab_size - 1); debug_assert_eq!(slab % slab_size, 0); - slab as *mut SlabHeader + unsafe { NonNull::new_unchecked(slab as *mut SlabHeader) } } } @@ -72,9 +73,8 @@ pub fn backing_size_for(layout: &allocator::Layout) -> usize { } let unused = |slab_size: usize| { - Layout::for_slab_size(layout.clone(), slab_size).map(|(layout, unused)| { - (layout.num_obj, unused) - }) + Layout::for_slab_size(layout.clone(), slab_size) + .map(|(layout, unused)| (layout.num_obj, unused)) }; // We guarantee that we never request aligned slabs smaller than a page (see the Documentation diff --git a/slab-alloc/src/backing.rs b/slab-alloc/src/backing.rs index 464bc2b..7f45c13 100644 --- a/slab-alloc/src/backing.rs +++ b/slab-alloc/src/backing.rs @@ -41,6 +41,7 @@ pub mod alloc { extern crate object_alloc; use self::alloc::allocator::{Alloc, AllocErr, Layout}; use self::object_alloc::{Exhausted, UntypedObjectAlloc}; + use core::ptr::NonNull; /// An `UntypedObjectAlloc` that uses an arbitrary allocator. 
#[derive(Clone)] @@ -60,9 +61,9 @@ pub mod alloc { self.layout.clone() } - unsafe fn alloc(&mut self) -> Result<*mut u8, Exhausted> { + unsafe fn alloc(&mut self) -> Result, Exhausted> { match self.alloc.alloc(self.layout.clone()) { - Ok(ptr) => Ok(ptr), + Ok(ptr) => Ok(NonNull::new_unchecked(ptr)), Err(AllocErr::Exhausted { .. }) => Err(Exhausted), Err(AllocErr::Unsupported { details }) => { unreachable!("unexpected unsupported alloc: {}", details) @@ -70,8 +71,8 @@ pub mod alloc { } } - unsafe fn dealloc(&mut self, ptr: *mut u8) { - self.alloc.dealloc(ptr, self.layout.clone()); + unsafe fn dealloc(&mut self, ptr: NonNull) { + self.alloc.dealloc(ptr.as_ptr(), self.layout.clone()); } } } diff --git a/slab-alloc/src/init.rs b/slab-alloc/src/init.rs index aa1d12a..f8512a2 100644 --- a/slab-alloc/src/init.rs +++ b/slab-alloc/src/init.rs @@ -6,6 +6,7 @@ // copied, modified, or distributed except according to those terms. use core::marker::PhantomData; +use core::ptr::NonNull; /// A system to perform initialization of objects. /// @@ -35,7 +36,7 @@ pub trait InitSystem { /// /// `pack` packs a pointer and a status into a `usize`. `ptr` must be aligned to `min_align()`, /// or else the behavior of `pack` is undefined. - fn pack(ptr: *mut u8, status: Self::Status) -> usize; + fn pack(ptr: NonNull, status: Self::Status) -> usize; /// Unpack a `Status` from a packed `usize`. /// @@ -47,20 +48,20 @@ pub trait InitSystem { /// /// `unpack_ptr` unpacks a pointer from `packed`. It should only be called on values that were /// previously returned from `pack`. - fn unpack_ptr(packed: usize) -> *mut u8; + fn unpack_ptr(packed: usize) -> NonNull; /// Initialize an object. /// /// For implementations that perform initialization, `init` initializes `obj` if it is /// currently uninitialized. For implementations that do not perform initialization, `init` is /// a no-op. - fn init(&self, obj: *mut u8, init_status: Self::Status); + fn init(&self, obj: NonNull, init_status: Self::Status); /// Drop an object. /// /// For implementations that perform initialization, `drop` drops `obj` if it is currently /// initialized. For implementations that do not perform initialization, `drop` is a no-op. 
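// Worked example (illustrative only, not part of this patch) of the pointer/status
// packing described above: `InitInitSystem` reports `min_align() == 2`, so the low bit
// of a packed object pointer is free to carry the "initialized" flag. For an object at
// a hypothetical address 0x1000 (T and I are placeholder type parameters):
//
//     let ptr = NonNull::new(0x1000 as *mut u8).unwrap();
//     let packed = <InitInitSystem<T, I> as InitSystem>::pack(ptr, true); // == 0x1001
//     assert!(<InitInitSystem<T, I> as InitSystem>::unpack_status(packed));
//     assert_eq!(<InitInitSystem<T, I> as InitSystem>::unpack_ptr(packed), ptr);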
- fn drop(obj: *mut u8, init_status: Self::Status); + fn drop(obj: NonNull, init_status: Self::Status); } pub struct NopInitSystem; @@ -77,17 +78,17 @@ impl InitSystem for NopInitSystem { fn min_align() -> usize { 1 } - fn pack(ptr: *mut u8, _status: ()) -> usize { - ptr as usize + fn pack(ptr: NonNull, _status: ()) -> usize { + ptr.as_ptr() as usize } fn unpack_status(_packed: usize) -> () { () } - fn unpack_ptr(packed: usize) -> *mut u8 { - packed as *mut u8 + fn unpack_ptr(packed: usize) -> NonNull { + unsafe { NonNull::new_unchecked(packed as *mut u8) } } - fn init(&self, _obj: *mut u8, _init_status: ()) {} - fn drop(_obj: *mut u8, _init_status: ()) {} + fn init(&self, _obj: NonNull, _init_status: ()) {} + fn drop(_obj: NonNull, _init_status: ()) {} } pub struct InitInitSystem> { @@ -117,38 +118,38 @@ impl> InitSystem for InitInitSystem { 2 } - fn pack(ptr: *mut u8, status: bool) -> usize { - (ptr as usize) | if status { 1 } else { 0 } + fn pack(ptr: NonNull, status: bool) -> usize { + (ptr.as_ptr() as usize) | if status { 1 } else { 0 } } fn unpack_status(packed: usize) -> bool { packed & 1 == 1 } - fn unpack_ptr(packed: usize) -> *mut u8 { - (packed & (::max_value() - 1)) as *mut u8 + fn unpack_ptr(packed: usize) -> NonNull { + unsafe { NonNull::new_unchecked((packed & (::max_value() - 1)) as *mut u8) } } - fn init(&self, obj: *mut u8, init: bool) { + fn init(&self, obj: NonNull, init: bool) { if !init { unsafe { - self.init.init(obj as *mut T); + self.init.init(obj.cast()); } } } - fn drop(obj: *mut u8, init: bool) { + fn drop(obj: NonNull, init: bool) { if init { unsafe { use core::ptr::drop_in_place; - drop_in_place(obj as *mut T); + drop_in_place::(obj.cast().as_ptr()); } } } } pub unsafe trait Initializer { - unsafe fn init(&self, ptr: *mut T); + unsafe fn init(&self, ptr: NonNull); } #[derive(Default)] @@ -158,14 +159,15 @@ pub struct DefaultInitializer { impl DefaultInitializer { pub fn new() -> DefaultInitializer { - DefaultInitializer { _marker: PhantomData } + DefaultInitializer { + _marker: PhantomData, + } } } unsafe impl Initializer for DefaultInitializer { - unsafe fn init(&self, ptr: *mut T) { - use core::ptr::write; - write(ptr, T::default()); + unsafe fn init(&self, ptr: NonNull) { + ::core::ptr::write(ptr.as_ptr(), T::default()); } } @@ -178,22 +180,21 @@ impl T> FnInitializer { } unsafe impl T> Initializer for FnInitializer { - unsafe fn init(&self, ptr: *mut T) { - use core::ptr::write; - write(ptr, (self.0)()); + unsafe fn init(&self, ptr: NonNull) { + ::core::ptr::write(ptr.as_ptr(), (self.0)()); } } -pub struct UnsafeFnInitializer(F, PhantomData); +pub struct UnsafeFnInitializer)>(F, PhantomData); -impl UnsafeFnInitializer { +impl)> UnsafeFnInitializer { pub fn new(f: F) -> UnsafeFnInitializer { UnsafeFnInitializer(f, PhantomData) } } -unsafe impl Initializer for UnsafeFnInitializer { - unsafe fn init(&self, ptr: *mut T) { +unsafe impl)> Initializer for UnsafeFnInitializer { + unsafe fn init(&self, ptr: NonNull) { (self.0)(ptr); } } diff --git a/slab-alloc/src/large.rs b/slab-alloc/src/large.rs index 427d101..ce4d16e 100644 --- a/slab-alloc/src/large.rs +++ b/slab-alloc/src/large.rs @@ -27,57 +27,57 @@ extern crate alloc; extern crate object_alloc; -use {PAGE_SIZE, PAGE_ALIGN_MASK, OBJECTS_PER_SLAB}; +use {OBJECTS_PER_SLAB, PAGE_ALIGN_MASK, PAGE_SIZE}; use stack; -use stack::{SlabHeader, Layout}; +use stack::{Layout, SlabHeader}; use init::InitSystem; use util::ptrmap::*; -// use size::TypeSize; use self::alloc::allocator; use 
self::object_alloc::UntypedObjectAlloc; +use core::ptr::NonNull; pub struct ConfigData { - pub map: Map, + pub map: Map>, map_by_page_addr: bool, } impl stack::ConfigData for ConfigData { - fn post_alloc(&mut self, layout: &Layout, slab_size: usize, slab: *mut SlabHeader) { + fn post_alloc(&mut self, layout: &Layout, slab_size: usize, slab: NonNull) { if self.map_by_page_addr { for i in 0..(slab_size / *PAGE_SIZE) { - let page_ptr = ((slab as usize) + (i * *PAGE_SIZE)) as *mut u8; + let page_ptr = ((slab.as_ptr() as usize) + (i * *PAGE_SIZE)) as *mut u8; debug_assert_eq!(page_ptr as usize % *PAGE_SIZE, 0); self.map.insert(page_ptr, slab); } } else { for i in 0..layout.num_obj { - let ptr = layout.nth_obj(slab, unsafe { (*slab).get_color() }, i); - self.map.insert(ptr, slab); + let ptr = layout.nth_obj(slab, unsafe { (*slab.as_ptr()).get_color() }, i); + self.map.insert(ptr.as_ptr(), slab); } } } - fn pre_dealloc(&mut self, layout: &Layout, slab_size: usize, slab: *mut SlabHeader) { + fn pre_dealloc(&mut self, layout: &Layout, slab_size: usize, slab: NonNull) { if self.map_by_page_addr { for i in 0..(slab_size / *PAGE_SIZE) { - let page_ptr = ((slab as usize) + (i * *PAGE_SIZE)) as *mut u8; + let page_ptr = ((slab.as_ptr() as usize) + (i * *PAGE_SIZE)) as *mut u8; debug_assert_eq!(page_ptr as usize % *PAGE_SIZE, 0); self.map.delete(page_ptr); } } else { for i in 0..layout.num_obj { - let ptr = layout.nth_obj(slab, unsafe { (*slab).get_color() }, i); - self.map.delete(ptr); + let ptr = layout.nth_obj(slab, unsafe { (*slab.as_ptr()).get_color() }, i); + self.map.delete(ptr.as_ptr()); } } } - fn ptr_to_slab(&self, _slab_size: usize, ptr: *mut u8) -> *mut SlabHeader { + fn ptr_to_slab(&self, _slab_size: usize, ptr: NonNull) -> NonNull { if self.map_by_page_addr { self.map - .get(((ptr as usize) & *PAGE_ALIGN_MASK) as *mut u8) + .get(((ptr.as_ptr() as usize) & *PAGE_ALIGN_MASK) as *mut u8) } else { - self.map.get(ptr) + self.map.get(ptr.as_ptr()) } } } @@ -88,20 +88,22 @@ pub const DEFAULT_MAP_SIZE: usize = 256; impl System { pub fn new(layout: allocator::Layout, alloc: A) -> Option> { - if let Some((slab_layout, _)) = - Layout::for_slab_size(layout.clone(), alloc.layout().size()) { + if let Some((slab_layout, _)) = Layout::for_slab_size(layout.clone(), alloc.layout().size()) + { let map_by_page_addr = layout.size() < *PAGE_SIZE; let map_key_align = if map_by_page_addr { *PAGE_SIZE } else { layout.size().next_power_of_two() }; - Some(Self::from_config_data(ConfigData { - map: Map::new(DEFAULT_MAP_SIZE, map_key_align), - map_by_page_addr: map_by_page_addr, - }, - slab_layout, - alloc)) + Some(Self::from_config_data( + ConfigData { + map: Map::new(DEFAULT_MAP_SIZE, map_key_align), + map_by_page_addr: map_by_page_addr, + }, + slab_layout, + alloc, + )) } else { None } @@ -126,9 +128,8 @@ pub fn backing_size_for(layout: &allocator::Layout) -> usize { } let unused = |slab_size: usize| { - Layout::for_slab_size(layout.clone(), slab_size).map(|(layout, unused)| { - (layout.num_obj, unused) - }) + Layout::for_slab_size(layout.clone(), slab_size) + .map(|(layout, unused)| (layout.num_obj, unused)) }; // pick a reasonable lower bound on slab size to avoid wasting unnecessary work on slab sizes @@ -161,11 +162,11 @@ mod tests { let layout = Layout::new::(); let backing_layout = Layout::from_size_align(pagesize(), pagesize()).unwrap(); - let mut alloc = - SizedSlabAlloc::new(DefaultInitSystem::::new(DefaultInitializer::new()), - layout.clone(), - super::System::new(layout, heap::get_large(backing_layout)) - 
.unwrap()); + let mut alloc = SizedSlabAlloc::new( + DefaultInitSystem::::new(DefaultInitializer::new()), + layout.clone(), + super::System::new(layout, heap::get_large(backing_layout)).unwrap(), + ); let mut ptrs = Vec::new(); let size = super::DEFAULT_MAP_SIZE << i; @@ -195,6 +196,8 @@ mod tests { ); } - call_for_all_types_prefix!(make_test_hash_table_bucket_distribution, - test_hash_table_bucket_distribution); + call_for_all_types_prefix!( + make_test_hash_table_bucket_distribution, + test_hash_table_bucket_distribution + ); } diff --git a/slab-alloc/src/lib.rs b/slab-alloc/src/lib.rs index 9fdcdf8..af37386 100644 --- a/slab-alloc/src/lib.rs +++ b/slab-alloc/src/lib.rs @@ -51,9 +51,9 @@ #![cfg_attr(not(feature = "std"), no_std)] #![feature(alloc, allocator_api)] +#![feature(nonnull_cast)] #![feature(plugin)] #![cfg_attr(test, feature(test))] - #![plugin(interpolate_idents)] mod aligned; @@ -80,6 +80,7 @@ extern crate sysconf; use core::marker::PhantomData; use core::default::Default; use core::mem; +use core::ptr::NonNull; use self::util::list::*; use util::workingset::WorkingSet; use init::*; @@ -207,9 +208,10 @@ impl SlabAllocBuilder { /// required to be supported (at least a page in size and page-aligned), so `get_large` returns /// an allocator directly rather than an `Option`. pub fn build_backing(self, get_aligned: A, get_large: L) -> SlabAlloc - where B: BackingAlloc, - A: Fn(Layout) -> Option, - L: Fn(Layout) -> B::Large + where + B: BackingAlloc, + A: Fn(Layout) -> Option, + L: Fn(Layout) -> B::Large, { let layout = util::misc::satisfy_min_align(self.layout.clone(), I::min_align()); let aligned_backing_size = aligned::backing_size_for::(&layout); @@ -233,13 +235,15 @@ impl SlabAllocBuilder { /// /// `build_untyped_backing` is like `build_backing`, except that it builds an /// `UntypedSlabAlloc` instead of a `SlabAlloc`. - pub fn build_untyped_backing(self, - get_aligned: A, - get_large: L) - -> UntypedSlabAlloc - where B: BackingAlloc, - A: Fn(Layout) -> Option, - L: Fn(Layout) -> B::Large + pub fn build_untyped_backing( + self, + get_aligned: A, + get_large: L, + ) -> UntypedSlabAlloc + where + B: BackingAlloc, + A: Fn(Layout) -> Option, + L: Fn(Layout) -> B::Large, { let layout = util::misc::satisfy_min_align(self.layout.clone(), I::min_align()); let aligned_backing_size = aligned::backing_size_for::(&layout); @@ -287,7 +291,7 @@ impl T> SlabAllocBuilder> { } } -impl SlabAllocBuilder> { +impl)> SlabAllocBuilder> { /// Constructs a new builder for an allocator which uses `f` to initialize allocated objects. /// /// The constructed allocator will call `f` whenever a new object needs to be initialized. @@ -385,9 +389,10 @@ impl UntypedSlabAllocBuilder { /// required to be supported (at least a page in size and page-aligned), so `get_large` returns /// an allocator directly rather than an `Option`. 
pub fn build_backing(self, get_aligned: A, get_large: L) -> UntypedSlabAlloc - where B: BackingAlloc, - A: Fn(Layout) -> Option, - L: Fn(Layout) -> B::Large + where + B: BackingAlloc, + A: Fn(Layout) -> Option, + L: Fn(Layout) -> B::Large, { let layout = util::misc::satisfy_min_align(self.layout.clone(), I::min_align()); let aligned_backing_size = aligned::backing_size_for::(&layout); @@ -407,7 +412,7 @@ impl UntypedSlabAllocBuilder { } } -impl UntypedSlabAllocBuilder> { +impl)> UntypedSlabAllocBuilder> { pub fn func(layout: Layout, f: F) -> UntypedSlabAllocBuilder> { UntypedSlabAllocBuilder { init: UnsafeFnInitSystem::new(UnsafeFnInitializer::new(f)), @@ -426,18 +431,17 @@ impl UntypedSlabAllocBuilder { } unsafe impl ObjectAlloc for SlabAlloc { - unsafe fn alloc(&mut self) -> Result<*mut T, Exhausted> { + unsafe fn alloc(&mut self) -> Result, Exhausted> { match self.alloc { - PrivateSlabAlloc::Aligned(ref mut alloc) => alloc.alloc(), - PrivateSlabAlloc::Large(ref mut alloc) => alloc.alloc(), - } - .map(|ptr| ptr as *mut T) + PrivateSlabAlloc::Aligned(ref mut alloc) => alloc.alloc(), + PrivateSlabAlloc::Large(ref mut alloc) => alloc.alloc(), + }.map(NonNull::cast) } - unsafe fn dealloc(&mut self, x: *mut T) { + unsafe fn dealloc(&mut self, x: NonNull) { match self.alloc { - PrivateSlabAlloc::Aligned(ref mut alloc) => alloc.dealloc(x as *mut u8), - PrivateSlabAlloc::Large(ref mut alloc) => alloc.dealloc(x as *mut u8), + PrivateSlabAlloc::Aligned(ref mut alloc) => alloc.dealloc(x.cast()), + PrivateSlabAlloc::Large(ref mut alloc) => alloc.dealloc(x.cast()), } } } @@ -450,14 +454,14 @@ unsafe impl UntypedObjectAlloc for SlabAlloc< } } - unsafe fn alloc(&mut self) -> Result<*mut u8, Exhausted> { + unsafe fn alloc(&mut self) -> Result, Exhausted> { match self.alloc { PrivateSlabAlloc::Aligned(ref mut alloc) => alloc.alloc(), PrivateSlabAlloc::Large(ref mut alloc) => alloc.alloc(), } } - unsafe fn dealloc(&mut self, x: *mut u8) { + unsafe fn dealloc(&mut self, x: NonNull) { match self.alloc { PrivateSlabAlloc::Aligned(ref mut alloc) => alloc.dealloc(x), PrivateSlabAlloc::Large(ref mut alloc) => alloc.dealloc(x), @@ -473,14 +477,14 @@ unsafe impl UntypedObjectAlloc for UntypedSlabAl } } - unsafe fn alloc(&mut self) -> Result<*mut u8, Exhausted> { + unsafe fn alloc(&mut self) -> Result, Exhausted> { match self.alloc { PrivateUntypedSlabAlloc::Aligned(ref mut alloc) => alloc.alloc(), PrivateUntypedSlabAlloc::Large(ref mut alloc) => alloc.alloc(), } } - unsafe fn dealloc(&mut self, x: *mut u8) { + unsafe fn dealloc(&mut self, x: NonNull) { match self.alloc { PrivateUntypedSlabAlloc::Aligned(ref mut alloc) => alloc.dealloc(x), PrivateUntypedSlabAlloc::Large(ref mut alloc) => alloc.dealloc(x), @@ -493,7 +497,8 @@ struct SizedSlabAlloc> { total_slabs: usize, num_full: usize, // number of full slabs refcnt: usize, - full_slab_working_set: WorkingSet, /* minimum number of slabs full at every moment during this working period */ + // minimum number of slabs full at every moment during this working period + full_slab_working_set: WorkingSet, slab_system: S, init_system: I, @@ -515,7 +520,7 @@ impl> SizedSlabAlloc { } } - fn alloc(&mut self) -> Result<*mut u8, Exhausted> { + fn alloc(&mut self) -> Result, Exhausted> { if self.freelist.size() == 0 { let ok = self.alloc_slab(); if !ok { @@ -534,7 +539,7 @@ impl> SizedSlabAlloc { self.freelist.remove_front(); } self.refcnt += 1; - debug_assert_eq!(obj as usize % self.layout.align(), 0); + debug_assert_eq!(obj.as_ptr() as usize % self.layout.align(), 0); 
self.init_system.init(obj, init_status); Ok(obj) } @@ -544,21 +549,20 @@ impl> SizedSlabAlloc { /// Allocates a new slab and inserts it onto the back of the freelist. Returns `true` upon /// success and `false` upon failure. fn alloc_slab(&mut self) -> bool { - let new = self.slab_system.alloc_slab(); - if new.is_null() { - return false; + if let Some(new) = self.slab_system.alloc_slab() { + // technically it doesn't matter whether it's back or front since this is only called when + // the list is currently empty + self.freelist.insert_back(new); + self.total_slabs += 1; + self.num_full += 1; + true + } else { + false } - - // technically it doesn't matter whether it's back or front since this is only called when - // the list is currently empty - self.freelist.insert_back(new); - self.total_slabs += 1; - self.num_full += 1; - true } - fn dealloc(&mut self, ptr: *mut u8) { - debug_assert_eq!(ptr as usize % self.layout.align(), 0); + fn dealloc(&mut self, ptr: NonNull) { + debug_assert_eq!(ptr.as_ptr() as usize % self.layout.align(), 0); let (slab, was_empty) = self.slab_system.dealloc(ptr, I::status_initialized()); let is_full = self.slab_system.is_full(slab); @@ -593,8 +597,7 @@ impl> SizedSlabAlloc { } fn garbage_collect_slabs(&mut self) { - if let Some(min_full) = self.full_slab_working_set - .refresh(WORKING_PERIOD_SECONDS) { + if let Some(min_full) = self.full_slab_working_set.refresh(WORKING_PERIOD_SECONDS) { for _ in 0..min_full { let slab = self.freelist.remove_back(); self.slab_system.dealloc_slab(slab); @@ -630,18 +633,18 @@ trait SlabSystem { /// Allocate a new `Slab`. /// - /// The returned `Slab` has its next and previous pointers initialized to null. - fn alloc_slab(&mut self) -> *mut Self::Slab; - fn dealloc_slab(&mut self, slab: *mut Self::Slab); + /// The returned `Slab` has its next and previous pointers initialized to `None`. + fn alloc_slab(&mut self) -> Option>; + fn dealloc_slab(&mut self, slab: NonNull); /// `is_full` returns true if all objects are available for allocation. - fn is_full(&self, slab: *mut Self::Slab) -> bool; + fn is_full(&self, slab: NonNull) -> bool; /// `is_empty` returns true if no objects are available for allocation. - fn is_empty(&self, slab: *mut Self::Slab) -> bool; + fn is_empty(&self, slab: NonNull) -> bool; /// `alloc` allocates a new object from the given `Slab`. - fn alloc(&self, slab: *mut Self::Slab) -> (*mut u8, I::Status); + fn alloc(&self, slab: NonNull) -> (NonNull, I::Status); /// `dealloc` deallocates the given object. It is `dealloc`'s responsibility to find the /// object's parent `Slab` and return it. It also returns whether the `Slab` was empty prior to /// deallocation. - fn dealloc(&self, obj: *mut u8, init_status: I::Status) -> (*mut Self::Slab, bool); + fn dealloc(&self, obj: NonNull, init_status: I::Status) -> (NonNull, bool); } diff --git a/slab-alloc/src/ptr_map.rs b/slab-alloc/src/ptr_map.rs index f974121..00f0040 100644 --- a/slab-alloc/src/ptr_map.rs +++ b/slab-alloc/src/ptr_map.rs @@ -12,6 +12,8 @@ // beginning. We can do better - we could keep track of what bucket/index we're currently at, and // start the next search from that point. // - Once Vec and Box support parametric allocators, use that functionality. +// - Is there something safer we can do for the value field of empty buckets other than +// mem::uninitialized? 
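// Illustrative sketch (not part of this patch): with values generalized from `*mut V`
// to any `V: Copy`, a caller such as `large::System` can now store `NonNull<SlabHeader>`
// values directly rather than raw pointers, roughly:
//
//     let mut map: PtrHashMap<u8, NonNull<SlabHeader>> = PtrHashMap::new(256, page_size);
//     map.insert(page_ptr, slab);      // page address -> owning slab's header
//     let found: NonNull<SlabHeader> = map.get(page_ptr);
//
// (`page_size` and `page_ptr` are stand-ins here; `large.rs` uses `*PAGE_SIZE` as the
// key alignment and page-aligned addresses as keys.)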
// Design // @@ -58,6 +60,7 @@ extern crate alloc; use core::ptr; +use mem; const BUCKET_SIZE: usize = 4; @@ -67,7 +70,7 @@ pub struct PtrHashMap { align_shift: u32, } -impl PtrHashMap { +impl PtrHashMap { /// Constructs a new `PtrHashMap`. /// /// `size` is a hint for how many elements will be stored in the map. @@ -97,7 +100,7 @@ impl PtrHashMap { /// `dump_by_bucket` creates a copy of the table, with each bucket represented by a `Vec<(*mut /// K, *mut V)>`. #[cfg_attr(not(test), allow(unused))] - pub fn dump_by_bucket(&self) -> Vec> { + pub fn dump_by_bucket(&self) -> Vec> { let mut res = Vec::with_capacity(self.vec.len()); for b in &self.vec { res.push(b.dump()); @@ -132,7 +135,7 @@ impl PtrHashMap { // Get the value associated with the given key. The key must exist, or else get will panic. #[inline] - pub fn get(&self, ptr: *mut K) -> *mut V { + pub fn get(&self, ptr: *mut K) -> V { self.get_bucket(ptr).get(ptr) } @@ -141,7 +144,7 @@ impl PtrHashMap { /// # Panics /// The key must not already exist in the table, or else `insert` will panic. #[inline] - pub fn insert(&mut self, ptr: *mut K, v: *mut V) { + pub fn insert(&mut self, ptr: *mut K, v: V) { debug_assert!(!ptr.is_null()); #[cfg(not(feature = "hashmap-no-resize"))] { @@ -196,8 +199,8 @@ impl PtrHashMap { // first_free_bkt.is_null() is a sentinal value indicating that these variables are not // set. - let (mut first_free_bkt, mut first_free_idx) = (ptr::null_mut() as *mut Bucket, - 0); + let (mut first_free_bkt, mut first_free_idx) = + (ptr::null_mut() as *mut Bucket, 0); macro_rules! update_first_free { ($bkt:expr, $idx:expr) => ( if first_free_bkt.is_null() { @@ -217,7 +220,7 @@ impl PtrHashMap { if ((ptr as usize) >> self.align_shift) & old_len != 0 { unsafe { - (*cur_bkt).data[j] = (ptr::null_mut(), ptr::null_mut()); + (*cur_bkt).data[j] = (ptr::null_mut(), mem::uninitialized()); } update_first_free!(cur_bkt, j); let new_idx = i + old_len; @@ -229,7 +232,7 @@ impl PtrHashMap { if !first_free_bkt.is_null() { unsafe { // there's an earlier free slot - move this pointer there - (*cur_bkt).data[j] = (ptr::null_mut(), ptr::null_mut()); + (*cur_bkt).data[j] = (ptr::null_mut(), mem::uninitialized()); (*first_free_bkt).data[first_free_idx] = (ptr, v); // Now that we've filled this slot, we need to find the // next empty slot. 
We're guaranteed to only search at most @@ -239,8 +242,8 @@ impl PtrHashMap { if first_free_idx == BUCKET_SIZE - 1 { use core::ops::DerefMut; first_free_bkt = - (*first_free_bkt).next.as_mut().unwrap().deref_mut() as - *mut Bucket; + (*first_free_bkt).next.as_mut().unwrap().deref_mut() + as *mut Bucket; first_free_idx = 0; } else { first_free_idx += 1; @@ -291,16 +294,16 @@ impl PtrHashMap { } #[inline] -fn new_buckets() -> [(*mut K, *mut V); BUCKET_SIZE] { - [(ptr::null_mut(), ptr::null_mut()); BUCKET_SIZE] +fn new_buckets() -> [(*mut K, V); BUCKET_SIZE] { + unsafe { [(ptr::null_mut(), mem::uninitialized()); BUCKET_SIZE] } } pub struct Bucket { - data: [(*mut K, *mut V); BUCKET_SIZE], + data: [(*mut K, V); BUCKET_SIZE], next: Option>>, } -impl Clone for Bucket { +impl Clone for Bucket { fn clone(&self) -> Bucket { Bucket { data: self.data, @@ -309,7 +312,7 @@ impl Clone for Bucket { } } -impl Default for Bucket { +impl Default for Bucket { fn default() -> Bucket { Bucket { data: new_buckets(), @@ -318,9 +321,9 @@ impl Default for Bucket { } } -impl Bucket { +impl Bucket { #[cfg_attr(not(test), allow(unused))] - fn dump(&self) -> Vec<(*mut K, *mut V)> { + fn dump(&self) -> Vec<(*mut K, V)> { let mut res = Vec::new(); for i in &self.data { if !i.0.is_null() { @@ -334,7 +337,7 @@ impl Bucket { } #[inline] - fn get(&self, ptr: *mut K) -> *mut V { + fn get(&self, ptr: *mut K) -> V { for i in &self.data { if ptr == i.0 { return i.1; @@ -370,8 +373,8 @@ impl Bucket { // Starting at the idx index into this bucket, find the last slot in this bucket chain that is // non-empty. Zero it out and return it. Also return a boolean indicating whether this bucket // is now empty (and should thus be deleted). - fn remove_last(&mut self, idx: usize) -> ((*mut K, *mut V), bool) { - let mut last = (ptr::null_mut(), ptr::null_mut()); + fn remove_last(&mut self, idx: usize) -> ((*mut K, V), bool) { + let mut last = unsafe { (ptr::null_mut(), mem::uninitialized()) }; let mut last_idx = None; for i in BUCKET_SIZE..idx { unsafe { @@ -397,7 +400,8 @@ impl Bucket { // We don't have any children. Thus, this is really the last full slot. Zero it out // and return it. unsafe { - *self.data.get_unchecked_mut(last_idx) = (ptr::null_mut(), ptr::null_mut()); + *self.data.get_unchecked_mut(last_idx) = + (ptr::null_mut(), mem::uninitialized()); } // If we just deleted the first (and thus only) slot in the bucket, then this // bucket is now empty and should be deleted. @@ -410,11 +414,11 @@ impl Bucket { // Since buckets are guaranteed to not have any gaps, this means that we're the last // bucket and so our search is done. Since idx is greater than zero, we know that this // bucket isn't empty, and thus shouldn't be deleted. 
- ((ptr::null_mut(), ptr::null_mut()), false) + unsafe { ((ptr::null_mut(), mem::uninitialized()), false) } } } - fn insert(&mut self, ptr: *mut K, v: *mut V) { + fn insert(&mut self, ptr: *mut K, v: V) { for i in 0..BUCKET_SIZE { unsafe { let cur = self.data.get_unchecked_mut(i); @@ -433,9 +437,9 @@ impl Bucket { let mut data = new_buckets(); data[0] = (ptr, v); self.next = Some(Box::new(Bucket { - data: data, - next: None, - })); + data: data, + next: None, + })); } } diff --git a/slab-alloc/src/stack.rs b/slab-alloc/src/stack.rs index 012e2b0..2e7ddbd 100644 --- a/slab-alloc/src/stack.rs +++ b/slab-alloc/src/stack.rs @@ -60,8 +60,9 @@ extern crate object_alloc; use SlabSystem; use init::InitSystem; use core::{mem, ptr}; +use core::ptr::NonNull; use util::stack::Stack; -use util::color::{ColorSettings, Color}; +use util::color::{Color, ColorSettings}; use util::list::*; use self::alloc::allocator; use self::object_alloc::UntypedObjectAlloc; @@ -71,26 +72,27 @@ use self::object_alloc::UntypedObjectAlloc; /// `ConfigData` completes the stack-based slab implementation by providing post-alloc and /// pre-dealloc hooks and by providing a mechanism to look up an object's containing slab. pub trait ConfigData - where Self: Sized +where + Self: Sized, { /// Perform per-slab post-allocation work. /// /// `post_alloc` is called after a newly-allocated slab has been initialized. It is optional, /// and defaults to a no-op. #[allow(unused)] - fn post_alloc(&mut self, layout: &Layout, slab_size: usize, slab: *mut SlabHeader) {} + fn post_alloc(&mut self, layout: &Layout, slab_size: usize, slab: NonNull) {} /// Perform per-slab pre-deallocation work. /// /// `pre_dealloc` is called before a slab is uninitialized and deallocated. It is optional, and /// defaults to a no-op. #[allow(unused)] - fn pre_dealloc(&mut self, layout: &Layout, slab_size: usize, slab: *mut SlabHeader) {} + fn pre_dealloc(&mut self, layout: &Layout, slab_size: usize, slab: NonNull) {} /// Look up an object's slab. /// /// Given an object, `ptr_to_slab` locates the slab containing that object. - fn ptr_to_slab(&self, slab_size: usize, ptr: *mut u8) -> *mut SlabHeader; + fn ptr_to_slab(&self, slab_size: usize, ptr: NonNull) -> NonNull; } pub struct System { @@ -112,75 +114,77 @@ impl System { impl SlabSystem for System { type Slab = SlabHeader; - fn alloc_slab(&mut self) -> *mut SlabHeader { + fn alloc_slab(&mut self) -> Option> { unsafe { let color = self.layout .color_settings .next_color(self.layout.layout.align()); let slab = match self.alloc.alloc() { - Ok(slab) => slab as *mut SlabHeader, - Err(..) => return ptr::null_mut(), + Ok(slab) => slab.cast(), + Err(..) 
=> return None, }; - ptr::write(slab, - SlabHeader { - stack: Stack::new(), - color: color, - next: ptr::null_mut(), - prev: ptr::null_mut(), - }); + ptr::write( + slab.as_ptr(), + SlabHeader { + stack: Stack::new(), + color: color, + next: None, + prev: None, + }, + ); let stack_data_ptr = self.layout.stack_begin(slab); for i in 0..self.layout.num_obj { let ptr = self.layout.nth_obj(slab, color, i); - (*slab) + (*slab.as_ptr()) .stack .push(stack_data_ptr, I::pack(ptr, I::status_uninitialized())); } self.data .post_alloc(&self.layout, self.alloc.layout().size(), slab); - slab + Some(slab) } } - fn dealloc_slab(&mut self, slab: *mut SlabHeader) { + fn dealloc_slab(&mut self, slab: NonNull) { unsafe { - debug_assert_eq!((*slab).stack.size(), self.layout.num_obj); + debug_assert_eq!((*slab.as_ptr()).stack.size(), self.layout.num_obj); self.data .pre_dealloc(&self.layout, self.alloc.layout().size(), slab); let stack_data_ptr = self.layout.stack_begin(slab); for _ in 0..self.layout.num_obj { - let packed = (*slab).stack.pop(stack_data_ptr); + let packed = (*slab.as_ptr()).stack.pop(stack_data_ptr); I::drop(I::unpack_ptr(packed), I::unpack_status(packed)); } - self.alloc.dealloc(slab as *mut u8); + self.alloc.dealloc(slab.cast()); } } - fn is_full(&self, slab: *mut SlabHeader) -> bool { - unsafe { (*slab).stack.size() == self.layout.num_obj } + fn is_full(&self, slab: NonNull) -> bool { + unsafe { (*slab.as_ptr()).stack.size() == self.layout.num_obj } } - fn is_empty(&self, slab: *mut SlabHeader) -> bool { - unsafe { (*slab).stack.size() == 0 } + fn is_empty(&self, slab: NonNull) -> bool { + unsafe { (*slab.as_ptr()).stack.size() == 0 } } - fn alloc(&self, slab: *mut SlabHeader) -> (*mut u8, I::Status) { + fn alloc(&self, slab: NonNull) -> (NonNull, I::Status) { unsafe { let stack_data_ptr = self.layout.stack_begin(slab); - let packed = (*slab).stack.pop(stack_data_ptr); + let packed = (*slab.as_ptr()).stack.pop(stack_data_ptr); (I::unpack_ptr(packed), I::unpack_status(packed)) } } - fn dealloc(&self, obj: *mut u8, init_status: I::Status) -> (*mut SlabHeader, bool) { + fn dealloc(&self, obj: NonNull, init_status: I::Status) -> (NonNull, bool) { unsafe { let slab = self.data.ptr_to_slab(self.alloc.layout().size(), obj); - let was_empty = (*slab).stack.size() == 0; + let was_empty = (*slab.as_ptr()).stack.size() == 0; let stack_data_ptr = self.layout.stack_begin(slab); - (*slab) + (*slab.as_ptr()) .stack .push(stack_data_ptr, I::pack(obj, init_status)); (slab, was_empty) @@ -190,22 +194,22 @@ impl SlabSystem for Syst pub struct SlabHeader { stack: Stack, // note: this is only the metadata; the real stack comes after this header - color: Color, // extra padding added before array beginning - next: *mut SlabHeader, - prev: *mut SlabHeader, + color: Color, // extra padding added before array beginning + next: Option>, + prev: Option>, } impl Linkable for SlabHeader { - fn next(&self) -> *mut SlabHeader { + fn next(&self) -> Option> { self.next } - fn prev(&self) -> *mut SlabHeader { + fn prev(&self) -> Option> { self.prev } - fn set_next(&mut self, next: *mut SlabHeader) { + fn set_next(&mut self, next: Option>) { self.next = next; } - fn set_prev(&mut self, prev: *mut SlabHeader) { + fn set_prev(&mut self, prev: Option>) { self.prev = prev; } } @@ -276,23 +280,38 @@ impl Layout { }; // assert that the objects fit within the slab - assert!(slab_size >= - l.array_begin_offset + l.color_settings.max_color().as_usize() + - (l.num_obj * obj_size)); + assert!( + slab_size + >= l.array_begin_offset + 
l.color_settings.max_color().as_usize() + + (l.num_obj * obj_size) + ); Some((l, unused_space)) } - fn array_begin(&self, slab: *mut SlabHeader, color: Color) -> *mut u8 { + fn array_begin(&self, slab: NonNull, color: Color) -> NonNull { debug_assert!(color.as_usize() <= self.color_settings.max_color().as_usize()); - ((slab as usize) + self.array_begin_offset + color.as_usize()) as *mut u8 + unsafe { + NonNull::new_unchecked( + ((slab.as_ptr() as usize) + self.array_begin_offset + color.as_usize()) as *mut u8, + ) + } } - fn stack_begin(&self, slab: *mut SlabHeader) -> *mut usize { - ((slab as usize) + self.stack_begin_offset) as *mut usize + fn stack_begin(&self, slab: NonNull) -> NonNull { + unsafe { + NonNull::new_unchecked( + ((slab.as_ptr() as usize) + self.stack_begin_offset) as *mut usize, + ) + } } - pub fn nth_obj(&self, slab: *mut SlabHeader, color: Color, n: usize) -> *mut u8 { + pub fn nth_obj(&self, slab: NonNull, color: Color, n: usize) -> NonNull { debug_assert!((n as usize) < self.num_obj); - (self.array_begin(slab, color) as usize + n * self.layout.size()) as *mut u8 + unsafe { + NonNull::new_unchecked( + (self.array_begin(slab, color).as_ptr() as usize + n * self.layout.size()) + as *mut u8, + ) + } } } diff --git a/slab-alloc/src/tests.rs b/slab-alloc/src/tests.rs index b72f8ce..039d515 100644 --- a/slab-alloc/src/tests.rs +++ b/slab-alloc/src/tests.rs @@ -15,15 +15,17 @@ extern crate test; use SlabAllocBuilder; use self::alloc::heap::{Alloc, Heap, Layout}; use self::object_alloc::{Exhausted, ObjectAlloc}; -use self::test::{Bencher, black_box}; +use self::test::{black_box, Bencher}; use self::object_alloc_test::leaky_alloc::LeakyAlloc; use backing::alloc::AllocObjectAlloc; use backing::BackingAlloc; use SlabAlloc; +use core::ptr::NonNull; + fn infer_allocator_type(alloc: &mut ObjectAlloc) { if false { - let _: Result<*mut T, Exhausted> = unsafe { alloc.alloc() }; + let _: Result, Exhausted> = unsafe { alloc.alloc() }; } } @@ -36,14 +38,14 @@ impl BackingAlloc for LeakyBackingAlloc { fn leaky_get_aligned(layout: Layout) -> Option> { if layout.align() == self::sysconf::page::pagesize() { - Some(AllocObjectAlloc::new(LeakyAlloc::new(), layout)) + Some(AllocObjectAlloc::new(LeakyAlloc::default(), layout)) } else { None } } fn leaky_get_large(layout: Layout) -> AllocObjectAlloc { - AllocObjectAlloc::new(LeakyAlloc::new(), layout) + AllocObjectAlloc::new(LeakyAlloc::default(), layout) } fn test_memory_corruption() { @@ -59,8 +61,8 @@ fn test_memory_corruption() { let new = move || { SlabAllocBuilder::default() .align(align) - .build_backing(leaky_get_aligned, leaky_get_large) as - SlabAlloc<_, _, LeakyBackingAlloc> + .build_backing(leaky_get_aligned, leaky_get_large) + as SlabAlloc<_, _, LeakyBackingAlloc> }; infer_allocator_type::>(&mut new()); TestBuilder::new(new).test_iters(iters).test(); @@ -81,13 +83,11 @@ fn test_quickcheck_memory_corruption() { let new = move || { SlabAllocBuilder::default() .align(align) - .build_backing(leaky_get_aligned, leaky_get_large) as - SlabAlloc<_, _, LeakyBackingAlloc> + .build_backing(leaky_get_aligned, leaky_get_large) + as SlabAlloc<_, _, LeakyBackingAlloc> }; infer_allocator_type::>(&mut new()); - TestBuilder::new(new) - .quickcheck_tests(tests) - .quickcheck(); + TestBuilder::new(new).quickcheck_tests(tests).quickcheck(); }; foreach_align::, _>(f, self::sysconf::page::pagesize()); } @@ -134,8 +134,10 @@ macro_rules! 
make_test_quickcheck_memory_corruption { } call_for_all_types_prefix!(make_test_memory_corruption, test_memory_corruption); -call_for_all_types_prefix!(make_test_quickcheck_memory_corruption, - quickcheck_memory_corruption); +call_for_all_types_prefix!( + make_test_quickcheck_memory_corruption, + quickcheck_memory_corruption +); #[cfg_attr(not(feature = "build-ignored-tests"), allow(unused))] fn bench_alloc_no_free(b: &mut Bencher) { @@ -171,10 +173,10 @@ fn bench_alloc_free_pairs(b: &mut Bencher) { // we're trying to bench the best-case performance, not the slab gc policy let t = unsafe { alloc.alloc().unwrap() }; b.iter(|| unsafe { - let t = alloc.alloc().unwrap(); - black_box(t); - alloc.dealloc(t); - }); + let t = alloc.alloc().unwrap(); + black_box(t); + alloc.dealloc(t); + }); unsafe { alloc.dealloc(t); } @@ -188,10 +190,10 @@ fn bench_alloc_free_pairs_no_init(b: &mut Bencher) { // we're trying to bench the best-case performance, not the slab gc policy let t = unsafe { alloc.alloc().unwrap() }; b.iter(|| unsafe { - let t = alloc.alloc().unwrap(); - black_box(t); - alloc.dealloc(t); - }); + let t = alloc.alloc().unwrap(); + black_box(t); + alloc.dealloc(t); + }); unsafe { alloc.dealloc(t); } @@ -201,10 +203,10 @@ fn bench_alloc_free_pairs_no_init(b: &mut Bencher) { fn bench_alloc_free_pairs_heap(b: &mut Bencher) { let layout = Layout::new::(); b.iter(|| unsafe { - let t = Heap.alloc(layout.clone()).unwrap(); - black_box(t); - Heap.dealloc(t, layout.clone()); - }); + let t = Heap.alloc(layout.clone()).unwrap(); + black_box(t); + Heap.dealloc(t, layout.clone()); + }); } macro_rules! make_bench_alloc_no_free { @@ -263,11 +265,17 @@ macro_rules! make_bench_alloc_free_pairs_heap { } call_for_all_types_prefix!(make_bench_alloc_no_free, bench_alloc_no_free); -call_for_all_types_prefix!(make_bench_alloc_no_free_no_init, - bench_alloc_no_free_no_init); +call_for_all_types_prefix!( + make_bench_alloc_no_free_no_init, + bench_alloc_no_free_no_init +); call_for_all_types_prefix!(make_bench_alloc_no_free_heap, bench_alloc_no_free_heap); call_for_all_types_prefix!(make_bench_alloc_free_pairs, bench_alloc_free_pairs); -call_for_all_types_prefix!(make_bench_alloc_free_pairs_no_init, - bench_alloc_free_pairs_no_init); -call_for_all_types_prefix!(make_bench_alloc_free_pairs_heap, - bench_alloc_free_pairs_heap); +call_for_all_types_prefix!( + make_bench_alloc_free_pairs_no_init, + bench_alloc_free_pairs_no_init +); +call_for_all_types_prefix!( + make_bench_alloc_free_pairs_heap, + bench_alloc_free_pairs_heap +); diff --git a/slab-alloc/src/util.rs b/slab-alloc/src/util.rs index 2e49b5f..b854be4 100644 --- a/slab-alloc/src/util.rs +++ b/slab-alloc/src/util.rs @@ -6,11 +6,13 @@ // copied, modified, or distributed except according to those terms. pub mod size { - pub fn choose_size, F>(sizes: I, - unused: F, - max_objects_per_slab: usize) - -> usize - where F: Fn(usize) -> Option<(usize, usize)> + pub fn choose_size, F>( + sizes: I, + unused: F, + max_objects_per_slab: usize, + ) -> usize + where + F: Fn(usize) -> Option<(usize, usize)>, { fn leq(a: f64, b: Option) -> bool { match b { @@ -50,8 +52,9 @@ pub mod size { pub mod stack { extern crate alloc; - use core::{mem, ptr}; use core::marker::PhantomData; + use core::{mem, ptr}; + use core::ptr::NonNull; /// A manually-allocated stack. The `Stack` object itself is only where the metadata lives; the /// data itself lives in memory which is manually allocated by the user. 
@@ -91,7 +94,7 @@ pub mod stack { /// /// The new size of the stack must not exceed the size for which the underlying memory /// (`data`) was allocated. - pub fn push(&mut self, data: *mut T, val: T) { + pub fn push(&mut self, data: NonNull, val: T) { Self::set_at_idx(data, self.size as isize, val); self.size += 1; } @@ -99,7 +102,7 @@ pub mod stack { /// Pops an element from the stack. /// /// The stack must not be empty. - pub fn pop(&mut self, data: *mut T) -> T { + pub fn pop(&mut self, data: NonNull) -> T { debug_assert!(self.size > 0); self.size -= 1; Self::get_at_idx(data, self.size as isize) @@ -112,17 +115,17 @@ pub mod stack { #[cfg_attr(feature = "cargo-clippy", allow(inline_always))] #[inline(always)] - fn get_at_idx(data: *mut T, idx: isize) -> T { + fn get_at_idx(data: NonNull, idx: isize) -> T { debug_assert!(idx >= 0); - unsafe { ptr::read(data.offset(idx)) } + unsafe { ptr::read(data.as_ptr().offset(idx)) } } #[cfg_attr(feature = "cargo-clippy", allow(inline_always))] #[inline(always)] - fn set_at_idx(data: *mut T, idx: isize, val: T) { + fn set_at_idx(data: NonNull, idx: isize, val: T) { debug_assert!(idx >= 0); unsafe { - ptr::write(data.offset(idx), val); + ptr::write(data.as_ptr().offset(idx), val); } } } @@ -233,16 +236,18 @@ pub mod ptrmap { pub map: PtrHashMap, } - impl Map { + impl Map { pub fn new(size_hint: usize, align: usize) -> Map { - Map { map: PtrHashMap::new(size_hint, align) } + Map { + map: PtrHashMap::new(size_hint, align), + } } - pub fn get(&self, k: *mut K) -> *mut V { + pub fn get(&self, k: *mut K) -> V { self.map.get(k) } - pub fn insert(&mut self, k: *mut K, v: *mut V) { + pub fn insert(&mut self, k: *mut K, v: V) { self.map.insert(k, v); } @@ -257,19 +262,21 @@ pub mod ptrmap { use std::collections::HashMap; pub struct Map { - map: HashMap<*mut K, *mut V>, + map: HashMap<*mut K, V>, } impl Map { pub fn new(size_hint: usize, _: usize) -> Map { - Map { map: HashMap::with_capacity(size_hint) } + Map { + map: HashMap::with_capacity(size_hint), + } } - pub fn get(&self, k: *mut K) -> *mut V { + pub fn get(&self, k: *mut K) -> V { *self.map.get(&k).unwrap() } - pub fn insert(&mut self, k: *mut K, v: *mut V) { + pub fn insert(&mut self, k: *mut K, v: V) { self.map.insert(k, v); } @@ -282,136 +289,138 @@ pub mod ptrmap { pub mod list { extern crate core; - use core::ptr; + use core::ptr::NonNull; pub trait Linkable { - fn next(&self) -> *mut Self; - fn prev(&self) -> *mut Self; - fn set_next(&mut self, ptr: *mut Self); - fn set_prev(&mut self, ptr: *mut Self); + fn next(&self) -> Option>; + fn prev(&self) -> Option>; + fn set_next(&mut self, ptr: Option>); + fn set_prev(&mut self, ptr: Option>); } pub struct LinkedList { size: usize, - head: *mut T, - tail: *mut T, + head: Option>, + tail: Option>, } impl LinkedList { pub fn new() -> LinkedList { LinkedList { size: 0, - head: ptr::null_mut(), - tail: ptr::null_mut(), + head: None, + tail: None, } } - pub fn insert_front(&mut self, t: *mut T) { + pub fn insert_front(&mut self, t: NonNull) { unsafe { - debug_assert!((*t).next().is_null()); - debug_assert!((*t).prev().is_null()); - if self.size == 0 { - self.head = t; - self.tail = t; + debug_assert!((*t.as_ptr()).next().is_none()); + debug_assert!((*t.as_ptr()).prev().is_none()); + if let Some(head) = self.head { + (*t.as_ptr()).set_next(Some(head)); + (*head.as_ptr()).set_prev(Some(t)); + self.head = Some(t); } else { - (*t).set_next(self.head); - (*self.head).set_prev(t); - self.head = t; + self.head = Some(t); + self.tail = Some(t); } self.size += 1; } 
} - pub fn insert_back(&mut self, t: *mut T) { + pub fn insert_back(&mut self, t: NonNull) { unsafe { - debug_assert!((*t).next().is_null()); - debug_assert!((*t).prev().is_null()); - if self.size == 0 { - self.head = t; - self.tail = t; + debug_assert!((*t.as_ptr()).next().is_none()); + debug_assert!((*t.as_ptr()).prev().is_none()); + if let Some(tail) = self.tail { + (*t.as_ptr()).set_prev(Some(tail)); + (*tail.as_ptr()).set_next(Some(t)); + self.tail = Some(t); } else { - (*self.tail).set_next(t); - (*t).set_prev(self.tail); - self.tail = t; + self.head = Some(t); + self.tail = Some(t); } self.size += 1; } } - pub fn remove_front(&mut self) -> *mut T { + pub fn remove_front(&mut self) -> NonNull { unsafe { debug_assert!(self.size > 0); let t = if self.size == 1 { - let t = self.head; - self.head = ptr::null_mut(); - self.tail = ptr::null_mut(); + let t = self.head.unwrap(); + self.head = None; + self.tail = None; t } else { - let t = self.head; - self.head = (*t).next(); - (*self.head).set_prev(ptr::null_mut()); - (*t).set_next(ptr::null_mut()); + let t = self.head.unwrap(); + let t_next = (*t.as_ptr()).next().unwrap(); + self.head = Some(t_next); + (*t_next.as_ptr()).set_prev(None); + (*t.as_ptr()).set_next(None); t }; - debug_assert!((*t).next().is_null()); - debug_assert!((*t).prev().is_null()); + debug_assert!((*t.as_ptr()).next().is_none()); + debug_assert!((*t.as_ptr()).prev().is_none()); self.size -= 1; t } } - pub fn remove_back(&mut self) -> *mut T { + pub fn remove_back(&mut self) -> NonNull { unsafe { debug_assert!(self.size > 0); let t = if self.size == 1 { - let t = self.head; - self.head = ptr::null_mut(); - self.tail = ptr::null_mut(); + let t = self.head.unwrap(); + self.head = None; + self.tail = None; t } else { - let t = self.tail; - self.tail = (*t).prev(); - (*self.tail).set_next(ptr::null_mut()); - (*t).set_prev(ptr::null_mut()); + let t = self.tail.unwrap(); + let t_prev = (*t.as_ptr()).prev().unwrap(); + self.tail = Some(t_prev); + (*t_prev.as_ptr()).set_next(None); + (*t.as_ptr()).set_prev(None); t }; - debug_assert!((*t).next().is_null()); - debug_assert!((*t).prev().is_null()); + debug_assert!((*t.as_ptr()).next().is_none()); + debug_assert!((*t.as_ptr()).prev().is_none()); self.size -= 1; t } } - pub fn move_to_back(&mut self, t: *mut T) { + pub fn move_to_back(&mut self, t: NonNull) { debug_assert!(self.size > 0); - if self.size == 1 || self.tail == t { + if self.size == 1 || self.tail == Some(t) { return; } unsafe { // remove from its place in the list - if self.head == t { - let next = (*t).next(); - (*next).set_prev(ptr::null_mut()); - self.head = next; + if self.head == Some(t) { + let next = (*t.as_ptr()).next().unwrap(); + (*next.as_ptr()).set_prev(None); + self.head = Some(next); } else { - let prev = (*t).prev(); - let next = (*t).next(); - (*prev).set_next(next); - (*next).set_prev(prev); + let prev = (*t.as_ptr()).prev().unwrap(); + let next = (*t.as_ptr()).next().unwrap(); + (*prev.as_ptr()).set_next(Some(next)); + (*next.as_ptr()).set_prev(Some(prev)); } // insert at the back of the list - (*t).set_prev(self.tail); - (*t).set_next(ptr::null_mut()); - (*self.tail).set_next(t); - self.tail = t; + (*t.as_ptr()).set_prev(self.tail); + (*t.as_ptr()).set_next(None); + (*self.tail.unwrap().as_ptr()).set_next(Some(t)); + self.tail = Some(t); } } - pub fn peek_front(&self) -> *mut T { + pub fn peek_front(&self) -> NonNull { debug_assert!(self.size > 0); - self.head + self.head.unwrap() } pub fn size(&self) -> usize { diff --git a/slab-alloc/travis.sh 
b/slab-alloc/travis.sh
index 7066d6a..1b92224 100755
--- a/slab-alloc/travis.sh
+++ b/slab-alloc/travis.sh
@@ -14,6 +14,8 @@ travis-cargo --only nightly build
 # Use opt-level=3 because the corruption tests take a long time, and the
 # optimized build saves more time in test execution than it adds in compilation
 # time.
+# TODO: Now that we have opt-level = 3 in the test profile in Cargo.toml,
+# is this necessary anymore?
 RUSTFLAGS='-C opt-level=3' RUST_BACKTRACE=1 travis-cargo --only nightly test
 # TODO: Once no-std and no-os work, add those features.
 for feature in build-ignored-tests use-stdlib-hashmap no-coloring hashmap-no-resize hashmap-no-coalesce; do
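
A note on the link representation in slab-alloc/src/stack.rs and slab-alloc/src/util.rs: replacing `*mut T` links with `Option<NonNull<T>>` does not grow any of the headers, because `Option<NonNull<T>>` is guaranteed to be pointer-sized, with `None` occupying the bit pattern that used to be the null pointer. A self-contained check of that property (the `RawLinks`/`NonNullLinks` types below are illustrative stand-ins, not types from this patch):

    use core::mem::size_of;
    use core::ptr::NonNull;

    #[allow(dead_code)]
    struct RawLinks {
        next: *mut RawLinks,
        prev: *mut RawLinks,
    }

    #[allow(dead_code)]
    struct NonNullLinks {
        next: Option<NonNull<NonNullLinks>>,
        prev: Option<NonNull<NonNullLinks>>,
    }

    fn main() {
        // Guaranteed by the null-pointer niche: `Option<NonNull<T>>` is
        // pointer-sized, with `None` using the old null-pointer bit pattern.
        assert_eq!(size_of::<Option<NonNull<u8>>>(), size_of::<*mut u8>());
        // Consequently the header links do not grow when the representation changes.
        assert_eq!(size_of::<NonNullLinks>(), size_of::<RawLinks>());

        // Converting a possibly-null raw pointer into the new representation:
        let raw: *mut u8 = core::ptr::null_mut();
        assert!(NonNull::new(raw).is_none());
    }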
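
The `Layout::array_begin`/`nth_obj` changes in slab-alloc/src/stack.rs compute object addresses as integer offsets from `slab.as_ptr()` and re-wrap the result with `NonNull::new_unchecked`, which is sound because the slab pointer is non-null and the added offsets keep the result inside the slab mapping. A reduced sketch of the same address computation (`ToyLayout` and its field values are made up for illustration and are not the crate's actual `Layout`):

    use core::ptr::NonNull;

    /// Illustrative stand-in for the slab layout bookkeeping: objects of
    /// `obj_size` bytes start `array_begin_offset` bytes after the header,
    /// shifted by a per-slab `color` offset for cache coloring.
    struct ToyLayout {
        array_begin_offset: usize,
        obj_size: usize,
    }

    impl ToyLayout {
        /// Address of the `n`th object in the slab starting at `slab`.
        unsafe fn nth_obj(&self, slab: NonNull<u8>, color: usize, n: usize) -> NonNull<u8> {
            let base = slab.as_ptr() as usize + self.array_begin_offset + color;
            // `new_unchecked` is fine here: `slab` is non-null and the offsets
            // are non-negative and in bounds, so the sum cannot be zero.
            NonNull::new_unchecked((base + n * self.obj_size) as *mut u8)
        }
    }

    fn main() {
        let mut slab = [0u8; 256];
        let slab_ptr = NonNull::new(slab.as_mut_ptr()).unwrap();
        let layout = ToyLayout { array_begin_offset: 64, obj_size: 16 };
        let obj2 = unsafe { layout.nth_obj(slab_ptr, 8, 2) };
        assert_eq!(obj2.as_ptr() as usize, slab_ptr.as_ptr() as usize + 64 + 8 + 32);
    }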
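
In the `stack` module of slab-alloc/src/util.rs, `push` and `pop` now take the backing storage as `NonNull<T>`; the `Stack` value itself still only tracks the length, and element reads and writes go through `ptr::read`/`ptr::write` on `data.as_ptr()`. A standalone sketch of that externally backed stack pattern, using a `Vec`'s spare capacity as the caller-owned storage (`ExternStack` is a name invented for the example, not the crate's `Stack`):

    use core::ptr::{self, NonNull};

    /// Metadata-only stack; the element storage is owned by the caller.
    struct ExternStack<T> {
        size: usize,
        _marker: core::marker::PhantomData<T>,
    }

    impl<T> ExternStack<T> {
        fn new() -> Self {
            ExternStack { size: 0, _marker: core::marker::PhantomData }
        }

        /// Caller must guarantee `data` has room for at least `size + 1` elements.
        unsafe fn push(&mut self, data: NonNull<T>, val: T) {
            ptr::write(data.as_ptr().add(self.size), val);
            self.size += 1;
        }

        /// Caller must guarantee the stack is non-empty and that `data` is the
        /// same storage that was passed to the matching `push`.
        unsafe fn pop(&mut self, data: NonNull<T>) -> T {
            debug_assert!(self.size > 0);
            self.size -= 1;
            ptr::read(data.as_ptr().add(self.size))
        }
    }

    fn main() {
        // Borrow a Vec's buffer as the external storage; the stack never frees it.
        let mut buf: Vec<u32> = Vec::with_capacity(4);
        let data = NonNull::new(buf.as_mut_ptr()).unwrap();

        let mut stack = ExternStack::new();
        unsafe {
            stack.push(data, 7);
            stack.push(data, 8);
            assert_eq!(stack.pop(data), 8);
            assert_eq!(stack.pop(data), 7);
        }
    }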
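
The `list` module keeps its intrusive design: `LinkedList` never owns the nodes, it only threads `next`/`prev` links through them, and those links are now `Option<NonNull<Self>>` so that "no neighbor" is `None` rather than a null pointer. A minimal usage-style sketch with caller-owned nodes (`Node` and `push_front` are illustrative and much simpler than the patched `LinkedList`):

    use core::ptr::NonNull;

    struct Node {
        value: u32,
        next: Option<NonNull<Node>>,
        prev: Option<NonNull<Node>>,
    }

    /// Push `node` at the front of the list rooted at `head`, returning the new
    /// head. The links are non-owning; the caller keeps ownership of the nodes.
    unsafe fn push_front(head: Option<NonNull<Node>>, mut node: NonNull<Node>) -> NonNull<Node> {
        debug_assert!(node.as_ref().next.is_none() && node.as_ref().prev.is_none());
        node.as_mut().next = head;
        if let Some(mut old_head) = head {
            old_head.as_mut().prev = Some(node);
        }
        node
    }

    fn main() {
        // Nodes owned by Boxes; the list links merely point at them.
        let mut a = Box::new(Node { value: 1, next: None, prev: None });
        let mut b = Box::new(Node { value: 2, next: None, prev: None });
        let a_ptr = NonNull::from(&mut *a);
        let b_ptr = NonNull::from(&mut *b);

        unsafe {
            let head = push_front(None, a_ptr);
            let head = push_front(Some(head), b_ptr);
            assert_eq!(head.as_ref().value, 2);
            assert_eq!(head.as_ref().next.unwrap().as_ref().value, 1);
        }
    }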