From 1ffe3453cb2bd4cc031b4f8a4bdb88279e01e094 Mon Sep 17 00:00:00 2001
From: Alexis Beingessner
Date: Mon, 20 Jul 2015 16:57:29 -0700
Subject: [PATCH 1/2] make Rc mem::forget safe

---
 src/liballoc/rc.rs | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs
index d461eeea0b7eb..3eaffa1e028a9 100644
--- a/src/liballoc/rc.rs
+++ b/src/liballoc/rc.rs
@@ -160,7 +160,7 @@ use core::cell::Cell;
 use core::cmp::Ordering;
 use core::fmt;
 use core::hash::{Hasher, Hash};
-use core::intrinsics::{assume, drop_in_place};
+use core::intrinsics::{assume, drop_in_place, abort};
 use core::marker::{self, Unsize};
 use core::mem::{self, align_of, size_of, align_of_val, size_of_val, forget};
 use core::nonzero::NonZero;
@@ -846,6 +846,15 @@ impl<T: ?Sized+fmt::Debug> fmt::Debug for Weak<T> {
     }
 }
 
+// NOTE: We checked_add here to deal with mem::forget safety. In particular
+// if you mem::forget Rcs (or Weaks), the ref-count can overflow, and then
+// you can free the allocation while outstanding Rcs (or Weaks) exist.
+// We abort because this is such a degenerate scenario that we don't care about
+// what happens -- no real program should ever experience this.
+//
+// This should have negligible overhead since you don't actually need to
+// clone these much in Rust thanks to ownership and move-semantics.
+
 #[doc(hidden)]
 trait RcBoxPtr<T: ?Sized> {
     fn inner(&self) -> &RcBox<T>;
@@ -854,7 +863,9 @@ trait RcBoxPtr<T: ?Sized> {
     fn strong(&self) -> usize { self.inner().strong.get() }
 
     #[inline]
-    fn inc_strong(&self) { self.inner().strong.set(self.strong() + 1); }
+    fn inc_strong(&self) {
+        self.inner().strong.set(self.strong().checked_add(1).unwrap_or_else(|| unsafe { abort() }));
+    }
 
     #[inline]
     fn dec_strong(&self) { self.inner().strong.set(self.strong() - 1); }
@@ -863,7 +874,9 @@ trait RcBoxPtr<T: ?Sized> {
     fn weak(&self) -> usize { self.inner().weak.get() }
 
     #[inline]
-    fn inc_weak(&self) { self.inner().weak.set(self.weak() + 1); }
+    fn inc_weak(&self) {
+        self.inner().weak.set(self.weak().checked_add(1).unwrap_or_else(|| unsafe { abort() }));
+    }
 
     #[inline]
     fn dec_weak(&self) { self.inner().weak.set(self.weak() - 1); }

From 22e21004582902cc1b7d1bef89d09728cbe64ca2 Mon Sep 17 00:00:00 2001
From: Alexis Beingessner
Date: Mon, 20 Jul 2015 17:09:44 -0700
Subject: [PATCH 2/2] make Arc mem::forget safe

---
 src/liballoc/arc.rs | 30 ++++++++++++++++++++++++++----
 1 file changed, 26 insertions(+), 4 deletions(-)

diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs
index 2a47fd29bd653..46f5df02d55e2 100644
--- a/src/liballoc/arc.rs
+++ b/src/liballoc/arc.rs
@@ -78,16 +78,18 @@ use core::atomic::Ordering::{Relaxed, Release, Acquire, SeqCst};
 use core::fmt;
 use core::cmp::Ordering;
 use core::mem::{align_of_val, size_of_val};
-use core::intrinsics::drop_in_place;
+use core::intrinsics::{drop_in_place, abort};
 use core::mem;
 use core::nonzero::NonZero;
 use core::ops::{Deref, CoerceUnsized};
 use core::ptr;
 use core::marker::Unsize;
 use core::hash::{Hash, Hasher};
-use core::usize;
+use core::{usize, isize};
 use heap::deallocate;
 
+const MAX_REFCOUNT: usize = (isize::MAX) as usize;
+
 /// An atomically reference counted wrapper for shared state.
 ///
 /// # Examples
@@ -311,7 +313,21 @@ impl<T: ?Sized> Clone for Arc<T> {
         // another must already provide any required synchronization.
         //
         // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
-        self.inner().strong.fetch_add(1, Relaxed);
+        let old_size = self.inner().strong.fetch_add(1, Relaxed);
+
+        // However we need to guard against massive refcounts in case someone
+        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
+        // and users will use-after free. We racily saturate to `isize::MAX` on
+        // the assumption that there aren't ~2 billion threads incrementing
+        // the reference count at once. This branch will never be taken in
+        // any realistic program.
+        //
+        // We abort because such a program is incredibly degenerate, and we
+        // don't care to support it.
+        if old_size > MAX_REFCOUNT {
+            unsafe { abort(); }
+        }
+
         Arc { _ptr: self._ptr }
     }
 }
@@ -612,7 +628,13 @@ impl<T: ?Sized> Clone for Weak<T> {
         // fetch_add (ignoring the lock) because the weak count is only locked
         // where are *no other* weak pointers in existence. (So we can't be
         // running this code in that case).
-        self.inner().weak.fetch_add(1, Relaxed);
+        let old_size = self.inner().weak.fetch_add(1, Relaxed);
+
+        // See comments in Arc::clone() for why we do this (for mem::forget).
+        if old_size > MAX_REFCOUNT {
+            unsafe { abort(); }
+        }
+
        return Weak { _ptr: self._ptr }
     }
 }
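Aside (not part of either patch): a minimal, self-contained sketch of the two guard patterns above, using std::process::abort and ordinary Cell/AtomicUsize counters as stand-ins for the core::intrinsics::abort intrinsic and the private RcBox/ArcInner count fields; all names here are illustrative, not the actual liballoc internals.

    // Illustrative sketch only -- not the liballoc implementation.
    use std::cell::Cell;
    use std::process::abort;
    use std::sync::atomic::{AtomicUsize, Ordering};

    // Mirrors the patch: abort once the count could cross into overflow territory.
    const MAX_REFCOUNT: usize = isize::MAX as usize;

    // Rc-style (single-threaded): checked_add detects the wrap before storing,
    // and abort() diverges, so the unwrap_or_else closure never returns a value.
    fn inc_strong(strong: &Cell<usize>) {
        strong.set(strong.get().checked_add(1).unwrap_or_else(|| abort()));
    }

    // Arc-style (multi-threaded): the increment has already happened, so we
    // inspect the old value that fetch_add returned. Aborting past isize::MAX
    // leaves roughly isize::MAX increments of headroom, so the count cannot
    // wrap to zero unless billions of threads all sit in this window at once.
    fn inc_strong_atomic(strong: &AtomicUsize) {
        let old_size = strong.fetch_add(1, Ordering::Relaxed);
        if old_size > MAX_REFCOUNT {
            abort();
        }
    }

    fn main() {
        let strong = Cell::new(1);
        inc_strong(&strong);

        let atomic_strong = AtomicUsize::new(1);
        inc_strong_atomic(&atomic_strong);

        println!("strong = {}, atomic strong = {}",
                 strong.get(),
                 atomic_strong.load(Ordering::Relaxed));
    }

The difference in shape falls out of atomicity: the Cell-based count can be checked before it is stored, while the atomic count has to be incremented first and the returned old value examined afterwards, which is why the Arc patch saturates racily instead of using checked_add.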