when an address gets reused, establish a happens-before link in the data race model
RalfJung committed Apr 16, 2024
1 parent e090ca2 commit 6992266
Showing 7 changed files with 127 additions and 36 deletions.
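
Conceptually, the change works like a release/acquire pair: the freeing thread stores a snapshot of its vector clock next to the address it donates to the reuse pool, and the thread that later receives that address joins the stored clock into its own, so the deallocation happens-before the new allocation. The sketch below illustrates only that pattern; the VClock type and its tick/join methods are simplified stand-ins invented for this example, not Miri's actual data race types.

use std::collections::HashMap;

/// A toy vector clock: thread id -> timestamp. Illustration only.
#[derive(Clone, Default, Debug)]
struct VClock(HashMap<usize, u64>);

impl VClock {
    fn tick(&mut self, thread: usize) {
        *self.0.entry(thread).or_insert(0) += 1;
    }
    /// Pointwise maximum, i.e. an "acquire" of `other` into `self`.
    fn join(&mut self, other: &VClock) {
        for (&t, &ts) in &other.0 {
            let e = self.0.entry(t).or_insert(0);
            *e = (*e).max(ts);
        }
    }
}

fn main() {
    let mut clocks = vec![VClock::default(), VClock::default()];

    // Thread 0 frees an allocation: it donates (address, clock snapshot) to the pool.
    clocks[0].tick(0);
    let pool_entry = (0x1000_u64, clocks[0].clone());

    // Thread 1 reuses that address: it joins the donated clock into its own,
    // so everything thread 0 did before the free happens-before the reuse.
    let (addr, donated) = pool_entry;
    clocks[1].join(&donated);
    println!("reused address {addr:#x}, clock of thread 1: {:?}", clocks[1]);
}
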
42 changes: 27 additions & 15 deletions src/alloc_addresses/mod.rs
@@ -14,7 +14,8 @@ use rustc_span::Span;
use rustc_target::abi::{Align, HasDataLayout, Size};

use crate::*;
use reuse_pool::ReusePool;

use self::reuse_pool::ReusePool;

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ProvenanceMode {
@@ -159,9 +160,12 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
assert!(!matches!(kind, AllocKind::Dead));

// This allocation does not have a base address yet, pick or reuse one.
let base_addr = if let Some(reuse_addr) =
let base_addr = if let Some((reuse_addr, clock)) =
global_state.reuse.take_addr(&mut *rng, size, align)
{
if let Some(data_race) = &ecx.machine.data_race {
data_race.validate_lock_acquire(&clock, ecx.get_active_thread());
}
reuse_addr
} else {
// We have to pick a fresh address.
@@ -328,14 +332,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
}

impl GlobalStateInner {
pub fn free_alloc_id(
&mut self,
rng: &mut impl Rng,
dead_id: AllocId,
size: Size,
align: Align,
) {
impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
pub fn free_alloc_id(&mut self, dead_id: AllocId, size: Size, align: Align) {
let global_state = self.alloc_addresses.get_mut();
let rng = self.rng.get_mut();

// We can *not* remove this from `base_addr`, since the interpreter design requires that we
// be able to retrieve an AllocId + offset for any memory access *before* we check if the
// access is valid. Specifically, `ptr_get_alloc` is called on each attempt at a memory
@@ -348,15 +349,26 @@ impl GlobalStateInner {
// returns a dead allocation.
// To avoid a linear scan we first look up the address in `base_addr`, and then find it in
// `int_to_ptr_map`.
let addr = *self.base_addr.get(&dead_id).unwrap();
let pos = self.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr).unwrap();
let removed = self.int_to_ptr_map.remove(pos);
let addr = *global_state.base_addr.get(&dead_id).unwrap();
let pos =
global_state.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr).unwrap();
let removed = global_state.int_to_ptr_map.remove(pos);
assert_eq!(removed, (addr, dead_id)); // double-check that we removed the right thing
// We can also remove it from `exposed`, since this allocation can anyway not be returned by
// `alloc_id_from_addr` any more.
self.exposed.remove(&dead_id);
global_state.exposed.remove(&dead_id);
// Also remember this address for future reuse.
self.reuse.add_addr(rng, addr, size, align)
global_state.reuse.add_addr(rng, addr, size, align, || {
let mut clock = concurrency::VClock::default();
if let Some(data_race) = &self.data_race {
data_race.validate_lock_release(
&mut clock,
self.threads.get_active_thread_id(),
self.threads.active_thread_ref().current_span(),
);
}
clock
})
}
}

39 changes: 28 additions & 11 deletions src/alloc_addresses/reuse_pool.rs
@@ -4,6 +4,8 @@ use rand::Rng;

use rustc_target::abi::{Align, Size};

use crate::concurrency::VClock;

const MAX_POOL_SIZE: usize = 64;

// Just use fair coins, until we have evidence that other numbers are better.
@@ -21,42 +23,57 @@ pub struct ReusePool {
///
/// Each of these maps has at most MAX_POOL_SIZE elements, and since alignment is limited to
/// less than 64 different possible values, that bounds the overall size of the pool.
pool: Vec<Vec<(u64, Size)>>,
///
/// We also store the clock from the thread that donated this pool element,
/// to ensure synchronization with the thread that picks up this address.
pool: Vec<Vec<(u64, Size, VClock)>>,
}

impl ReusePool {
pub fn new() -> Self {
ReusePool { pool: vec![] }
}

fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size)> {
fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size, VClock)> {
let pool_idx: usize = align.bytes().trailing_zeros().try_into().unwrap();
if self.pool.len() <= pool_idx {
self.pool.resize(pool_idx + 1, Vec::new());
}
&mut self.pool[pool_idx]
}

pub fn add_addr(&mut self, rng: &mut impl Rng, addr: u64, size: Size, align: Align) {
pub fn add_addr(
&mut self,
rng: &mut impl Rng,
addr: u64,
size: Size,
align: Align,
clock: impl FnOnce() -> VClock,
) {
// Let's see if we even want to remember this address.
if !rng.gen_bool(ADDR_REMEMBER_CHANCE) {
return;
}
// Determine the pool to add this to, and where in the pool to put it.
let subpool = self.subpool(align);
let pos = subpool.partition_point(|(_addr, other_size)| *other_size < size);
let pos = subpool.partition_point(|(_addr, other_size, _)| *other_size < size);
// Make sure the pool does not grow too big.
if subpool.len() >= MAX_POOL_SIZE {
// Pool full. Replace existing element, or last one if this would be even bigger.
let clamped_pos = pos.min(subpool.len() - 1);
subpool[clamped_pos] = (addr, size);
subpool[clamped_pos] = (addr, size, clock());
return;
}
// Add address to pool, at the right position.
subpool.insert(pos, (addr, size));
subpool.insert(pos, (addr, size, clock()));
}

pub fn take_addr(&mut self, rng: &mut impl Rng, size: Size, align: Align) -> Option<u64> {
pub fn take_addr(
&mut self,
rng: &mut impl Rng,
size: Size,
align: Align,
) -> Option<(u64, VClock)> {
// Determine whether we'll even attempt a reuse.
if !rng.gen_bool(ADDR_TAKE_CHANCE) {
return None;
@@ -65,9 +82,9 @@ impl ReusePool {
let subpool = self.subpool(align);
// Let's see if we can find something of the right size. We want to find the full range of
// such items, beginning with the first, so we can't use `binary_search_by_key`.
let begin = subpool.partition_point(|(_addr, other_size)| *other_size < size);
let begin = subpool.partition_point(|(_addr, other_size, _)| *other_size < size);
let mut end = begin;
while let Some((_addr, other_size)) = subpool.get(end) {
while let Some((_addr, other_size, _)) = subpool.get(end) {
if *other_size != size {
break;
}
@@ -80,8 +97,8 @@
// Pick a random element with the desired size.
let idx = rng.gen_range(begin..end);
// Remove it from the pool and return.
let (chosen_addr, chosen_size) = subpool.remove(idx);
let (chosen_addr, chosen_size, clock) = subpool.remove(idx);
debug_assert!(chosen_size >= size && chosen_addr % align.bytes() == 0);
Some(chosen_addr)
Some((chosen_addr, clock))
}
}
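
Aside (not part of the diff): each subpool stays sorted by size, so partition_point yields the first entry whose size is at least the requested one, and the loop after it bounds the run of entries with exactly that size. A standalone illustration of the lookup, using made-up addresses and plain u64 sizes instead of Size:

fn main() {
    // (addr, size) entries of one subpool, sorted by size like in ReusePool.
    let subpool: Vec<(u64, u64)> = vec![(0x10, 8), (0x20, 16), (0x30, 16), (0x40, 32)];
    let want = 16u64;
    // First index whose size is not less than `want`.
    let begin = subpool.partition_point(|&(_addr, size)| size < want);
    // Walk forward while the size still matches exactly.
    let mut end = begin;
    while let Some(&(_addr, size)) = subpool.get(end) {
        if size != want {
            break;
        }
        end += 1;
    }
    assert_eq!((begin, end), (1, 3)); // the entries at 0x20 and 0x30 qualify
    println!("candidates: {:?}", &subpool[begin..end]);
}
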
2 changes: 2 additions & 0 deletions src/concurrency/mod.rs
@@ -6,3 +6,5 @@ pub mod init_once;
pub mod thread;
mod vector_clock;
pub mod weak_memory;

pub use vector_clock::VClock;
6 changes: 6 additions & 0 deletions src/concurrency/thread.rs
@@ -223,6 +223,12 @@ impl<'mir, 'tcx> Thread<'mir, 'tcx> {
// empty stacks.
self.top_user_relevant_frame.or_else(|| self.stack.len().checked_sub(1))
}

pub fn current_span(&self) -> Span {
self.top_user_relevant_frame()
.map(|frame_idx| self.stack[frame_idx].current_span())
.unwrap_or(rustc_span::DUMMY_SP)
}
}

impl<'mir, 'tcx> std::fmt::Debug for Thread<'mir, 'tcx> {
6 changes: 2 additions & 4 deletions src/helpers.rs
@@ -1265,9 +1265,7 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
/// This function is backed by a cache, and can be assumed to be very fast.
/// It will work even when the stack is empty.
pub fn current_span(&self) -> Span {
self.top_user_relevant_frame()
.map(|frame_idx| self.stack()[frame_idx].current_span())
.unwrap_or(rustc_span::DUMMY_SP)
self.threads.active_thread_ref().current_span()
}

/// Returns the span of the *caller* of the current operation, again
@@ -1279,7 +1277,7 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
// We need to go down at least to the caller (len - 2), or however
// far we have to go to find a frame in a local crate which is also not #[track_caller].
let frame_idx = self.top_user_relevant_frame().unwrap();
let frame_idx = cmp::min(frame_idx, self.stack().len().checked_sub(2).unwrap());
let frame_idx = cmp::min(frame_idx, self.stack().len().saturating_sub(2));
self.stack()[frame_idx].current_span()
}

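
Side note on the src/helpers.rs change: saturating_sub(2) clamps to frame 0 when the stack has fewer than two frames, where checked_sub(2).unwrap() would have panicked (presumably the point of the change). A minimal standalone check of the two behaviors:

fn main() {
    let len: usize = 1; // a call stack with a single frame
    // The old expression would panic here: `len.checked_sub(2).unwrap()`.
    assert_eq!(len.checked_sub(2), None);
    // The new expression clamps to 0 instead.
    assert_eq!(len.saturating_sub(2), 0);
    println!("ok");
}
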
7 changes: 1 addition & 6 deletions src/machine.rs
@@ -1300,12 +1300,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx>
{
*deallocated_at = Some(machine.current_span());
}
machine.alloc_addresses.get_mut().free_alloc_id(
machine.rng.get_mut(),
alloc_id,
size,
align,
);
machine.free_alloc_id(alloc_id, size, align);
Ok(())
}

61 changes: 61 additions & 0 deletions tests/pass/concurrency/address_reuse_happens_before.rs
@@ -0,0 +1,61 @@
//! Regression test for <https://github.com/rust-lang/miri/issues/3450>:
//! When the address gets reused, there should be a happens-before relation.
#![feature(strict_provenance)]
#![feature(sync_unsafe_cell)]

use std::cell::SyncUnsafeCell;
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use std::thread;

static ADDR: AtomicUsize = AtomicUsize::new(0);
static VAL: SyncUnsafeCell<i32> = SyncUnsafeCell::new(0);

fn addr() -> usize {
let alloc = Box::new(42);
<*const i32>::addr(&*alloc)
}

fn thread1() {
unsafe {
VAL.get().write(42);
}
let alloc = addr();
ADDR.store(alloc, Relaxed);
}

fn thread2() -> bool {
// We try to get an allocation at the same address. If we fail too often,
// try again with a different allocation to ensure it is still in Miri's
// reuse cache.
for _ in 0..16 {
let alloc = addr();
let addr = ADDR.load(Relaxed);
if alloc == addr {
// If the new allocation is at the same address as the old one,
// there must be a happens-before relationship between them.
// Therefore, we can read VAL without racing and must observe
// the write above.
let val = unsafe { VAL.get().read() };
assert_eq!(val, 42);
return true;
}
}

false
}

fn main() {
let mut success = false;
while !success {
let t1 = thread::spawn(thread1);
let t2 = thread::spawn(thread2);
t1.join().unwrap();
success = t2.join().unwrap();

// Reset everything.
ADDR.store(0, Relaxed);
unsafe {
VAL.get().write(0);
}
}
}
