adjust for MemoryExtra being merged into Machine #2047

Merged (2 commits, Apr 5, 2022)
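
This PR tracks a rustc-side refactor: the per-interpreter state that used to hang off `Memory` as its `extra` field now lives directly on the `Machine` (Miri's `Evaluator`). Concretely, every `this.memory.extra.<field>` access becomes `this.machine.<field>`, memory operations are called as methods on the interpreter context itself (`ecx.create_fn_alloc_ptr`, `ecx.leak_report`, and so on), and `intptrcast::GlobalState` is renamed to `GlobalStateInner`, with `GlobalState` now naming the `RefCell` wrapper. A toy model of the field move, using stand-in types rather than Miri's real ones:

    // Stand-in types (not Miri's real ones): before the refactor, machine
    // state hung off `memory.extra`; afterwards the same field lives
    // directly on the machine.
    struct MemoryExtra {
        cmpxchg_weak_failure_rate: f64,
    }

    struct Memory {
        extra: MemoryExtra,
    }

    struct Machine {
        cmpxchg_weak_failure_rate: f64,
    }

    fn main() {
        let before = Memory { extra: MemoryExtra { cmpxchg_weak_failure_rate: 0.8 } };
        let after = Machine { cmpxchg_weak_failure_rate: 0.8 };
        // The access path shortens from `memory.extra.x` to `machine.x`.
        assert_eq!(before.extra.cmpxchg_weak_failure_rate, after.cmpxchg_weak_failure_rate);
    }
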
Changes from all commits
rust-version (2 changes: 1 addition & 1 deletion)

@@ -1 +1 @@
-6af09d2505f38e4f1df291df56d497fb2ad935ed
+634770c0a7f8598164ab825cfe419cc8b03c36e5
src/data_race.rs (26 changes: 12 additions & 14 deletions)

@@ -79,7 +79,6 @@ use crate::{
 };
 
 pub type AllocExtra = VClockAlloc;
-pub type MemoryExtra = GlobalState;
 
 /// Valid atomic read-write operations, alias of atomic::Ordering (not non-exhaustive).
 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
@@ -596,9 +595,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         let eq = this.binary_op(mir::BinOp::Eq, &old, expect_old)?;
         // If the operation would succeed, but is "weak", fail some portion
         // of the time, based on `rate`.
-        let rate = this.memory.extra.cmpxchg_weak_failure_rate;
+        let rate = this.machine.cmpxchg_weak_failure_rate;
         let cmpxchg_success = eq.to_scalar()?.to_bool()?
-            && (!can_fail_spuriously || this.memory.extra.rng.get_mut().gen::<f64>() < rate);
+            && (!can_fail_spuriously || this.machine.rng.get_mut().gen::<f64>() < rate);
         let res = Immediate::ScalarPair(
             old.to_scalar_or_uninit(),
             Scalar::from_bool(cmpxchg_success).into(),
@@ -690,7 +689,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     /// Update the data-race detector for an atomic fence on the current thread.
     fn validate_atomic_fence(&mut self, atomic: AtomicFenceOp) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        if let Some(data_race) = &mut this.memory.extra.data_race {
+        if let Some(data_race) = &mut this.machine.data_race {
             data_race.maybe_perform_sync_operation(move |index, mut clocks| {
                 log::trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);
 
@@ -725,7 +724,7 @@ pub struct VClockAlloc {
 impl VClockAlloc {
     /// Create a new data-race detector for newly allocated memory.
     pub fn new_allocation(
-        global: &MemoryExtra,
+        global: &GlobalState,
         len: Size,
         kind: MemoryKind<MiriMemoryKind>,
     ) -> VClockAlloc {
@@ -796,7 +795,7 @@ impl VClockAlloc {
     #[cold]
     #[inline(never)]
     fn report_data_race<'tcx>(
-        global: &MemoryExtra,
+        global: &GlobalState,
         range: &MemoryCellClocks,
         action: &str,
         is_atomic: bool,
@@ -950,13 +949,13 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     #[inline]
     fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriEvalContext<'mir, 'tcx>) -> R) -> R {
         let this = self.eval_context_ref();
-        let old = if let Some(data_race) = &this.memory.extra.data_race {
+        let old = if let Some(data_race) = &this.machine.data_race {
             data_race.multi_threaded.replace(false)
         } else {
             false
         };
         let result = op(this);
-        if let Some(data_race) = &this.memory.extra.data_race {
+        if let Some(data_race) = &this.machine.data_race {
             data_race.multi_threaded.set(old);
         }
         result
@@ -971,13 +970,13 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         op: impl FnOnce(&mut MiriEvalContext<'mir, 'tcx>) -> R,
     ) -> R {
         let this = self.eval_context_mut();
-        let old = if let Some(data_race) = &this.memory.extra.data_race {
+        let old = if let Some(data_race) = &this.machine.data_race {
             data_race.multi_threaded.replace(false)
         } else {
             false
         };
         let result = op(this);
-        if let Some(data_race) = &this.memory.extra.data_race {
+        if let Some(data_race) = &this.machine.data_race {
             data_race.multi_threaded.set(old);
         }
         result
@@ -997,14 +996,13 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         ) -> Result<(), DataRace>,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_ref();
-        if let Some(data_race) = &this.memory.extra.data_race {
+        if let Some(data_race) = &this.machine.data_race {
             if data_race.multi_threaded.get() {
                 let size = place.layout.size;
-                let (alloc_id, base_offset, ptr) = this.memory.ptr_get_alloc(place.ptr)?;
+                let (alloc_id, base_offset, ptr) = this.ptr_get_alloc_id(place.ptr)?;
                 // Load and log the atomic operation.
                 // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
-                let alloc_meta =
-                    &this.memory.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
+                let alloc_meta = &this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
                 log::trace!(
                     "Atomic op({}) with ordering {:?} on {:?} (size={})",
                     description,
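
A pattern worth noting in the `allow_data_races_ref`/`allow_data_races_mut` hunks above: the detector's `multi_threaded` flag is a `Cell`, and the helpers save its old value, clear it around the closure, and restore it afterwards so the calls nest correctly. A self-contained sketch of that save/restore idiom (hypothetical helper, not Miri's API):

    use std::cell::Cell;

    // Run `op` with the flag forced to `false`, restoring the previous
    // value afterwards so nested calls compose correctly.
    fn with_flag_cleared<R>(flag: &Cell<bool>, op: impl FnOnce() -> R) -> R {
        let old = flag.replace(false); // save and clear
        let result = op();
        flag.set(old); // restore
        result
    }

    fn main() {
        let multi_threaded = Cell::new(true);
        with_flag_cleared(&multi_threaded, || {
            assert!(!multi_threaded.get());
            // A nested call leaves the flag cleared until the
            // outermost call returns.
            with_flag_cleared(&multi_threaded, || assert!(!multi_threaded.get()));
            assert!(!multi_threaded.get());
        });
        assert!(multi_threaded.get());
    }
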
src/diagnostics.rs (4 changes: 2 additions & 2 deletions)

@@ -196,7 +196,7 @@ pub fn report_error<'tcx, 'mir>(
         Unsupported(_) =>
             vec![(None, format!("this is likely not a bug in the program; it indicates that the program performed an operation that the interpreter does not support"))],
         UndefinedBehavior(UndefinedBehaviorInfo::AlignmentCheckFailed { .. })
-            if ecx.memory.extra.check_alignment == AlignmentCheck::Symbolic
+            if ecx.machine.check_alignment == AlignmentCheck::Symbolic
         =>
             vec![
                 (None, format!("this usually indicates that your program performed an invalid operation and caused Undefined Behavior")),
@@ -251,7 +251,7 @@ pub fn report_error<'tcx, 'mir>(
                 access.uninit_offset.bytes(),
                 access.uninit_offset.bytes() + access.uninit_size.bytes(),
             );
-            eprintln!("{:?}", ecx.memory.dump_alloc(*alloc_id));
+            eprintln!("{:?}", ecx.dump_alloc(*alloc_id));
         }
         _ => {}
     }
src/eval.rs (16 changes: 7 additions & 9 deletions)

@@ -153,20 +153,18 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
     tcx: TyCtxt<'tcx>,
     entry_id: DefId,
     entry_type: EntryFnType,
-    config: MiriConfig,
+    config: &MiriConfig,
 ) -> InterpResult<'tcx, (InterpCx<'mir, 'tcx, Evaluator<'mir, 'tcx>>, MPlaceTy<'tcx, Tag>)> {
     let param_env = ty::ParamEnv::reveal_all();
     let layout_cx = LayoutCx { tcx, param_env };
     let mut ecx = InterpCx::new(
         tcx,
         rustc_span::source_map::DUMMY_SP,
         param_env,
-        Evaluator::new(&config, layout_cx),
-        MemoryExtra::new(&config),
+        Evaluator::new(config, layout_cx),
     );
-    // Complete initialization.
-    EnvVars::init(&mut ecx, config.excluded_env_vars, config.forwarded_env_vars)?;
-    MemoryExtra::init_extern_statics(&mut ecx)?;
+    // Some parts of initialization require a full `InterpCx`.
+    Evaluator::late_init(&mut ecx, config)?;
 
     // Make sure we have MIR. We check MIR for some stable monomorphic function in libcore.
     let sentinel = ecx.resolve_path(&["core", "ascii", "escape_default"]);
@@ -260,7 +258,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
         .unwrap()
         .unwrap();
 
-    let main_ptr = ecx.memory.create_fn_alloc(FnVal::Instance(entry_instance));
+    let main_ptr = ecx.create_fn_alloc_ptr(FnVal::Instance(entry_instance));
 
     ecx.call_function(
         start_instance,
@@ -296,7 +294,7 @@ pub fn eval_entry<'tcx>(
     // Copy setting before we move `config`.
     let ignore_leaks = config.ignore_leaks;
 
-    let (mut ecx, ret_place) = match create_ecx(tcx, entry_id, entry_type, config) {
+    let (mut ecx, ret_place) = match create_ecx(tcx, entry_id, entry_type, &config) {
         Ok(v) => v,
         Err(err) => {
             err.print_backtrace();
@@ -354,7 +352,7 @@ pub fn eval_entry<'tcx>(
     }
     // Check for memory leaks.
     info!("Additonal static roots: {:?}", ecx.machine.static_roots);
-    let leaks = ecx.memory.leak_report(&ecx.machine.static_roots);
+    let leaks = ecx.leak_report(&ecx.machine.static_roots);
    if leaks != 0 {
         tcx.sess.err("the evaluated program leaked memory");
         tcx.sess.note_without_error("pass `-Zmiri-ignore-leaks` to disable this check");
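
The `create_ecx` hunk replaces ad-hoc post-construction setup (`EnvVars::init`, `MemoryExtra::init_extern_statics`) with a single `Evaluator::late_init` call, a two-phase initialization: the machine is built before the `InterpCx` exists, and whatever needs the full context runs afterwards. A minimal sketch of the shape of that pattern, with stand-in types (not Miri's real `Evaluator`/`InterpCx`):

    struct Evaluator {
        env_ready: bool,
    }

    struct InterpCx {
        machine: Evaluator,
    }

    impl Evaluator {
        // Phase one: runs before the interpreter context exists.
        fn new() -> Self {
            Evaluator { env_ready: false }
        }

        // Phase two: parts of initialization (e.g. environment variables)
        // need the full interpreter context.
        fn late_init(ecx: &mut InterpCx) {
            ecx.machine.env_ready = true;
        }
    }

    fn main() {
        let mut ecx = InterpCx { machine: Evaluator::new() };
        Evaluator::late_init(&mut ecx);
        assert!(ecx.machine.env_ready);
    }
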
src/helpers.rs (14 changes: 7 additions & 7 deletions)

@@ -199,11 +199,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
             getrandom::getrandom(&mut data)
                 .map_err(|err| err_unsup_format!("host getrandom failed: {}", err))?;
         } else {
-            let rng = this.memory.extra.rng.get_mut();
+            let rng = this.machine.rng.get_mut();
             rng.fill_bytes(&mut data);
         }
 
-        this.memory.write_bytes(ptr, data.iter().copied())
+        this.write_bytes_ptr(ptr, data.iter().copied())
     }
 
     /// Call a function: Push the stack frame and pass the arguments.
@@ -645,7 +645,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
         loop {
             // FIXME: We are re-getting the allocation each time around the loop.
             // Would be nice if we could somehow "extend" an existing AllocRange.
-            let alloc = this.memory.get(ptr.offset(len, this)?.into(), size1, Align::ONE)?.unwrap(); // not a ZST, so we will get a result
+            let alloc =
+                this.get_ptr_alloc(ptr.offset(len, this)?.into(), size1, Align::ONE)?.unwrap(); // not a ZST, so we will get a result
             let byte = alloc.read_scalar(alloc_range(Size::ZERO, size1))?.to_u8()?;
             if byte == 0 {
                 break;
@@ -655,7 +656,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
         }
 
         // Step 2: get the bytes.
-        this.memory.read_bytes(ptr.into(), len)
+        this.read_bytes_ptr(ptr.into(), len)
     }
 
     fn read_wide_str(&self, mut ptr: Pointer<Option<Tag>>) -> InterpResult<'tcx, Vec<u16>> {
@@ -667,7 +668,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
         loop {
             // FIXME: We are re-getting the allocation each time around the loop.
             // Would be nice if we could somehow "extend" an existing AllocRange.
-            let alloc = this.memory.get(ptr.into(), size2, align2)?.unwrap(); // not a ZST, so we will get a result
+            let alloc = this.get_ptr_alloc(ptr.into(), size2, align2)?.unwrap(); // not a ZST, so we will get a result
             let wchar = alloc.read_scalar(alloc_range(Size::ZERO, size2))?.to_u16()?;
             if wchar == 0 {
                 break;
@@ -750,8 +751,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
     /// Mark a machine allocation that was just created as immutable.
     fn mark_immutable(&mut self, mplace: &MemPlace<Tag>) {
         let this = self.eval_context_mut();
-        this.memory
-            .mark_immutable(mplace.ptr.into_pointer_or_addr().unwrap().provenance.alloc_id)
+        this.alloc_mark_immutable(mplace.ptr.into_pointer_or_addr().unwrap().provenance.alloc_id)
             .unwrap();
     }
 }
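
`read_c_str` above works in two steps over interpreter memory: scan forward one byte at a time until the NUL terminator to learn the length, then read that many bytes in one go. The same two-step logic over a plain byte slice, as a self-contained sketch (not operating on interpreter memory):

    fn read_c_str(mem: &[u8]) -> Option<&[u8]> {
        // Step 1: find the length by scanning for the NUL terminator.
        let len = mem.iter().position(|&b| b == 0)?;
        // Step 2: get the bytes before it.
        Some(&mem[..len])
    }

    fn main() {
        assert_eq!(read_c_str(b"hi\0rest"), Some(&b"hi"[..]));
        assert_eq!(read_c_str(b"no nul here"), None);
    }
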
src/intptrcast.rs (56 changes: 24 additions & 32 deletions)

@@ -9,10 +9,10 @@ use rustc_target::abi::{HasDataLayout, Size};
 
 use crate::*;
 
-pub type MemoryExtra = RefCell<GlobalState>;
+pub type GlobalState = RefCell<GlobalStateInner>;
 
 #[derive(Clone, Debug)]
-pub struct GlobalState {
+pub struct GlobalStateInner {
     /// This is used as a map between the address of each allocation and its `AllocId`.
     /// It is always sorted
     int_to_ptr_map: Vec<(u64, AllocId)>,
@@ -29,9 +29,9 @@ pub struct GlobalState {
     strict_provenance: bool,
 }
 
-impl GlobalState {
+impl GlobalStateInner {
     pub fn new(config: &MiriConfig) -> Self {
-        GlobalState {
+        GlobalStateInner {
             int_to_ptr_map: Vec::default(),
             base_addr: FxHashMap::default(),
             next_base_addr: STACK_ADDR,
@@ -40,13 +40,10 @@ impl GlobalState {
     }
 }
 
-impl<'mir, 'tcx> GlobalState {
-    pub fn ptr_from_addr(
-        addr: u64,
-        memory: &Memory<'mir, 'tcx, Evaluator<'mir, 'tcx>>,
-    ) -> Pointer<Option<Tag>> {
+impl<'mir, 'tcx> GlobalStateInner {
+    pub fn ptr_from_addr(addr: u64, ecx: &MiriEvalContext<'mir, 'tcx>) -> Pointer<Option<Tag>> {
         trace!("Casting 0x{:x} to a pointer", addr);
-        let global_state = memory.extra.intptrcast.borrow();
+        let global_state = ecx.machine.intptrcast.borrow();
 
         if global_state.strict_provenance {
             return Pointer::new(None, Size::from_bytes(addr));
@@ -64,7 +61,11 @@ impl<'mir, 'tcx> GlobalState {
             let offset = addr - glb;
             // If the offset exceeds the size of the allocation, don't use this `alloc_id`.
             if offset
-                <= memory.get_size_and_align(alloc_id, AllocCheck::MaybeDead).unwrap().0.bytes()
+                <= ecx
+                    .get_alloc_size_and_align(alloc_id, AllocCheck::MaybeDead)
+                    .unwrap()
+                    .0
+                    .bytes()
             {
                 Some(alloc_id)
             } else {
@@ -79,11 +80,8 @@ impl<'mir, 'tcx> GlobalState {
         )
     }
 
-    fn alloc_base_addr(
-        memory: &Memory<'mir, 'tcx, Evaluator<'mir, 'tcx>>,
-        alloc_id: AllocId,
-    ) -> u64 {
-        let mut global_state = memory.extra.intptrcast.borrow_mut();
+    fn alloc_base_addr(ecx: &MiriEvalContext<'mir, 'tcx>, alloc_id: AllocId) -> u64 {
+        let mut global_state = ecx.machine.intptrcast.borrow_mut();
         let global_state = &mut *global_state;
 
         match global_state.base_addr.entry(alloc_id) {
@@ -92,12 +90,12 @@ impl<'mir, 'tcx> GlobalState {
                 // There is nothing wrong with a raw pointer being cast to an integer only after
                 // it became dangling. Hence `MaybeDead`.
                 let (size, align) =
-                    memory.get_size_and_align(alloc_id, AllocCheck::MaybeDead).unwrap();
+                    ecx.get_alloc_size_and_align(alloc_id, AllocCheck::MaybeDead).unwrap();
 
                 // This allocation does not have a base address yet, pick one.
                 // Leave some space to the previous allocation, to give it some chance to be less aligned.
                 let slack = {
-                    let mut rng = memory.extra.rng.borrow_mut();
+                    let mut rng = ecx.machine.rng.borrow_mut();
                     // This means that `(global_state.next_base_addr + slack) % 16` is uniformly distributed.
                    rng.gen_range(0..16)
                 };
@@ -129,27 +127,21 @@ impl<'mir, 'tcx> GlobalState {
     }
 
     /// Convert a relative (tcx) pointer to an absolute address.
-    pub fn rel_ptr_to_addr(
-        memory: &Memory<'mir, 'tcx, Evaluator<'mir, 'tcx>>,
-        ptr: Pointer<AllocId>,
-    ) -> u64 {
+    pub fn rel_ptr_to_addr(ecx: &MiriEvalContext<'mir, 'tcx>, ptr: Pointer<AllocId>) -> u64 {
         let (alloc_id, offset) = ptr.into_parts(); // offset is relative
-        let base_addr = GlobalState::alloc_base_addr(memory, alloc_id);
+        let base_addr = GlobalStateInner::alloc_base_addr(ecx, alloc_id);
 
         // Add offset with the right kind of pointer-overflowing arithmetic.
-        let dl = memory.data_layout();
+        let dl = ecx.data_layout();
         dl.overflowing_offset(base_addr, offset.bytes()).0
     }
 
-    pub fn abs_ptr_to_rel(
-        memory: &Memory<'mir, 'tcx, Evaluator<'mir, 'tcx>>,
-        ptr: Pointer<Tag>,
-    ) -> Size {
+    pub fn abs_ptr_to_rel(ecx: &MiriEvalContext<'mir, 'tcx>, ptr: Pointer<Tag>) -> Size {
         let (tag, addr) = ptr.into_parts(); // addr is absolute
-        let base_addr = GlobalState::alloc_base_addr(memory, tag.alloc_id);
+        let base_addr = GlobalStateInner::alloc_base_addr(ecx, tag.alloc_id);
 
         // Wrapping "addr - base_addr"
-        let dl = memory.data_layout();
+        let dl = ecx.data_layout();
         let neg_base_addr = (base_addr as i64).wrapping_neg();
         Size::from_bytes(dl.overflowing_signed_offset(addr.bytes(), neg_base_addr).0)
     }
@@ -170,7 +162,7 @@ mod tests {
 
     #[test]
     fn test_align_addr() {
-        assert_eq!(GlobalState::align_addr(37, 4), 40);
-        assert_eq!(GlobalState::align_addr(44, 4), 44);
+        assert_eq!(GlobalStateInner::align_addr(37, 4), 40);
+        assert_eq!(GlobalStateInner::align_addr(44, 4), 44);
     }
 }
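
The renamed test pins down `align_addr`'s contract: round an address up to the next multiple of the (power-of-two) alignment, leaving already-aligned addresses alone. A sketch of a function satisfying those two assertions (the actual Miri implementation is not shown in this diff and may differ):

    /// Round `addr` up to the next multiple of `align`.
    /// Assumes `align` is a power of two, as allocation alignments are.
    fn align_addr(addr: u64, align: u64) -> u64 {
        debug_assert!(align.is_power_of_two());
        // Adding `align - 1` and masking off the low bits rounds up;
        // an already-aligned address passes through unchanged.
        (addr + (align - 1)) & !(align - 1)
    }

    fn main() {
        // The same cases as the test above.
        assert_eq!(align_addr(37, 4), 40);
        assert_eq!(align_addr(44, 4), 44);
    }
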
src/lib.rs (4 changes: 2 additions & 2 deletions)

@@ -66,8 +66,8 @@ pub use crate::eval::{
 };
 pub use crate::helpers::EvalContextExt as HelpersEvalContextExt;
 pub use crate::machine::{
-    AllocExtra, Evaluator, FrameData, MemoryExtra, MiriEvalContext, MiriEvalContextExt,
-    MiriMemoryKind, Tag, NUM_CPUS, PAGE_SIZE, STACK_ADDR, STACK_SIZE,
+    AllocExtra, Evaluator, FrameData, MiriEvalContext, MiriEvalContextExt, MiriMemoryKind, Tag,
+    NUM_CPUS, PAGE_SIZE, STACK_ADDR, STACK_SIZE,
 };
 pub use crate::mono_hash_map::MonoHashMap;
 pub use crate::operator::EvalContextExt as OperatorEvalContextExt;