From db025c14eccb435ffb6bc5d4f834b4551589447b Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Thu, 21 Jun 2018 17:31:09 -0700 Subject: [PATCH 01/14] Refactor EvalContext stack and heap into inner struct Change surrounding code to use accessor methods to refer to these fields. Similar changes have not yet been made in tools/miri --- src/librustc_mir/interpret/cast.rs | 8 +- src/librustc_mir/interpret/const_eval.rs | 10 +- src/librustc_mir/interpret/eval_context.rs | 138 ++++++++++--------- src/librustc_mir/interpret/memory.rs | 4 +- src/librustc_mir/interpret/place.rs | 4 +- src/librustc_mir/interpret/step.rs | 10 +- src/librustc_mir/interpret/terminator/mod.rs | 8 +- src/librustc_mir/interpret/traits.rs | 26 ++-- 8 files changed, 110 insertions(+), 98 deletions(-) diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs index e69e7a522ab73..4bf4009115353 100644 --- a/src/librustc_mir/interpret/cast.rs +++ b/src/librustc_mir/interpret/cast.rs @@ -76,8 +76,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { // No alignment check needed for raw pointers. But we have to truncate to target ptr size. 
TyRawPtr(_) => { Ok(Scalar::Bits { - bits: self.memory.truncate_to_ptr(v).0 as u128, - defined: self.memory.pointer_size().bits() as u8, + bits: self.memory().truncate_to_ptr(v).0 as u128, + defined: self.memory().pointer_size().bits() as u8, }) }, @@ -92,7 +92,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { match dest_ty.sty { // float -> uint TyUint(t) => { - let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize); + let width = t.bit_width().unwrap_or(self.memory().pointer_size().bits() as usize); match fty { FloatTy::F32 => Ok(Scalar::Bits { bits: Single::from_bits(bits).to_u128(width).value, @@ -106,7 +106,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { }, // float -> int TyInt(t) => { - let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize); + let width = t.bit_width().unwrap_or(self.memory().pointer_size().bits() as usize); match fty { FloatTy::F32 => Ok(Scalar::Bits { bits: Single::from_bits(bits).to_i128(width).value as u128, diff --git a/src/librustc_mir/interpret/const_eval.rs b/src/librustc_mir/interpret/const_eval.rs index ea09bab5d1411..fe2c46aa48fd8 100644 --- a/src/librustc_mir/interpret/const_eval.rs +++ b/src/librustc_mir/interpret/const_eval.rs @@ -88,7 +88,7 @@ pub fn value_to_const_value<'tcx>( Value::ScalarPair(a, b) => Ok(ConstValue::ScalarPair(a, b)), Value::ByRef(ptr, align) => { let ptr = ptr.to_ptr().unwrap(); - let alloc = ecx.memory.get(ptr.alloc_id)?; + let alloc = ecx.memory().get(ptr.alloc_id)?; assert!(alloc.align.abi() >= align.abi()); assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= layout.size.bytes()); let mut alloc = alloc.clone(); @@ -149,7 +149,7 @@ fn eval_body_using_ecx<'a, 'mir, 'tcx>( } let layout = ecx.layout_of(mir.return_ty().subst(tcx, cid.instance.substs))?; assert!(!layout.is_unsized()); - let ptr = ecx.memory.allocate( + let ptr = ecx.memory_mut().allocate( layout.size, layout.align, 
MemoryKind::Stack, @@ -486,7 +486,7 @@ pub fn const_variant_index<'a, 'tcx>( let (ptr, align) = match value { Value::ScalarPair(..) | Value::Scalar(_) => { let layout = ecx.layout_of(val.ty)?; - let ptr = ecx.memory.allocate(layout.size, layout.align, MemoryKind::Stack)?.into(); + let ptr = ecx.memory_mut().allocate(layout.size, layout.align, MemoryKind::Stack)?.into(); ecx.write_value_to_ptr(value, ptr, layout.align, val.ty)?; (ptr, layout.align) }, @@ -515,9 +515,9 @@ pub fn const_value_to_allocation_provider<'a, 'tcx>( ()); let value = ecx.const_to_value(val.val)?; let layout = ecx.layout_of(val.ty)?; - let ptr = ecx.memory.allocate(layout.size, layout.align, MemoryKind::Stack)?; + let ptr = ecx.memory_mut().allocate(layout.size, layout.align, MemoryKind::Stack)?; ecx.write_value_to_ptr(value, ptr.into(), layout.align, val.ty)?; - let alloc = ecx.memory.get(ptr.alloc_id)?; + let alloc = ecx.memory().get(ptr.alloc_id)?; Ok(tcx.intern_const_alloc(alloc.clone())) }; result().expect("unable to convert ConstValue to Allocation") diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index 031c75013a27b..fe95b9319f320 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -32,11 +32,8 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { /// Bounds in scope for polymorphic evaluations. pub param_env: ty::ParamEnv<'tcx>, - /// The virtual memory system. - pub memory: Memory<'a, 'mir, 'tcx, M>, - - /// The virtual call stack. - pub(crate) stack: Vec>, + /// Virtual memory and call stack. + state: EvalState<'a, 'mir, 'tcx, M>, /// The maximum number of stack frames allowed pub(crate) stack_limit: usize, @@ -47,6 +44,14 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { pub(crate) terminators_remaining: usize, } +struct EvalState<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { + /// The virtual memory system. 
+ memory: Memory<'a, 'mir, 'tcx, M>, + + /// The virtual call stack. + stack: Vec>, +} + /// A stack frame. pub struct Frame<'mir, 'tcx: 'mir> { //////////////////////////////////////////////////////////////////////////////// @@ -186,18 +191,20 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M machine, tcx, param_env, - memory: Memory::new(tcx, memory_data), - stack: Vec::new(), + state: EvalState { + memory: Memory::new(tcx, memory_data), + stack: Vec::new(), + }, stack_limit: tcx.sess.const_eval_stack_frame_limit, terminators_remaining: MAX_TERMINATORS, } } pub(crate) fn with_fresh_body R, R>(&mut self, f: F) -> R { - let stack = mem::replace(&mut self.stack, Vec::new()); + let stack = mem::replace(self.stack_mut(), Vec::new()); let terminators_remaining = mem::replace(&mut self.terminators_remaining, MAX_TERMINATORS); let r = f(self); - self.stack = stack; + *self.stack_mut() = stack; self.terminators_remaining = terminators_remaining; r } @@ -206,29 +213,34 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let layout = self.layout_of(ty)?; assert!(!layout.is_unsized(), "cannot alloc memory for unsized type"); - self.memory.allocate(layout.size, layout.align, MemoryKind::Stack) + self.memory_mut().allocate(layout.size, layout.align, MemoryKind::Stack) } pub fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> { - &self.memory + &self.state.memory } pub fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> { - &mut self.memory + &mut self.state.memory } + #[inline] pub fn stack(&self) -> &[Frame<'mir, 'tcx>] { - &self.stack + &self.state.stack + } + + pub fn stack_mut(&mut self) -> &mut Vec> { + &mut self.state.stack } #[inline] pub fn cur_frame(&self) -> usize { - assert!(self.stack.len() > 0); - self.stack.len() - 1 + assert!(self.stack().len() > 0); + self.stack().len() - 1 } pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> { - let ptr = 
self.memory.allocate_bytes(s.as_bytes()); + let ptr = self.memory_mut().allocate_bytes(s.as_bytes()); Ok(Scalar::Ptr(ptr).to_value_with_len(s.len() as u64, self.tcx.tcx)) } @@ -246,7 +258,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } ConstValue::ByRef(alloc, offset) => { // FIXME: Allocate new AllocId for all constants inside - let id = self.memory.allocate_value(alloc.clone(), MemoryKind::Stack)?; + let id = self.memory_mut().allocate_value(alloc.clone(), MemoryKind::Stack)?; Ok(Value::ByRef(Pointer::new(id, offset).into(), alloc.align)) }, ConstValue::ScalarPair(a, b) => Ok(Value::ScalarPair(a, b)), @@ -417,7 +429,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M IndexVec::new() }; - self.stack.push(Frame { + self.stack_mut().push(Frame { mir, block: mir::START_BLOCK, return_to_block, @@ -428,9 +440,9 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M stmt: 0, }); - self.memory.cur_frame = self.cur_frame(); + self.memory_mut().cur_frame = self.cur_frame(); - if self.stack.len() > self.stack_limit { + if self.stack().len() > self.stack_limit { err!(StackFrameLimitReached) } else { Ok(()) @@ -440,18 +452,18 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> { ::log_settings::settings().indentation -= 1; M::end_region(self, None)?; - let frame = self.stack.pop().expect( + let frame = self.stack_mut().pop().expect( "tried to pop a stack frame, but there were none", ); - if !self.stack.is_empty() { + if !self.stack().is_empty() { // TODO: Is this the correct time to start considering these accesses as originating from the returned-to stack frame? - self.memory.cur_frame = self.cur_frame(); + self.memory_mut().cur_frame = self.cur_frame(); } match frame.return_to_block { StackPopCleanup::MarkStatic(mutable) => { if let Place::Ptr { ptr, .. 
} = frame.return_place { // FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions - self.memory.mark_static_initialized( + self.memory_mut().mark_static_initialized( ptr.to_ptr()?.alloc_id, mutable, )? @@ -474,8 +486,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M if let Some(Value::ByRef(ptr, _align)) = local { trace!("deallocating local"); let ptr = ptr.to_ptr()?; - self.memory.dump_alloc(ptr.alloc_id); - self.memory.deallocate_local(ptr)?; + self.memory().dump_alloc(ptr.alloc_id); + self.memory_mut().deallocate_local(ptr)?; }; Ok(()) } @@ -595,7 +607,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let src = self.eval_place(place)?; let ty = self.place_ty(place); let (_, len) = src.elem_ty_and_len(ty, self.tcx.tcx); - let defined = self.memory.pointer_size().bits() as u8; + let defined = self.memory().pointer_size().bits() as u8; self.write_scalar( dest, Scalar::Bits { @@ -637,7 +649,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let layout = self.layout_of(ty)?; assert!(!layout.is_unsized(), "SizeOf nullary MIR operator called for unsized type"); - let defined = self.memory.pointer_size().bits() as u8; + let defined = self.memory().pointer_size().bits() as u8; self.write_scalar( dest, Scalar::Bits { @@ -732,7 +744,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M def_id, substs, ).ok_or_else(|| EvalErrorKind::TooGeneric.into()); - let fn_ptr = self.memory.create_fn_alloc(instance?); + let fn_ptr = self.memory_mut().create_fn_alloc(instance?); let valty = ValTy { value: Value::Scalar(fn_ptr.into()), ty: dest_ty, @@ -768,7 +780,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M substs, ty::ClosureKind::FnOnce, ); - let fn_ptr = self.memory.create_fn_alloc(instance); + let fn_ptr = self.memory_mut().create_fn_alloc(instance); let valty = 
ValTy { value: Value::Scalar(fn_ptr.into()), ty: dest_ty, @@ -1045,7 +1057,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M pub fn force_allocation(&mut self, place: Place) -> EvalResult<'tcx, Place> { let new_place = match place { Place::Local { frame, local } => { - match self.stack[frame].locals[local] { + match self.stack()[frame].locals[local] { None => return err!(DeadLocal), Some(Value::ByRef(ptr, align)) => { Place::Ptr { @@ -1055,11 +1067,11 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } } Some(val) => { - let ty = self.stack[frame].mir.local_decls[local].ty; - let ty = self.monomorphize(ty, self.stack[frame].instance.substs); + let ty = self.stack()[frame].mir.local_decls[local].ty; + let ty = self.monomorphize(ty, self.stack()[frame].instance.substs); let layout = self.layout_of(ty)?; let ptr = self.alloc_ptr(ty)?; - self.stack[frame].locals[local] = + self.stack_mut()[frame].locals[local] = Some(Value::ByRef(ptr.into(), layout.align)); // it stays live let place = Place::from_ptr(ptr, layout.align); self.write_value(ValTy { value: val, ty }, place)?; @@ -1141,10 +1153,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } Place::Local { frame, local } => { - let dest = self.stack[frame].get_local(local)?; + let dest = self.stack()[frame].get_local(local)?; self.write_value_possibly_by_val( src_val, - |this, val| this.stack[frame].set_local(local, val), + |this, val| this.stack_mut()[frame].set_local(local, val), dest, dest_ty, ) @@ -1186,7 +1198,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } else { let dest_ptr = self.alloc_ptr(dest_ty)?.into(); let layout = self.layout_of(dest_ty)?; - self.memory.copy(src_ptr, align.min(layout.align), dest_ptr, layout.align, layout.size, false)?; + self.memory_mut().copy(src_ptr, align.min(layout.align), dest_ptr, layout.align, layout.size, false)?; write_dest(self, 
Value::ByRef(dest_ptr, layout.align))?; } } else { @@ -1208,7 +1220,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M trace!("write_value_to_ptr: {:#?}, {}, {:#?}", value, dest_ty, layout); match value { Value::ByRef(ptr, align) => { - self.memory.copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size, false) + self.memory_mut().copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size, false) } Value::Scalar(scalar) => { let signed = match layout.abi { @@ -1221,7 +1233,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M _ => bug!("write_value_to_ptr: invalid ByVal layout: {:#?}", layout), } }; - self.memory.write_scalar(dest, dest_align, scalar, layout.size, signed) + self.memory_mut().write_scalar(dest, dest_align, scalar, layout.size, signed) } Value::ScalarPair(a_val, b_val) => { trace!("write_value_to_ptr valpair: {:#?}", layout); @@ -1234,8 +1246,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let b_offset = a_size.abi_align(b.align(&self)); let b_ptr = dest.ptr_offset(b_offset, &self)?.into(); // TODO: What about signedess? 
- self.memory.write_scalar(a_ptr, dest_align, a_val, a_size, false)?; - self.memory.write_scalar(b_ptr, dest_align, b_val, b_size, false) + self.memory_mut().write_scalar(a_ptr, dest_align, a_val, a_size, false)?; + self.memory_mut().write_scalar(b_ptr, dest_align, b_val, b_size, false) } } } @@ -1266,8 +1278,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M ptr_align: Align, pointee_ty: Ty<'tcx>, ) -> EvalResult<'tcx, Value> { - let ptr_size = self.memory.pointer_size(); - let p: Scalar = self.memory.read_ptr_sized(ptr, ptr_align)?.into(); + let ptr_size = self.memory().pointer_size(); + let p: Scalar = self.memory().read_ptr_sized(ptr, ptr_align)?.into(); if self.type_is_sized(pointee_ty) { Ok(p.to_value()) } else { @@ -1275,11 +1287,11 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let extra = ptr.offset(ptr_size, self)?; match self.tcx.struct_tail(pointee_ty).sty { ty::TyDynamic(..) => Ok(p.to_value_with_vtable( - self.memory.read_ptr_sized(extra, ptr_align)?.to_ptr()?, + self.memory().read_ptr_sized(extra, ptr_align)?.to_ptr()?, )), ty::TySlice(..) | ty::TyStr => { let len = self - .memory + .memory() .read_ptr_sized(extra, ptr_align)? .to_bits(ptr_size)?; Ok(p.to_value_with_len(len as u64, self.tcx.tcx)) @@ -1297,10 +1309,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M ) -> EvalResult<'tcx> { match ty.sty { ty::TyBool => { - self.memory.read_scalar(ptr, ptr_align, Size::from_bytes(1))?.to_bool()?; + self.memory().read_scalar(ptr, ptr_align, Size::from_bytes(1))?.to_bool()?; } ty::TyChar => { - let c = self.memory.read_scalar(ptr, ptr_align, Size::from_bytes(4))?.to_bits(Size::from_bytes(4))? as u32; + let c = self.memory().read_scalar(ptr, ptr_align, Size::from_bytes(4))?.to_bits(Size::from_bytes(4))? as u32; match ::std::char::from_u32(c) { Some(..) 
=> (), None => return err!(InvalidChar(c as u128)), @@ -1308,7 +1320,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } ty::TyFnPtr(_) => { - self.memory.read_ptr_sized(ptr, ptr_align)?; + self.memory().read_ptr_sized(ptr, ptr_align)?; }, ty::TyRef(_, rty, _) | ty::TyRawPtr(ty::TypeAndMut { ty: rty, .. }) => { @@ -1323,7 +1335,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M if let layout::Abi::Scalar(ref scalar) = self.layout_of(ty)?.abi { let size = scalar.value.size(self); - self.memory.read_scalar(ptr, ptr_align, size)?; + self.memory().read_scalar(ptr, ptr_align, size)?; } } @@ -1344,7 +1356,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M pub fn try_read_value(&self, ptr: Scalar, ptr_align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Option> { let layout = self.layout_of(ty)?; - self.memory.check_align(ptr, ptr_align)?; + self.memory().check_align(ptr, ptr_align)?; if layout.size.bytes() == 0 { return Ok(Some(Value::Scalar(Scalar::undef()))); @@ -1357,7 +1369,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M match layout.abi { layout::Abi::Scalar(..) 
=> { - let scalar = self.memory.read_scalar(ptr, ptr_align, layout.size)?; + let scalar = self.memory().read_scalar(ptr, ptr_align, layout.size)?; Ok(Some(Value::Scalar(scalar))) } layout::Abi::ScalarPair(ref a, ref b) => { @@ -1366,8 +1378,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let a_ptr = ptr; let b_offset = a_size.abi_align(b.align(self)); let b_ptr = ptr.offset(b_offset, self)?.into(); - let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?; - let b_val = self.memory.read_scalar(b_ptr, ptr_align, b_size)?; + let a_val = self.memory().read_scalar(a_ptr, ptr_align, a_size)?; + let b_val = self.memory().read_scalar(b_ptr, ptr_align, b_size)?; Ok(Some(Value::ScalarPair(a_val, b_val))) } _ => Ok(None), @@ -1375,11 +1387,11 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } pub fn frame(&self) -> &Frame<'mir, 'tcx> { - self.stack.last().expect("no call frames exist") + self.stack().last().expect("no call frames exist") } pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx> { - self.stack.last_mut().expect("no call frames exist") + self.stack_mut().last_mut().expect("no call frames exist") } pub(super) fn mir(&self) -> &'mir mir::Mir<'tcx> { @@ -1387,7 +1399,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } pub fn substs(&self) -> &'tcx Substs<'tcx> { - if let Some(frame) = self.stack.last() { + if let Some(frame) = self.stack().last() { frame.instance.substs } else { Substs::empty() @@ -1533,7 +1545,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } write!(msg, ":").unwrap(); - match self.stack[frame].get_local(local) { + match self.stack()[frame].get_local(local) { Err(err) => { if let EvalErrorKind::DeadLocal = err.kind { write!(msg, " is dead").unwrap(); @@ -1568,13 +1580,13 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } trace!("{}", msg); - 
self.memory.dump_allocs(allocs); + self.memory().dump_allocs(allocs); } Place::Ptr { ptr, align, .. } => { match ptr { Scalar::Ptr(ptr) => { trace!("by align({}) ref:", align.abi()); - self.memory.dump_alloc(ptr.alloc_id); + self.memory().dump_alloc(ptr.alloc_id); } ptr => trace!(" integral by ref: {:?}", ptr), } @@ -1587,12 +1599,12 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M where F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>, { - let val = self.stack[frame].get_local(local)?; + let val = self.stack()[frame].get_local(local)?; let new_val = f(self, val)?; - self.stack[frame].set_local(local, new_val)?; + self.stack_mut()[frame].set_local(local, new_val)?; // FIXME(solson): Run this when setting to Undef? (See previous version of this code.) - // if let Value::ByRef(ptr) = self.stack[frame].get_local(local) { - // self.memory.deallocate(ptr)?; + // if let Value::ByRef(ptr) = self.stack()[frame].get_local(local) { + // self.memory().deallocate(ptr)?; // } Ok(()) } diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 5cf734cce8a3f..4d1188b0f152b 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -991,12 +991,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for Me impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for EvalContext<'a, 'mir, 'tcx, M> { #[inline] fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> { - &mut self.memory + self.memory_mut() } #[inline] fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> { - &self.memory + self.memory() } } diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs index 51b33fa54b249..95391bc8963e9 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -201,7 +201,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { assert_eq!(extra, 
PlaceExtra::None); Ok(Value::ByRef(ptr, align)) } - Place::Local { frame, local } => self.stack[frame].get_local(local), + Place::Local { frame, local } => self.stack()[frame].get_local(local), } } @@ -261,7 +261,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let (base_ptr, base_align, base_extra) = match base { Place::Ptr { ptr, align, extra } => (ptr, align, extra), Place::Local { frame, local } => { - match (&self.stack[frame].get_local(local)?, &base_layout.abi) { + match (&self.stack()[frame].get_local(local)?, &base_layout.abi) { // in case the field covers the entire type, just return the value (&Value::Scalar(_), &layout::Abi::Scalar(_)) | (&Value::ScalarPair(..), &layout::Abi::ScalarPair(..)) diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs index ab15278219f40..86eb02312bd19 100644 --- a/src/librustc_mir/interpret/step.rs +++ b/src/librustc_mir/interpret/step.rs @@ -19,7 +19,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { /// Returns true as long as there are more things to do. pub fn step(&mut self) -> EvalResult<'tcx, bool> { - if self.stack.is_empty() { + if self.stack().is_empty() { return Ok(false); } @@ -53,7 +53,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { // *before* executing the statement. let frame_idx = self.cur_frame(); self.tcx.span = stmt.source_info.span; - self.memory.tcx.span = stmt.source_info.span; + self.memory_mut().tcx.span = stmt.source_info.span; match stmt.kind { Assign(ref place, ref rvalue) => self.eval_rvalue_into_place(rvalue, place)?, @@ -102,16 +102,16 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { InlineAsm { .. 
} => return err!(InlineAsm), } - self.stack[frame_idx].stmt += 1; + self.stack_mut()[frame_idx].stmt += 1; Ok(()) } fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> { trace!("{:?}", terminator.kind); self.tcx.span = terminator.source_info.span; - self.memory.tcx.span = terminator.source_info.span; + self.memory_mut().tcx.span = terminator.source_info.span; self.eval_terminator(terminator)?; - if !self.stack.is_empty() { + if !self.stack().is_empty() { trace!("// {:?}", self.frame().block); } Ok(()) diff --git a/src/librustc_mir/interpret/terminator/mod.rs b/src/librustc_mir/interpret/terminator/mod.rs index 2994b1b387f3f..61670769d01ad 100644 --- a/src/librustc_mir/interpret/terminator/mod.rs +++ b/src/librustc_mir/interpret/terminator/mod.rs @@ -71,7 +71,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let (fn_def, sig) = match func.ty.sty { ty::TyFnPtr(sig) => { let fn_ptr = self.value_to_scalar(func)?.to_ptr()?; - let instance = self.memory.get_fn(fn_ptr)?; + let instance = self.memory().get_fn(fn_ptr)?; let instance_ty = instance.ty(*self.tcx); match instance_ty.sty { ty::TyFnDef(..) 
=> { @@ -377,14 +377,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } // cannot use the shim here, because that will only result in infinite recursion ty::InstanceDef::Virtual(_, idx) => { - let ptr_size = self.memory.pointer_size(); + let ptr_size = self.memory().pointer_size(); let ptr_align = self.tcx.data_layout.pointer_align; let (ptr, vtable) = self.into_ptr_vtable_pair(args[0].value)?; - let fn_ptr = self.memory.read_ptr_sized( + let fn_ptr = self.memory().read_ptr_sized( vtable.offset(ptr_size * (idx as u64 + 3), &self)?, ptr_align )?.to_ptr()?; - let instance = self.memory.get_fn(fn_ptr)?; + let instance = self.memory().get_fn(fn_ptr)?; let mut args = args.to_vec(); let ty = self.layout_of(args[0].ty)?.field(&self, 0)?.ty; args[0].ty = ty; diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs index b6c7feda19fa8..2a9129498ad52 100644 --- a/src/librustc_mir/interpret/traits.rs +++ b/src/librustc_mir/interpret/traits.rs @@ -25,26 +25,26 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let size = layout.size.bytes(); let align = layout.align.abi(); - let ptr_size = self.memory.pointer_size(); + let ptr_size = self.memory().pointer_size(); let ptr_align = self.tcx.data_layout.pointer_align; let methods = self.tcx.vtable_methods(trait_ref); - let vtable = self.memory.allocate( + let vtable = self.memory_mut().allocate( ptr_size * (3 + methods.len() as u64), ptr_align, MemoryKind::Stack, )?; let drop = ::monomorphize::resolve_drop_in_place(*self.tcx, ty); - let drop = self.memory.create_fn_alloc(drop); - self.memory.write_ptr_sized_unsigned(vtable, ptr_align, drop.into())?; + let drop = self.memory_mut().create_fn_alloc(drop); + self.memory_mut().write_ptr_sized_unsigned(vtable, ptr_align, drop.into())?; let size_ptr = vtable.offset(ptr_size, &self)?; - self.memory.write_ptr_sized_unsigned(size_ptr, ptr_align, Scalar::Bits { + 
self.memory_mut().write_ptr_sized_unsigned(size_ptr, ptr_align, Scalar::Bits { bits: size as u128, defined: ptr_size.bits() as u8, })?; let align_ptr = vtable.offset(ptr_size * 2, &self)?; - self.memory.write_ptr_sized_unsigned(align_ptr, ptr_align, Scalar::Bits { + self.memory_mut().write_ptr_sized_unsigned(align_ptr, ptr_align, Scalar::Bits { bits: align as u128, defined: ptr_size.bits() as u8, })?; @@ -52,13 +52,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { for (i, method) in methods.iter().enumerate() { if let Some((def_id, substs)) = *method { let instance = self.resolve(def_id, substs)?; - let fn_ptr = self.memory.create_fn_alloc(instance); + let fn_ptr = self.memory_mut().create_fn_alloc(instance); let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?; - self.memory.write_ptr_sized_unsigned(method_ptr, ptr_align, fn_ptr.into())?; + self.memory_mut().write_ptr_sized_unsigned(method_ptr, ptr_align, fn_ptr.into())?; } } - self.memory.mark_static_initialized( + self.memory_mut().mark_static_initialized( vtable.alloc_id, Mutability::Immutable, )?; @@ -76,7 +76,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { match self.read_ptr(vtable, pointer_align, self.tcx.mk_nil_ptr())? 
{ // some values don't need to call a drop impl, so the value is null Value::Scalar(Scalar::Bits { bits: 0, defined} ) if defined == pointer_size => Ok(None), - Value::Scalar(Scalar::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some), + Value::Scalar(Scalar::Ptr(drop_fn)) => self.memory().get_fn(drop_fn).map(Some), _ => err!(ReadBytesAsPointer), } } @@ -85,10 +85,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { &self, vtable: Pointer, ) -> EvalResult<'tcx, (Size, Align)> { - let pointer_size = self.memory.pointer_size(); + let pointer_size = self.memory().pointer_size(); let pointer_align = self.tcx.data_layout.pointer_align; - let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64; - let align = self.memory.read_ptr_sized( + let size = self.memory().read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64; + let align = self.memory().read_ptr_sized( vtable.offset(pointer_size * 2, self)?, pointer_align )?.to_bits(pointer_size)? as u64; From 6c0f502fe6bb54b285d66850ca8171e0c98091b0 Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Thu, 21 Jun 2018 21:40:14 -0700 Subject: [PATCH 02/14] Implement Clone, Eq and Hash for the heap and stack I use a pattern binding in each custom impl, so that adding fields to `Memory` or `Frame` will cause a compiler error instead of causing e.g. `PartialEq` to become invalid. This may be too cute. This adds several requirements to `Machine::MemoryData`. These can be removed if we don't want this associated type to be part of the equality of `Memory`. 
--- src/librustc_mir/interpret/eval_context.rs | 88 +++++++++++++++++++++- src/librustc_mir/interpret/machine.rs | 4 +- src/librustc_mir/interpret/memory.rs | 82 +++++++++++++++++++- src/librustc_mir/interpret/place.rs | 4 +- 4 files changed, 170 insertions(+), 8 deletions(-) diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index fe95b9319f320..50987af409f40 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -1,5 +1,6 @@ use std::fmt::Write; -use std::mem; +use std::hash::{Hash, Hasher}; +use std::{mem, ptr}; use rustc::hir::def_id::DefId; use rustc::hir::def::Def; @@ -44,7 +45,7 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { pub(crate) terminators_remaining: usize, } -struct EvalState<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { +pub(crate) struct EvalState<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { /// The virtual memory system. memory: Memory<'a, 'mir, 'tcx, M>, @@ -52,7 +53,44 @@ struct EvalState<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { stack: Vec>, } +impl<'a, 'mir, 'tcx, M> Clone for EvalState<'a, 'mir, 'tcx, M> + where M: Machine<'mir, 'tcx>, + 'tcx: 'a + 'mir, +{ + fn clone(&self) -> Self { + EvalState { + memory: self.memory.clone(), + stack: self.stack.clone(), + } + } +} + +impl<'a, 'mir, 'tcx, M> Eq for EvalState<'a, 'mir, 'tcx, M> + where M: Machine<'mir, 'tcx>, + 'tcx: 'a + 'mir, +{} + +impl<'a, 'mir, 'tcx, M> PartialEq for EvalState<'a, 'mir, 'tcx, M> + where M: Machine<'mir, 'tcx>, + 'tcx: 'a + 'mir, +{ + fn eq(&self, other: &Self) -> bool { + self.memory == other.memory + && self.stack == other.stack + } +} + +impl<'a, 'mir, 'tcx, M> Hash for EvalState<'a, 'mir, 'tcx, M> + where M: Machine<'mir, 'tcx>, + 'tcx: 'a + 'mir, +{ + fn hash(&self, state: &mut H) { + self.memory.hash(state); + self.stack.hash(state); + } +} /// A stack frame. 
+#[derive(Clone)] pub struct Frame<'mir, 'tcx: 'mir> { //////////////////////////////////////////////////////////////////////////////// // Function and callsite information @@ -94,6 +132,52 @@ pub struct Frame<'mir, 'tcx: 'mir> { pub stmt: usize, } +impl<'mir, 'tcx: 'mir> Eq for Frame<'mir, 'tcx> {} + +impl<'mir, 'tcx: 'mir> PartialEq for Frame<'mir, 'tcx> { + fn eq(&self, other: &Self) -> bool { + let Frame { + mir, + instance: _, + span: _, + return_to_block, + return_place, + locals, + block, + stmt, + } = self; + + ptr::eq(mir, &other.mir) + && *return_to_block == other.return_to_block // TODO: Are these two necessary? + && *return_place == other.return_place + && *locals == other.locals + && *block == other.block + && *stmt == other.stmt + } +} + +impl<'mir, 'tcx: 'mir> Hash for Frame<'mir, 'tcx> { + fn hash(&self, state: &mut H) { + let Frame { + mir, + instance: _, + span: _, + return_to_block, + return_place, + locals, + block, + stmt, + } = self; + + (mir as *const _ as usize).hash(state); + return_to_block.hash(state); + return_place.hash(state); + locals.hash(state); + block.hash(state); + stmt.hash(state); + } +} + #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub enum StackPopCleanup { /// The stackframe existed to compute the initial value of a static/constant, make sure it diff --git a/src/librustc_mir/interpret/machine.rs b/src/librustc_mir/interpret/machine.rs index 4d04900320fe9..f6491d7f1a469 100644 --- a/src/librustc_mir/interpret/machine.rs +++ b/src/librustc_mir/interpret/machine.rs @@ -2,6 +2,8 @@ //! This separation exists to ensure that no fancy miri features like //! interpreting common C functions leak into CTFE. 
+use std::hash::Hash; + use rustc::mir::interpret::{AllocId, EvalResult, Scalar, Pointer, AccessKind, GlobalId}; use super::{EvalContext, Place, ValTy, Memory}; @@ -15,7 +17,7 @@ use syntax::ast::Mutability; /// and some use case dependent behaviour can instead be applied pub trait Machine<'mir, 'tcx>: Sized { /// Additional data that can be accessed via the Memory - type MemoryData; + type MemoryData: Clone + Eq + Hash; /// Additional memory kinds a machine wishes to distinguish from the builtin ones type MemoryKinds: ::std::fmt::Debug + PartialEq + Copy + Clone; diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 4d1188b0f152b..98ab361c239ee 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -1,4 +1,5 @@ use std::collections::VecDeque; +use std::hash::{Hash, Hasher}; use std::ptr; use rustc::hir::def_id::DefId; @@ -9,7 +10,7 @@ use rustc::ty::layout::{self, Align, TargetDataLayout, Size}; use rustc::mir::interpret::{Pointer, AllocId, Allocation, AccessKind, Value, EvalResult, Scalar, EvalErrorKind, GlobalId, AllocType}; pub use rustc::mir::interpret::{write_target_uint, write_target_int, read_target_uint}; -use rustc_data_structures::fx::{FxHashSet, FxHashMap}; +use rustc_data_structures::fx::{FxHashSet, FxHashMap, FxHasher}; use syntax::ast::Mutability; @@ -19,7 +20,7 @@ use super::{EvalContext, Machine}; // Allocations and pointers //////////////////////////////////////////////////////////////////////////////// -#[derive(Debug, PartialEq, Copy, Clone)] +#[derive(Debug, PartialEq, Eq, Copy, Clone)] pub enum MemoryKind { /// Error if deallocated except during a stack pop Stack, @@ -47,6 +48,81 @@ pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { pub tcx: TyCtxtAt<'a, 'tcx, 'tcx>, } +impl<'a, 'mir, 'tcx, M> Clone for Memory<'a, 'mir, 'tcx, M> + where M: Machine<'mir, 'tcx>, + 'tcx: 'a + 'mir, +{ + fn clone(&self) -> Self { + Memory { + data: 
self.data.clone(), + alloc_kind: self.alloc_kind.clone(), + alloc_map: self.alloc_map.clone(), + cur_frame: self.cur_frame.clone(), + tcx: self.tcx.clone(), + } + } +} + +impl<'a, 'mir, 'tcx, M> Eq for Memory<'a, 'mir, 'tcx, M> + where M: Machine<'mir, 'tcx>, + 'tcx: 'a + 'mir, +{} + +impl<'a, 'mir, 'tcx, M> PartialEq for Memory<'a, 'mir, 'tcx, M> + where M: Machine<'mir, 'tcx>, + 'tcx: 'a + 'mir, +{ + fn eq(&self, other: &Self) -> bool { + let Memory { + data, + alloc_kind, + alloc_map, + cur_frame, + tcx, + } = self; + + *data == other.data + && *alloc_kind == other.alloc_kind + && *alloc_map == other.alloc_map + && *cur_frame == other.cur_frame + && ptr::eq(tcx, &other.tcx) + } +} + +impl<'a, 'mir, 'tcx, M> Hash for Memory<'a, 'mir, 'tcx, M> + where M: Machine<'mir, 'tcx>, + 'tcx: 'a + 'mir, +{ + fn hash(&self, state: &mut H) { + let Memory { + data, + alloc_kind: _, + alloc_map: _, + cur_frame, + tcx, + } = self; + + data.hash(state); + cur_frame.hash(state); + (tcx as *const _ as usize).hash(state); + + // We ignore some fields which don't change between evaluation steps. + + // Since HashMaps which contain the same items may have different + // iteration orders, we use a commutative operation (in this case + // addition, but XOR would also work), to combine the hash of each + // `Allocation`. 
+ self.allocations() + .map(|allocs| { + let mut h = FxHasher::default(); + allocs.hash(&mut h); + h.finish() + }) + .fold(0u64, |hash, x| hash.wrapping_add(x)) + .hash(state); + } +} + impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { pub fn new(tcx: TyCtxtAt<'a, 'tcx, 'tcx>, data: M::MemoryData) -> Self { Memory { @@ -866,7 +942,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { for i in 0..size.bytes() { let defined = undef_mask.get(src.offset + Size::from_bytes(i)); - + for j in 0..repeat { dest_allocation.undef_mask.set( dest.offset + Size::from_bytes(i + (size.bytes() * j)), diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs index 95391bc8963e9..bde2e5945f5d4 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -7,7 +7,7 @@ use rustc::mir::interpret::{GlobalId, Value, Scalar, EvalResult, Pointer}; use super::{EvalContext, Machine, ValTy}; use interpret::memory::HasMemory; -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub enum Place { /// A place referring to a value allocated in the `Memory` system. Ptr { @@ -24,7 +24,7 @@ pub enum Place { Local { frame: usize, local: mir::Local }, } -#[derive(Copy, Clone, Debug, Eq, PartialEq)] +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub enum PlaceExtra { None, Length(u64), From 7f9b01a0fc680943a5c37ee820ae3fbd6abc40f6 Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Fri, 22 Jun 2018 00:46:29 -0700 Subject: [PATCH 03/14] Add miri infinite loop detection Use the approach suggested by @oli-obk, a table holding `EvalState` hashes and a table holding full `EvalState` objects. When a hash collision is observed, the state is cloned and put into the full table. If the collision was not spurious, it will be detected during the next iteration of the infinite loop. 
--- src/librustc_mir/interpret/eval_context.rs | 71 +++++++++++++++++++--- src/librustc_mir/interpret/step.rs | 14 +++-- 2 files changed, 71 insertions(+), 14 deletions(-) diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index 50987af409f40..4699444bd4249 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -10,6 +10,7 @@ use rustc::ty::layout::{self, Size, Align, HasDataLayout, IntegerExt, LayoutOf, use rustc::ty::subst::{Subst, Substs}; use rustc::ty::{self, Ty, TyCtxt, TypeAndMut}; use rustc::ty::query::TyCtxtAt; +use rustc_data_structures::fx::{FxHashSet, FxHasher}; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use rustc::mir::interpret::{ FrameInfo, GlobalId, Value, Scalar, @@ -34,15 +35,16 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { pub param_env: ty::ParamEnv<'tcx>, /// Virtual memory and call stack. - state: EvalState<'a, 'mir, 'tcx, M>, + pub(crate) state: EvalState<'a, 'mir, 'tcx, M>, /// The maximum number of stack frames allowed pub(crate) stack_limit: usize, - /// The maximum number of terminators that may be evaluated. - /// This prevents infinite loops and huge computations from freezing up const eval. - /// Remove once halting problem is solved. - pub(crate) terminators_remaining: usize, + /// The number of terminators to be evaluated before enabling the infinite + /// loop detector. + pub(crate) steps_until_detector_enabled: usize, + + pub(crate) loop_detector: InfiniteLoopDetector<'a, 'mir, 'tcx, M>, } pub(crate) struct EvalState<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { @@ -178,6 +180,56 @@ impl<'mir, 'tcx: 'mir> Hash for Frame<'mir, 'tcx> { } } +pub(crate) struct InfiniteLoopDetector<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { + /// The set of all `EvalState` *hashes* observed by this detector. + /// + /// Not a proper bloom filter. 
+ bloom: FxHashSet, + + /// The set of all `EvalState`s observed by this detector. + /// + /// An `EvalState` will only be fully cloned once it has caused a collision + /// in `bloom`. As a result, the detector must observe *two* full cycles of + /// an infinite loop before it triggers. + snapshots: FxHashSet>, +} + +impl<'a, 'mir, 'tcx, M> Default for InfiniteLoopDetector<'a, 'mir, 'tcx, M> + where M: Machine<'mir, 'tcx>, + 'tcx: 'a + 'mir, +{ + fn default() -> Self { + InfiniteLoopDetector { + bloom: FxHashSet::default(), + snapshots: FxHashSet::default(), + } + } +} + +impl<'a, 'mir, 'tcx, M> InfiniteLoopDetector<'a, 'mir, 'tcx, M> + where M: Machine<'mir, 'tcx>, + 'tcx: 'a + 'mir, +{ + pub fn observe(&mut self, snapshot: &EvalState<'a, 'mir, 'tcx, M>) -> Result<(), (/*TODO*/)> { + let mut fx = FxHasher::default(); + snapshot.hash(&mut fx); + let hash = fx.finish(); + + if self.bloom.insert(hash) { + // No collision + return Ok(()) + } + + if self.snapshots.insert(snapshot.clone()) { + // Spurious collision or first cycle + return Ok(()) + } + + // Second cycle, + Err(()) + } +} + #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub enum StackPopCleanup { /// The stackframe existed to compute the initial value of a static/constant, make sure it @@ -280,16 +332,17 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M stack: Vec::new(), }, stack_limit: tcx.sess.const_eval_stack_frame_limit, - terminators_remaining: MAX_TERMINATORS, + loop_detector: Default::default(), + steps_until_detector_enabled: MAX_TERMINATORS, } } pub(crate) fn with_fresh_body R, R>(&mut self, f: F) -> R { let stack = mem::replace(self.stack_mut(), Vec::new()); - let terminators_remaining = mem::replace(&mut self.terminators_remaining, MAX_TERMINATORS); + let steps = mem::replace(&mut self.steps_until_detector_enabled, MAX_TERMINATORS); let r = f(self); *self.stack_mut() = stack; - self.terminators_remaining = terminators_remaining; + 
self.steps_until_detector_enabled = steps; r } @@ -634,7 +687,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } Aggregate(ref kind, ref operands) => { - self.inc_step_counter_and_check_limit(operands.len()); + self.inc_step_counter_and_detect_loops(operands.len()); let (dest, active_field_index) = match **kind { mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs index 86eb02312bd19..e8eabe2b1bf6f 100644 --- a/src/librustc_mir/interpret/step.rs +++ b/src/librustc_mir/interpret/step.rs @@ -8,12 +8,16 @@ use rustc::mir::interpret::EvalResult; use super::{EvalContext, Machine}; impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { - pub fn inc_step_counter_and_check_limit(&mut self, n: usize) { - self.terminators_remaining = self.terminators_remaining.saturating_sub(n); - if self.terminators_remaining == 0 { + pub fn inc_step_counter_and_detect_loops(&mut self, n: usize) { + self.steps_until_detector_enabled + = self.steps_until_detector_enabled.saturating_sub(n); + + if self.steps_until_detector_enabled == 0 { + let _ = self.loop_detector.observe(&self.state); // TODO: Handle error + // FIXME(#49980): make this warning a lint self.tcx.sess.span_warn(self.frame().span, "Constant evaluating a complex constant, this might take some time"); - self.terminators_remaining = 1_000_000; + self.steps_until_detector_enabled = 1_000_000; } } @@ -36,7 +40,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { return Ok(true); } - self.inc_step_counter_and_check_limit(1); + self.inc_step_counter_and_detect_loops(1); let terminator = basic_block.terminator(); assert_eq!(old_frames, self.cur_frame()); From 788c5f3c8b1453d5c7815bc345b8b7e7f79ebf80 Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Fri, 22 Jun 2018 12:36:54 -0700 Subject: [PATCH 04/14] Revert "Refactor EvalContext stack 
and heap into inner struct" This reverts commit 59d21c526c036d7097d05edd6dffdad9c5b1cb62, and uses tuple to store the mutable parts of an EvalContext (which now includes `Machine`). This requires that `Machine` be `Clone`. --- src/librustc_mir/interpret/cast.rs | 8 +- src/librustc_mir/interpret/const_eval.rs | 11 +- src/librustc_mir/interpret/eval_context.rs | 209 ++++++++----------- src/librustc_mir/interpret/memory.rs | 20 +- src/librustc_mir/interpret/place.rs | 4 +- src/librustc_mir/interpret/step.rs | 18 +- src/librustc_mir/interpret/terminator/mod.rs | 8 +- src/librustc_mir/interpret/traits.rs | 26 +-- 8 files changed, 131 insertions(+), 173 deletions(-) diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs index 4bf4009115353..e69e7a522ab73 100644 --- a/src/librustc_mir/interpret/cast.rs +++ b/src/librustc_mir/interpret/cast.rs @@ -76,8 +76,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { // No alignment check needed for raw pointers. But we have to truncate to target ptr size. 
TyRawPtr(_) => { Ok(Scalar::Bits { - bits: self.memory().truncate_to_ptr(v).0 as u128, - defined: self.memory().pointer_size().bits() as u8, + bits: self.memory.truncate_to_ptr(v).0 as u128, + defined: self.memory.pointer_size().bits() as u8, }) }, @@ -92,7 +92,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { match dest_ty.sty { // float -> uint TyUint(t) => { - let width = t.bit_width().unwrap_or(self.memory().pointer_size().bits() as usize); + let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize); match fty { FloatTy::F32 => Ok(Scalar::Bits { bits: Single::from_bits(bits).to_u128(width).value, @@ -106,7 +106,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { }, // float -> int TyInt(t) => { - let width = t.bit_width().unwrap_or(self.memory().pointer_size().bits() as usize); + let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize); match fty { FloatTy::F32 => Ok(Scalar::Bits { bits: Single::from_bits(bits).to_i128(width).value as u128, diff --git a/src/librustc_mir/interpret/const_eval.rs b/src/librustc_mir/interpret/const_eval.rs index fe2c46aa48fd8..e84132f27cc62 100644 --- a/src/librustc_mir/interpret/const_eval.rs +++ b/src/librustc_mir/interpret/const_eval.rs @@ -88,7 +88,7 @@ pub fn value_to_const_value<'tcx>( Value::ScalarPair(a, b) => Ok(ConstValue::ScalarPair(a, b)), Value::ByRef(ptr, align) => { let ptr = ptr.to_ptr().unwrap(); - let alloc = ecx.memory().get(ptr.alloc_id)?; + let alloc = ecx.memory.get(ptr.alloc_id)?; assert!(alloc.align.abi() >= align.abi()); assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= layout.size.bytes()); let mut alloc = alloc.clone(); @@ -149,7 +149,7 @@ fn eval_body_using_ecx<'a, 'mir, 'tcx>( } let layout = ecx.layout_of(mir.return_ty().subst(tcx, cid.instance.substs))?; assert!(!layout.is_unsized()); - let ptr = ecx.memory_mut().allocate( + let ptr = ecx.memory.allocate( layout.size, layout.align, 
MemoryKind::Stack, @@ -185,6 +185,7 @@ fn eval_body_using_ecx<'a, 'mir, 'tcx>( Ok((value, ptr, layout.ty)) } +#[derive(Debug, Clone, Eq, PartialEq, Hash)] pub struct CompileTimeEvaluator; impl<'tcx> Into> for ConstEvalError { @@ -486,7 +487,7 @@ pub fn const_variant_index<'a, 'tcx>( let (ptr, align) = match value { Value::ScalarPair(..) | Value::Scalar(_) => { let layout = ecx.layout_of(val.ty)?; - let ptr = ecx.memory_mut().allocate(layout.size, layout.align, MemoryKind::Stack)?.into(); + let ptr = ecx.memory.allocate(layout.size, layout.align, MemoryKind::Stack)?.into(); ecx.write_value_to_ptr(value, ptr, layout.align, val.ty)?; (ptr, layout.align) }, @@ -515,9 +516,9 @@ pub fn const_value_to_allocation_provider<'a, 'tcx>( ()); let value = ecx.const_to_value(val.val)?; let layout = ecx.layout_of(val.ty)?; - let ptr = ecx.memory_mut().allocate(layout.size, layout.align, MemoryKind::Stack)?; + let ptr = ecx.memory.allocate(layout.size, layout.align, MemoryKind::Stack)?; ecx.write_value_to_ptr(value, ptr.into(), layout.align, val.ty)?; - let alloc = ecx.memory().get(ptr.alloc_id)?; + let alloc = ecx.memory.get(ptr.alloc_id)?; Ok(tcx.intern_const_alloc(alloc.clone())) }; result().expect("unable to convert ConstValue to Allocation") diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index 4699444bd4249..85aeb3c7df51a 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -34,8 +34,11 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { /// Bounds in scope for polymorphic evaluations. pub param_env: ty::ParamEnv<'tcx>, - /// Virtual memory and call stack. - pub(crate) state: EvalState<'a, 'mir, 'tcx, M>, + /// The virtual memory system. + pub memory: Memory<'a, 'mir, 'tcx, M>, + + /// The virtual call stack. 
+ pub(crate) stack: Vec>, /// The maximum number of stack frames allowed pub(crate) stack_limit: usize, @@ -47,50 +50,6 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { pub(crate) loop_detector: InfiniteLoopDetector<'a, 'mir, 'tcx, M>, } -pub(crate) struct EvalState<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { - /// The virtual memory system. - memory: Memory<'a, 'mir, 'tcx, M>, - - /// The virtual call stack. - stack: Vec>, -} - -impl<'a, 'mir, 'tcx, M> Clone for EvalState<'a, 'mir, 'tcx, M> - where M: Machine<'mir, 'tcx>, - 'tcx: 'a + 'mir, -{ - fn clone(&self) -> Self { - EvalState { - memory: self.memory.clone(), - stack: self.stack.clone(), - } - } -} - -impl<'a, 'mir, 'tcx, M> Eq for EvalState<'a, 'mir, 'tcx, M> - where M: Machine<'mir, 'tcx>, - 'tcx: 'a + 'mir, -{} - -impl<'a, 'mir, 'tcx, M> PartialEq for EvalState<'a, 'mir, 'tcx, M> - where M: Machine<'mir, 'tcx>, - 'tcx: 'a + 'mir, -{ - fn eq(&self, other: &Self) -> bool { - self.memory == other.memory - && self.stack == other.stack - } -} - -impl<'a, 'mir, 'tcx, M> Hash for EvalState<'a, 'mir, 'tcx, M> - where M: Machine<'mir, 'tcx>, - 'tcx: 'a + 'mir, -{ - fn hash(&self, state: &mut H) { - self.memory.hash(state); - self.stack.hash(state); - } -} /// A stack frame. #[derive(Clone)] pub struct Frame<'mir, 'tcx: 'mir> { @@ -180,22 +139,26 @@ impl<'mir, 'tcx: 'mir> Hash for Frame<'mir, 'tcx> { } } +/// The virtual machine state during const-evaluation at a given point in time. +type EvalSnapshot<'a, 'mir, 'tcx, M> + = (M, Vec>, Memory<'a, 'mir, 'tcx, M>); + pub(crate) struct InfiniteLoopDetector<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { - /// The set of all `EvalState` *hashes* observed by this detector. + /// The set of all `EvalSnapshot` *hashes* observed by this detector. /// /// Not a proper bloom filter. bloom: FxHashSet, - /// The set of all `EvalState`s observed by this detector. + /// The set of all `EvalSnapshot`s observed by this detector. 
/// - /// An `EvalState` will only be fully cloned once it has caused a collision + /// An `EvalSnapshot` will only be fully cloned once it has caused a collision /// in `bloom`. As a result, the detector must observe *two* full cycles of /// an infinite loop before it triggers. - snapshots: FxHashSet>, + snapshots: FxHashSet>, } impl<'a, 'mir, 'tcx, M> Default for InfiniteLoopDetector<'a, 'mir, 'tcx, M> - where M: Machine<'mir, 'tcx>, + where M: Eq + Hash + Machine<'mir, 'tcx>, 'tcx: 'a + 'mir, { fn default() -> Self { @@ -207,10 +170,17 @@ impl<'a, 'mir, 'tcx, M> Default for InfiniteLoopDetector<'a, 'mir, 'tcx, M> } impl<'a, 'mir, 'tcx, M> InfiniteLoopDetector<'a, 'mir, 'tcx, M> - where M: Machine<'mir, 'tcx>, + where M: Clone + Eq + Hash + Machine<'mir, 'tcx>, 'tcx: 'a + 'mir, { - pub fn observe(&mut self, snapshot: &EvalState<'a, 'mir, 'tcx, M>) -> Result<(), (/*TODO*/)> { + pub fn observe( + &mut self, + machine: &M, + stack: &Vec>, + memory: &Memory<'a, 'mir, 'tcx, M>, + ) -> Result<(), (/*TODO*/)> { + let snapshot = (machine, stack, memory); + let mut fx = FxHasher::default(); snapshot.hash(&mut fx); let hash = fx.finish(); @@ -220,7 +190,7 @@ impl<'a, 'mir, 'tcx, M> InfiniteLoopDetector<'a, 'mir, 'tcx, M> return Ok(()) } - if self.snapshots.insert(snapshot.clone()) { + if self.snapshots.insert((machine.clone(), stack.clone(), memory.clone())) { // Spurious collision or first cycle return Ok(()) } @@ -322,15 +292,15 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M param_env: ty::ParamEnv<'tcx>, machine: M, memory_data: M::MemoryData, - ) -> Self { + ) -> Self + where M: Eq + Hash + { EvalContext { machine, tcx, param_env, - state: EvalState { - memory: Memory::new(tcx, memory_data), - stack: Vec::new(), - }, + memory: Memory::new(tcx, memory_data), + stack: Vec::new(), stack_limit: tcx.sess.const_eval_stack_frame_limit, loop_detector: Default::default(), steps_until_detector_enabled: MAX_TERMINATORS, @@ -338,10 +308,10 @@ 
impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } pub(crate) fn with_fresh_body R, R>(&mut self, f: F) -> R { - let stack = mem::replace(self.stack_mut(), Vec::new()); + let stack = mem::replace(&mut self.stack, Vec::new()); let steps = mem::replace(&mut self.steps_until_detector_enabled, MAX_TERMINATORS); let r = f(self); - *self.stack_mut() = stack; + self.stack = stack; self.steps_until_detector_enabled = steps; r } @@ -350,34 +320,29 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let layout = self.layout_of(ty)?; assert!(!layout.is_unsized(), "cannot alloc memory for unsized type"); - self.memory_mut().allocate(layout.size, layout.align, MemoryKind::Stack) + self.memory.allocate(layout.size, layout.align, MemoryKind::Stack) } pub fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> { - &self.state.memory + &self.memory } pub fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> { - &mut self.state.memory + &mut self.memory } - #[inline] pub fn stack(&self) -> &[Frame<'mir, 'tcx>] { - &self.state.stack - } - - pub fn stack_mut(&mut self) -> &mut Vec> { - &mut self.state.stack + &self.stack } #[inline] pub fn cur_frame(&self) -> usize { - assert!(self.stack().len() > 0); - self.stack().len() - 1 + assert!(self.stack.len() > 0); + self.stack.len() - 1 } pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> { - let ptr = self.memory_mut().allocate_bytes(s.as_bytes()); + let ptr = self.memory.allocate_bytes(s.as_bytes()); Ok(Scalar::Ptr(ptr).to_value_with_len(s.len() as u64, self.tcx.tcx)) } @@ -395,7 +360,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } ConstValue::ByRef(alloc, offset) => { // FIXME: Allocate new AllocId for all constants inside - let id = self.memory_mut().allocate_value(alloc.clone(), MemoryKind::Stack)?; + let id = self.memory.allocate_value(alloc.clone(), MemoryKind::Stack)?; Ok(Value::ByRef(Pointer::new(id, offset).into(), 
alloc.align)) }, ConstValue::ScalarPair(a, b) => Ok(Value::ScalarPair(a, b)), @@ -566,7 +531,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M IndexVec::new() }; - self.stack_mut().push(Frame { + self.stack.push(Frame { mir, block: mir::START_BLOCK, return_to_block, @@ -577,9 +542,9 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M stmt: 0, }); - self.memory_mut().cur_frame = self.cur_frame(); + self.memory.cur_frame = self.cur_frame(); - if self.stack().len() > self.stack_limit { + if self.stack.len() > self.stack_limit { err!(StackFrameLimitReached) } else { Ok(()) @@ -589,18 +554,18 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> { ::log_settings::settings().indentation -= 1; M::end_region(self, None)?; - let frame = self.stack_mut().pop().expect( + let frame = self.stack.pop().expect( "tried to pop a stack frame, but there were none", ); - if !self.stack().is_empty() { + if !self.stack.is_empty() { // TODO: Is this the correct time to start considering these accesses as originating from the returned-to stack frame? - self.memory_mut().cur_frame = self.cur_frame(); + self.memory.cur_frame = self.cur_frame(); } match frame.return_to_block { StackPopCleanup::MarkStatic(mutable) => { if let Place::Ptr { ptr, .. } = frame.return_place { // FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions - self.memory_mut().mark_static_initialized( + self.memory.mark_static_initialized( ptr.to_ptr()?.alloc_id, mutable, )? 
@@ -623,8 +588,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M if let Some(Value::ByRef(ptr, _align)) = local { trace!("deallocating local"); let ptr = ptr.to_ptr()?; - self.memory().dump_alloc(ptr.alloc_id); - self.memory_mut().deallocate_local(ptr)?; + self.memory.dump_alloc(ptr.alloc_id); + self.memory.deallocate_local(ptr)?; }; Ok(()) } @@ -637,7 +602,9 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M &mut self, rvalue: &mir::Rvalue<'tcx>, place: &mir::Place<'tcx>, - ) -> EvalResult<'tcx> { + ) -> EvalResult<'tcx> + where M: Clone + Eq + Hash, + { let dest = self.eval_place(place)?; let dest_ty = self.place_ty(place); @@ -744,7 +711,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let src = self.eval_place(place)?; let ty = self.place_ty(place); let (_, len) = src.elem_ty_and_len(ty, self.tcx.tcx); - let defined = self.memory().pointer_size().bits() as u8; + let defined = self.memory.pointer_size().bits() as u8; self.write_scalar( dest, Scalar::Bits { @@ -786,7 +753,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let layout = self.layout_of(ty)?; assert!(!layout.is_unsized(), "SizeOf nullary MIR operator called for unsized type"); - let defined = self.memory().pointer_size().bits() as u8; + let defined = self.memory.pointer_size().bits() as u8; self.write_scalar( dest, Scalar::Bits { @@ -881,7 +848,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M def_id, substs, ).ok_or_else(|| EvalErrorKind::TooGeneric.into()); - let fn_ptr = self.memory_mut().create_fn_alloc(instance?); + let fn_ptr = self.memory.create_fn_alloc(instance?); let valty = ValTy { value: Value::Scalar(fn_ptr.into()), ty: dest_ty, @@ -917,7 +884,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M substs, ty::ClosureKind::FnOnce, ); - let fn_ptr = 
self.memory_mut().create_fn_alloc(instance); + let fn_ptr = self.memory.create_fn_alloc(instance); let valty = ValTy { value: Value::Scalar(fn_ptr.into()), ty: dest_ty, @@ -1194,7 +1161,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M pub fn force_allocation(&mut self, place: Place) -> EvalResult<'tcx, Place> { let new_place = match place { Place::Local { frame, local } => { - match self.stack()[frame].locals[local] { + match self.stack[frame].locals[local] { None => return err!(DeadLocal), Some(Value::ByRef(ptr, align)) => { Place::Ptr { @@ -1204,11 +1171,11 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } } Some(val) => { - let ty = self.stack()[frame].mir.local_decls[local].ty; - let ty = self.monomorphize(ty, self.stack()[frame].instance.substs); + let ty = self.stack[frame].mir.local_decls[local].ty; + let ty = self.monomorphize(ty, self.stack[frame].instance.substs); let layout = self.layout_of(ty)?; let ptr = self.alloc_ptr(ty)?; - self.stack_mut()[frame].locals[local] = + self.stack[frame].locals[local] = Some(Value::ByRef(ptr.into(), layout.align)); // it stays live let place = Place::from_ptr(ptr, layout.align); self.write_value(ValTy { value: val, ty }, place)?; @@ -1290,10 +1257,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } Place::Local { frame, local } => { - let dest = self.stack()[frame].get_local(local)?; + let dest = self.stack[frame].get_local(local)?; self.write_value_possibly_by_val( src_val, - |this, val| this.stack_mut()[frame].set_local(local, val), + |this, val| this.stack[frame].set_local(local, val), dest, dest_ty, ) @@ -1335,7 +1302,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } else { let dest_ptr = self.alloc_ptr(dest_ty)?.into(); let layout = self.layout_of(dest_ty)?; - self.memory_mut().copy(src_ptr, align.min(layout.align), dest_ptr, layout.align, layout.size, false)?; + 
self.memory.copy(src_ptr, align.min(layout.align), dest_ptr, layout.align, layout.size, false)?; write_dest(self, Value::ByRef(dest_ptr, layout.align))?; } } else { @@ -1357,7 +1324,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M trace!("write_value_to_ptr: {:#?}, {}, {:#?}", value, dest_ty, layout); match value { Value::ByRef(ptr, align) => { - self.memory_mut().copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size, false) + self.memory.copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size, false) } Value::Scalar(scalar) => { let signed = match layout.abi { @@ -1370,7 +1337,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M _ => bug!("write_value_to_ptr: invalid ByVal layout: {:#?}", layout), } }; - self.memory_mut().write_scalar(dest, dest_align, scalar, layout.size, signed) + self.memory.write_scalar(dest, dest_align, scalar, layout.size, signed) } Value::ScalarPair(a_val, b_val) => { trace!("write_value_to_ptr valpair: {:#?}", layout); @@ -1383,8 +1350,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let b_offset = a_size.abi_align(b.align(&self)); let b_ptr = dest.ptr_offset(b_offset, &self)?.into(); // TODO: What about signedess? 
- self.memory_mut().write_scalar(a_ptr, dest_align, a_val, a_size, false)?; - self.memory_mut().write_scalar(b_ptr, dest_align, b_val, b_size, false) + self.memory.write_scalar(a_ptr, dest_align, a_val, a_size, false)?; + self.memory.write_scalar(b_ptr, dest_align, b_val, b_size, false) } } } @@ -1415,8 +1382,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M ptr_align: Align, pointee_ty: Ty<'tcx>, ) -> EvalResult<'tcx, Value> { - let ptr_size = self.memory().pointer_size(); - let p: Scalar = self.memory().read_ptr_sized(ptr, ptr_align)?.into(); + let ptr_size = self.memory.pointer_size(); + let p: Scalar = self.memory.read_ptr_sized(ptr, ptr_align)?.into(); if self.type_is_sized(pointee_ty) { Ok(p.to_value()) } else { @@ -1424,11 +1391,11 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let extra = ptr.offset(ptr_size, self)?; match self.tcx.struct_tail(pointee_ty).sty { ty::TyDynamic(..) => Ok(p.to_value_with_vtable( - self.memory().read_ptr_sized(extra, ptr_align)?.to_ptr()?, + self.memory.read_ptr_sized(extra, ptr_align)?.to_ptr()?, )), ty::TySlice(..) | ty::TyStr => { let len = self - .memory() + .memory .read_ptr_sized(extra, ptr_align)? .to_bits(ptr_size)?; Ok(p.to_value_with_len(len as u64, self.tcx.tcx)) @@ -1446,10 +1413,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M ) -> EvalResult<'tcx> { match ty.sty { ty::TyBool => { - self.memory().read_scalar(ptr, ptr_align, Size::from_bytes(1))?.to_bool()?; + self.memory.read_scalar(ptr, ptr_align, Size::from_bytes(1))?.to_bool()?; } ty::TyChar => { - let c = self.memory().read_scalar(ptr, ptr_align, Size::from_bytes(4))?.to_bits(Size::from_bytes(4))? as u32; + let c = self.memory.read_scalar(ptr, ptr_align, Size::from_bytes(4))?.to_bits(Size::from_bytes(4))? as u32; match ::std::char::from_u32(c) { Some(..) 
=> (), None => return err!(InvalidChar(c as u128)), @@ -1457,7 +1424,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } ty::TyFnPtr(_) => { - self.memory().read_ptr_sized(ptr, ptr_align)?; + self.memory.read_ptr_sized(ptr, ptr_align)?; }, ty::TyRef(_, rty, _) | ty::TyRawPtr(ty::TypeAndMut { ty: rty, .. }) => { @@ -1472,7 +1439,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M if let layout::Abi::Scalar(ref scalar) = self.layout_of(ty)?.abi { let size = scalar.value.size(self); - self.memory().read_scalar(ptr, ptr_align, size)?; + self.memory.read_scalar(ptr, ptr_align, size)?; } } @@ -1493,7 +1460,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M pub fn try_read_value(&self, ptr: Scalar, ptr_align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Option> { let layout = self.layout_of(ty)?; - self.memory().check_align(ptr, ptr_align)?; + self.memory.check_align(ptr, ptr_align)?; if layout.size.bytes() == 0 { return Ok(Some(Value::Scalar(Scalar::undef()))); @@ -1506,7 +1473,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M match layout.abi { layout::Abi::Scalar(..) 
=> { - let scalar = self.memory().read_scalar(ptr, ptr_align, layout.size)?; + let scalar = self.memory.read_scalar(ptr, ptr_align, layout.size)?; Ok(Some(Value::Scalar(scalar))) } layout::Abi::ScalarPair(ref a, ref b) => { @@ -1515,8 +1482,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M let a_ptr = ptr; let b_offset = a_size.abi_align(b.align(self)); let b_ptr = ptr.offset(b_offset, self)?.into(); - let a_val = self.memory().read_scalar(a_ptr, ptr_align, a_size)?; - let b_val = self.memory().read_scalar(b_ptr, ptr_align, b_size)?; + let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?; + let b_val = self.memory.read_scalar(b_ptr, ptr_align, b_size)?; Ok(Some(Value::ScalarPair(a_val, b_val))) } _ => Ok(None), @@ -1524,11 +1491,11 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } pub fn frame(&self) -> &Frame<'mir, 'tcx> { - self.stack().last().expect("no call frames exist") + self.stack.last().expect("no call frames exist") } pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx> { - self.stack_mut().last_mut().expect("no call frames exist") + self.stack.last_mut().expect("no call frames exist") } pub(super) fn mir(&self) -> &'mir mir::Mir<'tcx> { @@ -1536,7 +1503,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } pub fn substs(&self) -> &'tcx Substs<'tcx> { - if let Some(frame) = self.stack().last() { + if let Some(frame) = self.stack.last() { frame.instance.substs } else { Substs::empty() @@ -1682,7 +1649,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } write!(msg, ":").unwrap(); - match self.stack()[frame].get_local(local) { + match self.stack[frame].get_local(local) { Err(err) => { if let EvalErrorKind::DeadLocal = err.kind { write!(msg, " is dead").unwrap(); @@ -1717,13 +1684,13 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } trace!("{}", msg); - 
self.memory().dump_allocs(allocs); + self.memory.dump_allocs(allocs); } Place::Ptr { ptr, align, .. } => { match ptr { Scalar::Ptr(ptr) => { trace!("by align({}) ref:", align.abi()); - self.memory().dump_alloc(ptr.alloc_id); + self.memory.dump_alloc(ptr.alloc_id); } ptr => trace!(" integral by ref: {:?}", ptr), } @@ -1736,12 +1703,12 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M where F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>, { - let val = self.stack()[frame].get_local(local)?; + let val = self.stack[frame].get_local(local)?; let new_val = f(self, val)?; - self.stack_mut()[frame].set_local(local, new_val)?; + self.stack[frame].set_local(local, new_val)?; // FIXME(solson): Run this when setting to Undef? (See previous version of this code.) - // if let Value::ByRef(ptr) = self.stack()[frame].get_local(local) { - // self.memory().deallocate(ptr)?; + // if let Value::ByRef(ptr) = self.stack[frame].get_local(local) { + // self.memory.deallocate(ptr)?; // } Ok(()) } diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 98ab361c239ee..e8994cdac4c2e 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -32,6 +32,7 @@ pub enum MemoryKind { // Top-level interpreter memory //////////////////////////////////////////////////////////////////////////////// +#[derive(Clone)] pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { /// Additional data required by the Machine pub data: M::MemoryData, @@ -48,21 +49,6 @@ pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { pub tcx: TyCtxtAt<'a, 'tcx, 'tcx>, } -impl<'a, 'mir, 'tcx, M> Clone for Memory<'a, 'mir, 'tcx, M> - where M: Machine<'mir, 'tcx>, - 'tcx: 'a + 'mir, -{ - fn clone(&self) -> Self { - Memory { - data: self.data.clone(), - alloc_kind: self.alloc_kind.clone(), - alloc_map: self.alloc_map.clone(), - cur_frame: self.cur_frame.clone(), - tcx: 
self.tcx.clone(), - } - } -} - impl<'a, 'mir, 'tcx, M> Eq for Memory<'a, 'mir, 'tcx, M> where M: Machine<'mir, 'tcx>, 'tcx: 'a + 'mir, @@ -1067,12 +1053,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for Me impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for EvalContext<'a, 'mir, 'tcx, M> { #[inline] fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> { - self.memory_mut() + &mut self.memory } #[inline] fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> { - self.memory() + &self.memory } } diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs index bde2e5945f5d4..bb8e5c99d499b 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -201,7 +201,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { assert_eq!(extra, PlaceExtra::None); Ok(Value::ByRef(ptr, align)) } - Place::Local { frame, local } => self.stack()[frame].get_local(local), + Place::Local { frame, local } => self.stack[frame].get_local(local), } } @@ -261,7 +261,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let (base_ptr, base_align, base_extra) = match base { Place::Ptr { ptr, align, extra } => (ptr, align, extra), Place::Local { frame, local } => { - match (&self.stack()[frame].get_local(local)?, &base_layout.abi) { + match (&self.stack[frame].get_local(local)?, &base_layout.abi) { // in case the field covers the entire type, just return the value (&Value::Scalar(_), &layout::Abi::Scalar(_)) | (&Value::ScalarPair(..), &layout::Abi::ScalarPair(..)) diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs index e8eabe2b1bf6f..dac7c0610c0ef 100644 --- a/src/librustc_mir/interpret/step.rs +++ b/src/librustc_mir/interpret/step.rs @@ -2,18 +2,22 @@ //! //! The main entry point is the `step` method. 
+use std::hash::Hash; + use rustc::mir; use rustc::mir::interpret::EvalResult; use super::{EvalContext, Machine}; -impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { +impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> + where M: Clone + Eq + Hash, +{ pub fn inc_step_counter_and_detect_loops(&mut self, n: usize) { self.steps_until_detector_enabled = self.steps_until_detector_enabled.saturating_sub(n); if self.steps_until_detector_enabled == 0 { - let _ = self.loop_detector.observe(&self.state); // TODO: Handle error + let _ = self.loop_detector.observe(&self.machine, &self.stack, &self.memory); // TODO: Handle error // FIXME(#49980): make this warning a lint self.tcx.sess.span_warn(self.frame().span, "Constant evaluating a complex constant, this might take some time"); @@ -23,7 +27,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { /// Returns true as long as there are more things to do. pub fn step(&mut self) -> EvalResult<'tcx, bool> { - if self.stack().is_empty() { + if self.stack.is_empty() { return Ok(false); } @@ -57,7 +61,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { // *before* executing the statement. let frame_idx = self.cur_frame(); self.tcx.span = stmt.source_info.span; - self.memory_mut().tcx.span = stmt.source_info.span; + self.memory.tcx.span = stmt.source_info.span; match stmt.kind { Assign(ref place, ref rvalue) => self.eval_rvalue_into_place(rvalue, place)?, @@ -106,16 +110,16 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { InlineAsm { .. 
} => return err!(InlineAsm), } - self.stack_mut()[frame_idx].stmt += 1; + self.stack[frame_idx].stmt += 1; Ok(()) } fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> { trace!("{:?}", terminator.kind); self.tcx.span = terminator.source_info.span; - self.memory_mut().tcx.span = terminator.source_info.span; + self.memory.tcx.span = terminator.source_info.span; self.eval_terminator(terminator)?; - if !self.stack().is_empty() { + if !self.stack.is_empty() { trace!("// {:?}", self.frame().block); } Ok(()) diff --git a/src/librustc_mir/interpret/terminator/mod.rs b/src/librustc_mir/interpret/terminator/mod.rs index 61670769d01ad..2994b1b387f3f 100644 --- a/src/librustc_mir/interpret/terminator/mod.rs +++ b/src/librustc_mir/interpret/terminator/mod.rs @@ -71,7 +71,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let (fn_def, sig) = match func.ty.sty { ty::TyFnPtr(sig) => { let fn_ptr = self.value_to_scalar(func)?.to_ptr()?; - let instance = self.memory().get_fn(fn_ptr)?; + let instance = self.memory.get_fn(fn_ptr)?; let instance_ty = instance.ty(*self.tcx); match instance_ty.sty { ty::TyFnDef(..) 
=> { @@ -377,14 +377,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { } // cannot use the shim here, because that will only result in infinite recursion ty::InstanceDef::Virtual(_, idx) => { - let ptr_size = self.memory().pointer_size(); + let ptr_size = self.memory.pointer_size(); let ptr_align = self.tcx.data_layout.pointer_align; let (ptr, vtable) = self.into_ptr_vtable_pair(args[0].value)?; - let fn_ptr = self.memory().read_ptr_sized( + let fn_ptr = self.memory.read_ptr_sized( vtable.offset(ptr_size * (idx as u64 + 3), &self)?, ptr_align )?.to_ptr()?; - let instance = self.memory().get_fn(fn_ptr)?; + let instance = self.memory.get_fn(fn_ptr)?; let mut args = args.to_vec(); let ty = self.layout_of(args[0].ty)?.field(&self, 0)?.ty; args[0].ty = ty; diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs index 2a9129498ad52..b6c7feda19fa8 100644 --- a/src/librustc_mir/interpret/traits.rs +++ b/src/librustc_mir/interpret/traits.rs @@ -25,26 +25,26 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { let size = layout.size.bytes(); let align = layout.align.abi(); - let ptr_size = self.memory().pointer_size(); + let ptr_size = self.memory.pointer_size(); let ptr_align = self.tcx.data_layout.pointer_align; let methods = self.tcx.vtable_methods(trait_ref); - let vtable = self.memory_mut().allocate( + let vtable = self.memory.allocate( ptr_size * (3 + methods.len() as u64), ptr_align, MemoryKind::Stack, )?; let drop = ::monomorphize::resolve_drop_in_place(*self.tcx, ty); - let drop = self.memory_mut().create_fn_alloc(drop); - self.memory_mut().write_ptr_sized_unsigned(vtable, ptr_align, drop.into())?; + let drop = self.memory.create_fn_alloc(drop); + self.memory.write_ptr_sized_unsigned(vtable, ptr_align, drop.into())?; let size_ptr = vtable.offset(ptr_size, &self)?; - self.memory_mut().write_ptr_sized_unsigned(size_ptr, ptr_align, Scalar::Bits { + 
self.memory.write_ptr_sized_unsigned(size_ptr, ptr_align, Scalar::Bits { bits: size as u128, defined: ptr_size.bits() as u8, })?; let align_ptr = vtable.offset(ptr_size * 2, &self)?; - self.memory_mut().write_ptr_sized_unsigned(align_ptr, ptr_align, Scalar::Bits { + self.memory.write_ptr_sized_unsigned(align_ptr, ptr_align, Scalar::Bits { bits: align as u128, defined: ptr_size.bits() as u8, })?; @@ -52,13 +52,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { for (i, method) in methods.iter().enumerate() { if let Some((def_id, substs)) = *method { let instance = self.resolve(def_id, substs)?; - let fn_ptr = self.memory_mut().create_fn_alloc(instance); + let fn_ptr = self.memory.create_fn_alloc(instance); let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?; - self.memory_mut().write_ptr_sized_unsigned(method_ptr, ptr_align, fn_ptr.into())?; + self.memory.write_ptr_sized_unsigned(method_ptr, ptr_align, fn_ptr.into())?; } } - self.memory_mut().mark_static_initialized( + self.memory.mark_static_initialized( vtable.alloc_id, Mutability::Immutable, )?; @@ -76,7 +76,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { match self.read_ptr(vtable, pointer_align, self.tcx.mk_nil_ptr())? 
{ // some values don't need to call a drop impl, so the value is null Value::Scalar(Scalar::Bits { bits: 0, defined} ) if defined == pointer_size => Ok(None), - Value::Scalar(Scalar::Ptr(drop_fn)) => self.memory().get_fn(drop_fn).map(Some), + Value::Scalar(Scalar::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some), _ => err!(ReadBytesAsPointer), } } @@ -85,10 +85,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { &self, vtable: Pointer, ) -> EvalResult<'tcx, (Size, Align)> { - let pointer_size = self.memory().pointer_size(); + let pointer_size = self.memory.pointer_size(); let pointer_align = self.tcx.data_layout.pointer_align; - let size = self.memory().read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64; - let align = self.memory().read_ptr_sized( + let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64; + let align = self.memory.read_ptr_sized( vtable.offset(pointer_size * 2, self)?, pointer_align )?.to_bits(pointer_size)? as u64; From 0f1c61cb7f20d8600e334bdd13e39c0eeb313d15 Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Fri, 22 Jun 2018 13:31:25 -0700 Subject: [PATCH 05/14] Improve correctness of `Frame` and `Memory` equality Incorporate a subset of the suggestions from @oli-obk. More to come. 
--- src/librustc_mir/interpret/eval_context.rs | 18 ++++++++++-------- src/librustc_mir/interpret/memory.rs | 6 ++---- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index 85aeb3c7df51a..e3024e069d911 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -1,6 +1,6 @@ use std::fmt::Write; use std::hash::{Hash, Hasher}; -use std::{mem, ptr}; +use std::mem; use rustc::hir::def_id::DefId; use rustc::hir::def::Def; @@ -98,8 +98,8 @@ impl<'mir, 'tcx: 'mir> Eq for Frame<'mir, 'tcx> {} impl<'mir, 'tcx: 'mir> PartialEq for Frame<'mir, 'tcx> { fn eq(&self, other: &Self) -> bool { let Frame { - mir, - instance: _, + mir: _, + instance, span: _, return_to_block, return_place, @@ -108,8 +108,10 @@ impl<'mir, 'tcx: 'mir> PartialEq for Frame<'mir, 'tcx> { stmt, } = self; - ptr::eq(mir, &other.mir) - && *return_to_block == other.return_to_block // TODO: Are these two necessary? + // Some of these are constant during evaluation, but are included + // anyways for correctness. 
+ *instance == other.instance + && *return_to_block == other.return_to_block && *return_place == other.return_place && *locals == other.locals && *block == other.block @@ -120,8 +122,8 @@ impl<'mir, 'tcx: 'mir> PartialEq for Frame<'mir, 'tcx> { impl<'mir, 'tcx: 'mir> Hash for Frame<'mir, 'tcx> { fn hash(&self, state: &mut H) { let Frame { - mir, - instance: _, + mir: _, + instance, span: _, return_to_block, return_place, @@ -130,7 +132,7 @@ impl<'mir, 'tcx: 'mir> Hash for Frame<'mir, 'tcx> { stmt, } = self; - (mir as *const _ as usize).hash(state); + instance.hash(state); return_to_block.hash(state); return_place.hash(state); locals.hash(state); diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index e8994cdac4c2e..ac4d1c74b8cc1 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -64,14 +64,13 @@ impl<'a, 'mir, 'tcx, M> PartialEq for Memory<'a, 'mir, 'tcx, M> alloc_kind, alloc_map, cur_frame, - tcx, + tcx: _, } = self; *data == other.data && *alloc_kind == other.alloc_kind && *alloc_map == other.alloc_map && *cur_frame == other.cur_frame - && ptr::eq(tcx, &other.tcx) } } @@ -85,12 +84,11 @@ impl<'a, 'mir, 'tcx, M> Hash for Memory<'a, 'mir, 'tcx, M> alloc_kind: _, alloc_map: _, cur_frame, - tcx, + tcx: _, } = self; data.hash(state); cur_frame.hash(state); - (tcx as *const _ as usize).hash(state); // We ignore some fields which don't change between evaluation steps. 
From f7e9d2ac3edf91403f2bd4879e0e8f6b9248adb2 Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Sun, 24 Jun 2018 01:44:23 -0700 Subject: [PATCH 06/14] Add an `InfiniteLoop` variant to `EvalErrorKind` --- src/librustc/ich/impls_ty.rs | 3 ++- src/librustc/mir/interpret/error.rs | 2 ++ src/librustc/ty/structural_impls.rs | 1 + src/librustc_mir/interpret/eval_context.rs | 6 +++--- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/src/librustc/ich/impls_ty.rs b/src/librustc/ich/impls_ty.rs index 5753557a102aa..a3600c0480017 100644 --- a/src/librustc/ich/impls_ty.rs +++ b/src/librustc/ich/impls_ty.rs @@ -549,7 +549,8 @@ for ::mir::interpret::EvalErrorKind<'gcx, O> { RemainderByZero | DivisionByZero | GeneratorResumedAfterReturn | - GeneratorResumedAfterPanic => {} + GeneratorResumedAfterPanic | + InfiniteLoop => {} ReferencedConstant(ref err) => err.hash_stable(hcx, hasher), MachineError(ref err) => err.hash_stable(hcx, hasher), FunctionPointerTyMismatch(a, b) => { diff --git a/src/librustc/mir/interpret/error.rs b/src/librustc/mir/interpret/error.rs index 7e2c144e0a71d..143b7b2069cc2 100644 --- a/src/librustc/mir/interpret/error.rs +++ b/src/librustc/mir/interpret/error.rs @@ -264,6 +264,7 @@ pub enum EvalErrorKind<'tcx, O> { ReferencedConstant(Lrc>), GeneratorResumedAfterReturn, GeneratorResumedAfterPanic, + InfiniteLoop, } pub type EvalResult<'tcx, T = ()> = Result>; @@ -398,6 +399,7 @@ impl<'tcx, O> EvalErrorKind<'tcx, O> { RemainderByZero => "attempt to calculate the remainder with a divisor of zero", GeneratorResumedAfterReturn => "generator resumed after completion", GeneratorResumedAfterPanic => "generator resumed after panicking", + InfiniteLoop => "program will never terminate", } } } diff --git a/src/librustc/ty/structural_impls.rs b/src/librustc/ty/structural_impls.rs index c84999a7e5990..874dabaf1c9e7 100644 --- a/src/librustc/ty/structural_impls.rs +++ b/src/librustc/ty/structural_impls.rs @@ -587,6 +587,7 @@ impl<'a, 'tcx, O: Lift<'tcx>> 
Lift<'tcx> for interpret::EvalErrorKind<'a, O> { RemainderByZero => RemainderByZero, GeneratorResumedAfterReturn => GeneratorResumedAfterReturn, GeneratorResumedAfterPanic => GeneratorResumedAfterPanic, + InfiniteLoop => InfiniteLoop, }) } } diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index e3024e069d911..10fd97365f97e 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -180,7 +180,7 @@ impl<'a, 'mir, 'tcx, M> InfiniteLoopDetector<'a, 'mir, 'tcx, M> machine: &M, stack: &Vec>, memory: &Memory<'a, 'mir, 'tcx, M>, - ) -> Result<(), (/*TODO*/)> { + ) -> EvalResult<'_, ()> { let snapshot = (machine, stack, memory); let mut fx = FxHasher::default(); @@ -197,8 +197,8 @@ impl<'a, 'mir, 'tcx, M> InfiniteLoopDetector<'a, 'mir, 'tcx, M> return Ok(()) } - // Second cycle, - Err(()) + // Second cycle + Err(EvalErrorKind::InfiniteLoop.into()) } } From c6aea935cf70bb79ed88dc891b517fc5130ab398 Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Sun, 24 Jun 2018 12:46:23 -0700 Subject: [PATCH 07/14] Enable loop detector in step loop The detector runs every `DETECTOR_SNAPSHOT_PERIOD` steps. Since the number of steps can increase by more than 1 (I'd like to remove this), the detector may fail if the step counter is incremented past the scheduled detection point during the loop. --- src/librustc_mir/interpret/eval_context.rs | 15 ++++++---- src/librustc_mir/interpret/step.rs | 33 ++++++++++++++++------ 2 files changed, 35 insertions(+), 13 deletions(-) diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index 10fd97365f97e..3375b6edf1a18 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -45,7 +45,7 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { /// The number of terminators to be evaluated before enabling the infinite /// loop detector. 
- pub(crate) steps_until_detector_enabled: usize, + pub(crate) steps_until_detector_enabled: isize, pub(crate) loop_detector: InfiniteLoopDetector<'a, 'mir, 'tcx, M>, } @@ -175,12 +175,17 @@ impl<'a, 'mir, 'tcx, M> InfiniteLoopDetector<'a, 'mir, 'tcx, M> where M: Clone + Eq + Hash + Machine<'mir, 'tcx>, 'tcx: 'a + 'mir, { - pub fn observe( + /// Returns `true` if the loop detector has not yet observed a snapshot. + pub fn is_empty(&self) -> bool { + self.bloom.is_empty() + } + + pub fn observe_and_analyze( &mut self, machine: &M, stack: &Vec>, memory: &Memory<'a, 'mir, 'tcx, M>, - ) -> EvalResult<'_, ()> { + ) -> EvalResult<'tcx, ()> { let snapshot = (machine, stack, memory); let mut fx = FxHasher::default(); @@ -286,7 +291,7 @@ impl<'c, 'b, 'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> LayoutOf } } -const MAX_TERMINATORS: usize = 1_000_000; +const MAX_TERMINATORS: isize = 1_000_000; impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { pub fn new( @@ -656,7 +661,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } Aggregate(ref kind, ref operands) => { - self.inc_step_counter_and_detect_loops(operands.len()); + self.inc_step_counter_and_detect_loops(operands.len())?; let (dest, active_field_index) = match **kind { mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs index dac7c0610c0ef..b0c08304969f6 100644 --- a/src/librustc_mir/interpret/step.rs +++ b/src/librustc_mir/interpret/step.rs @@ -12,17 +12,34 @@ use super::{EvalContext, Machine}; impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> where M: Clone + Eq + Hash, { - pub fn inc_step_counter_and_detect_loops(&mut self, n: usize) { - self.steps_until_detector_enabled - = self.steps_until_detector_enabled.saturating_sub(n); + /// Returns `true` if the loop detector should take a snapshot during the current step. 
+ pub fn is_loop_detector_scheduled(&self) -> bool { + /// The number of steps between loop detector snapshots. + /// Should be a power of two for performance reasons. + const LOOP_SNAPSHOT_PERIOD: isize = 1 << 8; + + let steps = self.steps_until_detector_enabled; + steps <= 0 && steps % LOOP_SNAPSHOT_PERIOD == 0 + } + + pub fn inc_step_counter_and_detect_loops(&mut self, n: usize) -> EvalResult<'tcx, ()> { + // TODO: Remove `as` cast + self.steps_until_detector_enabled = + self.steps_until_detector_enabled.saturating_sub(n as isize); - if self.steps_until_detector_enabled == 0 { - let _ = self.loop_detector.observe(&self.machine, &self.stack, &self.memory); // TODO: Handle error + if !self.is_loop_detector_scheduled() { + return Ok(()); + } + + if self.loop_detector.is_empty() { + // First run of the loop detector // FIXME(#49980): make this warning a lint - self.tcx.sess.span_warn(self.frame().span, "Constant evaluating a complex constant, this might take some time"); - self.steps_until_detector_enabled = 1_000_000; + self.tcx.sess.span_warn(self.frame().span, + "Constant evaluating a complex constant, this might take some time"); } + + self.loop_detector.observe_and_analyze(&self.machine, &self.stack, &self.memory) } /// Returns true as long as there are more things to do. 
@@ -44,7 +61,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> return Ok(true); } - self.inc_step_counter_and_detect_loops(1); + self.inc_step_counter_and_detect_loops(1)?; let terminator = basic_block.terminator(); assert_eq!(old_frames, self.cur_frame()); From 10f217159b7ee7492fceb369aab179110affe2db Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Sun, 24 Jun 2018 12:51:49 -0700 Subject: [PATCH 08/14] Rename `bloom` to `hashes` --- src/librustc_mir/interpret/eval_context.rs | 16 ++++++++-------- src/librustc_mir/interpret/step.rs | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index 3375b6edf1a18..020abbf94c5b1 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -148,14 +148,14 @@ type EvalSnapshot<'a, 'mir, 'tcx, M> pub(crate) struct InfiniteLoopDetector<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { /// The set of all `EvalSnapshot` *hashes* observed by this detector. /// - /// Not a proper bloom filter. - bloom: FxHashSet, + /// When a collision occurs in this table, we store the full snapshot in `snapshots`. + hashes: FxHashSet, /// The set of all `EvalSnapshot`s observed by this detector. /// - /// An `EvalSnapshot` will only be fully cloned once it has caused a collision - /// in `bloom`. As a result, the detector must observe *two* full cycles of - /// an infinite loop before it triggers. + /// An `EvalSnapshot` will only be fully cloned once it has caused a collision in `hashes`. As + /// a result, the detector must observe at least *two* full cycles of an infinite loop before + /// it triggers. 
snapshots: FxHashSet>, } @@ -165,7 +165,7 @@ impl<'a, 'mir, 'tcx, M> Default for InfiniteLoopDetector<'a, 'mir, 'tcx, M> { fn default() -> Self { InfiniteLoopDetector { - bloom: FxHashSet::default(), + hashes: FxHashSet::default(), snapshots: FxHashSet::default(), } } @@ -177,7 +177,7 @@ impl<'a, 'mir, 'tcx, M> InfiniteLoopDetector<'a, 'mir, 'tcx, M> { /// Returns `true` if the loop detector has not yet observed a snapshot. pub fn is_empty(&self) -> bool { - self.bloom.is_empty() + self.hashes.is_empty() } pub fn observe_and_analyze( @@ -192,7 +192,7 @@ impl<'a, 'mir, 'tcx, M> InfiniteLoopDetector<'a, 'mir, 'tcx, M> snapshot.hash(&mut fx); let hash = fx.finish(); - if self.bloom.insert(hash) { + if self.hashes.insert(hash) { // No collision return Ok(()) } diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs index b0c08304969f6..2025db4871824 100644 --- a/src/librustc_mir/interpret/step.rs +++ b/src/librustc_mir/interpret/step.rs @@ -16,10 +16,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> pub fn is_loop_detector_scheduled(&self) -> bool { /// The number of steps between loop detector snapshots. /// Should be a power of two for performance reasons. - const LOOP_SNAPSHOT_PERIOD: isize = 1 << 8; + const DETECTOR_SNAPSHOT_PERIOD: isize = 1 << 8; let steps = self.steps_until_detector_enabled; - steps <= 0 && steps % LOOP_SNAPSHOT_PERIOD == 0 + steps <= 0 && steps % DETECTOR_SNAPSHOT_PERIOD == 0 } pub fn inc_step_counter_and_detect_loops(&mut self, n: usize) -> EvalResult<'tcx, ()> { From d36302da53d398e35d8eacc8cef1c43d1d95359c Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Sun, 24 Jun 2018 13:53:27 -0700 Subject: [PATCH 09/14] Add a UI test for #50637 This test relies on the fact that restrictions on expressions in `const fn` do not apply when computing array lengths. It is more difficult to statically analyze than the simple `loop{}` mentioned in #50637. 
This test should be updated to ignore the warning after #49980 is resolved. --- src/test/ui/const-eval/infinite_loop.rs | 25 +++++++++++++ src/test/ui/const-eval/infinite_loop.stderr | 41 +++++++++++++++++++++ 2 files changed, 66 insertions(+) create mode 100644 src/test/ui/const-eval/infinite_loop.rs create mode 100644 src/test/ui/const-eval/infinite_loop.stderr diff --git a/src/test/ui/const-eval/infinite_loop.rs b/src/test/ui/const-eval/infinite_loop.rs new file mode 100644 index 0000000000000..8011cf83eedbf --- /dev/null +++ b/src/test/ui/const-eval/infinite_loop.rs @@ -0,0 +1,25 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![feature(const_let)] + +fn main() { + // Tests the Collatz conjecture with an incorrect base case (0 instead of 1). + // The value of `n` will loop indefinitely (4 - 2 - 1 - 4). 
+ let _ = [(); { + //~^ WARNING Constant evaluating a complex constant, this might take some time + //~| ERROR could not evaluate repeat length + let mut n = 113383; // #20 in A006884 + while n != 0 { //~ ERROR constant contains unimplemented expression type + n = if n % 2 == 0 { n/2 } else { 3*n + 1 }; + } + n + }]; +} diff --git a/src/test/ui/const-eval/infinite_loop.stderr b/src/test/ui/const-eval/infinite_loop.stderr new file mode 100644 index 0000000000000..904fbcb07e4b2 --- /dev/null +++ b/src/test/ui/const-eval/infinite_loop.stderr @@ -0,0 +1,41 @@ +error[E0019]: constant contains unimplemented expression type + --> $DIR/infinite_loop.rs:20:9 + | +LL | / while n != 0 { //~ ERROR constant contains unimplemented expression type +LL | | n = if n % 2 == 0 { n/2 } else { 3*n + 1 }; +LL | | } + | |_________^ + +warning: Constant evaluating a complex constant, this might take some time + --> $DIR/infinite_loop.rs:16:18 + | +LL | let _ = [(); { + | __________________^ +LL | | //~^ WARNING Constant evaluating a complex constant, this might take some time +LL | | //~| ERROR could not evaluate repeat length +LL | | let mut n = 113383; // #20 in A006884 +... | +LL | | n +LL | | }]; + | |_____^ + +error[E0080]: could not evaluate repeat length + --> $DIR/infinite_loop.rs:16:18 + | +LL | let _ = [(); { + | __________________^ +LL | | //~^ WARNING Constant evaluating a complex constant, this might take some time +LL | | //~| ERROR could not evaluate repeat length +LL | | let mut n = 113383; // #20 in A006884 +LL | | while n != 0 { //~ ERROR constant contains unimplemented expression type +LL | | n = if n % 2 == 0 { n/2 } else { 3*n + 1 }; + | | ---------- program will never terminate +LL | | } +LL | | n +LL | | }]; + | |_____^ + +error: aborting due to 2 previous errors + +Some errors occurred: E0019, E0080. +For more information about an error, try `rustc --explain E0019`. 
From 647ba29b90155e07cf569060d344242b3ee474eb Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Tue, 26 Jun 2018 18:54:59 -0700 Subject: [PATCH 10/14] Explain reason behind error span We can't expand the span of the error reliably according to @oli-obk, so just mention why it points to this particular expression. --- src/librustc/mir/interpret/error.rs | 4 +++- src/test/ui/const-eval/infinite_loop.stderr | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/librustc/mir/interpret/error.rs b/src/librustc/mir/interpret/error.rs index 143b7b2069cc2..68e1d6c65c06c 100644 --- a/src/librustc/mir/interpret/error.rs +++ b/src/librustc/mir/interpret/error.rs @@ -399,7 +399,9 @@ impl<'tcx, O> EvalErrorKind<'tcx, O> { RemainderByZero => "attempt to calculate the remainder with a divisor of zero", GeneratorResumedAfterReturn => "generator resumed after completion", GeneratorResumedAfterPanic => "generator resumed after panicking", - InfiniteLoop => "program will never terminate", + InfiniteLoop => + "duplicate interpreter state observed while executing this expression, \ + const evaluation will never terminate", } } } diff --git a/src/test/ui/const-eval/infinite_loop.stderr b/src/test/ui/const-eval/infinite_loop.stderr index 904fbcb07e4b2..95c15c3e4e254 100644 --- a/src/test/ui/const-eval/infinite_loop.stderr +++ b/src/test/ui/const-eval/infinite_loop.stderr @@ -29,7 +29,7 @@ LL | | //~| ERROR could not evaluate repeat length LL | | let mut n = 113383; // #20 in A006884 LL | | while n != 0 { //~ ERROR constant contains unimplemented expression type LL | | n = if n % 2 == 0 { n/2 } else { 3*n + 1 }; - | | ---------- program will never terminate + | | ---------- duplicate interpreter state observed while executing this expression, const evaluation will never terminate LL | | } LL | | n LL | | }]; From b3b04b8cc6fb66b89613b4b636182b5de53a0601 Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Tue, 26 Jun 2018 21:59:43 -0700 Subject: [PATCH 11/14] Avoid 
overflow in step counter This removes the `usize` argument to `inc_step_counter`. Now, the step counter increments by exactly one for every terminator evaluated. After `STEPS_UNTIL_DETECTOR_ENABLED` steps elapse, the detector is run every `DETECTOR_SNAPSHOT_PERIOD` steps. The step counter is only kept modulo this period. --- src/librustc_mir/interpret/eval_context.rs | 27 +++++++++++----------- src/librustc_mir/interpret/step.rs | 26 ++++++++++----------- 2 files changed, 27 insertions(+), 26 deletions(-) diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index 020abbf94c5b1..363ad537b3f36 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -43,9 +43,11 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { /// The maximum number of stack frames allowed pub(crate) stack_limit: usize, - /// The number of terminators to be evaluated before enabling the infinite - /// loop detector. - pub(crate) steps_until_detector_enabled: isize, + /// When this value is negative, it indicates the number of interpreter + /// steps *until* the loop detector is enabled. When it is positive, it is + /// the number of steps after the detector has been enabled modulo the loop + /// detector period. + pub(crate) steps_since_detector_enabled: isize, pub(crate) loop_detector: InfiniteLoopDetector<'a, 'mir, 'tcx, M>, } @@ -148,14 +150,15 @@ type EvalSnapshot<'a, 'mir, 'tcx, M> pub(crate) struct InfiniteLoopDetector<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> { /// The set of all `EvalSnapshot` *hashes* observed by this detector. /// - /// When a collision occurs in this table, we store the full snapshot in `snapshots`. + /// When a collision occurs in this table, we store the full snapshot in + /// `snapshots`. hashes: FxHashSet, /// The set of all `EvalSnapshot`s observed by this detector. 
/// - /// An `EvalSnapshot` will only be fully cloned once it has caused a collision in `hashes`. As - /// a result, the detector must observe at least *two* full cycles of an infinite loop before - /// it triggers. + /// An `EvalSnapshot` will only be fully cloned once it has caused a + /// collision in `hashes`. As a result, the detector must observe at least + /// *two* full cycles of an infinite loop before it triggers. snapshots: FxHashSet<EvalSnapshot<'a, 'mir, 'tcx, M>>, } @@ -291,7 +294,7 @@ impl<'c, 'b, 'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> LayoutOf } } -const MAX_TERMINATORS: isize = 1_000_000; +const STEPS_UNTIL_DETECTOR_ENABLED: isize = 1_000_000; impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { pub fn new( @@ -310,16 +313,16 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M stack: Vec::new(), stack_limit: tcx.sess.const_eval_stack_frame_limit, loop_detector: Default::default(), - steps_until_detector_enabled: MAX_TERMINATORS, + steps_since_detector_enabled: -STEPS_UNTIL_DETECTOR_ENABLED, } } pub(crate) fn with_fresh_body<F: FnOnce(&mut Self) -> R, R>(&mut self, f: F) -> R { let stack = mem::replace(&mut self.stack, Vec::new()); - let steps = mem::replace(&mut self.steps_until_detector_enabled, MAX_TERMINATORS); + let steps = mem::replace(&mut self.steps_since_detector_enabled, -STEPS_UNTIL_DETECTOR_ENABLED); let r = f(self); self.stack = stack; - self.steps_until_detector_enabled = steps; + self.steps_since_detector_enabled = steps; r } @@ -661,8 +664,6 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M } Aggregate(ref kind, ref operands) => { - self.inc_step_counter_and_detect_loops(operands.len())?; - let (dest, active_field_index) = match **kind { mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { self.write_discriminant_value(dest_ty, dest, variant_index)?; diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs index 2025db4871824..25f45fff04d1e
100644 --- a/src/librustc_mir/interpret/step.rs +++ b/src/librustc_mir/interpret/step.rs @@ -12,23 +12,23 @@ use super::{EvalContext, Machine}; impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> where M: Clone + Eq + Hash, { - /// Returns `true` if the loop detector should take a snapshot during the current step. - pub fn is_loop_detector_scheduled(&self) -> bool { + pub fn inc_step_counter_and_detect_loops(&mut self) -> EvalResult<'tcx, ()> { /// The number of steps between loop detector snapshots. /// Should be a power of two for performance reasons. - const DETECTOR_SNAPSHOT_PERIOD: isize = 1 << 8; + const DETECTOR_SNAPSHOT_PERIOD: isize = 256; - let steps = self.steps_until_detector_enabled; - steps <= 0 && steps % DETECTOR_SNAPSHOT_PERIOD == 0 - } + { + let steps = &mut self.steps_since_detector_enabled; - pub fn inc_step_counter_and_detect_loops(&mut self, n: usize) -> EvalResult<'tcx, ()> { - // TODO: Remove `as` cast - self.steps_until_detector_enabled = - self.steps_until_detector_enabled.saturating_sub(n as isize); + *steps += 1; + if *steps < 0 { + return Ok(()); + } - if !self.is_loop_detector_scheduled() { - return Ok(()); + *steps %= DETECTOR_SNAPSHOT_PERIOD; + if *steps != 0 { + return Ok(()); + } } if self.loop_detector.is_empty() { @@ -61,7 +61,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> return Ok(true); } - self.inc_step_counter_and_detect_loops(1)?; + self.inc_step_counter_and_detect_loops()?; let terminator = basic_block.terminator(); assert_eq!(old_frames, self.cur_frame()); From 0d0e021b1cf9ecc48b4e164e7de14226cff5d3ab Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Fri, 29 Jun 2018 13:00:32 -0700 Subject: [PATCH 12/14] Derive Eq and Hash for types used in Miri's evaluator --- src/librustc/mir/interpret/mod.rs | 2 +- src/librustc/mir/mod.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs 
index f5b449d68e78f..4164fe3fd933b 100644 --- a/src/librustc/mir/interpret/mod.rs +++ b/src/librustc/mir/interpret/mod.rs @@ -36,7 +36,7 @@ use ty::codec::TyDecoder; use std::sync::atomic::{AtomicU32, Ordering}; use std::num::NonZeroU32; -#[derive(Clone, Debug, PartialEq, RustcEncodable, RustcDecodable)] +#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub enum Lock { NoLock, WriteLock(DynamicLifetime), diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index dca0d4f442a4c..2f6d273ec642f 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -1624,7 +1624,7 @@ impl Debug for ValidationOp { } // This is generic so that it can be reused by miri -#[derive(Clone, RustcEncodable, RustcDecodable)] +#[derive(Clone, Hash, PartialEq, Eq, RustcEncodable, RustcDecodable)] pub struct ValidationOperand<'tcx, T> { pub place: T, pub ty: Ty<'tcx>, From c395044a5002a9c275b92bc8e0bc54da245fa8b1 Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Fri, 29 Jun 2018 13:32:54 -0700 Subject: [PATCH 13/14] Shorten error message and add link to test Implements @bjorn3's suggestions. 
--- src/librustc/mir/interpret/error.rs | 3 +-- src/test/ui/const-eval/infinite_loop.rs | 2 +- src/test/ui/const-eval/infinite_loop.stderr | 6 +++--- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/src/librustc/mir/interpret/error.rs b/src/librustc/mir/interpret/error.rs index 68e1d6c65c06c..9125597a7273e 100644 --- a/src/librustc/mir/interpret/error.rs +++ b/src/librustc/mir/interpret/error.rs @@ -400,8 +400,7 @@ impl<'tcx, O> EvalErrorKind<'tcx, O> { GeneratorResumedAfterReturn => "generator resumed after completion", GeneratorResumedAfterPanic => "generator resumed after panicking", InfiniteLoop => - "duplicate interpreter state observed while executing this expression, \ - const evaluation will never terminate", + "duplicate interpreter state observed here, const evaluation will never terminate", } } } diff --git a/src/test/ui/const-eval/infinite_loop.rs b/src/test/ui/const-eval/infinite_loop.rs index 8011cf83eedbf..a1f8ab7f87882 100644 --- a/src/test/ui/const-eval/infinite_loop.rs +++ b/src/test/ui/const-eval/infinite_loop.rs @@ -16,7 +16,7 @@ fn main() { let _ = [(); { //~^ WARNING Constant evaluating a complex constant, this might take some time //~| ERROR could not evaluate repeat length - let mut n = 113383; // #20 in A006884 + let mut n = 113383; // #20 in https://oeis.org/A006884 while n != 0 { //~ ERROR constant contains unimplemented expression type n = if n % 2 == 0 { n/2 } else { 3*n + 1 }; } diff --git a/src/test/ui/const-eval/infinite_loop.stderr b/src/test/ui/const-eval/infinite_loop.stderr index 95c15c3e4e254..f69aae2920360 100644 --- a/src/test/ui/const-eval/infinite_loop.stderr +++ b/src/test/ui/const-eval/infinite_loop.stderr @@ -13,7 +13,7 @@ LL | let _ = [(); { | __________________^ LL | | //~^ WARNING Constant evaluating a complex constant, this might take some time LL | | //~| ERROR could not evaluate repeat length -LL | | let mut n = 113383; // #20 in A006884 +LL | | let mut n = 113383; // #20 in https://oeis.org/A006884 ... 
| LL | | n LL | | }]; @@ -26,10 +26,10 @@ LL | let _ = [(); { | __________________^ LL | | //~^ WARNING Constant evaluating a complex constant, this might take some time LL | | //~| ERROR could not evaluate repeat length -LL | | let mut n = 113383; // #20 in A006884 +LL | | let mut n = 113383; // #20 in https://oeis.org/A006884 LL | | while n != 0 { //~ ERROR constant contains unimplemented expression type LL | | n = if n % 2 == 0 { n/2 } else { 3*n + 1 }; - | | ---------- duplicate interpreter state observed while executing this expression, const evaluation will never terminate + | | ---------- duplicate interpreter state observed here, const evaluation will never terminate LL | | } LL | | n LL | | }]; From cf5eaa75bb171f00d6baa475333b741b86f93f72 Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Wed, 4 Jul 2018 13:05:43 -0700 Subject: [PATCH 14/14] Move `Eq + Hash + Clone` bounds to `Machine` --- src/librustc_mir/interpret/eval_context.rs | 12 ++++-------- src/librustc_mir/interpret/machine.rs | 2 +- src/librustc_mir/interpret/step.rs | 6 +----- 3 files changed, 6 insertions(+), 14 deletions(-) diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index 363ad537b3f36..a92c81e046a0f 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -163,7 +163,7 @@ pub(crate) struct InfiniteLoopDetector<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mi } impl<'a, 'mir, 'tcx, M> Default for InfiniteLoopDetector<'a, 'mir, 'tcx, M> - where M: Eq + Hash + Machine<'mir, 'tcx>, + where M: Machine<'mir, 'tcx>, 'tcx: 'a + 'mir, { fn default() -> Self { @@ -175,7 +175,7 @@ impl<'a, 'mir, 'tcx, M> Default for InfiniteLoopDetector<'a, 'mir, 'tcx, M> } impl<'a, 'mir, 'tcx, M> InfiniteLoopDetector<'a, 'mir, 'tcx, M> - where M: Clone + Eq + Hash + Machine<'mir, 'tcx>, + where M: Machine<'mir, 'tcx>, 'tcx: 'a + 'mir, { /// Returns `true` if the loop detector has not yet observed a snapshot. 
@@ -302,9 +302,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M param_env: ty::ParamEnv<'tcx>, machine: M, memory_data: M::MemoryData, - ) -> Self - where M: Eq + Hash - { + ) -> Self { EvalContext { machine, tcx, @@ -612,9 +610,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M &mut self, rvalue: &mir::Rvalue<'tcx>, place: &mir::Place<'tcx>, - ) -> EvalResult<'tcx> - where M: Clone + Eq + Hash, - { + ) -> EvalResult<'tcx> { let dest = self.eval_place(place)?; let dest_ty = self.place_ty(place); diff --git a/src/librustc_mir/interpret/machine.rs b/src/librustc_mir/interpret/machine.rs index f6491d7f1a469..e2086c57c7c7c 100644 --- a/src/librustc_mir/interpret/machine.rs +++ b/src/librustc_mir/interpret/machine.rs @@ -15,7 +15,7 @@ use syntax::ast::Mutability; /// Methods of this trait signifies a point where CTFE evaluation would fail /// and some use case dependent behaviour can instead be applied -pub trait Machine<'mir, 'tcx>: Sized { +pub trait Machine<'mir, 'tcx>: Clone + Eq + Hash { /// Additional data that can be accessed via the Memory type MemoryData: Clone + Eq + Hash; diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs index 25f45fff04d1e..db90714d0e623 100644 --- a/src/librustc_mir/interpret/step.rs +++ b/src/librustc_mir/interpret/step.rs @@ -2,16 +2,12 @@ //! //! The main entry point is the `step` method. -use std::hash::Hash; - use rustc::mir; use rustc::mir::interpret::EvalResult; use super::{EvalContext, Machine}; -impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> - where M: Clone + Eq + Hash, -{ +impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { pub fn inc_step_counter_and_detect_loops(&mut self) -> EvalResult<'tcx, ()> { /// The number of steps between loop detector snapshots. /// Should be a power of two for performance reasons.