From 1ae7ae0c1c7ed68c616273f245647afa47f3cbde Mon Sep 17 00:00:00 2001
From: Ariel Ben-Yehuda
Date: Sun, 29 May 2016 22:01:06 +0300
Subject: [PATCH] fix translation of terminators in MSVC cleanup blocks

MSVC requires unwinding code to be split into a tree of *funclets*,
where each funclet can only branch to itself or to its parent. Luckily,
the code we generate matches this pattern. Recover that structure in an
analysis pass and translate accordingly.
---
 .../borrowck/mir/elaborate_drops.rs |   6 +-
 src/librustc_trans/common.rs        |  24 ++-
 src/librustc_trans/mir/analyze.rs   | 103 +++++++++
 src/librustc_trans/mir/block.rs     | 200 +++++++++++-------
 src/librustc_trans/mir/mod.rs       |  26 ++-
 src/test/run-fail/issue-30380.rs    |   3 +
 src/test/run-pass/dynamic-drop.rs   |  90 ++++++--
 7 files changed, 347 insertions(+), 105 deletions(-)

diff --git a/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs b/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs
index cbe923ead9f13..f72e10d99cfd1 100644
--- a/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs
+++ b/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs
@@ -998,9 +998,11 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
                     continue
                 }
                 TerminatorKind::DropAndReplace { .. } => {
-                    // this contains the consume of the source and
+                    // this contains the move of the source and
                     // the initialization of the destination. We
-                    // only want the latter
+                    // only want the former - the latter is handled
+                    // by the elaboration code and must be done
+                    // *after* the destination is dropped.
                     assert!(self.patch.is_patched(bb));
                     allow_initializations = false;
                 }
diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs
index c1685e6a74904..bf62849818d4e 100644
--- a/src/librustc_trans/common.rs
+++ b/src/librustc_trans/common.rs
@@ -577,6 +577,15 @@ impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
         self.lpad.get()
     }

+    pub fn set_lpad_ref(&self, lpad: Option<&'blk LandingPad>) {
+        // FIXME: use an IVar?
+        self.lpad.set(lpad);
+    }
+
+    pub fn set_lpad(&self, lpad: Option<LandingPad>) {
+        self.set_lpad_ref(lpad.map(|p| &*self.fcx().lpad_arena.alloc(p)))
+    }
+
     pub fn mir(&self) -> CachedMir<'blk, 'tcx> {
         self.fcx.mir()
     }
@@ -716,7 +725,16 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> {
     }

     pub fn set_lpad(&self, lpad: Option<LandingPad>) {
-        self.bcx.lpad.set(lpad.map(|p| &*self.fcx().lpad_arena.alloc(p)))
+        self.bcx.set_lpad(lpad)
+    }
+
+    pub fn set_lpad_ref(&self, lpad: Option<&'blk LandingPad>) {
+        // FIXME: use an IVar?
+        self.bcx.set_lpad_ref(lpad);
+    }
+
+    pub fn lpad(&self) -> Option<&'blk LandingPad> {
+        self.bcx.lpad()
+    }
 }

@@ -761,6 +779,10 @@ impl LandingPad {
     pub fn bundle(&self) -> Option<&OperandBundleDef> {
         self.operand.as_ref()
     }
+
+    pub fn cleanuppad(&self) -> Option<ValueRef> {
+        self.cleanuppad
+    }
 }

 impl Clone for LandingPad {
diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs
index 0b88ba554da67..03df1c451f0d1 100644
--- a/src/librustc_trans/mir/analyze.rs
+++ b/src/librustc_trans/mir/analyze.rs
@@ -13,7 +13,9 @@
 use rustc_data_structures::bitvec::BitVector;

 use rustc::mir::repr as mir;
+use rustc::mir::repr::TerminatorKind;
 use rustc::mir::visit::{Visitor, LvalueContext};
+use rustc_mir::traversal;

 use common::{self, Block, BlockAndBuilder};
 use super::rvalue;
@@ -134,3 +136,104 @@ impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for TempAnalyzer<'mir, 'bcx, 'tcx> {
         self.super_lvalue(lvalue, context);
     }
 }
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum CleanupKind {
+    NotCleanup,
+    Funclet,
+    Internal { funclet: mir::BasicBlock }
+}
+
+pub fn cleanup_kinds<'bcx,'tcx>(_bcx: Block<'bcx,'tcx>,
+                                mir: &mir::Mir<'tcx>)
+                                -> Vec<CleanupKind>
+{
+    fn discover_masters<'tcx>(result: &mut [CleanupKind], mir: &mir::Mir<'tcx>) {
+        for bb in mir.all_basic_blocks() {
+            let data = mir.basic_block_data(bb);
+            match data.terminator().kind {
+                TerminatorKind::Goto { .. } |
+                TerminatorKind::Resume |
+                TerminatorKind::Return |
+                TerminatorKind::If { .. } |
+                TerminatorKind::Switch { .. } |
+                TerminatorKind::SwitchInt { .. } => {
+                    /* nothing to do */
+                }
+                TerminatorKind::Call { cleanup: unwind, .. } |
+                TerminatorKind::DropAndReplace { unwind, .. } |
+                TerminatorKind::Drop { unwind, .. } => {
+                    if let Some(unwind) = unwind {
+                        debug!("cleanup_kinds: {:?}/{:?} registering {:?} as funclet",
+                               bb, data, unwind);
+                        result[unwind.index()] = CleanupKind::Funclet;
+                    }
+                }
+            }
+        }
+    }
+
+    fn propagate<'tcx>(result: &mut [CleanupKind], mir: &mir::Mir<'tcx>) {
+        let mut funclet_succs : Vec<_> =
+            mir.all_basic_blocks().iter().map(|_| None).collect();
+
+        let mut set_successor = |funclet: mir::BasicBlock, succ| {
+            match funclet_succs[funclet.index()] {
+                ref mut s @ None => {
+                    debug!("set_successor: updating successor of {:?} to {:?}",
+                           funclet, succ);
+                    *s = Some(succ);
+                },
+                Some(s) => if s != succ {
+                    span_bug!(mir.span, "funclet {:?} has 2 parents - {:?} and {:?}",
+                              funclet, s, succ);
+                }
+            }
+        };
+
+        for (bb, data) in traversal::reverse_postorder(mir) {
+            let funclet = match result[bb.index()] {
+                CleanupKind::NotCleanup => continue,
+                CleanupKind::Funclet => bb,
+                CleanupKind::Internal { funclet } => funclet,
+            };
+
+            debug!("cleanup_kinds: {:?}/{:?}/{:?} propagating funclet {:?}",
+                   bb, data, result[bb.index()], funclet);
+
+            for &succ in data.terminator().successors().iter() {
+                let kind = result[succ.index()];
+                debug!("cleanup_kinds: propagating {:?} to {:?}/{:?}",
+                       funclet, succ, kind);
+                match kind {
+                    CleanupKind::NotCleanup => {
+                        result[succ.index()] = CleanupKind::Internal { funclet: funclet };
+                    }
+                    CleanupKind::Funclet => {
+                        set_successor(funclet, succ);
+                    }
+                    CleanupKind::Internal { funclet: succ_funclet } => {
+                        if funclet != succ_funclet {
+                            // `succ` has 2 different funclets going into it, so it must
+                            // be a funclet by itself.
+ + debug!("promoting {:?} to a funclet and updating {:?}", succ, + succ_funclet); + result[succ.index()] = CleanupKind::Funclet; + set_successor(succ_funclet, succ); + set_successor(funclet, succ); + } + } + } + } + } + } + + let mut result : Vec<_> = + mir.all_basic_blocks().iter().map(|_| CleanupKind::NotCleanup).collect(); + + discover_masters(&mut result, mir); + propagate(&mut result, mir); + debug!("cleanup_kinds: result={:?}", result); + result +} diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index a917327b0523a..eb962b6615442 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm::{self, BasicBlockRef, ValueRef, OperandBundleDef}; +use llvm::{self, ValueRef}; use rustc::ty; use rustc::mir::repr as mir; use abi::{Abi, FnType, ArgType}; @@ -16,7 +16,7 @@ use adt; use base; use build; use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual}; -use common::{self, type_is_fat_ptr, Block, BlockAndBuilder, C_undef}; +use common::{self, type_is_fat_ptr, Block, BlockAndBuilder, LandingPad, C_undef}; use debuginfo::DebugLoc; use Disr; use machine::{llalign_of_min, llbitsize_of_real}; @@ -27,6 +27,7 @@ use type_::Type; use rustc_data_structures::fnv::FnvHashMap; use super::{MirContext, TempRef}; +use super::analyze::CleanupKind; use super::constant::Const; use super::lvalue::{LvalueRef, load_fat_ptr}; use super::operand::OperandRef; @@ -34,22 +35,62 @@ use super::operand::OperandValue::{self, FatPtr, Immediate, Ref}; impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { pub fn trans_block(&mut self, bb: mir::BasicBlock) { - debug!("trans_block({:?})", bb); - let mut bcx = self.bcx(bb); let mir = self.mir.clone(); let data = mir.basic_block_data(bb); - // MSVC SEH bits - let (cleanup_pad, cleanup_bundle) = if let Some((cp, cb)) = self.make_cleanup_pad(bb) { - (Some(cp), Some(cb)) - } else { - (None, None) + debug!("trans_block({:?}={:?})", bb, data); + + // Create the cleanup bundle, if needed. + let cleanup_pad = bcx.lpad().and_then(|lp| lp.cleanuppad()); + let cleanup_bundle = bcx.lpad().and_then(|l| l.bundle()); + + let funclet_br = |this: &Self, bcx: BlockAndBuilder, bb: mir::BasicBlock| { + let lltarget = this.blocks[bb.index()].llbb; + if let Some(cp) = cleanup_pad { + match this.cleanup_kind(bb) { + CleanupKind::Funclet => { + // micro-optimization: generate a `ret` rather than a jump + // to a return block + bcx.cleanup_ret(cp, Some(lltarget)); + } + CleanupKind::Internal { .. } => bcx.br(lltarget), + CleanupKind::NotCleanup => bug!("jump from cleanup bb to bb {:?}", bb) + } + } else { + bcx.br(lltarget); + } }; - let funclet_br = |bcx: BlockAndBuilder, llbb: BasicBlockRef| if let Some(cp) = cleanup_pad { - bcx.cleanup_ret(cp, Some(llbb)); - } else { - bcx.br(llbb); + + let llblock = |this: &mut Self, target: mir::BasicBlock| { + let lltarget = this.blocks[target.index()].llbb; + + if let Some(cp) = cleanup_pad { + match this.cleanup_kind(target) { + CleanupKind::Funclet => { + // MSVC cross-funclet jump - need a trampoline + + debug!("llblock: creating cleanup trampoline for {:?}", target); + let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); + let trampoline = this.fcx.new_block(name, None).build(); + trampoline.set_personality_fn(this.fcx.eh_personality()); + trampoline.cleanup_ret(cp, Some(lltarget)); + trampoline.llbb() + } + CleanupKind::Internal { .. 
+                    CleanupKind::NotCleanup =>
+                        bug!("jump from cleanup bb {:?} to bb {:?}", bb, target)
+                }
+            } else {
+                if let (CleanupKind::NotCleanup, CleanupKind::Funclet) =
+                    (this.cleanup_kind(bb), this.cleanup_kind(target))
+                {
+                    // jump *into* cleanup - need a landing pad if GNU
+                    this.landing_pad_to(target).llbb
+                } else {
+                    lltarget
+                }
+            }
         };

         for statement in &data.statements {
@@ -78,13 +119,14 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
             }

             mir::TerminatorKind::Goto { target } => {
-                funclet_br(bcx, self.llblock(target));
+                funclet_br(self, bcx, target);
             }

             mir::TerminatorKind::If { ref cond, targets: (true_bb, false_bb) } => {
                 let cond = self.trans_operand(&bcx, cond);
-                let lltrue = self.llblock(true_bb);
-                let llfalse = self.llblock(false_bb);
+
+                let lltrue = llblock(self, true_bb);
+                let llfalse = llblock(self, false_bb);
                 bcx.cond_br(cond.immediate(), lltrue, llfalse);
             }

@@ -106,18 +148,18 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                     // code. This is especially helpful in cases like an if-let on a huge enum.
                     // Note: This optimization is only valid for exhaustive matches.
                     Some((&&bb, &c)) if c > targets.len() / 2 => {
-                        (Some(bb), self.blocks[bb.index()])
+                        (Some(bb), llblock(self, bb))
                     }
                     // We're generating an exhaustive switch, so the else branch
                     // can't be hit. Branching to an unreachable instruction
                     // lets LLVM know this
-                    _ => (None, self.unreachable_block())
+                    _ => (None, self.unreachable_block().llbb)
                 };
-                let switch = bcx.switch(discr, default_blk.llbb, targets.len());
+                let switch = bcx.switch(discr, default_blk, targets.len());
                 assert_eq!(adt_def.variants.len(), targets.len());
                 for (adt_variant, &target) in adt_def.variants.iter().zip(targets) {
                     if default_bb != Some(target) {
-                        let llbb = self.llblock(target);
+                        let llbb = llblock(self, target);
                         let llval = bcx.with_block(|bcx| adt::trans_case(
                                 bcx, &repr, Disr::from(adt_variant.disr_val)));
                         build::AddCase(switch, llval, llbb)
@@ -129,10 +171,10 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                 let (otherwise, targets) = targets.split_last().unwrap();
                 let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval);
                 let discr = bcx.with_block(|bcx| base::to_immediate(bcx, discr, switch_ty));
-                let switch = bcx.switch(discr, self.llblock(*otherwise), values.len());
+                let switch = bcx.switch(discr, llblock(self, *otherwise), values.len());
                 for (value, target) in values.iter().zip(targets) {
                     let val = Const::from_constval(bcx.ccx(), value.clone(), switch_ty);
-                    let llbb = self.llblock(*target);
+                    let llbb = llblock(self, *target);
                     build::AddCase(switch, val.llval, llbb)
                 }
             }
@@ -148,7 +190,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                 let ty = lvalue.ty.to_ty(bcx.tcx());
                 // Double check for necessity to drop
                 if !glue::type_needs_drop(bcx.tcx(), ty) {
-                    funclet_br(bcx, self.llblock(target));
+                    funclet_br(self, bcx, target);
                     return;
                 }
                 let drop_fn = glue::get_drop_glue(bcx.ccx(), ty);
@@ -159,19 +201,14 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                     lvalue.llval
                 };
                 if let Some(unwind) = unwind {
-                    let uwbcx = self.bcx(unwind);
-                    let unwind = self.make_landing_pad(uwbcx);
                     bcx.invoke(drop_fn,
                                &[llvalue],
-                               self.llblock(target),
-                               unwind.llbb(),
-                               cleanup_bundle.as_ref());
-                    self.bcx(target).at_start(|bcx| {
-                        debug_loc.apply_to_bcx(bcx);
-                    });
+                               self.blocks[target.index()].llbb,
+                               llblock(self, unwind),
+                               cleanup_bundle);
                 } else {
-                    bcx.call(drop_fn, &[llvalue], cleanup_bundle.as_ref());
-                    funclet_br(bcx, self.llblock(target));
+                    bcx.call(drop_fn, &[llvalue], cleanup_bundle);
+                    funclet_br(self, bcx, target);
                 }
             }

@@ -213,7 +250,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                 let llptr = self.trans_operand(&bcx, &args[0]).immediate();
                 let val = self.trans_operand(&bcx, &args[1]);
                 self.store_operand(&bcx, llptr, val);
-                funclet_br(bcx, self.llblock(target));
+                funclet_br(self, bcx, target);
                 return;
             }

@@ -223,7 +260,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                         this.trans_transmute(&bcx, &args[0], dest);
                     });

-                    funclet_br(bcx, self.llblock(target));
+                    funclet_br(self, bcx, target);
                     return;
                 }

@@ -328,7 +365,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                 }

                 if let Some((_, target)) = *destination {
-                    funclet_br(bcx, self.llblock(target));
+                    funclet_br(self, bcx, target);
                 } else {
                     // trans_intrinsic_call already used Unreachable.
                     // bcx.unreachable();
@@ -341,19 +378,17 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                 };

                 // Many different ways to call a function handled here
-                if let Some(cleanup) = cleanup.map(|bb| self.bcx(bb)) {
+                if let &Some(cleanup) = cleanup {
                     let ret_bcx = if let Some((_, target)) = *destination {
                         self.blocks[target.index()]
                     } else {
                         self.unreachable_block()
                     };
-                    let landingpad = self.make_landing_pad(cleanup);
-
                     let invokeret = bcx.invoke(fn_ptr,
                                                &llargs,
                                                ret_bcx.llbb,
-                                               landingpad.llbb(),
-                                               cleanup_bundle.as_ref());
+                                               llblock(self, cleanup),
+                                               cleanup_bundle);
                     fn_ty.apply_attrs_callsite(invokeret);

                     if destination.is_some() {
@@ -368,7 +403,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                         });
                     }
                 } else {
-                    let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle.as_ref());
+                    let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle);
                     fn_ty.apply_attrs_callsite(llret);
                     if let Some((_, target)) = *destination {
                         let op = OperandRef {
@@ -376,9 +411,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                             ty: sig.output.unwrap()
                         };
                         self.store_return(&bcx, ret_dest, fn_ty.ret, op);
-                        funclet_br(bcx, self.llblock(target));
+                        funclet_br(self, bcx, target);
                     } else {
-                        // no need to drop args, because the call never returns
                         bcx.unreachable();
                     }
                 }
@@ -518,17 +552,29 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
         }
     }

-    /// Create a landingpad wrapper around the given Block.
+    fn cleanup_kind(&self, bb: mir::BasicBlock) -> CleanupKind {
+        self.cleanup_kinds[bb.index()]
+    }
+
+    /// Return the landingpad wrapper around the given basic block
     ///
     /// No-op in MSVC SEH scheme.
-    fn make_landing_pad(&mut self,
-                        cleanup: BlockAndBuilder<'bcx, 'tcx>)
-                        -> BlockAndBuilder<'bcx, 'tcx>
+    fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> Block<'bcx, 'tcx>
     {
-        if base::wants_msvc_seh(cleanup.sess()) {
-            return cleanup;
+        if let Some(block) = self.landing_pads[target_bb.index()] {
+            return block;
+        }
+
+        if base::wants_msvc_seh(self.fcx.ccx.sess()) {
+            return self.blocks[target_bb.index()];
         }
-        let bcx = self.fcx.new_block("cleanup", None).build();
+
+        let target = self.bcx(target_bb);
+
+        let block = self.fcx.new_block("cleanup", None);
+        self.landing_pads[target_bb.index()] = Some(block);
+
+        let bcx = block.build();
         let ccx = bcx.ccx();
         let llpersonality = self.fcx.eh_personality();
         let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
@@ -536,36 +582,34 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
         bcx.set_cleanup(llretval);
         let slot = self.get_personality_slot(&bcx);
         bcx.store(llretval, slot);
-        bcx.br(cleanup.llbb());
-        bcx
+        bcx.br(target.llbb());
+        block
     }

-    /// Create prologue cleanuppad instruction under MSVC SEH handling scheme.
-    ///
-    /// Also handles setting some state for the original trans and creating an operand bundle for
-    /// function calls.
-    fn make_cleanup_pad(&mut self, bb: mir::BasicBlock) -> Option<(ValueRef, OperandBundleDef)> {
+    pub fn init_cpad(&mut self, bb: mir::BasicBlock) {
         let bcx = self.bcx(bb);
         let data = self.mir.basic_block_data(bb);
-        let use_funclets = base::wants_msvc_seh(bcx.sess()) && data.is_cleanup;
-        let cleanup_pad = if use_funclets {
-            bcx.set_personality_fn(self.fcx.eh_personality());
-            bcx.at_start(|bcx| {
-                DebugLoc::None.apply_to_bcx(bcx);
-                Some(bcx.cleanup_pad(None, &[]))
-            })
-        } else {
-            None
+        debug!("init_cpad({:?})", data);
+
+        match self.cleanup_kinds[bb.index()] {
+            CleanupKind::NotCleanup => {
+                bcx.set_lpad(None)
+            }
+            _ if !base::wants_msvc_seh(bcx.sess()) => {
+                bcx.set_lpad(Some(LandingPad::gnu()))
+            }
+            CleanupKind::Internal { funclet } => {
+                // FIXME: is this needed?
+                bcx.set_personality_fn(self.fcx.eh_personality());
+                bcx.set_lpad_ref(self.bcx(funclet).lpad());
+            }
+            CleanupKind::Funclet => {
+                bcx.set_personality_fn(self.fcx.eh_personality());
+                DebugLoc::None.apply_to_bcx(&bcx);
+                let cleanup_pad = bcx.cleanup_pad(None, &[]);
+                bcx.set_lpad(Some(LandingPad::msvc(cleanup_pad)));
+            }
         };
-        // Set the landingpad global-state for old translator, so it knows about the SEH used.
-        bcx.set_lpad(if let Some(cleanup_pad) = cleanup_pad {
-            Some(common::LandingPad::msvc(cleanup_pad))
-        } else if data.is_cleanup {
-            Some(common::LandingPad::gnu())
-        } else {
-            None
-        });
-        cleanup_pad.map(|f| (f, OperandBundleDef::new("funclet", &[f])))
     }

     fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> {
@@ -581,10 +625,6 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
         self.blocks[bb.index()].build()
     }

-    pub fn llblock(&self, bb: mir::BasicBlock) -> BasicBlockRef {
-        self.blocks[bb.index()].llbb
-    }
-
     fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
                         dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType,
                         llargs: &mut Vec<ValueRef>, is_intrinsic: bool) -> ReturnDest {
diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs
index 3ff304758116c..d1206550b13d6 100644
--- a/src/librustc_trans/mir/mod.rs
+++ b/src/librustc_trans/mir/mod.rs
@@ -73,6 +73,13 @@ pub struct MirContext<'bcx, 'tcx:'bcx> {
     /// A `Block` for each MIR `BasicBlock`
     blocks: Vec<Block<'bcx, 'tcx>>,

+    /// The funclet status of each basic block
+    cleanup_kinds: Vec<analyze::CleanupKind>,
+
+    /// This stores the landing-pad block for a given BB, computed lazily on GNU
+    /// and eagerly on MSVC.
+    landing_pads: Vec<Option<Block<'bcx, 'tcx>>>,
+
     /// Cached unreachable block
     unreachable_block: Option<Block<'bcx, 'tcx>>,

@@ -139,8 +146,9 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {

     // Analyze the temps to determine which must be lvalues
     // FIXME
-    let lvalue_temps = bcx.with_block(|bcx| {
-        analyze::lvalue_temps(bcx, &mir)
+    let (lvalue_temps, cleanup_kinds) = bcx.with_block(|bcx| {
+        (analyze::lvalue_temps(bcx, &mir),
+         analyze::cleanup_kinds(bcx, &mir))
     });

     // Compute debuginfo scopes from MIR scopes.
@@ -206,6 +214,8 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
         llpersonalityslot: None,
         blocks: block_bcxs,
         unreachable_block: None,
+        cleanup_kinds: cleanup_kinds,
+        landing_pads: mir_blocks.iter().map(|_| None).collect(),
         vars: vars,
         temps: temps,
         args: args,
@@ -214,7 +224,14 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {

     let mut visited = BitVector::new(mir_blocks.len());

-    let rpo = traversal::reverse_postorder(&mir);
+    let mut rpo = traversal::reverse_postorder(&mir);
+
+    // Prepare each block for translation.
+    for (bb, _) in rpo.by_ref() {
+        mircx.init_cpad(bb);
+    }
+    rpo.reset();
+
     // Translate the body of each block using reverse postorder
     for (bb, _) in rpo {
         visited.insert(bb.index());
@@ -228,8 +245,7 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
         let block = BasicBlock(block.llbb);
         // Unreachable block
         if !visited.contains(bb.index()) {
-            block.delete();
-        } else if block.pred_iter().count() == 0 {
+            debug!("trans_mir: block {:?} was not visited", bb);
             block.delete();
         }
     }
diff --git a/src/test/run-fail/issue-30380.rs b/src/test/run-fail/issue-30380.rs
index 7bd9adcba9bd1..eb668517bdf34 100644
--- a/src/test/run-fail/issue-30380.rs
+++ b/src/test/run-fail/issue-30380.rs
@@ -8,6 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

+#![feature(rustc_attrs)]
+
 // check that panics in destructors during assignment do not leave
 // destroyed values lying around for other destructors to observe.

@@ -33,6 +35,7 @@ impl<'a> Drop for Observer<'a> {
     }
 }

+#[rustc_mir]
 fn foo(b: &mut Observer) {
     *b.0 = FilledOnDrop(1);
 }
diff --git a/src/test/run-pass/dynamic-drop.rs b/src/test/run-pass/dynamic-drop.rs
index 48e7b7ca57696..f917531e868f1 100644
--- a/src/test/run-pass/dynamic-drop.rs
+++ b/src/test/run-pass/dynamic-drop.rs
@@ -8,12 +8,23 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-use std::cell::RefCell;
+#![feature(rustc_attrs)]
+
+use std::cell::{Cell, RefCell};
+use std::panic;
+use std::usize;
+
+struct InjectedFailure;

 struct Allocator {
     data: RefCell<Vec<bool>>,
+    failing_op: usize,
+    cur_ops: Cell<usize>,
 }

+impl panic::UnwindSafe for Allocator {}
+impl panic::RefUnwindSafe for Allocator {}
+
 impl Drop for Allocator {
     fn drop(&mut self) {
         let data = self.data.borrow();
@@ -24,8 +35,20 @@ impl Drop for Allocator {
 }

 impl Allocator {
-    fn new() -> Self { Allocator { data: RefCell::new(vec![]) } }
+    fn new(failing_op: usize) -> Self {
+        Allocator {
+            failing_op: failing_op,
+            cur_ops: Cell::new(0),
+            data: RefCell::new(vec![])
+        }
+    }
     fn alloc(&self) -> Ptr {
+        self.cur_ops.set(self.cur_ops.get() + 1);
+
+        if self.cur_ops.get() == self.failing_op {
+            panic!(InjectedFailure);
+        }
+
         let mut data = self.data.borrow_mut();
         let addr = data.len();
         data.push(true);
@@ -42,9 +65,16 @@ impl<'a> Drop for Ptr<'a> {
             }
             ref mut d => *d = false
         }
+
+        self.1.cur_ops.set(self.1.cur_ops.get()+1);
+
+        if self.1.cur_ops.get() == self.1.failing_op {
+            panic!(InjectedFailure);
+        }
     }
 }

+#[rustc_mir]
 fn dynamic_init(a: &Allocator, c: bool) {
     let _x;
     if c {
@@ -52,15 +82,17 @@ fn dynamic_init(a: &Allocator, c: bool) {
     }
 }

-fn dynamic_drop(a: &Allocator, c: bool) -> Option<Ptr> {
+#[rustc_mir]
+fn dynamic_drop(a: &Allocator, c: bool) {
     let x = a.alloc();
     if c {
         Some(x)
     } else {
         None
-    }
+    };
 }

+#[rustc_mir]
 fn assignment2(a: &Allocator, c0: bool, c1: bool) {
     let mut _v = a.alloc();
     let mut _w = a.alloc();
@@ -73,6 +105,7 @@ fn assignment2(a: &Allocator, c0: bool, c1: bool) {
     }
 }

+#[rustc_mir]
 fn assignment1(a: &Allocator, c0: bool) {
     let mut _v = a.alloc();
     let mut _w = a.alloc();
@@ -82,19 +115,42 @@ fn assignment1(a: &Allocator, c0: bool) {
     _v = _w;
 }

+fn run_test<F>(mut f: F)
+    where F: FnMut(&Allocator)
+{
+    let first_alloc = Allocator::new(usize::MAX);
+    f(&first_alloc);
+
+    for failing_op in 1..first_alloc.cur_ops.get()+1 {
+        let alloc = Allocator::new(failing_op);
+        let alloc = &alloc;
+        let f = panic::AssertUnwindSafe(&mut f);
+        let result = panic::catch_unwind(move || {
+            f.0(alloc);
+        });
+        match result {
+            Ok(..) => panic!("test executed {} ops but now {}",
+                             first_alloc.cur_ops.get(), alloc.cur_ops.get()),
+            Err(e) => {
+                if e.downcast_ref::<InjectedFailure>().is_none() {
+                    panic::resume_unwind(e);
+                }
+            }
+        }
+    }
+}

 fn main() {
-    let a = Allocator::new();
-    dynamic_init(&a, false);
-    dynamic_init(&a, true);
-    dynamic_drop(&a, false);
-    dynamic_drop(&a, true);
-
-    assignment2(&a, false, false);
-    assignment2(&a, false, true);
-    assignment2(&a, true, false);
-    assignment2(&a, true, true);
-
-    assignment1(&a, false);
-    assignment1(&a, true);
+    run_test(|a| dynamic_init(a, false));
+    run_test(|a| dynamic_init(a, true));
+    run_test(|a| dynamic_drop(a, false));
+    run_test(|a| dynamic_drop(a, true));
+
+    run_test(|a| assignment2(a, false, false));
+    run_test(|a| assignment2(a, false, true));
+    run_test(|a| assignment2(a, true, false));
+    run_test(|a| assignment2(a, true, true));
+
+    run_test(|a| assignment1(a, false));
+    run_test(|a| assignment1(a, true));
 }
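
For intuition, the cleanup-kind analysis added in src/librustc_trans/mir/analyze.rs can be
reduced to a standalone sketch. The code below is illustrative only, not compiler code:
basic blocks are plain indices, the `ToyBlock` type and the `main` driver are invented for
the example, and the funclet-successor bookkeeping done by `set_successor` is omitted.

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum CleanupKind {
    NotCleanup,
    Funclet,
    Internal { funclet: usize },
}

/// A toy basic block: ordinary successors plus an optional unwind edge
/// (as carried by Call/Drop/DropAndReplace terminators in MIR).
struct ToyBlock {
    succs: Vec<usize>,
    unwind: Option<usize>,
}

/// Blocks are assumed to already be indexed in reverse postorder.
fn cleanup_kinds(blocks: &[ToyBlock]) -> Vec<CleanupKind> {
    let mut result = vec![CleanupKind::NotCleanup; blocks.len()];

    // Pass 1 (cf. discover_masters): every unwind target starts a funclet.
    for block in blocks {
        if let Some(unwind) = block.unwind {
            result[unwind] = CleanupKind::Funclet;
        }
    }

    // Pass 2 (cf. propagate): assign each cleanup block to its owning
    // funclet. A block reachable from two different funclets cannot live
    // inside either one - MSVC only lets a funclet branch within itself
    // or return to its parent - so it is promoted to a funclet entry.
    for bb in 0..blocks.len() {
        let funclet = match result[bb] {
            CleanupKind::NotCleanup => continue,
            CleanupKind::Funclet => bb,
            CleanupKind::Internal { funclet } => funclet,
        };
        for &succ in &blocks[bb].succs {
            match result[succ] {
                CleanupKind::NotCleanup => {
                    result[succ] = CleanupKind::Internal { funclet };
                }
                // A jump to another funclet is the cross-funclet case that
                // the trampoline in mir/block.rs has to handle.
                CleanupKind::Funclet => {}
                CleanupKind::Internal { funclet: other } if other != funclet => {
                    result[succ] = CleanupKind::Funclet;
                }
                CleanupKind::Internal { .. } => {}
            }
        }
    }
    result
}

fn main() {
    // bb0 unwinds to bb2 and bb1 unwinds to bb3; bb2 and bb3 both continue
    // to bb4, so bb4 must be promoted to a funclet of its own.
    let cfg = vec![
        ToyBlock { succs: vec![1], unwind: Some(2) },
        ToyBlock { succs: vec![], unwind: Some(3) },
        ToyBlock { succs: vec![4], unwind: None },
        ToyBlock { succs: vec![4], unwind: None },
        ToyBlock { succs: vec![], unwind: None },
    ];
    for (bb, kind) in cleanup_kinds(&cfg).iter().enumerate() {
        println!("bb{}: {:?}", bb, kind);
    }
}

Running the sketch prints NotCleanup for bb0 and bb1 and Funclet for bb2, bb3 and bb4:
bb4 is promoted because two different funclets flow into it, the same situation that
forces `llblock` above to emit a `cleanupret` trampoline for cross-funclet edges.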