diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index cbbce73c54119..b8356189bd13f 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -301,18 +301,20 @@ bool BasicBlock::CanRemoveJumpToNext(Compiler* compiler) const } //------------------------------------------------------------------------ -// CanRemoveJumpToFalseTarget: determine if jump to false target can be omitted +// CanRemoveJumpToTarget: determine if jump to target can be omitted // // Arguments: +// target - true/false target of the BBJ_COND block // compiler - current compiler instance // // Returns: -// true if block is a BBJ_COND that can fall into its false target +// true if block is a BBJ_COND that can fall into target // -bool BasicBlock::CanRemoveJumpToFalseTarget(Compiler* compiler) const +bool BasicBlock::CanRemoveJumpToTarget(BasicBlock* target, Compiler* compiler) const { assert(KindIs(BBJ_COND)); - return NextIs(bbFalseTarget) && !hasAlign() && !compiler->fgInDifferentRegions(this, bbFalseTarget); + assert(TrueTargetIs(target) || FalseTargetIs(target)); + return NextIs(target) && !compiler->fgInDifferentRegions(this, target); } //------------------------------------------------------------------------ @@ -855,8 +857,7 @@ void BasicBlock::CopyTarget(Compiler* compiler, const BasicBlock* from) SetEhf(new (compiler, CMK_BasicBlock) BBehfDesc(compiler, from->GetEhfTargets())); break; case BBJ_COND: - // TODO-NoFallThrough: Copy false target, too? - SetCond(from->GetTrueTarget(), Next()); + SetCond(from->GetTrueTarget(), from->GetFalseTarget()); break; case BBJ_ALWAYS: SetKindAndTarget(from->GetKind(), from->GetTarget()); @@ -897,8 +898,7 @@ void BasicBlock::TransferTarget(BasicBlock* from) from->bbEhfTargets = nullptr; // Make sure nobody uses the descriptor after this. break; case BBJ_COND: - // TODO-NoFallThrough: Copy false target, too? 
- SetCond(from->GetTrueTarget(), Next()); + SetCond(from->GetTrueTarget(), from->GetFalseTarget()); break; case BBJ_ALWAYS: SetKindAndTarget(from->GetKind(), from->GetTarget()); @@ -1183,7 +1183,7 @@ unsigned BasicBlock::NumSucc() const return 1; case BBJ_COND: - if (bbTarget == bbNext) + if (bbTrueTarget == bbFalseTarget) { return 1; } @@ -1308,7 +1308,7 @@ unsigned BasicBlock::NumSucc(Compiler* comp) return 1; case BBJ_COND: - if (bbTarget == bbNext) + if (bbTrueTarget == bbFalseTarget) { return 1; } diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index ab07d18d7fabe..9fa0bcf0f8d73 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -610,7 +610,7 @@ struct BasicBlock : private LIR::Range bool CanRemoveJumpToNext(Compiler* compiler) const; - bool CanRemoveJumpToFalseTarget(Compiler* compiler) const; + bool CanRemoveJumpToTarget(BasicBlock* target, Compiler* compiler) const; unsigned GetTargetOffs() const { @@ -663,19 +663,13 @@ struct BasicBlock : private LIR::Range { assert(KindIs(BBJ_COND)); assert(bbTrueTarget != nullptr); - assert(target != nullptr); return (bbTrueTarget == target); } BasicBlock* GetFalseTarget() const { assert(KindIs(BBJ_COND)); - - // So long as bbFalseTarget tracks bbNext in SetNext(), it is possible for bbFalseTarget to be null - // if this block is unlinked from the block list. - // So check bbNext before triggering the assert if bbFalseTarget is null. 
- // TODO-NoFallThrough: Remove IsLast() check once bbFalseTarget isn't hard-coded to bbNext - assert((bbFalseTarget != nullptr) || IsLast()); + assert(bbFalseTarget != nullptr); return bbFalseTarget; } @@ -690,15 +684,12 @@ struct BasicBlock : private LIR::Range { assert(KindIs(BBJ_COND)); assert(bbFalseTarget != nullptr); - assert(target != nullptr); return (bbFalseTarget == target); } void SetCond(BasicBlock* trueTarget, BasicBlock* falseTarget) { assert(trueTarget != nullptr); - // TODO-NoFallThrough: Allow falseTarget to diverge from bbNext - assert(falseTarget == bbNext); bbKind = BBJ_COND; bbTrueTarget = trueTarget; bbFalseTarget = falseTarget; diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp index 28908f4392d79..4a8c08a89858e 100644 --- a/src/coreclr/jit/codegenarm.cpp +++ b/src/coreclr/jit/codegenarm.cpp @@ -1317,6 +1317,13 @@ void CodeGen::genCodeForJTrue(GenTreeOp* jtrue) regNumber reg = genConsumeReg(op); inst_RV_RV(INS_tst, reg, reg, genActualType(op)); inst_JMP(EJ_ne, compiler->compCurBB->GetTrueTarget()); + + // If we cannot fall into the false target, emit a jump to it + BasicBlock* falseTarget = compiler->compCurBB->GetFalseTarget(); + if (!compiler->compCurBB->CanRemoveJumpToTarget(falseTarget, compiler)) + { + inst_JMP(EJ_jmp, falseTarget); + } } //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index dc0fe90c1e2fe..4efe4a235f6a7 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -4708,6 +4708,13 @@ void CodeGen::genCodeForJTrue(GenTreeOp* jtrue) GenTree* op = jtrue->gtGetOp1(); regNumber reg = genConsumeReg(op); GetEmitter()->emitIns_J_R(INS_cbnz, emitActualTypeSize(op), compiler->compCurBB->GetTrueTarget(), reg); + + // If we cannot fall into the false target, emit a jump to it + BasicBlock* falseTarget = compiler->compCurBB->GetFalseTarget(); + if 
(!compiler->compCurBB->CanRemoveJumpToTarget(falseTarget, compiler)) + { + inst_JMP(EJ_jmp, falseTarget); + } } //------------------------------------------------------------------------ @@ -4935,6 +4942,13 @@ void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree) GetEmitter()->emitIns_J_R(ins, attr, compiler->compCurBB->GetTrueTarget(), reg); } + + // If we cannot fall into the false target, emit a jump to it + BasicBlock* falseTarget = compiler->compCurBB->GetFalseTarget(); + if (!compiler->compCurBB->CanRemoveJumpToTarget(falseTarget, compiler)) + { + inst_JMP(EJ_jmp, falseTarget); + } } //--------------------------------------------------------------------- diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 9410417393ba1..0537b050219a5 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -396,7 +396,7 @@ void CodeGen::genMarkLabelsForCodegen() block->GetTrueTarget()->SetFlags(BBF_HAS_LABEL); // If we need a jump to the false target, give it a label - if (!block->CanRemoveJumpToFalseTarget(compiler)) + if (!block->CanRemoveJumpToTarget(block->GetFalseTarget(), compiler)) { JITDUMP(" " FMT_BB " : branch target\n", block->GetFalseTarget()->bbNum); block->GetFalseTarget()->SetFlags(BBF_HAS_LABEL); diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index ecdd1dc62b9cf..08b35cc501cca 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -2643,9 +2643,10 @@ void CodeGen::genCodeForJcc(GenTreeCC* jcc) inst_JCC(jcc->gtCondition, compiler->compCurBB->GetTrueTarget()); // If we cannot fall into the false target, emit a jump to it - if (!compiler->compCurBB->CanRemoveJumpToFalseTarget(compiler)) + BasicBlock* falseTarget = compiler->compCurBB->GetFalseTarget(); + if (!compiler->compCurBB->CanRemoveJumpToTarget(falseTarget, compiler)) { - inst_JMP(EJ_jmp, compiler->compCurBB->GetFalseTarget()); + inst_JMP(EJ_jmp, falseTarget); } } 
diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index a0d325c4f29b6..41266917205a5 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -4330,6 +4330,13 @@ void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree) assert(regs != 0); emit->emitIns_J(ins, compiler->compCurBB->GetTrueTarget(), regs); // 5-bits; + + // If we cannot fall into the false target, emit a jump to it + BasicBlock* falseTarget = compiler->compCurBB->GetFalseTarget(); + if (!compiler->compCurBB->CanRemoveJumpToTarget(falseTarget, compiler)) + { + inst_JMP(EJ_jmp, falseTarget); + } } //--------------------------------------------------------------------- @@ -4884,6 +4891,16 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) assert(jcc->gtCondition.Is(GenCondition::EQ, GenCondition::NE)); instruction ins = jcc->gtCondition.Is(GenCondition::EQ) ? INS_bceqz : INS_bcnez; emit->emitIns_J(ins, tgtBlock, (int)1 /* cc */); + + if (compiler->compCurBB->KindIs(BBJ_COND)) + { + // If we cannot fall into the false target, emit a jump to it + BasicBlock* falseTarget = compiler->compCurBB->GetFalseTarget(); + if (!compiler->compCurBB->CanRemoveJumpToTarget(falseTarget, compiler)) + { + inst_JMP(EJ_jmp, falseTarget); + } + } } break; diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp index b3e9c244fc10b..d2656d20314df 100644 --- a/src/coreclr/jit/codegenriscv64.cpp +++ b/src/coreclr/jit/codegenriscv64.cpp @@ -4243,6 +4243,13 @@ void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree) assert(regs != 0); emit->emitIns_J(ins, compiler->compCurBB->GetTrueTarget(), regs); // 5-bits; + + // If we cannot fall into the false target, emit a jump to it + BasicBlock* falseTarget = compiler->compCurBB->GetFalseTarget(); + if (!compiler->compCurBB->CanRemoveJumpToTarget(falseTarget, compiler)) + { + inst_JMP(EJ_jmp, falseTarget); + } } 
//--------------------------------------------------------------------- diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index 6088d7203ced1..03a255d98078a 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -1479,6 +1479,13 @@ void CodeGen::genCodeForJTrue(GenTreeOp* jtrue) regNumber reg = genConsumeReg(op); inst_RV_RV(INS_test, reg, reg, genActualType(op)); inst_JMP(EJ_jne, compiler->compCurBB->GetTrueTarget()); + + // If we cannot fall into the false target, emit a jump to it + BasicBlock* falseTarget = compiler->compCurBB->GetFalseTarget(); + if (!compiler->compCurBB->CanRemoveJumpToTarget(falseTarget, compiler)) + { + inst_JMP(EJ_jmp, falseTarget); + } } //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index ab741cedd731c..14f4b872a6d16 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -2189,7 +2189,7 @@ class FlowGraphNaturalLoop bool HasDef(unsigned lclNum); bool CanDuplicate(INDEBUG(const char** reason)); - void Duplicate(BasicBlock** insertAfter, BlockToBlockMap* map, weight_t weightScale, bool bottomNeedsRedirection); + void Duplicate(BasicBlock** insertAfter, BlockToBlockMap* map, weight_t weightScale); #ifdef DEBUG static void Dump(FlowGraphNaturalLoop* loop); @@ -6783,7 +6783,7 @@ class Compiler bool optCompactLoop(FlowGraphNaturalLoop* loop); BasicBlock* optFindLoopCompactionInsertionPoint(FlowGraphNaturalLoop* loop, BasicBlock* top); BasicBlock* optTryAdvanceLoopCompactionInsertionPoint(FlowGraphNaturalLoop* loop, BasicBlock* insertionPoint, BasicBlock* top, BasicBlock* bottom); - bool optLoopCompactionFixupFallThrough(BasicBlock* block, BasicBlock* oldNext, BasicBlock* newNext); + bool optLoopCompactionFixupFallThrough(BasicBlock* block, BasicBlock* newNext); bool optCreatePreheader(FlowGraphNaturalLoop* loop); void optSetPreheaderWeight(FlowGraphNaturalLoop* 
loop, BasicBlock* preheader); diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index b1419e6374b15..3852eec65d4bf 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -693,15 +693,39 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, Bas break; case BBJ_COND: - - // Functionally equivalent to above + { + FlowEdge* oldEdge = fgRemoveRefPred(oldTarget, block); if (block->TrueTargetIs(oldTarget)) { - block->SetTrueTarget(newTarget); - fgRemoveRefPred(oldTarget, block); + if (block->FalseTargetIs(oldTarget)) + { + // fgRemoveRefPred returns nullptr for BBJ_COND blocks with two flow edges to target + assert(oldEdge == nullptr); + fgRemoveConditionalJump(block); + assert(block->KindIs(BBJ_ALWAYS)); + block->SetTarget(newTarget); + } + else + { + // fgRemoveRefPred should have removed the flow edge + assert(oldEdge != nullptr); + block->SetTrueTarget(newTarget); + } + + // TODO-NoFallThrough: Proliferate weight from oldEdge + // (we avoid doing so when updating the true target to reduce diffs for now, as a quirk) fgAddRefPred(newTarget, block); } + else + { + // fgRemoveRefPred should have removed the flow edge + assert(oldEdge != nullptr); + assert(block->FalseTargetIs(oldTarget)); + block->SetFalseTarget(newTarget); + fgAddRefPred(newTarget, block, oldEdge); + } break; + } case BBJ_SWITCH: { @@ -5032,13 +5056,17 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) if (curr->KindIs(BBJ_COND)) { - curr->SetFalseTarget(curr->Next()); - fgReplacePred(succ, curr, newBlock); if (curr->TrueTargetIs(succ)) { - // Now 'curr' jumps to newBlock curr->SetTrueTarget(newBlock); } + else + { + assert(curr->FalseTargetIs(succ)); + curr->SetFalseTarget(newBlock); + } + + fgReplacePred(succ, curr, newBlock); fgAddRefPred(newBlock, curr); } else if (curr->KindIs(BBJ_SWITCH)) @@ -5178,7 +5206,7 @@ void Compiler::fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd) // // Arguments: // block - the 
block to remove -// unreachable - the block to remove +// unreachable - indicates whether removal is because block is unreachable or empty // // Return Value: // The block after the block, or blocks, removed. @@ -5345,24 +5373,15 @@ BasicBlock* Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) break; case BBJ_COND: - /* The links for the direct predecessor case have already been updated above */ - if (!predBlock->TrueTargetIs(block)) + if (predBlock->TrueTargetIs(block)) { - break; + predBlock->SetTrueTarget(succBlock); } - /* Check if both sides of the BBJ_COND now jump to the same block */ - if (predBlock->FalseTargetIs(succBlock)) + if (predBlock->FalseTargetIs(block)) { - // Make sure we are replacing "block" with "succBlock" in predBlock->bbTarget. - noway_assert(predBlock->TrueTargetIs(block)); - predBlock->SetTrueTarget(succBlock); - fgRemoveConditionalJump(predBlock); - break; + predBlock->SetFalseTarget(succBlock); } - - noway_assert(predBlock->TrueTargetIs(block)); - predBlock->SetTrueTarget(succBlock); break; case BBJ_CALLFINALLY: @@ -5397,7 +5416,9 @@ BasicBlock* Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) break; case BBJ_COND: - bPrev->SetFalseTarget(block->Next()); + // block should not be a target anymore + assert(!bPrev->TrueTargetIs(block)); + assert(!bPrev->FalseTargetIs(block)); /* Check if both sides of the BBJ_COND now jump to the same block */ if (bPrev->TrueTargetIs(bPrev->GetFalseTarget())) @@ -5473,7 +5494,7 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) /* If bSrc falls through to a block that is not bDst, we will insert a jump to bDst */ - if (bSrc->KindIs(BBJ_COND) && !bSrc->NextIs(bDst)) + if (bSrc->KindIs(BBJ_COND) && bSrc->FalseTargetIs(bDst) && !bSrc->NextIs(bDst)) { // Add a new block after bSrc which jumps to 'bDst' jmpBlk = fgNewBBafter(BBJ_ALWAYS, bSrc, true, bDst); @@ -5532,10 +5553,6 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) { 
bSrc->SetFlags(BBF_NONE_QUIRK); } - else if (bSrc->KindIs(BBJ_COND) && bSrc->NextIs(bDst)) - { - bSrc->SetFalseTarget(bDst); - } return jmpBlk; } @@ -6049,11 +6066,15 @@ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE r // We have decided to insert the block(s) after fgLastBlock fgMoveBlocksAfter(bStart, bLast, insertAfterBlk); - // If bPrev falls through, we will insert a jump to block - fgConnectFallThrough(bPrev, bStart); + if (bPrev->KindIs(BBJ_ALWAYS) && bPrev->JumpsToNext()) + { + bPrev->SetFlags(BBF_NONE_QUIRK); + } - // If bLast falls through, we will insert a jump to bNext - fgConnectFallThrough(bLast, bNext); + if (bLast->KindIs(BBJ_ALWAYS) && bLast->JumpsToNext()) + { + bLast->SetFlags(BBF_NONE_QUIRK); + } #endif // !FEATURE_EH_FUNCLETS @@ -6944,9 +6965,6 @@ BasicBlock* Compiler::fgNewBBinRegionWorker(BBKinds jumpKind, } } - /* If afterBlk falls through, we insert a jump around newBlk */ - fgConnectFallThrough(afterBlk, newBlk->Next()); - #ifdef DEBUG fgVerifyHandlerTab(); #endif diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index dd99bd0a34df6..ed3f4252bed5a 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -2980,13 +2980,6 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef maxBBNum = max(maxBBNum, block->bbNum); - // BBJ_COND's normal (false) jump target is expected to be the next block - // TODO-NoFallThrough: Allow bbFalseTarget to diverge from bbNext - if (block->KindIs(BBJ_COND)) - { - assert(block->NextIs(block->GetFalseTarget())); - } - // Check that all the successors have the current traversal stamp. Use the 'Compiler*' version of the // iterator, but not for BBJ_SWITCH: we don't want to end up calling GetDescriptorForSwitch(), which will // dynamically create the unique switch list. 
diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 921d2f7dbdd02..52f969611a0a7 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -903,11 +903,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() // bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext) { - if ((block == nullptr) || (bNext == nullptr)) - { - return false; - } - + assert(block != nullptr); assert(block->NextIs(bNext)); if (!block->KindIs(BBJ_ALWAYS) || !block->TargetIs(bNext) || block->HasFlag(BBF_KEEP_BBJ_ALWAYS)) @@ -1559,7 +1555,16 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc break; case BBJ_COND: - block->SetTrueTarget(bDest->GetTarget()); + if (block->TrueTargetIs(bDest)) + { + assert(!block->FalseTargetIs(bDest)); + block->SetTrueTarget(bDest->GetTarget()); + } + else + { + assert(block->FalseTargetIs(bDest)); + block->SetFalseTarget(bDest->GetTarget()); + } break; default: @@ -3486,6 +3491,23 @@ bool Compiler::fgReorderBlocks(bool useProfile) } } + // If we will be reordering blocks, re-establish implicit fallthrough for BBJ_COND blocks + if (useProfile) + { + for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->Next()) + { + if (block->KindIs(BBJ_COND) && !block->NextIs(block->GetFalseTarget())) + { + BasicBlock* jmpBlk = fgConnectFallThrough(block, block->GetFalseTarget()); + assert(jmpBlk != nullptr); + assert(block->NextIs(jmpBlk)); + + // Skip next block + block = jmpBlk; + } + } + } + #ifdef DEBUG if (verbose) { @@ -3545,6 +3567,10 @@ bool Compiler::fgReorderBlocks(bool useProfile) } else if (bPrev->KindIs(BBJ_COND)) { + // fgReorderBlocks is called in more than one optimization phase, + // but only does any reordering in optOptimizeLayout. + // At that point, we expect implicit fallthrough to be restored for BBJ_COND blocks. 
+ assert(bPrev->FalseTargetIs(block) || !reorderBlock); bDest = bPrev->GetTrueTarget(); forwardBranch = fgIsForwardBranch(bPrev, bDest); backwardBranch = !forwardBranch; @@ -4524,6 +4550,11 @@ bool Compiler::fgReorderBlocks(bool useProfile) noway_assert(condTest->gtOper == GT_JTRUE); condTest->AsOp()->gtOp1 = gtReverseCond(condTest->AsOp()->gtOp1); + BasicBlock* trueTarget = bPrev->GetTrueTarget(); + BasicBlock* falseTarget = bPrev->GetFalseTarget(); + bPrev->SetTrueTarget(falseTarget); + bPrev->SetFalseTarget(trueTarget); + // may need to rethread // if (fgNodeThreading == NodeThreading::AllTrees) @@ -4533,18 +4564,10 @@ bool Compiler::fgReorderBlocks(bool useProfile) fgSetStmtSeq(condTestStmt); } - if (bStart2 == nullptr) - { - /* Set the new jump dest for bPrev to the rarely run or uncommon block(s) */ - bPrev->SetTrueTarget(bStart); - } - else + if (bStart2 != nullptr) { noway_assert(insertAfterBlk == bPrev); noway_assert(insertAfterBlk->NextIs(block)); - - /* Set the new jump dest for bPrev to the rarely run or uncommon block(s) */ - bPrev->SetTrueTarget(block); } } @@ -4683,7 +4706,7 @@ PhaseStatus Compiler::fgUpdateFlowGraphPhase() // Debuggable code and Min Optimization JIT also introduces basic blocks // but we do not optimize those! 
// -bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) +bool Compiler::fgUpdateFlowGraph(bool doTailDuplication /* = false */, bool isPhase /* = false */) { #ifdef DEBUG if (verbose && !isPhase) @@ -4722,6 +4745,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) BasicBlock* bPrev = nullptr; // the previous non-worthless block BasicBlock* bNext; // the successor of the current block BasicBlock* bDest; // the jump target of the current block + BasicBlock* bFalseDest; // the false target of the current block (if it is a BBJ_COND) for (block = fgFirstBB; block != nullptr; block = block->Next()) { @@ -4755,8 +4779,9 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) REPEAT:; - bNext = block->Next(); - bDest = nullptr; + bNext = block->Next(); + bDest = nullptr; + bFalseDest = nullptr; if (block->KindIs(BBJ_ALWAYS)) { @@ -4791,28 +4816,23 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) } else if (block->KindIs(BBJ_COND)) { - bDest = block->GetTrueTarget(); - if (bDest == bNext) + bDest = block->GetTrueTarget(); + bFalseDest = block->GetFalseTarget(); + if (bDest == bFalseDest) { - // TODO-NoFallThrough: Fix above condition once bbFalseTarget can diverge from bbNext - assert(block->FalseTargetIs(bNext)); fgRemoveConditionalJump(block); - assert(block->KindIs(BBJ_ALWAYS)); - assert(block->TargetIs(bNext)); - change = true; - modified = true; - - // Skip jump optimizations, and try to compact block and bNext later - bDest = nullptr; + change = true; + modified = true; + bFalseDest = nullptr; } } if (bDest != nullptr) { // Do we have a JUMP to an empty unconditional JUMP block? 
- if (bDest->isEmpty() && bDest->KindIs(BBJ_ALWAYS) && - !bDest->TargetIs(bDest)) // special case for self jumps + if (bDest->KindIs(BBJ_ALWAYS) && !bDest->TargetIs(bDest) && // special case for self jumps + bDest->isEmpty()) { // TODO: Allow optimizing branches to blocks that jump to the next block const bool optimizeBranch = !bDest->JumpsToNext() || !bDest->HasFlag(BBF_NONE_QUIRK); @@ -4833,17 +4853,16 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) // bNext's jump target has a join. // if (block->KindIs(BBJ_COND) && // block is a BBJ_COND block - (bNext->bbRefs == 1) && // No other block jumps to bNext - bNext->KindIs(BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block + (bFalseDest == bNext) && // false target is the next block + (bNext->bbRefs == 1) && // no other block jumps to bNext + bNext->KindIs(BBJ_ALWAYS) && // the next block is a BBJ_ALWAYS block !bNext->JumpsToNext() && // and it doesn't jump to the next block (we might compact them) bNext->isEmpty() && // and it is an empty block !bNext->TargetIs(bNext) && // special case for self jumps !bDest->IsFirstColdBlock(this) && !fgInDifferentRegions(block, bDest)) // do not cross hot/cold sections { - // bbFalseTarget cannot be null assert(block->FalseTargetIs(bNext)); - assert(bNext != nullptr); // case (a) // @@ -4917,15 +4936,6 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) JITDUMP("\nMoving " FMT_BB " after " FMT_BB " to enable reversal\n", bDest->bbNum, bNext->bbNum); - // If bDest can fall through we'll need to create a jump - // block after it too. Remember where to jump to. 
- // - BasicBlock* const bDestNext = bDest->Next(); - - // Once bbFalseTarget and bbNext can diverge, this assert will hit - // TODO-NoFallThrough: Remove fall-through for BBJ_COND below - assert(!bDest->KindIs(BBJ_COND) || bDest->FalseTargetIs(bDestNext)); - // Move bDest // if (ehIsBlockEHLast(bDest)) @@ -4943,15 +4953,16 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) // Add fall through fixup block, if needed. // - if (bDest->KindIs(BBJ_COND)) + if (bDest->KindIs(BBJ_COND) && !bDest->NextIs(bDest->GetFalseTarget())) { - BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true, bDestNext); + BasicBlock* const bDestFalseTarget = bDest->GetFalseTarget(); + BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true, bDestFalseTarget); bDest->SetFalseTarget(bFixup); - bFixup->inheritWeight(bDestNext); + bFixup->inheritWeight(bDestFalseTarget); - fgRemoveRefPred(bDestNext, bDest); + fgRemoveRefPred(bDestFalseTarget, bDest); fgAddRefPred(bFixup, bDest); - fgAddRefPred(bDestNext, bFixup); + fgAddRefPred(bDestFalseTarget, bFixup); } } } @@ -5114,7 +5125,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) switch (block->GetKind()) { case BBJ_COND: - if (block->TrueTargetIs(block)) + if (block->TrueTargetIs(block) || block->FalseTargetIs(block)) { fgRemoveBlock(block, /* unreachable */ true); diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index a4d9c21722471..504449a7c178a 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -521,14 +521,6 @@ void BlockCountInstrumentor::RelocateProbes() // Redirect any jumps // m_comp->fgReplaceJumpTarget(pred, intermediary, block); - - // Handle case where we had a fall through critical edge - // - if (pred->NextIs(intermediary)) - { - m_comp->fgRemoveRefPred(pred, block); - m_comp->fgAddRefPred(intermediary, block); - } } } } diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp 
index 923d327666498..b7af7de958555 100644 --- a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -337,7 +337,9 @@ void ProfileSynthesis::AssignLikelihoodSwitch(BasicBlock* block) // const unsigned n = block->NumSucc(); assert(n != 0); - const weight_t p = 1 / (weight_t)n; + + // Check for divide by zero to avoid compiler warnings for Release builds (above assert is removed) + const weight_t p = (n != 0) ? (1 / (weight_t)n) : 0; // Each unique edge gets some multiple of that basic probability // diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index bed61ad81fb0c..f5ce79df20d4f 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -4370,10 +4370,9 @@ FlowGraphNaturalLoops* FlowGraphNaturalLoops::Find(const FlowGraphDfsTree* dfsTr for (FlowEdge* const predEdge : loop->m_header->PredEdges()) { BasicBlock* predBlock = predEdge->getSourceBlock(); - if (dfsTree->Contains(predBlock) && !dfsTree->IsAncestor(header, predEdge->getSourceBlock())) + if (dfsTree->Contains(predBlock) && !dfsTree->IsAncestor(header, predBlock)) { - JITDUMP(FMT_BB " -> " FMT_BB " is an entry edge\n", predEdge->getSourceBlock()->bbNum, - loop->m_header->bbNum); + JITDUMP(FMT_BB " -> " FMT_BB " is an entry edge\n", predBlock->bbNum, loop->m_header->bbNum); loop->m_entryEdges.push_back(predEdge); } } @@ -5449,16 +5448,8 @@ bool FlowGraphNaturalLoop::CanDuplicate(INDEBUG(const char** reason)) // insertAfter - [in, out] Block to insert duplicated blocks after; updated to last block inserted. // map - A map that will have mappings from loop blocks to duplicated blocks added to it. // weightScale - Factor to scale weight of new blocks by -// bottomNeedsRedirection - Whether or not to insert a redirection block for the bottom block in case of fallthrough // -// Remarks: -// Due to fallthrough this block may need to insert blocks with no -// corresponding source block in "map". 
-// -void FlowGraphNaturalLoop::Duplicate(BasicBlock** insertAfter, - BlockToBlockMap* map, - weight_t weightScale, - bool bottomNeedsRedirection) +void FlowGraphNaturalLoop::Duplicate(BasicBlock** insertAfter, BlockToBlockMap* map, weight_t weightScale) { assert(CanDuplicate(nullptr)); @@ -5484,38 +5475,6 @@ void FlowGraphNaturalLoop::Duplicate(BasicBlock** insertAfter, *insertAfter = newBlk; map->Set(blk, newBlk, BlockToBlockMap::Overwrite); - // If the block falls through to a block outside the loop then we may - // need to insert a new block to redirect. - // Skip this once we get to the bottom block if our cloned version is - // going to fall into the right version anyway. - if (blk->bbFallsThrough() && !ContainsBlock(blk->Next()) && ((blk != bottom) || bottomNeedsRedirection)) - { - if (blk->KindIs(BBJ_COND)) - { - BasicBlock* targetBlk = blk->GetFalseTarget(); - assert(blk->NextIs(targetBlk)); - - // Need to insert a block. - BasicBlock* newRedirBlk = - comp->fgNewBBafter(BBJ_ALWAYS, *insertAfter, /* extendRegion */ true, targetBlk); - newRedirBlk->copyEHRegion(*insertAfter); - newRedirBlk->bbWeight = blk->Next()->bbWeight; - newRedirBlk->CopyFlags(blk->Next(), (BBF_RUN_RARELY | BBF_PROF_WEIGHT)); - newRedirBlk->scaleBBWeight(weightScale); - - JITDUMP(FMT_BB " falls through to " FMT_BB "; inserted redirection block " FMT_BB "\n", blk->bbNum, - blk->Next()->bbNum, newRedirBlk->bbNum); - // This block isn't part of the loop, so below loop won't add - // refs for it. 
- comp->fgAddRefPred(targetBlk, newRedirBlk); - *insertAfter = newRedirBlk; - } - else - { - assert(!"Cannot handle fallthrough"); - } - } - return BasicBlockVisit::Continue; }); diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index 8c5e0e70fd6f1..1b4ec52c353fc 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -2256,13 +2256,6 @@ bool Compiler::fgNormalizeEHCase2() // fgReplaceJumpTarget(predBlock, newTryStart, insertBeforeBlk); - if (predBlock->NextIs(newTryStart) && predBlock->KindIs(BBJ_COND)) - { - predBlock->SetFalseTarget(newTryStart); - fgRemoveRefPred(insertBeforeBlk, predBlock); - fgAddRefPred(newTryStart, predBlock); - } - JITDUMP("Redirect " FMT_BB " target from " FMT_BB " to " FMT_BB ".\n", predBlock->bbNum, insertBeforeBlk->bbNum, newTryStart->bbNum); } diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index 0183169db3172..bb7bb5133f1b6 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -2000,25 +2000,6 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex // bottomNext BasicBlock* bottom = loop->GetLexicallyBottomMostBlock(); BasicBlock* newPred = bottom; - if (bottom->KindIs(BBJ_COND)) - { - // TODO-NoFallThrough: Shouldn't need new BBJ_ALWAYS block once bbFalseTarget can diverge from bbNext - BasicBlock* bottomNext = bottom->Next(); - assert(bottom->FalseTargetIs(bottomNext)); - JITDUMP("Create branch around cloned loop\n"); - BasicBlock* bottomRedirBlk = fgNewBBafter(BBJ_ALWAYS, bottom, /*extendRegion*/ true, bottomNext); - JITDUMP("Adding " FMT_BB " after " FMT_BB "\n", bottomRedirBlk->bbNum, bottom->bbNum); - bottomRedirBlk->bbWeight = bottomRedirBlk->isRunRarely() ? 
BB_ZERO_WEIGHT : ambientWeight; - - bottom->SetFalseTarget(bottomRedirBlk); - fgAddRefPred(bottomRedirBlk, bottom); - JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", bottom->bbNum, bottomRedirBlk->bbNum); - fgReplacePred(bottomNext, bottom, bottomRedirBlk); - JITDUMP("Replace " FMT_BB " -> " FMT_BB " with " FMT_BB " -> " FMT_BB "\n", bottom->bbNum, bottomNext->bbNum, - bottomRedirBlk->bbNum, bottomNext->bbNum); - - newPred = bottomRedirBlk; - } // Create a new preheader for the slow loop immediately before the slow // loop itself. All failed conditions will branch to the slow preheader. @@ -2035,7 +2016,7 @@ void Compiler::optCloneLoop(FlowGraphNaturalLoop* loop, LoopCloneContext* contex BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopClone)) BlockToBlockMap(getAllocator(CMK_LoopClone)); - loop->Duplicate(&newPred, blockMap, slowPathWeightScaleFactor, /* bottomNeedsRedirection */ false); + loop->Duplicate(&newPred, blockMap, slowPathWeightScaleFactor); // Scale old blocks to the fast path weight. 
loop->VisitLoopBlocks([=](BasicBlock* block) { diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index f1142bcbd215b..e96ebcf9c3a66 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -7200,7 +7200,7 @@ PhaseStatus Lowering::DoPhase() // local var liveness can delete code, which may create empty blocks if (comp->opts.OptimizationEnabled()) { - bool modified = comp->fgUpdateFlowGraph(); + bool modified = comp->fgUpdateFlowGraph(/* doTailDuplication */ false, /* isPhase */ false); modified |= comp->fgRemoveDeadBlocks(); if (modified) diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index 7cca29bc7dbfe..f9616636681b5 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -751,7 +751,7 @@ bool OptBoolsDsc::optOptimizeRangeTests() // BasicBlock* notInRangeBb = m_b1->GetTrueTarget(); BasicBlock* inRangeBb; - if (notInRangeBb == m_b2->GetTrueTarget()) + if (m_b2->TrueTargetIs(notInRangeBb)) { // Shape 1: both conditions jump to NotInRange // @@ -765,7 +765,7 @@ bool OptBoolsDsc::optOptimizeRangeTests() // ... inRangeBb = m_b2->GetFalseTarget(); } - else if (notInRangeBb == m_b2->GetFalseTarget()) + else if (m_b2->FalseTargetIs(notInRangeBb)) { // Shape 2: 2nd block jumps to InRange // @@ -807,11 +807,16 @@ bool OptBoolsDsc::optOptimizeRangeTests() return false; } + // Re-direct firstBlock to jump to inRangeBb m_comp->fgAddRefPred(inRangeBb, m_b1); if (!cmp2IsReversed) { - // Re-direct firstBlock to jump to inRangeBb m_b1->SetTrueTarget(inRangeBb); + m_b1->SetFalseTarget(notInRangeBb); + } + else + { + m_b1->SetFalseTarget(inRangeBb); } // Remove the 2nd condition block as we no longer need it @@ -1015,7 +1020,7 @@ bool OptBoolsDsc::optOptimizeCompareChainCondBlock() m_b2->CopyFlags(m_b1, BBF_COPY_PROPAGATE); // Join the two blocks. This is done now to ensure that additional conditions can be chained. 
- if (m_comp->fgCanCompactBlocks(m_b1, m_b2)) + if (m_b1->NextIs(m_b2) && m_comp->fgCanCompactBlocks(m_b1, m_b2)) { m_comp->fgCompactBlocks(m_b1, m_b2); } diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index d0dfefe46f77a..6c3e9e83e6e58 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -561,7 +561,6 @@ void Compiler::optCheckPreds() // predOption - specifies how to update the pred lists // // Notes: -// Fall-through successors are assumed correct and are not modified. // Pred lists for successors of `blk` may be changed, depending on `predOption`. // void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, RedirectBlockOption predOption) @@ -569,11 +568,6 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R const bool updatePreds = (predOption == RedirectBlockOption::UpdatePredLists); const bool addPreds = (predOption == RedirectBlockOption::AddToPredLists); - if (addPreds && blk->bbFallsThrough()) - { - fgAddRefPred(blk->Next(), blk); - } - BasicBlock* newJumpDest = nullptr; switch (blk->GetKind()) @@ -586,9 +580,15 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R // These have no jump destination to update. break; + case BBJ_CALLFINALLY: + if (addPreds && blk->bbFallsThrough()) + { + fgAddRefPred(blk->Next(), blk); + } + + FALLTHROUGH; case BBJ_ALWAYS: case BBJ_LEAVE: - case BBJ_CALLFINALLY: case BBJ_CALLFINALLYRET: // All of these have a single jump destination to update. 
if (redirectMap->Lookup(blk->GetTarget(), &newJumpDest)) @@ -627,6 +627,24 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R { fgAddRefPred(blk->GetTrueTarget(), blk); } + + // Update jump taken when condition is false + if (redirectMap->Lookup(blk->GetFalseTarget(), &newJumpDest)) + { + if (updatePreds) + { + fgRemoveRefPred(blk->GetFalseTarget(), blk); + } + blk->SetFalseTarget(newJumpDest); + if (updatePreds || addPreds) + { + fgAddRefPred(newJumpDest, blk); + } + } + else if (addPreds) + { + fgAddRefPred(blk->GetFalseTarget(), blk); + } break; case BBJ_EHFINALLYRET: @@ -1579,26 +1597,6 @@ bool Compiler::optTryUnrollLoop(FlowGraphNaturalLoop* loop, bool* changedIR) BasicBlock* exit = loop->ContainsBlock(exiting->GetTrueTarget()) ? exiting->GetFalseTarget() : exiting->GetTrueTarget(); - // If the original bottom block was falling out of the loop, then insert an - // explicit block to branch around the unrolled iterations we created. - if (bottom->KindIs(BBJ_COND)) - { - // TODO-NoFallThrough: Shouldn't need new BBJ_ALWAYS block once bbFalseTarget can diverge from bbNext - BasicBlock* bottomFalseTarget = bottom->GetFalseTarget(); - JITDUMP("Create branch around unrolled loop\n"); - BasicBlock* bottomRedirBlk = fgNewBBafter(BBJ_ALWAYS, bottom, /*extendRegion*/ true, bottomFalseTarget); - JITDUMP("Adding " FMT_BB " after " FMT_BB "\n", bottomRedirBlk->bbNum, bottom->bbNum); - - bottom->SetFalseTarget(bottomRedirBlk); - fgAddRefPred(bottomRedirBlk, bottom); - JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", bottom->bbNum, bottomRedirBlk->bbNum); - fgReplacePred(bottomFalseTarget, bottom, bottomRedirBlk); - JITDUMP("Replace " FMT_BB " -> " FMT_BB " with " FMT_BB " -> " FMT_BB "\n", bottom->bbNum, - bottomFalseTarget->bbNum, bottomRedirBlk->bbNum, bottomFalseTarget->bbNum); - - insertAfter = bottomRedirBlk; - } - for (int lval = lbeg; iterToUnroll > 0; iterToUnroll--) { // Block weight should no longer have the loop multiplier @@ -1607,7 
+1605,7 @@ bool Compiler::optTryUnrollLoop(FlowGraphNaturalLoop* loop, bool* changedIR) // and we might not have upscaled at all, if we had profile data. // weight_t scaleWeight = 1.0 / BB_LOOP_WEIGHT_SCALE; - loop->Duplicate(&insertAfter, &blockMap, scaleWeight, /* bottomNeedsRedirection */ true); + loop->Duplicate(&insertAfter, &blockMap, scaleWeight); // Replace all uses of the loop iterator with the current value. loop->VisitLoopBlocks([=, &blockMap](BasicBlock* block) { @@ -2435,7 +2433,7 @@ PhaseStatus Compiler::optOptimizeLayout() fgUpdateFlowGraph(/* doTailDuplication */ false); fgReorderBlocks(/* useProfile */ true); - fgUpdateFlowGraph(); + fgUpdateFlowGraph(/* doTailDuplication */ false, /* isPhase */ false); // fgReorderBlocks can cause IR changes even if it does not modify // the flow graph. It calls gtPrepareCost which can cause operand swapping. @@ -2816,10 +2814,10 @@ bool Compiler::optCompactLoop(FlowGraphNaturalLoop* loop) ehUpdateLastBlocks(insertionPoint, lastNonLoopBlock); // Apply any adjustments needed for fallthrough at the boundaries of the moved region. - changedFlowGraph |= optLoopCompactionFixupFallThrough(insertionPoint, moveBefore, cur); - changedFlowGraph |= optLoopCompactionFixupFallThrough(lastNonLoopBlock, nextLoopBlock, moveBefore); + changedFlowGraph |= optLoopCompactionFixupFallThrough(insertionPoint, cur); + changedFlowGraph |= optLoopCompactionFixupFallThrough(lastNonLoopBlock, moveBefore); // Also apply any adjustments needed where the blocks were snipped out of the loop. - changedFlowGraph |= optLoopCompactionFixupFallThrough(previous, cur, nextLoopBlock); + changedFlowGraph |= optLoopCompactionFixupFallThrough(previous, nextLoopBlock); // Update insertionPoint for the next insertion. insertionPoint = lastNonLoopBlock; @@ -2847,7 +2845,7 @@ BasicBlock* Compiler::optFindLoopCompactionInsertionPoint(FlowGraphNaturalLoop* // out of the loop, and if possible find a spot that won't break up fall-through. 
BasicBlock* bottom = loop->GetLexicallyBottomMostBlock(); BasicBlock* insertionPoint = bottom; - while (insertionPoint->bbFallsThrough()) + while (insertionPoint->bbFallsThrough() && !insertionPoint->IsLast()) { // Keep looking for a better insertion point if we can. BasicBlock* newInsertionPoint = optTryAdvanceLoopCompactionInsertionPoint(loop, insertionPoint, top, bottom); @@ -2935,47 +2933,36 @@ BasicBlock* Compiler::optTryAdvanceLoopCompactionInsertionPoint(FlowGraphNatural // // Parameters: // block - Block that may have fallthrough -// oldNext - The old block that was the fallthrough block // newNext - The new block that was the fallthrough block // // Returns: // True if the flow graph was changed by this function. // -bool Compiler::optLoopCompactionFixupFallThrough(BasicBlock* block, BasicBlock* oldNext, BasicBlock* newNext) +bool Compiler::optLoopCompactionFixupFallThrough(BasicBlock* block, BasicBlock* newNext) { + assert(block->NextIs(newNext)); bool changed = false; - if (block->bbFallsThrough()) + if (block->KindIs(BBJ_COND) && block->TrueTargetIs(newNext)) { - // Need to reconnect the flow from `block` to `oldNext`. + // Reverse the jump condition + GenTree* test = block->lastNode(); + noway_assert(test->OperIsConditionalJump()); - if (block->KindIs(BBJ_COND) && (newNext != nullptr) && block->TrueTargetIs(newNext)) + if (test->OperGet() == GT_JTRUE) { - // Reverse the jump condition - GenTree* test = block->lastNode(); - noway_assert(test->OperIsConditionalJump()); - - if (test->OperGet() == GT_JTRUE) - { - GenTree* cond = gtReverseCond(test->AsOp()->gtOp1); - assert(cond == test->AsOp()->gtOp1); // Ensure `gtReverseCond` did not create a new node. 
- test->AsOp()->gtOp1 = cond; - } - else - { - gtReverseCond(test); - } - - // Redirect the Conditional JUMP to go to `oldNext` - block->SetTrueTarget(oldNext); - block->SetFalseTarget(newNext); + GenTree* cond = gtReverseCond(test->AsOp()->gtOp1); + assert(cond == test->AsOp()->gtOp1); // Ensure `gtReverseCond` did not create a new node. + test->AsOp()->gtOp1 = cond; } else { - // Insert an unconditional jump to `oldNext` just after `block`. - fgConnectFallThrough(block, oldNext); + gtReverseCond(test); } + // Swap the targets: the old false target becomes the taken (true) target, and `newNext` becomes the false target + block->SetTrueTarget(block->GetFalseTarget()); + block->SetFalseTarget(newNext); changed = true; } else if (block->KindIs(BBJ_ALWAYS) && block->TargetIs(newNext)) @@ -3058,7 +3045,7 @@ bool Compiler::optCreatePreheader(FlowGraphNaturalLoop* loop) BasicBlock* preheaderCandidate = loop->EntryEdges()[0]->getSourceBlock(); unsigned candidateEHRegion = preheaderCandidate->hasTryIndex() ? preheaderCandidate->getTryIndex() : EHblkDsc::NO_ENCLOSING_INDEX; - if (preheaderCandidate->KindIs(BBJ_ALWAYS) && (preheaderCandidate->GetUniqueSucc() == loop->GetHeader()) && + if (preheaderCandidate->KindIs(BBJ_ALWAYS) && preheaderCandidate->TargetIs(loop->GetHeader()) && (candidateEHRegion == preheaderEHRegion)) { JITDUMP("Natural loop " FMT_LP " already has preheader " FMT_BB "\n", loop->GetIndex(), @@ -3095,47 +3082,6 @@ bool Compiler::optCreatePreheader(FlowGraphNaturalLoop* loop) header->bbNum, enterBlock->bbNum, preheader->bbNum); fgReplaceJumpTarget(enterBlock, preheader, header); - - // Fix up fall through - if (enterBlock->KindIs(BBJ_COND) && enterBlock->NextIs(header)) - { - FlowEdge* edge = fgRemoveRefPred(header, enterBlock); - BasicBlock* newAlways = fgNewBBafter(BBJ_ALWAYS, enterBlock, true, preheader); - fgAddRefPred(preheader, newAlways, edge); - fgAddRefPred(newAlways, enterBlock, edge); - - JITDUMP("Created new BBJ_ALWAYS " FMT_BB " to redirect " FMT_BB " to preheader\n", newAlways->bbNum, - 
enterBlock->bbNum); - enterBlock->SetFalseTarget(newAlways); - } - } - - // Fix up potential fallthrough into the preheader. - BasicBlock* fallthroughSource = preheader->Prev(); - if ((fallthroughSource != nullptr) && fallthroughSource->KindIs(BBJ_COND)) - { - if (!loop->ContainsBlock(fallthroughSource)) - { - // Either unreachable or an enter edge. The new fallthrough into - // the preheader is what we want. We still need to update refs - // which fgReplaceJumpTarget doesn't do for fallthrough. - FlowEdge* old = fgRemoveRefPred(header, fallthroughSource); - fgAddRefPred(preheader, fallthroughSource, old); - } - else - { - // For a backedge we need to make sure we're still going to the head, - // and not falling into the preheader. - FlowEdge* edge = fgRemoveRefPred(header, fallthroughSource); - BasicBlock* newAlways = fgNewBBafter(BBJ_ALWAYS, fallthroughSource, true, header); - fgAddRefPred(header, newAlways, edge); - fgAddRefPred(newAlways, fallthroughSource, edge); - - JITDUMP("Created new BBJ_ALWAYS " FMT_BB " to redirect " FMT_BB " over preheader\n", newAlways->bbNum, - fallthroughSource->bbNum); - } - - fallthroughSource->SetFalseTarget(fallthroughSource->Next()); } optSetPreheaderWeight(loop, preheader); diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp index 38f614d8d3c8c..d8ea4bfcf6118 100644 --- a/src/coreclr/jit/redundantbranchopts.cpp +++ b/src/coreclr/jit/redundantbranchopts.cpp @@ -1727,12 +1727,11 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) { Statement* const lastStmt = jti.m_block->lastStmt(); fgRemoveStmt(jti.m_block, lastStmt); - JITDUMP(" repurposing " FMT_BB " to always fall through to " FMT_BB "\n", jti.m_block->bbNum, + JITDUMP(" repurposing " FMT_BB " to always jump to " FMT_BB "\n", jti.m_block->bbNum, jti.m_falseTarget->bbNum); fgRemoveRefPred(jti.m_trueTarget, jti.m_block); jti.m_block->SetKindAndTarget(BBJ_ALWAYS, jti.m_falseTarget); jti.m_block->SetFlags(BBF_NONE_QUIRK); - 
assert(jti.m_block->JumpsToNext()); } // Now reroute the flow from the predecessors. diff --git a/src/coreclr/jit/switchrecognition.cpp b/src/coreclr/jit/switchrecognition.cpp index 7ab33d07c3c34..fa6abd0f23e8b 100644 --- a/src/coreclr/jit/switchrecognition.cpp +++ b/src/coreclr/jit/switchrecognition.cpp @@ -349,6 +349,31 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t* assert((jumpCount > 0) && (jumpCount <= SWITCH_MAX_DISTANCE + 1)); const auto jmpTab = new (this, CMK_BasicBlock) BasicBlock*[jumpCount + 1 /*default case*/]; + // Quirk: lastBlock's false target may have diverged from bbNext. If the false target is behind firstBlock, + // we may create a cycle in the BasicBlock list by setting firstBlock->bbNext to it. + // Add a new BBJ_ALWAYS to the false target to avoid this. + // (We only need this if the false target is behind firstBlock, + // but it's cheaper to just check if the false target has diverged) + // TODO-NoFallThrough: Revisit this quirk? + bool skipPredRemoval = false; + if (!lastBlock->FalseTargetIs(lastBlock->Next())) + { + if (isReversed) + { + assert(lastBlock->FalseTargetIs(blockIfTrue)); + fgRemoveRefPred(blockIfTrue, firstBlock); + blockIfTrue = fgNewBBafter(BBJ_ALWAYS, firstBlock, true, blockIfTrue); + fgAddRefPred(blockIfTrue->GetTarget(), blockIfTrue); + skipPredRemoval = true; + } + else + { + assert(lastBlock->FalseTargetIs(blockIfFalse)); + blockIfFalse = fgNewBBafter(BBJ_ALWAYS, firstBlock, true, blockIfFalse); + fgAddRefPred(blockIfFalse->GetTarget(), blockIfFalse); + } + } + fgHasSwitch = true; firstBlock->GetSwitchTargets()->bbsCount = jumpCount + 1; firstBlock->GetSwitchTargets()->bbsHasDefault = true; @@ -368,7 +393,10 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t* } // Unlink blockIfTrue from firstBlock, we're going to link it again in the loop below. 
- fgRemoveRefPred(blockIfTrue, firstBlock); + if (!skipPredRemoval) + { + fgRemoveRefPred(blockIfTrue, firstBlock); + } for (unsigned i = 0; i < jumpCount; i++) {