github.com/dotnet/runtime.git
author    Bruce Forstall <brucefo@microsoft.com>  2021-04-15 19:45:51 +0300
committer GitHub <noreply@github.com>             2021-04-15 19:45:51 +0300
commit    a2e36a7693967d04ca0548e2b825d7ec363bd1f3 (patch)
tree      94934398db5ab0f1a3435c4afa9dc68862ed5db7 /src
parent    8f491fc4efdf229a78f687b896b8a46eb2a2ce03 (diff)
Simplify JIT label handling (#51208)
* Simplify JIT label handling

Remove the BBF_JMP_TARGET flag that was set early and attempted to be maintained all through compilation. Instead, use BBF_HAS_LABEL to indicate to the emitter where we need labels.

Also, stop setting and maintaining BBF_HAS_LABEL early. Add a pass over the blocks when preparing for codegen that sets most of the necessary BBF_HAS_LABEL flags. This flag will never be set before codegen. A few places set the flag after codegen starts, namely `genCreateTempLabel` and BBJ_COND handling for alignment. Note that this flag must be set before the block is processed for codegen (and an insGroup is created).

Together, these changes make it easier to work with the flow graph without worrying about maintaining these bits of information through various optimizations.

Add a few more details about alignment processing to the dump.

There are a few code asm diffs due to alignment processing not previously creating a label to help compute how large a loop is.

There are a lot of textual asm diffs due to there being (mostly) fewer labels, plus some additional insGroup output. This can happen if a block was labeled with `BBF_JMP_TARGET` or `BBF_HAS_LABEL` before, but didn't need to be, perhaps after some optimizations. Now, the flag is never added in the first place.

There are a large number of GC info diffs. Labels are where GC info state changes are recorded between codegen and the emitter. If we eliminate an unnecessary emitter label, then we also eliminate a capture of the current codegen GC state. Since the emitter is lazy at marking GC deaths, this means that we see a lot of lengthened GC lifetimes -- until the next label, or some other cause of GC kill. Often, a register kill followed by a register birth simply disappears, and the register is kept alive across the interim.

* Remove the loop alignment flag if we decide a loop is no longer a loop
Diffstat (limited to 'src')
-rw-r--r--  src/coreclr/jit/block.cpp                      4
-rw-r--r--  src/coreclr/jit/block.h                        7
-rw-r--r--  src/coreclr/jit/codegen.h                      2
-rw-r--r--  src/coreclr/jit/codegenarm.cpp                 3
-rw-r--r--  src/coreclr/jit/codegenarm64.cpp               3
-rw-r--r--  src/coreclr/jit/codegencommon.cpp            192
-rw-r--r--  src/coreclr/jit/codegenlinear.cpp             27
-rw-r--r--  src/coreclr/jit/codegenxarch.cpp               3
-rw-r--r--  src/coreclr/jit/emit.cpp                      38
-rw-r--r--  src/coreclr/jit/emitarm.cpp                    6
-rw-r--r--  src/coreclr/jit/emitarm64.cpp                  8
-rw-r--r--  src/coreclr/jit/emitxarch.cpp                  6
-rw-r--r--  src/coreclr/jit/fgbasic.cpp                   41
-rw-r--r--  src/coreclr/jit/fgehopt.cpp                    6
-rw-r--r--  src/coreclr/jit/fgflow.cpp                    14
-rw-r--r--  src/coreclr/jit/fgopt.cpp                     20
-rw-r--r--  src/coreclr/jit/flowgraph.cpp                 26
-rw-r--r--  src/coreclr/jit/importer.cpp                   6
-rw-r--r--  src/coreclr/jit/indirectcalltransformer.cpp    2
-rw-r--r--  src/coreclr/jit/jiteh.cpp                     12
-rw-r--r--  src/coreclr/jit/morph.cpp                      7
-rw-r--r--  src/coreclr/jit/optimizer.cpp                 16
-rw-r--r--  src/coreclr/jit/redundantbranchopts.cpp        1
23 files changed, 235 insertions, 215 deletions
diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp
index 756b4601cff..3634b781379 100644
--- a/src/coreclr/jit/block.cpp
+++ b/src/coreclr/jit/block.cpp
@@ -419,10 +419,6 @@ void BasicBlock::dspFlags()
{
printf("label ");
}
- if (bbFlags & BBF_JMP_TARGET)
- {
- printf("target ");
- }
if (bbFlags & BBF_HAS_JMP)
{
printf("jmp ");
diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h
index e411d6b32f9..5c41dcd02bc 100644
--- a/src/coreclr/jit/block.h
+++ b/src/coreclr/jit/block.h
@@ -411,7 +411,7 @@ struct BasicBlock : private LIR::Range
#define BBF_LOOP_CALL1 MAKE_BBFLAG(15) // BB starts a loop that will always call
#define BBF_HAS_LABEL MAKE_BBFLAG(16) // BB needs a label
-#define BBF_JMP_TARGET MAKE_BBFLAG(17) // BB is a target of an implicit/explicit jump
+// Unused MAKE_BBFLAG(17)
#define BBF_HAS_JMP MAKE_BBFLAG(18) // BB executes a JMP instruction (instead of return)
#define BBF_GC_SAFE_POINT MAKE_BBFLAG(19) // BB has a GC safe point (a call). More abstractly, BB does not require a
// (further) poll -- this may be because this BB has a call, or, in some
@@ -501,9 +501,8 @@ struct BasicBlock : private LIR::Range
// TODO: Should BBF_RUN_RARELY be added to BBF_SPLIT_GAINED ?
#define BBF_SPLIT_GAINED \
- (BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_HAS_JMP | BBF_BACKWARD_JUMP | BBF_HAS_IDX_LEN | BBF_HAS_NEWARRAY | \
- BBF_PROF_WEIGHT | BBF_HAS_NEWOBJ | BBF_KEEP_BBJ_ALWAYS | BBF_CLONED_FINALLY_END | BBF_HAS_NULLCHECK | \
- BBF_HAS_CLASS_PROFILE)
+ (BBF_DONT_REMOVE | BBF_HAS_JMP | BBF_BACKWARD_JUMP | BBF_HAS_IDX_LEN | BBF_HAS_NEWARRAY | BBF_PROF_WEIGHT | \
+ BBF_HAS_NEWOBJ | BBF_KEEP_BBJ_ALWAYS | BBF_CLONED_FINALLY_END | BBF_HAS_NULLCHECK | BBF_HAS_CLASS_PROFILE)
#ifndef __GNUC__ // GCC doesn't like C_ASSERT at global scope
static_assert_no_msg((BBF_SPLIT_NONEXIST & BBF_SPLIT_LOST) == 0);
diff --git a/src/coreclr/jit/codegen.h b/src/coreclr/jit/codegen.h
index 1770947d640..25597bdf6cd 100644
--- a/src/coreclr/jit/codegen.h
+++ b/src/coreclr/jit/codegen.h
@@ -88,7 +88,7 @@ private:
void genPrepForCompiler();
- void genPrepForEHCodegen();
+ void genMarkLabelsForCodegen();
inline RegState* regStateForType(var_types t)
{
diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp
index c5dc53c712a..4c352ff9f64 100644
--- a/src/coreclr/jit/codegenarm.cpp
+++ b/src/coreclr/jit/codegenarm.cpp
@@ -129,7 +129,6 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
assert(block->bbNext->bbJumpDest->bbFlags & BBF_FINALLY_TARGET);
bbFinallyRet = block->bbNext->bbJumpDest;
- bbFinallyRet->bbFlags |= BBF_JMP_TARGET;
// Load the address where the finally funclet should return into LR.
// The funclet prolog/epilog will do "push {lr}" / "pop {pc}" to do the return.
@@ -633,7 +632,7 @@ void CodeGen::genJumpTable(GenTree* treeNode)
for (unsigned i = 0; i < jumpCount; i++)
{
BasicBlock* target = *jumpTable++;
- noway_assert(target->bbFlags & BBF_JMP_TARGET);
+ noway_assert(target->bbFlags & BBF_HAS_LABEL);
JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum);
diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp
index 737db8dc424..187e01d62b1 100644
--- a/src/coreclr/jit/codegenarm64.cpp
+++ b/src/coreclr/jit/codegenarm64.cpp
@@ -2743,7 +2743,6 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode)
GetEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, baseReg, baseReg, idxReg, INS_OPTS_LSL);
// add it to the absolute address of fgFirstBB
- compiler->fgFirstBB->bbFlags |= BBF_JMP_TARGET;
GetEmitter()->emitIns_R_L(INS_adr, EA_PTRSIZE, compiler->fgFirstBB, tmpReg);
GetEmitter()->emitIns_R_R_R(INS_add, EA_PTRSIZE, baseReg, baseReg, tmpReg);
@@ -2771,7 +2770,7 @@ void CodeGen::genJumpTable(GenTree* treeNode)
for (unsigned i = 0; i < jumpCount; i++)
{
BasicBlock* target = *jumpTable++;
- noway_assert(target->bbFlags & BBF_JMP_TARGET);
+ noway_assert(target->bbFlags & BBF_HAS_LABEL);
JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum);
diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp
index 6f9d5f1c5ed..ed15b7a6085 100644
--- a/src/coreclr/jit/codegencommon.cpp
+++ b/src/coreclr/jit/codegencommon.cpp
@@ -318,84 +318,165 @@ void CodeGen::genPrepForCompiler()
#endif
}
-/*****************************************************************************
- * To report exception handling information to the VM, we need the size of the exception
- * handling regions. To compute that, we need to emit labels for the beginning block of
- * an EH region, and the block that immediately follows a region. Go through the EH
- * table and mark all these blocks with BBF_HAS_LABEL to make this happen.
- *
- * The beginning blocks of the EH regions already should have this flag set.
- *
- * No blocks should be added or removed after this.
- *
- * This code is closely couple with genReportEH() in the sense that any block
- * that this procedure has determined it needs to have a label has to be selected
- * using the same logic both here and in genReportEH(), so basically any time there is
- * a change in the way we handle EH reporting, we have to keep the logic of these two
- * methods 'in sync'.
- */
-
-void CodeGen::genPrepForEHCodegen()
+//------------------------------------------------------------------------
+// genMarkLabelsForCodegen: Mark labels required for codegen.
+//
+// Mark all blocks that require a label with BBF_HAS_LABEL. These are blocks that are:
+// 1. the target of jumps (fall-through flow doesn't require a label),
+// 2. referenced labels such as for "switch" codegen,
+// 3. needed to denote the range of EH regions to the VM, or
+// 4. needed to denote the range of code for alignment processing.
+//
+// No labels will be in the IR before now, but future codegen might annotate additional blocks
+// with this flag, such as "switch" codegen, or codegen-created blocks from genCreateTempLabel().
+// Also, the alignment processing code marks BBJ_COND fall-through labels elsewhere.
+//
+// To report exception handling information to the VM, we need the size of the exception
+// handling regions. To compute that, we need to emit labels for the beginning block of
+// an EH region, and the block that immediately follows a region. Go through the EH
+// table and mark all these blocks with BBF_HAS_LABEL to make this happen.
+//
+// This code is closely coupled with genReportEH() in the sense that any block
+// that this procedure has determined it needs to have a label has to be selected
+// using the same logic both here and in genReportEH(), so basically any time there is
+// a change in the way we handle EH reporting, we have to keep the logic of these two
+// methods 'in sync'.
+//
+// No blocks should be added or removed after this.
+//
+void CodeGen::genMarkLabelsForCodegen()
{
assert(!compiler->fgSafeBasicBlockCreation);
+ JITDUMP("Mark labels for codegen\n");
+
+#ifdef DEBUG
+ // No label flags should be set before this.
+ for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ {
+ assert((block->bbFlags & BBF_HAS_LABEL) == 0);
+ }
+#endif // DEBUG
+
+ // The first block is special; it always needs a label. This is to properly set up GC info.
+ JITDUMP(" " FMT_BB " : first block\n", compiler->fgFirstBB->bbNum);
+ compiler->fgFirstBB->bbFlags |= BBF_HAS_LABEL;
+
+ // The current implementation of switch tables requires the first block to have a label so it
+ // can generate offsets to the switch label targets.
+ // (This duplicates the unconditional marking of the first block above.)
+ // TODO-CQ: remove this when switches have been re-implemented to not use this.
+ if (compiler->fgHasSwitch)
+ {
+ JITDUMP(" " FMT_BB " : function has switch; mark first block\n", compiler->fgFirstBB->bbNum);
+ compiler->fgFirstBB->bbFlags |= BBF_HAS_LABEL;
+ }
+
+ for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ {
+ switch (block->bbJumpKind)
+ {
+ case BBJ_ALWAYS: // This will also handle the BBJ_ALWAYS of a BBJ_CALLFINALLY/BBJ_ALWAYS pair.
+ case BBJ_COND:
+ case BBJ_EHCATCHRET:
+ JITDUMP(" " FMT_BB " : branch target\n", block->bbJumpDest->bbNum);
+ block->bbJumpDest->bbFlags |= BBF_HAS_LABEL;
+ break;
+
+ case BBJ_SWITCH:
+ unsigned jumpCnt;
+ jumpCnt = block->bbJumpSwt->bbsCount;
+ BasicBlock** jumpTab;
+ jumpTab = block->bbJumpSwt->bbsDstTab;
+ do
+ {
+ JITDUMP(" " FMT_BB " : branch target\n", (*jumpTab)->bbNum);
+ (*jumpTab)->bbFlags |= BBF_HAS_LABEL;
+ } while (++jumpTab, --jumpCnt);
+ break;
+
+ case BBJ_CALLFINALLY:
+ // The finally target itself will get marked by walking the EH table, below, and marking
+ // all handler begins.
+ CLANG_FORMAT_COMMENT_ANCHOR;
+
+#if FEATURE_EH_CALLFINALLY_THUNKS
+ {
+ // For callfinally thunks, we need to mark the block following the callfinally/always pair,
+ // as that's needed for identifying the range of the "duplicate finally" region in EH data.
+ BasicBlock* bbToLabel = block->bbNext;
+ if (block->isBBCallAlwaysPair())
+ {
+ bbToLabel = bbToLabel->bbNext; // skip the BBJ_ALWAYS
+ }
+ if (bbToLabel != nullptr)
+ {
+ JITDUMP(" " FMT_BB " : callfinally thunk region end\n", bbToLabel->bbNum);
+ bbToLabel->bbFlags |= BBF_HAS_LABEL;
+ }
+ }
+#endif // FEATURE_EH_CALLFINALLY_THUNKS
+
+ break;
+
+ case BBJ_EHFINALLYRET:
+ case BBJ_EHFILTERRET:
+ case BBJ_RETURN:
+ case BBJ_THROW:
+ case BBJ_NONE:
+ break;
+
+ default:
+ noway_assert(!"Unexpected bbJumpKind");
+ break;
+ }
+ }
+
+ // Walk all the exceptional code blocks and mark them, since they don't appear in the normal flow graph.
+ for (Compiler::AddCodeDsc* add = compiler->fgAddCodeList; add; add = add->acdNext)
+ {
+ JITDUMP(" " FMT_BB " : throw helper block\n", add->acdDstBlk->bbNum);
+ add->acdDstBlk->bbFlags |= BBF_HAS_LABEL;
+ }
+
EHblkDsc* HBtab;
EHblkDsc* HBtabEnd;
- bool anyFinallys = false;
-
for (HBtab = compiler->compHndBBtab, HBtabEnd = compiler->compHndBBtab + compiler->compHndBBtabCount;
HBtab < HBtabEnd; HBtab++)
{
- assert(HBtab->ebdTryBeg->bbFlags & BBF_HAS_LABEL);
- assert(HBtab->ebdHndBeg->bbFlags & BBF_HAS_LABEL);
+ HBtab->ebdTryBeg->bbFlags |= BBF_HAS_LABEL;
+ HBtab->ebdHndBeg->bbFlags |= BBF_HAS_LABEL;
+
+ JITDUMP(" " FMT_BB " : try begin\n", HBtab->ebdTryBeg->bbNum);
+ JITDUMP(" " FMT_BB " : hnd begin\n", HBtab->ebdHndBeg->bbNum);
if (HBtab->ebdTryLast->bbNext != nullptr)
{
HBtab->ebdTryLast->bbNext->bbFlags |= BBF_HAS_LABEL;
+ JITDUMP(" " FMT_BB " : try end\n", HBtab->ebdTryLast->bbNext->bbNum);
}
if (HBtab->ebdHndLast->bbNext != nullptr)
{
HBtab->ebdHndLast->bbNext->bbFlags |= BBF_HAS_LABEL;
+ JITDUMP(" " FMT_BB " : hnd end\n", HBtab->ebdHndLast->bbNext->bbNum);
}
if (HBtab->HasFilter())
{
- assert(HBtab->ebdFilter->bbFlags & BBF_HAS_LABEL);
- // The block after the last block of the filter is
- // the handler begin block, which we already asserted
- // has BBF_HAS_LABEL set.
+ HBtab->ebdFilter->bbFlags |= BBF_HAS_LABEL;
+ JITDUMP(" " FMT_BB " : filter begin\n", HBtab->ebdFilter->bbNum);
}
-
-#if FEATURE_EH_CALLFINALLY_THUNKS
- if (HBtab->HasFinallyHandler())
- {
- anyFinallys = true;
- }
-#endif // FEATURE_EH_CALLFINALLY_THUNKS
}
-#if FEATURE_EH_CALLFINALLY_THUNKS
- if (anyFinallys)
+#ifdef DEBUG
+ if (compiler->verbose)
{
- for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
- {
- if (block->bbJumpKind == BBJ_CALLFINALLY)
- {
- BasicBlock* bbToLabel = block->bbNext;
- if (block->isBBCallAlwaysPair())
- {
- bbToLabel = bbToLabel->bbNext; // skip the BBJ_ALWAYS
- }
- if (bbToLabel != nullptr)
- {
- bbToLabel->bbFlags |= BBF_HAS_LABEL;
- }
- } // block is BBJ_CALLFINALLY
- } // for each block
- } // if (anyFinallys)
-#endif // FEATURE_EH_CALLFINALLY_THUNKS
+ printf("*************** After genMarkLabelsForCodegen()\n");
+ compiler->fgDispBasicBlocks();
+ }
+#endif // DEBUG
}
void CodeGenInterface::genUpdateLife(GenTree* tree)
@@ -954,7 +1035,8 @@ BasicBlock* CodeGen::genCreateTempLabel()
compiler->fgSafeBasicBlockCreation = false;
#endif
- block->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
+ JITDUMP("Mark " FMT_BB " as label: codegen temp block\n", block->bbNum);
+ block->bbFlags |= BBF_HAS_LABEL;
// Use coldness of current block, as this label will
// be contained in it.
@@ -1067,7 +1149,7 @@ void CodeGen::genAdjustStackLevel(BasicBlock* block)
if (!isFramePointerUsed() && compiler->fgIsThrowHlpBlk(block))
{
- noway_assert(block->bbFlags & BBF_JMP_TARGET);
+ noway_assert(block->bbFlags & BBF_HAS_LABEL);
SetStackLevel(compiler->fgThrowHlpBlkStkLevel(block) * sizeof(int));
@@ -1979,7 +2061,7 @@ void CodeGen::genInsertNopForUnwinder(BasicBlock* block)
// calls the funclet during non-exceptional control flow.
if (block->bbFlags & BBF_FINALLY_TARGET)
{
- assert(block->bbFlags & BBF_JMP_TARGET);
+ assert(block->bbFlags & BBF_HAS_LABEL);
#ifdef DEBUG
if (compiler->verbose)
diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp
index 2744bfdfd16..29a352a73b3 100644
--- a/src/coreclr/jit/codegenlinear.cpp
+++ b/src/coreclr/jit/codegenlinear.cpp
@@ -93,14 +93,6 @@ void CodeGen::genInitialize()
initializeVariableLiveKeeper();
#endif // USING_VARIABLE_LIVE_RANGE
- // The current implementation of switch tables requires the first block to have a label so it
- // can generate offsets to the switch label targets.
- // TODO-CQ: remove this when switches have been re-implemented to not use this.
- if (compiler->fgHasSwitch)
- {
- compiler->fgFirstBB->bbFlags |= BBF_JMP_TARGET;
- }
-
genPendingCallLabel = nullptr;
// Initialize the pointer tracking code
@@ -167,8 +159,7 @@ void CodeGen::genCodeForBBlist()
#endif // defined(DEBUG) && defined(TARGET_XARCH)
- // Prepare the blocks for exception handling codegen: mark the blocks that needs labels.
- genPrepForEHCodegen();
+ genMarkLabelsForCodegen();
assert(!compiler->fgFirstBBScratch ||
compiler->fgFirstBB == compiler->fgFirstBBScratch); // compiler->fgFirstBBScratch has to be first.
@@ -321,7 +312,7 @@ void CodeGen::genCodeForBBlist()
// If this block is a jump target or it requires a label then set 'needLabel' to true,
//
- bool needLabel = (block->bbFlags & (BBF_JMP_TARGET | BBF_HAS_LABEL)) != 0;
+ bool needLabel = (block->bbFlags & BBF_HAS_LABEL) != 0;
if (block == compiler->fgFirstColdBlock)
{
@@ -758,12 +749,24 @@ void CodeGen::genCodeForBBlist()
//
// During emitter, this information will be used to calculate the loop size.
// Depending on the loop size, decision of whether to align a loop or not will be taken.
+ //
+ // In the emitter, we need to calculate the loop size from `block->bbJumpDest` through
+ // `block` (inclusive). Thus, we need to ensure there is a label on the lexical fall-through
+ // block, even if one is not otherwise needed, to be able to calculate the size of this
+ // loop (loop size is calculated by walking the instruction groups; see emitter::getLoopSize()).
if (block->bbJumpDest->isLoopAlign())
{
GetEmitter()->emitSetLoopBackEdge(block->bbJumpDest);
+
+ if (block->bbNext != nullptr)
+ {
+ JITDUMP("Mark " FMT_BB " as label: alignment end-of-loop\n", block->bbNext->bbNum);
+ block->bbNext->bbFlags |= BBF_HAS_LABEL;
+ }
}
-#endif
+#endif // FEATURE_LOOP_ALIGN
+
break;
default:
diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp
index 756a2811a12..b74d0a8c8f8 100644
--- a/src/coreclr/jit/codegenxarch.cpp
+++ b/src/coreclr/jit/codegenxarch.cpp
@@ -3531,7 +3531,6 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode)
GetEmitter()->emitIns_R_ARX(INS_mov, EA_4BYTE, baseReg, baseReg, idxReg, 4, 0);
// add it to the absolute address of fgFirstBB
- compiler->fgFirstBB->bbFlags |= BBF_JMP_TARGET;
GetEmitter()->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, compiler->fgFirstBB, tmpReg);
GetEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, baseReg, tmpReg);
// jmp baseReg
@@ -3558,7 +3557,7 @@ void CodeGen::genJumpTable(GenTree* treeNode)
for (unsigned i = 0; i < jumpCount; i++)
{
BasicBlock* target = *jumpTable++;
- noway_assert(target->bbFlags & BBF_JMP_TARGET);
+ noway_assert(target->bbFlags & BBF_HAS_LABEL);
JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum);
diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp
index 6c61e7065fb..e01ee04f291 100644
--- a/src/coreclr/jit/emit.cpp
+++ b/src/coreclr/jit/emit.cpp
@@ -3600,8 +3600,17 @@ void emitter::emitDispIG(insGroup* ig, insGroup* igPrev, bool verbose)
printf("%sbyrefRegs=", separator);
printRegMaskInt(ig->igByrefRegs());
emitDispRegSet(ig->igByrefRegs());
+ separator = ", ";
}
+#if FEATURE_LOOP_ALIGN
+ if (ig->igLoopBackEdge != nullptr)
+ {
+ printf("%sloop=IG%02u", separator, ig->igLoopBackEdge->igNum);
+ separator = ", ";
+ }
+#endif // FEATURE_LOOP_ALIGN
+
emitDispIGflags(ig->igFlags);
if (ig == emitCurIG)
@@ -4193,8 +4202,7 @@ AGAIN:
}
else
{
- printf("-- ERROR, no emitter cookie for " FMT_BB "; it is probably missing BBF_JMP_TARGET or "
- "BBF_HAS_LABEL.\n",
+ printf("-- ERROR, no emitter cookie for " FMT_BB "; it is probably missing BBF_HAS_LABEL.\n",
jmp->idAddr()->iiaBBlabel->bbNum);
}
}
@@ -4659,7 +4667,7 @@ void emitter::emitLoopAlignment()
//-----------------------------------------------------------------------------
// emitEndsWithAlignInstr: Checks if current IG ends with loop align instruction.
//
-// Returns: true if current IG ends with align instruciton.
+// Returns: true if current IG ends with align instruction.
//
bool emitter::emitEndsWithAlignInstr()
{
@@ -4974,7 +4982,8 @@ unsigned emitter::emitCalculatePaddingForLoopAlignment(insGroup* ig, size_t offs
// No padding if loop is already aligned
if ((offset & (alignmentBoundary - 1)) == 0)
{
- JITDUMP(";; Skip alignment: 'Loop already aligned at %dB boundary.'\n", alignmentBoundary);
+ JITDUMP(";; Skip alignment: 'Loop at G_M%03u_IG%02u already aligned at %dB boundary.'\n",
+ emitComp->compMethodID, ig->igNext->igNum, alignmentBoundary);
return 0;
}
@@ -4998,7 +5007,8 @@ unsigned emitter::emitCalculatePaddingForLoopAlignment(insGroup* ig, size_t offs
// No padding if loop is big
if (loopSize > maxLoopSize)
{
- JITDUMP(";; Skip alignment: 'Loop is big. LoopSize= %d, MaxLoopSize= %d.'\n", loopSize, maxLoopSize);
+ JITDUMP(";; Skip alignment: 'Loop at G_M%03u_IG%02u is big. LoopSize= %d, MaxLoopSize= %d.'\n",
+ emitComp->compMethodID, ig->igNext->igNum, loopSize, maxLoopSize);
return 0;
}
@@ -5024,15 +5034,17 @@ unsigned emitter::emitCalculatePaddingForLoopAlignment(insGroup* ig, size_t offs
if (nPaddingBytes == 0)
{
skipPadding = true;
- JITDUMP(";; Skip alignment: 'Loop already aligned at %uB boundary.'\n", alignmentBoundary);
+ JITDUMP(";; Skip alignment: 'Loop at G_M%03u_IG%02u already aligned at %uB boundary.'\n",
+ emitComp->compMethodID, ig->igNext->igNum, alignmentBoundary);
}
// Check if the alignment exceeds new maxPadding limit
else if (nPaddingBytes > nMaxPaddingBytes)
{
skipPadding = true;
- JITDUMP(";; Skip alignment: 'PaddingNeeded= %d, MaxPadding= %d, LoopSize= %d, "
+ JITDUMP(";; Skip alignment: 'Loop at G_M%03u_IG%02u PaddingNeeded= %d, MaxPadding= %d, LoopSize= %d, "
"AlignmentBoundary= %dB.'\n",
- nPaddingBytes, nMaxPaddingBytes, loopSize, alignmentBoundary);
+ emitComp->compMethodID, ig->igNext->igNum, nPaddingBytes, nMaxPaddingBytes, loopSize,
+ alignmentBoundary);
}
}
@@ -5054,8 +5066,8 @@ unsigned emitter::emitCalculatePaddingForLoopAlignment(insGroup* ig, size_t offs
else
{
// Otherwise, the loop just fits in minBlocksNeededForLoop and so can skip alignment.
- JITDUMP(";; Skip alignment: 'Loop is aligned to fit in %d blocks of %d chunks.'\n",
- minBlocksNeededForLoop, alignmentBoundary);
+ JITDUMP(";; Skip alignment: 'Loop at G_M%03u_IG%02u is aligned to fit in %d blocks of %d chunks.'\n",
+ emitComp->compMethodID, ig->igNext->igNum, minBlocksNeededForLoop, alignmentBoundary);
}
}
}
@@ -5083,8 +5095,8 @@ unsigned emitter::emitCalculatePaddingForLoopAlignment(insGroup* ig, size_t offs
else
{
// Otherwise, the loop just fits in minBlocksNeededForLoop and so can skip alignment.
- JITDUMP(";; Skip alignment: 'Loop is aligned to fit in %d blocks of %d chunks.'\n", minBlocksNeededForLoop,
- alignmentBoundary);
+ JITDUMP(";; Skip alignment: 'Loop at G_M%03u_IG%02u is aligned to fit in %d blocks of %d chunks.'\n",
+ emitComp->compMethodID, ig->igNext->igNum, minBlocksNeededForLoop, alignmentBoundary);
}
}
@@ -5098,7 +5110,7 @@ unsigned emitter::emitCalculatePaddingForLoopAlignment(insGroup* ig, size_t offs
return paddingToAdd;
}
-#endif
+#endif // FEATURE_LOOP_ALIGN
void emitter::emitCheckFuncletBranch(instrDesc* jmp, insGroup* jmpIG)
{
diff --git a/src/coreclr/jit/emitarm.cpp b/src/coreclr/jit/emitarm.cpp
index ef122d6a316..58732c476d9 100644
--- a/src/coreclr/jit/emitarm.cpp
+++ b/src/coreclr/jit/emitarm.cpp
@@ -4198,7 +4198,7 @@ void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount /* = 0
if (dst != NULL)
{
- assert(dst->bbFlags & BBF_JMP_TARGET);
+ assert(dst->bbFlags & BBF_HAS_LABEL);
}
else
{
@@ -4356,7 +4356,7 @@ void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount /* = 0
void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg)
{
- assert(dst->bbFlags & BBF_JMP_TARGET);
+ assert(dst->bbFlags & BBF_HAS_LABEL);
insFormat fmt = IF_NONE;
instrDescJmp* id;
@@ -4462,7 +4462,7 @@ void emitter::emitIns_R_D(instruction ins, emitAttr attr, unsigned offs, regNumb
void emitter::emitIns_J_R(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg)
{
- assert(dst->bbFlags & BBF_JMP_TARGET);
+ assert(dst->bbFlags & BBF_HAS_LABEL);
insFormat fmt = IF_NONE;
switch (ins)
diff --git a/src/coreclr/jit/emitarm64.cpp b/src/coreclr/jit/emitarm64.cpp
index 223d7b3cb4e..3a37d7e893c 100644
--- a/src/coreclr/jit/emitarm64.cpp
+++ b/src/coreclr/jit/emitarm64.cpp
@@ -8168,7 +8168,7 @@ void emitter::emitSetShortJump(instrDescJmp* id)
void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg)
{
- assert(dst->bbFlags & BBF_JMP_TARGET);
+ assert(dst->bbFlags & BBF_HAS_LABEL);
insFormat fmt = IF_NONE;
@@ -8238,7 +8238,7 @@ void emitter::emitIns_J_R(instruction ins, emitAttr attr, BasicBlock* dst, regNu
assert((ins == INS_cbz) || (ins == INS_cbnz));
assert(dst != nullptr);
- assert((dst->bbFlags & BBF_JMP_TARGET) != 0);
+ assert((dst->bbFlags & BBF_HAS_LABEL) != 0);
insFormat fmt = IF_LARGEJMP;
@@ -8276,7 +8276,7 @@ void emitter::emitIns_J_R_I(instruction ins, emitAttr attr, BasicBlock* dst, reg
assert((ins == INS_tbz) || (ins == INS_tbnz));
assert(dst != nullptr);
- assert((dst->bbFlags & BBF_JMP_TARGET) != 0);
+ assert((dst->bbFlags & BBF_HAS_LABEL) != 0);
assert((EA_SIZE(attr) == EA_4BYTE) || (EA_SIZE(attr) == EA_8BYTE));
assert(imm < ((EA_SIZE(attr) == EA_4BYTE) ? 32 : 64));
@@ -8318,7 +8318,7 @@ void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount)
if (dst != nullptr)
{
- assert(dst->bbFlags & BBF_JMP_TARGET);
+ assert(dst->bbFlags & BBF_HAS_LABEL);
}
else
{
diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp
index c6c0b64166c..18d37a489d5 100644
--- a/src/coreclr/jit/emitxarch.cpp
+++ b/src/coreclr/jit/emitxarch.cpp
@@ -5163,7 +5163,7 @@ void emitter::emitIns_C_I(instruction ins, emitAttr attr, CORINFO_FIELD_HANDLE f
void emitter::emitIns_J_S(instruction ins, emitAttr attr, BasicBlock* dst, int varx, int offs)
{
assert(ins == INS_mov);
- assert(dst->bbFlags & BBF_JMP_TARGET);
+ assert(dst->bbFlags & BBF_HAS_LABEL);
instrDescLbl* id = emitNewInstrLbl();
@@ -5222,7 +5222,7 @@ void emitter::emitIns_J_S(instruction ins, emitAttr attr, BasicBlock* dst, int v
void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNumber reg)
{
assert(ins == INS_lea);
- assert(dst->bbFlags & BBF_JMP_TARGET);
+ assert(dst->bbFlags & BBF_HAS_LABEL);
instrDescJmp* id = emitNewInstrJmp();
@@ -6788,7 +6788,7 @@ void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount /* = 0
if (dst != nullptr)
{
- assert(dst->bbFlags & BBF_JMP_TARGET);
+ assert(dst->bbFlags & BBF_HAS_LABEL);
assert(instrCount == 0);
}
else
diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp
index 052ab147f78..7992d3b696a 100644
--- a/src/coreclr/jit/fgbasic.cpp
+++ b/src/coreclr/jit/fgbasic.cpp
@@ -281,7 +281,7 @@ void Compiler::fgEnsureFirstBBisScratch()
noway_assert(fgLastBB != nullptr);
// Set the expected flags
- block->bbFlags |= (BBF_INTERNAL | BBF_IMPORTED | BBF_JMP_TARGET | BBF_HAS_LABEL);
+ block->bbFlags |= (BBF_INTERNAL | BBF_IMPORTED);
// This new first BB has an implicit ref, and no others.
block->bbRefs = 1;
@@ -510,9 +510,6 @@ void Compiler::fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* ne
// Maintain, if necessary, the set of unique targets of "block."
UpdateSwitchTableTarget(blockSwitch, oldTarget, newTarget);
- // Make sure the new target has the proper bits set for being a branch target.
- newTarget->bbFlags |= BBF_HAS_LABEL | BBF_JMP_TARGET;
-
return; // We have replaced the jumps to oldTarget with newTarget
}
i++; // Check the next entry in jumpTab[] for a match
@@ -2677,9 +2674,6 @@ void Compiler::fgFindBasicBlocks()
BADCODE("Handler Clause is invalid");
}
- tryBegBB->bbFlags |= BBF_HAS_LABEL;
- hndBegBB->bbFlags |= BBF_HAS_LABEL | BBF_JMP_TARGET;
-
#if HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION
// This will change the block weight from 0 to 1
// and clear the rarely run flag
@@ -2696,11 +2690,8 @@ void Compiler::fgFindBasicBlocks()
if (clause.Flags & CORINFO_EH_CLAUSE_FILTER)
{
filtBB = HBtab->ebdFilter = fgLookupBB(clause.FilterOffset);
-
- filtBB->bbCatchTyp = BBCT_FILTER;
- filtBB->bbFlags |= BBF_HAS_LABEL | BBF_JMP_TARGET;
-
- hndBegBB->bbCatchTyp = BBCT_FILTER_HANDLER;
+ filtBB->bbCatchTyp = BBCT_FILTER;
+ hndBegBB->bbCatchTyp = BBCT_FILTER_HANDLER;
#if HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION
// This will change the block weight from 0 to 1
@@ -2774,7 +2765,7 @@ void Compiler::fgFindBasicBlocks()
/* Mark the initial block and last blocks in the 'try' region */
- tryBegBB->bbFlags |= BBF_TRY_BEG | BBF_HAS_LABEL;
+ tryBegBB->bbFlags |= BBF_TRY_BEG;
/* Prevent future optimizations of removing the first block */
/* of a TRY block and the first block of an exception handler */
@@ -3536,9 +3527,9 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr)
newBlock->bbFlags = curr->bbFlags;
// Remove flags that the new block can't have.
- newBlock->bbFlags &= ~(BBF_TRY_BEG | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | BBF_HAS_LABEL |
- BBF_JMP_TARGET | BBF_FUNCLET_BEG | BBF_LOOP_PREHEADER | BBF_KEEP_BBJ_ALWAYS |
- BBF_PATCHPOINT | BBF_BACKWARD_JUMP_TARGET | BBF_LOOP_ALIGN);
+ newBlock->bbFlags &=
+ ~(BBF_TRY_BEG | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | BBF_FUNCLET_BEG | BBF_LOOP_PREHEADER |
+ BBF_KEEP_BBJ_ALWAYS | BBF_PATCHPOINT | BBF_BACKWARD_JUMP_TARGET | BBF_LOOP_ALIGN);
// Remove the GC safe bit on the new block. It seems clear that if we split 'curr' at the end,
// such that all the code is left in 'curr', and 'newBlock' just gets the control flow, then
@@ -3761,7 +3752,6 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ)
{
// Now 'curr' jumps to newBlock
curr->bbJumpDest = newBlock;
- newBlock->bbFlags |= BBF_JMP_TARGET;
}
fgAddRefPred(newBlock, curr);
}
@@ -3778,7 +3768,6 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ)
assert(curr->bbJumpKind == BBJ_ALWAYS);
fgReplacePred(succ, curr, newBlock);
curr->bbJumpDest = newBlock;
- newBlock->bbFlags |= BBF_JMP_TARGET;
fgAddRefPred(newBlock, curr);
}
@@ -4118,18 +4107,10 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
/* old block no longer gets the extra ref count for being the first block */
block->bbRefs--;
succBlock->bbRefs++;
-
- /* Set the new firstBB */
- fgUnlinkBlock(block);
-
- /* Always treat the initial block as a jump target */
- fgFirstBB->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
- }
- else
- {
- fgUnlinkBlock(block);
}
+ fgUnlinkBlock(block);
+
/* mark the block as removed and set the change flag */
block->bbFlags |= BBF_REMOVED;
@@ -4189,7 +4170,6 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
/* The links for the direct predecessor case have already been updated above */
if (predBlock->bbJumpDest != block)
{
- succBlock->bbFlags |= BBF_HAS_LABEL | BBF_JMP_TARGET;
break;
}
@@ -4211,7 +4191,6 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
case BBJ_EHCATCHRET:
noway_assert(predBlock->bbJumpDest == block);
predBlock->bbJumpDest = succBlock;
- succBlock->bbFlags |= BBF_HAS_LABEL | BBF_JMP_TARGET;
break;
case BBJ_SWITCH:
@@ -4293,7 +4272,6 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst)
case BBJ_NONE:
bSrc->bbJumpKind = BBJ_ALWAYS;
bSrc->bbJumpDest = bDst;
- bSrc->bbJumpDest->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
#ifdef DEBUG
if (verbose)
{
@@ -4363,7 +4341,6 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst)
}
jmpBlk->bbJumpDest = bDst;
- jmpBlk->bbJumpDest->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
if (fgComputePredsDone)
{
diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp
index 39bbec95a7c..0a5e1602857 100644
--- a/src/coreclr/jit/fgehopt.cpp
+++ b/src/coreclr/jit/fgehopt.cpp
@@ -2253,9 +2253,6 @@ void Compiler::fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock,
newBlock->bbJumpDest = canonicalBlock;
fgAddRefPred(canonicalBlock, newBlock, predEdge);
- // Note there is now a jump to the canonical block
- canonicalBlock->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
-
// If nonCanonicalBlock has only one pred, all its flow transfers.
// If it has multiple preds, then we need edge counts or likelihoods
// to figure things out.
@@ -2292,7 +2289,4 @@ void Compiler::fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock,
// Wire up the new flow
predBlock->bbJumpDest = canonicalBlock;
fgAddRefPred(canonicalBlock, predBlock, predEdge);
-
- // Note there is now a jump to the canonical block
- canonicalBlock->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
}
diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp
index 5a1ba9d687c..d2da22c0f09 100644
--- a/src/coreclr/jit/fgflow.cpp
+++ b/src/coreclr/jit/fgflow.cpp
@@ -779,9 +779,6 @@ void Compiler::fgComputePreds()
// the first block is always reachable
fgFirstBB->bbRefs = 1;
- // Treat the initial block as a jump target
- fgFirstBB->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
-
// Under OSR, we may need to specially protect the original method entry.
//
if (opts.IsOSR() && (fgEntryBB != nullptr) && (fgEntryBB->bbFlags & BBF_IMPORTED))
@@ -802,7 +799,6 @@ void Compiler::fgComputePreds()
/* Mark the next block as being a jump target,
since the call target will return there */
PREFIX_ASSUME(block->bbNext != nullptr);
- block->bbNext->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
}
FALLTHROUGH;
@@ -813,9 +809,6 @@ void Compiler::fgComputePreds()
case BBJ_ALWAYS:
case BBJ_EHCATCHRET:
- /* Mark the jump dest block as being a jump target */
- block->bbJumpDest->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
-
fgAddRefPred(block->bbJumpDest, block, nullptr, true);
/* Is the next block reachable? */
@@ -898,9 +891,6 @@ void Compiler::fgComputePreds()
do
{
- /* Mark the target block as being a jump target */
- (*jumpTab)->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
-
fgAddRefPred(*jumpTab, block, nullptr, true);
} while (++jumpTab, --jumpCnt);
@@ -918,14 +908,10 @@ void Compiler::fgComputePreds()
if (ehDsc->HasFilter())
{
- ehDsc->ebdFilter->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
-
// The first block of a filter has an artifical extra refcount.
ehDsc->ebdFilter->bbRefs++;
}
- ehDsc->ebdHndBeg->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
-
// The first block of a handler has an artificial extra refcount.
ehDsc->ebdHndBeg->bbRefs++;
}
diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp
index 3fb3c37b8e6..dd414ced2a0 100644
--- a/src/coreclr/jit/fgopt.cpp
+++ b/src/coreclr/jit/fgopt.cpp
@@ -1357,7 +1357,7 @@ void Compiler::fgRemoveEmptyBlocks()
fgSetTryBeg(HBtab, newTryEntry);
// Try entry blocks get specially marked and have special protection.
- HBtab->ebdTryBeg->bbFlags |= BBF_DONT_REMOVE | BBF_TRY_BEG | BBF_HAS_LABEL;
+ HBtab->ebdTryBeg->bbFlags |= BBF_DONT_REMOVE | BBF_TRY_BEG;
// We are keeping this try region
removeTryRegion = false;
@@ -1539,7 +1539,6 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext)
JITDUMP("Second block has multiple incoming edges\n");
assert(block->isEmpty());
- block->bbFlags |= BBF_JMP_TARGET;
for (flowList* pred = bNext->bbPreds; pred; pred = pred->flNext)
{
fgReplaceJumpTarget(pred->getBlock(), block, bNext);
@@ -1882,16 +1881,11 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext)
break;
}
- // Add the LOOP_ALIGN flag
if (bNext->isLoopAlign())
{
- // Only if the new block is jump target or has label
- if (((block->bbFlags & BBF_JMP_TARGET) != 0) || ((block->bbFlags & BBF_HAS_LABEL) != 0))
- {
- block->bbFlags |= BBF_LOOP_ALIGN;
- JITDUMP("Propagating LOOP_ALIGN flag from " FMT_BB " to " FMT_BB " during compacting.\n", bNext->bbNum,
- block->bbNum);
- }
+ block->bbFlags |= BBF_LOOP_ALIGN;
+ JITDUMP("Propagating LOOP_ALIGN flag from " FMT_BB " to " FMT_BB " during compacting.\n", bNext->bbNum,
+ block->bbNum);
}
// If we're collapsing a block created after the dominators are
@@ -3128,7 +3122,6 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock*
// The new block 'next' will inherit its weight from 'block'
next->inheritWeight(block);
next->bbJumpDest = target->bbNext;
- target->bbNext->bbFlags |= BBF_JMP_TARGET;
fgAddRefPred(next, block);
fgAddRefPred(next->bbJumpDest, next);
@@ -3559,9 +3552,6 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
bJump->bbJumpKind = BBJ_COND;
bJump->bbJumpDest = bDest->bbNext;
- /* Mark the jump dest block as being a jump target */
- bJump->bbJumpDest->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
-
/* Update bbRefs and bbPreds */
// bJump now falls through into the next block
@@ -5064,7 +5054,6 @@ bool Compiler::fgReorderBlocks()
{
/* Set the new jump dest for bPrev to the rarely run or uncommon block(s) */
bPrev->bbJumpDest = bStart;
- bStart->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
}
else
{
@@ -5073,7 +5062,6 @@ bool Compiler::fgReorderBlocks()
/* Set the new jump dest for bPrev to the rarely run or uncommon block(s) */
bPrev->bbJumpDest = block;
- block->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
}
}
diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp
index 3210718d041..046f939772b 100644
--- a/src/coreclr/jit/flowgraph.cpp
+++ b/src/coreclr/jit/flowgraph.cpp
@@ -430,7 +430,6 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block)
top->bbJumpDest = bottom;
top->bbJumpKind = BBJ_COND;
- bottom->bbFlags |= BBF_JMP_TARGET;
// Bottom has Top and Poll as its predecessors. Poll has just Top as a predecessor.
fgAddRefPred(bottom, poll);
@@ -1748,9 +1747,9 @@ void Compiler::fgAddSyncMethodEnterExit()
// EH regions in fgFindBasicBlocks(). Note that the try has no enclosing
// handler, and the fault has no enclosing try.
- tryBegBB->bbFlags |= BBF_HAS_LABEL | BBF_DONT_REMOVE | BBF_TRY_BEG | BBF_IMPORTED;
+ tryBegBB->bbFlags |= BBF_DONT_REMOVE | BBF_TRY_BEG | BBF_IMPORTED;
- faultBB->bbFlags |= BBF_HAS_LABEL | BBF_DONT_REMOVE | BBF_IMPORTED;
+ faultBB->bbFlags |= BBF_DONT_REMOVE | BBF_IMPORTED;
faultBB->bbCatchTyp = BBCT_FAULT;
tryBegBB->setTryIndex(XTnew);
@@ -2294,8 +2293,6 @@ private:
newReturnBB->bbRefs = 1; // bbRefs gets update later, for now it should be 1
comp->fgReturnCount++;
- newReturnBB->bbFlags |= (BBF_INTERNAL | BBF_JMP_TARGET);
-
noway_assert(newReturnBB->bbNext == nullptr);
JITDUMP("\n newReturnBB [" FMT_BB "] created\n", newReturnBB->bbNum);
@@ -3177,10 +3174,7 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block)
/* Allocate a new basic block */
BasicBlock* newHead = bbNewBasicBlock(BBJ_NONE);
-
- // In fgComputePreds() we set the BBF_JMP_TARGET and BBF_HAS_LABEL for all of the handler entry points
- //
- newHead->bbFlags |= (BBF_INTERNAL | BBF_JMP_TARGET | BBF_HAS_LABEL);
+ newHead->bbFlags |= BBF_INTERNAL;
newHead->inheritWeight(block);
newHead->bbRefs = 0;
@@ -3221,8 +3215,7 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block)
assert(nullptr == fgGetPredForBlock(block, newHead));
fgAddRefPred(block, newHead);
- assert((newHead->bbFlags & (BBF_INTERNAL | BBF_JMP_TARGET | BBF_HAS_LABEL)) ==
- (BBF_INTERNAL | BBF_JMP_TARGET | BBF_HAS_LABEL));
+ assert((newHead->bbFlags & BBF_INTERNAL) == BBF_INTERNAL);
}
/*****************************************************************************
@@ -3578,14 +3571,9 @@ void Compiler::fgDetermineFirstColdBlock()
}
}
- if (firstColdBlock != nullptr)
+ for (block = firstColdBlock; block != nullptr; block = block->bbNext)
{
- firstColdBlock->bbFlags |= BBF_JMP_TARGET;
-
- for (block = firstColdBlock; block; block = block->bbNext)
- {
- block->bbFlags |= BBF_COLD;
- }
+ block->bbFlags |= BBF_COLD;
}
EXIT:;
@@ -3695,8 +3683,6 @@ BasicBlock* Compiler::fgAddCodeRef(BasicBlock* srcBlk, unsigned refData, Special
newBlk = add->acdDstBlk = fgNewBBinRegion(jumpKinds[kind], srcBlk, /* runRarely */ true, /* insertAtEnd */ true);
- add->acdDstBlk->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
-
#ifdef DEBUG
if (verbose)
{
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 2f77fb1878d..4b6b6574be0 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -2753,8 +2753,8 @@ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_H
// hit only under JIT stress. See if the block is the one we injected.
// Note that EH canonicalization can inject internal blocks here. We might
// be able to re-use such a block (but we don't, right now).
- if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
- (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
+ if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) ==
+ (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE))
{
Statement* stmt = hndBlk->firstStmt();
@@ -2801,7 +2801,7 @@ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_H
/* Create extra basic block for the spill */
BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
- newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
+ newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE;
newBlk->inheritWeight(hndBlk);
newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp
index ad762d992f1..8b582980e5d 100644
--- a/src/coreclr/jit/indirectcalltransformer.cpp
+++ b/src/coreclr/jit/indirectcalltransformer.cpp
@@ -230,7 +230,7 @@ private:
void CreateRemainder()
{
remainderBlock = compiler->fgSplitBlockAfterStatement(currBlock, stmt);
- remainderBlock->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL | BBF_INTERNAL;
+ remainderBlock->bbFlags |= BBF_INTERNAL;
}
virtual void CreateCheck() = 0;
diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp
index 8f511a2913d..3fefd70ab92 100644
--- a/src/coreclr/jit/jiteh.cpp
+++ b/src/coreclr/jit/jiteh.cpp
@@ -2217,7 +2217,7 @@ bool Compiler::fgNormalizeEHCase1()
newHndStart->bbCodeOffs = handlerStart->bbCodeOffs;
newHndStart->bbCodeOffsEnd = newHndStart->bbCodeOffs; // code size = 0. TODO: use BAD_IL_OFFSET instead?
newHndStart->inheritWeight(handlerStart);
- newHndStart->bbFlags |= (BBF_DONT_REMOVE | BBF_INTERNAL | BBF_HAS_LABEL);
+ newHndStart->bbFlags |= (BBF_DONT_REMOVE | BBF_INTERNAL);
modified = true;
#ifdef DEBUG
@@ -2379,7 +2379,7 @@ bool Compiler::fgNormalizeEHCase2()
// Note that we don't need to clear any flags on the old try start, since it is still a 'try'
// start.
- newTryStart->bbFlags |= (BBF_TRY_BEG | BBF_DONT_REMOVE | BBF_INTERNAL | BBF_HAS_LABEL);
+ newTryStart->bbFlags |= (BBF_TRY_BEG | BBF_DONT_REMOVE | BBF_INTERNAL);
// Now we need to split any flow edges targetting the old try begin block between the old
// and new block. Note that if we are handling a multiply-nested 'try', we may have already
@@ -3084,10 +3084,8 @@ void Compiler::fgVerifyHandlerTab()
assert(HBtab->ebdTryBeg->bbFlags & BBF_TRY_BEG);
assert(HBtab->ebdTryBeg->bbFlags & BBF_DONT_REMOVE);
- assert(HBtab->ebdTryBeg->bbFlags & BBF_HAS_LABEL);
assert(HBtab->ebdHndBeg->bbFlags & BBF_DONT_REMOVE);
- assert(HBtab->ebdHndBeg->bbFlags & BBF_HAS_LABEL);
assert((HBtab->ebdTryBeg->bbFlags & BBF_REMOVED) == 0);
assert((HBtab->ebdTryLast->bbFlags & BBF_REMOVED) == 0);
@@ -4412,7 +4410,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
}
#endif // DEBUG
HBtab->ebdTryBeg = bPrev;
- bPrev->bbFlags |= BBF_TRY_BEG | BBF_DONT_REMOVE | BBF_HAS_LABEL;
+ bPrev->bbFlags |= BBF_TRY_BEG | BBF_DONT_REMOVE;
// clear the TryBeg flag unless it begins another try region
if (!bbIsTryBeg(block))
@@ -4435,7 +4433,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
block->bbRefs--;
HBtab->ebdHndBeg = bPrev;
- bPrev->bbFlags |= BBF_DONT_REMOVE | BBF_HAS_LABEL;
+ bPrev->bbFlags |= BBF_DONT_REMOVE;
#if defined(FEATURE_EH_FUNCLETS)
if (fgFuncletsCreated)
@@ -4484,7 +4482,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block)
block->bbRefs--;
HBtab->ebdFilter = bPrev;
- bPrev->bbFlags |= BBF_DONT_REMOVE | BBF_HAS_LABEL;
+ bPrev->bbFlags |= BBF_DONT_REMOVE;
#if defined(FEATURE_EH_FUNCLETS)
if (fgFuncletsCreated)
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index d61e6791557..23ea4b82eb9 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -8961,7 +8961,6 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa
// Finish hooking things up.
block->bbJumpKind = BBJ_ALWAYS;
- block->bbJumpDest->bbFlags |= BBF_JMP_TARGET;
fgAddRefPred(block->bbJumpDest, block);
block->bbFlags &= ~BBF_HAS_JMP;
}
@@ -17706,7 +17705,7 @@ void Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt)
BasicBlock* cond1Block = fgNewBBafter(BBJ_COND, block, true);
BasicBlock* asgBlock = fgNewBBafter(BBJ_NONE, block, true);
- remainderBlock->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL | propagateFlags;
+ remainderBlock->bbFlags |= propagateFlags;
// These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter).
// If they're not internal, mark them as imported to avoid asserts about un-imported blocks.
@@ -17892,7 +17891,7 @@ void Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt)
elseBlock->bbFlags |= BBF_IMPORTED;
}
- remainderBlock->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL | propagateFlags;
+ remainderBlock->bbFlags |= propagateFlags;
condBlock->inheritWeight(block);
@@ -17922,8 +17921,6 @@ void Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt)
thenBlock->bbFlags |= BBF_IMPORTED;
}
- elseBlock->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
-
fgAddRefPred(thenBlock, condBlock);
fgAddRefPred(remainderBlock, thenBlock);
diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp
index 528c08a90a9..67c26e26c4e 100644
--- a/src/coreclr/jit/optimizer.cpp
+++ b/src/coreclr/jit/optimizer.cpp
@@ -374,6 +374,17 @@ void Compiler::optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk)
break;
}
}
+
+ JITDUMP("\n");
+
+#if FEATURE_LOOP_ALIGN
+ if (begBlk->isLoopAlign())
+ {
+ // Clear the loop alignment bit on the head of a loop, since it's no longer a loop.
+ begBlk->bbFlags &= ~BBF_LOOP_ALIGN;
+ JITDUMP("Removing LOOP_ALIGN flag from removed loop in " FMT_BB "\n", begBlk->bbNum);
+ }
+#endif
}
/*****************************************************************************************************
@@ -4393,9 +4404,6 @@ void Compiler::optInvertWhileLoop(BasicBlock* block)
block->bbJumpKind = BBJ_COND;
block->bbJumpDest = bTest->bbNext;
- /* Mark the jump dest block as being a jump target */
- block->bbJumpDest->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
-
/* Update bbRefs and bbPreds for 'block->bbNext' 'bTest' and 'bTest->bbNext' */
fgAddRefPred(block->bbNext, block);
@@ -7849,7 +7857,6 @@ void Compiler::fgCreateLoopPreHeader(unsigned lnum)
case BBJ_EHCATCHRET:
noway_assert(predBlock->bbJumpDest == top);
predBlock->bbJumpDest = preHead;
- preHead->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
if (predBlock == head)
{
@@ -7879,7 +7886,6 @@ void Compiler::fgCreateLoopPreHeader(unsigned lnum)
fgRemoveRefPred(top, predBlock);
fgAddRefPred(preHead, predBlock);
- preHead->bbFlags |= BBF_JMP_TARGET | BBF_HAS_LABEL;
}
} while (++jumpTab, --jumpCnt);
break;
diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp
index df844db0eba..a3974859110 100644
--- a/src/coreclr/jit/redundantbranchopts.cpp
+++ b/src/coreclr/jit/redundantbranchopts.cpp
@@ -576,7 +576,6 @@ bool Compiler::optJumpThread(BasicBlock* const block, BasicBlock* const domBlock
fgRemoveRefPred(block, predBlock);
fgReplaceJumpTarget(predBlock, falseTarget, block);
fgAddRefPred(falseTarget, predBlock);
- falseTarget->bbFlags |= BBF_JMP_TARGET;
}
}
}