
github.com/dotnet/runtime.git
author     SingleAccretion <62474226+SingleAccretion@users.noreply.github.com>  2021-06-25 09:37:09 +0300
committer  GitHub <noreply@github.com>  2021-06-25 09:37:09 +0300
commit     17828d199b0794f1902611e1f2fffa72d734e489 (patch)
tree       ea661b1e0b1b9b6950c9e52130a5cea88d36615b /src/coreclr/jit
parent     ec42090f311ae3239abe61830e75bddc761fc828 (diff)
Fix CQ regression & correctness bug in morphing of long muls (#53566)
* Add a test covering the GTF_MUL_64RSLT transform
* Disable the test on Mono
* Add genActualTypeIsInt helper
* Add some gtFlags helpers
* Prepare decomposition for new long muls
* Update gtIsValid64RsltMul to understand the new format for long muls.
* Rework morphing of long muls

Previously, morph was looking for the exact pattern of MUL(CAST(long <- int), CAST(long <- int)) when assessing the candidacy of a GT_MUL for being marked with GTF_MUL_64RSLT and emitted as a "long mul". This worked fine until the importer was changed to fold all casts with constant operands. That broke the pattern matching, and all MULs of the form (long)value * 10 started being emitted as helper calls. This change updates morph to understand the new folded casts and, in general, changes the "format" of long mul from "CAST * CAST" to "CAST * (CAST | CONST)". In the process, new helper functions have been introduced to avoid bloating fgMorphSmpOp with the sizeable new logic. Recognition of overflowing cases has been upgraded, and a correctness bug, where "checked((long)uint.MaxValue * (long)uint.MaxValue)" was wrongly treated as non-overflowing, has been fixed. Additionally, the logic to emit intermediate NOPs has been replaced: the casts themselves are now always skipped when morphing, even when remorphing.

* Add the script to generate the longmul test

The test itself has been regenerated using it, and there were no diffs, as expected.
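To make the regression and the correctness bug concrete, here is a minimal standalone C++ sketch of the arithmetic involved (illustrative names; the commit message states the same cases in C#):

#include <cassert>
#include <cstdint>

int main()
{
    // The regressed shape: once the importer folds (long)10 into a constant,
    // the tree becomes CAST(long <- int) * CONST, which the old
    // "CAST * CAST" pattern match no longer recognized.
    int32_t value = 123;
    int64_t product = (int64_t)value * 10;
    assert(product == 1230);

    // The correctness bug: (2^32 - 1)^2 == 2^64 - 2^33 + 1 fits in 64
    // unsigned bits but exceeds INT64_MAX, so a checked signed multiply
    // must raise an overflow rather than be treated as non-overflowing.
    uint64_t square = (uint64_t)UINT32_MAX * UINT32_MAX;
    assert(square > (uint64_t)INT64_MAX);
    return 0;
}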
Diffstat (limited to 'src/coreclr/jit')
-rw-r--r--  src/coreclr/jit/compiler.h          |   6
-rw-r--r--  src/coreclr/jit/decomposelongs.cpp  |  30
-rw-r--r--  src/coreclr/jit/gentree.cpp         |  28
-rw-r--r--  src/coreclr/jit/gentree.h           |  57
-rw-r--r--  src/coreclr/jit/morph.cpp           | 269
-rw-r--r--  src/coreclr/jit/vartype.h           |   6
6 files changed, 276 insertions, 120 deletions
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index 117d7d28a16..fe3f987667d 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -5833,6 +5833,12 @@ private:
GenTree* fgRecognizeAndMorphBitwiseRotation(GenTree* tree);
bool fgOperIsBitwiseRotationRoot(genTreeOps oper);
+#if !defined(TARGET_64BIT)
+ // Recognize and morph a long multiplication with 32 bit operands.
+ GenTreeOp* fgRecognizeAndMorphLongMul(GenTreeOp* mul);
+ GenTreeOp* fgMorphLongMul(GenTreeOp* mul);
+#endif
+
//-------- Determine the order in which the trees will be evaluated -------
unsigned fgTreeSeqNum;
diff --git a/src/coreclr/jit/decomposelongs.cpp b/src/coreclr/jit/decomposelongs.cpp
index 7e17db1ae91..39a4d01acc4 100644
--- a/src/coreclr/jit/decomposelongs.cpp
+++ b/src/coreclr/jit/decomposelongs.cpp
@@ -608,12 +608,12 @@ GenTree* DecomposeLongs::DecomposeCast(LIR::Use& use)
{
//
// This int->long cast is used by a GT_MUL that will be transformed by DecomposeMul into a
- // GT_LONG_MUL and as a result the high operand produced by the cast will become dead.
+ // GT_MUL_LONG and as a result the high operand produced by the cast will become dead.
// Skip cast decomposition so DecomposeMul doesn't need to bother with dead code removal,
// especially in the case of sign extending casts that also introduce new lclvars.
//
- assert((use.User()->gtFlags & GTF_MUL_64RSLT) != 0);
+ assert(use.User()->Is64RsltMul());
skipDecomposition = true;
}
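A standalone sketch (plain C++, not JIT code) of why the high operand is dead here: decomposition of a sign-extending cast materializes the sign bits as a separate value, but the long multiply consumes only the 32-bit lo halves.

#include <cstdint>

// The hi half a decomposed CAST(long <- int) would produce: pure sign bits.
// (Arithmetic right shift of negative values is assumed, as on the targets
// the 32-bit JIT supports.)
int32_t HighHalfOfSignExtend(int32_t lo)
{
    return lo >> 31; // all zeros or all ones
}

// GT_MUL_LONG widens from the lo halves itself, so the hi half is unused.
int64_t MulLong(int32_t lo1, int32_t lo2)
{
    return (int64_t)lo1 * lo2;
}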
@@ -1541,19 +1541,29 @@ GenTree* DecomposeLongs::DecomposeMul(LIR::Use& use)
{
assert(use.IsInitialized());
- GenTree* tree = use.Def();
- genTreeOps oper = tree->OperGet();
+ GenTree* tree = use.Def();
- assert(oper == GT_MUL);
- assert((tree->gtFlags & GTF_MUL_64RSLT) != 0);
+ assert(tree->OperIs(GT_MUL));
+ assert(tree->Is64RsltMul());
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
- // We expect both operands to be int->long casts. DecomposeCast specifically
- // ignores such casts when they are used by GT_MULs.
- assert((op1->OperGet() == GT_CAST) && (op1->TypeGet() == TYP_LONG));
- assert((op2->OperGet() == GT_CAST) && (op2->TypeGet() == TYP_LONG));
+ assert(op1->TypeIs(TYP_LONG) && op2->TypeIs(TYP_LONG));
+
+ // We expect the first operand to be an int->long cast.
+ // DecomposeCast specifically ignores such casts when they are used by GT_MULs.
+ assert(op1->OperIs(GT_CAST));
+
+ // The second operand can be a cast or a constant.
+ if (!op2->OperIs(GT_CAST))
+ {
+ assert(op2->OperIs(GT_LONG));
+ assert(op2->gtGetOp1()->IsIntegralConst());
+ assert(op2->gtGetOp2()->IsIntegralConst());
+
+ Range().Remove(op2->gtGetOp2());
+ }
Range().Remove(op1);
Range().Remove(op2);
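For reference, a small illustrative sketch of the lo/hi split behind the GT_LONG node asserted above; a decomposed 64-bit constant carries one 32-bit integral constant per half:

#include <cstdint>

// Conceptual shape of GT_LONG(lo, hi) for a decomposed 64-bit constant.
struct LongHalves
{
    uint32_t lo;
    uint32_t hi;
};

LongHalves SplitConstant(int64_t value)
{
    return {(uint32_t)value, (uint32_t)((uint64_t)value >> 32)};
}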
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index 3596de621d6..bb088e9dae9 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -2438,15 +2438,15 @@ GenTree* Compiler::gtReverseCond(GenTree* tree)
bool GenTree::gtIsValid64RsltMul()
{
- if ((gtOper != GT_MUL) || !(gtFlags & GTF_MUL_64RSLT))
+ if (!OperIs(GT_MUL) || !Is64RsltMul())
{
return false;
}
- GenTree* op1 = AsOp()->gtOp1;
- GenTree* op2 = AsOp()->gtOp2;
+ GenTree* op1 = AsOp()->gtGetOp1();
+ GenTree* op2 = AsOp()->gtGetOp2();
- if (TypeGet() != TYP_LONG || op1->TypeGet() != TYP_LONG || op2->TypeGet() != TYP_LONG)
+ if (!TypeIs(TYP_LONG) || !op1->TypeIs(TYP_LONG) || !op2->TypeIs(TYP_LONG))
{
return false;
}
@@ -2456,26 +2456,30 @@ bool GenTree::gtIsValid64RsltMul()
return false;
}
- // op1 has to be conv.i8(i4Expr)
- if ((op1->gtOper != GT_CAST) || (genActualType(op1->CastFromType()) != TYP_INT))
+ // op1 has to be CAST(long <- int).
+ if (!(op1->OperIs(GT_CAST) && genActualTypeIsInt(op1->AsCast()->CastOp())))
{
return false;
}
- // op2 has to be conv.i8(i4Expr)
- if ((op2->gtOper != GT_CAST) || (genActualType(op2->CastFromType()) != TYP_INT))
+ // op2 has to be CAST(long <- int) or a suitably small constant.
+ if (!(op2->OperIs(GT_CAST) && genActualTypeIsInt(op2->AsCast()->CastOp())) &&
+ !(op2->IsIntegralConst() && FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue())))
{
return false;
}
- // The signedness of both casts must be the same
- if (((op1->gtFlags & GTF_UNSIGNED) != 0) != ((op2->gtFlags & GTF_UNSIGNED) != 0))
+ // Both operands must extend the same way.
+ bool op1ZeroExtends = op1->IsUnsigned();
+ bool op2ZeroExtends = op2->OperIs(GT_CAST) ? op2->IsUnsigned() : op2->AsIntConCommon()->IntegralValue() >= 0;
+ bool op2AnyExtensionIsSuitable = op2->IsIntegralConst() && op2ZeroExtends;
+ if ((op1ZeroExtends != op2ZeroExtends) && !op2AnyExtensionIsSuitable)
{
return false;
}
- // Do unsigned mul iff both the casts are unsigned
- if (((op1->gtFlags & GTF_UNSIGNED) != 0) != ((gtFlags & GTF_UNSIGNED) != 0))
+ // Do unsigned mul iff both operands are zero-extending.
+ if (op1->IsUnsigned() != IsUnsigned())
{
return false;
}
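The "must extend the same way" rule rests on a fact a standalone program can check: sign- and zero-extension agree for every non-negative 32-bit value, which is why any extension is suitable for a non-negative constant operand.

#include <cassert>
#include <cstdint>

int main()
{
    // Non-negative: sign- and zero-extension yield identical 64-bit values.
    int32_t c = 10;
    assert((int64_t)c == (int64_t)(uint64_t)(uint32_t)c);

    // Negative: the two extensions diverge, so a negative constant must
    // match the signedness of the cast on the other operand.
    int32_t n = -1;
    assert((int64_t)n == -1);
    assert((int64_t)(uint64_t)(uint32_t)n == 0xFFFFFFFF);
    return 0;
}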
diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h
index e2a5045e906..e46e33de6fe 100644
--- a/src/coreclr/jit/gentree.h
+++ b/src/coreclr/jit/gentree.h
@@ -2117,6 +2117,63 @@ public:
return ((gtFlags & GTF_UNSIGNED) != 0);
}
+ void SetUnsigned()
+ {
+ assert(OperIs(GT_ADD, GT_SUB, GT_MUL, GT_CAST));
+ gtFlags |= GTF_UNSIGNED;
+ }
+
+ void ClearUnsigned()
+ {
+ assert(OperIs(GT_ADD, GT_SUB, GT_MUL, GT_CAST));
+ gtFlags &= ~GTF_UNSIGNED;
+ }
+
+ void SetOverflow()
+ {
+ assert(OperMayOverflow());
+ gtFlags |= GTF_OVERFLOW;
+ }
+
+ void ClearOverflow()
+ {
+ assert(OperMayOverflow());
+ gtFlags &= ~GTF_OVERFLOW;
+ }
+
+ bool Is64RsltMul() const
+ {
+ return (gtFlags & GTF_MUL_64RSLT) != 0;
+ }
+
+ void Set64RsltMul()
+ {
+ gtFlags |= GTF_MUL_64RSLT;
+ }
+
+ void Clear64RsltMul()
+ {
+ gtFlags &= ~GTF_MUL_64RSLT;
+ }
+
+ void SetAllEffectsFlags(GenTree* source)
+ {
+ SetAllEffectsFlags(source->gtFlags & GTF_ALL_EFFECT);
+ }
+
+ void SetAllEffectsFlags(GenTree* source, GenTree* otherSource)
+ {
+ SetAllEffectsFlags((source->gtFlags | otherSource->gtFlags) & GTF_ALL_EFFECT);
+ }
+
+ void SetAllEffectsFlags(GenTreeFlags sourceFlags)
+ {
+ assert((sourceFlags & ~GTF_ALL_EFFECT) == 0);
+
+ gtFlags &= ~GTF_ALL_EFFECT;
+ gtFlags |= sourceFlags;
+ }
+
inline bool IsCnsIntOrI() const;
inline bool IsIntegralConst() const;
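The SetAllEffectsFlags helpers implement a "replace one slice of a bit field" pattern; a minimal standalone sketch, with a hypothetical mask standing in for the real GTF_ALL_EFFECT value:

#include <cassert>
#include <cstdint>

using Flags = uint32_t;
constexpr Flags ALL_EFFECT = 0x0000FF00; // hypothetical mask, not the real GTF_ALL_EFFECT

// Replace only the ALL_EFFECT bits of dst with sourceFlags.
void SetAllEffectsFlags(Flags& dst, Flags sourceFlags)
{
    assert((sourceFlags & ~ALL_EFFECT) == 0); // callers pre-mask, as in the JIT
    dst = (dst & ~ALL_EFFECT) | sourceFlags;
}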
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index 946fe11cbec..9dc3020c15d 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -12382,6 +12382,8 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac)
return fgMorphCast(tree);
case GT_MUL:
+ noway_assert(op2 != nullptr);
+
if (opts.OptimizationEnabled() && !optValnumCSE_phase && !tree->gtOverflow())
{
// MUL(NEG(a), C) => MUL(a, NEG(C))
@@ -12401,119 +12403,39 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac)
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
- /* For (long)int1 * (long)int2, we dont actually do the
- casts, and just multiply the 32 bit values, which will
- give us the 64 bit result in edx:eax */
+ // For (long)int1 * (long)int2, we don't actually do the
+ // casts, and just multiply the 32 bit values, which will
+ // give us the 64 bit result in edx:eax.
- noway_assert(op2);
- if ((op1->gtOper == GT_CAST && op2->gtOper == GT_CAST &&
- genActualType(op1->CastFromType()) == TYP_INT && genActualType(op2->CastFromType()) == TYP_INT) &&
- !op1->gtOverflow() && !op2->gtOverflow())
+ if (tree->Is64RsltMul())
{
- // The casts have to be of the same signedness.
- if ((op1->gtFlags & GTF_UNSIGNED) != (op2->gtFlags & GTF_UNSIGNED))
- {
- // We see if we can force an int constant to change its signedness
- GenTree* constOp;
- if (op1->AsCast()->CastOp()->gtOper == GT_CNS_INT)
- constOp = op1;
- else if (op2->AsCast()->CastOp()->gtOper == GT_CNS_INT)
- constOp = op2;
- else
- goto NO_MUL_64RSLT;
-
- if (((unsigned)(constOp->AsCast()->CastOp()->AsIntCon()->gtIconVal) < (unsigned)(0x80000000)))
- constOp->gtFlags ^= GTF_UNSIGNED;
- else
- goto NO_MUL_64RSLT;
- }
-
- // The only combination that can overflow
- if (tree->gtOverflow() && (tree->gtFlags & GTF_UNSIGNED) && !(op1->gtFlags & GTF_UNSIGNED))
- goto NO_MUL_64RSLT;
-
- /* Remaining combinations can never overflow during long mul. */
-
- tree->gtFlags &= ~GTF_OVERFLOW;
-
- /* Do unsigned mul only if the casts were unsigned */
-
- tree->gtFlags &= ~GTF_UNSIGNED;
- tree->gtFlags |= op1->gtFlags & GTF_UNSIGNED;
-
- /* Since we are committing to GTF_MUL_64RSLT, we don't want
- the casts to be folded away. So morph the castees directly */
-
- op1->AsOp()->gtOp1 = fgMorphTree(op1->AsOp()->gtOp1);
- op2->AsOp()->gtOp1 = fgMorphTree(op2->AsOp()->gtOp1);
-
- // Propagate side effect flags up the tree
- op1->gtFlags &= ~GTF_ALL_EFFECT;
- op1->gtFlags |= (op1->AsOp()->gtOp1->gtFlags & GTF_ALL_EFFECT);
- op2->gtFlags &= ~GTF_ALL_EFFECT;
- op2->gtFlags |= (op2->AsOp()->gtOp1->gtFlags & GTF_ALL_EFFECT);
-
- // If the GT_MUL can be altogether folded away, we should do that.
-
- if ((op1->AsCast()->CastOp()->OperKind() & op2->AsCast()->CastOp()->OperKind() & GTK_CONST) &&
- opts.OptEnabled(CLFLG_CONSTANTFOLD))
- {
- tree->AsOp()->gtOp1 = op1 = gtFoldExprConst(op1);
- tree->AsOp()->gtOp2 = op2 = gtFoldExprConst(op2);
- noway_assert(op1->OperKind() & op2->OperKind() & GTK_CONST);
- tree = gtFoldExprConst(tree);
- noway_assert(tree->OperIsConst());
- return tree;
- }
-
- tree->gtFlags |= GTF_MUL_64RSLT;
-
- // If op1 and op2 are unsigned casts, we need to do an unsigned mult
- tree->gtFlags |= (op1->gtFlags & GTF_UNSIGNED);
-
- // Insert GT_NOP nodes for the cast operands so that they do not get folded
- // And propagate the new flags. We don't want to CSE the casts because
- // codegen expects GTF_MUL_64RSLT muls to have a certain layout.
-
- if (op1->AsCast()->CastOp()->OperGet() != GT_NOP)
- {
- op1->AsOp()->gtOp1 = gtNewOperNode(GT_NOP, TYP_INT, op1->AsCast()->CastOp());
- op1->gtFlags &= ~GTF_ALL_EFFECT;
- op1->gtFlags |= (op1->AsCast()->CastOp()->gtFlags & GTF_ALL_EFFECT);
- }
+ // We are seeing this node again.
+ // Morph only the children of casts,
+ // so as to avoid losing them.
+ assert(tree->gtIsValid64RsltMul());
+ tree = fgMorphLongMul(tree->AsOp());
- if (op2->AsCast()->CastOp()->OperGet() != GT_NOP)
- {
- op2->AsOp()->gtOp1 = gtNewOperNode(GT_NOP, TYP_INT, op2->AsCast()->CastOp());
- op2->gtFlags &= ~GTF_ALL_EFFECT;
- op2->gtFlags |= (op2->AsCast()->CastOp()->gtFlags & GTF_ALL_EFFECT);
- }
+ goto DONE_MORPHING_CHILDREN;
+ }
- op1->gtFlags |= GTF_DONT_CSE;
- op2->gtFlags |= GTF_DONT_CSE;
+ tree = fgRecognizeAndMorphLongMul(tree->AsOp());
- tree->gtFlags &= ~GTF_ALL_EFFECT;
- tree->gtFlags |= ((op1->gtFlags | op2->gtFlags) & GTF_ALL_EFFECT);
+ if (tree->Is64RsltMul())
+ {
+ op1 = tree->AsOp()->gtGetOp1();
+ op2 = tree->AsOp()->gtGetOp2();
goto DONE_MORPHING_CHILDREN;
}
- else if ((tree->gtFlags & GTF_MUL_64RSLT) == 0)
+ else
{
- NO_MUL_64RSLT:
if (tree->gtOverflow())
- helper = (tree->gtFlags & GTF_UNSIGNED) ? CORINFO_HELP_ULMUL_OVF : CORINFO_HELP_LMUL_OVF;
+ helper = tree->IsUnsigned() ? CORINFO_HELP_ULMUL_OVF : CORINFO_HELP_LMUL_OVF;
else
helper = CORINFO_HELP_LMUL;
goto USE_HELPER_FOR_ARITH;
}
- else
- {
- /* We are seeing this node again. We have decided to use
- GTF_MUL_64RSLT, so leave it alone. */
-
- assert(tree->gtIsValid64RsltMul());
- }
}
#endif // !TARGET_64BIT
break;
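The edx:eax comment refers to x86's single-instruction 32x32 -> 64 widening multiply; in portable C++ the computation the marked GT_MUL stands for looks like this (a 32-bit x86 compiler emits one MUL or IMUL for it instead of calling a 64x64 helper):

#include <cstdint>

// Unsigned widening multiply: x86 MUL leaves the product in edx:eax.
uint64_t MulLongUnsigned(uint32_t a, uint32_t b)
{
    return (uint64_t)a * b;
}

// Signed widening multiply: x86 one-operand IMUL, same register pair.
int64_t MulLongSigned(int32_t a, int32_t b)
{
    return (int64_t)a * b;
}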
@@ -15993,6 +15915,157 @@ GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree)
return tree;
}
+#if !defined(TARGET_64BIT)
+//------------------------------------------------------------------------------
+// fgRecognizeAndMorphLongMul : Check for and morph long multiplication with 32 bit operands.
+//
+// Recognizes the following tree: MUL(CAST(long <- int) or CONST, CAST(long <- int) or CONST),
+// where CONST must be an integer constant that fits in 32 bits. Note that if both operands are
+// constants, the original tree is returned unmodified, i.e. the caller is responsible for
+// folding or correct code generation (e.g. for overflow cases). May swap operands if the
+// first one is a constant and the second one is not.
+//
+// Arguments:
+// mul - GT_MUL tree to check for a long multiplication opportunity
+//
+// Return Value:
+// The original tree unmodified if it is not eligible for long multiplication.
+// Tree with GTF_MUL_64RSLT set, side effect flags propagated, and children morphed if it is.
+//
+GenTreeOp* Compiler::fgRecognizeAndMorphLongMul(GenTreeOp* mul)
+{
+ assert(mul->OperIs(GT_MUL));
+ assert(mul->TypeIs(TYP_LONG));
+
+ GenTree* op1 = mul->gtGetOp1();
+ GenTree* op2 = mul->gtGetOp2();
+
+ assert(op1->TypeIs(TYP_LONG) && op2->TypeIs(TYP_LONG));
+
+ if (!(op1->OperIs(GT_CAST) && genActualTypeIsInt(op1->AsCast()->CastOp())) &&
+ !(op1->IsIntegralConst() && FitsIn<int32_t>(op1->AsIntConCommon()->IntegralValue())))
+ {
+ return mul;
+ }
+
+ if (!(op2->OperIs(GT_CAST) && genActualTypeIsInt(op2->AsCast()->CastOp())) &&
+ !(op2->IsIntegralConst() && FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue())))
+ {
+ return mul;
+ }
+
+ // Let fgMorphSmpOp take care of folding.
+ if (op1->IsIntegralConst() && op2->IsIntegralConst())
+ {
+ return mul;
+ }
+
+ // We don't handle checked casts.
+ if (op1->gtOverflowEx() || op2->gtOverflowEx())
+ {
+ return mul;
+ }
+
+ // Move the constant operand to the right to make the logic below more straightforward.
+ if (op2->OperIs(GT_CAST) && op1->IsIntegralConst())
+ {
+ std::swap(op1, op2);
+ mul->gtOp1 = op1;
+ mul->gtOp2 = op2;
+ }
+
+ // The operands must have the same extending behavior, since the instruction
+ // used to compute the result will sign/zero-extend both operands at once.
+ bool op1ZeroExtends = op1->IsUnsigned();
+ bool op2ZeroExtends = op2->OperIs(GT_CAST) ? op2->IsUnsigned() : op2->AsIntConCommon()->IntegralValue() >= 0;
+ bool op2AnyExtensionIsSuitable = op2->IsIntegralConst() && op2ZeroExtends;
+ if ((op1ZeroExtends != op2ZeroExtends) && !op2AnyExtensionIsSuitable)
+ {
+ return mul;
+ }
+
+ if (mul->gtOverflow())
+ {
+ auto getMaxValue = [mul](GenTree* op) -> int64_t {
+ if (op->OperIs(GT_CAST))
+ {
+ if (op->IsUnsigned())
+ {
+ switch (op->AsCast()->CastOp()->TypeGet())
+ {
+ case TYP_UBYTE:
+ return UINT8_MAX;
+ case TYP_USHORT:
+ return UINT16_MAX;
+ default:
+ return UINT32_MAX;
+ }
+ }
+
+ return mul->IsUnsigned() ? static_cast<int64_t>(UINT64_MAX) : INT32_MIN;
+ }
+
+ return op->AsIntConCommon()->IntegralValue();
+ };
+
+ int64_t maxOp1 = getMaxValue(op1);
+ int64_t maxOp2 = getMaxValue(op2);
+
+ if (CheckedOps::MulOverflows(maxOp1, maxOp2, mul->IsUnsigned()))
+ {
+ return mul;
+ }
+
+ mul->ClearOverflow();
+ }
+
+ // MUL_LONG needs to do the work the casts would have done.
+ mul->ClearUnsigned();
+ if (op1->IsUnsigned())
+ {
+ mul->SetUnsigned();
+ }
+
+ mul->Set64RsltMul();
+
+ return fgMorphLongMul(mul);
+}
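A sketch of the screening question the gtOverflow() block asks, applied to the case from the commit message; MulOverflows below is a simplified stand-in for the JIT's CheckedOps::MulOverflows and uses a GCC/Clang builtin for the signed case:

#include <cassert>
#include <cstdint>

// Simplified stand-in: does a * b overflow a 64-bit result?
bool MulOverflows(int64_t a, int64_t b, bool isUnsigned)
{
    if (isUnsigned)
    {
        uint64_t ua = (uint64_t)a;
        uint64_t ub = (uint64_t)b;
        return (ub != 0) && (ua > UINT64_MAX / ub);
    }
    int64_t result;
    return __builtin_mul_overflow(a, b, &result);
}

int main()
{
    // checked((long)uint.MaxValue * (long)uint.MaxValue): each operand's
    // maximum is UINT32_MAX and the multiply is signed, so the product
    // (2^64 - 2^33 + 1) exceeds INT64_MAX; the transform must keep the
    // overflow check. This is the case the old logic got wrong.
    assert(MulOverflows(UINT32_MAX, UINT32_MAX, /* isUnsigned */ false));

    // The same operands fit an unsigned 64-bit result, so the unsigned
    // variant can safely drop the check.
    assert(!MulOverflows(UINT32_MAX, UINT32_MAX, /* isUnsigned */ true));
    return 0;
}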
+
+//------------------------------------------------------------------------------
+// fgMorphLongMul : Morphs GT_MUL nodes marked with GTF_MUL_64RSLT.
+//
+// Morphs *only* the operands of casts that compose the long mul to
+// avoid them being folded away.
+//
+// Arguments:
+// mul - GT_MUL tree to morph operands of
+//
+// Return Value:
+// The original tree, with operands morphed and flags propagated.
+//
+GenTreeOp* Compiler::fgMorphLongMul(GenTreeOp* mul)
+{
+ GenTree* op1 = mul->gtGetOp1();
+ GenTree* op2 = mul->gtGetOp2();
+
+ // Morph the operands. We cannot allow the casts to go away, so we morph their operands directly.
+ op1->AsCast()->CastOp() = fgMorphTree(op1->AsCast()->CastOp());
+ op1->SetAllEffectsFlags(op1->AsCast()->CastOp());
+
+ if (op2->OperIs(GT_CAST))
+ {
+ op2->AsCast()->CastOp() = fgMorphTree(op2->AsCast()->CastOp());
+ op2->SetAllEffectsFlags(op2->AsCast()->CastOp());
+ }
+
+ mul->SetAllEffectsFlags(op1, op2);
+ op1->SetDoNotCSE();
+ op2->SetDoNotCSE();
+
+ return mul;
+}
+#endif // !defined(TARGET_64BIT)
+
/*****************************************************************************
*
* Transform the given tree for code generation and return an equivalent tree.
diff --git a/src/coreclr/jit/vartype.h b/src/coreclr/jit/vartype.h
index 35abfb53b59..d43e02a0a30 100644
--- a/src/coreclr/jit/vartype.h
+++ b/src/coreclr/jit/vartype.h
@@ -230,6 +230,12 @@ inline bool varTypeIsIntOrI(T vt)
}
template <class T>
+inline bool genActualTypeIsInt(T vt)
+{
+ return ((TypeGet(vt) >= TYP_BOOL) && (TypeGet(vt) <= TYP_UINT));
+}
+
+template <class T>
inline bool genActualTypeIsIntOrI(T vt)
{
return ((TypeGet(vt) >= TYP_BOOL) && (TypeGet(vt) <= TYP_U_IMPL));
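A sketch of the idea behind the new helper: every small integer type has 32-bit int as its "actual" (stack-normalized) type, so the check is a contiguous range test over the var_types ordering. The enum below is a trimmed illustration, not the real one:

// Trimmed, illustrative subset of the JIT's var_types ordering.
enum var_types
{
    TYP_BOOL,
    TYP_BYTE,
    TYP_UBYTE,
    TYP_SHORT,
    TYP_USHORT,
    TYP_INT,
    TYP_UINT,
    TYP_LONG,
};

// True exactly for the types whose actual type is 32-bit int.
bool genActualTypeIsInt(var_types vt)
{
    return (vt >= TYP_BOOL) && (vt <= TYP_UINT);
}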