github.com/dotnet/runtime.git

author     Tanner Gooding <tagoo@outlook.com>        2021-04-20 03:55:09 +0300
committer  GitHub <noreply@github.com>               2021-04-20 03:55:09 +0300
commit     17506ca9c713d09049cc9fc831d19fc076d01dea (patch)
tree       51544491d333d089e81584352717b2fe8838700a /src/coreclr
parent     989ba287fcc3947dc07925ad5bf4842a94b22c71 (diff)
Updating Vector<T> to support nint and nuint (#50832)
* Refactoring GenTreeJitIntrinsic to expose SimdSize and SimdBaseType via methods
* Updating the JIT to pass through the CORINFO_TYPE for hardware intrinsics
* Adding support for Vector<nint> and Vector<nuint> to managed code
* Updating the vector tests to cover nint and nuint
* Recognize Vector<nint> and Vector<nuint> in the JIT
* Updating Vector64/128/256<T> NotSupportedTest metadata to include type name
* Updating the Vector64/128/256<T> tests to have NotSupported validation for nint/nuint
* Splitting ThrowHelper.ThrowForUnsupportedVectorBaseType into separate functions for Numerics vs Intrinsics
* Updating Utf16Utility.Validation to directly use Vector<nuint>
* Don't use the auxiliary type to hold a SIMD type, since it can be trivially pulled from the operand instead
* Split the mono handling for ThrowForUnsupportedVectorBaseType into ThrowForUnsupportedNumericsVectorBaseType and ThrowForUnsupportedIntrinsicsVectorBaseType
* Add basic handling for MONO_TYPE_I and MONO_TYPE_U to simd-intrinsics.c
* Ensure simd-intrinsics.c in Mono handles `MONO_TYPE_I` and `MONO_TYPE_U` on relevant code paths
* Ensure we don't assert when encountering synthesized Vector128<nint> handles
* Applying formatting patch
* Fix the handling for Crc32 and Crc32C on ARM64
* Updating Mono mini-amd64 to handle MONO_TYPE_I and MONO_TYPE_U for SIMD operations
* Handle OP_XCOMPARE.CMP_GE_UN for MONO_TYPE_U
* Handle MONO_TYPE_I and MONO_TYPE_U for Vector types in mini-llvm
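The first bullet above is the pattern most of the hunks below follow: the SIMD base type is stored on the node and local descriptor as the VM/JIT interface enum (CorInfoType) in a narrow field and only converted to the JIT's var_types on demand, via the new JitType2PreciseVarType helper. The sketch below is a rough illustration of that storage pattern, not the actual JIT code; the names SimdNodeSketch, CorInfoTypeSketch, VarTypesSketch, and ToPreciseVarType are simplified stand-ins for GenTreeJitIntrinsic, CorInfoType, var_types, and JitType2PreciseVarType, with only a handful of enum entries kept (their numeric values are taken from the CorInfoType comments in the ee_il_dll.hpp hunk).

```cpp
// Minimal sketch (assumptions noted above): keep the interface type in a narrow
// field and derive the JIT type on demand, mirroring the accessors added in
// gentree.h / compiler.h below.
#include <cassert>

enum CorInfoTypeSketch : unsigned char
{
    CIT_UNDEF      = 0x0,
    CIT_INT        = 0x8,
    CIT_NATIVEINT  = 0xc, // nint
    CIT_NATIVEUINT = 0xd, // nuint
    CIT_FLOAT      = 0xe,
};

enum VarTypesSketch : unsigned char
{
    VT_UNKNOWN,
    VT_INT,
    VT_I_IMPL, // native int
    VT_U_IMPL, // native uint
    VT_FLOAT,
};

// Stand-in for JitType2PreciseVarType: map the interface type to a precise JIT type.
inline VarTypesSketch ToPreciseVarType(CorInfoTypeSketch jitType)
{
    switch (jitType)
    {
        case CIT_INT:        return VT_INT;
        case CIT_NATIVEINT:  return VT_I_IMPL;
        case CIT_NATIVEUINT: return VT_U_IMPL;
        case CIT_FLOAT:      return VT_FLOAT;
        default:             return VT_UNKNOWN;
    }
}

struct SimdNodeSketch
{
private:
    unsigned char gtSimdBaseJitType; // packs because CorInfoType has fewer than 32 entries
    unsigned char gtSimdSize;        // vector size in bytes, 0 for scalar intrinsics

public:
    CorInfoTypeSketch GetSimdBaseJitType() const
    {
        return (CorInfoTypeSketch)gtSimdBaseJitType;
    }

    void SetSimdBaseJitType(CorInfoTypeSketch jitType)
    {
        gtSimdBaseJitType = (unsigned char)jitType;
        assert(gtSimdBaseJitType == jitType); // value must survive the narrow field
    }

    // Derived rather than stored, mirroring GenTreeJitIntrinsic::GetSimdBaseType().
    VarTypesSketch GetSimdBaseType() const
    {
        CorInfoTypeSketch jitType = GetSimdBaseJitType();
        return (jitType == CIT_UNDEF) ? VT_UNKNOWN : ToPreciseVarType(jitType);
    }

    unsigned char GetSimdSize() const
    {
        return gtSimdSize;
    }
};
```

Keeping the CorInfoType rather than a var_types value is what lets the handle cache in the compiler.h hunk distinguish SIMDNIntHandle/SIMDNUIntHandle from SIMDLongHandle/SIMDULongHandle, since the precise var_types mapping folds native int into the platform-sized integer type.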
Diffstat (limited to 'src/coreclr')
-rw-r--r--  src/coreclr/jit/assertionprop.cpp           |   4
-rw-r--r--  src/coreclr/jit/codegenarm64.cpp            |  38
-rw-r--r--  src/coreclr/jit/compiler.h                  | 187
-rw-r--r--  src/coreclr/jit/decomposelongs.cpp          |  12
-rw-r--r--  src/coreclr/jit/ee_il_dll.hpp               |  61
-rw-r--r--  src/coreclr/jit/gentree.cpp                 | 251
-rw-r--r--  src/coreclr/jit/gentree.h                   |  97
-rw-r--r--  src/coreclr/jit/gschecks.cpp                |   3
-rw-r--r--  src/coreclr/jit/hwintrinsic.cpp             | 314
-rw-r--r--  src/coreclr/jit/hwintrinsic.h               |   2
-rw-r--r--  src/coreclr/jit/hwintrinsicarm64.cpp        |  58
-rw-r--r--  src/coreclr/jit/hwintrinsiccodegenarm64.cpp |  58
-rw-r--r--  src/coreclr/jit/hwintrinsiccodegenxarch.cpp |  34
-rw-r--r--  src/coreclr/jit/hwintrinsicxarch.cpp        | 284
-rw-r--r--  src/coreclr/jit/importer.cpp                |  39
-rw-r--r--  src/coreclr/jit/lclvars.cpp                 |  48
-rw-r--r--  src/coreclr/jit/lower.cpp                   |   8
-rw-r--r--  src/coreclr/jit/lowerarmarch.cpp            | 108
-rw-r--r--  src/coreclr/jit/lowerxarch.cpp              | 319
-rw-r--r--  src/coreclr/jit/lsra.cpp                    |  12
-rw-r--r--  src/coreclr/jit/lsraarm64.cpp               |  24
-rw-r--r--  src/coreclr/jit/lsraxarch.cpp               |  29
-rw-r--r--  src/coreclr/jit/morph.cpp                   |  83
-rw-r--r--  src/coreclr/jit/rationalize.cpp             |  22
-rw-r--r--  src/coreclr/jit/simd.cpp                    | 558
-rw-r--r--  src/coreclr/jit/simdashwintrinsic.cpp       | 392
-rw-r--r--  src/coreclr/jit/simdcodegenxarch.cpp        |  44
-rw-r--r--  src/coreclr/jit/valuenum.cpp                |   8
28 files changed, 1722 insertions, 1375 deletions
diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp
index 56366cfd6d7..8db49752c10 100644
--- a/src/coreclr/jit/assertionprop.cpp
+++ b/src/coreclr/jit/assertionprop.cpp
@@ -2733,8 +2733,8 @@ GenTree* Compiler::optConstantAssertionProp(AssertionDsc* curAssertion,
LclVarDsc* varDsc = lvaGetDesc(lclNum);
var_types simdType = tree->TypeGet();
assert(varDsc->TypeGet() == simdType);
- var_types baseType = varDsc->lvBaseType;
- newTree = gtGetSIMDZero(simdType, baseType, varDsc->GetStructHnd());
+ CorInfoType simdBaseJitType = varDsc->GetSimdBaseJitType();
+ newTree = gtGetSIMDZero(simdType, simdBaseJitType, varDsc->GetStructHnd());
if (newTree == nullptr)
{
return nullptr;
diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp
index 187e01d62b1..d306c3d4385 100644
--- a/src/coreclr/jit/codegenarm64.cpp
+++ b/src/coreclr/jit/codegenarm64.cpp
@@ -3846,11 +3846,11 @@ void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize,
void CodeGen::genSIMDIntrinsic(GenTreeSIMD* simdNode)
{
// NYI for unsupported base types
- if (simdNode->gtSIMDBaseType != TYP_INT && simdNode->gtSIMDBaseType != TYP_LONG &&
- simdNode->gtSIMDBaseType != TYP_FLOAT && simdNode->gtSIMDBaseType != TYP_DOUBLE &&
- simdNode->gtSIMDBaseType != TYP_USHORT && simdNode->gtSIMDBaseType != TYP_UBYTE &&
- simdNode->gtSIMDBaseType != TYP_SHORT && simdNode->gtSIMDBaseType != TYP_BYTE &&
- simdNode->gtSIMDBaseType != TYP_UINT && simdNode->gtSIMDBaseType != TYP_ULONG)
+ if (simdNode->GetSimdBaseType() != TYP_INT && simdNode->GetSimdBaseType() != TYP_LONG &&
+ simdNode->GetSimdBaseType() != TYP_FLOAT && simdNode->GetSimdBaseType() != TYP_DOUBLE &&
+ simdNode->GetSimdBaseType() != TYP_USHORT && simdNode->GetSimdBaseType() != TYP_UBYTE &&
+ simdNode->GetSimdBaseType() != TYP_SHORT && simdNode->GetSimdBaseType() != TYP_BYTE &&
+ simdNode->GetSimdBaseType() != TYP_UINT && simdNode->GetSimdBaseType() != TYP_ULONG)
{
// We don't need a base type for the Upper Save & Restore intrinsics, and we may find
// these implemented over lclVars created by CSE without full handle information (and
@@ -4067,7 +4067,7 @@ void CodeGen::genSIMDIntrinsicInit(GenTreeSIMD* simdNode)
assert(simdNode->gtSIMDIntrinsicID == SIMDIntrinsicInit);
GenTree* op1 = simdNode->gtGetOp1();
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
@@ -4084,7 +4084,7 @@ void CodeGen::genSIMDIntrinsicInit(GenTreeSIMD* simdNode)
assert(genIsValidFloatReg(targetReg));
assert(genIsValidIntReg(op1Reg) || genIsValidFloatReg(op1Reg));
- emitAttr attr = (simdNode->gtSIMDSize > 8) ? EA_16BYTE : EA_8BYTE;
+ emitAttr attr = (simdNode->GetSimdSize() > 8) ? EA_16BYTE : EA_8BYTE;
insOpts opt = genGetSimdInsOpt(attr, baseType);
if (opt == INS_OPTS_1D)
@@ -4122,7 +4122,7 @@ void CodeGen::genSIMDIntrinsicInitN(GenTreeSIMD* simdNode)
var_types targetType = simdNode->TypeGet();
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
regNumber vectorReg = targetReg;
@@ -4150,7 +4150,7 @@ void CodeGen::genSIMDIntrinsicInitN(GenTreeSIMD* simdNode)
initCount++;
}
- assert((initCount * baseTypeSize) <= simdNode->gtSIMDSize);
+ assert((initCount * baseTypeSize) <= simdNode->GetSimdSize());
if (initCount * baseTypeSize < EA_16BYTE)
{
@@ -4199,7 +4199,7 @@ void CodeGen::genSIMDIntrinsicUnOp(GenTreeSIMD* simdNode)
simdNode->gtSIMDIntrinsicID == SIMDIntrinsicConvertToInt64);
GenTree* op1 = simdNode->gtGetOp1();
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
@@ -4211,7 +4211,7 @@ void CodeGen::genSIMDIntrinsicUnOp(GenTreeSIMD* simdNode)
assert(genIsValidFloatReg(targetReg));
instruction ins = getOpForSIMDIntrinsic(simdNode->gtSIMDIntrinsicID, baseType);
- emitAttr attr = (simdNode->gtSIMDSize > 8) ? EA_16BYTE : EA_8BYTE;
+ emitAttr attr = (simdNode->GetSimdSize() > 8) ? EA_16BYTE : EA_8BYTE;
insOpts opt = (ins == INS_mov) ? INS_OPTS_NONE : genGetSimdInsOpt(attr, baseType);
GetEmitter()->emitIns_R_R(ins, attr, targetReg, op1Reg, opt);
@@ -4234,7 +4234,7 @@ void CodeGen::genSIMDIntrinsicWiden(GenTreeSIMD* simdNode)
(simdNode->gtSIMDIntrinsicID == SIMDIntrinsicWidenHi));
GenTree* op1 = simdNode->gtGetOp1();
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types simdType = simdNode->TypeGet();
@@ -4270,7 +4270,7 @@ void CodeGen::genSIMDIntrinsicNarrow(GenTreeSIMD* simdNode)
GenTree* op1 = simdNode->gtGetOp1();
GenTree* op2 = simdNode->gtGetOp2();
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types simdType = simdNode->TypeGet();
@@ -4284,7 +4284,7 @@ void CodeGen::genSIMDIntrinsicNarrow(GenTreeSIMD* simdNode)
assert(genIsValidFloatReg(op2Reg));
assert(genIsValidFloatReg(targetReg));
assert(op2Reg != targetReg);
- assert(simdNode->gtSIMDSize == 16);
+ assert(simdNode->GetSimdSize() == 16);
instruction ins = getOpForSIMDIntrinsic(simdNode->gtSIMDIntrinsicID, baseType);
assert((ins == INS_fcvtn) || (ins == INS_xtn));
@@ -4343,7 +4343,7 @@ void CodeGen::genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode)
GenTree* op1 = simdNode->gtGetOp1();
GenTree* op2 = simdNode->gtGetOp2();
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
@@ -4359,7 +4359,7 @@ void CodeGen::genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode)
// TODO-ARM64-CQ Contain integer constants where posible
instruction ins = getOpForSIMDIntrinsic(simdNode->gtSIMDIntrinsicID, baseType);
- emitAttr attr = (simdNode->gtSIMDSize > 8) ? EA_16BYTE : EA_8BYTE;
+ emitAttr attr = (simdNode->GetSimdSize() > 8) ? EA_16BYTE : EA_8BYTE;
insOpts opt = genGetSimdInsOpt(attr, baseType);
GetEmitter()->emitIns_R_R_R(ins, attr, targetReg, op1Reg, op2Reg, opt);
@@ -4391,7 +4391,7 @@ void CodeGen::genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode)
simdType = TYP_SIMD16;
}
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
@@ -4568,14 +4568,14 @@ void CodeGen::genSIMDIntrinsicSetItem(GenTreeSIMD* simdNode)
GenTree* op1 = simdNode->gtGetOp1();
GenTree* op2 = simdNode->gtGetOp2();
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
assert(varTypeIsSIMD(targetType));
assert(op2->TypeGet() == baseType);
- assert(simdNode->gtSIMDSize >= ((index + 1) * genTypeSize(baseType)));
+ assert(simdNode->GetSimdSize() >= ((index + 1) * genTypeSize(baseType)));
genConsumeOperands(simdNode);
regNumber op1Reg = op1->GetRegNum();
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index e7d37654673..3bc79d2b902 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -505,9 +505,22 @@ public:
// type of an arg node is TYP_BYREF and a local node is TYP_SIMD*.
unsigned char lvSIMDType : 1; // This is a SIMD struct
unsigned char lvUsedInSIMDIntrinsic : 1; // This tells lclvar is used for simd intrinsic
- var_types lvBaseType : 5; // Note: this only packs because var_types is a typedef of unsigned char
-#endif // FEATURE_SIMD
- unsigned char lvRegStruct : 1; // This is a reg-sized non-field-addressed struct.
+ unsigned char lvSimdBaseJitType : 5; // Note: this only packs because CorInfoType has less than 32 entries
+
+ CorInfoType GetSimdBaseJitType() const
+ {
+ return (CorInfoType)lvSimdBaseJitType;
+ }
+
+ void SetSimdBaseJitType(CorInfoType simdBaseJitType)
+ {
+ assert(simdBaseJitType < (1 << 5));
+ lvSimdBaseJitType = (unsigned char)simdBaseJitType;
+ }
+
+ var_types GetSimdBaseType() const;
+#endif // FEATURE_SIMD
+ unsigned char lvRegStruct : 1; // This is a reg-sized non-field-addressed struct.
unsigned char lvClassIsExact : 1; // lvClassHandle is the exact type
@@ -2758,7 +2771,7 @@ public:
GenTreeLclVar* gtNewStoreLclVar(unsigned dstLclNum, GenTree* src);
#ifdef FEATURE_SIMD
- GenTree* gtNewSIMDVectorZero(var_types simdType, var_types baseType, unsigned size);
+ GenTree* gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize);
#endif
GenTree* gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock);
@@ -2815,62 +2828,74 @@ public:
#ifdef FEATURE_SIMD
GenTreeSIMD* gtNewSIMDNode(
- var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, var_types baseType, unsigned size);
- GenTreeSIMD* gtNewSIMDNode(
- var_types type, GenTree* op1, GenTree* op2, SIMDIntrinsicID simdIntrinsicID, var_types baseType, unsigned size);
+ var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize);
+ GenTreeSIMD* gtNewSIMDNode(var_types type,
+ GenTree* op1,
+ GenTree* op2,
+ SIMDIntrinsicID simdIntrinsicID,
+ CorInfoType simdBaseJitType,
+ unsigned simdSize);
void SetOpLclRelatedToSIMDIntrinsic(GenTree* op);
#endif
#ifdef FEATURE_HW_INTRINSICS
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
- var_types baseType,
- unsigned size);
- GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(
- var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, var_types baseType, unsigned size);
+ CorInfoType simdBaseJitType,
+ unsigned simdSize);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(
- var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, var_types baseType, unsigned size);
+ var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize);
+ GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
+ GenTree* op1,
+ GenTree* op2,
+ NamedIntrinsic hwIntrinsicID,
+ CorInfoType simdBaseJitType,
+ unsigned simdSize);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
- var_types baseType,
- unsigned size);
+ CorInfoType simdBaseJitType,
+ unsigned simdSize);
GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
GenTree* op4,
NamedIntrinsic hwIntrinsicID,
- var_types baseType,
- unsigned size);
+ CorInfoType simdBaseJitType,
+ unsigned simdSize);
GenTreeHWIntrinsic* gtNewSimdCreateBroadcastNode(
- var_types type, GenTree* op1, var_types baseType, unsigned size, bool isSimdAsHWIntrinsic);
+ var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic);
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
- var_types baseType,
- unsigned size)
+ CorInfoType simdBaseJitType,
+ unsigned simdSize)
{
- GenTreeHWIntrinsic* node = gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, baseType, size);
+ GenTreeHWIntrinsic* node = gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize);
node->gtFlags |= GTF_SIMDASHW_OP;
return node;
}
GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(
- var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, var_types baseType, unsigned size)
+ var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize)
{
- GenTreeHWIntrinsic* node = gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, baseType, size);
+ GenTreeHWIntrinsic* node = gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize);
node->gtFlags |= GTF_SIMDASHW_OP;
return node;
}
- GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(
- var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, var_types baseType, unsigned size)
+ GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type,
+ GenTree* op1,
+ GenTree* op2,
+ NamedIntrinsic hwIntrinsicID,
+ CorInfoType simdBaseJitType,
+ unsigned simdSize)
{
- GenTreeHWIntrinsic* node = gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, baseType, size);
+ GenTreeHWIntrinsic* node = gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize);
node->gtFlags |= GTF_SIMDASHW_OP;
return node;
}
@@ -2880,10 +2905,11 @@ public:
GenTree* op2,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
- var_types baseType,
- unsigned size)
+ CorInfoType simdBaseJitType,
+ unsigned simdSize)
{
- GenTreeHWIntrinsic* node = gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, baseType, size);
+ GenTreeHWIntrinsic* node =
+ gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize);
node->gtFlags |= GTF_SIMDASHW_OP;
return node;
}
@@ -2895,11 +2921,11 @@ public:
NamedIntrinsic hwIntrinsicID);
GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(
var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID);
- CORINFO_CLASS_HANDLE gtGetStructHandleForHWSIMD(var_types simdType, var_types simdBaseType);
- var_types getBaseTypeFromArgIfNeeded(NamedIntrinsic intrinsic,
- CORINFO_CLASS_HANDLE clsHnd,
- CORINFO_SIG_INFO* sig,
- var_types baseType);
+ CORINFO_CLASS_HANDLE gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType);
+ CorInfoType getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic,
+ CORINFO_CLASS_HANDLE clsHnd,
+ CORINFO_SIG_INFO* sig,
+ CorInfoType simdBaseJitType);
#endif // FEATURE_HW_INTRINSICS
GenTree* gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd);
@@ -4024,13 +4050,13 @@ protected:
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_SIG_INFO* sig,
var_types retType,
- var_types baseType,
+ CorInfoType simdBaseJitType,
unsigned simdSize,
GenTree* newobjThis);
GenTree* impSimdAsHWIntrinsicCndSel(CORINFO_CLASS_HANDLE clsHnd,
var_types retType,
- var_types baseType,
+ CorInfoType simdBaseJitType,
unsigned simdSize,
GenTree* op1,
GenTree* op2,
@@ -4040,7 +4066,7 @@ protected:
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
- var_types baseType,
+ CorInfoType simdBaseJitType,
var_types retType,
unsigned simdSize);
@@ -4048,7 +4074,7 @@ protected:
CORINFO_CLASS_HANDLE argClass,
bool expectAddr = false,
GenTree* newobjThis = nullptr);
- GenTree* impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, var_types baseType);
+ GenTree* impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, CorInfoType simdBaseJitType);
GenTree* addRangeCheckIfNeeded(
NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound);
@@ -4057,7 +4083,7 @@ protected:
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
- var_types baseType,
+ CorInfoType simdBaseJitType,
var_types retType,
unsigned simdSize);
GenTree* impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig);
@@ -4068,7 +4094,7 @@ protected:
GenTree* impSimdAsHWIntrinsicRelOp(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
var_types retType,
- var_types baseType,
+ CorInfoType simdBaseJitType,
unsigned simdSize,
GenTree* op1,
GenTree* op2);
@@ -4145,7 +4171,7 @@ public:
GenTree* impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref);
- var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd, var_types* simdBaseType = nullptr);
+ var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* simdBaseJitType = nullptr);
GenTree* impNormStructVal(GenTree* structVal,
CORINFO_CLASS_HANDLE structHnd,
@@ -5824,11 +5850,11 @@ private:
static MorphAddrContext s_CopyBlockMAC;
#ifdef FEATURE_SIMD
- GenTree* getSIMDStructFromField(GenTree* tree,
- var_types* baseTypeOut,
- unsigned* indexOut,
- unsigned* simdSizeOut,
- bool ignoreUsedInSIMDIntrinsic = false);
+ GenTree* getSIMDStructFromField(GenTree* tree,
+ CorInfoType* simdBaseJitTypeOut,
+ unsigned* indexOut,
+ unsigned* simdSizeOut,
+ bool ignoreUsedInSIMDIntrinsic = false);
GenTree* fgMorphFieldAssignToSIMDIntrinsicSet(GenTree* tree);
GenTree* fgMorphFieldToSIMDIntrinsicGet(GenTree* tree);
bool fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt);
@@ -8254,6 +8280,9 @@ private:
CORINFO_CLASS_HANDLE SIMDLongHandle;
CORINFO_CLASS_HANDLE SIMDUIntHandle;
CORINFO_CLASS_HANDLE SIMDULongHandle;
+ CORINFO_CLASS_HANDLE SIMDNIntHandle;
+ CORINFO_CLASS_HANDLE SIMDNUIntHandle;
+
CORINFO_CLASS_HANDLE SIMDVector2Handle;
CORINFO_CLASS_HANDLE SIMDVector3Handle;
CORINFO_CLASS_HANDLE SIMDVector4Handle;
@@ -8305,19 +8334,19 @@ private:
SIMDHandlesCache* m_simdHandleCache;
// Get an appropriate "zero" for the given type and class handle.
- GenTree* gtGetSIMDZero(var_types simdType, var_types baseType, CORINFO_CLASS_HANDLE simdHandle);
+ GenTree* gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle);
// Get the handle for a SIMD type.
- CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, var_types simdBaseType)
+ CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, CorInfoType simdBaseJitType)
{
if (m_simdHandleCache == nullptr)
{
// This may happen if the JIT generates SIMD node on its own, without importing them.
- // Otherwise getBaseTypeAndSizeOfSIMDType should have created the cache.
+ // Otherwise getBaseJitTypeAndSizeOfSIMDType should have created the cache.
return NO_CLASS_HANDLE;
}
- if (simdBaseType == TYP_FLOAT)
+ if (simdBaseJitType == CORINFO_TYPE_FLOAT)
{
switch (simdType)
{
@@ -8339,28 +8368,32 @@ private:
}
}
assert(emitTypeSize(simdType) <= largestEnregisterableStructSize());
- switch (simdBaseType)
+ switch (simdBaseJitType)
{
- case TYP_FLOAT:
+ case CORINFO_TYPE_FLOAT:
return m_simdHandleCache->SIMDFloatHandle;
- case TYP_DOUBLE:
+ case CORINFO_TYPE_DOUBLE:
return m_simdHandleCache->SIMDDoubleHandle;
- case TYP_INT:
+ case CORINFO_TYPE_INT:
return m_simdHandleCache->SIMDIntHandle;
- case TYP_USHORT:
+ case CORINFO_TYPE_USHORT:
return m_simdHandleCache->SIMDUShortHandle;
- case TYP_UBYTE:
+ case CORINFO_TYPE_UBYTE:
return m_simdHandleCache->SIMDUByteHandle;
- case TYP_SHORT:
+ case CORINFO_TYPE_SHORT:
return m_simdHandleCache->SIMDShortHandle;
- case TYP_BYTE:
+ case CORINFO_TYPE_BYTE:
return m_simdHandleCache->SIMDByteHandle;
- case TYP_LONG:
+ case CORINFO_TYPE_LONG:
return m_simdHandleCache->SIMDLongHandle;
- case TYP_UINT:
+ case CORINFO_TYPE_UINT:
return m_simdHandleCache->SIMDUIntHandle;
- case TYP_ULONG:
+ case CORINFO_TYPE_ULONG:
return m_simdHandleCache->SIMDULongHandle;
+ case CORINFO_TYPE_NATIVEINT:
+ return m_simdHandleCache->SIMDNIntHandle;
+ case CORINFO_TYPE_NATIVEUINT:
+ return m_simdHandleCache->SIMDNUIntHandle;
default:
assert(!"Didn't find a class handle for simdType");
}
@@ -8401,16 +8434,16 @@ private:
return (intrinsicId == SIMDIntrinsicEqual);
}
- // Returns base type of a TYP_SIMD local.
- // Returns TYP_UNKNOWN if the local is not TYP_SIMD.
- var_types getBaseTypeOfSIMDLocal(GenTree* tree)
+ // Returns base JIT type of a TYP_SIMD local.
+ // Returns CORINFO_TYPE_UNDEF if the local is not TYP_SIMD.
+ CorInfoType getBaseJitTypeOfSIMDLocal(GenTree* tree)
{
if (isSIMDTypeLocal(tree))
{
- return lvaTable[tree->AsLclVarCommon()->GetLclNum()].lvBaseType;
+ return lvaTable[tree->AsLclVarCommon()->GetLclNum()].GetSimdBaseJitType();
}
- return TYP_UNKNOWN;
+ return CORINFO_TYPE_UNDEF;
}
bool isSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
@@ -8461,13 +8494,13 @@ private:
return isSIMDClass(pTypeInfo) || isHWSIMDClass(pTypeInfo);
}
- // Get the base (element) type and size in bytes for a SIMD type. Returns TYP_UNKNOWN
- // if it is not a SIMD type or is an unsupported base type.
- var_types getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr);
+ // Get the base (element) type and size in bytes for a SIMD type. Returns CORINFO_TYPE_UNDEF
+ // if it is not a SIMD type or is an unsupported base JIT type.
+ CorInfoType getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr);
- var_types getBaseTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd)
+ CorInfoType getBaseJitTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd)
{
- return getBaseTypeAndSizeOfSIMDType(typeHnd, nullptr);
+ return getBaseJitTypeAndSizeOfSIMDType(typeHnd, nullptr);
}
// Get SIMD Intrinsic info given the method handle.
@@ -8477,7 +8510,7 @@ private:
CORINFO_SIG_INFO* sig,
bool isNewObj,
unsigned* argCount,
- var_types* baseType,
+ CorInfoType* simdBaseJitType,
unsigned* sizeBytes);
// Pops and returns GenTree node from importers type stack.
@@ -8485,14 +8518,14 @@ private:
GenTree* impSIMDPopStack(var_types type, bool expectAddr = false, CORINFO_CLASS_HANDLE structType = nullptr);
// Create a GT_SIMD tree for a Get property of SIMD vector with a fixed index.
- GenTreeSIMD* impSIMDGetFixed(var_types simdType, var_types baseType, unsigned simdSize, int index);
+ GenTreeSIMD* impSIMDGetFixed(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize, int index);
// Transforms operands and returns the SIMD intrinsic to be applied on
// transformed operands to obtain given relop result.
SIMDIntrinsicID impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
CORINFO_CLASS_HANDLE typeHnd,
unsigned simdVectorSize,
- var_types* baseType,
+ CorInfoType* inOutBaseJitType,
GenTree** op1,
GenTree** op2);
@@ -8548,7 +8581,7 @@ private:
#else
vectorRegisterByteLength = getSIMDVectorRegisterByteLength();
#endif
- return (simdNode->gtSIMDSize < vectorRegisterByteLength);
+ return (simdNode->GetSimdSize() < vectorRegisterByteLength);
}
// Get the type for the hardware SIMD vector.
@@ -8579,14 +8612,14 @@ private:
int getSIMDTypeSizeInBytes(CORINFO_CLASS_HANDLE typeHnd)
{
unsigned sizeBytes = 0;
- (void)getBaseTypeAndSizeOfSIMDType(typeHnd, &sizeBytes);
+ (void)getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes);
return sizeBytes;
}
- // Get the the number of elements of basetype of SIMD vector given by its size and baseType
+ // Get the the number of elements of baseType of SIMD vector given by its size and baseType
static int getSIMDVectorLength(unsigned simdSize, var_types baseType);
- // Get the the number of elements of basetype of SIMD vector given by its type handle
+ // Get the the number of elements of baseType of SIMD vector given by its type handle
int getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd);
// Get preferred alignment of SIMD type.
diff --git a/src/coreclr/jit/decomposelongs.cpp b/src/coreclr/jit/decomposelongs.cpp
index 092e6b54fc6..882168e0224 100644
--- a/src/coreclr/jit/decomposelongs.cpp
+++ b/src/coreclr/jit/decomposelongs.cpp
@@ -1687,8 +1687,8 @@ GenTree* DecomposeLongs::DecomposeSimdGetItem(LIR::Use& use)
assert(oper == GT_SIMD);
GenTreeSIMD* simdTree = tree->AsSIMD();
- var_types baseType = simdTree->gtSIMDBaseType;
- unsigned simdSize = simdTree->gtSIMDSize;
+ var_types baseType = simdTree->GetSimdBaseType();
+ unsigned simdSize = simdTree->GetSimdSize();
assert(simdTree->gtSIMDIntrinsicID == SIMDIntrinsicGetItem);
assert(varTypeIsLong(baseType));
@@ -1743,8 +1743,8 @@ GenTree* DecomposeLongs::DecomposeSimdGetItem(LIR::Use& use)
Range().InsertBefore(simdTree, simdTmpVar1, indexTmpVar1, two1, indexTimesTwo1);
}
- GenTree* loResult =
- m_compiler->gtNewSIMDNode(TYP_INT, simdTmpVar1, indexTimesTwo1, SIMDIntrinsicGetItem, TYP_INT, simdSize);
+ GenTree* loResult = m_compiler->gtNewSIMDNode(TYP_INT, simdTmpVar1, indexTimesTwo1, SIMDIntrinsicGetItem,
+ CORINFO_TYPE_INT, simdSize);
Range().InsertBefore(simdTree, loResult);
// Create:
@@ -1769,8 +1769,8 @@ GenTree* DecomposeLongs::DecomposeSimdGetItem(LIR::Use& use)
Range().InsertBefore(simdTree, one, indexTimesTwoPlusOne);
}
- GenTree* hiResult =
- m_compiler->gtNewSIMDNode(TYP_INT, simdTmpVar2, indexTimesTwoPlusOne, SIMDIntrinsicGetItem, TYP_INT, simdSize);
+ GenTree* hiResult = m_compiler->gtNewSIMDNode(TYP_INT, simdTmpVar2, indexTimesTwoPlusOne, SIMDIntrinsicGetItem,
+ CORINFO_TYPE_INT, simdSize);
Range().InsertBefore(simdTree, hiResult);
// Done with the original tree; remove it.
diff --git a/src/coreclr/jit/ee_il_dll.hpp b/src/coreclr/jit/ee_il_dll.hpp
index 5a45f22bce6..e9dd74e96db 100644
--- a/src/coreclr/jit/ee_il_dll.hpp
+++ b/src/coreclr/jit/ee_il_dll.hpp
@@ -215,6 +215,67 @@ inline var_types JITtype2varType(CorInfoType type)
return ((var_types)varTypeMap[type]);
};
+// Convert the type returned from the VM to a precise var_type
+inline var_types JitType2PreciseVarType(CorInfoType type)
+{
+
+ static const unsigned char preciseVarTypeMap[CORINFO_TYPE_COUNT] = {
+ // see the definition of enum CorInfoType in file inc/corinfo.h
+ TYP_UNDEF, // CORINFO_TYPE_UNDEF = 0x0,
+ TYP_VOID, // CORINFO_TYPE_VOID = 0x1,
+ TYP_BOOL, // CORINFO_TYPE_BOOL = 0x2,
+ TYP_USHORT, // CORINFO_TYPE_CHAR = 0x3,
+ TYP_BYTE, // CORINFO_TYPE_BYTE = 0x4,
+ TYP_UBYTE, // CORINFO_TYPE_UBYTE = 0x5,
+ TYP_SHORT, // CORINFO_TYPE_SHORT = 0x6,
+ TYP_USHORT, // CORINFO_TYPE_USHORT = 0x7,
+ TYP_INT, // CORINFO_TYPE_INT = 0x8,
+ TYP_UINT, // CORINFO_TYPE_UINT = 0x9,
+ TYP_LONG, // CORINFO_TYPE_LONG = 0xa,
+ TYP_ULONG, // CORINFO_TYPE_ULONG = 0xb,
+ TYP_I_IMPL, // CORINFO_TYPE_NATIVEINT = 0xc,
+ TYP_U_IMPL, // CORINFO_TYPE_NATIVEUINT = 0xd,
+ TYP_FLOAT, // CORINFO_TYPE_FLOAT = 0xe,
+ TYP_DOUBLE, // CORINFO_TYPE_DOUBLE = 0xf,
+ TYP_REF, // CORINFO_TYPE_STRING = 0x10, // Not used, should remove
+ TYP_U_IMPL, // CORINFO_TYPE_PTR = 0x11,
+ TYP_BYREF, // CORINFO_TYPE_BYREF = 0x12,
+ TYP_STRUCT, // CORINFO_TYPE_VALUECLASS = 0x13,
+ TYP_REF, // CORINFO_TYPE_CLASS = 0x14,
+ TYP_STRUCT, // CORINFO_TYPE_REFANY = 0x15,
+
+ // Generic type variables only appear when we're doing
+ // verification of generic code, in which case we're running
+ // in "import only" mode. Annoyingly the "import only"
+ // mode of the JIT actually does a fair bit of compilation,
+ // so we have to trick the compiler into thinking it's compiling
+ // a real instantiation. We do that by just pretending we're
+ // compiling the "object" instantiation of the code, i.e. by
+ // turing all generic type variables refs, except for a few
+ // choice places to do with verification, where we use
+ // verification types and CLASS_HANDLEs to track the difference.
+
+ TYP_REF, // CORINFO_TYPE_VAR = 0x16,
+ };
+
+ // spot check to make certain enumerations have not changed
+
+ assert(preciseVarTypeMap[CORINFO_TYPE_CLASS] == TYP_REF);
+ assert(preciseVarTypeMap[CORINFO_TYPE_BYREF] == TYP_BYREF);
+ assert(preciseVarTypeMap[CORINFO_TYPE_PTR] == TYP_U_IMPL);
+ assert(preciseVarTypeMap[CORINFO_TYPE_INT] == TYP_INT);
+ assert(preciseVarTypeMap[CORINFO_TYPE_UINT] == TYP_UINT);
+ assert(preciseVarTypeMap[CORINFO_TYPE_DOUBLE] == TYP_DOUBLE);
+ assert(preciseVarTypeMap[CORINFO_TYPE_VOID] == TYP_VOID);
+ assert(preciseVarTypeMap[CORINFO_TYPE_VALUECLASS] == TYP_STRUCT);
+ assert(preciseVarTypeMap[CORINFO_TYPE_REFANY] == TYP_STRUCT);
+
+ assert(type < CORINFO_TYPE_COUNT);
+ assert(preciseVarTypeMap[type] != TYP_UNDEF);
+
+ return ((var_types)preciseVarTypeMap[type]);
+};
+
inline CORINFO_CALLINFO_FLAGS combine(CORINFO_CALLINFO_FLAGS flag1, CORINFO_CALLINFO_FLAGS flag2)
{
return (CORINFO_CALLINFO_FLAGS)(flag1 | flag2);
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index d4c1a188570..c2f53c00f80 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -1479,8 +1479,8 @@ AGAIN:
#ifdef FEATURE_SIMD
case GT_SIMD:
if ((op1->AsSIMD()->gtSIMDIntrinsicID != op2->AsSIMD()->gtSIMDIntrinsicID) ||
- (op1->AsSIMD()->gtSIMDBaseType != op2->AsSIMD()->gtSIMDBaseType) ||
- (op1->AsSIMD()->gtSIMDSize != op2->AsSIMD()->gtSIMDSize))
+ (op1->AsSIMD()->GetSimdBaseType() != op2->AsSIMD()->GetSimdBaseType()) ||
+ (op1->AsSIMD()->GetSimdSize() != op2->AsSIMD()->GetSimdSize()))
{
return false;
}
@@ -1490,8 +1490,8 @@ AGAIN:
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
if ((op1->AsHWIntrinsic()->gtHWIntrinsicId != op2->AsHWIntrinsic()->gtHWIntrinsicId) ||
- (op1->AsHWIntrinsic()->gtSIMDBaseType != op2->AsHWIntrinsic()->gtSIMDBaseType) ||
- (op1->AsHWIntrinsic()->gtSIMDSize != op2->AsHWIntrinsic()->gtSIMDSize) ||
+ (op1->AsHWIntrinsic()->GetSimdBaseType() != op2->AsHWIntrinsic()->GetSimdBaseType()) ||
+ (op1->AsHWIntrinsic()->GetSimdSize() != op2->AsHWIntrinsic()->GetSimdSize()) ||
(op1->AsHWIntrinsic()->GetAuxiliaryType() != op2->AsHWIntrinsic()->GetAuxiliaryType()))
{
return false;
@@ -2152,16 +2152,16 @@ AGAIN:
#ifdef FEATURE_SIMD
case GT_SIMD:
hash += tree->AsSIMD()->gtSIMDIntrinsicID;
- hash += tree->AsSIMD()->gtSIMDBaseType;
- hash += tree->AsSIMD()->gtSIMDSize;
+ hash += tree->AsSIMD()->GetSimdBaseType();
+ hash += tree->AsSIMD()->GetSimdSize();
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
hash += tree->AsHWIntrinsic()->gtHWIntrinsicId;
- hash += tree->AsHWIntrinsic()->gtSIMDBaseType;
- hash += tree->AsHWIntrinsic()->gtSIMDSize;
+ hash += tree->AsHWIntrinsic()->GetSimdBaseType();
+ hash += tree->AsHWIntrinsic()->GetSimdSize();
hash += tree->AsHWIntrinsic()->GetAuxiliaryType();
break;
#endif // FEATURE_HW_INTRINSICS
@@ -6353,15 +6353,15 @@ GenTreeLclVar* Compiler::gtNewStoreLclVar(unsigned dstLclNum, GenTree* src)
// gtNewSIMDVectorZero: create a GT_SIMD node for Vector<T>.Zero
//
// Arguments:
-// simdType - simd vector type
-// baseType - element type of vector
-// size - size of vector in bytes
-GenTree* Compiler::gtNewSIMDVectorZero(var_types simdType, var_types baseType, unsigned size)
+// simdType - simd vector type
+// simdBaseJitType - element type of vector
+// simdSize - size of vector in bytes
+GenTree* Compiler::gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize)
{
- baseType = genActualType(baseType);
- GenTree* initVal = gtNewZeroConNode(baseType);
- initVal->gtType = baseType;
- return gtNewSIMDNode(simdType, initVal, nullptr, SIMDIntrinsicInit, baseType, size);
+ var_types simdBaseType = genActualType(JitType2PreciseVarType(simdBaseJitType));
+ GenTree* initVal = gtNewZeroConNode(simdBaseType);
+ initVal->gtType = simdBaseType;
+ return gtNewSIMDNode(simdType, initVal, nullptr, SIMDIntrinsicInit, simdBaseJitType, simdSize);
}
#endif // FEATURE_SIMD
@@ -7907,7 +7907,7 @@ GenTree* Compiler::gtCloneExpr(
{
GenTreeSIMD* simdOp = tree->AsSIMD();
copy = gtNewSIMDNode(simdOp->TypeGet(), simdOp->gtGetOp1(), simdOp->gtGetOp2IfPresent(),
- simdOp->gtSIMDIntrinsicID, simdOp->gtSIMDBaseType, simdOp->gtSIMDSize);
+ simdOp->gtSIMDIntrinsicID, simdOp->GetSimdBaseJitType(), simdOp->GetSimdSize());
}
break;
#endif
@@ -7919,8 +7919,8 @@ GenTree* Compiler::gtCloneExpr(
copy = new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(hwintrinsicOp->TypeGet(), hwintrinsicOp->gtGetOp1(),
hwintrinsicOp->gtGetOp2IfPresent(), hwintrinsicOp->gtHWIntrinsicId,
- hwintrinsicOp->gtSIMDBaseType, hwintrinsicOp->gtSIMDSize);
- copy->AsHWIntrinsic()->SetAuxiliaryType(hwintrinsicOp->GetAuxiliaryType());
+ hwintrinsicOp->GetSimdBaseJitType(), hwintrinsicOp->GetSimdSize());
+ copy->AsHWIntrinsic()->SetAuxiliaryJitType(hwintrinsicOp->GetAuxiliaryJitType());
}
break;
#endif
@@ -11832,7 +11832,7 @@ void Compiler::gtDispTree(GenTree* tree,
#ifdef FEATURE_SIMD
if (tree->gtOper == GT_SIMD)
{
- printf(" %s %s", varTypeName(tree->AsSIMD()->gtSIMDBaseType),
+ printf(" %s %s", varTypeName(tree->AsSIMD()->GetSimdBaseType()),
simdIntrinsicNames[tree->AsSIMD()->gtSIMDIntrinsicID]);
}
#endif // FEATURE_SIMD
@@ -11840,9 +11840,9 @@ void Compiler::gtDispTree(GenTree* tree,
#ifdef FEATURE_HW_INTRINSICS
if (tree->gtOper == GT_HWINTRINSIC)
{
- printf(" %s %s", tree->AsHWIntrinsic()->gtSIMDBaseType == TYP_UNKNOWN
+ printf(" %s %s", tree->AsHWIntrinsic()->GetSimdBaseType() == TYP_UNKNOWN
? ""
- : varTypeName(tree->AsHWIntrinsic()->gtSIMDBaseType),
+ : varTypeName(tree->AsHWIntrinsic()->GetSimdBaseType()),
HWIntrinsicInfo::lookupName(tree->AsHWIntrinsic()->gtHWIntrinsicId));
}
#endif // FEATURE_HW_INTRINSICS
@@ -17428,16 +17428,16 @@ bool Compiler::gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_
// gtGetSIMDZero: Get a zero value of the appropriate SIMD type.
//
// Arguments:
-// var_types - The simdType
-// baseType - The base type we need
-// simdHandle - The handle for the SIMD type
+// var_types - The simdType
+// simdBaseJitType - The SIMD base JIT type we need
+// simdHandle - The handle for the SIMD type
//
// Return Value:
// A node generating the appropriate Zero, if we are able to discern it,
// otherwise null (note that this shouldn't happen, but callers should
// be tolerant of this case).
-GenTree* Compiler::gtGetSIMDZero(var_types simdType, var_types baseType, CORINFO_CLASS_HANDLE simdHandle)
+GenTree* Compiler::gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle)
{
bool found = false;
bool isHWSIMD = true;
@@ -17446,38 +17446,44 @@ GenTree* Compiler::gtGetSIMDZero(var_types simdType, var_types baseType, CORINFO
// First, determine whether this is Vector<T>.
if (simdType == getSIMDVectorType())
{
- switch (baseType)
+ switch (simdBaseJitType)
{
- case TYP_FLOAT:
+ case CORINFO_TYPE_FLOAT:
found = (simdHandle == m_simdHandleCache->SIMDFloatHandle);
break;
- case TYP_DOUBLE:
+ case CORINFO_TYPE_DOUBLE:
found = (simdHandle == m_simdHandleCache->SIMDDoubleHandle);
break;
- case TYP_INT:
+ case CORINFO_TYPE_INT:
found = (simdHandle == m_simdHandleCache->SIMDIntHandle);
break;
- case TYP_USHORT:
+ case CORINFO_TYPE_USHORT:
found = (simdHandle == m_simdHandleCache->SIMDUShortHandle);
break;
- case TYP_UBYTE:
+ case CORINFO_TYPE_UBYTE:
found = (simdHandle == m_simdHandleCache->SIMDUByteHandle);
break;
- case TYP_SHORT:
+ case CORINFO_TYPE_SHORT:
found = (simdHandle == m_simdHandleCache->SIMDShortHandle);
break;
- case TYP_BYTE:
+ case CORINFO_TYPE_BYTE:
found = (simdHandle == m_simdHandleCache->SIMDByteHandle);
break;
- case TYP_LONG:
+ case CORINFO_TYPE_LONG:
found = (simdHandle == m_simdHandleCache->SIMDLongHandle);
break;
- case TYP_UINT:
+ case CORINFO_TYPE_UINT:
found = (simdHandle == m_simdHandleCache->SIMDUIntHandle);
break;
- case TYP_ULONG:
+ case CORINFO_TYPE_ULONG:
found = (simdHandle == m_simdHandleCache->SIMDULongHandle);
break;
+ case CORINFO_TYPE_NATIVEINT:
+ found = (simdHandle == m_simdHandleCache->SIMDNIntHandle);
+ break;
+ case CORINFO_TYPE_NATIVEUINT:
+ found = (simdHandle == m_simdHandleCache->SIMDNUIntHandle);
+ break;
default:
break;
}
@@ -17493,9 +17499,9 @@ GenTree* Compiler::gtGetSIMDZero(var_types simdType, var_types baseType, CORINFO
switch (simdType)
{
case TYP_SIMD8:
- switch (baseType)
+ switch (simdBaseJitType)
{
- case TYP_FLOAT:
+ case CORINFO_TYPE_FLOAT:
if (simdHandle == m_simdHandleCache->SIMDVector2Handle)
{
isHWSIMD = false;
@@ -17506,22 +17512,22 @@ GenTree* Compiler::gtGetSIMDZero(var_types simdType, var_types baseType, CORINFO
assert(simdHandle == m_simdHandleCache->Vector64FloatHandle);
}
break;
- case TYP_INT:
+ case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector64IntHandle);
break;
- case TYP_USHORT:
+ case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector64UShortHandle);
break;
- case TYP_UBYTE:
+ case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector64UByteHandle);
break;
- case TYP_SHORT:
+ case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector64ShortHandle);
break;
- case TYP_BYTE:
+ case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector64ByteHandle);
break;
- case TYP_UINT:
+ case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector64UIntHandle);
#endif // defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS)
break;
@@ -17531,14 +17537,14 @@ GenTree* Compiler::gtGetSIMDZero(var_types simdType, var_types baseType, CORINFO
break;
case TYP_SIMD12:
- assert((baseType == TYP_FLOAT) && (simdHandle == m_simdHandleCache->SIMDVector3Handle));
+ assert((simdBaseJitType == CORINFO_TYPE_FLOAT) && (simdHandle == m_simdHandleCache->SIMDVector3Handle));
isHWSIMD = false;
break;
case TYP_SIMD16:
- switch (baseType)
+ switch (simdBaseJitType)
{
- case TYP_FLOAT:
+ case CORINFO_TYPE_FLOAT:
if (simdHandle == m_simdHandleCache->SIMDVector4Handle)
{
isHWSIMD = false;
@@ -17549,31 +17555,31 @@ GenTree* Compiler::gtGetSIMDZero(var_types simdType, var_types baseType, CORINFO
assert(simdHandle == m_simdHandleCache->Vector128FloatHandle);
}
break;
- case TYP_DOUBLE:
+ case CORINFO_TYPE_DOUBLE:
assert(simdHandle == m_simdHandleCache->Vector128DoubleHandle);
break;
- case TYP_INT:
+ case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector128IntHandle);
break;
- case TYP_USHORT:
+ case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector128UShortHandle);
break;
- case TYP_UBYTE:
+ case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector128UByteHandle);
break;
- case TYP_SHORT:
+ case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector128ShortHandle);
break;
- case TYP_BYTE:
+ case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector128ByteHandle);
break;
- case TYP_LONG:
+ case CORINFO_TYPE_LONG:
assert(simdHandle == m_simdHandleCache->Vector128LongHandle);
break;
- case TYP_UINT:
+ case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector128UIntHandle);
break;
- case TYP_ULONG:
+ case CORINFO_TYPE_ULONG:
assert(simdHandle == m_simdHandleCache->Vector128ULongHandle);
break;
#endif // defined(FEATURE_HW_INTRINSICS)
@@ -17585,36 +17591,36 @@ GenTree* Compiler::gtGetSIMDZero(var_types simdType, var_types baseType, CORINFO
#if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS)
case TYP_SIMD32:
- switch (baseType)
+ switch (simdBaseJitType)
{
- case TYP_FLOAT:
+ case CORINFO_TYPE_FLOAT:
assert(simdHandle == m_simdHandleCache->Vector256FloatHandle);
break;
- case TYP_DOUBLE:
+ case CORINFO_TYPE_DOUBLE:
assert(simdHandle == m_simdHandleCache->Vector256DoubleHandle);
break;
- case TYP_INT:
+ case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector256IntHandle);
break;
- case TYP_USHORT:
+ case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector256UShortHandle);
break;
- case TYP_UBYTE:
+ case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector256UByteHandle);
break;
- case TYP_SHORT:
+ case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector256ShortHandle);
break;
- case TYP_BYTE:
+ case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector256ByteHandle);
break;
- case TYP_LONG:
+ case CORINFO_TYPE_LONG:
assert(simdHandle == m_simdHandleCache->Vector256LongHandle);
break;
- case TYP_UINT:
+ case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector256UIntHandle);
break;
- case TYP_ULONG:
+ case CORINFO_TYPE_ULONG:
assert(simdHandle == m_simdHandleCache->Vector256ULongHandle);
break;
default:
@@ -17639,7 +17645,7 @@ GenTree* Compiler::gtGetSIMDZero(var_types simdType, var_types baseType, CORINFO
// We only return the HWIntrinsicNode if SSE is supported, since it is possible for
// the user to disable the SSE HWIntrinsic support via the COMPlus configuration knobs
// even though the hardware vector types are still available.
- return gtNewSimdHWIntrinsicNode(simdType, NI_Vector128_get_Zero, baseType, size);
+ return gtNewSimdHWIntrinsicNode(simdType, NI_Vector128_get_Zero, simdBaseJitType, size);
}
return nullptr;
case TYP_SIMD32:
@@ -17648,7 +17654,7 @@ GenTree* Compiler::gtGetSIMDZero(var_types simdType, var_types baseType, CORINFO
// We only return the HWIntrinsicNode if AVX is supported, since it is possible for
// the user to disable the AVX HWIntrinsic support via the COMPlus configuration knobs
// even though the hardware vector types are still available.
- return gtNewSimdHWIntrinsicNode(simdType, NI_Vector256_get_Zero, baseType, size);
+ return gtNewSimdHWIntrinsicNode(simdType, NI_Vector256_get_Zero, simdBaseJitType, size);
}
return nullptr;
default:
@@ -17656,11 +17662,11 @@ GenTree* Compiler::gtGetSIMDZero(var_types simdType, var_types baseType, CORINFO
}
#endif // TARGET_XARCH && FEATURE_HW_INTRINSICS
JITDUMP("Coudn't find the matching SIMD type for %s<%s> in gtGetSIMDZero\n", varTypeName(simdType),
- varTypeName(baseType));
+ varTypeName(JitType2PreciseVarType(simdBaseJitType)));
}
else
{
- return gtNewSIMDVectorZero(simdType, baseType, size);
+ return gtNewSIMDVectorZero(simdType, simdBaseJitType, size);
}
return nullptr;
}
@@ -17707,11 +17713,11 @@ CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree)
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(tree))
{
- structHnd = gtGetStructHandleForSIMD(tree->gtType, TYP_FLOAT);
+ structHnd = gtGetStructHandleForSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
#ifdef FEATURE_HW_INTRINSICS
if (structHnd == NO_CLASS_HANDLE)
{
- structHnd = gtGetStructHandleForHWSIMD(tree->gtType, TYP_FLOAT);
+ structHnd = gtGetStructHandleForHWSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
}
#endif
}
@@ -17730,11 +17736,11 @@ CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree)
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(tree))
{
- structHnd = gtGetStructHandleForSIMD(tree->gtType, TYP_FLOAT);
+ structHnd = gtGetStructHandleForSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
#ifdef FEATURE_HW_INTRINSICS
if (structHnd == NO_CLASS_HANDLE)
{
- structHnd = gtGetStructHandleForHWSIMD(tree->gtType, TYP_FLOAT);
+ structHnd = gtGetStructHandleForHWSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
}
#endif
}
@@ -17780,18 +17786,18 @@ CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree)
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
- structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsSIMD()->gtSIMDBaseType);
+ structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsSIMD()->GetSimdBaseJitType());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
if ((tree->gtFlags & GTF_SIMDASHW_OP) != 0)
{
- structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsHWIntrinsic()->gtSIMDBaseType);
+ structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsHWIntrinsic()->GetSimdBaseJitType());
}
else
{
- structHnd = gtGetStructHandleForHWSIMD(tree->gtType, tree->AsHWIntrinsic()->gtSIMDBaseType);
+ structHnd = gtGetStructHandleForHWSIMD(tree->gtType, tree->AsHWIntrinsic()->GetSimdBaseJitType());
}
break;
#endif
@@ -18896,23 +18902,27 @@ bool FieldSeqNode::IsPseudoField() const
#ifdef FEATURE_SIMD
GenTreeSIMD* Compiler::gtNewSIMDNode(
- var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, var_types baseType, unsigned size)
+ var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(op1 != nullptr);
SetOpLclRelatedToSIMDIntrinsic(op1);
- GenTreeSIMD* simdNode = new (this, GT_SIMD) GenTreeSIMD(type, op1, simdIntrinsicID, baseType, size);
+ GenTreeSIMD* simdNode = new (this, GT_SIMD) GenTreeSIMD(type, op1, simdIntrinsicID, simdBaseJitType, simdSize);
return simdNode;
}
-GenTreeSIMD* Compiler::gtNewSIMDNode(
- var_types type, GenTree* op1, GenTree* op2, SIMDIntrinsicID simdIntrinsicID, var_types baseType, unsigned size)
+GenTreeSIMD* Compiler::gtNewSIMDNode(var_types type,
+ GenTree* op1,
+ GenTree* op2,
+ SIMDIntrinsicID simdIntrinsicID,
+ CorInfoType simdBaseJitType,
+ unsigned simdSize)
{
assert(op1 != nullptr);
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
- GenTreeSIMD* simdNode = new (this, GT_SIMD) GenTreeSIMD(type, op1, op2, simdIntrinsicID, baseType, size);
+ GenTreeSIMD* simdNode = new (this, GT_SIMD) GenTreeSIMD(type, op1, op2, simdIntrinsicID, simdBaseJitType, simdSize);
return simdNode;
}
@@ -18953,6 +18963,28 @@ bool GenTree::isCommutativeSIMDIntrinsic()
}
}
+var_types GenTreeJitIntrinsic::GetAuxiliaryType() const
+{
+ CorInfoType auxiliaryJitType = GetAuxiliaryJitType();
+
+ if (auxiliaryJitType == CORINFO_TYPE_UNDEF)
+ {
+ return TYP_UNKNOWN;
+ }
+ return JitType2PreciseVarType(auxiliaryJitType);
+}
+
+var_types GenTreeJitIntrinsic::GetSimdBaseType() const
+{
+ CorInfoType simdBaseJitType = GetSimdBaseJitType();
+
+ if (simdBaseJitType == CORINFO_TYPE_UNDEF)
+ {
+ return TYP_UNKNOWN;
+ }
+ return JitType2PreciseVarType(simdBaseJitType);
+}
+
// Returns true for the SIMD Intrinsic instructions that have MemoryLoad semantics, false otherwise
bool GenTreeSIMD::OperIsMemoryLoad() const
{
@@ -19052,27 +19084,31 @@ bool GenTree::isRMWHWIntrinsic(Compiler* comp)
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
- var_types baseType,
- unsigned size)
+ CorInfoType simdBaseJitType,
+ unsigned simdSize)
{
- return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, hwIntrinsicID, baseType, size);
+ return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, hwIntrinsicID, simdBaseJitType, simdSize);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(
- var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, var_types baseType, unsigned simdSize)
+ var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
- return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, op1, hwIntrinsicID, baseType, simdSize);
+ return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, op1, hwIntrinsicID, simdBaseJitType, simdSize);
}
-GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(
- var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, var_types baseType, unsigned simdSize)
+GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
+ GenTree* op1,
+ GenTree* op2,
+ NamedIntrinsic hwIntrinsicID,
+ CorInfoType simdBaseJitType,
+ unsigned simdSize)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
- return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, op1, op2, hwIntrinsicID, baseType, simdSize);
+ return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
@@ -19080,15 +19116,15 @@ GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op2,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
- var_types baseType,
- unsigned size)
+ CorInfoType simdBaseJitType,
+ unsigned simdSize)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
return new (this, GT_HWINTRINSIC)
- GenTreeHWIntrinsic(type, gtNewArgList(op1, op2, op3), hwIntrinsicID, baseType, size);
+ GenTreeHWIntrinsic(type, gtNewArgList(op1, op2, op3), hwIntrinsicID, simdBaseJitType, simdSize);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
@@ -19097,8 +19133,8 @@ GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op3,
GenTree* op4,
NamedIntrinsic hwIntrinsicID,
- var_types baseType,
- unsigned size)
+ CorInfoType simdBaseJitType,
+ unsigned simdSize)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
@@ -19106,17 +19142,18 @@ GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
SetOpLclRelatedToSIMDIntrinsic(op4);
return new (this, GT_HWINTRINSIC)
- GenTreeHWIntrinsic(type, gtNewArgList(op1, op2, op3, op4), hwIntrinsicID, baseType, size);
+ GenTreeHWIntrinsic(type, gtNewArgList(op1, op2, op3, op4), hwIntrinsicID, simdBaseJitType, simdSize);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdCreateBroadcastNode(
- var_types type, GenTree* op1, var_types baseType, unsigned size, bool isSimdAsHWIntrinsic)
+ var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
NamedIntrinsic hwIntrinsicID = NI_Vector128_Create;
+ var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
#if defined(TARGET_XARCH)
#if defined(TARGET_X86)
- if (varTypeIsLong(baseType) && !op1->IsIntegralConst())
+ if (varTypeIsLong(simdBaseType) && !op1->IsIntegralConst())
{
// TODO-XARCH-CQ: It may be beneficial to emit the movq
// instruction, which takes a 64-bit memory address and
@@ -19125,12 +19162,12 @@ GenTreeHWIntrinsic* Compiler::gtNewSimdCreateBroadcastNode(
}
#endif // TARGET_X86
- if (size == 32)
+ if (simdSize == 32)
{
hwIntrinsicID = NI_Vector256_Create;
}
#elif defined(TARGET_ARM64)
- if (size == 8)
+ if (simdSize == 8)
{
hwIntrinsicID = NI_Vector64_Create;
}
@@ -19140,17 +19177,17 @@ GenTreeHWIntrinsic* Compiler::gtNewSimdCreateBroadcastNode(
if (isSimdAsHWIntrinsic)
{
- return gtNewSimdAsHWIntrinsicNode(type, op1, hwIntrinsicID, baseType, size);
+ return gtNewSimdAsHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize);
}
- return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, baseType, size);
+ return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
- return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, op1, hwIntrinsicID, TYP_UNKNOWN, 0);
+ return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, op1, hwIntrinsicID, CORINFO_TYPE_UNDEF, 0);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type,
@@ -19161,7 +19198,7 @@ GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type,
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
- return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, op1, op2, hwIntrinsicID, TYP_UNKNOWN, 0);
+ return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, op1, op2, hwIntrinsicID, CORINFO_TYPE_UNDEF, 0);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(
@@ -19172,7 +19209,7 @@ GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(
SetOpLclRelatedToSIMDIntrinsic(op3);
return new (this, GT_HWINTRINSIC)
- GenTreeHWIntrinsic(type, gtNewArgList(op1, op2, op3), hwIntrinsicID, TYP_UNKNOWN, 0);
+ GenTreeHWIntrinsic(type, gtNewArgList(op1, op2, op3), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0);
}
// Returns true for the HW Intrinsic instructions that have MemoryLoad semantics, false otherwise
diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h
index c9ddd2cc481..22f692d6c74 100644
--- a/src/coreclr/jit/gentree.h
+++ b/src/coreclr/jit/gentree.h
@@ -4850,14 +4850,15 @@ private:
ClassLayout* m_layout;
union {
- var_types gtAuxiliaryType; // For intrinsics than need another type (e.g. Avx2.Gather* or SIMD (by element))
- regNumberSmall gtOtherReg; // For intrinsics that return 2 registers
+ unsigned char gtAuxiliaryJitType; // For intrinsics than need another type (e.g. Avx2.Gather* or SIMD (by
+ // element))
+ regNumberSmall gtOtherReg; // For intrinsics that return 2 registers
};
-public:
- var_types gtSIMDBaseType; // SIMD vector base type
- unsigned char gtSIMDSize; // SIMD vector size in bytes, use 0 for scalar intrinsics
+ unsigned char gtSimdBaseJitType; // SIMD vector base JIT type
+ unsigned char gtSimdSize; // SIMD vector size in bytes, use 0 for scalar intrinsics
+public:
#if defined(FEATURE_SIMD)
union {
SIMDIntrinsicID gtSIMDIntrinsicID; // operation Id
@@ -4882,34 +4883,64 @@ public:
{
return (regNumber)gtOtherReg;
}
+
void SetOtherReg(regNumber reg)
{
gtOtherReg = (regNumberSmall)reg;
assert(gtOtherReg == reg);
}
- var_types GetAuxiliaryType() const
+ CorInfoType GetAuxiliaryJitType() const
{
- return gtAuxiliaryType;
+ return (CorInfoType)gtAuxiliaryJitType;
}
- void SetAuxiliaryType(var_types type)
+ void SetAuxiliaryJitType(CorInfoType auxiliaryJitType)
{
- gtAuxiliaryType = type;
+ gtAuxiliaryJitType = (unsigned char)auxiliaryJitType;
+ assert(gtAuxiliaryJitType == auxiliaryJitType);
}
- GenTreeJitIntrinsic(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2, var_types baseType, unsigned size)
+ var_types GetAuxiliaryType() const;
+
+ CorInfoType GetSimdBaseJitType() const
+ {
+ return (CorInfoType)gtSimdBaseJitType;
+ }
+
+ void SetSimdBaseJitType(CorInfoType simdBaseJitType)
+ {
+ gtSimdBaseJitType = (unsigned char)simdBaseJitType;
+ assert(gtSimdBaseJitType == simdBaseJitType);
+ }
+
+ var_types GetSimdBaseType() const;
+
+ unsigned char GetSimdSize() const
+ {
+ return gtSimdSize;
+ }
+
+ void SetSimdSize(unsigned simdSize)
+ {
+ gtSimdSize = (unsigned char)simdSize;
+ assert(gtSimdSize == simdSize);
+ }
+
+ GenTreeJitIntrinsic(
+ genTreeOps oper, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize)
: GenTreeOp(oper, type, op1, op2)
- , gtSIMDBaseType(baseType)
- , gtSIMDSize((unsigned char)size)
+ , gtSimdBaseJitType((unsigned char)simdBaseJitType)
+ , gtSimdSize((unsigned char)simdSize)
, gtHWIntrinsicId(NI_Illegal)
{
- assert(gtSIMDSize == size);
+ assert(gtSimdBaseJitType == simdBaseJitType);
+ assert(gtSimdSize == simdSize);
}
bool isSIMD() const
{
- return gtSIMDSize != 0;
+ return gtSimdSize != 0;
}
#if DEBUGGABLE_GENTREE
@@ -4925,15 +4956,20 @@ public:
struct GenTreeSIMD : public GenTreeJitIntrinsic
{
- GenTreeSIMD(var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, var_types baseType, unsigned size)
- : GenTreeJitIntrinsic(GT_SIMD, type, op1, nullptr, baseType, size)
+ GenTreeSIMD(
+ var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize)
+ : GenTreeJitIntrinsic(GT_SIMD, type, op1, nullptr, simdBaseJitType, simdSize)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
- GenTreeSIMD(
- var_types type, GenTree* op1, GenTree* op2, SIMDIntrinsicID simdIntrinsicID, var_types baseType, unsigned size)
- : GenTreeJitIntrinsic(GT_SIMD, type, op1, op2, baseType, size)
+ GenTreeSIMD(var_types type,
+ GenTree* op1,
+ GenTree* op2,
+ SIMDIntrinsicID simdIntrinsicID,
+ CorInfoType simdBaseJitType,
+ unsigned simdSize)
+ : GenTreeJitIntrinsic(GT_SIMD, type, op1, op2, simdBaseJitType, simdSize)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
@@ -4952,14 +4988,15 @@ struct GenTreeSIMD : public GenTreeJitIntrinsic
#ifdef FEATURE_HW_INTRINSICS
struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic
{
- GenTreeHWIntrinsic(var_types type, NamedIntrinsic hwIntrinsicID, var_types baseType, unsigned size)
- : GenTreeJitIntrinsic(GT_HWINTRINSIC, type, nullptr, nullptr, baseType, size)
+ GenTreeHWIntrinsic(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize)
+ : GenTreeJitIntrinsic(GT_HWINTRINSIC, type, nullptr, nullptr, simdBaseJitType, simdSize)
{
gtHWIntrinsicId = hwIntrinsicID;
}
- GenTreeHWIntrinsic(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, var_types baseType, unsigned size)
- : GenTreeJitIntrinsic(GT_HWINTRINSIC, type, op1, nullptr, baseType, size)
+ GenTreeHWIntrinsic(
+ var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize)
+ : GenTreeJitIntrinsic(GT_HWINTRINSIC, type, op1, nullptr, simdBaseJitType, simdSize)
{
gtHWIntrinsicId = hwIntrinsicID;
if (OperIsMemoryStore())
@@ -4968,9 +5005,13 @@ struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic
}
}
- GenTreeHWIntrinsic(
- var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, var_types baseType, unsigned size)
- : GenTreeJitIntrinsic(GT_HWINTRINSIC, type, op1, op2, baseType, size)
+ GenTreeHWIntrinsic(var_types type,
+ GenTree* op1,
+ GenTree* op2,
+ NamedIntrinsic hwIntrinsicID,
+ CorInfoType simdBaseJitType,
+ unsigned simdSize)
+ : GenTreeJitIntrinsic(GT_HWINTRINSIC, type, op1, op2, simdBaseJitType, simdSize)
{
gtHWIntrinsicId = hwIntrinsicID;
if (OperIsMemoryStore())
@@ -6960,7 +7001,7 @@ inline bool GenTree::IsIntegralConstVector(ssize_t constVal)
if ((gtOper == GT_SIMD) && (AsSIMD()->gtSIMDIntrinsicID == SIMDIntrinsicInit) &&
gtGetOp1()->IsIntegralConst(constVal))
{
- assert(varTypeIsIntegral(AsSIMD()->gtSIMDBaseType));
+ assert(varTypeIsIntegral(AsSIMD()->GetSimdBaseType()));
assert(gtGetOp2IfPresent() == nullptr);
return true;
}
@@ -6971,7 +7012,7 @@ inline bool GenTree::IsIntegralConstVector(ssize_t constVal)
{
GenTreeHWIntrinsic* node = AsHWIntrinsic();
- if (!varTypeIsIntegral(node->gtSIMDBaseType))
+ if (!varTypeIsIntegral(node->GetSimdBaseType()))
{
// Can't be an integral constant
return false;
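
GetSimdBaseType() and GetAuxiliaryType() are only declared in the header above; their bodies are not part of this section. A hedged sketch of what the out-of-line accessor plausibly looks like, assuming it maps the stored JIT type through JitType2PreciseVarType (the conversion the importer uses elsewhere in this patch) and treats CORINFO_TYPE_UNDEF as TYP_UNKNOWN:

    // Sketch only; the real definition lives outside the hunks shown here.
    var_types GenTreeJitIntrinsic::GetSimdBaseType() const
    {
        CorInfoType simdBaseJitType = GetSimdBaseJitType();

        if (simdBaseJitType == CORINFO_TYPE_UNDEF)
        {
            return TYP_UNKNOWN;
        }
        return JitType2PreciseVarType(simdBaseJitType);
    }
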
diff --git a/src/coreclr/jit/gschecks.cpp b/src/coreclr/jit/gschecks.cpp
index 9d841c571af..9bf4207eb0b 100644
--- a/src/coreclr/jit/gschecks.cpp
+++ b/src/coreclr/jit/gschecks.cpp
@@ -396,7 +396,8 @@ void Compiler::gsParamsToShadows()
shadowVarDsc->lvUsedInSIMDIntrinsic = varDsc->lvUsedInSIMDIntrinsic;
if (varDsc->lvSIMDType)
{
- shadowVarDsc->lvBaseType = varDsc->lvBaseType;
+ CorInfoType simdBaseJitType = varDsc->GetSimdBaseJitType();
+ shadowVarDsc->SetSimdBaseJitType(simdBaseJitType);
}
#endif
shadowVarDsc->lvRegStruct = varDsc->lvRegStruct;
diff --git a/src/coreclr/jit/hwintrinsic.cpp b/src/coreclr/jit/hwintrinsic.cpp
index dee63b2c99d..c3c9889c199 100644
--- a/src/coreclr/jit/hwintrinsic.cpp
+++ b/src/coreclr/jit/hwintrinsic.cpp
@@ -41,22 +41,22 @@ const HWIntrinsicInfo& HWIntrinsicInfo::lookup(NamedIntrinsic id)
}
//------------------------------------------------------------------------
-// getBaseTypeFromArgIfNeeded: Get baseType of intrinsic from 1st or 2nd argument depending on the flag
+// getBaseJitTypeFromArgIfNeeded: Get simdBaseJitType of intrinsic from 1st or 2nd argument depending on the flag
//
// Arguments:
-// intrinsic -- id of the intrinsic function.
-// clsHnd -- class handle containing the intrinsic function.
-// method -- method handle of the intrinsic function.
-// sig -- signature of the intrinsic call.
-// baseType -- Predetermined baseType, could be TYP_UNKNOWN
+// intrinsic -- id of the intrinsic function.
+// clsHnd -- class handle containing the intrinsic function.
+// method -- method handle of the intrinsic function.
+// sig -- signature of the intrinsic call.
+// simdBaseJitType -- Predetermined simdBaseJitType, could be CORINFO_TYPE_UNDEF
//
// Return Value:
//    The base JIT type of the intrinsic if it can be fetched from the 1st or 2nd argument; otherwise, the passed-in
//    simdBaseJitType is returned unmodified.
//
-var_types Compiler::getBaseTypeFromArgIfNeeded(NamedIntrinsic intrinsic,
- CORINFO_CLASS_HANDLE clsHnd,
- CORINFO_SIG_INFO* sig,
- var_types baseType)
+CorInfoType Compiler::getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic,
+ CORINFO_CLASS_HANDLE clsHnd,
+ CORINFO_SIG_INFO* sig,
+ CorInfoType simdBaseJitType)
{
if (HWIntrinsicInfo::BaseTypeFromSecondArg(intrinsic) || HWIntrinsicInfo::BaseTypeFromFirstArg(intrinsic))
{
@@ -68,27 +68,25 @@ var_types Compiler::getBaseTypeFromArgIfNeeded(NamedIntrinsic intrinsic,
}
CORINFO_CLASS_HANDLE argClass = info.compCompHnd->getArgClass(sig, arg);
- baseType = getBaseTypeAndSizeOfSIMDType(argClass);
+ simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(argClass);
- if (baseType == TYP_UNKNOWN) // the argument is not a vector
+ if (simdBaseJitType == CORINFO_TYPE_UNDEF) // the argument is not a vector
{
CORINFO_CLASS_HANDLE tmpClass;
- CorInfoType corInfoType = strip(info.compCompHnd->getArgType(sig, arg, &tmpClass));
+ simdBaseJitType = strip(info.compCompHnd->getArgType(sig, arg, &tmpClass));
- if (corInfoType == CORINFO_TYPE_PTR)
+ if (simdBaseJitType == CORINFO_TYPE_PTR)
{
- corInfoType = info.compCompHnd->getChildType(argClass, &tmpClass);
+ simdBaseJitType = info.compCompHnd->getChildType(argClass, &tmpClass);
}
-
- baseType = JITtype2varType(corInfoType);
}
- assert(baseType != TYP_UNKNOWN);
+ assert(simdBaseJitType != CORINFO_TYPE_UNDEF);
}
- return baseType;
+ return simdBaseJitType;
}
-CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleForHWSIMD(var_types simdType, var_types simdBaseType)
+CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType)
{
if (m_simdHandleCache == nullptr)
{
@@ -96,28 +94,32 @@ CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleForHWSIMD(var_types simdType, va
}
if (simdType == TYP_SIMD16)
{
- switch (simdBaseType)
+ switch (simdBaseJitType)
{
- case TYP_FLOAT:
+ case CORINFO_TYPE_FLOAT:
return m_simdHandleCache->Vector128FloatHandle;
- case TYP_DOUBLE:
+ case CORINFO_TYPE_DOUBLE:
return m_simdHandleCache->Vector128DoubleHandle;
- case TYP_INT:
+ case CORINFO_TYPE_INT:
return m_simdHandleCache->Vector128IntHandle;
- case TYP_USHORT:
+ case CORINFO_TYPE_USHORT:
return m_simdHandleCache->Vector128UShortHandle;
- case TYP_UBYTE:
+ case CORINFO_TYPE_UBYTE:
return m_simdHandleCache->Vector128UByteHandle;
- case TYP_SHORT:
+ case CORINFO_TYPE_SHORT:
return m_simdHandleCache->Vector128ShortHandle;
- case TYP_BYTE:
+ case CORINFO_TYPE_BYTE:
return m_simdHandleCache->Vector128ByteHandle;
- case TYP_LONG:
+ case CORINFO_TYPE_LONG:
return m_simdHandleCache->Vector128LongHandle;
- case TYP_UINT:
+ case CORINFO_TYPE_UINT:
return m_simdHandleCache->Vector128UIntHandle;
- case TYP_ULONG:
+ case CORINFO_TYPE_ULONG:
return m_simdHandleCache->Vector128ULongHandle;
+ case CORINFO_TYPE_NATIVEINT:
+ break;
+ case CORINFO_TYPE_NATIVEUINT:
+ break;
default:
assert(!"Didn't find a class handle for simdType");
}
@@ -125,28 +127,32 @@ CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleForHWSIMD(var_types simdType, va
#ifdef TARGET_XARCH
else if (simdType == TYP_SIMD32)
{
- switch (simdBaseType)
+ switch (simdBaseJitType)
{
- case TYP_FLOAT:
+ case CORINFO_TYPE_FLOAT:
return m_simdHandleCache->Vector256FloatHandle;
- case TYP_DOUBLE:
+ case CORINFO_TYPE_DOUBLE:
return m_simdHandleCache->Vector256DoubleHandle;
- case TYP_INT:
+ case CORINFO_TYPE_INT:
return m_simdHandleCache->Vector256IntHandle;
- case TYP_USHORT:
+ case CORINFO_TYPE_USHORT:
return m_simdHandleCache->Vector256UShortHandle;
- case TYP_UBYTE:
+ case CORINFO_TYPE_UBYTE:
return m_simdHandleCache->Vector256UByteHandle;
- case TYP_SHORT:
+ case CORINFO_TYPE_SHORT:
return m_simdHandleCache->Vector256ShortHandle;
- case TYP_BYTE:
+ case CORINFO_TYPE_BYTE:
return m_simdHandleCache->Vector256ByteHandle;
- case TYP_LONG:
+ case CORINFO_TYPE_LONG:
return m_simdHandleCache->Vector256LongHandle;
- case TYP_UINT:
+ case CORINFO_TYPE_UINT:
return m_simdHandleCache->Vector256UIntHandle;
- case TYP_ULONG:
+ case CORINFO_TYPE_ULONG:
return m_simdHandleCache->Vector256ULongHandle;
+ case CORINFO_TYPE_NATIVEINT:
+ break;
+ case CORINFO_TYPE_NATIVEUINT:
+ break;
default:
assert(!"Didn't find a class handle for simdType");
}
@@ -155,28 +161,32 @@ CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleForHWSIMD(var_types simdType, va
#ifdef TARGET_ARM64
else if (simdType == TYP_SIMD8)
{
- switch (simdBaseType)
+ switch (simdBaseJitType)
{
- case TYP_FLOAT:
+ case CORINFO_TYPE_FLOAT:
return m_simdHandleCache->Vector64FloatHandle;
- case TYP_DOUBLE:
+ case CORINFO_TYPE_DOUBLE:
return m_simdHandleCache->Vector64DoubleHandle;
- case TYP_INT:
+ case CORINFO_TYPE_INT:
return m_simdHandleCache->Vector64IntHandle;
- case TYP_USHORT:
+ case CORINFO_TYPE_USHORT:
return m_simdHandleCache->Vector64UShortHandle;
- case TYP_UBYTE:
+ case CORINFO_TYPE_UBYTE:
return m_simdHandleCache->Vector64UByteHandle;
- case TYP_SHORT:
+ case CORINFO_TYPE_SHORT:
return m_simdHandleCache->Vector64ShortHandle;
- case TYP_BYTE:
+ case CORINFO_TYPE_BYTE:
return m_simdHandleCache->Vector64ByteHandle;
- case TYP_UINT:
+ case CORINFO_TYPE_UINT:
return m_simdHandleCache->Vector64UIntHandle;
- case TYP_LONG:
+ case CORINFO_TYPE_LONG:
return m_simdHandleCache->Vector64LongHandle;
- case TYP_ULONG:
+ case CORINFO_TYPE_ULONG:
return m_simdHandleCache->Vector64ULongHandle;
+ case CORINFO_TYPE_NATIVEINT:
+ break;
+ case CORINFO_TYPE_NATIVEUINT:
+ break;
default:
assert(!"Didn't find a class handle for simdType");
}
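
Note that the new CORINFO_TYPE_NATIVEINT and CORINFO_TYPE_NATIVEUINT cases break out of each switch without hitting the assert, so no cached Vector64/128/256 handle is returned for nint/nuint. A hedged usage sketch, assuming the function falls through to NO_CLASS_HANDLE in that case (the fall-through return itself is outside the hunks shown):

    // Callers must tolerate an empty result for the native-integer element types.
    CORINFO_CLASS_HANDLE hnd = gtGetStructHandleForHWSIMD(TYP_SIMD16, CORINFO_TYPE_NATIVEINT);

    if (hnd == NO_CLASS_HANDLE)
    {
        // No cached Vector128<nint> handle; fall back to the handle from the signature instead.
    }
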
@@ -351,8 +361,8 @@ unsigned HWIntrinsicInfo::lookupSimdSize(Compiler* comp, NamedIntrinsic id, CORI
typeHnd = sig->retTypeSigClass;
}
- var_types baseType = comp->getBaseTypeAndSizeOfSIMDType(typeHnd, &simdSize);
- assert((simdSize > 0) && (baseType != TYP_UNKNOWN));
+ CorInfoType simdBaseJitType = comp->getBaseJitTypeAndSizeOfSIMDType(typeHnd, &simdSize);
+ assert((simdSize > 0) && (simdBaseJitType != CORINFO_TYPE_UNDEF));
return simdSize;
}
@@ -504,8 +514,8 @@ GenTree* Compiler::getArgForHWIntrinsic(var_types argType,
if (!varTypeIsSIMD(argType))
{
unsigned int argSizeBytes;
- var_types base = getBaseTypeAndSizeOfSIMDType(argClass, &argSizeBytes);
- argType = getSIMDTypeForSize(argSizeBytes);
+ (void)getBaseJitTypeAndSizeOfSIMDType(argClass, &argSizeBytes);
+ argType = getSIMDTypeForSize(argSizeBytes);
}
assert(varTypeIsSIMD(argType));
@@ -644,16 +654,24 @@ static bool impIsTableDrivenHWIntrinsic(NamedIntrinsic intrinsicId, HWIntrinsicC
//
// Arguments:
// intrinsicId - HW intrinsic id
-// baseType - Base type of the intrinsic.
+// baseJitType - Base JIT type of the intrinsic.
//
// Return Value:
//    returns true if the baseJitType is supported for the given intrinsic.
//
-static bool isSupportedBaseType(NamedIntrinsic intrinsic, var_types baseType)
+static bool isSupportedBaseType(NamedIntrinsic intrinsic, CorInfoType baseJitType)
{
+ if (baseJitType == CORINFO_TYPE_UNDEF)
+ {
+ return false;
+ }
+
+ var_types baseType = JitType2PreciseVarType(baseJitType);
+
// We don't actually check the intrinsic outside of the false case as we expect
// the exposed managed signatures are either generic and support all types
// or they are explicit and support the type indicated.
+
if (varTypeIsArithmetic(baseType))
{
return true;
@@ -722,28 +740,24 @@ struct HWIntrinsicSignatureReader final
if (sig->numArgs > 0)
{
- CorInfoType op1Type = strip(compHnd->getArgType(sig, args, &op1ClsHnd));
- op1VarType = JITtype2varType(op1Type);
+ op1JitType = strip(compHnd->getArgType(sig, args, &op1ClsHnd));
if (sig->numArgs > 1)
{
- args = compHnd->getArgNext(args);
- CorInfoType op2Type = strip(compHnd->getArgType(sig, args, &op2ClsHnd));
- op2VarType = JITtype2varType(op2Type);
+ args = compHnd->getArgNext(args);
+ op2JitType = strip(compHnd->getArgType(sig, args, &op2ClsHnd));
}
if (sig->numArgs > 2)
{
- args = compHnd->getArgNext(args);
- CorInfoType op3Type = strip(compHnd->getArgType(sig, args, &op3ClsHnd));
- op3VarType = JITtype2varType(op3Type);
+ args = compHnd->getArgNext(args);
+ op3JitType = strip(compHnd->getArgType(sig, args, &op3ClsHnd));
}
if (sig->numArgs > 3)
{
- args = compHnd->getArgNext(args);
- CorInfoType op4Type = strip(compHnd->getArgType(sig, args, &op4ClsHnd));
- op4VarType = JITtype2varType(op4Type);
+ args = compHnd->getArgNext(args);
+ op4JitType = strip(compHnd->getArgType(sig, args, &op4ClsHnd));
}
}
}
@@ -752,10 +766,30 @@ struct HWIntrinsicSignatureReader final
CORINFO_CLASS_HANDLE op2ClsHnd;
CORINFO_CLASS_HANDLE op3ClsHnd;
CORINFO_CLASS_HANDLE op4ClsHnd;
- var_types op1VarType;
- var_types op2VarType;
- var_types op3VarType;
- var_types op4VarType;
+ CorInfoType op1JitType;
+ CorInfoType op2JitType;
+ CorInfoType op3JitType;
+ CorInfoType op4JitType;
+
+ var_types GetOp1Type() const
+ {
+ return JITtype2varType(op1JitType);
+ }
+
+ var_types GetOp2Type() const
+ {
+ return JITtype2varType(op2JitType);
+ }
+
+ var_types GetOp3Type() const
+ {
+ return JITtype2varType(op3JitType);
+ }
+
+ var_types GetOp4Type() const
+ {
+ return JITtype2varType(op4JitType);
+ }
};
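
The signature reader now keeps the raw CorInfoType for each operand and converts to var_types only on demand. A short usage sketch mirroring the importer call sites later in this patch (retNode stands for a node already built by the importer):

    HWIntrinsicSignatureReader sigReader;
    sigReader.Read(info.compCompHnd, sig);

    // Pop args using the var_types view...
    GenTree* op2 = getArgForHWIntrinsic(sigReader.GetOp2Type(), sigReader.op2ClsHnd);
    GenTree* op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd);

    // ...and record the raw JIT type where the overload matters (e.g. the Crc32 intrinsics).
    retNode->AsHWIntrinsic()->SetSimdBaseJitType(sigReader.op2JitType);
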
//------------------------------------------------------------------------
@@ -777,50 +811,56 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
CORINFO_SIG_INFO* sig,
bool mustExpand)
{
- HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic);
- int numArgs = sig->numArgs;
- var_types retType = JITtype2varType(sig->retType);
- var_types baseType = TYP_UNKNOWN;
+ HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic);
+ int numArgs = sig->numArgs;
+ var_types retType = JITtype2varType(sig->retType);
+ CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
if ((retType == TYP_STRUCT) && featureSIMD)
{
unsigned int sizeBytes;
- baseType = getBaseTypeAndSizeOfSIMDType(sig->retTypeSigClass, &sizeBytes);
- retType = getSIMDTypeForSize(sizeBytes);
+ simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sig->retTypeSigClass, &sizeBytes);
+ retType = getSIMDTypeForSize(sizeBytes);
assert(sizeBytes != 0);
        // We want to return early here for cases where retType was TYP_STRUCT as per the method signature,
- // rather than deferring the decision after getting the baseType of arg.
- if (!isSupportedBaseType(intrinsic, baseType))
+ // rather than deferring the decision after getting the simdBaseJitType of arg.
+ if (!isSupportedBaseType(intrinsic, simdBaseJitType))
{
return nullptr;
}
}
- baseType = getBaseTypeFromArgIfNeeded(intrinsic, clsHnd, sig, baseType);
+ simdBaseJitType = getBaseJitTypeFromArgIfNeeded(intrinsic, clsHnd, sig, simdBaseJitType);
- if (baseType == TYP_UNKNOWN)
+ if (simdBaseJitType == CORINFO_TYPE_UNDEF)
{
if (category != HW_Category_Scalar)
{
unsigned int sizeBytes;
- baseType = getBaseTypeAndSizeOfSIMDType(clsHnd, &sizeBytes);
+ simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(clsHnd, &sizeBytes);
assert((category == HW_Category_Special) || (sizeBytes != 0));
}
else
{
- baseType = retType;
+ simdBaseJitType = sig->retType;
}
}
// Immediately return if the category is other than scalar/special and this is not a supported base type.
if ((category != HW_Category_Special) && (category != HW_Category_Scalar) &&
- !isSupportedBaseType(intrinsic, baseType))
+ !isSupportedBaseType(intrinsic, simdBaseJitType))
{
return nullptr;
}
- GenTree* immOp = nullptr;
+ var_types simdBaseType = TYP_UNKNOWN;
+ GenTree* immOp = nullptr;
+
+ if (simdBaseJitType != CORINFO_TYPE_UNDEF)
+ {
+ simdBaseType = JitType2PreciseVarType(simdBaseJitType);
+ }
HWIntrinsicSignatureReader sigReader;
sigReader.Read(info.compCompHnd, sig);
@@ -858,13 +898,14 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
if (!immOp2->IsCnsIntOrI())
{
assert(HWIntrinsicInfo::NoJmpTableImm(intrinsic));
- return impNonConstFallback(intrinsic, retType, baseType);
+ return impNonConstFallback(intrinsic, retType, simdBaseJitType);
}
- unsigned int otherSimdSize = 0;
- var_types otherBaseType = getBaseTypeAndSizeOfSIMDType(sigReader.op3ClsHnd, &otherSimdSize);
+ unsigned int otherSimdSize = 0;
+ CorInfoType otherBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sigReader.op3ClsHnd, &otherSimdSize);
+ var_types otherBaseType = JitType2PreciseVarType(otherBaseJitType);
- assert(otherBaseType == baseType);
+ assert(otherBaseJitType == simdBaseJitType);
int immLowerBound2 = 0;
int immUpperBound2 = 0;
@@ -902,34 +943,40 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
#elif defined(TARGET_ARM64)
if (category == HW_Category_SIMDByIndexedElement)
{
+ CorInfoType indexedElementBaseJitType;
var_types indexedElementBaseType;
unsigned int indexedElementSimdSize = 0;
if (numArgs == 3)
{
- indexedElementBaseType = getBaseTypeAndSizeOfSIMDType(sigReader.op2ClsHnd, &indexedElementSimdSize);
+ indexedElementBaseJitType =
+ getBaseJitTypeAndSizeOfSIMDType(sigReader.op2ClsHnd, &indexedElementSimdSize);
+ indexedElementBaseType = JitType2PreciseVarType(indexedElementBaseJitType);
}
else
{
assert(numArgs == 4);
- indexedElementBaseType = getBaseTypeAndSizeOfSIMDType(sigReader.op3ClsHnd, &indexedElementSimdSize);
+ indexedElementBaseJitType =
+ getBaseJitTypeAndSizeOfSIMDType(sigReader.op3ClsHnd, &indexedElementSimdSize);
+ indexedElementBaseType = JitType2PreciseVarType(indexedElementBaseJitType);
if (intrinsic == NI_Dp_DotProductBySelectedQuadruplet)
{
- assert(((baseType == TYP_INT) && (indexedElementBaseType == TYP_BYTE)) ||
- ((baseType == TYP_UINT) && (indexedElementBaseType == TYP_UBYTE)));
+ assert(((simdBaseType == TYP_INT) && (indexedElementBaseType == TYP_BYTE)) ||
+ ((simdBaseType == TYP_UINT) && (indexedElementBaseType == TYP_UBYTE)));
// The second source operand of sdot, udot instructions is an indexed 32-bit element.
- indexedElementBaseType = baseType;
+ indexedElementBaseJitType = simdBaseJitType;
+ indexedElementBaseType = simdBaseType;
}
}
- assert(indexedElementBaseType == baseType);
- HWIntrinsicInfo::lookupImmBounds(intrinsic, indexedElementSimdSize, baseType, &immLowerBound,
+ assert(indexedElementBaseType == simdBaseType);
+ HWIntrinsicInfo::lookupImmBounds(intrinsic, indexedElementSimdSize, simdBaseType, &immLowerBound,
&immUpperBound);
}
else
{
- HWIntrinsicInfo::lookupImmBounds(intrinsic, simdSize, baseType, &immLowerBound, &immUpperBound);
+ HWIntrinsicInfo::lookupImmBounds(intrinsic, simdSize, simdBaseType, &immLowerBound, &immUpperBound);
}
#endif
@@ -960,7 +1007,7 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
{
if (HWIntrinsicInfo::NoJmpTableImm(intrinsic))
{
- return impNonConstFallback(intrinsic, retType, baseType);
+ return impNonConstFallback(intrinsic, retType, simdBaseJitType);
}
else if (!mustExpand)
{
@@ -987,7 +1034,7 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
assert(numArgs >= 0);
- if (!isScalar && ((HWIntrinsicInfo::lookupIns(intrinsic, baseType) == INS_invalid) ||
+ if (!isScalar && ((HWIntrinsicInfo::lookupIns(intrinsic, simdBaseType) == INS_invalid) ||
((simdSize != 8) && (simdSize != 16) && (simdSize != 32))))
{
assert(!"Unexpected HW Intrinsic");
@@ -1004,11 +1051,11 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
{
case 0:
assert(!isScalar);
- retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, baseType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, simdBaseJitType, simdSize);
break;
case 1:
- op1 = getArgForHWIntrinsic(sigReader.op1VarType, sigReader.op1ClsHnd);
+ op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd);
if ((category == HW_Category_MemoryLoad) && op1->OperIs(GT_CAST))
{
@@ -1021,23 +1068,23 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
}
retNode = isScalar ? gtNewScalarHWIntrinsicNode(retType, op1, intrinsic)
- : gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, baseType, simdSize);
+ : gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize);
break;
case 2:
- op2 = getArgForHWIntrinsic(sigReader.op2VarType, sigReader.op2ClsHnd);
+ op2 = getArgForHWIntrinsic(sigReader.GetOp2Type(), sigReader.op2ClsHnd);
op2 = addRangeCheckIfNeeded(intrinsic, op2, mustExpand, immLowerBound, immUpperBound);
- op1 = getArgForHWIntrinsic(sigReader.op1VarType, sigReader.op1ClsHnd);
+ op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd);
retNode = isScalar ? gtNewScalarHWIntrinsicNode(retType, op1, op2, intrinsic)
- : gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, baseType, simdSize);
+ : gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize);
#ifdef TARGET_XARCH
if ((intrinsic == NI_SSE42_Crc32) || (intrinsic == NI_SSE42_X64_Crc32))
{
- // TODO-XArch-Cleanup: currently we use the BaseType to bring the type of the second argument
+ // TODO-XArch-Cleanup: currently we use the simdBaseJitType to bring the type of the second argument
// to the code generator. May encode the overload info in other way.
- retNode->AsHWIntrinsic()->gtSIMDBaseType = sigReader.op2VarType;
+ retNode->AsHWIntrinsic()->SetSimdBaseJitType(sigReader.op2JitType);
}
#elif defined(TARGET_ARM64)
switch (intrinsic)
@@ -1046,29 +1093,29 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
case NI_Crc32_ComputeCrc32C:
case NI_Crc32_Arm64_ComputeCrc32:
case NI_Crc32_Arm64_ComputeCrc32C:
- retNode->AsHWIntrinsic()->gtSIMDBaseType = sigReader.op2VarType;
+ retNode->AsHWIntrinsic()->SetSimdBaseJitType(sigReader.op2JitType);
break;
case NI_AdvSimd_AddWideningUpper:
case NI_AdvSimd_SubtractWideningUpper:
assert(varTypeIsSIMD(op1->TypeGet()));
- retNode->AsHWIntrinsic()->SetAuxiliaryType(getBaseTypeOfSIMDType(sigReader.op1ClsHnd));
+ retNode->AsHWIntrinsic()->SetAuxiliaryJitType(getBaseJitTypeOfSIMDType(sigReader.op1ClsHnd));
break;
case NI_AdvSimd_Arm64_AddSaturateScalar:
assert(varTypeIsSIMD(op2->TypeGet()));
- retNode->AsHWIntrinsic()->SetAuxiliaryType(getBaseTypeOfSIMDType(sigReader.op2ClsHnd));
+ retNode->AsHWIntrinsic()->SetAuxiliaryJitType(getBaseJitTypeOfSIMDType(sigReader.op2ClsHnd));
break;
case NI_ArmBase_Arm64_MultiplyHigh:
if (sig->retType == CORINFO_TYPE_ULONG)
{
- retNode->AsHWIntrinsic()->gtSIMDBaseType = TYP_ULONG;
+ retNode->AsHWIntrinsic()->SetSimdBaseJitType(CORINFO_TYPE_ULONG);
}
else
{
assert(sig->retType == CORINFO_TYPE_LONG);
- retNode->AsHWIntrinsic()->gtSIMDBaseType = TYP_LONG;
+ retNode->AsHWIntrinsic()->SetSimdBaseJitType(CORINFO_TYPE_LONG);
}
break;
@@ -1079,9 +1126,9 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
break;
case 3:
- op3 = getArgForHWIntrinsic(sigReader.op3VarType, sigReader.op3ClsHnd);
- op2 = getArgForHWIntrinsic(sigReader.op2VarType, sigReader.op2ClsHnd);
- op1 = getArgForHWIntrinsic(sigReader.op1VarType, sigReader.op1ClsHnd);
+ op3 = getArgForHWIntrinsic(sigReader.GetOp3Type(), sigReader.op3ClsHnd);
+ op2 = getArgForHWIntrinsic(sigReader.GetOp2Type(), sigReader.op2ClsHnd);
+ op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd);
#ifdef TARGET_ARM64
if (intrinsic == NI_AdvSimd_LoadAndInsertScalar)
@@ -1108,40 +1155,29 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
op3 = addRangeCheckIfNeeded(intrinsic, op3, mustExpand, immLowerBound, immUpperBound);
}
- retNode = isScalar ? gtNewScalarHWIntrinsicNode(retType, op1, op2, op3, intrinsic)
- : gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, baseType, simdSize);
+ retNode = isScalar
+ ? gtNewScalarHWIntrinsicNode(retType, op1, op2, op3, intrinsic)
+ : gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, intrinsic, simdBaseJitType, simdSize);
#ifdef TARGET_XARCH
if ((intrinsic == NI_AVX2_GatherVector128) || (intrinsic == NI_AVX2_GatherVector256))
{
assert(varTypeIsSIMD(op2->TypeGet()));
- retNode->AsHWIntrinsic()->SetAuxiliaryType(getBaseTypeOfSIMDType(sigReader.op2ClsHnd));
- }
-#elif defined(TARGET_ARM64)
- if (category == HW_Category_SIMDByIndexedElement)
- {
- assert(varTypeIsSIMD(op2->TypeGet()));
- retNode->AsHWIntrinsic()->SetAuxiliaryType(op2->TypeGet());
+ retNode->AsHWIntrinsic()->SetAuxiliaryJitType(getBaseJitTypeOfSIMDType(sigReader.op2ClsHnd));
}
#endif
break;
#ifdef TARGET_ARM64
case 4:
- op4 = getArgForHWIntrinsic(sigReader.op4VarType, sigReader.op4ClsHnd);
+ op4 = getArgForHWIntrinsic(sigReader.GetOp4Type(), sigReader.op4ClsHnd);
op4 = addRangeCheckIfNeeded(intrinsic, op4, mustExpand, immLowerBound, immUpperBound);
- op3 = getArgForHWIntrinsic(sigReader.op3VarType, sigReader.op3ClsHnd);
- op2 = getArgForHWIntrinsic(sigReader.op2VarType, sigReader.op2ClsHnd);
- op1 = getArgForHWIntrinsic(sigReader.op1VarType, sigReader.op1ClsHnd);
+ op3 = getArgForHWIntrinsic(sigReader.GetOp3Type(), sigReader.op3ClsHnd);
+ op2 = getArgForHWIntrinsic(sigReader.GetOp2Type(), sigReader.op2ClsHnd);
+ op1 = getArgForHWIntrinsic(sigReader.GetOp1Type(), sigReader.op1ClsHnd);
assert(!isScalar);
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, op4, intrinsic, baseType, simdSize);
-
- if (category == HW_Category_SIMDByIndexedElement)
- {
- assert(varTypeIsSIMD(op3->TypeGet()));
- retNode->AsHWIntrinsic()->SetAuxiliaryType(op3->TypeGet());
- }
+ retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, op3, op4, intrinsic, simdBaseJitType, simdSize);
break;
#endif
@@ -1168,7 +1204,7 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
return retNode;
}
- return impSpecialIntrinsic(intrinsic, clsHnd, method, sig, baseType, retType, simdSize);
+ return impSpecialIntrinsic(intrinsic, clsHnd, method, sig, simdBaseJitType, retType, simdSize);
}
#endif // FEATURE_HW_INTRINSICS
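
Condensing the importer changes above: impHWIntrinsic now resolves a CorInfoType first and only derives a var_types view once the JIT type is known. A sketch of that resolution order, built from the calls shown in the hunks above (sizeBytes and category are the locals of that function):

    CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;

    if ((retType == TYP_STRUCT) && featureSIMD)
    {
        simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sig->retTypeSigClass, &sizeBytes);
    }

    simdBaseJitType = getBaseJitTypeFromArgIfNeeded(intrinsic, clsHnd, sig, simdBaseJitType);

    if (simdBaseJitType == CORINFO_TYPE_UNDEF)
    {
        simdBaseJitType = (category != HW_Category_Scalar)
                              ? getBaseJitTypeAndSizeOfSIMDType(clsHnd, &sizeBytes)
                              : sig->retType;
    }

    var_types simdBaseType =
        (simdBaseJitType != CORINFO_TYPE_UNDEF) ? JitType2PreciseVarType(simdBaseJitType) : TYP_UNKNOWN;
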
diff --git a/src/coreclr/jit/hwintrinsic.h b/src/coreclr/jit/hwintrinsic.h
index ca527f14076..0b35ca719b6 100644
--- a/src/coreclr/jit/hwintrinsic.h
+++ b/src/coreclr/jit/hwintrinsic.h
@@ -800,7 +800,7 @@ private:
void InitializeBaseType(const GenTreeHWIntrinsic* node)
{
- baseType = node->gtSIMDBaseType;
+ baseType = node->GetSimdBaseType();
if (baseType == TYP_UNKNOWN)
{
diff --git a/src/coreclr/jit/hwintrinsicarm64.cpp b/src/coreclr/jit/hwintrinsicarm64.cpp
index 0d0021b0b8c..4f57bc38ace 100644
--- a/src/coreclr/jit/hwintrinsicarm64.cpp
+++ b/src/coreclr/jit/hwintrinsicarm64.cpp
@@ -276,14 +276,14 @@ void HWIntrinsicInfo::lookupImmBounds(
// impNonConstFallback: generate alternate code when the imm-arg is not a compile-time constant
//
// Arguments:
-// intrinsic -- intrinsic ID
-// simdType -- Vector type
-// baseType -- base type of the Vector64/128<T>
+// intrinsic -- intrinsic ID
+// simdType -- Vector type
+// simdBaseJitType -- base JIT type of the Vector64/128<T>
//
// Return Value:
// return the IR of semantic alternative on non-const imm-arg
//
-GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, var_types baseType)
+GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, CorInfoType simdBaseJitType)
{
return nullptr;
}
@@ -292,12 +292,12 @@ GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdT
// impSpecialIntrinsic: Import a hardware intrinsic that requires special handling as a GT_HWINTRINSIC node if possible
//
// Arguments:
-// intrinsic -- id of the intrinsic function.
-// clsHnd -- class handle containing the intrinsic function.
-// method -- method handle of the intrinsic function.
-// sig -- signature of the intrinsic call.
-// baseType -- generic argument of the intrinsic.
-// retType -- return type of the intrinsic.
+// intrinsic -- id of the intrinsic function.
+// clsHnd -- class handle containing the intrinsic function.
+// method -- method handle of the intrinsic function.
+// sig -- signature of the intrinsic call.
+// simdBaseJitType -- generic argument of the intrinsic.
+// retType -- return type of the intrinsic.
//
// Return Value:
// The GT_HWINTRINSIC node, or nullptr if not a supported intrinsic
@@ -306,15 +306,16 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
- var_types baseType,
+ CorInfoType simdBaseJitType,
var_types retType,
unsigned simdSize)
{
- HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic);
- int numArgs = sig->numArgs;
+ HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsic);
+ int numArgs = sig->numArgs;
+ var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(numArgs >= 0);
- assert(varTypeIsArithmetic(baseType));
+ assert(varTypeIsArithmetic(simdBaseType));
GenTree* retNode = nullptr;
GenTree* op1 = nullptr;
@@ -380,13 +381,13 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
if (sig->numArgs == 1)
{
op1 = impPopStack().val;
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, baseType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize);
}
else if (sig->numArgs == 2)
{
op2 = impPopStack().val;
op1 = impPopStack().val;
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, baseType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize);
}
else
{
@@ -400,7 +401,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
}
op1 = tmp;
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, baseType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize);
}
break;
}
@@ -411,7 +412,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
assert(!sig->hasThis());
assert(numArgs == 0);
- GenTreeIntCon* countNode = gtNewIconNode(getSIMDVectorLength(simdSize, baseType), TYP_INT);
+ GenTreeIntCon* countNode = gtNewIconNode(getSIMDVectorLength(simdSize, simdBaseType), TYP_INT);
countNode->gtFlags |= GTF_ICON_SIMD_COUNT;
retNode = countNode;
break;
@@ -425,7 +426,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
assert(!sig->hasThis());
assert(numArgs == 0);
- retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, baseType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, simdBaseJitType, simdSize);
break;
}
@@ -441,7 +442,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
}
ssize_t imm8 = indexOp->AsIntCon()->IconValue();
- ssize_t count = simdSize / genTypeSize(baseType);
+ ssize_t count = simdSize / genTypeSize(simdBaseType);
if (imm8 >= count || imm8 < 0)
{
@@ -453,7 +454,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
impPopStack(); // pop the indexOp that we already have.
GenTree* vectorOp = impSIMDPopStack(getSIMDTypeForSize(simdSize));
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_LONG:
case TYP_ULONG:
@@ -461,11 +462,12 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
if (simdSize == 16)
{
retNode = gtNewSimdHWIntrinsicNode(retType, vectorOp, gtNewIconNode(imm8), valueOp,
- NI_AdvSimd_Insert, baseType, simdSize);
+ NI_AdvSimd_Insert, simdBaseJitType, simdSize);
}
else
{
- retNode = gtNewSimdHWIntrinsicNode(retType, valueOp, NI_Vector64_Create, baseType, simdSize);
+ retNode =
+ gtNewSimdHWIntrinsicNode(retType, valueOp, NI_Vector64_Create, simdBaseJitType, simdSize);
}
break;
@@ -477,7 +479,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
case TYP_INT:
case TYP_UINT:
retNode = gtNewSimdHWIntrinsicNode(retType, vectorOp, gtNewIconNode(imm8), valueOp,
- NI_AdvSimd_Insert, baseType, simdSize);
+ NI_AdvSimd_Insert, simdBaseJitType, simdSize);
break;
default:
@@ -493,12 +495,12 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
// AdvSimd.ExtractVector128(vector, Vector128<T>.Zero, 8 / sizeof(T)).GetLower();
assert(numArgs == 1);
op1 = impPopStack().val;
- GenTree* zero = gtNewSimdHWIntrinsicNode(retType, NI_Vector128_get_Zero, baseType, simdSize);
- ssize_t index = 8 / genTypeSize(baseType);
+ GenTree* zero = gtNewSimdHWIntrinsicNode(retType, NI_Vector128_get_Zero, simdBaseJitType, simdSize);
+ ssize_t index = 8 / genTypeSize(simdBaseType);
retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, zero, gtNewIconNode(index), NI_AdvSimd_ExtractVector128,
- baseType, simdSize);
- retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD8, retNode, NI_Vector128_GetLower, baseType, 8);
+ simdBaseJitType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD8, retNode, NI_Vector128_GetLower, simdBaseJitType, 8);
break;
}
diff --git a/src/coreclr/jit/hwintrinsiccodegenarm64.cpp b/src/coreclr/jit/hwintrinsiccodegenarm64.cpp
index 5b2fd24a539..ae287737816 100644
--- a/src/coreclr/jit/hwintrinsiccodegenarm64.cpp
+++ b/src/coreclr/jit/hwintrinsiccodegenarm64.cpp
@@ -55,14 +55,28 @@ CodeGen::HWIntrinsicImmOpHelper::HWIntrinsicImmOpHelper(CodeGen* codeGen, GenTre
if (category == HW_Category_SIMDByIndexedElement)
{
- assert(varTypeIsSIMD(intrin->GetAuxiliaryType()));
- const unsigned int indexedElementSimdSize = genTypeSize(intrin->GetAuxiliaryType());
- HWIntrinsicInfo::lookupImmBounds(intrin->gtHWIntrinsicId, indexedElementSimdSize, intrin->gtSIMDBaseType,
+ const HWIntrinsic intrinInfo(intrin);
+ var_types indexedElementOpType;
+
+ if (intrinInfo.numOperands == 3)
+ {
+ indexedElementOpType = intrinInfo.op2->TypeGet();
+ }
+ else
+ {
+ assert(intrinInfo.numOperands == 4);
+ indexedElementOpType = intrinInfo.op3->TypeGet();
+ }
+
+ assert(varTypeIsSIMD(indexedElementOpType));
+
+ const unsigned int indexedElementSimdSize = genTypeSize(indexedElementOpType);
+ HWIntrinsicInfo::lookupImmBounds(intrin->gtHWIntrinsicId, indexedElementSimdSize, intrin->GetSimdBaseType(),
&immLowerBound, &immUpperBound);
}
else
{
- HWIntrinsicInfo::lookupImmBounds(intrin->gtHWIntrinsicId, intrin->gtSIMDSize, intrin->gtSIMDBaseType,
+ HWIntrinsicInfo::lookupImmBounds(intrin->gtHWIntrinsicId, intrin->GetSimdSize(), intrin->GetSimdBaseType(),
&immLowerBound, &immUpperBound);
}
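
For by-indexed-element intrinsics the immediate bounds are now driven by the size of the indexed vector operand (op2 for three-operand forms, op3 for four-operand forms) rather than by the old auxiliary type. A condensed sketch of the selection logic above:

    const HWIntrinsic intrinInfo(intrin);

    var_types indexedElementOpType =
        (intrinInfo.numOperands == 3) ? intrinInfo.op2->TypeGet() : intrinInfo.op3->TypeGet();

    HWIntrinsicInfo::lookupImmBounds(intrin->gtHWIntrinsicId, genTypeSize(indexedElementOpType),
                                     intrin->GetSimdBaseType(), &immLowerBound, &immUpperBound);
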
@@ -243,7 +257,7 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
}
else
{
- emitSize = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->gtSIMDSize));
+ emitSize = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->GetSimdSize()));
opt = genGetSimdInsOpt(emitSize, intrin.baseType);
}
@@ -395,38 +409,6 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
instruction ins = INS_invalid;
switch (intrin.id)
{
- case NI_Crc32_ComputeCrc32:
- if (intrin.baseType == TYP_INT)
- {
- ins = INS_crc32w;
- }
- else
- {
- ins = HWIntrinsicInfo::lookupIns(intrin.id, intrin.baseType);
- }
- break;
-
- case NI_Crc32_ComputeCrc32C:
- if (intrin.baseType == TYP_INT)
- {
- ins = INS_crc32cw;
- }
- else
- {
- ins = HWIntrinsicInfo::lookupIns(intrin.id, intrin.baseType);
- }
- break;
-
- case NI_Crc32_Arm64_ComputeCrc32:
- assert(intrin.baseType == TYP_LONG);
- ins = INS_crc32x;
- break;
-
- case NI_Crc32_Arm64_ComputeCrc32C:
- assert(intrin.baseType == TYP_LONG);
- ins = INS_crc32cx;
- break;
-
case NI_AdvSimd_AddWideningLower:
assert(varTypeIsIntegral(intrin.baseType));
if (intrin.op1->TypeGet() == TYP_SIMD8)
@@ -546,7 +528,7 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
{
HWIntrinsicImmOpHelper helper(this, intrin.op2, node);
- // Prior to codegen, the emitSize is based on node->gtSIMDSize which
+ // Prior to codegen, the emitSize is based on node->GetSimdSize() which
// tracks the size of the first operand and is used to tell if the index
// is in range. However, when actually emitting it needs to be the size
// of the return and the size of the operand is interpreted based on the
diff --git a/src/coreclr/jit/hwintrinsiccodegenxarch.cpp b/src/coreclr/jit/hwintrinsiccodegenxarch.cpp
index e5ae7fa13b4..42711acaed8 100644
--- a/src/coreclr/jit/hwintrinsiccodegenxarch.cpp
+++ b/src/coreclr/jit/hwintrinsiccodegenxarch.cpp
@@ -92,7 +92,7 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
GenTree* op1 = node->gtGetOp1();
GenTree* op2 = node->gtGetOp2();
regNumber targetReg = node->GetRegNum();
- var_types baseType = node->gtSIMDBaseType;
+ var_types baseType = node->GetSimdBaseType();
regNumber op1Reg = REG_NA;
regNumber op2Reg = REG_NA;
@@ -101,7 +101,7 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
assert(numArgs >= 0);
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
assert(ins != INS_invalid);
- emitAttr simdSize = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->gtSIMDSize));
+ emitAttr simdSize = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->GetSimdSize()));
assert(simdSize != 0);
switch (numArgs)
@@ -162,7 +162,7 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
regNumber regData = genConsumeReg(extract->gtGetOp1());
- ins = HWIntrinsicInfo::lookupIns(extract->gtHWIntrinsicId, extract->gtSIMDBaseType);
+ ins = HWIntrinsicInfo::lookupIns(extract->gtHWIntrinsicId, extract->GetSimdBaseType());
ival = static_cast<int>(extract->gtGetOp2()->AsIntCon()->IconValue());
GenTreeIndir indir = indirForm(TYP_SIMD16, op1);
@@ -550,7 +550,7 @@ void CodeGen::genHWIntrinsic_R_RM_I(GenTreeHWIntrinsic* node, instruction ins, i
{
regNumber targetReg = node->GetRegNum();
GenTree* op1 = node->gtGetOp1();
- emitAttr simdSize = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->gtSIMDSize));
+ emitAttr simdSize = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->GetSimdSize()));
// TODO-XArch-CQ: Commutative operations can have op1 be contained
// TODO-XArch-CQ: Non-VEX encoded instructions can have both ops contained
@@ -630,7 +630,7 @@ void CodeGen::genHWIntrinsic_R_R_RM_I(GenTreeHWIntrinsic* node, instruction ins,
regNumber targetReg = node->GetRegNum();
GenTree* op1 = node->gtGetOp1();
GenTree* op2 = node->gtGetOp2();
- emitAttr simdSize = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->gtSIMDSize));
+ emitAttr simdSize = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->GetSimdSize()));
emitter* emit = GetEmitter();
// TODO-XArch-CQ: Commutative operations can have op1 be contained
@@ -795,7 +795,7 @@ void CodeGen::genHWIntrinsic_R_R_RM_R(GenTreeHWIntrinsic* node, instruction ins)
GenTree* op1 = node->gtGetOp1();
GenTree* op2 = node->gtGetOp2();
GenTree* op3 = nullptr;
- emitAttr simdSize = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->gtSIMDSize));
+ emitAttr simdSize = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->GetSimdSize()));
emitter* emit = GetEmitter();
assert(op1->OperIsList());
@@ -1138,7 +1138,7 @@ void CodeGen::genBaseIntrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
regNumber targetReg = node->GetRegNum();
- var_types baseType = node->gtSIMDBaseType;
+ var_types baseType = node->GetSimdBaseType();
assert(compiler->compIsaSupportedDebugOnly(InstructionSet_SSE));
assert((baseType >= TYP_BYTE) && (baseType <= TYP_DOUBLE));
@@ -1151,7 +1151,7 @@ void CodeGen::genBaseIntrinsic(GenTreeHWIntrinsic* node)
assert(node->gtGetOp2() == nullptr);
emitter* emit = GetEmitter();
- emitAttr attr = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->gtSIMDSize));
+ emitAttr attr = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->GetSimdSize()));
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
switch (intrinsicId)
@@ -1328,7 +1328,7 @@ void CodeGen::genSSEIntrinsic(GenTreeHWIntrinsic* node)
GenTree* op2 = node->gtGetOp2();
regNumber targetReg = node->GetRegNum();
var_types targetType = node->TypeGet();
- var_types baseType = node->gtSIMDBaseType;
+ var_types baseType = node->GetSimdBaseType();
regNumber op1Reg = REG_NA;
emitter* emit = GetEmitter();
@@ -1368,7 +1368,7 @@ void CodeGen::genSSEIntrinsic(GenTreeHWIntrinsic* node)
// These do not support containment.
assert(!op1->isContained());
- instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, node->gtSIMDBaseType);
+ instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, node->GetSimdBaseType());
op1Reg = op1->GetRegNum();
emit->emitIns_AR(ins, emitTypeSize(baseType), op1Reg, 0);
break;
@@ -1404,7 +1404,7 @@ void CodeGen::genSSE2Intrinsic(GenTreeHWIntrinsic* node)
GenTree* op2 = node->gtGetOp2();
regNumber targetReg = node->GetRegNum();
var_types targetType = node->TypeGet();
- var_types baseType = node->gtSIMDBaseType;
+ var_types baseType = node->GetSimdBaseType();
regNumber op1Reg = REG_NA;
emitter* emit = GetEmitter();
@@ -1508,7 +1508,7 @@ void CodeGen::genSSE41Intrinsic(GenTreeHWIntrinsic* node)
GenTree* op1 = node->gtGetOp1();
GenTree* op2 = node->gtGetOp2();
regNumber targetReg = node->GetRegNum();
- var_types baseType = node->gtSIMDBaseType;
+ var_types baseType = node->GetSimdBaseType();
emitter* emit = GetEmitter();
@@ -1597,7 +1597,7 @@ void CodeGen::genSSE42Intrinsic(GenTreeHWIntrinsic* node)
regNumber targetReg = node->GetRegNum();
GenTree* op1 = node->gtGetOp1();
GenTree* op2 = node->gtGetOp2();
- var_types baseType = node->gtSIMDBaseType;
+ var_types baseType = node->GetSimdBaseType();
var_types targetType = node->TypeGet();
emitter* emit = GetEmitter();
@@ -1654,8 +1654,8 @@ void CodeGen::genSSE42Intrinsic(GenTreeHWIntrinsic* node)
void CodeGen::genAvxOrAvx2Intrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
- var_types baseType = node->gtSIMDBaseType;
- emitAttr attr = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->gtSIMDSize));
+ var_types baseType = node->GetSimdBaseType();
+ emitAttr attr = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->GetSimdSize()));
var_types targetType = node->TypeGet();
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
int numArgs = HWIntrinsicInfo::lookupNumArgs(node);
@@ -1967,8 +1967,8 @@ void CodeGen::genBMI1OrBMI2Intrinsic(GenTreeHWIntrinsic* node)
void CodeGen::genFMAIntrinsic(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
- var_types baseType = node->gtSIMDBaseType;
- emitAttr attr = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->gtSIMDSize));
+ var_types baseType = node->GetSimdBaseType();
+ emitAttr attr = emitActualTypeSize(Compiler::getSIMDTypeForSize(node->GetSimdSize()));
instruction ins = HWIntrinsicInfo::lookupIns(intrinsicId, baseType);
GenTree* op1 = node->gtGetOp1();
regNumber targetReg = node->GetRegNum();
diff --git a/src/coreclr/jit/hwintrinsicxarch.cpp b/src/coreclr/jit/hwintrinsicxarch.cpp
index 02e0e17548a..73a43ab0584 100644
--- a/src/coreclr/jit/hwintrinsicxarch.cpp
+++ b/src/coreclr/jit/hwintrinsicxarch.cpp
@@ -426,14 +426,14 @@ bool HWIntrinsicInfo::isScalarIsa(CORINFO_InstructionSet isa)
// not a compile-time constant
//
// Arguments:
-// intrinsic -- intrinsic ID
-// simdType -- Vector type
-// baseType -- base type of the Vector128/256<T>
+// intrinsic -- intrinsic ID
+// simdType -- Vector type
+// simdBaseJitType -- SIMD base JIT type of the Vector128/256<T>
//
// Return Value:
// return the IR of semantic alternative on non-const imm-arg
//
-GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, var_types baseType)
+GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, CorInfoType simdBaseJitType)
{
assert(HWIntrinsicInfo::NoJmpTableImm(intrinsic));
switch (intrinsic)
@@ -448,8 +448,8 @@ GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdT
GenTree* op2 = impPopStack().val;
GenTree* op1 = impSIMDPopStack(simdType);
GenTree* tmpOp =
- gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_SSE2_ConvertScalarToVector128Int32, TYP_INT, 16);
- return gtNewSimdHWIntrinsicNode(simdType, op1, tmpOp, intrinsic, baseType, genTypeSize(simdType));
+ gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_SSE2_ConvertScalarToVector128Int32, CORINFO_TYPE_INT, 16);
+ return gtNewSimdHWIntrinsicNode(simdType, op1, tmpOp, intrinsic, simdBaseJitType, genTypeSize(simdType));
}
default:
@@ -461,12 +461,12 @@ GenTree* Compiler::impNonConstFallback(NamedIntrinsic intrinsic, var_types simdT
// impSpecialIntrinsic: dispatch intrinsics to their own implementation
//
// Arguments:
-// intrinsic -- id of the intrinsic function.
-// clsHnd -- class handle containing the intrinsic function.
-// method -- method handle of the intrinsic function.
-// sig -- signature of the intrinsic call.
-// baseType -- generic argument of the intrinsic.
-// retType -- return type of the intrinsic.
+// intrinsic -- id of the intrinsic function.
+// clsHnd -- class handle containing the intrinsic function.
+// method -- method handle of the intrinsic function.
+// sig -- signature of the intrinsic call.
+// simdBaseJitType -- generic argument of the intrinsic.
+// retType -- return type of the intrinsic.
// Return Value:
// the expanded intrinsic.
//
@@ -474,7 +474,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
- var_types baseType,
+ CorInfoType simdBaseJitType,
var_types retType,
unsigned simdSize)
{
@@ -483,7 +483,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
{
case InstructionSet_Vector128:
case InstructionSet_Vector256:
- return impBaseIntrinsic(intrinsic, clsHnd, method, sig, baseType, retType, simdSize);
+ return impBaseIntrinsic(intrinsic, clsHnd, method, sig, simdBaseJitType, retType, simdSize);
case InstructionSet_SSE:
return impSSEIntrinsic(intrinsic, method, sig);
case InstructionSet_SSE2:
@@ -518,7 +518,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_METHOD_HANDLE method,
CORINFO_SIG_INFO* sig,
- var_types baseType,
+ CorInfoType simdBaseJitType,
var_types retType,
unsigned simdSize)
{
@@ -531,6 +531,8 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
return nullptr;
}
+ var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
+
switch (intrinsic)
{
case NI_Vector256_As:
@@ -585,7 +587,8 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
if (getSIMDVectorRegisterByteLength() == YMM_REGSIZE_BYTES)
{
// Vector<T> is TYP_SIMD32, so we should treat this as a call to Vector128.ToVector256
- return impBaseIntrinsic(NI_Vector128_ToVector256, clsHnd, method, sig, baseType, retType, simdSize);
+ return impBaseIntrinsic(NI_Vector128_ToVector256, clsHnd, method, sig, simdBaseJitType, retType,
+ simdSize);
}
assert(getSIMDVectorRegisterByteLength() == XMM_REGSIZE_BYTES);
@@ -627,10 +630,8 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
{
assert(sig->numArgs == 1);
assert(HWIntrinsicInfo::BaseTypeFromFirstArg(intrinsic));
-
- var_types baseTypeOfIntrinsic =
- getBaseTypeAndSizeOfSIMDType(info.compCompHnd->getArgClass(sig, sig->args), &simdSize);
- assert(baseType == baseTypeOfIntrinsic);
+ assert(simdBaseJitType ==
+ getBaseJitTypeAndSizeOfSIMDType(info.compCompHnd->getArgClass(sig, sig->args), &simdSize));
switch (getSIMDTypeForSize(simdSize))
{
@@ -658,7 +659,8 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
case TYP_SIMD32:
{
// Vector<T> is TYP_SIMD32, so we should treat this as a call to Vector256.GetLower
- return impBaseIntrinsic(NI_Vector256_GetLower, clsHnd, method, sig, baseType, retType, simdSize);
+ return impBaseIntrinsic(NI_Vector256_GetLower, clsHnd, method, sig, simdBaseJitType, retType,
+ simdSize);
}
default:
@@ -697,12 +699,14 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
if (intrinsic == NI_Vector256_AsVector)
{
- return impBaseIntrinsic(NI_Vector256_GetLower, clsHnd, method, sig, baseType, retType, simdSize);
+ return impBaseIntrinsic(NI_Vector256_GetLower, clsHnd, method, sig, simdBaseJitType, retType,
+ simdSize);
}
else
{
assert(intrinsic == NI_Vector256_AsVector256);
- return impBaseIntrinsic(NI_Vector128_ToVector256, clsHnd, method, sig, baseType, retType, 16);
+ return impBaseIntrinsic(NI_Vector128_ToVector256, clsHnd, method, sig, simdBaseJitType, retType,
+ 16);
}
}
@@ -714,7 +718,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
{
assert(sig->numArgs == 0);
- GenTreeIntCon* countNode = gtNewIconNode(getSIMDVectorLength(simdSize, baseType), TYP_INT);
+ GenTreeIntCon* countNode = gtNewIconNode(getSIMDVectorLength(simdSize, simdBaseType), TYP_INT);
countNode->gtFlags |= GTF_ICON_SIMD_COUNT;
retNode = countNode;
break;
@@ -724,7 +728,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
case NI_Vector256_Create:
{
#if defined(TARGET_X86)
- if (varTypeIsLong(baseType))
+ if (varTypeIsLong(simdBaseType))
{
// TODO-XARCH-CQ: It may be beneficial to emit the movq
// instruction, which takes a 64-bit memory address and
@@ -743,7 +747,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
break;
}
}
- else if (baseType == TYP_FLOAT)
+ else if (simdBaseType == TYP_FLOAT)
{
if (!compExactlyDependsOn(InstructionSet_SSE))
{
@@ -758,13 +762,13 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
if (sig->numArgs == 1)
{
op1 = impPopStack().val;
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, baseType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize);
}
else if (sig->numArgs == 2)
{
op2 = impPopStack().val;
op1 = impPopStack().val;
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, baseType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize);
}
else
{
@@ -778,7 +782,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
}
op1 = tmp;
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, baseType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize);
}
break;
}
@@ -788,7 +792,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
assert(sig->numArgs == 1);
#ifdef TARGET_X86
- if (varTypeIsLong(baseType))
+ if (varTypeIsLong(simdBaseType))
{
// TODO-XARCH-CQ: It may be beneficial to emit the movq
// instruction, which takes a 64-bit memory address and
@@ -798,10 +802,10 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
#endif // TARGET_X86
if (compExactlyDependsOn(InstructionSet_SSE2) ||
- (compExactlyDependsOn(InstructionSet_SSE) && (baseType == TYP_FLOAT)))
+ (compExactlyDependsOn(InstructionSet_SSE) && (simdBaseType == TYP_FLOAT)))
{
op1 = impPopStack().val;
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, baseType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize);
}
break;
}
@@ -812,7 +816,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
bool isSupported = false;
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
@@ -848,7 +852,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
if (isSupported)
{
op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize));
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, baseType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize);
}
break;
}
@@ -859,7 +863,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
bool isSupported = false;
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
@@ -896,7 +900,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
if (isSupported)
{
op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize));
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, baseType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize);
}
break;
}
@@ -910,7 +914,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
if (compExactlyDependsOn(InstructionSet_AVX))
{
op1 = impSIMDPopStack(getSIMDTypeForSize(simdSize));
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, baseType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize);
}
break;
}
@@ -922,7 +926,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
if (compExactlyDependsOn(InstructionSet_SSE))
{
- retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, baseType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, simdBaseJitType, simdSize);
}
break;
}
@@ -932,7 +936,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
assert(sig->numArgs == 1);
#ifdef TARGET_X86
- if (varTypeIsLong(baseType))
+ if (varTypeIsLong(simdBaseType))
{
// TODO-XARCH-CQ: It may be beneficial to emit the movq
// instruction, which takes a 64-bit memory address and
@@ -944,7 +948,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
if (compExactlyDependsOn(InstructionSet_AVX))
{
op1 = impPopStack().val;
- retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, baseType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize);
}
break;
}
@@ -956,7 +960,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
if (compExactlyDependsOn(InstructionSet_AVX))
{
- retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, baseType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, simdBaseJitType, simdSize);
}
break;
}
@@ -975,18 +979,19 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
{
assert(sig->numArgs == 3);
GenTree* indexOp = impStackTop(1).val;
- if (!compExactlyDependsOn(InstructionSet_SSE2) || !varTypeIsArithmetic(baseType) || !indexOp->OperIsConst())
+ if (!compExactlyDependsOn(InstructionSet_SSE2) || !varTypeIsArithmetic(simdBaseType) ||
+ !indexOp->OperIsConst())
{
// Using software fallback if
// 1. JIT/hardware don't support SSE2 instructions
- // 2. baseType is not a numeric type (throw execptions)
+            // 2. simdBaseType is not a numeric type (throws exceptions)
// 3. index is not a constant
return nullptr;
}
- switch (baseType)
+ switch (simdBaseType)
{
- // Using software fallback if baseType is not supported by hardware
+ // Using software fallback if simdBaseType is not supported by hardware
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
@@ -1018,7 +1023,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
ssize_t imm8 = indexOp->AsIntCon()->IconValue();
ssize_t cachedImm8 = imm8;
- ssize_t count = simdSize / genTypeSize(baseType);
+ ssize_t count = simdSize / genTypeSize(simdBaseType);
if (imm8 >= count || imm8 < 0)
{
@@ -1045,23 +1050,23 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
{
imm8 -= count / 2;
vectorOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, gtNewIconNode(1), NI_AVX_ExtractVector128,
- baseType, simdSize);
+ simdBaseJitType, simdSize);
}
else
{
- vectorOp =
- gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, NI_Vector256_GetLower, baseType, simdSize);
+ vectorOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, NI_Vector256_GetLower, simdBaseJitType,
+ simdSize);
}
}
GenTree* immNode = gtNewIconNode(imm8);
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_LONG:
case TYP_ULONG:
retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, valueOp, immNode, NI_SSE41_X64_Insert,
- baseType, 16);
+ simdBaseJitType, 16);
break;
case TYP_FLOAT:
@@ -1075,9 +1080,9 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
// =>
// movss xmm0, xmm1 (xmm0 = vector, xmm1 = value)
valueOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, valueOp, NI_Vector128_CreateScalarUnsafe,
- TYP_FLOAT, 16);
+ CORINFO_TYPE_FLOAT, 16);
retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, valueOp, NI_SSE_MoveScalar,
- TYP_FLOAT, 16);
+ CORINFO_TYPE_FLOAT, 16);
}
else if (imm8 == 1)
{
@@ -1085,15 +1090,16 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
// =>
// shufps xmm1, xmm0, 0 (xmm0 = vector, xmm1 = value)
// shufps xmm1, xmm0, 226
- GenTree* tmpOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, valueOp,
- NI_Vector128_CreateScalarUnsafe, TYP_FLOAT, 16);
+ GenTree* tmpOp =
+ gtNewSimdHWIntrinsicNode(TYP_SIMD16, valueOp, NI_Vector128_CreateScalarUnsafe,
+ CORINFO_TYPE_FLOAT, 16);
GenTree* dupVectorOp = nullptr;
vectorOp = impCloneExpr(vectorOp, &dupVectorOp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone Vector for Vector128<float>.WithElement"));
tmpOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmpOp, vectorOp, gtNewIconNode(0),
- NI_SSE_Shuffle, TYP_FLOAT, 16);
+ NI_SSE_Shuffle, CORINFO_TYPE_FLOAT, 16);
retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmpOp, dupVectorOp, gtNewIconNode(226),
- NI_SSE_Shuffle, TYP_FLOAT, 16);
+ NI_SSE_Shuffle, CORINFO_TYPE_FLOAT, 16);
}
else
{
@@ -1118,23 +1124,24 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
// =>
// shufps xmm1, xmm0, 32 (xmm0 = vector, xmm1 = value)
// shufps xmm0, xmm1, 36
- GenTree* tmpOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, valueOp,
- NI_Vector128_CreateScalarUnsafe, TYP_FLOAT, 16);
+ GenTree* tmpOp =
+ gtNewSimdHWIntrinsicNode(TYP_SIMD16, valueOp, NI_Vector128_CreateScalarUnsafe,
+ CORINFO_TYPE_FLOAT, 16);
GenTree* dupVectorOp = nullptr;
vectorOp = impCloneExpr(vectorOp, &dupVectorOp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone Vector for Vector128<float>.WithElement"));
valueOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, tmpOp, gtNewIconNode(controlBits1),
- NI_SSE_Shuffle, TYP_FLOAT, 16);
+ NI_SSE_Shuffle, CORINFO_TYPE_FLOAT, 16);
retNode =
gtNewSimdHWIntrinsicNode(TYP_SIMD16, valueOp, dupVectorOp, gtNewIconNode(controlBits2),
- NI_SSE_Shuffle, TYP_FLOAT, 16);
+ NI_SSE_Shuffle, CORINFO_TYPE_FLOAT, 16);
}
break;
}
else
{
valueOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, valueOp, NI_Vector128_CreateScalarUnsafe,
- TYP_FLOAT, 16);
+ CORINFO_TYPE_FLOAT, 16);
immNode->AsIntCon()->SetIconValue(imm8 * 16);
FALLTHROUGH;
}
@@ -1144,14 +1151,14 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
- retNode =
- gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, valueOp, immNode, NI_SSE41_Insert, baseType, 16);
+ retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, valueOp, immNode, NI_SSE41_Insert,
+ simdBaseJitType, 16);
break;
case TYP_SHORT:
case TYP_USHORT:
- retNode =
- gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, valueOp, immNode, NI_SSE2_Insert, baseType, 16);
+ retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, valueOp, immNode, NI_SSE2_Insert,
+ simdBaseJitType, 16);
break;
case TYP_DOUBLE:
@@ -1163,10 +1170,10 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
// vector.WithElement(1, value)
// =>
// unpcklpd xmm0, xmm1 (xmm0 = vector, xmm1 = value)
- valueOp =
- gtNewSimdHWIntrinsicNode(TYP_SIMD16, valueOp, NI_Vector128_CreateScalarUnsafe, TYP_DOUBLE, 16);
+ valueOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, valueOp, NI_Vector128_CreateScalarUnsafe,
+ CORINFO_TYPE_DOUBLE, 16);
NamedIntrinsic in = (imm8 == 0) ? NI_SSE2_MoveScalar : NI_SSE2_UnpackLow;
- retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, valueOp, in, TYP_DOUBLE, 16);
+ retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, valueOp, in, CORINFO_TYPE_DOUBLE, 16);
break;
}
@@ -1179,7 +1186,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
assert(clonedVectorOp);
int upperOrLower = (cachedImm8 >= count / 2) ? 1 : 0;
retNode = gtNewSimdHWIntrinsicNode(retType, clonedVectorOp, retNode, gtNewIconNode(upperOrLower),
- NI_AVX_InsertVector128, baseType, simdSize);
+ NI_AVX_InsertVector128, simdBaseJitType, simdSize);
}
break;
@@ -1199,18 +1206,19 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
{
assert(sig->numArgs == 2);
GenTree* indexOp = impStackTop().val;
- if (!compExactlyDependsOn(InstructionSet_SSE2) || !varTypeIsArithmetic(baseType) || !indexOp->OperIsConst())
+ if (!compExactlyDependsOn(InstructionSet_SSE2) || !varTypeIsArithmetic(simdBaseType) ||
+ !indexOp->OperIsConst())
{
// Using software fallback if
// 1. JIT/hardware don't support SSE2 instructions
- // 2. baseType is not a numeric type (throw execptions)
+ // 2. simdBaseType is not a numeric type (throw exceptions)
// 3. index is not a constant
return nullptr;
}
- switch (baseType)
+ switch (simdBaseType)
{
- // Using software fallback if baseType is not supported by hardware
+ // Using software fallback if simdBaseType is not supported by hardware
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
@@ -1241,7 +1249,7 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
}
ssize_t imm8 = indexOp->AsIntCon()->IconValue();
- ssize_t count = simdSize / genTypeSize(baseType);
+ ssize_t count = simdSize / genTypeSize(simdBaseType);
if (imm8 >= count || imm8 < 0)
{
@@ -1261,18 +1269,18 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
{
imm8 -= count / 2;
vectorOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, gtNewIconNode(1), NI_AVX_ExtractVector128,
- baseType, simdSize);
+ simdBaseJitType, simdSize);
}
else
{
- vectorOp =
- gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, NI_Vector256_GetLower, baseType, simdSize);
+ vectorOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, NI_Vector256_GetLower, simdBaseJitType,
+ simdSize);
}
}
- if (imm8 == 0 && (genTypeSize(baseType) >= 4))
+ if (imm8 == 0 && (genTypeSize(simdBaseType) >= 4))
{
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_LONG:
resIntrinsic = NI_SSE2_X64_ConvertToInt64;
@@ -1299,16 +1307,17 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
return nullptr;
}
- return gtNewSimdHWIntrinsicNode(retType, vectorOp, resIntrinsic, baseType, 16);
+ return gtNewSimdHWIntrinsicNode(retType, vectorOp, resIntrinsic, simdBaseJitType, 16);
}
GenTree* immNode = gtNewIconNode(imm8);
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_LONG:
case TYP_ULONG:
- retNode = gtNewSimdHWIntrinsicNode(retType, vectorOp, immNode, NI_SSE41_X64_Extract, baseType, 16);
+ retNode =
+ gtNewSimdHWIntrinsicNode(retType, vectorOp, immNode, NI_SSE41_X64_Extract, simdBaseJitType, 16);
break;
case TYP_FLOAT:
@@ -1327,8 +1336,9 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
vectorOp = impCloneExpr(vectorOp, &clonedVectorOp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone Vector for Vector128<float>.GetElement"));
vectorOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, clonedVectorOp, immNode,
- NI_SSE_Shuffle, TYP_FLOAT, 16);
- return gtNewSimdHWIntrinsicNode(retType, vectorOp, NI_Vector128_ToScalar, TYP_FLOAT, 16);
+ NI_SSE_Shuffle, CORINFO_TYPE_FLOAT, 16);
+ return gtNewSimdHWIntrinsicNode(retType, vectorOp, NI_Vector128_ToScalar, CORINFO_TYPE_FLOAT,
+ 16);
}
FALLTHROUGH;
}
@@ -1336,20 +1346,23 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
- retNode = gtNewSimdHWIntrinsicNode(retType, vectorOp, immNode, NI_SSE41_Extract, baseType, 16);
+ retNode =
+ gtNewSimdHWIntrinsicNode(retType, vectorOp, immNode, NI_SSE41_Extract, simdBaseJitType, 16);
break;
case TYP_BYTE:
// We do not have SSE41/SSE2 Extract APIs on signed small int, so need a CAST on the result
- retNode = gtNewSimdHWIntrinsicNode(TYP_UBYTE, vectorOp, immNode, NI_SSE41_Extract, TYP_UBYTE, 16);
+ retNode = gtNewSimdHWIntrinsicNode(TYP_UBYTE, vectorOp, immNode, NI_SSE41_Extract,
+ CORINFO_TYPE_UBYTE, 16);
retNode = gtNewCastNode(TYP_INT, retNode, true, TYP_BYTE);
break;
case TYP_SHORT:
case TYP_USHORT:
// We do not have SSE41/SSE2 Extract APIs on signed small int, so need a CAST on the result
- retNode = gtNewSimdHWIntrinsicNode(TYP_USHORT, vectorOp, immNode, NI_SSE2_Extract, TYP_USHORT, 16);
- if (baseType == TYP_SHORT)
+ retNode = gtNewSimdHWIntrinsicNode(TYP_USHORT, vectorOp, immNode, NI_SSE2_Extract,
+ CORINFO_TYPE_USHORT, 16);
+ if (simdBaseType == TYP_SHORT)
{
retNode = gtNewCastNode(TYP_INT, retNode, true, TYP_SHORT);
}
@@ -1361,8 +1374,9 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
// =>
// pshufd xmm1, xmm0, 0xEE (xmm0 = vector)
vectorOp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, vectorOp, gtNewIconNode(0xEE), NI_SSE2_Shuffle,
- TYP_INT, 16);
- retNode = gtNewSimdHWIntrinsicNode(TYP_DOUBLE, vectorOp, NI_Vector128_ToScalar, TYP_DOUBLE, 16);
+ CORINFO_TYPE_INT, 16);
+ retNode =
+ gtNewSimdHWIntrinsicNode(TYP_DOUBLE, vectorOp, NI_Vector128_ToScalar, CORINFO_TYPE_DOUBLE, 16);
break;
default:
@@ -1383,11 +1397,11 @@ GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic,
GenTree* Compiler::impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig)
{
- GenTree* retNode = nullptr;
- GenTree* op1 = nullptr;
- GenTree* op2 = nullptr;
- int simdSize = HWIntrinsicInfo::lookupSimdSize(this, intrinsic, sig);
- var_types baseType = TYP_UNKNOWN;
+ GenTree* retNode = nullptr;
+ GenTree* op1 = nullptr;
+ GenTree* op2 = nullptr;
+ int simdSize = HWIntrinsicInfo::lookupSimdSize(this, intrinsic, sig);
+ CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
// The Prefetch and StoreFence intrinsics don't take any SIMD operands
// and have a simdSize of 0
@@ -1401,10 +1415,10 @@ GenTree* Compiler::impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HAND
case NI_SSE_CompareScalarNotGreaterThanOrEqual:
{
assert(sig->numArgs == 2);
- op2 = impSIMDPopStack(TYP_SIMD16);
- op1 = impSIMDPopStack(TYP_SIMD16);
- baseType = getBaseTypeOfSIMDType(sig->retTypeSigClass);
- assert(baseType == TYP_FLOAT);
+ op2 = impSIMDPopStack(TYP_SIMD16);
+ op1 = impSIMDPopStack(TYP_SIMD16);
+ simdBaseJitType = getBaseJitTypeOfSIMDType(sig->retTypeSigClass);
+ assert(JitType2PreciseVarType(simdBaseJitType) == TYP_FLOAT);
if (compOpportunisticallyDependsOn(InstructionSet_AVX))
{
@@ -1414,7 +1428,7 @@ GenTree* Compiler::impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HAND
FloatComparisonMode comparison =
static_cast<FloatComparisonMode>(HWIntrinsicInfo::lookupIval(intrinsic, true));
retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, gtNewIconNode(static_cast<int>(comparison)),
- NI_AVX_CompareScalar, baseType, simdSize);
+ NI_AVX_CompareScalar, simdBaseJitType, simdSize);
}
else
{
@@ -1422,9 +1436,9 @@ GenTree* Compiler::impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HAND
op1 = impCloneExpr(op1, &clonedOp1, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for Sse.CompareScalarGreaterThan"));
- retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, op1, intrinsic, baseType, simdSize);
- retNode =
- gtNewSimdHWIntrinsicNode(TYP_SIMD16, clonedOp1, retNode, NI_SSE_MoveScalar, baseType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, op1, intrinsic, simdBaseJitType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, clonedOp1, retNode, NI_SSE_MoveScalar, simdBaseJitType,
+ simdSize);
}
break;
}
@@ -1437,14 +1451,14 @@ GenTree* Compiler::impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HAND
assert(sig->numArgs == 1);
assert(JITtype2varType(sig->retType) == TYP_VOID);
op1 = impPopStack().val;
- retNode = gtNewSimdHWIntrinsicNode(TYP_VOID, op1, intrinsic, TYP_UBYTE, 0);
+ retNode = gtNewSimdHWIntrinsicNode(TYP_VOID, op1, intrinsic, CORINFO_TYPE_UBYTE, 0);
break;
}
case NI_SSE_StoreFence:
assert(sig->numArgs == 0);
assert(JITtype2varType(sig->retType) == TYP_VOID);
- retNode = gtNewSimdHWIntrinsicNode(TYP_VOID, intrinsic, TYP_VOID, 0);
+ retNode = gtNewSimdHWIntrinsicNode(TYP_VOID, intrinsic, CORINFO_TYPE_VOID, 0);
break;
default:
@@ -1456,11 +1470,11 @@ GenTree* Compiler::impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HAND
GenTree* Compiler::impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig)
{
- GenTree* retNode = nullptr;
- GenTree* op1 = nullptr;
- GenTree* op2 = nullptr;
- int simdSize = HWIntrinsicInfo::lookupSimdSize(this, intrinsic, sig);
- var_types baseType = getBaseTypeOfSIMDType(sig->retTypeSigClass);
+ GenTree* retNode = nullptr;
+ GenTree* op1 = nullptr;
+ GenTree* op2 = nullptr;
+ int simdSize = HWIntrinsicInfo::lookupSimdSize(this, intrinsic, sig);
+ CorInfoType simdBaseJitType = getBaseJitTypeOfSIMDType(sig->retTypeSigClass);
// The fencing intrinsics don't take any operands and simdSize is 0
assert((simdSize == 16) || (simdSize == 0));
@@ -1475,7 +1489,7 @@ GenTree* Compiler::impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HAN
assert(sig->numArgs == 2);
op2 = impSIMDPopStack(TYP_SIMD16);
op1 = impSIMDPopStack(TYP_SIMD16);
- assert(baseType == TYP_DOUBLE);
+ assert(JitType2PreciseVarType(simdBaseJitType) == TYP_DOUBLE);
if (compOpportunisticallyDependsOn(InstructionSet_AVX))
{
@@ -1485,7 +1499,7 @@ GenTree* Compiler::impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HAN
FloatComparisonMode comparison =
static_cast<FloatComparisonMode>(HWIntrinsicInfo::lookupIval(intrinsic, true));
retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, gtNewIconNode(static_cast<int>(comparison)),
- NI_AVX_CompareScalar, baseType, simdSize);
+ NI_AVX_CompareScalar, simdBaseJitType, simdSize);
}
else
{
@@ -1493,9 +1507,9 @@ GenTree* Compiler::impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HAN
op1 = impCloneExpr(op1, &clonedOp1, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for Sse2.CompareScalarGreaterThan"));
- retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, op1, intrinsic, baseType, simdSize);
- retNode =
- gtNewSimdHWIntrinsicNode(TYP_SIMD16, clonedOp1, retNode, NI_SSE2_MoveScalar, baseType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, op1, intrinsic, simdBaseJitType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD16, clonedOp1, retNode, NI_SSE2_MoveScalar, simdBaseJitType,
+ simdSize);
}
break;
}
@@ -1507,7 +1521,7 @@ GenTree* Compiler::impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HAN
assert(JITtype2varType(sig->retType) == TYP_VOID);
assert(simdSize == 0);
- retNode = gtNewSimdHWIntrinsicNode(TYP_VOID, intrinsic, TYP_VOID, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(TYP_VOID, intrinsic, CORINFO_TYPE_VOID, simdSize);
break;
}
@@ -1515,9 +1529,14 @@ GenTree* Compiler::impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HAN
{
assert(sig->numArgs == 2);
assert(JITtype2varType(sig->retType) == TYP_VOID);
+
+ CORINFO_ARG_LIST_HANDLE argList = info.compCompHnd->getArgNext(sig->args);
+ CORINFO_CLASS_HANDLE argClass;
+ CorInfoType argJitType = strip(info.compCompHnd->getArgType(sig, argList, &argClass));
+
op2 = impPopStack().val;
op1 = impPopStack().val;
- retNode = gtNewSimdHWIntrinsicNode(TYP_VOID, op1, op2, NI_SSE2_StoreNonTemporal, op2->TypeGet(), 0);
+ retNode = gtNewSimdHWIntrinsicNode(TYP_VOID, op1, op2, NI_SSE2_StoreNonTemporal, argJitType, 0);
break;
}
@@ -1530,22 +1549,22 @@ GenTree* Compiler::impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HAN
GenTree* Compiler::impAvxOrAvx2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig)
{
- GenTree* retNode = nullptr;
- GenTree* op1 = nullptr;
- GenTree* op2 = nullptr;
- var_types baseType = TYP_UNKNOWN;
- int simdSize = HWIntrinsicInfo::lookupSimdSize(this, intrinsic, sig);
+ GenTree* retNode = nullptr;
+ GenTree* op1 = nullptr;
+ GenTree* op2 = nullptr;
+ CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
+ int simdSize = HWIntrinsicInfo::lookupSimdSize(this, intrinsic, sig);
switch (intrinsic)
{
case NI_AVX2_PermuteVar8x32:
{
- baseType = getBaseTypeOfSIMDType(sig->retTypeSigClass);
+ simdBaseJitType = getBaseJitTypeOfSIMDType(sig->retTypeSigClass);
// swap the two operands
GenTree* indexVector = impSIMDPopStack(TYP_SIMD32);
GenTree* sourceVector = impSIMDPopStack(TYP_SIMD32);
- retNode =
- gtNewSimdHWIntrinsicNode(TYP_SIMD32, indexVector, sourceVector, NI_AVX2_PermuteVar8x32, baseType, 32);
+ retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD32, indexVector, sourceVector, NI_AVX2_PermuteVar8x32,
+ simdBaseJitType, 32);
break;
}
@@ -1556,7 +1575,7 @@ GenTree* Compiler::impAvxOrAvx2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHO
CORINFO_CLASS_HANDLE argClass;
var_types argType = TYP_UNKNOWN;
unsigned int sizeBytes;
- baseType = getBaseTypeAndSizeOfSIMDType(sig->retTypeSigClass, &sizeBytes);
+ simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sig->retTypeSigClass, &sizeBytes);
var_types retType = getSIMDTypeForSize(sizeBytes);
assert(sig->numArgs == 5);
@@ -1573,9 +1592,9 @@ GenTree* Compiler::impAvxOrAvx2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHO
GenTree* op4 = getArgForHWIntrinsic(argType, argClass);
SetOpLclRelatedToSIMDIntrinsic(op4);
- argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg3, &argClass)));
- var_types indexbaseType = getBaseTypeOfSIMDType(argClass);
- GenTree* op3 = getArgForHWIntrinsic(argType, argClass);
+ argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg3, &argClass)));
+ CorInfoType indexBaseJitType = getBaseJitTypeOfSIMDType(argClass);
+ GenTree* op3 = getArgForHWIntrinsic(argType, argClass);
SetOpLclRelatedToSIMDIntrinsic(op3);
argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg2, &argClass)));
@@ -1587,8 +1606,9 @@ GenTree* Compiler::impAvxOrAvx2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHO
SetOpLclRelatedToSIMDIntrinsic(op1);
GenTree* opList = new (this, GT_LIST) GenTreeArgList(op1, gtNewArgList(op2, op3, op4, op5));
- retNode = new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(retType, opList, intrinsic, baseType, simdSize);
- retNode->AsHWIntrinsic()->SetAuxiliaryType(indexbaseType);
+ retNode =
+ new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(retType, opList, intrinsic, simdBaseJitType, simdSize);
+ retNode->AsHWIntrinsic()->SetAuxiliaryJitType(indexBaseJitType);
break;
}
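
The recurring change across this file's hunks is that SIMD node constructors now take a CorInfoType base JIT type (for example CORINFO_TYPE_FLOAT) in place of a var_types base type (TYP_FLOAT), with JitType2PreciseVarType recovering the var_types view where asserts still need it. The following is a minimal standalone sketch of that mapping; the *_S enums and the ToVarType helper are simplified stand-ins for illustration, not the real JIT definitions.

// Simplified stand-ins for the two type enums involved; not the real JIT headers.
#include <cassert>

enum CorInfoTypeSketch { CORINFO_TYPE_UNDEF_S, CORINFO_TYPE_FLOAT_S, CORINFO_TYPE_DOUBLE_S };
enum VarTypesSketch    { TYP_UNKNOWN_S, TYP_FLOAT_S, TYP_DOUBLE_S };

// Analogue of JitType2PreciseVarType: nodes carry the CorInfoType and derive the
// var_types view on demand instead of storing a var_types base type directly.
static VarTypesSketch ToVarType(CorInfoTypeSketch jitType)
{
    switch (jitType)
    {
        case CORINFO_TYPE_FLOAT_S:  return TYP_FLOAT_S;
        case CORINFO_TYPE_DOUBLE_S: return TYP_DOUBLE_S;
        default:                    return TYP_UNKNOWN_S;
    }
}

int main()
{
    CorInfoTypeSketch simdBaseJitType = CORINFO_TYPE_FLOAT_S; // what gets threaded through node creation
    assert(ToVarType(simdBaseJitType) == TYP_FLOAT_S);        // what the reworked asserts check
    return 0;
}
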
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 8ec1ca1108c..3ca3f2e4260 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -1703,9 +1703,9 @@ GenTree* Compiler::impGetStructAddr(GenTree* structVal,
// impNormStructType: Normalize the type of a (known to be) struct class handle.
//
// Arguments:
-// structHnd - The class handle for the struct type of interest.
-// pSimdBaseType - (optional, default nullptr) - if non-null, and the struct is a SIMD
-// type, set to the SIMD base type
+// structHnd - The class handle for the struct type of interest.
+// pSimdBaseJitType - (optional, default nullptr) - if non-null, and the struct is a SIMD
+// type, set to the SIMD base JIT type
//
// Return Value:
// The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
@@ -1717,7 +1717,7 @@ GenTree* Compiler::impGetStructAddr(GenTree* structVal,
// for full enregistration, e.g. TYP_SIMD16. If the size of the struct is already known
// call structSizeMightRepresentSIMDType to determine if this api needs to be called.
-var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, var_types* pSimdBaseType)
+var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* pSimdBaseJitType)
{
assert(structHnd != NO_CLASS_HANDLE);
@@ -1736,14 +1736,14 @@ var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, var_types*
if (structSizeMightRepresentSIMDType(originalSize))
{
unsigned int sizeBytes;
- var_types simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
- if (simdBaseType != TYP_UNKNOWN)
+ CorInfoType simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
+ if (simdBaseJitType != CORINFO_TYPE_UNDEF)
{
assert(sizeBytes == originalSize);
structType = getSIMDTypeForSize(sizeBytes);
- if (pSimdBaseType != nullptr)
+ if (pSimdBaseJitType != nullptr)
{
- *pSimdBaseType = simdBaseType;
+ *pSimdBaseJitType = simdBaseJitType;
}
// Also indicate that we use floating point registers.
compFloatingPointUsed = true;
@@ -3776,7 +3776,8 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
return retNode;
}
- var_types callType = JITtype2varType(sig->retType);
+ CorInfoType callJitType = sig->retType;
+ var_types callType = JITtype2varType(callJitType);
/* First do the intrinsics which are always smaller than a call */
@@ -4364,15 +4365,15 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
// ).ToScalar();
GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val,
- NI_Vector128_CreateScalarUnsafe, callType, 16);
+ NI_Vector128_CreateScalarUnsafe, callJitType, 16);
GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val,
- NI_Vector128_CreateScalarUnsafe, callType, 16);
+ NI_Vector128_CreateScalarUnsafe, callJitType, 16);
GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val,
- NI_Vector128_CreateScalarUnsafe, callType, 16);
+ NI_Vector128_CreateScalarUnsafe, callJitType, 16);
GenTree* res =
- gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, NI_FMA_MultiplyAddScalar, callType, 16);
+ gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, NI_FMA_MultiplyAddScalar, callJitType, 16);
- retNode = gtNewSimdHWIntrinsicNode(callType, res, NI_Vector128_ToScalar, callType, 16);
+ retNode = gtNewSimdHWIntrinsicNode(callType, res, NI_Vector128_ToScalar, callJitType, 16);
break;
}
#elif defined(TARGET_ARM64)
@@ -4393,18 +4394,18 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
constexpr unsigned int simdSize = 8;
GenTree* op3 =
- gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callType, simdSize);
+ gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize);
GenTree* op2 =
- gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callType, simdSize);
+ gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize);
GenTree* op1 =
- gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callType, simdSize);
+ gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize);
// Note that AdvSimd.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 + op2 * op3
// while Math{F}.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 * op2 + op3
retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op3, op2, op1, NI_AdvSimd_FusedMultiplyAddScalar,
- callType, simdSize);
+ callJitType, simdSize);
- retNode = gtNewSimdHWIntrinsicNode(callType, retNode, NI_Vector64_ToScalar, callType, simdSize);
+ retNode = gtNewSimdHWIntrinsicNode(callType, retNode, NI_Vector64_ToScalar, callJitType, simdSize);
break;
}
#endif
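
The ARM64 hunk above notes that AdvSimd.FusedMultiplyAddScalar(op1, op2, op3) computes op1 + op2 * op3 while Math{F}.FusedMultiplyAdd(op1, op2, op3) computes op1 * op2 + op3, which is why the importer builds the node with the operands in reverse order. A small standalone sketch of that operand reordering; the two helper functions are illustrative shapes, not real APIs.

#include <cassert>

// Addend-first shape, like the AdvSimd scalar intrinsic described in the comment above.
static double FmaAddendFirst(double addend, double x, double y)
{
    return addend + x * y; // op1 + op2 * op3
}

// Addend-last shape, like Math.FusedMultiplyAdd.
static double FmaAddendLast(double x, double y, double addend)
{
    return x * y + addend; // op1 * op2 + op3
}

int main()
{
    // Passing (op3, op2, op1) to the addend-first shape reproduces the addend-last result,
    // matching the (op3, op2, op1) order used when building the node.
    assert(FmaAddendFirst(3.0, 2.0, 4.0) == FmaAddendLast(2.0, 4.0, 3.0));
    return 0;
}
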
diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp
index 5ed85c89eea..edb7a4b04d1 100644
--- a/src/coreclr/jit/lclvars.cpp
+++ b/src/coreclr/jit/lclvars.cpp
@@ -467,13 +467,13 @@ void Compiler::lvaInitThisPtr(InitVarDscInfo* varDscInfo)
#ifdef FEATURE_SIMD
if (supportSIMDTypes())
{
- var_types simdBaseType = TYP_UNKNOWN;
- var_types type = impNormStructType(info.compClassHnd, &simdBaseType);
- if (simdBaseType != TYP_UNKNOWN)
+ CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
+ var_types type = impNormStructType(info.compClassHnd, &simdBaseJitType);
+ if (simdBaseJitType != CORINFO_TYPE_UNDEF)
{
assert(varTypeIsSIMD(type));
- varDsc->lvSIMDType = true;
- varDsc->lvBaseType = simdBaseType;
+ varDsc->lvSIMDType = true;
+ varDsc->SetSimdBaseJitType(simdBaseJitType);
varDsc->lvExactSize = genTypeSize(type);
}
}
@@ -578,9 +578,12 @@ void Compiler::lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBuf
else if (supportSIMDTypes() && varTypeIsSIMD(info.compRetType))
{
varDsc->lvSIMDType = true;
- varDsc->lvBaseType =
- getBaseTypeAndSizeOfSIMDType(info.compMethodInfo->args.retTypeClass, &varDsc->lvExactSize);
- assert(varDsc->lvBaseType != TYP_UNKNOWN);
+
+ CorInfoType simdBaseJitType =
+ getBaseJitTypeAndSizeOfSIMDType(info.compMethodInfo->args.retTypeClass, &varDsc->lvExactSize);
+ varDsc->SetSimdBaseJitType(simdBaseJitType);
+
+ assert(varDsc->GetSimdBaseType() != TYP_UNKNOWN);
}
#endif // FEATURE_SIMD
@@ -2171,10 +2174,10 @@ Compiler::lvaStructFieldInfo Compiler::StructPromotionHelper::GetFieldInfo(CORIN
// we have encountered any SIMD intrinsics.
if (compiler->usesSIMDTypes() && (fieldInfo.fldSize == 0) && compiler->isSIMDorHWSIMDClass(fieldInfo.fldTypeHnd))
{
- unsigned simdSize;
- var_types simdBaseType = compiler->getBaseTypeAndSizeOfSIMDType(fieldInfo.fldTypeHnd, &simdSize);
+ unsigned simdSize;
+ CorInfoType simdBaseJitType = compiler->getBaseJitTypeAndSizeOfSIMDType(fieldInfo.fldTypeHnd, &simdSize);
// We will only promote fields of SIMD types that fit into a SIMD register.
- if (simdBaseType != TYP_UNKNOWN)
+ if (simdBaseJitType != CORINFO_TYPE_UNDEF)
{
if ((simdSize >= compiler->minSIMDStructBytes()) && (simdSize <= compiler->maxSIMDStructBytes()))
{
@@ -2789,8 +2792,8 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool
if (layout->IsValueClass())
{
- var_types simdBaseType = TYP_UNKNOWN;
- varDsc->lvType = impNormStructType(typeHnd, &simdBaseType);
+ CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
+ varDsc->lvType = impNormStructType(typeHnd, &simdBaseJitType);
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
// Mark implicit byref struct parameters
@@ -2808,11 +2811,11 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
#if FEATURE_SIMD
- if (simdBaseType != TYP_UNKNOWN)
+ if (simdBaseJitType != CORINFO_TYPE_UNDEF)
{
assert(varTypeIsSIMD(varDsc));
varDsc->lvSIMDType = true;
- varDsc->lvBaseType = simdBaseType;
+ varDsc->SetSimdBaseJitType(simdBaseJitType);
}
#endif // FEATURE_SIMD
if (GlobalJitOptions::compFeatureHfa)
@@ -2840,7 +2843,7 @@ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool
else
{
#if FEATURE_SIMD
- assert(!varTypeIsSIMD(varDsc) || (varDsc->lvBaseType != TYP_UNKNOWN));
+ assert(!varTypeIsSIMD(varDsc) || (varDsc->GetSimdBaseType() != TYP_UNKNOWN));
#endif // FEATURE_SIMD
ClassLayout* layout = typGetObjLayout(typeHnd);
assert(ClassLayout::AreCompatible(varDsc->GetLayout(), layout));
@@ -3697,6 +3700,19 @@ void LclVarDsc::lvaDisqualifyVar()
}
#endif // ASSERTION_PROP
+#ifdef FEATURE_SIMD
+var_types LclVarDsc::GetSimdBaseType() const
+{
+ CorInfoType simdBaseJitType = GetSimdBaseJitType();
+
+ if (simdBaseJitType == CORINFO_TYPE_UNDEF)
+ {
+ return TYP_UNKNOWN;
+ }
+ return JitType2PreciseVarType(simdBaseJitType);
+}
+#endif // FEATURE_SIMD
+
unsigned LclVarDsc::lvSize() const // Size needed for storage representation. Only used for structs or TYP_BLK.
{
// TODO-Review: Sometimes we get called on ARM with HFA struct variables that have been promoted,
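
These lclvars.cpp changes replace direct reads and writes of lvBaseType with SetSimdBaseJitType/GetSimdBaseType, where the var_types view is derived on demand and CORINFO_TYPE_UNDEF maps to TYP_UNKNOWN (see the LclVarDsc::GetSimdBaseType hunk above). A standalone sketch of that accessor pattern with simplified stand-in types; the real derivation goes through JitType2PreciseVarType rather than the single case collapsed here.

#include <cassert>

enum CorInfoTypeSketch { CORINFO_TYPE_UNDEF_S, CORINFO_TYPE_FLOAT_S };
enum VarTypesSketch    { TYP_UNKNOWN_S, TYP_FLOAT_S };

struct LclVarDscSketch
{
    CorInfoTypeSketch simdBaseJitType = CORINFO_TYPE_UNDEF_S;

    void              SetSimdBaseJitType(CorInfoTypeSketch t) { simdBaseJitType = t; }
    CorInfoTypeSketch GetSimdBaseJitType() const { return simdBaseJitType; }

    // Mirrors the new LclVarDsc::GetSimdBaseType: undefined maps to TYP_UNKNOWN,
    // anything else is converted (collapsed to one case in this sketch).
    VarTypesSketch GetSimdBaseType() const
    {
        return (simdBaseJitType == CORINFO_TYPE_UNDEF_S) ? TYP_UNKNOWN_S : TYP_FLOAT_S;
    }
};

int main()
{
    LclVarDscSketch varDsc;
    assert(varDsc.GetSimdBaseType() == TYP_UNKNOWN_S); // old code compared lvBaseType against TYP_UNKNOWN
    varDsc.SetSimdBaseJitType(CORINFO_TYPE_FLOAT_S);   // replaces direct stores to lvBaseType
    assert(varDsc.GetSimdBaseType() == TYP_FLOAT_S);
    return 0;
}
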
diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index 1c4aac1af08..63d7a7f6f6d 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -1319,13 +1319,13 @@ void Lowering::LowerArg(GenTreeCall* call, GenTree** ppArg)
GenTreeJitIntrinsic* jitIntrinsic = reinterpret_cast<GenTreeJitIntrinsic*>(arg);
// For HWIntrinsic, there are some intrinsics like ExtractVector128 which have
- // a gtType of TYP_SIMD16 but a gtSIMDSize of 32, so we need to include that in
+ // a gtType of TYP_SIMD16 but a SimdSize of 32, so we need to include that in
// the assert below.
- assert((jitIntrinsic->gtSIMDSize == 12) || (jitIntrinsic->gtSIMDSize == 16) ||
- (jitIntrinsic->gtSIMDSize == 32));
+ assert((jitIntrinsic->GetSimdSize() == 12) || (jitIntrinsic->GetSimdSize() == 16) ||
+ (jitIntrinsic->GetSimdSize() == 32));
- if (jitIntrinsic->gtSIMDSize == 12)
+ if (jitIntrinsic->GetSimdSize() == 12)
{
type = TYP_SIMD12;
}
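
The lower.cpp hunk above reads the vector width through GetSimdSize() and picks the outgoing argument type from that size rather than from gtType, since intrinsics such as ExtractVector128 can report a TYP_SIMD16 gtType while carrying a SimdSize of 32. A standalone sketch of that size-driven selection, using stand-in enum values rather than the real var_types.

#include <cassert>

enum VarTypesSketch { TYP_SIMD12_S, TYP_SIMD16_S, TYP_SIMD32_S };

static VarTypesSketch ArgTypeFromSimdSize(unsigned simdSize, VarTypesSketch nodeType)
{
    assert((simdSize == 12) || (simdSize == 16) || (simdSize == 32));
    // Only the 12-byte case overrides the node's own type, matching the lowered path above.
    return (simdSize == 12) ? TYP_SIMD12_S : nodeType;
}

int main()
{
    assert(ArgTypeFromSimdSize(12, TYP_SIMD16_S) == TYP_SIMD12_S);
    assert(ArgTypeFromSimdSize(32, TYP_SIMD16_S) == TYP_SIMD16_S); // e.g. ExtractVector128
    return 0;
}
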
diff --git a/src/coreclr/jit/lowerarmarch.cpp b/src/coreclr/jit/lowerarmarch.cpp
index 0a9c8eb7f86..a522f3f57b3 100644
--- a/src/coreclr/jit/lowerarmarch.cpp
+++ b/src/coreclr/jit/lowerarmarch.cpp
@@ -681,7 +681,7 @@ bool Lowering::IsValidConstForMovImm(GenTreeHWIntrinsic* node)
GenTree* op1 = node->gtOp1;
GenTree* castOp = nullptr;
- if (varTypeIsIntegral(node->gtSIMDBaseType) && op1->OperIs(GT_CAST))
+ if (varTypeIsIntegral(node->GetSimdBaseType()) && op1->OperIs(GT_CAST))
{
// We will sometimes get a cast around a constant value (such as for
// certain long constants) which would block the below containment.
@@ -696,7 +696,7 @@ bool Lowering::IsValidConstForMovImm(GenTreeHWIntrinsic* node)
{
const ssize_t dataValue = op1->AsIntCon()->gtIconVal;
- if (comp->GetEmitter()->emitIns_valid_imm_for_movi(dataValue, emitActualTypeSize(node->gtSIMDBaseType)))
+ if (comp->GetEmitter()->emitIns_valid_imm_for_movi(dataValue, emitActualTypeSize(node->GetSimdBaseType())))
{
if (castOp != nullptr)
{
@@ -711,7 +711,7 @@ bool Lowering::IsValidConstForMovImm(GenTreeHWIntrinsic* node)
}
else if (op1->IsCnsFltOrDbl())
{
- assert(varTypeIsFloating(node->gtSIMDBaseType));
+ assert(varTypeIsFloating(node->GetSimdBaseType()));
assert(castOp == nullptr);
const double dataValue = op1->AsDblCon()->gtDconVal;
@@ -730,16 +730,17 @@ bool Lowering::IsValidConstForMovImm(GenTreeHWIntrinsic* node)
//
void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp)
{
- NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
- var_types baseType = node->gtSIMDBaseType;
- unsigned simdSize = node->gtSIMDSize;
- var_types simdType = Compiler::getSIMDTypeForSize(simdSize);
+ NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
+ CorInfoType simdBaseJitType = node->GetSimdBaseJitType();
+ var_types simdBaseType = node->GetSimdBaseType();
+ unsigned simdSize = node->GetSimdSize();
+ var_types simdType = Compiler::getSIMDTypeForSize(simdSize);
assert((intrinsicId == NI_Vector64_op_Equality) || (intrinsicId == NI_Vector64_op_Inequality) ||
(intrinsicId == NI_Vector128_op_Equality) || (intrinsicId == NI_Vector128_op_Inequality));
assert(varTypeIsSIMD(simdType));
- assert(varTypeIsArithmetic(baseType));
+ assert(varTypeIsArithmetic(simdBaseType));
assert(simdSize != 0);
assert(node->gtType == TYP_BOOL);
assert((cmpOp == GT_EQ) || (cmpOp == GT_NE));
@@ -754,7 +755,7 @@ void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp)
NamedIntrinsic cmpIntrinsic;
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
@@ -782,11 +783,11 @@ void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp)
}
}
- GenTree* cmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, cmpIntrinsic, baseType, simdSize);
+ GenTree* cmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, cmpIntrinsic, simdBaseJitType, simdSize);
BlockRange().InsertBefore(node, cmp);
LowerNode(cmp);
- if ((baseType == TYP_FLOAT) && (simdSize == 12))
+ if ((simdBaseType == TYP_FLOAT) && (simdSize == 12))
{
// For TYP_SIMD12 we don't want the upper bits to participate in the comparison. So, we will insert all ones
// into those bits of the result, "as if" the upper bits are equal. Then if all lower bits are equal, we get the
@@ -798,22 +799,24 @@ void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp)
GenTree* insCns = comp->gtNewIconNode(-1, TYP_INT);
BlockRange().InsertAfter(idxCns, insCns);
- GenTree* tmp =
- comp->gtNewSimdAsHWIntrinsicNode(simdType, cmp, idxCns, insCns, NI_AdvSimd_Insert, TYP_INT, simdSize);
+ GenTree* tmp = comp->gtNewSimdAsHWIntrinsicNode(simdType, cmp, idxCns, insCns, NI_AdvSimd_Insert,
+ CORINFO_TYPE_INT, simdSize);
BlockRange().InsertAfter(insCns, tmp);
LowerNode(tmp);
cmp = tmp;
}
- GenTree* msk = comp->gtNewSimdHWIntrinsicNode(simdType, cmp, NI_AdvSimd_Arm64_MinAcross, TYP_UBYTE, simdSize);
+ GenTree* msk =
+ comp->gtNewSimdHWIntrinsicNode(simdType, cmp, NI_AdvSimd_Arm64_MinAcross, CORINFO_TYPE_UBYTE, simdSize);
BlockRange().InsertAfter(cmp, msk);
LowerNode(msk);
GenTree* zroCns = comp->gtNewIconNode(0, TYP_INT);
BlockRange().InsertAfter(msk, zroCns);
- GenTree* val = comp->gtNewSimdAsHWIntrinsicNode(TYP_UBYTE, msk, zroCns, NI_AdvSimd_Extract, TYP_UBYTE, simdSize);
+ GenTree* val =
+ comp->gtNewSimdAsHWIntrinsicNode(TYP_UBYTE, msk, zroCns, NI_AdvSimd_Extract, CORINFO_TYPE_UBYTE, simdSize);
BlockRange().InsertAfter(zroCns, val);
LowerNode(val);
@@ -847,11 +850,12 @@ void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp)
//
void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
{
- NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
- var_types simdType = node->gtType;
- var_types baseType = node->gtSIMDBaseType;
- unsigned simdSize = node->gtSIMDSize;
- VectorConstant vecCns = {};
+ NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
+ var_types simdType = node->gtType;
+ CorInfoType simdBaseJitType = node->GetSimdBaseJitType();
+ var_types simdBaseType = node->GetSimdBaseType();
+ unsigned simdSize = node->GetSimdSize();
+ VectorConstant vecCns = {};
if ((simdSize == 8) && (simdType == TYP_DOUBLE))
{
@@ -861,7 +865,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
}
assert(varTypeIsSIMD(simdType));
- assert(varTypeIsArithmetic(baseType));
+ assert(varTypeIsArithmetic(simdBaseType));
assert(simdSize != 0);
GenTreeArgList* argList = nullptr;
@@ -886,7 +890,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
for (argList = op1->AsArgList(); argList != nullptr; argList = argList->Rest())
{
- if (HandleArgForHWIntrinsicCreate(argList->Current(), argCnt, vecCns, baseType))
+ if (HandleArgForHWIntrinsicCreate(argList->Current(), argCnt, vecCns, simdBaseType))
{
cnsArgCnt += 1;
}
@@ -895,7 +899,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
}
else
{
- if (HandleArgForHWIntrinsicCreate(op1, argCnt, vecCns, baseType))
+ if (HandleArgForHWIntrinsicCreate(op1, argCnt, vecCns, simdBaseType))
{
cnsArgCnt += 1;
}
@@ -903,7 +907,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
if (op2 != nullptr)
{
- if (HandleArgForHWIntrinsicCreate(op2, argCnt, vecCns, baseType))
+ if (HandleArgForHWIntrinsicCreate(op2, argCnt, vecCns, simdBaseType))
{
cnsArgCnt += 1;
}
@@ -915,19 +919,19 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
// so we'll just specially handle it here and copy it into the remaining
// indices.
- for (unsigned i = 1; i < simdSize / genTypeSize(baseType); i++)
+ for (unsigned i = 1; i < simdSize / genTypeSize(simdBaseType); i++)
{
- HandleArgForHWIntrinsicCreate(op1, i, vecCns, baseType);
+ HandleArgForHWIntrinsicCreate(op1, i, vecCns, simdBaseType);
}
}
}
- assert((argCnt == 1) || (argCnt == (simdSize / genTypeSize(baseType))));
+ assert((argCnt == 1) || (argCnt == (simdSize / genTypeSize(simdBaseType))));
if ((argCnt == cnsArgCnt) && (argCnt == 1))
{
GenTree* castOp = nullptr;
- if (varTypeIsIntegral(baseType) && op1->OperIs(GT_CAST))
+ if (varTypeIsIntegral(simdBaseType) && op1->OperIs(GT_CAST))
{
// We will sometimes get a cast around a constant value (such as for
// certain long constants) which would block the below containment.
@@ -1023,7 +1027,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
// This is roughly the following managed code:
// return AdvSimd.Arm64.DuplicateToVector(op1);
- if (varTypeIsLong(baseType) || (baseType == TYP_DOUBLE))
+ if (varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE))
{
node->gtHWIntrinsicId =
(simdType == TYP_SIMD8) ? NI_AdvSimd_Arm64_DuplicateToVector64 : NI_AdvSimd_Arm64_DuplicateToVector128;
@@ -1061,7 +1065,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
NamedIntrinsic createScalarUnsafe =
(simdType == TYP_SIMD8) ? NI_Vector64_CreateScalarUnsafe : NI_Vector128_CreateScalarUnsafe;
- tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, createScalarUnsafe, baseType, simdSize);
+ tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, createScalarUnsafe, simdBaseJitType, simdSize);
BlockRange().InsertAfter(op1, tmp1);
LowerNode(tmp1);
@@ -1089,7 +1093,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
idx = comp->gtNewIconNode(N, TYP_INT);
BlockRange().InsertBefore(opN, idx);
- tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, idx, opN, NI_AdvSimd_Insert, baseType, simdSize);
+ tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, idx, opN, NI_AdvSimd_Insert, simdBaseJitType, simdSize);
BlockRange().InsertAfter(opN, tmp1);
LowerNode(tmp1);
@@ -1129,14 +1133,15 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
//
void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
{
- NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
- var_types baseType = node->gtSIMDBaseType;
- unsigned simdSize = node->gtSIMDSize;
- var_types simdType = Compiler::getSIMDTypeForSize(simdSize);
+ NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
+ CorInfoType simdBaseJitType = node->GetSimdBaseJitType();
+ var_types simdBaseType = node->GetSimdBaseType();
+ unsigned simdSize = node->GetSimdSize();
+ var_types simdType = Compiler::getSIMDTypeForSize(simdSize);
assert((intrinsicId == NI_Vector64_Dot) || (intrinsicId == NI_Vector128_Dot));
assert(varTypeIsSIMD(simdType));
- assert(varTypeIsArithmetic(baseType));
+ assert(varTypeIsArithmetic(simdBaseType));
assert(simdSize != 0);
GenTree* op1 = node->gtGetOp1();
@@ -1154,7 +1159,7 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
if (simdSize == 12)
{
- assert(baseType == TYP_FLOAT);
+ assert(simdBaseType == TYP_FLOAT);
// For 12 byte SIMD, we need to clear the upper 4 bytes:
// idx = CNS_INT int 0x03
@@ -1176,7 +1181,7 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
BlockRange().InsertAfter(idx, tmp1);
LowerNode(tmp1);
- op1 = comp->gtNewSimdAsHWIntrinsicNode(simdType, op1, idx, tmp1, NI_AdvSimd_Insert, baseType, simdSize);
+ op1 = comp->gtNewSimdAsHWIntrinsicNode(simdType, op1, idx, tmp1, NI_AdvSimd_Insert, simdBaseJitType, simdSize);
BlockRange().InsertAfter(tmp1, op1);
LowerNode(op1);
}
@@ -1193,14 +1198,14 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
// var tmp1 = AdvSimd.Multiply(op1, op2);
// ...
- NamedIntrinsic multiply = (baseType == TYP_DOUBLE) ? NI_AdvSimd_Arm64_Multiply : NI_AdvSimd_Multiply;
- assert(!varTypeIsLong(baseType));
+ NamedIntrinsic multiply = (simdBaseType == TYP_DOUBLE) ? NI_AdvSimd_Arm64_Multiply : NI_AdvSimd_Multiply;
+ assert(!varTypeIsLong(simdBaseType));
- tmp1 = comp->gtNewSimdAsHWIntrinsicNode(simdType, op1, op2, multiply, baseType, simdSize);
+ tmp1 = comp->gtNewSimdAsHWIntrinsicNode(simdType, op1, op2, multiply, simdBaseJitType, simdSize);
BlockRange().InsertBefore(node, tmp1);
LowerNode(tmp1);
- if (varTypeIsFloating(baseType))
+ if (varTypeIsFloating(simdBaseType))
{
// We will be constructing the following parts:
// ...
@@ -1225,7 +1230,7 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
if (simdSize == 8)
{
- assert(baseType == TYP_FLOAT);
+ assert(simdBaseType == TYP_FLOAT);
// We will be constructing the following parts:
// ...
@@ -1239,7 +1244,8 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
// var tmp1 = AdvSimd.AddPairwise(tmp1, tmp2);
// ...
- tmp1 = comp->gtNewSimdAsHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_AddPairwise, baseType, simdSize);
+ tmp1 = comp->gtNewSimdAsHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_AddPairwise, simdBaseJitType,
+ simdSize);
BlockRange().InsertAfter(tmp2, tmp1);
LowerNode(tmp1);
}
@@ -1259,12 +1265,12 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
// var tmp1 = AdvSimd.Arm64.AddPairwise(tmp1, tmp2);
// ...
- tmp1 = comp->gtNewSimdAsHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_Arm64_AddPairwise, baseType,
+ tmp1 = comp->gtNewSimdAsHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_Arm64_AddPairwise, simdBaseJitType,
simdSize);
BlockRange().InsertAfter(tmp2, tmp1);
LowerNode(tmp1);
- if (baseType == TYP_FLOAT)
+ if (simdBaseType == TYP_FLOAT)
{
// Float needs an additional pairwise add to finish summing the parts
// The first will have summed e0 with e1 and e2 with e3 and then repeats that for the upper half
@@ -1298,8 +1304,8 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
tmp2 = comp->gtClone(tmp1);
BlockRange().InsertAfter(tmp1, tmp2);
- tmp1 = comp->gtNewSimdAsHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_Arm64_AddPairwise, baseType,
- simdSize);
+ tmp1 = comp->gtNewSimdAsHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_Arm64_AddPairwise,
+ simdBaseJitType, simdSize);
BlockRange().InsertAfter(tmp2, tmp1);
LowerNode(tmp1);
}
@@ -1309,7 +1315,7 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
}
else
{
- assert(varTypeIsIntegral(baseType));
+ assert(varTypeIsIntegral(simdBaseType));
// We will be constructing the following parts:
// ...
@@ -1322,7 +1328,7 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
// var tmp2 = AdvSimd.Arm64.AddAcross(tmp1);
// ...
- tmp2 = comp->gtNewSimdAsHWIntrinsicNode(simdType, tmp1, NI_AdvSimd_Arm64_AddAcross, baseType, simdSize);
+ tmp2 = comp->gtNewSimdAsHWIntrinsicNode(simdType, tmp1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, simdSize);
BlockRange().InsertAfter(tmp1, tmp2);
LowerNode(tmp2);
}
@@ -1669,7 +1675,7 @@ void Lowering::ContainCheckSIMD(GenTreeSIMD* simdNode)
// This implements get_Item method. The sources are:
// - the source SIMD struct
// - index (which element to get)
- // The result is baseType of SIMD struct.
+ // The result is simdBaseType of SIMD struct.
op1 = simdNode->AsOp()->gtOp1;
op2 = simdNode->AsOp()->gtOp2;
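
The LowerHWIntrinsicDot comments above describe the dot product as an element-wise multiply followed by pairwise adds (AdvSimd.AddPairwise / AddAcross) and a final ToScalar. A standalone scalar sketch of that reduction shape for a 4-element float vector; this is plain C++ for illustration, not the actual node construction.

#include <cassert>
#include <cstddef>

static float DotViaPairwise(const float (&a)[4], const float (&b)[4])
{
    float prod[4];
    for (size_t i = 0; i < 4; i++)
    {
        prod[i] = a[i] * b[i]; // tmp1 = AdvSimd.Multiply(op1, op2)
    }

    // The first pairwise add folds (e0,e1) and (e2,e3); the second finishes the sum,
    // mirroring the AddPairwise sequence before ToScalar.
    float pair0 = prod[0] + prod[1];
    float pair1 = prod[2] + prod[3];
    return pair0 + pair1;
}

int main()
{
    float a[4] = {1.0f, 2.0f, 3.0f, 4.0f};
    float b[4] = {5.0f, 6.0f, 7.0f, 8.0f};
    assert(DotViaPairwise(a, b) == 70.0f);
    return 0;
}
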
diff --git a/src/coreclr/jit/lowerxarch.cpp b/src/coreclr/jit/lowerxarch.cpp
index dfa6fe68ff4..31279fb8b43 100644
--- a/src/coreclr/jit/lowerxarch.cpp
+++ b/src/coreclr/jit/lowerxarch.cpp
@@ -682,7 +682,7 @@ void Lowering::LowerSIMD(GenTreeSIMD* simdNode)
if (simdNode->gtSIMDIntrinsicID == SIMDIntrinsicInitN)
{
- assert(simdNode->gtSIMDBaseType == TYP_FLOAT);
+ assert(simdNode->GetSimdBaseType() == TYP_FLOAT);
int argCount = 0;
int constArgCount = 0;
@@ -692,7 +692,7 @@ void Lowering::LowerSIMD(GenTreeSIMD* simdNode)
{
GenTree* arg = list->Current();
- assert(arg->TypeGet() == simdNode->gtSIMDBaseType);
+ assert(arg->TypeGet() == simdNode->GetSimdBaseType());
assert(argCount < (int)_countof(constArgValues));
if (arg->IsCnsFltOrDbl())
@@ -717,7 +717,7 @@ void Lowering::LowerSIMD(GenTreeSIMD* simdNode)
unsigned cnsAlign = (comp->compCodeOpt() != Compiler::SMALL_CODE) ? cnsSize : 1;
CORINFO_FIELD_HANDLE hnd =
- comp->GetEmitter()->emitBlkConst(constArgValues, cnsSize, cnsAlign, simdNode->gtSIMDBaseType);
+ comp->GetEmitter()->emitBlkConst(constArgValues, cnsSize, cnsAlign, simdNode->GetSimdBaseType());
GenTree* clsVarAddr = new (comp, GT_CLS_VAR_ADDR) GenTreeClsVar(GT_CLS_VAR_ADDR, TYP_I_IMPL, hnd, nullptr);
BlockRange().InsertBefore(simdNode, clsVarAddr);
simdNode->ChangeOper(GT_IND);
@@ -734,7 +734,7 @@ void Lowering::LowerSIMD(GenTreeSIMD* simdNode)
// If SIMD vector is already in memory, we force its
// addr to be evaluated into a reg. This would allow
// us to generate [regBase] or [regBase+offset] or
- // [regBase+sizeOf(SIMD vector baseType)*regIndex]
+ // [regBase+sizeOf(SIMD vector simdBaseType)*regIndex]
// to access the required SIMD vector element directly
// from memory.
//
@@ -980,11 +980,11 @@ void Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node)
GenTreeArgList* argList = node->gtOp1->AsArgList();
// Insert takes either a 32-bit register or a memory operand.
- // In either case, only gtSIMDBaseType bits are read and so
+ // In either case, only SimdBaseType bits are read and so
// widening or narrowing the operand may be unnecessary and it
// can just be used directly.
- argList->Rest()->gtOp1 = TryRemoveCastIfPresent(node->gtSIMDBaseType, argList->Rest()->gtOp1);
+ argList->Rest()->gtOp1 = TryRemoveCastIfPresent(node->GetSimdBaseType(), argList->Rest()->gtOp1);
break;
}
@@ -1003,9 +1003,9 @@ void Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node)
case NI_SSE2_CompareGreaterThan:
{
- if (node->gtSIMDBaseType != TYP_DOUBLE)
+ if (node->GetSimdBaseType() != TYP_DOUBLE)
{
- assert(varTypeIsIntegral(node->gtSIMDBaseType));
+ assert(varTypeIsIntegral(node->GetSimdBaseType()));
break;
}
@@ -1020,7 +1020,7 @@ void Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node)
case NI_SSE2_CompareNotGreaterThan:
case NI_SSE2_CompareNotGreaterThanOrEqual:
{
- assert((node->gtSIMDBaseType == TYP_FLOAT) || (node->gtSIMDBaseType == TYP_DOUBLE));
+ assert((node->GetSimdBaseType() == TYP_FLOAT) || (node->GetSimdBaseType() == TYP_DOUBLE));
if (comp->compOpportunisticallyDependsOn(InstructionSet_AVX))
{
@@ -1036,11 +1036,11 @@ void Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node)
case NI_SSE42_CompareLessThan:
case NI_AVX2_CompareLessThan:
{
- if (node->gtSIMDBaseType == TYP_DOUBLE)
+ if (node->GetSimdBaseType() == TYP_DOUBLE)
{
break;
}
- assert(varTypeIsIntegral(node->gtSIMDBaseType));
+ assert(varTypeIsIntegral(node->GetSimdBaseType()));
// this isn't actually supported in hardware so we need to swap the operands around
std::swap(node->gtOp1, node->gtOp2);
@@ -1163,16 +1163,17 @@ void Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node)
//
void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp)
{
- NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
- var_types baseType = node->gtSIMDBaseType;
- unsigned simdSize = node->gtSIMDSize;
- var_types simdType = Compiler::getSIMDTypeForSize(simdSize);
+ NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
+ CorInfoType simdBaseJitType = node->GetSimdBaseJitType();
+ var_types simdBaseType = node->GetSimdBaseType();
+ unsigned simdSize = node->GetSimdSize();
+ var_types simdType = Compiler::getSIMDTypeForSize(simdSize);
assert((intrinsicId == NI_Vector128_op_Equality) || (intrinsicId == NI_Vector128_op_Inequality) ||
(intrinsicId == NI_Vector256_op_Equality) || (intrinsicId == NI_Vector256_op_Inequality));
assert(varTypeIsSIMD(simdType));
- assert(varTypeIsArithmetic(baseType));
+ assert(varTypeIsArithmetic(simdBaseType));
assert(simdSize != 0);
assert(node->gtType == TYP_BOOL);
assert((cmpOp == GT_EQ) || (cmpOp == GT_NE));
@@ -1219,12 +1220,12 @@ void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp)
}
NamedIntrinsic cmpIntrinsic;
- var_types cmpType;
+ CorInfoType cmpJitType;
NamedIntrinsic mskIntrinsic;
- var_types mskType;
+ CorInfoType mskJitType;
int mskConstant;
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
@@ -1233,8 +1234,8 @@ void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp)
case TYP_INT:
case TYP_UINT:
{
- cmpType = baseType;
- mskType = TYP_UBYTE;
+ cmpJitType = simdBaseJitType;
+ mskJitType = CORINFO_TYPE_UBYTE;
if (simdSize == 32)
{
@@ -1256,12 +1257,12 @@ void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp)
case TYP_LONG:
case TYP_ULONG:
{
- mskType = TYP_UBYTE;
+ mskJitType = CORINFO_TYPE_UBYTE;
if (simdSize == 32)
{
cmpIntrinsic = NI_AVX2_CompareEqual;
- cmpType = baseType;
+ cmpJitType = simdBaseJitType;
mskIntrinsic = NI_AVX2_MoveMask;
mskConstant = -1;
}
@@ -1272,12 +1273,12 @@ void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp)
if (comp->compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
cmpIntrinsic = NI_SSE41_CompareEqual;
- cmpType = baseType;
+ cmpJitType = simdBaseJitType;
}
else
{
cmpIntrinsic = NI_SSE2_CompareEqual;
- cmpType = TYP_UINT;
+ cmpJitType = CORINFO_TYPE_UINT;
}
mskIntrinsic = NI_SSE2_MoveMask;
@@ -1288,8 +1289,8 @@ void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp)
case TYP_FLOAT:
{
- cmpType = baseType;
- mskType = baseType;
+ cmpJitType = simdBaseJitType;
+ mskJitType = simdBaseJitType;
if (simdSize == 32)
{
@@ -1321,8 +1322,8 @@ void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp)
case TYP_DOUBLE:
{
- cmpType = baseType;
- mskType = baseType;
+ cmpJitType = simdBaseJitType;
+ mskJitType = simdBaseJitType;
if (simdSize == 32)
{
@@ -1347,18 +1348,18 @@ void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp)
}
}
- GenTree* cmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, cmpIntrinsic, cmpType, simdSize);
+ GenTree* cmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, cmpIntrinsic, cmpJitType, simdSize);
BlockRange().InsertBefore(node, cmp);
LowerNode(cmp);
- GenTree* msk = comp->gtNewSimdHWIntrinsicNode(TYP_INT, cmp, mskIntrinsic, mskType, simdSize);
+ GenTree* msk = comp->gtNewSimdHWIntrinsicNode(TYP_INT, cmp, mskIntrinsic, mskJitType, simdSize);
BlockRange().InsertAfter(cmp, msk);
LowerNode(msk);
GenTree* mskCns = comp->gtNewIconNode(mskConstant, TYP_INT);
BlockRange().InsertAfter(msk, mskCns);
- if ((baseType == TYP_FLOAT) && (simdSize < 16))
+ if ((simdBaseType == TYP_FLOAT) && (simdSize < 16))
{
// For TYP_SIMD8 and TYP_SIMD12 we need to clear the upper bits and can't assume their value
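
The xarch LowerHWIntrinsicCmpOp path above selects a per-lane compare intrinsic, a MoveMask intrinsic, and a mask constant per base type, and the vector equality result then comes from comparing the collected lane mask against that constant. A standalone scalar sketch of that shape for 16 byte lanes; the 0xFFFF all-lanes value is assumed for this sketch rather than quoted from the hunks.

#include <cassert>
#include <cstdint>

static bool VectorEqualsSketch(const uint8_t (&a)[16], const uint8_t (&b)[16])
{
    int msk = 0;
    for (int i = 0; i < 16; i++)
    {
        // CompareEqual yields an all-ones lane on match; MoveMask packs one bit per lane.
        msk |= (a[i] == b[i]) ? (1 << i) : 0;
    }
    const int mskConstant = 0xFFFF; // every byte lane compared equal
    return msk == mskConstant;      // op_Equality; op_Inequality tests the opposite
}

int main()
{
    uint8_t x[16] = {0};
    uint8_t y[16] = {0};
    assert(VectorEqualsSketch(x, y));
    y[3] = 1;
    assert(!VectorEqualsSketch(x, y));
    return 0;
}
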
@@ -1394,11 +1395,12 @@ void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp)
//
void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
{
- NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
- var_types simdType = node->gtType;
- var_types baseType = node->gtSIMDBaseType;
- unsigned simdSize = node->gtSIMDSize;
- VectorConstant vecCns = {};
+ NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
+ var_types simdType = node->gtType;
+ CorInfoType simdBaseJitType = node->GetSimdBaseJitType();
+ var_types simdBaseType = node->GetSimdBaseType();
+ unsigned simdSize = node->GetSimdSize();
+ VectorConstant vecCns = {};
if ((simdSize == 8) && (simdType == TYP_DOUBLE))
{
@@ -1408,7 +1410,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
}
assert(varTypeIsSIMD(simdType));
- assert(varTypeIsArithmetic(baseType));
+ assert(varTypeIsArithmetic(simdBaseType));
assert(simdSize != 0);
GenTreeArgList* argList = nullptr;
@@ -1433,7 +1435,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
for (argList = op1->AsArgList(); argList != nullptr; argList = argList->Rest())
{
- if (HandleArgForHWIntrinsicCreate(argList->Current(), argCnt, vecCns, baseType))
+ if (HandleArgForHWIntrinsicCreate(argList->Current(), argCnt, vecCns, simdBaseType))
{
cnsArgCnt += 1;
}
@@ -1442,7 +1444,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
}
else
{
- if (HandleArgForHWIntrinsicCreate(op1, argCnt, vecCns, baseType))
+ if (HandleArgForHWIntrinsicCreate(op1, argCnt, vecCns, simdBaseType))
{
cnsArgCnt += 1;
}
@@ -1450,7 +1452,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
if (op2 != nullptr)
{
- if (HandleArgForHWIntrinsicCreate(op2, argCnt, vecCns, baseType))
+ if (HandleArgForHWIntrinsicCreate(op2, argCnt, vecCns, simdBaseType))
{
cnsArgCnt += 1;
}
@@ -1462,13 +1464,13 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
// so we'll just specially handle it here and copy it into the remaining
// indices.
- for (unsigned i = 1; i < simdSize / genTypeSize(baseType); i++)
+ for (unsigned i = 1; i < simdSize / genTypeSize(simdBaseType); i++)
{
- HandleArgForHWIntrinsicCreate(op1, i, vecCns, baseType);
+ HandleArgForHWIntrinsicCreate(op1, i, vecCns, simdBaseType);
}
}
}
- assert((argCnt == 1) || (argCnt == (simdSize / genTypeSize(baseType))));
+ assert((argCnt == 1) || (argCnt == (simdSize / genTypeSize(simdBaseType))));
if (argCnt == cnsArgCnt)
{
@@ -1577,7 +1579,8 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
// var tmp1 = Vector128.CreateScalarUnsafe(op1);
// return Avx2.BroadcastScalarToVector256(tmp1);
- tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector128_CreateScalarUnsafe, baseType, 16);
+ tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector128_CreateScalarUnsafe, simdBaseJitType,
+ 16);
BlockRange().InsertAfter(op1, tmp1);
LowerNode(tmp1);
@@ -1611,7 +1614,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
// var tmp3 = tmp2.ToVector256Unsafe();
// return Avx.InsertVector128(tmp3, tmp1, 0x01);
- tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector128_Create, baseType, 16);
+ tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector128_Create, simdBaseJitType, 16);
BlockRange().InsertAfter(op1, tmp1);
LowerNode(tmp1);
@@ -1623,7 +1626,8 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
tmp2 = comp->gtClone(tmp1);
BlockRange().InsertAfter(tmp1, tmp2);
- tmp3 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD32, tmp2, NI_Vector128_ToVector256Unsafe, baseType, 16);
+ tmp3 =
+ comp->gtNewSimdHWIntrinsicNode(TYP_SIMD32, tmp2, NI_Vector128_ToVector256Unsafe, simdBaseJitType, 16);
BlockRange().InsertAfter(tmp2, tmp3);
LowerNode(tmp3);
@@ -1646,11 +1650,11 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
// var tmp1 = Vector128.CreateScalarUnsafe(op1);
// ...
- tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector128_CreateScalarUnsafe, baseType, 16);
+ tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16);
BlockRange().InsertAfter(op1, tmp1);
LowerNode(tmp1);
- if ((baseType != TYP_DOUBLE) && comp->compOpportunisticallyDependsOn(InstructionSet_AVX2))
+ if ((simdBaseJitType != CORINFO_TYPE_DOUBLE) && comp->compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
// We will be constructing the following parts:
// ...
@@ -1668,7 +1672,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
return;
}
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
@@ -1687,7 +1691,8 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
// var tmp2 = Vector128<byte>.Zero;
// return Ssse3.Shuffle(tmp1, tmp2);
- tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, NI_Vector128_get_Zero, TYP_UBYTE, simdSize);
+ tmp2 =
+ comp->gtNewSimdHWIntrinsicNode(simdType, NI_Vector128_get_Zero, CORINFO_TYPE_UBYTE, simdSize);
BlockRange().InsertAfter(tmp1, tmp2);
LowerNode(tmp2);
@@ -1725,7 +1730,8 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
tmp2 = comp->gtClone(tmp1);
BlockRange().InsertAfter(tmp1, tmp2);
- tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_SSE2_UnpackLow, TYP_UBYTE, simdSize);
+ tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_SSE2_UnpackLow, CORINFO_TYPE_UBYTE,
+ simdSize);
BlockRange().InsertAfter(tmp2, tmp1);
LowerNode(tmp1);
@@ -1762,7 +1768,8 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
tmp2 = comp->gtClone(tmp1);
BlockRange().InsertAfter(tmp1, tmp2);
- tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_SSE2_UnpackLow, TYP_USHORT, simdSize);
+ tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_SSE2_UnpackLow, CORINFO_TYPE_USHORT,
+ simdSize);
BlockRange().InsertAfter(tmp2, tmp1);
LowerNode(tmp1);
@@ -1792,7 +1799,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
node->gtOp2 = idx;
node->gtHWIntrinsicId = NI_SSE2_Shuffle;
- node->gtSIMDBaseType = TYP_UINT;
+ node->SetSimdBaseJitType(CORINFO_TYPE_UINT);
break;
}
@@ -1945,7 +1952,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
node->gtOp2 = tmp2;
node->gtHWIntrinsicId = NI_SSE_MoveLowToHigh;
- node->gtSIMDBaseType = TYP_FLOAT;
+ node->SetSimdBaseJitType(CORINFO_TYPE_FLOAT);
break;
}
@@ -2037,7 +2044,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
tmp1 = argList->Current();
tmp2 = argList->Rest()->Current();
- lo = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, tmp2, NI_Vector128_Create, baseType, 16);
+ lo = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, tmp2, NI_Vector128_Create, simdBaseJitType, 16);
BlockRange().InsertAfter(tmp2, lo);
LowerNode(lo);
@@ -2046,7 +2053,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
tmp1 = argList->Current();
tmp2 = argList->Rest()->Current();
- hi = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, tmp2, NI_Vector128_Create, baseType, 16);
+ hi = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, tmp2, NI_Vector128_Create, simdBaseJitType, 16);
BlockRange().InsertAfter(tmp2, hi);
LowerNode(hi);
}
@@ -2059,11 +2066,11 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
tmp1 = op2->AsArgList()->Current();
- lo = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector128_Create, baseType, 16);
+ lo = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector128_Create, simdBaseJitType, 16);
BlockRange().InsertBefore(tmp1, lo);
LowerNode(lo);
- hi = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_Vector128_Create, baseType, 16);
+ hi = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_Vector128_Create, simdBaseJitType, 16);
BlockRange().InsertBefore(node, hi);
LowerNode(hi);
}
@@ -2094,11 +2101,11 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
// var tmp1 = Vector128.CreateScalarUnsafe(op1);
// ...
- tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector128_CreateScalarUnsafe, baseType, 16);
+ tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16);
BlockRange().InsertAfter(op1, tmp1);
LowerNode(tmp1);
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
@@ -2111,7 +2118,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
GenTree* opN = nullptr;
NamedIntrinsic insIntrinsic = NI_Illegal;
- if ((baseType == TYP_SHORT) || (baseType == TYP_USHORT))
+ if ((simdBaseType == TYP_SHORT) || (simdBaseType == TYP_USHORT))
{
assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2));
insIntrinsic = NI_SSE2_Insert;
@@ -2144,7 +2151,8 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
idx = comp->gtNewIconNode(N, TYP_INT);
BlockRange().InsertAfter(opN, idx);
- tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, opN, idx, insIntrinsic, baseType, simdSize);
+ tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, opN, idx, insIntrinsic, simdBaseJitType,
+ simdSize);
BlockRange().InsertAfter(idx, tmp1);
LowerNode(tmp1);
@@ -2177,7 +2185,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
break;
}
- assert((baseType != TYP_SHORT) && (baseType != TYP_USHORT));
+ assert((simdBaseType != TYP_SHORT) && (simdBaseType != TYP_USHORT));
assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2));
GenTree* op[16];
@@ -2187,7 +2195,8 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
{
opN = argList->Current();
- op[N] = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, opN, NI_Vector128_CreateScalarUnsafe, baseType, 16);
+ op[N] = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, opN, NI_Vector128_CreateScalarUnsafe,
+ simdBaseJitType, 16);
BlockRange().InsertAfter(opN, op[N]);
LowerNode(op[N]);
@@ -2195,7 +2204,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
}
assert(argList == nullptr);
- if ((baseType == TYP_BYTE) || (baseType == TYP_UBYTE))
+ if ((simdBaseType == TYP_BYTE) || (simdBaseType == TYP_UBYTE))
{
for (N = 0; N < argCnt; N += 4)
{
@@ -2231,18 +2240,18 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
unsigned P = N + 2;
unsigned Q = N + 3;
- tmp1 =
- comp->gtNewSimdHWIntrinsicNode(simdType, op[N], op[O], NI_SSE2_UnpackLow, TYP_UBYTE, simdSize);
+ tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op[N], op[O], NI_SSE2_UnpackLow, CORINFO_TYPE_UBYTE,
+ simdSize);
BlockRange().InsertAfter(op[O], tmp1);
LowerNode(tmp1);
- tmp2 =
- comp->gtNewSimdHWIntrinsicNode(simdType, op[P], op[Q], NI_SSE2_UnpackLow, TYP_UBYTE, simdSize);
+ tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, op[P], op[Q], NI_SSE2_UnpackLow, CORINFO_TYPE_UBYTE,
+ simdSize);
BlockRange().InsertAfter(op[Q], tmp2);
LowerNode(tmp2);
- tmp3 =
- comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_SSE2_UnpackLow, TYP_USHORT, simdSize);
+ tmp3 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_SSE2_UnpackLow, CORINFO_TYPE_USHORT,
+ simdSize);
BlockRange().InsertAfter(tmp2, tmp3);
LowerNode(tmp3);
@@ -2280,11 +2289,13 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
// tmp2 = Sse2.UnpackLow(opP, opQ);
// return Sse2.UnpackLow(tmp1, tmp2);
- tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op[0], op[1], NI_SSE2_UnpackLow, TYP_UINT, simdSize);
+ tmp1 =
+ comp->gtNewSimdHWIntrinsicNode(simdType, op[0], op[1], NI_SSE2_UnpackLow, CORINFO_TYPE_UINT, simdSize);
BlockRange().InsertAfter(op[1], tmp1);
LowerNode(tmp1);
- tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, op[2], op[3], NI_SSE2_UnpackLow, TYP_UINT, simdSize);
+ tmp2 =
+ comp->gtNewSimdHWIntrinsicNode(simdType, op[2], op[3], NI_SSE2_UnpackLow, CORINFO_TYPE_UINT, simdSize);
BlockRange().InsertAfter(op[3], tmp2);
LowerNode(tmp2);
@@ -2292,7 +2303,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
node->gtOp2 = tmp2;
node->gtHWIntrinsicId = NI_SSE2_UnpackLow;
- node->gtSIMDBaseType = TYP_ULONG;
+ node->SetSimdBaseJitType(CORINFO_TYPE_ULONG);
break;
}
@@ -2339,7 +2350,8 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2));
- tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_Vector128_CreateScalarUnsafe, baseType, 16);
+ tmp2 =
+ comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16);
BlockRange().InsertAfter(op2, tmp2);
LowerNode(tmp2);
@@ -2380,16 +2392,16 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
opN = argList->Current();
- tmp2 =
- comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, opN, NI_Vector128_CreateScalarUnsafe, baseType, 16);
+ tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, opN, NI_Vector128_CreateScalarUnsafe,
+ simdBaseJitType, 16);
BlockRange().InsertAfter(opN, tmp2);
LowerNode(tmp2);
idx = comp->gtNewIconNode(N << 4, TYP_INT);
BlockRange().InsertAfter(tmp2, idx);
- tmp1 =
- comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, idx, NI_SSE41_Insert, baseType, simdSize);
+ tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, idx, NI_SSE41_Insert, simdBaseJitType,
+ simdSize);
BlockRange().InsertAfter(idx, tmp1);
LowerNode(tmp1);
@@ -2414,7 +2426,8 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
opN = argList->Current();
- tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, opN, NI_Vector128_CreateScalarUnsafe, baseType, 16);
+ tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, opN, NI_Vector128_CreateScalarUnsafe, simdBaseJitType,
+ 16);
BlockRange().InsertAfter(opN, tmp2);
LowerNode(tmp2);
@@ -2463,7 +2476,8 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
{
opN = argList->Current();
- op[N] = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, opN, NI_Vector128_CreateScalarUnsafe, baseType, 16);
+ op[N] = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, opN, NI_Vector128_CreateScalarUnsafe,
+ simdBaseJitType, 16);
BlockRange().InsertAfter(opN, op[N]);
LowerNode(op[N]);
@@ -2471,11 +2485,11 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
}
assert(argList == nullptr);
- tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op[0], op[1], NI_SSE_UnpackLow, baseType, simdSize);
+ tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op[0], op[1], NI_SSE_UnpackLow, simdBaseJitType, simdSize);
BlockRange().InsertAfter(op[1], tmp1);
LowerNode(tmp1);
- tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, op[2], op[3], NI_SSE_UnpackLow, baseType, simdSize);
+ tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, op[2], op[3], NI_SSE_UnpackLow, simdBaseJitType, simdSize);
BlockRange().InsertAfter(op[3], tmp2);
LowerNode(tmp2);
@@ -2503,7 +2517,8 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2));
- tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_Vector128_CreateScalarUnsafe, baseType, 16);
+ tmp2 =
+ comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_Vector128_CreateScalarUnsafe, simdBaseJitType, 16);
BlockRange().InsertAfter(op2, tmp2);
LowerNode(tmp2);
@@ -2511,7 +2526,7 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
node->gtOp2 = tmp2;
node->gtHWIntrinsicId = NI_SSE_MoveLowToHigh;
- node->gtSIMDBaseType = TYP_FLOAT;
+ node->SetSimdBaseJitType(CORINFO_TYPE_FLOAT);
break;
}
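
In the byte/sbyte Vector128.Create path above, the scalars are combined by interleaving pairs with SSE2 UnpackLow at successively wider base types (CORINFO_TYPE_UBYTE, then CORINFO_TYPE_USHORT and CORINFO_TYPE_UINT for the later stages). A scalar model of a single interleave step, using plain arrays instead of SIMD registers (the UnpackLow template below is a local illustration, not the intrinsic):

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Scalar model of "unpack low": interleave the low halves of two fixed-width vectors.
    template <typename T, std::size_t N>
    std::array<T, N> UnpackLow(const std::array<T, N>& a, const std::array<T, N>& b)
    {
        std::array<T, N> r{};
        for (std::size_t i = 0; i < N / 2; i++)
        {
            r[2 * i]     = a[i]; // even slots come from the first operand
            r[2 * i + 1] = b[i]; // odd slots come from the second operand
        }
        return r;
    }

    int main()
    {
        // Two groups of byte scalars, standing in for the per-element CreateScalarUnsafe nodes.
        std::array<uint8_t, 4> a{0, 1, 2, 3};
        std::array<uint8_t, 4> b{4, 5, 6, 7};

        // One CORINFO_TYPE_UBYTE-level interleave; the lowering repeats the idea at
        // ushort and uint granularity to fold all scalars into one vector.
        std::array<uint8_t, 4> lo = UnpackLow(a, b);

        for (uint8_t v : lo)
        {
            std::printf("%d ", (int)v);
        }
        std::printf("\n"); // prints: 0 4 1 5
        return 0;
    }
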
@@ -2531,16 +2546,16 @@ void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
//
void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
{
- NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
- ;
- var_types baseType = node->gtSIMDBaseType;
- unsigned simdSize = node->gtSIMDSize;
- var_types simdType = Compiler::getSIMDTypeForSize(simdSize);
- unsigned simd16Count = comp->getSIMDVectorLength(16, baseType);
+ NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
+ CorInfoType simdBaseJitType = node->GetSimdBaseJitType();
+ var_types simdBaseType = node->GetSimdBaseType();
+ unsigned simdSize = node->GetSimdSize();
+ var_types simdType = Compiler::getSIMDTypeForSize(simdSize);
+ unsigned simd16Count = comp->getSIMDVectorLength(16, simdBaseType);
assert((intrinsicId == NI_Vector128_Dot) || (intrinsicId == NI_Vector256_Dot));
assert(varTypeIsSIMD(simdType));
- assert(varTypeIsArithmetic(baseType));
+ assert(varTypeIsArithmetic(simdBaseType));
assert(simdSize != 0);
GenTree* op1 = node->gtGetOp1();
@@ -2566,7 +2581,7 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
{
assert(comp->compIsaSupportedDebugOnly(InstructionSet_AVX2));
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_SHORT:
case TYP_USHORT:
@@ -2610,7 +2625,8 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
idx = comp->gtNewIconNode(0xF1, TYP_INT);
BlockRange().InsertBefore(node, idx);
- tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, idx, NI_AVX_DotProduct, baseType, simdSize);
+ tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, idx, NI_AVX_DotProduct, simdBaseJitType,
+ simdSize);
BlockRange().InsertAfter(idx, tmp1);
LowerNode(tmp1);
@@ -2625,16 +2641,16 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
idx = comp->gtNewIconNode(0x01, TYP_INT);
BlockRange().InsertAfter(tmp2, idx);
- tmp2 =
- comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp2, idx, NI_AVX_ExtractVector128, baseType, simdSize);
+ tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp2, idx, NI_AVX_ExtractVector128, simdBaseJitType,
+ simdSize);
BlockRange().InsertAfter(idx, tmp2);
LowerNode(tmp2);
- tmp3 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, tmp2, NI_SSE_Add, baseType, 16);
+ tmp3 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, tmp2, NI_SSE_Add, simdBaseJitType, 16);
BlockRange().InsertAfter(tmp2, tmp3);
LowerNode(tmp3);
- node->gtSIMDSize = 16;
+ node->SetSimdSize(16);
node->gtOp1 = tmp3;
node->gtOp2 = nullptr;
@@ -2663,7 +2679,7 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
{
assert(comp->compIsaSupportedDebugOnly(InstructionSet_SSE2));
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_SHORT:
case TYP_USHORT:
@@ -2722,7 +2738,7 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
}
BlockRange().InsertBefore(node, idx);
- tmp3 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, idx, NI_SSE41_DotProduct, baseType,
+ tmp3 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, idx, NI_SSE41_DotProduct, simdBaseJitType,
simdSize);
BlockRange().InsertAfter(idx, tmp3);
LowerNode(tmp3);
@@ -2767,7 +2783,7 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
idx = comp->gtNewIconNode(0x31, TYP_INT);
BlockRange().InsertBefore(node, idx);
- tmp3 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, idx, NI_SSE41_DotProduct, baseType,
+ tmp3 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, idx, NI_SSE41_DotProduct, simdBaseJitType,
simdSize);
BlockRange().InsertAfter(idx, tmp3);
LowerNode(tmp3);
@@ -2800,7 +2816,7 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
if (simdSize == 8)
{
- assert(baseType == TYP_FLOAT);
+ assert(simdBaseType == TYP_FLOAT);
// If simdSize == 8 then we have only two elements, not the 4 that we got from getSIMDVectorLength,
// which we gave a simdSize of 16. So, we set the simd16Count to 2 so that only 1 hadd will
@@ -2810,7 +2826,7 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
}
else if (simdSize == 12)
{
- assert(baseType == TYP_FLOAT);
+ assert(simdBaseType == TYP_FLOAT);
// We will be constructing the following parts:
// ...
@@ -2842,11 +2858,12 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
GenTree* cns3 = comp->gtNewIconNode(0, TYP_INT);
BlockRange().InsertAfter(cns2, cns3);
- tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, cns0, cns1, cns2, cns3, NI_Vector128_Create, TYP_INT, 16);
+ tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, cns0, cns1, cns2, cns3, NI_Vector128_Create,
+ CORINFO_TYPE_INT, 16);
BlockRange().InsertAfter(cns3, tmp1);
LowerNode(tmp1);
- op1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, tmp1, NI_SSE_And, baseType, simdSize);
+ op1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, tmp1, NI_SSE_And, simdBaseJitType, simdSize);
BlockRange().InsertAfter(tmp1, op1);
LowerNode(op1);
}
@@ -2862,7 +2879,7 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
// var tmp1 = Isa.Multiply(op1, op2);
// ...
- tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, multiply, baseType, simdSize);
+ tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, multiply, simdBaseJitType, simdSize);
BlockRange().InsertBefore(node, tmp1);
LowerNode(tmp1);
@@ -2906,7 +2923,7 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
// tmp1 = Isa.HorizontalAdd(tmp1, tmp2);
// ...
- tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, horizontalAdd, baseType, simdSize);
+ tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, horizontalAdd, simdBaseJitType, simdSize);
}
else
{
@@ -2916,7 +2933,8 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
{
case 0:
{
- assert((baseType == TYP_SHORT) || (baseType == TYP_USHORT) || varTypeIsFloating(baseType));
+ assert((simdBaseType == TYP_SHORT) || (simdBaseType == TYP_USHORT) ||
+ varTypeIsFloating(simdBaseType));
// Adds (e0 + e1, e1 + e0, e2 + e3, e3 + e2), giving:
// e0, e1, e2, e3 | e4, e5, e6, e7
@@ -2929,7 +2947,7 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
case 1:
{
- assert((baseType == TYP_SHORT) || (baseType == TYP_USHORT) || (baseType == TYP_FLOAT));
+ assert((simdBaseType == TYP_SHORT) || (simdBaseType == TYP_USHORT) || (simdBaseType == TYP_FLOAT));
// Adds (e0 + e2, e1 + e3, e2 + e0, e3 + e1), giving:
// ...
@@ -2942,7 +2960,7 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
case 2:
{
- assert((baseType == TYP_SHORT) || (baseType == TYP_USHORT));
+ assert((simdBaseType == TYP_SHORT) || (simdBaseType == TYP_USHORT));
// Adds (e0 + e4, e1 + e5, e2 + e6, e3 + e7), giving:
// ...
@@ -2964,7 +2982,7 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
idx = comp->gtNewIconNode(shuffleConst, TYP_INT);
BlockRange().InsertAfter(tmp2, idx);
- if (varTypeIsFloating(baseType))
+ if (varTypeIsFloating(simdBaseType))
{
// We will be constructing the following parts:
// ...
@@ -2993,11 +3011,11 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
tmp3 = comp->gtClone(tmp2);
BlockRange().InsertAfter(tmp2, tmp3);
- tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, tmp3, idx, shuffle, baseType, simdSize);
+ tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, tmp3, idx, shuffle, simdBaseJitType, simdSize);
}
else
{
- assert((baseType == TYP_SHORT) || (baseType == TYP_USHORT));
+ assert((simdBaseType == TYP_SHORT) || (simdBaseType == TYP_USHORT));
if (i < 2)
{
@@ -3018,14 +3036,16 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
// tmp2 = Isa.Shuffle(tmp1, shuffleConst);
// ...
- tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, idx, NI_SSE2_ShuffleLow, baseType, simdSize);
+ tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, idx, NI_SSE2_ShuffleLow, simdBaseJitType,
+ simdSize);
BlockRange().InsertAfter(idx, tmp2);
LowerNode(tmp2);
idx = comp->gtNewIconNode(shuffleConst, TYP_INT);
BlockRange().InsertAfter(tmp2, idx);
- tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, idx, NI_SSE2_ShuffleHigh, baseType, simdSize);
+ tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, idx, NI_SSE2_ShuffleHigh, simdBaseJitType,
+ simdSize);
}
else
{
@@ -3044,7 +3064,8 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
// tmp2 = Isa.Shuffle(tmp1, shuffleConst);
// ...
- tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, idx, NI_SSE2_Shuffle, TYP_INT, simdSize);
+ tmp2 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp2, idx, NI_SSE2_Shuffle, CORINFO_TYPE_INT,
+ simdSize);
}
}
@@ -3063,7 +3084,7 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
// tmp1 = Isa.Add(tmp1, tmp2);
// ...
- tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, add, baseType, simdSize);
+ tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, add, simdBaseJitType, simdSize);
}
BlockRange().InsertAfter(tmp2, tmp1);
@@ -3105,15 +3126,16 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
idx = comp->gtNewIconNode(0x01, TYP_INT);
BlockRange().InsertAfter(tmp2, idx);
- tmp2 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp2, idx, NI_AVX_ExtractVector128, baseType, simdSize);
+ tmp2 =
+ comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp2, idx, NI_AVX_ExtractVector128, simdBaseJitType, simdSize);
BlockRange().InsertAfter(idx, tmp2);
LowerNode(tmp2);
- tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, tmp2, add, baseType, 16);
+ tmp1 = comp->gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, tmp2, add, simdBaseJitType, 16);
BlockRange().InsertAfter(tmp2, tmp1);
LowerNode(tmp1);
- node->gtSIMDSize = 16;
+ node->SetSimdSize(16);
}
// We will be constructing the following parts:
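
The Dot lowering above leans on the SSE4.1/AVX dot-product instruction with an immediate such as 0xF1 or 0x31: the high bits select which element products are summed, and the low bits select which result elements receive the sum. A scalar model of that immediate (DotProduct below is a local helper, and the 4-lane float shape is chosen purely for illustration):

    #include <array>
    #include <cstdio>

    // Scalar model of the dot-product immediate: bits 4..7 choose which input
    // elements participate in the multiply-add, bits 0..3 choose which result
    // elements receive the sum (the others become 0).
    static std::array<float, 4> DotProduct(const std::array<float, 4>& a,
                                           const std::array<float, 4>& b,
                                           unsigned char imm)
    {
        float sum = 0.0f;
        for (int i = 0; i < 4; i++)
        {
            if ((imm >> (4 + i)) & 1)
            {
                sum += a[i] * b[i];
            }
        }

        std::array<float, 4> r{};
        for (int i = 0; i < 4; i++)
        {
            r[i] = ((imm >> i) & 1) ? sum : 0.0f;
        }
        return r;
    }

    int main()
    {
        std::array<float, 4> a{1, 2, 3, 4};
        std::array<float, 4> b{5, 6, 7, 8};

        // 0xF1: multiply all four lanes and place the sum only in element 0, which
        // is the shape the lowering wants before the final ToScalar.
        std::array<float, 4> r = DotProduct(a, b, 0xF1);
        std::printf("%g\n", r[0]); // 1*5 + 2*6 + 3*7 + 4*8 = 70

        // 0x31: only the first two lanes participate (the simdSize == 8 path).
        r = DotProduct(a, b, 0x31);
        std::printf("%g\n", r[0]); // 1*5 + 2*6 = 17
        return 0;
    }
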
@@ -3142,25 +3164,25 @@ void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
//
void Lowering::LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node)
{
- NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
- ;
- var_types baseType = node->gtSIMDBaseType;
- unsigned simdSize = node->gtSIMDSize;
- var_types simdType = Compiler::getSIMDTypeForSize(simdSize);
+ NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
+ CorInfoType simdBaseJitType = node->GetSimdBaseJitType();
+ var_types simdBaseType = node->GetSimdBaseType();
+ unsigned simdSize = node->GetSimdSize();
+ var_types simdType = Compiler::getSIMDTypeForSize(simdSize);
assert((intrinsicId == NI_Vector128_ToScalar) || (intrinsicId == NI_Vector256_ToScalar));
assert(varTypeIsSIMD(simdType));
- assert(varTypeIsArithmetic(baseType));
+ assert(varTypeIsArithmetic(simdBaseType));
assert(simdSize != 0);
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_BYTE:
case TYP_SHORT:
case TYP_INT:
{
- node->gtType = TYP_INT;
- node->gtSIMDBaseType = TYP_INT;
+ node->gtType = TYP_INT;
+ node->SetSimdBaseJitType(CORINFO_TYPE_INT);
node->gtHWIntrinsicId = NI_SSE2_ConvertToInt32;
break;
}
@@ -3169,8 +3191,8 @@ void Lowering::LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node)
case TYP_USHORT:
case TYP_UINT:
{
- node->gtType = TYP_UINT;
- node->gtSIMDBaseType = TYP_UINT;
+ node->gtType = TYP_UINT;
+ node->SetSimdBaseJitType(CORINFO_TYPE_UINT);
node->gtHWIntrinsicId = NI_SSE2_ConvertToUInt32;
break;
}
@@ -3204,12 +3226,12 @@ void Lowering::LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node)
LowerNode(node);
- if (genTypeSize(baseType) < 4)
+ if (genTypeSize(simdBaseType) < 4)
{
LIR::Use use;
bool foundUse = BlockRange().TryGetUse(node, &use);
- GenTreeCast* cast = comp->gtNewCastNode(baseType, node, node->IsUnsigned(), baseType);
+ GenTreeCast* cast = comp->gtNewCastNode(simdBaseType, node, node->IsUnsigned(), simdBaseType);
BlockRange().InsertAfter(node, cast);
if (foundUse)
@@ -4669,12 +4691,12 @@ void Lowering::ContainCheckSIMD(GenTreeSIMD* simdNode)
else
#endif // !TARGET_64BIT
if (op1->IsFPZero() || op1->IsIntegralConst(0) ||
- (varTypeIsIntegral(simdNode->gtSIMDBaseType) && op1->IsIntegralConst(-1)))
+ (varTypeIsIntegral(simdNode->GetSimdBaseType()) && op1->IsIntegralConst(-1)))
{
MakeSrcContained(simdNode, op1);
}
else if ((comp->getSIMDSupportLevel() == SIMD_AVX2_Supported) &&
- ((simdNode->gtSIMDSize == 16) || (simdNode->gtSIMDSize == 32)))
+ ((simdNode->GetSimdSize() == 16) || (simdNode->GetSimdSize() == 32)))
{
// Either op1 is a float or dbl constant or an addr
if (op1->IsCnsFltOrDbl() || op1->OperIsLocalAddr())
@@ -4695,7 +4717,7 @@ void Lowering::ContainCheckSIMD(GenTreeSIMD* simdNode)
// This implements get_Item method. The sources are:
// - the source SIMD struct
// - index (which element to get)
- // The result is baseType of SIMD struct.
+ // The result is simdBaseType of SIMD struct.
op1 = simdNode->AsOp()->gtOp1;
op2 = simdNode->AsOp()->gtOp2;
@@ -4883,7 +4905,7 @@ bool Lowering::IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* containingNode, Ge
case NI_SSE41_Insert:
case NI_SSE41_X64_Insert:
{
- if (containingNode->gtSIMDBaseType == TYP_FLOAT)
+ if (containingNode->GetSimdBaseType() == TYP_FLOAT)
{
assert(containingIntrinsicId == NI_SSE41_Insert);
assert(genTypeSize(node->TypeGet()) == 16);
@@ -4944,7 +4966,7 @@ bool Lowering::IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* containingNode, Ge
assert(supportsUnalignedSIMDLoads == false);
assert(supportsSIMDScalarLoads == false);
- const unsigned expectedSize = genTypeSize(containingNode->gtSIMDBaseType);
+ const unsigned expectedSize = genTypeSize(containingNode->GetSimdBaseType());
const unsigned operandSize = genTypeSize(node->TypeGet());
supportsGeneralLoads = (operandSize >= expectedSize);
@@ -4988,7 +5010,7 @@ bool Lowering::IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* containingNode, Ge
{
assert(supportsSIMDScalarLoads == false);
- const unsigned expectedSize = genTypeSize(genActualType(containingNode->gtSIMDBaseType));
+ const unsigned expectedSize = genTypeSize(genActualType(containingNode->GetSimdBaseType()));
const unsigned operandSize = genTypeSize(node->TypeGet());
supportsGeneralLoads = (operandSize == expectedSize);
@@ -5024,7 +5046,7 @@ bool Lowering::IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* containingNode, Ge
assert(supportsSIMDScalarLoads == false);
- const unsigned expectedSize = genTypeSize(genActualType(containingNode->gtSIMDBaseType));
+ const unsigned expectedSize = genTypeSize(genActualType(containingNode->GetSimdBaseType()));
const unsigned operandSize = genTypeSize(node->TypeGet());
supportsGeneralLoads = (operandSize == expectedSize);
@@ -5060,7 +5082,7 @@ bool Lowering::IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* containingNode, Ge
// Currently, we are using SIMDBaseType to store the op2Type info.
if (containingIntrinsicId == NI_SSE42_Crc32)
{
- var_types op2Type = containingNode->gtSIMDBaseType;
+ var_types op2Type = containingNode->GetSimdBaseType();
expectedSize = genTypeSize(op2Type);
}
@@ -5154,10 +5176,11 @@ void Lowering::ContainCheckHWIntrinsicAddr(GenTreeHWIntrinsic* node, GenTree* ad
//
void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node)
{
- NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
- HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId);
- int numArgs = HWIntrinsicInfo::lookupNumArgs(node);
- var_types baseType = node->gtSIMDBaseType;
+ NamedIntrinsic intrinsicId = node->gtHWIntrinsicId;
+ HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId);
+ int numArgs = HWIntrinsicInfo::lookupNumArgs(node);
+ CorInfoType simdBaseJitType = node->GetSimdBaseJitType();
+ var_types simdBaseType = node->GetSimdBaseType();
GenTree* op1 = node->gtGetOp1();
GenTree* op2 = node->gtGetOp2();
@@ -5187,7 +5210,7 @@ void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node)
}
}
- if ((node->gtSIMDSize == 8) || (node->gtSIMDSize == 12))
+ if ((node->GetSimdSize() == 8) || (node->GetSimdSize() == 12))
{
// TODO-XArch-CQ: Ideally we would key this off of the size containingNode
// expects vs the size node actually is or would be if spilled to the stack
@@ -5246,7 +5269,7 @@ void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node)
case NI_AVX2_ConvertToInt32:
case NI_AVX2_ConvertToUInt32:
{
- if (varTypeIsIntegral(baseType))
+ if (varTypeIsIntegral(simdBaseType))
{
// TODO-XARCH-CQ: These intrinsics are "ins reg/mem, xmm" and don't
// currently support containment.
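
Taken together, the lowerxarch.cpp hunks above follow one pattern: the node no longer stores a var_types base type (gtSIMDBaseType) but a CorInfoType (SetSimdBaseJitType/GetSimdBaseJitType), and the var_types view (GetSimdBaseType) is derived on demand. A plausible reason, given the CORINFO_TYPE_NATIVEINT/CORINFO_TYPE_NATIVEUINT cases added in simd.cpp further down, is that the var_types view folds native ints into long/ulong on 64-bit targets, while the JIT type keeps them distinct. A minimal standalone sketch of that storage choice (JitType, VarType, ToVarType and SimdNodeSketch are invented names for illustration, not JIT code):

    #include <cstdio>

    // Illustrative stand-ins for the JIT enums; only the shape of the mapping
    // mirrors the change above.
    enum class JitType { Int, UInt, Long, ULong, NativeInt, NativeUInt, Float };
    enum class VarType { Int, UInt, Long, ULong, Float };

    // On a 64-bit target the native-int JIT types fold into long/ulong, so the
    // var_types view alone cannot tell Vector<nint> apart from Vector<long>.
    static VarType ToVarType(JitType jitType)
    {
        switch (jitType)
        {
            case JitType::Int:        return VarType::Int;
            case JitType::UInt:       return VarType::UInt;
            case JitType::Long:
            case JitType::NativeInt:  return VarType::Long;
            case JitType::ULong:
            case JitType::NativeUInt: return VarType::ULong;
            case JitType::Float:      return VarType::Float;
        }
        return VarType::Int; // unreachable
    }

    // Sketch of a node that stores the JIT type and derives the var_types view on demand.
    struct SimdNodeSketch
    {
        JitType simdBaseJitType;

        void    SetSimdBaseJitType(JitType t) { simdBaseJitType = t; }
        JitType GetSimdBaseJitType() const    { return simdBaseJitType; }
        VarType GetSimdBaseType() const       { return ToVarType(simdBaseJitType); }
    };

    int main()
    {
        SimdNodeSketch node{};
        node.SetSimdBaseJitType(JitType::NativeInt);

        // Both print the same var_types bucket, but only the stored JIT type still
        // records that the element type was nint rather than long.
        std::printf("%d %d\n", (int)node.GetSimdBaseType(), (int)ToVarType(JitType::Long));
        return 0;
    }
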
diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp
index 04f6e90468c..a868ca415fe 100644
--- a/src/coreclr/jit/lsra.cpp
+++ b/src/coreclr/jit/lsra.cpp
@@ -6974,16 +6974,16 @@ void LinearScan::insertUpperVectorSave(GenTree* tree,
GenTreeSIMD* simdNode =
new (compiler, GT_SIMD) GenTreeSIMD(LargeVectorSaveType, saveLcl, nullptr, SIMDIntrinsicUpperSave,
- varDsc->lvBaseType, genTypeSize(varDsc->lvType));
+ varDsc->GetSimdBaseJitType(), genTypeSize(varDsc->lvType));
- if (simdNode->gtSIMDBaseType == TYP_UNDEF)
+ if (simdNode->GetSimdBaseJitType() == CORINFO_TYPE_UNDEF)
{
// There are a few scenarios where we can get a LCL_VAR which
// doesn't know the underlying baseType. In that scenario, we
// will just lie and say it is a float. Codegen doesn't actually
// care what the type is but this avoids an assert that would
// otherwise be fired from the more general checks that happen.
- simdNode->gtSIMDBaseType = TYP_FLOAT;
+ simdNode->SetSimdBaseJitType(CORINFO_TYPE_FLOAT);
}
SetLsraAdded(simdNode);
@@ -7041,16 +7041,16 @@ void LinearScan::insertUpperVectorRestore(GenTree* tree,
GenTreeSIMD* simdNode =
new (compiler, GT_SIMD) GenTreeSIMD(varDsc->lvType, restoreLcl, nullptr, SIMDIntrinsicUpperRestore,
- varDsc->lvBaseType, genTypeSize(varDsc->lvType));
+ varDsc->GetSimdBaseJitType(), genTypeSize(varDsc->lvType));
- if (simdNode->gtSIMDBaseType == TYP_UNDEF)
+ if (simdNode->GetSimdBaseJitType() == CORINFO_TYPE_UNDEF)
{
// There are a few scenarios where we can get a LCL_VAR which
// doesn't know the underlying baseType. In that scenario, we
// will just lie and say it is a float. Codegen doesn't actually
// care what the type is but this avoids an assert that would
// otherwise be fired from the more general checks that happen.
- simdNode->gtSIMDBaseType = TYP_FLOAT;
+ simdNode->SetSimdBaseJitType(CORINFO_TYPE_FLOAT);
}
regNumber restoreReg = upperVectorInterval->physReg;
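
Both upper-vector hunks above keep the existing fallback, only with the sentinel changed from TYP_UNDEF to CORINFO_TYPE_UNDEF: when the spilled local's element type is unknown, the node is given float purely so the later, stricter checks pass, since the save/restore only moves the register's upper half. A tiny sketch of that defaulting rule (the enum and helper below are illustrative names only):

    #include <cstdio>

    enum class JitType { Undef, Float, Int };

    // Mirrors the fallback above: an unknown element type is treated as float,
    // because the upper-vector save/restore only moves bytes regardless of type.
    static JitType NormalizeUpperVectorBaseType(JitType t)
    {
        return (t == JitType::Undef) ? JitType::Float : t;
    }

    int main()
    {
        std::printf("%d\n", (int)NormalizeUpperVectorBaseType(JitType::Undef)); // 1, i.e. Float
        return 0;
    }
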
diff --git a/src/coreclr/jit/lsraarm64.cpp b/src/coreclr/jit/lsraarm64.cpp
index d9aa71da582..d7b51ff42bb 100644
--- a/src/coreclr/jit/lsraarm64.cpp
+++ b/src/coreclr/jit/lsraarm64.cpp
@@ -871,9 +871,9 @@ int LinearScan::BuildSIMD(GenTreeSIMD* simdTree)
case SIMDIntrinsicInitN:
{
- var_types baseType = simdTree->gtSIMDBaseType;
- srcCount = (short)(simdTree->gtSIMDSize / genTypeSize(baseType));
- if (varTypeIsFloating(simdTree->gtSIMDBaseType))
+ var_types baseType = simdTree->GetSimdBaseType();
+ srcCount = (short)(simdTree->GetSimdSize() / genTypeSize(baseType));
+ if (varTypeIsFloating(simdTree->GetSimdBaseType()))
{
// Need an internal register to stitch together all the values into a single vector in a SIMD reg.
buildInternalFloatRegisterDefForNode(simdTree);
@@ -978,13 +978,27 @@ int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree)
if (intrin.category == HW_Category_SIMDByIndexedElement)
{
- const unsigned int indexedElementSimdSize = genTypeSize(intrinsicTree->GetAuxiliaryType());
+ var_types indexedElementOpType;
+
+ if (intrin.numOperands == 3)
+ {
+ indexedElementOpType = intrin.op2->TypeGet();
+ }
+ else
+ {
+ assert(intrin.numOperands == 4);
+ indexedElementOpType = intrin.op3->TypeGet();
+ }
+
+ assert(varTypeIsSIMD(indexedElementOpType));
+
+ const unsigned int indexedElementSimdSize = genTypeSize(indexedElementOpType);
HWIntrinsicInfo::lookupImmBounds(intrin.id, indexedElementSimdSize, intrin.baseType, &immLowerBound,
&immUpperBound);
}
else
{
- HWIntrinsicInfo::lookupImmBounds(intrin.id, intrinsicTree->gtSIMDSize, intrin.baseType, &immLowerBound,
+ HWIntrinsicInfo::lookupImmBounds(intrin.id, intrinsicTree->GetSimdSize(), intrin.baseType, &immLowerBound,
&immUpperBound);
}
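
In the ARM64 BuildHWIntrinsic hunk above, the immediate bounds for by-indexed-element intrinsics are now derived from the indexed vector operand itself (op2 in the 3-operand form, op3 in the 4-operand form) rather than from the node's auxiliary type. A standalone sketch of that selection (OperandSketch, IntrinSketch and IndexedElementSimdSize are invented names):

    #include <cassert>
    #include <cstdio>

    // Sketch-only operand model: each operand carries the byte size of its type.
    struct OperandSketch { unsigned typeSize; bool isSimd; };

    struct IntrinSketch
    {
        int           numOperands;
        OperandSketch op2;
        OperandSketch op3;
    };

    // The vector being indexed is op2 for 3 operands and op3 for 4 operands, and
    // its own size bounds the element-index immediate.
    static unsigned IndexedElementSimdSize(const IntrinSketch& intrin)
    {
        assert((intrin.numOperands == 3) || (intrin.numOperands == 4));
        const OperandSketch& indexed = (intrin.numOperands == 3) ? intrin.op2 : intrin.op3;
        assert(indexed.isSimd);
        return indexed.typeSize;
    }

    int main()
    {
        IntrinSketch threeOp{3, {16, true}, {0, false}};
        IntrinSketch fourOp{4, {8, true}, {16, true}};
        std::printf("%u %u\n", IndexedElementSimdSize(threeOp), IndexedElementSimdSize(fourOp)); // 16 16
        return 0;
    }
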
diff --git a/src/coreclr/jit/lsraxarch.cpp b/src/coreclr/jit/lsraxarch.cpp
index 910b121ce05..ad750fad953 100644
--- a/src/coreclr/jit/lsraxarch.cpp
+++ b/src/coreclr/jit/lsraxarch.cpp
@@ -1874,7 +1874,7 @@ int LinearScan::BuildSIMD(GenTreeSIMD* simdTree)
// Only SIMDIntrinsicInit can be contained
assert(simdTree->gtSIMDIntrinsicID == SIMDIntrinsicInit);
}
- SetContainsAVXFlags(simdTree->gtSIMDSize);
+ SetContainsAVXFlags(simdTree->GetSimdSize());
GenTree* op1 = simdTree->gtGetOp1();
GenTree* op2 = simdTree->gtGetOp2();
int srcCount = 0;
@@ -1925,8 +1925,8 @@ int LinearScan::BuildSIMD(GenTreeSIMD* simdTree)
case SIMDIntrinsicInitN:
{
- var_types baseType = simdTree->gtSIMDBaseType;
- srcCount = (short)(simdTree->gtSIMDSize / genTypeSize(baseType));
+ var_types baseType = simdTree->GetSimdBaseType();
+ srcCount = (short)(simdTree->GetSimdSize() / genTypeSize(baseType));
// Need an internal register to stitch together all the values into a single vector in a SIMD reg.
buildInternalFloatRegisterDefForNode(simdTree);
int initCount = 0;
@@ -1982,13 +1982,13 @@ int LinearScan::BuildSIMD(GenTreeSIMD* simdTree)
{
(void)compiler->getSIMDInitTempVarNum();
}
- else if (!varTypeIsFloating(simdTree->gtSIMDBaseType))
+ else if (!varTypeIsFloating(simdTree->GetSimdBaseType()))
{
bool needFloatTemp;
- if (varTypeIsSmallInt(simdTree->gtSIMDBaseType) &&
+ if (varTypeIsSmallInt(simdTree->GetSimdBaseType()) &&
(compiler->getSIMDSupportLevel() == SIMD_AVX2_Supported))
{
- int byteShiftCnt = (int)op2->AsIntCon()->gtIconVal * genTypeSize(simdTree->gtSIMDBaseType);
+ int byteShiftCnt = (int)op2->AsIntCon()->gtIconVal * genTypeSize(simdTree->GetSimdBaseType());
needFloatTemp = (byteShiftCnt >= 16);
}
else
@@ -2007,7 +2007,7 @@ int LinearScan::BuildSIMD(GenTreeSIMD* simdTree)
// generate a movzx/movsx. On x86, these require byteable registers. So figure out which
// cases will require this, so the non-byteable registers can be excluded.
- var_types baseType = simdTree->gtSIMDBaseType;
+ var_types baseType = simdTree->GetSimdBaseType();
if (op2->IsCnsIntOrI() && varTypeIsSmallInt(baseType))
{
bool ZeroOrSignExtnReqd = true;
@@ -2050,7 +2050,7 @@ int LinearScan::BuildSIMD(GenTreeSIMD* simdTree)
break;
case SIMDIntrinsicConvertToSingle:
- if (simdTree->gtSIMDBaseType == TYP_UINT)
+ if (simdTree->GetSimdBaseType() == TYP_UINT)
{
// We need an internal register different from targetReg.
setInternalRegsDelayFree = true;
@@ -2066,7 +2066,7 @@ int LinearScan::BuildSIMD(GenTreeSIMD* simdTree)
case SIMDIntrinsicWidenLo:
case SIMDIntrinsicWidenHi:
- if (varTypeIsIntegral(simdTree->gtSIMDBaseType))
+ if (varTypeIsIntegral(simdTree->GetSimdBaseType()))
{
// We need an internal register different from targetReg.
setInternalRegsDelayFree = true;
@@ -2091,14 +2091,15 @@ int LinearScan::BuildSIMD(GenTreeSIMD* simdTree)
setInternalRegsDelayFree = true;
buildInternalFloatRegisterDefForNode(simdTree);
#ifdef TARGET_X86
- if (simdTree->gtSIMDBaseType == TYP_LONG)
+ if (simdTree->GetSimdBaseType() == TYP_LONG)
{
buildInternalFloatRegisterDefForNode(simdTree);
buildInternalFloatRegisterDefForNode(simdTree);
}
else
#endif
- if ((compiler->getSIMDSupportLevel() == SIMD_AVX2_Supported) || (simdTree->gtSIMDBaseType == TYP_ULONG))
+ if ((compiler->getSIMDSupportLevel() == SIMD_AVX2_Supported) ||
+ (simdTree->GetSimdBaseType() == TYP_ULONG))
{
buildInternalFloatRegisterDefForNode(simdTree);
}
@@ -2110,7 +2111,7 @@ int LinearScan::BuildSIMD(GenTreeSIMD* simdTree)
// We need an internal register different from targetReg.
setInternalRegsDelayFree = true;
buildInternalFloatRegisterDefForNode(simdTree);
- if ((compiler->getSIMDSupportLevel() == SIMD_AVX2_Supported) && (simdTree->gtSIMDBaseType != TYP_DOUBLE))
+ if ((compiler->getSIMDSupportLevel() == SIMD_AVX2_Supported) && (simdTree->GetSimdBaseType() != TYP_DOUBLE))
{
buildInternalFloatRegisterDefForNode(simdTree);
}
@@ -2158,7 +2159,7 @@ int LinearScan::BuildSIMD(GenTreeSIMD* simdTree)
int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree)
{
NamedIntrinsic intrinsicId = intrinsicTree->gtHWIntrinsicId;
- var_types baseType = intrinsicTree->gtSIMDBaseType;
+ var_types baseType = intrinsicTree->GetSimdBaseType();
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId);
int numArgs = HWIntrinsicInfo::lookupNumArgs(intrinsicTree);
@@ -2167,7 +2168,7 @@ int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree)
// or non-AVX intrinsics that will use VEX encoding if it is available on the target).
if (intrinsicTree->isSIMD())
{
- SetContainsAVXFlags(intrinsicTree->gtSIMDSize);
+ SetContainsAVXFlags(intrinsicTree->GetSimdSize());
}
GenTree* op1 = intrinsicTree->gtGetOp1();
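
In the small-int SIMDIntrinsicGetItem path above, the byte offset of the requested element is the constant index times the element size, and under AVX2 an extra float register is reserved once that offset reaches 16 bytes, which suggests the element then lives outside the low 128 bits of the register. A sketch of just that computation (the helper name is invented):

    #include <cstdio>

    // Sketch of the AVX2 small-int GetItem heuristic above.
    static bool NeedsFloatTempForSmallIntGetItem(int index, unsigned elementSize)
    {
        // byteShiftCnt mirrors the computation in BuildSIMD: the element's byte
        // offset inside the vector register.
        int byteShiftCnt = index * (int)elementSize;
        return byteShiftCnt >= 16;
    }

    int main()
    {
        // ushort elements: index 7 still sits in the low 16 bytes, index 9 does not.
        std::printf("%d %d\n", NeedsFloatTempForSmallIntGetItem(7, 2),
                    NeedsFloatTempForSmallIntGetItem(9, 2)); // 0 1
        return 0;
    }
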
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index 23ea4b82eb9..e0b39011eae 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -5439,7 +5439,7 @@ GenTree* Compiler::fgMorphArrayIndex(GenTree* tree)
// so we need to set the correct type on the GT_IND.
// (We don't care about the base type here, so we only check, but don't retain, the return value).
unsigned simdElemSize = 0;
- if (getBaseTypeAndSizeOfSIMDType(elemStructType, &simdElemSize) != TYP_UNKNOWN)
+ if (getBaseJitTypeAndSizeOfSIMDType(elemStructType, &simdElemSize) != CORINFO_TYPE_UNDEF)
{
assert(simdElemSize == elemSize);
elemTyp = getSIMDTypeForSize(elemSize);
@@ -10102,7 +10102,8 @@ GenTree* Compiler::fgMorphOneAsgBlockOp(GenTree* tree)
noway_assert(src->IsIntegralConst(0));
noway_assert(destVarDsc != nullptr);
- src = new (this, GT_SIMD) GenTreeSIMD(asgType, src, SIMDIntrinsicInit, destVarDsc->lvBaseType, size);
+ src = new (this, GT_SIMD)
+ GenTreeSIMD(asgType, src, SIMDIntrinsicInit, destVarDsc->GetSimdBaseJitType(), size);
}
else
#endif
@@ -11934,8 +11935,8 @@ GenTree* Compiler::fgMorphForRegisterFP(GenTree* tree)
// Arguments:
// tree - GenTreePtr. This node will be checked to see whether it is a field that belongs to a simd
// struct used for a simd intrinsic or not.
-// pBaseTypeOut - var_types pointer, if the tree node is the tree we want, we set *pBaseTypeOut
-// to simd lclvar's base type.
+// simdBaseJitTypeOut - CorInfoType pointer, if the tree node is the tree we want, we set *simdBaseJitTypeOut
+// to simd lclvar's base JIT type.
// indexOut - unsigned pointer, if the tree is used for simd intrinsic, we will set *indexOut
// to the index number of this field.
// simdSizeOut - unsigned pointer, if the tree is used for simd intrinsic, set the *simdSizeOut
@@ -11948,11 +11949,11 @@ GenTree* Compiler::fgMorphForRegisterFP(GenTree* tree)
// intrinsic related field, return nullptr.
//
-GenTree* Compiler::getSIMDStructFromField(GenTree* tree,
- var_types* pBaseTypeOut,
- unsigned* indexOut,
- unsigned* simdSizeOut,
- bool ignoreUsedInSIMDIntrinsic /*false*/)
+GenTree* Compiler::getSIMDStructFromField(GenTree* tree,
+ CorInfoType* simdBaseJitTypeOut,
+ unsigned* indexOut,
+ unsigned* simdSizeOut,
+ bool ignoreUsedInSIMDIntrinsic /*false*/)
{
GenTree* ret = nullptr;
if (tree->OperGet() == GT_FIELD)
@@ -11980,33 +11981,33 @@ GenTree* Compiler::getSIMDStructFromField(GenTree* tree,
LclVarDsc* varDsc = &lvaTable[lclNum];
if (varDsc->lvIsUsedInSIMDIntrinsic() || ignoreUsedInSIMDIntrinsic)
{
- *simdSizeOut = varDsc->lvExactSize;
- *pBaseTypeOut = getBaseTypeOfSIMDLocal(obj);
- ret = obj;
+ *simdSizeOut = varDsc->lvExactSize;
+ *simdBaseJitTypeOut = getBaseJitTypeOfSIMDLocal(obj);
+ ret = obj;
}
}
else if (obj->OperGet() == GT_SIMD)
{
ret = obj;
GenTreeSIMD* simdNode = obj->AsSIMD();
- *simdSizeOut = simdNode->gtSIMDSize;
- *pBaseTypeOut = simdNode->gtSIMDBaseType;
+ *simdSizeOut = simdNode->GetSimdSize();
+ *simdBaseJitTypeOut = simdNode->GetSimdBaseJitType();
}
#ifdef FEATURE_HW_INTRINSICS
else if (obj->OperIsHWIntrinsic())
{
ret = obj;
GenTreeHWIntrinsic* simdNode = obj->AsHWIntrinsic();
- *simdSizeOut = simdNode->gtSIMDSize;
- *pBaseTypeOut = simdNode->gtSIMDBaseType;
+ *simdSizeOut = simdNode->GetSimdSize();
+ *simdBaseJitTypeOut = simdNode->GetSimdBaseJitType();
}
#endif // FEATURE_HW_INTRINSICS
}
}
if (ret != nullptr)
{
- unsigned BaseTypeSize = genTypeSize(*pBaseTypeOut);
- *indexOut = tree->AsField()->gtFldOffset / BaseTypeSize;
+ unsigned baseTypeSize = genTypeSize(JITtype2varType(*simdBaseJitTypeOut));
+ *indexOut = tree->AsField()->gtFldOffset / baseTypeSize;
}
return ret;
}
@@ -12025,15 +12026,16 @@ GenTree* Compiler::getSIMDStructFromField(GenTree* tree,
GenTree* Compiler::fgMorphFieldToSIMDIntrinsicGet(GenTree* tree)
{
- unsigned index = 0;
- var_types baseType = TYP_UNKNOWN;
- unsigned simdSize = 0;
- GenTree* simdStructNode = getSIMDStructFromField(tree, &baseType, &index, &simdSize);
+ unsigned index = 0;
+ CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
+ unsigned simdSize = 0;
+ GenTree* simdStructNode = getSIMDStructFromField(tree, &simdBaseJitType, &index, &simdSize);
if (simdStructNode != nullptr)
{
- assert(simdSize >= ((index + 1) * genTypeSize(baseType)));
+ var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
+ assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType)));
GenTree* op2 = gtNewIconNode(index);
- tree = gtNewSIMDNode(baseType, simdStructNode, op2, SIMDIntrinsicGetItem, baseType, simdSize);
+ tree = gtNewSIMDNode(simdBaseType, simdStructNode, op2, SIMDIntrinsicGetItem, simdBaseJitType, simdSize);
#ifdef DEBUG
tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
@@ -12059,14 +12061,16 @@ GenTree* Compiler::fgMorphFieldAssignToSIMDIntrinsicSet(GenTree* tree)
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
- unsigned index = 0;
- var_types baseType = TYP_UNKNOWN;
- unsigned simdSize = 0;
- GenTree* simdOp1Struct = getSIMDStructFromField(op1, &baseType, &index, &simdSize);
+ unsigned index = 0;
+ CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
+ unsigned simdSize = 0;
+ GenTree* simdOp1Struct = getSIMDStructFromField(op1, &simdBaseJitType, &index, &simdSize);
if (simdOp1Struct != nullptr)
{
+ var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
+
// Generate the simd set intrinsic
- assert(simdSize >= ((index + 1) * genTypeSize(baseType)));
+ assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType)));
SIMDIntrinsicID simdIntrinsicID = SIMDIntrinsicInvalid;
switch (index)
@@ -12090,7 +12094,7 @@ GenTree* Compiler::fgMorphFieldAssignToSIMDIntrinsicSet(GenTree* tree)
GenTree* target = gtClone(simdOp1Struct);
assert(target != nullptr);
var_types simdType = target->gtType;
- GenTree* simdTree = gtNewSIMDNode(simdType, simdOp1Struct, op2, simdIntrinsicID, baseType, simdSize);
+ GenTree* simdTree = gtNewSIMDNode(simdType, simdOp1Struct, op2, simdIntrinsicID, simdBaseJitType, simdSize);
tree->AsOp()->gtOp1 = target;
tree->AsOp()->gtOp2 = simdTree;
@@ -19099,22 +19103,23 @@ bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement*
GenTree* tree = stmt->GetRootNode();
assert(tree->OperGet() == GT_ASG);
- GenTree* originalLHS = tree->AsOp()->gtOp1;
- GenTree* prevLHS = tree->AsOp()->gtOp1;
- GenTree* prevRHS = tree->AsOp()->gtOp2;
- unsigned index = 0;
- var_types baseType = TYP_UNKNOWN;
- unsigned simdSize = 0;
- GenTree* simdStructNode = getSIMDStructFromField(prevRHS, &baseType, &index, &simdSize, true);
+ GenTree* originalLHS = tree->AsOp()->gtOp1;
+ GenTree* prevLHS = tree->AsOp()->gtOp1;
+ GenTree* prevRHS = tree->AsOp()->gtOp2;
+ unsigned index = 0;
+ CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
+ unsigned simdSize = 0;
+ GenTree* simdStructNode = getSIMDStructFromField(prevRHS, &simdBaseJitType, &index, &simdSize, true);
- if (simdStructNode == nullptr || index != 0 || baseType != TYP_FLOAT)
+ if (simdStructNode == nullptr || index != 0 || simdBaseJitType != CORINFO_TYPE_FLOAT)
{
// if the RHS is not from a SIMD vector field X, then there is no need to check further.
return false;
}
+ var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
var_types simdType = getSIMDTypeForSize(simdSize);
- int assignmentsCount = simdSize / genTypeSize(baseType) - 1;
+ int assignmentsCount = simdSize / genTypeSize(simdBaseType) - 1;
int remainingAssignments = assignmentsCount;
Statement* curStmt = stmt->GetNextStmt();
Statement* lastStmt = stmt;
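
getSIMDStructFromField now reports the element type through a CorInfoType out-parameter, and its callers convert the field's byte offset into an element index by dividing by the element size, asserting that the index fits inside the SIMD struct. A standalone sketch of that arithmetic (FieldOffsetToSimdIndex is an invented name):

    #include <cassert>
    #include <cstdio>

    // A Vector field access at byte offset fieldOffset maps to element index
    // fieldOffset / elementSize, and that element must lie within the struct.
    static unsigned FieldOffsetToSimdIndex(unsigned fieldOffset, unsigned elementSize, unsigned simdSize)
    {
        unsigned index = fieldOffset / elementSize;
        assert(simdSize >= ((index + 1) * elementSize));
        return index;
    }

    int main()
    {
        // Vector3.Z: offset 8 with 4-byte float elements in a 12-byte struct -> index 2.
        std::printf("%u\n", FieldOffsetToSimdIndex(8, 4, 12));
        return 0;
    }
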
diff --git a/src/coreclr/jit/rationalize.cpp b/src/coreclr/jit/rationalize.cpp
index ebdac7725ab..e15bc54d3af 100644
--- a/src/coreclr/jit/rationalize.cpp
+++ b/src/coreclr/jit/rationalize.cpp
@@ -400,13 +400,13 @@ void Rationalizer::RewriteAssignment(LIR::Use& use)
{
if (location->OperGet() == GT_LCL_VAR)
{
- var_types simdType = location->TypeGet();
- GenTree* initVal = assignment->AsOp()->gtOp2;
- var_types baseType = comp->getBaseTypeOfSIMDLocal(location);
- if (baseType != TYP_UNKNOWN)
+ var_types simdType = location->TypeGet();
+ GenTree* initVal = assignment->AsOp()->gtOp2;
+ CorInfoType simdBaseJitType = comp->getBaseJitTypeOfSIMDLocal(location);
+ if (simdBaseJitType != CORINFO_TYPE_UNDEF)
{
GenTreeSIMD* simdTree = new (comp, GT_SIMD)
- GenTreeSIMD(simdType, initVal, SIMDIntrinsicInit, baseType, genTypeSize(simdType));
+ GenTreeSIMD(simdType, initVal, SIMDIntrinsicInit, simdBaseJitType, genTypeSize(simdType));
assignment->AsOp()->gtOp2 = simdTree;
value = simdTree;
initVal->gtNext = simdTree;
@@ -734,17 +734,17 @@ Compiler::fgWalkResult Rationalizer::RewriteNode(GenTree** useEdge, Compiler::Ge
{
noway_assert(comp->supportSIMDTypes());
GenTreeSIMD* simdNode = node->AsSIMD();
- unsigned simdSize = simdNode->gtSIMDSize;
+ unsigned simdSize = simdNode->GetSimdSize();
var_types simdType = comp->getSIMDTypeForSize(simdSize);
// TODO-1stClassStructs: This should be handled more generally for enregistered or promoted
// structs that are passed or returned in a different register type than their enregistered
// type(s).
- if (simdNode->gtType == TYP_I_IMPL && simdNode->gtSIMDSize == TARGET_POINTER_SIZE)
+ if (simdNode->gtType == TYP_I_IMPL && simdNode->GetSimdSize() == TARGET_POINTER_SIZE)
{
// This happens when it is consumed by a GT_RET_EXPR.
// It can only be a Vector2f or Vector2i.
- assert(genTypeSize(simdNode->gtSIMDBaseType) == 4);
+ assert(genTypeSize(simdNode->GetSimdBaseType()) == 4);
simdNode->gtType = TYP_SIMD8;
}
// Certain SIMD trees require rationalizing.
@@ -752,7 +752,7 @@ Compiler::fgWalkResult Rationalizer::RewriteNode(GenTree** useEdge, Compiler::Ge
{
// Rewrite this as an explicit load.
JITDUMP("Rewriting GT_SIMD array init as an explicit load:\n");
- unsigned int baseTypeSize = genTypeSize(simdNode->gtSIMDBaseType);
+ unsigned int baseTypeSize = genTypeSize(simdNode->GetSimdBaseType());
GenTree* address = new (comp, GT_LEA) GenTreeAddrMode(TYP_BYREF, simdNode->gtOp1, simdNode->gtOp2,
baseTypeSize, OFFSETOF__CORINFO_Array__data);
GenTree* ind = comp->gtNewOperNode(GT_IND, simdType, address);
@@ -801,7 +801,7 @@ Compiler::fgWalkResult Rationalizer::RewriteNode(GenTree** useEdge, Compiler::Ge
// TODO-1stClassStructs: This should be handled more generally for enregistered or promoted
// structs that are passed or returned in a different register type than their enregistered
// type(s).
- if ((hwIntrinsicNode->gtType == TYP_I_IMPL) && (hwIntrinsicNode->gtSIMDSize == TARGET_POINTER_SIZE))
+ if ((hwIntrinsicNode->gtType == TYP_I_IMPL) && (hwIntrinsicNode->GetSimdSize() == TARGET_POINTER_SIZE))
{
#ifdef TARGET_ARM64
// Special case for GetElement/ToScalar because they take Vector64<T> and return T
@@ -812,7 +812,7 @@ Compiler::fgWalkResult Rationalizer::RewriteNode(GenTree** useEdge, Compiler::Ge
{
// This happens when it is consumed by a GT_RET_EXPR.
// It can only be a Vector2f or Vector2i.
- assert(genTypeSize(hwIntrinsicNode->gtSIMDBaseType) == 4);
+ assert(genTypeSize(hwIntrinsicNode->GetSimdBaseType()) == 4);
hwIntrinsicNode->gtType = TYP_SIMD8;
}
}
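
The rationalizer hunk above rewrites a GT_SIMD array initialization as an explicit load whose address mode is base + index * elementSize + data offset. A scalar model of that address computation (the 16-byte data offset used in the example is an assumption of the sketch, not the runtime's actual CORINFO_Array layout constant):

    #include <cstdint>
    #include <cstdio>

    // base + index * scale + offset, where scale is the SIMD element size and
    // offset is where the array's data begins.
    static uintptr_t ElementAddress(uintptr_t arrayBase, uintptr_t index, unsigned elemSize, unsigned dataOffset)
    {
        return arrayBase + index * elemSize + dataOffset;
    }

    int main()
    {
        std::printf("%#lx\n", (unsigned long)ElementAddress(0x1000, 3, 4, 16)); // 0x101c
        return 0;
    }
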
diff --git a/src/coreclr/jit/simd.cpp b/src/coreclr/jit/simd.cpp
index 0664089a834..b5b3532f632 100644
--- a/src/coreclr/jit/simd.cpp
+++ b/src/coreclr/jit/simd.cpp
@@ -61,8 +61,9 @@ int Compiler::getSIMDVectorLength(unsigned simdSize, var_types baseType)
//
int Compiler::getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd)
{
- unsigned sizeBytes = 0;
- var_types baseType = getBaseTypeAndSizeOfSIMDType(typeHnd, &sizeBytes);
+ unsigned sizeBytes = 0;
+ CorInfoType baseJitType = getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes);
+ var_types baseType = JitType2PreciseVarType(baseJitType);
return getSIMDVectorLength(sizeBytes, baseType);
}
@@ -124,7 +125,7 @@ int Compiler::getSIMDTypeAlignment(var_types simdType)
// TODO-Throughput: current implementation parses class name to find base type. Change
// this when we implement SIMD intrinsic identification for the final
// product.
-var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes /*= nullptr */)
+CorInfoType Compiler::getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes /*= nullptr */)
{
assert(supportSIMDTypes());
@@ -149,12 +150,12 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u
if (typeHnd == nullptr)
{
- return TYP_UNKNOWN;
+ return CORINFO_TYPE_UNDEF;
}
// fast path search using cached type handles of important types
- var_types simdBaseType = TYP_UNKNOWN;
- unsigned size = 0;
+ CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
+ unsigned size = 0;
// TODO - Optimize SIMD type recognition by IntrinsicAttribute
if (isSIMDClass(typeHnd))
@@ -163,34 +164,34 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u
// less likely to be used type handles
if (typeHnd == m_simdHandleCache->SIMDFloatHandle)
{
- simdBaseType = TYP_FLOAT;
- size = getSIMDVectorRegisterByteLength();
+ simdBaseJitType = CORINFO_TYPE_FLOAT;
+ size = getSIMDVectorRegisterByteLength();
JITDUMP(" Known type SIMD Vector<Float>\n");
}
else if (typeHnd == m_simdHandleCache->SIMDIntHandle)
{
- simdBaseType = TYP_INT;
- size = getSIMDVectorRegisterByteLength();
+ simdBaseJitType = CORINFO_TYPE_INT;
+ size = getSIMDVectorRegisterByteLength();
JITDUMP(" Known type SIMD Vector<Int>\n");
}
else if (typeHnd == m_simdHandleCache->SIMDVector2Handle)
{
- simdBaseType = TYP_FLOAT;
- size = 2 * genTypeSize(TYP_FLOAT);
+ simdBaseJitType = CORINFO_TYPE_FLOAT;
+ size = 2 * genTypeSize(TYP_FLOAT);
assert(size == roundUp(info.compCompHnd->getClassSize(typeHnd), TARGET_POINTER_SIZE));
JITDUMP(" Known type Vector2\n");
}
else if (typeHnd == m_simdHandleCache->SIMDVector3Handle)
{
- simdBaseType = TYP_FLOAT;
- size = 3 * genTypeSize(TYP_FLOAT);
+ simdBaseJitType = CORINFO_TYPE_FLOAT;
+ size = 3 * genTypeSize(TYP_FLOAT);
assert(size == info.compCompHnd->getClassSize(typeHnd));
JITDUMP(" Known type Vector3\n");
}
else if (typeHnd == m_simdHandleCache->SIMDVector4Handle)
{
- simdBaseType = TYP_FLOAT;
- size = 4 * genTypeSize(TYP_FLOAT);
+ simdBaseJitType = CORINFO_TYPE_FLOAT;
+ size = 4 * genTypeSize(TYP_FLOAT);
assert(size == roundUp(info.compCompHnd->getClassSize(typeHnd), TARGET_POINTER_SIZE));
JITDUMP(" Known type Vector4\n");
}
@@ -201,55 +202,67 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u
}
else if (typeHnd == m_simdHandleCache->SIMDUShortHandle)
{
- simdBaseType = TYP_USHORT;
- size = getSIMDVectorRegisterByteLength();
+ simdBaseJitType = CORINFO_TYPE_USHORT;
+ size = getSIMDVectorRegisterByteLength();
JITDUMP(" Known type SIMD Vector<ushort>\n");
}
else if (typeHnd == m_simdHandleCache->SIMDUByteHandle)
{
- simdBaseType = TYP_UBYTE;
- size = getSIMDVectorRegisterByteLength();
+ simdBaseJitType = CORINFO_TYPE_UBYTE;
+ size = getSIMDVectorRegisterByteLength();
JITDUMP(" Known type SIMD Vector<ubyte>\n");
}
else if (typeHnd == m_simdHandleCache->SIMDDoubleHandle)
{
- simdBaseType = TYP_DOUBLE;
- size = getSIMDVectorRegisterByteLength();
+ simdBaseJitType = CORINFO_TYPE_DOUBLE;
+ size = getSIMDVectorRegisterByteLength();
JITDUMP(" Known type SIMD Vector<Double>\n");
}
else if (typeHnd == m_simdHandleCache->SIMDLongHandle)
{
- simdBaseType = TYP_LONG;
- size = getSIMDVectorRegisterByteLength();
+ simdBaseJitType = CORINFO_TYPE_LONG;
+ size = getSIMDVectorRegisterByteLength();
JITDUMP(" Known type SIMD Vector<Long>\n");
}
else if (typeHnd == m_simdHandleCache->SIMDShortHandle)
{
- simdBaseType = TYP_SHORT;
- size = getSIMDVectorRegisterByteLength();
+ simdBaseJitType = CORINFO_TYPE_SHORT;
+ size = getSIMDVectorRegisterByteLength();
JITDUMP(" Known type SIMD Vector<short>\n");
}
else if (typeHnd == m_simdHandleCache->SIMDByteHandle)
{
- simdBaseType = TYP_BYTE;
- size = getSIMDVectorRegisterByteLength();
+ simdBaseJitType = CORINFO_TYPE_BYTE;
+ size = getSIMDVectorRegisterByteLength();
JITDUMP(" Known type SIMD Vector<byte>\n");
}
else if (typeHnd == m_simdHandleCache->SIMDUIntHandle)
{
- simdBaseType = TYP_UINT;
- size = getSIMDVectorRegisterByteLength();
+ simdBaseJitType = CORINFO_TYPE_UINT;
+ size = getSIMDVectorRegisterByteLength();
JITDUMP(" Known type SIMD Vector<uint>\n");
}
else if (typeHnd == m_simdHandleCache->SIMDULongHandle)
{
- simdBaseType = TYP_ULONG;
- size = getSIMDVectorRegisterByteLength();
+ simdBaseJitType = CORINFO_TYPE_ULONG;
+ size = getSIMDVectorRegisterByteLength();
JITDUMP(" Known type SIMD Vector<ulong>\n");
}
+ else if (typeHnd == m_simdHandleCache->SIMDNIntHandle)
+ {
+ simdBaseJitType = CORINFO_TYPE_NATIVEINT;
+ size = getSIMDVectorRegisterByteLength();
+ JITDUMP(" Known type SIMD Vector<nint>\n");
+ }
+ else if (typeHnd == m_simdHandleCache->SIMDNUIntHandle)
+ {
+ simdBaseJitType = CORINFO_TYPE_NATIVEUINT;
+ size = getSIMDVectorRegisterByteLength();
+ JITDUMP(" Known type SIMD Vector<nuint>\n");
+ }
// slow path search
- if (simdBaseType == TYP_UNKNOWN)
+ if (simdBaseJitType == CORINFO_TYPE_UNDEF)
{
// Doesn't match with any of the cached type handles.
// Obtain base type by parsing fully qualified class name.
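
getBaseJitTypeAndSizeOfSIMDType first consults cached class handles (including the SIMDNIntHandle/SIMDNUIntHandle entries added above) and only falls back to parsing the class name when nothing matches, which is what the CORINFO_TYPE_UNDEF check signals. A minimal model of that two-tier lookup (all names and values below are invented for the sketch):

    #include <cstdio>

    enum class BaseJitType { NoMatch, Float, Int, NativeInt, NativeUInt };

    struct CachedEntry
    {
        const void* handle;
        BaseJitType baseJitType;
        unsigned    sizeBytes;
    };

    // Fast path: a known handle maps directly to its element JIT type and size;
    // NoMatch sends the caller to the slow class-name parse.
    static BaseJitType LookupCachedSimdType(const CachedEntry* cache, int count,
                                            const void* handle, unsigned* sizeBytes)
    {
        for (int i = 0; i < count; i++)
        {
            if (cache[i].handle == handle)
            {
                *sizeBytes = cache[i].sizeBytes;
                return cache[i].baseJitType;
            }
        }
        *sizeBytes = 0;
        return BaseJitType::NoMatch;
    }

    int main()
    {
        int floatTag, nintTag; // stand-ins for CORINFO_CLASS_HANDLEs
        const CachedEntry cache[] = {
            {&floatTag, BaseJitType::Float, 16},
            {&nintTag, BaseJitType::NativeInt, 16}, // the new Vector<nint> cache slot
        };

        unsigned size = 0;
        BaseJitType t = LookupCachedSimdType(cache, 2, &nintTag, &size);
        std::printf("%d %u\n", (int)t, size); // 3 16
        return 0;
    }
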
@@ -271,63 +284,75 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u
if (wcsncmp(&(className[25]), W("System.Single"), 13) == 0)
{
m_simdHandleCache->SIMDFloatHandle = typeHnd;
- simdBaseType = TYP_FLOAT;
+ simdBaseJitType = CORINFO_TYPE_FLOAT;
JITDUMP(" Found type SIMD Vector<Float>\n");
}
else if (wcsncmp(&(className[25]), W("System.Int32"), 12) == 0)
{
m_simdHandleCache->SIMDIntHandle = typeHnd;
- simdBaseType = TYP_INT;
+ simdBaseJitType = CORINFO_TYPE_INT;
JITDUMP(" Found type SIMD Vector<Int>\n");
}
else if (wcsncmp(&(className[25]), W("System.UInt16"), 13) == 0)
{
m_simdHandleCache->SIMDUShortHandle = typeHnd;
- simdBaseType = TYP_USHORT;
+ simdBaseJitType = CORINFO_TYPE_USHORT;
JITDUMP(" Found type SIMD Vector<ushort>\n");
}
else if (wcsncmp(&(className[25]), W("System.Byte"), 11) == 0)
{
m_simdHandleCache->SIMDUByteHandle = typeHnd;
- simdBaseType = TYP_UBYTE;
+ simdBaseJitType = CORINFO_TYPE_UBYTE;
JITDUMP(" Found type SIMD Vector<ubyte>\n");
}
else if (wcsncmp(&(className[25]), W("System.Double"), 13) == 0)
{
m_simdHandleCache->SIMDDoubleHandle = typeHnd;
- simdBaseType = TYP_DOUBLE;
+ simdBaseJitType = CORINFO_TYPE_DOUBLE;
JITDUMP(" Found type SIMD Vector<Double>\n");
}
else if (wcsncmp(&(className[25]), W("System.Int64"), 12) == 0)
{
m_simdHandleCache->SIMDLongHandle = typeHnd;
- simdBaseType = TYP_LONG;
+ simdBaseJitType = CORINFO_TYPE_LONG;
JITDUMP(" Found type SIMD Vector<Long>\n");
}
else if (wcsncmp(&(className[25]), W("System.Int16"), 12) == 0)
{
m_simdHandleCache->SIMDShortHandle = typeHnd;
- simdBaseType = TYP_SHORT;
+ simdBaseJitType = CORINFO_TYPE_SHORT;
JITDUMP(" Found type SIMD Vector<short>\n");
}
else if (wcsncmp(&(className[25]), W("System.SByte"), 12) == 0)
{
m_simdHandleCache->SIMDByteHandle = typeHnd;
- simdBaseType = TYP_BYTE;
+ simdBaseJitType = CORINFO_TYPE_BYTE;
JITDUMP(" Found type SIMD Vector<byte>\n");
}
else if (wcsncmp(&(className[25]), W("System.UInt32"), 13) == 0)
{
m_simdHandleCache->SIMDUIntHandle = typeHnd;
- simdBaseType = TYP_UINT;
+ simdBaseJitType = CORINFO_TYPE_UINT;
JITDUMP(" Found type SIMD Vector<uint>\n");
}
else if (wcsncmp(&(className[25]), W("System.UInt64"), 13) == 0)
{
m_simdHandleCache->SIMDULongHandle = typeHnd;
- simdBaseType = TYP_ULONG;
+ simdBaseJitType = CORINFO_TYPE_ULONG;
JITDUMP(" Found type SIMD Vector<ulong>\n");
}
+ else if (wcsncmp(&(className[25]), W("System.IntPtr"), 13) == 0)
+ {
+ m_simdHandleCache->SIMDNIntHandle = typeHnd;
+ simdBaseJitType = CORINFO_TYPE_NATIVEINT;
+ JITDUMP(" Found type SIMD Vector<nint>\n");
+ }
+ else if (wcsncmp(&(className[25]), W("System.UIntPtr"), 14) == 0)
+ {
+ m_simdHandleCache->SIMDNUIntHandle = typeHnd;
+ simdBaseJitType = CORINFO_TYPE_NATIVEUINT;
+ JITDUMP(" Found type SIMD Vector<nuint>\n");
+ }
else
{
JITDUMP(" Unknown SIMD Vector<T>\n");
@@ -337,8 +362,8 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u
{
m_simdHandleCache->SIMDVector2Handle = typeHnd;
- simdBaseType = TYP_FLOAT;
- size = 2 * genTypeSize(TYP_FLOAT);
+ simdBaseJitType = CORINFO_TYPE_FLOAT;
+ size = 2 * genTypeSize(TYP_FLOAT);
assert(size == roundUp(info.compCompHnd->getClassSize(typeHnd), TARGET_POINTER_SIZE));
JITDUMP(" Found Vector2\n");
}
@@ -346,8 +371,8 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u
{
m_simdHandleCache->SIMDVector3Handle = typeHnd;
- simdBaseType = TYP_FLOAT;
- size = 3 * genTypeSize(TYP_FLOAT);
+ simdBaseJitType = CORINFO_TYPE_FLOAT;
+ size = 3 * genTypeSize(TYP_FLOAT);
assert(size == info.compCompHnd->getClassSize(typeHnd));
JITDUMP(" Found Vector3\n");
}
@@ -355,8 +380,8 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u
{
m_simdHandleCache->SIMDVector4Handle = typeHnd;
- simdBaseType = TYP_FLOAT;
- size = 4 * genTypeSize(TYP_FLOAT);
+ simdBaseJitType = CORINFO_TYPE_FLOAT;
+ size = 4 * genTypeSize(TYP_FLOAT);
assert(size == roundUp(info.compCompHnd->getClassSize(typeHnd), TARGET_POINTER_SIZE));
JITDUMP(" Found Vector4\n");
}
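
The slow-path branches above compare the generic-argument portion of the fully qualified class name against candidates such as System.IntPtr with wcsncmp, starting at a fixed index (className[25]) established by an enclosing Vector`1 prefix check that is not visible in these hunks. A hedged sketch of that comparison (the 25-character filler prefix in the test string is a stand-in, not the real class-name format):

    #include <cstdio>
    #include <cwchar>

    // Compare the type-argument portion of the name, starting at index 25, against
    // a candidate; the real code uses literal lengths where this sketch uses wcslen.
    static bool TypeArgumentIs(const wchar_t* className, const wchar_t* candidate)
    {
        return wcsncmp(&className[25], candidate, wcslen(candidate)) == 0;
    }

    int main()
    {
        // The first 25 characters stand in for the enclosing "Vector`1" prefix.
        const wchar_t* name = L"0123456789012345678901234System.IntPtr";
        std::printf("%d\n", TypeArgumentIs(name, L"System.IntPtr") ? 1 : 0); // 1
        return 0;
    }
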
@@ -386,192 +411,192 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u
if (typeHnd == m_simdHandleCache->Vector256FloatHandle)
{
- simdBaseType = TYP_FLOAT;
- size = Vector256SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_FLOAT;
+ size = Vector256SizeBytes;
JITDUMP(" Known type Vector256<float>\n");
}
else if (typeHnd == m_simdHandleCache->Vector256DoubleHandle)
{
- simdBaseType = TYP_DOUBLE;
- size = Vector256SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_DOUBLE;
+ size = Vector256SizeBytes;
JITDUMP(" Known type Vector256<double>\n");
}
else if (typeHnd == m_simdHandleCache->Vector256IntHandle)
{
- simdBaseType = TYP_INT;
- size = Vector256SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_INT;
+ size = Vector256SizeBytes;
JITDUMP(" Known type Vector256<int>\n");
}
else if (typeHnd == m_simdHandleCache->Vector256UIntHandle)
{
- simdBaseType = TYP_UINT;
- size = Vector256SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_UINT;
+ size = Vector256SizeBytes;
JITDUMP(" Known type Vector256<uint>\n");
}
else if (typeHnd == m_simdHandleCache->Vector256ShortHandle)
{
- simdBaseType = TYP_SHORT;
- size = Vector256SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_SHORT;
+ size = Vector256SizeBytes;
JITDUMP(" Known type Vector256<short>\n");
}
else if (typeHnd == m_simdHandleCache->Vector256UShortHandle)
{
- simdBaseType = TYP_USHORT;
- size = Vector256SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_USHORT;
+ size = Vector256SizeBytes;
JITDUMP(" Known type Vector256<ushort>\n");
}
else if (typeHnd == m_simdHandleCache->Vector256ByteHandle)
{
- simdBaseType = TYP_BYTE;
- size = Vector256SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_BYTE;
+ size = Vector256SizeBytes;
JITDUMP(" Known type Vector256<sbyte>\n");
}
else if (typeHnd == m_simdHandleCache->Vector256UByteHandle)
{
- simdBaseType = TYP_UBYTE;
- size = Vector256SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_UBYTE;
+ size = Vector256SizeBytes;
JITDUMP(" Known type Vector256<byte>\n");
}
else if (typeHnd == m_simdHandleCache->Vector256LongHandle)
{
- simdBaseType = TYP_LONG;
- size = Vector256SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_LONG;
+ size = Vector256SizeBytes;
JITDUMP(" Known type Vector256<long>\n");
}
else if (typeHnd == m_simdHandleCache->Vector256ULongHandle)
{
- simdBaseType = TYP_ULONG;
- size = Vector256SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_ULONG;
+ size = Vector256SizeBytes;
JITDUMP(" Known type Vector256<ulong>\n");
}
else
#endif // defined(TARGET_XARCH)
if (typeHnd == m_simdHandleCache->Vector128FloatHandle)
{
- simdBaseType = TYP_FLOAT;
- size = Vector128SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_FLOAT;
+ size = Vector128SizeBytes;
JITDUMP(" Known type Vector128<float>\n");
}
else if (typeHnd == m_simdHandleCache->Vector128DoubleHandle)
{
- simdBaseType = TYP_DOUBLE;
- size = Vector128SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_DOUBLE;
+ size = Vector128SizeBytes;
JITDUMP(" Known type Vector128<double>\n");
}
else if (typeHnd == m_simdHandleCache->Vector128IntHandle)
{
- simdBaseType = TYP_INT;
- size = Vector128SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_INT;
+ size = Vector128SizeBytes;
JITDUMP(" Known type Vector128<int>\n");
}
else if (typeHnd == m_simdHandleCache->Vector128UIntHandle)
{
- simdBaseType = TYP_UINT;
- size = Vector128SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_UINT;
+ size = Vector128SizeBytes;
JITDUMP(" Known type Vector128<uint>\n");
}
else if (typeHnd == m_simdHandleCache->Vector128ShortHandle)
{
- simdBaseType = TYP_SHORT;
- size = Vector128SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_SHORT;
+ size = Vector128SizeBytes;
JITDUMP(" Known type Vector128<short>\n");
}
else if (typeHnd == m_simdHandleCache->Vector128UShortHandle)
{
- simdBaseType = TYP_USHORT;
- size = Vector128SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_USHORT;
+ size = Vector128SizeBytes;
JITDUMP(" Known type Vector128<ushort>\n");
}
else if (typeHnd == m_simdHandleCache->Vector128ByteHandle)
{
- simdBaseType = TYP_BYTE;
- size = Vector128SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_BYTE;
+ size = Vector128SizeBytes;
JITDUMP(" Known type Vector128<sbyte>\n");
}
else if (typeHnd == m_simdHandleCache->Vector128UByteHandle)
{
- simdBaseType = TYP_UBYTE;
- size = Vector128SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_UBYTE;
+ size = Vector128SizeBytes;
JITDUMP(" Known type Vector128<byte>\n");
}
else if (typeHnd == m_simdHandleCache->Vector128LongHandle)
{
- simdBaseType = TYP_LONG;
- size = Vector128SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_LONG;
+ size = Vector128SizeBytes;
JITDUMP(" Known type Vector128<long>\n");
}
else if (typeHnd == m_simdHandleCache->Vector128ULongHandle)
{
- simdBaseType = TYP_ULONG;
- size = Vector128SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_ULONG;
+ size = Vector128SizeBytes;
JITDUMP(" Known type Vector128<ulong>\n");
}
else
#if defined(TARGET_ARM64)
if (typeHnd == m_simdHandleCache->Vector64FloatHandle)
{
- simdBaseType = TYP_FLOAT;
- size = Vector64SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_FLOAT;
+ size = Vector64SizeBytes;
JITDUMP(" Known type Vector64<float>\n");
}
else if (typeHnd == m_simdHandleCache->Vector64DoubleHandle)
{
- simdBaseType = TYP_DOUBLE;
- size = Vector64SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_DOUBLE;
+ size = Vector64SizeBytes;
JITDUMP(" Known type Vector64<double>\n");
}
else if (typeHnd == m_simdHandleCache->Vector64IntHandle)
{
- simdBaseType = TYP_INT;
- size = Vector64SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_INT;
+ size = Vector64SizeBytes;
JITDUMP(" Known type Vector64<int>\n");
}
else if (typeHnd == m_simdHandleCache->Vector64UIntHandle)
{
- simdBaseType = TYP_UINT;
- size = Vector64SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_UINT;
+ size = Vector64SizeBytes;
JITDUMP(" Known type Vector64<uint>\n");
}
else if (typeHnd == m_simdHandleCache->Vector64ShortHandle)
{
- simdBaseType = TYP_SHORT;
- size = Vector64SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_SHORT;
+ size = Vector64SizeBytes;
JITDUMP(" Known type Vector64<short>\n");
}
else if (typeHnd == m_simdHandleCache->Vector64UShortHandle)
{
- simdBaseType = TYP_USHORT;
- size = Vector64SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_USHORT;
+ size = Vector64SizeBytes;
JITDUMP(" Known type Vector64<ushort>\n");
}
else if (typeHnd == m_simdHandleCache->Vector64ByteHandle)
{
- simdBaseType = TYP_BYTE;
- size = Vector64SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_BYTE;
+ size = Vector64SizeBytes;
JITDUMP(" Known type Vector64<sbyte>\n");
}
else if (typeHnd == m_simdHandleCache->Vector64UByteHandle)
{
- simdBaseType = TYP_UBYTE;
- size = Vector64SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_UBYTE;
+ size = Vector64SizeBytes;
JITDUMP(" Known type Vector64<byte>\n");
}
else if (typeHnd == m_simdHandleCache->Vector64LongHandle)
{
- simdBaseType = TYP_LONG;
- size = Vector64SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_LONG;
+ size = Vector64SizeBytes;
JITDUMP(" Known type Vector64<long>\n");
}
else if (typeHnd == m_simdHandleCache->Vector64ULongHandle)
{
- simdBaseType = TYP_ULONG;
- size = Vector64SizeBytes;
+ simdBaseJitType = CORINFO_TYPE_ULONG;
+ size = Vector64SizeBytes;
JITDUMP(" Known type Vector64<ulong>\n");
}
#endif // defined(TARGET_ARM64)
// slow path search
- if (simdBaseType == TYP_UNKNOWN)
+ if (simdBaseJitType == CORINFO_TYPE_UNDEF)
{
// Doesn't match with any of the cached type handles.
const char* className = getClassNameFromMetadata(typeHnd, nullptr);
@@ -592,52 +617,52 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u
{
case CORINFO_TYPE_FLOAT:
m_simdHandleCache->Vector256FloatHandle = typeHnd;
- simdBaseType = TYP_FLOAT;
+ simdBaseJitType = CORINFO_TYPE_FLOAT;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<float>\n");
break;
case CORINFO_TYPE_DOUBLE:
m_simdHandleCache->Vector256DoubleHandle = typeHnd;
- simdBaseType = TYP_DOUBLE;
+ simdBaseJitType = CORINFO_TYPE_DOUBLE;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<double>\n");
break;
case CORINFO_TYPE_INT:
m_simdHandleCache->Vector256IntHandle = typeHnd;
- simdBaseType = TYP_INT;
+ simdBaseJitType = CORINFO_TYPE_INT;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<int>\n");
break;
case CORINFO_TYPE_UINT:
m_simdHandleCache->Vector256UIntHandle = typeHnd;
- simdBaseType = TYP_UINT;
+ simdBaseJitType = CORINFO_TYPE_UINT;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<uint>\n");
break;
case CORINFO_TYPE_SHORT:
m_simdHandleCache->Vector256ShortHandle = typeHnd;
- simdBaseType = TYP_SHORT;
+ simdBaseJitType = CORINFO_TYPE_SHORT;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<short>\n");
break;
case CORINFO_TYPE_USHORT:
m_simdHandleCache->Vector256UShortHandle = typeHnd;
- simdBaseType = TYP_USHORT;
+ simdBaseJitType = CORINFO_TYPE_USHORT;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<ushort>\n");
break;
case CORINFO_TYPE_LONG:
m_simdHandleCache->Vector256LongHandle = typeHnd;
- simdBaseType = TYP_LONG;
+ simdBaseJitType = CORINFO_TYPE_LONG;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<long>\n");
break;
case CORINFO_TYPE_ULONG:
m_simdHandleCache->Vector256ULongHandle = typeHnd;
- simdBaseType = TYP_ULONG;
+ simdBaseJitType = CORINFO_TYPE_ULONG;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<ulong>\n");
break;
case CORINFO_TYPE_UBYTE:
m_simdHandleCache->Vector256UByteHandle = typeHnd;
- simdBaseType = TYP_UBYTE;
+ simdBaseJitType = CORINFO_TYPE_UBYTE;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<byte>\n");
break;
case CORINFO_TYPE_BYTE:
m_simdHandleCache->Vector256ByteHandle = typeHnd;
- simdBaseType = TYP_BYTE;
+ simdBaseJitType = CORINFO_TYPE_BYTE;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector256<sbyte>\n");
break;
@@ -654,52 +679,52 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u
{
case CORINFO_TYPE_FLOAT:
m_simdHandleCache->Vector128FloatHandle = typeHnd;
- simdBaseType = TYP_FLOAT;
+ simdBaseJitType = CORINFO_TYPE_FLOAT;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<float>\n");
break;
case CORINFO_TYPE_DOUBLE:
m_simdHandleCache->Vector128DoubleHandle = typeHnd;
- simdBaseType = TYP_DOUBLE;
+ simdBaseJitType = CORINFO_TYPE_DOUBLE;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<double>\n");
break;
case CORINFO_TYPE_INT:
m_simdHandleCache->Vector128IntHandle = typeHnd;
- simdBaseType = TYP_INT;
+ simdBaseJitType = CORINFO_TYPE_INT;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<int>\n");
break;
case CORINFO_TYPE_UINT:
m_simdHandleCache->Vector128UIntHandle = typeHnd;
- simdBaseType = TYP_UINT;
+ simdBaseJitType = CORINFO_TYPE_UINT;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<uint>\n");
break;
case CORINFO_TYPE_SHORT:
m_simdHandleCache->Vector128ShortHandle = typeHnd;
- simdBaseType = TYP_SHORT;
+ simdBaseJitType = CORINFO_TYPE_SHORT;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<short>\n");
break;
case CORINFO_TYPE_USHORT:
m_simdHandleCache->Vector128UShortHandle = typeHnd;
- simdBaseType = TYP_USHORT;
+ simdBaseJitType = CORINFO_TYPE_USHORT;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<ushort>\n");
break;
case CORINFO_TYPE_LONG:
m_simdHandleCache->Vector128LongHandle = typeHnd;
- simdBaseType = TYP_LONG;
+ simdBaseJitType = CORINFO_TYPE_LONG;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<long>\n");
break;
case CORINFO_TYPE_ULONG:
m_simdHandleCache->Vector128ULongHandle = typeHnd;
- simdBaseType = TYP_ULONG;
+ simdBaseJitType = CORINFO_TYPE_ULONG;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<ulong>\n");
break;
case CORINFO_TYPE_UBYTE:
m_simdHandleCache->Vector128UByteHandle = typeHnd;
- simdBaseType = TYP_UBYTE;
+ simdBaseJitType = CORINFO_TYPE_UBYTE;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<byte>\n");
break;
case CORINFO_TYPE_BYTE:
m_simdHandleCache->Vector128ByteHandle = typeHnd;
- simdBaseType = TYP_BYTE;
+ simdBaseJitType = CORINFO_TYPE_BYTE;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector128<sbyte>\n");
break;
@@ -715,52 +740,52 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u
{
case CORINFO_TYPE_FLOAT:
m_simdHandleCache->Vector64FloatHandle = typeHnd;
- simdBaseType = TYP_FLOAT;
+ simdBaseJitType = CORINFO_TYPE_FLOAT;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<float>\n");
break;
case CORINFO_TYPE_DOUBLE:
m_simdHandleCache->Vector64DoubleHandle = typeHnd;
- simdBaseType = TYP_DOUBLE;
+ simdBaseJitType = CORINFO_TYPE_DOUBLE;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<double>\n");
break;
case CORINFO_TYPE_INT:
m_simdHandleCache->Vector64IntHandle = typeHnd;
- simdBaseType = TYP_INT;
+ simdBaseJitType = CORINFO_TYPE_INT;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<int>\n");
break;
case CORINFO_TYPE_UINT:
m_simdHandleCache->Vector64UIntHandle = typeHnd;
- simdBaseType = TYP_UINT;
+ simdBaseJitType = CORINFO_TYPE_UINT;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<uint>\n");
break;
case CORINFO_TYPE_SHORT:
m_simdHandleCache->Vector64ShortHandle = typeHnd;
- simdBaseType = TYP_SHORT;
+ simdBaseJitType = CORINFO_TYPE_SHORT;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<short>\n");
break;
case CORINFO_TYPE_USHORT:
m_simdHandleCache->Vector64UShortHandle = typeHnd;
- simdBaseType = TYP_USHORT;
+ simdBaseJitType = CORINFO_TYPE_USHORT;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<ushort>\n");
break;
case CORINFO_TYPE_LONG:
m_simdHandleCache->Vector64LongHandle = typeHnd;
- simdBaseType = TYP_LONG;
+ simdBaseJitType = CORINFO_TYPE_LONG;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<long>\n");
break;
case CORINFO_TYPE_ULONG:
m_simdHandleCache->Vector64ULongHandle = typeHnd;
- simdBaseType = TYP_ULONG;
+ simdBaseJitType = CORINFO_TYPE_ULONG;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<ulong>\n");
break;
case CORINFO_TYPE_UBYTE:
m_simdHandleCache->Vector64UByteHandle = typeHnd;
- simdBaseType = TYP_UBYTE;
+ simdBaseJitType = CORINFO_TYPE_UBYTE;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<byte>\n");
break;
case CORINFO_TYPE_BYTE:
m_simdHandleCache->Vector64ByteHandle = typeHnd;
- simdBaseType = TYP_BYTE;
+ simdBaseJitType = CORINFO_TYPE_BYTE;
JITDUMP(" Found type Hardware Intrinsic SIMD Vector64<sbyte>\n");
break;
@@ -775,9 +800,10 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u
#if defined(TARGET_XARCH)
// Even though Vector256 is TYP_SIMD32, if AVX isn't supported, then it must
// be treated as a regular struct
- if (size == YMM_REGSIZE_BYTES && (simdBaseType != TYP_UNKNOWN) && !compExactlyDependsOn(InstructionSet_AVX))
+ if (size == YMM_REGSIZE_BYTES && (simdBaseJitType != CORINFO_TYPE_UNDEF) &&
+ !compExactlyDependsOn(InstructionSet_AVX))
{
- simdBaseType = TYP_UNKNOWN;
+ simdBaseJitType = CORINFO_TYPE_UNDEF;
}
#endif // TARGET_XARCH
}
@@ -788,12 +814,12 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u
*sizeBytes = size;
}
- if (simdBaseType != TYP_UNKNOWN)
+ if (simdBaseJitType != CORINFO_TYPE_UNDEF)
{
setUsesSIMDTypes(true);
}
- return simdBaseType;
+ return simdBaseJitType;
}
//--------------------------------------------------------------------------------------
@@ -805,7 +831,7 @@ var_types Compiler::getBaseTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, u
// sig - method signature info
// isNewObj - whether this call represents a newobj constructor call
// argCount - argument count - out param
-// baseType - base type of the intrinsic - out param
+// simdBaseJitType - base JIT type of the intrinsic - out param
// sizeBytes - size of SIMD vector type on which the method is invoked - out param
//
// Return Value:
@@ -827,23 +853,23 @@ const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* in
CORINFO_SIG_INFO* sig,
bool isNewObj,
unsigned* argCount,
- var_types* baseType,
+ CorInfoType* simdBaseJitType,
unsigned* sizeBytes)
{
assert(featureSIMD);
- assert(baseType != nullptr);
+ assert(simdBaseJitType != nullptr);
assert(sizeBytes != nullptr);
- // get baseType and size of the type
+ // get simdBaseJitType and size of the type
CORINFO_CLASS_HANDLE typeHnd = *inOutTypeHnd;
- *baseType = getBaseTypeAndSizeOfSIMDType(typeHnd, sizeBytes);
+ *simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(typeHnd, sizeBytes);
if (typeHnd == m_simdHandleCache->SIMDVectorHandle)
{
// All of the supported intrinsics on this static class take a first argument that's a vector,
- // which determines the baseType.
+ // which determines the simdBaseJitType.
// The exception is the IsHardwareAccelerated property, which is handled as a special case.
- assert(*baseType == TYP_UNKNOWN);
+ assert(*simdBaseJitType == CORINFO_TYPE_UNDEF);
if (sig->numArgs == 0)
{
const SIMDIntrinsicInfo* hwAccelIntrinsicInfo = &(simdIntrinsicInfoArray[SIMDIntrinsicHWAccel]);
@@ -858,18 +884,20 @@ const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* in
}
else
{
- typeHnd = info.compCompHnd->getArgClass(sig, sig->args);
- *inOutTypeHnd = typeHnd;
- *baseType = getBaseTypeAndSizeOfSIMDType(typeHnd, sizeBytes);
+ typeHnd = info.compCompHnd->getArgClass(sig, sig->args);
+ *inOutTypeHnd = typeHnd;
+ *simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(typeHnd, sizeBytes);
}
}
- if (*baseType == TYP_UNKNOWN)
+ if (*simdBaseJitType == CORINFO_TYPE_UNDEF)
{
JITDUMP("NOT a SIMD Intrinsic: unsupported baseType\n");
return nullptr;
}
+ var_types simdBaseType = JitType2PreciseVarType(*simdBaseJitType);
+
// account for implicit "this" arg
*argCount = sig->numArgs;
if (sig->hasThis())
@@ -899,7 +927,7 @@ const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* in
break;
}
- if (simdIntrinsicInfoArray[i].supportedBaseTypes[j] == *baseType)
+ if (simdIntrinsicInfoArray[i].supportedBaseTypes[j] == simdBaseType)
{
found = true;
break;
@@ -979,20 +1007,20 @@ const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* in
{
// Convention:
// - intrinsicInfo.argType[i] == TYP_UNDEF - intrinsic doesn't have a valid arg at position i
- // - intrinsicInfo.argType[i] == TYP_UNKNOWN - arg type should be same as basetype
+ // - intrinsicInfo.argType[i] == TYP_UNKNOWN - arg type should be the same as simdBaseType
// Note that we pop the args off in reverse order.
expectedArgType = simdIntrinsicInfoArray[i].argType[argIndex];
assert(expectedArgType != TYP_UNDEF);
if (expectedArgType == TYP_UNKNOWN)
{
- // The type of the argument will be genActualType(*baseType).
- expectedArgType = genActualType(*baseType);
+ // The type of the argument will be genActualType(simdBaseType).
+ expectedArgType = genActualType(simdBaseType);
argType = genActualType(argType);
}
}
else
{
- expectedArgType = *baseType;
+ expectedArgType = simdBaseType;
}
if (!isThisPtr && argType == TYP_I_IMPL)
@@ -1028,15 +1056,16 @@ const SIMDIntrinsicInfo* Compiler::getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* in
// Cross check return type and static vs. instance is what we are expecting.
// If not, don't consider it as an intrinsic.
- // Note that ret type of TYP_UNKNOWN means that it is not known apriori and must be same as baseType
+ // Note that a ret type of TYP_UNKNOWN means that it is not known a priori and must be the same as simdBaseType
if (found)
{
var_types expectedRetType = simdIntrinsicInfoArray[i].retType;
if (expectedRetType == TYP_UNKNOWN)
{
// JIT maps uint/ulong type vars to TYP_INT/TYP_LONG.
- expectedRetType =
- (*baseType == TYP_UINT || *baseType == TYP_ULONG) ? genActualType(*baseType) : *baseType;
+ expectedRetType = (simdBaseType == TYP_UINT || simdBaseType == TYP_ULONG)
+ ? genActualType(simdBaseType)
+ : simdBaseType;
}
if (JITtype2varType(sig->retType) != expectedRetType ||
@@ -1193,22 +1222,23 @@ GenTree* Compiler::impSIMDPopStack(var_types type, bool expectAddr, CORINFO_CLAS
// impSIMDGetFixed: Create a GT_SIMD tree for a Get property of SIMD vector with a fixed index.
//
// Arguments:
-// baseType - The base (element) type of the SIMD vector.
-// simdSize - The total size in bytes of the SIMD vector.
-// index - The index of the field to get.
+// simdBaseJitType - The base (element) JIT type of the SIMD vector.
+// simdSize - The total size in bytes of the SIMD vector.
+// index - The index of the field to get.
//
// Return Value:
// Returns a GT_SIMD node with the SIMDIntrinsicGetItem intrinsic id.
//
-GenTreeSIMD* Compiler::impSIMDGetFixed(var_types simdType, var_types baseType, unsigned simdSize, int index)
+GenTreeSIMD* Compiler::impSIMDGetFixed(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize, int index)
{
- assert(simdSize >= ((index + 1) * genTypeSize(baseType)));
+ var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
+ assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType)));
// op1 is a SIMD source.
GenTree* op1 = impSIMDPopStack(simdType, true);
GenTree* op2 = gtNewIconNode(index);
- GenTreeSIMD* simdTree = gtNewSIMDNode(baseType, op1, op2, SIMDIntrinsicGetItem, baseType, simdSize);
+ GenTreeSIMD* simdTree = gtNewSIMDNode(simdBaseType, op1, op2, SIMDIntrinsicGetItem, simdBaseJitType, simdSize);
return simdTree;
}
@@ -1242,7 +1272,7 @@ SIMDIntrinsicID Compiler::impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd,
// Shuffle is meant to swap the comparison results of low-32-bits and high 32-bits of respective long elements.
// Compare vector<long> as if they were vector<int> and assign the result to a temp
- GenTree* compResult = gtNewSIMDNode(simdType, *pOp1, *pOp2, SIMDIntrinsicEqual, TYP_INT, size);
+ GenTree* compResult = gtNewSIMDNode(simdType, *pOp1, *pOp2, SIMDIntrinsicEqual, CORINFO_TYPE_INT, size);
unsigned lclNum = lvaGrabTemp(true DEBUGARG("SIMD Long =="));
lvaSetStruct(lclNum, typeHnd, false);
GenTree* tmp = gtNewLclvNode(lclNum, simdType);
@@ -1253,7 +1283,7 @@ SIMDIntrinsicID Compiler::impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd,
// IntrinsicId = BitwiseAnd
*pOp1 = gtNewOperNode(GT_COMMA, simdType, asg, tmp);
*pOp2 = gtNewSIMDNode(simdType, gtNewLclvNode(lclNum, simdType), gtNewIconNode(SHUFFLE_ZWXY, TYP_INT),
- SIMDIntrinsicShuffleSSE2, TYP_INT, size);
+ SIMDIntrinsicShuffleSSE2, CORINFO_TYPE_INT, size);
return SIMDIntrinsicBitwiseAnd;
}
#endif // TARGET_XARCH
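A scalar sketch of the trick described above (plain C++, not JIT code; the helper name is illustrative): the 64-bit equality mask is the AND of the two 32-bit equality masks PCMPEQD produces, once the halves of one copy are swapped so each half is paired with its partner.

#include <cstdint>
#include <cstdio>

// Scalar model of the SSE2 lowering above: compare as 32-bit lanes, swap the
// halves of one copy (the SHUFFLE_ZWXY step), then AND the two masks to get a
// full 64-bit equality mask per element.
static uint64_t Equal64ViaEqual32(uint64_t a, uint64_t b)
{
    uint32_t aLo = (uint32_t)a, aHi = (uint32_t)(a >> 32);
    uint32_t bLo = (uint32_t)b, bHi = (uint32_t)(b >> 32);

    // PCMPEQD-style masks: all ones when equal, all zeros otherwise.
    uint32_t eqLo = (aLo == bLo) ? 0xFFFFFFFFu : 0u;
    uint32_t eqHi = (aHi == bHi) ? 0xFFFFFFFFu : 0u;

    uint64_t cmp      = ((uint64_t)eqHi << 32) | eqLo; // per-32-bit compare result
    uint64_t shuffled = ((uint64_t)eqLo << 32) | eqHi; // halves swapped
    return cmp & shuffled;                             // SIMDIntrinsicBitwiseAnd
}

int main()
{
    printf("%016llx\n", (unsigned long long)Equal64ViaEqual32(42, 42)); // all ones
    printf("%016llx\n", (unsigned long long)Equal64ViaEqual32(42, 43)); // all zeros
    return 0;
}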
@@ -1264,10 +1294,10 @@ SIMDIntrinsicID Compiler::impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd,
// Arguments:
// relOpIntrinsicId - Relational operator SIMD intrinsic
// typeHnd - type handle of SIMD vector
-// size - SIMD vector size
-// inOutBaseType - base type of SIMD vector
-// pOp1 - in-out parameter; first operand
-// pOp2 - in-out parameter; second operand
+// size - SIMD vector size
+// inOutBaseJitType - base JIT type of SIMD vector
+// pOp1 - in-out parameter; first operand
+// pOp2 - in-out parameter; second operand
//
// Return Value:
// Modifies in-out params pOp1, pOp2, inOutBaseJitType and returns the intrinsic ID to be applied to the modified operands
@@ -1275,7 +1305,7 @@ SIMDIntrinsicID Compiler::impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd,
SIMDIntrinsicID Compiler::impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
CORINFO_CLASS_HANDLE typeHnd,
unsigned size,
- var_types* inOutBaseType,
+ CorInfoType* inOutBaseJitType,
GenTree** pOp1,
GenTree** pOp2)
{
@@ -1286,14 +1316,15 @@ SIMDIntrinsicID Compiler::impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
SIMDIntrinsicID intrinsicID = relOpIntrinsicId;
#ifdef TARGET_XARCH
- var_types baseType = *inOutBaseType;
+ CorInfoType simdBaseJitType = *inOutBaseJitType;
+ var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
- if (varTypeIsFloating(baseType))
+ if (varTypeIsFloating(simdBaseType))
{
}
- else if (varTypeIsIntegral(baseType))
+ else if (varTypeIsIntegral(simdBaseType))
{
- if ((getSIMDSupportLevel() == SIMD_SSE2_Supported) && baseType == TYP_LONG)
+ if ((getSIMDSupportLevel() == SIMD_SSE2_Supported) && simdBaseType == TYP_LONG)
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
@@ -1307,7 +1338,7 @@ SIMDIntrinsicID Compiler::impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
}
}
// SSE2 and AVX direct support for signed comparison of int32, int16 and int8 types
- else if (varTypeIsUnsigned(baseType))
+ else if (varTypeIsUnsigned(simdBaseType))
{
// Vector<byte>, Vector<ushort>, Vector<uint> and Vector<ulong>:
// SSE2 supports > for signed comparison. Therefore, to use it for
@@ -1324,23 +1355,23 @@ SIMDIntrinsicID Compiler::impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
// We need to treat op1 and op2 as signed for comparison purposes after
// the transformation.
__int64 constVal = 0;
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_UBYTE:
- constVal = 0x80808080;
- *inOutBaseType = TYP_BYTE;
+ constVal = 0x80808080;
+ *inOutBaseJitType = CORINFO_TYPE_BYTE;
break;
case TYP_USHORT:
- constVal = 0x80008000;
- *inOutBaseType = TYP_SHORT;
+ constVal = 0x80008000;
+ *inOutBaseJitType = CORINFO_TYPE_SHORT;
break;
case TYP_UINT:
- constVal = 0x80000000;
- *inOutBaseType = TYP_INT;
+ constVal = 0x80000000;
+ *inOutBaseJitType = CORINFO_TYPE_INT;
break;
case TYP_ULONG:
- constVal = 0x8000000000000000LL;
- *inOutBaseType = TYP_LONG;
+ constVal = 0x8000000000000000LL;
+ *inOutBaseJitType = CORINFO_TYPE_LONG;
break;
default:
unreached();
@@ -1352,20 +1383,21 @@ SIMDIntrinsicID Compiler::impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
if (intrinsicID != SIMDIntrinsicEqual)
{
// For constructing const vector use either long or int base type.
- var_types tempBaseType;
- GenTree* initVal;
- if (baseType == TYP_ULONG)
+ CorInfoType tempBaseJitType;
+ GenTree* initVal;
+ if (simdBaseType == TYP_ULONG)
{
- tempBaseType = TYP_LONG;
- initVal = gtNewLconNode(constVal);
+ tempBaseJitType = CORINFO_TYPE_LONG;
+ initVal = gtNewLconNode(constVal);
}
else
{
- tempBaseType = TYP_INT;
- initVal = gtNewIconNode((ssize_t)constVal);
+ tempBaseJitType = CORINFO_TYPE_INT;
+ initVal = gtNewIconNode((ssize_t)constVal);
}
- initVal->gtType = tempBaseType;
- GenTree* constVector = gtNewSIMDNode(simdType, initVal, nullptr, SIMDIntrinsicInit, tempBaseType, size);
+ initVal->gtType = JITtype2varType(tempBaseJitType);
+ GenTree* constVector =
+ gtNewSIMDNode(simdType, initVal, nullptr, SIMDIntrinsicInit, tempBaseJitType, size);
// Assign constVector to a temp, since we intend to use it more than once
// TODO-CQ: We have quite a few such constant vectors constructed during
@@ -1375,11 +1407,11 @@ SIMDIntrinsicID Compiler::impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId,
// op1 = op1 - constVector
// op2 = op2 - constVector
- *pOp1 = gtNewSIMDNode(simdType, *pOp1, constVector, SIMDIntrinsicSub, baseType, size);
- *pOp2 = gtNewSIMDNode(simdType, *pOp2, tmp, SIMDIntrinsicSub, baseType, size);
+ *pOp1 = gtNewSIMDNode(simdType, *pOp1, constVector, SIMDIntrinsicSub, simdBaseJitType, size);
+ *pOp2 = gtNewSIMDNode(simdType, *pOp2, tmp, SIMDIntrinsicSub, simdBaseJitType, size);
}
- return impSIMDRelOp(intrinsicID, typeHnd, size, inOutBaseType, pOp1, pOp2);
+ return impSIMDRelOp(intrinsicID, typeHnd, size, inOutBaseJitType, pOp1, pOp2);
}
}
#elif !defined(TARGET_ARM64)
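The unsigned-comparison rewrite above rests on a simple identity: x >u y exactly when (x - 2^(N-1)) >s (y - 2^(N-1)), so flipping the sign bit of both operands lets the signed compares SSE2 does have stand in for the unsigned ones it lacks. A minimal scalar check of the byte case (plain C++, not JIT code):

#include <cstdint>
#include <cassert>

// Unsigned byte comparison expressed through a signed compare, as done above
// for Vector<byte>/<ushort>/<uint>/<ulong> on SSE2: biasing both operands by
// 2^7 maps [0, 255] onto [-128, 127] while preserving order, so a
// PCMPGTB-style signed "greater than" gives the unsigned answer.
static bool UnsignedGreaterViaSigned(uint8_t x, uint8_t y)
{
    int xs = (int)x - 0x80; // op1 - constVector
    int ys = (int)y - 0x80; // op2 - constVector
    return xs > ys;         // signed compare
}

int main()
{
    for (int x = 0; x < 256; x++)
    {
        for (int y = 0; y < 256; y++)
        {
            assert(UnsignedGreaterViaSigned((uint8_t)x, (uint8_t)y) == (x > y));
        }
    }
    return 0;
}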
@@ -1713,13 +1745,14 @@ void Compiler::impMarkContiguousSIMDFieldAssignments(Statement* stmt)
GenTree* expr = stmt->GetRootNode();
if (expr->OperGet() == GT_ASG && expr->TypeGet() == TYP_FLOAT)
{
- GenTree* curDst = expr->AsOp()->gtOp1;
- GenTree* curSrc = expr->AsOp()->gtOp2;
- unsigned index = 0;
- var_types baseType = TYP_UNKNOWN;
- unsigned simdSize = 0;
- GenTree* srcSimdStructNode = getSIMDStructFromField(curSrc, &baseType, &index, &simdSize, true);
- if (srcSimdStructNode == nullptr || baseType != TYP_FLOAT)
+ GenTree* curDst = expr->AsOp()->gtOp1;
+ GenTree* curSrc = expr->AsOp()->gtOp2;
+ unsigned index = 0;
+ CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
+ unsigned simdSize = 0;
+ GenTree* srcSimdStructNode = getSIMDStructFromField(curSrc, &simdBaseJitType, &index, &simdSize, true);
+
+ if (srcSimdStructNode == nullptr || simdBaseJitType != CORINFO_TYPE_FLOAT)
{
fgPreviousCandidateSIMDFieldAsgStmt = nullptr;
}
@@ -1730,16 +1763,17 @@ void Compiler::impMarkContiguousSIMDFieldAssignments(Statement* stmt)
else if (fgPreviousCandidateSIMDFieldAsgStmt != nullptr)
{
assert(index > 0);
- GenTree* prevAsgExpr = fgPreviousCandidateSIMDFieldAsgStmt->GetRootNode();
- GenTree* prevDst = prevAsgExpr->AsOp()->gtOp1;
- GenTree* prevSrc = prevAsgExpr->AsOp()->gtOp2;
+ var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
+ GenTree* prevAsgExpr = fgPreviousCandidateSIMDFieldAsgStmt->GetRootNode();
+ GenTree* prevDst = prevAsgExpr->AsOp()->gtOp1;
+ GenTree* prevSrc = prevAsgExpr->AsOp()->gtOp2;
if (!areArgumentsContiguous(prevDst, curDst) || !areArgumentsContiguous(prevSrc, curSrc))
{
fgPreviousCandidateSIMDFieldAsgStmt = nullptr;
}
else
{
- if (index == (simdSize / genTypeSize(baseType) - 1))
+ if (index == (simdSize / genTypeSize(simdBaseType) - 1))
{
// Successfully found the pattern, mark the lclvar as UsedInSIMDIntrinsic
if (srcSimdStructNode->OperIsLocal())
@@ -1812,11 +1846,11 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
}
// Get base type and intrinsic Id
- var_types baseType = TYP_UNKNOWN;
- unsigned size = 0;
- unsigned argCount = 0;
+ CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
+ unsigned size = 0;
+ unsigned argCount = 0;
const SIMDIntrinsicInfo* intrinsicInfo =
- getSIMDIntrinsicInfo(&clsHnd, methodHnd, sig, (opcode == CEE_NEWOBJ), &argCount, &baseType, &size);
+ getSIMDIntrinsicInfo(&clsHnd, methodHnd, sig, (opcode == CEE_NEWOBJ), &argCount, &simdBaseJitType, &size);
// Exit early if the intrinsic is invalid or unrecognized
if ((intrinsicInfo == nullptr) || (intrinsicInfo->id == SIMDIntrinsicInvalid))
@@ -1844,15 +1878,19 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
}
SIMDIntrinsicID simdIntrinsicID = intrinsicInfo->id;
+ var_types simdBaseType;
var_types simdType;
- if (baseType != TYP_UNKNOWN)
+
+ if (simdBaseJitType != CORINFO_TYPE_UNDEF)
{
- simdType = getSIMDTypeForSize(size);
+ simdBaseType = JitType2PreciseVarType(simdBaseJitType);
+ simdType = getSIMDTypeForSize(size);
}
else
{
assert(simdIntrinsicID == SIMDIntrinsicHWAccel);
- simdType = TYP_UNKNOWN;
+ simdBaseType = TYP_UNKNOWN;
+ simdType = TYP_UNKNOWN;
}
bool instMethod = intrinsicInfo->isInstMethod;
var_types callType = JITtype2varType(sig->retType);
@@ -1888,15 +1926,15 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
bool initFromFirstArgIndir = false;
if (simdIntrinsicID == SIMDIntrinsicInit)
{
- op2 = impSIMDPopStack(baseType);
+ op2 = impSIMDPopStack(simdBaseType);
}
else
{
assert(simdIntrinsicID == SIMDIntrinsicInitN);
- assert(baseType == TYP_FLOAT);
+ assert(simdBaseType == TYP_FLOAT);
unsigned initCount = argCount - 1;
- unsigned elementCount = getSIMDVectorLength(size, baseType);
+ unsigned elementCount = getSIMDVectorLength(size, simdBaseType);
noway_assert(initCount == elementCount);
// Build a GT_LIST with the N values.
@@ -1909,7 +1947,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
bool areArgsContiguous = true;
for (unsigned i = 0; i < initCount; i++)
{
- GenTree* nextArg = impSIMDPopStack(baseType);
+ GenTree* nextArg = impSIMDPopStack(simdBaseType);
if (areArgsContiguous)
{
GenTree* curArg = nextArg;
@@ -1923,14 +1961,14 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
prevArg = curArg;
}
- list = new (this, GT_LIST) GenTreeOp(GT_LIST, baseType, nextArg, list);
+ list = new (this, GT_LIST) GenTreeOp(GT_LIST, simdBaseType, nextArg, list);
}
- if (areArgsContiguous && baseType == TYP_FLOAT)
+ if (areArgsContiguous && simdBaseType == TYP_FLOAT)
{
// Since Vector2, Vector3 and Vector4's arguments are all of type float,
// we initialize the vector from the first argument's address, only when
- // the baseType is TYP_FLOAT and the arguments are located contiguously in memory
+ // the simdBaseType is TYP_FLOAT and the arguments are located contiguously in memory
initFromFirstArgIndir = true;
GenTree* op2Address = createAddressNodeForSIMDInit(firstArg, size);
var_types simdType = getSIMDTypeForSize(size);
@@ -1945,16 +1983,16 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
op1 = getOp1ForConstructor(opcode, newobjThis, clsHnd);
assert(op1->TypeGet() == TYP_BYREF);
- assert(genActualType(op2->TypeGet()) == genActualType(baseType) || initFromFirstArgIndir);
+ assert(genActualType(op2->TypeGet()) == genActualType(simdBaseType) || initFromFirstArgIndir);
// For integral base types of size less than TYP_INT, expand the initializer
// to fill size of TYP_INT bytes.
- if (varTypeIsSmallInt(baseType))
+ if (varTypeIsSmallInt(simdBaseType))
{
// This case should occur only for Init intrinsic.
assert(simdIntrinsicID == SIMDIntrinsicInit);
- unsigned baseSize = genTypeSize(baseType);
+ unsigned baseSize = genTypeSize(simdBaseType);
int multiplier;
if (baseSize == 1)
{
@@ -1967,7 +2005,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
}
GenTree* t1 = nullptr;
- if (baseType == TYP_BYTE)
+ if (simdBaseType == TYP_BYTE)
{
// What we have is a signed byte initializer,
// which when loaded to a reg will get sign extended to TYP_INT.
@@ -1975,7 +2013,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
// rather zero extended to 32-bits.
t1 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(0xff, TYP_INT));
}
- else if (baseType == TYP_SHORT)
+ else if (simdBaseType == TYP_SHORT)
{
// What we have is a signed short initializer,
// which when loaded to a reg will get sign extended to TYP_INT.
@@ -1985,7 +2023,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
}
else
{
- assert(baseType == TYP_UBYTE || baseType == TYP_USHORT);
+ assert(simdBaseType == TYP_UBYTE || simdBaseType == TYP_USHORT);
t1 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
}
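The widening above (the AND with 0xff, or a cast for the unsigned cases) keeps sign bits from leaking into the 32-bit initializer; the multiply in the following hunk then replicates the small value into every byte or short position of that TYP_INT lane. A stand-alone sketch of the arithmetic, assuming the usual replication constants (0x01010101 for bytes, 0x00010001 for shorts; the actual multiplier assignment falls outside the visible hunk):

#include <cstdint>
#include <cstdio>

// Replicate a small signed initializer across a 32-bit lane, mirroring the
// expansion above: mask/zero-extend first, then multiply by a replication
// constant so every narrow lane of the TYP_INT value holds the initializer.
static uint32_t ReplicateByte(int value) // value is a (possibly negative) sbyte
{
    uint32_t widened = (uint32_t)value & 0xFF;  // the GT_AND with 0xff above
    return widened * 0x01010101u;               // copy the byte into all four positions
}

static uint32_t ReplicateShort(int value) // value is a (possibly negative) short
{
    uint32_t widened = (uint32_t)value & 0xFFFF;
    return widened * 0x00010001u;               // copy the short into both halves
}

int main()
{
    printf("0x%08x\n", ReplicateByte(-85));     // (sbyte)0xAB -> 0xabababab
    printf("0x%08x\n", ReplicateShort(-16657)); // (short)0xBEEF -> 0xbeefbeef
    return 0;
}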
@@ -1993,9 +2031,9 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
GenTree* t2 = gtNewIconNode(multiplier, TYP_INT);
op2 = gtNewOperNode(GT_MUL, TYP_INT, t1, t2);
- // Construct a vector of TYP_INT with the new initializer and cast it back to vector of baseType
- simdTree = gtNewSIMDNode(simdType, op2, nullptr, simdIntrinsicID, TYP_INT, size);
- simdTree = gtNewSIMDNode(simdType, simdTree, nullptr, SIMDIntrinsicCast, baseType, size);
+ // Construct a vector of TYP_INT with the new initializer and cast it back to vector of simdBaseType
+ simdTree = gtNewSIMDNode(simdType, op2, nullptr, simdIntrinsicID, CORINFO_TYPE_INT, size);
+ simdTree = gtNewSIMDNode(simdType, simdTree, nullptr, SIMDIntrinsicCast, simdBaseJitType, size);
}
else
{
@@ -2012,7 +2050,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
}
else
{
- simdTree = gtNewSIMDNode(simdType, op2, nullptr, simdIntrinsicID, baseType, size);
+ simdTree = gtNewSIMDNode(simdType, op2, nullptr, simdIntrinsicID, simdBaseJitType, size);
}
}
@@ -2030,7 +2068,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
// op2 - array itself
// op1 - byref to vector struct
- unsigned int vectorLength = getSIMDVectorLength(size, baseType);
+ unsigned int vectorLength = getSIMDVectorLength(size, simdBaseType);
// (This constructor takes only the zero-based arrays.)
// We will add one or two bounds checks:
// 1. If we have an index, we must do a check on that first.
@@ -2140,7 +2178,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
if (simdIntrinsicID == SIMDIntrinsicInitArray || simdIntrinsicID == SIMDIntrinsicInitArrayX)
{
op1 = getOp1ForConstructor(opcode, newobjThis, clsHnd);
- simdTree = gtNewSIMDNode(simdType, op2, op3, SIMDIntrinsicInitArray, baseType, size);
+ simdTree = gtNewSIMDNode(simdType, op2, op3, SIMDIntrinsicInitArray, simdBaseJitType, size);
copyBlkDst = op1;
doCopyBlk = true;
}
@@ -2156,7 +2194,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
// TODO-Cleanup: Though it happens to just work fine, front-end phases are not aware of the GT_LEA node.
// Therefore, convert these to use GT_ADDR .
copyBlkDst = new (this, GT_LEA)
- GenTreeAddrMode(TYP_BYREF, op2, op3, genTypeSize(baseType), OFFSETOF__CORINFO_Array__data);
+ GenTreeAddrMode(TYP_BYREF, op2, op3, genTypeSize(simdBaseType), OFFSETOF__CORINFO_Array__data);
doCopyBlk = true;
}
}
@@ -2170,7 +2208,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
// op3 - float value for VLarge.Z or VLarge.W
// op2 - VSmall
// op1 - byref of VLarge
- assert(baseType == TYP_FLOAT);
+ assert(simdBaseType == TYP_FLOAT);
GenTree* op4 = nullptr;
if (argCount == 4)
@@ -2202,11 +2240,11 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
simdTree = op2;
if (op3 != nullptr)
{
- simdTree = gtNewSIMDNode(simdType, simdTree, op3, SIMDIntrinsicSetZ, baseType, size);
+ simdTree = gtNewSIMDNode(simdType, simdTree, op3, SIMDIntrinsicSetZ, simdBaseJitType, size);
}
if (op4 != nullptr)
{
- simdTree = gtNewSIMDNode(simdType, simdTree, op4, SIMDIntrinsicSetW, baseType, size);
+ simdTree = gtNewSIMDNode(simdType, simdTree, op4, SIMDIntrinsicSetW, simdBaseJitType, size);
}
copyBlkDst = op1;
@@ -2219,9 +2257,9 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType, instMethod);
- SIMDIntrinsicID intrinsicID = impSIMDRelOp(simdIntrinsicID, clsHnd, size, &baseType, &op1, &op2);
- simdTree = gtNewSIMDNode(genActualType(callType), op1, op2, intrinsicID, baseType, size);
- retVal = simdTree;
+ SIMDIntrinsicID intrinsicID = impSIMDRelOp(simdIntrinsicID, clsHnd, size, &simdBaseJitType, &op1, &op2);
+ simdTree = gtNewSIMDNode(genActualType(callType), op1, op2, intrinsicID, simdBaseJitType, size);
+ retVal = simdTree;
}
break;
@@ -2234,7 +2272,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType, instMethod);
- simdTree = gtNewSIMDNode(simdType, op1, op2, simdIntrinsicID, baseType, size);
+ simdTree = gtNewSIMDNode(simdType, op1, op2, simdIntrinsicID, simdBaseJitType, size);
retVal = simdTree;
}
break;
@@ -2245,7 +2283,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
// op2 is an index of TYP_INT
op2 = impSIMDPopStack(TYP_INT);
op1 = impSIMDPopStack(simdType, instMethod);
- int vectorLength = getSIMDVectorLength(size, baseType);
+ int vectorLength = getSIMDVectorLength(size, simdBaseType);
if (!op2->IsCnsIntOrI() || op2->AsIntCon()->gtIconVal >= vectorLength || op2->AsIntCon()->gtIconVal < 0)
{
// We need to bounds-check the length of the vector.
@@ -2275,25 +2313,25 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
assert(op1->TypeGet() == simdType);
assert(op2->TypeGet() == TYP_INT);
- simdTree = gtNewSIMDNode(genActualType(callType), op1, op2, simdIntrinsicID, baseType, size);
+ simdTree = gtNewSIMDNode(genActualType(callType), op1, op2, simdIntrinsicID, simdBaseJitType, size);
retVal = simdTree;
}
break;
case SIMDIntrinsicGetW:
- retVal = impSIMDGetFixed(simdType, baseType, size, 3);
+ retVal = impSIMDGetFixed(simdType, simdBaseJitType, size, 3);
break;
case SIMDIntrinsicGetZ:
- retVal = impSIMDGetFixed(simdType, baseType, size, 2);
+ retVal = impSIMDGetFixed(simdType, simdBaseJitType, size, 2);
break;
case SIMDIntrinsicGetY:
- retVal = impSIMDGetFixed(simdType, baseType, size, 1);
+ retVal = impSIMDGetFixed(simdType, simdBaseJitType, size, 1);
break;
case SIMDIntrinsicGetX:
- retVal = impSIMDGetFixed(simdType, baseType, size, 0);
+ retVal = impSIMDGetFixed(simdType, simdBaseJitType, size, 0);
break;
case SIMDIntrinsicSetW:
@@ -2315,12 +2353,12 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
return nullptr;
}
- op2 = impSIMDPopStack(baseType);
+ op2 = impSIMDPopStack(simdBaseType);
op1 = impSIMDPopStack(simdType, instMethod);
GenTree* src = gtCloneExpr(op1);
assert(src != nullptr);
- simdTree = gtNewSIMDNode(simdType, src, op2, simdIntrinsicID, baseType, size);
+ simdTree = gtNewSIMDNode(simdType, src, op2, simdIntrinsicID, simdBaseJitType, size);
copyBlkDst = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
doCopyBlk = true;
@@ -2335,7 +2373,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
{
op1 = impSIMDPopStack(simdType, instMethod);
- simdTree = gtNewSIMDNode(simdType, op1, nullptr, simdIntrinsicID, baseType, size);
+ simdTree = gtNewSIMDNode(simdType, op1, nullptr, simdIntrinsicID, simdBaseJitType, size);
retVal = simdTree;
}
break;
@@ -2345,7 +2383,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
#ifdef TARGET_64BIT
op1 = impSIMDPopStack(simdType, instMethod);
- simdTree = gtNewSIMDNode(simdType, op1, nullptr, simdIntrinsicID, baseType, size);
+ simdTree = gtNewSIMDNode(simdType, op1, nullptr, simdIntrinsicID, simdBaseJitType, size);
retVal = simdTree;
#else
JITDUMP("SIMD Conversion to Int64 is not supported on this platform\n");
@@ -2360,7 +2398,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
op2 = impSIMDPopStack(simdType);
op1 = impSIMDPopStack(simdType);
// op1 and op2 are two input Vector<T>.
- simdTree = gtNewSIMDNode(simdType, op1, op2, simdIntrinsicID, baseType, size);
+ simdTree = gtNewSIMDNode(simdType, op1, op2, simdIntrinsicID, simdBaseJitType, size);
retVal = simdTree;
}
break;
@@ -2375,7 +2413,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
GenTree* dupOp1 = fgInsertCommaFormTemp(&op1, op1Handle);
// Widen the lower half and assign it to dstAddrLo.
- simdTree = gtNewSIMDNode(simdType, op1, nullptr, SIMDIntrinsicWidenLo, baseType, size);
+ simdTree = gtNewSIMDNode(simdType, op1, nullptr, SIMDIntrinsicWidenLo, simdBaseJitType, size);
// TODO-1stClassStructs: With the introduction of ClassLayout it would be preferable to use
// GT_OBJ instead of GT_BLK nodes to avoid losing information about the actual vector type.
GenTree* loDest = new (this, GT_BLK)
@@ -2386,7 +2424,7 @@ GenTree* Compiler::impSIMDIntrinsic(OPCODE opcode,
loAsg->gtFlags |= ((simdTree->gtFlags | dstAddrLo->gtFlags) & GTF_ALL_EFFECT);
// Widen the upper half and assign it to dstAddrHi.
- simdTree = gtNewSIMDNode(simdType, dupOp1, nullptr, SIMDIntrinsicWidenHi, baseType, size);
+ simdTree = gtNewSIMDNode(simdType, dupOp1, nullptr, SIMDIntrinsicWidenHi, simdBaseJitType, size);
GenTree* hiDest = new (this, GT_BLK)
GenTreeBlk(GT_BLK, simdType, dstAddrHi, typGetBlkLayout(getSIMDTypeSizeInBytes(clsHnd)));
GenTree* hiAsg = gtNewBlkOpNode(hiDest, simdTree,
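For reference, the Widen lowering above produces two destination vectors: WidenLo converts the lower half of the source elements to the next wider element type and WidenHi the upper half, each stored through its own destination address. A scalar model of those semantics (array-based C++, not the actual JIT helpers):

#include <cstdint>
#include <cstdio>

// Semantics of widening an 8-element Vector<short> into two Vector<int>
// results: the low 4 elements go to 'lo', the high 4 elements to 'hi'.
static void WidenShorts(const int16_t src[8], int32_t lo[4], int32_t hi[4])
{
    for (int i = 0; i < 4; i++)
    {
        lo[i] = src[i];        // SIMDIntrinsicWidenLo
        hi[i] = src[i + 4];    // SIMDIntrinsicWidenHi
    }
}

int main()
{
    int16_t src[8] = {1, -2, 3, -4, 5, -6, 7, -8};
    int32_t lo[4], hi[4];
    WidenShorts(src, lo, hi);
    for (int i = 0; i < 4; i++)
    {
        printf("lo[%d]=%d hi[%d]=%d\n", i, lo[i], i, hi[i]);
    }
    return 0;
}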
diff --git a/src/coreclr/jit/simdashwintrinsic.cpp b/src/coreclr/jit/simdashwintrinsic.cpp
index f117c48f425..c093fb05f75 100644
--- a/src/coreclr/jit/simdashwintrinsic.cpp
+++ b/src/coreclr/jit/simdashwintrinsic.cpp
@@ -194,7 +194,7 @@ GenTree* Compiler::impSimdAsHWIntrinsic(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE argClass = NO_CLASS_HANDLE;
var_types retType = JITtype2varType(sig->retType);
- var_types baseType = TYP_UNKNOWN;
+ CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
var_types simdType = TYP_UNKNOWN;
unsigned simdSize = 0;
unsigned numArgs = sig->numArgs;
@@ -202,9 +202,10 @@ GenTree* Compiler::impSimdAsHWIntrinsic(NamedIntrinsic intrinsic,
// We want to resolve and populate the handle cache for this type even
// if it isn't the basis for anything carried on the node.
- baseType = getBaseTypeAndSizeOfSIMDType(clsHnd, &simdSize);
+ simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(clsHnd, &simdSize);
- if ((clsHnd != m_simdHandleCache->SIMDVectorHandle) && !varTypeIsArithmetic(baseType))
+ if ((clsHnd != m_simdHandleCache->SIMDVectorHandle) &&
+ ((simdBaseJitType == CORINFO_TYPE_UNDEF) || !varTypeIsArithmetic(JitType2PreciseVarType(simdBaseJitType))))
{
// We want to exit early if the clsHnd should have a base type and it isn't one
// of the supported types. This handles cases like op_Explicit which take a Vector<T>
@@ -213,13 +214,13 @@ GenTree* Compiler::impSimdAsHWIntrinsic(NamedIntrinsic intrinsic,
if (retType == TYP_STRUCT)
{
- baseType = getBaseTypeAndSizeOfSIMDType(sig->retTypeSigClass, &simdSize);
- retType = getSIMDTypeForSize(simdSize);
+ simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sig->retTypeSigClass, &simdSize);
+ retType = getSIMDTypeForSize(simdSize);
}
else if (numArgs != 0)
{
- argClass = info.compCompHnd->getArgClass(sig, sig->args);
- baseType = getBaseTypeAndSizeOfSIMDType(argClass, &simdSize);
+ argClass = info.compCompHnd->getArgClass(sig, sig->args);
+ simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(argClass, &simdSize);
}
if (sig->hasThis())
@@ -236,28 +237,30 @@ GenTree* Compiler::impSimdAsHWIntrinsic(NamedIntrinsic intrinsic,
// The first argument will be the appropriate Vector<T> handle to use
clsHnd = info.compCompHnd->getArgClass(sig, sig->args);
- // We also need to adjust the baseType as some methods on Vector return
+ // We also need to adjust the simdBaseJitType as some methods on Vector return
// a type different than the operation we need to perform. An example
// is LessThan or Equals which takes double but returns long. This is
// unlike the counterparts on Vector<T> which take and return the same type.
- baseType = getBaseTypeAndSizeOfSIMDType(clsHnd, &simdSize);
+ simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(clsHnd, &simdSize);
}
- if (!varTypeIsArithmetic(baseType) || (simdSize == 0))
+ if ((simdBaseJitType == CORINFO_TYPE_UNDEF) || !varTypeIsArithmetic(JitType2PreciseVarType(simdBaseJitType)) ||
+ (simdSize == 0))
{
// We get here for a devirtualization of IEquatable`1.Equals
// or if the user tries to use Vector<T> with an unsupported type
return nullptr;
}
- simdType = getSIMDTypeForSize(simdSize);
+ var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
+ simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
- NamedIntrinsic hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, baseType);
+ NamedIntrinsic hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, simdBaseType);
if ((hwIntrinsic == NI_Illegal) || !varTypeIsSIMD(simdType))
{
- // The baseType isn't supported by the intrinsic
+ // The simdBaseJitType isn't supported by the intrinsic
return nullptr;
}
@@ -271,7 +274,7 @@ GenTree* Compiler::impSimdAsHWIntrinsic(NamedIntrinsic intrinsic,
if (hwIntrinsic == intrinsic)
{
// The SIMD intrinsic requires special handling outside the normal code path
- return impSimdAsHWIntrinsicSpecial(intrinsic, clsHnd, sig, retType, baseType, simdSize, newobjThis);
+ return impSimdAsHWIntrinsicSpecial(intrinsic, clsHnd, sig, retType, simdBaseJitType, simdSize, newobjThis);
}
CORINFO_InstructionSet hwIntrinsicIsa = HWIntrinsicInfo::lookupIsa(hwIntrinsic);
@@ -293,7 +296,7 @@ GenTree* Compiler::impSimdAsHWIntrinsic(NamedIntrinsic intrinsic,
case 0:
{
assert(!SimdAsHWIntrinsicInfo::NeedsOperandsSwapped(intrinsic));
- return gtNewSimdAsHWIntrinsicNode(retType, hwIntrinsic, baseType, simdSize);
+ return gtNewSimdAsHWIntrinsicNode(retType, hwIntrinsic, simdBaseJitType, simdSize);
}
case 1:
@@ -303,7 +306,7 @@ GenTree* Compiler::impSimdAsHWIntrinsic(NamedIntrinsic intrinsic,
op1 = getArgForHWIntrinsic(argType, argClass, isInstanceMethod);
assert(!SimdAsHWIntrinsicInfo::NeedsOperandsSwapped(intrinsic));
- return gtNewSimdAsHWIntrinsicNode(retType, op1, hwIntrinsic, baseType, simdSize);
+ return gtNewSimdAsHWIntrinsicNode(retType, op1, hwIntrinsic, simdBaseJitType, simdSize);
}
case 2:
@@ -321,7 +324,7 @@ GenTree* Compiler::impSimdAsHWIntrinsic(NamedIntrinsic intrinsic,
std::swap(op1, op2);
}
- return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, baseType, simdSize);
+ return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, simdBaseJitType, simdSize);
}
}
@@ -334,12 +337,12 @@ GenTree* Compiler::impSimdAsHWIntrinsic(NamedIntrinsic intrinsic,
// This method handles cases which cannot be table driven
//
// Arguments:
-// intrinsic -- id of the intrinsic function.
-// clsHnd -- class handle containing the intrinsic function.
-// sig -- signature of the intrinsic call
-// retType -- the return type of the intrinsic call
-// baseType -- the base type of SIMD type of the intrinsic
-// simdSize -- the size of the SIMD type of the intrinsic
+// intrinsic -- id of the intrinsic function.
+// clsHnd -- class handle containing the intrinsic function.
+// sig -- signature of the intrinsic call
+// retType -- the return type of the intrinsic call
+// simdBaseJitType -- the base JIT type of SIMD type of the intrinsic
+// simdSize -- the size of the SIMD type of the intrinsic
//
// Return Value:
// The GT_HWINTRINSIC node, or nullptr if not a supported intrinsic
@@ -348,15 +351,17 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_SIG_INFO* sig,
var_types retType,
- var_types baseType,
+ CorInfoType simdBaseJitType,
unsigned simdSize,
GenTree* newobjThis)
{
+ var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
+
assert(featureSIMD);
assert(retType != TYP_UNKNOWN);
- assert(varTypeIsArithmetic(baseType));
+ assert(varTypeIsArithmetic(simdBaseType));
assert(simdSize != 0);
- assert(SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, baseType) == intrinsic);
+ assert(SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, simdBaseType) == intrinsic);
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
@@ -402,7 +407,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
case NI_VectorT128_CreateBroadcast:
case NI_VectorT256_CreateBroadcast:
{
- if (varTypeIsLong(baseType))
+ if (varTypeIsLong(simdBaseType))
{
// TODO-XARCH-CQ: It may be beneficial to emit the movq
// instruction, which takes a 64-bit memory address and
@@ -418,10 +423,11 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
#endif // TARGET_XARCH
case NI_VectorT128_As:
{
- unsigned retSimdSize;
- var_types retBaseType = getBaseTypeAndSizeOfSIMDType(sig->retTypeSigClass, &retSimdSize);
+ unsigned retSimdSize;
+ CorInfoType retBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sig->retTypeSigClass, &retSimdSize);
- if (!varTypeIsArithmetic(retBaseType) || (retSimdSize == 0))
+ if ((retBaseJitType == CORINFO_TYPE_UNDEF) ||
+ !varTypeIsArithmetic(JitType2PreciseVarType(retBaseJitType)) || (retSimdSize == 0))
{
// We get here if the return type is an unsupported type
return nullptr;
@@ -437,7 +443,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
// We need to exit early if this is Vector<T>.Dot for int or uint and SSE41 is not supported
// The other types should be handled via the table driven paths
- assert((baseType == TYP_INT) || (baseType == TYP_UINT));
+ assert((simdBaseType == TYP_INT) || (simdBaseType == TYP_UINT));
return nullptr;
}
break;
@@ -469,7 +475,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
case NI_VectorT128_get_One:
case NI_VectorT256_get_One:
{
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
@@ -492,7 +498,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
case TYP_FLOAT:
case TYP_DOUBLE:
{
- op1 = gtNewDconNode(1.0, baseType);
+ op1 = gtNewDconNode(1.0, simdBaseType);
break;
}
@@ -502,14 +508,14 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
}
}
- return gtNewSimdCreateBroadcastNode(retType, op1, baseType, simdSize,
+ return gtNewSimdCreateBroadcastNode(retType, op1, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ true);
}
case NI_VectorT128_get_Count:
case NI_VectorT256_get_Count:
{
- GenTreeIntCon* countNode = gtNewIconNode(getSIMDVectorLength(simdSize, baseType), TYP_INT);
+ GenTreeIntCon* countNode = gtNewIconNode(getSIMDVectorLength(simdSize, simdBaseType), TYP_INT);
countNode->gtFlags |= GTF_ICON_SIMD_COUNT;
return countNode;
}
@@ -519,7 +525,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
case NI_Vector4_get_One:
case NI_VectorT128_get_One:
{
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
@@ -542,7 +548,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
case TYP_FLOAT:
case TYP_DOUBLE:
{
- op1 = gtNewDconNode(1.0, baseType);
+ op1 = gtNewDconNode(1.0, simdBaseType);
break;
}
@@ -552,13 +558,13 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
}
}
- return gtNewSimdCreateBroadcastNode(retType, op1, baseType, simdSize,
+ return gtNewSimdCreateBroadcastNode(retType, op1, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ true);
}
case NI_VectorT128_get_Count:
{
- GenTreeIntCon* countNode = gtNewIconNode(getSIMDVectorLength(simdSize, baseType), TYP_INT);
+ GenTreeIntCon* countNode = gtNewIconNode(getSIMDVectorLength(simdSize, simdBaseType), TYP_INT);
countNode->gtFlags |= GTF_ICON_SIMD_COUNT;
return countNode;
}
@@ -614,13 +620,13 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
case NI_VectorT128_Abs:
case NI_VectorT256_Abs:
{
- if (varTypeIsFloating(baseType))
+ if (varTypeIsFloating(simdBaseType))
{
// Abs(vf) = vf & new SIMDVector<float>(0x7fffffff);
// Abs(vd) = vd & new SIMDVector<double>(0x7fffffffffffffff);
GenTree* bitMask = nullptr;
- if (baseType == TYP_FLOAT)
+ if (simdBaseType == TYP_FLOAT)
{
static_assert_no_msg(sizeof(float) == sizeof(int));
int mask = 0x7fffffff;
@@ -628,7 +634,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
}
else
{
- assert(baseType == TYP_DOUBLE);
+ assert(simdBaseType == TYP_DOUBLE);
static_assert_no_msg(sizeof(double) == sizeof(__int64));
__int64 mask = 0x7fffffffffffffffLL;
@@ -636,21 +642,21 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
}
assert(bitMask != nullptr);
- bitMask = gtNewSimdCreateBroadcastNode(retType, bitMask, baseType, simdSize,
+ bitMask = gtNewSimdCreateBroadcastNode(retType, bitMask, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ true);
intrinsic = isVectorT256 ? NI_VectorT256_op_BitwiseAnd : NI_VectorT128_op_BitwiseAnd;
- intrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, baseType);
+ intrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, simdBaseType);
- return gtNewSimdAsHWIntrinsicNode(retType, op1, bitMask, intrinsic, baseType, simdSize);
+ return gtNewSimdAsHWIntrinsicNode(retType, op1, bitMask, intrinsic, simdBaseJitType, simdSize);
}
- else if (varTypeIsUnsigned(baseType))
+ else if (varTypeIsUnsigned(simdBaseType))
{
return op1;
}
- else if ((baseType != TYP_LONG) && compOpportunisticallyDependsOn(InstructionSet_SSSE3))
+ else if ((simdBaseType != TYP_LONG) && compOpportunisticallyDependsOn(InstructionSet_SSSE3))
{
- return gtNewSimdAsHWIntrinsicNode(retType, op1, NI_SSSE3_Abs, baseType, simdSize);
+ return gtNewSimdAsHWIntrinsicNode(retType, op1, NI_SSSE3_Abs, simdBaseJitType, simdSize);
}
else
{
@@ -666,24 +672,26 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
nullptr DEBUGARG("Clone op1 for Vector<T>.Abs"));
// op1 = op1 < Zero
- tmp = gtNewSIMDVectorZero(retType, baseType, simdSize);
+ tmp = gtNewSIMDVectorZero(retType, simdBaseJitType, simdSize);
hwIntrinsic = isVectorT256 ? NI_VectorT256_LessThan : NI_VectorT128_LessThan;
- op1 = impSimdAsHWIntrinsicRelOp(hwIntrinsic, clsHnd, retType, baseType, simdSize, op1, tmp);
+ op1 = impSimdAsHWIntrinsicRelOp(hwIntrinsic, clsHnd, retType, simdBaseJitType, simdSize, op1,
+ tmp);
// tmp = Zero - op1Dup1
- tmp = gtNewSIMDVectorZero(retType, baseType, simdSize);
+ tmp = gtNewSIMDVectorZero(retType, simdBaseJitType, simdSize);
hwIntrinsic = isVectorT256 ? NI_AVX2_Subtract : NI_SSE2_Subtract;
- tmp = gtNewSimdAsHWIntrinsicNode(retType, tmp, op1Dup1, hwIntrinsic, baseType, simdSize);
+ tmp = gtNewSimdAsHWIntrinsicNode(retType, tmp, op1Dup1, hwIntrinsic, simdBaseJitType, simdSize);
// result = ConditionalSelect(op1, tmp, op1Dup2)
- return impSimdAsHWIntrinsicCndSel(clsHnd, retType, baseType, simdSize, op1, tmp, op1Dup2);
+ return impSimdAsHWIntrinsicCndSel(clsHnd, retType, simdBaseJitType, simdSize, op1, tmp,
+ op1Dup2);
}
break;
}
#elif defined(TARGET_ARM64)
case NI_VectorT128_Abs:
{
- assert(varTypeIsUnsigned(baseType));
+ assert(varTypeIsUnsigned(simdBaseType));
return op1;
}
#else
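Two Abs lowerings appear in the hunks above: floating-point Abs is a bitwise AND that clears the IEEE-754 sign bit (the 0x7fffffff / 0x7fffffffffffffff masks broadcast into a vector), and the signed-integer fallback builds Abs from a less-than-zero mask, a subtraction from zero, and a conditional select. A scalar model of both (plain C++; assumes IEEE-754 float layout):

#include <cstdint>
#include <cstring>
#include <cstdio>

// Float Abs as a bit mask: clearing the top bit of the IEEE-754 encoding
// (the 0x7fffffff constant above) drops the sign.
static float AbsViaMask(float value)
{
    uint32_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    bits &= 0x7fffffffu;
    std::memcpy(&value, &bits, sizeof(value));
    return value;
}

// Integer Abs via compare + subtract + conditional select, mirroring the
// SSE2 fallback: mask = (x < 0), neg = 0 - x, result = mask ? neg : x.
static int32_t AbsViaSelect(int32_t x)
{
    uint32_t mask = (x < 0) ? 0xFFFFFFFFu : 0u;   // per-lane compare result
    int32_t  neg  = 0 - x;                        // Zero - op1Dup1
    return (int32_t)((mask & (uint32_t)neg) | (~mask & (uint32_t)x));
}

int main()
{
    printf("%f %f\n", AbsViaMask(-1.5f), AbsViaMask(2.25f));
    printf("%d %d\n", AbsViaSelect(-42), AbsViaSelect(42));
    return 0;
}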
@@ -724,8 +732,8 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
assert(retType == TYP_VOID);
copyBlkDst = op1;
- copyBlkSrc =
- gtNewSimdCreateBroadcastNode(simdType, op2, baseType, simdSize, /* isSimdAsHWIntrinsic */ true);
+ copyBlkSrc = gtNewSimdCreateBroadcastNode(simdType, op2, simdBaseJitType, simdSize,
+ /* isSimdAsHWIntrinsic */ true);
break;
}
@@ -742,24 +750,27 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
assert((shiftCount > 0) && (shiftCount <= 16));
// retNode = Sse.Divide(op1, op2);
- GenTree* retNode = gtNewSimdAsHWIntrinsicNode(retType, op1, op2, NI_SSE_Divide, baseType, simdSize);
+ GenTree* retNode =
+ gtNewSimdAsHWIntrinsicNode(retType, op1, op2, NI_SSE_Divide, simdBaseJitType, simdSize);
// retNode = Sse.ShiftLeftLogical128BitLane(retNode.AsInt32(), shiftCount).AsSingle()
- retNode = gtNewSimdAsHWIntrinsicNode(retType, retNode, gtNewIconNode(shiftCount, TYP_INT),
- NI_SSE2_ShiftLeftLogical128BitLane, TYP_INT, simdSize);
+ retNode =
+ gtNewSimdAsHWIntrinsicNode(retType, retNode, gtNewIconNode(shiftCount, TYP_INT),
+ NI_SSE2_ShiftLeftLogical128BitLane, CORINFO_TYPE_INT, simdSize);
// retNode = Sse.ShiftRightLogical128BitLane(retNode.AsInt32(), shiftCount).AsSingle()
- retNode = gtNewSimdAsHWIntrinsicNode(retType, retNode, gtNewIconNode(shiftCount, TYP_INT),
- NI_SSE2_ShiftRightLogical128BitLane, TYP_INT, simdSize);
+ retNode =
+ gtNewSimdAsHWIntrinsicNode(retType, retNode, gtNewIconNode(shiftCount, TYP_INT),
+ NI_SSE2_ShiftRightLogical128BitLane, CORINFO_TYPE_INT, simdSize);
return retNode;
}
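The paired 128-bit lane shifts above exist because, for Vector2/Vector3, the unused upper elements of the division result can hold garbage (Infinity or NaN from dividing by zero in those lanes); shifting the whole 128-bit lane left and then right by the same byte count zeroes exactly those upper bytes. The same idea on a plain integer (scalar sketch, not JIT code):

#include <cstdint>
#include <cassert>

// Logically shifting a value left and then right by the same amount clears
// exactly the top 'bits' bits -- the byte-granular lane shifts above
// (ShiftLeftLogical128BitLane / ShiftRightLogical128BitLane) do the same to
// the unused upper elements of a Vector2/Vector3 result.
static uint32_t ClearUpperBits(uint32_t value, int bits)
{
    assert(bits > 0 && bits < 32);
    return (value << bits) >> bits;
}

int main()
{
    assert(ClearUpperBits(0xDEADBEEFu, 16) == 0x0000BEEFu);
    assert(ClearUpperBits(0xDEADBEEFu, 8)  == 0x00ADBEEFu);
    return 0;
}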
case NI_VectorT128_Dot:
{
- assert((baseType == TYP_INT) || (baseType == TYP_UINT));
+ assert((simdBaseType == TYP_INT) || (simdBaseType == TYP_UINT));
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
- return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, NI_Vector128_Dot, baseType, simdSize);
+ return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, NI_Vector128_Dot, simdBaseJitType, simdSize);
}
case NI_VectorT128_Equals:
@@ -772,7 +783,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
case NI_VectorT256_LessThan:
case NI_VectorT256_LessThanOrEqual:
{
- return impSimdAsHWIntrinsicRelOp(intrinsic, clsHnd, retType, baseType, simdSize, op1, op2);
+ return impSimdAsHWIntrinsicRelOp(intrinsic, clsHnd, retType, simdBaseJitType, simdSize, op1, op2);
}
case NI_VectorT128_Max:
@@ -780,29 +791,32 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
case NI_VectorT256_Max:
case NI_VectorT256_Min:
{
- if ((baseType == TYP_BYTE) || (baseType == TYP_USHORT))
+ if ((simdBaseType == TYP_BYTE) || (simdBaseType == TYP_USHORT))
{
- GenTree* constVal = nullptr;
- var_types opType = baseType;
+ GenTree* constVal = nullptr;
+ CorInfoType opJitType = simdBaseJitType;
+ var_types opType = simdBaseType;
NamedIntrinsic opIntrinsic;
NamedIntrinsic hwIntrinsic;
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_BYTE:
{
- constVal = gtNewIconNode(0x80808080, TYP_INT);
- opIntrinsic = NI_VectorT128_op_Subtraction;
- baseType = TYP_UBYTE;
+ constVal = gtNewIconNode(0x80808080, TYP_INT);
+ opIntrinsic = NI_VectorT128_op_Subtraction;
+ simdBaseJitType = CORINFO_TYPE_UBYTE;
+ simdBaseType = TYP_UBYTE;
break;
}
case TYP_USHORT:
{
- constVal = gtNewIconNode(0x80008000, TYP_INT);
- opIntrinsic = NI_VectorT128_op_Addition;
- baseType = TYP_SHORT;
+ constVal = gtNewIconNode(0x80008000, TYP_INT);
+ opIntrinsic = NI_VectorT128_op_Addition;
+ simdBaseJitType = CORINFO_TYPE_SHORT;
+ simdBaseType = TYP_SHORT;
break;
}
@@ -812,8 +826,9 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
}
}
- GenTree* constVector = gtNewSimdCreateBroadcastNode(retType, constVal, TYP_INT, simdSize,
- /* isSimdAsHWIntrinsic */ true);
+ GenTree* constVector =
+ gtNewSimdCreateBroadcastNode(retType, constVal, CORINFO_TYPE_INT, simdSize,
+ /* isSimdAsHWIntrinsic */ true);
GenTree* constVectorDup1;
constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
@@ -829,18 +844,19 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
// op1 = op1 - constVector
// -or-
// op1 = op1 + constVector
- op1 = gtNewSimdAsHWIntrinsicNode(retType, op1, constVector, hwIntrinsic, opType, simdSize);
+ op1 = gtNewSimdAsHWIntrinsicNode(retType, op1, constVector, hwIntrinsic, opJitType, simdSize);
// op2 = op2 - constVectorDup1
// -or-
// op2 = op2 + constVectorDup1
- op2 = gtNewSimdAsHWIntrinsicNode(retType, op2, constVectorDup1, hwIntrinsic, opType, simdSize);
+ op2 =
+ gtNewSimdAsHWIntrinsicNode(retType, op2, constVectorDup1, hwIntrinsic, opJitType, simdSize);
// op1 = Max(op1, op2)
// -or-
// op1 = Min(op1, op2)
- hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, baseType);
- op1 = gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, baseType, simdSize);
+ hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, simdBaseType);
+ op1 = gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, simdBaseJitType, simdSize);
// result = op1 + constVectorDup2
// -or-
@@ -848,7 +864,8 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
opIntrinsic = (opIntrinsic == NI_VectorT128_op_Subtraction) ? NI_VectorT128_op_Addition
: NI_VectorT128_op_Subtraction;
hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(opIntrinsic, opType);
- return gtNewSimdAsHWIntrinsicNode(retType, op1, constVectorDup2, hwIntrinsic, opType, simdSize);
+ return gtNewSimdAsHWIntrinsicNode(retType, op1, constVectorDup2, hwIntrinsic, opJitType,
+ simdSize);
}
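SSE2 only provides Min/Max directly for unsigned bytes and signed shorts, so the path above handles Vector<sbyte> and Vector<ushort> by biasing both operands by 0x80/0x8000 (an order-preserving sign-bit flip), applying the supported Min/Max, and then undoing the bias. A scalar check of the sbyte Max case (plain C++, exhaustive over all byte pairs):

#include <cstdint>
#include <cassert>
#include <algorithm>

// Max of two signed bytes computed with an *unsigned* max, as done above for
// Vector<sbyte> on SSE2.  Modulo 256, adding and subtracting 0x80 are the same
// sign-bit flip, and the mapping preserves order, so a PMAXUB-style unsigned
// max gives the signed answer once the bias is removed.
static int8_t SignedMaxViaUnsignedMax(int8_t a, int8_t b)
{
    uint8_t ua = (uint8_t)(a + 0x80);  // op1 biased into unsigned space
    uint8_t ub = (uint8_t)(b + 0x80);  // op2 biased into unsigned space
    uint8_t um = (ua > ub) ? ua : ub;  // the Min/Max SSE2 does support
    return (int8_t)((int)um - 0x80);   // undo the bias
}

int main()
{
    for (int a = -128; a <= 127; a++)
    {
        for (int b = -128; b <= 127; b++)
        {
            assert(SignedMaxViaUnsignedMax((int8_t)a, (int8_t)b) == std::max(a, b));
        }
    }
    return 0;
}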
GenTree* op1Dup;
@@ -871,10 +888,10 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
// op1 = op1 > op2
// -or-
// op1 = op1 < op2
- op1 = impSimdAsHWIntrinsicRelOp(intrinsic, clsHnd, retType, baseType, simdSize, op1, op2);
+ op1 = impSimdAsHWIntrinsicRelOp(intrinsic, clsHnd, retType, simdBaseJitType, simdSize, op1, op2);
// result = ConditionalSelect(op1, op1Dup, op2Dup)
- return impSimdAsHWIntrinsicCndSel(clsHnd, retType, baseType, simdSize, op1, op1Dup, op2Dup);
+ return impSimdAsHWIntrinsicCndSel(clsHnd, retType, simdBaseJitType, simdSize, op1, op1Dup, op2Dup);
}
case NI_VectorT128_op_Multiply:
@@ -893,11 +910,11 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
if (broadcastOp != nullptr)
{
- *broadcastOp = gtNewSimdCreateBroadcastNode(simdType, *broadcastOp, baseType, simdSize,
+ *broadcastOp = gtNewSimdCreateBroadcastNode(simdType, *broadcastOp, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ true);
}
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_SHORT:
case TYP_USHORT:
@@ -926,30 +943,30 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
nullptr DEBUGARG("Clone op2 for Vector<T>.Multiply"));
// op1 = Sse2.ShiftRightLogical128BitLane(op1, 4)
- op1 =
- gtNewSimdAsHWIntrinsicNode(retType, op1, gtNewIconNode(4, TYP_INT),
- NI_SSE2_ShiftRightLogical128BitLane, baseType, simdSize);
+ op1 = gtNewSimdAsHWIntrinsicNode(retType, op1, gtNewIconNode(4, TYP_INT),
+ NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType,
+ simdSize);
// op2 = Sse2.ShiftRightLogical128BitLane(op2, 4)
- op2 =
- gtNewSimdAsHWIntrinsicNode(retType, op2, gtNewIconNode(4, TYP_INT),
- NI_SSE2_ShiftRightLogical128BitLane, baseType, simdSize);
+ op2 = gtNewSimdAsHWIntrinsicNode(retType, op2, gtNewIconNode(4, TYP_INT),
+ NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType,
+ simdSize);
// op2 = Sse2.Multiply(op2.AsUInt64(), op1.AsUInt64()).AsInt32()
- op2 = gtNewSimdAsHWIntrinsicNode(retType, op2, op1, NI_SSE2_Multiply, TYP_ULONG,
- simdSize);
+ op2 = gtNewSimdAsHWIntrinsicNode(retType, op2, op1, NI_SSE2_Multiply,
+ CORINFO_TYPE_ULONG, simdSize);
// op2 = Sse2.Shuffle(op2, (0, 0, 2, 0))
op2 = gtNewSimdAsHWIntrinsicNode(retType, op2, gtNewIconNode(SHUFFLE_XXZX, TYP_INT),
- NI_SSE2_Shuffle, baseType, simdSize);
+ NI_SSE2_Shuffle, simdBaseJitType, simdSize);
// op1 = Sse2.Multiply(op1Dup.AsUInt64(), op2Dup.AsUInt64()).AsInt32()
- op1 = gtNewSimdAsHWIntrinsicNode(retType, op1Dup, op2Dup, NI_SSE2_Multiply, TYP_ULONG,
- simdSize);
+ op1 = gtNewSimdAsHWIntrinsicNode(retType, op1Dup, op2Dup, NI_SSE2_Multiply,
+ CORINFO_TYPE_ULONG, simdSize);
// op1 = Sse2.Shuffle(op1, (0, 0, 2, 0))
op1 = gtNewSimdAsHWIntrinsicNode(retType, op1, gtNewIconNode(SHUFFLE_XXZX, TYP_INT),
- NI_SSE2_Shuffle, baseType, simdSize);
+ NI_SSE2_Shuffle, simdBaseJitType, simdSize);
// result = Sse2.UnpackLow(op1, op2)
hwIntrinsic = NI_SSE2_UnpackLow;
@@ -976,7 +993,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
}
assert(hwIntrinsic != NI_Illegal);
- return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, baseType, simdSize);
+ return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, simdBaseJitType, simdSize);
}
case NI_VectorT256_op_Multiply:
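
Before SSE4.1 there is no packed 32-bit multiply that keeps the low halves (pmulld), so the Vector<int> path above assembles one from Sse2.Multiply (pmuludq) on the even and odd lanes, shuffles that keep the low 32 bits of each 64-bit product, and Sse2.UnpackLow to interleave the two halves. A scalar model of that sequence, assuming plain four-lane arrays and an illustrative helper name:

    #include <cstdint>
    #include <cstdio>

    // Scalar model of the SSE2 sequence above: 32x32->64-bit multiplies of the
    // even and the odd lanes (Sse2.Multiply), keep the low 32 bits of each
    // product (the SHUFFLE_XXZX shuffles), then interleave (Sse2.UnpackLow).
    static void MulI32x4(const uint32_t a[4], const uint32_t b[4], uint32_t r[4])
    {
        uint32_t even[2], odd[2];
        for (int i = 0; i < 2; i++)
        {
            even[i] = (uint32_t)((uint64_t)a[2 * i] * b[2 * i]);         // lanes 0 and 2
            odd[i]  = (uint32_t)((uint64_t)a[2 * i + 1] * b[2 * i + 1]); // lanes 1 and 3
        }
        r[0] = even[0]; r[1] = odd[0]; r[2] = even[1]; r[3] = odd[1];    // UnpackLow
    }

    int main()
    {
        uint32_t a[4] = {1, 2, 3, 0xFFFFFFFFu};
        uint32_t b[4] = {5, 6, 7, 2};
        uint32_t r[4];
        MulI32x4(a, b, r);
        std::printf("%u %u %u %u\n", r[0], r[1], r[2], r[3]); // 5 12 21 4294967294
        return 0;
    }
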
@@ -995,11 +1012,11 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
if (broadcastOp != nullptr)
{
- *broadcastOp = gtNewSimdCreateBroadcastNode(simdType, *broadcastOp, baseType, simdSize,
+ *broadcastOp = gtNewSimdCreateBroadcastNode(simdType, *broadcastOp, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ true);
}
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_SHORT:
case TYP_USHORT:
@@ -1024,7 +1041,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
}
assert(hwIntrinsic != NI_Illegal);
- return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, baseType, simdSize);
+ return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, simdBaseJitType, simdSize);
}
#elif defined(TARGET_ARM64)
@@ -1036,15 +1053,15 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
assert(retType == TYP_VOID);
copyBlkDst = op1;
- copyBlkSrc =
- gtNewSimdCreateBroadcastNode(simdType, op2, baseType, simdSize, /* isSimdAsHWIntrinsic */ true);
+ copyBlkSrc = gtNewSimdCreateBroadcastNode(simdType, op2, simdBaseJitType, simdSize,
+ /* isSimdAsHWIntrinsic */ true);
break;
}
case NI_VectorT128_Max:
case NI_VectorT128_Min:
{
- assert((baseType == TYP_LONG) || (baseType == TYP_ULONG));
+ assert((simdBaseType == TYP_LONG) || (simdBaseType == TYP_ULONG));
NamedIntrinsic hwIntrinsic;
@@ -1061,11 +1078,11 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
// op1 = op1 > op2
// -or-
// op1 = op1 < op2
- hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, baseType);
- op1 = gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, baseType, simdSize);
+ hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, simdBaseType);
+ op1 = gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, simdBaseJitType, simdSize);
// result = ConditionalSelect(op1, op1Dup, op2Dup)
- return impSimdAsHWIntrinsicCndSel(clsHnd, retType, baseType, simdSize, op1, op1Dup, op2Dup);
+ return impSimdAsHWIntrinsicCndSel(clsHnd, retType, simdBaseJitType, simdSize, op1, op1Dup, op2Dup);
}
case NI_VectorT128_op_Multiply:
@@ -1086,14 +1103,14 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
scalarOp = &op2;
}
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
if (scalarOp != nullptr)
{
- *scalarOp = gtNewSimdCreateBroadcastNode(simdType, *scalarOp, baseType, simdSize,
+ *scalarOp = gtNewSimdCreateBroadcastNode(simdType, *scalarOp, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ true);
}
@@ -1110,8 +1127,9 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
if (scalarOp != nullptr)
{
hwIntrinsic = NI_AdvSimd_MultiplyByScalar;
- *scalarOp = gtNewSimdAsHWIntrinsicNode(TYP_SIMD8, *scalarOp,
- NI_Vector64_CreateScalarUnsafe, baseType, 8);
+ *scalarOp =
+ gtNewSimdAsHWIntrinsicNode(TYP_SIMD8, *scalarOp, NI_Vector64_CreateScalarUnsafe,
+ simdBaseJitType, 8);
}
else
{
@@ -1125,8 +1143,8 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
if (scalarOp != nullptr)
{
hwIntrinsic = NI_AdvSimd_Arm64_MultiplyByScalar;
- *scalarOp =
- gtNewSimdAsHWIntrinsicNode(TYP_SIMD8, *scalarOp, NI_Vector64_Create, baseType, 8);
+ *scalarOp = gtNewSimdAsHWIntrinsicNode(TYP_SIMD8, *scalarOp, NI_Vector64_Create,
+ simdBaseJitType, 8);
}
else
{
@@ -1142,7 +1160,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
}
assert(hwIntrinsic != NI_Illegal);
- return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, baseType, simdSize);
+ return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, simdBaseJitType, simdSize);
}
#else
#error Unsupported platform
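
On ARM64, the multiply hunks above special-case a scalar operand: for byte lanes the scalar is broadcast with CreateBroadcast, while wider lanes wrap it in a Vector64 (CreateScalarUnsafe or Create) and use (Arm64.)MultiplyByScalar. A scalar model of the broadcast-then-multiply shape; the helper name and int lanes are illustrative:

    #include <cstdint>
    #include <cstdio>

    // When one operand of Vector<T> * T is a scalar, it is broadcast (or fed to
    // MultiplyByScalar) and the multiply then proceeds lane by lane.
    static void MulByScalarI32x4(const int32_t v[4], int32_t scalar, int32_t r[4])
    {
        int32_t broadcast[4] = {scalar, scalar, scalar, scalar}; // CreateBroadcast / CreateScalarUnsafe
        for (int i = 0; i < 4; i++)
        {
            r[i] = v[i] * broadcast[i];
        }
    }

    int main()
    {
        int32_t v[4] = {1, 2, 3, 4};
        int32_t r[4];
        MulByScalarI32x4(v, 10, r);
        std::printf("%d %d %d %d\n", r[0], r[1], r[2], r[3]); // 10 20 30 40
        return 0;
    }
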
@@ -1183,12 +1201,12 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
case NI_VectorT128_ConditionalSelect:
case NI_VectorT256_ConditionalSelect:
{
- return impSimdAsHWIntrinsicCndSel(clsHnd, retType, baseType, simdSize, op1, op2, op3);
+ return impSimdAsHWIntrinsicCndSel(clsHnd, retType, simdBaseJitType, simdSize, op1, op2, op3);
}
#elif defined(TARGET_ARM64)
case NI_VectorT128_ConditionalSelect:
{
- return impSimdAsHWIntrinsicCndSel(clsHnd, retType, baseType, simdSize, op1, op2, op3);
+ return impSimdAsHWIntrinsicCndSel(clsHnd, retType, simdBaseJitType, simdSize, op1, op2, op3);
}
#else
#error Unsupported platform
@@ -1233,28 +1251,30 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
// impSimdAsHWIntrinsicCndSel: Import a SIMD conditional select intrinsic
//
// Arguments:
-// clsHnd -- class handle containing the intrinsic function.
-// retType -- the return type of the intrinsic call
-// baseType -- the base type of SIMD type of the intrinsic
-// simdSize -- the size of the SIMD type of the intrinsic
-// op1 -- the first operand of the intrinsic
-// op2 -- the second operand of the intrinsic
-// op3 -- the third operand of the intrinsic
+// clsHnd -- class handle containing the intrinsic function.
+// retType -- the return type of the intrinsic call
+// simdBaseJitType -- the base JIT type of SIMD type of the intrinsic
+// simdSize -- the size of the SIMD type of the intrinsic
+// op1 -- the first operand of the intrinsic
+// op2 -- the second operand of the intrinsic
+// op3 -- the third operand of the intrinsic
//
// Return Value:
// The GT_HWINTRINSIC node representing the conditional select
//
GenTree* Compiler::impSimdAsHWIntrinsicCndSel(CORINFO_CLASS_HANDLE clsHnd,
var_types retType,
- var_types baseType,
+ CorInfoType simdBaseJitType,
unsigned simdSize,
GenTree* op1,
GenTree* op2,
GenTree* op3)
{
+ var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
+
assert(featureSIMD);
assert(retType != TYP_UNKNOWN);
- assert(varTypeIsArithmetic(baseType));
+ assert(varTypeIsArithmetic(simdBaseType));
assert(simdSize != 0);
assert(varTypeIsSIMD(getSIMDTypeForSize(simdSize)));
assert(op1 != nullptr);
@@ -1275,24 +1295,24 @@ GenTree* Compiler::impSimdAsHWIntrinsicCndSel(CORINFO_CLASS_HANDLE clsHnd,
nullptr DEBUGARG("Clone op1 for Vector<T>.ConditionalSelect"));
// op2 = op2 & op1
- hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(NI_VectorT128_op_BitwiseAnd, baseType);
- op2 = gtNewSimdAsHWIntrinsicNode(retType, op2, op1, hwIntrinsic, baseType, simdSize);
+ hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(NI_VectorT128_op_BitwiseAnd, simdBaseType);
+ op2 = gtNewSimdAsHWIntrinsicNode(retType, op2, op1, hwIntrinsic, simdBaseJitType, simdSize);
// op3 = op3 & ~op1Dup
- hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(NI_VectorT128_AndNot, baseType);
+ hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(NI_VectorT128_AndNot, simdBaseType);
if (SimdAsHWIntrinsicInfo::NeedsOperandsSwapped(NI_VectorT128_AndNot))
{
std::swap(op3, op1Dup);
}
- op3 = gtNewSimdAsHWIntrinsicNode(retType, op3, op1Dup, hwIntrinsic, baseType, simdSize);
+ op3 = gtNewSimdAsHWIntrinsicNode(retType, op3, op1Dup, hwIntrinsic, simdBaseJitType, simdSize);
// result = op2 | op3
- hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(NI_VectorT128_op_BitwiseOr, baseType);
- return gtNewSimdAsHWIntrinsicNode(retType, op2, op3, hwIntrinsic, baseType, simdSize);
+ hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(NI_VectorT128_op_BitwiseOr, simdBaseType);
+ return gtNewSimdAsHWIntrinsicNode(retType, op2, op3, hwIntrinsic, simdBaseJitType, simdSize);
#elif defined(TARGET_ARM64)
- return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, op3, NI_AdvSimd_BitwiseSelect, baseType, simdSize);
+ return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, op3, NI_AdvSimd_BitwiseSelect, simdBaseJitType, simdSize);
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
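
impSimdAsHWIntrinsicCndSel above maps ConditionalSelect directly to AdvSimd.BitwiseSelect on ARM64, while on xarch it is decomposed into (op2 & mask) | (op3 & ~mask). A scalar sketch of the xarch decomposition; the helper name is illustrative:

    #include <cstdint>
    #include <cstdio>

    // Per-bit select, mirroring "op2 = op2 & op1; op3 = op3 & ~op1Dup; result = op2 | op3".
    static uint32_t ConditionalSelectSketch(uint32_t mask, uint32_t op2, uint32_t op3)
    {
        uint32_t picked    = op2 & mask;  // op2 = op2 & op1
        uint32_t notPicked = op3 & ~mask; // op3 = AndNot of op3 with op1Dup
        return picked | notPicked;        // result = op2 | op3
    }

    int main()
    {
        std::printf("0x%08X\n", ConditionalSelectSketch(0xFFFF0000u, 0xAAAAAAAAu, 0x55555555u)); // 0xAAAA5555
        return 0;
    }
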
@@ -1303,13 +1323,13 @@ GenTree* Compiler::impSimdAsHWIntrinsicCndSel(CORINFO_CLASS_HANDLE clsHnd,
// impSimdAsHWIntrinsicRelOp: Import a SIMD relational operator intrinsic
//
// Arguments:
-// intrinsic -- id of the intrinsic function.
-// clsHnd -- class handle containing the intrinsic function.
-// retType -- the return type of the intrinsic call
-// baseType -- the base type of SIMD type of the intrinsic
-// simdSize -- the size of the SIMD type of the intrinsic
-// op1 -- the first operand of the intrinsic
-// op2 -- the second operand of the intrinsic
+// intrinsic -- id of the intrinsic function.
+// clsHnd -- class handle containing the intrinsic function.
+// retType -- the return type of the intrinsic call
+// simdBaseJitType -- the base JIT type of SIMD type of the intrinsic
+// simdSize -- the size of the SIMD type of the intrinsic
+// op1 -- the first operand of the intrinsic
+// op2 -- the second operand of the intrinsic
//
// Return Value:
// The GT_HWINTRINSIC node representing the relational operator
@@ -1317,14 +1337,16 @@ GenTree* Compiler::impSimdAsHWIntrinsicCndSel(CORINFO_CLASS_HANDLE clsHnd,
GenTree* Compiler::impSimdAsHWIntrinsicRelOp(NamedIntrinsic intrinsic,
CORINFO_CLASS_HANDLE clsHnd,
var_types retType,
- var_types baseType,
+ CorInfoType simdBaseJitType,
unsigned simdSize,
GenTree* op1,
GenTree* op2)
{
+ var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
+
assert(featureSIMD);
assert(retType != TYP_UNKNOWN);
- assert(varTypeIsIntegral(baseType));
+ assert(varTypeIsIntegral(simdBaseType));
assert(simdSize != 0);
assert(varTypeIsSIMD(getSIMDTypeForSize(simdSize)));
assert(op1 != nullptr);
@@ -1349,9 +1371,9 @@ GenTree* Compiler::impSimdAsHWIntrinsicRelOp(NamedIntrinsic intrinsic,
NamedIntrinsic hwIntrinsic = NI_Illegal;
- if (isVectorT256 || ((baseType != TYP_LONG) && (baseType != TYP_ULONG)))
+ if (isVectorT256 || ((simdBaseType != TYP_LONG) && (simdBaseType != TYP_ULONG)))
{
- hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, baseType);
+ hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, simdBaseType);
assert(hwIntrinsic != intrinsic);
}
else if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
@@ -1374,20 +1396,20 @@ GenTree* Compiler::impSimdAsHWIntrinsicRelOp(NamedIntrinsic intrinsic,
hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, TYP_INT);
assert(hwIntrinsic != intrinsic);
- GenTree* tmp = gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, TYP_INT, simdSize);
+ GenTree* tmp = gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, CORINFO_TYPE_INT, simdSize);
tmp = impCloneExpr(tmp, &op1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp for Vector<T>.Equals"));
op2 = gtNewSimdAsHWIntrinsicNode(retType, tmp, gtNewIconNode(SHUFFLE_ZWXY, TYP_INT), NI_SSE2_Shuffle,
- TYP_INT, simdSize);
+ CORINFO_TYPE_INT, simdSize);
- hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(NI_VectorT128_op_BitwiseAnd, baseType);
+ hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(NI_VectorT128_op_BitwiseAnd, simdBaseType);
assert(hwIntrinsic != NI_VectorT128_op_BitwiseAnd);
}
assert(hwIntrinsic != NI_Illegal);
- return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, baseType, simdSize);
+ return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, simdBaseJitType, simdSize);
}
case NI_VectorT128_GreaterThanOrEqual:
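
With only SSE2, Vector<long>.Equals above is built from 32-bit lane equality: compare as ints, swap the two halves of each 64-bit group with SHUFFLE_ZWXY, and AND the masks, so a 64-bit lane is all-ones only when both halves matched. A scalar model for a single 64-bit lane; the helper name is illustrative:

    #include <cstdint>
    #include <cstdio>

    // 64-bit equality from two 32-bit compares, mirroring the SSE2-only path above.
    static uint64_t EqualsI64Via32BitCompares(uint64_t a, uint64_t b)
    {
        uint32_t loMask = ((uint32_t)a == (uint32_t)b) ? 0xFFFFFFFFu : 0u;                     // low-half CompareEqual
        uint32_t hiMask = ((uint32_t)(a >> 32) == (uint32_t)(b >> 32)) ? 0xFFFFFFFFu : 0u;     // high-half CompareEqual
        uint64_t tmp      = ((uint64_t)hiMask << 32) | loMask;                                 // tmp = CompareEqual as int lanes
        uint64_t shuffled = ((uint64_t)loMask << 32) | hiMask;                                 // Shuffle(tmp, SHUFFLE_ZWXY)
        return tmp & shuffled;                                                                 // op_BitwiseAnd
    }

    int main()
    {
        std::printf("%d %d\n",
                    EqualsI64Via32BitCompares(0x1234567800000000ULL, 0x1234567800000001ULL) == 0,
                    EqualsI64Via32BitCompares(42, 42) == ~0ULL); // prints "1 1"
        return 0;
    }
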
@@ -1450,12 +1472,12 @@ GenTree* Compiler::impSimdAsHWIntrinsicRelOp(NamedIntrinsic intrinsic,
}
}
- op1 = impSimdAsHWIntrinsicRelOp(eqIntrinsic, clsHnd, retType, baseType, simdSize, op1, op2);
- op2 = impSimdAsHWIntrinsicRelOp(intrinsic, clsHnd, retType, baseType, simdSize, op1Dup, op2Dup);
+ op1 = impSimdAsHWIntrinsicRelOp(eqIntrinsic, clsHnd, retType, simdBaseJitType, simdSize, op1, op2);
+ op2 = impSimdAsHWIntrinsicRelOp(intrinsic, clsHnd, retType, simdBaseJitType, simdSize, op1Dup, op2Dup);
intrinsic = isVectorT256 ? NI_VectorT256_op_BitwiseOr : NI_VectorT128_op_BitwiseOr;
- NamedIntrinsic hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, baseType);
- return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, baseType, simdSize);
+ NamedIntrinsic hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, simdBaseType);
+ return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, simdBaseJitType, simdSize);
}
case NI_VectorT128_GreaterThan:
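
GreaterThanOrEqual and LessThanOrEqual above are composed as the bitwise OR of the Equals mask and the strict comparison mask. A scalar sketch for >= on one int lane (<= is symmetric); the helper name is illustrative:

    #include <cstdint>
    #include <cstdio>

    // ">=" as "== | >", mirroring the eqIntrinsic / intrinsic / op_BitwiseOr sequence above.
    static uint32_t GreaterThanOrEqualMask(int32_t a, int32_t b)
    {
        uint32_t eqMask = (a == b) ? 0xFFFFFFFFu : 0u; // Equals mask
        uint32_t gtMask = (a > b) ? 0xFFFFFFFFu : 0u;  // strict GreaterThan mask
        return eqMask | gtMask;                        // op_BitwiseOr
    }

    int main()
    {
        std::printf("%u %u %u\n", GreaterThanOrEqualMask(3, 3), GreaterThanOrEqualMask(4, 3),
                    GreaterThanOrEqualMask(2, 3)); // 4294967295 4294967295 0
        return 0;
    }
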
@@ -1465,7 +1487,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicRelOp(NamedIntrinsic intrinsic,
{
NamedIntrinsic hwIntrinsic = NI_Illegal;
- if (varTypeIsUnsigned(baseType))
+ if (varTypeIsUnsigned(simdBaseType))
{
// Vector<byte>, Vector<ushort>, Vector<uint> and Vector<ulong>:
// Hardware supports > for signed comparison. Therefore, to use it for
@@ -1481,36 +1503,43 @@ GenTree* Compiler::impSimdAsHWIntrinsicRelOp(NamedIntrinsic intrinsic,
// We need to treat op1 and op2 as signed for comparison purpose after
// the transformation.
- GenTree* constVal = nullptr;
- var_types opType = baseType;
+ GenTree* constVal = nullptr;
+ CorInfoType opJitType = simdBaseJitType;
+ var_types opType = simdBaseType;
+ CorInfoType constValJitType = CORINFO_TYPE_INT;
- switch (baseType)
+ switch (simdBaseType)
{
case TYP_UBYTE:
{
- constVal = gtNewIconNode(0x80808080, TYP_INT);
- baseType = TYP_BYTE;
+ constVal = gtNewIconNode(0x80808080, TYP_INT);
+ simdBaseJitType = CORINFO_TYPE_BYTE;
+ simdBaseType = TYP_BYTE;
break;
}
case TYP_USHORT:
{
- constVal = gtNewIconNode(0x80008000, TYP_INT);
- baseType = TYP_SHORT;
+ constVal = gtNewIconNode(0x80008000, TYP_INT);
+ simdBaseJitType = CORINFO_TYPE_SHORT;
+ simdBaseType = TYP_SHORT;
break;
}
case TYP_UINT:
{
- constVal = gtNewIconNode(0x80000000, TYP_INT);
- baseType = TYP_INT;
+ constVal = gtNewIconNode(0x80000000, TYP_INT);
+ simdBaseJitType = CORINFO_TYPE_INT;
+ simdBaseType = TYP_INT;
break;
}
case TYP_ULONG:
{
- constVal = gtNewLconNode(0x8000000000000000);
- baseType = TYP_LONG;
+ constVal = gtNewLconNode(0x8000000000000000);
+ constValJitType = CORINFO_TYPE_LONG;
+ simdBaseJitType = CORINFO_TYPE_LONG;
+ simdBaseType = TYP_LONG;
break;
}
@@ -1520,7 +1549,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicRelOp(NamedIntrinsic intrinsic,
}
}
- GenTree* constVector = gtNewSimdCreateBroadcastNode(retType, constVal, constVal->TypeGet(), simdSize,
+ GenTree* constVector = gtNewSimdCreateBroadcastNode(retType, constVal, constValJitType, simdSize,
/* isSimdAsHWIntrinsic */ true);
GenTree* constVectorDup;
@@ -1530,18 +1559,18 @@ GenTree* Compiler::impSimdAsHWIntrinsicRelOp(NamedIntrinsic intrinsic,
NamedIntrinsic hwIntrinsic = isVectorT256 ? NI_AVX2_Subtract : NI_SSE2_Subtract;
// op1 = op1 - constVector
- op1 = gtNewSimdAsHWIntrinsicNode(retType, op1, constVector, hwIntrinsic, opType, simdSize);
+ op1 = gtNewSimdAsHWIntrinsicNode(retType, op1, constVector, hwIntrinsic, opJitType, simdSize);
// op2 = op2 - constVector
- op2 = gtNewSimdAsHWIntrinsicNode(retType, op2, constVectorDup, hwIntrinsic, opType, simdSize);
+ op2 = gtNewSimdAsHWIntrinsicNode(retType, op2, constVectorDup, hwIntrinsic, opJitType, simdSize);
}
// This should have been mutated by the above path
- assert(varTypeIsIntegral(baseType) && !varTypeIsUnsigned(baseType));
+ assert(varTypeIsIntegral(simdBaseType) && !varTypeIsUnsigned(simdBaseType));
- if (isVectorT256 || (baseType != TYP_LONG))
+ if (isVectorT256 || (simdBaseType != TYP_LONG))
{
- hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, baseType);
+ hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(intrinsic, simdBaseType);
assert(hwIntrinsic != intrinsic);
}
else if (compOpportunisticallyDependsOn(InstructionSet_SSE42))
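
For unsigned lanes, the GreaterThan/LessThan path above subtracts the per-lane sign-bit constant (0x80808080, 0x80008000, and so on) from both operands so that the signed packed compare yields the unsigned ordering. A scalar sketch for byte lanes; the helper name is illustrative:

    #include <cstdint>
    #include <cstdio>

    // Unsigned compare via a signed compare after biasing both operands,
    // mirroring "op1 = op1 - constVector; op2 = op2 - constVectorDup".
    static bool GreaterThanU8ViaSignedCompare(uint8_t a, uint8_t b)
    {
        int8_t sa = (int8_t)(uint8_t)(a - 0x80u); // op1 = op1 - constVector
        int8_t sb = (int8_t)(uint8_t)(b - 0x80u); // op2 = op2 - constVectorDup
        return sa > sb;                           // signed CompareGreaterThan
    }

    int main()
    {
        std::printf("%d %d\n", GreaterThanU8ViaSignedCompare(0xFF, 0x01),
                    GreaterThanU8ViaSignedCompare(0x01, 0xFF)); // prints "1 0"
        return 0;
    }
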
@@ -1595,28 +1624,29 @@ GenTree* Compiler::impSimdAsHWIntrinsicRelOp(NamedIntrinsic intrinsic,
op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 Vector<T>.GreaterThan/LessThan"));
- GenTree* t = impSimdAsHWIntrinsicRelOp(intrinsic, clsHnd, retType, TYP_INT, simdSize, op1, op2);
- GenTree* u = impSimdAsHWIntrinsicRelOp(NI_VectorT128_Equals, clsHnd, retType, TYP_INT, simdSize,
- op1Dup1, op2Dup1);
- GenTree* v =
- impSimdAsHWIntrinsicRelOp(intrinsic, clsHnd, retType, TYP_UINT, simdSize, op1Dup2, op2Dup2);
+ GenTree* t =
+ impSimdAsHWIntrinsicRelOp(intrinsic, clsHnd, retType, CORINFO_TYPE_INT, simdSize, op1, op2);
+ GenTree* u = impSimdAsHWIntrinsicRelOp(NI_VectorT128_Equals, clsHnd, retType, CORINFO_TYPE_INT,
+ simdSize, op1Dup1, op2Dup1);
+ GenTree* v = impSimdAsHWIntrinsicRelOp(intrinsic, clsHnd, retType, CORINFO_TYPE_UINT, simdSize, op1Dup2,
+ op2Dup2);
op1 = gtNewSimdAsHWIntrinsicNode(retType, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
- TYP_INT, simdSize);
+ CORINFO_TYPE_INT, simdSize);
v = gtNewSimdAsHWIntrinsicNode(retType, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle,
- TYP_INT, simdSize);
+ CORINFO_TYPE_INT, simdSize);
u = gtNewSimdAsHWIntrinsicNode(retType, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
- TYP_INT, simdSize);
+ CORINFO_TYPE_INT, simdSize);
- hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(NI_VectorT128_op_BitwiseAnd, baseType);
- op2 = gtNewSimdAsHWIntrinsicNode(retType, v, u, hwIntrinsic, baseType, simdSize);
+ hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(NI_VectorT128_op_BitwiseAnd, simdBaseType);
+ op2 = gtNewSimdAsHWIntrinsicNode(retType, v, u, hwIntrinsic, simdBaseJitType, simdSize);
- hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(NI_VectorT128_op_BitwiseOr, baseType);
+ hwIntrinsic = SimdAsHWIntrinsicInfo::lookupHWIntrinsic(NI_VectorT128_op_BitwiseOr, simdBaseType);
}
assert(hwIntrinsic != NI_Illegal);
- return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, baseType, simdSize);
+ return gtNewSimdAsHWIntrinsicNode(retType, op1, op2, hwIntrinsic, simdBaseJitType, simdSize);
}
default:
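
With only SSE2, Vector<long>.GreaterThan above is decomposed into three 32-bit compares: a signed compare of the high halves (t), an equality test of the high halves (u), and an unsigned compare of the low halves (v), combined as t | (v & u) after the WWYY/ZZXX shuffles broadcast each result across its 64-bit lane. A scalar model of that identity; the helper name is illustrative:

    #include <cstdint>
    #include <cstdio>

    // a > b (signed 64-bit)  <=>  hi(a) > hi(b) signed,
    //                             OR (hi(a) == hi(b) AND lo(a) > lo(b) unsigned).
    static bool GreaterThanI64Via32BitCompares(int64_t a, int64_t b)
    {
        int32_t  aHi = (int32_t)(a >> 32), bHi = (int32_t)(b >> 32);
        uint32_t aLo = (uint32_t)a,        bLo = (uint32_t)b;

        bool t = aHi > bHi;   // signed compare of the high halves (t, SHUFFLE_WWYY)
        bool u = aHi == bHi;  // equality of the high halves       (u, SHUFFLE_WWYY)
        bool v = aLo > bLo;   // unsigned compare of the low halves (v, SHUFFLE_ZZXX)

        return t || (u && v); // op_BitwiseOr(t, op_BitwiseAnd(v, u))
    }

    int main()
    {
        std::printf("%d %d %d\n", GreaterThanI64Via32BitCompares(-1, 1),
                    GreaterThanI64Via32BitCompares(1, -1),
                    GreaterThanI64Via32BitCompares(0x100000000LL, 0xFFFFFFFFLL)); // "0 1 1"
        return 0;
    }
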
diff --git a/src/coreclr/jit/simdcodegenxarch.cpp b/src/coreclr/jit/simdcodegenxarch.cpp
index f85e8b6cd4f..13c36742e74 100644
--- a/src/coreclr/jit/simdcodegenxarch.cpp
+++ b/src/coreclr/jit/simdcodegenxarch.cpp
@@ -489,12 +489,12 @@ void CodeGen::genSIMDIntrinsicInit(GenTreeSIMD* simdNode)
assert(simdNode->gtSIMDIntrinsicID == SIMDIntrinsicInit);
GenTree* op1 = simdNode->gtGetOp1();
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
SIMDLevel level = compiler->getSIMDSupportLevel();
- unsigned size = simdNode->gtSIMDSize;
+ unsigned size = simdNode->GetSimdSize();
// Should never see small int base type vectors except for zero initialization.
noway_assert(!varTypeIsSmallInt(baseType) || op1->IsIntegralConst(0));
@@ -674,7 +674,7 @@ void CodeGen::genSIMDIntrinsicInitN(GenTreeSIMD* simdNode)
assert(simdNode->gtSIMDIntrinsicID == SIMDIntrinsicInitN);
// Right now this intrinsic is supported only on TYP_FLOAT vectors
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
noway_assert(baseType == TYP_FLOAT);
regNumber targetReg = simdNode->GetRegNum();
@@ -733,7 +733,7 @@ void CodeGen::genSIMDIntrinsicInitN(GenTreeSIMD* simdNode)
offset += baseTypeSize;
}
- noway_assert(offset == simdNode->gtSIMDSize);
+ noway_assert(offset == simdNode->GetSimdSize());
// Load the initialized value.
if (targetReg != vectorReg)
@@ -757,7 +757,7 @@ void CodeGen::genSIMDIntrinsicUnOp(GenTreeSIMD* simdNode)
assert(simdNode->gtSIMDIntrinsicID == SIMDIntrinsicCast);
GenTree* op1 = simdNode->gtGetOp1();
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
@@ -786,7 +786,7 @@ void CodeGen::genSIMDIntrinsic32BitConvert(GenTreeSIMD* simdNode)
assert((intrinsicID == SIMDIntrinsicConvertToSingle) || (intrinsicID == SIMDIntrinsicConvertToInt32));
GenTree* op1 = simdNode->gtGetOp1();
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
@@ -920,7 +920,7 @@ void CodeGen::genSIMDIntrinsic64BitConvert(GenTreeSIMD* simdNode)
assert((intrinsicID == SIMDIntrinsicConvertToDouble) || (intrinsicID == SIMDIntrinsicConvertToInt64));
GenTree* op1 = simdNode->gtGetOp1();
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types simdType = simdNode->TypeGet();
@@ -1212,7 +1212,7 @@ void CodeGen::genSIMDExtractUpperHalf(GenTreeSIMD* simdNode, regNumber srcReg, r
emitAttr emitSize = emitActualTypeSize(simdType);
if (compiler->getSIMDSupportLevel() == SIMD_AVX2_Supported)
{
- instruction extractIns = varTypeIsFloating(simdNode->gtSIMDBaseType) ? INS_vextractf128 : INS_vextracti128;
+ instruction extractIns = varTypeIsFloating(simdNode->GetSimdBaseType()) ? INS_vextractf128 : INS_vextracti128;
GetEmitter()->emitIns_R_R_I(extractIns, EA_32BYTE, tgtReg, srcReg, 0x01);
}
else
@@ -1241,7 +1241,7 @@ void CodeGen::genSIMDIntrinsicWiden(GenTreeSIMD* simdNode)
(simdNode->gtSIMDIntrinsicID == SIMDIntrinsicWidenHi));
GenTree* op1 = simdNode->gtGetOp1();
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types simdType = simdNode->TypeGet();
@@ -1334,7 +1334,7 @@ void CodeGen::genSIMDIntrinsicNarrow(GenTreeSIMD* simdNode)
GenTree* op1 = simdNode->gtGetOp1();
GenTree* op2 = simdNode->gtGetOp2();
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types simdType = simdNode->TypeGet();
@@ -1482,7 +1482,7 @@ void CodeGen::genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode)
GenTree* op1 = simdNode->gtGetOp1();
GenTree* op2 = simdNode->gtGetOp2();
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
@@ -1532,7 +1532,7 @@ void CodeGen::genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode)
{
GenTree* op1 = simdNode->gtGetOp1();
GenTree* op2 = simdNode->gtGetOp2();
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
var_types targetType = simdNode->TypeGet();
SIMDLevel level = compiler->getSIMDSupportLevel();
@@ -1621,7 +1621,7 @@ void CodeGen::genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode)
simdType = TYP_SIMD16;
}
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
@@ -1931,7 +1931,7 @@ void CodeGen::genSIMDIntrinsicSetItem(GenTreeSIMD* simdNode)
GenTree* op1 = simdNode->gtGetOp1();
GenTree* op2 = simdNode->gtGetOp2();
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
@@ -1941,7 +1941,7 @@ void CodeGen::genSIMDIntrinsicSetItem(GenTreeSIMD* simdNode)
// supported only on vector2f/3f/4f right now
noway_assert(baseType == TYP_FLOAT);
assert(op2->TypeGet() == baseType);
- assert(simdNode->gtSIMDSize >= ((index + 1) * genTypeSize(baseType)));
+ assert(simdNode->GetSimdSize() >= ((index + 1) * genTypeSize(baseType)));
genConsumeOperands(simdNode);
regNumber op1Reg = op1->GetRegNum();
@@ -2006,7 +2006,7 @@ void CodeGen::genSIMDIntrinsicShuffleSSE2(GenTreeSIMD* simdNode)
assert(op2->isContained());
assert(op2->IsCnsIntOrI());
ssize_t shuffleControl = op2->AsIntConCommon()->IconValue();
- var_types baseType = simdNode->gtSIMDBaseType;
+ var_types baseType = simdNode->GetSimdBaseType();
var_types targetType = simdNode->TypeGet();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
@@ -2131,7 +2131,7 @@ void CodeGen::genStoreLclTypeSIMD12(GenTree* treeNode)
{
// This is only possible for a zero-init.
assert(op1->IsIntegralConst(0) || op1->IsSIMDZero());
- genSIMDZero(TYP_SIMD16, op1->AsSIMD()->gtSIMDBaseType, tmpReg);
+ genSIMDZero(TYP_SIMD16, op1->AsSIMD()->GetSimdBaseType(), tmpReg);
// store lower 8 bytes
GetEmitter()->emitIns_S_R(ins_Store(TYP_DOUBLE), EA_8BYTE, tmpReg, varNum, offs);
@@ -2353,11 +2353,11 @@ void CodeGen::genSIMDIntrinsicUpperRestore(GenTreeSIMD* simdNode)
void CodeGen::genSIMDIntrinsic(GenTreeSIMD* simdNode)
{
// NYI for unsupported base types
- if (simdNode->gtSIMDBaseType != TYP_INT && simdNode->gtSIMDBaseType != TYP_LONG &&
- simdNode->gtSIMDBaseType != TYP_FLOAT && simdNode->gtSIMDBaseType != TYP_DOUBLE &&
- simdNode->gtSIMDBaseType != TYP_USHORT && simdNode->gtSIMDBaseType != TYP_UBYTE &&
- simdNode->gtSIMDBaseType != TYP_SHORT && simdNode->gtSIMDBaseType != TYP_BYTE &&
- simdNode->gtSIMDBaseType != TYP_UINT && simdNode->gtSIMDBaseType != TYP_ULONG)
+ if (simdNode->GetSimdBaseType() != TYP_INT && simdNode->GetSimdBaseType() != TYP_LONG &&
+ simdNode->GetSimdBaseType() != TYP_FLOAT && simdNode->GetSimdBaseType() != TYP_DOUBLE &&
+ simdNode->GetSimdBaseType() != TYP_USHORT && simdNode->GetSimdBaseType() != TYP_UBYTE &&
+ simdNode->GetSimdBaseType() != TYP_SHORT && simdNode->GetSimdBaseType() != TYP_BYTE &&
+ simdNode->GetSimdBaseType() != TYP_UINT && simdNode->GetSimdBaseType() != TYP_ULONG)
{
noway_assert(!"SIMD intrinsic with unsupported base type.");
}
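
The simdcodegenxarch.cpp hunks above are a mechanical switch from reading the gtSIMDBaseType and gtSIMDSize fields to calling the GetSimdBaseType() and GetSimdSize() accessors, with the var_types view derived from the stored base JIT type (see JitType2PreciseVarType earlier in this diff). A simplified, hypothetical illustration of that accessor pattern, not the real GenTreeJitIntrinsic:

    #include <cstdio>

    enum class BaseJitType { Int, Float }; // stand-in for CorInfoType
    enum class VarType     { Int, Float }; // stand-in for var_types

    struct SimdNodeSketch
    {
        BaseJitType simdBaseJitType;
        unsigned    simdSize;

        BaseJitType GetSimdBaseJitType() const { return simdBaseJitType; }
        unsigned    GetSimdSize() const        { return simdSize; }

        // Derives the var_types view on demand, the way JitType2PreciseVarType does.
        VarType GetSimdBaseType() const
        {
            return (simdBaseJitType == BaseJitType::Float) ? VarType::Float : VarType::Int;
        }
    };

    int main()
    {
        SimdNodeSketch node{BaseJitType::Float, 16};
        std::printf("%u %d\n", node.GetSimdSize(), node.GetSimdBaseType() == VarType::Float); // "16 1"
        return 0;
    }
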
diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp
index e57e5f68a7e..27b4b342ba7 100644
--- a/src/coreclr/jit/valuenum.cpp
+++ b/src/coreclr/jit/valuenum.cpp
@@ -8858,8 +8858,8 @@ void Compiler::fgValueNumberSimd(GenTree* tree)
if (encodeResultType)
{
- ValueNum vnSize = vnStore->VNForIntCon(simdNode->gtSIMDSize);
- ValueNum vnBaseType = vnStore->VNForIntCon(INT32(simdNode->gtSIMDBaseType));
+ ValueNum vnSize = vnStore->VNForIntCon(simdNode->GetSimdSize());
+ ValueNum vnBaseType = vnStore->VNForIntCon(INT32(simdNode->GetSimdBaseType()));
ValueNum simdTypeVN = vnStore->VNForFunc(TYP_REF, VNF_SimdType, vnSize, vnBaseType);
resvnp.SetBoth(simdTypeVN);
@@ -8975,8 +8975,8 @@ void Compiler::fgValueNumberHWIntrinsic(GenTree* tree)
if (encodeResultType)
{
- ValueNum vnSize = vnStore->VNForIntCon(hwIntrinsicNode->gtSIMDSize);
- ValueNum vnBaseType = vnStore->VNForIntCon(INT32(hwIntrinsicNode->gtSIMDBaseType));
+ ValueNum vnSize = vnStore->VNForIntCon(hwIntrinsicNode->GetSimdSize());
+ ValueNum vnBaseType = vnStore->VNForIntCon(INT32(hwIntrinsicNode->GetSimdBaseType()));
ValueNum simdTypeVN = vnStore->VNForFunc(TYP_REF, VNF_SimdType, vnSize, vnBaseType);
resvnp.SetBoth(simdTypeVN);
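
The valuenum.cpp hunks above encode the SIMD result type as VNForFunc(TYP_REF, VNF_SimdType, vnSize, vnBaseType), so the type is keyed by the (simdSize, simdBaseType) pair and structurally identical SIMD types share a value number. A hypothetical sketch of that interning idea; the function name and map-based storage are illustrative only:

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <utility>

    // Dedupe (size, baseType) pairs into small stable ids, analogous to interning
    // VNForFunc(VNF_SimdType, vnSize, vnBaseType) in the value-number store.
    static uint32_t SimdTypeKey(unsigned simdSize, int simdBaseType)
    {
        static std::map<std::pair<unsigned, int>, uint32_t> interned;
        auto it = interned.try_emplace({simdSize, simdBaseType}, (uint32_t)interned.size() + 1).first;
        return it->second;
    }

    int main()
    {
        std::printf("%u %u %u\n", SimdTypeKey(16, 4), SimdTypeKey(32, 4), SimdTypeKey(16, 4)); // "1 2 1"
        return 0;
    }
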