github.com/mono/corert.git
author    dotnet-bot <dotnet-bot@microsoft.com>  2017-09-26 03:31:09 +0300
committer dotnet-bot <dotnet-bot@microsoft.com>  2017-09-26 06:14:34 +0300
commit    94f2d8ea3943f7e90cda86ae7b7cb47d3da5a53a (patch)
tree      941c381b0df93a9a335bf7d40c31c3ab94bd95f0 /src/Native
parent    351d113aaa275354213aeb31bdc0709b56774508 (diff)
[tfs-changeset: 1676255]
Diffstat (limited to 'src/Native')
-rw-r--r--  src/Native/Runtime/AsmOffsets.h | 6
-rw-r--r--  src/Native/Runtime/CachedInterfaceDispatch.cpp | 2
-rw-r--r--  src/Native/Runtime/EHHelpers.cpp | 10
-rw-r--r--  src/Native/Runtime/MiscHelpers.cpp | 3
-rw-r--r--  src/Native/Runtime/PalRedhawk.h | 70
-rw-r--r--  src/Native/Runtime/PalRedhawkCommon.h | 28
-rw-r--r--  src/Native/Runtime/RHCodeMan.cpp | 79
-rw-r--r--  src/Native/Runtime/StackFrameIterator.cpp | 22
-rw-r--r--  src/Native/Runtime/StackFrameIterator.h | 11
-rw-r--r--  src/Native/Runtime/ThunksMapping.cpp | 9
-rw-r--r--  src/Native/Runtime/arm64/AllocFast.asm | 240
-rw-r--r--  src/Native/Runtime/arm64/AsmMacros.h | 132
-rw-r--r--  src/Native/Runtime/arm64/AsmOffsetsCpu.h | 10
-rw-r--r--  src/Native/Runtime/arm64/CallDescrWorker.asm | 22
-rw-r--r--  src/Native/Runtime/arm64/CallingConventionConverterHelpers.asm | 57
-rw-r--r--  src/Native/Runtime/arm64/Dummies.asm | 19
-rw-r--r--  src/Native/Runtime/arm64/ExceptionHandling.asm | 115
-rw-r--r--  src/Native/Runtime/arm64/FloatingPoint.asm | 13
-rw-r--r--  src/Native/Runtime/arm64/GcProbe.asm | 194
-rw-r--r--  src/Native/Runtime/arm64/GetThread.asm | 27
-rw-r--r--  src/Native/Runtime/arm64/InteropThunksHelpers.asm | 46
-rw-r--r--  src/Native/Runtime/arm64/MiscStubs.asm | 126
-rw-r--r--  src/Native/Runtime/arm64/PInvoke.asm | 84
-rw-r--r--  src/Native/Runtime/arm64/StubDispatch.asm | 80
-rw-r--r--  src/Native/Runtime/arm64/ThunkPoolThunks.asm | 253
-rw-r--r--  src/Native/Runtime/arm64/UniversalTransition.asm | 28
-rw-r--r--  src/Native/Runtime/arm64/WriteBarriers.asm | 81
-rw-r--r--  src/Native/Runtime/coreclr/gcinfodecoder.cpp | 2
-rw-r--r--  src/Native/Runtime/gcdump.cpp | 23
-rw-r--r--  src/Native/Runtime/inc/daccess.h | 2
-rw-r--r--  src/Native/Runtime/inc/gcinfo.h | 203
-rw-r--r--  src/Native/Runtime/portable.cpp | 18
-rw-r--r--  src/Native/Runtime/windows/AsmOffsets.cpp | 2
-rw-r--r--  src/Native/Runtime/windows/PalRedhawkInline.h | 2
-rw-r--r--  src/Native/Runtime/windows/PalRedhawkMinWin.cpp | 24
-rw-r--r--  src/Native/libunwind/src/Registers.hpp | 8
36 files changed, 1975 insertions(+), 76 deletions(-)
diff --git a/src/Native/Runtime/AsmOffsets.h b/src/Native/Runtime/AsmOffsets.h
index 314598281..299454a28 100644
--- a/src/Native/Runtime/AsmOffsets.h
+++ b/src/Native/Runtime/AsmOffsets.h
@@ -113,6 +113,10 @@ private:
void BogusFunction()
{
// Sample usage to generate the error
- FindCompileTimeConstant<offsetof(ExInfo, m_passNumber)> bogus_variable;
+ FindCompileTimeConstant<sizeof(ExInfo)> bogus_variable;
+ FindCompileTimeConstant<offsetof(ExInfo, m_notifyDebuggerSP)> bogus_variable2;
+ FindCompileTimeConstant<sizeof(StackFrameIterator)> bogus_variable3;
+ FindCompileTimeConstant<sizeof(PAL_LIMITED_CONTEXT)> bogus_variable4;
+ FindCompileTimeConstant<offsetof(PAL_LIMITED_CONTEXT, IP)> bogus_variable5;
}
#endif // defined(__cplusplus) && defined(USE_COMPILE_TIME_CONSTANT_FINDER)
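The FindCompileTimeConstant device used above is a declared-but-undefined class template: declaring a variable of the incomplete type forces a compile error whose message prints the template argument, which is how the offsets and sizes asserted in AsmOffsetsCpu.h get verified against the real C++ layouts. A minimal sketch of the idiom, with an assumed ExInfo layout:

    #include <cstddef>

    // Declared but never defined: any instantiation is a compile error, and the
    // compiler's diagnostic prints the value of N.
    template <size_t N> struct FindCompileTimeConstant;

    struct ExInfo { char pad[0x278]; void* m_notifyDebuggerSP; };  // assumed layout

    void BogusFunction()
    {
        // error: undefined type 'FindCompileTimeConstant<632>'  (632 == 0x278)
        FindCompileTimeConstant<offsetof(ExInfo, m_notifyDebuggerSP)> bogus_variable;
    }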
diff --git a/src/Native/Runtime/CachedInterfaceDispatch.cpp b/src/Native/Runtime/CachedInterfaceDispatch.cpp
index e1ce01f8e..3c12cc7d9 100644
--- a/src/Native/Runtime/CachedInterfaceDispatch.cpp
+++ b/src/Native/Runtime/CachedInterfaceDispatch.cpp
@@ -108,7 +108,7 @@ static void * UpdatePointerPairAtomically(void * pPairLocation,
// The update failed due to a racing update to the same location. Return the new value of the second
// pointer (either a new cache that lost the race or a non-NULL pointer in the cache entry update case).
return pSecondPointer;
-#elif defined(_AMD64_)
+#elif defined(_AMD64_) || defined(_ARM64_)
// The same comments apply to the AMD64 version. The CompareExchange looks a little different since the
// API was refactored in terms of Int64 to avoid creating a 128-bit integer type.
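For context, the pointer-pair update above relies on a 128-bit compare-exchange of two adjacent pointers. A hedged sketch of the operation's shape using the Win32 compiler intrinsic (the runtime's actual PAL wrapper and parameter order may differ):

    #include <intrin.h>

    // Atomically replace the {first, second} pointer pair at pPairLocation iff it
    // still equals the comparand pair. _InterlockedCompareExchange128 stores the
    // observed pair back into 'comparand' and returns 1 on success.
    bool UpdatePointerPairSketch(void* pPairLocation,
                                 void* pFirst, void* pSecond,
                                 void* pComparandFirst, void* pComparandSecond)
    {
        __int64 comparand[2] = { (__int64)pComparandFirst, (__int64)pComparandSecond };
        return _InterlockedCompareExchange128((__int64 volatile *)pPairLocation,
                                              (__int64)pSecond,  // high 64 bits
                                              (__int64)pFirst,   // low 64 bits
                                              comparand) == 1;
    }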
diff --git a/src/Native/Runtime/EHHelpers.cpp b/src/Native/Runtime/EHHelpers.cpp
index 9401e915d..85d2b6649 100644
--- a/src/Native/Runtime/EHHelpers.cpp
+++ b/src/Native/Runtime/EHHelpers.cpp
@@ -190,7 +190,13 @@ COOP_PINVOKE_HELPER(void, RhpCopyContextFromExInfo,
pContext->Lr = pPalContext->LR;
pContext->Pc = pPalContext->IP;
#elif defined(_ARM64_)
- PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+ for (int i = 0; i < GEN_REG_COUNT; ++i) {
+ pContext->X[i] = pPalContext->X[i];
+ }
+ pContext->Fp = pPalContext->FP;
+ pContext->Sp = pPalContext->SP;
+ pContext->Lr = pPalContext->LR;
+ pContext->Pc = pPalContext->IP;
#else
#error Not Implemented for this architecture -- RhpCopyContextFromExInfo
#endif
@@ -284,7 +290,7 @@ EXTERN_C Int32 RhpPInvokeExceptionGuard()
}
#endif
-#if defined(_AMD64_) || defined(_ARM_) || defined(_X86_)
+#if defined(_AMD64_) || defined(_ARM_) || defined(_X86_) || defined(_ARM64_)
EXTERN_C REDHAWK_API void __fastcall RhpThrowHwEx();
#else
COOP_PINVOKE_HELPER(void, RhpThrowHwEx, ())
diff --git a/src/Native/Runtime/MiscHelpers.cpp b/src/Native/Runtime/MiscHelpers.cpp
index 507ffa608..a719758b0 100644
--- a/src/Native/Runtime/MiscHelpers.cpp
+++ b/src/Native/Runtime/MiscHelpers.cpp
@@ -586,7 +586,8 @@ COOP_PINVOKE_HELPER(UInt8 *, RhGetCodeTarget, (UInt8 * pCodeOrg))
}
#elif _TARGET_ARM64_
- PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+ UNREFERENCED_PARAMETER(unboxingStub);
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#else
#error 'Unsupported Architecture'
diff --git a/src/Native/Runtime/PalRedhawk.h b/src/Native/Runtime/PalRedhawk.h
index e20c761b4..86d0ee917 100644
--- a/src/Native/Runtime/PalRedhawk.h
+++ b/src/Native/Runtime/PalRedhawk.h
@@ -383,6 +383,10 @@ typedef struct _NEON128 {
Int64 High;
} NEON128, *PNEON128;
+#if !defined(GEN_REG_COUNT)
+#define GEN_REG_COUNT 29
+#endif
+
typedef struct DECLSPEC_ALIGN(16) _CONTEXT {
//
// Control flags.
@@ -393,35 +397,43 @@ typedef struct DECLSPEC_ALIGN(16) _CONTEXT {
// Integer registers
//
UInt32 Cpsr; // NZVF + DAIF + CurrentEL + SPSel
- UInt64 X0;
- UInt64 X1;
- UInt64 X2;
- UInt64 X3;
- UInt64 X4;
- UInt64 X5;
- UInt64 X6;
- UInt64 X7;
- UInt64 X8;
- UInt64 X9;
- UInt64 X10;
- UInt64 X11;
- UInt64 X12;
- UInt64 X13;
- UInt64 X14;
- UInt64 X15;
- UInt64 X16;
- UInt64 X17;
- UInt64 X18;
- UInt64 X19;
- UInt64 X20;
- UInt64 X21;
- UInt64 X22;
- UInt64 X23;
- UInt64 X24;
- UInt64 X25;
- UInt64 X26;
- UInt64 X27;
- UInt64 X28;
+#pragma warning(push)
+#pragma warning(disable:4201) // nameless struct
+ union {
+ struct {
+ UInt64 X0;
+ UInt64 X1;
+ UInt64 X2;
+ UInt64 X3;
+ UInt64 X4;
+ UInt64 X5;
+ UInt64 X6;
+ UInt64 X7;
+ UInt64 X8;
+ UInt64 X9;
+ UInt64 X10;
+ UInt64 X11;
+ UInt64 X12;
+ UInt64 X13;
+ UInt64 X14;
+ UInt64 X15;
+ UInt64 X16;
+ UInt64 X17;
+ UInt64 X18;
+ UInt64 X19;
+ UInt64 X20;
+ UInt64 X21;
+ UInt64 X22;
+ UInt64 X23;
+ UInt64 X24;
+ UInt64 X25;
+ UInt64 X26;
+ UInt64 X27;
+ UInt64 X28;
+ };
+ UInt64 X[GEN_REG_COUNT];
+ };
+#pragma warning(pop)
UInt64 Fp; // X29
UInt64 Lr; // X30
UInt64 Sp;
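The anonymous struct/union pair above (the reason for the C4201 pragma) gives the same 29 general-purpose registers both a named view (X0..X28) and an indexed view, which is what the new copy loop in EHHelpers.cpp iterates over. A standalone sketch of the aliasing pattern:

    #include <cstdint>

    #define GEN_REG_COUNT 29

    struct ContextSketch {
        union {
            struct {
                uint64_t X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12,
                         X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23,
                         X24, X25, X26, X27, X28;   // named view
            };
            uint64_t X[GEN_REG_COUNT];              // indexed view, same storage
        };
    };

    static_assert(sizeof(ContextSketch) == GEN_REG_COUNT * sizeof(uint64_t),
                  "named and indexed views must overlay exactly");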
diff --git a/src/Native/Runtime/PalRedhawkCommon.h b/src/Native/Runtime/PalRedhawkCommon.h
index 3ee11e9a2..4edab876b 100644
--- a/src/Native/Runtime/PalRedhawkCommon.h
+++ b/src/Native/Runtime/PalRedhawkCommon.h
@@ -41,6 +41,7 @@ struct AMD64_ALIGN_16 Fp128 {
struct PAL_LIMITED_CONTEXT
{
+ // Includes the special registers, the callee-saved registers, and the general-purpose
+ // registers used to return values from functions (but not the floating-point return registers)
#ifdef _TARGET_ARM_
UIntNative R0;
UIntNative R4;
@@ -65,12 +66,27 @@ struct PAL_LIMITED_CONTEXT
void SetIp(UIntNative ip) { IP = ip; }
void SetSp(UIntNative sp) { SP = sp; }
#elif defined(_TARGET_ARM64_)
- // @TODO: Add ARM64 registers
- UIntNative IP;
- UIntNative GetIp() const { PORTABILITY_ASSERT("@TODO: FIXME:ARM64"); }
- UIntNative GetSp() const { PORTABILITY_ASSERT("@TODO: FIXME:ARM64"); }
- UIntNative GetFp() const { PORTABILITY_ASSERT("@TODO: FIXME:ARM64"); }
- UIntNative GetLr() const { PORTABILITY_ASSERT("@TODO: FIXME:ARM64"); }
+#if !defined(GEN_REG_COUNT)
+#define GEN_REG_COUNT 29
+#endif
+
+ // ARM64TODO: we don't need to save X9-X15
+ // ARM64TODO: do we need X16 (IP0), X17 (IP1), X18 or can we skip them?
+ UIntNative X[GEN_REG_COUNT];
+
+ UIntNative FP;
+ UIntNative LR;
+ UIntNative SP;
+ UIntNative IP;
+
+ UIntNative D[16 - 8]; // Only the bottom 64-bit value of the V registers V8..V15 needs to be preserved
+ // (V0-V7 and V16-V31 are not preserved according to the ABI spec).
+
+
+ UIntNative GetIp() const { return IP; }
+ UIntNative GetSp() const { return SP; }
+ UIntNative GetFp() const { return FP; }
+ UIntNative GetLr() const { return LR; }
#elif defined(UNIX_AMD64_ABI)
// Param regs: rdi, rsi, rdx, rcx, r8, r9, scratch: rax, rdx (both return val), preserved: rbp, rbx, r12-r15
UIntNative IP;
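The field layout above can be cross-checked against the AsmOffsetsCpu.h assertions later in this patch (UIntNative is 8 bytes on ARM64). A sketch of the arithmetic:

    #include <cstddef>
    #include <cstdint>

    // Mirror of the ARM64 PAL_LIMITED_CONTEXT layout; a sketch, not the real type.
    struct LimitedContextSketch {
        uint64_t X[29];     // 0x000..0x0E7
        uint64_t FP;        // 0x0E8
        uint64_t LR;        // 0x0F0
        uint64_t SP;        // 0x0F8
        uint64_t IP;        // 0x100
        uint64_t D[16 - 8]; // 0x108..0x147, low halves of V8..V15
    };

    static_assert(offsetof(LimitedContextSketch, IP) == 0x100,
                  "matches PLAT_ASM_OFFSET(100, PAL_LIMITED_CONTEXT, IP)");
    static_assert(sizeof(LimitedContextSketch) == 0x148,
                  "matches PLAT_ASM_SIZEOF(148, PAL_LIMITED_CONTEXT)");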
diff --git a/src/Native/Runtime/RHCodeMan.cpp b/src/Native/Runtime/RHCodeMan.cpp
index 39361c2fa..2d9d83359 100644
--- a/src/Native/Runtime/RHCodeMan.cpp
+++ b/src/Native/Runtime/RHCodeMan.cpp
@@ -113,23 +113,80 @@ void ReportRegisterSet(UInt8 regSet, REGDISPLAY * pContext, GCEnumContext * hCal
template <CalleeSavedRegNum regNum>
PTR_PTR_Object GetRegObjectAddr(REGDISPLAY * pContext)
{
- PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+ switch (regNum)
+ {
+ case CSR_NUM_X19: return (PTR_PTR_Object)pContext->pX19;
+ case CSR_NUM_X20: return (PTR_PTR_Object)pContext->pX20;
+ case CSR_NUM_X21: return (PTR_PTR_Object)pContext->pX21;
+ case CSR_NUM_X22: return (PTR_PTR_Object)pContext->pX22;
+ case CSR_NUM_X23: return (PTR_PTR_Object)pContext->pX23;
+ case CSR_NUM_X24: return (PTR_PTR_Object)pContext->pX24;
+ case CSR_NUM_X25: return (PTR_PTR_Object)pContext->pX25;
+ case CSR_NUM_X26: return (PTR_PTR_Object)pContext->pX26;
+ case CSR_NUM_X27: return (PTR_PTR_Object)pContext->pX27;
+ case CSR_NUM_X28: return (PTR_PTR_Object)pContext->pX28;
+ }
+ UNREACHABLE_MSG("unexpected CalleeSavedRegNum");
}
#pragma warning(pop)
PTR_PTR_Object GetRegObjectAddr(CalleeSavedRegNum regNum, REGDISPLAY * pContext)
{
- PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+ switch (regNum)
+ {
+ case CSR_NUM_X19: return (PTR_PTR_Object)pContext->pX19;
+ case CSR_NUM_X20: return (PTR_PTR_Object)pContext->pX20;
+ case CSR_NUM_X21: return (PTR_PTR_Object)pContext->pX21;
+ case CSR_NUM_X22: return (PTR_PTR_Object)pContext->pX22;
+ case CSR_NUM_X23: return (PTR_PTR_Object)pContext->pX23;
+ case CSR_NUM_X24: return (PTR_PTR_Object)pContext->pX24;
+ case CSR_NUM_X25: return (PTR_PTR_Object)pContext->pX25;
+ case CSR_NUM_X26: return (PTR_PTR_Object)pContext->pX26;
+ case CSR_NUM_X27: return (PTR_PTR_Object)pContext->pX27;
+ case CSR_NUM_X28: return (PTR_PTR_Object)pContext->pX28;
+ }
+ UNREACHABLE_MSG("unexpected CalleeSavedRegNum");
}
PTR_PTR_Object GetScratchRegObjectAddr(ScratchRegNum regNum, REGDISPLAY * pContext)
{
- PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+ switch (regNum)
+ {
+ case SR_NUM_X0: return (PTR_PTR_Object)pContext->pX0;
+ case SR_NUM_X1: return (PTR_PTR_Object)pContext->pX1;
+ case SR_NUM_X2: return (PTR_PTR_Object)pContext->pX2;
+ case SR_NUM_X3: return (PTR_PTR_Object)pContext->pX3;
+ case SR_NUM_X4: return (PTR_PTR_Object)pContext->pX4;
+ case SR_NUM_X5: return (PTR_PTR_Object)pContext->pX5;
+ case SR_NUM_X6: return (PTR_PTR_Object)pContext->pX6;
+ case SR_NUM_X7: return (PTR_PTR_Object)pContext->pX7;
+ case SR_NUM_X8: return (PTR_PTR_Object)pContext->pX8;
+ case SR_NUM_X9: return (PTR_PTR_Object)pContext->pX9;
+ case SR_NUM_X10: return (PTR_PTR_Object)pContext->pX10;
+ case SR_NUM_X11: return (PTR_PTR_Object)pContext->pX11;
+ case SR_NUM_X12: return (PTR_PTR_Object)pContext->pX12;
+ case SR_NUM_X13: return (PTR_PTR_Object)pContext->pX13;
+ case SR_NUM_X14: return (PTR_PTR_Object)pContext->pX14;
+ case SR_NUM_X15: return (PTR_PTR_Object)pContext->pX15;
+ case SR_NUM_XIP0: return (PTR_PTR_Object)pContext->pX16;
+ case SR_NUM_XIP1: return (PTR_PTR_Object)pContext->pX17;
+ }
+ UNREACHABLE_MSG("unexpected ScratchRegNum");
}
void ReportRegisterSet(UInt8 regSet, REGDISPLAY * pContext, GCEnumContext * hCallback)
{
- PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+ // ARM64TODO: All these won't fit into 8 bits. FIX IT!
+ if (regSet & CSR_MASK_X19) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X19>(pContext), 0); }
+ if (regSet & CSR_MASK_X20) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X20>(pContext), 0); }
+ if (regSet & CSR_MASK_X21) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X21>(pContext), 0); }
+ if (regSet & CSR_MASK_X22) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X22>(pContext), 0); }
+ if (regSet & CSR_MASK_X23) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X23>(pContext), 0); }
+ if (regSet & CSR_MASK_X24) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X24>(pContext), 0); }
+ if (regSet & CSR_MASK_X25) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X25>(pContext), 0); }
+ if (regSet & CSR_MASK_X26) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X26>(pContext), 0); }
+ if (regSet & CSR_MASK_X27) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X27>(pContext), 0); }
+ if (regSet & CSR_MASK_X28) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_X28>(pContext), 0); }
}
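The ARM64TODO above is concrete: ten callee-saved registers (x19..x28) need ten distinct mask bits, but regSet is a UInt8. An illustration with assumed one-bit-per-register mask values (by analogy with the other targets):

    #include <cstdint>

    enum : uint16_t {                      // assumed values, one bit per register
        CSR_MASK_X19 = 0x001, CSR_MASK_X20 = 0x002, CSR_MASK_X21 = 0x004,
        CSR_MASK_X22 = 0x008, CSR_MASK_X23 = 0x010, CSR_MASK_X24 = 0x020,
        CSR_MASK_X25 = 0x040, CSR_MASK_X26 = 0x080, CSR_MASK_X27 = 0x100,
        CSR_MASK_X28 = 0x200,
    };

    // x27 and x28 cannot be represented in an 8-bit register set.
    static_assert(CSR_MASK_X27 > 0xFF && CSR_MASK_X28 > 0xFF,
                  "a UInt8 regSet cannot encode all ten ARM64 callee-saved registers");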
@@ -767,6 +824,7 @@ bool EECodeManager::UnwindStackFrame(GCInfoHeader * pInfoHeader,
if (regMask & CSR_MASK_R10) { pContext->pR10 = RSP++; }
if (regMask & CSR_MASK_R11) { pContext->pR11 = RSP++; }
#elif defined(_TARGET_ARM64_)
+ UNREFERENCED_PARAMETER(regMask);
PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#endif // _TARGET_AMD64_
}
@@ -947,7 +1005,13 @@ PTR_PTR_VOID EECodeManager::GetReturnAddressLocationForHijack(
if (!pHeader->IsRegSaved(CSR_MASK_LR))
return NULL;
#elif defined(_ARM64_)
- PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+ // ARM64TODO: for now no gc:
+ UNREFERENCED_PARAMETER(pGCInfoHeader);
+ UNREFERENCED_PARAMETER(cbMethodCodeSize);
+ UNREFERENCED_PARAMETER(pbEpilogTable);
+ UNREFERENCED_PARAMETER(codeOffset);
+ UNREFERENCED_PARAMETER(pContext);
+ return NULL;
#endif // _ARM_
void ** ppvResult;
@@ -1496,6 +1560,7 @@ void ** EECodeManager::GetReturnAddressLocationFromEpilog(GCInfoHeader * pInfoHe
return NULL;
#elif defined(_ARM64_)
+ UNREFERENCED_PARAMETER(pbEpilog);
PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#endif
@@ -1579,6 +1644,7 @@ void CheckHijackInEpilog(GCInfoHeader * pInfoHeader, Code * pEpilog, Code * pEpi
context.pR11 = &RBP_TEST_VAL;
context.SP = RSP_TEST_VAL;
#elif defined(_ARM64_)
+ UNREFERENCED_PARAMETER(RBP_TEST_VAL);
PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#endif
@@ -2229,6 +2295,9 @@ bool VerifyEpilogBytesARM(GCInfoHeader * pInfoHeader, Code * pEpilogStart, UInt3
#elif defined(_ARM64_)
bool VerifyEpilogBytesARM64(GCInfoHeader * pInfoHeader, Code * pEpilogStart, UInt32 epilogSize)
{
+ UNREFERENCED_PARAMETER(pInfoHeader);
+ UNREFERENCED_PARAMETER(pEpilogStart);
+ UNREFERENCED_PARAMETER(epilogSize);
PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
}
#endif // _ARM_
diff --git a/src/Native/Runtime/StackFrameIterator.cpp b/src/Native/Runtime/StackFrameIterator.cpp
index 7341b3233..3d9a2eb4a 100644
--- a/src/Native/Runtime/StackFrameIterator.cpp
+++ b/src/Native/Runtime/StackFrameIterator.cpp
@@ -209,6 +209,7 @@ void StackFrameIterator::InternalInit(Thread * pThreadToWalk, PTR_PInvokeTransit
}
#elif defined(_TARGET_ARM64_)
+ UNREFERENCED_PARAMETER(pPreservedRegsCursor);
PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#else // _TARGET_ARM_
@@ -730,6 +731,7 @@ void StackFrameIterator::UnwindFuncletInvokeThunk()
m_RegDisplay.pR11 = SP++;
#elif defined(_TARGET_ARM64_)
+ UNREFERENCED_PARAMETER(isFilterInvoke);
PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#else
@@ -847,6 +849,20 @@ public:
pRegisterSet->pRbp = GET_POINTER_TO_FIELD(m_pushedEBP);
}
+#elif defined(_TARGET_ARM64_)
+private:
+ // ARM64TODO: #error NYI for this arch
+ UIntNative m_stackPassedArgs[1]; // Placeholder
+public:
+ PTR_UIntNative get_CallerSP() { PORTABILITY_ASSERT("@TODO: FIXME:ARM64"); return NULL; }
+ PTR_UIntNative get_AddressOfPushedCallerIP() { PORTABILITY_ASSERT("@TODO: FIXME:ARM64"); return NULL; }
+ PTR_UIntNative get_LowerBoundForConservativeReporting() { PORTABILITY_ASSERT("@TODO: FIXME:ARM64"); return NULL; }
+
+ void UnwindNonVolatileRegisters(REGDISPLAY * pRegisterSet)
+ {
+ UNREFERENCED_PARAMETER(pRegisterSet);
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+ }
#else
#error NYI for this arch
#endif
@@ -983,6 +999,12 @@ void StackFrameIterator::UnwindCallDescrThunk()
newSP += sizeof(CALL_DESCR_CONTEXT);
#elif defined(_TARGET_ARM64_)
+ // ARM64TODO: pFP points to the SP that we want to capture? (This arrangement allows for
+ //            the arguments from this function to be loaded into memory with an adjustment
+ //            to SP, like an alloca.)
+ newSP = *(PTR_UIntNative)m_RegDisplay.pFP;
+ PTR_CALL_DESCR_CONTEXT pContext = (PTR_CALL_DESCR_CONTEXT)newSP;
+
PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#elif defined(_TARGET_X86_)
diff --git a/src/Native/Runtime/StackFrameIterator.h b/src/Native/Runtime/StackFrameIterator.h
index 36eecaf21..8ae206f50 100644
--- a/src/Native/Runtime/StackFrameIterator.h
+++ b/src/Native/Runtime/StackFrameIterator.h
@@ -153,6 +153,17 @@ private:
PTR_UIntNative pR9;
PTR_UIntNative pR10;
PTR_UIntNative pR11;
+#elif defined(_TARGET_ARM64_)
+ PTR_UIntNative pX19;
+ PTR_UIntNative pX20;
+ PTR_UIntNative pX21;
+ PTR_UIntNative pX22;
+ PTR_UIntNative pX23;
+ PTR_UIntNative pX24;
+ PTR_UIntNative pX25;
+ PTR_UIntNative pX26;
+ PTR_UIntNative pX27;
+ PTR_UIntNative pX28;
#elif defined(UNIX_AMD64_ABI)
PTR_UIntNative pRbp;
PTR_UIntNative pRbx;
diff --git a/src/Native/Runtime/ThunksMapping.cpp b/src/Native/Runtime/ThunksMapping.cpp
index f1a653e3c..a99cf49c6 100644
--- a/src/Native/Runtime/ThunksMapping.cpp
+++ b/src/Native/Runtime/ThunksMapping.cpp
@@ -15,7 +15,7 @@
#ifdef FEATURE_RX_THUNKS
-#ifdef USE_PORTABLE_HELPERS
+#if defined(USE_PORTABLE_HELPERS)
static_assert(false, "Cannot use the portable helpers with FEATURE_RX_THUNKS");
#endif
@@ -26,6 +26,7 @@ static_assert(false, "Cannot use the portable helpers with FEATURE_RX_THUNKS");
#elif _TARGET_ARM_
#define THUNK_SIZE 20
#elif _TARGET_ARM64_
+//ARM64TODO
#define THUNK_SIZE 0x8000 // This will cause RhpGetNumThunksPerBlock to return 0 for now
#endif
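On the placeholder THUNK_SIZE: assuming RhpGetNumThunksPerBlock derives the per-block thunk count from the block size by integer division (the formula itself is not part of this diff), an oversized thunk makes the count come out to zero, which is the stated intent:

    // Hypothetical arithmetic only; the real block size and formula are assumptions.
    constexpr int THUNK_SIZE = 0x8000;
    constexpr int BLOCK_SIZE = 0x1000;                             // e.g. one 4K page
    constexpr int NUM_THUNKS_PER_BLOCK = BLOCK_SIZE / THUNK_SIZE;  // == 0
    static_assert(NUM_THUNKS_PER_BLOCK == 0, "thunk pool effectively disabled for now");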
@@ -198,9 +199,11 @@ EXTERN_C REDHAWK_API void* __cdecl RhAllocateThunksMapping()
pCurrentThunkAddress += 2;
#elif _TARGET_ARM64_
-
+ UNREFERENCED_PARAMETER(pCurrentDataAddress);
+ UNREFERENCED_PARAMETER(pCurrentThunkAddress);
/* TODO */ ASSERT_UNCONDITIONALLY("NYI");
-
+#else
+#error
#endif
}
}
diff --git a/src/Native/Runtime/arm64/AllocFast.asm b/src/Native/Runtime/arm64/AllocFast.asm
new file mode 100644
index 000000000..157d76f13
--- /dev/null
+++ b/src/Native/Runtime/arm64/AllocFast.asm
@@ -0,0 +1,240 @@
+;; Licensed to the .NET Foundation under one or more agreements.
+;; The .NET Foundation licenses this file to you under the MIT license.
+;; See the LICENSE file in the project root for more information.
+
+#include "AsmMacros.h"
+
+ TEXTAREA
+
+;; Allocate non-array, non-finalizable object. If the allocation doesn't fit into the current thread's
+;; allocation context then automatically fallback to the slow allocation path.
+;; x0 == EEType
+ LEAF_ENTRY RhpNewFast
+
+ ;; x1 = GetThread(), TRASHES x2
+ INLINE_GETTHREAD x1, x2
+
+ ;;
+ ;; x0 contains EEType pointer
+ ;;
+ ldur w2, [x0, #OFFSETOF__EEType__m_uBaseSize]
+
+ ;;
+ ;; x0: EEType pointer
+ ;; x1: Thread pointer
+ ;; x2: base size
+ ;;
+
+ ldr x12, [x1, #OFFSETOF__Thread__m_alloc_context__alloc_ptr]
+ add x2, x2, x12
+ ldr x13, [x1, #OFFSETOF__Thread__m_alloc_context__alloc_limit]
+ cmp x2, x13
+ bhi RhpNewFast_RarePath
+
+ ;; Update the alloc pointer to account for the allocation.
+ str x2, [x1, #OFFSETOF__Thread__m_alloc_context__alloc_ptr]
+
+ ;; Set the new object's EEType pointer
+ str x0, [x12, #OFFSETOF__Object__m_pEEType]
+
+ mov x0, x12
+ ret
+
+RhpNewFast_RarePath
+ mov x1, #0
+ b RhpNewObject
+ LEAF_END RhpNewFast
+
+ INLINE_GETTHREAD_CONSTANT_POOL
+
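For readers following the assembly, RhpNewFast above is the usual thread-local bump allocator. An equivalent C++ sketch, with type and field names assumed from the offsets the code uses:

    #include <cstdint>

    struct EEType { uint32_t m_uBaseSize; };                      // assumed shapes
    struct Object { EEType* m_pEEType; };
    struct Thread { uint8_t* alloc_ptr; uint8_t* alloc_limit; };

    Thread* GetThread();                                          // INLINE_GETTHREAD
    Object* RhpNewObject(EEType* pEEType, int flags);             // slow path

    Object* RhpNewFastSketch(EEType* pEEType)
    {
        Thread*  pThread = GetThread();
        uint8_t* ptr = pThread->alloc_ptr;
        uint8_t* end = ptr + pEEType->m_uBaseSize;
        if (end > pThread->alloc_limit)                           // 'bhi RhpNewFast_RarePath'
            return RhpNewObject(pEEType, 0);
        pThread->alloc_ptr = end;                                 // bump the alloc pointer
        Object* pObject = (Object*)ptr;
        pObject->m_pEEType = pEEType;                             // stamp the EEType
        return pObject;
    }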
+;; Allocate non-array object with finalizer.
+;; x0 == EEType
+ LEAF_ENTRY RhpNewFinalizable
+ mov x1, #GC_ALLOC_FINALIZE
+ b RhpNewObject
+ LEAF_END RhpNewFinalizable
+
+;; Allocate non-array object.
+;; x0 == EEType
+;; x1 == alloc flags
+ NESTED_ENTRY RhpNewObject
+
+ PUSH_COOP_PINVOKE_FRAME x3
+
+ ;; x3: transition frame
+
+ ;; Preserve the EEType in x19
+ mov x19, x0
+
+ ldur w2, [x0, #OFFSETOF__EEType__m_uBaseSize]
+
+ ;; Call the rest of the allocation helper.
+ ;; void* RhpGcAlloc(EEType *pEEType, UInt32 uFlags, UIntNative cbSize, void * pTransitionFrame)
+ bl RhpGcAlloc
+
+ ;; Set the new object's EEType pointer on success.
+ cbz x0, NewOutOfMemory
+ str x19, [x0, #OFFSETOF__Object__m_pEEType]
+
+ ;; If the object is bigger than RH_LARGE_OBJECT_SIZE, we must publish it to the BGC
+ ldur w1, [x19, #OFFSETOF__EEType__m_uBaseSize]
+ movz x2, #(RH_LARGE_OBJECT_SIZE & 0xFFFF)
+ movk x2, #(RH_LARGE_OBJECT_SIZE >> 16), lsl #16
+ cmp x1, x2
+ blo New_SkipPublish
+
+ ;; x0: object
+ ;; x1: already contains object size
+ bl RhpPublishObject ;; x0: this function returns the object that was passed-in
+
+New_SkipPublish
+
+ POP_COOP_PINVOKE_FRAME
+ ret
+
+NewOutOfMemory
+ ;; This is the OOM failure path. We're going to tail-call to a managed helper that will throw
+ ;; an out of memory exception that the caller of this allocator understands.
+
+ mov x0, x19 ; EEType pointer
+ mov x1, #0 ; Indicate that we should throw OOM.
+
+ POP_COOP_PINVOKE_FRAME
+
+ ldr x12, =RhExceptionHandling_FailedAllocation
+ EPILOG_BRANCH_REG x12
+
+ NESTED_END RhpNewObject
+
+;; Allocate a string.
+;; x0 == EEType
+;; x1 == element/character count
+ LEAF_ENTRY RhNewString
+ ;; ARM64TODO
+ brk 0xf000
+ LEAF_END RhNewString
+
+ INLINE_GETTHREAD_CONSTANT_POOL
+
+
+;; Allocate one dimensional, zero based array (SZARRAY).
+;; x0 == EEType
+;; x1 == element count
+ LEAF_ENTRY RhpNewArray
+
+ ; we want to limit the element count to the non-negative 32-bit int range
+ mov x2,#0x7FFFFFFF
+ cmp x1,x2
+ bgt ArraySizeOverflow
+
+ ldurh w2, [x0, #OFFSETOF__EEType__m_usComponentSize]
+ umull x2, w1, w2
+ ldur w3, [x0, #OFFSETOF__EEType__m_uBaseSize]
+ add x2, x2, x3
+ add x2, x2, #7
+ and x2, x2, #-8
+
+ ; x0 == EEType
+ ; x1 == element count
+ ; x2 == array size
+
+ INLINE_GETTHREAD x3, x5
+
+ ;; Load potential new object address into x12.
+ ldr x12, [x3, #OFFSETOF__Thread__m_alloc_context__alloc_ptr]
+
+ ;; Determine whether the end of the object would lie outside of the current allocation context. If so,
+ ;; we abandon the attempt to allocate the object directly and fall back to the slow helper.
+
+ add x2, x2, x12
+ ldr x12, [x3, #OFFSETOF__Thread__m_alloc_context__alloc_limit]
+ cmp x2, x12
+ bhi RhpNewArrayRare
+
+ ;; Reload new object address into x12.
+ ldr x12, [x3, #OFFSETOF__Thread__m_alloc_context__alloc_ptr]
+
+ ;; Update the alloc pointer to account for the allocation.
+ str x2, [x3, #OFFSETOF__Thread__m_alloc_context__alloc_ptr]
+
+ ;; Set the new object's EEType pointer and element count.
+ str x0, [x12, #OFFSETOF__Object__m_pEEType]
+ str x1, [x12, #OFFSETOF__Array__m_Length]
+
+ ;; Return the object allocated in x0.
+ mov x0, x12
+
+ ret
+
+ArraySizeOverflow
+ ; We get here if the element count is too large: the size of the final array object
+ ; can't be represented as a signed 32-bit value. We're going to tail-call to a managed
+ ; helper that will throw an overflow exception that the caller of this allocator understands.
+
+ ; x0 holds EEType pointer already
+ mov x1, #1 ; Indicate that we should throw OverflowException
+ b RhExceptionHandling_FailedAllocation
+ LEAF_END RhpNewArray
+
+ INLINE_GETTHREAD_CONSTANT_POOL
+
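The size computation in RhpNewArray above clamps the element count to the non-negative 32-bit range, multiplies by the component size, adds the base size, and rounds up to 8 bytes. In C++ terms (names assumed):

    #include <cstdint>

    // Sketch only; the assembly branches to ArraySizeOverflow instead of returning 0.
    uint64_t ComputeArraySizeSketch(uint32_t baseSize, uint16_t componentSize, int64_t count)
    {
        if (count > 0x7FFFFFFF)                                    // 'cmp x1, x2 / bgt'
            return 0;
        uint64_t size = (uint64_t)(uint32_t)count * componentSize  // 'umull x2, w1, w2'
                      + baseSize;                                  // 'add x2, x2, x3'
        return (size + 7) & ~UINT64_C(7);                          // round up to 8 bytes
    }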
+;; Allocate one dimensional, zero based array (SZARRAY) using the slow path that calls a runtime helper.
+;; x0 == EEType
+;; x1 == element count
+;; x2 == array size + Thread::m_alloc_context::alloc_ptr
+;; x3 == Thread
+ NESTED_ENTRY RhpNewArrayRare
+
+ ; Recover array size by subtracting the alloc_ptr from x2.
+ ldr x12, [x3, #OFFSETOF__Thread__m_alloc_context__alloc_ptr]
+ sub x2, x2, x12
+
+ PUSH_COOP_PINVOKE_FRAME x3
+
+ ; Preserve data we'll need later into the callee saved registers
+ mov x19, x0 ; Preserve EEType
+ mov x20, x1 ; Preserve element count
+ mov x21, x2 ; Preserve array size
+
+ mov x1, #0
+
+ ;; void* RhpGcAlloc(EEType *pEEType, UInt32 uFlags, UIntNative cbSize, void * pTransitionFrame)
+ bl RhpGcAlloc
+
+ ; Set the new object's EEType pointer and length on success.
+ cbz x0, ArrayOutOfMemory
+
+ ; Success, set the array's type and element count in the new object.
+ str x19, [x0, #OFFSETOF__Object__m_pEEType]
+ str x20, [x0, #OFFSETOF__Array__m_Length]
+
+ ;; If the object is bigger than RH_LARGE_OBJECT_SIZE, we must publish it to the BGC
+ movz x2, #(RH_LARGE_OBJECT_SIZE & 0xFFFF)
+ movk x2, #(RH_LARGE_OBJECT_SIZE >> 16), lsl #16
+ cmp x21, x2
+ blo NewArray_SkipPublish
+
+ ;; x0 = newly allocated array. x1 = size
+ mov x1, x21
+ bl RhpPublishObject
+
+NewArray_SkipPublish
+
+ POP_COOP_PINVOKE_FRAME
+ EPILOG_RETURN
+
+ArrayOutOfMemory
+ ;; This is the OOM failure path. We're going to tail-call to a managed helper that will throw
+ ;; an out of memory exception that the caller of this allocator understands.
+
+ mov x0, x19 ; EEType Pointer
+ mov x1, #0 ; Indicate that we should throw OOM.
+
+ POP_COOP_PINVOKE_FRAME
+
+ ldr x12, =RhExceptionHandling_FailedAllocation
+ EPILOG_BRANCH_REG x12
+
+ NESTED_END RhpNewArrayRare
+
+ END
diff --git a/src/Native/Runtime/arm64/AsmMacros.h b/src/Native/Runtime/arm64/AsmMacros.h
index c7903d141..0f64afafe 100644
--- a/src/Native/Runtime/arm64/AsmMacros.h
+++ b/src/Native/Runtime/arm64/AsmMacros.h
@@ -3,7 +3,7 @@
;; See the LICENSE file in the project root for more information.
;; OS provided macros
-#include <ksarm.h>
+#include <ksarm64.h>
;; generated by the build from AsmOffsets.cpp
#include "AsmOffsets.inc"
@@ -21,7 +21,7 @@ GC_ALLOC_ALIGN8_BIAS equ 4
GC_ALLOC_ALIGN8 equ 8
;; Note: these must match the defs in PInvokeTransitionFrameFlags defined in rhbinder.h
-;; FIXME:ARM64
+;; ARM64TODO
;;
;; Rename fields of nested structs
@@ -29,3 +29,131 @@ GC_ALLOC_ALIGN8 equ 8
OFFSETOF__Thread__m_alloc_context__alloc_ptr equ OFFSETOF__Thread__m_rgbAllocContextBuffer + OFFSETOF__gc_alloc_context__alloc_ptr
OFFSETOF__Thread__m_alloc_context__alloc_limit equ OFFSETOF__Thread__m_rgbAllocContextBuffer + OFFSETOF__gc_alloc_context__alloc_limit
+;;
+;; IMPORTS
+;;
+ EXTERN RhpGcAlloc
+ EXTERN RhpPublishObject
+ EXTERN RhExceptionHandling_FailedAllocation
+
+;; -----------------------------------------------------------------------------
+;;
+;; Macro to export a pointer to an address inside a stub as a 64-bit variable
+;;
+ MACRO
+ EXPORT_POINTER_TO_ADDRESS $Name
+1
+ AREA |.rdata|, ALIGN=8, DATA, READONLY
+$Name
+ DCQ $Name, 0
+ EXPORT $Name
+ TEXTAREA
+ ROUT
+
+ MEND
+
+;; -----------------------------------------------------------------------------
+;;
+;; Macro for indicating an alternate entry point into a function.
+;;
+
+ MACRO
+ LABELED_RETURN_ADDRESS $ReturnAddressName
+
+ ; export the return address name, but do not perturb the code by forcing alignment
+$ReturnAddressName
+ EXPORT $ReturnAddressName
+
+ ; flush any pending literal pool stuff
+ ROUT
+
+ MEND
+
+;; -----------------------------------------------------------------------------
+;;
+;; Macro to get a pointer to the Thread* object for the currently executing thread
+;;
+
+__tls_array equ 0x58 ;; offsetof(TEB, ThreadLocalStoragePointer)
+
+ EXTERN _tls_index
+
+ GBLS __SECTIONREL_tls_CurrentThread
+__SECTIONREL_tls_CurrentThread SETS "SECTIONREL_tls_CurrentThread"
+
+ MACRO
+ INLINE_GETTHREAD $destReg, $trashReg
+
+ ldr $trashReg, =_tls_index
+ ldr $trashReg, [$trashReg]
+ ldr $destReg, [xpr, #__tls_array]
+ ldr $destReg, [$destReg, $trashReg, lsl #3]
+ ldr $trashReg, $__SECTIONREL_tls_CurrentThread
+ add $destReg, $destReg, $trashReg
+ MEND
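INLINE_GETTHREAD is the standard Windows TLS lookup: xpr is the assembler alias for the platform register x18, which holds the TEB on Windows ARM64, and __tls_array (0x58) is the offset of TEB.ThreadLocalStoragePointer. An equivalent C++ sketch (parameter names are stand-ins):

    #include <cstdint>

    extern "C" uint32_t _tls_index;      // defined by the CRT for this module

    // 'teb' stands in for x18/xpr; 'secrelOffset' for the SECTIONREL value of
    // tls_CurrentThread loaded from the literal pool below.
    void* GetThreadSketch(uint8_t* teb, uint64_t secrelOffset)
    {
        uint8_t** tlsArray = *(uint8_t***)(teb + 0x58);   // TEB.ThreadLocalStoragePointer
        return tlsArray[_tls_index] + secrelOffset;       // module TLS block + offset
    }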
+
+ ;; INLINE_GETTHREAD_CONSTANT_POOL macro has to be used after the last function in the .asm file that used
+ ;; INLINE_GETTHREAD. Optionally, it can also be used after any function that used INLINE_GETTHREAD
+ ;; to improve density, or to reduce the distance between the constant pool and its use.
+ MACRO
+ INLINE_GETTHREAD_CONSTANT_POOL
+ EXTERN tls_CurrentThread
+
+ ;; Section relocs are 32 bits. Using an extra DCD initialized to zero for 8-byte alignment.
+$__SECTIONREL_tls_CurrentThread
+ DCD tls_CurrentThread
+ RELOC 8, tls_CurrentThread ;; SECREL
+ DCD 0
+
+__SECTIONREL_tls_CurrentThread SETS "$__SECTIONREL_tls_CurrentThread":CC:"_"
+
+ MEND
+
+;; -----------------------------------------------------------------------------
+;;
+;; Macro used from unmanaged helpers called from managed code where the helper does not transition immediately
+;; into pre-emptive mode but may cause a GC and thus requires the stack is crawlable. This is typically the
+;; case for helpers that meddle in GC state (e.g. allocation helpers) where the code must remain in
+;; cooperative mode since it handles object references and internal GC state directly but a garbage collection
+;; may be inevitable. In these cases we need to be able to transition to pre-emptive mode deep within the
+;; unmanaged code but still be able to initialize the stack iterator at the first stack frame which may hold
+;; interesting GC references. In all our helper cases this corresponds to the most recent managed frame (e.g.
+;; the helper's caller).
+;;
+;; This macro builds a frame describing the current state of managed code.
+;;
+;; INVARIANTS
+;; - The macro assumes it defines the method prolog, it should typically be the first code in a method and
+;; certainly appear before any attempt to alter the stack pointer.
+;; - This macro uses trashReg (after its initial value has been saved in the frame) and upon exit trashReg
+;; will contain the address of the transition frame.
+;;
+ MACRO
+ PUSH_COOP_PINVOKE_FRAME $trashReg
+
+ ;; ARM64TODO: reserve stack for any data+flags needed to make the stack walker do its job
+
+ PROLOG_SAVE_REG_PAIR fp, lr, #-0x60! ;; Push down stack pointer and store FP and LR
+ mov fp, sp ;; Set the frame pointer to the bottom of the new frame
+ ;; Save callee saved registers
+ PROLOG_SAVE_REG_PAIR x19, x20, #16
+ PROLOG_SAVE_REG_PAIR x21, x22, #32
+ PROLOG_SAVE_REG_PAIR x23, x24, #48
+ PROLOG_SAVE_REG_PAIR x25, x26, #64
+ PROLOG_SAVE_REG_PAIR x27, x28, #80
+ mov $trashReg, sp
+ MEND
+
+;; Pop the frame and restore register state preserved by PUSH_COOP_PINVOKE_FRAME
+ MACRO
+ POP_COOP_PINVOKE_FRAME
+
+ ;; ARM64TODO: restore stack used by any data + flags needed to make the stack walker do its job
+
+ EPILOG_RESTORE_REG_PAIR x19, x20, #16
+ EPILOG_RESTORE_REG_PAIR x21, x22, #32
+ EPILOG_RESTORE_REG_PAIR x23, x24, #48
+ EPILOG_RESTORE_REG_PAIR x25, x26, #64
+ EPILOG_RESTORE_REG_PAIR x27, x28, #80
+ EPILOG_RESTORE_REG_PAIR fp, lr, #0x60!
+ MEND
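The 0x60-byte frame pushed by PUSH_COOP_PINVOKE_FRAME has a fixed layout: fp/lr at the new SP, then the five callee-saved pairs, with $trashReg returning the frame's base address. A C++ mirror of the offsets (the struct name is invented):

    #include <cstdint>

    struct CoopPinvokeFrameSketch {
        uint64_t Fp;        // +0x00, saved x29; fp points here after the prolog
        uint64_t Lr;        // +0x08, saved x30
        uint64_t X19, X20;  // +0x10, +0x18
        uint64_t X21, X22;  // +0x20, +0x28
        uint64_t X23, X24;  // +0x30, +0x38
        uint64_t X25, X26;  // +0x40, +0x48
        uint64_t X27, X28;  // +0x50, +0x58
    };

    static_assert(sizeof(CoopPinvokeFrameSketch) == 0x60,
                  "matches the '#-0x60!' pre-index in PROLOG_SAVE_REG_PAIR");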
diff --git a/src/Native/Runtime/arm64/AsmOffsetsCpu.h b/src/Native/Runtime/arm64/AsmOffsetsCpu.h
index b25f11d1d..8e4a7867d 100644
--- a/src/Native/Runtime/arm64/AsmOffsetsCpu.h
+++ b/src/Native/Runtime/arm64/AsmOffsetsCpu.h
@@ -8,7 +8,7 @@
//
// NOTE: the offsets MUST be in hex notation WITHOUT the 0x prefix
-PLAT_ASM_SIZEOF(240, ExInfo)
+PLAT_ASM_SIZEOF(280, ExInfo)
PLAT_ASM_OFFSET(0, ExInfo, m_pPrevExInfo)
PLAT_ASM_OFFSET(8, ExInfo, m_pExContext)
PLAT_ASM_OFFSET(10, ExInfo, m_exception)
@@ -16,7 +16,7 @@ PLAT_ASM_OFFSET(18, ExInfo, m_kind)
PLAT_ASM_OFFSET(19, ExInfo, m_passNumber)
PLAT_ASM_OFFSET(1c, ExInfo, m_idxCurClause)
PLAT_ASM_OFFSET(20, ExInfo, m_frameIter)
-PLAT_ASM_OFFSET(238, ExInfo, m_notifyDebuggerSP)
+PLAT_ASM_OFFSET(278, ExInfo, m_notifyDebuggerSP)
PLAT_ASM_OFFSET(0, PInvokeTransitionFrame, m_RIP)
PLAT_ASM_OFFSET(8, PInvokeTransitionFrame, m_FramePointer)
@@ -24,13 +24,13 @@ PLAT_ASM_OFFSET(10, PInvokeTransitionFrame, m_pThread)
PLAT_ASM_OFFSET(18, PInvokeTransitionFrame, m_dwFlags)
PLAT_ASM_OFFSET(20, PInvokeTransitionFrame, m_PreservedRegs)
-PLAT_ASM_SIZEOF(218, StackFrameIterator)
+PLAT_ASM_SIZEOF(258, StackFrameIterator)
PLAT_ASM_OFFSET(10, StackFrameIterator, m_FramePointer)
PLAT_ASM_OFFSET(18, StackFrameIterator, m_ControlPC)
PLAT_ASM_OFFSET(20, StackFrameIterator, m_RegDisplay)
-PLAT_ASM_SIZEOF(8, PAL_LIMITED_CONTEXT)
-PLAT_ASM_OFFSET(0, PAL_LIMITED_CONTEXT, IP)
+PLAT_ASM_SIZEOF(148, PAL_LIMITED_CONTEXT)
+PLAT_ASM_OFFSET(100, PAL_LIMITED_CONTEXT, IP)
// @TODO: Add ARM64 entries for PAL_LIMITED_CONTEXT
diff --git a/src/Native/Runtime/arm64/CallDescrWorker.asm b/src/Native/Runtime/arm64/CallDescrWorker.asm
new file mode 100644
index 000000000..eb0be533f
--- /dev/null
+++ b/src/Native/Runtime/arm64/CallDescrWorker.asm
@@ -0,0 +1,22 @@
+;; Licensed to the .NET Foundation under one or more agreements.
+;; The .NET Foundation licenses this file to you under the MIT license.
+;; See the LICENSE file in the project root for more information.
+
+#include "AsmMacros.h"
+
+ TEXTAREA
+
+;;-----------------------------------------------------------------------------
+;; This helper routine enregisters the appropriate arguments and makes the
+;; actual call.
+;;-----------------------------------------------------------------------------
+;;void RhCallDescrWorker(CallDescrData * pCallDescrData);
+ NESTED_ENTRY RhCallDescrWorker
+ brk 0xf000
+
+ EXPORT_POINTER_TO_ADDRESS PointerToReturnFromCallDescrThunk
+ brk 0xf000
+
+ NESTED_END RhCallDescrWorker
+
+ END
diff --git a/src/Native/Runtime/arm64/CallingConventionConverterHelpers.asm b/src/Native/Runtime/arm64/CallingConventionConverterHelpers.asm
index ac3f03b66..5433d49d8 100644
--- a/src/Native/Runtime/arm64/CallingConventionConverterHelpers.asm
+++ b/src/Native/Runtime/arm64/CallingConventionConverterHelpers.asm
@@ -2,4 +2,59 @@
;; The .NET Foundation licenses this file to you under the MIT license.
;; See the LICENSE file in the project root for more information.
-;; TODO
\ No newline at end of file
+#include "ksarm64.h"
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; CallingConventionCoverter Helpers ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;;
+;; Note: The "__jmpstub__" prefix is used to indicate to debugger
+;; that it must step-through this stub when it encounters it while
+;; stepping.
+;;
+
+ ;;
+ ;; void CallingConventionConverter_ReturnThunk()
+ ;;
+ LEAF_ENTRY CallingConventionConverter_ReturnThunk
+ brk 0xf000
+ LEAF_END CallingConventionConverter_ReturnThunk
+
+ ;;
+ ;; __jmpstub__CallingConventionConverter_CommonCallingStub
+ ;;
+ ;; struct CallingConventionConverter_CommonCallingStub_PointerData
+ ;; {
+ ;; void *ManagedCallConverterThunk;
+ ;; void *UniversalThunk;
+ ;; }
+ ;;
+ ;; struct CommonCallingStubInputData
+ ;; {
+ ;; ULONG_PTR CallingConventionId;
+ ;; CallingConventionConverter_CommonCallingStub_PointerData *commonData; // Only the ManagedCallConverterThunk field is used
+ ;; // However, it is specified just like other platforms, so the behavior of the common
+ ;; // calling stub is easier to debug
+ ;; }
+ ;;
+ ;; sp-4 - Points at CommonCallingStubInputData
+ ;;
+ ;;
+ LEAF_ENTRY __jmpstub__CallingConventionConverter_CommonCallingStub
+ brk 0xf000
+ LEAF_END __jmpstub__CallingConventionConverter_CommonCallingStub
+
+ ;;
+ ;; void CallingConventionConverter_SpecifyCommonStubData(CallingConventionConverter_CommonCallingStub_PointerData *commonData);
+ ;;
+ LEAF_ENTRY CallingConventionConverter_SpecifyCommonStubData
+ brk 0xf000
+ LEAF_END CallingConventionConverter_SpecifyCommonStubData
+
+ ;;
+ ;; void CallingConventionConverter_GetStubs(IntPtr *returnVoidStub, IntPtr *returnIntegerStub, IntPtr *commonCallingStub)
+ ;;
+ LEAF_ENTRY CallingConventionConverter_GetStubs
+ brk 0xf000
+ LEAF_END CallingConventionConverter_GetStubs
+
+ END
diff --git a/src/Native/Runtime/arm64/Dummies.asm b/src/Native/Runtime/arm64/Dummies.asm
new file mode 100644
index 000000000..0da3907bd
--- /dev/null
+++ b/src/Native/Runtime/arm64/Dummies.asm
@@ -0,0 +1,19 @@
+;; Licensed to the .NET Foundation under one or more agreements.
+;; The .NET Foundation licenses this file to you under the MIT license.
+;; See the LICENSE file in the project root for more information.
+
+#include "AsmMacros.h"
+
+ TEXTAREA
+
+ LEAF_ENTRY RhpLMod
+ brk 0xf000
+ ret
+ LEAF_END RhpLMod
+
+ LEAF_ENTRY RhpLMul
+ brk 0xf000
+ ret
+ LEAF_END RhpLMul
+
+ END
diff --git a/src/Native/Runtime/arm64/ExceptionHandling.asm b/src/Native/Runtime/arm64/ExceptionHandling.asm
new file mode 100644
index 000000000..ddd6701eb
--- /dev/null
+++ b/src/Native/Runtime/arm64/ExceptionHandling.asm
@@ -0,0 +1,115 @@
+;; Licensed to the .NET Foundation under one or more agreements.
+;; The .NET Foundation licenses this file to you under the MIT license.
+;; See the LICENSE file in the project root for more information.
+
+#include "AsmMacros.h"
+
+ TEXTAREA
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; RhpThrowHwEx
+;;
+;; INPUT: x0: exception code of fault
+;; x1: faulting IP
+;;
+;; OUTPUT:
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ NESTED_ENTRY RhpThrowHwEx
+ brk 0xf000
+ EXPORT_POINTER_TO_ADDRESS PointerToRhpThrowHwEx2
+ ;; no return
+ brk 0xf000
+ NESTED_END RhpThrowHwEx
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; RhpThrowEx
+;;
+;; INPUT: x0: exception object
+;;
+;; OUTPUT:
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ NESTED_ENTRY RhpThrowEx
+ brk 0xf000
+ EXPORT_POINTER_TO_ADDRESS PointerToRhpThrowEx2
+
+ ;; no return
+ brk 0xf000
+ NESTED_END RhpThrowEx
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; void FASTCALL RhpRethrow()
+;;
+;; SUMMARY: Similar to RhpThrowEx, except that it passes along the currently active ExInfo
+;;
+;; INPUT:
+;;
+;; OUTPUT:
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ NESTED_ENTRY RhpRethrow
+ brk 0xf000
+ EXPORT_POINTER_TO_ADDRESS PointerToRhpRethrow2
+
+ ;; no return
+ brk 0xf000
+ NESTED_END RhpRethrow
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; void* FASTCALL RhpCallCatchFunclet(RtuObjectRef exceptionObj, void* pHandlerIP, REGDISPLAY* pRegDisplay,
+;; ExInfo* pExInfo)
+;;
+;; INPUT: x0: exception object
+;; x1: handler funclet address
+;; x2: REGDISPLAY*
+;; x3: ExInfo*
+;;
+;; OUTPUT:
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ NESTED_ENTRY RhpCallCatchFunclet
+ brk 0xf000
+ EXPORT_POINTER_TO_ADDRESS PointerToRhpCallCatchFunclet2
+ brk 0xf000
+ NESTED_END RhpCallCatchFunclet
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; void FASTCALL RhpCallFinallyFunclet(void* pHandlerIP, REGDISPLAY* pRegDisplay)
+;;
+;; INPUT: x0: handler funclet address
+;; x1: REGDISPLAY*
+;;
+;; OUTPUT:
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ NESTED_ENTRY RhpCallFinallyFunclet
+ brk 0xf000
+ EXPORT_POINTER_TO_ADDRESS PointerToRhpCallFinallyFunclet2
+ brk 0xf000
+ NESTED_END RhpCallFinallyFunclet
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; void* FASTCALL RhpCallFilterFunclet(RtuObjectRef exceptionObj, void* pFilterIP, REGDISPLAY* pRegDisplay)
+;;
+;; INPUT: x0: exception object
+;; x1: filter funclet address
+;; x2: REGDISPLAY*
+;;
+;; OUTPUT:
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ NESTED_ENTRY RhpCallFilterFunclet
+ brk 0xf000
+ EXPORT_POINTER_TO_ADDRESS PointerToRhpCallFilterFunclet2
+ brk 0xf000
+ NESTED_END RhpCallFilterFunclet
+
+ end
diff --git a/src/Native/Runtime/arm64/FloatingPoint.asm b/src/Native/Runtime/arm64/FloatingPoint.asm
new file mode 100644
index 000000000..519143bed
--- /dev/null
+++ b/src/Native/Runtime/arm64/FloatingPoint.asm
@@ -0,0 +1,13 @@
+;; Licensed to the .NET Foundation under one or more agreements.
+;; The .NET Foundation licenses this file to you under the MIT license.
+;; See the LICENSE file in the project root for more information.
+
+#include "AsmMacros.h"
+
+ TEXTAREA
+
+ NESTED_ENTRY RhpFltRemRev
+ brk 0xf000
+ NESTED_END RhpFltRemRev
+
+ end
diff --git a/src/Native/Runtime/arm64/GcProbe.asm b/src/Native/Runtime/arm64/GcProbe.asm
new file mode 100644
index 000000000..e97d24921
--- /dev/null
+++ b/src/Native/Runtime/arm64/GcProbe.asm
@@ -0,0 +1,194 @@
+;; Licensed to the .NET Foundation under one or more agreements.
+;; The .NET Foundation licenses this file to you under the MIT license.
+;; See the LICENSE file in the project root for more information.
+
+#include "AsmMacros.h"
+
+ TEXTAREA
+
+;;
+;;
+;;
+;; GC Probe Hijack targets
+;;
+;;
+ EXTERN RhpPInvokeExceptionGuard
+
+
+ NESTED_ENTRY RhpGcProbeHijackScalarWrapper, .text, RhpPInvokeExceptionGuard
+ brk 0xf000
+ LABELED_RETURN_ADDRESS RhpGcProbeHijackScalar
+ brk 0xf000
+ NESTED_END RhpGcProbeHijackScalarWrapper
+
+ NESTED_ENTRY RhpGcProbeHijackObjectWrapper, .text, RhpPInvokeExceptionGuard
+ brk 0xf000
+ LABELED_RETURN_ADDRESS RhpGcProbeHijackObject
+ brk 0xf000
+ NESTED_END RhpGcProbeHijackObjectWrapper
+
+ NESTED_ENTRY RhpGcProbeHijackByrefWrapper, .text, RhpPInvokeExceptionGuard
+ brk 0xf000
+ LABELED_RETURN_ADDRESS RhpGcProbeHijackByref
+ brk 0xf000
+ NESTED_END RhpGcProbeHijackByrefWrapper
+
+#ifdef FEATURE_GC_STRESS
+;;
+;;
+;; GC Stress Hijack targets
+;;
+;;
+ LEAF_ENTRY RhpGcStressHijackScalar
+ brk 0xf000
+ LEAF_END RhpGcStressHijackScalar
+
+ LEAF_ENTRY RhpGcStressHijackObject
+ brk 0xf000
+ LEAF_END RhpGcStressHijackObject
+
+ LEAF_ENTRY RhpGcStressHijackByref
+ brk 0xf000
+ LEAF_END RhpGcStressHijackByref
+
+
+;;
+;; Worker for our GC stress probes. Do not call directly!!
+;; Instead, go through RhpGcStressHijack{Scalar|Object|Byref}.
+;; This worker performs the GC Stress work and returns to the original return address.
+;;
+;; Register state on entry:
+;; r0: hijacked function return value
+;; r1: hijacked function return value
+;; r2: thread pointer
+;; r12: register bitmask
+;;
+;; Register state on exit:
+;; Scratch registers, except for r0, have been trashed
+;; All other registers restored as they were when the hijack was first reached.
+;;
+ NESTED_ENTRY RhpGcStressProbe
+ brk 0xf000
+ NESTED_END RhpGcStressProbe
+#endif ;; FEATURE_GC_STRESS
+
+ LEAF_ENTRY RhpGcProbe
+ brk 0xf000
+ LEAF_END RhpGcProbe
+
+ NESTED_ENTRY RhpGcProbeRare
+ brk 0xf000
+ NESTED_END RhpGcProbeRare
+
+ LEAF_ENTRY RhpGcPoll
+ brk 0xf000
+ LEAF_END RhpGcPoll
+
+ NESTED_ENTRY RhpGcPollRare
+ brk 0xf000
+ NESTED_END RhpGcPollRare
+
+ LEAF_ENTRY RhpGcPollStress
+ ;
+ ; loop hijacking is used instead
+ ;
+ brk 0xf000
+
+ LEAF_END RhpGcPollStress
+
+
+#ifdef FEATURE_GC_STRESS
+ NESTED_ENTRY RhpHijackForGcStress
+ brk 0xf000
+ NESTED_END RhpHijackForGcStress
+#endif ;; FEATURE_GC_STRESS
+
+
+;;
+;; The following functions are _jumped_ to when we need to transfer control from one method to another for EH
+;; dispatch. These are needed to properly coordinate with the GC hijacking logic. We are essentially replacing
+;; the return from the throwing method with a jump to the handler in the caller, but we need to be aware of
+;; any return address hijack that may be in place for GC suspension. These routines use a quick test of the
+;; return address against a specific GC hijack routine, and then fixup the stack pointer to what it would be
+;; after a real return from the throwing method. Then, if we are not hijacked we can simply jump to the
+;; handler in the caller.
+;;
+;; If we are hijacked, then we jump to a routine that will unhijack appropriately and wait for the GC to
+;; complete. There are also variants for GC stress.
+;;
+;; Note that at this point we are either hijacked or we are not, and this will not change until we return to
+;; managed code. It is an invariant of the system that a thread will only attempt to hijack or unhijack
+;; another thread while the target thread is suspended in managed code, and this is _not_ managed code.
+;;
+ MACRO
+ RTU_EH_JUMP_HELPER $funcName, $hijackFuncName, $isStress, $stressFuncName
+
+ LEAF_ENTRY $funcName
+ brk 0xf000
+ LEAF_END $funcName
+ MEND
+;; We need an instance of the helper for each possible hijack function. The binder has enough
+;; information to determine which one we need to use for any function.
+ RTU_EH_JUMP_HELPER RhpEHJumpScalar, RhpGcProbeHijackScalar, {false}, 0
+ RTU_EH_JUMP_HELPER RhpEHJumpObject, RhpGcProbeHijackObject, {false}, 0
+ RTU_EH_JUMP_HELPER RhpEHJumpByref, RhpGcProbeHijackByref, {false}, 0
+#ifdef FEATURE_GC_STRESS
+ RTU_EH_JUMP_HELPER RhpEHJumpScalarGCStress, RhpGcProbeHijackScalar, {true}, RhpGcStressHijackScalar
+ RTU_EH_JUMP_HELPER RhpEHJumpObjectGCStress, RhpGcProbeHijackObject, {true}, RhpGcStressHijackObject
+ RTU_EH_JUMP_HELPER RhpEHJumpByrefGCStress, RhpGcProbeHijackByref, {true}, RhpGcStressHijackByref
+#endif
+
+
+;;
+;; We are hijacked for a normal GC (not GC stress), so we need to unhijack and wait for the GC to complete.
+;;
+;; Register state on entry:
+;; r0: reference to the exception object.
+;; r2: thread
+;; Non-volatile registers are all already correct for return to the caller.
+;; The stack is as if we have tail called to this function (lr points to return address).
+;;
+;; Register state on exit:
+;; r7: previous frame pointer
+;; r0: reference to the exception object
+;;
+ NESTED_ENTRY RhpGCProbeForEHJump
+ brk 0xf000
+ NESTED_END RhpGCProbeForEHJump
+
+#ifdef FEATURE_GC_STRESS
+;;
+;; We are hijacked for GC Stress (not a normal GC) so we need to invoke the GC stress helper.
+;;
+;; Register state on entry:
+;; r1: reference to the exception object.
+;; r2: thread
+;; Non-volatile registers are all already correct for return to the caller.
+;; The stack is as if we have tail called to this function (lr points to return address).
+;;
+;; Register state on exit:
+;; r7: previous frame pointer
+;; r0: reference to the exception object
+;;
+ NESTED_ENTRY RhpGCStressProbeForEHJump
+ brk 0xf000
+ NESTED_END RhpGCStressProbeForEHJump
+
+;;
+;; INVARIANT: Don't trash the argument registers, the binder codegen depends on this.
+;;
+ LEAF_ENTRY RhpSuppressGcStress
+ brk 0xf000
+ LEAF_END RhpSuppressGcStress
+#endif ;; FEATURE_GC_STRESS
+
+;; ALLOC_PROBE_FRAME will save the first 4 vfp registers, in order to avoid trashing VFP registers across the loop
+;; hijack, we must save the rest -- d4-d15 (12) and d16-d31 (16).
+VFP_EXTRA_SAVE_SIZE equ ((12*8) + (16*8))
+
+;; Helper called from hijacked loops
+ LEAF_ENTRY RhpLoopHijack
+ brk 0xf000
+ LEAF_END RhpLoopHijack
+
+ end
diff --git a/src/Native/Runtime/arm64/GetThread.asm b/src/Native/Runtime/arm64/GetThread.asm
new file mode 100644
index 000000000..3d132a006
--- /dev/null
+++ b/src/Native/Runtime/arm64/GetThread.asm
@@ -0,0 +1,27 @@
+;; Licensed to the .NET Foundation under one or more agreements.
+;; The .NET Foundation licenses this file to you under the MIT license.
+;; See the LICENSE file in the project root for more information.
+
+#include "AsmMacros.h"
+
+ TEXTAREA
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; RhpGetThread
+;;
+;;
+;; INPUT: none
+;;
+;; OUTPUT: x0: Thread pointer
+;;
+;; MUST PRESERVE ARGUMENT REGISTERS
+;; @todo check the actual requirements here, x0 is both return and argument register
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ LEAF_ENTRY RhpGetThread
+ brk 0xf000
+ LEAF_END RhpGetThread
+
+ end
diff --git a/src/Native/Runtime/arm64/InteropThunksHelpers.asm b/src/Native/Runtime/arm64/InteropThunksHelpers.asm
new file mode 100644
index 000000000..aaad54dd4
--- /dev/null
+++ b/src/Native/Runtime/arm64/InteropThunksHelpers.asm
@@ -0,0 +1,46 @@
+;; Licensed to the .NET Foundation under one or more agreements.
+;; The .NET Foundation licenses this file to you under the MIT license.
+;; See the LICENSE file in the project root for more information.
+
+
+#include "ksarm64.h"
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; DATA SECTIONS ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; TODO __tls_array equ 0x2C ;; offsetof(TEB, ThreadLocalStoragePointer)
+
+POINTER_SIZE equ 0x08
+
+;; TLS variables
+ AREA |.tls$|, DATA
+ThunkParamSlot % 0x8
+
+ TEXTAREA
+
+ EXTERN _tls_index
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Interop Thunks Helpers ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ ;;
+ ;; RhCommonStub
+ ;;
+ NESTED_ENTRY RhCommonStub
+ brk 0xf000
+ NESTED_END RhCommonStub
+
+ ;;
+ ;; IntPtr RhGetCommonStubAddress()
+ ;;
+ LEAF_ENTRY RhGetCommonStubAddress
+ brk 0xf000
+ LEAF_END RhGetCommonStubAddress
+
+
+ ;;
+ ;; IntPtr RhGetCurrentThunkContext()
+ ;;
+ LEAF_ENTRY RhGetCurrentThunkContext
+ brk 0xf000
+ LEAF_END RhGetCurrentThunkContext
+
+ END
diff --git a/src/Native/Runtime/arm64/MiscStubs.asm b/src/Native/Runtime/arm64/MiscStubs.asm
new file mode 100644
index 000000000..9d2ea38d3
--- /dev/null
+++ b/src/Native/Runtime/arm64/MiscStubs.asm
@@ -0,0 +1,126 @@
+;; Licensed to the .NET Foundation under one or more agreements.
+;; The .NET Foundation licenses this file to you under the MIT license.
+;; See the LICENSE file in the project root for more information.
+
+#include "AsmMacros.h"
+
+ TEXTAREA
+
+;;
+;; Checks whether the static class constructor for the type indicated by the context structure has been
+;; executed yet. If not the classlib is called via their CheckStaticClassConstruction callback which will
+;; execute the cctor and update the context to record this fact.
+;;
+;; Input:
+;; x0 : Address of StaticClassConstructionContext structure
+;;
+;; Output:
+;; All volatile registers and the condition codes may be trashed.
+;;
+ LEAF_ENTRY RhpCheckCctor
+ brk 0xf000
+ LEAF_END RhpCheckCctor
+
+;;
+;; Checks whether the static class constructor for the type indicated by the context structure has been
+;; executed yet. If not the classlib is called via their CheckStaticClassConstruction callback which will
+;; execute the cctor and update the context to record this fact.
+;;
+;; Input:
+;; x0 : Value that must be preserved in this register across the cctor check.
+;; x1 : Address of StaticClassConstructionContext structure
+;;
+;; Output:
+;; All volatile registers other than x0 may be trashed and the condition codes may also be trashed.
+;;
+ LEAF_ENTRY RhpCheckCctor2
+ brk 0xf000
+ LEAF_END RhpCheckCctor2
+
+;;
+;; Slow path helper for RhpCheckCctor.
+;;
+;; Input:
+;; x0 : Value that must be preserved in this register across the cctor check.
+;; x1 : Address of StaticClassConstructionContext structure
+;;
+;; Output:
+;; All volatile registers other than x0 may be trashed and the condition codes may also be trashed.
+;;
+ NESTED_ENTRY RhpCheckCctor2__SlowPath
+ brk 0xf000
+ NESTED_END RhpCheckCctor2__SlowPath
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; void* RhpCopyMultibyteNoGCRefs(void*, void*, size_t)
+;;
+;; The purpose of this wrapper is to hoist the potential null reference exceptions of copying memory up to a place where
+;; the stack unwinder and exception dispatch can properly transform the exception into a managed exception and dispatch
+;; it to managed code.
+;;
+
+ LEAF_ENTRY RhpCopyMultibyteNoGCRefs
+ brk 0xf000
+ ALTERNATE_ENTRY RhpCopyMultibyteNoGCRefsDestAVLocation
+ brk 0xf000
+ ALTERNATE_ENTRY RhpCopyMultibyteNoGCRefsSrcAVLocation
+ brk 0xf000
+ LEAF_END
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; void* RhpCopyMultibyte(void*, void*, size_t)
+;;
+;; The purpose of this wrapper is to hoist the potential null reference exceptions of copying memory up to a place where
+;; the stack unwinder and exception dispatch can properly transform the exception into a managed exception and dispatch
+;; it to managed code.
+;;
+
+ LEAF_ENTRY RhpCopyMultibyte
+ brk 0xf000
+ ALTERNATE_ENTRY RhpCopyMultibyteDestAVLocation
+ brk 0xf000
+ ALTERNATE_ENTRY RhpCopyMultibyteSrcAVLocation
+ brk 0xf000
+ LEAF_END
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; void* RhpCopyMultibyteWithWriteBarrier(void*, void*, size_t)
+;;
+;; The purpose of this wrapper is to hoist the potential null reference exceptions of copying memory up to a place where
+;; the stack unwinder and exception dispatch can properly transform the exception into a managed exception and dispatch
+;; it to managed code.
+;; Runs a card table update via RhpBulkWriteBarrier after the copy
+;;
+
+ LEAF_ENTRY RhpCopyMultibyteWithWriteBarrier
+ brk 0xf000
+ ALTERNATE_ENTRY RhpCopyMultibyteWithWriteBarrierDestAVLocation
+ brk 0xf000
+ ALTERNATE_ENTRY RhpCopyMultibyteWithWriteBarrierSrcAVLocation
+ brk 0xf000
+ LEAF_END
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; void* RhpCopyAnyWithWriteBarrier(void*, void*, size_t)
+;;
+;; The purpose of this wrapper is to hoist the potential null reference exceptions of copying memory up to a place where
+;; the stack unwinder and exception dispatch can properly transform the exception into a managed exception and dispatch
+;; it to managed code.
+;; Runs a card table update via RhpBulkWriteBarrier after the copy if it contained GC pointers
+;;
+
+ LEAF_ENTRY RhpCopyAnyWithWriteBarrier
+ brk 0xf000
+ ALTERNATE_ENTRY RhpCopyAnyWithWriteBarrierDestAVLocation
+ brk 0xf000
+ ALTERNATE_ENTRY RhpCopyAnyWithWriteBarrierSrcAVLocation
+ brk 0xf000
+ LEAF_END
+
+ end
diff --git a/src/Native/Runtime/arm64/PInvoke.asm b/src/Native/Runtime/arm64/PInvoke.asm
new file mode 100644
index 000000000..8119485e0
--- /dev/null
+++ b/src/Native/Runtime/arm64/PInvoke.asm
@@ -0,0 +1,84 @@
+;; Licensed to the .NET Foundation under one or more agreements.
+;; The .NET Foundation licenses this file to you under the MIT license.
+;; See the LICENSE file in the project root for more information.
+
+#include "AsmMacros.h"
+
+ TEXTAREA
+
+ IMPORT RhpReversePInvokeBadTransition
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; RhpWaitForSuspend -- rare path for RhpPInvoke and RhpReversePInvokeReturn
+;;
+;;
+;; INPUT: none
+;;
+;; TRASHES: none
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ NESTED_ENTRY RhpWaitForSuspend
+ brk 0xf000
+ NESTED_END RhpWaitForSuspend
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; RhpWaitForGC
+;;
+;;
+;; INPUT: r2: transition frame
+;;
+;; OUTPUT:
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ NESTED_ENTRY RhpWaitForGC
+ brk 0xf000
+ NESTED_END RhpWaitForGC
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; RhpReversePInvoke
+;;
+;; IN: r4: address of reverse pinvoke frame
+;; 0: save slot for previous M->U transition frame
+;; 4: save slot for thread pointer to avoid re-calc in epilog sequence
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ NESTED_ENTRY RhpReversePInvoke
+ brk 0xf000
+ NESTED_END RhpReversePInvoke
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; RhpReversePInvokeAttachOrTrapThread -- rare path for RhpPInvoke
+;;
+;;
+;; INPUT: r4: address of reverse pinvoke frame
+;;
+;; TRASHES: none
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ NESTED_ENTRY RhpReversePInvokeAttachOrTrapThread
+ brk 0xf000
+ NESTED_END RhpReversePInvokeAttachOrTrapThread
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; RhpReversePInvokeReturn
+;;
+;; IN: r3: address of reverse pinvoke frame
+;; 0: save slot for previous M->U transition frame
+;; 4: save slot for thread pointer to avoid re-calc in epilog sequence
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ LEAF_ENTRY RhpReversePInvokeReturn
+ brk 0xf000
+ LEAF_END RhpReversePInvokeReturn
+
+
+ end
diff --git a/src/Native/Runtime/arm64/StubDispatch.asm b/src/Native/Runtime/arm64/StubDispatch.asm
new file mode 100644
index 000000000..e82a49b21
--- /dev/null
+++ b/src/Native/Runtime/arm64/StubDispatch.asm
@@ -0,0 +1,80 @@
+;; Licensed to the .NET Foundation under one or more agreements.
+;; The .NET Foundation licenses this file to you under the MIT license.
+;; See the LICENSE file in the project root for more information.
+
+#include "AsmMacros.h"
+
+ TEXTAREA
+
+
+#ifdef FEATURE_CACHED_INTERFACE_DISPATCH
+
+
+ ;;EXTERN t_TLS_DispatchCell
+
+SECTIONREL_t_TLS_DispatchCell
+ ;;DCD t_TLS_DispatchCell
+ ;;RELOC 15 ;; SECREL
+
+ LEAF_ENTRY RhpCastableObjectDispatch_CommonStub
+ brk 0xf000
+ LEAF_END RhpCastableObjectDispatch_CommonStub
+
+ LEAF_ENTRY RhpTailCallTLSDispatchCell
+ brk 0xf000
+ LEAF_END RhpTailCallTLSDispatchCell
+
+ LEAF_ENTRY RhpCastableObjectDispatchHelper_TailCalled
+ brk 0xf000
+ LEAF_END RhpCastableObjectDispatchHelper_TailCalled
+
+ LEAF_ENTRY RhpCastableObjectDispatchHelper
+ brk 0xf000
+ ALTERNATE_ENTRY RhpCastableObjectDispatchHelper2
+ brk 0xf000
+ LEAF_END RhpCastableObjectDispatchHelper
+
+
+;; Macro that generates a stub consuming a cache with the given number of entries.
+ GBLS StubName
+
+ MACRO
+ DEFINE_INTERFACE_DISPATCH_STUB $entries
+
+StubName SETS "RhpInterfaceDispatch$entries"
+
+ NESTED_ENTRY $StubName
+ brk 0xf000
+ NESTED_END $StubName
+
+ MEND
+
+;; Define all the stub routines we currently need.
+ DEFINE_INTERFACE_DISPATCH_STUB 1
+ DEFINE_INTERFACE_DISPATCH_STUB 2
+ DEFINE_INTERFACE_DISPATCH_STUB 4
+ DEFINE_INTERFACE_DISPATCH_STUB 8
+ DEFINE_INTERFACE_DISPATCH_STUB 16
+ DEFINE_INTERFACE_DISPATCH_STUB 32
+ DEFINE_INTERFACE_DISPATCH_STUB 64
+
+
+;; Initial dispatch on an interface when we don't have a cache yet.
+ LEAF_ENTRY RhpInitialInterfaceDispatch
+ brk 0xf000
+ LEAF_END RhpInitialInterfaceDispatch
+
+ LEAF_ENTRY RhpVTableOffsetDispatch
+ brk 0xf000
+ LEAF_END RhpVTableOffsetDispatch
+
+;; Cache miss case, call the runtime to resolve the target and update the cache.
+ LEAF_ENTRY RhpInterfaceDispatchSlow
+ ALTERNATE_ENTRY RhpInitialDynamicInterfaceDispatch
+ brk 0xf000
+ LEAF_END RhpInterfaceDispatchSlow
+
+
+#endif // FEATURE_CACHED_INTERFACE_DISPATCH
+
+ end
diff --git a/src/Native/Runtime/arm64/ThunkPoolThunks.asm b/src/Native/Runtime/arm64/ThunkPoolThunks.asm
new file mode 100644
index 000000000..a039939c5
--- /dev/null
+++ b/src/Native/Runtime/arm64/ThunkPoolThunks.asm
@@ -0,0 +1,253 @@
+;; ==++==
+;;
+;; Copyright (c) Microsoft Corporation. All rights reserved.
+;;
+;; ==--==
+
+#include "ksarm64.h"
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; STUBS & DATA SECTIONS ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; ARM64TODO
+;; THUNK_CODESIZE equ 0x10 ;; 4-byte mov, 2-byte add, 4-byte str, 4-byte ldr, 2-byte branch
+;; THUNK_DATASIZE equ 0x08 ;; 2 dwords
+;;
+;; THUNK_POOL_NUM_THUNKS_PER_PAGE equ 0xFA ;; 250 thunks per page
+;;
+;; PAGE_SIZE equ 0x1000 ;; 4K
+;; POINTER_SIZE equ 0x04
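+;;
+;; (Sanity check on the commented-out arm32 sizes above: 250 thunks x 16 bytes of code = 4000 bytes
+;; and 250 x 8 bytes of data = 2000 bytes, so both the stub page and its data page fit in 4K; the
+;; arm64 THUNK_CODESIZE/THUNK_DATASIZE values still need to be chosen.)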
+
+ MACRO
+ NAMED_READONLY_DATA_SECTION $name, $areaAlias
+ AREA $areaAlias,DATA,READONLY
+RO$name % 4
+ MEND
+
+ MACRO
+ NAMED_READWRITE_DATA_SECTION $name, $areaAlias
+ AREA $areaAlias,DATA
+RW$name % 4
+ MEND
+
+ MACRO
+ LOAD_DATA_ADDRESS $groupIndex, $index
+ ALIGN 0x10 ;; make sure we align to 16-byte boundary for CFG table
+ brk 0xf000
+ MEND
+
+ MACRO
+ JUMP_TO_COMMON $groupIndex, $index
+ brk 0xf000
+ MEND
+
+ MACRO
+ TenThunks $groupIndex
+ ;; Each thunk will load the address of its corresponding data (from the page that immediately follows)
+ ;; and call a common stub. The address of the common stub is setup by the caller (last dword
+ ;; in the thunks data section) depending on the 'kind' of thunks needed (interop, fat function pointers, etc...)
+
+ ;; Each data block used by a thunk consists of two dword values:
+ ;; - Context: some value given to the thunk as context. Example for fat-fptrs: context = generic dictionary
+ ;; - Target : target code that the thunk eventually jumps to.
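+;;
+;; e.g. (a sketch; the struct name is hypothetical, and on arm64 each slot is pointer-sized):
+;;   struct ThunkData
+;;   {
+;;       void* Context;  // for fat function pointers: the generic dictionary
+;;       void* Target;   // code the thunk eventually transfers control to
+;;   };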
+
+ LOAD_DATA_ADDRESS $groupIndex,0
+ JUMP_TO_COMMON $groupIndex,0
+
+ LOAD_DATA_ADDRESS $groupIndex,1
+ JUMP_TO_COMMON $groupIndex,1
+
+ LOAD_DATA_ADDRESS $groupIndex,2
+ JUMP_TO_COMMON $groupIndex,2
+
+ LOAD_DATA_ADDRESS $groupIndex,3
+ JUMP_TO_COMMON $groupIndex,3
+
+ LOAD_DATA_ADDRESS $groupIndex,4
+ JUMP_TO_COMMON $groupIndex,4
+
+ LOAD_DATA_ADDRESS $groupIndex,5
+ JUMP_TO_COMMON $groupIndex,5
+
+ LOAD_DATA_ADDRESS $groupIndex,6
+ JUMP_TO_COMMON $groupIndex,6
+
+ LOAD_DATA_ADDRESS $groupIndex,7
+ JUMP_TO_COMMON $groupIndex,7
+
+ LOAD_DATA_ADDRESS $groupIndex,8
+ JUMP_TO_COMMON $groupIndex,8
+
+ LOAD_DATA_ADDRESS $groupIndex,9
+ JUMP_TO_COMMON $groupIndex,9
+ MEND
+
+ MACRO
+ THUNKS_PAGE_BLOCK
+
+ TenThunks 0
+ TenThunks 1
+ TenThunks 2
+ TenThunks 3
+ TenThunks 4
+ TenThunks 5
+ TenThunks 6
+ TenThunks 7
+ TenThunks 8
+ TenThunks 9
+ TenThunks 10
+ TenThunks 11
+ TenThunks 12
+ TenThunks 13
+ TenThunks 14
+ TenThunks 15
+ TenThunks 16
+ TenThunks 17
+ TenThunks 18
+ TenThunks 19
+ TenThunks 20
+ TenThunks 21
+ TenThunks 22
+ TenThunks 23
+ TenThunks 24
+ MEND
+
+ ;;
+ ;; The first thunks section should be 64K aligned because it can get
+ ;; mapped multiple times in memory, and mapping works on allocation
+ ;; granularity boundaries (we don't want to map more than what we need)
+ ;;
+ ;; The easiest way to do so is by having the thunks section at the
+ ;; first 64K aligned virtual address in the binary. We provide a section
+ ;; layout file to the linker to tell it how to layout the thunks sections
+ ;; that we care about. (ndp\rh\src\runtime\DLLs\app\mrt100_app_sectionlayout.txt)
+ ;;
+ ;; The PE spec says images cannot have gaps between sections (other
+ ;; than what is required by the section alignment value in the header),
+ ;; therefore we need a couple of padding data sections (otherwise the
+ ;; OS will not load the image).
+ ;;
+
+ NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment0, "|.pad0|"
+ NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment1, "|.pad1|"
+ NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment2, "|.pad2|"
+ NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment3, "|.pad3|"
+ NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment4, "|.pad4|"
+ NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment5, "|.pad5|"
+ NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment6, "|.pad6|"
+ NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment7, "|.pad7|"
+ NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment8, "|.pad8|"
+ NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment9, "|.pad9|"
+ NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment10, "|.pad10|"
+ NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment11, "|.pad11|"
+ NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment12, "|.pad12|"
+ NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment13, "|.pad13|"
+ NAMED_READONLY_DATA_SECTION PaddingFor64KAlignment14, "|.pad14|"
+
+ ;;
+ ;; Thunk Stubs
+ ;; NOTE: Keep number of blocks in sync with macro/constant named 'NUM_THUNK_BLOCKS' in:
+ ;; - ndp\FxCore\src\System.Private.CoreLib\System\Runtime\InteropServices\ThunkPool.cs
+ ;; - ndp\rh\src\tools\rhbind\zapimage.h
+ ;;
+ LEAF_ENTRY ThunkPool, "|.tks0|"
+ THUNKS_PAGE_BLOCK
+ LEAF_END ThunkPool
+
+ NAMED_READWRITE_DATA_SECTION ThunkData0, "|.tkd0|"
+
+ LEAF_ENTRY ThunkPool1, "|.tks1|"
+ THUNKS_PAGE_BLOCK
+ LEAF_END ThunkPool1
+
+ NAMED_READWRITE_DATA_SECTION ThunkData1, "|.tkd1|"
+
+ LEAF_ENTRY ThunkPool2, "|.tks2|"
+ THUNKS_PAGE_BLOCK
+ LEAF_END ThunkPool2
+
+ NAMED_READWRITE_DATA_SECTION ThunkData2, "|.tkd2|"
+
+ LEAF_ENTRY ThunkPool3, "|.tks3|"
+ THUNKS_PAGE_BLOCK
+ LEAF_END ThunkPool3
+
+ NAMED_READWRITE_DATA_SECTION ThunkData3, "|.tkd3|"
+
+ LEAF_ENTRY ThunkPool4, "|.tks4|"
+ THUNKS_PAGE_BLOCK
+ LEAF_END ThunkPool4
+
+ NAMED_READWRITE_DATA_SECTION ThunkData4, "|.tkd4|"
+
+ LEAF_ENTRY ThunkPool5, "|.tks5|"
+ THUNKS_PAGE_BLOCK
+ LEAF_END ThunkPool5
+
+ NAMED_READWRITE_DATA_SECTION ThunkData5, "|.tkd5|"
+
+ LEAF_ENTRY ThunkPool6, "|.tks6|"
+ THUNKS_PAGE_BLOCK
+ LEAF_END ThunkPool6
+
+ NAMED_READWRITE_DATA_SECTION ThunkData6, "|.tkd6|"
+
+ LEAF_ENTRY ThunkPool7, "|.tks7|"
+ THUNKS_PAGE_BLOCK
+ LEAF_END ThunkPool7
+
+ NAMED_READWRITE_DATA_SECTION ThunkData7, "|.tkd7|"
+
+
+ ;;
+ ;; IntPtr RhpGetThunksBase()
+ ;;
+ LEAF_ENTRY RhpGetThunksBase
+ brk 0xf000
+ LEAF_END RhpGetThunksBase
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; General Helpers ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ ;;
+ ;; int RhpGetNumThunksPerBlock()
+ ;;
+ LEAF_ENTRY RhpGetNumThunksPerBlock
+ brk 0xf000
+ LEAF_END RhpGetNumThunksPerBlock
+
+ ;;
+ ;; int RhpGetThunkSize()
+ ;;
+ LEAF_ENTRY RhpGetThunkSize
+ brk 0xf000
+ LEAF_END RhpGetThunkSize
+
+ ;;
+ ;; int RhpGetNumThunkBlocksPerMapping()
+ ;;
+ LEAF_ENTRY RhpGetNumThunkBlocksPerMapping
+ brk 0xf000
+ LEAF_END RhpGetNumThunkBlocksPerMapping
+
+ ;;
+ ;; int RhpGetThunkBlockSize
+ ;;
+ LEAF_ENTRY RhpGetThunkBlockSize
+ brk 0xf000
+ LEAF_END RhpGetThunkBlockSize
+
+ ;;
+ ;; IntPtr RhpGetThunkDataBlockAddress(IntPtr thunkStubAddress)
+ ;;
+ LEAF_ENTRY RhpGetThunkDataBlockAddress
+ brk 0xf000
+ LEAF_END RhpGetThunkDataBlockAddress
+
+ ;;
+ ;; IntPtr RhpGetThunkStubsBlockAddress(IntPtr thunkDataAddress)
+ ;;
+ LEAF_ENTRY RhpGetThunkStubsBlockAddress
+ brk 0xf000
+ LEAF_END RhpGetThunkStubsBlockAddress
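+
+    ;; A sketch of the mapping the two helpers above will implement, per the layout described
+    ;; earlier (each stub page is immediately followed by its data page; PAGE_SIZE assumed 0x1000):
+    ;;   dataBlock  = (thunkStubAddress & ~(PAGE_SIZE - 1)) + PAGE_SIZE
+    ;;   stubsBlock = (thunkDataAddress & ~(PAGE_SIZE - 1)) - PAGE_SIZE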
+
+ END
diff --git a/src/Native/Runtime/arm64/UniversalTransition.asm b/src/Native/Runtime/arm64/UniversalTransition.asm
new file mode 100644
index 000000000..3cdf971f3
--- /dev/null
+++ b/src/Native/Runtime/arm64/UniversalTransition.asm
@@ -0,0 +1,28 @@
+;; Licensed to the .NET Foundation under one or more agreements.
+;; The .NET Foundation licenses this file to you under the MIT license.
+;; See the LICENSE file in the project root for more information.
+
+#include "AsmMacros.h"
+
+ TEXTAREA
+
+ MACRO
+ UNIVERSAL_TRANSITION $FunctionName
+
+ NESTED_ENTRY Rhp$FunctionName
+ brk 0xf000
+
+ EXPORT_POINTER_TO_ADDRESS PointerToReturnFrom$FunctionName
+ brk 0xf000
+
+ NESTED_END Rhp$FunctionName
+
+ MEND
+
+    ;; To enable proper step-in behavior in the debugger, we need to have two instances
+    ;; of the thunk. For the first one, the debugger steps into the call in the function,
+    ;; for the other, it steps over it.
+ UNIVERSAL_TRANSITION UniversalTransition
+ UNIVERSAL_TRANSITION UniversalTransition_DebugStepTailCall
+
+ END
diff --git a/src/Native/Runtime/arm64/WriteBarriers.asm b/src/Native/Runtime/arm64/WriteBarriers.asm
new file mode 100644
index 000000000..a307b6f62
--- /dev/null
+++ b/src/Native/Runtime/arm64/WriteBarriers.asm
@@ -0,0 +1,81 @@
+;; Licensed to the .NET Foundation under one or more agreements.
+;; The .NET Foundation licenses this file to you under the MIT license.
+;; See the LICENSE file in the project root for more information.
+
+;;
+;; Define the helpers used to implement the write barrier required when writing an object reference into a
+;; location residing on the GC heap. Such write barriers allow the GC to optimize which objects in
+;; non-ephemeral generations need to be scanned for references to ephemeral objects during an ephemeral
+;; collection.
+;;
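+;; As a point of reference, a minimal C sketch of the checked barrier these stubs will implement
+;; (the g_card_table/g_lowest_address/g_ephemeral_* globals follow the GC's naming; the card shift
+;; and bounds checks are illustrative assumptions, not the final arm64 code):
+;;
+;;   void RhpCheckedAssignRef(Object** dst, Object* ref)
+;;   {
+;;       *dst = ref;                                     // the store; also the AV location
+;;       UInt8* d = (UInt8*)dst;
+;;       if (d >= g_lowest_address && d < g_highest_address                        // destination on the GC heap?
+;;           && (UInt8*)ref >= g_ephemeral_low && (UInt8*)ref < g_ephemeral_high)  // ephemeral target?
+;;           g_card_table[(size_t)dst >> 11] = 0xFF;     // mark the card dirty
+;;   }
+;;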
+
+#include "AsmMacros.h"
+
+ TEXTAREA
+
+ LEAF_ENTRY RhpCheckedAssignRefXXX
+ brk 0xf000
+ ALTERNATE_ENTRY RhpCheckedAssignRef
+ brk 0xf000
+ ALTERNATE_ENTRY RhpCheckedAssignRefAvLocation
+ brk 0xf000
+ ALTERNATE_ENTRY RhpCheckedAssignRefAVLocation
+ brk 0xf000
+ LEAF_END RhpCheckedAssignRefXXX
+
+ LEAF_ENTRY RhpAssignRefXXX
+ brk 0xf000
+ ALTERNATE_ENTRY RhpAssignRef
+ brk 0xf000
+ ALTERNATE_ENTRY RhpAssignRefAvLocationXXX
+ brk 0xf000
+ ALTERNATE_ENTRY RhpAssignRefAVLocation
+ brk 0xf000
+ LEAF_END RhpAssignRefXXX
+
+;; Interlocked operation helpers where the location is an objectref, thus requiring a GC write barrier upon
+;; successful updates.
+
+;; WARNING: Code in EHHelpers.cpp makes assumptions about write barrier code, in particular:
+;; - Function "InWriteBarrierHelper" assumes an AV due to a passed-in null pointer will happen at RhpCheckedLockCmpXchgAVLocation
+;; - Function "UnwindWriteBarrierToCaller" assumes no registers were pushed and LR contains the return address
+
+ ;; Interlocked compare exchange on objectref.
+ ;;
+ ;; On entry:
+    ;;  x0: pointer to objectref
+    ;;  x1: exchange value
+    ;;  x2: comparand
+    ;;
+    ;; On exit:
+    ;;  x0: original value of objectref
+    ;;  x1,x2,x3,x12: trashed
+ ;;
+ LEAF_ENTRY RhpCheckedLockCmpXchg
+ brk 0xf000
+ ALTERNATE_ENTRY RhpCheckedLockCmpXchgAVLocation
+ brk 0xf000
+ LEAF_END RhpCheckedLockCmpXchg
+
+ ;; Interlocked exchange on objectref.
+ ;;
+ ;; On entry:
+    ;;  x0: pointer to objectref
+    ;;  x1: exchange value
+    ;;
+    ;; On exit:
+    ;;  x0: original value of objectref
+    ;;  x1,x2,x3,x12: trashed
+ ;;
+
+ ;; WARNING: Code in EHHelpers.cpp makes assumptions about write barrier code, in particular:
+    ;; - Function "InWriteBarrierHelper" assumes an AV due to a passed-in null pointer will happen at RhpCheckedXchgAVLocation
+    ;; - Function "UnwindWriteBarrierToCaller" assumes no registers were pushed and LR contains the return address
+
+ LEAF_ENTRY RhpCheckedXchg
+ brk 0xf000
+ ALTERNATE_ENTRY RhpCheckedXchgAVLocation
+ brk 0xf000
+ LEAF_END RhpCheckedXchg
+
+ end
diff --git a/src/Native/Runtime/coreclr/gcinfodecoder.cpp b/src/Native/Runtime/coreclr/gcinfodecoder.cpp
index 8e538d2a0..a7bc3d204 100644
--- a/src/Native/Runtime/coreclr/gcinfodecoder.cpp
+++ b/src/Native/Runtime/coreclr/gcinfodecoder.cpp
@@ -1680,7 +1680,7 @@ OBJECTREF* GcInfoDecoder::GetCapturedRegister(
PREGDISPLAY pRD
)
{
- _ASSERTE(regNum >= 0 && regNum <= 28);
+ _ASSERTE(regNum >= 0 && regNum < GEN_REG_COUNT);
// The fields of CONTEXT are in the same order as
// the processor encoding numbers.
diff --git a/src/Native/Runtime/gcdump.cpp b/src/Native/Runtime/gcdump.cpp
index ed4ee98cf..8b178f649 100644
--- a/src/Native/Runtime/gcdump.cpp
+++ b/src/Native/Runtime/gcdump.cpp
@@ -244,6 +244,18 @@ void GCDump::DumpCallsiteString(UInt32 callsiteOffset, PTR_UInt8 pbCallsiteStrin
if (b & CSR_MASK_R6) { gcPrintf("R6 "); count++; }
if (b & CSR_MASK_R7) { gcPrintf("R7 "); count++; }
if (b & CSR_MASK_R8) { gcPrintf("R8 "); count++; }
+#elif defined(_TARGET_ARM64_)
+ // ARM64TODO: not all of these are needed?
+ if (b & CSR_MASK_X19) { gcPrintf("X19 "); count++; }
+ if (b & CSR_MASK_X20) { gcPrintf("X20 "); count++; }
+ if (b & CSR_MASK_X21) { gcPrintf("X21 "); count++; }
+ if (b & CSR_MASK_X22) { gcPrintf("X22 "); count++; }
+ if (b & CSR_MASK_X23) { gcPrintf("X23 "); count++; }
+ if (b & CSR_MASK_X24) { gcPrintf("X24 "); count++; }
+ if (b & CSR_MASK_X25) { gcPrintf("X25 "); count++; }
+ if (b & CSR_MASK_X26) { gcPrintf("X26 "); count++; }
+ if (b & CSR_MASK_X27) { gcPrintf("X27 "); count++; }
+ if (b & CSR_MASK_X28) { gcPrintf("X28 "); count++; }
#else // _ARM_
if (b & CSR_MASK_RBX) { gcPrintf("RBX "); count++; }
if (b & CSR_MASK_RSI) { gcPrintf("RSI "); count++; }
@@ -273,6 +285,17 @@ void GCDump::DumpCallsiteString(UInt32 callsiteOffset, PTR_UInt8 pbCallsiteStrin
case CSR_NUM_R9: regName = "R9"; break;
case CSR_NUM_R10: regName = "R10"; break;
case CSR_NUM_R11: regName = "R11"; break;
+#elif defined(_TARGET_ARM64_)
+ case CSR_NUM_X19: regName = "X19"; break;
+ case CSR_NUM_X20: regName = "X20"; break;
+ case CSR_NUM_X21: regName = "X21"; break;
+ case CSR_NUM_X22: regName = "X22"; break;
+ case CSR_NUM_X23: regName = "X23"; break;
+ case CSR_NUM_X24: regName = "X24"; break;
+ case CSR_NUM_X25: regName = "X25"; break;
+ case CSR_NUM_X26: regName = "X26"; break;
+ case CSR_NUM_X27: regName = "X27"; break;
+ case CSR_NUM_X28: regName = "X28"; break;
#else // _ARM_
case CSR_NUM_RBX: regName = "RBX"; break;
case CSR_NUM_RSI: regName = "RSI"; break;
diff --git a/src/Native/Runtime/inc/daccess.h b/src/Native/Runtime/inc/daccess.h
index d65beb54d..936c4aac0 100644
--- a/src/Native/Runtime/inc/daccess.h
+++ b/src/Native/Runtime/inc/daccess.h
@@ -536,7 +536,7 @@
#include "safemath.h"
-#ifdef _TARGET_AMD64_
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
typedef UInt64 UIntTarget;
#elif defined(_TARGET_X86_)
typedef UInt32 UIntTarget;
diff --git a/src/Native/Runtime/inc/gcinfo.h b/src/Native/Runtime/inc/gcinfo.h
index 9cf0f9fdc..5ce6b1c27 100644
--- a/src/Native/Runtime/inc/gcinfo.h
+++ b/src/Native/Runtime/inc/gcinfo.h
@@ -112,6 +112,184 @@ enum ScratchRegMask
SR_MASK_LR = 0x20,
};
+#elif defined(_TARGET_ARM64_)
+// ARM64TODO: add all arm64-related changes in this file to gcinfo.h in E:\ProjNDev3X\src\Nutc\UTC
+
+enum RegMask
+{
+ RBM_NONE = 0,
+
+ RBM_X0 = 0x00000001,
+ RBM_X1 = 0x00000002,
+ RBM_X2 = 0x00000004,
+ RBM_X3 = 0x00000008,
+ RBM_X4 = 0x00000010,
+ RBM_X5 = 0x00000020,
+ RBM_X6 = 0x00000040,
+ RBM_X7 = 0x00000080,
+ RBM_X8 = 0x00000100, // ARM64TODO: ARM64 ABI: indirect result register
+ RBM_X9 = 0x00000200,
+ RBM_X10 = 0x00000400,
+ RBM_X11 = 0x00000800,
+ RBM_X12 = 0x00001000,
+ RBM_X13 = 0x00002000,
+ RBM_X14 = 0x00004000,
+ RBM_X15 = 0x00008000,
+
+ RBM_XIP0 = 0x00010000, // This one is occasionally used as a scratch register (but can be destroyed by branching or a call)
+ RBM_XIP1 = 0x00020000, // This one may also be used as a scratch register (but can be destroyed by branching or a call)
+ RBM_XPR = 0x00040000,
+
+ RBM_X19 = 0x00080000, // RA_CALLEESAVE
+ RBM_X20 = 0x00100000, // RA_CALLEESAVE
+ RBM_X21 = 0x00200000, // RA_CALLEESAVE
+ RBM_X22 = 0x00400000, // RA_CALLEESAVE
+ RBM_X23 = 0x00800000, // RA_CALLEESAVE
+ RBM_X24 = 0x01000000, // RA_CALLEESAVE
+ RBM_X25 = 0x02000000, // RA_CALLEESAVE
+ RBM_X26 = 0x04000000, // RA_CALLEESAVE
+ RBM_X27 = 0x08000000, // RA_CALLEESAVE
+ RBM_X28 = 0x10000000, // RA_CALLEESAVE
+
+ RBM_FP = 0x20000000,
+ RBM_LR = 0x40000000, // ARM64TODO: check to which lists it should be added
+ RBM_SP = 0x80000000,
+
+
+ RBM_RETVAL = RBM_X8,
+ RBM_CALLEE_SAVED_REGS = (RBM_X19 | RBM_X20 | RBM_X21 | RBM_X22 | RBM_X23 | RBM_X24 | RBM_X25 | RBM_X26 | RBM_X27 | RBM_X28),
+ RBM_CALLEE_SAVED_REG_COUNT = 10,
+
+ RBM_SCRATCH_REGS = (RBM_X0 | RBM_X1 | RBM_X2 | RBM_X3 | RBM_X4 | RBM_X5 | RBM_X6 | RBM_X7 | RBM_X8 | RBM_X9 |
+ RBM_X10 | RBM_X11 | RBM_X12 | RBM_X13 | RBM_X14 | RBM_X15 | RBM_XIP0| RBM_XIP1),
+ RBM_SCRATCH_REG_COUNT = 18,
+};
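+
+// Sanity check on the counts above: the scratch set is x0-x15 (16 registers) plus xip0/xip1,
+// giving RBM_SCRATCH_REG_COUNT == 18; the callee-saved set is x19-x28, giving
+// RBM_CALLEE_SAVED_REG_COUNT == 10.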
+
+#define NUM_PRESERVED_REGS RBM_CALLEE_SAVED_REG_COUNT
+
+enum RegNumber
+{
+ RN_X0 = 0,
+ RN_X1 = 1,
+ RN_X2 = 2,
+ RN_X3 = 3,
+ RN_X4 = 4,
+ RN_X5 = 5,
+ RN_X6 = 6,
+ RN_X7 = 7,
+ RN_X8 = 8, // indirect result register
+ RN_X9 = 9,
+ RN_X10 = 10,
+ RN_X11 = 11,
+ RN_X12 = 12,
+ RN_X13 = 13,
+ RN_X14 = 14,
+ RN_X15 = 15,
+
+ RN_XIP0 = 16,
+ RN_XIP1 = 17,
+ RN_XPR = 18,
+
+ RN_X19 = 19, // RA_CALLEESAVE
+ RN_X20 = 20, // RA_CALLEESAVE
+ RN_X21 = 21, // RA_CALLEESAVE
+ RN_X22 = 22, // RA_CALLEESAVE
+ RN_X23 = 23, // RA_CALLEESAVE
+ RN_X24 = 24, // RA_CALLEESAVE
+ RN_X25 = 25, // RA_CALLEESAVE
+ RN_X26 = 26, // RA_CALLEESAVE
+ RN_X27 = 27, // RA_CALLEESAVE
+ RN_X28 = 28, // RA_CALLEESAVE
+
+ RN_FP = 29,
+ RN_LR = 30,
+ RN_SP = 31,
+
+ RN_NONE = 32,
+};
+
+enum CalleeSavedRegNum
+{
+ CSR_NUM_X19 = 0,
+ CSR_NUM_X20 = 1,
+ CSR_NUM_X21 = 2,
+ CSR_NUM_X22 = 3,
+ CSR_NUM_X23 = 4,
+ CSR_NUM_X24 = 5,
+ CSR_NUM_X25 = 6,
+ CSR_NUM_X26 = 7,
+ CSR_NUM_X27 = 8,
+ CSR_NUM_X28 = 9,
+ CSR_NUM_NONE = 10,
+};
+
+enum CalleeSavedRegMask
+{
+ CSR_MASK_NONE = 0x00,
+ CSR_MASK_X19 = 0x001,
+ CSR_MASK_X20 = 0x002,
+ CSR_MASK_X21 = 0x004,
+ CSR_MASK_X22 = 0x008,
+ CSR_MASK_X23 = 0x010,
+ CSR_MASK_X24 = 0x020,
+ CSR_MASK_X25 = 0x040,
+ CSR_MASK_X26 = 0x080,
+ CSR_MASK_X27 = 0x100,
+ CSR_MASK_X28 = 0x200,
+
+ CSR_MASK_ALL = 0x3ff,
+ CSR_MASK_HIGHEST = 0x200,
+};
+
+enum ScratchRegNum
+{
+ SR_NUM_X0 = 0,
+ SR_NUM_X1 = 1,
+ SR_NUM_X2 = 2,
+ SR_NUM_X3 = 3,
+ SR_NUM_X4 = 4,
+ SR_NUM_X5 = 5,
+ SR_NUM_X6 = 6,
+ SR_NUM_X7 = 7,
+ SR_NUM_X8 = 8,
+ SR_NUM_X9 = 9,
+ SR_NUM_X10 = 10,
+ SR_NUM_X11 = 11,
+ SR_NUM_X12 = 12,
+ SR_NUM_X13 = 13,
+ SR_NUM_X14 = 14,
+ SR_NUM_X15 = 15,
+
+ SR_NUM_XIP0 = 16,
+ SR_NUM_XIP1 = 17,
+
+ SR_NUM_NONE = 18,
+};
+
+enum ScratchRegMask
+{
+ SR_MASK_NONE = 0x00,
+ SR_MASK_X0 = 0x01,
+ SR_MASK_X1 = 0x02,
+ SR_MASK_X2 = 0x04,
+ SR_MASK_X3 = 0x08,
+ SR_MASK_X4 = 0x10,
+ SR_MASK_X5 = 0x20,
+ SR_MASK_X6 = 0x40,
+ SR_MASK_X7 = 0x80,
+ SR_MASK_X8 = 0x100,
+ SR_MASK_X9 = 0x200,
+ SR_MASK_X10 = 0x400,
+ SR_MASK_X11 = 0x800,
+ SR_MASK_X12 = 0x1000,
+ SR_MASK_X13 = 0x2000,
+ SR_MASK_X14 = 0x4000,
+ SR_MASK_X15 = 0x8000,
+
+ SR_MASK_XIP0 = 0x10000,
+ SR_MASK_XIP1 = 0x20000,
+};
+
#else // _TARGET_ARM_
#ifdef _TARGET_AMD64_
@@ -251,6 +429,13 @@ private:
UInt16 hasFrameSize : 1; // 2 [4] 1: frame size is encoded below, 0: frame size is 0
UInt16 calleeSavedRegMask : NUM_PRESERVED_REGS; // 2 [5:7] 3 [0:5]
UInt16 arm_areParmOrVfpRegsPushed:1; // 1: pushed parm register set from R0-R3 and pushed fp reg start and count is encoded below, 0: no pushed parm or fp registers
+#elif defined (_TARGET_ARM64_)
+ // ARM64TODO: check
+ UInt16 returnKind : 2; // 2 [0:1] one of: MethodReturnKind enum
+ UInt16 ebpFrame : 1; // 2 [2] on x64, this means "has frame pointer and it is RBP"; on ARM64 it is FP
+ UInt16 epilogAtEnd : 1; // 2 [3]
+ UInt16 hasFrameSize : 1; // 2 [4] 1: frame size is encoded below, 0: frame size is 0
+ UInt16 calleeSavedRegMask : NUM_PRESERVED_REGS; // 2 [5:7] + 3 [0:6]
#else // _TARGET_ARM_
UInt8 returnKind : 2; // 2 [0:1] one of: MethodReturnKind enum
UInt8 ebpFrame : 1; // 2 [2] on x64, this means "has frame pointer and it is RBP", on ARM R7
@@ -513,8 +698,12 @@ public:
{
#ifdef _TARGET_ARM_
ASSERT(regNum == RN_R7);
-#else
+#elif defined(_TARGET_AMD64_) || defined(_TARGET_X86_)
ASSERT(regNum == RN_EBP);
+#elif defined(_TARGET_ARM64_)
+ ASSERT(regNum == RN_FP);
+#else
+ ASSERT(!"NYI");
#endif
ebpFrame = 1;
}
@@ -568,19 +757,21 @@ public:
ASSERT((offsetInBytes % POINTER_SIZE) == 0);
ASSERT(GetReturnKind() == MRK_ReturnsToNative);
-#if defined(_TARGET_ARM_) || defined(_TARGET_AMD64_)
+#if defined(_TARGET_ARM_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
// The offset can be either positive or negative on ARM and x64.
bool isNeg = (offsetInBytes < 0);
UInt32 uOffsetInBytes = isNeg ? -offsetInBytes : offsetInBytes;
UInt32 uEncodedVal = ((uOffsetInBytes / POINTER_SIZE) << 1) | (isNeg ? 1 : 0);
reversePinvokeFrameOffset = uEncodedVal;
ASSERT(reversePinvokeFrameOffset == uEncodedVal);
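+        // Worked example (POINTER_SIZE == 8): offsetInBytes == -16 gives
+        // uEncodedVal == ((16 / 8) << 1) | 1 == 5; GetReversePinvokeFrameOffset()
+        // decodes 5 back to (5 >> 1) * 8 == 16, negated because bit 0 is set.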
-#else
+#elif defined (_TARGET_X86_)
// Use a positive number because it encodes better and
// the offset is always negative on x86.
ASSERT(offsetInBytes < 0);
reversePinvokeFrameOffset = (-offsetInBytes / POINTER_SIZE);
ASSERT(reversePinvokeFrameOffset == (UInt32)(-offsetInBytes / POINTER_SIZE));
+#else
+ ASSERT(!"NYI");
#endif
}
@@ -821,7 +1012,7 @@ public:
int GetReversePinvokeFrameOffset()
{
-#if defined(_TARGET_ARM_) || defined(_TARGET_AMD64_)
+#if defined(_TARGET_ARM_) || defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
// The offset can be either positive or negative on ARM.
Int32 offsetInBytes;
UInt32 uEncodedVal = reversePinvokeFrameOffset;
@@ -829,11 +1020,13 @@ public:
offsetInBytes = (uEncodedVal >> 1) * POINTER_SIZE;
offsetInBytes = isNeg ? -offsetInBytes : offsetInBytes;
return offsetInBytes;
-#else
+#elif defined(_TARGET_X86_)
// it's always at "EBP - something", so we encode it as a positive
// number and then apply the negative here.
int unsignedOffset = reversePinvokeFrameOffset * POINTER_SIZE;
return -unsignedOffset;
+#else
+ ASSERT(!"NYI");
#endif
}
diff --git a/src/Native/Runtime/portable.cpp b/src/Native/Runtime/portable.cpp
index f1e04f980..336253ae3 100644
--- a/src/Native/Runtime/portable.cpp
+++ b/src/Native/Runtime/portable.cpp
@@ -35,7 +35,7 @@
#include "GCMemoryHelpers.h"
#include "GCMemoryHelpers.inl"
-#ifdef USE_PORTABLE_HELPERS
+#if defined(USE_PORTABLE_HELPERS)
EXTERN_C REDHAWK_API void* REDHAWK_CALLCONV RhpGcAlloc(EEType *pEEType, UInt32 uFlags, UIntNative cbSize, void * pTransitionFrame);
EXTERN_C REDHAWK_API void* REDHAWK_CALLCONV RhpPublishObject(void* pObject, UIntNative cbSize);
@@ -178,6 +178,9 @@ COOP_PINVOKE_HELPER(String *, RhNewString, (EEType * pArrayEEType, int numElemen
return (String*)RhpNewArray(pArrayEEType, numElements);
}
+#endif
+#if defined(USE_PORTABLE_HELPERS)
+
#ifdef _ARM_
COOP_PINVOKE_HELPER(Object *, RhpNewFinalizableAlign8, (EEType* pEEType))
{
@@ -290,6 +293,7 @@ void * ReturnFromCallDescrThunk;
#endif
#if defined(USE_PORTABLE_HELPERS) || defined(PLATFORM_UNIX)
+#if !defined (_ARM64_)
//
// Return address hijacking
//
@@ -317,10 +321,12 @@ COOP_PINVOKE_HELPER(void, RhpGcStressHijackByref, ())
{
ASSERT_UNCONDITIONALLY("NYI");
}
+#endif
#endif // defined(USE_PORTABLE_HELPERS) || defined(PLATFORM_UNIX)
-#ifdef USE_PORTABLE_HELPERS
+#if defined(USE_PORTABLE_HELPERS)
+#if !defined (_ARM64_)
COOP_PINVOKE_HELPER(void, RhpAssignRef, (Object ** dst, Object * ref))
{
// @TODO: USE_PORTABLE_HELPERS - Null check
@@ -334,6 +340,7 @@ COOP_PINVOKE_HELPER(void, RhpCheckedAssignRef, (Object ** dst, Object * ref))
*dst = ref;
InlineCheckedWriteBarrier(dst, ref);
}
+#endif
COOP_PINVOKE_HELPER(Object *, RhpCheckedLockCmpXchg, (Object ** location, Object * value, Object * comparand))
{
@@ -365,12 +372,14 @@ COOP_PINVOKE_HELPER(Int64, RhpLockCmpXchg64, (Int64 * location, Int64 value, Int
#endif // USE_PORTABLE_HELPERS
+#if !defined(_ARM64_)
COOP_PINVOKE_HELPER(void, RhpMemoryBarrier, ())
{
PalMemoryBarrier();
}
+#endif
-#ifdef USE_PORTABLE_HELPERS
+#if defined(USE_PORTABLE_HELPERS)
COOP_PINVOKE_HELPER(void *, RhpGetThunksBase, ())
{
return NULL;
@@ -440,6 +449,7 @@ COOP_PINVOKE_HELPER(void *, RhGetCurrentThunkContext, ())
#endif
+#if !defined(_ARM64_)
COOP_PINVOKE_HELPER(void, RhpETWLogLiveCom, (Int32 eventType, void * ccwHandle, void * objectId, void * typeRawValue, void * iUnknown, void * vTable, Int32 comRefCount, Int32 jupiterRefCount, Int32 flags))
{
ASSERT_UNCONDITIONALLY("NYI");
@@ -450,3 +460,5 @@ COOP_PINVOKE_HELPER(bool, RhpETWShouldWalkCom, ())
ASSERT_UNCONDITIONALLY("NYI");
return false;
}
+
+#endif
diff --git a/src/Native/Runtime/windows/AsmOffsets.cpp b/src/Native/Runtime/windows/AsmOffsets.cpp
index a76400235..28415f764 100644
--- a/src/Native/Runtime/windows/AsmOffsets.cpp
+++ b/src/Native/Runtime/windows/AsmOffsets.cpp
@@ -1,7 +1,7 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-#ifdef _ARM_
+#if defined(_ARM_) || defined(_ARM64_)
#define HASH_DEFINE #define
#define PLAT_ASM_OFFSET(offset, cls, member) HASH_DEFINE OFFSETOF__##cls##__##member 0x##offset
diff --git a/src/Native/Runtime/windows/PalRedhawkInline.h b/src/Native/Runtime/windows/PalRedhawkInline.h
index 4f3e75e63..9341544fc 100644
--- a/src/Native/Runtime/windows/PalRedhawkInline.h
+++ b/src/Native/Runtime/windows/PalRedhawkInline.h
@@ -53,7 +53,7 @@ FORCEINLINE Int64 PalInterlockedCompareExchange64(_Inout_ _Interlocked_operand_
return _InterlockedCompareExchange64(pDst, iValue, iComparand);
}
-#if defined(_AMD64_)
+#if defined(_AMD64_) || defined(_ARM64_)
EXTERN_C UInt8 _InterlockedCompareExchange128(Int64 volatile *, Int64, Int64, Int64 *);
#pragma intrinsic(_InterlockedCompareExchange128)
FORCEINLINE UInt8 PalInterlockedCompareExchange128(_Inout_ _Interlocked_operand_ Int64 volatile *pDst, Int64 iValueHigh, Int64 iValueLow, Int64 *pComparandAndResult)
diff --git a/src/Native/Runtime/windows/PalRedhawkMinWin.cpp b/src/Native/Runtime/windows/PalRedhawkMinWin.cpp
index 99d5a0315..b57f59a90 100644
--- a/src/Native/Runtime/windows/PalRedhawkMinWin.cpp
+++ b/src/Native/Runtime/windows/PalRedhawkMinWin.cpp
@@ -330,6 +330,14 @@ REDHAWK_PALEXPORT _Success_(return) bool REDHAWK_PALAPI PalGetThreadContext(HAND
pCtx->R11 = win32ctx.R11;
pCtx->SP = win32ctx.Sp;
pCtx->LR = win32ctx.Lr;
+#elif defined(_ARM64_)
+ for (int i = 0; i < GEN_REG_COUNT; ++i) {
+ pCtx->X[i] = win32ctx.X[i];
+ }
+ pCtx->SP = win32ctx.Sp;
+ pCtx->LR = win32ctx.Lr;
+ pCtx->FP = win32ctx.Fp;
+ pCtx->IP = win32ctx.Pc;
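+        // GEN_REG_COUNT covers x0-x28 only; x29 (Fp), x30 (Lr), Sp and Pc travel in
+        // their own named fields, mirroring the GPRs layout in libunwind's Registers.hpp.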
#else
#error Unsupported platform
#endif
@@ -1063,7 +1071,8 @@ UInt32 CountBits(size_t bfBitfield)
// 'answers' between the current implementation and the CLR implementation.
//
//#define TRACE_CACHE_TOPOLOGY
-#ifdef _DEBUG
+#if defined(_DEBUG) && !defined(_ARM64_)
+// ARM64TODO: restore
void DumpCacheTopology(_In_reads_(cRecords) SYSTEM_LOGICAL_PROCESSOR_INFORMATION * pProcInfos, UInt32 cRecords)
{
printf("----------------\n");
@@ -1115,7 +1124,7 @@ void DumpCacheTopologyResults(UInt32 maxCpuId, CpuVendor cpuVendor, _In_reads_(c
printf(" g_cbLargestOnDieCache: 0x%08zx 0x%08zx :CLR_LargestOnDieCache(TRUE)\n", g_cbLargestOnDieCache, CLR_GetLargestOnDieCacheSize(TRUE, pProcInfos, cRecords));
printf("g_cbLargestOnDieCacheAdjusted: 0x%08zx 0x%08zx :CLR_LargestOnDieCache(FALSE)\n", g_cbLargestOnDieCacheAdjusted, CLR_GetLargestOnDieCacheSize(FALSE, pProcInfos, cRecords));
}
-#endif // _DEBUG
+#endif // defined(_DEBUG) && !defined(_ARM64_)
// Method used to initialize the above values.
bool PalQueryProcessorTopology()
@@ -1288,18 +1297,21 @@ bool PalQueryProcessorTopology()
g_cbLargestOnDieCache = cbCache;
g_cbLargestOnDieCacheAdjusted = cbCacheAdjusted;
-#ifdef _DEBUG
-#ifdef TRACE_CACHE_TOPOLOGY
+#if defined(_DEBUG)
+#if defined(TRACE_CACHE_TOPOLOGY) && !defined(_ARM64_)
+// ARM64TODO: restore
DumpCacheTopologyResults(maxCpuId, cpuVendor, pProcInfos, cRecords);
-#endif // TRACE_CACHE_TOPOLOGY
+#endif // defined(TRACE_CACHE_TOPOLOGY) && !defined(_ARM64_)
if ((CLR_GetLargestOnDieCacheSize(TRUE, pProcInfos, cRecords) != g_cbLargestOnDieCache) ||
(CLR_GetLargestOnDieCacheSize(FALSE, pProcInfos, cRecords) != g_cbLargestOnDieCacheAdjusted) ||
(CLR_GetLogicalCpuCount(pProcInfos, cRecords) != g_cLogicalCpus))
{
+#if !defined(_ARM64_)
DumpCacheTopologyResults(maxCpuId, cpuVendor, pProcInfos, cRecords);
+#endif
assert(!"QueryProcessorTopology doesn't match CLR's results. See stdout for more info.");
}
-#endif // TRACE_CACHE_TOPOLOGY
+#endif // defined(_DEBUG)
}
if (pProcInfos)
diff --git a/src/Native/libunwind/src/Registers.hpp b/src/Native/libunwind/src/Registers.hpp
index cca90092f..f15c4fa54 100644
--- a/src/Native/libunwind/src/Registers.hpp
+++ b/src/Native/libunwind/src/Registers.hpp
@@ -1195,8 +1195,12 @@ public:
void setFP(uint64_t value, uint64_t location) { _registers.__fp = value; _registerLocations.__fp = location; }
private:
+#if !defined(GEN_REG_COUNT)
+#define GEN_REG_COUNT 29
+#endif
+
struct GPRs {
- uint64_t __x[29]; // x0-x28
+ uint64_t __x[GEN_REG_COUNT]; // x0-x28
uint64_t __fp; // Frame pointer x29
uint64_t __lr; // Link register x30
uint64_t __sp; // Stack pointer x31
@@ -1205,7 +1209,7 @@ private:
};
struct GPRLocations {
- uint64_t __x[29]; // x0-x28
+ uint64_t __x[GEN_REG_COUNT]; // x0-x28
uint64_t __fp; // Frame pointer x29
uint64_t __lr; // Link register x30
uint64_t __sp; // Stack pointer x31