github.com/mono/corert.git

author    Manu <manu-silicon@users.noreply.github.com>  2015-12-02 12:01:47 +0300
committer Manu <manu-silicon@users.noreply.github.com>  2015-12-04 05:53:56 +0300
commit    f89a90aa7f1aeefe69fbf8913c66b795ab7ddff7 (patch)
tree      130f7faf9505713c0875ff3a3433b895a6f6c072 /src/Native
parent    7721f2896d21166e85df3b05251b094e324e5ef7 (diff)
Enabled native compilation on ARM64
Enabled enough code to get a clean compilation on ARM64. Currently nothing is really implemented: we either use the PORTABILITY_ASSERT macro or leave a @TODO comment at places that still need work.
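As an illustration of that pattern (a minimal sketch, not code from the patch: PORTABILITY_ASSERT is re-implemented here so the snippet is self-contained, and one of the _TARGET_* macros must be supplied at compile time):

    #include <cstdio>
    #include <cstdlib>
    #include <cstddef>

    // Stand-in for the runtime's PORTABILITY_ASSERT macro (assumption: the
    // real macro reports the message and halts; this sketch simply aborts).
    #define PORTABILITY_ASSERT(msg) \
        do { std::fprintf(stderr, "%s\n", msg); std::abort(); } while (0)

    // Typical shape of a per-architecture path after this change: ARM64 gets
    // a branch that compiles cleanly but fails loudly if ever executed.
    size_t GetStackAlignSize()
    {
    #if defined(_TARGET_AMD64_)
        return 16;
    #elif defined(_TARGET_ARM_)
        return 8;
    #elif defined(_TARGET_ARM64_)
        PORTABILITY_ASSERT("@TODO: FIXME:ARM64"); // not implemented yet
    #else
    #error Unsupported architecture
    #endif
    }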
Diffstat (limited to 'src/Native')
-rw-r--r--  src/Native/Runtime/AsmOffsets.h              | 300
-rw-r--r--  src/Native/Runtime/CommonMacros.h            |  16
-rw-r--r--  src/Native/Runtime/EHHelpers.cpp             |   5
-rw-r--r--  src/Native/Runtime/MiscHelpers.cpp           |   2
-rw-r--r--  src/Native/Runtime/PalRedhawk.h              | 101
-rw-r--r--  src/Native/Runtime/PalRedhawkCommon.h        |   9
-rw-r--r--  src/Native/Runtime/RHCodeMan.cpp             |  77
-rw-r--r--  src/Native/Runtime/StackFrameIterator.cpp    |  36
-rw-r--r--  src/Native/Runtime/inc/TargetPtrs.h          |   6
-rw-r--r--  src/Native/Runtime/regdisplay.h              |  56
-rw-r--r--  src/Native/Runtime/thread.h                  |   2
-rw-r--r--  src/Native/Runtime/unix/PalRedhawkUnix.cpp   |   1
12 files changed, 453 insertions(+), 158 deletions(-)
diff --git a/src/Native/Runtime/AsmOffsets.h b/src/Native/Runtime/AsmOffsets.h
index 2020c8e85..b13e19be5 100644
--- a/src/Native/Runtime/AsmOffsets.h
+++ b/src/Native/Runtime/AsmOffsets.h
@@ -11,17 +11,21 @@
//
#if defined(_X86_)
-#define ASM_OFFSET(x86_offset, arm_offset, amd64_offset, cls, member) PLAT_ASM_OFFSET(x86_offset, cls, member)
-#define ASM_SIZEOF(x86_offset, arm_offset, amd64_offset, cls ) PLAT_ASM_SIZEOF(x86_offset, cls)
-#define ASM_CONST(x86_const, arm_const, amd64_const, expr) PLAT_ASM_CONST(x86_const, expr)
+#define ASM_OFFSET(x86_offset, arm_offset, amd64_offset, arm64_offset, cls, member) PLAT_ASM_OFFSET(x86_offset, cls, member)
+#define ASM_SIZEOF(x86_offset, arm_offset, amd64_offset, arm64_offset, cls ) PLAT_ASM_SIZEOF(x86_offset, cls)
+#define ASM_CONST(x86_const, arm_const, amd64_const, arm64_const, expr) PLAT_ASM_CONST(x86_const, expr)
#elif defined(_AMD64_)
-#define ASM_OFFSET(x86_offset, arm_offset, amd64_offset, cls, member) PLAT_ASM_OFFSET(amd64_offset, cls, member)
-#define ASM_SIZEOF(x86_offset, arm_offset, amd64_offset, cls ) PLAT_ASM_SIZEOF(amd64_offset, cls)
-#define ASM_CONST(x86_const, arm_const, amd64_const, expr) PLAT_ASM_CONST(amd64_const, expr)
+#define ASM_OFFSET(x86_offset, arm_offset, amd64_offset, arm64_offset, cls, member) PLAT_ASM_OFFSET(amd64_offset, cls, member)
+#define ASM_SIZEOF(x86_offset, arm_offset, amd64_offset, arm64_offset, cls ) PLAT_ASM_SIZEOF(amd64_offset, cls)
+#define ASM_CONST(x86_const, arm_const, amd64_const, arm64_const, expr) PLAT_ASM_CONST(amd64_const, expr)
#elif defined(_ARM_)
-#define ASM_OFFSET(x86_offset, arm_offset, amd64_offset, cls, member) PLAT_ASM_OFFSET(arm_offset, cls, member)
-#define ASM_SIZEOF(x86_offset, arm_offset, amd64_offset, cls ) PLAT_ASM_SIZEOF(arm_offset, cls)
-#define ASM_CONST(x86_const, arm_const, amd64_const, expr) PLAT_ASM_CONST(arm_const, expr)
+#define ASM_OFFSET(x86_offset, arm_offset, amd64_offset, arm64_offset, cls, member) PLAT_ASM_OFFSET(arm_offset, cls, member)
+#define ASM_SIZEOF(x86_offset, arm_offset, amd64_offset, arm64_offset, cls ) PLAT_ASM_SIZEOF(arm_offset, cls)
+#define ASM_CONST(x86_const, arm_const, amd64_const, arm64_const, expr) PLAT_ASM_CONST(arm_const, expr)
+#elif defined(_ARM64_)
+#define ASM_OFFSET(x86_offset, arm_offset, amd64_offset, arm64_offset, cls, member) PLAT_ASM_OFFSET(arm64_offset, cls, member)
+#define ASM_SIZEOF(x86_offset, arm_offset, amd64_offset, arm64_offset, cls ) PLAT_ASM_SIZEOF(arm64_offset, cls)
+#define ASM_CONST(x86_const, arm_const, amd64_const, arm64_const, expr) PLAT_ASM_CONST(arm64_const, expr)
#else
#error unknown architecture
#endif
@@ -29,140 +33,150 @@
//
// NOTE: the offsets MUST be in hex notation WITHOUT the 0x prefix
//
-// x86, arm,amd64, constant symbol
-ASM_CONST(14c08,14c08,14c08, RH_LARGE_OBJECT_SIZE)
-ASM_CONST( 400, 400, 800, CLUMP_SIZE)
-ASM_CONST( a, a, b, LOG2_CLUMP_SIZE)
-
-// x86, arm,amd64, class, member
-
-ASM_OFFSET( 0, 0, 0, Object, m_pEEType)
-
-ASM_OFFSET( 4, 4, 8, Array, m_Length)
-
-ASM_OFFSET( 0, 0, 0, EEType, m_usComponentSize)
-ASM_OFFSET( 2, 2, 2, EEType, m_usFlags)
-ASM_OFFSET( 4, 4, 4, EEType, m_uBaseSize)
-ASM_OFFSET( 14, 14, 18, EEType, m_VTable)
-
-ASM_OFFSET( 0, 0, 0, Thread, m_rgbAllocContextBuffer)
-ASM_OFFSET( 1c, 1c, 28, Thread, m_ThreadStateFlags)
-ASM_OFFSET( 20, 20, 30, Thread, m_pTransitionFrame)
-ASM_OFFSET( 24, 24, 38, Thread, m_pHackPInvokeTunnel)
-ASM_OFFSET( 34, 34, 58, Thread, m_ppvHijackedReturnAddressLocation)
-ASM_OFFSET( 38, 38, 60, Thread, m_pvHijackedReturnAddress)
-ASM_OFFSET( 3c, 3c, 68, Thread, m_pExInfoStackHead)
-
-ASM_SIZEOF( 14, 14, 20, EHEnum)
-
-ASM_SIZEOF( b0, 128, 250, ExInfo)
-ASM_OFFSET( 0, 0, 0, ExInfo, m_pPrevExInfo)
-ASM_OFFSET( 4, 4, 8, ExInfo, m_pExContext)
-ASM_OFFSET( 8, 8, 10, ExInfo, m_exception)
-ASM_OFFSET( 0c, 0c, 18, ExInfo, m_kind)
-ASM_OFFSET( 0d, 0d, 19, ExInfo, m_passNumber)
-ASM_OFFSET( 10, 10, 1c, ExInfo, m_idxCurClause)
-ASM_OFFSET( 14, 18, 20, ExInfo, m_frameIter)
-ASM_OFFSET( ac, 120, 240, ExInfo, m_notifyDebuggerSP)
-
-ASM_OFFSET( 0, 0, 0, alloc_context, alloc_ptr)
-ASM_OFFSET( 4, 4, 8, alloc_context, alloc_limit)
-
-
-ASM_OFFSET( 4, 4, 8, RuntimeInstance, m_pThreadStore)
-
-ASM_OFFSET( 0, 4, 0, PInvokeTransitionFrame, m_RIP)
-ASM_OFFSET( 4, 8, 8, PInvokeTransitionFrame, m_FramePointer)
-ASM_OFFSET( 8, 0C, 10, PInvokeTransitionFrame, m_pThread)
-ASM_OFFSET( 0C, 10, 18, PInvokeTransitionFrame, m_dwFlags)
-ASM_OFFSET( 10, 14, 20, PInvokeTransitionFrame, m_PreservedRegs)
-
-ASM_SIZEOF( 98, 108, 220, StackFrameIterator)
-ASM_OFFSET( 08, 08, 10, StackFrameIterator, m_FramePointer)
-ASM_OFFSET( 0C, 0C, 18, StackFrameIterator, m_ControlPC)
-ASM_OFFSET( 10, 10, 20, StackFrameIterator, m_RegDisplay)
-
-ASM_SIZEOF( 1c, 70, 100, PAL_LIMITED_CONTEXT)
-ASM_OFFSET( 0, 24, 0, PAL_LIMITED_CONTEXT, IP)
-#ifdef _ARM_
-ASM_OFFSET( 0, 0, 0, PAL_LIMITED_CONTEXT, R0)
-ASM_OFFSET( 0, 4, 0, PAL_LIMITED_CONTEXT, R4)
-ASM_OFFSET( 0, 8, 0, PAL_LIMITED_CONTEXT, R5)
-ASM_OFFSET( 0, 0c, 0, PAL_LIMITED_CONTEXT, R6)
-ASM_OFFSET( 0, 10, 0, PAL_LIMITED_CONTEXT, R7)
-ASM_OFFSET( 0, 14, 0, PAL_LIMITED_CONTEXT, R8)
-ASM_OFFSET( 0, 18, 0, PAL_LIMITED_CONTEXT, R9)
-ASM_OFFSET( 0, 1c, 0, PAL_LIMITED_CONTEXT, R10)
-ASM_OFFSET( 0, 20, 0, PAL_LIMITED_CONTEXT, R11)
-ASM_OFFSET( 0, 28, 0, PAL_LIMITED_CONTEXT, SP)
-ASM_OFFSET( 0, 2c, 0, PAL_LIMITED_CONTEXT, LR)
-#else // _ARM_
-ASM_OFFSET( 4, 0, 8, PAL_LIMITED_CONTEXT, Rsp)
-ASM_OFFSET( 8, 0, 10, PAL_LIMITED_CONTEXT, Rbp)
-ASM_OFFSET( 0c, 0, 18, PAL_LIMITED_CONTEXT, Rdi)
-ASM_OFFSET( 10, 0, 20, PAL_LIMITED_CONTEXT, Rsi)
-ASM_OFFSET( 14, 0, 28, PAL_LIMITED_CONTEXT, Rax)
-ASM_OFFSET( 18, 0, 30, PAL_LIMITED_CONTEXT, Rbx)
-#ifdef _AMD64_
-ASM_OFFSET( 0, 0, 38, PAL_LIMITED_CONTEXT, R12)
-ASM_OFFSET( 0, 0, 40, PAL_LIMITED_CONTEXT, R13)
-ASM_OFFSET( 0, 0, 48, PAL_LIMITED_CONTEXT, R14)
-ASM_OFFSET( 0, 0, 50, PAL_LIMITED_CONTEXT, R15)
-ASM_OFFSET( 0, 0, 60, PAL_LIMITED_CONTEXT, Xmm6)
-ASM_OFFSET( 0, 0, 70, PAL_LIMITED_CONTEXT, Xmm7)
-ASM_OFFSET( 0, 0, 80, PAL_LIMITED_CONTEXT, Xmm8)
-ASM_OFFSET( 0, 0, 90, PAL_LIMITED_CONTEXT, Xmm9)
-ASM_OFFSET( 0, 0, 0a0, PAL_LIMITED_CONTEXT, Xmm10)
-ASM_OFFSET( 0, 0, 0b0, PAL_LIMITED_CONTEXT, Xmm11)
-ASM_OFFSET( 0, 0, 0c0, PAL_LIMITED_CONTEXT, Xmm12)
-ASM_OFFSET( 0, 0, 0d0, PAL_LIMITED_CONTEXT, Xmm13)
-ASM_OFFSET( 0, 0, 0e0, PAL_LIMITED_CONTEXT, Xmm14)
-ASM_OFFSET( 0, 0, 0f0, PAL_LIMITED_CONTEXT, Xmm15)
-#endif // _AMD64_
-#endif // _ARM_
-
-ASM_SIZEOF( 28, 88, 130, REGDISPLAY)
-ASM_OFFSET( 1c, 38, 78, REGDISPLAY, SP)
-#ifdef _ARM_
-ASM_OFFSET( 0, 10, 0, REGDISPLAY, pR4)
-ASM_OFFSET( 0, 14, 0, REGDISPLAY, pR5)
-ASM_OFFSET( 0, 18, 0, REGDISPLAY, pR6)
-ASM_OFFSET( 0, 1c, 0, REGDISPLAY, pR7)
-ASM_OFFSET( 0, 20, 0, REGDISPLAY, pR8)
-ASM_OFFSET( 0, 24, 0, REGDISPLAY, pR9)
-ASM_OFFSET( 0, 28, 0, REGDISPLAY, pR10)
-ASM_OFFSET( 0, 2c, 0, REGDISPLAY, pR11)
-ASM_OFFSET( 0, 48, 0, REGDISPLAY, D)
-#else // _ARM_
-ASM_OFFSET( 0c, 0, 18, REGDISPLAY, pRbx)
-ASM_OFFSET( 10, 0, 20, REGDISPLAY, pRbp)
-ASM_OFFSET( 14, 0, 28, REGDISPLAY, pRsi)
-ASM_OFFSET( 18, 0, 30, REGDISPLAY, pRdi)
-#ifdef _AMD64_
-ASM_OFFSET( 0, 0, 58, REGDISPLAY, pR12)
-ASM_OFFSET( 0, 0, 60, REGDISPLAY, pR13)
-ASM_OFFSET( 0, 0, 68, REGDISPLAY, pR14)
-ASM_OFFSET( 0, 0, 70, REGDISPLAY, pR15)
-ASM_OFFSET( 0, 0, 90, REGDISPLAY, Xmm)
-#endif // _AMD64_
-#endif // _ARM_
-
+// x86, arm,amd64, arm64, constant symbol
+ASM_CONST(14c08,14c08,14c08, 14c08, RH_LARGE_OBJECT_SIZE)
+ASM_CONST( 400, 400, 800, 800, CLUMP_SIZE)
+ASM_CONST( a, a, b, b, LOG2_CLUMP_SIZE)
+
+// x86, arm,amd64, arm64, class, member
+
+ASM_OFFSET( 0, 0, 0, 0, Object, m_pEEType)
+
+ASM_OFFSET( 4, 4, 8, 8, Array, m_Length)
+
+ASM_OFFSET( 0, 0, 0, 0, EEType, m_usComponentSize)
+ASM_OFFSET( 2, 2, 2, 2, EEType, m_usFlags)
+ASM_OFFSET( 4, 4, 4, 4, EEType, m_uBaseSize)
+ASM_OFFSET( 14, 14, 18, 18, EEType, m_VTable)
+
+ASM_OFFSET( 0, 0, 0, 0, Thread, m_rgbAllocContextBuffer)
+ASM_OFFSET( 1c, 1c, 28, 28, Thread, m_ThreadStateFlags)
+ASM_OFFSET( 20, 20, 30, 30, Thread, m_pTransitionFrame)
+ASM_OFFSET( 24, 24, 38, 38, Thread, m_pHackPInvokeTunnel)
+ASM_OFFSET( 34, 34, 58, 58, Thread, m_ppvHijackedReturnAddressLocation)
+ASM_OFFSET( 38, 38, 60, 60, Thread, m_pvHijackedReturnAddress)
+ASM_OFFSET( 3c, 3c, 68, 68, Thread, m_pExInfoStackHead)
+
+ASM_SIZEOF( 14, 14, 20, 20, EHEnum)
+
+ASM_SIZEOF( b0, 128, 250, 240, ExInfo)
+ASM_OFFSET( 0, 0, 0, 0, ExInfo, m_pPrevExInfo)
+ASM_OFFSET( 4, 4, 8, 8, ExInfo, m_pExContext)
+ASM_OFFSET( 8, 8, 10, 10, ExInfo, m_exception)
+ASM_OFFSET( 0c, 0c, 18, 18, ExInfo, m_kind)
+ASM_OFFSET( 0d, 0d, 19, 19, ExInfo, m_passNumber)
+ASM_OFFSET( 10, 10, 1c, 1c, ExInfo, m_idxCurClause)
+ASM_OFFSET( 14, 18, 20, 20, ExInfo, m_frameIter)
+ASM_OFFSET( ac, 120, 240, 238, ExInfo, m_notifyDebuggerSP)
+
+ASM_OFFSET( 0, 0, 0, 0, alloc_context, alloc_ptr)
+ASM_OFFSET( 4, 4, 8, 8, alloc_context, alloc_limit)
+
+
+ASM_OFFSET( 4, 4, 8, 8, RuntimeInstance, m_pThreadStore)
+
+ASM_OFFSET( 0, 4, 0, 0, PInvokeTransitionFrame, m_RIP)
+ASM_OFFSET( 4, 8, 8, 8, PInvokeTransitionFrame, m_FramePointer)
+ASM_OFFSET( 8, 0C, 10, 10, PInvokeTransitionFrame, m_pThread)
+ASM_OFFSET( 0C, 10, 18, 18, PInvokeTransitionFrame, m_dwFlags)
+ASM_OFFSET( 10, 14, 20, 20, PInvokeTransitionFrame, m_PreservedRegs)
+
+ASM_SIZEOF( 98, 108, 220, 218, StackFrameIterator)
+ASM_OFFSET( 08, 08, 10, 10, StackFrameIterator, m_FramePointer)
+ASM_OFFSET( 0C, 0C, 18, 18, StackFrameIterator, m_ControlPC)
+ASM_OFFSET( 10, 10, 20, 20, StackFrameIterator, m_RegDisplay)
+
+ASM_SIZEOF( 1c, 70, 100, 8, PAL_LIMITED_CONTEXT)
+ASM_OFFSET( 0, 24, 0, 0, PAL_LIMITED_CONTEXT, IP)
+
+#ifdef _ARM_
+ASM_OFFSET( 0, 0, 0, 0, PAL_LIMITED_CONTEXT, R0)
+ASM_OFFSET( 0, 4, 0, 0, PAL_LIMITED_CONTEXT, R4)
+ASM_OFFSET( 0, 8, 0, 0, PAL_LIMITED_CONTEXT, R5)
+ASM_OFFSET( 0, 0c, 0, 0, PAL_LIMITED_CONTEXT, R6)
+ASM_OFFSET( 0, 10, 0, 0, PAL_LIMITED_CONTEXT, R7)
+ASM_OFFSET( 0, 14, 0, 0, PAL_LIMITED_CONTEXT, R8)
+ASM_OFFSET( 0, 18, 0, 0, PAL_LIMITED_CONTEXT, R9)
+ASM_OFFSET( 0, 1c, 0, 0, PAL_LIMITED_CONTEXT, R10)
+ASM_OFFSET( 0, 20, 0, 0, PAL_LIMITED_CONTEXT, R11)
+ASM_OFFSET( 0, 28, 0, 0, PAL_LIMITED_CONTEXT, SP)
+ASM_OFFSET( 0, 2c, 0, 0, PAL_LIMITED_CONTEXT, LR)
+
+#elif defined _ARM64_
+// @TODO: Add ARM64 entries
+
+#else // _ARM64_
+
+ASM_OFFSET( 4, 0, 8, 8, PAL_LIMITED_CONTEXT, Rsp)
+ASM_OFFSET( 8, 0, 10, 10, PAL_LIMITED_CONTEXT, Rbp)
+ASM_OFFSET( 0c, 0, 18, 18, PAL_LIMITED_CONTEXT, Rdi)
+ASM_OFFSET( 10, 0, 20, 20, PAL_LIMITED_CONTEXT, Rsi)
+ASM_OFFSET( 14, 0, 28, 28, PAL_LIMITED_CONTEXT, Rax)
+ASM_OFFSET( 18, 0, 30, 30, PAL_LIMITED_CONTEXT, Rbx)
+#ifdef _AMD64_
+ASM_OFFSET( 0, 0, 38, 38, PAL_LIMITED_CONTEXT, R12)
+ASM_OFFSET( 0, 0, 40, 40, PAL_LIMITED_CONTEXT, R13)
+ASM_OFFSET( 0, 0, 48, 48, PAL_LIMITED_CONTEXT, R14)
+ASM_OFFSET( 0, 0, 50, 50, PAL_LIMITED_CONTEXT, R15)
+ASM_OFFSET( 0, 0, 60, 60, PAL_LIMITED_CONTEXT, Xmm6)
+ASM_OFFSET( 0, 0, 70, 70, PAL_LIMITED_CONTEXT, Xmm7)
+ASM_OFFSET( 0, 0, 80, 80, PAL_LIMITED_CONTEXT, Xmm8)
+ASM_OFFSET( 0, 0, 90, 90, PAL_LIMITED_CONTEXT, Xmm9)
+ASM_OFFSET( 0, 0, 0a0, 0a0, PAL_LIMITED_CONTEXT, Xmm10)
+ASM_OFFSET( 0, 0, 0b0, 0b0, PAL_LIMITED_CONTEXT, Xmm11)
+ASM_OFFSET( 0, 0, 0c0, 0c0, PAL_LIMITED_CONTEXT, Xmm12)
+ASM_OFFSET( 0, 0, 0d0, 0d0, PAL_LIMITED_CONTEXT, Xmm13)
+ASM_OFFSET( 0, 0, 0e0, 0e0, PAL_LIMITED_CONTEXT, Xmm14)
+ASM_OFFSET( 0, 0, 0f0, 0f0, PAL_LIMITED_CONTEXT, Xmm15)
+#endif // _AMD64_
+#endif // _ARM_
+
+ASM_SIZEOF( 28, 88, 130, 150, REGDISPLAY)
+ASM_OFFSET( 1c, 38, 78, f8, REGDISPLAY, SP)
+
+#ifdef _ARM_
+ASM_OFFSET( 0, 10, 0, 0, REGDISPLAY, pR4)
+ASM_OFFSET( 0, 14, 0, 0, REGDISPLAY, pR5)
+ASM_OFFSET( 0, 18, 0, 0, REGDISPLAY, pR6)
+ASM_OFFSET( 0, 1c, 0, 0, REGDISPLAY, pR7)
+ASM_OFFSET( 0, 20, 0, 0, REGDISPLAY, pR8)
+ASM_OFFSET( 0, 24, 0, 0, REGDISPLAY, pR9)
+ASM_OFFSET( 0, 28, 0, 0, REGDISPLAY, pR10)
+ASM_OFFSET( 0, 2c, 0, 0, REGDISPLAY, pR11)
+ASM_OFFSET( 0, 48, 0, 0, REGDISPLAY, D)
+#elif _ARM64_
+// @TODO: Add ARM64 entries
+
+#else // _ARM64_
+ASM_OFFSET( 0c, 0, 18, 18, REGDISPLAY, pRbx)
+ASM_OFFSET( 10, 0, 20, 20, REGDISPLAY, pRbp)
+ASM_OFFSET( 14, 0, 28, 28, REGDISPLAY, pRsi)
+ASM_OFFSET( 18, 0, 30, 30, REGDISPLAY, pRdi)
+#ifdef _AMD64_
+ASM_OFFSET( 0, 0, 58, 58, REGDISPLAY, pR12)
+ASM_OFFSET( 0, 0, 60, 60, REGDISPLAY, pR13)
+ASM_OFFSET( 0, 0, 68, 68, REGDISPLAY, pR14)
+ASM_OFFSET( 0, 0, 70, 70, REGDISPLAY, pR15)
+ASM_OFFSET( 0, 0, 90, 90, REGDISPLAY, Xmm)
+#endif // _AMD64_
+#endif // _ARM_
+
#ifdef FEATURE_CACHED_INTERFACE_DISPATCH
-ASM_OFFSET( 4, 4, 8, InterfaceDispatchCell, m_pCache)
-#ifndef _AMD64_
-ASM_OFFSET( 8, 8, 10, InterfaceDispatchCache, m_pCell)
-#endif
-ASM_OFFSET( 10, 10, 20, InterfaceDispatchCache, m_rgEntries)
-#endif
-
-ASM_OFFSET( 4, 4, 8, StaticClassConstructionContext, m_initialized)
-
+ASM_OFFSET( 4, 4, 8, 8, InterfaceDispatchCell, m_pCache)
+#ifndef _AMD64_
+ASM_OFFSET( 8, 8, 10, 10, InterfaceDispatchCache, m_pCell)
+#endif
+ASM_OFFSET( 10, 10, 20, 20, InterfaceDispatchCache, m_rgEntries)
+#endif
+
+ASM_OFFSET( 4, 4, 8, 8, StaticClassConstructionContext, m_initialized)
+
#ifdef FEATURE_DYNAMIC_CODE
-ASM_OFFSET( 0, 0, 0, CallDescrData, pSrc)
-ASM_OFFSET( 4, 4, 8, CallDescrData, numStackSlots)
-ASM_OFFSET( 8, 8, C, CallDescrData, fpReturnSize)
-ASM_OFFSET( C, C, 10, CallDescrData, pArgumentRegisters)
-ASM_OFFSET( 10, 10, 18, CallDescrData, pFloatArgumentRegisters)
-ASM_OFFSET( 14, 14, 20, CallDescrData, pTarget)
-ASM_OFFSET( 18, 18, 28, CallDescrData, pReturnBuffer)
+ASM_OFFSET( 0, 0, 0, 0, CallDescrData, pSrc)
+ASM_OFFSET( 4, 4, 8, 8, CallDescrData, numStackSlots)
+ASM_OFFSET( 8, 8, C, C, CallDescrData, fpReturnSize)
+ASM_OFFSET( C, C, 10, 10, CallDescrData, pArgumentRegisters)
+ASM_OFFSET( 10, 10, 18, 18, CallDescrData, pFloatArgumentRegisters)
+ASM_OFFSET( 14, 14, 20, 20, CallDescrData, pTarget)
+ASM_OFFSET( 18, 18, 28, 28, CallDescrData, pReturnBuffer)
#endif
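A note on the mechanics of the table above: every row carries one hex offset per architecture (without the 0x prefix, as the header insists), and the #if chain at the top of the file selects the column for the current target. The sketch below shows how a verification build could consume a row; mapping PLAT_ASM_OFFSET onto a static_assert is an assumption for illustration, as is the simplified Array type.

    #include <cstddef>

    struct Array { void* m_pEEType; unsigned m_Length; }; // simplified stand-in

    // Hypothetical verification mapping: paste the bare hex digits onto "0x"
    // and compare them with the real layout at compile time.
    #define PLAT_ASM_OFFSET(offset, cls, member) \
        static_assert(offsetof(cls, member) == 0x##offset, \
                      #cls "::" #member " offset mismatch");

    // On an ARM64 build the fourth column is selected, as in the patched header.
    #define ASM_OFFSET(x86_offset, arm_offset, amd64_offset, arm64_offset, cls, member) \
        PLAT_ASM_OFFSET(arm64_offset, cls, member)

    ASM_OFFSET( 4, 4, 8, 8, Array, m_Length) // 0x8 matches an LP64 layout

On an x86 build the same row would instead expand with its first column (4), matching the 32-bit layout.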
diff --git a/src/Native/Runtime/CommonMacros.h b/src/Native/Runtime/CommonMacros.h
index e41ec3f62..da619bd24 100644
--- a/src/Native/Runtime/CommonMacros.h
+++ b/src/Native/Runtime/CommonMacros.h
@@ -7,6 +7,7 @@
#if defined(_TARGET_AMD64_)
#elif defined(_TARGET_X86_)
#elif defined(_TARGET_ARM_)
+#elif defined(_TARGET_ARM64_)
#else
#error Unsupported architecture
#endif
@@ -98,22 +99,24 @@ EXTERN_C int __cdecl memcmp(const void *,const void *,size_t);
#if defined(_AMD64_)
-#define VIRTUAL_ALLOC_RESERVE_GRANULARITY (64*1024) // 0x10000 (64 KB)
#define LOG2_PTRSIZE 3
#define POINTER_SIZE 8
#elif defined(_X86_)
-#define VIRTUAL_ALLOC_RESERVE_GRANULARITY (64*1024) // 0x10000 (64 KB)
#define LOG2_PTRSIZE 2
#define POINTER_SIZE 4
#elif defined(_ARM_)
-#define VIRTUAL_ALLOC_RESERVE_GRANULARITY (64*1024) // 0x10000 (64 KB)
#define LOG2_PTRSIZE 2
#define POINTER_SIZE 4
+#elif defined(_ARM64_)
+
+#define LOG2_PTRSIZE 3
+#define POINTER_SIZE 8
+
#else
#error Unsupported target architecture
#endif
@@ -138,6 +141,13 @@ EXTERN_C int __cdecl memcmp(const void *,const void *,size_t);
#define OS_PAGE_SIZE 0x1000
#endif
+#elif defined(_ARM64_)
+
+#define DATA_ALIGNMENT 8
+#ifndef OS_PAGE_SIZE
+#define OS_PAGE_SIZE 0x1000
+#endif
+
#else
#error Unsupported target architecture
#endif
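The values in the new ARM64 block are interdependent, and the relationship is cheap to pin down at compile time (a sketch assuming the names match CommonMacros.h; the sizeof check only holds when the compiler itself targets a 64-bit platform):

    #define LOG2_PTRSIZE 3
    #define POINTER_SIZE 8

    static_assert(POINTER_SIZE == (1 << LOG2_PTRSIZE),
                  "POINTER_SIZE must equal 2^LOG2_PTRSIZE");
    static_assert(sizeof(void*) == POINTER_SIZE,
                  "only true when actually compiling for a 64-bit target");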
diff --git a/src/Native/Runtime/EHHelpers.cpp b/src/Native/Runtime/EHHelpers.cpp
index 7bd33d4d8..d83c1282e 100644
--- a/src/Native/Runtime/EHHelpers.cpp
+++ b/src/Native/Runtime/EHHelpers.cpp
@@ -223,6 +223,8 @@ COOP_PINVOKE_HELPER(void, RhpCopyContextFromExInfo,
pContext->Sp = pPalContext->SP;
pContext->Lr = pPalContext->LR;
pContext->Pc = pPalContext->IP;
+#elif defined(_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#else
#error Not Implemented for this architecture -- RhpCopyContextFromExInfo
#endif
@@ -376,6 +378,9 @@ static UIntNative UnwindWriteBarrierToCaller(_CONTEXT * pContext)
pContext->SetSP(sp+sizeof(UIntNative)); // pop the stack
#elif defined(_ARM_)
UIntNative adjustedFaultingIP = pContext->GetLR() - 2; // bl instruction will be 4 bytes - act as if start of call instruction + 2 were the faulting IP
+#elif defined(_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+ UIntNative adjustedFaultingIP = -1;
#else
#error "Unknown Architecture"
#endif
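The ARM64 branch above parks adjustedFaultingIP at -1 behind the assert. For what the value would plausibly become (an inference from the architecture, not from this patch): A64 instructions are a fixed 4 bytes, so the analogue of ARM's LR - 2 is LR - 4, landing inside the BL that made the call.

    #include <cstdint>
    typedef uintptr_t UIntNative; // stand-in for the runtime's typedef

    // Hypothetical ARM64 completion of the IP adjustment in
    // UnwindWriteBarrierToCaller: every AArch64 instruction is 4 bytes.
    static inline UIntNative AdjustedFaultingIPArm64(UIntNative lr)
    {
        return lr - 4;
    }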
diff --git a/src/Native/Runtime/MiscHelpers.cpp b/src/Native/Runtime/MiscHelpers.cpp
index de1333dec..a9f79f7e6 100644
--- a/src/Native/Runtime/MiscHelpers.cpp
+++ b/src/Native/Runtime/MiscHelpers.cpp
@@ -457,6 +457,8 @@ COOP_PINVOKE_HELPER(UInt8 *, RhGetCodeTarget, (UInt8 * pCodeOrg))
UInt8 * pTarget = (UInt8 *)(pCode + 2) + distToTarget + THUMB_BIT;
return (UInt8 *)pTarget;
}
+#elif _TARGET_ARM64_
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#else
#error 'Unsupported Architecture'
#endif
diff --git a/src/Native/Runtime/PalRedhawk.h b/src/Native/Runtime/PalRedhawk.h
index c06bbedc4..e4bdea96b 100644
--- a/src/Native/Runtime/PalRedhawk.h
+++ b/src/Native/Runtime/PalRedhawk.h
@@ -286,9 +286,9 @@ typedef struct DECLSPEC_ALIGN(8) _CONTEXT {
UInt32 R10;
UInt32 R11;
UInt32 R12;
- UInt32 Sp;
- UInt32 Lr;
- UInt32 Pc;
+ UInt32 Sp; // R13
+ UInt32 Lr; // R14
+ UInt32 Pc; // R15
UInt32 Cpsr;
UInt32 Fpscr;
UInt32 Padding;
@@ -363,6 +363,86 @@ typedef struct _CONTEXT {
} CONTEXT, *PCONTEXT;
#include "poppack.h"
+#elif defined(_ARM64_)
+
+// Specify the number of breakpoints and watchpoints that the OS
+// will track. Architecturally, ARM64 supports up to 16. In practice,
+// however, almost no one implements more than 4 of each.
+
+#define ARM64_MAX_BREAKPOINTS 8
+#define ARM64_MAX_WATCHPOINTS 2
+
+typedef struct _NEON128 {
+ UInt64 Low;
+ Int64 High;
+} NEON128, *PNEON128;
+
+typedef struct DECLSPEC_ALIGN(16) _CONTEXT {
+ //
+ // Control flags.
+ //
+ UInt32 ContextFlags;
+
+ //
+ // Integer registers
+ //
+ UInt32 Cpsr; // NZVF + DAIF + CurrentEL + SPSel
+ UInt64 X0;
+ UInt64 X1;
+ UInt64 X2;
+ UInt64 X3;
+ UInt64 X4;
+ UInt64 X5;
+ UInt64 X6;
+ UInt64 X7;
+ UInt64 X8;
+ UInt64 X9;
+ UInt64 X10;
+ UInt64 X11;
+ UInt64 X12;
+ UInt64 X13;
+ UInt64 X14;
+ UInt64 X15;
+ UInt64 X16;
+ UInt64 X17;
+ UInt64 X18;
+ UInt64 X19;
+ UInt64 X20;
+ UInt64 X21;
+ UInt64 X22;
+ UInt64 X23;
+ UInt64 X24;
+ UInt64 X25;
+ UInt64 X26;
+ UInt64 X27;
+ UInt64 X28;
+ UInt64 Fp; // X29
+ UInt64 Lr; // X30
+ UInt64 Sp;
+ UInt64 Pc;
+
+ //
+ // Floating Point/NEON Registers
+ //
+ NEON128 V[32];
+ UInt32 Fpcr;
+ UInt32 Fpsr;
+
+ //
+ // Debug registers
+ //
+ UInt32 Bcr[ARM64_MAX_BREAKPOINTS];
+ UInt64 Bvr[ARM64_MAX_BREAKPOINTS];
+ UInt32 Wcr[ARM64_MAX_WATCHPOINTS];
+ UInt64 Wvr[ARM64_MAX_WATCHPOINTS];
+
+ void SetIP(UIntNative ip) { Pc = ip; }
+ void SetArg0Reg(UIntNative val) { X0 = val; }
+ void SetArg1Reg(UIntNative val) { X1 = val; }
+ UIntNative GetIP() { return Pc; }
+ UIntNative GetLR() { return Lr; }
+} CONTEXT, *PCONTEXT;
+
#endif
@@ -633,6 +713,9 @@ EXTERN_C unsigned __int64 __readgsqword(unsigned long Offset);
#elif defined(_ARM_)
EXTERN_C unsigned int _MoveFromCoprocessor(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int);
#pragma intrinsic(_MoveFromCoprocessor)
+#elif defined(_ARM64_)
+EXTERN_C unsigned __int64 __getReg(int);
+#pragma intrinsic(__getReg)
#else
#error Unsupported architecture
#endif
@@ -646,6 +729,9 @@ inline UInt8 * PalNtCurrentTeb()
return (UInt8*)__readgsqword(0x30);
#elif defined(_ARM_)
return (UInt8*)_MoveFromCoprocessor(15, 0, 13, 0, 2);
+#elif defined(_ARM64_)
+ // The calling convention recommends using X18 for storing the TEB
+ return (UInt8*)__getReg(18);
#else
#error Unsupported architecture
#endif
@@ -656,6 +742,9 @@ inline UInt8 * PalNtCurrentTeb()
#define OFFSETOF__TEB__ThreadLocalStoragePointer 0x2c
#elif defined(_AMD64_)
#define OFFSETOF__TEB__ThreadLocalStoragePointer 0x58
+#elif defined(_ARM64_)
+// @TODO: Find out what the offset is for ARM64
+#define OFFSETOF__TEB__ThreadLocalStoragePointer 0x58
#else
#error Unsupported architecture
#endif
@@ -701,6 +790,12 @@ EXTERN_C void __emit(const unsigned __int32 opcode);
#pragma intrinsic(__emit)
#define PalMemoryBarrier() { __emit(0xF3BF); __emit(0x8F5F); }
+#elif defined(_ARM64_)
+
+FORCEINLINE void PalYieldProcessor() {}
+// Using Urcu memory barrier
+#define PalMemoryBarrier() cmm_mb()
+
#else
#error Unsupported architecture
#endif
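Two of the additions above lean on specific toolchains: __getReg is an MSVC-only intrinsic, and cmm_mb() comes from liburcu. On a GCC/Clang AArch64 build, roughly equivalent primitives can be spelled directly (a sketch; the exact instructions those helpers expand to is an assumption, with dmb ish being the customary inner-shareable full barrier):

    #include <cstdint>

    // Read the platform register x18, which Windows reserves for the TEB.
    static inline uint8_t* ReadTebViaX18()
    {
        uint64_t value;
        __asm__ volatile("mov %0, x18" : "=r"(value));
        return reinterpret_cast<uint8_t*>(value);
    }

    // Full memory barrier, in the spirit of liburcu's cmm_mb() on AArch64.
    static inline void FullMemoryBarrier()
    {
        __asm__ volatile("dmb ish" ::: "memory");
    }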
diff --git a/src/Native/Runtime/PalRedhawkCommon.h b/src/Native/Runtime/PalRedhawkCommon.h
index 23af02e3a..76443f9c2 100644
--- a/src/Native/Runtime/PalRedhawkCommon.h
+++ b/src/Native/Runtime/PalRedhawkCommon.h
@@ -15,6 +15,8 @@
#ifndef __PAL_REDHAWK_COMMON_INCLUDED
#define __PAL_REDHAWK_COMMON_INCLUDED
+#include "assert.h"
+
#ifndef GCENV_INCLUDED
// We define the notion of capabilities: optional functionality that the PAL may expose. Use
// PalHasCapability() with the constants below to determine what is supported at runtime.
@@ -62,6 +64,13 @@ struct PAL_LIMITED_CONTEXT
UIntNative GetIp() const { return IP; }
UIntNative GetSp() const { return SP; }
UIntNative GetFp() const { return R7; }
+#elif defined(_ARM64_)
+ // @TODO: Add ARM64 registers
+ UIntNative IP;
+ UIntNative GetIp() const { PORTABILITY_ASSERT("@TODO: FIXME:ARM64"); }
+ UIntNative GetSp() const { PORTABILITY_ASSERT("@TODO: FIXME:ARM64"); }
+ UIntNative GetFp() const { PORTABILITY_ASSERT("@TODO: FIXME:ARM64"); }
+
#else // _ARM_
UIntNative IP;
UIntNative Rsp;
diff --git a/src/Native/Runtime/RHCodeMan.cpp b/src/Native/Runtime/RHCodeMan.cpp
index 651004785..2eebb2c90 100644
--- a/src/Native/Runtime/RHCodeMan.cpp
+++ b/src/Native/Runtime/RHCodeMan.cpp
@@ -107,9 +107,34 @@ void ReportRegisterSet(UInt8 regSet, REGDISPLAY * pContext, GCEnumContext * hCal
if (regSet & CSR_MASK_R8) { ReportObject(hCallback, GetRegObjectAddr<CSR_NUM_R8>(pContext), 0); }
}
+#elif defined(_TARGET_ARM64_)
+#pragma warning(push)
+#pragma warning(disable:4127) // conditional expression is constant
+template <CalleeSavedRegNum regNum>
+PTR_PTR_Object GetRegObjectAddr(REGDISPLAY * pContext)
+{
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+}
+#pragma warning(pop)
+
+PTR_PTR_Object GetRegObjectAddr(CalleeSavedRegNum regNum, REGDISPLAY * pContext)
+{
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+}
+
+PTR_PTR_Object GetScratchRegObjectAddr(ScratchRegNum regNum, REGDISPLAY * pContext)
+{
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+}
+
+void ReportRegisterSet(UInt8 regSet, REGDISPLAY * pContext, GCEnumContext * hCallback)
+{
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+}
-#else // _TARGET_ARM_
+
+#else // _TARGET_ARM_ && _TARGET_ARM64_
#pragma warning(push)
#pragma warning(disable:4127) // conditional expression is constant
@@ -214,6 +239,8 @@ void ReportLocalSlot(UInt32 slotNum, REGDISPLAY * pContext, GCEnumContext * hCal
#ifdef _TARGET_ARM_
// ARM places the FP at the top of the locals area.
rbpOffset = pHeader->GetFrameSize() - ((slotNum + 1) * sizeof(void *));
+#elif defined(_TARGET_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#else
# ifdef _TARGET_AMD64_
if (pHeader->GetFramePointerOffset() != 0)
@@ -578,6 +605,10 @@ bool EECodeManager::UnwindStackFrame(EEMethodInfo * pMethodInfo,
UInt32 codeOffset,
REGDISPLAY * pContext)
{
+#ifdef _TARGET_ARM64_
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+#endif
+
GCInfoHeader * pInfoHeader = pMethodInfo->GetGCInfoHeader();
// We could implement this unwind if we wanted, but there really isn't any reason
@@ -611,6 +642,8 @@ bool EECodeManager::UnwindStackFrame(EEMethodInfo * pMethodInfo,
{
#ifdef _TARGET_ARM_
rawRSP = pContext->GetFP() + pInfoHeader->GetFrameSize();
+#elif defined(_TARGET_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#else
saveSize -= sizeof(void *); // don't count RBP
Int32 framePointerOffset = 0;
@@ -655,6 +688,8 @@ bool EECodeManager::UnwindStackFrame(EEMethodInfo * pMethodInfo,
regIndex++;
RSP = (PTR_UIntNative)((PTR_UInt8)RSP + sizeof(UInt64));
}
+#elif defined(_TARGET_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#endif
#if defined(_TARGET_X86_)
@@ -689,10 +724,12 @@ bool EECodeManager::UnwindStackFrame(EEMethodInfo * pMethodInfo,
if (regMask & CSR_MASK_R9) { pContext->pR9 = RSP++; }
if (regMask & CSR_MASK_R10) { pContext->pR10 = RSP++; }
if (regMask & CSR_MASK_R11) { pContext->pR11 = RSP++; }
+#elif defined(_TARGET_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#endif // _TARGET_AMD64_
}
-#ifndef _TARGET_ARM_
+#if !defined(_TARGET_ARM_) && !defined(_TARGET_ARM64_)
if (ebpFrame)
pContext->pRbp = RSP++;
#endif
@@ -731,6 +768,8 @@ bool EECodeManager::UnwindStackFrame(EEMethodInfo * pMethodInfo,
#ifdef _TARGET_ARM_
RSP += pInfoHeader->ParmRegsPushedCount();
+#elif defined(_TARGET_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#endif
pContext->SetSP((UIntNative) dac_cast<TADDR>(RSP));
@@ -785,6 +824,8 @@ PTR_PTR_VOID EECodeManager::GetReturnAddressLocationForHijack(EEMethodInfo *
// be saved in the prolog.
if (!pHeader->IsRegSaved(CSR_MASK_LR))
return NULL;
+#elif defined(_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#endif // _ARM_
void ** ppvResult;
@@ -796,6 +837,8 @@ PTR_PTR_VOID EECodeManager::GetReturnAddressLocationForHijack(EEMethodInfo *
#ifdef _ARM_
// Disable hijacking from epilogs on ARM until we implement GetReturnAddressLocationFromEpilog.
return NULL;
+#elif defined(_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#else
ppvResult = GetReturnAddressLocationFromEpilog(pHeader, pContext, epilogOffset, epilogSize);
// Early out if GetReturnAddressLocationFromEpilog indicates a non-hijackable epilog (e.g. exception
@@ -812,6 +855,9 @@ PTR_PTR_VOID EECodeManager::GetReturnAddressLocationForHijack(EEMethodInfo *
// address is pushed at [r11, #4].
ppvResult = (void **)((*pContext->pR11) + sizeof(void *));
goto Finished;
+#elif _ARM64_
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+ goto Finished;
#else
// We are in the body of the method, so just find the return address using the unwind info.
@@ -1327,7 +1373,11 @@ void ** EECodeManager::GetReturnAddressLocationFromEpilog(GCInfoHeader * pInfoHe
// Shouldn't be any other instructions in the epilog.
UNREACHABLE_MSG("Unknown epilog instruction");
return NULL;
-#endif // _X86_
+
+#elif defined(_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+
+#endif
}
#ifdef _DEBUG
@@ -1366,6 +1416,12 @@ bool EECodeManager::FindNextEpilog(GCInfoHeader * pInfoHeader, UInt32 methodSize
#ifdef _ARM_
#define IS_FRAMELESS() ((pInfoHeader->GetSavedRegs() & CSR_MASK_LR) == 0)
+#elif defined(_ARM64_)
+inline bool IsFramelessArm64(void)
+{
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+}
+#define IS_FRAMELESS() (IsFramelessArm64())
#else
#define IS_FRAMELESS() (!pInfoHeader->HasFramePointer())
#endif
@@ -1401,6 +1457,8 @@ void CheckHijackInEpilog(GCInfoHeader * pInfoHeader, Code * pEpilog, Code * pEpi
#elif defined(_ARM_)
context.pR11 = &RBP_TEST_VAL;
context.SP = RSP_TEST_VAL;
+#elif defined(_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#endif
context.SetIP((PCODE)pEpilog);
@@ -2047,18 +2105,23 @@ bool VerifyEpilogBytesARM(GCInfoHeader * pInfoHeader, Code * pEpilogStart, UInt3
return false;
}
+#elif defined(_ARM64_)
+bool VerifyEpilogBytesARM64(GCInfoHeader * pInfoHeader, Code * pEpilogStart, UInt32 epilogSize)
+{
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+}
#endif // _ARM_
bool EECodeManager::VerifyEpilogBytes(GCInfoHeader * pInfoHeader, Code * pEpilogStart, UInt32 epilogSize)
{
#ifdef _X86_
return VerifyEpilogBytesX86(pInfoHeader, pEpilogStart, epilogSize);
-#endif // _X86_
-#ifdef _AMD64_
+#elif defined(_AMD64_)
return VerifyEpilogBytesAMD64(pInfoHeader, pEpilogStart, epilogSize);
-#endif // _AMD64_
-#ifdef _ARM_
+#elif defined(_ARM_)
return VerifyEpilogBytesARM(pInfoHeader, pEpilogStart, epilogSize);
+#elif defined(_ARM64_)
+ return VerifyEpilogBytesARM64(pInfoHeader, pEpilogStart, epilogSize);
#endif
}
diff --git a/src/Native/Runtime/StackFrameIterator.cpp b/src/Native/Runtime/StackFrameIterator.cpp
index 88f77c80f..10acfdce8 100644
--- a/src/Native/Runtime/StackFrameIterator.cpp
+++ b/src/Native/Runtime/StackFrameIterator.cpp
@@ -167,6 +167,10 @@ void StackFrameIterator::InternalInit(Thread * pThreadToWalk, PTR_PInvokeTransit
}
m_ControlPC = dac_cast<PTR_VOID>(*(m_RegDisplay.pIP));
+
+#elif defined(_TARGET_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+
#else // _TARGET_ARM_
if (pFrame->m_dwFlags & PTFF_SAVE_RBX) { m_RegDisplay.pRbx = pPreservedRegsCursor++; }
if (pFrame->m_dwFlags & PTFF_SAVE_RSI) { m_RegDisplay.pRsi = pPreservedRegsCursor++; }
@@ -306,6 +310,10 @@ void StackFrameIterator::InternalInit(Thread * pThreadToWalk, PTR_PAL_LIMITED_CO
// scratch regs
//
m_RegDisplay.pR0 = PTR_TO_MEMBER(PAL_LIMITED_CONTEXT, pCtx, R0);
+
+#elif defined(_TARGET_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+
#else // _TARGET_ARM_
//
// preserved regs
@@ -426,6 +434,10 @@ void StackFrameIterator::UpdateFromExceptionDispatch(PTR_StackFrameIterator pSou
m_RegDisplay.pR9 = thisFuncletPtrs.pR9 ;
m_RegDisplay.pR10 = thisFuncletPtrs.pR10;
m_RegDisplay.pR11 = thisFuncletPtrs.pR11;
+
+#elif defined(_TARGET_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+
#else
// Save the preserved regs portion of the REGDISPLAY across the unwind through the C# EH dispatch code.
m_RegDisplay.pRbp = thisFuncletPtrs.pRbp;
@@ -548,6 +560,10 @@ bool StackFrameIterator::HandleFuncletInvokeThunk()
m_RegDisplay.pR9 = SP++;
m_RegDisplay.pR10 = SP++;
m_RegDisplay.pR11 = SP++;
+
+#elif defined(_TARGET_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+
#else
SP = (PTR_UIntNative)(m_RegDisplay.SP);
ASSERT_UNCONDITIONALLY("NYI for this arch");
@@ -569,6 +585,8 @@ bool StackFrameIterator::HandleFuncletInvokeThunk()
#define STACK_ALIGN_SIZE 16
#elif defined(_ARM_)
#define STACK_ALIGN_SIZE 8
+#elif defined(_ARM64_)
+#define STACK_ALIGN_SIZE 16
#elif defined(_X86_)
#define STACK_ALIGN_SIZE 4
#endif
@@ -589,6 +607,12 @@ struct CALL_DESCR_CONTEXT
UIntNative R7;
UIntNative IP;
};
+#elif defined(_TARGET_ARM64_)
+// @TODO: Add ARM64 entries
+struct CALL_DESCR_CONTEXT
+{
+ UIntNative IP;
+};
#elif defined(_TARGET_X86_)
struct CALL_DESCR_CONTEXT
{
@@ -648,6 +672,10 @@ bool StackFrameIterator::HandleCallDescrThunk()
// And adjust SP to be the state that it should be in just after returning from
// the CallDescrFunction
newSP += sizeof(CALL_DESCR_CONTEXT);
+
+#elif defined(_TARGET_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
+
#elif defined(_TARGET_X86_)
// RBP points to the SP that we want to capture. (This arrangement allows for
// the arguments from this function to be loaded into memory with an adjustment
@@ -721,6 +749,8 @@ bool StackFrameIterator::HandleThrowSiteThunk()
m_RegDisplay.pR9 = PTR_TO_MEMBER(PAL_LIMITED_CONTEXT, pContext, R9);
m_RegDisplay.pR10 = PTR_TO_MEMBER(PAL_LIMITED_CONTEXT, pContext, R10);
m_RegDisplay.pR11 = PTR_TO_MEMBER(PAL_LIMITED_CONTEXT, pContext, R11);
+#elif defined(_TARGET_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#elif defined(_TARGET_X86_)
m_RegDisplay.pRbp = PTR_TO_MEMBER(PAL_LIMITED_CONTEXT, pContext, Rbp);
m_RegDisplay.pRdi = PTR_TO_MEMBER(PAL_LIMITED_CONTEXT, pContext, Rdi);
@@ -974,6 +1004,8 @@ KeepUnwinding:
{
#ifdef _TARGET_ARM_
m_pConservativeStackRangeUpperBound = (PTR_RtuObjectRef)*m_RegDisplay.pR11;
+#elif defined(_TARGET_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#else
m_pConservativeStackRangeUpperBound = (PTR_RtuObjectRef)m_RegDisplay.GetFP();
#endif
@@ -1108,6 +1140,8 @@ PTR_VOID StackFrameIterator::AdjustReturnAddressForward(PTR_VOID controlPC)
{
#ifdef _TARGET_ARM_
return (PTR_VOID)(((PTR_UInt8)controlPC) + 2);
+#elif defined(_TARGET_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#else
return (PTR_VOID)(((PTR_UInt8)controlPC) + 1);
#endif
@@ -1116,6 +1150,8 @@ PTR_VOID StackFrameIterator::AdjustReturnAddressBackward(PTR_VOID controlPC)
{
#ifdef _TARGET_ARM_
return (PTR_VOID)(((PTR_UInt8)controlPC) - 2);
+#elif defined(_TARGET_ARM64_)
+ PORTABILITY_ASSERT("@TODO: FIXME:ARM64");
#else
return (PTR_VOID)(((PTR_UInt8)controlPC) - 1);
#endif
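The byte counts in AdjustReturnAddressForward/Backward track instruction encoding: 1 byte on x86/amd64, 2 on ARM (half a Thumb instruction). By the same logic the ARM64 versions would most likely step by the fixed 4-byte A64 instruction size; a hypothetical sketch:

    #include <cstdint>

    // Assumed ARM64 counterparts of AdjustReturnAddressForward/Backward:
    // one A64 instruction is always 4 bytes.
    static void* AdjustReturnAddressForwardArm64(void* controlPC)
    {
        return static_cast<uint8_t*>(controlPC) + 4;
    }

    static void* AdjustReturnAddressBackwardArm64(void* controlPC)
    {
        return static_cast<uint8_t*>(controlPC) - 4;
    }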
diff --git a/src/Native/Runtime/inc/TargetPtrs.h b/src/Native/Runtime/inc/TargetPtrs.h
index 75b3b43bc..d1dbe635b 100644
--- a/src/Native/Runtime/inc/TargetPtrs.h
+++ b/src/Native/Runtime/inc/TargetPtrs.h
@@ -14,6 +14,8 @@ typedef UInt64 UIntTarget;
typedef UInt32 UIntTarget;
#elif defined(_TARGET_ARM_)
typedef UInt32 UIntTarget;
+#elif defined(_TARGET_ARM64_)
+typedef UInt64 UIntTarget;
#else
#error unexpected target architecture
#endif
@@ -84,6 +86,8 @@ typedef UInt64 UIntTarget;
typedef UInt32 UIntTarget;
#elif defined(_TARGET_ARM_)
typedef UInt32 UIntTarget;
+#elif defined(_TARGET_ARM64_)
+typedef UInt64 UIntTarget;
#else
#error unexpected target architecture
#endif
@@ -109,6 +113,8 @@ typedef UInt64 UIntTarget;
typedef UInt32 UIntTarget;
#elif defined(_TARGET_ARM_)
typedef UInt32 UIntTarget;
+#elif defined(_TARGET_ARM64_)
+typedef UInt64 UIntTarget;
#else
#error unexpected target architecture
#endif
diff --git a/src/Native/Runtime/regdisplay.h b/src/Native/Runtime/regdisplay.h
index 477b09a8c..9a796ff9a 100644
--- a/src/Native/Runtime/regdisplay.h
+++ b/src/Native/Runtime/regdisplay.h
@@ -86,6 +86,62 @@ struct REGDISPLAY
inline void SetSP(UIntNative SP) { this->SP = SP; }
};
+#elif defined(_TARGET_ARM64_)
+
+struct REGDISPLAY
+{
+ PTR_UIntNative pX0;
+ PTR_UIntNative pX1;
+ PTR_UIntNative pX2;
+ PTR_UIntNative pX3;
+ PTR_UIntNative pX4;
+ PTR_UIntNative pX5;
+ PTR_UIntNative pX6;
+ PTR_UIntNative pX7;
+ PTR_UIntNative pX8;
+ PTR_UIntNative pX9;
+ PTR_UIntNative pX10;
+ PTR_UIntNative pX11;
+ PTR_UIntNative pX12;
+ PTR_UIntNative pX13;
+ PTR_UIntNative pX14;
+ PTR_UIntNative pX15;
+ PTR_UIntNative pX16;
+ PTR_UIntNative pX17;
+ PTR_UIntNative pX18;
+ PTR_UIntNative pX19;
+ PTR_UIntNative pX20;
+ PTR_UIntNative pX21;
+ PTR_UIntNative pX22;
+ PTR_UIntNative pX23;
+ PTR_UIntNative pX24;
+ PTR_UIntNative pX25;
+ PTR_UIntNative pX26;
+ PTR_UIntNative pX27;
+ PTR_UIntNative pX28;
+ PTR_UIntNative pFP; // X29
+ PTR_UIntNative pLR; // X30
+
+ UIntNative SP;
+ PTR_PCODE pIP;
+ PCODE IP;
+
+ UInt64 D[16-8]; // Only the bottom 64-bit value of the V registers V8..V15 needs to be preserved
+ // (V0-V7 and V16-V31 are not preserved according to the ABI spec).
+ // These need to be unwound during a stack walk
+ // for EH, but not adjusted, so we only need
+ // their values, not their addresses
+
+ inline PCODE GetIP() { return IP; }
+ inline PTR_PCODE GetAddrOfIP() { return pIP; }
+ inline UIntNative GetSP() { return SP; }
+ inline UIntNative GetFP() { return *pFP; }
+
+ inline void SetIP(PCODE IP) { this->IP = IP; }
+ inline void SetAddrOfIP(PTR_PCODE pIP) { this->pIP = pIP; }
+ inline void SetSP(UIntNative SP) { this->SP = SP; }
+};
+
#endif // _X86_ || _AMD64_
typedef REGDISPLAY * PREGDISPLAY;
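A note on the shape of the new struct: the integer callee-saved registers are tracked by address (pX19 ... pLR) so a stack walk can rewrite the saved slots in place, for instance when the GC relocates an object a register points at, whereas V8..V15 are tracked by value in D[] because EH only restores them and never updates them. Reduced to a sketch with hypothetical helper names:

    #include <cstdint>
    typedef uintptr_t UIntNative;  // stand-ins for the runtime's typedefs
    typedef UIntNative* PTR_UIntNative;

    struct Arm64RegDisplaySketch
    {
        PTR_UIntNative pX19; // address of the stack slot holding saved x19
        UIntNative     SP;
    };

    // While unwinding, record where x19 was saved rather than what it held,
    // mirroring e.g. "pContext->pR12 = RSP++" in the amd64 code above.
    static void PopSavedX19(Arm64RegDisplaySketch* ctx, PTR_UIntNative& cursor)
    {
        ctx->pX19 = cursor++;
    }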
diff --git a/src/Native/Runtime/thread.h b/src/Native/Runtime/thread.h
index e5f452a37..aefc7e3d9 100644
--- a/src/Native/Runtime/thread.h
+++ b/src/Native/Runtime/thread.h
@@ -16,7 +16,7 @@ class Thread;
# else // FEATURE_SVR_GC
# define SIZEOF_ALLOC_CONTEXT 28
# endif // FEATURE_SVR_GC
-#elif defined(_AMD64_)
+#elif defined(_AMD64_) || defined(_ARM64_)
# ifdef FEATURE_SVR_GC
# define SIZEOF_ALLOC_CONTEXT 56
# else // FEATURE_SVR_GC
diff --git a/src/Native/Runtime/unix/PalRedhawkUnix.cpp b/src/Native/Runtime/unix/PalRedhawkUnix.cpp
index 38cfa29e5..785327077 100644
--- a/src/Native/Runtime/unix/PalRedhawkUnix.cpp
+++ b/src/Native/Runtime/unix/PalRedhawkUnix.cpp
@@ -14,7 +14,6 @@
#include <PalRedhawkCommon.h>
#include "CommonMacros.h"
#include <sal.h>
-#include "assert.h"
#include "config.h"
#include "UnixHandle.h"